code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# coding: utf-8
# Test run WFlow and LISFLOOD together using GLOFRIM.
# Make sure to first edit the .ini file so that the location of the libraries and model configuration files
# matches your own system!
import xarray as xr
import numpy as np
import rasterio
import sys, os
from datetime import datetime
# import Glofrim
from glofrim import Glofrim
# import barotse utils (first add path to be able to recognize it
sys.path.append('./utils')
import utils
# Setup the Glofrim object with the Glofrim .ini file
# Setup the Glofrim coupled-model object from the Glofrim .ini file
cbmi = Glofrim()
root_dir = os.path.abspath('.')
config_fn = os.path.join(root_dir, 'glofrim_barotse_1way1Donly.ini')
cbmi.logger.info('Reading config for cbmi model from {:s}'.format(config_fn))
cbmi.initialize_config(config_fn)  # , env_fn=env_fn)
# Set the simulation window (one full year)
t_start = datetime(2000, 1, 1)
t_end = datetime(2000, 12, 31)
cbmi.set_start_time(t_start)
cbmi.set_end_time(t_end)
# BUG FIX: the original wrapped two bare `==` expressions in a try/except;
# the comparison results were discarded, so a mismatch could never be caught.
if t_start != cbmi.get_start_time() or t_end != cbmi.get_end_time():
    sys.exit('start or end time differ with set_var and get_var')
print('start time is: {:s}\nEnd time is {:s}'.format(t_start.strftime('%Y-%m-%d %H:%M:%S'), t_end.strftime('%Y-%m-%d %H:%M:%S')))
# Initialize the Glofrim coupled model instance
cbmi.logger.info('Initializing model')
cbmi.initialize_model()
# Run the model for a number of time steps and store results.
# retrieve the subgrid channel elevation
z = cbmi.bmimodels['LFP']._bmi.get_var('SGCz')
# retrieve the DEM
dem = cbmi.bmimodels['LFP']._bmi.get_var('DEM')
# per-timestep accumulators
H = []   # water depth
f = []   # flood depth (above terrain)
c = []   # channel depth (below terrain)
Q = []   # subgrid channel inflow
Qx = []  # x-direction flow, averaged to cell centres
Qy = []  # y-direction flow, averaged to cell centres
time = []
timesteps = 365
cbmi.logger.info('Running 1d experiment for {:d} timesteps'.format(timesteps))
try:
    i = 0
    while i < timesteps:
        print(cbmi.get_current_time())
        cbmi.update()
        time.append(cbmi.get_current_time())
        h = cbmi.get_value('LFP.H')
        # compute the flood_depth (above terrain)
        flood_depth = np.maximum(h+z-dem, 0)
        # compute channel depth (below terrain, only in channels)
        channel_depth = np.minimum(dem-z, h)
        qx = cbmi.get_value('LFP.Qx')
        qy = cbmi.get_value('LFP.Qy')
        # average the staggered face fluxes to cell centres
        qx_mod = 0.5*qx[:-1, :-1]+0.5*qx[:-1, 1:]
        # reverse flow so that positive is northward, and negative is southward
        qy_mod = -0.5*qy[:-1, :-1]-0.5*qy[1:, :-1]
        # append all retrievals
        H.append(h)
        f.append(flood_depth)
        c.append(channel_depth)
        Q.append(cbmi.get_value('LFP.SGCQin'))
        Qx.append(qx_mod)
        Qy.append(qy_mod)
        i += 1
except Exception as e:
    print(e)
    sys.exit('something is going wrong in updating - please check!')
cbmi.logger.info('Setting up output structures')
# set up lists of names for all variables, lists of attributes dictionaries, and list of data
LFP_outputs = ['SGCQin', 'H', 'H_f', 'H_c', 'Qx', 'Qy']
LFP_attrs = [{'units': 'm**3 s**-1',
              'short_name': 'river_flow',
              'long_name': 'River Flow'
              },
             {'units': 'm',
              'short_name': 'water_depth',
              'long_name': 'Water Depth'
              },
             {'units': 'm',
              'short_name': 'water_depth',
              'long_name': 'Water Depth floodplain'
              },
             {'units': 'm',
              'short_name': 'water_depth',
              'long_name': 'Water Depth channel'
              },
             # FIX: the Qx/Qy long_names were copy-pasted ERA5 wind-component
             # labels; these variables are LISFLOOD-FP discharges.
             {'units': 'm**3 s**-1',
              'long_name': 'Flow in x-direction'
              },
             {'units': 'm**3 s**-1',
              'long_name': 'Flow in y-direction'
              }
             ]
datas = [np.array(Q),
         np.array(H),
         np.array(f),
         np.array(c),
         np.array(Qx),
         np.array(Qy),
         ]
# extract x and y axis from grid definition
xi, yi = np.meshgrid(np.arange(Q[0].shape[1]), np.arange(Q[0].shape[0]))
x = rasterio.transform.xy(cbmi.bmimodels['LFP'].grid.transform, yi[0,:].flatten(), xi[0,:].flatten())[0]
y = rasterio.transform.xy(cbmi.bmimodels['LFP'].grid.transform, yi[:,0].flatten(), xi[:,0].flatten())[1]
# put everything together in one ds, and store in file
cbmi.logger.info('Merging outputs to Dataset')
ds = utils.merge_outputs(datas, time, x, y, LFP_outputs, LFP_attrs)
# xr.merge([list_to_dataarray(data, t,xs, ys, name, attrs) for data, name, attrs in zip(datas, LFP_outputs, LFP_attrs)])
fn_out = os.path.abspath('test_oneyear.nc')
cbmi.logger.info('Writing outputs to {:s}'.format(fn_out))
ds.to_netcdf(fn_out)
# close model
cbmi.logger.info('Closing model')
cbmi.finalize()
| [
"datetime.datetime",
"glofrim.Glofrim",
"numpy.minimum",
"os.path.join",
"utils.merge_outputs",
"numpy.array",
"sys.exit",
"os.path.abspath",
"numpy.maximum",
"sys.path.append",
"numpy.arange"
] | [((442, 468), 'sys.path.append', 'sys.path.append', (['"""./utils"""'], {}), "('./utils')\n", (457, 468), False, 'import sys, os\n'), ((544, 553), 'glofrim.Glofrim', 'Glofrim', ([], {}), '()\n', (551, 553), False, 'from glofrim import Glofrim\n'), ((565, 585), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (580, 585), False, 'import sys, os\n'), ((598, 654), 'os.path.join', 'os.path.join', (['root_dir', '"""glofrim_barotse_1way1Donly.ini"""'], {}), "(root_dir, 'glofrim_barotse_1way1Donly.ini')\n", (610, 654), False, 'import sys, os\n'), ((876, 896), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (884, 896), False, 'from datetime import datetime\n'), ((903, 925), 'datetime.datetime', 'datetime', (['(2000)', '(12)', '(31)'], {}), '(2000, 12, 31)\n', (911, 925), False, 'from datetime import datetime\n'), ((4261, 4323), 'utils.merge_outputs', 'utils.merge_outputs', (['datas', 'time', 'x', 'y', 'LFP_outputs', 'LFP_attrs'], {}), '(datas, time, x, y, LFP_outputs, LFP_attrs)\n', (4280, 4323), False, 'import utils\n'), ((4454, 4488), 'os.path.abspath', 'os.path.abspath', (['"""test_oneyear.nc"""'], {}), "('test_oneyear.nc')\n", (4469, 4488), False, 'import sys, os\n'), ((3690, 3701), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (3698, 3701), True, 'import numpy as np\n'), ((3712, 3723), 'numpy.array', 'np.array', (['H'], {}), '(H)\n', (3720, 3723), True, 'import numpy as np\n'), ((3734, 3745), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (3742, 3745), True, 'import numpy as np\n'), ((3756, 3767), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (3764, 3767), True, 'import numpy as np\n'), ((3778, 3790), 'numpy.array', 'np.array', (['Qx'], {}), '(Qx)\n', (3786, 3790), True, 'import numpy as np\n'), ((3801, 3813), 'numpy.array', 'np.array', (['Qy'], {}), '(Qy)\n', (3809, 3813), True, 'import numpy as np\n'), ((3891, 3915), 'numpy.arange', 'np.arange', (['Q[0].shape[1]'], {}), '(Q[0].shape[1])\n', (3900, 
3915), True, 'import numpy as np\n'), ((3917, 3941), 'numpy.arange', 'np.arange', (['Q[0].shape[0]'], {}), '(Q[0].shape[0])\n', (3926, 3941), True, 'import numpy as np\n'), ((1065, 1126), 'sys.exit', 'sys.exit', (['"""start or end time differ with set_var and get_var"""'], {}), "('start or end time differ with set_var and get_var')\n", (1073, 1126), False, 'import sys, os\n'), ((1990, 2016), 'numpy.maximum', 'np.maximum', (['(h + z - dem)', '(0)'], {}), '(h + z - dem, 0)\n', (2000, 2016), True, 'import numpy as np\n'), ((2103, 2125), 'numpy.minimum', 'np.minimum', (['(dem - z)', 'h'], {}), '(dem - z, h)\n', (2113, 2125), True, 'import numpy as np\n'), ((2649, 2713), 'sys.exit', 'sys.exit', (['"""something is going wrong in updating - please check!"""'], {}), "('something is going wrong in updating - please check!')\n", (2657, 2713), False, 'import sys, os\n')] |
import torch
import CONFIG
import numpy as np
import torch.nn as nn
import torchaudio
class DataLoader(torch.utils.data.Dataset):
    """Dataset yielding mel-spectrogram / character-index pairs for audio files."""

    def __init__(self, data, word_to_idx_map, transforms=None):
        # data: 2-column array; column 0 = audio file path, column 1 = transcript
        self.path = data[:, 0]
        self.target_text = data[:, 1]
        self.word_to_idx_map = word_to_idx_map
        self.transforms = transforms

    def __len__(self):
        return len(self.path)

    def __getitem__(self, idx):
        audio_path = self.path[idx]
        transcript = self.target_text[idx]
        # Map every character to its index; spaces go through the '<SPACE>' token.
        target = [self.word_to_idx_map['<SPACE>' if ch == ' ' else ch]
                  for ch in transcript]
        waveform, sample_rate = torchaudio.load(audio_path)
        mel_spectrogram = torchaudio.transforms.MelSpectrogram(
            sample_rate=16000,
            n_mels=CONFIG.n_filters
        )(waveform).squeeze(0)
        if self.transforms:
            # Apply each augmentation with probability CONFIG.transform_threshold.
            for transform in self.transforms:
                draw = np.random.randint(0, 100)
                if draw < CONFIG.transform_threshold*100:
                    mel_spectrogram = transform(mel_spectrogram).squeeze(0)
        return {
            'mel_spect': mel_spectrogram,
            'mel_len': mel_spectrogram.shape[-1]//3,
            'target': torch.tensor(target, dtype=torch.float),
            'target_len': len(target)
        }
class MyCollate:
    """Batch collator: zero-pads spectrograms along time to the batch maximum
    and pads target sequences with ``pad_idx``."""

    def __init__(self, pad_idx):
        # index used to pad the (shorter) target sequences in a batch
        self.pad_idx = pad_idx

    def __call__(self, batch):
        mel_spect = [item['mel_spect'] for item in batch]
        max_len = max([item['mel_spect'].shape[-1] for item in batch])
        target = [item['target'] for item in batch]
        mel_len = [item['mel_len'] for item in batch]
        target_len = [item['target_len'] for item in batch]
        # zero-pad every spectrogram along the time axis to the batch maximum
        mel_spect_ = torch.zeros((len(batch), 1, CONFIG.n_filters, max_len))
        for num, mel in enumerate(mel_spect):
            mel_spect_[num, 0, :, :mel.shape[-1]] = mel
        # BUG FIX: the padding value was hard-coded to 28, silently ignoring
        # the pad_idx the class was constructed with.
        target = torch.nn.utils.rnn.pad_sequence(
            target,
            batch_first=True,
            padding_value=self.pad_idx
        )
        return {
            'mel_spect' : mel_spect_,
            'target' : target,
            'mel_len' : torch.tensor(mel_len),
            'target_len' : torch.tensor(target_len)
        }
if __name__ == "__main__":
    # Smoke test: build a dataset from one LibriSpeech chapter and iterate it.
    import os
    import glob
    import pickle
    import numpy as np
    path = 'input/LibriSpeech/train-clean-100/19/198'
    txt = glob.glob(path + '/*txt')[0]
    # Each transcript line is "<utterance-id> <transcript...>".
    # BUG FIX: the file handle was opened without ever being closed.
    with open(txt) as txt_file:
        lines = txt_file.read().strip().split('\n')
    path_and_label = []
    for line in lines:
        audio_name = line.split()[0]
        audio_file_full_path = os.path.join(path, audio_name + '.flac')
        label = ' '.join(line.split()[1:])
        label = label.lower()
        path_and_label.append([audio_file_full_path, label])
    path_and_label = np.array(path_and_label)
    # BUG FIX: the pickle file handle was also left open.
    with open('input/char_to_idx.pickle', 'rb') as pkl_file:
        char_to_idx = pickle.load(pkl_file)
    data_loader = DataLoader(path_and_label, char_to_idx, None)
    data_loader = torch.utils.data.DataLoader(
        data_loader,
        batch_size=8,
        collate_fn=MyCollate(28)
    )
    for data in data_loader:
        mel_spect = data['mel_spect']
        target = data['target']
        mel_len = data['mel_len']
        target_len = data['target_len']
| [
"torchaudio.transforms.MelSpectrogram",
"torchaudio.load",
"os.path.join",
"torch.nn.utils.rnn.pad_sequence",
"numpy.array",
"torch.tensor",
"numpy.random.randint",
"glob.glob"
] | [((3012, 3036), 'numpy.array', 'np.array', (['path_and_label'], {}), '(path_and_label)\n', (3020, 3036), True, 'import numpy as np\n'), ((793, 814), 'torchaudio.load', 'torchaudio.load', (['path'], {}), '(path)\n', (808, 814), False, 'import torchaudio\n'), ((2147, 2222), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['target'], {'batch_first': '(True)', 'padding_value': '(28)'}), '(target, batch_first=True, padding_value=28)\n', (2178, 2222), False, 'import torch\n'), ((2631, 2656), 'glob.glob', 'glob.glob', (["(path + '/*txt')"], {}), "(path + '/*txt')\n", (2640, 2656), False, 'import glob\n'), ((2816, 2856), 'os.path.join', 'os.path.join', (['path', "(audio_name + '.flac')"], {}), "(path, audio_name + '.flac')\n", (2828, 2856), False, 'import os\n'), ((1430, 1469), 'torch.tensor', 'torch.tensor', (['target'], {'dtype': 'torch.float'}), '(target, dtype=torch.float)\n', (1442, 1469), False, 'import torch\n'), ((2379, 2400), 'torch.tensor', 'torch.tensor', (['mel_len'], {}), '(mel_len)\n', (2391, 2400), False, 'import torch\n'), ((2429, 2453), 'torch.tensor', 'torch.tensor', (['target_len'], {}), '(target_len)\n', (2441, 2453), False, 'import torch\n'), ((1073, 1098), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1090, 1098), True, 'import numpy as np\n'), ((842, 927), 'torchaudio.transforms.MelSpectrogram', 'torchaudio.transforms.MelSpectrogram', ([], {'sample_rate': '(16000)', 'n_mels': 'CONFIG.n_filters'}), '(sample_rate=16000, n_mels=CONFIG.n_filters\n )\n', (878, 927), False, 'import torchaudio\n')] |
"""
Module description ...
Reference:
- Surename1, Forename1 Initials., Surename2, Forename2 Initials, YEAR. Publication/Book title
Publisher, Number(Volume No), pp.142-161.
"""
import sys
import numpy as np
import numpy.ma as ma
from operator import itemgetter
from scipy.integrate import solve_ivp
from functools import reduce
def energy_conservation_condition(phase_space_axes, H0, potential_energy, momentum_sign):
    """
    Solve H0 = V(q) + |p|^2 / 2 for the one momentum axis left undefined.

    Parameters
    ----------
    phase_space_axes : list of 1d arrays
        First half are position axes, second half are momentum axes
        (the axis to be determined should be filled with zeros).
    H0 : float
        Total energy level.
    potential_energy : callable
        Evaluates V on an (n_points, dof) array of positions.
    momentum_sign : int
        -1 or 1; selects the branch of the square root.

    Returns
    -------
    1d numpy array of momentum values (NaN where the energy level is unreachable).
    """
    n_axes = len(phase_space_axes)
    half = int(n_axes/2)
    positions = np.transpose(phase_space_axes[:half])
    momenta = np.transpose(phase_space_axes[half:])
    # kinetic energy already accounted for by the known momentum axes
    kinetic_known = (momenta**2).sum(axis=1)
    residual = 2*(H0 - potential_energy(positions)) - kinetic_known
    return momentum_sign * np.sqrt(residual)
def generate_points(grid_parameters):
    """
    Returns a 1D array of all points on a uniform grid with dimensions and size
    defined by the input parameters, plus a mask of unreachable points.
    An additional dimension initialised with zeros is added for the calculation
    of Lagrangian Descriptors.
    NOTE: For n-DoF systems, currently energy conservation is only used to
    determine a single remaining momentum dimension.
    Parameters
    ----------
    grid_parameters : list (1-DoF systems) or dict (n-DoF systems)
        if 1-DoF, list should have two 3-tuples of floats
        entries are input parameters of limits and size of mesh per axis
        if n-DoF, dict should have the following keys
        * 'slice_parameters' : list, should have two 3-tuples of floats, for a 2D slice
        * 'dims_slice' : list of 0 and 1, ones indicate slice axes
        * 'dims_fixed' : 2-tuple of (list of 0/1 flags, list of fixed values)
        * 'momentum_sign' : int, -1 / 1, for negative/positive momentum for remaining axis
        * 'potential_energy' : func used by energy conservation condition to determine remaining momentum axis
        * 'energy_level' : float, energy value for energy conservation condition
    Returns
    -------
    mesh : 1d numpy array
        flattened array of initial conditions
    mask : bool or 1d numpy array
        True entries flag grid points where the energy level is unreachable
        (always False for the 1-DoF list input)
    """
    if type(grid_parameters) == dict:
        # ---- n-DoF grid: 2D slice + fixed values + energy conservation ----
        slice_parameters = grid_parameters['slice_parameters']
        dims_slice = grid_parameters['dims_slice']
        dims_fixed, dims_fixed_values = grid_parameters['dims_fixed']
        momentum_sign = grid_parameters['momentum_sign']
        potential_energy = grid_parameters['potential_energy']
        H0 = grid_parameters['energy_level']
        N_dim = len(dims_slice)  # Phase-space dimensions
        # Phase space must have an even number of dimensions.
        if N_dim % 2 != 0:
            error_mssg = ("ERROR: Number of phase-space dimensions not even. ",
                          "Check your extra grid parameters")
            print(error_mssg)
            sys.exit()
        # Exactly one dimension may be left for energy conservation.
        dims_remaining = 1 - (np.array(dims_fixed) + np.array(dims_slice))
        if list(dims_remaining).count(1) > 1:
            error_mssg = ("ERROR: More than one remaing dimension. ",
                          "Cannot use Energy conservation to define high-dim grid.")
            print(error_mssg)
            sys.exit()
        # Axes of visualisation slice
        def pass_axis_parameters(parameters): return np.linspace(*parameters)
        points_slice_axes = list(map(pass_axis_parameters, slice_parameters))
        slice_mesh = np.meshgrid(*points_slice_axes)
        dims_slice_axes = [axis.flatten() for axis in slice_mesh]
        N_points_slice = np.prod(list(map(itemgetter(-1), slice_parameters)))
        phase_space_axes = {}
        # Define and sort axes in phase space
        k = 0
        for i in range(N_dim):
            if dims_slice[i] == 1:
                phase_space_axes[i] = dims_slice_axes[k]
                k += 1
        k = 0
        for i in range(N_dim):
            if dims_fixed[i] == 1:
                phase_space_axes[i] = dims_fixed_values[k] * \
                    np.ones(N_points_slice)
                k += 1
        # Set axis to be determined by energy conservation
        idx_dims_H0 = list(set(range(N_dim))-set(phase_space_axes.keys()))[0]
        # The remaining dimension must be a momentum, not a position.
        if idx_dims_H0 < int(N_dim/2):
            error_mssg = ("ERROR: The remaining dimension fall in configuration space.",
                          "Currently, cannot use Energy conservation to define high-dim grid.")
            print(error_mssg)
            sys.exit()
        phase_space_axes[idx_dims_H0] = np.zeros(N_points_slice)
        # List of all phase space axes
        phase_space_axes = [phase_space_axes[i] for i in range(N_dim)]
        # Determine undefined axis via energy conservation
        phase_space_axes[idx_dims_H0] = energy_conservation_condition(
            phase_space_axes, H0, potential_energy, momentum_sign)
        mask = np.isnan(phase_space_axes[idx_dims_H0])  # Mask unreachable grid points
        phase_space_axes[idx_dims_H0] = np.nan_to_num(phase_space_axes[idx_dims_H0])
        # Return array of mesh points for integrator
        lagrangian_descriptor_axis = [np.zeros(N_points_slice)]
        mesh = np.transpose(phase_space_axes + lagrangian_descriptor_axis)
        return mesh.flatten(), mask
    else:
        if len(grid_parameters) > 2:
            error_mssg = ("ERROR: grid_parameters must be a list for 2D slices for 1DoF systems. ",
                          "Provide a dictionary for a higher-dimensional systems.")
            # BUG FIX: the original only composed the message and then fell
            # through to `return mesh.flatten()` with `mesh` undefined,
            # crashing with a NameError. Report the error and exit instead.
            print(error_mssg)
            sys.exit()
        x_min, x_max, Nx = grid_parameters[0]
        y_min, y_max, Ny = grid_parameters[1]
        points_x = np.linspace(x_min, x_max, Nx)
        points_y = np.linspace(y_min, y_max, Ny)
        X, Y = np.meshgrid(points_x, points_y)  # Grid in phase-space
        # 2D grid + a zero column for LDs
        mesh = np.transpose([X.flatten(), Y.flatten(), np.zeros(Nx*Ny)])
        mask = False
        return mesh.flatten(), mask
def perturb_field(vector_field, perturbation):
    """
    Build a vector field with a linearly added perturbation (forcing).

    Both inputs must be callables of signature (t, u) -> ndarray, with t a
    float and u an ndarray; their outputs must have the same shape.

    Parameters
    ----------
    vector_field : function
        Unperturbed vector field.
    perturbation : function
        Forcing added to the vector field.

    Returns
    -------
    function
        Callable (t, u) returning the sum of both fields.
    """
    def forced_field(t, u):
        return vector_field(t, u) + perturbation(t, u)
    return forced_field
def check_if_points_escape_box(u, box_boundaries):
    """
    Determine if points in phase-space u have escaped a box with user-defined dimensions.
    Only the position (first) half of the phase-space coordinates is checked.
    Parameters
    ----------
    u : array_like, shape(n, )
        points in phase-space to check if outside box boundaries
    box_boundaries : list of 2-tuples of floats
        box lower and upper limits along each configuration-space axis
    Returns
    -------
    u_indices : array_like, shape(n, )
        array of True/False bool values if points inside/outside the box
    """
    N_dim = u.shape[-1]
    points_positions = u.T[:int(N_dim/2)]
    if len(points_positions) == len(box_boundaries):
        check = lambda x, box_axis_limits: (box_axis_limits[0]<=x)&(x<=box_axis_limits[1])
        positions_within_box = [check(points_positions[i], box_boundaries[i]) for i in range(int(N_dim/2))]
        # a point is inside only if it is inside along every axis
        u_indices = reduce(lambda x, y: x&y, positions_within_box)
        return u_indices
    else:
        error_mssg = ("ERROR: Number of box axes and configuration space axes do not match. ",
                      "Check the defintion of your box boundaries.")
        # BUG FIX: the message was composed but never shown before exiting.
        print(error_mssg)
        sys.exit()
def lagrangian_descriptor(u, v, p_value = 0.5):
    """
    Evaluate the Lagrangian-descriptor integrand for a batch of points.

    Parameters
    ----------
    u : ndarray, shape(n,2)
        Phase-space points.
    v : ndarray, shape(n,2)
        Vector field at those points.
    p_value : float, optional
        Exponent in the Lagrangian descriptor definition:
        0 gives the action-based LD, 0 < p < 1 the Lp quasinorm,
        1 <= p < 2 the Lp norm, 2 the arclength LD. Default 0.5.

    Returns
    -------
    ndarray, shape(n,)
        Integrand values (zeros for negative p_value).
    """
    if p_value == 0:
        # action-based descriptor: |p * dq/dt|
        return np.abs(u[:,1]*v[:,0])
    if p_value > 0:
        # Lp (quasi)norm of the vector field, summed over components
        return (np.abs(v)**p_value).sum(axis=1)
    # negative exponents are not defined; contribute nothing
    return np.zeros(len(u[:,0]))
def vector_field_flat(t, points, vector_field, p_value, box_boundaries):
    """
    Evaluate the extended vector field (dynamics + LD integrand) on a
    flattened array of points, as required by the ODE integrator.

    Parameters
    ----------
    t : float
        Time.
    points : 1d ndarray
        Flattened array of (phase-space coords + LD accumulator) per point.
    vector_field : function
        User-defined vector field, called only for points still inside the box.
    p_value : float
        Exponent in the Lagrangian descriptor definition (see
        lagrangian_descriptor).
    box_boundaries : list of 2-tuples
        Box boundaries for the escape condition; escaped points get a zero
        vector field and stop accumulating LD.

    Returns
    -------
    1d ndarray
        Flattened derivatives for the integrator.
    """
    n_axes = 2*len(box_boundaries) + 1
    # reshape to one row per point and drop the trailing LD column
    u = points.reshape((-1, n_axes))[:, :-1]
    inside = check_if_points_escape_box(u, box_boundaries)
    # escaped points keep a zero derivative (frozen outside the box)
    v = np.zeros(u.shape)
    v[inside] = vector_field(t, u[inside])
    ld = np.zeros(len(u))
    ld[inside] = lagrangian_descriptor(u[inside], v[inside], p_value)
    return np.column_stack((v, ld)).flatten()
def compute_lagrangian_descriptor(grid_parameters, vector_field, tau, p_value=0.5, box_boundaries=False):
    """
    Returns the values of the LD function from integrated trajectories from initial conditions in phase-space.
    Parameters
    ----------
    grid_parameters : list of 3-tuples of floats
        input parameters of limits and size of mesh per axis
    vector_field: function
        vector field over phase-space
    tau : float
        Upper limit of integration.
    p_value : float, optional
        Exponent in Lagrangian descriptor definition.
        0 is the acton-based LD,
        0 < p_value < 1 is the Lp quasinorm,
        1 <= p_value < 2 is the Lp norm LD,
        2 is the arclength LD.
        The default is 0.5.
    box_boundaries : list of 2-tuples, optional
        Box boundaries for escape condition of variable time integration.
        Boundaries are infinite by default.
    Returns
    -------
    LD : ndarray, shape (Nx, Ny)
        Array of computed Lagrangian descriptor values for all initial conditions.
    """
    # get visualisation slice parameters and number of DoF
    if type(grid_parameters) == dict:
        # n-DoF systems
        slice_parameters = grid_parameters['slice_parameters']  # 2n-D grid
        N_dim = len(grid_parameters['dims_slice'])
    else:
        # 1-DoF systems
        slice_parameters = grid_parameters  # 2-D grid
        N_dim = len(slice_parameters)
    # set boundaries for escape-box condition, if not defined
    if not box_boundaries:
        # BUG FIX: np.infty was removed in NumPy 2.0; np.inf is the portable spelling
        box_boundaries = int(N_dim/2)*[[-np.inf, np.inf]]  # restricted to configuration space
    # solve initial value problem
    f = lambda t, y: vector_field_flat(t, y, vector_field, p_value, box_boundaries)
    y0, mask = generate_points(grid_parameters)
    # mask y0 values at energetically unreachable grid points
    if isinstance(mask, np.ndarray):
        mask_y0 = np.transpose([mask for i in range(N_dim+1)]).flatten()
        y0 = ma.masked_array(y0, mask=mask_y0)
    solution = solve_ivp(f, [0, tau], y0, t_eval=[tau], rtol=1.0e-4)
    LD_values = solution.y[N_dim::N_dim+1]  # values corresponding to LD
    N_points_slice_axes = list(map(itemgetter(-1), slice_parameters))
    LD = np.abs(LD_values).reshape(*N_points_slice_axes)  # reshape to 2-D array
    LD = ma.masked_array(LD, mask=mask)  # mask LD values for slice
    if p_value <= 1:
        return LD
    return LD**(1/p_value)
__author__ = '<NAME>, <NAME>, <NAME>'
__status__ = 'Development'
| [
"numpy.abs",
"numpy.ones",
"functools.reduce",
"scipy.integrate.solve_ivp",
"numpy.column_stack",
"operator.itemgetter",
"numpy.array",
"numpy.zeros",
"numpy.meshgrid",
"numpy.isnan",
"numpy.linspace",
"sys.exit",
"numpy.ma.masked_array",
"numpy.transpose",
"numpy.nan_to_num"
] | [((9799, 9816), 'numpy.zeros', 'np.zeros', (['u.shape'], {}), '(u.shape)\n', (9807, 9816), True, 'import numpy as np\n'), ((10075, 10103), 'numpy.column_stack', 'np.column_stack', (['(v, LD_vec)'], {}), '((v, LD_vec))\n', (10090, 10103), True, 'import numpy as np\n'), ((12163, 12216), 'scipy.integrate.solve_ivp', 'solve_ivp', (['f', '[0, tau]', 'y0'], {'t_eval': '[tau]', 'rtol': '(0.0001)'}), '(f, [0, tau], y0, t_eval=[tau], rtol=0.0001)\n', (12172, 12216), False, 'from scipy.integrate import solve_ivp\n'), ((12457, 12487), 'numpy.ma.masked_array', 'ma.masked_array', (['LD'], {'mask': 'mask'}), '(LD, mask=mask)\n', (12472, 12487), True, 'import numpy.ma as ma\n'), ((3484, 3515), 'numpy.meshgrid', 'np.meshgrid', (['*points_slice_axes'], {}), '(*points_slice_axes)\n', (3495, 3515), True, 'import numpy as np\n'), ((4649, 4673), 'numpy.zeros', 'np.zeros', (['N_points_slice'], {}), '(N_points_slice)\n', (4657, 4673), True, 'import numpy as np\n'), ((5007, 5046), 'numpy.isnan', 'np.isnan', (['phase_space_axes[idx_dims_H0]'], {}), '(phase_space_axes[idx_dims_H0])\n', (5015, 5046), True, 'import numpy as np\n'), ((5106, 5150), 'numpy.nan_to_num', 'np.nan_to_num', (['phase_space_axes[idx_dims_H0]'], {}), '(phase_space_axes[idx_dims_H0])\n', (5119, 5150), True, 'import numpy as np\n'), ((5292, 5351), 'numpy.transpose', 'np.transpose', (['(phase_space_axes + lagrangian_descriptor_axis)'], {}), '(phase_space_axes + lagrangian_descriptor_axis)\n', (5304, 5351), True, 'import numpy as np\n'), ((7595, 7643), 'functools.reduce', 'reduce', (['(lambda x, y: x & y)', 'positions_within_box'], {}), '(lambda x, y: x & y, positions_within_box)\n', (7601, 7643), False, 'from functools import reduce\n'), ((7849, 7859), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7857, 7859), False, 'import sys\n'), ((8498, 8523), 'numpy.abs', 'np.abs', (['(u[:, 1] * v[:, 0])'], {}), '(u[:, 1] * v[:, 0])\n', (8504, 8523), True, 'import numpy as np\n'), ((12109, 12142), 'numpy.ma.masked_array', 
'ma.masked_array', (['y0'], {'mask': 'mask_y0'}), '(y0, mask=mask_y0)\n', (12124, 12142), True, 'import numpy.ma as ma\n'), ((2815, 2825), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2823, 2825), False, 'import sys\n'), ((3257, 3267), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3265, 3267), False, 'import sys\n'), ((3360, 3384), 'numpy.linspace', 'np.linspace', (['*parameters'], {}), '(*parameters)\n', (3371, 3384), True, 'import numpy as np\n'), ((4589, 4599), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4597, 4599), False, 'import sys\n'), ((5251, 5275), 'numpy.zeros', 'np.zeros', (['N_points_slice'], {}), '(N_points_slice)\n', (5259, 5275), True, 'import numpy as np\n'), ((5762, 5791), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'Nx'], {}), '(x_min, x_max, Nx)\n', (5773, 5791), True, 'import numpy as np\n'), ((5815, 5844), 'numpy.linspace', 'np.linspace', (['y_min', 'y_max', 'Ny'], {}), '(y_min, y_max, Ny)\n', (5826, 5844), True, 'import numpy as np\n'), ((5864, 5895), 'numpy.meshgrid', 'np.meshgrid', (['points_x', 'points_y'], {}), '(points_x, points_y)\n', (5875, 5895), True, 'import numpy as np\n'), ((12329, 12343), 'operator.itemgetter', 'itemgetter', (['(-1)'], {}), '(-1)\n', (12339, 12343), False, 'from operator import itemgetter\n'), ((12374, 12391), 'numpy.abs', 'np.abs', (['LD_values'], {}), '(LD_values)\n', (12380, 12391), True, 'import numpy as np\n'), ((2969, 2989), 'numpy.array', 'np.array', (['dims_fixed'], {}), '(dims_fixed)\n', (2977, 2989), True, 'import numpy as np\n'), ((2992, 3012), 'numpy.array', 'np.array', (['dims_slice'], {}), '(dims_slice)\n', (3000, 3012), True, 'import numpy as np\n'), ((3625, 3639), 'operator.itemgetter', 'itemgetter', (['(-1)'], {}), '(-1)\n', (3635, 3639), False, 'from operator import itemgetter\n'), ((4061, 4084), 'numpy.ones', 'np.ones', (['N_points_slice'], {}), '(N_points_slice)\n', (4068, 4084), True, 'import numpy as np\n'), ((6024, 6041), 'numpy.zeros', 'np.zeros', (['(Nx * Ny)'], {}), '(Nx * Ny)\n', 
(6032, 6041), True, 'import numpy as np\n'), ((8560, 8569), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (8566, 8569), True, 'import numpy as np\n')] |
"""
miscelallaneous functions and classes to extract connectivity metrics
Author: <NAME>, PhD [<EMAIL>], https://twitter.com/davemomi
"""
import numpy as np
import pandas as pd
from math import pi
import glob
import seaborn as sns
import matplotlib.pyplot as plt
import bct as bct
class Connectivity_metrics(object):
    def __init__(self, matrices_files, net_label_txt, labels_dic):
        """Store the inputs needed to compute connectivity metrics.

        Parameters
        ----------
        matrices_files : list of str
            Paths to per-subject connectivity matrices, read later with
            pandas as whitespace-separated text files.
        net_label_txt : str
            Path to a text file with one network label per line.
        labels_dic : dict
            Maps a network label to an array of node indices
            (indexed and `.shape[0]`-queried by the metric methods);
            also expected to contain a 'nodes' entry listing all nodes.
        """
        self.matrices_files = matrices_files
        self.net_label_txt = net_label_txt
        self.labels_dic = labels_dic
def nodes_overall_conn(self, make_symmetric=True, upper_threshold=None,
lower_threshold=None):
'''
computing the overall connectivity of each node
regardless of network affiliation
Parameters
----------
make_symmetric: Boolean|
True indicate that the matrix is either upper
or lower triangular and need to be symmetrize
False indicate that the matrix is a full matrix already
upper_threshold: int |
an integer value ranging from 0 to 100 representing the
percentage of values as respect to maximum. The value
under that threshold will be 0 (Default is None)
lower_threshold: int |
an integer value ranging from 0 to 100 representing the
percentage of values as respect to maximum. The value
above that threshold will be 0 (Default is None)
Returns
-------
float data : numpy array |
numpy array (dim number of subject X number of node)
representing the connectivity of each node regardless
of network affiliation
'''
self.nodes_conn = []
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
if make_symmetric==True:
self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
else:
self.matrix = self.matrix
self.max=np.max(self.matrix.flatten())
if upper_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
if lower_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
np.fill_diagonal(self.matrix,0)
for nodes in range(self.matrix.shape[0]):
self._node_conn = np.sum(self.matrix[nodes])
self.nodes_conn.append(self._node_conn)
self.nodes_conn = np.array(self.nodes_conn)
self.nodes_conn = self.nodes_conn.reshape(len(self.matrices_files), self.matrix.shape[0])
return self.nodes_conn
    def node_inner_conn(self, sbj_number, nodes_number, make_symmetric=True,
                        upper_threshold=None, lower_threshold=None):
        '''
        computing the connectivity of each node with its own network
        Parameters
        ----------
        sbj_number: int |
                    number of subjects
        nodes_number: int|
                      number of nodes
        make_symmetric: Boolean|
                        True indicate that the matrix is either upper
                        or lower triangular and need to be symmetrize
                        False indicate that the matrix is a full matrix already
        upper_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values as respect to maximum. The value
                        under that threshold will be 0 (Default is None)
        lower_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values as respect to maximum. The value
                        above that threshold will be 0 (Default is None)
        Returns
        -------
        float data : numpy array |
                    numpy array (dim number of subject X number of node)
                    representing the connectivity of each node with its own
                    network
        '''
        # one network label per line
        with open(self.net_label_txt) as f:
            net=f.read().splitlines()
        # output container: subjects x nodes, filled network by network below
        self.all_conn = np.zeros([sbj_number, nodes_number])
        for subj in range(len(self.matrices_files)):
            # NOTE(review): matrices are read as whitespace-separated text files
            self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
            self.matrix = np.array(self.matrix)
            if make_symmetric==True:
                # mirror the triangular matrix; subtract the diagonal so it
                # is not counted twice
                self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
            else:
                self.matrix = self.matrix
            self.max=np.max(self.matrix.flatten())
            # thresholds are expressed as a percentage of the matrix maximum
            if upper_threshold==None:
                self.matrix= self.matrix
            else:
                self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
            if lower_threshold==None:
                self.matrix= self.matrix
            else:
                self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
            # remove self-connections
            np.fill_diagonal(self.matrix,0)
            for network in net:
                for nodes in self.labels_dic[network]:
                    # average connection strength between this node and all
                    # nodes of its own network
                    self.sub_matrix =self.matrix[nodes]
                    self.streamlines_sum = np.sum(self.sub_matrix[self.labels_dic[network]])
                    self.all_conn[subj, nodes] = self.streamlines_sum/self.labels_dic[network].shape[0]
        return self.all_conn
    def node_outer_conn(self, sbj_number, nodes_number, make_symmetric=True,
                        upper_threshold=None, lower_threshold=None):
        '''
        computing the connectivity of each node with the other nodes
        which don't belong to the same network
        Parameters
        ----------
        sbj_number: int |
                    number of subjects
        nodes_number: int|
                      number of nodes
        make_symmetric: Boolean|
                        True indicate that the matrix is either upper
                        or lower triangular and need to be symmetrize
                        False indicate that the matrix is a full matrix already
        upper_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values as respect to maximum. The value
                        under that threshold will be 0 (Default is None)
        lower_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values as respect to maximum. The value
                        above that threshold will be 0 (Default is None)
        Returns
        -------
        float data : numpy array |
                    numpy array (dim number of subject X number of node)
                    representing the connectivity of each node with regions that
                    are outsite the node's network
        '''
        # one network label per line
        with open(self.net_label_txt) as f:
            net=f.read().splitlines()
        # output container: subjects x nodes
        self.all_conn = np.zeros([sbj_number, nodes_number])
        for subj in range(len(self.matrices_files)):
            # NOTE(review): matrices are read as whitespace-separated text files
            self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
            self.matrix = np.array(self.matrix)
            if make_symmetric==True:
                # mirror the triangular matrix; subtract the diagonal so it
                # is not counted twice
                self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
            else:
                self.matrix = self.matrix
            self.max=np.max(self.matrix.flatten())
            # thresholds are expressed as a percentage of the matrix maximum
            if upper_threshold==None:
                self.matrix= self.matrix
            else:
                self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
            if lower_threshold==None:
                self.matrix= self.matrix
            else:
                self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
            # remove self-connections
            np.fill_diagonal(self.matrix,0)
            # full list of node indices, taken from the special 'nodes' entry
            self.nodes_ranges = np.arange(len(self.labels_dic['nodes']))
            for network in net:
                # indices of all nodes NOT belonging to the current network
                self.outer_idx = np.setdiff1d(self.nodes_ranges, self.labels_dic[network])
                for nodes in self.outer_idx:
                    # average connection strength of this out-of-network node
                    # with the other out-of-network nodes
                    self.sub_matrix =self.matrix[nodes]
                    self.streamlines_sum = np.sum(self.sub_matrix[self.outer_idx])
                    self.all_conn[subj, nodes] = self.streamlines_sum/self.outer_idx.shape[0]
        return self.all_conn
def node_ranking(self, sbj_number, nodes_number, networks_number,
                 make_symmetric=True, upper_threshold=None, lower_threshold=None):
    '''
    computing how much each node is connected with the each network

    Parameters
    ----------
    sbj_number: int |
        number of subjects
    nodes_number: int|
        number of nodes
    networks_number: int|
        number of networks
    make_symmetric: Boolean|
        True indicate that the matrix is either upper
        or lower triangular and need to be symmetrize
        False indicate that the matrix is a full matrix already
    upper_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        under that threshold will be 0 (Default is None)
    lower_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        above that threshold will be 0 (Default is None)

    Returns
    -------
    float data : numpy array |
        numpy a 3D array (dim number of subject X number of node X
        number of network) representing the connectivity of each node
        with all the networks
    '''
    with open(self.net_label_txt) as f:
        net = f.read().splitlines()
    self.all_conn = np.zeros([sbj_number, nodes_number, networks_number])
    for subj in range(len(self.matrices_files)):
        # load the subject's whitespace-separated connectivity matrix
        matrix = np.array(pd.read_csv(self.matrices_files[subj],
                                       sep=' ', header=None))
        if make_symmetric:
            # mirror the triangular matrix without doubling the diagonal
            matrix = matrix + matrix.T - np.diag(matrix.diagonal())
        max_val = np.max(matrix.flatten())
        # percentage-of-maximum thresholding (`is None`, no no-op branches)
        if upper_threshold is not None:
            matrix[matrix < upper_threshold * max_val / 100] = 0
        if lower_threshold is not None:
            matrix[matrix > lower_threshold * max_val / 100] = 0
        np.fill_diagonal(matrix, 0)
        for node in range(matrix.shape[0]):
            node_conn = matrix[node]
            for network in net:
                members = self.labels_dic[network]
                # mean connectivity of this node toward the network's nodes
                self.all_conn[subj, node, net.index(network)] = (
                    np.sum(node_conn[members]) / members.shape[0])
    return self.all_conn
def net_inner_conn(self, make_symmetric=True, upper_threshold=None,
                   lower_threshold=None):
    '''
    computing the how much each network is connected with itself

    Parameters
    ----------
    make_symmetric: Boolean|
        True indicate that the matrix is either upper
        or lower triangular and need to be symmetrize
        False indicate that the matrix is a full matrix already
    upper_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        under that threshold will be 0 (Default is None)
    lower_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        above that threshold will be 0 (Default is None)

    Returns
    -------
    float data : numpy array |
        numpy array (dim number of subject X number of network)
        representing the connectivity of each network with itself
    '''
    with open(self.net_label_txt) as f:
        net = f.read().splitlines()
    self.all_conn = []
    for subj in range(len(self.matrices_files)):
        # load the subject's whitespace-separated connectivity matrix
        matrix = np.array(pd.read_csv(self.matrices_files[subj],
                                       sep=' ', header=None))
        if make_symmetric:
            # mirror the triangular matrix without doubling the diagonal
            matrix = matrix + matrix.T - np.diag(matrix.diagonal())
        max_val = np.max(matrix.flatten())
        # percentage-of-maximum thresholding (`is None`, no no-op branches)
        if upper_threshold is not None:
            matrix[matrix < upper_threshold * max_val / 100] = 0
        if lower_threshold is not None:
            matrix[matrix > lower_threshold * max_val / 100] = 0
        np.fill_diagonal(matrix, 0)
        for network in net:
            members = self.labels_dic[network]
            # within-network submatrix; one np.sum suffices (the original
            # double np.sum was redundant)
            sub_matrix = matrix[members][:, members]
            self.all_conn.append(np.sum(sub_matrix) / len(members))
    self.all_conn = np.array(self.all_conn).reshape(
        len(self.matrices_files), len(net))
    return self.all_conn
def net_outer_conn(self, make_symmetric=True, upper_threshold=None,
                   lower_threshold=None):
    '''
    computing how much each network is connected with the other
    networks

    Parameters
    ----------
    make_symmetric: Boolean|
        True indicate that the matrix is either upper
        or lower triangular and need to be symmetrize
        False indicate that the matrix is a full matrix already
    upper_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        under that threshold will be 0 (Default is None)
    lower_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        above that threshold will be 0 (Default is None)

    Returns
    -------
    float data : numpy array |
        numpy array (dim number of subject X number of network)
        representing the connectivity of each network with other networks
    '''
    with open(self.net_label_txt) as f:
        net = f.read().splitlines()
    self.all_conn = []
    for subj in range(len(self.matrices_files)):
        # load the subject's whitespace-separated connectivity matrix
        matrix = np.array(pd.read_csv(self.matrices_files[subj],
                                       sep=' ', header=None))
        if make_symmetric:
            # mirror the triangular matrix without doubling the diagonal
            matrix = matrix + matrix.T - np.diag(matrix.diagonal())
        max_val = np.max(matrix.flatten())
        # percentage-of-maximum thresholding (`is None`, no no-op branches)
        if upper_threshold is not None:
            matrix[matrix < upper_threshold * max_val / 100] = 0
        if lower_threshold is not None:
            matrix[matrix > lower_threshold * max_val / 100] = 0
        np.fill_diagonal(matrix, 0)
        nodes_ranges = np.arange(len(self.labels_dic['nodes']))
        for network in net:
            # connections from the network's nodes to every node outside it
            outer_idx = np.setdiff1d(nodes_ranges, self.labels_dic[network])
            sub_matrix = matrix[self.labels_dic[network]][:, outer_idx]
            self.all_conn.append(np.sum(sub_matrix) / outer_idx.shape[0])
    self.all_conn = np.array(self.all_conn).reshape(
        len(self.matrices_files), len(net))
    return self.all_conn
def net_ranking(self, sbj_number, nodes_number, make_symmetric=True,
                upper_threshold=None, lower_threshold=None,
                percentage_value=False):
    '''
    computing how much each network is connected with the other networks

    Parameters
    ----------
    sbj_number: int |
        number of subjects
    nodes_number: int|
        number of nodes
    make_symmetric: Boolean|
        True indicate that the matrix is either upper
        or lower triangular and need to be symmetrize
        False indicate that the matrix is a full matrix already
    upper_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        under that threshold will be 0 (Default is None)
    lower_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        above that threshold will be 0 (Default is None)
    percentage_value: Boolean|
        True return values expressed in percentage
        False return raw values

    Returns
    -------
    float data : numpy array |
        numpy 3D array (dim number of subject X number of network X
        number of network) representing the connectivity of each
        network with all the networks
    '''
    with open(self.net_label_txt) as f:
        net = f.read().splitlines()
    # node-level ranking first; one network column per label in `net`
    self.all_conn = self.node_ranking(sbj_number, nodes_number, len(net),
                                      make_symmetric=make_symmetric,
                                      upper_threshold=upper_threshold,
                                      lower_threshold=lower_threshold)
    self.all_conn_rank = np.zeros([sbj_number, len(net), len(net)])
    for subj in range(len(self.matrices_files)):
        subj_conn = self.all_conn[subj, :, :]
        for network in net:
            net_rows = subj_conn[self.labels_dic[network], :]
            # hoisted: the original computed np.mean twice in the
            # percentage branch
            mean_conn = np.mean(net_rows, axis=0)
            if percentage_value:
                mean_conn = 100 * mean_conn / np.sum(mean_conn)
            self.all_conn_rank[subj, net.index(network), :] = mean_conn
    return self.all_conn_rank
def all_standard_metrics(self, sbj_number, nodes_number, networks_number,
                         make_symmetric=True, upper_threshold=None,
                         lower_threshold=None, percentage_value=False):
    '''
    Run every standard connectivity metric of this class and collect the
    results in a single dictionary, keyed by metric name.

    Parameters mirror those of the individual metric methods; all of the
    thresholding/symmetrization options are forwarded unchanged.

    Returns
    -------
    dict |
        keys: nodes_overall_conn, node_inner_conn, node_outer_conn,
        node_ranking, net_inner_conn, net_outer_conn, net_ranking;
        values: the corresponding method's return value
    '''
    # options shared by every metric call
    common = dict(make_symmetric=make_symmetric,
                  upper_threshold=upper_threshold,
                  lower_threshold=lower_threshold)
    self.metrics_dict = {}
    self.metrics_dict["nodes_overall_conn"] = self.nodes_overall_conn(**common)
    self.metrics_dict["node_inner_conn"] = self.node_inner_conn(
        sbj_number, nodes_number, **common)
    self.metrics_dict["node_outer_conn"] = self.node_outer_conn(
        sbj_number, nodes_number, **common)
    self.metrics_dict["node_ranking"] = self.node_ranking(
        sbj_number, nodes_number, networks_number, **common)
    self.metrics_dict["net_inner_conn"] = self.net_inner_conn(**common)
    self.metrics_dict["net_outer_conn"] = self.net_outer_conn(**common)
    self.metrics_dict["net_ranking"] = self.net_ranking(
        sbj_number, nodes_number, percentage_value=percentage_value, **common)
    return self.metrics_dict
class Graph_Theory(object):
    '''
    Graph-theory measures computed with the Brain Connectivity Toolbox
    (bct) on a set of subject-level connectivity matrices.

    Parameters
    ----------
    matrices_files: list |
        paths of the whitespace-separated connectivity matrix files,
        one file per subject
    net_label_txt: str |
        path of a text file with one network label per line
    labels_dic: dict |
        maps each network label to the numpy array of its node indices
    '''

    def __init__(self, matrices_files, net_label_txt, labels_dic):
        self.matrices_files = matrices_files
        self.net_label_txt = net_label_txt
        self.labels_dic = labels_dic

    def _read_networks(self):
        '''Return the list of network labels, one per line of net_label_txt.'''
        with open(self.net_label_txt) as f:
            return f.read().splitlines()

    def _prepare_matrix(self, subj, make_symmetric, upper_threshold,
                        lower_threshold, binarize=False):
        '''
        Load the matrix of subject index `subj` and apply the shared
        preprocessing used by every metric: optional symmetrization,
        percentage-of-maximum thresholding, optional binarization and
        zeroing of the diagonal. This replaces the block that was
        duplicated verbatim in every public method.

        upper_threshold zeroes values BELOW upper_threshold*max/100;
        lower_threshold zeroes values ABOVE lower_threshold*max/100.
        '''
        matrix = np.array(pd.read_csv(self.matrices_files[subj],
                                       sep=' ', header=None))
        if make_symmetric:
            # mirror the triangular matrix without doubling the diagonal
            matrix = matrix + matrix.T - np.diag(matrix.diagonal())
        max_val = np.max(matrix.flatten())
        if upper_threshold is not None:
            matrix[matrix < upper_threshold * max_val / 100] = 0
        if lower_threshold is not None:
            matrix[matrix > lower_threshold * max_val / 100] = 0
        if binarize:
            matrix = bct.algorithms.binarize(matrix)
        np.fill_diagonal(matrix, 0)
        return matrix

    def nodal_degree(self, sbj_number, nodes_number, make_symmetric=True,
                     upper_threshold=None, lower_threshold=None, binarize=False):
        '''
        computing graph theory node measures regardless of network affiliation

        Parameters
        ----------
        sbj_number: int |
            number of subjects
        nodes_number: int|
            number of nodes
        make_symmetric: Boolean|
            True indicate that the matrix is either upper
            or lower triangular and need to be symmetrize
            False indicate that the matrix is a full matrix already
        upper_threshold: int |
            an integer value ranging from 0 to 100 representing the
            percentage of values as respect to maximum. The value
            under that threshold will be 0 (Default is None)
        lower_threshold: int |
            an integer value ranging from 0 to 100 representing the
            percentage of values as respect to maximum. The value
            above that threshold will be 0 (Default is None)
        binarize: Boolean|
            True will make the connectivity matrix binary
            Default is False

        Returns
        -------
        dict: dictonary with the following keys |
            degree: int | Number of links connected to the node
            node_strength_undir: int | sum of weights of links connected
                to the node
            (the docstring previously advertised directed measures that
            are commented out in the implementation)
        '''
        self.all_nodal_degree = {
            "degree": np.zeros([sbj_number, nodes_number]),
            "node_strength_undir": np.zeros([sbj_number, nodes_number])
        }
        for subj in range(len(self.matrices_files)):
            matrix = self._prepare_matrix(subj, make_symmetric,
                                          upper_threshold, lower_threshold,
                                          binarize)
            self.all_nodal_degree['degree'][subj] = \
                bct.algorithms.degrees_und(matrix)
            self.all_nodal_degree['node_strength_undir'][subj] = \
                bct.algorithms.strengths_und(matrix)
        return self.all_nodal_degree

    def network_level_degree(self, sbj_number, nodes_number, label_dic,
                             make_symmetric=True, upper_threshold=None,
                             lower_threshold=None, binarize=False):
        '''
        computing graph theory node measures averaged within each network

        Parameters
        ----------
        sbj_number: int |
            number of subjects
        nodes_number: int|
            number of nodes
        label_dic: dict |
            dictonary computed using files.labels()
        make_symmetric / upper_threshold / lower_threshold / binarize:
            see nodal_degree()

        Returns
        -------
        dict: same keys as nodal_degree() |
            each value is an array (sbj_number X number of networks) with
            the nodal measure averaged over the nodes of each network
        '''
        net = self._read_networks()
        degree = self.nodal_degree(sbj_number, nodes_number,
                                   make_symmetric=make_symmetric,
                                   upper_threshold=upper_threshold,
                                   lower_threshold=lower_threshold,
                                   binarize=binarize)
        keys = list(degree.keys())
        values = np.zeros([sbj_number, len(keys), len(net)])
        for subject in range(sbj_number):
            for key in keys:
                for network in net:
                    # mean of the nodal measure over the network's nodes
                    values[subject, keys.index(key), net.index(network)] = \
                        np.mean(degree[key][subject][label_dic[network]])
        return {key: values[:, keys.index(key), :] for key in keys}

    def physical_connectivity(self, sbj_number, networks_number, label_dic,
                              make_symmetric=True, upper_threshold=None,
                              lower_threshold=None, binarize=False):
        '''
        Density is the fraction of present connections to possible connections.

        Parameters
        ----------
        sbj_number: int |
            number of subjects
        networks_number: int|
            number of networks
        label_dic: dict |
            dictonary computed using files.labels()
        make_symmetric / upper_threshold / lower_threshold / binarize:
            see nodal_degree()

        Returns
        -------
        dict: dictonary with the following keys |
            Density_und: int | Density is the fraction of present
                connections to possible connections

        NOTE: the result is only returned. It is no longer stored in an
        attribute named `physical_connectivity`, which used to shadow this
        method on the instance and made a second call fail.
        '''
        net = self._read_networks()
        results = {"Density_und": np.zeros([sbj_number, networks_number])}
        for subj in range(len(self.matrices_files)):
            matrix = self._prepare_matrix(subj, make_symmetric,
                                          upper_threshold, lower_threshold,
                                          binarize)
            for network in net:
                net_matrix = matrix[label_dic[network]][:, label_dic[network]]
                kden, _, _ = bct.algorithms.density_und(net_matrix)
                results['Density_und'][subj, net.index(network)] = kden
        return results

    def modularity(self, sbj_number, networks_number, label_dic,
                   make_symmetric=True, upper_threshold=None,
                   lower_threshold=None, binarize=False):
        '''
        Computing modularity of the adjencency matrix

        Parameters
        ----------
        sbj_number: int |
            number of subjects
        networks_number: int|
            number of networks
        label_dic: dict |
            dictonary computed using files.labels()
        make_symmetric / upper_threshold / lower_threshold / binarize:
            see nodal_degree()

        Returns
        -------
        dict: dictonary with the following keys |
            community_louvain: int | Modularity values
            similarity_idx: float32 | Values indicating how much each
                original network is similar to the new modules found with
                the modularity algorithm

        NOTE: the result is only returned; it is no longer stored in an
        attribute named `modularity`, which used to shadow this method.
        '''
        net = self._read_networks()
        results = {
            "community_louvain": np.zeros([sbj_number, networks_number]),
            "similarity_idx": np.zeros([sbj_number, networks_number])
        }
        for subj in range(len(self.matrices_files)):
            matrix = self._prepare_matrix(subj, make_symmetric,
                                          upper_threshold, lower_threshold,
                                          binarize)
            for network in net:
                net_matrix = matrix[label_dic[network]][:, label_dic[network]]
                ci, q = bct.algorithms.community_louvain(net_matrix)
                results['community_louvain'][subj, net.index(network)] = q
                modules = np.unique(ci)
                # node indices assigned to each louvain module
                module_idx = {m: np.where(ci == m) for m in modules}
                similarity_matrix = np.zeros([len(net), modules.shape[0]])
                # NOTE(review): the original inner loop reused the name
                # `network`, clobbering the outer loop variable, so the
                # similarity_idx row below was always selected with the
                # LAST network label; renamed to `other_net` to fix it.
                for other_net in net:
                    for module in modules:
                        # assumes louvain module labels are 1..k, hence the
                        # `module - 1` column index — TODO confirm
                        overlap = np.intersect1d(label_dic[other_net],
                                                 module_idx[module])
                        similarity_matrix[net.index(other_net),
                                          module - 1] = overlap.shape[0]
                results['similarity_idx'][subj, net.index(network)] = (
                    np.max(similarity_matrix[net.index(network)])
                    - np.min(similarity_matrix[net.index(network)])
                ) / label_dic[network].shape[0]
        return results

    def centrality(self, sbj_number, nodes_number, atlas,
                   make_symmetric=True, upper_threshold=None,
                   lower_threshold=None, binarize=False):
        '''
        Computing centrality measures of the adjencency matrix

        Parameters
        ----------
        sbj_number: int |
            number of subjects
        nodes_number: int|
            number of nodes
        atlas: excel file |
            please se example available in the repo (e.g. new_atlas_coords.xlsx)
        make_symmetric / upper_threshold / lower_threshold / binarize:
            see nodal_degree()

        Returns
        -------
        dict: dictonary with the following keys |
            edge_betweeness_bin: fraction of all shortest paths containing
                a given edge, on the binarized matrix
            edge_betweeness_wei: same, on the weighted matrix
            eigenvector_centrality_und: eigenvector associated with the
                largest eigenvalue of the adjacency matrix
            coreness_kcoreness_centrality_bu: node coreness (binary)
            kn_kcoreness_centrality_bu: size of the k-core
            module_degree_zscore: within-module degree Z-score
            participation_coef: diversity of intermodular connections
            subgraph_centrality: weighted sum of closed walks starting and
                ending at each node (binary matrix)

        NOTE: the result is only returned; it is no longer stored in an
        attribute named `centrality`, which used to shadow this method.
        '''
        # column 8 of the atlas spreadsheet is used as the a-priori
        # community affiliation of each node — TODO confirm against the file
        atlas_data = np.array(pd.read_excel(atlas, header=None))
        ci_original = atlas_data[:, 8]
        measure_names = ("edge_betweeness_bin", "edge_betweeness_wei",
                         "eigenvector_centrality_und",
                         "coreness_kcoreness_centrality_bu",
                         "kn_kcoreness_centrality_bu",
                         "module_degree_zscore", "participation_coef",
                         "subgraph_centrality")
        results = {name: np.zeros([sbj_number, nodes_number])
                   for name in measure_names}
        for subj in range(len(self.matrices_files)):
            # weighted matrix: symmetrized/thresholded, diagonal zeroed,
            # never binarized (used by the *_wei measures)
            matrix_weight = self._prepare_matrix(subj, make_symmetric,
                                                 upper_threshold,
                                                 lower_threshold,
                                                 binarize=False)
            # binary companion used by the *_bin / *_bu measures
            matrix_bin = bct.algorithms.binarize(matrix_weight)
            # measures that honour the `binarize` flag
            matrix = matrix_bin if binarize else matrix_weight
            results['edge_betweeness_bin'][subj] = \
                bct.betweenness_bin(matrix_bin)
            results['edge_betweeness_wei'][subj] = \
                bct.betweenness_wei(matrix_weight)
            results['eigenvector_centrality_und'][subj] = \
                bct.eigenvector_centrality_und(matrix)
            coreness, kn = bct.kcoreness_centrality_bu(matrix_bin)
            results['coreness_kcoreness_centrality_bu'][subj] = coreness
            results['kn_kcoreness_centrality_bu'][subj] = kn
            results['module_degree_zscore'][subj] = \
                bct.module_degree_zscore(matrix, ci=ci_original)
            results['participation_coef'][subj] = \
                bct.participation_coef(matrix, ci=ci_original)
            results['subgraph_centrality'][subj] = \
                bct.subgraph_centrality(matrix_bin)
        return results
| [
"pandas.read_csv",
"bct.subgraph_centrality",
"numpy.array",
"pandas.read_excel",
"bct.kcoreness_centrality_bu",
"bct.algorithms.strengths_und",
"numpy.mean",
"numpy.where",
"bct.module_degree_zscore",
"bct.betweenness_bin",
"numpy.fill_diagonal",
"bct.algorithms.degrees_und",
"bct.algorithm... | [((2856, 2881), 'numpy.array', 'np.array', (['self.nodes_conn'], {}), '(self.nodes_conn)\n', (2864, 2881), True, 'import numpy as np\n'), ((4515, 4551), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (4523, 4551), True, 'import numpy as np\n'), ((7323, 7359), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (7331, 7359), True, 'import numpy as np\n'), ((10295, 10348), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number, networks_number]'], {}), '([sbj_number, nodes_number, networks_number])\n', (10303, 10348), True, 'import numpy as np\n'), ((14148, 14171), 'numpy.array', 'np.array', (['self.all_conn'], {}), '(self.all_conn)\n', (14156, 14171), True, 'import numpy as np\n'), ((17043, 17066), 'numpy.array', 'np.array', (['self.all_conn'], {}), '(self.all_conn)\n', (17051, 17066), True, 'import numpy as np\n'), ((41382, 41415), 'pandas.read_excel', 'pd.read_excel', (['atlas'], {'header': 'None'}), '(atlas, header=None)\n', (41395, 41415), True, 'import pandas as pd\n'), ((41437, 41457), 'numpy.array', 'np.array', (['self.atlas'], {}), '(self.atlas)\n', (41445, 41457), True, 'import numpy as np\n'), ((1914, 1974), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (1925, 1974), True, 'import pandas as pd\n'), ((2002, 2023), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (2010, 2023), True, 'import numpy as np\n'), ((2626, 2658), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (2642, 2658), True, 'import numpy as np\n'), ((4631, 4691), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (4642, 4691), True, 'import pandas as pd\n'), ((4719, 4740), 'numpy.array', 
'np.array', (['self.matrix'], {}), '(self.matrix)\n', (4727, 4740), True, 'import numpy as np\n'), ((5343, 5375), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (5359, 5375), True, 'import numpy as np\n'), ((7439, 7499), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (7450, 7499), True, 'import pandas as pd\n'), ((7527, 7548), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (7535, 7548), True, 'import numpy as np\n'), ((8151, 8183), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (8167, 8183), True, 'import numpy as np\n'), ((10428, 10488), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (10439, 10488), True, 'import pandas as pd\n'), ((10516, 10537), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (10524, 10537), True, 'import numpy as np\n'), ((11140, 11172), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (11156, 11172), True, 'import numpy as np\n'), ((12980, 13040), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (12991, 13040), True, 'import pandas as pd\n'), ((13068, 13089), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (13076, 13089), True, 'import numpy as np\n'), ((13692, 13724), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (13708, 13724), True, 'import numpy as np\n'), ((15727, 15787), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (15738, 15787), True, 'import 
pandas as pd\n'), ((15815, 15836), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (15823, 15836), True, 'import numpy as np\n'), ((16439, 16471), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (16455, 16471), True, 'import numpy as np\n'), ((23403, 23439), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (23411, 23439), True, 'import numpy as np\n'), ((23864, 23900), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (23872, 23900), True, 'import numpy as np\n'), ((23990, 24050), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (24001, 24050), True, 'import pandas as pd\n'), ((24078, 24099), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (24086, 24099), True, 'import numpy as np\n'), ((24861, 24893), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (24877, 24893), True, 'import numpy as np\n'), ((24917, 24956), 'bct.algorithms.degrees_und', 'bct.algorithms.degrees_und', (['self.matrix'], {}), '(self.matrix)\n', (24943, 24956), True, 'import bct as bct\n'), ((25653, 25694), 'bct.algorithms.strengths_und', 'bct.algorithms.strengths_und', (['self.matrix'], {}), '(self.matrix)\n', (25681, 25694), True, 'import bct as bct\n'), ((30578, 30617), 'numpy.zeros', 'np.zeros', (['[sbj_number, networks_number]'], {}), '([sbj_number, networks_number])\n', (30586, 30617), True, 'import numpy as np\n'), ((30709, 30769), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (30720, 30769), True, 'import pandas as pd\n'), ((30797, 30818), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (30805, 30818), True, 'import 
numpy as np\n'), ((31581, 31613), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (31597, 31613), True, 'import numpy as np\n'), ((33952, 33991), 'numpy.zeros', 'np.zeros', (['[sbj_number, networks_number]'], {}), '([sbj_number, networks_number])\n', (33960, 33991), True, 'import numpy as np\n'), ((34019, 34058), 'numpy.zeros', 'np.zeros', (['[sbj_number, networks_number]'], {}), '([sbj_number, networks_number])\n', (34027, 34058), True, 'import numpy as np\n'), ((34149, 34209), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (34160, 34209), True, 'import pandas as pd\n'), ((34237, 34258), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (34245, 34258), True, 'import numpy as np\n'), ((35020, 35052), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (35036, 35052), True, 'import numpy as np\n'), ((35422, 35440), 'numpy.unique', 'np.unique', (['self.ci'], {}), '(self.ci)\n', (35431, 35440), True, 'import numpy as np\n'), ((41561, 41597), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (41569, 41597), True, 'import numpy as np\n'), ((41630, 41666), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (41638, 41666), True, 'import numpy as np\n'), ((41706, 41742), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (41714, 41742), True, 'import numpy as np\n'), ((41788, 41824), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (41796, 41824), True, 'import numpy as np\n'), ((41864, 41900), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (41872, 41900), True, 'import numpy as np\n'), ((41934, 41970), 
'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (41942, 41970), True, 'import numpy as np\n'), ((42002, 42038), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (42010, 42038), True, 'import numpy as np\n'), ((42071, 42107), 'numpy.zeros', 'np.zeros', (['[sbj_number, nodes_number]'], {}), '([sbj_number, nodes_number])\n', (42079, 42107), True, 'import numpy as np\n'), ((42198, 42258), 'pandas.read_csv', 'pd.read_csv', (['self.matrices_files[subj]'], {'sep': '""" """', 'header': 'None'}), "(self.matrices_files[subj], sep=' ', header=None)\n", (42209, 42258), True, 'import pandas as pd\n'), ((42286, 42307), 'numpy.array', 'np.array', (['self.matrix'], {}), '(self.matrix)\n', (42294, 42307), True, 'import numpy as np\n'), ((42928, 42964), 'bct.algorithms.binarize', 'bct.algorithms.binarize', (['self.matrix'], {}), '(self.matrix)\n', (42951, 42964), True, 'import bct as bct\n'), ((43183, 43215), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix', '(0)'], {}), '(self.matrix, 0)\n', (43199, 43215), True, 'import numpy as np\n'), ((43227, 43263), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix_bin', '(0)'], {}), '(self.matrix_bin, 0)\n', (43243, 43263), True, 'import numpy as np\n'), ((43275, 43314), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.matrix_weight', '(0)'], {}), '(self.matrix_weight, 0)\n', (43291, 43314), True, 'import numpy as np\n'), ((43337, 43373), 'bct.betweenness_bin', 'bct.betweenness_bin', (['self.matrix_bin'], {}), '(self.matrix_bin)\n', (43356, 43373), True, 'import bct as bct\n'), ((43466, 43505), 'bct.betweenness_wei', 'bct.betweenness_wei', (['self.matrix_weight'], {}), '(self.matrix_weight)\n', (43485, 43505), True, 'import bct as bct\n'), ((43597, 43640), 'bct.eigenvector_centrality_und', 'bct.eigenvector_centrality_und', (['self.matrix'], {}), '(self.matrix)\n', (43627, 43640), True, 'import bct as bct\n'), ((43753, 
43797), 'bct.kcoreness_centrality_bu', 'bct.kcoreness_centrality_bu', (['self.matrix_bin'], {}), '(self.matrix_bin)\n', (43780, 43797), True, 'import bct as bct\n'), ((43981, 44039), 'bct.module_degree_zscore', 'bct.module_degree_zscore', (['self.matrix'], {'ci': 'self.ci_original'}), '(self.matrix, ci=self.ci_original)\n', (44005, 44039), True, 'import bct as bct\n'), ((44129, 44185), 'bct.participation_coef', 'bct.participation_coef', (['self.matrix'], {'ci': 'self.ci_original'}), '(self.matrix, ci=self.ci_original)\n', (44151, 44185), True, 'import bct as bct\n'), ((44274, 44314), 'bct.subgraph_centrality', 'bct.subgraph_centrality', (['self.matrix_bin'], {}), '(self.matrix_bin)\n', (44297, 44314), True, 'import bct as bct\n'), ((2747, 2773), 'numpy.sum', 'np.sum', (['self.matrix[nodes]'], {}), '(self.matrix[nodes])\n', (2753, 2773), True, 'import numpy as np\n'), ((8321, 8378), 'numpy.setdiff1d', 'np.setdiff1d', (['self.nodes_ranges', 'self.labels_dic[network]'], {}), '(self.nodes_ranges, self.labels_dic[network])\n', (8333, 8378), True, 'import numpy as np\n'), ((16609, 16666), 'numpy.setdiff1d', 'np.setdiff1d', (['self.nodes_ranges', 'self.labels_dic[network]'], {}), '(self.nodes_ranges, self.labels_dic[network])\n', (16621, 16666), True, 'import numpy as np\n'), ((24751, 24787), 'bct.algorithms.binarize', 'bct.algorithms.binarize', (['self.matrix'], {}), '(self.matrix)\n', (24774, 24787), True, 'import bct as bct\n'), ((31471, 31507), 'bct.algorithms.binarize', 'bct.algorithms.binarize', (['self.matrix'], {}), '(self.matrix)\n', (31494, 31507), True, 'import bct as bct\n'), ((31828, 31871), 'bct.algorithms.density_und', 'bct.algorithms.density_und', (['self.net_matrix'], {}), '(self.net_matrix)\n', (31854, 31871), True, 'import bct as bct\n'), ((34910, 34946), 'bct.algorithms.binarize', 'bct.algorithms.binarize', (['self.matrix'], {}), '(self.matrix)\n', (34933, 34946), True, 'import bct as bct\n'), ((35258, 35307), 'bct.algorithms.community_louvain', 
'bct.algorithms.community_louvain', (['self.net_matrix'], {}), '(self.net_matrix)\n', (35290, 35307), True, 'import bct as bct\n'), ((35541, 35568), 'numpy.where', 'np.where', (['(self.ci == values)'], {}), '(self.ci == values)\n', (35549, 35568), True, 'import numpy as np\n'), ((43072, 43108), 'bct.algorithms.binarize', 'bct.algorithms.binarize', (['self.matrix'], {}), '(self.matrix)\n', (43095, 43108), True, 'import bct as bct\n'), ((5561, 5610), 'numpy.sum', 'np.sum', (['self.sub_matrix[self.labels_dic[network]]'], {}), '(self.sub_matrix[self.labels_dic[network]])\n', (5567, 5610), True, 'import numpy as np\n'), ((8523, 8562), 'numpy.sum', 'np.sum', (['self.sub_matrix[self.outer_idx]'], {}), '(self.sub_matrix[self.outer_idx])\n', (8529, 8562), True, 'import numpy as np\n'), ((11358, 11406), 'numpy.sum', 'np.sum', (['self.node_conn[self.labels_dic[network]]'], {}), '(self.node_conn[self.labels_dic[network]])\n', (11364, 11406), True, 'import numpy as np\n'), ((13955, 13979), 'numpy.sum', 'np.sum', (['self.subj_matrix'], {}), '(self.subj_matrix)\n', (13961, 13979), True, 'import numpy as np\n'), ((16856, 16880), 'numpy.sum', 'np.sum', (['self.subj_matrix'], {}), '(self.subj_matrix)\n', (16862, 16880), True, 'import numpy as np\n'), ((19447, 19476), 'numpy.mean', 'np.mean', (['self.net2use'], {'axis': '(0)'}), '(self.net2use, axis=0)\n', (19454, 19476), True, 'import numpy as np\n'), ((28521, 28575), 'numpy.mean', 'np.mean', (['self.degree[key][subject][label_dic[network]]'], {}), '(self.degree[key][subject][label_dic[network]])\n', (28528, 28575), True, 'import numpy as np\n'), ((35756, 35810), 'numpy.intersect1d', 'np.intersect1d', (['label_dic[network]', 'self.index[module]'], {}), '(label_dic[network], self.index[module])\n', (35770, 35810), True, 'import numpy as np\n'), ((19574, 19603), 'numpy.mean', 'np.mean', (['self.net2use'], {'axis': '(0)'}), '(self.net2use, axis=0)\n', (19581, 19603), True, 'import numpy as np\n'), ((19611, 19640), 'numpy.mean', 
'np.mean', (['self.net2use'], {'axis': '(0)'}), '(self.net2use, axis=0)\n', (19618, 19640), True, 'import numpy as np\n')] |
import sifutil
import numpy as np
import re
import requests
from bs4 import BeautifulSoup
import pandas as pd
import warnings
import numpy as np
# from mpl_toolkits.basemap import Basemap, cm
import math
from math import sin, cos, sqrt
from pyproj import Proj, transform
# requires netcdf4-python (netcdf4-python.googlecode.com)
# from netCDF4 import Dataset as NetCDFFile
import matplotlib.pyplot as plt
from io import StringIO
BASE_CDL_URL = 'https://nassgeodata.gmu.edu/axis2/services/CDLService/GetCDLStat'
CHAMPAIGN = 17019
COORD_INC = 20/3678.
LEFT = -124.45790626020
RIGHT = -69.277252174347
UPPER = 49.999999995507
LOWER = 29.994633289321
def convert_binary(num):
str_ = "{0:b}".format(num)
if len(str_) < 8:
str_ = '0'*(8-len(str_)) + str_
return str_
def get_cloud(num):
str_ = convert_binary(num)
return str_[1], str_[2]
def interpolation(x,y):
x = np.array(x)
matrix = np.array([x**i for i in range(len(x))]).transpose()
print(matrix)
coeffs = la.solve(matrix,y)
return coeffs
def get_smooth_line(x,y):
coeffs = interpolation(x,y)
x_values = np.linspace(min(x), max(x), 100)
y_values = []
for i in x_values:
value = 0
for j in range(len(coeffs)):
value += coeffs[j]*i**j
y_values.append(value)
return [list(x_values), y_values]
def clean_data(time_series, x_values, qc):
good_fpars = []
good_dates = []
for i in range(len(time_series)):
if get_cloud(qc[i])[0] == '0' and get_cloud(qc[i])[1] == '0':
good_fpars.append(time_series[i])
good_dates.append(x_values[i])
return good_dates, good_fpars
def good_qc(qc):
cnt = 0
for i in range(len(qc)):
if get_cloud(qc[i])[0] == '0' and get_cloud(qc[i])[1] == '0':
cnt += 1
return cnt/len(qc)
def coords_to_ind(lon, lat):
lon_diff = lon - LEFT
lat_diff = UPPER - lat
lon_ind = int(lon_diff / COORD_INC)
lat_ind = int(lat_diff / COORD_INC)
return (lon_ind, lat_ind)
def get_by_box(year, llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat):
from io import StringIO
x1, y1 = sifutil.convertProjection(llcrnrlon, llcrnrlat, sifutil.WGS84, sifutil.CONUS_ALBERS)
x2, y2 = sifutil.convertProjection(urcrnrlon, urcrnrlat, sifutil.WGS84, sifutil.CONUS_ALBERS)
print(x1,y1)
# url = BASE_CDL_URL + '?year=' + str(year) + '&bbox=' + str(min(x1,x2)) + "," +\
# str(min(y1, y2)) + "," + str(max(x1, x2)) + "," + str(max(y1, y2)) + "&format=csv"
# # print(url)
# # print('loldashabi ')
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# res = requests.get(url, verify = False)
# returnurl = BeautifulSoup(res.text, 'lxml').find('returnurl').text
# print(returnurl)
# with warnings.catch_warnings():
# rawdata = requests.get(returnurl, verify = False).text
# raw_iter = StringIO(rawdata)
# df = pd.read_csv(raw_iter, sep=" *, * ")\
# .apply(pd.to_numeric, errors='ignore')\
# .set_index("Category")
# return df
def get_fractions(cdl):
total_acre = sum(cdl['Acreage'])
if total_acre == 0:
corn = 0
soy = 0
forest = 0
grass = 0
return
if "Corn" in cdl.index:
corn = cdl['Acreage']['Corn'] / total_acre
else:
corn = 0
if "Soybeans" in cdl.index:
soy = cdl['Acreage']['Soybeans'] / total_acre
else:
soy = 0
pattern = re.compile(r' Forest')
trees = [cdl.index[i] for i in range(len(cdl.index))\
if re.search(pattern, cdl.index[i]) != None]
frst = 0
for tree in trees:
frst += cdl['Acreage'][tree]
forest = frst / total_acre
grass = 1 - (forest + corn + soy)
return np.array([corn, soy, forest, grass])
def fparreg_workflow():
big_mat = get_proportion_matrix()
# rhs = get_fpars('FPAR_A2016361.hdf')
print(big_mat)
save_matrix(big_mat)
mat = load_matrix('dajuzhen2.npy')
print(mat)
def get_proportion_matrix():
from my_functions import get_fractions, get_by_box
mat2 = np.zeros((16,4))
base_lat, base_lon = 38.3, -89.59
base_lat = 40.7
base_lon = -88.2
row = 0
for i in range(4):
cur_lon = base_lon
for j in range(4):
print(base_lat, cur_lon)
mat2[row,:] = get_fractions(get_by_box(2016, cur_lon - 0.01, base_lat, cur_lon, base_lat + 0.01))
cur_lon -= 0.01
print(row)
row += 1
base_lat += 0.01
return mat2
def get_processed_matrix_and_rhs(mat, rhs):
indices = []
for i in range(len(rhs)):
if rhs[i] != 0:
indices.append(i)
indices = np.array(indices)
# print(indices)
return mat[indices, :], rhs[indices]
def save_matrix(mat):
from tempfile import TemporaryFile
outfile = TemporaryFile()
np.save('dajuzhen.npy', mat)
def load_matrix(file):
return np.load(file)
def run_regression():
from my_functions import get_cloud, coords_to_ind
from scipy.optimize import lsq_linear
time_series = np.zeros((4, 45))
ct = 0
x_values = []
prefix ='FPAR_A2016'
suffix = '.hdf'
ct = 0
# print(prefix+suffix)
for i in range(1,361,8):
a = str(int(i))
if i < 10:
a = '00'+ a
elif i < 100:
a = '0' + a
query = prefix + a + suffix
# print(query)
try:
data = SD(query, SDC.READ)
m2 = load_matrix('dajuzhen.npy')
rhs = get_fpars(query)
# print(rhs)
mat2, rhs2 = get_processed_matrix_and_rhs(m2,rhs)
# print(mat2)
# result = np.linalg.lstsq(mat2,rhs2)
# print(result[0])
result = lsq_linear(mat2, rhs2, bounds = (0, 100))
# print(result.x)
# print('result', result[0])
ct += 1
# # print('result', result[0])
time_series[:,ct-1] = np.array(result.x)
x.append(i)
except Exception as e:
print(e)
continue
return x_values, time_series
def convertProjection(x, y, from_crs, to_crs):
inProj = Proj(init=from_crs)
outProj = Proj(init=to_crs)
x2, y2 = transform(inProj, outProj, x, y)
return (x2, y2)
def getCDLprojection(lon,lat):
return sifutil.convertProjection(lon, lat, sifutil.WGS84, sifutil.CONUS_ALBERS)
def getInverseProj(x,y):
return sifutil.invProjection(x,y, sifutil.WGS84, sifutil.CONUS_ALBERS) | [
"sifutil.convertProjection",
"sifutil.invProjection",
"re.compile",
"my_functions.get_cloud",
"pyproj.transform",
"my_functions.get_by_box",
"numpy.array",
"numpy.zeros",
"scipy.optimize.lsq_linear",
"pyproj.Proj",
"tempfile.TemporaryFile",
"numpy.load",
"numpy.save",
"re.search"
] | [((954, 965), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (962, 965), True, 'import numpy as np\n'), ((2280, 2369), 'sifutil.convertProjection', 'sifutil.convertProjection', (['llcrnrlon', 'llcrnrlat', 'sifutil.WGS84', 'sifutil.CONUS_ALBERS'], {}), '(llcrnrlon, llcrnrlat, sifutil.WGS84, sifutil.\n CONUS_ALBERS)\n', (2305, 2369), False, 'import sifutil\n'), ((2379, 2468), 'sifutil.convertProjection', 'sifutil.convertProjection', (['urcrnrlon', 'urcrnrlat', 'sifutil.WGS84', 'sifutil.CONUS_ALBERS'], {}), '(urcrnrlon, urcrnrlat, sifutil.WGS84, sifutil.\n CONUS_ALBERS)\n', (2404, 2468), False, 'import sifutil\n'), ((3670, 3691), 're.compile', 're.compile', (['""" Forest"""'], {}), "(' Forest')\n", (3680, 3691), False, 'import re\n'), ((3972, 4008), 'numpy.array', 'np.array', (['[corn, soy, forest, grass]'], {}), '([corn, soy, forest, grass])\n', (3980, 4008), True, 'import numpy as np\n'), ((4339, 4356), 'numpy.zeros', 'np.zeros', (['(16, 4)'], {}), '((16, 4))\n', (4347, 4356), True, 'import numpy as np\n'), ((4998, 5015), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (5006, 5015), True, 'import numpy as np\n'), ((5162, 5177), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (5175, 5177), False, 'from tempfile import TemporaryFile\n'), ((5183, 5211), 'numpy.save', 'np.save', (['"""dajuzhen.npy"""', 'mat'], {}), "('dajuzhen.npy', mat)\n", (5190, 5211), True, 'import numpy as np\n'), ((5266, 5279), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (5273, 5279), True, 'import numpy as np\n'), ((5428, 5445), 'numpy.zeros', 'np.zeros', (['(4, 45)'], {}), '((4, 45))\n', (5436, 5445), True, 'import numpy as np\n'), ((6663, 6682), 'pyproj.Proj', 'Proj', ([], {'init': 'from_crs'}), '(init=from_crs)\n', (6667, 6682), False, 'from pyproj import Proj, transform\n'), ((6698, 6715), 'pyproj.Proj', 'Proj', ([], {'init': 'to_crs'}), '(init=to_crs)\n', (6702, 6715), False, 'from pyproj import Proj, transform\n'), ((6730, 6762), 'pyproj.transform', 
'transform', (['inProj', 'outProj', 'x', 'y'], {}), '(inProj, outProj, x, y)\n', (6739, 6762), False, 'from pyproj import Proj, transform\n'), ((6834, 6906), 'sifutil.convertProjection', 'sifutil.convertProjection', (['lon', 'lat', 'sifutil.WGS84', 'sifutil.CONUS_ALBERS'], {}), '(lon, lat, sifutil.WGS84, sifutil.CONUS_ALBERS)\n', (6859, 6906), False, 'import sifutil\n'), ((6947, 7011), 'sifutil.invProjection', 'sifutil.invProjection', (['x', 'y', 'sifutil.WGS84', 'sifutil.CONUS_ALBERS'], {}), '(x, y, sifutil.WGS84, sifutil.CONUS_ALBERS)\n', (6968, 7011), False, 'import sifutil\n'), ((6188, 6227), 'scipy.optimize.lsq_linear', 'lsq_linear', (['mat2', 'rhs2'], {'bounds': '(0, 100)'}), '(mat2, rhs2, bounds=(0, 100))\n', (6198, 6227), False, 'from scipy.optimize import lsq_linear\n'), ((6403, 6421), 'numpy.array', 'np.array', (['result.x'], {}), '(result.x)\n', (6411, 6421), True, 'import numpy as np\n'), ((3769, 3801), 're.search', 're.search', (['pattern', 'cdl.index[i]'], {}), '(pattern, cdl.index[i])\n', (3778, 3801), False, 'import re\n'), ((4629, 4697), 'my_functions.get_by_box', 'get_by_box', (['(2016)', '(cur_lon - 0.01)', 'base_lat', 'cur_lon', '(base_lat + 0.01)'], {}), '(2016, cur_lon - 0.01, base_lat, cur_lon, base_lat + 0.01)\n', (4639, 4697), False, 'from my_functions import get_fractions, get_by_box\n'), ((1575, 1591), 'my_functions.get_cloud', 'get_cloud', (['qc[i]'], {}), '(qc[i])\n', (1584, 1591), False, 'from my_functions import get_cloud, coords_to_ind\n'), ((1606, 1622), 'my_functions.get_cloud', 'get_cloud', (['qc[i]'], {}), '(qc[i])\n', (1615, 1622), False, 'from my_functions import get_cloud, coords_to_ind\n'), ((1845, 1861), 'my_functions.get_cloud', 'get_cloud', (['qc[i]'], {}), '(qc[i])\n', (1854, 1861), False, 'from my_functions import get_cloud, coords_to_ind\n'), ((1876, 1892), 'my_functions.get_cloud', 'get_cloud', (['qc[i]'], {}), '(qc[i])\n', (1885, 1892), False, 'from my_functions import get_cloud, coords_to_ind\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains mean absolute error loss function."""
import numpy as np
from .loss_function import LossFunction
class MAE(LossFunction):
"""Class that calculates the Mean Absolute Error loss.
"""
def __init__(self):
super(MAE, self).__init__(name="mae")
@staticmethod
def compute_loss(y_true: np.ndarray, y_pred: np.ndarray):
"""Calculates the Mean Absolute Error loss
Args:
y_true: expected output
y_pred: predictions
Returns:
The mean absolute error of the batch.
"""
return np.abs(y_true - y_pred).mean()
@staticmethod
def gradient(y_true: np.ndarray, y_pred: np.ndarray):
"""Calculates the gradient of the Mean Absolute Error loss with respect to the predictions
Args:
y_true: labels
y_pred: predictions
Returns:
The gradient of the loss with respect to the predictions.
"""
return np.sign(y_pred - y_true)
| [
"numpy.abs",
"numpy.sign"
] | [((1612, 1636), 'numpy.sign', 'np.sign', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (1619, 1636), True, 'import numpy as np\n'), ((1208, 1231), 'numpy.abs', 'np.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (1214, 1231), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from __future__ import division, print_function
from ete3 import Tree
import numpy as np
from numpy import median, mean
import itertools as itl
from math import ceil, log
from string import ascii_uppercase
METRICS = [
'mean',
'median',
'min',
'max',
]
def pwdist(tree):
'''Finds the (off-diagonal) pairwise distances between all tips of `tree`.
'''
dists = []
for a, b in itl.combinations(tree.get_leaf_names(), 2):
a = tree&a
dists.append(a.get_distance(b))
return np.array(dists)
def normalise_tree(tree, to=1.0, metric='mean'):
'''
Normalise branch lengths of `tree` such that `metric` of the pairwise
distances is `to`.
By default, normalise such that the mean of all pairwise distances is 1.0.
'''
dists = pwdist(tree)
assert metric in METRICS
current = eval('{}(dists)'.format(metric))
for node in tree.iter_descendants():
node.dist /= current
return tree
def alphbetise_names(tree):
'''Replace numeric tip labels with alphabetic ones. 1 -> A, 2 -> B etc.
If there are more than 26 tips, labels are AA, AB, ..., ZZ and so forth for
any number of tips.
'''
label_len = ceil(log(len(tree)) / log(26)) # how many letters do we need?
labels = [''.join(letters)
for letters in itl.product(ascii_uppercase, repeat=label_len)]
tiplabels = list(sorted(tree.get_leaf_names(), key=int))
for i, leaf in enumerate(tiplabels):
node = tree&leaf
node.name = labels[i]
return tree
def main(treefile, to, metric):
with open(treefile) as fh:
for treeline in fh:
tree = Tree(treeline)
tree = alphbetise_names(tree)
tree = normalise_tree(tree, to, metric)
print(tree.write(format=5))
CLI = '''
USAGE:
scaletree [options] TREEFILE
OPTIONS:
-t TO Scale tree metric to TO. [default: 1.0]
-m METRIC Metric for scaling. Must be one of mean, min, max.
[default: mean]
'''
if __name__ == '__main__':
from docopt import docopt
opts = docopt(CLI)
treefile = opts['TREEFILE']
to = float(opts['-t'])
metric = opts['-m']
main(treefile, to, metric)
| [
"ete3.Tree",
"itertools.product",
"math.log",
"numpy.array",
"docopt.docopt"
] | [((547, 562), 'numpy.array', 'np.array', (['dists'], {}), '(dists)\n', (555, 562), True, 'import numpy as np\n'), ((2126, 2137), 'docopt.docopt', 'docopt', (['CLI'], {}), '(CLI)\n', (2132, 2137), False, 'from docopt import docopt\n'), ((1252, 1259), 'math.log', 'log', (['(26)'], {}), '(26)\n', (1255, 1259), False, 'from math import ceil, log\n'), ((1353, 1399), 'itertools.product', 'itl.product', (['ascii_uppercase'], {'repeat': 'label_len'}), '(ascii_uppercase, repeat=label_len)\n', (1364, 1399), True, 'import itertools as itl\n'), ((1687, 1701), 'ete3.Tree', 'Tree', (['treeline'], {}), '(treeline)\n', (1691, 1701), False, 'from ete3 import Tree\n')] |
#! /usr/bin/env python
"""
pulse_motion.py
<NAME>, Nov 2020
"""
import rospy
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
import actionlib
import pbr_gazebo.msg
from threading import Thread
from collections import deque
import numpy as np
from numpy import pi, sin
class PulseMotion(object):
def __init__(self, action_name='pulse_motion_server'):
self.default_vel = 0.0
# joint state subscriber
self.x = 0
rospy.Subscriber("/prismatic_box_controller/joint_states", JointState, self.jointstate_cb)
# velocity controller
self.Hz = 200
self.vel_pub = rospy.Publisher('/prismatic_box_controller/prismatic_joint_controller/command', Float64, queue_size=10)
self.vel_command = self.default_vel
self.vel_thread = Thread(target=self.send_vel, args=())
self.vel_thread.daemon = True
self.vel_thread.start()
# pulse motion action server
self._feedback = pbr_gazebo.msg.AFFeedback()
self._result = pbr_gazebo.msg.AFResult()
self._as = actionlib.SimpleActionServer(action_name, pbr_gazebo.msg.AFAction,
execute_cb=self.execute_cb, auto_start=False)
self._as.start()
rospy.loginfo("pulse_motion_planner/" + action_name + " has been initialized!")
def execute_cb(self, goal):
A = goal.A
F = goal.F
rate = rospy.Rate(self.Hz) # Hz
if A*F == 0:
# reset
err = - self.x
errs = deque(maxlen=5)
errs.append(0)
P = 1
I = 0.2
while abs(err)>0.001:
self.vel_command = P*err + I*np.array(errs).mean()
rate.sleep()
err = - self.x
errs.append(err)
self.vel_command = self.default_vel
self._result.success = True
self._as.set_succeeded(self._result)
rospy.loginfo('reset completed')
else:
# pulse motion
# displacement function: d = -A*cos(2*pi*F*t) + A
# velocity function: v = 2*pi*A*F*sin(2*pi*F*t)
# acceleration function: a = 4*pi^2*F^2*A*cos(2*pi*F*t)
print(goal)
T = 1. / F # T is rock displacement period
step_nm = int(T*self.Hz)+1 # self.Hz is control rate;
# step_nm is publishing number for controller
for j in range(step_nm):
t = j*(1./self.Hz)
self.vel_command = 2*pi*A*F*sin(2*pi*F*t)
# print('t', t)
# print('F', F)
# print('A', A)
# print(2*pi*t/F)
# print(self.vel_command)
# print('-')
rate.sleep()
self.vel_command = self.default_vel
self._result.success = True
self._as.set_succeeded(self._result)
rospy.loginfo('pulse motion completed')
def jointstate_cb(self, data):
self.x = data.position[0]
def send_vel(self):
rate = rospy.Rate(self.Hz) # Hz
while not rospy.is_shutdown():
self.vel_pub.publish(self.vel_command)
rate.sleep()
if __name__ == '__main__':
rospy.init_node("pulse_motion_planner", anonymous=False)
pulse_motion_planner = PulseMotion()
try:
rospy.spin()
except rospy.ROSInterruptException:
print("Node killed!")
| [
"collections.deque",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"actionlib.SimpleActionServer",
"numpy.array",
"rospy.Rate",
"rospy.spin",
"numpy.sin",
"threading.Thread",
"rospy.Publisher",
"rospy.loginfo"
] | [((3281, 3337), 'rospy.init_node', 'rospy.init_node', (['"""pulse_motion_planner"""'], {'anonymous': '(False)'}), "('pulse_motion_planner', anonymous=False)\n", (3296, 3337), False, 'import rospy\n'), ((471, 566), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/prismatic_box_controller/joint_states"""', 'JointState', 'self.jointstate_cb'], {}), "('/prismatic_box_controller/joint_states', JointState, self\n .jointstate_cb)\n", (487, 566), False, 'import rospy\n'), ((638, 745), 'rospy.Publisher', 'rospy.Publisher', (['"""/prismatic_box_controller/prismatic_joint_controller/command"""', 'Float64'], {'queue_size': '(10)'}), "('/prismatic_box_controller/prismatic_joint_controller/command',\n Float64, queue_size=10)\n", (653, 745), False, 'import rospy\n'), ((812, 849), 'threading.Thread', 'Thread', ([], {'target': 'self.send_vel', 'args': '()'}), '(target=self.send_vel, args=())\n', (818, 849), False, 'from threading import Thread\n'), ((1080, 1196), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (['action_name', 'pbr_gazebo.msg.AFAction'], {'execute_cb': 'self.execute_cb', 'auto_start': '(False)'}), '(action_name, pbr_gazebo.msg.AFAction,\n execute_cb=self.execute_cb, auto_start=False)\n', (1108, 1196), False, 'import actionlib\n'), ((1274, 1353), 'rospy.loginfo', 'rospy.loginfo', (["('pulse_motion_planner/' + action_name + ' has been initialized!')"], {}), "('pulse_motion_planner/' + action_name + ' has been initialized!')\n", (1287, 1353), False, 'import rospy\n'), ((1441, 1460), 'rospy.Rate', 'rospy.Rate', (['self.Hz'], {}), '(self.Hz)\n', (1451, 1460), False, 'import rospy\n'), ((3107, 3126), 'rospy.Rate', 'rospy.Rate', (['self.Hz'], {}), '(self.Hz)\n', (3117, 3126), False, 'import rospy\n'), ((3396, 3408), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3406, 3408), False, 'import rospy\n'), ((1554, 1569), 'collections.deque', 'deque', ([], {'maxlen': '(5)'}), '(maxlen=5)\n', (1559, 1569), False, 'from collections import deque\n'), ((1978, 
2010), 'rospy.loginfo', 'rospy.loginfo', (['"""reset completed"""'], {}), "('reset completed')\n", (1991, 2010), False, 'import rospy\n'), ((2957, 2996), 'rospy.loginfo', 'rospy.loginfo', (['"""pulse motion completed"""'], {}), "('pulse motion completed')\n", (2970, 2996), False, 'import rospy\n'), ((3151, 3170), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (3168, 3170), False, 'import rospy\n'), ((2563, 2582), 'numpy.sin', 'sin', (['(2 * pi * F * t)'], {}), '(2 * pi * F * t)\n', (2566, 2582), False, 'from numpy import pi, sin\n'), ((1714, 1728), 'numpy.array', 'np.array', (['errs'], {}), '(errs)\n', (1722, 1728), True, 'import numpy as np\n')] |
# Making predictions
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import pickle
features = pickle.load(open('features.pkl', 'rb'))
clf = pickle.load(open('model.pkl', 'rb'))
df = pd.read_csv('report.csv')
x = df[features].values
outcome = ["malicious", "legit"]
x = np.array(x)
try:
print("This file is: "+outcome[list(clf.predict(x))[0]])
except:
print("Something wrong has occurred")
res = []
| [
"numpy.array",
"warnings.filterwarnings",
"pandas.read_csv"
] | [((37, 70), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (60, 70), False, 'import warnings\n'), ((227, 252), 'pandas.read_csv', 'pd.read_csv', (['"""report.csv"""'], {}), "('report.csv')\n", (238, 252), True, 'import pandas as pd\n'), ((314, 325), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (322, 325), True, 'import numpy as np\n')] |
import pickle
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from scipy import interp
lw = 2
X = pickle.load(open('all_outputs_tmp.pkl', 'rb'))
X = np.hstack([np.squeeze(x) for x in X])
y = pickle.load(open('all_targets_tmp.pkl', 'rb'))
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(3):
fpr[i], tpr[i], _ = roc_curve(y==i, X[i, :])
roc_auc[i] = auc(fpr[i], tpr[i])
print(np.hstack([(y==0).astype(int), (y==1).astype(int), (y==2).astype(int)]).shape)
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(label_binarize(y, classes=[0,1,2]).astype(int).ravel(), X.T.ravel()) #np.hstack([(y==0).astype(int),
# (y==1).astype(int),
# (y==2).astype(int)]), X.T.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(3)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(3):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= 3
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='red', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['darkgreen', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(3), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i+1, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| [
"itertools.cycle",
"scipy.interp",
"sklearn.preprocessing.label_binarize",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.squeeze",
"matplotlib.pyplot.figure",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.ylim",
"matplotlib.py... | [((1040, 1071), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], tpr['micro'])\n", (1043, 1071), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1289, 1311), 'numpy.zeros_like', 'np.zeros_like', (['all_fpr'], {}), '(all_fpr)\n', (1302, 1311), True, 'import numpy as np\n'), ((1498, 1529), 'sklearn.metrics.auc', 'auc', (["fpr['macro']", "tpr['macro']"], {}), "(fpr['macro'], tpr['macro'])\n", (1501, 1529), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1553, 1565), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1563, 1565), True, 'import matplotlib.pyplot as plt\n'), ((1958, 2010), 'itertools.cycle', 'cycle', (["['darkgreen', 'darkorange', 'cornflowerblue']"], {}), "(['darkgreen', 'darkorange', 'cornflowerblue'])\n", (1963, 2010), False, 'from itertools import cycle\n'), ((2203, 2241), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {'lw': 'lw'}), "([0, 1], [0, 1], 'k--', lw=lw)\n", (2211, 2241), True, 'import matplotlib.pyplot as plt\n'), ((2242, 2262), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (2250, 2262), True, 'import matplotlib.pyplot as plt\n'), ((2263, 2284), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (2271, 2284), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2318), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2295, 2318), True, 'import matplotlib.pyplot as plt\n'), ((2319, 2351), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (2329, 2351), True, 'import matplotlib.pyplot as plt\n'), ((2434, 2463), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2444, 2463), True, 'import matplotlib.pyplot as plt\n'), ((2464, 2474), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2472, 2474), True, 
'import matplotlib.pyplot as plt\n'), ((517, 543), 'sklearn.metrics.roc_curve', 'roc_curve', (['(y == i)', 'X[i, :]'], {}), '(y == i, X[i, :])\n', (526, 543), False, 'from sklearn.metrics import roc_curve, auc\n'), ((559, 578), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (562, 578), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1347, 1378), 'scipy.interp', 'interp', (['all_fpr', 'fpr[i]', 'tpr[i]'], {}), '(all_fpr, fpr[i], tpr[i])\n', (1353, 1378), False, 'from scipy import interp\n'), ((304, 317), 'numpy.squeeze', 'np.squeeze', (['x'], {}), '(x)\n', (314, 317), True, 'import numpy as np\n'), ((754, 790), 'sklearn.preprocessing.label_binarize', 'label_binarize', (['y'], {'classes': '[0, 1, 2]'}), '(y, classes=[0, 1, 2])\n', (768, 790), False, 'from sklearn.preprocessing import label_binarize\n')] |
from .ConfidenceIntervalsOnlySamples import ConfidenceIntervalsOnlySamples
import numpy as np
class ConfidenceIntervalsOnlySamplesClassification(ConfidenceIntervalsOnlySamples):
    """Confidence-interval statistics for classifiers evaluated via posterior samples.

    Aggregates per-sample class scores over the sample axis, reports the accuracy
    of the "forced" (always-predict) classifier, and the accuracy obtained when a
    prediction is only accepted if one class wins more than a given fraction of
    the samples (a simple Bayesian-defense rejection rule).
    """

    def _stats_and_plot(self, baseName, batch_samples_list, real_valu_list, extra_batch_dict):
        """Write forced and thresholded accuracy statistics to `<baseName>` stats file.

        Args:
            baseName: base name used by `self._create_name` to build the output file.
            batch_samples_list: list of arrays with shape (batch, outputs, samples).
            real_valu_list: list of arrays with the true integer labels, shape (batch,).
            extra_batch_dict: unused here; kept for interface compatibility.
        """
        all_samples = np.concatenate(batch_samples_list, axis=0)
        y = np.concatenate(real_valu_list, axis=0)
        nb, no, ns = all_samples.shape
        # Sum over the sample axis: cumulative_preds[b, c] counts "votes" for class c.
        cumulative_preds = np.sum(all_samples, axis=2)
        predictions_forced = np.argmax(cumulative_preds, axis=1)
        accuracy_forced = np.mean(np.equal(predictions_forced, y)) * 100
        total = len(predictions_forced)
        # Refuse prediction if uncertainty is too high (Bayesian defense).
        fracs = [0.5, 0.7, 0.9]
        accuracy_over_fracs = [
            self._accuracy_over_threshold(cumulative_preds, frac, ns, y) for frac in fracs
        ]
        with open(self._create_name("stats", baseName) + '.txt', 'w') as f:
            f.write("forced -> accuracy: {:} total: {:}\n".format(accuracy_forced, total))
            for frac, (acc_over, tot_over) in zip(fracs, accuracy_over_fracs):
                f.write("over {:} -> accuracy: {:} total: {:}\n".format(frac, acc_over, tot_over))

    def _accuracy_over_threshold(self, cumulative_preds, frac, ns, y):
        """Accuracy over the inputs where some class wins more than `frac` of `ns` samples.

        Returns:
            (accuracy_percent, n_kept): accuracy on the retained inputs and their count.
        """
        threshold = ns * frac
        # BUGFIX: `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent.
        more_than = np.array(cumulative_preds > threshold, dtype=int)
        # Drop rows where no class clears the threshold (prediction refused).
        non_zero_indexes = np.logical_not(np.all(more_than == 0, axis=1))
        predictions_more = np.argmax(more_than[non_zero_indexes], axis=1)
        y_more = y[non_zero_indexes]
        accuracy_more = np.mean(np.equal(predictions_more, y_more)) * 100
        return accuracy_more, len(predictions_more)

    # TODO: decide what to plot for classification — the regression variant uses a
    # triangle/contour plot (plotGTC), which does not apply directly here.
| [
"numpy.argmax",
"numpy.equal",
"numpy.sum",
"numpy.array",
"numpy.concatenate",
"numpy.all"
] | [((300, 342), 'numpy.concatenate', 'np.concatenate', (['batch_samples_list'], {'axis': '(0)'}), '(batch_samples_list, axis=0)\n', (314, 342), True, 'import numpy as np\n'), ((355, 393), 'numpy.concatenate', 'np.concatenate', (['real_valu_list'], {'axis': '(0)'}), '(real_valu_list, axis=0)\n', (369, 393), True, 'import numpy as np\n'), ((462, 489), 'numpy.sum', 'np.sum', (['all_samples'], {'axis': '(2)'}), '(all_samples, axis=2)\n', (468, 489), True, 'import numpy as np\n'), ((520, 555), 'numpy.argmax', 'np.argmax', (['cumulative_preds'], {'axis': '(1)'}), '(cumulative_preds, axis=1)\n', (529, 555), True, 'import numpy as np\n'), ((1411, 1463), 'numpy.array', 'np.array', (['(cumulative_preds > threshold)'], {'dtype': 'np.int'}), '(cumulative_preds > threshold, dtype=np.int)\n', (1419, 1463), True, 'import numpy as np\n'), ((1565, 1611), 'numpy.argmax', 'np.argmax', (['more_than[non_zero_indexes]'], {'axis': '(1)'}), '(more_than[non_zero_indexes], axis=1)\n', (1574, 1611), True, 'import numpy as np\n'), ((1506, 1536), 'numpy.all', 'np.all', (['(more_than == 0)'], {'axis': '(1)'}), '(more_than == 0, axis=1)\n', (1512, 1536), True, 'import numpy as np\n'), ((590, 621), 'numpy.equal', 'np.equal', (['predictions_forced', 'y'], {}), '(predictions_forced, y)\n', (598, 621), True, 'import numpy as np\n'), ((1681, 1715), 'numpy.equal', 'np.equal', (['predictions_more', 'y_more'], {}), '(predictions_more, y_more)\n', (1689, 1715), True, 'import numpy as np\n')] |
import cv2, os, sys
import numpy as np
import imutils as iu
import sudoku_solver as solver
class ocrClass:
    """k-nearest-neighbour digit recogniser trained on pre-extracted glyph samples."""

    def __init__(self):
        # Training data: flattened glyph images and their digit labels.
        train_samples = np.loadtxt('ml/generalsamples.data', np.float32)
        train_labels = np.loadtxt('ml/generalresponses.data', np.float32).reshape((-1, 1))
        # .model uses kNearest to perform OCR
        self.model = cv2.ml.KNearest_create()
        self.model.train(train_samples, cv2.ml.ROW_SAMPLE, train_labels)

    def getNumber(self, img):
        """Classify a single thresholded digit image and return it as an int."""
        # Normalise to the 25x35 size used during training, then flatten to one row.
        query = np.float32(cv2.resize(img, (25, 35)).reshape((1, 875)))
        _retval, results, _neigh_resp, _dists = self.model.findNearest(query, 1)
        return int(results[0][0])
class imageClass:
    """Detects a sudoku grid in a camera frame, extracts cell digits, and
    overlays a solved grid back onto the original frame.

    NOTE(review): `cv2.findContours` is unpacked as a 3-tuple below, which is
    the OpenCV 3.x signature — OpenCV 4.x returns 2 values; confirm the
    installed version before running.
    """

    def __init__(self):
        # .captured is the (possibly downscaled) BGR input frame
        self.captured = []
        #.gray is the grayscale captured image
        self.gray = []
        #.thres is after adaptive thresholding is applied
        self.thresh = []
        #.contours contains information about the contours found in the image
        self.contours = []
        # 450x450 perspective-corrected crops of the grid (thresholded / original)
        self.cuttedThresh = []
        self.cuttedOrig = []
        # the 4 corner points of the detected grid contour
        self.corners = np.array([])

    def captureAndCrop(self, img):
        """Find the sudoku grid in `img`; on success fill .cuttedThresh/.cuttedOrig
        and return the (resized) frame, otherwise return None."""
        height, width = img.shape[:2]
        # cap the working resolution at 800px on the longest side
        if height > 800 or width > 800:
            if height > width:
                self.captured = iu.resize(img, height=800)
            else:
                self.captured = iu.resize(img, width=800)
        else:
            self.captured = img
        self.gray = cv2.cvtColor(self.captured, cv2.COLOR_BGR2GRAY)
        #noise removal with gaussian blur
        self.gray = cv2.GaussianBlur(self.gray,(5,5),0)
        #then do adaptive thresholding
        self.thresh = cv2.adaptiveThreshold(self.gray,255,1,1,11,5)
        #cv2.imwrite('out/threshSudoku.png', self.thresh)
        #find countours in threshold image
        _, contours, _ = cv2.findContours(self.thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # keep the biggest 4-sided contour larger than 40000 px^2 -- the grid
        maxArea = 0
        biggest = None
        for i in contours:
            area = cv2.contourArea(i)
            if area > 40000:
                epsilon = 0.1*cv2.arcLength(i,True)
                approx = cv2.approxPolyDP(i,epsilon,True)
                #cv2.drawContours(self.captured, [i], 0, (0,0,255), 1)
                if area > maxArea and len(approx)==4:
                    maxArea = area
                    biggest = i
                    self.corners = approx
                    # print( area )
        if biggest is not None:
            # warp the grid quadrilateral onto a flat 450x450 square
            pts1 = np.float32(self.rotateCorners(self.corners))
            pts2 = np.float32([[0,0],[0,450],[450,0],[450,450]])
            M = cv2.getPerspectiveTransform(pts1,pts2)
            self.cuttedThresh = cv2.warpPerspective(self.thresh,M,(450,450))
            self.cuttedOrig = cv2.warpPerspective(self.captured,M,(450,450))
            #cv2.drawContours(self.captured, [biggest], 0, (0,255,0), 3)
            #cv2.imwrite('out/contour.png', self.captured)
            cv2.imwrite('out/cuttedThresh.png', self.cuttedThresh)
            return self.captured
        return None

    def readSudoku(self):
        """OCR the 450x450 thresholded crop into a 9x9 array (0 = empty cell)."""
        img = np.zeros([450,450,3],dtype=np.uint8)
        sudoku = np.zeros([9,9],dtype=np.uint32)
        #thresh = cv2.adaptiveThreshold(self.cutted,255,1,1,3,1)
        #morph = cv2.morphologyEx(thresh,cv2.MORPH_ERODE,None,iterations = 0)
        _, contours,_ = cv2.findContours(self.cuttedThresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        ocr = ocrClass()
        fieldCount = 0
        for i in contours:
            area = cv2.contourArea(i)
            if area > 50:
                [x,y,w,h] = cv2.boundingRect(i)
                # size filter: keep only digit-sized bounding boxes
                if h > 15 and h < 45 and w > 8 and w < 45:
                    fieldCount += 1
                    roi = self.cuttedThresh[y:y+h,x:x+w]
                    num = ocr.getNumber(roi)
                    # map the digit's centre to its 9x9 grid cell (50 px per cell)
                    sudox = int((x+(w/2))//50)
                    sudoy = int((y+(h/2))//50)
                    sudoku[sudoy][sudox] = num
                    #cv2.imwrite('out/fields/' + str(num) + '/' + str(fieldCount) +'.png', roi)
            #cv2.drawContours(img, [i], 0, (255,255,255), 1)
        #cv2.imwrite('out/contours.png', img)
        #cv2.imwrite('out/thresh.png', thresh)
        #print ("%i numbers recognized"%fieldCount)
        #print ("sudoku:\n", sudoku)
        return sudoku

    def writeSudoku(self, sudoku):
        """Render the digits of `sudoku` onto the original frame, warped back into
        the detected grid's perspective, and return the blended frame."""
        #solutionImg = np.zeros((450, 450, 4), dtype=np.uint8)
        solutionImg = cv2.cvtColor(self.cuttedOrig, cv2.COLOR_RGB2RGBA)
        #solutionImg = self.cuttedOrig
        for y in range(9):
            for x in range(9):
                num = sudoku[y][x]
                if num != 0:
                    # pixel position of the digit inside its 50x50 cell
                    sx = x * 50 + 15
                    sy = y * 50 + 38
                    cv2.putText(solutionImg,str(num),(sx,sy), 0 , 1, (0,0,0, 255), 2, 2)
        cv2.imwrite("out/onlySolution.png", solutionImg)
        # inverse perspective transform: flat 450x450 square back onto the frame
        pts1 = np.float32(self.rotateCorners(self.corners))
        pts2 = np.float32([[0,0],[0,450],[450,0],[450,450]])
        M = cv2.getPerspectiveTransform(pts2,pts1)
        width, height = self.captured.shape[:2]
        solutionImg = cv2.warpPerspective(solutionImg,M,(height,width))
        solution = self.captured
        y1, y2 = 0,0 +solutionImg.shape[0]
        x1, x2 = 0,0 +solutionImg.shape[1]
        # alpha-blend the overlay using its alpha channel
        alpha_s = solutionImg[:, :, 3] / 255.0
        alpha_l = 1.0 - alpha_s
        for c in range(0, 3):
            solution[y1:y2, x1:x2, c] = (alpha_s * solutionImg[:, :, c] +
                                         alpha_l * solution[y1:y2, x1:x2, c])
        return solution

    def invertSudoku(self, sudoku, solution):
        # set all values in the solution which were given in the start sudoku to 0
        # (so only newly solved digits get drawn on the frame)
        for row in range(9):
            for val in range(9):
                if sudoku[row][val] != 0:
                    solution[row][val] = 0
        return solution

    def rotateCorners(self, corners):
        # rotates the values of corners always in the same order
        # top-left, bottom-left, top-right, bottom-right
        tl = None # top left
        bl = None # bottom left
        tr = None # top right
        br = None # bottom right
        # getting the tl and br by getting the smallest
        # and biggest sum of the corner tupel
        biggest = 0
        smallest = 1000000
        rest = []
        for corner in corners:
            added = corner[0][0] + corner[0][1]
            if added > biggest:
                biggest = added
                br = corner[0]
            if added < smallest:
                smallest = added
                tl = corner[0]
        # getting the bl and tr corners
        for corner in corners:
            if not np.array_equal(corner[0], br) and not np.array_equal(corner[0], tl):
                rest.append(corner[0])
        # NOTE(review): if len(rest) != 2 (degenerate quadrilateral), bl/tr stay
        # None and downstream float conversion will fail -- verify upstream filter.
        if len(rest) == 2:
            if rest[0][0] > rest[1][0]:
                bl = rest[1]
                tr = rest[0]
            else:
                bl = rest[0]
                tr = rest[1]
        #print ("top-left: %a"%tl)
        #print ("bottom-left: %a"%bl)
        #print ("top-right: %a"%tr)
        #print ("bottom-right: %a"%br)
        return [[tl], [bl], [tr], [br]]
def getSolvedImage(img):
    """Detect, read and solve the sudoku in `img`.

    Returns the frame with the solution overlaid, or False when no grid is
    found or the puzzle cannot be solved.
    """
    processor = imageClass()
    if img is None:
        return False
    if processor.captureAndCrop(img) is None:
        return False
    puzzle = processor.readSudoku()
    if puzzle is None:
        return False
    solved_grid = solver.solve(solver.sudokuToGrid(puzzle))
    if not solved_grid:
        return False
    full_solution = solver.gridToSudoku(solved_grid)
    # Only overlay digits that were not part of the original puzzle.
    overlay = processor.invertSudoku(puzzle, full_solution)
    return processor.writeSudoku(overlay)
def main():
    """Solve sudokus live from a webcam (``script.py cam``) or from an image path.

    In camera mode every frame is scanned; whenever a grid is detected, the
    (possibly solved) frame is saved to out/solution.png and shown in a preview
    window. Press ESC to quit either mode.
    """
    image = imageClass()
    if sys.argv[1] == "cam":
        cv2.namedWindow("preview")
        vc = cv2.VideoCapture(0)
        if vc.isOpened():  # try to get the first frame
            rval, frame = vc.read()
        else:
            rval = False
        while rval:
            img = image.captureAndCrop(frame)
            if img is not None:
                sudoku = image.readSudoku()
                if sudoku is not None:
                    grid = solver.sudokuToGrid(sudoku)
                    solvedGrid = solver.solve(grid)
                    if solvedGrid:
                        solution = solver.gridToSudoku(solvedGrid)
                        inverted = image.invertSudoku(sudoku, solution)
                        frame = image.writeSudoku(inverted)
                    cv2.imwrite("out/solution.png", frame)
            # BUGFIX: the preview/read/key-poll lines below used to sit inside the
            # `if img is not None:` branch, so the loop froze on the same frame
            # whenever no sudoku grid was detected. They now run every iteration.
            # (Also removed the unused `scanFrameNumber` and `frameCount` locals.)
            frame = iu.resize(frame, width=800)
            cv2.imshow("preview", frame)
            rval, frame = vc.read()
            key = cv2.waitKey(20)
            if key == 27:  # exit on ESC
                break
        cv2.destroyWindow("preview")
    else:
        img = cv2.imread(sys.argv[1])
        solvedImg = getSolvedImage(img)
        cv2.imshow("ref", solvedImg)
        key = cv2.waitKey(0)
        if key == 27:
            sys.exit()

if __name__ == '__main__': main()
| [
"sudoku_solver.sudokuToGrid",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"sudoku_solver.gridToSudoku",
"cv2.approxPolyDP",
"sys.exit",
"cv2.arcLength",
"cv2.contourArea",
"cv2.ml.KNearest_create",
"cv2.waitKey",
"cv2.getPerspectiveTransform",
"cv2.cvtColor",
"cv2.resize",
"cv2.G... | [((151, 199), 'numpy.loadtxt', 'np.loadtxt', (['"""ml/generalsamples.data"""', 'np.float32'], {}), "('ml/generalsamples.data', np.float32)\n", (161, 199), True, 'import numpy as np\n'), ((219, 269), 'numpy.loadtxt', 'np.loadtxt', (['"""ml/generalresponses.data"""', 'np.float32'], {}), "('ml/generalresponses.data', np.float32)\n", (229, 269), True, 'import numpy as np\n'), ((393, 417), 'cv2.ml.KNearest_create', 'cv2.ml.KNearest_create', ([], {}), '()\n', (415, 417), False, 'import cv2, os, sys\n'), ((527, 552), 'cv2.resize', 'cv2.resize', (['img', '(25, 35)'], {}), '(img, (25, 35))\n', (537, 552), False, 'import cv2, os, sys\n'), ((611, 631), 'numpy.float32', 'np.float32', (['roismall'], {}), '(roismall)\n', (621, 631), True, 'import numpy as np\n'), ((1196, 1208), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1204, 1208), True, 'import numpy as np\n'), ((1565, 1612), 'cv2.cvtColor', 'cv2.cvtColor', (['self.captured', 'cv2.COLOR_BGR2GRAY'], {}), '(self.captured, cv2.COLOR_BGR2GRAY)\n', (1577, 1612), False, 'import cv2, os, sys\n'), ((1676, 1714), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.gray', '(5, 5)', '(0)'], {}), '(self.gray, (5, 5), 0)\n', (1692, 1714), False, 'import cv2, os, sys\n'), ((1773, 1823), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['self.gray', '(255)', '(1)', '(1)', '(11)', '(5)'], {}), '(self.gray, 255, 1, 1, 11, 5)\n', (1794, 1823), False, 'import cv2, os, sys\n'), ((1947, 2016), 'cv2.findContours', 'cv2.findContours', (['self.thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(self.thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1963, 2016), False, 'import cv2, os, sys\n'), ((3206, 3245), 'numpy.zeros', 'np.zeros', (['[450, 450, 3]'], {'dtype': 'np.uint8'}), '([450, 450, 3], dtype=np.uint8)\n', (3214, 3245), True, 'import numpy as np\n'), ((3260, 3293), 'numpy.zeros', 'np.zeros', (['[9, 9]'], {'dtype': 'np.uint32'}), '([9, 9], dtype=np.uint32)\n', (3268, 3293), True, 'import numpy as np\n'), ((3460, 
3535), 'cv2.findContours', 'cv2.findContours', (['self.cuttedThresh', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(self.cuttedThresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (3476, 3535), False, 'import cv2, os, sys\n'), ((4556, 4605), 'cv2.cvtColor', 'cv2.cvtColor', (['self.cuttedOrig', 'cv2.COLOR_RGB2RGBA'], {}), '(self.cuttedOrig, cv2.COLOR_RGB2RGBA)\n', (4568, 4605), False, 'import cv2, os, sys\n'), ((4941, 4989), 'cv2.imwrite', 'cv2.imwrite', (['"""out/onlySolution.png"""', 'solutionImg'], {}), "('out/onlySolution.png', solutionImg)\n", (4952, 4989), False, 'import cv2, os, sys\n'), ((5066, 5118), 'numpy.float32', 'np.float32', (['[[0, 0], [0, 450], [450, 0], [450, 450]]'], {}), '([[0, 0], [0, 450], [450, 0], [450, 450]])\n', (5076, 5118), True, 'import numpy as np\n'), ((5125, 5164), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts2', 'pts1'], {}), '(pts2, pts1)\n', (5152, 5164), False, 'import cv2, os, sys\n'), ((5252, 5304), 'cv2.warpPerspective', 'cv2.warpPerspective', (['solutionImg', 'M', '(height, width)'], {}), '(solutionImg, M, (height, width))\n', (5271, 5304), False, 'import cv2, os, sys\n'), ((8134, 8160), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview"""'], {}), "('preview')\n", (8149, 8160), False, 'import cv2, os, sys\n'), ((8174, 8193), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (8190, 8193), False, 'import cv2, os, sys\n'), ((9170, 9198), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""preview"""'], {}), "('preview')\n", (9187, 9198), False, 'import cv2, os, sys\n'), ((9224, 9247), 'cv2.imread', 'cv2.imread', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (9234, 9247), False, 'import cv2, os, sys\n'), ((9342, 9370), 'cv2.imshow', 'cv2.imshow', (['"""ref"""', 'solvedImg'], {}), "('ref', solvedImg)\n", (9352, 9370), False, 'import cv2, os, sys\n'), ((9384, 9398), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (9395, 9398), False, 'import cv2, os, sys\n'), ((2107, 2125), 
'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (2122, 2125), False, 'import cv2, os, sys\n'), ((2655, 2707), 'numpy.float32', 'np.float32', (['[[0, 0], [0, 450], [450, 0], [450, 450]]'], {}), '([[0, 0], [0, 450], [450, 0], [450, 450]])\n', (2665, 2707), True, 'import numpy as np\n'), ((2718, 2757), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (2745, 2757), False, 'import cv2, os, sys\n'), ((2789, 2836), 'cv2.warpPerspective', 'cv2.warpPerspective', (['self.thresh', 'M', '(450, 450)'], {}), '(self.thresh, M, (450, 450))\n', (2808, 2836), False, 'import cv2, os, sys\n'), ((2864, 2913), 'cv2.warpPerspective', 'cv2.warpPerspective', (['self.captured', 'M', '(450, 450)'], {}), '(self.captured, M, (450, 450))\n', (2883, 2913), False, 'import cv2, os, sys\n'), ((3056, 3110), 'cv2.imwrite', 'cv2.imwrite', (['"""out/cuttedThresh.png"""', 'self.cuttedThresh'], {}), "('out/cuttedThresh.png', self.cuttedThresh)\n", (3067, 3110), False, 'import cv2, os, sys\n'), ((3632, 3650), 'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (3647, 3650), False, 'import cv2, os, sys\n'), ((8932, 8959), 'imutils.resize', 'iu.resize', (['frame'], {'width': '(800)'}), '(frame, width=800)\n', (8941, 8959), True, 'import imutils as iu\n'), ((8972, 9000), 'cv2.imshow', 'cv2.imshow', (['"""preview"""', 'frame'], {}), "('preview', frame)\n", (8982, 9000), False, 'import cv2, os, sys\n'), ((9084, 9099), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (9095, 9099), False, 'import cv2, os, sys\n'), ((9433, 9443), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9441, 9443), False, 'import cv2, os, sys\n'), ((1395, 1421), 'imutils.resize', 'iu.resize', (['img'], {'height': '(800)'}), '(img, height=800)\n', (1404, 1421), True, 'import imutils as iu\n'), ((1472, 1497), 'imutils.resize', 'iu.resize', (['img'], {'width': '(800)'}), '(img, width=800)\n', (1481, 1497), True, 'import imutils as iu\n'), ((2232, 2266), 
'cv2.approxPolyDP', 'cv2.approxPolyDP', (['i', 'epsilon', '(True)'], {}), '(i, epsilon, True)\n', (2248, 2266), False, 'import cv2, os, sys\n'), ((3705, 3724), 'cv2.boundingRect', 'cv2.boundingRect', (['i'], {}), '(i)\n', (3721, 3724), False, 'import cv2, os, sys\n'), ((7621, 7648), 'sudoku_solver.sudokuToGrid', 'solver.sudokuToGrid', (['sudoku'], {}), '(sudoku)\n', (7640, 7648), True, 'import sudoku_solver as solver\n'), ((7686, 7704), 'sudoku_solver.solve', 'solver.solve', (['grid'], {}), '(grid)\n', (7698, 7704), True, 'import sudoku_solver as solver\n'), ((2185, 2207), 'cv2.arcLength', 'cv2.arcLength', (['i', '(True)'], {}), '(i, True)\n', (2198, 2207), False, 'import cv2, os, sys\n'), ((6835, 6864), 'numpy.array_equal', 'np.array_equal', (['corner[0]', 'br'], {}), '(corner[0], br)\n', (6849, 6864), True, 'import numpy as np\n'), ((6873, 6902), 'numpy.array_equal', 'np.array_equal', (['corner[0]', 'tl'], {}), '(corner[0], tl)\n', (6887, 6902), True, 'import numpy as np\n'), ((7783, 7814), 'sudoku_solver.gridToSudoku', 'solver.gridToSudoku', (['solvedGrid'], {}), '(solvedGrid)\n', (7802, 7814), True, 'import sudoku_solver as solver\n'), ((8534, 8561), 'sudoku_solver.sudokuToGrid', 'solver.sudokuToGrid', (['sudoku'], {}), '(sudoku)\n', (8553, 8561), True, 'import sudoku_solver as solver\n'), ((8595, 8613), 'sudoku_solver.solve', 'solver.solve', (['grid'], {}), '(grid)\n', (8607, 8613), True, 'import sudoku_solver as solver\n'), ((8684, 8715), 'sudoku_solver.gridToSudoku', 'solver.gridToSudoku', (['solvedGrid'], {}), '(solvedGrid)\n', (8703, 8715), True, 'import sudoku_solver as solver\n'), ((8872, 8910), 'cv2.imwrite', 'cv2.imwrite', (['"""out/solution.png"""', 'frame'], {}), "('out/solution.png', frame)\n", (8883, 8910), False, 'import cv2, os, sys\n')] |
import argparse
import collections
import json
import math
import os
from datetime import datetime
import torchvision
from allennlp.modules.vision.region_detector import RegionDetectorOutput
from torch import Tensor
from torchvision.models.detection.roi_heads import maskrcnn_inference
from torchvision.transforms import transforms
#from grolp.utils.geometry_utils import calculate_angles
from vision.finetune import MaskRCNN
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
from typing import cast, Iterable, Union, List, Tuple, NamedTuple, Optional, Dict
import numpy as np
import torch
from PIL import Image
import tqdm as tqdm
from allennlp.data import TorchImageLoader
from torch.utils.data import IterableDataset, DataLoader
#from grolp.envs.thor_env import ThorEnv
import torchvision.ops.boxes as box_ops
import torch.nn.functional as F
# Per-trajectory metadata file name inside each ALFRED task directory.
TRAJ_DATA_JSON_FILENAME = "traj_data.json"

# Controller render flags: only the RGB frame is needed for feature extraction.
render_settings = {
    'renderImage': True,
    'renderDepthImage': False,
    'renderObjectImage': False,
    'renderClassImage': False,
}
class MaskDetectorOutput(NamedTuple):
    """
    The output type from the forward pass of a `RegionDetector`, extended with
    per-box mask predictions.
    """

    box_features: List[Tensor]
    """
    A list of tensors, each with shape `(num_boxes, feature_dim)`.
    """

    boxes: List[Tensor]
    """
    A list of tensors containing the coordinates for each box. Each has shape `(num_boxes, 4)`.
    """

    masks: List[Tensor]  # per-image mask probabilities produced by the mask head

    class_probs: Optional[List[Tensor]] = None
    """
    An optional list of tensors. These tensors can have shape `(num_boxes,)` or
    `(num_boxes, *)` if probabilities for multiple classes are given.
    """

    class_labels: Optional[List[Tensor]] = None
    """
    An optional list of tensors that give the labels corresponding to the `class_probs`
    tensors. This should be non-`None` whenever `class_probs` is, and each tensor
    should have the same shape as the corresponding tensor from `class_probs`.
    """
class MaskRCNNDetector(torch.nn.Module):
    """
    Frozen Mask R-CNN wrapper that returns boxes, box features, labels, scores
    and mask probabilities for a batch of images.

    !!! Note
        This module does not have any trainable parameters by default.
        All pretrained weights are frozen.

    # Parameters

    box_score_thresh : `float`, optional (default = `0.05`)
        During inference, only proposal boxes / regions with a label classification score
        greater than `box_score_thresh` will be returned.

    box_nms_thresh : `float`, optional (default = `0.5`)
        During inference, non-maximum suppression (NMS) will be applied to groups of boxes
        that share a common label.
        NMS iteratively removes lower scoring boxes which have an intersection-over-union (IoU)
        greater than `box_nms_thresh` with another higher scoring box.

    max_boxes_per_image : `int`, optional (default = `100`)
        During inference, at most `max_boxes_per_image` boxes will be returned. The
        number of boxes returned will vary by image and will often be lower
        than `max_boxes_per_image` depending on the values of `box_score_thresh`
        and `box_nms_thresh`.

    checkpoint_path: `str`, optional (default = `None`)
        If specified, we assume that we're loading a fine-tuned MaskRCNN model and not the one from Torchvision

    device: accepted but unused in this constructor — TODO confirm callers do not rely on it.
    """

    def __init__(
            self,
            *,
            box_score_thresh: float = 0.05,
            box_nms_thresh: float = 0.5,
            max_boxes_per_image: int = 100,
            checkpoint_path: str = None,
            device="cpu"
    ):
        super().__init__()
        if checkpoint_path is None:
            # Plain COCO-pretrained torchvision model.
            self.detector = torchvision.models.detection.maskrcnn_resnet50_fpn(
                pretrained=True,
                box_score_thresh=box_score_thresh,
                box_nms_thresh=box_nms_thresh,
                box_detections_per_img=max_boxes_per_image,
            )
        else:
            if "moca" in checkpoint_path:
                # MOCA checkpoints store a raw state dict whose keys lack the
                # leading "detector." prefix our wrapper expects.
                maskrcnn = MaskRCNN(num_classes=119, hidden_size=256,
                                    inference_params=dict(box_score_thresh=box_score_thresh,
                                                          box_nms_thresh=box_nms_thresh,
                                                          box_detections_per_img=max_boxes_per_image))
                state_dict = torch.load(checkpoint_path, map_location="cpu")
                new_state_dict = {"detector." + k: v for k, v in state_dict.items()}
                maskrcnn.load_state_dict(new_state_dict)
                self.detector = maskrcnn.detector
            else:
                # Lightning-style fine-tuned checkpoint.
                self.detector = MaskRCNN.load_from_checkpoint(
                    checkpoint_path,
                    inference_params=dict(box_score_thresh=box_score_thresh,
                                          box_nms_thresh=box_nms_thresh,
                                          box_detections_per_img=max_boxes_per_image)
                )
                # access to the actual MaskRCNN reference
                self.detector = self.detector.detector

        # Freeze all weights.
        for parameter in self.detector.parameters():
            parameter.requires_grad = False
        self.detector.eval()

    def forward(
            self,
            images: torch.FloatTensor
    ):
        """
        Extract regions and region features from the given images.

        In most cases `image_features` should come directly from the `ResnetBackbone`
        `GridEmbedder`. The `images` themselves should be standardized and resized
        using the default settings for the `TorchImageLoader`.

        Returns a list of per-image dicts with keys "features", "boxes",
        "labels", "scores" and "masks", mapped back to the original image sizes.
        """
        if self.detector.training:
            raise RuntimeError(
                "MaskRcnnRegionDetector can not be used for training at the moment"
            )
        # Remember original sizes so detections can be mapped back after the
        # internal resize performed by the detector's transform.
        original_image_sizes = torch.jit.annotate(List[Tuple[int, int]], [])
        for img in images:
            val = img.shape[-2:]
            assert len(val) == 2
            original_image_sizes.append((val[0], val[1]))

        images, targets = self.detector.transform(images)
        image_features = self.detector.backbone(images.tensors)
        if isinstance(image_features, torch.Tensor):
            image_features = collections.OrderedDict([('0', image_features)])
        # # `proposals` is a list of tensors, one tensor per image, each representing a
        # # fixed number of proposed regions/boxes.
        # # shape (proposals[i]): (proposals_per_image, 4)
        proposals: List[Tensor]
        proposals, _ = self.detector.rpn(images, image_features)
        #
        # outputs = self.detector.roi_heads(image_features, proposals, image_shapes)
        # # shape: (batch_size * proposals_per_image, *)
        box_features = self.detector.roi_heads.box_roi_pool(image_features, proposals, images.image_sizes)
        #
        # # shape: (batch_size * proposals_per_image, *)
        box_features = self.detector.roi_heads.box_head(box_features)
        #
        # # shape (class_logits): (batch_size * proposals_per_image, num_classes)
        # # shape (box_regression): (batch_size * proposals_per_image, regression_output_size)
        class_logits, box_regression = self.detector.roi_heads.box_predictor(box_features)

        # This step filters down the `proposals` to only detections that reach
        # a certain threshold.
        # Each of these is a list of tensors, one for each image in the batch.
        # shape (boxes[i]): (num_predicted_boxes, 4)
        # shape (features[i]): (num_predicted_boxes, feature_size)
        # shape (scores[i]): (num_predicted_classes,)
        # shape (labels[i]): (num_predicted_classes,)
        boxes, box_features, scores, labels = self._postprocess_detections(
            class_logits, box_features, box_regression, proposals, images.image_sizes
        )
        num_images = len(boxes)
        result = torch.jit.annotate(List[Dict[str, torch.Tensor]], [])
        for i in range(num_images):
            result.append(
                {
                    "features": box_features[i],
                    "boxes": boxes[i],
                    "labels": labels[i],
                    "scores": scores[i],
                }
            )

        # compute masks as well: run the mask head on the surviving boxes only
        mask_proposals = boxes
        mask_features = self.detector.roi_heads.mask_roi_pool(image_features, mask_proposals, images.image_sizes)
        mask_features = self.detector.roi_heads.mask_head(mask_features)
        mask_logits = self.detector.roi_heads.mask_predictor(mask_features)

        labels = [r["labels"] for r in result]
        masks_probs = maskrcnn_inference(mask_logits, labels)
        for mask_prob, r in zip(masks_probs, result):
            r["masks"] = mask_prob

        # Rescale boxes/masks back to the original (pre-transform) image sizes.
        detections = self.detector.transform.postprocess(result, images.image_sizes, original_image_sizes)

        return detections

    def _postprocess_detections(
            self,
            class_logits: Tensor,
            box_features: Tensor,
            box_regression: Tensor,
            proposals: List[Tensor],
            image_shapes: List[Tuple[int, int]],
    ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]:
        """
        Adapted from https://github.com/pytorch/vision/blob/
        4521f6d152875974e317fa247a633e9ad1ea05c8/torchvision/models/detection/roi_heads.py#L664.

        The only reason we have to re-implement this method is so we can pull out the box
        features that we want.
        """
        device = class_logits.device
        num_classes = class_logits.shape[-1]

        boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals]
        # shape: (batch_size * boxes_per_image, num_classes, 4)
        pred_boxes = self.detector.roi_heads.box_coder.decode(box_regression, proposals)

        pred_scores = F.softmax(class_logits, -1)

        # Split the flat (batch * proposals) tensors back into per-image chunks.
        pred_boxes_list = pred_boxes.split(boxes_per_image, 0)
        features_list = box_features.split(boxes_per_image, dim=0)
        pred_scores_list = pred_scores.split(boxes_per_image, 0)

        all_boxes = []
        all_features = []
        all_scores = []
        all_labels = []
        for boxes, features, scores, image_shape in zip(
                pred_boxes_list, features_list, pred_scores_list, image_shapes
        ):
            # shape: (boxes_per_image, num_classes, 4)
            boxes = box_ops.clip_boxes_to_image(boxes, image_shape)

            # Broadcast each box's single feature vector across all classes, so
            # features stay aligned with boxes after the per-class flattening below.
            # shape: (boxes_per_image, num_classes, feature_size)
            features = features.unsqueeze(1).expand(boxes.shape[0], boxes.shape[1], -1)

            # create labels for each prediction
            # shape: (num_classes,)
            labels = torch.arange(num_classes, device=device)
            # shape: (boxes_per_image, num_classes,)
            labels = labels.view(1, -1).expand_as(scores)

            # remove predictions with the background label (index 0)
            # shape: (boxes_per_image, num_classes - 1, 4)
            boxes = boxes[:, 1:]
            # shape: (boxes_per_image, num_classes - 1, feature_size)
            features = features[:, 1:]
            # shape: (boxes_per_image, num_classes - 1,)
            scores = scores[:, 1:]
            # shape: (boxes_per_image, num_classes - 1,)
            labels = labels[:, 1:]

            # batch everything, by making every class prediction be a separate instance
            # shape: (boxes_per_image * (num_classes - 1), 4)
            boxes = boxes.reshape(-1, 4)
            # shape: (boxes_per_image * (num_classes - 1), feature_size)
            features = features.reshape(boxes.shape[0], -1)
            # shape: (boxes_per_image * (num_classes - 1),)
            scores = scores.reshape(-1)
            # shape: (boxes_per_image * (num_classes - 1),)
            labels = labels.reshape(-1)

            # remove low scoring boxes
            inds = torch.where(scores > self.detector.roi_heads.score_thresh)[0]
            boxes, features, scores, labels = (
                boxes[inds],
                features[inds],
                scores[inds],
                labels[inds],
            )

            # remove empty boxes
            keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)
            boxes, features, scores, labels = (
                boxes[keep],
                features[keep],
                scores[keep],
                labels[keep],
            )

            # non-maximum suppression, independently done per class
            keep = box_ops.batched_nms(boxes, scores, labels, self.detector.roi_heads.nms_thresh)
            # keep only topk scoring predictions
            keep = keep[: self.detector.roi_heads.detections_per_img]
            boxes, features, scores, labels = (
                boxes[keep],
                features[keep],
                scores[keep],
                labels[keep],
            )

            all_boxes.append(boxes)
            all_features.append(features)
            all_scores.append(scores)
            all_labels.append(labels)

        return all_boxes, all_features, all_scores, all_labels
class CustomImageLoader(TorchImageLoader):
    """Image loader that converts PIL images to tensors via a bare ToTensor transform."""

    def __init__(self, *,
                 image_backend: str = None,
                 size_divisibility: int = 32,
                 **kwargs, ):
        super().__init__(image_backend=image_backend, size_divisibility=size_divisibility, **kwargs)
        # Only ToTensor: channels-first float tensor with values scaled to [0, 1].
        self.transforms = transforms.Compose([transforms.ToTensor()])

    def load(self, image):
        """Convert one PIL image to a float tensor."""
        return self.transforms(image)

    def __call__(self, image_or_images: Union[Image.Image, Iterable[Image.Image]], pack=False):
        # Single-image convenience path: route through the batched path and unwrap.
        if not isinstance(image_or_images, (list, tuple)):
            batch, batch_sizes = self([image_or_images])
            return batch[0], batch_sizes[0]
        tensors: List[torch.FloatTensor] = []
        dims: List[torch.IntTensor] = []
        for pil_image in image_or_images:
            tensor = self.load(pil_image).to(self.device)
            hw = cast(
                torch.IntTensor,
                torch.tensor(
                    [tensor.shape[-2], tensor.shape[-1]], dtype=torch.int32, device=self.device
                ),
            )
            tensors.append(tensor)
            dims.append(hw)
        if pack:
            return torch.stack(tensors), torch.stack(dims)
        return tensors, dims
def create_panorama(env, rotation_steps):
    """Collect panorama frames and camera metadata from the current THOR event.

    NOTE(review): despite the name, this currently returns only the *current*
    frame; `rotation_steps` is accepted for interface compatibility but unused.
    (Removed the unused `initial_agent` local.)

    Returns:
        (panorama_frames, camera_info): a list of PIL images and a parallel list
        of dicts with the horizontal/vertical view angles of each frame.
    """
    curr_image = Image.fromarray(env.last_event.frame)
    panorama_frames = [curr_image]
    camera_info = [dict(
        h_view_angle=env.last_event.metadata["agent"]["rotation"]["y"],
        # flip direction of heading angle - negative will be down and positive will be up
        v_view_angle=-env.last_event.metadata["agent"]["cameraHorizon"]
    )]
    return panorama_frames, camera_info
class FailedReplay(Exception):
    """Raised when a recorded trajectory cannot be replayed in the simulator."""

    def __init__(self, message):
        super().__init__(message)
class ALFREDImageDataset(IterableDataset):
    def __init__(self, args):
        """Index all trajectories of the requested split.

        Reads the splits JSON at `args.splits`, loads each unique traj_data.json
        under `args.data_path` whose split key contains `args.split_id`, and
        precomputes the total number of frames. `args.debug` caps the dataset at
        3 trajectories and prints their paths.
        """
        # trajectories are scattered among several files
        # we gather their file names here
        self.trajectories = []
        self.num_images = 0
        self.image_size = args.image_width
        self.rotation_degrees = 90
        # we have already the current frame
        self.rotation_steps = (360 // self.rotation_degrees) - 1
        # guard against loading the same trajectory file twice
        visited = set()
        with open(args.splits) as in_file:
            splits = json.load(in_file)
        stop = False
        for k, d in splits.items():
            if args.split_id in k:
                for task in d:
                    # load json file
                    json_path = os.path.join(args.data_path, k, task['task'], 'traj_data.json')
                    if json_path not in visited:
                        visited.add(json_path)
                        with open(json_path) as f:
                            ex = json.load(f)
                        # copy trajectory
                        r_idx = task['repeat_idx'] # repeat_idx is the index of the annotation for each trajectory
                        traj = ex.copy()
                        # root & split
                        traj['root'] = os.path.join(args.data_path, task['task'])
                        traj['split'] = k
                        traj['repeat_idx'] = r_idx
                        traj['path'] = json_path
                        self.trajectories.append(traj)
                        # each low-level action contributes one frame per rotation view
                        self.num_images += len(traj['plan']['low_actions']) * (self.rotation_steps + 1)
                        if args.debug and len(self.trajectories) == 3:
                            stop = True
                            break
                if stop:
                    print("Debugging mode on!")
                    break
        print(f"Discovered {len(self.trajectories)} files from the directory {args.data_path}")
        if args.debug:
            for traj in self.trajectories:
                print(traj["path"])
        # [start, end) range of trajectory indices this instance will iterate;
        # __iter__ narrows it further per DataLoader worker
        self.start = 0
        self.end = len(self.trajectories)
        # assumes that we have a squared video frame
        self.image_loader = CustomImageLoader(min_size=args.image_width, max_size=args.image_width)
        # trajectories whose replay raised FailedReplay during iteration
        self.failed = []
    def __len__(self):
        # Total number of frames the iterator yields:
        # sum over trajectories of (low-level actions x rotation views), precomputed in __init__.
        return self.num_images
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
# single worker
if worker_info is None:
# in this case we just process the entire dataset
iter_start = self.start
iter_end = self.end
else:
# we need to split the load among the workers making sure that there are no duplicates
per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
worker_id = worker_info.id
iter_start = self.start + worker_id * per_worker
iter_end = min(iter_start + per_worker, self.end)
# start THOR env
env = ThorEnv(player_screen_width=args.image_width,
player_screen_height=args.image_height)
try:
for idx in range(iter_start, iter_end):
try:
for root_dir, step_index, images, camera_infos, is_traj_finished in \
self._get_instance(env, self.trajectories[idx]):
for pano_idx, (image, camera, done) in enumerate(zip(images, camera_infos, is_traj_finished)):
norm_image, size = self.image_loader(image)
yield root_dir, step_index, pano_idx, norm_image, size, camera, done
except FailedReplay:
self.failed.append(self.trajectories[idx])
except KeyboardInterrupt:
pass
finally:
env.stop()
def _get_instance(self, env, traj_data):
json_file = traj_data["path"]
# make directories
root_dir = json_file.replace(TRAJ_DATA_JSON_FILENAME, "")
# scene setup
scene_num = traj_data['scene']['scene_num']
object_poses = traj_data['scene']['object_poses']
object_toggles = traj_data['scene']['object_toggles']
dirty_and_empty = traj_data['scene']['dirty_and_empty']
# reset
scene_name = 'FloorPlan%d' % scene_num
env.reset(scene_name)
env.restore_scene(object_poses, object_toggles, dirty_and_empty)
env.step(dict(traj_data['scene']['init_action']))
# setup task
env.set_task(traj_data, args, reward_type='dense')
num_actions = len(traj_data['plan']['low_actions'])
for ll_idx, ll_action in enumerate(traj_data['plan']['low_actions']):
# next cmd under the current hl_action
cmd = ll_action['api_action']
# remove unnecessary keys
cmd = {k: cmd[k] for k in ['action', 'objectId', 'receptacleObjectId', 'placeStationary', 'forceAction'] if
k in cmd}
# panorama_images = None
panorama_images, camera_infos = create_panorama(env, self.rotation_steps)
is_finished = [False] * (self.rotation_steps + 1)
yield root_dir, ll_idx, panorama_images, camera_infos, is_finished
event = env.step(cmd)
if not event.metadata['lastActionSuccess']:
raise FailedReplay("Replay Failed: %s" % (env.last_event.metadata['errorMessage']))
curr_image = Image.fromarray(env.last_event.frame)
panorama_images = [curr_image] + [Image.fromarray(np.zeros_like(curr_image)) for _ in
range(self.rotation_steps - 1)]
camera_info = dict(
h_view_angle=env.last_event.metadata["agent"]["rotation"]["y"],
# flip direction of heading angle - negative will be down and positive will be up
v_view_angle=-env.last_event.metadata["agent"]["cameraHorizon"]
)
camera_infos = [camera_info] + [dict(h_view_angle=0.0, v_view_angle=0.0) for _ in
range(self.rotation_steps)]
is_finished = [True] * (self.rotation_steps + 1)
yield root_dir, num_actions, panorama_images, camera_infos, is_finished
def run(args):
    """
    Extract MaskRCNN region features for every panoramic frame of every
    trajectory in the requested ALFRED split and store them as .npz files.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options (model checkpoint, detector thresholds,
        paths, batching and device settings).
    """
    assert args.image_width == args.image_height, f"Squared video frames only (w={args.image_width} != h={args.image_height})"
    region_detector = MaskRCNNDetector(
        box_score_thresh=args.box_score_thresh,
        box_nms_thresh=args.box_nms_thresh,
        max_boxes_per_image=args.max_boxes_per_image,
        checkpoint_path=args.model_checkpoint
    )
    # -1 selects the CPU; any other value is interpreted as a CUDA device id
    device = torch.device(args.cuda_device) if args.cuda_device != -1 else torch.device("cpu")
    region_detector.to(device)
    dataset = ALFREDImageDataset(args)
    loader = DataLoader(dataset, shuffle=False, batch_size=args.batch_size, num_workers=args.num_workers)
    start_time = datetime.now()
    for batch_idx, batch in enumerate(
            tqdm.tqdm(loader, desc=f"Generating MaskRCNN features for ALFRED {args.split_id}")):
        dirs, step_ids, pano_ids, images, sizes, camera_infos, is_finished = batch
        with torch.no_grad():
            # FasterRCNN feature extraction for the current frame
            images = images.to(device)
            detector_results = region_detector(images)
            # regroup the per-image detector outputs sample by sample
            paths_to_tensors = [
                (path, step_id, pano_ids[i], is_finished[i],
                 detector_results[i]["features"],
                 detector_results[i]["boxes"],
                 detector_results[i]["masks"],
                 detector_results[i]["scores"],
                 detector_results[i]["labels"]) for i, (path, step_id) in enumerate(zip(dirs, step_ids))
            ]
            for i, data in enumerate(paths_to_tensors):
                path, step_id, pano_id, done, box_features, boxes, masks, class_probs, class_labels = data
                features_path = os.path.join(path, args.features_folder)
                if not os.path.exists(features_path):
                    os.makedirs(features_path)
                # one .npz file per (action step, panorama view) pair
                output_file = os.path.join(features_path, f"{str(step_id.item())}-{str(pano_id.item())}.npz")
                # per-view cap on how many detections are kept
                num_boxes = args.panoramic_boxes[pano_id.item()]
                if boxes.shape[0] > 0:
                    boxes = boxes.cpu().numpy()
                    # box centers in pixel coordinates: ((x1+x2)//2, (y1+y2)//2)
                    center_coords = (boxes[:, 0] + boxes[:, 2]) // 2, (
                            boxes[:, 1] + boxes[:, 3]) // 2
                    # map each box center to (horizontal, vertical) view angles
                    # relative to the camera pose of this sample
                    h_angle, v_angle = calculate_angles(
                        center_coords[0],
                        center_coords[1],
                        camera_infos["h_view_angle"][i].item(),
                        camera_infos["v_view_angle"][i].item()
                    )
                    boxes_angles = np.stack([h_angle, v_angle], 1)
                else:
                    # no detections: keep empty (0, 2) placeholders
                    boxes_angles = np.zeros((boxes.shape[0], 2))
                    boxes = boxes.cpu().numpy()
                # keep at most num_boxes detections (assumes the detector
                # output is sorted by relevance — TODO confirm)
                box_features = box_features[:num_boxes]
                boxes_angles = boxes_angles[:num_boxes]
                boxes = boxes[:num_boxes]
                masks = masks[:num_boxes]
                class_probs = class_probs[:num_boxes]
                class_labels = class_labels[:num_boxes]
                np.savez_compressed(
                    output_file,
                    box_features=box_features.cpu().numpy(),
                    roi_angles=boxes_angles,
                    boxes=boxes,
                    masks=(masks > 0.5).cpu().numpy(),
                    class_probs=class_probs.cpu().numpy(),
                    class_labels=class_labels.cpu().numpy(),
                    num_objects=box_features.shape[0],
                    pano_id=pano_id
                )
                done = done.item()
                if done:
                    # this will store a file specifying that all the features have been generated for the current
                    # trajectory
                    with open(os.path.join(features_path, "done"), mode="w") as out_file:
                        out_file.write(str(done))
    # NOTE(review): with num_workers > 0 the dataset object is copied into
    # worker processes, so failures recorded there may not appear here
    if len(dataset.failed) > 0:
        print(f"Trajectory execution failed for {len(dataset.failed)} trajectories: ")
        for traj in dataset.failed:
            print(traj["path"])
    end_time = datetime.now()
    print(f"Total feature extraction time: {end_time - start_time}")
def create_panorama(env, rotation_steps):
    """
    Grab the agent's current first-person frame together with its camera pose.

    Despite the name and the ``rotation_steps`` parameter, this
    implementation does not rotate the agent: it returns a single frame
    (the front view) and a single camera-info entry. ``rotation_steps`` is
    kept only for interface compatibility with callers.

    Parameters
    ----------
    env : ThorEnv
        The running AI2-THOR environment wrapper.
    rotation_steps : int
        Unused here; the number of extra rotated views a full panorama
        would contain.

    Returns
    -------
    panorama_frames : list
        A single-element list holding the current frame as a PIL image.
    camera_info : list of dict
        A single-element list with the agent's heading (h_view_angle) and
        negated camera horizon (v_view_angle).
    """
    # The unused local `initial_agent` was removed; the agent's pose is
    # read directly from the event metadata below.
    curr_image = Image.fromarray(env.last_event.frame)
    panorama_frames = [curr_image]
    camera_info = [dict(
        h_view_angle=env.last_event.metadata["agent"]["rotation"]["y"],
        # flip direction of heading angle - negative will be down and positive will be up
        v_view_angle=-env.last_event.metadata["agent"]["cameraHorizon"]
    )]
    return panorama_frames, camera_info
if __name__ == "__main__":
    # Command-line entry point: collect every option, then launch the
    # feature-extraction pipeline.
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # General / dataset options
    add('--debug', action='store_true')
    add('--split_id', type=str, default="train",
        help="The identifier of the split for which we should extract the features for")
    add('--features_folder', type=str, default="torch_maskrcnn")
    add('--model_checkpoint', type=str)
    add('--data_path', type=str, default="storage/data/alfred/json_feat_2.1.0")
    add('--splits', type=str, default="storage/data/alfred/splits/oct21.json")
    add('--num_workers', type=int, default=0)
    add('--batch_size', type=int, default=8)
    add('--reward_config', type=str, default='configs/rewards.json')
    add('--cuda_device', type=int, default=-1)
    add('--image_width', type=int, default=300)
    add('--image_height', type=int, default=300)

    ## FasterRCNN parameters
    add('--box_score_thresh', type=float, default=0.05)
    add('--box_nms_thresh', type=float, default=0.5)
    add('--max_boxes_per_image', type=float, default=36)
    add('--panoramic_boxes', nargs="+", default=(36, 9, 9, 9))

    args = parser.parse_args()
    run(args)
| [
"torchvision.ops.boxes.remove_small_boxes",
"torch.nn.functional.softmax",
"torch.arange",
"torchvision.models.detection.roi_heads.maskrcnn_inference",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.stack",
"torchvision.transforms.transforms.ToTensor",
"torchvision.ops.boxes.clip_boxes_to_image... | [((14546, 14583), 'PIL.Image.fromarray', 'Image.fromarray', (['env.last_event.frame'], {}), '(env.last_event.frame)\n', (14561, 14583), False, 'from PIL import Image\n'), ((21902, 21999), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'shuffle': '(False)', 'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers'}), '(dataset, shuffle=False, batch_size=args.batch_size, num_workers=\n args.num_workers)\n', (21912, 21999), False, 'from torch.utils.data import IterableDataset, DataLoader\n'), ((22013, 22027), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22025, 22027), False, 'from datetime import datetime\n'), ((25426, 25440), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (25438, 25440), False, 'from datetime import datetime\n'), ((25667, 25704), 'PIL.Image.fromarray', 'Image.fromarray', (['env.last_event.frame'], {}), '(env.last_event.frame)\n', (25682, 25704), False, 'from PIL import Image\n'), ((26109, 26134), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (26132, 26134), False, 'import argparse\n'), ((5800, 5845), 'torch.jit.annotate', 'torch.jit.annotate', (['List[Tuple[int, int]]', '[]'], {}), '(List[Tuple[int, int]], [])\n', (5818, 5845), False, 'import torch\n'), ((7863, 7916), 'torch.jit.annotate', 'torch.jit.annotate', (['List[Dict[str, torch.Tensor]]', '[]'], {}), '(List[Dict[str, torch.Tensor]], [])\n', (7881, 7916), False, 'import torch\n'), ((8597, 8636), 'torchvision.models.detection.roi_heads.maskrcnn_inference', 'maskrcnn_inference', (['mask_logits', 'labels'], {}), '(mask_logits, labels)\n', (8615, 8636), False, 'from torchvision.models.detection.roi_heads import maskrcnn_inference\n'), ((9823, 9850), 'torch.nn.functional.softmax', 'F.softmax', (['class_logits', '(-1)'], {}), '(class_logits, -1)\n', (9832, 9850), True, 'import torch.nn.functional as F\n'), ((17439, 17473), 'torch.utils.data.get_worker_info', 
'torch.utils.data.get_worker_info', ([], {}), '()\n', (17471, 17473), False, 'import torch\n'), ((20551, 20588), 'PIL.Image.fromarray', 'Image.fromarray', (['env.last_event.frame'], {}), '(env.last_event.frame)\n', (20566, 20588), False, 'from PIL import Image\n'), ((21734, 21764), 'torch.device', 'torch.device', (['args.cuda_device'], {}), '(args.cuda_device)\n', (21746, 21764), False, 'import torch\n'), ((21796, 21815), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (21808, 21815), False, 'import torch\n'), ((22080, 22167), 'tqdm.tqdm', 'tqdm.tqdm', (['loader'], {'desc': 'f"""Generating MaskRCNN features for ALFRED {args.split_id}"""'}), "(loader, desc=\n f'Generating MaskRCNN features for ALFRED {args.split_id}')\n", (22089, 22167), True, 'import tqdm as tqdm\n'), ((3615, 3800), 'torchvision.models.detection.maskrcnn_resnet50_fpn', 'torchvision.models.detection.maskrcnn_resnet50_fpn', ([], {'pretrained': '(True)', 'box_score_thresh': 'box_score_thresh', 'box_nms_thresh': 'box_nms_thresh', 'box_detections_per_img': 'max_boxes_per_image'}), '(pretrained=True,\n box_score_thresh=box_score_thresh, box_nms_thresh=box_nms_thresh,\n box_detections_per_img=max_boxes_per_image)\n', (3665, 3800), False, 'import torchvision\n'), ((6203, 6251), 'collections.OrderedDict', 'collections.OrderedDict', (["[('0', image_features)]"], {}), "([('0', image_features)])\n", (6226, 6251), False, 'import collections\n'), ((10367, 10414), 'torchvision.ops.boxes.clip_boxes_to_image', 'box_ops.clip_boxes_to_image', (['boxes', 'image_shape'], {}), '(boxes, image_shape)\n', (10394, 10414), True, 'import torchvision.ops.boxes as box_ops\n'), ((10676, 10716), 'torch.arange', 'torch.arange', (['num_classes'], {'device': 'device'}), '(num_classes, device=device)\n', (10688, 10716), False, 'import torch\n'), ((12151, 12199), 'torchvision.ops.boxes.remove_small_boxes', 'box_ops.remove_small_boxes', (['boxes'], {'min_size': '(0.01)'}), '(boxes, min_size=0.01)\n', (12177, 12199), 
True, 'import torchvision.ops.boxes as box_ops\n'), ((12471, 12549), 'torchvision.ops.boxes.batched_nms', 'box_ops.batched_nms', (['boxes', 'scores', 'labels', 'self.detector.roi_heads.nms_thresh'], {}), '(boxes, scores, labels, self.detector.roi_heads.nms_thresh)\n', (12490, 12549), True, 'import torchvision.ops.boxes as box_ops\n'), ((15553, 15571), 'json.load', 'json.load', (['in_file'], {}), '(in_file)\n', (15562, 15571), False, 'import json\n'), ((22261, 22276), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22274, 22276), False, 'import torch\n'), ((4314, 4361), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': '"""cpu"""'}), "(checkpoint_path, map_location='cpu')\n", (4324, 4361), False, 'import torch\n'), ((11853, 11911), 'torch.where', 'torch.where', (['(scores > self.detector.roi_heads.score_thresh)'], {}), '(scores > self.detector.roi_heads.score_thresh)\n', (11864, 11911), False, 'import torch\n'), ((13422, 13443), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (13441, 13443), False, 'from torchvision.transforms import transforms\n'), ((14120, 14212), 'torch.tensor', 'torch.tensor', (['[image.shape[-2], image.shape[-1]]'], {'dtype': 'torch.int32', 'device': 'self.device'}), '([image.shape[-2], image.shape[-1]], dtype=torch.int32, device=\n self.device)\n', (14132, 14212), False, 'import torch\n'), ((14362, 14381), 'torch.stack', 'torch.stack', (['images'], {}), '(images)\n', (14373, 14381), False, 'import torch\n'), ((14383, 14401), 'torch.stack', 'torch.stack', (['sizes'], {}), '(sizes)\n', (14394, 14401), False, 'import torch\n'), ((23040, 23080), 'os.path.join', 'os.path.join', (['path', 'args.features_folder'], {}), '(path, args.features_folder)\n', (23052, 23080), False, 'import os\n'), ((15766, 15829), 'os.path.join', 'os.path.join', (['args.data_path', 'k', "task['task']", '"""traj_data.json"""'], {}), "(args.data_path, k, task['task'], 'traj_data.json')\n", (15778, 15829), False, 
'import os\n'), ((20647, 20672), 'numpy.zeros_like', 'np.zeros_like', (['curr_image'], {}), '(curr_image)\n', (20660, 20672), True, 'import numpy as np\n'), ((23104, 23133), 'os.path.exists', 'os.path.exists', (['features_path'], {}), '(features_path)\n', (23118, 23133), False, 'import os\n'), ((23155, 23181), 'os.makedirs', 'os.makedirs', (['features_path'], {}), '(features_path)\n', (23166, 23181), False, 'import os\n'), ((23905, 23936), 'numpy.stack', 'np.stack', (['[h_angle, v_angle]', '(1)'], {}), '([h_angle, v_angle], 1)\n', (23913, 23936), True, 'import numpy as np\n'), ((23994, 24023), 'numpy.zeros', 'np.zeros', (['(boxes.shape[0], 2)'], {}), '((boxes.shape[0], 2))\n', (24002, 24023), True, 'import numpy as np\n'), ((16303, 16345), 'os.path.join', 'os.path.join', (['args.data_path', "task['task']"], {}), "(args.data_path, task['task'])\n", (16315, 16345), False, 'import os\n'), ((16011, 16023), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16020, 16023), False, 'import json\n'), ((25112, 25147), 'os.path.join', 'os.path.join', (['features_path', '"""done"""'], {}), "(features_path, 'done')\n", (25124, 25147), False, 'import os\n')] |
"""
This module handles the topological elements of force fields.
"""
from simtk import unit
class TopologyElement(object):
    """
    A wrapper for any topological element.
    """

    # Display name of the concrete topological element
    _name = None
    # Ordered attribute names that are written to file
    _writable_attrs = []

    class TopologyIterator(object):
        """
        An iterator for topological elements that iterates over their
        attributes in an ordered way.

        It is useful when writing topological elements to file.
        """

        def __init__(self, top_el):
            """
            It initiates a TopologyIterator object.

            Parameters
            ----------
            top_el : a TopologyElement object
                The topology element to iterate on.
            """
            self._index = int(0)
            self._top_el = top_el

        def __iter__(self):
            """
            It makes the iterator itself iterable, completing the
            iterator protocol.

            Returns
            -------
            iterator : a TopologyIterator
                This very iterator
            """
            return self

        def __next__(self):
            """
            It returns the next item for the iteration.

            Returns
            -------
            attr_name : str
                The name of the attribute
            attr_value : float
                The value of the attribute
            """
            if self._index == len(self._top_el._writable_attrs):
                raise StopIteration
            attr_name = self._top_el._writable_attrs[self._index]
            attr_value = getattr(self._top_el, attr_name)
            self._index += 1
            return attr_name, attr_value

    @property
    def name(self):
        """
        The name that this topological element has.

        Returns
        -------
        name : str
            The name of the topological element
        """
        return self._name

    @property
    def n_writable_attrs(self):
        """
        The number of writable attributes this topological element has.

        Returns
        -------
        n_writable_attrs : int
            The number of writable attributes
        """
        return len(self._writable_attrs)

    def __iter__(self):
        """
        It returns an instance of the TopologyIterator.

        Returns
        -------
        iterator : a TopologyIterator
            The TopologyIterator object
        """
        return self.TopologyIterator(self)

    def __repr__(self):
        """
        It returns the representation string of this topological element.

        Returns
        -------
        repr_string : str
            The representation string
        """
        repr_string = '{}('.format(self._name)
        attrs = [attr for attr in self]
        # Guard against elements with no writable attributes: the previous
        # implementation raised an IndexError on attrs[-1] in that case
        if not attrs:
            return repr_string + ')'
        for attr_name, value in attrs[:-1]:
            repr_string += '{}={}, '.format(attr_name, value)
        repr_string += '{}={})'.format(*attrs[-1])
        return repr_string

    def __str__(self):
        """
        It returns the readable representation string of this topological
        element.

        Returns
        -------
        str_string : str
            The readable representation string
        """
        return self.__repr__()
class Bond(TopologyElement):
    """A harmonic bond between two atoms of the topology."""

    _name = 'Bond'
    _writable_attrs = ['atom1_idx', 'atom2_idx', 'spring_constant', 'eq_dist']

    def __init__(self, index=-1, atom1_idx=None, atom2_idx=None,
                 spring_constant=None, eq_dist=None):
        """
        Build a Bond.

        Parameters
        ----------
        index : int
            The index of this Bond object
        atom1_idx : int
            The index of the first atom involved in this Bond
        atom2_idx : int
            The index of the second atom involved in this Bond
        spring_constant : simtk.unit.Quantity
            The spring constant of this Bond
        eq_dist : simtk.unit.Quantity
            The equilibrium distance of this Bond
        """
        self._index = index
        self._atom1_idx = atom1_idx
        self._atom2_idx = atom2_idx
        self._spring_constant = spring_constant
        self._eq_dist = eq_dist

    def set_atom1_idx(self, index):
        """Assign a new index to the first atom of this Bond."""
        self._atom1_idx = index

    def set_atom2_idx(self, index):
        """Assign a new index to the second atom of this Bond."""
        self._atom2_idx = index

    # Read-only views over the private attributes
    index = property(lambda self: self._index,
                     doc="int : The index of this Bond object")
    atom1_idx = property(lambda self: self._atom1_idx,
                         doc="int : Index of the first atom of the bond")
    atom2_idx = property(lambda self: self._atom2_idx,
                         doc="int : Index of the second atom of the bond")
    spring_constant = property(lambda self: self._spring_constant,
                               doc="simtk.unit.Quantity : The spring constant")
    eq_dist = property(lambda self: self._eq_dist,
                       doc="simtk.unit.Quantity : The equilibrium distance")
class Angle(TopologyElement):
    """A harmonic angle between three atoms of the topology."""

    _name = 'Angle'
    _writable_attrs = ['atom1_idx', 'atom2_idx', 'atom3_idx',
                       'spring_constant', 'eq_angle']

    def __init__(self, index=-1, atom1_idx=None, atom2_idx=None,
                 atom3_idx=None, spring_constant=None, eq_angle=None):
        """
        Build an Angle.

        Parameters
        ----------
        index : int
            The index of this Angle object
        atom1_idx : int
            The index of the first atom involved in this Angle
        atom2_idx : int
            The index of the second atom involved in this Angle
        atom3_idx : int
            The index of the third atom involved in this Angle
        spring_constant : simtk.unit.Quantity
            The spring constant of this Angle
        eq_angle : simtk.unit.Quantity
            The equilibrium angle of this Angle
        """
        self._index = index
        self._atom1_idx = atom1_idx
        self._atom2_idx = atom2_idx
        self._atom3_idx = atom3_idx
        self._spring_constant = spring_constant
        self._eq_angle = eq_angle

    def set_atom1_idx(self, index):
        """Assign a new index to the first atom of this Angle."""
        self._atom1_idx = index

    def set_atom2_idx(self, index):
        """Assign a new index to the second atom of this Angle."""
        self._atom2_idx = index

    def set_atom3_idx(self, index):
        """Assign a new index to the third atom of this Angle."""
        self._atom3_idx = index

    # Read-only views over the private attributes
    index = property(lambda self: self._index,
                     doc="int : The index of this Angle object")
    atom1_idx = property(lambda self: self._atom1_idx,
                         doc="int : Index of the first atom of the angle")
    atom2_idx = property(lambda self: self._atom2_idx,
                         doc="int : Index of the second atom of the angle")
    atom3_idx = property(lambda self: self._atom3_idx,
                         doc="int : Index of the third atom of the angle")
    spring_constant = property(lambda self: self._spring_constant,
                               doc="simtk.unit.Quantity : The spring constant")
    eq_angle = property(lambda self: self._eq_angle,
                        doc="simtk.unit.Quantity : The equilibrium angle")
class Dihedral(TopologyElement):
    """
    A dihedral of the topology. It can represent either a proper or an
    improper torsion.
    """

    _name = 'Dihedral'
    _writable_attrs = ['atom1_idx', 'atom2_idx', 'atom3_idx', 'atom4_idx',
                       'constant', 'prefactor', 'periodicity']

    def __init__(self, index=-1, atom1_idx=None, atom2_idx=None,
                 atom3_idx=None, atom4_idx=None, periodicity=None,
                 prefactor=None, constant=None):
        """
        Build a Dihedral.

        Parameters
        ----------
        index : int
            The index of this Dihedral object
        atom1_idx : int
            The index of the first atom involved in this Dihedral
        atom2_idx : int
            The index of the second atom involved in this Dihedral
        atom3_idx : int
            The index of the third atom involved in this Dihedral
        atom4_idx : int
            The index of the fourth atom involved in this Dihedral
        periodicity : int
            The periodicity of this Dihedral
        prefactor : int
            The prefactor of this Dihedral
        constant : simtk.unit.Quantity
            The constant of this Dihedral
        """
        self._index = index
        self._atom1_idx = atom1_idx
        self._atom2_idx = atom2_idx
        self._atom3_idx = atom3_idx
        self._atom4_idx = atom4_idx
        self._periodicity = periodicity
        self._prefactor = prefactor
        self._constant = constant

    def set_atom1_idx(self, index):
        """Assign a new index to the first atom of this Dihedral."""
        self._atom1_idx = index

    def set_atom2_idx(self, index):
        """Assign a new index to the second atom of this Dihedral."""
        self._atom2_idx = index

    def set_atom3_idx(self, index):
        """Assign a new index to the third atom of this Dihedral."""
        self._atom3_idx = index

    def set_atom4_idx(self, index):
        """Assign a new index to the fourth atom of this Dihedral."""
        self._atom4_idx = index

    def plot(self):
        """Plot this dihedral's energy term as a function of the phi angle."""
        from matplotlib import pyplot
        import numpy as np
        phi = unit.Quantity(np.arange(0, np.pi, 0.1), unit=unit.radians)
        energy = self.constant * (1 + self.prefactor
                                  * np.cos(self.periodicity * phi))
        pyplot.plot(phi, energy, 'r--')
        pyplot.show()

    # Read-only views over the private attributes
    index = property(lambda self: self._index,
                     doc="int : The index of this Dihedral object")
    atom1_idx = property(lambda self: self._atom1_idx,
                         doc="int : Index of the first atom of the dihedral")
    atom2_idx = property(lambda self: self._atom2_idx,
                         doc="int : Index of the second atom of the dihedral")
    atom3_idx = property(lambda self: self._atom3_idx,
                         doc="int : Index of the third atom of the dihedral")
    atom4_idx = property(lambda self: self._atom4_idx,
                         doc="int : Index of the fourth atom of the dihedral")
    periodicity = property(lambda self: self._periodicity,
                           doc="int : The periodicity of this Dihedral")
    prefactor = property(lambda self: self._prefactor,
                         doc="int : The prefactor of this Dihedral")
    constant = property(lambda self: self._constant,
                        doc="simtk.unit.Quantity : The constant of this Dihedral")
class Proper(Dihedral):
    """A proper (torsional) dihedral of the topology."""

    _name = 'Proper'
    # Whether this torsion must be left out of PELE's 1-4 interaction list
    exclude = False

    def exclude_from_14_list(self):
        """
        Mark this proper dihedral for exclusion from PELE's 1-4 list.

        The exclusion is signalled downstream by turning the third atom's
        index negative when this flag is set.
        """
        self.exclude = True
class Improper(Dihedral):
    """An improper (out-of-plane) dihedral of the topology."""

    _name = 'Improper'
class OFFDihedral(TopologyElement):
    """
    A dihedral as parameterized by the Open Force Field toolkit.
    """

    _name = 'OFFDihedral'
    _writable_attrs = ['atom1_idx', 'atom2_idx', 'atom3_idx', 'atom4_idx',
                       'periodicity', 'phase', 'k', 'idivf']
    # PELE-compatible class produced by to_PELE()
    _to_PELE_class = Dihedral

    def __init__(self, index=-1, atom1_idx=None, atom2_idx=None,
                 atom3_idx=None, atom4_idx=None, periodicity=None,
                 phase=None, k=None, idivf=None):
        """
        Build an OFFDihedral.

        Parameters
        ----------
        index : int
            The index of this Dihedral object
        atom1_idx : int
            The index of the first atom involved in this Dihedral
        atom2_idx : int
            The index of the second atom involved in this Dihedral
        atom3_idx : int
            The index of the third atom involved in this Dihedral
        atom4_idx : int
            The index of the fourth atom involved in this Dihedral
        periodicity : int
            The periodicity of this Dihedral
        phase : simtk.unit.Quantity
            The phase of this Dihedral
        k : simtk.unit.Quantity
            The constant of this Dihedral
        idivf : int
            The idivf of this Dihedral
        """
        self.index = index
        self.atom1_idx = atom1_idx
        self.atom2_idx = atom2_idx
        self.atom3_idx = atom3_idx
        self.atom4_idx = atom4_idx
        self.periodicity = periodicity
        self.phase = phase
        self.k = k
        self.idivf = idivf

    def to_PELE(self):
        """
        Convert this Open Force Field dihedral into its PELE-compatible
        counterpart.

        .. todo ::

           * Review doublecheck idivf term in OFF's torsion equation

        Returns
        -------
        PELE_dihedral : a Dihedral
            The PELE-compatible Dihedral object, or None when any required
            parameter is missing
        """
        # An incomplete parameterization cannot be converted
        if any(attr is None for attr in
               (self.periodicity, self.phase, self.k, self.idivf)):
            return None

        assert self.periodicity in (1, 2, 3, 4, 6), 'Expected values for ' \
            'periodicity are 1, 2, 3, 4 or 6, obtained ' \
            '{}'.format(self.periodicity)
        assert self.phase.value_in_unit(unit.degree) in (0, 180), \
            'Expected values for phase are 0 or 180, obtained ' \
            '{}'.format(self.phase)

        # idivf can take values other than 1 in case of impropers
        # proper's idivfs must always be 1

        # A 180-degree phase flips the sign of the cosine term
        prefactor = -1 if self.phase.value_in_unit(unit.degree) == 180 else 1

        return self._to_PELE_class(index=self.index,
                                   atom1_idx=self.atom1_idx,
                                   atom2_idx=self.atom2_idx,
                                   atom3_idx=self.atom3_idx,
                                   atom4_idx=self.atom4_idx,
                                   periodicity=self.periodicity,
                                   prefactor=prefactor,
                                   constant=self.k / self.idivf)

    def plot(self):
        """Plot this dihedral's energy term as a function of the phi angle."""
        from matplotlib import pyplot
        import numpy as np
        phi = unit.Quantity(np.arange(0, np.pi, 0.1), unit=unit.radians)
        pyplot.plot(phi,
                    self.k * (1 + np.cos(self.periodicity * phi - self.phase)),
                    'r--')
        pyplot.show()
class OFFProper(OFFDihedral):
    """A proper dihedral parameterized by the Open Force Field toolkit."""

    _name = 'OFFProper'
    _to_PELE_class = Proper
class OFFImproper(OFFDihedral):
    """An improper dihedral parameterized by the Open Force Field toolkit."""

    _name = 'OFFImproper'
    _to_PELE_class = Improper
| [
"numpy.cos",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((12018, 12031), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (12029, 12031), False, 'from matplotlib import pyplot\n'), ((18192, 18205), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (18203, 18205), False, 'from matplotlib import pyplot\n'), ((11805, 11829), 'numpy.arange', 'np.arange', (['(0)', 'np.pi', '(0.1)'], {}), '(0, np.pi, 0.1)\n', (11814, 11829), True, 'import numpy as np\n'), ((18010, 18034), 'numpy.arange', 'np.arange', (['(0)', 'np.pi', '(0.1)'], {}), '(0, np.pi, 0.1)\n', (18019, 18034), True, 'import numpy as np\n'), ((18112, 18153), 'numpy.cos', 'np.cos', (['(self.periodicity * x - self.phase)'], {}), '(self.periodicity * x - self.phase)\n', (18118, 18153), True, 'import numpy as np\n'), ((11951, 11979), 'numpy.cos', 'np.cos', (['(self.periodicity * x)'], {}), '(self.periodicity * x)\n', (11957, 11979), True, 'import numpy as np\n')] |
"""
This is the python wrapper that calls the python_wrapper()
c function. This interfaces through c_types so that the user
doesn't have to.
"""
import numpy as np
from ctypes import c_double,c_int,POINTER,cdll
import inspect
import os
# Locate the compiled shared library next to this wrapper module, so that
# loading works regardless of the current working directory.
library_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))+"/Delta_Sigma_miscentering.so"
dslib = cdll.LoadLibrary(library_path)
# Handle to the C entry point that performs the actual computation.
interface = dslib.python_interface
# The C function returns an int status code.
interface.restype = c_int
"""
Arguments to the interface are:
NR,h,om,
Mass,concentration,
Rmis,delta,
single,
averaging, Nbins,
R_bin_min,R_bin_max,
R,
sigma,
Rbins,
sigma_single,
delta_sigma_single
miscentered_sigma,
miscentered_delta_sigma,
ave_miscentered_delta_sigma
ave_delta_sigma_single
"""
# Declare the C signature: twelve scalars followed by nine double arrays
# passed as pointers, in the exact order documented in the string above.
# This order must match the C function's parameter list.
interface.argtypes=[c_int, c_double, c_double,
                    c_double, c_double,
                    c_double, c_int,
                    c_int,
                    c_int,c_int,
                    c_double,c_double,
                    POINTER(c_double),
                    POINTER(c_double),
                    POINTER(c_double),
                    POINTER(c_double),
                    POINTER(c_double),
                    POINTER(c_double),
                    POINTER(c_double),
                    POINTER(c_double),
                    POINTER(c_double)]
def calc_Delta_Sigma_miscentering(R, sigma, cosmo_dict, params):
    """Calculates the DeltaSigma profile given some cosmology, matter power spectra, and input parameters (e.g. mass, concentration, etc.)

    Note: Mass units are Msun/h. Distances are Mpc/h comoving.

    R (array_like): Radii of input surface mass density; Mpc/h
    sigma (array_like): Surface mass density; Msun h/pc^2
    cosmo_dict (dictionary): Contains key-value pairs of cosmological parameters. Required parameters: h, om, and ode.
    params (dictionary): Contains key-value pairs of halo parameters, including: Mass, delta, Rmis, fmis, concentration, NR, Nbins, R_bin_min, R_bin_max, averaging, single (for a single miscentered halo).

    Returns:
        output (dictionary): Contains key-value pairs of all possible quantities associated with the halo.
    """
    Mass,concentration,delta = params["Mass"],params["concentration"],params['delta']
    NR = params["NR"]
    Rmis,fmis = params["Rmis"], params["fmis"]
    # Optional flags: 'single' and 'averaging' default to off (0) when absent.
    if "single" in params: single = params['single']
    else: single = 0
    if "averaging" in params:
        averaging = params['averaging']
        Nbins = params['Nbins']
        R_bin_min = params['R_bin_min']
        R_bin_max = params['R_bin_max']
    else: #Default values to pass to C
        averaging = 0
        Nbins = 2
        R_bin_min = min(R)
        R_bin_max = max(R)
    h,om = cosmo_dict['h'],cosmo_dict['om']
    # The C side expects contiguous double arrays; cast before taking pointers.
    R = R.astype("float64")
    sigma = sigma.astype("float64")
    R_in = R.ctypes.data_as(POINTER(c_double))
    sigma_in = sigma.ctypes.data_as(POINTER(c_double))
    # Output buffers: the C function writes its results into these arrays
    # in place via the pointers passed below.
    sigma_single = np.zeros(NR)
    sigma_single_in = sigma_single.ctypes.data_as(POINTER(c_double))
    delta_sigma_single = np.zeros(NR)
    delta_sigma_single_in = delta_sigma_single.ctypes.data_as(POINTER(c_double))
    miscentered_sigma = np.zeros(NR)
    miscentered_sigma_in = miscentered_sigma.ctypes.data_as(POINTER(c_double))
    miscentered_delta_sigma = np.zeros(NR)
    miscentered_delta_sigma_in = miscentered_delta_sigma.ctypes.data_as(POINTER(c_double))
    Rbins = np.zeros(Nbins)
    Rbins_in = Rbins.ctypes.data_as(POINTER(c_double))
    ave_miscentered_delta_sigma = np.zeros(Nbins)
    ave_miscentered_delta_sigma_in = ave_miscentered_delta_sigma.ctypes.data_as(POINTER(c_double))
    ave_delta_sigma_single = np.zeros(Nbins)
    ave_delta_sigma_single_in = ave_delta_sigma_single.ctypes.data_as(POINTER(c_double))
    # NOTE(review): the module-level interface doc lists the argument order as
    # 'single, averaging, Nbins', but this call passes averaging before single.
    # Confirm against the C function signature — if the doc order is correct,
    # the two flags are swapped here.
    result = interface(NR,h,om,
                       Mass,concentration,
                       Rmis,delta,
                       averaging,single,
                       Nbins,R_bin_min,R_bin_max,
                       R_in,
                       sigma_in,
                       Rbins_in,
                       sigma_single_in,
                       delta_sigma_single_in,
                       miscentered_sigma_in,
                       miscentered_delta_sigma_in,
                       ave_miscentered_delta_sigma_in,
                       ave_delta_sigma_single_in)
    #Now build a dictionary and return it
    output = {"R":R,"sigma":sigma}
    output["miscentered_sigma"] = miscentered_sigma
    output["miscentered_delta_sigma"] = miscentered_delta_sigma
    if single:
        output["sigma_single"] = sigma_single
        output["delta_sigma_single"] = delta_sigma_single
    if averaging:
        output["Rbins"] = Rbins
        output["ave_miscentered_delta_sigma"] = ave_miscentered_delta_sigma
        if single:
            output["ave_delta_sigma_single"] = ave_delta_sigma_single
    return output
| [
"inspect.currentframe",
"numpy.zeros",
"ctypes.POINTER",
"ctypes.cdll.LoadLibrary"
] | [((364, 394), 'ctypes.cdll.LoadLibrary', 'cdll.LoadLibrary', (['library_path'], {}), '(library_path)\n', (380, 394), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((973, 990), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (980, 990), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((1012, 1029), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1019, 1029), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((1051, 1068), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1058, 1068), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((1090, 1107), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1097, 1107), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((1129, 1146), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1136, 1146), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((1168, 1185), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1175, 1185), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((1207, 1224), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1214, 1224), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((1246, 1263), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1253, 1263), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((1285, 1302), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1292, 1302), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((2925, 2937), 'numpy.zeros', 'np.zeros', (['NR'], {}), '(NR)\n', (2933, 2937), True, 'import numpy as np\n'), ((3032, 3044), 'numpy.zeros', 'np.zeros', (['NR'], {}), '(NR)\n', (3040, 3044), True, 'import numpy as np\n'), ((3150, 3162), 'numpy.zeros', 'np.zeros', (['NR'], {}), '(NR)\n', (3158, 3162), True, 'import numpy as np\n'), ((3272, 3284), 'numpy.zeros', 'np.zeros', (['NR'], {}), '(NR)\n', (3280, 
3284), True, 'import numpy as np\n'), ((3389, 3404), 'numpy.zeros', 'np.zeros', (['Nbins'], {}), '(Nbins)\n', (3397, 3404), True, 'import numpy as np\n'), ((3494, 3509), 'numpy.zeros', 'np.zeros', (['Nbins'], {}), '(Nbins)\n', (3502, 3509), True, 'import numpy as np\n'), ((3638, 3653), 'numpy.zeros', 'np.zeros', (['Nbins'], {}), '(Nbins)\n', (3646, 3653), True, 'import numpy as np\n'), ((2832, 2849), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (2839, 2849), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((2887, 2904), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (2894, 2904), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((2988, 3005), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (2995, 3005), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((3107, 3124), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (3114, 3124), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((3223, 3240), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (3230, 3240), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((3357, 3374), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (3364, 3374), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((3441, 3458), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (3448, 3458), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((3590, 3607), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (3597, 3607), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((3724, 3741), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (3731, 3741), False, 'from ctypes import c_double, c_int, POINTER, cdll\n'), ((299, 321), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (319, 321), False, 'import inspect\n')] |
import threading
import numpy as np


class InferenceThread(threading.Thread):
    """Daemon thread that consumes model inference outputs, records
    per-frame inference time, and forwards each output on a queue."""

    def __init__(self, target=None, *args, **kwargs):
        # The thread body is always self._infer; a caller-supplied `target`
        # is intentionally ignored (parameter kept for signature compatibility).
        super().__init__(target=self._infer, *args, **kwargs)
        self.daemon = True
        self._stop_event = threading.Event()

    def _infer(self, session, model, data_source, inferred_stuff_queue):
        """Loop over inference results until stop() is requested."""
        with session.graph.as_default():
            generator = model.inference_generator()
            while True:
                batch = next(generator)
                elapsed = batch['inference_time']
                for idx in np.unique(batch['frame_index']):
                    if idx not in data_source._frames:
                        continue
                    timings = data_source._frames[idx]['time']
                    # Accumulate inference time across batches touching
                    # the same frame.
                    if 'inference' not in timings:
                        timings['inference'] = elapsed
                    else:
                        timings['inference'] += elapsed
                    inferred_stuff_queue.put_nowait(batch)
                # Check the stop flag only after a full batch is processed.
                if self._stop_event.is_set():
                    break

    def stop(self):
        """Signal the inference loop to exit after its current batch."""
        self._stop_event.set()
| [
"threading.Event",
"numpy.unique"
] | [((250, 267), 'threading.Event', 'threading.Event', ([], {}), '()\n', (265, 267), False, 'import threading\n'), ((527, 559), 'numpy.unique', 'np.unique', (["output['frame_index']"], {}), "(output['frame_index'])\n", (536, 559), True, 'import numpy as np\n')] |
import numpy as np
from tqdm import tqdm
class SimpleRNN(object):
    """Character-level vanilla RNN trained with truncated backpropagation
    through time (BPTT) and Adagrad updates.

    NOTE(review): the backward-pass method is spelled `bacprop` (sic); the
    name is kept because it is part of the public interface.
    """
    def __init__(self, vocab_to_idx, idx_to_vocab, vocab_size, learning_rate=0.001, seq_length=30,
                 hidden_layer_size=128,
                 epochs=100, verbose = True, sample_step=500, clip_rate=5):
        '''
        SimpleRNN

        Inputs: vocab_to_idx - dictionary where keys are unique characters (set(text)) from the training text
                idx_to_vocab - dictionary where keys are positions of the unique characters from the training text
                vocab_size - how many unique characters are there in the training text
                learning_rate - number used at the update time, how much we are going to move towards the minima
                seq_length - how many characters we feed at the time (this number is also how many time steps we have at the unrolled network)
                hidden_layer_size - how many units we have at the hidden layer
                epochs - how many times we are going to train the network
                verbose - if True every 1000 steps you will see the current loss
                sample_step - at how many sequences the network will sample some characters
                clip_rate - this number will clip gradients below -clip_rate and above clip_rate. This param helps at overcoming the Exploding Gradient problem.
        '''
        self.learning_rate = learning_rate
        self.seq_len = seq_length
        self.h_size = hidden_layer_size
        self.epochs = epochs
        self.vocab_size = vocab_size
        self.verbose = verbose
        self.sample_step = sample_step
        # Initial smoothed loss: expected cross-entropy of a uniform
        # prediction (-log(1/V)) summed over one sequence.
        self.smooth_loss = -np.log(1.0/self.vocab_size)*self.seq_len
        self.clip_rate = clip_rate
        self.vocab_to_idx = vocab_to_idx
        self.idx_to_vocab = idx_to_vocab
        # Setting up weight and biases for the network
        # (small random init, scale 0.01; biases start at zero)
        self.w_ih = np.random.randn(self.vocab_size, self.h_size)*0.01
        self.w_hh = np.random.randn(self.h_size, self.h_size)*0.01
        self.b_hh = np.zeros((1, self.h_size))
        self.w_ho = np.random.randn(self.h_size, self.vocab_size)*0.01
        self.b_ho = np.zeros((1, self.vocab_size))
        #This state will be updated over time
        self.state = np.zeros((1, self.h_size))
        #Memory params for Adagrad (running sums of squared gradients)
        self.m_w_ih = np.zeros_like(self.w_ih)
        self.m_w_hh = np.zeros_like(self.w_hh)
        self.m_w_ho = np.zeros_like(self.w_ho)
        self.m_b_ho = np.zeros_like(self.b_ho)
        self.m_b_hh = np.zeros_like(self.b_hh)

    def batch_opt(self, X, index):
        '''
        This function is used to create batches for feeding data into the RNN

        Inputs: X - encoded input data (output from encoding_dataset function)
                index - is index of training loop, this index is used to determine at which place should we start our batch

        Outputs: X_batch_new - one_hot encoded tensor. size: [self.seq_len, 1, self.vocab_size]
                 y_batch_new - is the similar one_hot tensor, but every char is shifted one time step to the right. size:[self.seq_len, 1, self.vocab_size]
        '''
        # Targets are the inputs shifted one character to the right.
        X_batch = X[index:index+self.seq_len]
        y_batch = X[index+1:index+self.seq_len+1]
        X_batch_new = []
        y_batch_new = []
        for i in X_batch:
            one_hot_char = np.zeros((1, self.vocab_size))
            one_hot_char[0][i] = 1
            X_batch_new.append(one_hot_char)
        for i in y_batch:
            one_hot_char = np.zeros((1, self.vocab_size))
            one_hot_char[0][i] = 1
            y_batch_new.append(one_hot_char)
        return X_batch_new, y_batch_new

    def encoding_dataset(self, X):
        '''
        This function is used to encode text to the integers

        Inputs: X - text to be processed in the string format

        Outputs: encoded_data - same sized 'text' as input X but every character is encoded to integer,
                                based at integer index in vocab_to_idx
        '''
        enoded_data = []
        for char in X:
            enoded_data.append(self.vocab_to_idx[char])
        return enoded_data

    def fit(self, X):
        """Train the network on text X for self.epochs epochs, printing the
        smoothed loss and periodically sampling text while training."""
        number_of_full_sequences = len(X) // self.seq_len
        #chopping end of the input text so we have full number of sequences
        cut_X = X[:number_of_full_sequences*self.seq_len]
        #Encoding text
        encoded_cut_X = self.encoding_dataset(cut_X)
        for i in range(self.epochs):
            #Delete tqdm keyword if you don't like the loading bar while training
            for ii in tqdm(range(0, len(encoded_cut_X)-self.seq_len, self.seq_len)): #Batch loop
                X_batch, y_batch = self.batch_opt(encoded_cut_X, ii)
                outputs, probs, states = self.forward(X_batch)
                # Cross-entropy loss summed over the sequence.
                loss = 0
                for ts in range(self.seq_len):
                    loss += -np.log(probs[ts][0, np.argmax(y_batch[ts])])
                # Exponential moving average of the loss for stable reporting.
                self.smooth_loss = self.smooth_loss * 0.999 + loss *0.001
                if self.verbose:
                    if ii % 1000 == 0:
                        print('Current loss is: {}'.format(self.smooth_loss))
                #Here will be a part for Backpropagation
                dW_ih, dW_hh, dW_ho, db_hh, db_ho = self.bacprop(X_batch, y_batch, probs, states)
                #Simple Adagrad update of params (based on SGD):
                # accumulate squared gradients, then scale the step per-weight.
                for params, derivative_params, memory in zip([self.w_ih, self.w_hh, self.w_ho, self.b_hh, self.b_ho],
                                                             [dW_ih, dW_hh, dW_ho, db_hh, db_ho],
                                                             [self.m_w_ih, self.m_w_hh, self.m_w_ho, self.m_b_hh, self.m_b_ho]):
                    memory += derivative_params * derivative_params
                    params += -self.learning_rate * derivative_params / np.sqrt(memory + 1e-8)
                if ii % self.sample_step == 0:
                    #SAMPLE time ^_^
                    sampled_string = self.sample(200, 20)
                    print(sampled_string)
            print("EPOCH: {}/{}".format(i, self.epochs))

    def forward(self, X):
        '''
        Inputs: X - characters for the network input

        Outputs: outputs - logits from the output layer
                 output_probs - softmax probabilities from the output layer
                 hidden_states - states of the RNN layer in the network
        '''
        current_state = self.state
        outputs = {}
        output_probs = {}
        hidden_states = {}
        # Index -1 holds the state carried over from the previous batch,
        # needed by bacprop for the first time step.
        hidden_states[-1] = current_state
        #forward prop loop
        for ts in range(self.seq_len):
            current_state = np.tanh(np.dot(X[ts], self.w_ih) + np.dot(current_state, self.w_hh) + self.b_hh)
            hidden_states[ts] = current_state
            outputs[ts] = np.dot(current_state, self.w_ho) + self.b_ho
            output_probs[ts] = np.exp(outputs[ts]) / np.sum(np.exp(outputs[ts])) #softmax
        # Persist the final state so consecutive batches are stateful.
        self.state = current_state
        return outputs, output_probs, hidden_states

    def bacprop(self, X, y, probs, states):
        '''
        Inputs: X - input to the forward step of the network
                y - targets of the current batch
                probs - probability (softmax) outputs from the forward function
                states - states for each time step computed in the forward step

        Outputs: derivatives of every learnable param in the network
        '''
        dW_ih = np.zeros_like(self.w_ih)
        dW_hh = np.zeros_like(self.w_hh)
        dW_ho = np.zeros_like(self.w_ho)
        db_hh = np.zeros_like(self.b_hh)
        db_ho = np.zeros_like(self.b_ho)
        # Gradient flowing back into the hidden state from the next time step.
        dh_s_next = np.zeros_like(states[0])
        #Backprop through time - loop (latest time step first)
        for ts in reversed(range(self.seq_len)):
            # d(softmax cross-entropy)/d(logits) = probs - one_hot(target)
            dy = np.copy(probs[ts])
            dy[0][np.argmax(y[ts])] -= 1
            dW_ho += np.dot(states[ts].T, dy)
            db_ho += dy
            # (1 - h^2) is the derivative of tanh at the stored state.
            dhidden = (1 - states[ts]**2) * (np.dot(dy, self.w_ho.T) + dh_s_next)
            dh_s_next = np.dot(dhidden, self.w_hh.T)
            dW_hh += np.dot(states[ts-1].T, dhidden)
            dW_ih += np.dot(X[ts].T, dhidden)
            db_hh += dhidden
        #Clipping gradients to [-clip_rate, clip_rate] against explosion
        for params in [dW_ih, dW_hh, dW_ho, db_hh, db_ho]:
            np.clip(params, -self.clip_rate, self.clip_rate, out=params)
        return dW_ih, dW_hh, dW_ho, db_hh, db_ho

    def sample(self, number_to_sampled, starting_inx):
        '''
        Inputs: number_to_sampled - how many chars do we want to sample
                starting_idx - from which character we want to start sampling

        Outputs: sampled_string - string made of sampled characters with len == number_to_sampled
        '''
        #We always start sampling with the newest network state
        sampling_state = self.state
        #Setting up starting params for the sampling process
        sampled_string = ""
        x = np.zeros((1, self.vocab_size))
        x[0][starting_inx] = 1
        #Sampling loop
        for i in range(number_to_sampled):
            #Forward step of the network
            hidden_sample = np.tanh(np.dot(x, self.w_ih) + np.dot(sampling_state, self.w_hh) + self.b_hh)
            output = np.dot(hidden_sample, self.w_ho) + self.b_ho
            probs = np.exp(output)/np.sum(np.exp(output))
            # Draw the next character index from the softmax distribution.
            index = np.random.choice(range(self.vocab_size), p=probs.ravel())
            #setting x-one_hot_vector for the next character
            x = np.zeros((1, self.vocab_size))
            x[0][index] = 1
            #Find the char with the sampled index and concat to the output string
            char = self.idx_to_vocab[index]
            sampled_string += char
        return sampled_string
###############################################
# Script entry: train the character-level RNN on a plain-text corpus.
with open('zero_to_one.txt') as f:
    text = f.read()
vocab = set(text)
vocab_size = len(vocab)
#Creating vocabs for mapping chars and ints in both directions
vocab_to_int = {char: i for i, char in enumerate(vocab)}
int_to_vocab = {i: char for i, char in enumerate(vocab)}
#Setting up hyperparameters for RNN
hidden_layer_size = 128
seq_length = 20
learning_rate = 0.01
# NOTE(review): seq_length above (20) is overridden by the hard-coded 25
# below; the hard-coded value is kept to preserve the original configuration.
model = SimpleRNN(vocab_to_idx=vocab_to_int,
                  idx_to_vocab=int_to_vocab,
                  vocab_size=len(vocab),
                  learning_rate=learning_rate,
                  seq_length=25,
                  # fixed: keyword was misspelled 'hiden_layer_size', which
                  # raised TypeError (SimpleRNN has no such parameter)
                  hidden_layer_size=hidden_layer_size,
                  epochs=100,
                  verbose=True,
                  sample_step=500,
                  clip_rate=5)
model.fit(text)
| [
"numpy.clip",
"numpy.copy",
"numpy.sqrt",
"numpy.log",
"numpy.zeros_like",
"numpy.argmax",
"numpy.exp",
"numpy.dot",
"numpy.zeros",
"numpy.random.randn"
] | [((1782, 1808), 'numpy.zeros', 'np.zeros', (['(1, self.h_size)'], {}), '((1, self.h_size))\n', (1790, 1808), True, 'import numpy as np\n'), ((1889, 1919), 'numpy.zeros', 'np.zeros', (['(1, self.vocab_size)'], {}), '((1, self.vocab_size))\n', (1897, 1919), True, 'import numpy as np\n'), ((1975, 2001), 'numpy.zeros', 'np.zeros', (['(1, self.h_size)'], {}), '((1, self.h_size))\n', (1983, 2001), True, 'import numpy as np\n'), ((2048, 2072), 'numpy.zeros_like', 'np.zeros_like', (['self.w_ih'], {}), '(self.w_ih)\n', (2061, 2072), True, 'import numpy as np\n'), ((2089, 2113), 'numpy.zeros_like', 'np.zeros_like', (['self.w_hh'], {}), '(self.w_hh)\n', (2102, 2113), True, 'import numpy as np\n'), ((2130, 2154), 'numpy.zeros_like', 'np.zeros_like', (['self.w_ho'], {}), '(self.w_ho)\n', (2143, 2154), True, 'import numpy as np\n'), ((2171, 2195), 'numpy.zeros_like', 'np.zeros_like', (['self.b_ho'], {}), '(self.b_ho)\n', (2184, 2195), True, 'import numpy as np\n'), ((2212, 2236), 'numpy.zeros_like', 'np.zeros_like', (['self.b_hh'], {}), '(self.b_hh)\n', (2225, 2236), True, 'import numpy as np\n'), ((6346, 6370), 'numpy.zeros_like', 'np.zeros_like', (['self.w_ih'], {}), '(self.w_ih)\n', (6359, 6370), True, 'import numpy as np\n'), ((6381, 6405), 'numpy.zeros_like', 'np.zeros_like', (['self.w_hh'], {}), '(self.w_hh)\n', (6394, 6405), True, 'import numpy as np\n'), ((6416, 6440), 'numpy.zeros_like', 'np.zeros_like', (['self.w_ho'], {}), '(self.w_ho)\n', (6429, 6440), True, 'import numpy as np\n'), ((6452, 6476), 'numpy.zeros_like', 'np.zeros_like', (['self.b_hh'], {}), '(self.b_hh)\n', (6465, 6476), True, 'import numpy as np\n'), ((6487, 6511), 'numpy.zeros_like', 'np.zeros_like', (['self.b_ho'], {}), '(self.b_ho)\n', (6500, 6511), True, 'import numpy as np\n'), ((6527, 6551), 'numpy.zeros_like', 'np.zeros_like', (['states[0]'], {}), '(states[0])\n', (6540, 6551), True, 'import numpy as np\n'), ((7612, 7642), 'numpy.zeros', 'np.zeros', (['(1, self.vocab_size)'], {}), '((1, 
self.vocab_size))\n', (7620, 7642), True, 'import numpy as np\n'), ((1655, 1700), 'numpy.random.randn', 'np.random.randn', (['self.vocab_size', 'self.h_size'], {}), '(self.vocab_size, self.h_size)\n', (1670, 1700), True, 'import numpy as np\n'), ((1721, 1762), 'numpy.random.randn', 'np.random.randn', (['self.h_size', 'self.h_size'], {}), '(self.h_size, self.h_size)\n', (1736, 1762), True, 'import numpy as np\n'), ((1824, 1869), 'numpy.random.randn', 'np.random.randn', (['self.h_size', 'self.vocab_size'], {}), '(self.h_size, self.vocab_size)\n', (1839, 1869), True, 'import numpy as np\n'), ((2943, 2973), 'numpy.zeros', 'np.zeros', (['(1, self.vocab_size)'], {}), '((1, self.vocab_size))\n', (2951, 2973), True, 'import numpy as np\n'), ((3075, 3105), 'numpy.zeros', 'np.zeros', (['(1, self.vocab_size)'], {}), '((1, self.vocab_size))\n', (3083, 3105), True, 'import numpy as np\n'), ((6640, 6658), 'numpy.copy', 'np.copy', (['probs[ts]'], {}), '(probs[ts])\n', (6647, 6658), True, 'import numpy as np\n'), ((6707, 6731), 'numpy.dot', 'np.dot', (['states[ts].T', 'dy'], {}), '(states[ts].T, dy)\n', (6713, 6731), True, 'import numpy as np\n'), ((6835, 6863), 'numpy.dot', 'np.dot', (['dhidden', 'self.w_hh.T'], {}), '(dhidden, self.w_hh.T)\n', (6841, 6863), True, 'import numpy as np\n'), ((6876, 6909), 'numpy.dot', 'np.dot', (['states[ts - 1].T', 'dhidden'], {}), '(states[ts - 1].T, dhidden)\n', (6882, 6909), True, 'import numpy as np\n'), ((6920, 6944), 'numpy.dot', 'np.dot', (['X[ts].T', 'dhidden'], {}), '(X[ts].T, dhidden)\n', (6926, 6944), True, 'import numpy as np\n'), ((7044, 7104), 'numpy.clip', 'np.clip', (['params', '(-self.clip_rate)', 'self.clip_rate'], {'out': 'params'}), '(params, -self.clip_rate, self.clip_rate, out=params)\n', (7051, 7104), True, 'import numpy as np\n'), ((8130, 8160), 'numpy.zeros', 'np.zeros', (['(1, self.vocab_size)'], {}), '((1, self.vocab_size))\n', (8138, 8160), True, 'import numpy as np\n'), ((1451, 1480), 'numpy.log', 'np.log', (['(1.0 / 
self.vocab_size)'], {}), '(1.0 / self.vocab_size)\n', (1457, 1480), True, 'import numpy as np\n'), ((5786, 5818), 'numpy.dot', 'np.dot', (['current_state', 'self.w_ho'], {}), '(current_state, self.w_ho)\n', (5792, 5818), True, 'import numpy as np\n'), ((5853, 5872), 'numpy.exp', 'np.exp', (['outputs[ts]'], {}), '(outputs[ts])\n', (5859, 5872), True, 'import numpy as np\n'), ((6668, 6684), 'numpy.argmax', 'np.argmax', (['y[ts]'], {}), '(y[ts])\n', (6677, 6684), True, 'import numpy as np\n'), ((7863, 7895), 'numpy.dot', 'np.dot', (['hidden_sample', 'self.w_ho'], {}), '(hidden_sample, self.w_ho)\n', (7869, 7895), True, 'import numpy as np\n'), ((7919, 7933), 'numpy.exp', 'np.exp', (['output'], {}), '(output)\n', (7925, 7933), True, 'import numpy as np\n'), ((5882, 5901), 'numpy.exp', 'np.exp', (['outputs[ts]'], {}), '(outputs[ts])\n', (5888, 5901), True, 'import numpy as np\n'), ((6783, 6806), 'numpy.dot', 'np.dot', (['dy', 'self.w_ho.T'], {}), '(dy, self.w_ho.T)\n', (6789, 6806), True, 'import numpy as np\n'), ((7941, 7955), 'numpy.exp', 'np.exp', (['output'], {}), '(output)\n', (7947, 7955), True, 'import numpy as np\n'), ((4995, 5018), 'numpy.sqrt', 'np.sqrt', (['(memory + 1e-08)'], {}), '(memory + 1e-08)\n', (5002, 5018), True, 'import numpy as np\n'), ((5659, 5683), 'numpy.dot', 'np.dot', (['X[ts]', 'self.w_ih'], {}), '(X[ts], self.w_ih)\n', (5665, 5683), True, 'import numpy as np\n'), ((5686, 5718), 'numpy.dot', 'np.dot', (['current_state', 'self.w_hh'], {}), '(current_state, self.w_hh)\n', (5692, 5718), True, 'import numpy as np\n'), ((7781, 7801), 'numpy.dot', 'np.dot', (['x', 'self.w_ih'], {}), '(x, self.w_ih)\n', (7787, 7801), True, 'import numpy as np\n'), ((7804, 7837), 'numpy.dot', 'np.dot', (['sampling_state', 'self.w_hh'], {}), '(sampling_state, self.w_hh)\n', (7810, 7837), True, 'import numpy as np\n'), ((4260, 4282), 'numpy.argmax', 'np.argmax', (['y_batch[ts]'], {}), '(y_batch[ts])\n', (4269, 4282), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
import sys
import argparse
print(sys.version)
print(sys.argv)
print(sys.api_version)
from yolo3. yolo import YOLO, detect_video
from PIL import Image
import cv2
import numpy as np
from optical_flow import CameraStabilization as camStab
def detect_img(yolo):
    """Run live webcam detection with a YOLO model, drawing an
    exponentially-smoothed box around the mean detection.

    Args:
        yolo: initialized YOLO instance exposing detect_image() and
            close_session().

    Press Esc (key 27) to exit; releases the capture and windows on exit.
    """
    cap = cv2.VideoCapture(0)
    # Running EMA of the box corners; YOLO boxes are (top, left, bottom, right).
    mean_box_cord = np.array([0, 0, 0, 0], dtype=np.float64)
    frame = 0
    # cam_stab = camStab(flip=True)
    while True:
        ret, image = cap.read()
        if ret:
            image_mat = Image.fromarray(image)
            r_image, out_boxes, out_score = yolo.detect_image(image_mat)
            if out_score.shape[0] > 0:
                m_box = out_boxes.mean(0)
                # First detection seeds the EMA; afterwards blend 80/20.
                if frame == 0:
                    mean_box_cord = m_box[:]
                else:
                    mean_box_cord = mean_box_cord * 0.8 + m_box[:] * 0.2
                # fixed: the EMA coordinates are floats, but cv2.circle
                # centers and numpy slice indices both require integers.
                top, left, bottom, right = (int(round(c)) for c in mean_box_cord)
                img = cv2.circle(
                    r_image, (left, top), 5, (200, 0, 0), -1)
                img = cv2.circle(
                    img, (right, bottom), 5, (200, 0, 0), -1)
                img = cv2.circle(img, ((left + right) // 2,
                                        (top + bottom) // 2), 5, (0, 200, 0), -1)
                frame += 1
                cv2.imshow("boxed", img)
                cv2.imshow("main_box", image[top:bottom, left:right])
            else:
                print("none")
        k = cv2.waitKey(30) & 0xff
        if (k == 27):
            break
    yolo.close_session()
    cv2.destroyAllWindows()
    cap.release()
# Kept for interface compatibility with the original argparse-based script.
FLAGS = None

if __name__ == '__main__':
    print("Image detection mode")
    # Construct the YOLO model with default settings and stream the webcam.
    detect_img(YOLO())
| [
"PIL.Image.fromarray",
"cv2.imshow",
"numpy.array",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"yolo3.yolo.YOLO",
"cv2.waitKey"
] | [((295, 314), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (311, 314), False, 'import cv2\n'), ((335, 357), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (343, 357), True, 'import numpy as np\n'), ((1703, 1726), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1724, 1726), False, 'import cv2\n'), ((1838, 1844), 'yolo3.yolo.YOLO', 'YOLO', ([], {}), '()\n', (1842, 1844), False, 'from yolo3.yolo import YOLO, detect_video\n'), ((546, 568), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (561, 568), False, 'from PIL import Image\n'), ((1611, 1626), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (1622, 1626), False, 'import cv2\n'), ((919, 996), 'cv2.circle', 'cv2.circle', (['r_image', '(mean_box_cord[1], mean_box_cord[0])', '(5)', '(200, 0, 0)', '(-1)'], {}), '(r_image, (mean_box_cord[1], mean_box_cord[0]), 5, (200, 0, 0), -1)\n', (929, 996), False, 'import cv2\n'), ((1040, 1113), 'cv2.circle', 'cv2.circle', (['img', '(mean_box_cord[3], mean_box_cord[2])', '(5)', '(200, 0, 0)', '(-1)'], {}), '(img, (mean_box_cord[3], mean_box_cord[2]), 5, (200, 0, 0), -1)\n', (1050, 1113), False, 'import cv2\n'), ((1395, 1419), 'cv2.imshow', 'cv2.imshow', (['"""boxed"""', 'img'], {}), "('boxed', img)\n", (1405, 1419), False, 'import cv2\n'), ((1436, 1539), 'cv2.imshow', 'cv2.imshow', (['"""main_box"""', 'image[mean_box_cord[0]:mean_box_cord[2], mean_box_cord[1]:mean_box_cord[3]]'], {}), "('main_box', image[mean_box_cord[0]:mean_box_cord[2],\n mean_box_cord[1]:mean_box_cord[3]])\n", (1446, 1539), False, 'import cv2\n')] |
import os,time
import numpy as np
import rawpy
import glob, scipy.io
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from model import SeeInDark
from net_canny import Net
from pytorch_msssim import MSSSIM
saveimg = True
ps = 512 #patch size for training
device = torch.device('cuda') #'cuda:'+os.environ['CUDA']#torch.device('cuda') #if torch.cuda.is_available() else 'cpu')
DIR = '/home/cse/ug/15074014/'
# Developer-machine override: smaller patches and CPU for local debugging.
if os.environ["USER"] == "ketankr9":
    DIR = '/home/ketankr9/workspace/mtp/codes/'
    device = torch.device('cpu')
    ps = 256
input_dir = DIR + 'Sony/short/'
gt_dir = DIR + 'Sony/long/'
result_dir = model_dir = DIR + 'Sony/result_Sony_edge_psnr_ssim/'
os.system('mkdir -p '+result_dir)
chpkdir = model_dir+'checkpoint_sony_resume.pth'
writer = SummaryWriter(result_dir+'log')
print(device)
#get train and test IDs
# Ground-truth filenames start with '0' for training scenes and '1' for test
# scenes; the first 5 characters are the numeric scene id.
train_fns = glob.glob(gt_dir + '0*.ARW')
train_ids = []
for i in range(len(train_fns)):
    _, train_fn = os.path.split(train_fns[i])
    train_ids.append(int(train_fn[0:5]))

test_fns = glob.glob(gt_dir + '/1*.ARW')
test_ids = []
for i in range(len(test_fns)):
    _, test_fn = os.path.split(test_fns[i])
    test_ids.append(int(test_fn[0:5]))

save_freq = 100

DEBUG = 0
# Debug mode: shrink the dataset to 5 scenes for quick iteration.
if DEBUG == 1:
    save_freq = 100
    train_ids = train_ids[0:5]
    test_ids = test_ids[0:5]
def pack_raw(raw):
    """Pack a Bayer-mosaic raw image into a 4-channel half-resolution array.

    The black level (512) is subtracted and the result is scaled by the
    sensor's dynamic range (16383 - 512). The four 2x2 mosaic phases are
    then stacked as separate channels, halving height and width.
    """
    plane = raw.raw_image_visible.astype(np.float32)
    # Normalise: subtract black level, clip negatives, scale to [0, 1].
    plane = np.maximum(plane - 512, 0) / (16383 - 512)
    plane = plane[:, :, np.newaxis]
    height, width = plane.shape[0], plane.shape[1]
    # One channel per 2x2 mosaic phase (same ordering as the original code).
    phases = [
        plane[0:height:2, 0:width:2, :],
        plane[0:height:2, 1:width:2, :],
        plane[1:height:2, 1:width:2, :],
        plane[1:height:2, 0:width:2, :],
    ]
    return np.concatenate(phases, axis=2)
# Mean-squared-error criterion, averaged over all elements.
mseclass = nn.MSELoss(reduction='mean')

def reduce_mean(out_im, gt_im):
    """Return the element-wise MSE between prediction and ground truth."""
    loss = mseclass(out_im, gt_im)
    return loss
# LOSS VGG
from utils import VGGLoss, GaussianSmoothing
vggloss = VGGLoss(device=device)
gaussianSmoothing = GaussianSmoothing(3, 5, 1, device=device)
# Colour loss: L1 distance between Gaussian-blurred images. Blurring first
# discards high-frequency texture, presumably so the loss emphasises overall
# colour/brightness agreement.
def colorloss(out, gt):
    """Mean absolute difference between low-pass filtered images."""
    smoothed_out = gaussianSmoothing(out)
    smoothed_gt = gaussianSmoothing(gt)
    diff = smoothed_out - smoothed_gt
    return diff.abs().mean()
# MSSSIM
msssim = MSSSIM().to(device)
# LOSS CANNY
canny = Net(threshold=3.0, device=device).to(device)
canny.eval()
def canny_loss(out_im, gt_im, ch=""):
    """Edge-consistency loss between output and ground-truth images.

    Both images are run through the differentiable Canny network; the loss
    compares the resulting edge maps. `ch` selects the comparison:
      '1'      : MSE on the thresholded edge maps
      '2'      : MSE on the early-threshold maps
      '3'      : sum of both
      '1bool'/'2bool'/'3bool': same, but on binarised (edge vs. no-edge) maps
      ''       : (default) MSE on max-normalised thresholded maps
    """
    blurred_img1, grad_mag1, grad_orientation1, thin_edges1, thresholded1, early_threshold1 = canny(gt_im)
    blurred_img2, grad_mag2, grad_orientation2, thin_edges2, thresholded2, early_threshold2 = canny(out_im)
    if ch == '1':
        return mseclass(thresholded1, thresholded2)
    elif ch == '1bool':
        # fixed: original compared against an undefined name `zero`
        # (NameError); binarise against 0 and cast to float, since MSELoss
        # does not accept boolean tensors.
        return mseclass((thresholded1 != 0).float(), (thresholded2 != 0).float())
    elif ch == '2':
        return mseclass(early_threshold1, early_threshold2)
    elif ch == '2bool':
        return mseclass((early_threshold1 != 0).float(), (early_threshold2 != 0).float())
    elif ch == '3':
        return mseclass(thresholded1, thresholded2) + mseclass(early_threshold1, early_threshold2)
    elif ch == '3bool':
        return (mseclass((thresholded1 != 0).float(), (thresholded2 != 0).float())
                + mseclass((early_threshold1 != 0).float(), (early_threshold2 != 0).float()))
    # Default: normalise each map by its max (+1 guards against division by
    # zero on edge-free images) before comparing.
    return mseclass(thresholded1/(thresholded1.max()+1), thresholded2/(thresholded2.max()+1))
#Raw data takes long time to load. Keep them in memory after loaded.
gt_images=[None]*6000
input_images = {}
# Inputs are cached per exposure-ratio key; keys come from str(ratio)[0:3]
# in the training loop ('300', '250', '100').
input_images['300'] = [None]*len(train_ids)
input_images['250'] = [None]*len(train_ids)
input_images['100'] = [None]*len(train_ids)

# Per-image running loss, indexed by training-set position.
g_loss = np.zeros((5000,1))

learning_rate = 1e-4
model = SeeInDark().to(device)
opt = optim.Adam(model.parameters(), lr = learning_rate)

#load last saved model weights
if os.path.isfile(chpkdir):
    # Resume: restore model/optimizer state and continue at the next epoch.
    checkpoint = torch.load(chpkdir)
    model.load_state_dict(checkpoint['model'])
    opt.load_state_dict(checkpoint['optimizer'])
    lastepoch = checkpoint['epoch'] + 1
else:
    # Fresh run: start at epoch 0 with freshly initialized weights.
    lastepoch = 0
    model._initialize_weights()
print("*****lastepoch***** ", lastepoch)
for epoch in range(lastepoch,4001):
cnt=0
if epoch > 2000:
for g in opt.param_groups:
g['lr'] = 1e-5
E_loss = {'CANNY':0, 'MSE':0, 'MSSSIM':0, 'total':0}
for ind in np.random.permutation(len(train_ids)):
# get the path from image id
train_id = train_ids[ind]
in_files = glob.glob(input_dir + '%05d_00*.ARW'%train_id)
in_path = in_files[np.random.random_integers(0,len(in_files)-1)]
_, in_fn = os.path.split(in_path)
gt_files = glob.glob(gt_dir + '%05d_00*.ARW'%train_id)
gt_path = gt_files[0]
_, gt_fn = os.path.split(gt_path)
in_exposure = float(in_fn[9:-5])
gt_exposure = float(gt_fn[9:-5])
ratio = min(gt_exposure/in_exposure,300)
st=time.time()
cnt+=1
if input_images[str(ratio)[0:3]][ind] is None:
raw = rawpy.imread(in_path)
input_images[str(ratio)[0:3]][ind] = np.expand_dims(pack_raw(raw),axis=0) *ratio
gt_raw = rawpy.imread(gt_path)
im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
gt_images[ind] = np.expand_dims(np.float32(im/65535.0),axis = 0)
#crop
H = input_images[str(ratio)[0:3]][ind].shape[1]
W = input_images[str(ratio)[0:3]][ind].shape[2]
xx = np.random.randint(0,W-ps)
yy = np.random.randint(0,H-ps)
input_patch = input_images[str(ratio)[0:3]][ind][:,yy:yy+ps,xx:xx+ps,:]
gt_patch = gt_images[ind][:,yy*2:yy*2+ps*2,xx*2:xx*2+ps*2,:]
if np.random.randint(2,size=1)[0] == 1: # random flip
input_patch = np.flip(input_patch, axis=1)
gt_patch = np.flip(gt_patch, axis=1)
if np.random.randint(2,size=1)[0] == 1:
input_patch = np.flip(input_patch, axis=0)
gt_patch = np.flip(gt_patch, axis=0)
if np.random.randint(2,size=1)[0] == 1: # random transpose
input_patch = np.transpose(input_patch, (0,2,1,3))
gt_patch = np.transpose(gt_patch, (0,2,1,3))
input_patch = np.minimum(input_patch,1.0)
gt_patch = np.maximum(gt_patch, 0.0)
in_img = torch.from_numpy(input_patch).permute(0,3,1,2).to(device)
gt_img = torch.from_numpy(gt_patch).permute(0,3,1,2).to(device)
model.zero_grad()
out_img = model(in_img)
# c_loss = colorloss(out_img, gt_img)
# vgg_loss = vggloss.loss(out_img, gt_img)
# mse_loss = reduce_mean(out_img, gt_img)
# loss = c_loss + vgg_loss + mse_loss
alpha, beta = 0.8, 0.1
can_loss = (1-alpha-beta)*canny_loss(out_img, gt_img)
mse_loss = alpha*reduce_mean(out_img, gt_img)
msssim_loss = beta*(1-msssim(out_img, gt_img))
loss = mse_loss + can_loss + msssim_loss
loss.backward()
opt.step()
g_loss[ind]=loss.item()
out=("%d %d C:%.3f S:%.3f P:%.3f Loss=%.3f Time=%.3f"%(epoch,cnt,can_loss, msssim_loss, mse_loss, np.mean(g_loss[np.where(g_loss)]),time.time()-st))
print(out)
try:
os.system('echo ' + out + ' >> '+result_dir+'jobout.txt')
except:
pass
# E_loss = {'CANNY':0, 'MSE':0, 'MSSSIM':0, 'total':0}
E_loss['CANNY'] += can_loss
E_loss['MSE'] += mse_loss
E_loss['MSSSIM'] += msssim_loss
E_loss['total'] += np.mean(g_loss[np.where(g_loss)])
if epoch%save_freq==0:
if not os.path.isdir(result_dir + '%04d'%epoch):
os.makedirs(result_dir + '%04d'%epoch)
output = out_img.permute(0, 2, 3, 1).cpu().data.numpy()
output = np.minimum(np.maximum(output,0),1)
if saveimg:
temp = np.concatenate((gt_patch[0,:,:,:], output[0,:,:,:]),axis=1)
# scipy.misc.toimage(temp*255, high=255, low=0, cmin=0, cmax=255).save(result_dir + '%04d/%05d_00_train_%d.jpg'%(epoch,train_id,ratio))
# torch.save(model.state_dict(), model_dir+'checkpoint_sony_e%04d.pth'%epoch)
torch.save({'epoch': epoch, \
'model': model.state_dict(), \
'optimizer': opt.state_dict(),\
}, model_dir+'checkpoint_sony_resume.pth')
writer.add_scalar('Loss/Edge', E_loss['CANNY'], epoch)
writer.add_scalar('Loss/MSSSIM', E_loss['MSSSIM'], epoch)
writer.add_scalar('Loss/RMean', E_loss['MSE'], epoch)
writer.add_scalar('LossTotal', E_loss['total'], epoch)
| [
"utils.VGGLoss",
"torch.from_numpy",
"torch.nn.MSELoss",
"rawpy.imread",
"numpy.flip",
"tensorboardX.SummaryWriter",
"numpy.where",
"os.path.split",
"os.path.isdir",
"numpy.concatenate",
"numpy.maximum",
"net_canny.Net",
"glob.glob",
"torch.abs",
"pytorch_msssim.MSSSIM",
"os.path.isfil... | [((321, 341), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (333, 341), False, 'import torch\n'), ((722, 757), 'os.system', 'os.system', (["('mkdir -p ' + result_dir)"], {}), "('mkdir -p ' + result_dir)\n", (731, 757), False, 'import os, time\n'), ((815, 848), 'tensorboardX.SummaryWriter', 'SummaryWriter', (["(result_dir + 'log')"], {}), "(result_dir + 'log')\n", (828, 848), False, 'from tensorboardX import SummaryWriter\n'), ((899, 927), 'glob.glob', 'glob.glob', (["(gt_dir + '0*.ARW')"], {}), "(gt_dir + '0*.ARW')\n", (908, 927), False, 'import glob, scipy.io\n'), ((1074, 1103), 'glob.glob', 'glob.glob', (["(gt_dir + '/1*.ARW')"], {}), "(gt_dir + '/1*.ARW')\n", (1083, 1103), False, 'import glob, scipy.io\n'), ((1868, 1896), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (1878, 1896), True, 'import torch.nn as nn\n'), ((2031, 2053), 'utils.VGGLoss', 'VGGLoss', ([], {'device': 'device'}), '(device=device)\n', (2038, 2053), False, 'from utils import VGGLoss, GaussianSmoothing\n'), ((2074, 2115), 'utils.GaussianSmoothing', 'GaussianSmoothing', (['(3)', '(5)', '(1)'], {'device': 'device'}), '(3, 5, 1, device=device)\n', (2091, 2115), False, 'from utils import VGGLoss, GaussianSmoothing\n'), ((3579, 3598), 'numpy.zeros', 'np.zeros', (['(5000, 1)'], {}), '((5000, 1))\n', (3587, 3598), True, 'import numpy as np\n'), ((3743, 3766), 'os.path.isfile', 'os.path.isfile', (['chpkdir'], {}), '(chpkdir)\n', (3757, 3766), False, 'import os, time\n'), ((562, 581), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (574, 581), False, 'import torch\n'), ((993, 1020), 'os.path.split', 'os.path.split', (['train_fns[i]'], {}), '(train_fns[i])\n', (1006, 1020), False, 'import os, time\n'), ((1166, 1192), 'os.path.split', 'os.path.split', (['test_fns[i]'], {}), '(test_fns[i])\n', (1179, 1192), False, 'import os, time\n'), ((1547, 1573), 'numpy.expand_dims', 'np.expand_dims', (['im'], 
{'axis': '(2)'}), '(im, axis=2)\n', (1561, 1573), True, 'import numpy as np\n'), ((1651, 1763), 'numpy.concatenate', 'np.concatenate', (['(im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2, :], im[1:H:2, 0\n :W:2, :])'], {'axis': '(2)'}), '((im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2,\n :], im[1:H:2, 0:W:2, :]), axis=2)\n', (1665, 1763), True, 'import numpy as np\n'), ((3785, 3804), 'torch.load', 'torch.load', (['chpkdir'], {}), '(chpkdir)\n', (3795, 3804), False, 'import torch\n'), ((1473, 1496), 'numpy.maximum', 'np.maximum', (['(im - 512)', '(0)'], {}), '(im - 512, 0)\n', (1483, 1496), True, 'import numpy as np\n'), ((2278, 2286), 'pytorch_msssim.MSSSIM', 'MSSSIM', ([], {}), '()\n', (2284, 2286), False, 'from pytorch_msssim import MSSSIM\n'), ((2320, 2353), 'net_canny.Net', 'Net', ([], {'threshold': '(3.0)', 'device': 'device'}), '(threshold=3.0, device=device)\n', (2323, 2353), False, 'from net_canny import Net\n'), ((3628, 3639), 'model.SeeInDark', 'SeeInDark', ([], {}), '()\n', (3637, 3639), False, 'from model import SeeInDark\n'), ((4370, 4418), 'glob.glob', 'glob.glob', (["(input_dir + '%05d_00*.ARW' % train_id)"], {}), "(input_dir + '%05d_00*.ARW' % train_id)\n", (4379, 4418), False, 'import glob, scipy.io\n'), ((4509, 4531), 'os.path.split', 'os.path.split', (['in_path'], {}), '(in_path)\n', (4522, 4531), False, 'import os, time\n'), ((4552, 4597), 'glob.glob', 'glob.glob', (["(gt_dir + '%05d_00*.ARW' % train_id)"], {}), "(gt_dir + '%05d_00*.ARW' % train_id)\n", (4561, 4597), False, 'import glob, scipy.io\n'), ((4645, 4667), 'os.path.split', 'os.path.split', (['gt_path'], {}), '(gt_path)\n', (4658, 4667), False, 'import os, time\n'), ((4813, 4824), 'time.time', 'time.time', ([], {}), '()\n', (4822, 4824), False, 'import os, time\n'), ((5401, 5429), 'numpy.random.randint', 'np.random.randint', (['(0)', '(W - ps)'], {}), '(0, W - ps)\n', (5418, 5429), True, 'import numpy as np\n'), ((5440, 5468), 'numpy.random.randint', 
'np.random.randint', (['(0)', '(H - ps)'], {}), '(0, H - ps)\n', (5457, 5468), True, 'import numpy as np\n'), ((6148, 6176), 'numpy.minimum', 'np.minimum', (['input_patch', '(1.0)'], {}), '(input_patch, 1.0)\n', (6158, 6176), True, 'import numpy as np\n'), ((6195, 6220), 'numpy.maximum', 'np.maximum', (['gt_patch', '(0.0)'], {}), '(gt_patch, 0.0)\n', (6205, 6220), True, 'import numpy as np\n'), ((2234, 2253), 'torch.abs', 'torch.abs', (['(out - gt)'], {}), '(out - gt)\n', (2243, 2253), False, 'import torch\n'), ((4914, 4935), 'rawpy.imread', 'rawpy.imread', (['in_path'], {}), '(in_path)\n', (4926, 4935), False, 'import rawpy\n'), ((5051, 5072), 'rawpy.imread', 'rawpy.imread', (['gt_path'], {}), '(gt_path)\n', (5063, 5072), False, 'import rawpy\n'), ((5706, 5734), 'numpy.flip', 'np.flip', (['input_patch'], {'axis': '(1)'}), '(input_patch, axis=1)\n', (5713, 5734), True, 'import numpy as np\n'), ((5758, 5783), 'numpy.flip', 'np.flip', (['gt_patch'], {'axis': '(1)'}), '(gt_patch, axis=1)\n', (5765, 5783), True, 'import numpy as np\n'), ((5858, 5886), 'numpy.flip', 'np.flip', (['input_patch'], {'axis': '(0)'}), '(input_patch, axis=0)\n', (5865, 5886), True, 'import numpy as np\n'), ((5910, 5935), 'numpy.flip', 'np.flip', (['gt_patch'], {'axis': '(0)'}), '(gt_patch, axis=0)\n', (5917, 5935), True, 'import numpy as np\n'), ((6030, 6069), 'numpy.transpose', 'np.transpose', (['input_patch', '(0, 2, 1, 3)'], {}), '(input_patch, (0, 2, 1, 3))\n', (6042, 6069), True, 'import numpy as np\n'), ((6090, 6126), 'numpy.transpose', 'np.transpose', (['gt_patch', '(0, 2, 1, 3)'], {}), '(gt_patch, (0, 2, 1, 3))\n', (6102, 6126), True, 'import numpy as np\n'), ((7153, 7214), 'os.system', 'os.system', (["('echo ' + out + ' >> ' + result_dir + 'jobout.txt')"], {}), "('echo ' + out + ' >> ' + result_dir + 'jobout.txt')\n", (7162, 7214), False, 'import os, time\n'), ((5226, 5250), 'numpy.float32', 'np.float32', (['(im / 65535.0)'], {}), '(im / 65535.0)\n', (5236, 5250), True, 'import numpy 
as np\n'), ((5628, 5656), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1)'}), '(2, size=1)\n', (5645, 5656), True, 'import numpy as np\n'), ((5795, 5823), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1)'}), '(2, size=1)\n', (5812, 5823), True, 'import numpy as np\n'), ((5947, 5975), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1)'}), '(2, size=1)\n', (5964, 5975), True, 'import numpy as np\n'), ((7459, 7475), 'numpy.where', 'np.where', (['g_loss'], {}), '(g_loss)\n', (7467, 7475), True, 'import numpy as np\n'), ((7529, 7571), 'os.path.isdir', 'os.path.isdir', (["(result_dir + '%04d' % epoch)"], {}), "(result_dir + '%04d' % epoch)\n", (7542, 7571), False, 'import os, time\n'), ((7587, 7627), 'os.makedirs', 'os.makedirs', (["(result_dir + '%04d' % epoch)"], {}), "(result_dir + '%04d' % epoch)\n", (7598, 7627), False, 'import os, time\n'), ((7726, 7747), 'numpy.maximum', 'np.maximum', (['output', '(0)'], {}), '(output, 0)\n', (7736, 7747), True, 'import numpy as np\n'), ((7798, 7864), 'numpy.concatenate', 'np.concatenate', (['(gt_patch[0, :, :, :], output[0, :, :, :])'], {'axis': '(1)'}), '((gt_patch[0, :, :, :], output[0, :, :, :]), axis=1)\n', (7812, 7864), True, 'import numpy as np\n'), ((7092, 7103), 'time.time', 'time.time', ([], {}), '()\n', (7101, 7103), False, 'import os, time\n'), ((6239, 6268), 'torch.from_numpy', 'torch.from_numpy', (['input_patch'], {}), '(input_patch)\n', (6255, 6268), False, 'import torch\n'), ((6314, 6340), 'torch.from_numpy', 'torch.from_numpy', (['gt_patch'], {}), '(gt_patch)\n', (6330, 6340), False, 'import torch\n'), ((7073, 7089), 'numpy.where', 'np.where', (['g_loss'], {}), '(g_loss)\n', (7081, 7089), True, 'import numpy as np\n')] |
#
# Runge-Kutta 4 elemental evaluation (wave equation version)
# ==========================================================
#
# by <NAME>. (Matlab and Python versions)
# <NAME> (Python version)
#
from simulation_data import BasisType
from local_matrices import local_mass, local_stiffness, stress_stiffness
from flux import Fluxes
import numpy
def eval_k_wave(sim_data, quad_data, basis, mesh, mat_prop, phi, s, t):
ng = sim_data.n_global_dof()
nl = sim_data.n_local_dof()
K = sim_data.n_elements # saving some typing...
# Allocates local vectors v and sigma
v_t = numpy.zeros([(K+2)*nl, 2]) # size: K elements + 1 ghost
sigma_t = numpy.zeros(v_t.shape) # size: K elements + 1 ghost
# Gets v and sigma from previous stage of RK4
v_t[nl:(K+1)*nl, 0] = phi[ng:2*ng]
sigma_t[nl:(K+1)*nl,0] = phi[2*ng:3*ng]
# Applies Neumann BC on v (v_ghost = v_element1)
# d/dx v(x_bound) = 0 -> (v_element1 - v_ghost)/delta_x = 0 -> v_ghost = v_element1
v_t[0:nl, 0] = v_t[nl:2*nl, 0] # left side of the domain
v_t[ng+nl:ng+2*nl, 0] = v_t[ng:ng+nl, 0] # right side of the domain
# Gets Mass, Stiffness and Flux matrices
# Note that we multiply S by 2*a
M = local_mass(quad_data, basis)
S = local_stiffness(quad_data, basis)
flux = Fluxes(sim_data)
# Loop over total number of elements
for i in range(1, sim_data.n_elements+1):
Ss, Ssm = stress_stiffness(quad_data, basis, mat_prop, i-1)
idx = numpy.arange(nl*i, nl*(i+1))
m1 = 0.5*flux.FRp1.dot(sigma_t[idx+nl, 0]+numpy.sqrt(mat_prop.rho[i-1]*mat_prop.mub[nl-1, i])*v_t[idx+nl, 0] )
m2 = 0.5*flux.FR.dot(sigma_t[idx, 0]-numpy.sqrt(mat_prop.rho[i-1]*mat_prop.mub[nl-1, i])*v_t[idx, 0])
m3 = 0.5*flux.FL.dot(sigma_t[idx, 0]+numpy.sqrt(mat_prop.rho[i-1]*mat_prop.mub[0, i])*v_t[idx, 0])
m4 = 0.5*flux.FLm1.dot(sigma_t[idx-nl, 0]-numpy.sqrt(mat_prop.rho[i-1]*mat_prop.mub[0, i])*v_t[idx-nl, 0])
tmp = (s[idx-nl]-S.T.dot(sigma_t[idx, 0])+m1+m2-m3-m4)/(mesh.J[i-1]*mat_prop.rho[i-1])
v_t[idx, 1] = numpy.linalg.solve(M, \
tmp )
m1 = 0.5*flux.FRp1.dot(v_t[idx+nl, 0]+(mat_prop.rho[i-1]*mat_prop.mub[nl-1, i])**(-0.5)*sigma_t[idx+nl, 0] )
m2 = 0.5*flux.FR.dot(v_t[idx, 0]-(mat_prop.rho[i-1]*mat_prop.mub[nl-1, i])**(-0.5)*sigma_t[idx, 0])
m3 = 0.5*flux.FL.dot(v_t[idx, 0]+(mat_prop.rho[i-1]*mat_prop.mub[0, i])**(-0.5)*sigma_t[idx, 0])
m4 = 0.5*flux.FLm1.dot(v_t[idx-nl, 0]-(mat_prop.rho[i-1]*mat_prop.mub[0, i])**(-0.5)*sigma_t[idx-nl, 0])
tmp = (-Ss.dot(v_t[idx, 0])-mesh.J[i-1]*Ssm.dot(v_t[idx, 0])+ \
mat_prop.mub[nl-1, i]*(m1+m2)+mat_prop.mub[0, i]*(-m3-m4))/mesh.J[i-1]
sigma_t[idx, 1] = numpy.linalg.solve(M, tmp)
# Assigns local vectors u, v and sigma to the new global vector phi
ki = numpy.zeros(3*ng)
ki[0:ng] = v_t[nl:(K+1)*nl, 0]
ki[ng:2*ng] = v_t[nl:(K+1)*nl, 1]
ki[2*ng:3*ng] = sigma_t[nl:(K+1)*nl, 1]
return ki
#-- eval_k_wave.py -------------------------------------------------------------
| [
"numpy.linalg.solve",
"numpy.sqrt",
"local_matrices.local_stiffness",
"local_matrices.local_mass",
"flux.Fluxes",
"numpy.zeros",
"local_matrices.stress_stiffness",
"numpy.arange"
] | [((594, 624), 'numpy.zeros', 'numpy.zeros', (['[(K + 2) * nl, 2]'], {}), '([(K + 2) * nl, 2])\n', (605, 624), False, 'import numpy\n'), ((668, 690), 'numpy.zeros', 'numpy.zeros', (['v_t.shape'], {}), '(v_t.shape)\n', (679, 690), False, 'import numpy\n'), ((1233, 1261), 'local_matrices.local_mass', 'local_mass', (['quad_data', 'basis'], {}), '(quad_data, basis)\n', (1243, 1261), False, 'from local_matrices import local_mass, local_stiffness, stress_stiffness\n'), ((1270, 1303), 'local_matrices.local_stiffness', 'local_stiffness', (['quad_data', 'basis'], {}), '(quad_data, basis)\n', (1285, 1303), False, 'from local_matrices import local_mass, local_stiffness, stress_stiffness\n'), ((1315, 1331), 'flux.Fluxes', 'Fluxes', (['sim_data'], {}), '(sim_data)\n', (1321, 1331), False, 'from flux import Fluxes\n'), ((2883, 2902), 'numpy.zeros', 'numpy.zeros', (['(3 * ng)'], {}), '(3 * ng)\n', (2894, 2902), False, 'import numpy\n'), ((1438, 1489), 'local_matrices.stress_stiffness', 'stress_stiffness', (['quad_data', 'basis', 'mat_prop', '(i - 1)'], {}), '(quad_data, basis, mat_prop, i - 1)\n', (1454, 1489), False, 'from local_matrices import local_mass, local_stiffness, stress_stiffness\n'), ((1503, 1537), 'numpy.arange', 'numpy.arange', (['(nl * i)', '(nl * (i + 1))'], {}), '(nl * i, nl * (i + 1))\n', (1515, 1537), False, 'import numpy\n'), ((2102, 2128), 'numpy.linalg.solve', 'numpy.linalg.solve', (['M', 'tmp'], {}), '(M, tmp)\n', (2120, 2128), False, 'import numpy\n'), ((2773, 2799), 'numpy.linalg.solve', 'numpy.linalg.solve', (['M', 'tmp'], {}), '(M, tmp)\n', (2791, 2799), False, 'import numpy\n'), ((1582, 1639), 'numpy.sqrt', 'numpy.sqrt', (['(mat_prop.rho[i - 1] * mat_prop.mub[nl - 1, i])'], {}), '(mat_prop.rho[i - 1] * mat_prop.mub[nl - 1, i])\n', (1592, 1639), False, 'import numpy\n'), ((1696, 1753), 'numpy.sqrt', 'numpy.sqrt', (['(mat_prop.rho[i - 1] * mat_prop.mub[nl - 1, i])'], {}), '(mat_prop.rho[i - 1] * mat_prop.mub[nl - 1, i])\n', (1706, 1753), False, 
'import numpy\n'), ((1806, 1858), 'numpy.sqrt', 'numpy.sqrt', (['(mat_prop.rho[i - 1] * mat_prop.mub[0, i])'], {}), '(mat_prop.rho[i - 1] * mat_prop.mub[0, i])\n', (1816, 1858), False, 'import numpy\n'), ((1918, 1970), 'numpy.sqrt', 'numpy.sqrt', (['(mat_prop.rho[i - 1] * mat_prop.mub[0, i])'], {}), '(mat_prop.rho[i - 1] * mat_prop.mub[0, i])\n', (1928, 1970), False, 'import numpy\n')] |
#!/usr/bin/env python3
import sys
import numpy as np
def get_quant_error(matrix, quantNum):
    """Return the MSE introduced by symmetric int8 quantization of *matrix*.

    *quantNum* is the clipping threshold that gets mapped to the int8
    value 127; everything is computed in float32, mirroring inference.
    """
    scale = np.float32(127) / np.float32(quantNum)
    # Round onto the int8 grid...
    quantized = np.around(matrix * scale).astype(np.int8)
    # ...then map back to float space.
    restored = quantized * (1 / scale)
    # Mean squared reconstruction error against the original values.
    return ((restored - matrix) ** 2).mean()
def find_best_matrix(matrix):
    """Find the int8 clipping threshold minimising quantization MSE, then
    clip *matrix* to it in place.

    The threshold is swept downward from the matrix's absolute maximum in
    float32 steps of 0.001 until 3% of the starting value; the candidate
    with the lowest error (per get_quant_error) wins.

    Returns the clipped matrix (the same array object, modified in place)
    so the new absolute extreme is picked up downstream as the range.
    """
    maxAbs = np.float32(abs(max(matrix.min(), matrix.max(), key=abs)))
    # Start one step above the true maximum so the first loop iteration
    # evaluates the unclipped range itself.
    maxAbs = maxAbs + np.float32(0.001)
    bestFactor = maxAbs
    bestMSE = np.inf
    limit = 0.03*maxAbs
    while maxAbs > limit:
        maxAbs = maxAbs - np.float32(0.001)
        mse = get_quant_error(matrix, maxAbs)
        if mse < bestMSE:
            bestMSE = mse
            bestFactor = maxAbs
    maxNum = abs(max(matrix.min(), matrix.max(), key=abs))
    # Vectorized in-place clip replaces the original O(rows*cols) Python
    # loops; strict >/< comparisons and clipping to the bounds agree.
    np.clip(matrix, -bestFactor, bestFactor, out=matrix)
    print("Old MaxAbs:", maxNum, "new:", bestFactor, "actual:", abs(max(matrix.min(), matrix.max(), key=abs)))
    return matrix
def find_best_matrix_kenneth(matrix):
    """Grid-search 300 candidate clipping thresholds (fractions of the
    absolute maximum) and clip *matrix* in place to the best one.

    The score of a candidate m is the L2 norm of the reconstruction error
    after an int8 round-trip with m mapped to 127.

    Returns the clipped matrix (the same array object, modified in place).
    """
    maxAbs = np.float32(abs(max(matrix.min(), matrix.max(), key=abs)))
    min_norm = np.inf
    bestFactor = maxAbs
    for i in range(1,300):
        m = maxAbs * float(i) / float(300)
        # Quantize at threshold m, saturate to int8 range, dequantize,
        # and measure the total reconstruction error.
        norm = np.linalg.norm(np.clip(np.around(matrix * 127. / m), -127., 127.) / (127. / m) - matrix)
        if norm < min_norm:
            bestFactor = m
            min_norm = norm
    maxNum = abs(max(matrix.min(), matrix.max(), key=abs))
    # Vectorized in-place clip replaces the original O(rows*cols) Python
    # loops; the new absolute extreme becomes bestFactor downstream.
    np.clip(matrix, -bestFactor, bestFactor, out=matrix)
    print("Old MaxAbs:", maxNum, "new:", bestFactor, "actual:", abs(max(matrix.min(), matrix.max(), key=abs)))
    return matrix
if __name__ == '__main__':
    # Usage: <script> input_model output_model
    if len(sys.argv) != 3:
        print("Usage:", sys.argv[0], "input_model output_model")
        sys.exit(1)
    params = dict(np.load(sys.argv[1]))
    for name in params:
        # Re-range every weight matrix (parameter names ending in ...W)
        # plus the embedding table before saving.
        if name[-2] == 'W' or name == "Wemb":
            print(name)
            params[name] = find_best_matrix_kenneth(params[name])
    # Save the adjusted model.
    np.savez(sys.argv[2], **params)
| [
"numpy.savez",
"numpy.subtract",
"numpy.around",
"sys.exit",
"numpy.load",
"numpy.float32"
] | [((2704, 2724), 'numpy.load', 'np.load', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2711, 2724), True, 'import numpy as np\n'), ((3246, 3286), 'numpy.savez', 'np.savez', (['sys.argv[2]'], {}), '(sys.argv[2], **model_file_dict)\n', (3254, 3286), True, 'import numpy as np\n'), ((109, 124), 'numpy.float32', 'np.float32', (['(127)'], {}), '(127)\n', (119, 124), True, 'import numpy as np\n'), ((125, 145), 'numpy.float32', 'np.float32', (['quantNum'], {}), '(quantNum)\n', (135, 145), True, 'import numpy as np\n'), ((497, 514), 'numpy.float32', 'np.float32', (['(0.001)'], {}), '(0.001)\n', (507, 514), True, 'import numpy as np\n'), ((2674, 2685), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2682, 2685), False, 'import sys\n'), ((184, 213), 'numpy.around', 'np.around', (['(matrix * quantmult)'], {}), '(matrix * quantmult)\n', (193, 213), True, 'import numpy as np\n'), ((660, 677), 'numpy.float32', 'np.float32', (['(0.001)'], {}), '(0.001)\n', (670, 677), True, 'import numpy as np\n'), ((325, 364), 'numpy.subtract', 'np.subtract', (['unquantized_matrix', 'matrix'], {}), '(unquantized_matrix, matrix)\n', (336, 364), True, 'import numpy as np\n'), ((1764, 1793), 'numpy.around', 'np.around', (['(matrix * 127.0 / m)'], {}), '(matrix * 127.0 / m)\n', (1773, 1793), True, 'import numpy as np\n')] |
from .utils import decompress_mesh_data
from ..skeleton import Skeleton
from ..trimesh_io import Mesh
import h5py
import os
import orjson
import pandas as pd
import warnings
import numpy as np
from dataclasses import asdict
warnings.simplefilter(action="ignore", category=pd.errors.PerformanceWarning)
NULL_VERSION = 1
LATEST_VERSION = 2
def save_meshwork_metadata(filename, mw, version=LATEST_VERSION):
    """Write top-level meshwork attributes (voxel resolution, format
    version and, when present, segment id) into the HDF5 file *filename*."""
    with h5py.File(filename, "a") as h5f:
        h5f.attrs["voxel_resolution"] = mw.anno.voxel_resolution
        h5f.attrs["version"] = version
        seg_id = mw.seg_id
        if seg_id is not None:
            h5f.attrs["seg_id"] = seg_id
def load_meshwork_metadata(filename):
    """Read the top-level meshwork attributes back from *filename*.

    Missing attributes come back as None, except the format version which
    falls back to NULL_VERSION.
    """
    with h5py.File(filename, "r") as h5f:
        attrs = h5f.attrs
        return {
            "seg_id": attrs.get("seg_id", None),
            "voxel_resolution": attrs.get("voxel_resolution", None),
            "version": attrs.get("version", NULL_VERSION),
        }
def save_meshwork_mesh(filename, mw, version=LATEST_VERSION):
    """Persist the meshwork's mesh under the "mesh" group of *filename*.

    When a compressed snapshot of the original (unmasked) mesh exists it
    is rebuilt and saved; otherwise the current mesh is used. The current
    mesh mask is stored alongside as "mesh/mesh_mask".
    """
    mask = mw.mesh_mask
    if mw._original_mesh_data is None:
        mesh = mw.mesh
    else:
        # Rebuild the pristine mesh from its compressed snapshot.
        verts, faces, edges, node_mask, scaling = decompress_mesh_data(*mw._original_mesh_data)
        mesh = Mesh(verts, faces, link_edges=edges, node_mask=node_mask, voxel_scaling=scaling)
    with h5py.File(filename, "a") as h5f:
        grp = h5f.create_group("mesh")
        h5f.create_dataset("mesh/vertices", data=mesh.vertices, compression="gzip")
        h5f.create_dataset("mesh/faces", data=mesh.faces, compression="gzip")
        h5f.create_dataset("mesh/node_mask", data=mesh.node_mask, compression="gzip")
        if mesh.voxel_scaling is not None:
            grp.attrs["voxel_scaling"] = mesh.voxel_scaling
        if mesh.link_edges is not None:
            h5f.create_dataset(
                "mesh/link_edges", data=mesh.link_edges, compression="gzip"
            )
        h5f.create_dataset("mesh/mesh_mask", data=mask, compression="gzip")
def load_meshwork_mesh(filename, version=NULL_VERSION):
    """Read the "mesh" group of *filename* back into a Mesh.

    Returns a (Mesh, mesh_mask) tuple. Legacy files with flat face arrays
    are reshaped to (n, 3); optional fields default to None.
    """
    with h5py.File(filename, "r") as h5f:
        verts = h5f["mesh/vertices"][()]
        faces = h5f["mesh/faces"][()]
        if faces.ndim == 1:
            faces = faces.reshape(-1, 3)
        if "link_edges" in h5f["mesh"].keys():
            link_edges = h5f["mesh/link_edges"][()]
        else:
            link_edges = None
        node_mask = h5f["mesh/node_mask"][()]
        voxel_scaling = h5f["mesh"].attrs.get("voxel_scaling", None)
        mesh_mask = h5f["mesh/mesh_mask"][()]
    mesh = Mesh(
        vertices=verts,
        faces=faces,
        link_edges=link_edges,
        node_mask=node_mask,
        voxel_scaling=voxel_scaling,
    )
    return mesh, mesh_mask
def save_meshwork_skeleton(filename, mw, version=LATEST_VERSION):
    """Persist the meshwork's skeleton (with mask reset) under the
    "skeleton" group of *filename*.

    No-op when the meshwork has no skeleton. Optional fields (radius,
    mesh_index, voxel_scaling) are written only when present; the skeleton
    metadata is serialized to JSON bytes via orjson.
    """
    if mw.skeleton is None:
        return
    sk = mw.skeleton.reset_mask()
    with h5py.File(filename, "a") as f:
        f.create_group("skeleton")
        f.create_dataset("skeleton/vertices", data=sk.vertices, compression="gzip")
        f.create_dataset("skeleton/edges", data=sk.edges, compression="gzip")
        f.create_dataset("skeleton/root", data=sk.root)
        # np.string_ was removed in NumPy 2.0; np.bytes_ is the equivalent
        # name and also a long-standing alias on NumPy 1.x.
        f.create_dataset(
            "skeleton/meta",
            data=np.bytes_(
                orjson.dumps(asdict(sk.meta), option=orjson.OPT_SERIALIZE_NUMPY)
            ),
        )
        f.create_dataset(
            "skeleton/mesh_to_skel_map", data=sk.mesh_to_skel_map, compression="gzip"
        )
        if sk.radius is not None:
            f.create_dataset("skeleton/radius", data=sk.radius, compression="gzip")
        if sk.mesh_index is not None:
            f.create_dataset(
                "skeleton/mesh_index", data=sk.mesh_index, compression="gzip"
            )
        if sk.voxel_scaling is not None:
            f["skeleton"].attrs["voxel_scaling"] = sk.voxel_scaling
def load_meshwork_skeleton(filename, version=NULL_VERSION):
    """Read the "skeleton" group of *filename* back into a Skeleton.

    Returns None when the file holds no skeleton. Optional fields default
    to None (radius, mesh_index, voxel_scaling) or {} (meta).
    """
    with h5py.File(filename, "r") as h5f:
        if "skeleton" not in h5f:
            return None
        grp = h5f["skeleton"]
        verts = grp["vertices"][()]
        edges = grp["edges"][()]
        root = grp["root"][()]
        mesh_to_skel_map = grp["mesh_to_skel_map"][()]
        stored = grp.keys()
        radius = grp["radius"][()] if "radius" in stored else None
        voxel_scaling = grp.attrs.get("voxel_scaling", None)
        # Metadata was serialized as JSON bytes; decode it back to a dict.
        meta = orjson.loads(grp["meta"][()].tobytes()) if "meta" in stored else {}
        mesh_index = grp["mesh_index"][()] if "mesh_index" in stored else None
    return Skeleton(
        verts,
        edges,
        root=root,
        radius=radius,
        mesh_to_skel_map=mesh_to_skel_map,
        mesh_index=mesh_index,
        voxel_scaling=voxel_scaling,
        meta=meta,
    )
def _save_dataframe_generic(df, table_name, filename):
    """Serialize *df* to JSON bytes (orjson) and store them as the dataset
    annotations/<table_name>/data (format version 2)."""
    key = f"annotations/{table_name}/data"
    dat = orjson.dumps(
        df.to_dict(), option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SERIALIZE_NUMPY
    )
    # np.string_ was removed in NumPy 2.0; np.bytes_ is the compatible
    # alias on both NumPy 1.x and 2.x. (Stray trailing `pass` dropped.)
    with h5py.File(filename, "a") as f:
        f.create_dataset(key, data=np.bytes_(dat))
def _load_dataframe_generic(filename, table_name):
    """Load a version-2 annotation table (JSON bytes) back into a
    pandas DataFrame."""
    key = f"annotations/{table_name}/data"
    with h5py.File(filename, "r") as h5f:
        raw = h5f[key][()].tobytes()
    return pd.DataFrame.from_records(orjson.loads(raw))
def _save_dataframe_pandas(data, table_name, filename):
    """Store *data* via pandas' HDF5 writer (format version 1) under
    annotations/<table_name>/data, blosc-compressed at level 5.

    (Removed the dead trailing `pass` after the return-less call.)
    """
    data.to_hdf(
        filename, f"annotations/{table_name}/data", complib="blosc", complevel=5
    )
def _load_dataframe_pandas(filename, table_name):
    """Read a version-1 annotation table back via pandas' HDF5 reader."""
    key = f"annotations/{table_name}/data"
    return pd.read_hdf(filename, key)
# Dispatch tables mapping the on-disk format version to the annotation
# (de)serializer pair: version 1 uses pandas' HDF5 writer/reader, version 2
# stores orjson-encoded bytes in a plain h5py dataset.
anno_save_function = {1: _save_dataframe_pandas, 2: _save_dataframe_generic}
anno_load_function = {1: _load_dataframe_pandas, 2: _load_dataframe_generic}
def save_meshwork_annotations(filename, mw, version=LATEST_VERSION):
    """Write every annotation table of *mw* under annotations/<name> in
    *filename*, using the serializer registered for *version*.

    Raises:
        ValueError: when *version* has no registered serializer.
    """
    annos = mw.anno
    if version not in anno_save_function:
        raise ValueError(f"Version must be one of {list(anno_save_function)}")
    for table_name in annos.table_names:
        # Write the table's attributes first; the h5py handle is closed
        # again before the serializer reopens the file itself.
        with h5py.File(filename, "a") as h5f:
            grp = h5f.create_group(f"annotations/{table_name}")
            anno = annos[table_name]
            grp.attrs["anchor_to_mesh"] = int(anno.anchored)
            if anno.point_column is not None:
                grp.attrs["point_column"] = anno.point_column
            grp.attrs["max_distance"] = anno._max_distance
            grp.attrs["defined_index"] = int(anno._defined_index)
            if anno._defined_index is True:
                grp.attrs["index_column"] = anno.index_column_original
        anno_save_function[version](
            annos[table_name].data_original, table_name, filename
        )
def load_meshwork_annotations(filename, version=NULL_VERSION):
    """Read back every annotation table written by save_meshwork_annotations.

    Returns a dict mapping each table name to a dict with keys "data"
    (DataFrame), "anchor_to_mesh" (bool), "point_column", "max_distance"
    and, for tables saved with a caller-defined index, "index_column".
    Returns {} when the file holds no annotations.
    """
    with h5py.File(filename, "r") as f:
        if "annotations" not in f:
            return {}
        table_names = list(f["annotations"].keys())
    annotation_dfs = {}
    for table_name in table_names:
        annotation_dfs[table_name] = {}
        # The h5py handle is closed before this call on purpose: the
        # version-1 loader (pandas.read_hdf) opens the file itself.
        annotation_dfs[table_name]["data"] = anno_load_function[version](
            filename, table_name
        )
        # Reopen read-only to pull the table's stored attributes.
        with h5py.File(filename, "r") as f:
            dset = f[f"annotations/{table_name}"]
            annotation_dfs[table_name]["anchor_to_mesh"] = bool(
                dset.attrs.get("anchor_to_mesh")
            )
            annotation_dfs[table_name]["point_column"] = dset.attrs.get(
                "point_column", None
            )
            annotation_dfs[table_name]["max_distance"] = dset.attrs.get("max_distance")
            # "index_column" is only stored for tables with a defined index.
            if bool(dset.attrs.get("defined_index", False)):
                annotation_dfs[table_name]["index_column"] = dset.attrs.get(
                    "index_column", None
                )
    return annotation_dfs
def _save_meshwork(filename, mw, overwrite=False, version=LATEST_VERSION):
    """Serialize *mw* (metadata, mesh, skeleton, annotations) to *filename*.

    When *filename* is a string naming an existing file, the file is
    truncated if *overwrite* is truthy; otherwise FileExistsError is raised.
    """
    if isinstance(filename, str) and os.path.exists(filename):
        if overwrite is False:
            raise FileExistsError()
        print(f"\tDeleting existing data in (unknown)...")
        # Opening in "w" mode truncates the existing file.
        with h5py.File(filename, "w") as f:
            pass
    save_meshwork_metadata(filename, mw, version=version)
    save_meshwork_mesh(filename, mw, version=version)
    save_meshwork_skeleton(filename, mw, version=version)
    save_meshwork_annotations(filename, mw, version=version)
def _load_meshwork(filename):
    """Read back everything _save_meshwork wrote.

    Returns a (meta, mesh, skeleton, annotations, mesh_mask) tuple; the
    format version stored in the metadata drives the other loaders.
    """
    meta = load_meshwork_metadata(filename)
    version = meta.get("version")
    mesh, mask = load_meshwork_mesh(filename, version=version)
    skeleton = load_meshwork_skeleton(filename, version=version)
    annotations = load_meshwork_annotations(filename, version=version)
    return meta, mesh, skeleton, annotations, mask
| [
"os.path.exists",
"dataclasses.asdict",
"h5py.File",
"numpy.string_",
"warnings.simplefilter",
"orjson.loads",
"pandas.read_hdf"
] | [((225, 302), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'pd.errors.PerformanceWarning'}), "(action='ignore', category=pd.errors.PerformanceWarning)\n", (246, 302), False, 'import warnings\n'), ((5713, 5768), 'pandas.read_hdf', 'pd.read_hdf', (['filename', 'f"""annotations/{table_name}/data"""'], {}), "(filename, f'annotations/{table_name}/data')\n", (5724, 5768), True, 'import pandas as pd\n'), ((417, 441), 'h5py.File', 'h5py.File', (['filename', '"""a"""'], {}), "(filename, 'a')\n", (426, 441), False, 'import h5py\n'), ((687, 711), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (696, 711), False, 'import h5py\n'), ((1256, 1280), 'h5py.File', 'h5py.File', (['filename', '"""a"""'], {}), "(filename, 'a')\n", (1265, 1280), False, 'import h5py\n'), ((1975, 1999), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1984, 1999), False, 'import h5py\n'), ((2832, 2856), 'h5py.File', 'h5py.File', (['filename', '"""a"""'], {}), "(filename, 'a')\n", (2841, 2856), False, 'import h5py\n'), ((3887, 3911), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (3896, 3911), False, 'import h5py\n'), ((5144, 5168), 'h5py.File', 'h5py.File', (['filename', '"""a"""'], {}), "(filename, 'a')\n", (5153, 5168), False, 'import h5py\n'), ((5341, 5365), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (5350, 5365), False, 'import h5py\n'), ((6938, 6962), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (6947, 6962), False, 'import h5py\n'), ((8074, 8098), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (8088, 8098), False, 'import os\n'), ((5446, 5463), 'orjson.loads', 'orjson.loads', (['dat'], {}), '(dat)\n', (5458, 5463), False, 'import orjson\n'), ((6192, 6216), 'h5py.File', 'h5py.File', (['filename', '"""a"""'], {}), "(filename, 'a')\n", (6201, 6216), False, 'import 
h5py\n'), ((7308, 7332), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (7317, 7332), False, 'import h5py\n'), ((5210, 5225), 'numpy.string_', 'np.string_', (['dat'], {}), '(dat)\n', (5220, 5225), True, 'import numpy as np\n'), ((8282, 8306), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (8291, 8306), False, 'import h5py\n'), ((3229, 3244), 'dataclasses.asdict', 'asdict', (['sk.meta'], {}), '(sk.meta)\n', (3235, 3244), False, 'from dataclasses import asdict\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File name: load_data.py
Author: locke
Date created: 2020/3/25 下午7:00
"""
import time
import numpy as np
import sklearn
import time
import torch
from sklearn.neighbors import KDTree
import heapq
def clear_attribute_triples(attribute_triples):
    """Two-stage cleanup of (entity, attribute, value) triples.

    Stage 1 keeps only triples whose attribute occurs at least 10 times.
    Stage 2 strips datatype ("^^...) and "@en suffixes from the value,
    records the stripped value as a number or string literal, removes
    punctuation (. ( ) , ") and maps _ - / to spaces, then drops any value
    still containing 'http'.

    Returns (cleaned_triples, numeric_literals, string_literals); the two
    literal lists hold the pre-normalisation values of all stage-1
    survivors, split by whether they parse as numbers.
    """
    print('\nbefore clear:', len(attribute_triples))
    # step 1: frequency-filter attributes
    counts = {}
    for _, attr, _value in attribute_triples:
        counts[attr] = counts.get(attr, 0) + 1
    frequent = {attr for attr, freq in counts.items() if freq >= 10}
    attribute_triples = {triple for triple in attribute_triples if triple[1] in frequent}
    print('after step 1:', len(attribute_triples))
    # step 2: normalise literal values
    cleaned = []
    literals_number, literals_string = [], []
    for ent, attr, value in attribute_triples:
        if '"^^' in value:
            value = value[:value.index('"^^')]
        if value.endswith('"@en'):
            value = value[:value.index('"@en')]
        # Classify the (suffix-stripped, not-yet-cleaned) literal.
        if is_number(value):
            literals_number.append(value)
        else:
            literals_string.append(value)
        value = value.replace('.', '').replace('(', '').replace(')', '').replace(',', '').replace('"', '')
        value = value.replace('_', ' ').replace('-', ' ').replace('/', ' ')
        if 'http' in value:
            continue
        cleaned.append((ent, attr, value))
    print('after step 2:', len(cleaned))
    return cleaned, literals_number, literals_string
def is_number(s):
    """Return True when *s* can be interpreted as a number.

    Accepts anything float() parses, plus single Unicode numeric
    characters (e.g. '½') via unicodedata.numeric.
    """
    try:
        float(s)
    except ValueError:
        import unicodedata
        try:
            unicodedata.numeric(s)
        except (TypeError, ValueError):
            return False
    return True
class AlignmentData:
    def __init__(self, data_dir="data/D_W_15K_V1", rate=0.3, share=False, swap=False, val=0.0, with_r=False,OpenEa = False,rev_relation = True):
        """Load an entity-alignment dataset and build train/val/test splits.

        Args:
            data_dir: dataset root containing the id/triple/link files.
            rate: fraction of alignment links used for training (non-OpenEA path).
            share: replace aligned KG1 entities by their KG2 partners.
            swap: augment triples by swapping aligned entity ids.
            val: fraction of alignment links used for validation (non-OpenEA path).
            with_r: build the relation-aware sparse graph variant.
            OpenEa: read OpenEA-format (URI-keyed) files instead of id files.
            rev_relation: add reverse triples with shifted relation ids.
        """
        t_ = time.time()
        # NOTE(review): hard-coded True -- the rev_relation argument is ignored.
        self.rev_relation = True
        self.rate = rate
        self.val = val
        if(OpenEa):
            # OpenEA layout: URI-keyed files; integer ids are assigned on load.
            self.ins2id_dict, self.id2ins_dict, [self.kg1_ins_ids, self.kg2_ins_ids] = self.OpenEa_load_dict(
                data_dir + "/ent_links")
            self.rel2id_dict, self.id2rel_dict, [self.kg1_rel_ids, self.kg2_rel_ids] = self.OpenEa_load_relation_dict(
                data_dir + "/rel_triples_")
            self.attr2id_dict, self.id2attr_dict, [self.kg1_attr_ids, self.kg2_attr_ids] = self.OpenEa_load_relation_dict(
                data_dir + "/attr_triples_")
            self.ins_num = len(self.ins2id_dict)
            self.rel_num = len(self.rel2id_dict)
            if(self.rev_relation):
                # NOTE(review): sets rel_num to the constant 2; the non-OpenEA
                # path doubles it (*= 2) instead -- likely a typo, confirm.
                self.rel_num = 2
            self.num_attr = len(self.attr2id_dict)
            self.triple_idx = self.OpenEa_load_triples(data_dir + "/rel_triples_", file_num=2)
            self.ill_idx = self.OpenEa_entities_load_triples(data_dir + "/ent_links", file_num=1)
            # Splits come from the OpenEA 721_5fold layout (fold 1).
            self.ill_train_idx = np.array(self.OpenEa_entities_load_triples(data_dir + "/721_5fold/1/train_links", file_num=1))
            #self.ill_val_idx = np.array(self.OpenEa_entities_load_triples(data_dir + "/721_5fold/1/valid_links", file_num=1))
            self.ill_val_idx = []
            self.ill_test_idx = np.array(self.OpenEa_entities_load_triples(data_dir + "/721_5fold/1/test_links", file_num=1))
            self.atrr_idx = self.OpenEa_load_attributes(data_dir + "/attr_triples_", file_num=2)
        else:
            # JAPE-style layout: files already contain integer ids.
            self.ins2id_dict, self.id2ins_dict, [self.kg1_ins_ids, self.kg2_ins_ids] = self.load_dict(data_dir + "/ent_ids_", file_num=2)
            self.rel2id_dict, self.id2rel_dict, [self.kg1_rel_ids, self.kg2_rel_ids] = self.load_dict(data_dir + "/rel_ids_", file_num=2)
            self.ins_num = len(self.ins2id_dict)
            self.rel_num = len(self.rel2id_dict)
            self.triple_idx = self.load_triples(data_dir + "/triples_", file_num=2)
            self.ill_idx = self.load_triples(data_dir + "/ill_ent_ids", file_num=1)
            # Random split of the alignment links into train/val/test by rate/val.
            np.random.shuffle(self.ill_idx)
            self.ill_train_idx, self.ill_val_idx, self.ill_test_idx = np.array(self.ill_idx[:int(len(self.ill_idx) // 1 * rate)], dtype=np.int32), np.array(self.ill_idx[int(len(self.ill_idx) // 1 * rate) : int(len(self.ill_idx) // 1 * (rate+val))], dtype=np.int32), np.array(self.ill_idx[int(len(self.ill_idx) // 1 * (rate+val)):], dtype=np.int32)
        if (self.rev_relation):
            # Reverse triples get relation id r + rel_num//2.
            self.rel_num *= 2
            rev_triple_idx = []
            for (h, r, t) in self.triple_idx:
                rev_triple_idx.append((t, r + self.rel_num // 2, h))
            self.triple_idx += rev_triple_idx
        # Bidirectional lookup of aligned counterparts.
        self.ill_idx_dic = {}
        for x in self.ill_idx:
            self.ill_idx_dic[x[0]] = x[1]
            self.ill_idx_dic[x[1]] = x[0]
        self.ins_G_edges_idx, self.ins_G_values_idx, self.r_ij_idx = self.gen_sparse_graph_from_triples(self.triple_idx, self.ins_num, with_r)
        # share and swap are mutually exclusive.
        assert (share != swap or (share == False and swap == False))
        if share:
            self.triple_idx = self.share(self.triple_idx, self.ill_train_idx) # 1 -> 2:base
            if(OpenEa):
                # NOTE(review): overwrites triple_idx with the *attribute* triples
                # (atrr_idx), discarding the relation triples -- confirm intent.
                self.triple_idx = self.share_attr(self.atrr_idx, self.ill_train_idx)
            self.kg1_ins_ids = (self.kg1_ins_ids - set(self.ill_train_idx[:, 0])) | set(self.ill_train_idx[:, 1])
            self.ill_train_idx = []
        if swap:
            self.triple_idx = self.swap(self.triple_idx, self.ill_train_idx)
            if(OpenEa):
                # NOTE(review): same overwrite-with-attributes pattern as in share.
                self.triple_idx = self.swap_attr(self.atrr_idx, self.ill_train_idx)
        self.labeled_alignment = set()
        self.boot_triple_idx = []
        self.boot_pair_dix = []
        self.init_time = time.time() - t_
def load_triples(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
triple = []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [tuple(map(int, i.split("\t"))) for i in data]
triple += data
np.random.shuffle(triple)
return triple
def load_dict(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
what2id, id2what, ids = {}, {}, []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [i.split("\t") for i in data]
what2id = {**what2id, **dict([[i[1], int(i[0])] for i in data])}
id2what = {**id2what, **dict([[int(i[0]), i[1]] for i in data])}
ids.append(set([int(i[0]) for i in data]))
return what2id, id2what, ids
def OpenEa_load_triples(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
triple = []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [tuple([self.ins2id_dict[i.split("\t")[0]],self.rel2id_dict[i.split("\t")[1]],self.ins2id_dict[i.split("\t")[2]]]) for i in data]
triple += data
np.random.shuffle(triple)
return triple
def OpenEa_load_attributes(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
triple = []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [tuple([self.ins2id_dict[i.split("\t")[0]],self.attr2id_dict[i.split("\t")[1]],i.split("\t")[2]]) for i in data]
triple += data
triple, _, _ = clear_attribute_triples(triple)
np.random.shuffle(triple)
return triple
def OpenEa_entities_load_triples(self, data_dir, file_num=2):
if file_num == 2:
file_names = [data_dir + str(i) for i in range(1, 3)]
else:
file_names = [data_dir]
triple = []
for file_name in file_names:
with open(file_name, "r", encoding="utf-8") as f:
data = f.read().strip().split("\n")
data = [tuple([self.ins2id_dict[i.split("\t")[0]],self.ins2id_dict[i.split("\t")[1]]]) for i in data]
triple += data
np.random.shuffle(triple)
return triple
def OpenEa_load_dict(self, file_name):
ids = []
kg1_ents_uri = []
kg2_ents_uri = []
ins2id_dict = {}
id2ins_dict = {}
with open(file_name, "r", encoding="utf-8") as f:
lines = f.read().strip().split("\n")
kg1_ents_uri = [line.split('\t')[0] for line in lines]
kg2_ents_uri = [line.split('\t')[1] for line in lines]
id = 0
for item in kg1_ents_uri:
if (item not in ins2id_dict):
ins2id_dict[item] = id
id2ins_dict[id] = item
id += 1;
n1 = id
for item in kg2_ents_uri:
if (item not in ins2id_dict):
ins2id_dict[item] = id
id2ins_dict[id] = item
id += 1;
n2 = id - n1
ids.append(set([i for i in range(n1)]))
ids.append(set([i+n1 for i in range(n2)]))
return ins2id_dict, id2ins_dict, ids
def OpenEa_load_relation_dict(self, file_name):
ids = []
kg1_ents_uri = []
kg2_ents_uri = []
ins2id_dict = {}
id2ins_dict = {}
id = 0
pre_n = 0
for i in range(2):
with open(file_name + str(i+1), "r", encoding="utf-8") as f:
lines = f.read().strip().split("\n")
kg_ents_uri = [line.split('\t')[1] for line in lines]
n1 = id
for item in kg_ents_uri:
if (item not in ins2id_dict):
ins2id_dict[item] = id
id2ins_dict[id] = item
id += 1;
ids.append(set([i+n1 for i in range(id-n1)]))
return ins2id_dict, id2ins_dict, ids
def recursive_triple_embedding(self,triples, h_embed,r_embed ,t_embed,num_epoch = 2):
h_embed = sklearn.preprocessing.normalize(h_embed,norm="l2", axis=1)
t_embed = sklearn.preprocessing.normalize(t_embed, norm="l2", axis=1)
r_embed = sklearn.preprocessing.normalize(r_embed, norm="l2", axis=1)
temp_ent_in = h_embed.copy()
temp_ent_out = t_embed.copy()
temp_rel_in = h_embed.copy()
temp_rel_out = t_embed.copy()
adj_rel_in = np.zeros(h_embed.shape)
adj_rel_out = np.zeros(h_embed.shape)
adj_ent_in = np.zeros(h_embed.shape)
adj_ent_out = np.zeros(h_embed.shape)
in_deg = np.zeros(h_embed.shape[0])
out_deg = np.zeros(h_embed.shape[0])
r_deg = np.zeros(r_embed.shape[0])
for epoch in range(num_epoch):
for (h, r, t) in triples:
adj_rel_in[t] += (temp_rel_in[r])
adj_rel_out[h] += (temp_rel_out[r])
adj_ent_out[h] += (temp_ent_out[t])
adj_ent_in[t] += (temp_ent_in[h])
if(epoch == 0):
in_deg[t] += 1
out_deg[h] += 1
r_deg[r] += 1
for i in range(h_embed.shape[0]):
if(out_deg[i] > 0):
adj_rel_out[i] += adj_rel_out[i] / (out_deg[i] * 2 ** (epoch/2))
if (in_deg[i] > 0):
adj_rel_in[i] += adj_rel_in[i] / (in_deg[i] * 2 ** (epoch/2))
if(out_deg[i] > 0):
adj_ent_out[i] += adj_ent_out[i] / (out_deg[i] * 2 ** (epoch/2))
if (in_deg[i] > 0):
adj_ent_in[i] += adj_ent_in[i] / (in_deg[i] * 2 ** (epoch/2))
adj_ent_in = sklearn.preprocessing.normalize(adj_ent_in, norm="l2", axis=1)
adj_ent_out = sklearn.preprocessing.normalize(adj_ent_out, norm="l2", axis=1)
adj_rel_in = sklearn.preprocessing.normalize(adj_rel_in, norm="l2", axis=1)
adj_rel_out = sklearn.preprocessing.normalize(adj_rel_out, norm="l2", axis=1)
#return np.concatenate((h_embed, adj_rel_in, adj_rel_out), axis=-1)
return np.concatenate((h_embed, adj_ent_in, adj_ent_out), axis=-1)
return np.concatenate((h_embed, adj_rel_in, adj_ent_in, adj_rel_out, adj_ent_out), axis=-1)
return np.concatenate((h_embed, adj_rel_in, adj_ent_in,adj_rel_out,adj_ent_out), axis = -1)
    def justification(self,distance,left,right,topk=3,labels=None):
        """Refine an alignment distance matrix using neighbourhood evidence.

        For each candidate match (top-k by distance), counts how many graph
        edges are preserved under the candidate mapping and lowers the
        distance of the best-justified candidate.

        NOTE(review): this method references several names that are never
        defined in this scope and will raise NameError as written:
        ``A`` (used in torch.topk), ``label_topk_r``, ``t1`` (read before
        assignment), and ``n`` in the ``1/(100-n)`` adjustments.  The
        ``labels is not None`` path also leaves ``label_topk_l``/``l2r_pc``
        undefined.  Presumably these were module/closure variables in the
        original experiment script -- confirm before use.
        """
        #diag = [i for i in range(distance.shape[0])]
        #distance[diag,diag] = 1.0
        if (labels == None):
            # NOTE(review): `A` is undefined here; likely meant `distance`.
            label_topk_l = torch.topk(-A, topk,dim=-1).indices.numpy()
            # Baseline hit counts before justification (top-1 accuracy).
            l2r_pc = [x for x in range(len(left)) if right[label_topk_l[x,0]] == self.ill_idx_dic[left[x]]].__len__()
            r2l_pc = [x for x in range(len(right)) if left[label_topk_r[x,0]] == self.ill_idx_dic[right[x]]].__len__()
            print('l2r before justification is ' , l2r_pc)
            print('r2l before justification is ', r2l_pc)
            t2 = time.time();
            print('sorting lasts ' ,int(t2-t1),' seconds');
            t1 = time.time()
        gap = self.kg1_ins_ids.__len__()
        # Per-entity justification scores for each of the top-k candidates.
        justification_l = np.zeros([self.ins_num,topk])
        justification_r = np.zeros([self.ins_num,topk])
        l_index = -np.ones([self.ins_num])
        r_index = -np.ones([self.ins_num])
        l_same =-np.ones([self.ins_num,topk])
        r_same =-np.ones([self.ins_num,topk])
        # Edge set of the KG as a dict for O(1) membership tests.
        graph_list = [(h, t) for (h, r, t) in self.triple_idx]
        graph = {}
        for e in graph_list:
            graph[e] = 0
        for e in graph_list:
            graph[e] = 0
        # Record each entity's top-k candidate counterparts.
        for i in range(len(right)):
            r_same[right[i],:] = left[label_topk_r[i, :]]
            r_index[right[i]] = 1
        for i in range(len(left)):
            l_same[left[i],:] = right[label_topk_l[i, :]]
            l_index[left[i]] = 1
        # Known labelled pairs override the candidate lists on both sides.
        for l in labels:
            r_same[l[0],:] = np.array(l[1]*topk)
            r_same[l[1],:] = np.array(l[0]*topk)
            l_same[l[0],:] = np.array(l[1]*topk)
            l_same[l[1],:] = np.array(l[0]*topk)
        # NOTE(review): zip(range(topk), range(topk)) only visits i == j,
        # i.e. the diagonal of candidate pairs -- confirm this is intended.
        for i, j in zip(range(topk), range(topk)):
            l_true_edges = [[h, t] for [h,t] in graph_list if (l_index[h] == 1 or l_index[t] == 1) and (l_same[h,i],l_same[t,j]) in graph]
            r_true_edges = [[h, t] for [h, t] in graph_list if (r_index[h] == 1 or r_index[t] == 1) and (r_same[h,i], r_same[t,j]) in graph]
            if(l_true_edges.__len__()>0):
                l_true_edge_np = np.array(l_true_edges)
                justification_l[l_true_edge_np[:, 0],i] += 1
                justification_l[l_true_edge_np[:, 1],j] += 1
            if (r_true_edges.__len__() > 0):
                r_true_edge_np = np.array(r_true_edges)
                justification_r[r_true_edge_np[:, 0],i] += 1
                justification_r[r_true_edge_np[:, 1],j] += 1
        t2 = time.time();
        print('mass justification lasts ' ,int(t2-t1),' seconds');
        t1 = time.time()
        # Restrict score rows to the queried entities.
        justification_l= justification_l[left,:]
        justification_r= justification_r[right,:]
        best_label_l = np.argmax(justification_l, axis=-1)
        best_label_r = np.argmax(justification_r, axis=-1)
        #distance = np.ones(distance.shape)
        new_l2r_pc = [x for x in range(len(left)) if right[label_topk_l[x, best_label_l[x]]] == self.ill_idx_dic[left[x]]].__len__()
        new_r2l_pc = [x for x in range(len(right)) if left[label_topk_r[x, best_label_r[x]]] == self.ill_idx_dic[right[x]]].__len__()
        print('new l2r after justification is ' , new_l2r_pc , ' and improve :', new_l2r_pc - l2r_pc)
        print('new r2l after justification is ', new_r2l_pc, ' and improve :', new_r2l_pc - r2l_pc)
        # Lower the distance of the best-justified candidate below the
        # current top-1.  NOTE(review): `n` is undefined in this scope.
        for i in range(len(left)):
            if justification_l[i, best_label_l[i]] > 0:
                distance[i,label_topk_l[i, best_label_l[i]]]= distance[i,label_topk_l[i, 0]] - 1/(100-n)
        for i in range(len(right)):
            if justification_r[i, best_label_r[i]] > 0:
                distance[label_topk_r[i, best_label_r[i]],i ]= distance[label_topk_r[i, 0],i ] - 1/(100-n)
        t2 = time.time();
        print('compute new distance matrix lasts ' ,int(t2-t1),' seconds');
        return distance;
    def ReGAL (self,embeding,left,right):
        """Select well-supported anchor pairs and derive BFS-distance embeddings.

        Pairs entities whose embeddings are nearly identical (KDTree nearest
        neighbour with distance < 0.1), scores each tentative pair by how many
        incident edges are preserved under the pairing, keeps the 100
        highest-scoring entities as anchors, and returns the BFS-based
        embedding computed from those anchors.
        """
        gap = self.ins_num//2
        #left = [i for i in range(gap)]
        #right = [i+gap for i in range(gap)]
        left_emb = embeding[left]
        right_emb = embeding[right]
        # Nearest left-side embedding for every right-side entity.
        tree = KDTree(left_emb)
        dist, ind = tree.query(right_emb, k=1)
        same = {};
        for i in range(len(right_emb)):
            # Near-duplicate embeddings are treated as the same entity.
            if(dist[i] < 0.1):
                same[left[int(ind[i])]] = right[i]
        # Undirected edge list (both orientations of every triple).
        edges = [[h,t] for (h, r, t) in self.triple_idx]+[[t,h] for (h, r, t) in self.triple_idx]
        justification = np.zeros(gap)
        for [e1,e2] in edges:
            if(e2 < gap and e1 < gap ):
                # An edge supports a pairing when its image under `same`
                # is also an edge.  NOTE: `in edges` is a linear scan, so
                # this loop is O(E^2) on large graphs.
                if(e1 in same and e2 in same and [same[e1],same[e2]] in edges):
                    justification[e1] += 1
                    justification[e2] += 1
        # Keep the 100 entities with the most edge support as anchors.
        idx = np.argsort(justification)
        candid_anchor = idx[-100:]
        return self.BFS(edges,candid_anchor ,same)
    def BFS(self,edges,start_idx,same):
        """Embed every entity by its BFS distance to each anchor pair.

        For each anchor x (and its counterpart same[x]) a breadth-first
        traversal assigns every reachable entity its hop distance; column ix
        of the result holds those distances for anchor ix.  Unreachable
        entities keep the sentinel value 1000.  The final embedding is
        L2-normalized exp(-distance).
        """
        # Adjacency lists over all ins_num entities.
        n_i = {}
        num_entity = self.ins_num
        for i in range(num_entity):
            n_i[i] = []
        for (e1,e2) in edges:
            n_i[e1].append(e2)
            n_i[e2].append(e1)
        embd = np.ones([num_entity,len(start_idx)])
        embd[:,:] = 1000
        ix = 0
        for x in start_idx:
            # Visited markers for this anchor's traversal.
            bfs = np.zeros([num_entity]);
            # Both members of the anchor pair are distance-0 sources.
            embd[x,ix] = 0
            embd[same[x], ix] = 0
            stack = [x,same[x]]
            bfs[x] = 1
            bfs[same[x]] = 1
            # FIFO traversal: pop from the front, append at the back.
            while(len(stack)>0):
                s = stack[0]
                stack = stack[1:]
                for n in n_i[s]:
                    embd[n,ix] = min(embd[n,ix],embd[s,ix]+1)
                    if(bfs[n]==0):
                        stack.append(n)
                        bfs[n] = 1
            ix += 1
        # Map distances to similarities and normalize rows.
        embd = sklearn.preprocessing.normalize(np.exp(-embd), norm="l2", axis=1)
        return embd
    def gen_sparse_graph_from_triples(self, triples, ins_num, with_r=False):
        """Build sparse-graph index structures from (h, r, t) triples.

        Relations are treated as extra nodes with ids shifted by
        ``self.ins_num`` (r1 = r + ins_num).  Self-loop triples (h == t)
        are ignored.

        Returns:
            (edges_dict, values, r_ij) where edges_dict maps edge-list names
            to [src, dst] pairs, values are the corresponding edge weights,
            and r_ij is the relation-copy index array (None unless with_r).
        """
        edge_dict = {}
        in_nodes_dict = {}
        out_nodes_dict = {}
        in_rels_dict = {}
        out_rels_dict = {}
        for (h, r, t) in triples:
            if h != t:
                # Relation node id: relations live above the entity id range.
                r1 = r + self.ins_num
                if (h, t) not in edge_dict:
                    edge_dict[(h, t)] = []
                if (h, t) not in in_nodes_dict:
                    in_nodes_dict[(h, t)] = []
                if (r1, t) not in in_nodes_dict:
                    in_nodes_dict[(r1, t)] = []
                if (r1,h) not in out_nodes_dict:
                    out_nodes_dict[(r1,h)] = []
                if (t,h) not in out_nodes_dict:
                    out_nodes_dict[(t,h)] = []
                if (r1,h) not in in_rels_dict:
                    in_rels_dict[(r1,h)] = []
                if (r1, t) not in out_rels_dict:
                    out_rels_dict[(r1, t)] = []
                edge_dict[(h, t)].append(r)
                in_nodes_dict[(h, t)].append(r)
                in_nodes_dict[(r1, t)].append(h)
                out_nodes_dict[(r1, h)].append(t)
                out_nodes_dict[(t, h)].append(r)
                in_rels_dict[(r1,h)].append(t)
                out_rels_dict[(r1,t)].append(h)
        if with_r:
            # One edge copy per relation on (h, t); weight 1/#relations.
            edges = [[h, t] for (h, t) in edge_dict for r in edge_dict[(h, t)]]
            edges1 = [[h, t] for (h, t) in edge_dict ]
            values = [1.0 / edge_dict[(h, t)].__len__() for (h, t) in edge_dict for r in edge_dict[(h, t)]]
            # r_ij pairs each relation id with a globally unique copy index
            # starting at rel_num.
            cnt = self.rel_num
            r_ij = []
            for (h, t) in edge_dict:
                for r in edge_dict[(h, t)]:
                    r_ij.append([r,cnt])
                    cnt += 1
            in_nodes = [[h, t] for (h, t) in in_nodes_dict]
            out_nodes = [[h, t] for (h, t) in out_nodes_dict]
            in_rels = [[h, t] for (h, t) in in_rels_dict]
            out_rels = [[h, t] for (h, t) in out_rels_dict]
            edges = np.array(edges, dtype=np.int32)
            values = np.array(values, dtype=np.float32)
            r_ij = np.array(r_ij, dtype=np.float32)
            edges_dict = {'default': edges, 'default1': edges1,'in_nodes': in_nodes, 'out_nodes': out_nodes, 'in_rels': in_rels,
                          'rels' : in_rels + out_rels,
                          'out_rels': out_rels, 'default_cnt':values}
            return edges_dict, values, r_ij
        else:
            in_nodes = [[h, t] for (h, t) in in_nodes_dict]
            out_nodes = [[h, t] for (h, t) in out_nodes_dict]
            in_rels = [[h, t] for (h, t) in in_rels_dict]
            out_rels = [[h, t] for (h, t) in out_rels_dict ]
            if 1==1:
                #in_nodes += [[e, e] for e in range(ins_num + self.rel_num)]
                #out_nodes += [[e, e] for e in range(ins_num + self.rel_num)]
                # Self-loops for every relation node.
                in_rels += [[e, e] for e in range(ins_num,ins_num + self.rel_num)]
                out_rels += [[e, e] for e in range(ins_num,ins_num + self.rel_num)]
            edges = [[h, t] for (h, t) in edge_dict]
            values = [1 for (h, t) in edge_dict]
            # add self-loop
            edges += [[e, e] for e in range(ins_num)]
            values += [1 for e in range(ins_num)]
            edges = np.array(edges, dtype=np.int32)
            values = np.array(values, dtype=np.float32)
            edges_dict = {'default':edges,'in_nodes':in_nodes,'out_nodes':out_nodes,'in_rels':in_rels,'out_rels':out_rels}
            return edges_dict, values, None
def share(self, triples, ill):
from_1_to_2_dict = dict(ill)
new_triples = []
for (h, r, t) in triples:
if h in from_1_to_2_dict:
h = from_1_to_2_dict[h]
if t in from_1_to_2_dict:
t = from_1_to_2_dict[t]
new_triples.append((h, r, t))
new_triples = list(set(new_triples))
return new_triples
def share_attr(self, triples, ill):
from_1_to_2_dict = dict(ill)
new_triples = []
for (h, r, t) in triples:
if h in from_1_to_2_dict:
h = from_1_to_2_dict[h]
new_triples.append((h, r, t))
new_triples = list(set(new_triples))
return new_triples
def swap(self, triples, ill):
from_1_to_2_dict = dict(ill)
from_2_to_1_dict = dict(ill[:, ::-1])
new_triples = []
for (h, r, t) in triples:
new_triples.append((h, r, t))
if h in from_1_to_2_dict:
new_triples.append((from_1_to_2_dict[h], r, t))
if t in from_1_to_2_dict:
new_triples.append((h, r, from_1_to_2_dict[t]))
if h in from_2_to_1_dict:
new_triples.append((from_2_to_1_dict[h], r, t))
if t in from_2_to_1_dict:
new_triples.append((h, r, from_2_to_1_dict[t]))
new_triples = list(set(new_triples))
return new_triples
def swap_attr(self, triples, ill):
from_1_to_2_dict = dict(ill)
from_2_to_1_dict = dict(ill[:, ::-1])
new_triples = []
for (h, r, t) in triples:
new_triples.append((h, r, t))
if h in from_1_to_2_dict:
new_triples.append((from_1_to_2_dict[h], r, t))
if h in from_2_to_1_dict:
new_triples.append((from_2_to_1_dict[h], r, t))
new_triples = list(set(new_triples))
return new_triples
def __repr__(self):
return self.__class__.__name__ + " dataset summary:" + \
"\n\tins_num: " + str(self.ins_num) + \
"\n\trel_num: " + str(self.rel_num) + \
"\n\ttriple_idx: " + str(len(self.triple_idx)) + \
"\n\trate: " + str(self.rate) + "\tval: " + str(self.val) + \
"\n\till_idx(train/test/val): " + str(len(self.ill_idx)) + " = " + str(len(self.ill_train_idx)) + " + " + str(len(self.ill_test_idx)) + " + " + str(len(self.ill_val_idx)) + \
"\n\tins_G_edges_idx: " + str(len(self.ins_G_edges_idx['default'])) + \
"\n\t----------------------------- init_time: " + str(round(self.init_time, 3)) + "s"
if __name__ == '__main__':
    # TEST
    # Smoke test: requires the dataset files under the default data_dir
    # ("data/D_W_15K_V1") to be present on disk.
    d = AlignmentData(share=False, swap=False,OpenEa = True)
    print(d)
    d = AlignmentData(share=True, swap=False)
    print(d)
    d = AlignmentData(share=False, swap=True)
    print(d)
| [
"numpy.ones",
"torch.topk",
"sklearn.neighbors.KDTree",
"numpy.argmax",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.exp",
"numpy.concatenate",
"sklearn.preprocessing.normalize",
"time.time",
"unicodedata.numeric",
"numpy.random.shuffle"
] | [((1878, 1900), 'unicodedata.numeric', 'unicodedata.numeric', (['s'], {}), '(s)\n', (1897, 1900), False, 'import unicodedata\n'), ((2169, 2180), 'time.time', 'time.time', ([], {}), '()\n', (2178, 2180), False, 'import time\n'), ((6462, 6487), 'numpy.random.shuffle', 'np.random.shuffle', (['triple'], {}), '(triple)\n', (6479, 6487), True, 'import numpy as np\n'), ((7768, 7793), 'numpy.random.shuffle', 'np.random.shuffle', (['triple'], {}), '(triple)\n', (7785, 7793), True, 'import numpy as np\n'), ((8420, 8445), 'numpy.random.shuffle', 'np.random.shuffle', (['triple'], {}), '(triple)\n', (8437, 8445), True, 'import numpy as np\n'), ((9005, 9030), 'numpy.random.shuffle', 'np.random.shuffle', (['triple'], {}), '(triple)\n', (9022, 9030), True, 'import numpy as np\n'), ((10970, 11029), 'sklearn.preprocessing.normalize', 'sklearn.preprocessing.normalize', (['h_embed'], {'norm': '"""l2"""', 'axis': '(1)'}), "(h_embed, norm='l2', axis=1)\n", (11001, 11029), False, 'import sklearn\n'), ((11047, 11106), 'sklearn.preprocessing.normalize', 'sklearn.preprocessing.normalize', (['t_embed'], {'norm': '"""l2"""', 'axis': '(1)'}), "(t_embed, norm='l2', axis=1)\n", (11078, 11106), False, 'import sklearn\n'), ((11125, 11184), 'sklearn.preprocessing.normalize', 'sklearn.preprocessing.normalize', (['r_embed'], {'norm': '"""l2"""', 'axis': '(1)'}), "(r_embed, norm='l2', axis=1)\n", (11156, 11184), False, 'import sklearn\n'), ((11358, 11381), 'numpy.zeros', 'np.zeros', (['h_embed.shape'], {}), '(h_embed.shape)\n', (11366, 11381), True, 'import numpy as np\n'), ((11404, 11427), 'numpy.zeros', 'np.zeros', (['h_embed.shape'], {}), '(h_embed.shape)\n', (11412, 11427), True, 'import numpy as np\n'), ((11449, 11472), 'numpy.zeros', 'np.zeros', (['h_embed.shape'], {}), '(h_embed.shape)\n', (11457, 11472), True, 'import numpy as np\n'), ((11495, 11518), 'numpy.zeros', 'np.zeros', (['h_embed.shape'], {}), '(h_embed.shape)\n', (11503, 11518), True, 'import numpy as np\n'), ((11537, 11563), 
'numpy.zeros', 'np.zeros', (['h_embed.shape[0]'], {}), '(h_embed.shape[0])\n', (11545, 11563), True, 'import numpy as np\n'), ((11582, 11608), 'numpy.zeros', 'np.zeros', (['h_embed.shape[0]'], {}), '(h_embed.shape[0])\n', (11590, 11608), True, 'import numpy as np\n'), ((11625, 11651), 'numpy.zeros', 'np.zeros', (['r_embed.shape[0]'], {}), '(r_embed.shape[0])\n', (11633, 11651), True, 'import numpy as np\n'), ((13059, 13118), 'numpy.concatenate', 'np.concatenate', (['(h_embed, adj_ent_in, adj_ent_out)'], {'axis': '(-1)'}), '((h_embed, adj_ent_in, adj_ent_out), axis=-1)\n', (13073, 13118), True, 'import numpy as np\n'), ((13134, 13222), 'numpy.concatenate', 'np.concatenate', (['(h_embed, adj_rel_in, adj_ent_in, adj_rel_out, adj_ent_out)'], {'axis': '(-1)'}), '((h_embed, adj_rel_in, adj_ent_in, adj_rel_out, adj_ent_out),\n axis=-1)\n', (13148, 13222), True, 'import numpy as np\n'), ((13234, 13322), 'numpy.concatenate', 'np.concatenate', (['(h_embed, adj_rel_in, adj_ent_in, adj_rel_out, adj_ent_out)'], {'axis': '(-1)'}), '((h_embed, adj_rel_in, adj_ent_in, adj_rel_out, adj_ent_out),\n axis=-1)\n', (13248, 13322), True, 'import numpy as np\n'), ((17679, 17695), 'sklearn.neighbors.KDTree', 'KDTree', (['left_emb'], {}), '(left_emb)\n', (17685, 17695), False, 'from sklearn.neighbors import KDTree\n'), ((18009, 18022), 'numpy.zeros', 'np.zeros', (['gap'], {}), '(gap)\n', (18017, 18022), True, 'import numpy as np\n'), ((18275, 18300), 'numpy.argsort', 'np.argsort', (['justification'], {}), '(justification)\n', (18285, 18300), True, 'import numpy as np\n'), ((22676, 22707), 'numpy.array', 'np.array', (['edges'], {'dtype': 'np.int32'}), '(edges, dtype=np.int32)\n', (22684, 22707), True, 'import numpy as np\n'), ((22725, 22759), 'numpy.array', 'np.array', (['values'], {'dtype': 'np.float32'}), '(values, dtype=np.float32)\n', (22733, 22759), True, 'import numpy as np\n'), ((4261, 4292), 'numpy.random.shuffle', 'np.random.shuffle', (['self.ill_idx'], {}), '(self.ill_idx)\n', 
(4278, 4292), True, 'import numpy as np\n'), ((5972, 5983), 'time.time', 'time.time', ([], {}), '()\n', (5981, 5983), False, 'import time\n'), ((12634, 12696), 'sklearn.preprocessing.normalize', 'sklearn.preprocessing.normalize', (['adj_ent_in'], {'norm': '"""l2"""', 'axis': '(1)'}), "(adj_ent_in, norm='l2', axis=1)\n", (12665, 12696), False, 'import sklearn\n'), ((12723, 12786), 'sklearn.preprocessing.normalize', 'sklearn.preprocessing.normalize', (['adj_ent_out'], {'norm': '"""l2"""', 'axis': '(1)'}), "(adj_ent_out, norm='l2', axis=1)\n", (12754, 12786), False, 'import sklearn\n'), ((12812, 12874), 'sklearn.preprocessing.normalize', 'sklearn.preprocessing.normalize', (['adj_rel_in'], {'norm': '"""l2"""', 'axis': '(1)'}), "(adj_rel_in, norm='l2', axis=1)\n", (12843, 12874), False, 'import sklearn\n'), ((12901, 12964), 'sklearn.preprocessing.normalize', 'sklearn.preprocessing.normalize', (['adj_rel_out'], {'norm': '"""l2"""', 'axis': '(1)'}), "(adj_rel_out, norm='l2', axis=1)\n", (12932, 12964), False, 'import sklearn\n'), ((13950, 13961), 'time.time', 'time.time', ([], {}), '()\n', (13959, 13961), False, 'import time\n'), ((14040, 14051), 'time.time', 'time.time', ([], {}), '()\n', (14049, 14051), False, 'import time\n'), ((14128, 14158), 'numpy.zeros', 'np.zeros', (['[self.ins_num, topk]'], {}), '([self.ins_num, topk])\n', (14136, 14158), True, 'import numpy as np\n'), ((14188, 14218), 'numpy.zeros', 'np.zeros', (['[self.ins_num, topk]'], {}), '([self.ins_num, topk])\n', (14196, 14218), True, 'import numpy as np\n'), ((15990, 16001), 'time.time', 'time.time', ([], {}), '()\n', (15999, 16001), False, 'import time\n'), ((16091, 16102), 'time.time', 'time.time', ([], {}), '()\n', (16100, 16102), False, 'import time\n'), ((16239, 16274), 'numpy.argmax', 'np.argmax', (['justification_l'], {'axis': '(-1)'}), '(justification_l, axis=-1)\n', (16248, 16274), True, 'import numpy as np\n'), ((16302, 16337), 'numpy.argmax', 'np.argmax', (['justification_r'], {'axis': 
'(-1)'}), '(justification_r, axis=-1)\n', (16311, 16337), True, 'import numpy as np\n'), ((17312, 17323), 'time.time', 'time.time', ([], {}), '()\n', (17321, 17323), False, 'import time\n'), ((18770, 18792), 'numpy.zeros', 'np.zeros', (['[num_entity]'], {}), '([num_entity])\n', (18778, 18792), True, 'import numpy as np\n'), ((19309, 19322), 'numpy.exp', 'np.exp', (['(-embd)'], {}), '(-embd)\n', (19315, 19322), True, 'import numpy as np\n'), ((21398, 21429), 'numpy.array', 'np.array', (['edges'], {'dtype': 'np.int32'}), '(edges, dtype=np.int32)\n', (21406, 21429), True, 'import numpy as np\n'), ((21451, 21485), 'numpy.array', 'np.array', (['values'], {'dtype': 'np.float32'}), '(values, dtype=np.float32)\n', (21459, 21485), True, 'import numpy as np\n'), ((21505, 21537), 'numpy.array', 'np.array', (['r_ij'], {'dtype': 'np.float32'}), '(r_ij, dtype=np.float32)\n', (21513, 21537), True, 'import numpy as np\n'), ((14242, 14265), 'numpy.ones', 'np.ones', (['[self.ins_num]'], {}), '([self.ins_num])\n', (14249, 14265), True, 'import numpy as np\n'), ((14289, 14312), 'numpy.ones', 'np.ones', (['[self.ins_num]'], {}), '([self.ins_num])\n', (14296, 14312), True, 'import numpy as np\n'), ((14334, 14363), 'numpy.ones', 'np.ones', (['[self.ins_num, topk]'], {}), '([self.ins_num, topk])\n', (14341, 14363), True, 'import numpy as np\n'), ((14384, 14413), 'numpy.ones', 'np.ones', (['[self.ins_num, topk]'], {}), '([self.ins_num, topk])\n', (14391, 14413), True, 'import numpy as np\n'), ((14971, 14992), 'numpy.array', 'np.array', (['(l[1] * topk)'], {}), '(l[1] * topk)\n', (14979, 14992), True, 'import numpy as np\n'), ((15024, 15045), 'numpy.array', 'np.array', (['(l[0] * topk)'], {}), '(l[0] * topk)\n', (15032, 15045), True, 'import numpy as np\n'), ((15077, 15098), 'numpy.array', 'np.array', (['(l[1] * topk)'], {}), '(l[1] * topk)\n', (15085, 15098), True, 'import numpy as np\n'), ((15130, 15151), 'numpy.array', 'np.array', (['(l[0] * topk)'], {}), '(l[0] * topk)\n', (15138, 
15151), True, 'import numpy as np\n'), ((15580, 15602), 'numpy.array', 'np.array', (['l_true_edges'], {}), '(l_true_edges)\n', (15588, 15602), True, 'import numpy as np\n'), ((15819, 15841), 'numpy.array', 'np.array', (['r_true_edges'], {}), '(r_true_edges)\n', (15827, 15841), True, 'import numpy as np\n'), ((13533, 13561), 'torch.topk', 'torch.topk', (['(-A)', 'topk'], {'dim': '(-1)'}), '(-A, topk, dim=-1)\n', (13543, 13561), False, 'import torch\n')] |
import utils.init_multiprocessing # import before numpy
import numpy as np
import os
import time
from multiprocessing import cpu_count
from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid
from singlecell.analysis_basin_transitions import \
ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, \
ANNEAL_PROTOCOL, FIELD_APPLIED_PROTOCOL, ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, \
save_and_plot_basinstats, load_basinstats, fetch_from_run_info
from singlecell.singlecell_constants import ASYNC_BATCH, MEMS_MEHTA, MEMS_SCMCA
from singlecell.singlecell_functions import hamming
from singlecell.singlecell_simsetup import singlecell_simsetup
from utils.file_io import run_subdir_setup, runinfo_append, RUNS_FOLDER
def gen_basin_grid(ensemble, num_processes, simsetup=None, num_steps=100, anneal_protocol=ANNEAL_PROTOCOL,
                   field_protocol=FIELD_APPLIED_PROTOCOL, occ_threshold=OCC_THRESHOLD, async_batch=ASYNC_BATCH, saveall=False,
                   save=True, plot=False, verbose=False, parallel=True):
    """
    generate matrix G_ij of size p x (p + k): grid of data between 0 and 1
    each row represents one of the p encoded basins as an initial condition
    each column represents an endpoint of the simulation starting at a given basin (row)
    G_ij would represent: starting in cell type i, G_ij of the ensemble transitioned to cell type j

    Args:
        ensemble: number of independent cells simulated per initial basin
        num_processes: worker count forwarded to the parallel basin-stats calls
        simsetup: project sim settings dict; built via singlecell_simsetup() when None
        saveall: when True (requires parallel), save/plot per-celltype basin stats
        save / plot: persist the grid as text / plot it
    Returns:
        (basin_grid, io_dict): the p x (p + k) occupancy grid and the run output paths
    """
    # simsetup unpack for labelling plots
    if simsetup is None:
        simsetup = singlecell_simsetup()
    celltype_labels = simsetup['CELLTYPE_LABELS']
    io_dict = run_subdir_setup(run_subfolder=ANALYSIS_SUBDIR)
    basin_grid = np.zeros((len(celltype_labels), len(celltype_labels)+len(SPURIOUS_LIST)))
    for idx, celltype in enumerate(celltype_labels):
        print("Generating row: %d, %s" % (idx, celltype))
        if saveall:
            assert parallel
            plot_all = False
            # BUG FIX: previously passed `num_proc`, a name that is undefined
            # inside this function (it only resolved by accident via a module
            # global when run as a script); use the num_processes parameter.
            proj_timeseries_array, basin_occupancy_timeseries, _, _ = \
                ensemble_projection_timeseries(celltype, ensemble, num_processes, simsetup=simsetup, num_steps=num_steps,
                                               anneal_protocol=anneal_protocol, field_protocol=field_protocol,
                                               occ_threshold=occ_threshold, async_batch=async_batch,
                                               plot=False, output=False)
            save_and_plot_basinstats(io_dict, proj_timeseries_array, basin_occupancy_timeseries, num_steps, ensemble,
                                     simsetup=simsetup, prefix=celltype, occ_threshold=occ_threshold, plot=plot_all)
        else:
            init_state, init_id = get_init_info(celltype, simsetup)
            if parallel:
                transfer_dict, proj_timeseries_array, basin_occupancy_timeseries, _ = \
                    fast_basin_stats(celltype, init_state, init_id, ensemble, num_processes, simsetup=simsetup,
                                     num_steps=num_steps, anneal_protocol=anneal_protocol, field_protocol=field_protocol,
                                     occ_threshold=occ_threshold, async_batch=async_batch, verbose=verbose)
            else:
                # Unparallelized for testing/profiling:
                transfer_dict, proj_timeseries_array, basin_occupancy_timeseries, _ = \
                    get_basin_stats(celltype, init_state, init_id, ensemble, 0, simsetup, num_steps=num_steps,
                                    anneal_protocol=anneal_protocol, field_protocol=field_protocol,
                                    async_batch=async_batch, occ_threshold=occ_threshold, verbose=verbose)
            proj_timeseries_array = proj_timeseries_array / ensemble  # ensure normalized (get basin stats won't do this)
        # fill in row of grid data from each celltype simulation
        basin_grid[idx, :] = basin_occupancy_timeseries[:,-1]
    if save:
        np.savetxt(io_dict['latticedir'] + os.sep + 'gen_basin_grid.txt', basin_grid, delimiter=',', fmt='%.4f')
    if plot:
        plot_basin_grid(basin_grid, ensemble, num_steps, celltype_labels, io_dict['latticedir'], SPURIOUS_LIST)
    return basin_grid, io_dict
def load_basin_grid(filestr_data):
    """Load a saved basin-occupancy grid (comma-separated floats) from disk."""
    # TODO: prepare IO functions for standardized sim settings dict struct
    #sim_settings = load_sim_settings(filestr_settings)
    return np.loadtxt(filestr_data, delimiter=',', dtype=float)
def grid_stats(grid_data, printtorank=10):
    """
    Prints per-row rankings of the basin occupancy grid.

    For every row (initial celltype), verifies the row sums to the ensemble
    size, then prints the top ``printtorank`` destination basins by count.

    NOTE(review): relies on the module-level names ``celltype_labels``
    (assigned only inside the ``__main__`` block below) and ``SPURIOUS_LIST``,
    so this function only works when the script is run directly -- confirm
    before calling it as a library function.

    Args:
        grid_data: basin grid data from basin occupancy hopping sim
        printtorank: number of top-ranked destination basins to print per row
    Returns:
        None
    """
    basin_row_sum = np.sum(grid_data, axis=1)
    # Row 0's sum defines the expected ensemble size for every row.
    ensemble = basin_row_sum[0]
    # Column labels: encoded celltypes followed by the spurious-state labels.
    ref_list = celltype_labels + SPURIOUS_LIST
    for row in range(len(celltype_labels)):
        assert basin_row_sum[row] == ensemble  # make sure all rows sum to expected value
        sortedmems_smalltobig = np.argsort(grid_data[row, :])
        sortedmems_bigtosmall = sortedmems_smalltobig[::-1]
        print("\nRankings for row", row, celltype_labels[row], "(sum %d)" % int(basin_row_sum[row]))
        for rank in range(printtorank):
            ranked_col_idx = sortedmems_bigtosmall[rank]
            ranked_label = ref_list[ranked_col_idx]
            print(rank, ranked_label, grid_data[row, ranked_col_idx], grid_data[row, ranked_col_idx] / ensemble)
def static_overlap_grid(simsetup, calc_hamming=False, savedata=True, plot=True):
    """
    Compute the pairwise celltype overlap (or hamming distance) grid.

    Args:
        simsetup: project standard sim object (dict with 'CELLTYPE_LABELS' and 'XI')
        calc_hamming: when True, compute pairwise hamming distances instead of overlaps
        savedata: save the grid as a comma-separated text file under RUNS_FOLDER
        plot: render the grid via plot_overlap_grid
    Returns:
        grid_data = np.dot(xi.T, xi) -- the (p x p) correlation matrix of the memories OR
        (p x p) array of hamming distance between all the memory pairs (equivalent; transformed)
    """
    # generate
    celltypes = simsetup["CELLTYPE_LABELS"]
    xi = simsetup["XI"]
    if calc_hamming:
        # Pairwise hamming distances between memory columns; the matrix is
        # symmetric so each distance is written to both (i, j) and (j, i).
        grid_data = np.zeros((len(celltypes), len(celltypes)))
        for i in range(len(celltypes)):
            for j in range(len(celltypes)):
                hd = hamming(xi[:, i], xi[:, j])
                grid_data[i, j] = hd
                grid_data[j, i] = hd
    else:
        # Overlap (inner product) matrix of the memory columns.
        grid_data = np.dot(xi.T, xi)
    # save and plot
    outdir = RUNS_FOLDER
    if savedata:
        # Filename reflects which of the two quantities was computed.
        dataname = 'celltypes_%s.txt' % ["overlap", "hammingdist"][calc_hamming]
        np.savetxt(outdir + os.sep + dataname, grid_data, delimiter=',', fmt='%.4f')
    if plot:
        plot_overlap_grid(grid_data, celltypes, outdir, ext='.pdf', normalize=False)
    return grid_data
def stoch_from_distance(distance_data, kappa=None):
    """
    Given a square matrix of distances between N points
    - stretch the distances according to a transformation rule
      (np.exp(-kappa*d_ab) when kappa is given, else 1/d_ab)
    - set diagonals to zero
    - normalize columns so that it represents a stochastic rate matrix
      (diagonals are -1*sum(col))
    - return the generated stochastic rate matrix
    """
    n_rows, n_cols = distance_data.shape
    stoch_array = np.zeros(distance_data.shape)
    for col in range(n_cols):
        # transform the off-diagonal distances of this column into rates
        off_diag = [r for r in range(n_rows) if r != col]
        if kappa is None:
            rates = 1 / distance_data[off_diag, col]
        else:
            rates = np.exp(-kappa * distance_data[off_diag, col])
        stoch_array[off_diag, col] = rates
        colsum = rates.sum()
        stoch_array[col, col] = -colsum
        # TODO this step should not be necessary
        stoch_array[:, col] /= colsum
    return stoch_array
if __name__ == '__main__':
    # Toggle which analysis stages run when this module is executed directly
    run_basin_grid = False
    gen_overlap_grid = False
    load_and_plot_basin_grid = False
    load_and_compare_grids = False
    reanalyze_grid_over_time = True
    make_grid_video = True
    print_grid_stats_from_file = False
    # prep simulation globals
    simsetup = singlecell_simsetup(npzpath=MEMS_MEHTA)
    celltype_labels = simsetup['CELLTYPE_LABELS']

    if run_basin_grid:
        ensemble = 1000
        timesteps = 500
        field_protocol = FIELD_APPLIED_PROTOCOL
        anneal_protocol = ANNEAL_PROTOCOL
        # BUGFIX: use integer division -- cpu_count() / 2 yields a float in
        # python 3 and process counts must be integers downstream
        num_proc = cpu_count() // 2
        async_batch = True
        plot = False
        saveall = True
        parallel = True
        # run gen_basin_grid
        t0 = time.time()
        basin_grid, io_dict = gen_basin_grid(ensemble, num_proc, simsetup=simsetup, num_steps=timesteps,
                                             anneal_protocol=anneal_protocol, field_protocol=field_protocol,
                                             async_batch=async_batch, saveall=saveall, plot=plot, parallel=parallel)
        t1 = time.time() - t0
        print("GRID TIMER:", t1)
        # add info to run info file TODO maybe move this INTO the function?
        info_list = [['fncall', 'gen_basin_grid()'], ['ensemble', ensemble], ['num_steps', timesteps],
                     ['num_proc', num_proc], ['anneal_protocol', anneal_protocol], ['field_protocol', field_protocol],
                     ['occ_threshold', OCC_THRESHOLD], ['async_batch', async_batch], ['time', t1]]
        runinfo_append(io_dict, info_list, multi=True)

    if gen_overlap_grid:
        static_grid_data = static_overlap_grid(simsetup, calc_hamming=True)

    # direct data plotting
    if load_and_plot_basin_grid:
        # specify paths and load data / parameters
        groupdir = RUNS_FOLDER + os.sep + "gridmovie"
        basedirs = ['grid_781444_1kx500_2018scMCA_mir21_lvl2']
        for basedir in basedirs:
            datadir = groupdir + os.sep + basedir
            filestr_data = datadir + os.sep + "grid_at_step_499.txt"
            basin_grid_data = load_basin_grid(filestr_data)
            ensemble, num_steps = fetch_from_run_info(datadir + os.sep + 'run_info.txt', ['ensemble', 'num_steps'])
            # build grid plots at several colour-scale caps (vforce)
            plot_basin_grid(basin_grid_data, ensemble, num_steps, celltype_labels, datadir, SPURIOUS_LIST,
                            relmax=False, ext='.pdf', vforce=0.2, namemod=basedir)
            plot_basin_grid(basin_grid_data, ensemble, num_steps, celltype_labels, datadir, SPURIOUS_LIST,
                            relmax=False, ext='.pdf', vforce=0.5, namemod=basedir)
            plot_basin_grid(basin_grid_data, ensemble, num_steps, celltype_labels, datadir, SPURIOUS_LIST,
                            relmax=False, ext='.pdf', vforce=1.0, namemod=basedir)

    # direct data plotting
    if load_and_compare_grids:
        kappa = 1.0  # positive number or None
        comparisondir = RUNS_FOLDER + os.sep + "comparegrids"
        # load basin grid
        rundir = comparisondir + os.sep + "aug11 - 1000ens x 500step"
        latticedir = rundir + os.sep + "lattice"
        filestr_data = latticedir + os.sep + "gen_basin_grid.txt"
        simulated_data = load_basin_grid(filestr_data)
        ensemble, num_steps = fetch_from_run_info(rundir + os.sep + 'run_info.txt', ['ensemble', 'num_steps'])
        plot_basin_grid(simulated_data, ensemble, num_steps, celltype_labels, comparisondir, SPURIOUS_LIST,
                        relmax=False, ext='.pdf', vforce=0.5, plotname='simulated_endpt')
        simulated_data_normed = simulated_data / ensemble
        # load static distance grid
        distance_path = comparisondir + os.sep + "celltypes_hammingdist.txt"
        distance_data_normed = load_basin_grid(distance_path) / simsetup['N']  # note normalization
        plot_overlap_grid(distance_data_normed, celltype_labels, comparisondir, hamming=True,
                          relmax=True, ext='.pdf', plotname='distances_matrix')
        # transform distance grid via f(d_ab)
        stochastic_matrix = stoch_from_distance(distance_data_normed, kappa=kappa)
        stochastic_matrix_nodiag = stochastic_matrix - np.diag(np.diag(stochastic_matrix))
        plot_overlap_grid(stochastic_matrix, celltype_labels, comparisondir,
                          hamming=True, relmax=True, ext='.pdf', plotname='stochastic_matrix')
        plot_overlap_grid(stochastic_matrix_nodiag, celltype_labels, comparisondir,
                          hamming=True, relmax=True, ext='.pdf', plotname='stochastic_matrix_nodiag')  # deleted diags
        # compute deviations between the rate matrix and the simulated transitions
        truncated_sim = simulated_data_normed[:, 0:-len(SPURIOUS_LIST)]
        print(truncated_sim.shape)
        deviation_matrix = stochastic_matrix_nodiag - truncated_sim.T
        print(simulated_data[0:4, 0:4])
        print(simulated_data_normed[0:4, 0:4])
        print(distance_data_normed[0:4, 0:4])
        print(stochastic_matrix[0:4, 0:4])
        print(stochastic_matrix_nodiag[0:4, 0:4])
        print(deviation_matrix[0:4, 0:4])
        plot_overlap_grid(deviation_matrix, celltype_labels, comparisondir,
                          hamming=True, relmax=True, ext='.pdf', plotname='deviation_matrix')
        # solve for scaling constant such that difference is minimized (A-cB=0 => AB^-1=cI)
        inverted_deviation = stochastic_matrix_nodiag*np.linalg.inv(truncated_sim.T)
        print(inverted_deviation[0:4, 0:4])

    # use labelled collection of timeseries from each row to generate multiple grids over time
    if reanalyze_grid_over_time:
        # step 0 specify ensemble, num steps, and location of row data
        groupdir = RUNS_FOLDER + os.sep + 'gridmovie'
        basedirs = ['grid_785963_1kx500_2014mehta_mir21_lvl3']
        for basedir in basedirs:
            datadir = groupdir + os.sep + basedir
            print("working in", datadir)
            ensemble, num_steps = fetch_from_run_info(datadir + os.sep + 'run_info.txt', ['ensemble', 'num_steps'])
            # step 1 restructure data
            rowdatadir = datadir + os.sep + "data"
            latticedir = datadir + os.sep + "lattice"
            plotlatticedir = datadir + os.sep + "plot_lattice"
            p = len(celltype_labels)
            k = len(SPURIOUS_LIST)
            grid_over_time = np.zeros((p, p+k, num_steps))
            for idx, celltype in enumerate(celltype_labels):
                print("loading:", idx, celltype)
                proj_timeseries_array, basin_occupancy_timeseries = load_basinstats(rowdatadir, celltype)
                grid_over_time[idx, :, :] += basin_occupancy_timeseries
            # step 2 save and plot one grid per timestep
            vforce = 0.5
            filename = 'grid_at_step'
            for step in range(num_steps):
                print("step", step)
                grid_at_step = grid_over_time[:, :, step]
                namemod = '_%d' % step
                np.savetxt(latticedir + os.sep + filename + namemod + '.txt', grid_at_step, delimiter=',', fmt='%.4f')
                plot_basin_grid(grid_at_step, ensemble, step, celltype_labels, plotlatticedir, SPURIOUS_LIST,
                                plotname=filename, relmax=False, vforce=vforce, namemod=namemod, ext='.jpg')

    if make_grid_video:
        custom_fps = 5  # 1, 5, or 20 are good
        groupdir = RUNS_FOLDER + os.sep + 'gridmovie'
        basedirs = ['grid_785963_1kx500_2014mehta_mir21_lvl3']
        for basedir in basedirs:
            datadir = groupdir + os.sep + basedir
            vidname = "%s_vmax0.5_fps%d" % (basedir, custom_fps)
            latticedir = datadir + os.sep + "plot_lattice"
            videopath = grid_video(datadir, vidname, imagedir=latticedir, fps=custom_fps)

    if print_grid_stats_from_file:
        filestr_data = RUNS_FOLDER + os.sep + "gen_basin_grid_C.txt"
        basin_grid_data = load_basin_grid(filestr_data)
        grid_stats(basin_grid_data)

    """
    ensemble = 960
    basin_grid_A = load_basin_grid(RUNS_FOLDER + os.sep + "gen_basin_grid_A.txt") / ensemble
    basin_grid_B = load_basin_grid(RUNS_FOLDER + os.sep + "gen_basin_grid_B.txt") / ensemble
    basin_grid_C = load_basin_grid(RUNS_FOLDER + os.sep + "gen_basin_grid_C.txt") / ensemble
    basin_grid_D = load_basin_grid(RUNS_FOLDER + os.sep + "gen_basin_grid_D.txt") / ensemble
    basin_grid_E = load_basin_grid(RUNS_FOLDER + os.sep + "gen_basin_grid_E.txt") / ensemble
    for idx, label in enumerate(celltype_labels):
        print idx, "%.2f vs %.2f" % (basin_grid_A[idx,-1], basin_grid_C[idx,-1]), label
        print idx, "%.2f vs %.2f" % (basin_grid_B[idx,-1], basin_grid_D[idx,-1]), label
    """
| [
"multiprocessing.cpu_count",
"numpy.argsort",
"singlecell.analysis_basin_plotting.grid_video",
"singlecell.singlecell_functions.hamming",
"numpy.exp",
"numpy.dot",
"singlecell.analysis_basin_transitions.save_and_plot_basinstats",
"singlecell.analysis_basin_transitions.fetch_from_run_info",
"utils.fi... | [((1637, 1684), 'utils.file_io.run_subdir_setup', 'run_subdir_setup', ([], {'run_subfolder': 'ANALYSIS_SUBDIR'}), '(run_subfolder=ANALYSIS_SUBDIR)\n', (1653, 1684), False, 'from utils.file_io import run_subdir_setup, runinfo_append, RUNS_FOLDER\n'), ((4354, 4406), 'numpy.loadtxt', 'np.loadtxt', (['filestr_data'], {'delimiter': '""","""', 'dtype': 'float'}), "(filestr_data, delimiter=',', dtype=float)\n", (4364, 4406), True, 'import numpy as np\n'), ((4722, 4747), 'numpy.sum', 'np.sum', (['grid_data'], {'axis': '(1)'}), '(grid_data, axis=1)\n', (4728, 4747), True, 'import numpy as np\n'), ((7162, 7191), 'numpy.zeros', 'np.zeros', (['distance_data.shape'], {}), '(distance_data.shape)\n', (7170, 7191), True, 'import numpy as np\n'), ((8005, 8044), 'singlecell.singlecell_simsetup.singlecell_simsetup', 'singlecell_simsetup', ([], {'npzpath': 'MEMS_MEHTA'}), '(npzpath=MEMS_MEHTA)\n', (8024, 8044), False, 'from singlecell.singlecell_simsetup import singlecell_simsetup\n'), ((1550, 1571), 'singlecell.singlecell_simsetup.singlecell_simsetup', 'singlecell_simsetup', ([], {}), '()\n', (1569, 1571), False, 'from singlecell.singlecell_simsetup import singlecell_simsetup\n'), ((3964, 4072), 'numpy.savetxt', 'np.savetxt', (["(io_dict['latticedir'] + os.sep + 'gen_basin_grid.txt')", 'basin_grid'], {'delimiter': '""","""', 'fmt': '"""%.4f"""'}), "(io_dict['latticedir'] + os.sep + 'gen_basin_grid.txt',\n basin_grid, delimiter=',', fmt='%.4f')\n", (3974, 4072), True, 'import numpy as np\n'), ((4090, 4198), 'singlecell.analysis_basin_plotting.plot_basin_grid', 'plot_basin_grid', (['basin_grid', 'ensemble', 'num_steps', 'celltype_labels', "io_dict['latticedir']", 'SPURIOUS_LIST'], {}), "(basin_grid, ensemble, num_steps, celltype_labels, io_dict[\n 'latticedir'], SPURIOUS_LIST)\n", (4105, 4198), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((4993, 5022), 'numpy.argsort', 'np.argsort', (['grid_data[row, :]'], 
{}), '(grid_data[row, :])\n', (5003, 5022), True, 'import numpy as np\n'), ((6217, 6233), 'numpy.dot', 'np.dot', (['xi.T', 'xi'], {}), '(xi.T, xi)\n', (6223, 6233), True, 'import numpy as np\n'), ((6385, 6461), 'numpy.savetxt', 'np.savetxt', (['(outdir + os.sep + dataname)', 'grid_data'], {'delimiter': '""","""', 'fmt': '"""%.4f"""'}), "(outdir + os.sep + dataname, grid_data, delimiter=',', fmt='%.4f')\n", (6395, 6461), True, 'import numpy as np\n'), ((6483, 6559), 'singlecell.analysis_basin_plotting.plot_overlap_grid', 'plot_overlap_grid', (['grid_data', 'celltypes', 'outdir'], {'ext': '""".pdf"""', 'normalize': '(False)'}), "(grid_data, celltypes, outdir, ext='.pdf', normalize=False)\n", (6500, 6559), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((8430, 8441), 'time.time', 'time.time', ([], {}), '()\n', (8439, 8441), False, 'import time\n'), ((9242, 9288), 'utils.file_io.runinfo_append', 'runinfo_append', (['io_dict', 'info_list'], {'multi': '(True)'}), '(io_dict, info_list, multi=True)\n', (9256, 9288), False, 'from utils.file_io import run_subdir_setup, runinfo_append, RUNS_FOLDER\n'), ((11013, 11098), 'singlecell.analysis_basin_transitions.fetch_from_run_info', 'fetch_from_run_info', (["(rundir + os.sep + 'run_info.txt')", "['ensemble', 'num_steps']"], {}), "(rundir + os.sep + 'run_info.txt', ['ensemble', 'num_steps']\n )\n", (11032, 11098), False, 'from singlecell.analysis_basin_transitions import ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, ANNEAL_PROTOCOL, FIELD_APPLIED_PROTOCOL, ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, save_and_plot_basinstats, load_basinstats, fetch_from_run_info\n'), ((11102, 11275), 'singlecell.analysis_basin_plotting.plot_basin_grid', 'plot_basin_grid', (['simulated_data', 'ensemble', 'num_steps', 'celltype_labels', 'comparisondir', 'SPURIOUS_LIST'], {'relmax': '(False)', 'ext': '""".pdf"""', 'vforce': '(0.5)', 'plotname': 
'"""simulated_endpt"""'}), "(simulated_data, ensemble, num_steps, celltype_labels,\n comparisondir, SPURIOUS_LIST, relmax=False, ext='.pdf', vforce=0.5,\n plotname='simulated_endpt')\n", (11117, 11275), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((11571, 11714), 'singlecell.analysis_basin_plotting.plot_overlap_grid', 'plot_overlap_grid', (['distance_data_normed', 'celltype_labels', 'comparisondir'], {'hamming': '(True)', 'relmax': '(True)', 'ext': '""".pdf"""', 'plotname': '"""distances_matrix"""'}), "(distance_data_normed, celltype_labels, comparisondir,\n hamming=True, relmax=True, ext='.pdf', plotname='distances_matrix')\n", (11588, 11714), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((11965, 12106), 'singlecell.analysis_basin_plotting.plot_overlap_grid', 'plot_overlap_grid', (['stochastic_matrix', 'celltype_labels', 'comparisondir'], {'hamming': '(True)', 'relmax': '(True)', 'ext': '""".pdf"""', 'plotname': '"""stochastic_matrix"""'}), "(stochastic_matrix, celltype_labels, comparisondir,\n hamming=True, relmax=True, ext='.pdf', plotname='stochastic_matrix')\n", (11982, 12106), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((12137, 12292), 'singlecell.analysis_basin_plotting.plot_overlap_grid', 'plot_overlap_grid', (['stochastic_matrix_nodiag', 'celltype_labels', 'comparisondir'], {'hamming': '(True)', 'relmax': '(True)', 'ext': '""".pdf"""', 'plotname': '"""stochastic_matrix_nodiag"""'}), "(stochastic_matrix_nodiag, celltype_labels, comparisondir,\n hamming=True, relmax=True, ext='.pdf', plotname='stochastic_matrix_nodiag')\n", (12154, 12292), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((12814, 12954), 'singlecell.analysis_basin_plotting.plot_overlap_grid', 'plot_overlap_grid', (['deviation_matrix', 
'celltype_labels', 'comparisondir'], {'hamming': '(True)', 'relmax': '(True)', 'ext': '""".pdf"""', 'plotname': '"""deviation_matrix"""'}), "(deviation_matrix, celltype_labels, comparisondir, hamming\n =True, relmax=True, ext='.pdf', plotname='deviation_matrix')\n", (12831, 12954), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((2052, 2310), 'singlecell.analysis_basin_transitions.ensemble_projection_timeseries', 'ensemble_projection_timeseries', (['celltype', 'ensemble', 'num_proc'], {'simsetup': 'simsetup', 'num_steps': 'num_steps', 'anneal_protocol': 'anneal_protocol', 'field_protocol': 'field_protocol', 'occ_threshold': 'occ_threshold', 'async_batch': 'async_batch', 'plot': '(False)', 'output': '(False)'}), '(celltype, ensemble, num_proc, simsetup=\n simsetup, num_steps=num_steps, anneal_protocol=anneal_protocol,\n field_protocol=field_protocol, occ_threshold=occ_threshold, async_batch\n =async_batch, plot=False, output=False)\n', (2082, 2310), False, 'from singlecell.analysis_basin_transitions import ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, ANNEAL_PROTOCOL, FIELD_APPLIED_PROTOCOL, ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, save_and_plot_basinstats, load_basinstats, fetch_from_run_info\n'), ((2450, 2643), 'singlecell.analysis_basin_transitions.save_and_plot_basinstats', 'save_and_plot_basinstats', (['io_dict', 'proj_timeseries_array', 'basin_occupancy_timeseries', 'num_steps', 'ensemble'], {'simsetup': 'simsetup', 'prefix': 'celltype', 'occ_threshold': 'occ_threshold', 'plot': 'plot_all'}), '(io_dict, proj_timeseries_array,\n basin_occupancy_timeseries, num_steps, ensemble, simsetup=simsetup,\n prefix=celltype, occ_threshold=occ_threshold, plot=plot_all)\n', (2474, 2643), False, 'from singlecell.analysis_basin_transitions import ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, ANNEAL_PROTOCOL, FIELD_APPLIED_PROTOCOL, 
ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, save_and_plot_basinstats, load_basinstats, fetch_from_run_info\n'), ((2721, 2754), 'singlecell.analysis_basin_transitions.get_init_info', 'get_init_info', (['celltype', 'simsetup'], {}), '(celltype, simsetup)\n', (2734, 2754), False, 'from singlecell.analysis_basin_transitions import ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, ANNEAL_PROTOCOL, FIELD_APPLIED_PROTOCOL, ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, save_and_plot_basinstats, load_basinstats, fetch_from_run_info\n'), ((7124, 7145), 'numpy.exp', 'np.exp', (['(-kappa * d_ab)'], {}), '(-kappa * d_ab)\n', (7130, 7145), True, 'import numpy as np\n'), ((8276, 8287), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (8285, 8287), False, 'from multiprocessing import cpu_count\n'), ((8786, 8797), 'time.time', 'time.time', ([], {}), '()\n', (8795, 8797), False, 'import time\n'), ((9866, 9951), 'singlecell.analysis_basin_transitions.fetch_from_run_info', 'fetch_from_run_info', (["(datadir + os.sep + 'run_info.txt')", "['ensemble', 'num_steps']"], {}), "(datadir + os.sep + 'run_info.txt', ['ensemble',\n 'num_steps'])\n", (9885, 9951), False, 'from singlecell.analysis_basin_transitions import ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, ANNEAL_PROTOCOL, FIELD_APPLIED_PROTOCOL, ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, save_and_plot_basinstats, load_basinstats, fetch_from_run_info\n'), ((9991, 10149), 'singlecell.analysis_basin_plotting.plot_basin_grid', 'plot_basin_grid', (['basin_grid_data', 'ensemble', 'num_steps', 'celltype_labels', 'datadir', 'SPURIOUS_LIST'], {'relmax': '(False)', 'ext': '""".pdf"""', 'vforce': '(0.2)', 'namemod': 'basedir'}), "(basin_grid_data, ensemble, num_steps, celltype_labels,\n datadir, SPURIOUS_LIST, relmax=False, ext='.pdf', vforce=0.2, namemod=\n basedir)\n", (10006, 10149), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, 
grid_video, plot_overlap_grid\n'), ((10181, 10339), 'singlecell.analysis_basin_plotting.plot_basin_grid', 'plot_basin_grid', (['basin_grid_data', 'ensemble', 'num_steps', 'celltype_labels', 'datadir', 'SPURIOUS_LIST'], {'relmax': '(False)', 'ext': '""".pdf"""', 'vforce': '(0.5)', 'namemod': 'basedir'}), "(basin_grid_data, ensemble, num_steps, celltype_labels,\n datadir, SPURIOUS_LIST, relmax=False, ext='.pdf', vforce=0.5, namemod=\n basedir)\n", (10196, 10339), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((10371, 10529), 'singlecell.analysis_basin_plotting.plot_basin_grid', 'plot_basin_grid', (['basin_grid_data', 'ensemble', 'num_steps', 'celltype_labels', 'datadir', 'SPURIOUS_LIST'], {'relmax': '(False)', 'ext': '""".pdf"""', 'vforce': '(1.0)', 'namemod': 'basedir'}), "(basin_grid_data, ensemble, num_steps, celltype_labels,\n datadir, SPURIOUS_LIST, relmax=False, ext='.pdf', vforce=1.0, namemod=\n basedir)\n", (10386, 10529), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((13122, 13152), 'numpy.linalg.inv', 'np.linalg.inv', (['truncated_sim.T'], {}), '(truncated_sim.T)\n', (13135, 13152), True, 'import numpy as np\n'), ((13673, 13758), 'singlecell.analysis_basin_transitions.fetch_from_run_info', 'fetch_from_run_info', (["(datadir + os.sep + 'run_info.txt')", "['ensemble', 'num_steps']"], {}), "(datadir + os.sep + 'run_info.txt', ['ensemble',\n 'num_steps'])\n", (13692, 13758), False, 'from singlecell.analysis_basin_transitions import ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, ANNEAL_PROTOCOL, FIELD_APPLIED_PROTOCOL, ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, save_and_plot_basinstats, load_basinstats, fetch_from_run_info\n'), ((14062, 14093), 'numpy.zeros', 'np.zeros', (['(p, p + k, num_steps)'], {}), '((p, p + k, num_steps))\n', (14070, 14093), True, 'import numpy as np\n'), ((15411, 15476), 
'singlecell.analysis_basin_plotting.grid_video', 'grid_video', (['datadir', 'vidname'], {'imagedir': 'latticedir', 'fps': 'custom_fps'}), '(datadir, vidname, imagedir=latticedir, fps=custom_fps)\n', (15421, 15476), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n'), ((2888, 3148), 'singlecell.analysis_basin_transitions.fast_basin_stats', 'fast_basin_stats', (['celltype', 'init_state', 'init_id', 'ensemble', 'num_processes'], {'simsetup': 'simsetup', 'num_steps': 'num_steps', 'anneal_protocol': 'anneal_protocol', 'field_protocol': 'field_protocol', 'occ_threshold': 'occ_threshold', 'async_batch': 'async_batch', 'verbose': 'verbose'}), '(celltype, init_state, init_id, ensemble, num_processes,\n simsetup=simsetup, num_steps=num_steps, anneal_protocol=anneal_protocol,\n field_protocol=field_protocol, occ_threshold=occ_threshold, async_batch\n =async_batch, verbose=verbose)\n', (2904, 3148), False, 'from singlecell.analysis_basin_transitions import ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, ANNEAL_PROTOCOL, FIELD_APPLIED_PROTOCOL, ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, save_and_plot_basinstats, load_basinstats, fetch_from_run_info\n'), ((3392, 3630), 'singlecell.analysis_basin_transitions.get_basin_stats', 'get_basin_stats', (['celltype', 'init_state', 'init_id', 'ensemble', '(0)', 'simsetup'], {'num_steps': 'num_steps', 'anneal_protocol': 'anneal_protocol', 'field_protocol': 'field_protocol', 'async_batch': 'async_batch', 'occ_threshold': 'occ_threshold', 'verbose': 'verbose'}), '(celltype, init_state, init_id, ensemble, 0, simsetup,\n num_steps=num_steps, anneal_protocol=anneal_protocol, field_protocol=\n field_protocol, async_batch=async_batch, occ_threshold=occ_threshold,\n verbose=verbose)\n', (3407, 3630), False, 'from singlecell.analysis_basin_transitions import ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, ANNEAL_PROTOCOL, 
FIELD_APPLIED_PROTOCOL, ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, save_and_plot_basinstats, load_basinstats, fetch_from_run_info\n'), ((6085, 6112), 'singlecell.singlecell_functions.hamming', 'hamming', (['xi[:, i]', 'xi[:, j]'], {}), '(xi[:, i], xi[:, j])\n', (6092, 6112), False, 'from singlecell.singlecell_functions import hamming\n'), ((11929, 11955), 'numpy.diag', 'np.diag', (['stochastic_matrix'], {}), '(stochastic_matrix)\n', (11936, 11955), True, 'import numpy as np\n'), ((14270, 14307), 'singlecell.analysis_basin_transitions.load_basinstats', 'load_basinstats', (['rowdatadir', 'celltype'], {}), '(rowdatadir, celltype)\n', (14285, 14307), False, 'from singlecell.analysis_basin_transitions import ensemble_projection_timeseries, get_basin_stats, fast_basin_stats, get_init_info, ANNEAL_PROTOCOL, FIELD_APPLIED_PROTOCOL, ANALYSIS_SUBDIR, SPURIOUS_LIST, OCC_THRESHOLD, save_and_plot_basinstats, load_basinstats, fetch_from_run_info\n'), ((14669, 14775), 'numpy.savetxt', 'np.savetxt', (["(latticedir + os.sep + filename + namemod + '.txt')", 'grid_at_step'], {'delimiter': '""","""', 'fmt': '"""%.4f"""'}), "(latticedir + os.sep + filename + namemod + '.txt', grid_at_step,\n delimiter=',', fmt='%.4f')\n", (14679, 14775), True, 'import numpy as np\n'), ((14788, 14967), 'singlecell.analysis_basin_plotting.plot_basin_grid', 'plot_basin_grid', (['grid_at_step', 'ensemble', 'step', 'celltype_labels', 'plotlatticedir', 'SPURIOUS_LIST'], {'plotname': 'filename', 'relmax': '(False)', 'vforce': 'vforce', 'namemod': 'namemod', 'ext': '""".jpg"""'}), "(grid_at_step, ensemble, step, celltype_labels,\n plotlatticedir, SPURIOUS_LIST, plotname=filename, relmax=False, vforce=\n vforce, namemod=namemod, ext='.jpg')\n", (14803, 14967), False, 'from singlecell.analysis_basin_plotting import plot_basin_grid, grid_video, plot_overlap_grid\n')] |
import csv
import logging
import fret
import numpy as np
import torchtext as tt
logger = logging.getLogger(__name__)
class Cutter:
    """Tokenizer that maps a string to a list of word indices.

    Unknown words map to 0. Note: the custom `split` character is only
    honoured when `max_len` is set; otherwise default whitespace
    splitting is used.
    """

    def __init__(self, words, max_len=None, split=' '):
        self.words = words
        self.max_len = max_len
        self.split = split

    def __call__(self, s):
        if self.max_len:
            tokens = s.split(self.split)[:self.max_len]
        else:
            tokens = s.split()
        return [self.words.get(t) or 0 for t in tokens]
class Vocab:
    """Bidirectional mapping between tokens and integer indices.

    Accepts either a path to a newline-delimited vocabulary file or a
    list of tokens. The special token '<unk>' always maps to -1.
    """

    def __init__(self, vocab_list):
        if isinstance(vocab_list, str):
            # BUGFIX: close the vocabulary file (was left open before)
            with open(vocab_list) as f:
                self.itos = f.read().strip().split('\n')
        else:
            self.itos = vocab_list
        self.stoi = {k: i for i, k in enumerate(self.itos)}
        self.stoi['<unk>'] = -1

    def __getitem__(self, item):
        """Index -> token for ints; token -> index otherwise (KeyError if unknown)."""
        if isinstance(item, int):
            return self.itos[item]
        else:
            return self.stoi[item]

    def get(self, item):
        """Return the index of *item*, or None if it is not in the vocabulary."""
        return self.stoi[item] if item in self.stoi else None

    def __len__(self):
        return len(self.itos)
class Questions:
    """Question bank backed by a fret dataset config.

    Loads question text, knowledge tags and difficulty from three TSV files
    and restricts the usable question set to ids present in all three.
    """

    def __init__(self, dataset, maxlen=400):
        # dataset: key into fret.app['datasets']; maxlen: max question length in words
        self.dataset = dataset
        cfg = fret.app['datasets'][dataset]
        self._word = Vocab(cfg['word_list'])
        self._know = Vocab(cfg['knowledge_list'])
        self.n_words = len(self._word)
        self.n_knowledge = len(self._know)
        # question text: tokenized to word indices, truncated to maxlen words
        text_field = tt.data.Field(
            tokenize=Cutter(self._word, maxlen),
            use_vocab=False)
        self._ques_text = tt.data.TabularDataset(
            cfg['question_text_file'],
            format='tsv',
            fields=[('id', tt.data.Field(sequential=False)),
                    ('content', text_field)],
            skip_header=True,
            csv_reader_params={'quoting': csv.QUOTE_NONE})
        # map question id -> position in the text dataset for O(1) lookup
        self._ques_text_ind = {item.id: i
                               for i, item in enumerate(self._ques_text)}
        # knowledge tags: comma-separated knowledge-point indices per question
        knowledge_field = tt.data.Field(
            tokenize=Cutter(self._know, split=','),
            use_vocab=False)
        self._ques_know = tt.data.TabularDataset(
            cfg['question_knowledge_file'],
            format='tsv',
            fields=[('id', tt.data.Field(sequential=False)),
                    ('knowledge', knowledge_field)],
            skip_header=True,
            csv_reader_params={'quoting': csv.QUOTE_NONE})
        self._ques_know = {item.id: item.knowledge for item in self._ques_know}
        # difficulty: one float per question id; first (header) line skipped
        self._ques_diff = {}
        diff_f = open(cfg['question_difficulty_file'])
        next(diff_f)
        for line in diff_f:
            qid, diff = line.strip().split('\t')
            diff = float(diff)
            self._ques_diff[qid] = diff
        # keep only questions that appear in all three sources
        self._ques_set = set(self._ques_text_ind) & \
            set(self._ques_know) & set(self._ques_diff)
        self.vocab = Vocab(list(sorted(self._ques_set)))
        self.stoi = self.vocab.stoi
        self.itos = self.vocab.itos
        self.n_questions = len(self.vocab)

    def __getitem__(self, index):
        """Return a question dict for an int index or a string id, or None if unknown."""
        if isinstance(index, int):
            qid = self.vocab.itos[index]
        else:
            qid = index
        if qid in self._ques_set:
            # multi-hot knowledge vector over all knowledge points
            know = np.zeros((self.n_knowledge,))
            know[self._ques_know[qid]] = 1
            text = self._ques_text[self._ques_text_ind[qid]].content
            if self.dataset == 'poj':
                # NOTE(review): 'poj' texts are truncated to 50 tokens here -- confirm intent
                text = text[:50]
            return {
                'id': qid,
                'text': text,
                'knowledge': know,
                'difficulty': self._ques_diff[qid]
            }
        else:
            return None

    def __iter__(self):
        # yields the question dict (or None) for every index in the vocab
        for i in range(len(self)):
            yield self[i]

    def __len__(self):
        return len(self.vocab)

    @property
    def knowledge(self):
        """Knowledge-point vocabulary (Vocab)."""
        return self._know

    @property
    def word(self):
        """Word vocabulary (Vocab)."""
        return self._word
def load_embedding(emb_file):
    """Load word2vec-style text embeddings.

    The first line of the file is a header "<word_count> <emb_size>"; every
    following line is "<word> <v1> <v2> ...". The header counts are parsed
    but not validated against the actual contents.

    Args:
        emb_file: path to the embedding text file (utf-8).
    Returns:
        np.ndarray of shape (n_words, emb_size) with the vectors in file order.
    """
    # BUGFIX: use a context manager so the file handle is always closed
    # (also dropped the unused `words` accumulator from the original)
    with open(emb_file, 'r', encoding='utf-8') as f:
        wcnt, emb_size = next(f).strip().split(' ')
        wcnt, emb_size = int(wcnt), int(emb_size)
        embs = []
        for line in f:
            fields = line.strip().split(' ')
            # fields[0] is the word itself; the rest are vector components
            embs.append(np.array([float(x) for x in fields[1:]]))
    return np.asarray(embs)
class QidField:
    """Extracts the question id from a 'qid,score' record.

    Ids outside the known set map to '<unk>'.
    """

    def __init__(self, set):
        # NOTE: parameter shadows the builtin `set`; name kept for interface compatibility
        self._set = set

    def get(self, item):
        qid = item.partition(',')[0]
        return qid if qid in self._set else '<unk>'
class ScoreField:
    """Extracts the numeric score from a 'qid,score' record."""

    def get(self, item):
        # second comma-separated field is the score
        return float(item.split(',', 2)[1])
def load_record(rec_file, q_field):
    """Load a tab-separated answer-record file into a torchtext Dataset.

    Args:
        rec_file: path to a TSV file with 'question' and 'score' columns,
            each cell formatted as 'qid,score'.
        q_field: question Vocab used as the question field's vocabulary.
    Returns:
        tt.data.Dataset of examples with 'question' and 'score' fields.
    """
    question = tt.data.Field(tokenize=Cutter(QidField(q_field.stoi)))
    question.vocab = q_field
    score = tt.data.Field(tokenize=Cutter(ScoreField()),
                          use_vocab=False)
    fields = {'question': ('question', question), 'score': ('score', score)}
    # NOTE(review): both columns mapped to index 0 -- confirm against fromCSV semantics
    field_to_index = {'question': 0, 'score': 0}
    # BUGFIX: close the record file after reading (was left open before)
    with open(rec_file) as f:
        reader = csv.reader(f, quoting=csv.QUOTE_NONE, delimiter='\t')
        examples = [tt.data.Example.fromCSV(line, fields, field_to_index)
                    for line in reader]
    # flatten the field mapping into the list form Dataset expects
    # (also removed the original's no-op `field_list = field_list`)
    field_list = []
    for field in fields.values():
        if isinstance(field, list):
            field_list.extend(field)
        else:
            field_list.append(field)
    records = tt.data.Dataset(examples, field_list)
    return records
| [
"logging.getLogger",
"torchtext.data.Field",
"torchtext.data.Dataset",
"torchtext.data.Example.fromCSV",
"numpy.asarray",
"numpy.zeros"
] | [((90, 117), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (107, 117), False, 'import logging\n'), ((4275, 4291), 'numpy.asarray', 'np.asarray', (['embs'], {}), '(embs)\n', (4285, 4291), True, 'import numpy as np\n'), ((5413, 5450), 'torchtext.data.Dataset', 'tt.data.Dataset', (['examples', 'field_list'], {}), '(examples, field_list)\n', (5428, 5450), True, 'import torchtext as tt\n'), ((5101, 5154), 'torchtext.data.Example.fromCSV', 'tt.data.Example.fromCSV', (['line', 'fields', 'field_to_index'], {}), '(line, fields, field_to_index)\n', (5124, 5154), True, 'import torchtext as tt\n'), ((3172, 3201), 'numpy.zeros', 'np.zeros', (['(self.n_knowledge,)'], {}), '((self.n_knowledge,))\n', (3180, 3201), True, 'import numpy as np\n'), ((1621, 1652), 'torchtext.data.Field', 'tt.data.Field', ([], {'sequential': '(False)'}), '(sequential=False)\n', (1634, 1652), True, 'import torchtext as tt\n'), ((2176, 2207), 'torchtext.data.Field', 'tt.data.Field', ([], {'sequential': '(False)'}), '(sequential=False)\n', (2189, 2207), True, 'import torchtext as tt\n')] |
from microprediction import MicroWriter
import numpy as np
from pprint import pprint
import matplotlib.pyplot as plt
import random
import time
from pprint import pprint
import warnings
warnings.filterwarnings('ignore')
from copulas.multivariate import VineCopula
import pandas as pd
from copulas.visualization import scatter_2d
import os
# Optionally pull local environment settings (no-op when the module is absent)
try:
    from set_env_private import NOTHING
except ImportError:
    pass
# 1. Grab the Github secrets
try:
    # For this script to work you need to create four separate GitHub secrets
    # called WRITE_KEY_1 WRITE_KEY_2 WRITE_KEY_3 and WRITE_KEY_4
    # The idea is that this way you get 900 samples instead of 225
    # NOTE(review): os.environ.get returns None for missing vars, so the length
    # assert below can pass with None entries -- confirm this is intended
    WRITE_KEYS = [ os.environ.get('WRITE_KEY_'+str(i+1)) for i in range(4) ]
    assert len(WRITE_KEYS)==4,'Need four write keys to make the syndicate'
except:
    # Or one secret called WRITE_KEY or WRITE_KEYS with them comma separated
    WRITE_KEYS_comma_sep = os.environ.get('WRITE_KEYS') or os.environ.get('WRITE_KEY')
    WRITE_KEYS = WRITE_KEYS_comma_sep.split(',')
# Echo the memorable animal name for each key as a startup sanity check
print('Copula syndicate is firing up.')
for write_key in WRITE_KEYS:
    animal = MicroWriter.animal_from_key(write_key)
    print(animal)
# 2. Pick a copula
VINES = ['center','regular','direct'] # See https://sdv.dev/Copulas/tutorials/03_Multivariate_Distributions.html#Vine-Copulas
VINE_TYPE = random.choice(VINES) # Perhaps you want to fix this choice. This way we get lots of plots.
# 3. (Optional) Set the URL of your repo so that others can learn from it
REPO = 'https://github.com/microprediction/microactors-plots/blob/master/fit.py' # <--- Change your username
PLOTS_PATH = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'gallery'
# 4. Create your fitting function
def fit_and_sample(lagged_zvalues:[[float]],num:int, copula=None, fig_file=None, labels=None ):
    """Fit a copula to lagged z-values and draw a representative sample.

    lagged_zvalues: [[z1,z2,z3]] data with roughly N(0,1) margins
    num:            number of rows to sample
    copula:         pre-built copula model; defaults to VineCopula(VINE_TYPE)
    fig_file:       where to save a real-vs-synthetic plot (3-dim data only)
    labels:         axis labels for the plot
    returns:        [[z1,z2,z3]] representative sample

    Note: simply sampling synthetic data is a lazy choice; more evenly spaced
    samples would likely serve better (see microprediction.com/blog/lottery).
    Any multivariate density estimation could be substituted here.
    """
    observed = pd.DataFrame(data=lagged_zvalues)
    model = VineCopula(VINE_TYPE) if copula is None else copula
    model.fit(observed)
    print('Fit done, now generating samples ...')
    sampled = model.sample(num)
    print('Sample generated')
    rows = sampled.values.tolist()
    dim = len(rows[0])
    if dim==3 and fig_file is not None:
        print('Saving to '+fig_file)
        plot_3d(real=observed, synth=sampled, fig_file=fig_file, labels=labels)
    return rows
def scatter_3d(data, columns=None, fig=None, title=None, position=None, labels=None):
    """Draw a 3d scatter of the given dataframe columns and return the axes.

    data:     dataframe whose columns hold the x/y/z coordinates
    columns:  which columns to plot (defaults to all of data's columns)
    fig:      matplotlib figure to draw into (a new one is created if falsy)
    position: subplot position code (defaults to 111)
    labels:   optional [x, y, z] axis labels
    """
    fig = plt.figure() if not fig else fig
    position = 111 if not position else position
    ax = fig.add_subplot(position, projection='3d')
    series = (data[col] for col in (columns or data.columns))
    ax.scatter(*series)
    if title:
        ax.set_title(title)
        ax.title.set_position([.5, 1.05])
    if labels:
        ax.set_xlabel(labels[0])
        ax.set_ylabel(labels[1])
        ax.set_zlabel(labels[2])
    return ax
def plot_3d(real, synth, fig_file, columns=None, figsize=(16, 6), labels=None ):
    """Save a side-by-side real-vs-synthetic 3d scatter comparison to fig_file.

    Only the most recent len(synth) rows of `real` are shown, so both panels
    display a comparable number of points.
    """
    cols = columns or real.columns
    figure = plt.figure(figsize=figsize)
    n_synth = len(synth.index)
    tail_real = real.iloc[-n_synth:]
    n_real = len(tail_real.index)
    scatter_3d(tail_real[cols], fig=figure, title=f'Real Data ({n_real})', position=121, labels=labels)
    scatter_3d(synth[cols], fig=figure, title=f'Synthetic Data ({n_synth})', position=122, labels=labels)
    plt.tight_layout()
    plt.savefig(fig_file)
if __name__ == "__main__":
    # One MicroWriter per key: submissions are split across the "syndicate".
    mws = [ MicroWriter(write_key=write_key) for write_key in WRITE_KEYS ]
    for mw in mws:
        mw.set_repository(REPO)
    mw0 = mws[0] # Doesn't matter which one
    # Restrict attention to trivariate z-streams.
    NAMES = [ n for n in mw0.get_stream_names() if 'z3~' in n ]
    count = 0
    num_to_fit = 2
    while count < num_to_fit:
        name = random.choice(NAMES)
        labels = name.split('~')[1:-1]  # constituent stream names, used as axis labels
        lagged_zvalues = mw0.get_lagged_zvalues(name=name, count=5000)
        if len(lagged_zvalues) > 20:
            num = mw0.num_predictions
            four = len(WRITE_KEYS)
            fig_file = PLOTS_PATH + os.path.sep + name.replace('.json','')+'_'+ VINE_TYPE.lower()+'.png'
            pprint((name, len(lagged_zvalues)))
            # Draw enough samples for every syndicate member in one fit.
            zvalues = fit_and_sample(lagged_zvalues=lagged_zvalues, num=num*four, fig_file=fig_file,labels=labels)
            print('Syndicate submission starting')
            try:
                # Split the samples up amongst the syndicate
                # This would be more effective if the samples were not random :-)
                # Enter the same samples for all horizons
                responses = list()
                for delay in mw0.DELAYS:
                    for j, mw in enumerate(mws):
                        zvalues_j = zvalues[j*num:(j+1)*num]
                        assert len(zvalues_j)==num
                        responses.append( mw.submit_zvalues(name=name, zvalues=zvalues_j, delay=delay ) )
                pprint(np.mean(responses))
            except Exception as e:
                # Best-effort: a failed submission should not kill the loop.
                print(e)
            print('Syndicate submission finished')
            count = count + 1
            print('Done '+str(count)+' of '+str(num_to_fit))
        else:
            print(name+' history too short ')
    # Give up if performance is bad
    for mw in mws:
        mw.cancel_worst_active(stop_loss=25,num=1)
| [
"microprediction.MicroWriter.animal_from_key",
"numpy.mean",
"random.choice",
"matplotlib.pyplot.savefig",
"os.environ.get",
"copulas.multivariate.VineCopula",
"os.path.realpath",
"microprediction.MicroWriter",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"... | [((185, 218), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (208, 218), False, 'import warnings\n'), ((1343, 1363), 'random.choice', 'random.choice', (['VINES'], {}), '(VINES)\n', (1356, 1363), False, 'import random\n'), ((1117, 1155), 'microprediction.MicroWriter.animal_from_key', 'MicroWriter.animal_from_key', (['write_key'], {}), '(write_key)\n', (1144, 1155), False, 'from microprediction import MicroWriter\n'), ((2737, 2770), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'lagged_zvalues'}), '(data=lagged_zvalues)\n', (2749, 2770), True, 'import pandas as pd\n'), ((3968, 3995), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3978, 3995), True, 'import matplotlib.pyplot as plt\n'), ((4346, 4364), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4362, 4364), True, 'import matplotlib.pyplot as plt\n'), ((4369, 4390), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_file'], {}), '(fig_file)\n', (4380, 4390), True, 'import matplotlib.pyplot as plt\n'), ((2811, 2832), 'copulas.multivariate.VineCopula', 'VineCopula', (['VINE_TYPE'], {}), '(VINE_TYPE)\n', (2821, 2832), False, 'from copulas.multivariate import VineCopula\n'), ((3390, 3402), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3400, 3402), True, 'import matplotlib.pyplot as plt\n'), ((4440, 4472), 'microprediction.MicroWriter', 'MicroWriter', ([], {'write_key': 'write_key'}), '(write_key=write_key)\n', (4451, 4472), False, 'from microprediction import MicroWriter\n'), ((4740, 4760), 'random.choice', 'random.choice', (['NAMES'], {}), '(NAMES)\n', (4753, 4760), False, 'import random\n'), ((926, 954), 'os.environ.get', 'os.environ.get', (['"""WRITE_KEYS"""'], {}), "('WRITE_KEYS')\n", (940, 954), False, 'import os\n'), ((958, 985), 'os.environ.get', 'os.environ.get', (['"""WRITE_KEY"""'], {}), "('WRITE_KEY')\n", (972, 985), False, 'import os\n'), ((1661, 1687), 
'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1677, 1687), False, 'import os\n'), ((5884, 5902), 'numpy.mean', 'np.mean', (['responses'], {}), '(responses)\n', (5891, 5902), True, 'import numpy as np\n')] |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Segmentation results visualization on a given set of WSI.
See model.py for more details and usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append("..")
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
warnings.filterwarnings("ignore",category=RuntimeWarning)
import os.path
import time
import numpy as np
from six.moves import range
import tensorflow as tf
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.contrib import training as contrib_training
from deeplab import common
from deeplab import model
from deeplab.datasets import wsi_data_generator
from deeplab.utils import save_annotation
from deeplab.utils.wsi_dataset_util import get_slide_size
from deeplab.utils.mask_to_xml import mask_to_xml
from deeplab.utils.xml_to_json import convert_xml_json
from skimage.morphology import remove_small_objects
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
# Settings for log directories.
flags.DEFINE_string('vis_logdir', None, 'Where to write the event logs.')
flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.')
# Settings for visualizing the model.
flags.DEFINE_integer('vis_batch_size', 1,
                     'The number of images in each batch during evaluation.')
flags.DEFINE_integer('vis_crop_size', 512,
                     'Crop size [size, size] for visualization.')
flags.DEFINE_integer('eval_interval_secs', 60 * 5,
                     'How often (in seconds) to run evaluation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
                           'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
                     'The ratio of input to output spatial resolution.')
# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.
flags.DEFINE_multi_float('eval_scales', [1.0],
                         'The scales to resize images for evaluation.')
# Change to True for adding flipped images during test.
flags.DEFINE_bool('add_flipped_images', False,
                  'Add flipped images for evaluation or not.')
flags.DEFINE_integer(
    'quantize_delay_step', -1,
    'Steps to start quantized training. If < 0, will not quantize model.')
# Dataset settings.
flags.DEFINE_string('dataset', 'wsi_dataset',
                    'Name of the segmentation dataset.')
flags.DEFINE_integer('wsi_downsample', 4,
                     'Downsample rate of WSI used during training.')
flags.DEFINE_integer('overlap_num', 4,
                     'Number of times the patch grid overlaps during testing.')
flags.DEFINE_integer('min_size', None,
                     'Minimum size of the detected regions.')
# Help text previously duplicated wsi_downsample's description; this flag is
# the number of segmentation classes (used for dataset setup and cleanup loop).
flags.DEFINE_integer('num_classes', 2,
                     'Number of segmentation classes.')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset resides.')
flags.DEFINE_enum('colormap_type', 'pascal', ['pascal', 'cityscapes', 'ade20k'],
                  'Visualization colormap type.')
flags.DEFINE_boolean('also_save_raw_predictions', False,
                     'Also save raw predictions.')
flags.DEFINE_boolean('save_json_annotation', False,
                     'Save the predictions in .json format for HistomicsTK.')
flags.DEFINE_string('json_filename', 'annotation.anot', '*.json annotation filename.')
# The folder where semantic segmentation predictions are saved.
_SEMANTIC_PREDICTION_SAVE_FOLDER = 'segmentation_results'
# The folder where raw semantic segmentation predictions are saved.
_RAW_SEMANTIC_PREDICTION_SAVE_FOLDER = 'raw_segmentation_results'
# The format to save image.
_IMAGE_FORMAT = '%06d_image'
# The format to save prediction
_PREDICTION_FORMAT = '%06d_prediction'
def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
"""Converts the predicted label for evaluation.
There are cases where the training labels are not equal to the evaluation
labels. This function is used to perform the conversion so that we could
evaluate the results on the evaluation server.
Args:
prediction: Semantic segmentation prediction.
train_id_to_eval_id: A list mapping from train id to evaluation id.
Returns:
Semantic segmentation prediction whose labels have been changed.
"""
converted_prediction = prediction.copy()
for train_id, eval_id in enumerate(train_id_to_eval_id):
converted_prediction[prediction == train_id] = eval_id
return converted_prediction
def _process_batch(sess, slide_mask, original_images, semantic_predictions,
                   image_names, mask_size, downsample, image_heights,
                   image_widths, image_id_offset,
                   raw_save_dir, train_id_to_eval_id=None):
  """Evaluates one single batch qualitatively.

  Runs the graph once, then stitches each patch prediction into the
  whole-slide mask at the offset encoded in the patch's filename.

  Args:
    sess: TensorFlow session.
    slide_mask: 2D numpy array accumulating the whole-slide prediction.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    mask_size: [x,y] dimentions of the mask
    downsample: Factor by which the filename-encoded offsets are divided.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.

  Returns:
    The updated slide_mask.
  """
  (original_images,
   semantic_predictions,
   image_names,
   image_heights,
   image_widths) = sess.run([original_images, semantic_predictions,
                             image_names, image_heights, image_widths])
  num_image = semantic_predictions.shape[0]
  for i in range(num_image):
    image_height = np.squeeze(image_heights[i])
    image_width = np.squeeze(image_widths[i])
    original_image = np.squeeze(original_images[i])
    semantic_prediction = np.squeeze(semantic_predictions[i])
    crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
    image_filename = image_names[i].decode()
    # populate wsi mask
    # The patch's X/Y offsets are parsed from '-'-separated fields of its
    # filename and converted to downsampled mask coordinates.
    Ystart = float(image_filename.split('-')[-2])
    Ystart /= downsample
    Ystart = int(round(Ystart))
    Xstart = float(image_filename.split('-')[-3])
    Xstart /= downsample
    Xstart = int(round(Xstart))
    # Clip to the mask bounds, then merge overlapping patches elementwise by max.
    Xstop = min(Xstart+image_width, mask_size[0])
    Ystop = min(Ystart+image_height, mask_size[1])
    slide_mask[Ystart:Ystop, Xstart:Xstop] = np.maximum(
        slide_mask[Ystart:Ystop, Xstart:Xstop],
        semantic_prediction[:Ystop-Ystart, :Xstop-Xstart])
    if FLAGS.also_save_raw_predictions:
      # # Save image.
      # save_annotation.save_annotation(
      #     original_image, raw_save_dir, _IMAGE_FORMAT % (image_id_offset + i),
      #     add_colormap=False)
      #
      # # Save prediction.
      # save_annotation.save_annotation(
      #     crop_semantic_prediction, raw_save_dir,
      #     _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
      #     colormap_type=FLAGS.colormap_type)
      if train_id_to_eval_id is not None:
        crop_semantic_prediction = _convert_train_id_to_eval_id(
            crop_semantic_prediction,
            train_id_to_eval_id)
      save_annotation.save_annotation(
          crop_semantic_prediction, raw_save_dir, image_filename,
          add_colormap=False)
  return slide_mask
def main(unused_argv):
  """Runs segmentation over each WSI and writes an annotation file per slide."""
  tf.logging.set_verbosity(tf.logging.INFO)
  # Get dataset-dependent information.
  dataset = wsi_data_generator.Dataset(
      dataset_name=FLAGS.dataset,
      dataset_dir=FLAGS.dataset_dir,
      num_of_classes=FLAGS.num_classes,
      downsample=FLAGS.wsi_downsample,
      overlap_num=FLAGS.overlap_num,
      batch_size=FLAGS.vis_batch_size,
      crop_size=FLAGS.vis_crop_size,
      min_resize_value=FLAGS.min_resize_value,
      max_resize_value=FLAGS.max_resize_value,
      resize_factor=FLAGS.resize_factor,
      model_variant=FLAGS.model_variant,
      is_training=False,
      should_shuffle=False,
      should_repeat=False)
  # A single-file dataset_dir means "segment just this slide".
  if os.path.isfile(FLAGS.dataset_dir):
      slides = [FLAGS.dataset_dir]
  else:
      # get all WSI in test set
      slides = dataset._get_all_files(with_xml=False)
  for slide in slides:
    print('Working on: [{}]'.format(slide))
    # get slide size and create empty wsi mask
    slide_size = get_slide_size(slide)
    def get_downsampled_size(size, downsample=FLAGS.wsi_downsample):
        size /= downsample
        return int(np.ceil(size))
    mask_size = [get_downsampled_size(slide_size[0]), get_downsampled_size(slide_size[1])]
    # NOTE(review): the mask is allocated at full slide resolution while the
    # patch offsets written into it (see _process_batch) are downsampled --
    # confirm this is intended.
    slide_mask = np.zeros([slide_size[1], slide_size[0]], dtype=np.uint8)
    train_id_to_eval_id = None
    raw_save_dir = None
    if FLAGS.also_save_raw_predictions:
        raw_save_dir = os.path.join(
            FLAGS.vis_logdir, _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER)
        # Prepare for visualization.
        tf.gfile.MakeDirs(FLAGS.vis_logdir)
        tf.gfile.MakeDirs(raw_save_dir)
    # A fresh graph per slide, fed by a grid iterator over the slide's patches.
    with tf.Graph().as_default():
      iterator, num_samples = dataset.get_one_shot_iterator_grid(slide)
      samples = iterator.get_next()
      model_options = common.ModelOptions(
          outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes},
          crop_size=[FLAGS.vis_crop_size,FLAGS.vis_crop_size],
          atrous_rates=FLAGS.atrous_rates,
          output_stride=FLAGS.output_stride)
      tf.logging.info('Performing WSI patch detection.\n')
      predictions = model.predict_labels(
          samples[common.IMAGE],
          model_options=model_options,
          image_pyramid=FLAGS.image_pyramid)
      predictions = predictions[common.OUTPUT_TYPE]
      tf.train.get_or_create_global_step()
      if FLAGS.quantize_delay_step >= 0:
        contrib_quantize.create_eval_graph()
      # checkpoints_iterator = contrib_training.checkpoints_iterator(
      #     FLAGS.checkpoint_dir, min_interval_secs=FLAGS.eval_interval_secs)
      # for checkpoint_path in checkpoints_iterator:
      checkpoint_path = FLAGS.checkpoint_dir
      # tf.logging.info(
      #     'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
      #                                                  time.gmtime()))
      # tf.logging.info('Visualizing with model %s', checkpoint_path)
      scaffold = tf.train.Scaffold(init_op=tf.global_variables_initializer())
      session_creator = tf.train.ChiefSessionCreator(
          scaffold=scaffold,
          master=FLAGS.master,
          checkpoint_filename_with_path=checkpoint_path)
      with tf.train.MonitoredSession(
          session_creator=session_creator, hooks=None) as sess:
        batch = 0
        image_id_offset = 0
        # Each batch of patch predictions is stitched into slide_mask.
        while not sess.should_stop():
          # tf.logging.info('Visualizing batch %d', batch + 1)
          print('\rWorking on batch: [{} of {}]'.format(batch, num_samples), end='')
          slide_mask = _process_batch(sess=sess,
                                      slide_mask=slide_mask,
                                      original_images=samples[common.IMAGE],
                                      semantic_predictions=predictions,
                                      image_names=samples[common.IMAGE_NAME],
                                      mask_size=mask_size,
                                      downsample=FLAGS.wsi_downsample,
                                      image_heights=samples[common.HEIGHT],
                                      image_widths=samples[common.WIDTH],
                                      image_id_offset=image_id_offset,
                                      raw_save_dir=raw_save_dir,
                                      train_id_to_eval_id=train_id_to_eval_id)
          image_id_offset += FLAGS.vis_batch_size
          batch += 1
    # remove small objects
    if FLAGS.min_size != None and FLAGS.min_size > 0:
        min_pixel_size = FLAGS.min_size/FLAGS.wsi_downsample
        print('\ncleaning up small objects < {} pixels'.format(min_pixel_size))
        for iter in range(FLAGS.num_classes)[1:]:
            boolmask = slide_mask == iter
            cleanMask = remove_small_objects(boolmask.astype(bool), min_pixel_size)
            slide_mask[slide_mask==iter] = 0
            # NOTE(review): adding the boolean mask restores the surviving
            # region with value 1, not `iter` -- verify for num_classes > 2.
            slide_mask += cleanMask
    if FLAGS.save_json_annotation:
        anot_filename = FLAGS.json_filename
        print('\ncreating annotation file: [{}]'.format(anot_filename))
        root = mask_to_xml(xml_path=anot_filename, mask=slide_mask, downsample=FLAGS.wsi_downsample, return_root=True)
        json_data = convert_xml_json(root, ['gloms'])
        import json
        with open(anot_filename, 'w') as annotation_file:
            json.dump(json_data, annotation_file, indent=2, sort_keys=False)
    else:
        # Default: Aperio-style XML next to the slide file.
        anot_filename = '{}.xml'.format(slide.split('.')[0])
        print('\ncreating annotation file: [{}]'.format(anot_filename))
        mask_to_xml(xml_path=anot_filename, mask=slide_mask, downsample=FLAGS.wsi_downsample)
    print('annotation file saved...\n\n')
if __name__ == '__main__':
  flags.mark_flag_as_required('checkpoint_dir')
  flags.mark_flag_as_required('dataset_dir')
  tf.app.run()
  # NOTE(review): tf.app.run() normally exits the process after main returns,
  # so the line below is expected to be unreachable -- verify it is intentional.
  print('\n\nall done.')
| [
"deeplab.utils.save_annotation.save_annotation",
"deeplab.utils.mask_to_xml.mask_to_xml",
"tensorflow.logging.set_verbosity",
"tensorflow.gfile.MakeDirs",
"deeplab.utils.wsi_dataset_util.get_slide_size",
"sys.path.append",
"tensorflow.app.run",
"tensorflow.Graph",
"deeplab.model.predict_labels",
"... | [((944, 965), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (959, 965), False, 'import sys\n'), ((988, 1013), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1011, 1013), False, 'import warnings\n'), ((1019, 1076), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (1042, 1076), False, 'import warnings\n'), ((1080, 1138), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (1103, 1138), False, 'import warnings\n'), ((6722, 6738), 'six.moves.range', 'range', (['num_image'], {}), '(num_image)\n', (6727, 6738), False, 'from six.moves import range\n'), ((8424, 8465), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (8448, 8465), True, 'import tensorflow as tf\n'), ((8518, 9007), 'deeplab.datasets.wsi_data_generator.Dataset', 'wsi_data_generator.Dataset', ([], {'dataset_name': 'FLAGS.dataset', 'dataset_dir': 'FLAGS.dataset_dir', 'num_of_classes': 'FLAGS.num_classes', 'downsample': 'FLAGS.wsi_downsample', 'overlap_num': 'FLAGS.overlap_num', 'batch_size': 'FLAGS.vis_batch_size', 'crop_size': 'FLAGS.vis_crop_size', 'min_resize_value': 'FLAGS.min_resize_value', 'max_resize_value': 'FLAGS.max_resize_value', 'resize_factor': 'FLAGS.resize_factor', 'model_variant': 'FLAGS.model_variant', 'is_training': '(False)', 'should_shuffle': '(False)', 'should_repeat': '(False)'}), '(dataset_name=FLAGS.dataset, dataset_dir=FLAGS.\n dataset_dir, num_of_classes=FLAGS.num_classes, downsample=FLAGS.\n wsi_downsample, overlap_num=FLAGS.overlap_num, batch_size=FLAGS.\n vis_batch_size, crop_size=FLAGS.vis_crop_size, min_resize_value=FLAGS.\n min_resize_value, max_resize_value=FLAGS.max_resize_value,\n resize_factor=FLAGS.resize_factor, model_variant=FLAGS.model_variant,\n is_training=False, 
should_shuffle=False, should_repeat=False)\n', (8544, 9007), False, 'from deeplab.datasets import wsi_data_generator\n'), ((14271, 14283), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (14281, 14283), True, 'import tensorflow as tf\n'), ((6759, 6787), 'numpy.squeeze', 'np.squeeze', (['image_heights[i]'], {}), '(image_heights[i])\n', (6769, 6787), True, 'import numpy as np\n'), ((6806, 6833), 'numpy.squeeze', 'np.squeeze', (['image_widths[i]'], {}), '(image_widths[i])\n', (6816, 6833), True, 'import numpy as np\n'), ((6855, 6885), 'numpy.squeeze', 'np.squeeze', (['original_images[i]'], {}), '(original_images[i])\n', (6865, 6885), True, 'import numpy as np\n'), ((6912, 6947), 'numpy.squeeze', 'np.squeeze', (['semantic_predictions[i]'], {}), '(semantic_predictions[i])\n', (6922, 6947), True, 'import numpy as np\n'), ((7461, 7571), 'numpy.maximum', 'np.maximum', (['slide_mask[Ystart:Ystop, Xstart:Xstop]', 'semantic_prediction[:Ystop - Ystart, :Xstop - Xstart]'], {}), '(slide_mask[Ystart:Ystop, Xstart:Xstop], semantic_prediction[:\n Ystop - Ystart, :Xstop - Xstart])\n', (7471, 7571), True, 'import numpy as np\n'), ((9375, 9396), 'deeplab.utils.wsi_dataset_util.get_slide_size', 'get_slide_size', (['slide'], {}), '(slide)\n', (9389, 9396), False, 'from deeplab.utils.wsi_dataset_util import get_slide_size\n'), ((9645, 9701), 'numpy.zeros', 'np.zeros', (['[slide_size[1], slide_size[0]]'], {'dtype': 'np.uint8'}), '([slide_size[1], slide_size[0]], dtype=np.uint8)\n', (9653, 9701), True, 'import numpy as np\n'), ((8247, 8358), 'deeplab.utils.save_annotation.save_annotation', 'save_annotation.save_annotation', (['crop_semantic_prediction', 'raw_save_dir', 'image_filename'], {'add_colormap': '(False)'}), '(crop_semantic_prediction, raw_save_dir,\n image_filename, add_colormap=False)\n', (8278, 8358), False, 'from deeplab.utils import save_annotation\n'), ((9965, 10000), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.vis_logdir'], {}), '(FLAGS.vis_logdir)\n', 
(9982, 10000), True, 'import tensorflow as tf\n'), ((10011, 10042), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['raw_save_dir'], {}), '(raw_save_dir)\n', (10028, 10042), True, 'import tensorflow as tf\n'), ((10217, 10436), 'deeplab.common.ModelOptions', 'common.ModelOptions', ([], {'outputs_to_num_classes': '{common.OUTPUT_TYPE: dataset.num_of_classes}', 'crop_size': '[FLAGS.vis_crop_size, FLAGS.vis_crop_size]', 'atrous_rates': 'FLAGS.atrous_rates', 'output_stride': 'FLAGS.output_stride'}), '(outputs_to_num_classes={common.OUTPUT_TYPE: dataset.\n num_of_classes}, crop_size=[FLAGS.vis_crop_size, FLAGS.vis_crop_size],\n atrous_rates=FLAGS.atrous_rates, output_stride=FLAGS.output_stride)\n', (10236, 10436), False, 'from deeplab import common\n'), ((10485, 10537), 'tensorflow.logging.info', 'tf.logging.info', (['"""Performing WSI patch detection.\n"""'], {}), "('Performing WSI patch detection.\\n')\n", (10500, 10537), True, 'import tensorflow as tf\n'), ((10560, 10671), 'deeplab.model.predict_labels', 'model.predict_labels', (['samples[common.IMAGE]'], {'model_options': 'model_options', 'image_pyramid': 'FLAGS.image_pyramid'}), '(samples[common.IMAGE], model_options=model_options,\n image_pyramid=FLAGS.image_pyramid)\n', (10580, 10671), False, 'from deeplab import model\n'), ((10775, 10811), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (10809, 10811), True, 'import tensorflow as tf\n'), ((11518, 11637), 'tensorflow.train.ChiefSessionCreator', 'tf.train.ChiefSessionCreator', ([], {'scaffold': 'scaffold', 'master': 'FLAGS.master', 'checkpoint_filename_with_path': 'checkpoint_path'}), '(scaffold=scaffold, master=FLAGS.master,\n checkpoint_filename_with_path=checkpoint_path)\n', (11546, 11637), True, 'import tensorflow as tf\n'), ((13535, 13643), 'deeplab.utils.mask_to_xml.mask_to_xml', 'mask_to_xml', ([], {'xml_path': 'anot_filename', 'mask': 'slide_mask', 'downsample': 'FLAGS.wsi_downsample', 'return_root': 
'(True)'}), '(xml_path=anot_filename, mask=slide_mask, downsample=FLAGS.\n wsi_downsample, return_root=True)\n', (13546, 13643), False, 'from deeplab.utils.mask_to_xml import mask_to_xml\n'), ((13661, 13694), 'deeplab.utils.xml_to_json.convert_xml_json', 'convert_xml_json', (['root', "['gloms']"], {}), "(root, ['gloms'])\n", (13677, 13694), False, 'from deeplab.utils.xml_to_json import convert_xml_json\n'), ((14016, 14106), 'deeplab.utils.mask_to_xml.mask_to_xml', 'mask_to_xml', ([], {'xml_path': 'anot_filename', 'mask': 'slide_mask', 'downsample': 'FLAGS.wsi_downsample'}), '(xml_path=anot_filename, mask=slide_mask, downsample=FLAGS.\n wsi_downsample)\n', (14027, 14106), False, 'from deeplab.utils.mask_to_xml import mask_to_xml\n'), ((9518, 9531), 'numpy.ceil', 'np.ceil', (['size'], {}), '(size)\n', (9525, 9531), True, 'import numpy as np\n'), ((10865, 10901), 'tensorflow.contrib.quantize.create_eval_graph', 'contrib_quantize.create_eval_graph', ([], {}), '()\n', (10899, 10901), True, 'from tensorflow.contrib import quantize as contrib_quantize\n'), ((11690, 11760), 'tensorflow.train.MonitoredSession', 'tf.train.MonitoredSession', ([], {'session_creator': 'session_creator', 'hooks': 'None'}), '(session_creator=session_creator, hooks=None)\n', (11715, 11760), True, 'import tensorflow as tf\n'), ((13109, 13133), 'six.moves.range', 'range', (['FLAGS.num_classes'], {}), '(FLAGS.num_classes)\n', (13114, 13133), False, 'from six.moves import range\n'), ((13791, 13855), 'json.dump', 'json.dump', (['json_data', 'annotation_file'], {'indent': '(2)', 'sort_keys': '(False)'}), '(json_data, annotation_file, indent=2, sort_keys=False)\n', (13800, 13855), False, 'import json\n'), ((10055, 10065), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10063, 10065), True, 'import tensorflow as tf\n'), ((11457, 11490), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11488, 11490), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
"""
Tools to manipulate data

Pandas helpers: NaN-safe string rendering, bool-dtype repair, and an
`unmelt` (inverse pivot) utility.
"""
from collections import Counter
from functools import partial
from typing import Union, List
import pandas as pd
import numpy as np
from .logging import get_logger
logger = get_logger('{pkg}.utils.pandas')  # NOTE(review): '{pkg}' looks like an unformatted placeholder -- verify
def nan_str(x):
    """Stringify ``x``, rendering a float NaN as the empty string."""
    if isinstance(x, float) and np.isnan(x):
        return ''
    return str(x)
def str_join(x, sep=''):
    """Concatenate the items of ``x`` with ``sep``, NaN-safe via :func:`nan_str`."""
    return sep.join(map(nan_str, x))
def fix_bools(df):
    """
    For some reason, when modifying inplace or adding data to a dataframe, a bool col can become object :/
    This causes lots of issues afterwards => try to fix it here

    Recasts, in place, every object-dtype column whose values are all
    literally True/False back to a proper bool dtype. Columns containing
    NaN (or any other value) keep their object dtype.

    Args:
        df (pd.DataFrame): dataframe to repair (modified in place)
    """
    for c in list(df):
        if df[c].dtype.kind == 'O':
            # Only recast when the column holds nothing but True/False.
            if not set(df[c].unique()).difference({True, False}):
                # Plain column assignment (not .loc) so the dtype is actually
                # replaced: on modern pandas, ``df.loc[:, c] = ...`` may write
                # back into the existing object block and silently keep 'O'.
                df[c] = df[c].astype(bool)
# Default aggregation functions keyed by numpy ``dtype.kind`` codes
# (f=float, i=int, b=bool, O=object), used when collapsing column values.
dtypes_agg = {
    'f': np.nansum,
    'i': np.nansum,
    'b': np.nanmax,
    'O': str_join
}
# Aggregation method names for pandas groupby, keyed the same way
# (M=datetime64).
dtypes_gb_agg = {
    'f': 'sum',
    'i': 'sum',
    'b': 'max',
    'O': 'unique',
    'M': 'max'
}
def unmelt(df: pd.DataFrame, on, value_cols, id_cols=None, new_cols=None, compress=False, filter_ids=None, agg=None, dtype_agg=None, **kw) -> pd.DataFrame:
"""
Given a dataframe `df` of the form:
x A B C
1 bar 1.2 NaN
1 foo 2.3 1
1 baz 45.3 2
2 bar 8.3 10.3
2 foo -3.3 0.4
3 bar 0.34 -0.53
"unmelting" `df` on 'A' for 'B' and 'C', id from 'x' will produce the following dataframe:
x B_bar B_foo B_baz C_bar C_foo C_baz
1 1.2 2.3 45.3 NaN 1 2
2 8.3 -3.3 NaN 10.3 0.4 NaN
3 0.34 NaN NaN -0.53 NaN NaN
ie. the dataframe values from `value_cols` are pivoted from being row-indexed to col-indexed
Args:
df (DataFrame): dataframe to unmelt
on (str): column to be unmelted (to do the pivot on)
value_cols (str|list[str]): list of columns holding values (x) to be unmelted (pivoted)
id_cols (str|list[str]=None): columns used to identify uniquely new row (after aggregation)
if None, do not aggregate
new_cols (callable|dict|str=None): mapping to create new pivoted columns ; default to "{col}_{x}"
if a function: new_name = func(col, x)
if a dict: new_name = '{col}_{z}'.format(col=col, z=new_cols.get(x, x))
if a str: new_name = new_col.format(col=col, x=x)
compress (bool=False): if True, remove empty created feature (ie. full of NaN)
filter_ids (callable|list|bool=None): `x` values ("on") to be kept ; if True, keep non-NaN values ;
if callable, takes list of unique values, and return list of kept values
agg (str|dict|callable=None): aggregation (for groupby)
if None + new cols are unique -> agg='max' (ie. one-to-one pivot, no loss of information, nrow reduction)
if None + new cols are overlapping -> agg={u:'max', o:'sum'} (ie. max for unique columns,
sum aggregation for overlapping)
dtype_agg (dict=None): describe how to aggregate values when `new_cols` mapping has a many-to-one relation (ie. two distinct x values
will produce same new column name, thus needing to aggregate values)
if False -> do not aggregate at all (same number of rows as input)
if None, replace old values by new ones
if 'default', use default aggregation per type (see `dtypes_agg` in this module)
else, should be a dict mapping type kind (f, i, b, O) to a function (eg. np.sum) or None
TODO: this should never be needed!
use_default_agg (bool=None) if True, use `agg_new`, `agg_other`, and `agg_num` even is `agg` is provided
if False, and `agg` is given, ignore `agg_new`, `agg_other` and `agg_num`
if None (default), becomes False if `agg` is given, True otherwise.
agg_new (str='sum') how to aggregate new columns by default
agg_other (str='max') how to aggregate non-numerical, original columns, by default
agg_num (str='sum') how to aggregate numerical, original columns, by default
**kw (dict): options
Keyword Arguments:
na_rep : replacement for NaN values in created new features
na_rep_all (bool=True): if True, replace original NaN too with `na_rep`
suffix_sep (str='_') : the string to seperate the original column name with its `x` value (default: '_')
agg_new (str=sum) : the default method to aggregate new columns
agg_num (str=sum) : the default method to aggregate non-new numerical columns
agg_other (str=max) : the default method to aggregate other (not new, not num) columns
"""
xarr = df[on].unique()
# filter `x` values
if filter_ids is not None:
if isinstance(filter_ids, bool):
if filter_ids:
xarr = df.loc[df[on].notnull(), on].unique()
elif callable(filter_ids):
xarr = filter_ids(xarr)
elif isinstance(filter_ids, str):
xarr = [filter_ids]
else:
assert isinstance(filter_ids, (list, tuple, np.ndarray))
xarr = list(filter_ids)
# new column names generator: from an original column `col` and a value `x`, return new column name
_sep = kw.get('suffix_sep', '_')
if new_cols is None:
get_name = (f'{{col}}{_sep}{{x}}').format
elif isinstance(new_cols, str):
def get_name(col, x):
return new_cols.format(col=col, x=x)
elif callable(new_cols):
get_name = new_cols
else:
assert isinstance(new_cols, dict)
def get_name(col, x):
_ = new_cols.get(x, x)
return f'{col}{_sep}{_}'
# NaN management
na_rep = repl_na = False
nra = kw.get('na_rep_all', True)
if 'na_rep' in kw:
repl_na = True
na_rep = kw['na_rep']
keep_ona = repl_na and not nra
# agg default
default_agg_new = kw.get('agg_new', 'sum')
default_agg_other = kw.get('agg_other', 'max')
default_agg_num = kw.get('agg_num', 'sum')
use_default_agg = kw.get('use_default_agg', None)
if use_default_agg is None:
use_default_agg = agg is None
# value aggregation
if dtype_agg == 'default':
dtype_agg = dtypes_agg
elif dtype_agg is not None:
assert isinstance(dtype_agg, dict)
if isinstance(value_cols, str):
value_cols = [value_cols]
else:
value_cols = list(value_cols)
ecols = {on}.union(value_cols)
copy_cols = [_ for _ in df if _ not in ecols]
res = df.loc[:, copy_cols].copy()
new = Counter()
ona = {}
for x in xarr:
# locate `xid` in data
loc = df[on] == x
# new col names
names = [get_name(col=col, x=x) for col in value_cols]
names_map = dict(zip(value_cols, names))
new.update(names)
# extract dataframe values
y = df.loc[loc, value_cols].rename(columns=names_map).copy()
# add to result
co = list(set(y).intersection(res))
if not co:
# only unique new cols -> merge
res = res.merge(y, how='outer', left_index=True, right_index=True)
else:
# new cols -> merge
cu = list(set(y).difference(res))
if cu:
res = res.merge(y.loc[:, cu], how='outer', left_index=True, right_index=True)
if dtype_agg:
for c in co:
fagg = dtype_agg.get(y[c].dtype.kind, None)
if fagg is None:
res.loc[loc, c] = y.loc[:, c]
else:
res.loc[loc, c] = pd.DataFrame([res.loc[loc, c], y.loc[:, c]]).apply(fagg, axis=0)
else:
# overlapping cols -> replace values
res.loc[loc, co] = y.loc[:, co]
# keep info on original NaN
if keep_ona:
ona.update({col: y[col].isnull().index.values for col in names})
# aggregation check
if id_cols is None:
agg = False
elif isinstance(id_cols, str):
id_cols = [id_cols]
else:
id_cols = list(id_cols)
# if agg is None:
if use_default_agg:
_agg = dict({c: default_agg_new for c, n in new.items() if n > 1})
_agg.update({c: default_agg_other if res[c].dtype.kind not in ('f', 'i') else default_agg_num for c in set(res).difference(_agg).difference(id_cols)})
if agg is None:
agg = _agg
else:
agg = dict(_agg, **agg)
elif agg is None:
agg = False
new = sorted(new)
if not isinstance(agg, bool) and agg != 'max' and keep_ona:
logger.warning('partial NaN replacement in `unmelt` is only reliable if `agg="max"`: replacing all NaNs')
keep_ona = False
nra = True
# compress: remove columns full of NaNs
if compress:
comp = res.loc[:, new].dropna(axis='columns', how='all')
res.drop(columns=new, inplace=True)
res = res.merge(comp, how='outer', left_index=True, right_index=True)
if isinstance(agg, dict):
for c in set(new).difference(res).intersection(agg):
del agg[c]
new = sorted(set(res).intersection(new))
# fill NaN
xnr = xnk = np.nan
if repl_na:
xnr = xnk = na_rep
if agg == 'max':
# use a trick: replace by numbers that we know are less than minimum value in data
xn0 = int(np.floor(res[new].min().min()))
xnr = xn0 - 100 # value for NaN to replace = lowest value
xnk = xn0 - 50 # value for NaN to keep = 2nd lowest value
else:
repl_na = False # do not do replacement trick afterwards
res.loc[:, new] = res[new].fillna(xnr).copy()
if not nra:
for col, loc in ona.items():
res.loc[loc, col] = xnk
# last, create a view grouping by `id_cols`, thus removing uncessary rows
# res = res.groupby(id_cols).max().reset_index() # max -> ensure we keep data's value, unless only NaN (ie. lower value replaced for NaN)
if agg is not False:
res = res.groupby(id_cols).agg(agg).reset_index() # max -> ensure we keep data's value, unless only NaN (ie. lower value replaced for NaN)
if repl_na:
# now really replace original / created NaN values by their intended values: `na_rep` or NaN if not `na_rep_all`
for col in new:
res.loc[res[col] == xnr, col] = na_rep
if not nra:
# put back NaN values we wanted to keep
res.loc[res[col] == xnk, col] = np.nan
return res
def partial_merge(df_left: pd.DataFrame, df_right: pd.DataFrame, on=None, left_on=None, right_on=None, **name_mapping):
    """
    Merge `df_right` into `df_left`, keeping only the columns of `df_right`
    that `df_left` does not already have (the join key is always kept).
    Extra keyword arguments rename the added columns (old_name=new_name).
    """
    # decide how to join: by index when no key column is given, by key otherwise
    if on is None and left_on is None and right_on is None:
        merge_kw = dict(how='outer', left_index=True, right_index=True)
    else:
        merge_kw = dict(how='outer', on=on, left_on=left_on, right_on=right_on)
    # columns already present on the left side (excluding the join keys)
    existing = set(df_left).difference({on, right_on})
    existing.discard(None)
    keep = [c for c in df_right if c not in existing]
    right_part = df_right.loc[:, keep]
    if name_mapping:
        right_part = right_part.rename(columns=name_mapping)
    return df_left.merge(right_part, **merge_kw)
def merge_update(df_left: pd.DataFrame, df_right: pd.DataFrame, on=None, left_on=None, right_on=None,
                 left_index=False, right_index=False, prefer='right', adjust_dtypes=True):
    """
    Merge `df_right` with `df_left` in an update fashion:
    - columns present on only one side are simply carried over
    - common columns are combined with `combine_first`: NaNs on the preferred
      side are filled from the other side, non-NaN preferred values win
    `prefer` selects which side wins ('left' or anything else for 'right').
    When `adjust_dtypes` is True, dtypes are re-inferred on the result.
    """
    by_index = on is None and left_on is None and right_on is None
    if by_index:
        # align on the existing indexes
        primary, secondary = (df_left, df_right) if prefer == 'left' else (df_right, df_left)
        merged = primary.combine_first(secondary)
    else:
        # align on the provided id columns (or pre-set indexes)
        ml = df_left if left_index else df_left.set_index(left_on or on)
        mr = df_right if right_index else df_right.set_index(right_on or on)
        primary, secondary = (ml, mr) if prefer == 'left' else (mr, ml)
        merged = primary.combine_first(secondary).reset_index()
    return merged.infer_objects() if adjust_dtypes else merged
def object_nan_rep(x: pd.Series):
    """Pick a NaN replacement for an object Series from its non-NaN value types:
    all-str -> '', all-list/tuple -> [], all-dict -> {}, otherwise NaN."""
    non_null = x.loc[x.notnull()].unique()
    # tuples are treated like lists for the purpose of picking a default
    kinds = {type(v).__name__.replace('tuple', 'list') for v in non_null}
    if len(kinds) != 1:
        return np.nan
    return {'str': '', 'list': [], 'dict': {}}.get(kinds.pop(), np.nan)
# Default NaN replacement per dtype kind (f=float, i=int, b=bool, O=object).
# The object entry is a callable, resolved per-column by `smart_fillna`.
na_rep_mapping = {
    'f': 0.,
    'i': 0,
    'b': False,
    'O': object_nan_rep
}
def smart_fillna(df: pd.DataFrame, na_reps: dict=None, inplace=False, downcast=None):
    """
    Replace NaN values according to the dtype kind of each column.
    The per-kind replacement can be overridden through `na_reps`:
        f: float, i: int, b: bool, O: str (object)
    The default object handler inspects the non-NaN values:
    - only strings -> ''
    - only list / tuple -> []
    - otherwise no replacement
    """
    reps = na_rep_mapping.copy()
    if na_reps is not None:
        reps.update(na_reps)
    # per-column replacement, looked up by dtype kind (None means "leave NaNs")
    per_col = df.dtypes.apply(lambda dt: reps.get(dt.kind, None)).to_dict()
    fill_values = {}
    for col, rep in per_col.items():
        if rep is None:
            continue
        # callables (object handler) are resolved against the actual column
        fill_values[col] = rep(df[col]) if callable(rep) else rep
    return df.fillna(fill_values, inplace=inplace, downcast=downcast)
def auto_adjust_dtypes(df: pd.DataFrame, inplace=False):
    """Rebuild every column from its Python values so pandas re-infers dtypes.
    Works on a copy unless `inplace` is True; returns the adjusted frame."""
    target = df if inplace else df.copy()
    for col in list(target):
        target.loc[:, col] = pd.Series(target[col].tolist(), index=target.index, name=col)
    return target
def map_values(s: pd.Series, mapping: dict, warn=True):
    """Map series values through `mapping`; values missing from the mapping are
    kept unchanged (with an optional warning) instead of becoming NaN."""
    missing = set(s.unique()).difference(mapping)
    if missing:
        if warn:
            logger.warning(f'missing values in your mapping ; in the series but not in mapping: {missing}')
        # complete the mapping with identity entries so nothing turns into NaN
        mapping = {**mapping, **{v: v for v in missing}}
    return s.map(mapping)
def constant_reducer(x0):
    """Return a reducer that ignores its input and always yields `x0`."""
    def _const(x):
        return x0
    return _const
class ForbiddenAggError(Exception):
    """Raised when an aggregation is attempted on a forbidden column or dtype."""
    pass
def forbidden_reducer(col=None, dtype=None):
    """Build a reducer that always raises ForbiddenAggError, naming either the
    offending column (`col`) or the offending dtype (`dtype`)."""
    if col is not None:
        def reducer(x):
            raise ForbiddenAggError(f'cannot aggregate this column: {col}')
    else:
        # one of the two must be provided
        assert dtype is not None
        def reducer(x):
            raise ForbiddenAggError(f'cannot aggregate this data type: {dtype}')
    return reducer
def most_common_reducer(s=None, warn=False):
    """Build a reducer returning the most common non-NaN value.
    With `s` given, the mode is computed immediately on `s` and a constant
    reducer is returned; otherwise the returned reducer computes it per call.
    When `warn` is True, a warning is logged if the values are not unique."""
    def _mode(y: pd.Series, label):
        v = y.mode().values[0]
        if warn:
            if len(y.unique()) != 1:
                logger.warning(f'there are more than one non-NaNs unique values for column "{label}" ; using: {v}')
        return v
    if s is not None:
        return constant_reducer(_mode(s.loc[s.notnull()], s.name))
    def reducer(x: pd.Series):
        return _mode(x.loc[x.notnull()], x.name)
    return reducer
def unique_reducer(s=None):
    """Build a reducer that requires exactly one non-NaN unique value and returns it.

    If `s` is given, the check runs immediately on `s` and a constant reducer is
    returned; otherwise the returned reducer performs the check on its input.

    Raises:
        AssertionError: when the data holds zero or several non-NaN unique values.
    """
    if s is None:
        def reducer(x: pd.Series):
            u = x.loc[x.notnull()].unique()
            # leftover debug prints removed: the assertion message already names
            # the offending column and the number of values found
            assert len(u) == 1, f'{len(u)} non-NaNs unique values for column "{x.name}": expected 1'
            return u[0]
    else:
        u = s.loc[s.notnull()].unique()
        assert len(u) == 1, f'{len(u)} non-NaNs unique values for column "{s.name}": expected 1'
        reducer = constant_reducer(u[0])
    return reducer
def unique_na_reducer(s=None):
    """Build a reducer that requires exactly one unique value (NaN counts as a value).

    If `s` is given, the check runs immediately on `s` and a constant reducer is
    returned; otherwise the returned reducer performs the check on its input.

    Raises:
        AssertionError: when the data holds more than one unique value.
    """
    if s is None:
        def reducer(x: pd.Series):
            u = x.unique()
            if u.size == 1:
                return u[0]
            # leftover debug prints removed; raise explicitly so the check is
            # not stripped when running under `python -O`
            raise AssertionError(f'{u.size} unique values for column "{x.name}": expected 1')
    else:
        u = s.unique()
        n = u.size
        if n == 1:
            reducer = constant_reducer(u[0])
        else:
            raise AssertionError(f'{n} unique values for column "{s.name}": expected 1')
    return reducer
def set_reducer(s=None, sort: bool=True):
    """Build a reducer returning the list of non-NaN unique values,
    sorted when `sort` is True (the default)."""
    to_list = sorted if sort else list
    if s is not None:
        # pre-compute on `s` and freeze the result
        return constant_reducer(to_list(s.loc[s.notnull()].unique()))
    def reducer(x: pd.Series):
        return to_list(x.loc[x.notnull()].unique())
    return reducer
def preferred_or_most_common_reducer(preferred: list, s=None, warn=False):
    """Build a reducer returning the first `preferred` value present in the data,
    falling back to the most common non-NaN value when none is present.
    With `s` given, the value is computed immediately and frozen."""
    def _pick(y: pd.Series, label):
        u = set(y.unique())
        try:
            # find the first preferred value, it in unique (else raise StopIteration)
            v = next((p for p in preferred if p in u))
        except StopIteration:
            v = y.mode().values[0]
        if warn and len(u) > 1:
            logger.warning(f'there are more than one non-NaNs unique values for column "{label}" ; using: {v}')
        return v
    if s is not None:
        return constant_reducer(_pick(s.loc[s.notnull()], s.name))
    def reducer(x: pd.Series):
        return _pick(x.loc[x.notnull()], x.name)
    return reducer
def gb_agg_dtypes(df: pd.DataFrame, on: (str, list), dtype_agg: dict=None, default_agg=None, agg: (dict, list)=None, preferred=None) -> pd.DataFrame:
    """
    Groupby `df` on `on`, and then aggregate the results and reset the index.
    This function is intended to aggregate easily based on data types. Four special agg strings can be used:
    - 'raise' -> forbid to aggregate on this type / column (expect one value only)
    - 'unique' -> force the array to have one unique non-NaN value, used as the aggregation value
    - 'most_common' -> use the most common value (excluding NaNs)
    - 'most_common_warn' -> same as above, but show warning when there is more than 1 unique values (excluding NaNs)
    - 'preferred_or_most_common' -> given a list preferred value, pick the first one in data, otherwise the most common
    - 'preferred_or_most_common_warn' -> given a list preferred value, pick the first one in data, otherwise the most common (with a warning if not unique)
    Args:
        df : the dataframe
        on : the column to groupby on
        dtype_agg (dict) : the mapping of dtypes (f, i, b, O) to agg function (str, callable) ; a default mapping is used
        default_agg (str|callable) : the default aggregation function used if a column:
            - type is not in `dtype_agg`
            - AND not in `agg` (if provided)
        agg (dict|list) : explicit column aggregation function ; if a list, `default_agg` must be given
        preferred (dict|list) : preferred values, when `dtype_agg` = 'preferred_or_most_common'
    """
    # normalize `on` to a list of group columns
    if isinstance(on, str):
        on = [on]
    else:
        on = list(on)
    # normalize `preferred` to a dict mapping dtype kind -> list of preferred values
    if preferred is None:
        preferred = {}
    else:
        if isinstance(preferred, (str, int, float, bool)):
            preferred = [preferred]
        if isinstance(preferred, list):
            # same list for all types...
            preferred = {t: preferred for t in 'fbiOM'}
        else:
            assert isinstance(preferred, dict)
    # normalize `dtype_agg` to a dict mapping dtype kind -> agg spec
    # ('fbiOM'/'fibOM' are the same set of kinds: float, int, bool, object, datetime)
    if isinstance(dtype_agg, str):
        if dtype_agg == 'default':
            dtype_agg = dtypes_gb_agg.copy()
        elif dtype_agg.startswith('preferred_or_most_common'):
            dtype_agg = {t: dtype_agg for t in 'fbiOM'}
        else:
            dtype_agg = {t: dtype_agg for t in 'fibOM'}
    elif callable(dtype_agg):
        dtype_agg = {t: dtype_agg for t in 'fibOM'}
    elif dtype_agg is None:
        if default_agg is None:
            dtype_agg = dtypes_gb_agg.copy()
        else:
            dtype_agg = {t: default_agg for t in 'fibOM'}
    else:
        assert isinstance(dtype_agg, dict)
        # complete the user mapping with `default_agg` (or module defaults)
        if default_agg is not None:
            dtype_agg = dict(dtypes_gb_agg, **{k: default_agg for k in set(dtypes_gb_agg).difference(dtype_agg)})
        else:
            dtype_agg = dict(dtypes_gb_agg, **dtype_agg)
    # resolve the special agg strings into actual reducer callables (per dtype)
    for t in list(dtype_agg):
        v = dtype_agg[t]
        if v == 'unique':
            dtype_agg[t] = unique_reducer()
        elif v == 'unique_na':
            dtype_agg[t] = unique_na_reducer()
        elif v == 'raise':
            dtype_agg[t] = forbidden_reducer(dtype=t)
        elif v == 'default':
            dtype_agg[t] = dtypes_gb_agg[t]
        elif v == 'most_common':
            dtype_agg[t] = most_common_reducer()
        elif v == 'most_common_warn':
            dtype_agg[t] = most_common_reducer(warn=True)
        elif v == 'preferred_or_most_common':
            dtype_agg[t] = preferred_or_most_common_reducer(preferred.get(t, ()))
        elif v == 'preferred_or_most_common_warn':
            dtype_agg[t] = preferred_or_most_common_reducer(preferred.get(t, ()), warn=True)
    # normalize per-column `agg` and resolve its special strings
    # (here the reducers are pre-bound to the column data `df[c]`)
    if agg is None:
        agg = {}
    elif not isinstance(agg, dict):
        assert default_agg is not None, '`default_agg` must be provided when `agg` is not a dict'
        agg = {k: default_agg for k in agg}
    else:
        for c in list(agg):
            v = agg[c]
            if v == 'unique':
                agg[c] = unique_reducer(df[c])
            elif v == 'unique_na':
                agg[c] = unique_na_reducer(df[c])
            elif v == 'raise':
                agg[c] = forbidden_reducer(col=c)
            elif v == 'most_common':
                agg[c] = most_common_reducer(df[c])
            elif v == 'most_common_warn':
                agg[c] = most_common_reducer(df[c], warn=True)
            elif v == 'preferred_or_most_common':
                agg[c] = preferred_or_most_common_reducer(preferred.get(c, ()), df[c])
            elif v == 'preferred_or_most_common_warn':
                agg[c] = preferred_or_most_common_reducer(preferred.get(c, ()), df[c], warn=True)
    # every remaining column (not grouped on, not explicitly configured)
    # falls back to the dtype-based aggregation
    for c in set(df).difference(on).difference(agg):
        k = df[c].dtype.kind
        agg[c] = dtype_agg[k]
    return df.groupby(on).agg(agg).reset_index()
def add_transformed_cols(x: pd.DataFrame, on: Union[str, list], data: Union[dict, list], transform: Union[str, dict], adjust_dtype: bool=True):
    """
    Group-transform new data (same index as `x`) on `on` and add the resulting
    columns to `x`. With `adjust_dtype`, columns are written back in place with
    re-inferred dtypes; otherwise an updated merge is returned.
    """
    on = [on] if isinstance(on, str) else on
    work = x[on].copy()
    if not isinstance(data, dict):
        # a list of column names: take the values from `x`
        data = {c: x[c].values for c in data}
    # new column names must not collide with the group columns
    assert not set(data).intersection(work)
    for name, values in data.items():
        work.loc[:, name] = values
    transformed = work.groupby(on).transform(transform)
    if not adjust_dtype:
        return merge_update(x, transformed, on=on, prefer='right')
    for name in transformed:
        x.loc[:, name] = pd.Series(transformed[name].tolist(), index=x.index, name=name)
    return x
def count_unique(x: pd.Series):
    """Number of distinct non-NaN values in `x`."""
    return x.dropna().unique().size
def has_more_than_one_unique(x: pd.Series):
    """True when `x` holds at least two distinct non-NaN values."""
    return x.dropna().unique().size > 1
def flag_non_unique_agg_values(df: pd.DataFrame, ids, cols, suffix=''):
    """For each column of `cols`, flag rows whose `ids`-group holds more than one
    non-NaN unique value. Columns with no flagged group are dropped; returns
    None when nothing at all is flagged."""
    if isinstance(cols, str):
        cols = [cols]
    ids = [ids] if isinstance(ids, str) else list(ids)
    flags = df.groupby(ids)[cols].transform(has_more_than_one_unique)
    renames = {c: f'{c}{suffix}' for c in cols}
    flags = flags.rename(columns=renames)
    # columns that are entirely False carry no information
    all_false = [name for name in renames.values() if flags[name].sum() == 0]
    if len(all_false) == len(renames):
        # all dropped -> no flags
        return None
    return flags.drop(columns=all_false)
def _identity(x):
    # no-op function, used as the default (non-)rounding function in `Round`
    return x
def gb_itergroup(x: Union[pd.DataFrame, pd.core.groupby.generic.DataFrameGroupBy], by: Union[str, List[str]]=None):
    """
    Given a DataFrame (grouped on `by`) or an existing DataFrameGroupBy,
    yield (group value(s), index values) pairs.
    """
    if isinstance(x, pd.DataFrame):
        # `by` is only meaningful (and required) for a raw DataFrame
        assert by is not None
        grouped = x.groupby(by)
    else:
        assert isinstance(x, pd.core.groupby.generic.DataFrameGroupBy)
        grouped = x
    yield from grouped.groups.items()
def gb_iterdf(x: pd.DataFrame, by: Union[str, List[str]]):
    """
    Groupby `x` on `by` and yield (group value(s), sub-dataframe) pairs.
    """
    for group, index in x.groupby(by).groups.items():
        yield group, x.loc[index]
def gb_itertuples_gen(x: pd.DataFrame, by: Union[str, List[str]]):
    """
    For each group of `x` grouped on `by`, yield the group value(s) and a
    generator of row tuples (as `DataFrame.itertuples` would produce).
    Typical use, down to the row level:
    >>> for g, gen in gb_itertuples_gen(x, by='column_name'):
    >>>     for row in gen:
    >>>         # do something with `row`
    """
    for group, sub in gb_iterdf(x, by):
        yield group, sub.itertuples()
class Round:
    """Couple a rounding function with a default aggregation over a DataFrame.
    `rounding` may be None/False (no rounding), True ('round'), one of the
    strings 'down' / 'up' / other (floor / ceil / round), an int (number of
    decimals) or any callable. `round_before_agg` chooses whether rounding is
    applied before or after the aggregation."""
    def __init__(self, rounding=None, round_before_agg=False, agg='sum', **kw):
        func = _identity
        # NOTE: the bool cases must be handled before the int case,
        # since bool is a subclass of int
        if rounding is None or rounding is False:
            round_before_agg = False
        elif rounding is True:
            func = np.round
        elif isinstance(rounding, str):
            func = {'down': np.floor, 'up': np.ceil}.get(rounding, np.round)
        elif isinstance(rounding, int):
            func = partial(np.round, decimals=rounding)
        else:
            assert callable(rounding)
            func = rounding
        self._func = func
        self._agg = agg
        self._kw = kw.copy()
        self.round_before(round_before_agg)
    def round_before(self, yes):
        """Choose whether rounding happens before (truthy) or after aggregation."""
        self._before = bool(yes)
    def get_round_func(self):
        """The resolved rounding callable."""
        return self._func
    def default_agg(self, agg, **kw):
        """Set the default aggregation method and its keyword arguments."""
        self._agg = agg
        self._kw = kw.copy()
    def round_and_agg(self, df: pd.DataFrame=None, cols=None, agg=None, **kw):
        """Round each value, then aggregate (optionally restricted to `cols`)."""
        if cols is not None:
            df = df.loc[:, cols]
        return getattr(df.apply(self._func), agg or self._agg)(**(kw or self._kw))
    def agg_and_round(self, df: pd.DataFrame=None, cols=None, agg=None, **kw):
        """Aggregate, then round the aggregated values."""
        if cols is not None:
            df = df.loc[:, cols]
        return getattr(df, agg or self._agg)(**(kw or self._kw)).apply(self._func)
    def __call__(self, df: pd.DataFrame=None, cols=None, agg=None, **kw):
        handler = self.round_and_agg if self._before else self.agg_and_round
        return handler(df, cols=cols, agg=agg, **kw)
    def get_round_agg_func(self):
        """The bound method matching the current round-before/after setting."""
        return self.round_and_agg if self._before else self.agg_and_round
| [
"collections.Counter",
"functools.partial",
"numpy.isnan",
"pandas.DataFrame"
] | [((7631, 7640), 'collections.Counter', 'Counter', ([], {}), '()\n', (7638, 7640), False, 'from collections import Counter\n'), ((334, 345), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (342, 345), True, 'import numpy as np\n'), ((27767, 27803), 'functools.partial', 'partial', (['np.round'], {'decimals': 'rounding'}), '(np.round, decimals=rounding)\n', (27774, 27803), False, 'from functools import partial\n'), ((8702, 8746), 'pandas.DataFrame', 'pd.DataFrame', (['[res.loc[loc, c], y.loc[:, c]]'], {}), '([res.loc[loc, c], y.loc[:, c]])\n', (8714, 8746), True, 'import pandas as pd\n')] |
"""Metric file for pixel wise scores computation
"""
import argparse
import math
import numpy as np
from PIL import Image
import os
import sklearn.metrics as metrics # for confusion matrix
#==============================================
#==============================================
# IO FUNCTIONS
#==============================================
#==============================================
def raster_loader(file_path):
    """Load a raster image file as an integer numpy array.

    Args:
        file_path: path to an image readable by PIL.
    Returns:
        numpy integer array with the image's pixel values (label ids).
    """
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # equivalent dtype alias and works on all NumPy versions.
    image = np.array(Image.open(file_path), dtype=int)
    return image
#==============================================
#==============================================
# STATS
#==============================================
#==============================================
def stats_overall_accuracy(cm):
    """Overall accuracy: trace of the confusion matrix over the total count."""
    total = cm.sum()
    return np.trace(cm) / total
def stats_pfa_per_class(cm):
    """Probability of false alarm per class and its average over valid classes.
    Classes absent from the predictions (empty column) get -1.
    Returns (average_pfa, pfa_per_class).
    """
    col_totals = cm.sum(axis=0)
    valid = col_totals > 0
    # guard against division by zero for empty columns
    denom = np.where(valid, col_totals, 1)
    pfa_per_class = (col_totals - np.diag(cm)) / denom
    pfa_per_class[~valid] = -1
    average_pfa = pfa_per_class[valid].mean()
    return average_pfa, pfa_per_class
def stats_accuracy_per_class(cm):
    """Per-class accuracy (true positives over ground-truth count) and its mean.
    Classes with no ground-truth samples (empty row) get -1.
    Returns (average_accuracy, accuracy_per_class).
    """
    row_totals = cm.sum(axis=1)
    valid = row_totals > 0
    # guard against division by zero for empty rows
    denom = np.where(valid, row_totals, 1)
    accuracy_per_class = np.diag(cm) / denom
    accuracy_per_class[~valid] = -1
    average_accuracy = accuracy_per_class[valid].mean()
    return average_accuracy, accuracy_per_class
def stats_iou_per_class(cm):
    """Per-class intersection-over-union and its mean over valid classes.
    Classes with an empty union get -1.
    Returns (average_iou, iou_per_class).
    """
    tp = np.diag(cm)
    # union = ground-truth count + predicted count - intersection
    union = cm.sum(axis=1) + cm.sum(axis=0) - tp
    valid = union > 0
    iou_per_class = tp / np.where(valid, union, 1)
    iou_per_class[~valid] = -1
    average_iou = iou_per_class[valid].mean()
    return average_iou, iou_per_class
def stats_f1score_per_class(cm):
    """Per-class F1 score and its mean over valid classes.
    Uses the identity F1 = 2*TP / (ground-truth count + predicted count).
    Invalid classes (empty row and column) get -1.
    Returns (average_f1_score, f1score_per_class).
    """
    totals = cm.sum(axis=1) + cm.sum(axis=0)
    valid = totals > 0
    f1score_per_class = 2 * np.diag(cm) / np.where(valid, totals, 1)
    f1score_per_class[~valid] = -1
    average_f1_score = f1score_per_class[valid].mean()
    return average_f1_score, f1score_per_class
def _print_per_class(title, values, n_labels):
    """Print one per-class metric section; -1 marks labels absent from ground truth."""
    print("============ %s" % title)
    for i in range(n_labels):
        if values[i] > -1:
            print("    label", i, values[i])
        else:
            print("    label", i, "invalid value (not in ground truth)")
def main():
    """Parse arguments, build the confusion matrix and print all pixel-wise scores.

    Either a single --input/--target pair or a --filelist (one
    "input<delimiter>target" pair per line) must be provided.
    """
    # create the parser for arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", type=str, default=None,
                        help="input file")
    parser.add_argument("--target", type=str, default=None,
                        help="target file (ground truth)")
    parser.add_argument("--filelist", type=str, default=None,
                        help="filepath for multi-file stats")
    parser.add_argument("--labels", type=int, default=None, required=True,
                        help="number of labels, if not used, computed from data")
    parser.add_argument("--delimiter", type=str, default=" ",
                        help="Default delimiter for loading file, default is space")
    parser.add_argument("--verbose", action="store_true",
                        help="Detailed per class values")
    args = parser.parse_args()
    print("LABELS", args.labels)
    labels = list(range(args.labels))
    cm = None  # the confusion matrix, accumulated over all file pairs
    print("Loading data and build confusion matrix...", flush=True)
    if args.filelist is not None:
        # `with` guarantees the list file is closed even on errors
        with open(args.filelist, "r") as in_f:
            for line in in_f:
                line = line.split("\n")[0]
                if not line:
                    continue  # skip empty lines (e.g. trailing newline)
                filenames = line.split(args.delimiter)
                print("    ", filenames[0])
                data = raster_loader(filenames[0]).ravel()
                target = raster_loader(filenames[1]).ravel()
                update = metrics.confusion_matrix(target, data, labels=labels)
                cm = update if cm is None else cm + update
    else:
        if args.input is None or args.target is None:
            raise Exception("Input / Target exception")
        data = raster_loader(args.input).ravel()
        target = raster_loader(args.target).ravel()
        cm = metrics.confusion_matrix(target, data, labels=labels)
    print("Done")
    if args.verbose:
        print("============ Confusion Matrix")
        print(cm)
    overall_accuracy = stats_overall_accuracy(cm)
    print("Overall Accuracy", overall_accuracy)
    average_accuracy, accuracy_per_class = stats_accuracy_per_class(cm)
    if args.verbose:
        _print_per_class("Accuracy per class", accuracy_per_class, args.labels)
    print("Average accuracy", average_accuracy)
    average_iou, iou_per_class = stats_iou_per_class(cm)
    if args.verbose:
        _print_per_class("Intersection over union", iou_per_class, args.labels)
    print("Average IoU", average_iou)
    average_f1_score, f1score_per_class = stats_f1score_per_class(cm)
    if args.verbose:
        _print_per_class("F1-scores", f1score_per_class, args.labels)
    print("Average F1-score", average_f1_score)
    average_pfa, pfa_per_class = stats_pfa_per_class(cm)
    if args.verbose:
        _print_per_class("PFA per class", pfa_per_class, args.labels)
    print("Average PFA", average_pfa)
#==============================================
if __name__ == "__main__":
    # script entry point: compute and print all scores for the CLI arguments
    main()
#EOF
| [
"numpy.trace",
"PIL.Image.open",
"argparse.ArgumentParser",
"numpy.logical_not",
"numpy.diag",
"numpy.sum",
"sklearn.metrics.confusion_matrix"
] | [((949, 967), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(0)'}), '(cm, axis=0)\n', (955, 967), True, 'import numpy as np\n'), ((1550, 1568), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(1)'}), '(cm, axis=1)\n', (1556, 1568), True, 'import numpy as np\n'), ((2887, 2912), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2910, 2912), False, 'import argparse\n'), ((486, 507), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (496, 507), False, 'from PIL import Image\n'), ((830, 842), 'numpy.trace', 'np.trace', (['cm'], {}), '(cm)\n', (838, 842), True, 'import numpy as np\n'), ((1085, 1105), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (1099, 1105), True, 'import numpy as np\n'), ((1637, 1648), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (1644, 1648), True, 'import numpy as np\n'), ((1695, 1715), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (1709, 1715), True, 'import numpy as np\n'), ((2049, 2060), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2056, 2060), True, 'import numpy as np\n'), ((2125, 2136), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2132, 2136), True, 'import numpy as np\n'), ((2162, 2182), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (2176, 2182), True, 'import numpy as np\n'), ((2514, 2532), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(1)'}), '(cm, axis=1)\n', (2520, 2532), True, 'import numpy as np\n'), ((2535, 2553), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(0)'}), '(cm, axis=0)\n', (2541, 2553), True, 'import numpy as np\n'), ((2667, 2687), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (2681, 2687), True, 'import numpy as np\n'), ((4750, 4803), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['target', 'data'], {'labels': 'labels'}), '(target, data, labels=labels)\n', (4774, 4803), True, 'import sklearn.metrics as metrics\n'), ((1047, 1058), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', 
(1054, 1058), True, 'import numpy as np\n'), ((2007, 2025), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(1)'}), '(cm, axis=1)\n', (2013, 2025), True, 'import numpy as np\n'), ((2028, 2046), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(0)'}), '(cm, axis=0)\n', (2034, 2046), True, 'import numpy as np\n'), ((2626, 2637), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2633, 2637), True, 'import numpy as np\n'), ((4304, 4357), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['target', 'data'], {'labels': 'labels'}), '(target, data, labels=labels)\n', (4328, 4357), True, 'import sklearn.metrics as metrics\n'), ((4397, 4450), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['target', 'data'], {'labels': 'labels'}), '(target, data, labels=labels)\n', (4421, 4450), True, 'import sklearn.metrics as metrics\n')] |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Pipeline object '''
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
import pandas as pd
import re
import six
from collections import Sequence
from .base import BaseTransformer, BaseEstimator, BaseModel
def tosequence(obj):
    ''' Cast an iterable to a sequence

    numpy arrays are returned as arrays, existing sequences (lists, tuples,
    strings, ...) are returned unchanged, any other iterable is materialized
    into a list.
    '''
    # `collections.Sequence` (used at module level) was removed in Python 3.10;
    # import the ABC from its canonical `collections.abc` location instead.
    from collections.abc import Sequence
    if isinstance(obj, np.ndarray):
        return np.asarray(obj)
    if isinstance(obj, Sequence):
        return obj
    return list(obj)
@six.python_2_unicode_compatible
class Pipeline(object):
    '''
    Execute a series of transformers and estimators
    Parameters
    ----------
    stages : one or more transformers/estimators
        The stages of the pipeline to execute
    Examples
    --------
    Basic pipeline of imputers and an estimator:
    >>> mean_imp = Imputer(Imputer.MEAN)
    >>> mode_imp = Imputer(Imputer.MODE)
    >>> dtree = DecisionTree(target='Origin',
    ...                      nominals=['Type', 'Cylinders', 'Origin'],
    ...                      inputs=['MPG_City', 'MPG_Highway', 'Length',
    ...                              'Weight', 'Type', 'Cylinders'])
    >>> pipe = Pipeline([mean_imp, mode_imp, dtree])
    Returns
    -------
    :class:`Pipeline`
    '''
    def __init__(self, stages):
        self.stages = tosequence(stages)
        # extra fit parameters registered through `set_params`
        self._extra_params = []
        # validate up front: every stage must derive from BaseTransformer
        for item in self.stages:
            if not isinstance(item, BaseTransformer):
                raise TypeError('%s is not a transformer or estimator' % item)
    def __str__(self):
        return '%s([%s])' % (type(self).__name__,
                             ', '.join(str(x) for x in self.stages))
    def __repr__(self):
        return str(self)
    def set_params(self, *args, **kwargs):
        '''
        Set additional parameters for the estimators in the pipeline
        Parameters
        ----------
        *args : positional parameters, optional
            Any valid parameters to the estimators' ``fit`` method
        **kwargs : keyword parameters, optional
            Any valid keyword parameters to the estimators' ``fit`` method
        '''
        self._extra_params.extend(list(args))
        self._extra_params.append(kwargs)
    def fit(self, table, *args, **kwargs):
        '''
        Train the models using the stages in the pipeline
        Notes
        -----
        Parameters passed in on this method are not persisted on
        the pipeline. They are only used during the scope of this method.
        Parameters
        ----------
        table : data set
            Any data set object supported by the transformers and
            estimators in the pipeline stages
        *args : positional parameters, optional
            Any valid parameters to the estimators' ``fit`` method
        **kwargs : keyword parameters, optional
            Any valid keyword parameters to the estimators' ``fit`` method
        Examples
        --------
        Basic pipeline fit using imputers and an estimator:
        >>> mean_imp = Imputer(Imputer.MEAN)
        >>> mode_imp = Imputer(Imputer.MODE)
        >>> dtree = DecisionTree(target='Origin',
        ...                      nominals=['Type', 'Cylinders', 'Origin'],
        ...                      inputs=['MPG_City', 'MPG_Highway', 'Length',
        ...                              'Weight', 'Type', 'Cylinders'])
        >>> pipe = Pipeline([mean_imp, mode_imp, dtree])
        >>> model = pipe.fit(data)
        Returns
        -------
        :class:`PipelineModel`
        '''
        out = []
        last_idx = len(self.stages) - 1
        # call-scoped parameters are merged with those set via `set_params`
        extra_params = list(self._extra_params)
        extra_params.extend(args)
        extra_params.append(kwargs)
        for i, stage in enumerate(self.stages):
            # each stage only receives the parameters its `fit` accepts
            params = stage.get_filtered_params(*extra_params)
            if isinstance(stage, BaseEstimator):
                # estimators are trained; their fitted model joins the output
                out.append(stage.fit(table, **params))
                if i == last_idx:
                    # nothing downstream: skip the final (useless) transform
                    break
            else:
                out.append(stage)
            # feed the (transformed) table to the next stage
            table = out[-1].transform(table)
        if out:
            # NOTE: implicitly returns None when the pipeline has no stages
            return PipelineModel(out)
    def transform(self, table, *args, **kwargs):
        '''
        Execute the transformations in this pipeline only
        Parameters
        ----------
        table : data set
            Any data set object supported by the transformers and
            estimators in the pipeline stages
        *args : positional parameters, optional
            Any valid parameters to the transformers' ``transform`` method
        **kwargs : keyword parameters, optional
            Any valid keyword parameters to the transformers' ``transform`` method
        Notes
        -----
        When the pipeline contains estimators, they typically just pass the
        input table on to the next stage of the pipeline.
        Examples
        --------
        Basic pipeline fit using imputers and an estimator:
        >>> mean_imp = Imputer(Imputer.MEAN)
        >>> mode_imp = Imputer(Imputer.MODE)
        >>> dtree = DecisionTree(target='Origin',
        ...                      nominals=['Type', 'Cylinders', 'Origin'],
        ...                      inputs=['MPG_City', 'MPG_Highway', 'Length',
        ...                              'Weight', 'Type', 'Cylinders'])
        >>> pipe = Pipeline([mean_imp, mode_imp, dtree])
        >>> new_table = pipe.transform(data)
        Returns
        -------
        data set
            The same type of data set as passed in `table`
        '''
        out = []
        last_idx = len(self.stages) - 1
        extra_params = list(self._extra_params)
        extra_params.extend(args)
        extra_params.append(kwargs)
        # same walk as `fit` (estimators are fitted so their transform can run),
        # but the result is the transformed table rather than a model
        for i, stage in enumerate(self.stages):
            params = stage.get_filtered_params(*extra_params)
            if isinstance(stage, BaseEstimator):
                out.append(stage.fit(table, **params))
                if i == last_idx:
                    break
            else:
                out.append(stage)
            table = out[-1].transform(table)
        return table
    def __getitem__(self, idx):
        # allow pipe[i] to access the i-th stage directly
        return self.stages[idx]
@six.python_2_unicode_compatible
class PipelineModel(object):
    '''
    Trained model for a Pipeline

    Notes
    -----
    This object is not instantiated directly.  It is the result of
    calling the ``fit`` method of the :class:`Pipeline` object.

    Parameters
    ----------
    stages : list of transformers / models
        The elements of the fitted Pipeline.

    Returns
    -------
    :class:`PipelineModel`

    '''

    def __init__(self, stages):
        self.stages = tosequence(stages)

    def __str__(self):
        joined = ', '.join(str(stage) for stage in self.stages)
        return '%s([%s])' % (type(self).__name__, joined)

    def __repr__(self):
        return str(self)

    def score(self, table, **kwargs):
        '''
        Apply transformations and score the data using the trained model

        Each model stage in the pipeline contributes one score; between
        stages the table is passed through each stage's ``transform``.

        Parameters
        ----------
        table : data set
            A data set that is of the same type as the training data set
        **kwargs : keyword parameters, optional
            Parameters forwarded to each model's ``score`` method

        Returns
        -------
        :class:`pandas.Series` or :class:`pandas.DataFrame`
            A single model's score, or one row per model; ``None`` when
            the pipeline contains no models.

        '''
        results = []
        seen = {}
        for stage in self.stages:
            if isinstance(stage, BaseModel):
                results.append(stage.score(table, **kwargs))
                # Name the score after the model class, dropping a trailing
                # 'Model' and numbering repeated model types (X, X1, X2, ...).
                label = re.sub(r'Model$', '', type(stage).__name__)
                if label in seen:
                    seen[label] += 1
                    label = '%s%s' % (label, seen[label])
                else:
                    seen[label] = 0
                results[-1].name = label
            table = stage.transform(table)
        if not results:
            return None
        if len(results) == 1:
            return results[0]
        return pd.DataFrame(results)

    def transform(self, table):
        '''
        Run the transforms in the trained pipeline

        Parameters
        ----------
        table : data set
            A data set that is of the same type as the training data set

        Returns
        -------
        data set
            A data set of the same type that was passed in `table`

        '''
        current = table
        for stage in self.stages:
            current = stage.transform(current)
        return current

    def __getitem__(self, idx):
        ''' Return the fitted stage (or slice of stages) at `idx` '''
        return self.stages[idx]

    def unload(self):
        ''' Unload model resources '''
        for stage in self.stages:
            if not isinstance(stage, BaseModel):
                continue
            stage.unload()
| [
"pandas.DataFrame",
"numpy.asarray"
] | [((1002, 1017), 'numpy.asarray', 'np.asarray', (['obj'], {}), '(obj)\n', (1012, 1017), True, 'import numpy as np\n'), ((9112, 9132), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {}), '(scores)\n', (9124, 9132), True, 'import pandas as pd\n')] |
import numpy as np
import imutils
import cv2
from itertools import combinations
def eucledian_distance(p1, p2):
    """Euclidean distance between two 2-D points given as (x, y) sequences."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return (dx * dx + dy * dy) ** 0.5
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe("./MobileNetSSD_deploy.prototxt.txt", "./MobileNetSSD_deploy.caffemodel")

print("[INFO] starting video stream...")
vs = cv2.VideoCapture('./vid/walking_distance1.mp4')

# loop over the frames from the video stream
while True:
    # grab the next frame and resize it to a maximum width of 400 pixels;
    # read() returns (grabbed, frame) and yields (False, None) once the
    # video file is exhausted, so guard against a None frame (the original
    # crashed in imutils.resize at end of video)
    grabbed, frame = vs.read()
    if not grabbed or frame is None:
        break
    frame = imutils.resize(frame, width=400)

    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)

    # pass the blob through the network and obtain the detections and
    # predictions
    net.setInput(blob)
    detections = net.forward()
    people_in_img = []  # [mid_x, mid_y, box_height] per detected person

    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the prediction and filter out weak detections
        confidence = detections[0, 0, i, 2]
        if confidence > 0.6:
            # extract the class label index and the (x, y)-coordinates of
            # the bounding box for the object
            idx = int(detections[0, 0, i, 1])
            if CLASSES[idx] == 'person':
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                # record the box center and height for the distancing check
                mid_x = endX - ((endX - startX) / 2)
                mid_y = endY - ((endY - startY) / 2)
                people_in_img.append([mid_x, mid_y, endY - startY])
                # draw the prediction on the frame
                label = "{}: {:.2f}%".format(CLASSES[idx],
                                             confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

    # connect pairs of people closer than the average of their
    # bounding-box heights (a rough pixel-scale proximity test)
    for p1, p2 in combinations(people_in_img, 2):
        dist = eucledian_distance(p1, p2)
        if dist <= (p1[2] + p2[2]) / 2:
            cv2.line(frame, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (0, 0, 255))

    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
# BUG FIX: cv2.VideoCapture has no stop() method (that is imutils'
# VideoStream API) -- release() frees the capture handle
vs.release()
| [
"cv2.rectangle",
"cv2.dnn.readNetFromCaffe",
"cv2.imshow",
"itertools.combinations",
"imutils.resize",
"cv2.putText",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.resize",
"cv2.waitKey",
"numpy.arange"
] | [((702, 804), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['"""./MobileNetSSD_deploy.prototxt.txt"""', '"""./MobileNetSSD_deploy.caffemodel"""'], {}), "('./MobileNetSSD_deploy.prototxt.txt',\n './MobileNetSSD_deploy.caffemodel')\n", (726, 804), False, 'import cv2\n'), ((848, 895), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""./vid/walking_distance1.mp4"""'], {}), "('./vid/walking_distance1.mp4')\n", (864, 895), False, 'import cv2\n'), ((3043, 3066), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3064, 3066), False, 'import cv2\n'), ((1089, 1121), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(400)'}), '(frame, width=400)\n', (1103, 1121), False, 'import imutils\n'), ((1514, 1547), 'numpy.arange', 'np.arange', (['(0)', 'detections.shape[2]'], {}), '(0, detections.shape[2])\n', (1523, 1547), True, 'import numpy as np\n'), ((2670, 2700), 'itertools.combinations', 'combinations', (['people_in_img', '(2)'], {}), '(people_in_img, 2)\n', (2682, 2700), False, 'from itertools import combinations\n'), ((2883, 2909), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (2893, 2909), False, 'import cv2\n'), ((1233, 1262), 'cv2.resize', 'cv2.resize', (['frame', '(300, 300)'], {}), '(frame, (300, 300))\n', (1243, 1262), False, 'import cv2\n'), ((2917, 2931), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2928, 2931), False, 'import cv2\n'), ((2404, 2472), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(startX, startY)', '(endX, endY)', 'COLORS[idx]', '(2)'], {}), '(frame, (startX, startY), (endX, endY), COLORS[idx], 2)\n', (2417, 2472), False, 'import cv2\n'), ((2552, 2641), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'COLORS[idx]', '(2)'], {}), '(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n COLORS[idx], 2)\n', (2563, 2641), False, 'import cv2\n'), ((2048, 2070), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, 
h, w, h])\n', (2056, 2070), True, 'import numpy as np\n')] |
# coding: utf-8
import numpy as np
import pandas as pd
from utils.split_data import split_data
from utils.write_logs import write_log
import re
class Prefix:
    """Build a windowed alert-prediction dataset from an alert-log CSV.

    Sliding windows of alert summaries become samples; a sample is labeled
    positive when the target alert occurs in a near-future interval.
    """

    def __init__(self, app_name='', data_name='data.csv', target='',alert_level = 1):
        # target: keyword identifying the alert type to predict;
        # alert_level: severity value an alert must match to count.
        df = pd.read_csv(data_name)
        self.app_name = app_name
        self.target = target
        self.alert_level = alert_level
        # keep only rows belonging to this application
        self.df = df[df['N_APPNAME'] == self.app_name]
        self.datas = []   # list of alert-summary lists, one per sampled window
        self.labels = []  # 0/1 label per sampled window

    def keyword(self, df, keyword, if_true=True):
        """Row predicate: does this alert row mention `keyword` at the configured severity?

        `df` is a single row (used via DataFrame.apply(axis=1)).  With
        if_true=False the result is inverted, so the same predicate can be
        used to filter the keyword rows *out*.
        """
        pattern = re.compile('.*' + keyword + '.*')
        # N_SUMMARYCN is presumably the alert summary text -- TODO confirm
        if (pattern.match(df["N_SUMMARYCN"]) is not None) and (df['N_CUSTOMERSEVERITY'] == self.alert_level):
            return if_true
        else:
            return not if_true

    def sample(self, step=10, window_size=60, react_size=10, positive_range=120, min_log=5):
        """Slide a window over the alert timeline and build (datas, labels).

        All size arguments are in minutes and converted to seconds
        (firsttimestamp appears to be in seconds -- TODO confirm).
        A window is kept only when it has at least `min_log` rows AND
        contains the target alert; the target rows themselves are removed
        from the sample.  The label is 1 when the target alert re-occurs in
        [window_end + react_size, window_end + react_size + positive_range].
        """
        self.step = step * 60
        self.window_size = window_size * 60
        self.react_size = react_size * 60
        self.positive_range = positive_range * 60
        self.min_log = min_log
        self.data_time = []  # window-end timestamp of every kept sample
        datas = []
        labels = []
        start_stamp = self.df['firsttimestamp'].min()
        end_stamp = self.df['firsttimestamp'].max()
        # stop early enough that the future (label) interval fits in the data
        for i in range(start_stamp, (end_stamp - self.window_size - self.react_size - self.positive_range), self.step):
            temp = self.df[(self.df['firsttimestamp'] >= i) & (self.df['firsttimestamp'] < (i + self.window_size))]
            if temp.shape[0] < self.min_log:
                continue
            else:
                # keep the window only if it contains the target alert ...
                if temp[(temp.apply(self.keyword, keyword=self.target, axis=1))].shape[0]:
                    # ... then drop the target rows from the sample itself
                    temp = temp[(temp.apply(self.keyword, keyword=self.target, if_true=False, axis=1))]
                    tmp = temp['N_SUMMARYCN'].values
                    datas.append(list(tmp))
                    # future interval used to decide the label
                    future = self.df[(self.df['firsttimestamp'] >= (i + self.window_size + self.react_size)) & (
                            self.df['firsttimestamp'] <= (
                            i + self.window_size + self.react_size + self.positive_range))]
                    self.data_time.append(i + self.window_size)
                    if future.shape[0]==0:
                        labels.append(0)
                    else:
                        # positive when the target alert shows up in the future window
                        if future[future.apply(self.keyword, keyword=self.target, axis=1)].shape[0]:
                            labels.append(1)
                        else:
                            labels.append(0)
        self.datas = datas
        self.labels = labels
        print("---sample done---")

    def split_data(self, split_percent=0.7):
        """Chronological train/test split of the sampled windows.

        The cut point is the timestamp of the sample at `split_percent` of
        the timeline, so train and test do not overlap in time.  Also logs
        the sampling configuration and the class balance via write_log.

        Returns (train_data, train_label, test_data, test_label).
        """
        split_timestamp = self.data_time[int(len(self.data_time) * split_percent)]
        train_df = self.df[self.df['firsttimestamp'] < split_timestamp]
        test_df = self.df[self.df['firsttimestamp'] >= split_timestamp]
        # how many raw target alerts fall on each side of the split
        self.train_alert_num = train_df[train_df.apply(self.keyword, keyword=self.target, axis=1)].shape[0]
        self.test_alert_num = test_df[test_df.apply(self.keyword, keyword=self.target, axis=1)].shape[0]
        # note: calls the module-level split_data helper, not this method
        train_data, train_label, test_data, test_label = split_data(self.datas, self.labels, split_percent)
        train_label_num_1 = np.sum(np.array(train_label) == 1)
        train_label_num_0 = np.sum(np.array(train_label) == 0)
        test_label_num_1 = np.sum(np.array(test_label) == 1)
        test_label_num_0 = np.sum(np.array(test_label) == 0)
        logs = "\nAPPNAME:{}".format(self.app_name) + \
               "\nalert to predict:{}".format(self.target) + \
               "\ntraining={}".format(self.train_alert_num) + \
               "\ntesting={}".format(self.test_alert_num) + \
               "\nstep_size={}min".format(self.step//60) + \
               "\nwindow_size={}h".format(self.window_size//3600) + \
               "\nreact_size={}min".format(self.react_size//60) + \
               "\npositive_range={}h".format(self.positive_range//3600) + \
               "\nmin_log={}".format(self.min_log) + \
               "\ntrain(+):{}".format(train_label_num_1) + \
               "\ntrain(-):{}".format(train_label_num_0) + \
               "\ntest(+):{}".format(test_label_num_1) + \
               "\ntest(-):{}".format(test_label_num_0)
        write_log(logs)
        return train_data, train_label, test_data, test_label
| [
"utils.write_logs.write_log",
"pandas.read_csv",
"re.compile",
"utils.split_data.split_data",
"numpy.array"
] | [((264, 286), 'pandas.read_csv', 'pd.read_csv', (['data_name'], {}), '(data_name)\n', (275, 286), True, 'import pandas as pd\n'), ((561, 594), 're.compile', 're.compile', (["('.*' + keyword + '.*')"], {}), "('.*' + keyword + '.*')\n", (571, 594), False, 'import re\n'), ((3121, 3171), 'utils.split_data.split_data', 'split_data', (['self.datas', 'self.labels', 'split_percent'], {}), '(self.datas, self.labels, split_percent)\n', (3131, 3171), False, 'from utils.split_data import split_data\n'), ((4209, 4224), 'utils.write_logs.write_log', 'write_log', (['logs'], {}), '(logs)\n', (4218, 4224), False, 'from utils.write_logs import write_log\n'), ((3207, 3228), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', (3215, 3228), True, 'import numpy as np\n'), ((3270, 3291), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', (3278, 3291), True, 'import numpy as np\n'), ((3332, 3352), 'numpy.array', 'np.array', (['test_label'], {}), '(test_label)\n', (3340, 3352), True, 'import numpy as np\n'), ((3393, 3413), 'numpy.array', 'np.array', (['test_label'], {}), '(test_label)\n', (3401, 3413), True, 'import numpy as np\n')] |
'''
Based on
https://github.com/r9y9/deepvoice3_pytorch/blob/master/train.py
'''
import numpy as np
from numba import jit
@jit(nopython=True)
def guided_attention(N, max_N, T, max_T, g):
    """Build one guided-attention penalty mask of shape (max_N, max_T).

    Only the top-left (N, T) region is filled; a cell's weight grows toward
    1 the farther it lies from the diagonal n/N == t/T, with sharpness g.
    """
    weights = np.zeros((max_N, max_T), dtype=np.float32)
    denom = 2 * g * g
    for row in range(N):
        for col in range(T):
            offset = row / N - col / T
            weights[row, col] = 1 - np.exp(-offset ** 2 / denom)
    return weights
def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2):
    """Stack per-utterance guided-attention masks into one batch.

    Returns a float32 array of shape (B, max_target_len, max_input_len),
    where each slice is the transposed mask from guided_attention().
    """
    batch_size = len(input_lengths)
    max_input_len = input_lengths.max()
    W = np.zeros((batch_size, max_target_len, max_input_len), dtype=np.float32)
    for idx in range(batch_size):
        mask = guided_attention(input_lengths[idx], max_input_len,
                                target_lengths[idx], max_target_len, g)
        W[idx] = mask.T
    return W
| [
"numpy.exp",
"numpy.zeros",
"numba.jit"
] | [((125, 143), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (128, 143), False, 'from numba import jit\n'), ((197, 239), 'numpy.zeros', 'np.zeros', (['(max_N, max_T)'], {'dtype': 'np.float32'}), '((max_N, max_T), dtype=np.float32)\n', (205, 239), True, 'import numpy as np\n'), ((525, 587), 'numpy.zeros', 'np.zeros', (['(B, max_target_len, max_input_len)'], {'dtype': 'np.float32'}), '((B, max_target_len, max_input_len), dtype=np.float32)\n', (533, 587), True, 'import numpy as np\n'), ((316, 359), 'numpy.exp', 'np.exp', (['(-(n / N - t / T) ** 2 / (2 * g * g))'], {}), '(-(n / N - t / T) ** 2 / (2 * g * g))\n', (322, 359), True, 'import numpy as np\n')] |
# ref: https://yt-project.org/docs/dev/examining/generic_array_data.html
import yt
import h5py
import numpy as np
# user-specified parameters
file_in = 'Cube_x0.000-3.000_y0.000-3.000_z0.000-3.000_lv1.hdf5' # input filename
fields = [ 'Dens', ] # target field(s)

# load data
# NOTE: the f['Info'][...] entries are h5py datasets; they behave like
# arrays/scalars in the arithmetic and comparisons below
f = h5py.File( file_in, 'r' )
mode = f['Info']['OutputMode']
dimension = f['Info']['GridDimension']
time = f['Info']['Time']
dh = f['Info']['CellWidth']
box_size = f['Info']['SubdomainSize']
left_edge = f['Info']['SubdomainLeftEdge']
unit_l = ( f['Info']['Unit_L'], 'cm' )
unit_m = ( f['Info']['Unit_M'], 'g' )
unit_t = ( f['Info']['Unit_T'], 's' )
# per-field unit strings stored as bytes attributes in the file
units = [ f['Data'][k].attrs['Unit'].decode('utf-8') for k in fields ]
# yt expects {field: (ndarray, unit)}; [()] reads the whole dataset and
# transpose converts from the file's storage order
data = { k:(f['Data'][k][()].transpose(),u) for k,u in zip(fields,units) }

# adjust the projected data since what gamer_extract_uniform computes is actually the
# **average** instead of **projected** values
# --> overwrite the subdomain coordinates as dh and adjust the left edge
# --> multiply data by the projection distance
# (modes 4-6 are projections along x, y, z respectively)
if mode >= 4 and mode <= 6:
   xyz = mode - 4
   ncell_proj = box_size[xyz] / dh
   left_edge[xyz] = left_edge[xyz] + 0.5*( box_size[xyz] - dh )
   box_size [xyz] = dh
   for k in fields: data[k][0][:,:,:] *= ncell_proj

# bounding box of the subdomain in code units
bbox = np.array( [ [left_edge[0], left_edge[0]+box_size[0]],
                    [left_edge[1], left_edge[1]+box_size[1]],
                    [left_edge[2], left_edge[2]+box_size[2]] ] )

ds = yt.load_uniform_grid( data=data, domain_dimensions=dimension,
                           length_unit=unit_l, mass_unit=unit_m, time_unit=unit_t,
                           bbox=bbox, nprocs=1, sim_time=time,
                           periodicity=(False,False,False) )

# plot
plt = yt.SlicePlot( ds, 'z', fields, center='c' )
#plt = yt.ProjectionPlot( ds, 'z', fields, center='c', origin=('center','window') )
#plt.set_unit( fields[0], 'Msun/kpc**2' )
#plt.set_zlim( fields[0], 1.0e5, 1.0e8 )
plt.save()
| [
"numpy.array",
"yt.load_uniform_grid",
"yt.SlicePlot",
"h5py.File"
] | [((339, 362), 'h5py.File', 'h5py.File', (['file_in', '"""r"""'], {}), "(file_in, 'r')\n", (348, 362), False, 'import h5py\n'), ((1358, 1505), 'numpy.array', 'np.array', (['[[left_edge[0], left_edge[0] + box_size[0]], [left_edge[1], left_edge[1] +\n box_size[1]], [left_edge[2], left_edge[2] + box_size[2]]]'], {}), '([[left_edge[0], left_edge[0] + box_size[0]], [left_edge[1], \n left_edge[1] + box_size[1]], [left_edge[2], left_edge[2] + box_size[2]]])\n', (1366, 1505), True, 'import numpy as np\n'), ((1543, 1739), 'yt.load_uniform_grid', 'yt.load_uniform_grid', ([], {'data': 'data', 'domain_dimensions': 'dimension', 'length_unit': 'unit_l', 'mass_unit': 'unit_m', 'time_unit': 'unit_t', 'bbox': 'bbox', 'nprocs': '(1)', 'sim_time': 'time', 'periodicity': '(False, False, False)'}), '(data=data, domain_dimensions=dimension, length_unit=\n unit_l, mass_unit=unit_m, time_unit=unit_t, bbox=bbox, nprocs=1,\n sim_time=time, periodicity=(False, False, False))\n', (1563, 1739), False, 'import yt\n'), ((1827, 1868), 'yt.SlicePlot', 'yt.SlicePlot', (['ds', '"""z"""', 'fields'], {'center': '"""c"""'}), "(ds, 'z', fields, center='c')\n", (1839, 1868), False, 'import yt\n')] |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import numpy as np
from scipy.spatial.distance import squareform
from pm4py.algo.clustering.trace_attribute_driven.leven_dist import leven_dist_calc
from pm4py.algo.clustering.trace_attribute_driven.merge_log import merge_log
from pm4py.algo.clustering.trace_attribute_driven.dfg import dfg_dist
from pm4py.algo.clustering.trace_attribute_driven.variants import act_dist_calc
from pm4py.algo.clustering.trace_attribute_driven.variants import suc_dist_calc
def linkage_dfg_update(loglist, dist_mat, alpha, percent):
    """Agglomerative clustering of event logs with recomputed distances.

    After each merge the distance from the merged log to every remaining
    log is recomputed from scratch with the DFG-based distance
    (dfg_dist.dfg_dist_calc, mixing activity and DFG components by alpha)
    instead of being averaged from the old distances.  `percent` is unused
    here; it is kept so all linkage_* variants share one signature.
    Note: `loglist` is mutated -- merged logs are appended to it.

    Returns a linkage matrix as a numpy array, one row
    [id_a, id_b, distance, merged_size] per merge; new clusters receive
    ids n, n+1, ... like SciPy's linkage convention.
    """
    # index_list[p] names the two cluster ids whose distance is stored at
    # position p of the condensed distance vector y
    index_list = []
    for i in range(len(dist_mat)):
        for j in range(i + 1, len(dist_mat)):
            index_list.append([i, j])
    y = squareform(dist_mat)
    n = len(dist_mat)  # The number of observations.
    Z = []
    cluster_size = dict(zip(range(n), np.ones(n)))  # record merged cluster size every step
    k = 1
    logsindex = list(range(len(loglist)))  # ids of the currently live clusters
    while (k <= n - 2):
        min_index = np.argmin(y)
        # append one linkage row: [id_a, id_b, distance, merged size]
        temp = []
        temp.extend(index_list[min_index])
        temp.append(y[min_index])
        cluster_size[n - 1 + k] = cluster_size[temp[0]] + cluster_size[temp[1]]
        temp.append(cluster_size[n - 1 + k])
        Z.append(temp)
        # relabel every pair touching either merged cluster to the new id
        item = index_list[min_index][::]
        record1 = []  # positions of pairs that contained item[0]
        record2 = []  # positions of pairs that contained item[1]
        for ele in index_list:
            if item[0] in ele:
                record1.append(index_list.index(ele))
                inde = ele.index(item[0])
                ele[inde] = n - 1 + k
            if item[1] in ele:  # here if/elif both works
                record2.append(index_list.index(ele))
                inde = ele.index(item[1])
                ele[inde] = n - 1 + k
            ele.sort()
        record = list(set(record1).union(set(record2)))
        merged1 = merge_log.update_merge([loglist[item[0]], loglist[item[1]]])
        # recompute the distance from the merged log to every other live log
        diff = list(set(logsindex).difference(set(item)))  # cluster ids whose distance must be updated
        update_dist = dict()
        for ele in diff:
            (dist_act, dist_dfg) = dfg_dist.dfg_dist_calc(merged1, loglist[ele])
            tempdist = dist_act * alpha + dist_dfg * (1 - alpha)
            # tempdist = leven_dist_calc.leven_dist_avg(merged1, loglist[ele], percent, percent)
            update_dist[ele] = tempdist
        loglist.append(merged1)
        diff.append(n - 1 + k)
        logsindex = diff
        # drop the merged pair itself before rewriting distances
        del (record1[record1.index(min_index)])
        del (record2[record2.index(min_index)])
        # for i in range(len(record1)):
        #     y[record1[i]] = (y[record1[i]]*cluster_size[item[0]] + y[record2[i]]*cluster_size[item[1]]) / (cluster_size[item[0]]+cluster_size[item[1]])
        for ele in record1:
            uindex = index_list[ele][0]  # the partner cluster id of this (new-id, partner) pair
            y[ele] = update_dist[uindex]
        # keep one entry per surviving pair: the rewritten ones plus those untouched
        diff1 = list(set(range(len(index_list))).difference(set(record)))
        newindex = record1 + diff1
        newindex.sort()
        range_newindex = range(len(newindex))
        tempy = list(range_newindex)
        templist = list(range_newindex)
        for i in range_newindex:
            tempy[i] = y[newindex[i]]
            templist[i] = index_list[newindex[i]]
        index_list = templist
        y = tempy
        k = k + 1
    # two clusters remain; emit the final merge
    temp = []
    temp.extend(index_list[0])
    temp.append(y[0])
    cluster_size[n - 1 + k] = cluster_size[temp[0]] + cluster_size[temp[1]]
    temp.append(cluster_size[n - 1 + k])
    Z.append(temp)
    Z = np.array(Z)
    return Z
def linkage_avg(loglist, dist_mat, alpha, percent):
    """Agglomerative hierarchical clustering with size-weighted average linkage.

    Parameters
    ----------
    loglist : list
        One event log per initial cluster; only ``len(loglist[i])`` is used,
        as the weight of cluster ``i`` when averaging distances.
    dist_mat : square matrix
        Symmetric pairwise distance matrix with a zero diagonal.
    alpha, percent : float
        Unused here; kept so all ``linkage_*`` variants share one signature.

    Returns
    -------
    numpy.ndarray
        Linkage matrix with one row ``[id_a, id_b, distance, merged_size]``
        per merge; new clusters get ids ``n, n+1, ...``.
    """
    n_obs = len(dist_mat)
    # pair_ids[p] names the two cluster ids whose distance is stored at
    # position p of the condensed vector dists
    pair_ids = []
    weights = []
    for i in range(n_obs):
        weights.append(len(loglist[i]))
        for j in range(i + 1, n_obs):
            pair_ids.append([i, j])
    dists = squareform(dist_mat)
    linkage_rows = []
    weights = dict(zip(range(n_obs), weights))
    step = 1
    while step <= n_obs - 2:
        best = np.argmin(dists)
        new_id = n_obs - 1 + step
        # one linkage row: [id_a, id_b, distance, merged size]
        row = list(pair_ids[best])
        row.append(dists[best])
        weights[new_id] = weights[row[0]] + weights[row[1]]
        row.append(weights[new_id])
        linkage_rows.append(row)
        # relabel every pair touching either merged cluster to the new id
        merged = pair_ids[best][:]
        hits_a = []  # positions of pairs that contained merged[0]
        hits_b = []  # positions of pairs that contained merged[1]
        for entry in pair_ids:
            if merged[0] in entry:
                hits_a.append(pair_ids.index(entry))
                entry[entry.index(merged[0])] = new_id
            if merged[1] in entry:
                hits_b.append(pair_ids.index(entry))
                entry[entry.index(merged[1])] = new_id
            entry.sort()
        touched = list(set(hits_a).union(set(hits_b)))
        # drop the merged pair itself before averaging
        del hits_a[hits_a.index(best)]
        del hits_b[hits_b.index(best)]
        # size-weighted average of the two old distances to each other cluster
        for a, b in zip(hits_a, hits_b):
            dists[a] = (dists[a] * weights[merged[0]] + dists[b] * weights[merged[1]]) / (
                    weights[merged[0]] + weights[merged[1]])
        # keep one entry per surviving pair: averaged ones plus the untouched
        untouched = list(set(range(len(pair_ids))).difference(set(touched)))
        keep = sorted(hits_a + untouched)
        dists = [dists[p] for p in keep]
        pair_ids = [pair_ids[p] for p in keep]
        step = step + 1
    # two clusters remain; emit the final merge
    last = list(pair_ids[0])
    last.append(dists[0])
    weights[n_obs - 1 + step] = weights[last[0]] + weights[last[1]]
    last.append(weights[n_obs - 1 + step])
    linkage_rows.append(last)
    return np.array(linkage_rows)
def linkage_DMM_update(loglist, dist_mat, alpha, percent):
    """Agglomerative clustering of event logs with recomputed distances.

    Same bookkeeping as linkage_dfg_update, but after each merge the
    distance from the merged log to every remaining log is recomputed as
    an alpha-weighted mix of activity similarity
    (act_dist_calc.act_sim_percent) and succession similarity
    (suc_dist_calc.suc_sim_percent), both using `percent`.
    Note: `loglist` is mutated -- merged logs are appended to it.

    Returns a linkage matrix as a numpy array, one row
    [id_a, id_b, distance, merged_size] per merge; new clusters receive
    ids n, n+1, ...
    """
    # index_list[p] names the two cluster ids whose distance is stored at
    # position p of the condensed distance vector y
    index_list = []
    for i in range(len(dist_mat)):
        for j in range(i + 1, len(dist_mat)):
            index_list.append([i, j])
    y = squareform(dist_mat)
    n = len(dist_mat)  # The number of observations.
    Z = []
    cluster_size = dict(zip(range(n), np.ones(n)))  # record merged cluster size every step
    k = 1
    logsindex = list(range(len(loglist)))  # ids of the currently live clusters
    while (k <= n - 2):
        min_index = np.argmin(y)
        # append one linkage row: [id_a, id_b, distance, merged size]
        temp = []
        temp.extend(index_list[min_index])
        temp.append(y[min_index])
        cluster_size[n - 1 + k] = cluster_size[temp[0]] + cluster_size[temp[1]]
        temp.append(cluster_size[n - 1 + k])
        Z.append(temp)
        # relabel every pair touching either merged cluster to the new id
        item = index_list[min_index][::]
        record1 = []  # positions of pairs that contained item[0]
        record2 = []  # positions of pairs that contained item[1]
        for ele in index_list:
            if item[0] in ele:
                record1.append(index_list.index(ele))
                inde = ele.index(item[0])
                ele[inde] = n - 1 + k
            if item[1] in ele:  # here if/elif both works
                record2.append(index_list.index(ele))
                inde = ele.index(item[1])
                ele[inde] = n - 1 + k
            ele.sort()
        record = list(set(record1).union(set(record2)))
        merged1 = merge_log.update_merge([loglist[item[0]], loglist[item[1]]])
        # recompute the distance from the merged log to every other live log
        diff = list(set(logsindex).difference(set(item)))  # cluster ids whose distance must be updated
        update_dist = dict()
        for ele in diff:
            dist_act = act_dist_calc.act_sim_percent(merged1, loglist[ele], percent, percent)
            dist_suc = suc_dist_calc.suc_sim_percent(merged1, loglist[ele], percent, percent)
            tempdist = dist_act * alpha + dist_suc * (1 - alpha)
            # tempdist = leven_dist_calc.leven_dist_avg(merged1, loglist[ele], percent, percent)
            update_dist[ele] = tempdist
        loglist.append(merged1)
        diff.append(n - 1 + k)
        logsindex = diff
        # drop the merged pair itself before rewriting distances
        del (record1[record1.index(min_index)])
        del (record2[record2.index(min_index)])
        # for i in range(len(record1)):
        #     y[record1[i]] = (y[record1[i]]*cluster_size[item[0]] + y[record2[i]]*cluster_size[item[1]]) / (cluster_size[item[0]]+cluster_size[item[1]])
        for ele in record1:
            uindex = index_list[ele][0]  # the partner cluster id of this (new-id, partner) pair
            y[ele] = update_dist[uindex]
        # keep one entry per surviving pair: the rewritten ones plus those untouched
        diff1 = list(set(range(len(index_list))).difference(set(record)))
        newindex = record1 + diff1
        newindex.sort()
        range_newindex = range(len(newindex))
        tempy = list(range_newindex)
        templist = list(range_newindex)
        for i in range_newindex:
            tempy[i] = y[newindex[i]]
            templist[i] = index_list[newindex[i]]
        index_list = templist
        y = tempy
        k = k + 1
    # two clusters remain; emit the final merge
    temp = []
    temp.extend(index_list[0])
    temp.append(y[0])
    cluster_size[n - 1 + k] = cluster_size[temp[0]] + cluster_size[temp[1]]
    temp.append(cluster_size[n - 1 + k])
    Z.append(temp)
    Z = np.array(Z)
    return Z
def linkage_DMM_update_leven(loglist, dist_mat, alpha, percent):
    """Agglomerative clustering of event logs with recomputed distances.

    Same bookkeeping as linkage_dfg_update, but after each merge the
    distance from the merged log to every remaining log is recomputed with
    the Levenshtein-based distance (leven_dist_calc.leven_dist) using
    `percent`; `alpha` is unused here (kept for a uniform signature).
    Note: `loglist` is mutated -- merged logs are appended to it.

    Returns a linkage matrix as a numpy array, one row
    [id_a, id_b, distance, merged_size] per merge; new clusters receive
    ids n, n+1, ...
    """
    # index_list[p] names the two cluster ids whose distance is stored at
    # position p of the condensed distance vector y
    index_list = []
    for i in range(len(dist_mat)):
        for j in range(i + 1, len(dist_mat)):
            index_list.append([i, j])
    y = squareform(dist_mat)
    n = len(dist_mat)  # The number of observations.
    Z = []
    cluster_size = dict(zip(range(n), np.ones(n)))  # record merged cluster size every step
    k = 1
    logsindex = list(range(len(loglist)))  # ids of the currently live clusters
    while (k <= n - 2):
        min_index = np.argmin(y)
        # append one linkage row: [id_a, id_b, distance, merged size]
        temp = []
        temp.extend(index_list[min_index])
        temp.append(y[min_index])
        cluster_size[n - 1 + k] = cluster_size[temp[0]] + cluster_size[temp[1]]
        temp.append(cluster_size[n - 1 + k])
        Z.append(temp)
        # relabel every pair touching either merged cluster to the new id
        item = index_list[min_index][::]
        record1 = []  # positions of pairs that contained item[0]
        record2 = []  # positions of pairs that contained item[1]
        for ele in index_list:
            if item[0] in ele:
                record1.append(index_list.index(ele))
                inde = ele.index(item[0])
                ele[inde] = n - 1 + k
            if item[1] in ele:  # here if/elif both works
                record2.append(index_list.index(ele))
                inde = ele.index(item[1])
                ele[inde] = n - 1 + k
            ele.sort()
        record = list(set(record1).union(set(record2)))
        merged1 = merge_log.update_merge([loglist[item[0]], loglist[item[1]]])
        # recompute the distance from the merged log to every other live log
        diff = list(set(logsindex).difference(set(item)))  # cluster ids whose distance must be updated
        update_dist = dict()
        for ele in diff:
            tempdist = leven_dist_calc.leven_dist(merged1, loglist[ele], percent, percent)
            # tempdist = leven_dist_calc.leven_dist_avg(merged1, loglist[ele], percent, percent)
            update_dist[ele] = tempdist
        loglist.append(merged1)
        diff.append(n - 1 + k)
        logsindex = diff
        # drop the merged pair itself before rewriting distances
        del (record1[record1.index(min_index)])
        del (record2[record2.index(min_index)])
        # for i in range(len(record1)):
        #     y[record1[i]] = (y[record1[i]]*cluster_size[item[0]] + y[record2[i]]*cluster_size[item[1]]) / (cluster_size[item[0]]+cluster_size[item[1]])
        for ele in record1:
            uindex = index_list[ele][0]  # the partner cluster id of this (new-id, partner) pair
            y[ele] = update_dist[uindex]
        # keep one entry per surviving pair: the rewritten ones plus those untouched
        diff1 = list(set(range(len(index_list))).difference(set(record)))
        newindex = record1 + diff1
        newindex.sort()
        range_newindex = range(len(newindex))
        tempy = list(range_newindex)
        templist = list(range_newindex)
        for i in range_newindex:
            tempy[i] = y[newindex[i]]
            templist[i] = index_list[newindex[i]]
        index_list = templist
        y = tempy
        k = k + 1
    # two clusters remain; emit the final merge
    temp = []
    temp.extend(index_list[0])
    temp.append(y[0])
    cluster_size[n - 1 + k] = cluster_size[temp[0]] + cluster_size[temp[1]]
    temp.append(cluster_size[n - 1 + k])
    Z.append(temp)
    Z = np.array(Z)
    return Z
| [
"pm4py.algo.clustering.trace_attribute_driven.merge_log.merge_log.update_merge",
"scipy.spatial.distance.squareform",
"numpy.ones",
"pm4py.algo.clustering.trace_attribute_driven.variants.act_dist_calc.act_sim_percent",
"pm4py.algo.clustering.trace_attribute_driven.leven_dist.leven_dist_calc.leven_dist",
"... | [((1383, 1403), 'scipy.spatial.distance.squareform', 'squareform', (['dist_mat'], {}), '(dist_mat)\n', (1393, 1403), False, 'from scipy.spatial.distance import squareform\n'), ((4291, 4302), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (4299, 4302), True, 'import numpy as np\n'), ((4586, 4606), 'scipy.spatial.distance.squareform', 'squareform', (['dist_mat'], {}), '(dist_mat)\n', (4596, 4606), False, 'from scipy.spatial.distance import squareform\n'), ((6835, 6846), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (6843, 6846), True, 'import numpy as np\n'), ((7070, 7090), 'scipy.spatial.distance.squareform', 'squareform', (['dist_mat'], {}), '(dist_mat)\n', (7080, 7090), False, 'from scipy.spatial.distance import squareform\n'), ((10085, 10096), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (10093, 10096), True, 'import numpy as np\n'), ((10326, 10346), 'scipy.spatial.distance.squareform', 'squareform', (['dist_mat'], {}), '(dist_mat)\n', (10336, 10346), False, 'from scipy.spatial.distance import squareform\n'), ((13179, 13190), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (13187, 13190), True, 'import numpy as np\n'), ((1656, 1668), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (1665, 1668), True, 'import numpy as np\n'), ((2536, 2596), 'pm4py.algo.clustering.trace_attribute_driven.merge_log.merge_log.update_merge', 'merge_log.update_merge', (['[loglist[item[0]], loglist[item[1]]]'], {}), '([loglist[item[0]], loglist[item[1]]])\n', (2558, 2596), False, 'from pm4py.algo.clustering.trace_attribute_driven.merge_log import merge_log\n'), ((4819, 4831), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (4828, 4831), True, 'import numpy as np\n'), ((7343, 7355), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (7352, 7355), True, 'import numpy as np\n'), ((8223, 8283), 'pm4py.algo.clustering.trace_attribute_driven.merge_log.merge_log.update_merge', 'merge_log.update_merge', (['[loglist[item[0]], loglist[item[1]]]'], {}), '([loglist[item[0]], 
loglist[item[1]]])\n', (8245, 8283), False, 'from pm4py.algo.clustering.trace_attribute_driven.merge_log import merge_log\n'), ((10599, 10611), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (10608, 10611), True, 'import numpy as np\n'), ((11479, 11539), 'pm4py.algo.clustering.trace_attribute_driven.merge_log.merge_log.update_merge', 'merge_log.update_merge', (['[loglist[item[0]], loglist[item[1]]]'], {}), '([loglist[item[0]], loglist[item[1]]])\n', (11501, 11539), False, 'from pm4py.algo.clustering.trace_attribute_driven.merge_log import merge_log\n'), ((1506, 1516), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1513, 1516), True, 'import numpy as np\n'), ((2832, 2877), 'pm4py.algo.clustering.trace_attribute_driven.dfg.dfg_dist.dfg_dist_calc', 'dfg_dist.dfg_dist_calc', (['merged1', 'loglist[ele]'], {}), '(merged1, loglist[ele])\n', (2854, 2877), False, 'from pm4py.algo.clustering.trace_attribute_driven.dfg import dfg_dist\n'), ((7193, 7203), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (7200, 7203), True, 'import numpy as np\n'), ((8507, 8577), 'pm4py.algo.clustering.trace_attribute_driven.variants.act_dist_calc.act_sim_percent', 'act_dist_calc.act_sim_percent', (['merged1', 'loglist[ele]', 'percent', 'percent'], {}), '(merged1, loglist[ele], percent, percent)\n', (8536, 8577), False, 'from pm4py.algo.clustering.trace_attribute_driven.variants import act_dist_calc\n'), ((8601, 8671), 'pm4py.algo.clustering.trace_attribute_driven.variants.suc_dist_calc.suc_sim_percent', 'suc_dist_calc.suc_sim_percent', (['merged1', 'loglist[ele]', 'percent', 'percent'], {}), '(merged1, loglist[ele], percent, percent)\n', (8630, 8671), False, 'from pm4py.algo.clustering.trace_attribute_driven.variants import suc_dist_calc\n'), ((10449, 10459), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (10456, 10459), True, 'import numpy as np\n'), ((11763, 11830), 'pm4py.algo.clustering.trace_attribute_driven.leven_dist.leven_dist_calc.leven_dist', 'leven_dist_calc.leven_dist', 
(['merged1', 'loglist[ele]', 'percent', 'percent'], {}), '(merged1, loglist[ele], percent, percent)\n', (11789, 11830), False, 'from pm4py.algo.clustering.trace_attribute_driven.leven_dist import leven_dist_calc\n')] |
import numpy as np
import torch
from tqdm import tqdm
from cmpnn.train.utils import predict
from cmpnn.data import MoleculeDataset
from cmpnn.utils import load_checkpoint
from cmpnn import config
from cmpnn.data.utils import load_data
def prediction(df):
    """Run the CMPNN checkpoint ensemble on *df* and return averaged predictions.

    Args:
        df: data frame whose columns beyond the first are the prediction
            targets (num_tasks = df.shape[1] - 1); rows whose molecule fails
            to parse are skipped during inference.

    Returns:
        1-D numpy array of ensemble-averaged predictions, with None entries
        re-inserted at the positions of invalid molecules.
    """
    try:
        torch.cuda.set_device(config.gpu)
    except Exception:  # BUG FIX: was a bare `except:` (caught SystemExit/KeyboardInterrupt too)
        print('no gpu')
    num_tasks = df.shape[1] - 1
    print('Loading data')
    test_data = load_data(df)
    print('Validating SMILES')
    valid_indices = [i for i in range(len(test_data)) if test_data[i].mol is not None]
    full_data = test_data
    test_data = MoleculeDataset([test_data[i] for i in valid_indices])
    print(f'Test size = {len(test_data):,}')
    # Sum per-checkpoint predictions, then divide by the ensemble size.
    if config.dataset_type == 'multiclass':
        sum_preds = np.zeros((len(test_data), num_tasks, config.multiclass_num_classes))
    else:
        sum_preds = np.zeros((len(test_data), num_tasks))
    print(f'Predicting with an ensemble of {len(config.checkpoint_paths)} models')
    for checkpoint_path in tqdm(config.checkpoint_paths, total=len(config.checkpoint_paths)):
        model = load_checkpoint(checkpoint_path)
        model_preds = predict(
            model=model,
            data=test_data,
            batch_size=config.batch_size,
        )
        sum_preds += np.array(model_preds)
    avg_preds = (sum_preds / len(config.checkpoint_paths)).tolist()
    # Re-insert None placeholders for the rows whose SMILES were invalid.
    full_preds = [None] * len(full_data)
    for i, si in enumerate(valid_indices):
        full_preds[si] = avg_preds[i]
    return np.array(full_preds).reshape(-1)
| [
"cmpnn.data.MoleculeDataset",
"numpy.array",
"cmpnn.utils.load_checkpoint",
"cmpnn.data.utils.load_data",
"torch.cuda.set_device",
"cmpnn.train.utils.predict"
] | [((418, 431), 'cmpnn.data.utils.load_data', 'load_data', (['df'], {}), '(df)\n', (427, 431), False, 'from cmpnn.data.utils import load_data\n'), ((592, 646), 'cmpnn.data.MoleculeDataset', 'MoleculeDataset', (['[test_data[i] for i in valid_indices]'], {}), '([test_data[i] for i in valid_indices])\n', (607, 646), False, 'from cmpnn.data import MoleculeDataset\n'), ((274, 307), 'torch.cuda.set_device', 'torch.cuda.set_device', (['config.gpu'], {}), '(config.gpu)\n', (295, 307), False, 'import torch\n'), ((1171, 1203), 'cmpnn.utils.load_checkpoint', 'load_checkpoint', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1186, 1203), False, 'from cmpnn.utils import load_checkpoint\n'), ((1226, 1292), 'cmpnn.train.utils.predict', 'predict', ([], {'model': 'model', 'data': 'test_data', 'batch_size': 'config.batch_size'}), '(model=model, data=test_data, batch_size=config.batch_size)\n', (1233, 1292), False, 'from cmpnn.train.utils import predict\n'), ((1361, 1382), 'numpy.array', 'np.array', (['model_preds'], {}), '(model_preds)\n', (1369, 1382), True, 'import numpy as np\n'), ((1682, 1701), 'numpy.array', 'np.array', (['avg_preds'], {}), '(avg_preds)\n', (1690, 1701), True, 'import numpy as np\n')] |
import numpy as np
def _padding_array(seqs, from_post, max_len, default):
seqs_len = len(seqs)
if seqs_len == max_len:
return seqs
if seqs_len > max_len:
if from_post:
return seqs[0:max_len]
start = seqs_len - max_len
return seqs[start:]
append_times = max_len - seqs_len
if from_post:
list_to_be_append = seqs
else:
list_to_be_append = seqs[::-1]
for _ in range(append_times):
list_to_be_append.append(default)
if from_post:
return list_to_be_append
return list_to_be_append[::-1]
def generate_padding_array(seqs, transform_func, default, max_len, inverse=False):
    """Transform each sequence and front-pad/truncate it to *max_len*.

    NOTE(review): when inverse=True each seq is reversed here and then
    reversed *again* at the transform_func call below, so inverse=True feeds
    the original order to transform_func — confirm this double reversal is
    intentional.
    """
    transformed_seqs = []
    for seq in seqs:
        if inverse:
            seq = seq[::-1]
        # from_post=False: padding with *default* is added at the front.
        transformed_seqs.append(_padding_array(
            transform_func(seq[::-1]), False, max_len, default,
        ))
    return np.array(transformed_seqs) | [
"numpy.array"
] | [((916, 942), 'numpy.array', 'np.array', (['transformed_seqs'], {}), '(transformed_seqs)\n', (924, 942), True, 'import numpy as np\n')] |
#!/usr/bin/python3
from picamera.array import PiRGBArray
from picamera import PiCamera
import numpy as np
import cv2
import RPi.GPIO as GPIO
import time
# Config
PIN_PAIR = 27                  # BCM pin of the physical pair button (input, pull-up below)
PIN_SERVO_PAIR = 12            # BCM pin driving the "pair" servo (50 Hz PWM)
PIN_SERVO_POKEBALL = 18        # BCM pin driving the "pokeball" servo (50 Hz PWM)
REPAIR_IDLE_TIME = 60 * 7      # seconds of inactivity before an automatic re-pair
REPAIR_DELAY_TIME = 30         # extra back-off applied after an automatic re-pair
MINIMUM_PIXELS = 10000         # colour-mask pixel count needed to count as a detection
RESOLUTION = (640, 480)        # camera capture resolution (width, height)
def setup_servo(pin):
    """Configure *pin* as a 50 Hz PWM output and return the started servo."""
    GPIO.setup(pin, GPIO.OUT)
    pwm = GPIO.PWM(pin, 50)
    pwm.start(0.1)
    time.sleep(0.2)
    # Stop driving the signal so the servo does not jitter while idle.
    pwm.ChangeDutyCycle(0)
    return pwm
def trigger_servo(servo):
    """Press and release the button driven by *servo*.

    Sequence: wake the servo, move to the "pressed" duty cycle, return to
    the rest position, then stop driving the signal.
    """
    for duty, pause in ((0.1, 0.1), (3.5, 0.2), (0.1, 0.1), (0, 0)):
        servo.ChangeDutyCycle(duty)
        if pause:
            time.sleep(pause)
# GPIO Setup
GPIO.setwarnings(False) # Do not tell anyone
GPIO.setmode(GPIO.BCM)
# Servo's
pairServo = setup_servo(PIN_SERVO_PAIR)
pokeballServo = setup_servo(PIN_SERVO_POKEBALL)
# Pair button (active low: pull-up enabled, so pressed reads 0)
GPIO.setup(PIN_PAIR, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Camera
camera = PiCamera()
camera.resolution = RESOLUTION
camera.framerate = 6
rawCapture = PiRGBArray(camera, size=RESOLUTION)
time.sleep(1) # Camera needs some time for itself
# Image transformation: HSV lower/upper bounds for the two colour masks.
# NOTE(review): frames are captured as "bgr" below but converted with
# COLOR_RGB2HSV, so the effective hue ranges are swapped R<->B — the bounds
# were presumably tuned against that conversion; confirm before changing.
blueLower = np.array([10, 70, 70])
blueUpper = np.array([30, 255, 255])
greenLower = np.array([35, 70, 70])
greenUpper = np.array([70, 255, 255])
# Start far in the past so the first idle check can fire immediately.
lastInteraction = time.time() - 1000000
for rgbFrame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        print("Quit")
        break
    hsvFrame = cv2.cvtColor(rgbFrame.array, cv2.COLOR_RGB2HSV)
    # Pokestop
    blueMask = cv2.inRange(hsvFrame, blueLower, blueUpper)
    blueCount = cv2.countNonZero(blueMask)
    # Pokemon
    greenMask = cv2.inRange(hsvFrame, greenLower, greenUpper)
    greenCount = cv2.countNonZero(greenMask)
    # res = cv2.bitwise_and(rgbFrame.array, rgbFrame.array, mask = blueMask)
    # cv2.imshow('rgbFrame', rgbFrame.array)
    # cv2.imshow('blueMask', blueMask)
    # cv2.imshow('greenMask', greenMask)
    # cv2.imshow('res', res)
    # Must clear the capture buffer before the next frame.
    rawCapture.truncate(0)
    if not GPIO.input(PIN_PAIR):
        # Manual pair button pressed (active low): press both servos.
        print("Button Pressed")
        trigger_servo(pairServo)
        trigger_servo(pokeballServo)
        time.sleep(1)
    elif blueCount > MINIMUM_PIXELS and blueCount > greenCount:
        # Pokestop screen detected: just note the activity and wait.
        lastInteraction = time.time()
        time.sleep(2)
    elif greenCount > MINIMUM_PIXELS:
        # Pokemon encounter detected: throw a ball.
        lastInteraction = time.time()
        trigger_servo(pokeballServo)
        # Sleep so we do not keep hitting the button
        time.sleep(3)
    elif lastInteraction < time.time() - REPAIR_IDLE_TIME:
        # Idle too long: re-pair, and back-date lastInteraction so the next
        # automatic re-pair happens after REPAIR_DELAY_TIME.
        trigger_servo(pairServo)
        trigger_servo(pokeballServo)
        lastInteraction = time.time() - REPAIR_IDLE_TIME - REPAIR_DELAY_TIME
        time.sleep(5)
pokeballServo.stop()
pairServo.stop()
GPIO.cleanup()
cv2.destroyAllWindows()
| [
"RPi.GPIO.cleanup",
"cv2.countNonZero",
"RPi.GPIO.setup",
"cv2.inRange",
"RPi.GPIO.setwarnings",
"picamera.PiCamera",
"time.sleep",
"RPi.GPIO.PWM",
"numpy.array",
"cv2.waitKey",
"cv2.destroyAllWindows",
"RPi.GPIO.input",
"cv2.cvtColor",
"picamera.array.PiRGBArray",
"time.time",
"RPi.GP... | [((816, 839), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (832, 839), True, 'import RPi.GPIO as GPIO\n'), ((862, 884), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (874, 884), True, 'import RPi.GPIO as GPIO\n'), ((999, 1054), 'RPi.GPIO.setup', 'GPIO.setup', (['PIN_PAIR', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(PIN_PAIR, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (1009, 1054), True, 'import RPi.GPIO as GPIO\n'), ((1074, 1084), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (1082, 1084), False, 'from picamera import PiCamera\n'), ((1150, 1185), 'picamera.array.PiRGBArray', 'PiRGBArray', (['camera'], {'size': 'RESOLUTION'}), '(camera, size=RESOLUTION)\n', (1160, 1185), False, 'from picamera.array import PiRGBArray\n'), ((1186, 1199), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1196, 1199), False, 'import time\n'), ((1273, 1295), 'numpy.array', 'np.array', (['[10, 70, 70]'], {}), '([10, 70, 70])\n', (1281, 1295), True, 'import numpy as np\n'), ((1308, 1332), 'numpy.array', 'np.array', (['[30, 255, 255]'], {}), '([30, 255, 255])\n', (1316, 1332), True, 'import numpy as np\n'), ((1346, 1368), 'numpy.array', 'np.array', (['[35, 70, 70]'], {}), '([35, 70, 70])\n', (1354, 1368), True, 'import numpy as np\n'), ((1382, 1406), 'numpy.array', 'np.array', (['[70, 255, 255]'], {}), '([70, 255, 255])\n', (1390, 1406), True, 'import numpy as np\n'), ((2935, 2949), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (2947, 2949), True, 'import RPi.GPIO as GPIO\n'), ((2950, 2973), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2971, 2973), False, 'import cv2\n'), ((347, 372), 'RPi.GPIO.setup', 'GPIO.setup', (['pin', 'GPIO.OUT'], {}), '(pin, GPIO.OUT)\n', (357, 372), True, 'import RPi.GPIO as GPIO\n'), ((385, 402), 'RPi.GPIO.PWM', 'GPIO.PWM', (['pin', '(50)'], {}), '(pin, 50)\n', (393, 402), True, 'import RPi.GPIO as GPIO\n'), ((428, 443), 'time.sleep', 'time.sleep', 
(['(0.2)'], {}), '(0.2)\n', (438, 443), False, 'import time\n'), ((573, 588), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (583, 588), False, 'import time\n'), ((650, 665), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (660, 665), False, 'import time\n'), ((735, 750), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (745, 750), False, 'import time\n'), ((1426, 1437), 'time.time', 'time.time', ([], {}), '()\n', (1435, 1437), False, 'import time\n'), ((1647, 1694), 'cv2.cvtColor', 'cv2.cvtColor', (['rgbFrame.array', 'cv2.COLOR_RGB2HSV'], {}), '(rgbFrame.array, cv2.COLOR_RGB2HSV)\n', (1659, 1694), False, 'import cv2\n'), ((1726, 1769), 'cv2.inRange', 'cv2.inRange', (['hsvFrame', 'blueLower', 'blueUpper'], {}), '(hsvFrame, blueLower, blueUpper)\n', (1737, 1769), False, 'import cv2\n'), ((1786, 1812), 'cv2.countNonZero', 'cv2.countNonZero', (['blueMask'], {}), '(blueMask)\n', (1802, 1812), False, 'import cv2\n'), ((1844, 1889), 'cv2.inRange', 'cv2.inRange', (['hsvFrame', 'greenLower', 'greenUpper'], {}), '(hsvFrame, greenLower, greenUpper)\n', (1855, 1889), False, 'import cv2\n'), ((1907, 1934), 'cv2.countNonZero', 'cv2.countNonZero', (['greenMask'], {}), '(greenMask)\n', (1923, 1934), False, 'import cv2\n'), ((1549, 1563), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1560, 1563), False, 'import cv2\n'), ((2207, 2227), 'RPi.GPIO.input', 'GPIO.input', (['PIN_PAIR'], {}), '(PIN_PAIR)\n', (2217, 2227), True, 'import RPi.GPIO as GPIO\n'), ((2339, 2352), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2349, 2352), False, 'import time\n'), ((2444, 2455), 'time.time', 'time.time', ([], {}), '()\n', (2453, 2455), False, 'import time\n'), ((2464, 2477), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2474, 2477), False, 'import time\n'), ((2543, 2554), 'time.time', 'time.time', ([], {}), '()\n', (2552, 2554), False, 'import time\n'), ((2653, 2666), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2663, 2666), False, 'import 
time\n'), ((2882, 2895), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2892, 2895), False, 'import time\n'), ((2695, 2706), 'time.time', 'time.time', ([], {}), '()\n', (2704, 2706), False, 'import time\n'), ((2823, 2834), 'time.time', 'time.time', ([], {}), '()\n', (2832, 2834), False, 'import time\n')] |
import math
import sys
import typing
import numpy as np
from analysis import plot_common
from analysis import plot_constants
from analysis import video_analysis
from analysis import video_data
def gnuplot_aircraft_yaw(stream: typing.TextIO=sys.stdout) -> typing.List[str]:
    """Write the estimated aircraft yaw (gnuplot data format) to *stream*.

    For every (time, x_distance, aspect, aspect_error) observation the yaw is
    the difference between the bearing observer->aircraft and the observed
    aspect, normalised into (-180, 180]. Error bars combine the observer
    position uncertainty with the per-observation aspect error.
    """
    result = [
        '# "{}"'.format('Estimated yaw of aircraft.')
    ]
    notes = [
        'time yaw(degrees)'
    ]
    if notes:
        stream.write('#\n')
        stream.write('# Notes:\n')
        for note in notes:
            stream.write('# {}\n'.format(note))
    gs_fit = video_analysis.ground_speed_curve_fit(video_data.ErrorDirection.MID)
    # Four columns: (time s, x_distance m, aspect deg, aspect_error deg).
    time_dist_brng = video_analysis.observer_time_distance_bearing_from_wing_tips(
        gs_fit, video_data.ErrorDirection.MID)
    ((x_mean, x_std), (y_mean, y_std)) = \
        video_analysis.observer_position_mean_std_from_full_transits()
    observer_error = math.sqrt(x_std ** 2 + y_std ** 2)
    rows = []
    for t, x_distance, aspect, aspect_error in time_dist_brng:
        # Bearing from the assumed observer position to the aircraft.
        dx = x_mean - x_distance - plot_common.x_offset()
        obs_brng = math.degrees(math.atan2(y_mean, dx)) % 360
        yaw = (obs_brng - aspect) % 360
        if yaw > 180.0:
            yaw -= 360
        # Error angle: 2 * atan(observer position error / separation),
        # plus the per-observation aspect error.
        separation = math.sqrt(y_mean ** 2 + dx ** 2)
        err = 2.0 * math.degrees(math.atan(observer_error / separation))
        err += aspect_error
        rows.append((t, yaw, yaw - err, yaw + err))
    plot_common.gnuplot_write_arrays(stream, np.array(rows))
    return ['']
def gnuplot_aircraft_yaw_plt() -> str:
    """Return the gnuplot .plt script template that plots the yaw data;
    '{file_name}' and '{computed_data}' are placeholders — presumably filled
    in by the caller via str.format (confirm against the caller)."""
    return """# set logscale x
set colorsequence classic
set grid
set title "Aircraft Deviation from Runway Heading{computed_data}"
set xlabel "Video Time (s)"
#set xtics 2100,100,2400
set xtics autofreq
#set xrange [2000:2500]
#set format x ""
# set logscale y
set ylabel "Deviation (degrees, +ve right, -ve left)"
set yrange [5:-5]
#set yrange [-600:-900] reverse
set ytics 1
# set mytics 0.5
# set ytics autofreq
# set ytics 8,35,3
# set logscale y2
# set y2label "Bytes"
# set y2range [1:1e9]
# set y2tics
set pointsize 2
set datafile separator whitespace#" "
set datafile missing "NaN"
# set key off
set terminal svg size 800,600
set output "{file_name}.svg"
# Nose wheel off at around 00:17:27 i.e. 17.9s
set arrow from 17.9,-3.5 to 17.9,-0.8 lw 2 lc rgb "black"
set label 3 "Nose wheel off" at 17.9,-4 font ",12" center
# Main gear off at around 00:25:19 i.e. 25.63
set arrow from 25.6,-3.5 to 25.6,-0.5 lw 2 lc rgb "black"
set label 4 "Main wheels off" at 25.6,-4 font ",12" center
# linespoints ps 1.25
plot "{file_name}.dat" using 1:2:3:4 title "Estimated" w yerrorbars ps 1.25, \\
    "{file_name}.dat" using 1:2 title "Fit of estimate" w lines lw 2 smooth bezier# csplines#bezier
reset
""" | [
"analysis.plot_common.x_offset",
"analysis.video_analysis.ground_speed_curve_fit",
"math.sqrt",
"analysis.video_analysis.observer_position_mean_std_from_full_transits",
"numpy.array",
"math.atan2",
"math.atan",
"analysis.video_analysis.observer_time_distance_bearing_from_wing_tips"
] | [((570, 638), 'analysis.video_analysis.ground_speed_curve_fit', 'video_analysis.ground_speed_curve_fit', (['video_data.ErrorDirection.MID'], {}), '(video_data.ErrorDirection.MID)\n', (607, 638), False, 'from analysis import video_analysis\n'), ((768, 871), 'analysis.video_analysis.observer_time_distance_bearing_from_wing_tips', 'video_analysis.observer_time_distance_bearing_from_wing_tips', (['gs_fit', 'video_data.ErrorDirection.MID'], {}), '(gs_fit,\n video_data.ErrorDirection.MID)\n', (828, 871), False, 'from analysis import video_analysis\n'), ((1492, 1554), 'analysis.video_analysis.observer_position_mean_std_from_full_transits', 'video_analysis.observer_position_mean_std_from_full_transits', ([], {}), '()\n', (1552, 1554), False, 'from analysis import video_analysis\n'), ((1576, 1610), 'math.sqrt', 'math.sqrt', (['(x_std ** 2 + y_std ** 2)'], {}), '(x_std ** 2 + y_std ** 2)\n', (1585, 1610), False, 'import math\n'), ((2466, 2510), 'math.sqrt', 'math.sqrt', (['(y_mean ** 2 + x_obs_aircraft ** 2)'], {}), '(y_mean ** 2 + x_obs_aircraft ** 2)\n', (2475, 2510), False, 'import math\n'), ((2855, 2873), 'numpy.array', 'np.array', (['time_yaw'], {}), '(time_yaw)\n', (2863, 2873), True, 'import numpy as np\n'), ((2086, 2108), 'analysis.plot_common.x_offset', 'plot_common.x_offset', ([], {}), '()\n', (2106, 2108), False, 'from analysis import plot_common\n'), ((2154, 2188), 'math.atan2', 'math.atan2', (['y_mean', 'x_obs_aircraft'], {}), '(y_mean, x_obs_aircraft)\n', (2164, 2188), False, 'import math\n'), ((2542, 2591), 'math.atan', 'math.atan', (['(observer_error / obs_aircraft_distance)'], {}), '(observer_error / obs_aircraft_distance)\n', (2551, 2591), False, 'import math\n')] |
#! /usr/bin/env python3
from analysis.experiment import ExperimentStats, read_experiment_stats
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import argparse
from typing import List
''' viz_cio_set_algo.py
Script to compare phase of the CIO-Set algorithm.
Output is a simple stacked bar plot showing the duration of the CIO-Set algorithm.
Example:
$ ./vis_cio_set_algo.py ./madbench2-shared-16 ./madbench2-shared-64 --xlabels madbench-16 madbench-64 -o madbench-out.png
'''
class AlgorithmStats:
    """Per-experiment timings (in seconds) of the three CIO-Set phases."""

    graph_construction_times_: List[float]
    phase_construction_times_: List[float]
    merge_times_: List[float]
    len_: int

    def __init__(self, exp_stats: List[ExperimentStats]) -> None:
        """Convert the millisecond timings in *exp_stats* to seconds."""
        self.len_ = len(exp_stats)
        self.graph_construction_times_ = [s.graph_stats.build_time / 1000. for s in exp_stats]
        self.phase_construction_times_ = [s.pio_stats.build_time / 1000. for s in exp_stats]
        self.merge_times_ = [s.cio_stats.build_time / 1000. for s in exp_stats]

    @property
    def graph_times(self) -> List[float]:
        """Graph construction durations, one per experiment (seconds)."""
        return self.graph_construction_times_

    @property
    def phase_times(self) -> List[float]:
        """Phase identification durations, one per experiment (seconds)."""
        return self.phase_construction_times_

    @property
    def merge_times(self) -> List[float]:
        """CIO-Set merge durations, one per experiment (seconds)."""
        return self.merge_times_

    @property
    def len(self) -> int:
        """Number of experiments summarised."""
        return self.len_
def plot_algorithm_stats(algo_stats: AlgorithmStats, xticks: List[str], filename: str):
    """Render a stacked bar chart of the three phase durations and save it
    as a 300 dpi PNG at *filename*."""
    # Size adjustments, for better pictures in the paper.
    # 3.5 inch per column
    # 7.15 inches per textwidth
    size_mult = 4
    # width in inches
    # fig_width = 4.1
    fig_width = 3.5 * size_mult
    # height in inches
    # fig_height = 1.8
    fig_height = 1.5 * size_mult
    # font size
    fig_font_size = 6 * size_mult
    # set canvas size
    plt.rcParams['figure.figsize'] = fig_width, fig_height
    # set font size and style
    matplotlib.rcParams.update({'font.size': fig_font_size})
    matplotlib.rcParams.update({'axes.linewidth': 0.5})
    ind = np.arange(algo_stats.len)
    width = 0.35
    fig, ax = plt.subplots()
    # Three stacked segments per experiment: graph, phase, merge.
    gc = ax.bar(ind, algo_stats.graph_times, width, color='g')
    spp = ax.bar(ind, algo_stats.phase_times, width, bottom=algo_stats.graph_times, color='b')
    mg = ax.bar(ind, algo_stats.merge_times, width,
                bottom=np.array(algo_stats.graph_times) + np.array(algo_stats.phase_times),
                color='r')
    totals = [g + p + m for g, p, m in zip(algo_stats.graph_times, algo_stats.phase_times, algo_stats.merge_times)]
    def autolabel(rects, div=2):
        # Writes the total duration next to each top segment.
        # NOTE(review): `div` and `height` are unused — likely leftovers.
        for i, rect in enumerate(rects):
            total = totals[i]
            height = rect.get_height()
            ax.text(rect.get_x() + (rect.get_width() / 4.), rect.get_y() * 1.08,
                    '{} ms'.format(int(total)))
    autolabel(mg)
    plt.xticks(ind, xticks)
    ax.set_ylim(0, 230000)
    plt.ylabel('Time in ms')
    plt.xlabel('Trace configuration')
    plt.legend(('Graph construction', 'Phase identification', 'Definition of global CIO-Sets'))
    plt.savefig(filename, dpi=300, format='png', bbox_inches='tight')
def main(args) -> None:
    """Load the experiment statistics and render the comparison plot."""
    exp_stats = [read_experiment_stats(p) for p in args.experiment_paths]
    # Fall back to the trace file names when no explicit labels are given.
    labels = args.xlabels if args.xlabels else [st.tracefile for st in exp_stats]
    plot_algorithm_stats(AlgorithmStats(exp_stats), labels, args.output)
if __name__ == '__main__':
    # CLI entry point: compare CIO-Set algorithm phase durations across runs.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'experiment_paths',
        nargs='+',
        type=str,
        help='Paths of experiments to compare.',  # typo fix: was "exepriments"
    )
    parser.add_argument(
        '--xlabels',
        nargs='*',
        type=str,
        help='List of xticks.',
    )
    parser.add_argument(
        '-o', '--output',
        type=str,
        default='cio_set_algo.png',
        help='Filename for the resulting png.',
    )
    args = parser.parse_args()
    main(args)
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.rcParams.update",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"argparse.ArgumentParser",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.arange",
"analysis.experiment.read_experiment_... | [((1939, 1995), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': fig_font_size}"], {}), "({'font.size': fig_font_size})\n", (1965, 1995), False, 'import matplotlib\n'), ((2000, 2051), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'axes.linewidth': 0.5}"], {}), "({'axes.linewidth': 0.5})\n", (2026, 2051), False, 'import matplotlib\n'), ((2063, 2088), 'numpy.arange', 'np.arange', (['algo_stats.len'], {}), '(algo_stats.len)\n', (2072, 2088), True, 'import numpy as np\n'), ((2120, 2134), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2132, 2134), True, 'import matplotlib.pyplot as plt\n'), ((2869, 2892), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ind', 'xticks'], {}), '(ind, xticks)\n', (2879, 2892), True, 'import matplotlib.pyplot as plt\n'), ((2924, 2948), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time in ms"""'], {}), "('Time in ms')\n", (2934, 2948), True, 'import matplotlib.pyplot as plt\n'), ((2953, 2986), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Trace configuration"""'], {}), "('Trace configuration')\n", (2963, 2986), True, 'import matplotlib.pyplot as plt\n'), ((2991, 3086), 'matplotlib.pyplot.legend', 'plt.legend', (["('Graph construction', 'Phase identification', 'Definition of global CIO-Sets')"], {}), "(('Graph construction', 'Phase identification',\n 'Definition of global CIO-Sets'))\n", (3001, 3086), True, 'import matplotlib.pyplot as plt\n'), ((3087, 3152), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)', 'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(filename, dpi=300, format='png', bbox_inches='tight')\n", (3098, 3152), True, 'import matplotlib.pyplot as plt\n'), ((3537, 3562), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3560, 3562), False, 'import argparse\n'), ((3197, 3224), 'analysis.experiment.read_experiment_stats', 'read_experiment_stats', (['path'], {}), 
'(path)\n', (3218, 3224), False, 'from analysis.experiment import ExperimentStats, read_experiment_stats\n'), ((2365, 2397), 'numpy.array', 'np.array', (['algo_stats.graph_times'], {}), '(algo_stats.graph_times)\n', (2373, 2397), True, 'import numpy as np\n'), ((2400, 2432), 'numpy.array', 'np.array', (['algo_stats.phase_times'], {}), '(algo_stats.phase_times)\n', (2408, 2432), True, 'import numpy as np\n')] |
# !/usr/bin/env python
# coding=utf-8
# @Time : 2019-07-02 21:00
# @Author : <EMAIL>
# @File : io_utils.py
import sys
import collections
import csv
import json
import logging
import os.path
import pickle
import random
import tarfile
import zipfile
from six.moves import urllib
import requests
import h5py
import numpy as np
import pandas as pd
from pandas.errors import ParserError
import tensorflow as tf
from aispace.utils.file_utils import default_download_dir, maybe_create_dir
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Public API of this I/O utilities module.
__all__ = [
    "load_csv",
    "read_csv",
    "save_csv",
    "save_json",
    "load_json",
    "load_hdf5",
    "save_hdf5",
    "load_object",
    "save_object",
    "load_vocab",
    "load_array",
    "load_from_file",
    "load_glove",
    "load_matrix",
    "load_pretrained_embeddings",
    "maybe_download",
    "save_array",
]
def load_csv(data_fp):
    """Read a CSV file and return its rows as a list of lists of strings.

    BUG FIX: the file was opened in binary mode ('rb'); in Python 3
    csv.reader requires a text-mode file (and newline='' per the csv docs).
    """
    with open(data_fp, 'r', newline='') as f:
        return list(csv.reader(f))
def read_csv(data_fp, header=0):
    """
    Helper method to read a csv file. Wraps around pd.read_csv to handle some
    exceptions. Can extend to cover cases as necessary

    :param data_fp: path to the csv file
    :param header: row number to use as the column names (pandas `header`)
    :return: Pandas dataframe with the data
    """
    try:
        return pd.read_csv(data_fp, header=header)
    except ParserError:
        # BUG FIX: logging.WARNING is an int level constant, not a callable;
        # the original raised TypeError here instead of logging.
        logging.warning(r'Failed to parse the CSV with pandas default way, trying \ as escape character.')
        return pd.read_csv(data_fp, header=header, escapechar='\\')
def save_csv(data_fp, data):
    """Write *data* rows to a CSV file; scalar/str rows become one-cell rows.

    BUG FIXES: the file handle was opened inline and never closed (use a
    context manager, plus newline='' as the csv module requires), and
    collections.Iterable was removed in Python 3.10 (use collections.abc).
    """
    with open(data_fp, 'w', newline='') as f:
        writer = csv.writer(f)
        for row in data:
            if not isinstance(row, collections.abc.Iterable) or isinstance(row, str):
                row = [row]
            writer.writerow(row)
def load_json(data_fp):
    """Read the UTF-8 JSON file at *data_fp* and return the parsed object."""
    with open(data_fp, 'r', encoding="utf-8") as fh:
        return json.load(fh)
def save_json(data_fp, data, sort_keys=True, indent=4, mode='w'):
    """Write *data* to *data_fp* as pretty-printed UTF-8 JSON (numpy-aware)."""
    with open(data_fp, mode, encoding='utf8') as sink:
        json.dump(data, sink, cls=NumpyEncoder, sort_keys=sort_keys,
                  indent=indent, ensure_ascii=False)
def json_dumps(data):
    """Serialize *data* to a JSON string (numpy-aware, non-ASCII preserved)."""
    return json.dumps(data, cls=NumpyEncoder, ensure_ascii=False)
# Convenience loader: pulls the whole file into memory, so it is only
# suitable for relatively small datasets.
def load_hdf5(data_fp):
    """Load every dataset of an HDF5 file into a {name: array} dict."""
    data = {}
    with h5py.File(data_fp, 'r') as h5_file:
        for key in h5_file.keys():
            # BUG FIX: Dataset.value was removed in h5py 3.0; indexing with
            # an empty tuple reads the full dataset.
            data[key] = h5_file[key][()]
    return data
# def save_hdf5(data_fp: str, data: Dict[str, object]):
def save_hdf5(data_fp, data, metadata=None):
    """Write each array in *data* as an HDF5 dataset, appending to an
    existing file; per-key metadata may set a boolean 'in_memory' attribute."""
    if metadata is None:
        metadata = {}
    # Append to an existing file instead of truncating it.
    file_mode = 'r+' if os.path.isfile(data_fp) else 'w'
    with h5py.File(data_fp, file_mode) as h5_file:
        for key, value in data.items():
            dset = h5_file.create_dataset(key, data=value)
            meta = metadata.get(key)
            if meta is not None and 'in_memory' in meta:
                dset.attrs['in_memory'] = True if meta['in_memory'] else False
def load_object(object_fp):
    """Unpickle and return the object stored at *object_fp*."""
    with open(object_fp, 'rb') as fh:
        return pickle.load(fh)
def save_object(object_fp, obj):
    """Pickle *obj* into the file at *object_fp*."""
    with open(object_fp, 'wb') as fh:
        pickle.dump(obj, fh)
def load_array(data_fp, dtype=float):
    """Read one value per line from *data_fp*, cast each with *dtype*,
    and return them as a numpy array."""
    with open(data_fp, 'r') as fh:
        values = [dtype(line.strip()) for line in fh]
    return np.array(values)
def load_matrix(data_fp, dtype=float):
    """Read a whitespace-separated matrix from *data_fp*; single rows or
    columns are squeezed to one dimension."""
    with open(data_fp, 'r') as fh:
        rows = [[dtype(tok) for tok in line.strip().split()] for line in fh]
    return np.squeeze(np.array(rows))
def save_array(data_fp, array):
    """Write every element of *array* (np.nditer order), one per line."""
    with open(data_fp, 'w') as sink:
        for elem in np.nditer(array):
            sink.write(str(elem) + '\n')
def load_pretrained_embeddings(embeddings_path, vocab):
    """Build an embedding matrix for *vocab* from a GloVe-format file.

    Words absent from the file receive the average in-vocabulary embedding
    plus small uniform noise in [-0.01, 0.01).
    """
    embeddings = load_glove(embeddings_path)
    embeddings_size = len(next(iter(embeddings.values())))
    # Average embedding over the words that are actually covered.
    avg_embedding = np.zeros(embeddings_size)
    count = 0
    for word in vocab:
        vec = embeddings.get(word)
        if vec is not None:
            avg_embedding += vec
            count += 1
    if count > 0:
        avg_embedding /= count
    rows = []
    for word in vocab:
        if word in embeddings:
            rows.append(embeddings[word])
        else:
            rows.append(avg_embedding + np.random.uniform(-0.01, 0.01, embeddings_size))
    embeddings_matrix = np.stack(rows)
    # Help the garbage collector free the raw embeddings dict.
    embeddings = None
    return embeddings_matrix
def load_glove(file_path):
    """Parse a GloVe-format text file into a {word: np.ndarray} dict."""
    logging.info(' Loading Glove format file {}'.format(file_path))
    embeddings = {}
    with open(file_path, 'r') as fh:
        for line in fh:
            if not line:
                continue
            parts = line.split()
            # First token is the word, the rest are the vector components.
            embeddings[parts[0]] = np.array([float(v) for v in parts[1:]])
    logging.info(' {0} embeddings loaded'.format(len(embeddings)))
    return embeddings
def split_data(split, data):
    # type: (float, list) -> (list, list)
    """Shuffle *data* in place, then split it at round(split * len(data))."""
    cut = int(round(split * len(data)))
    random.shuffle(data)
    return data[:cut], data[cut:]
def shuffle_unison_inplace(list_of_lists, random_state=None):
    """Reorder several equal-length arrays with one shared permutation.

    Args:
        list_of_lists: sequence of numpy arrays (or fancy-indexable objects),
            all of the same length.
        random_state: optional np.random.RandomState for reproducibility.

    Returns:
        List of the arrays reordered by one common permutation, or None
        when *list_of_lists* is empty.
    """
    if not list_of_lists:
        return None
    assert all(len(l) == len(list_of_lists[0]) for l in list_of_lists)
    # BUG FIX: the random_state permutation was computed but never assigned,
    # so `p` was undefined (NameError) whenever random_state was supplied.
    if random_state is not None:
        p = random_state.permutation(len(list_of_lists[0]))
    else:
        p = np.random.permutation(len(list_of_lists[0]))
    return [l[p] for l in list_of_lists]
def shuffle_dict_unison_inplace(np_dict, random_state=None):
    """Shuffle every array of *np_dict* with one shared permutation and
    return the result as a new dict (same keys)."""
    keys = list(np_dict.keys())
    shuffled = shuffle_unison_inplace(list(np_dict.values()), random_state)
    # Re-associate each shuffled array with its original key.
    return {key: shuffled[i] for i, key in enumerate(keys)}
def shuffle_inplace(np_dict):
    """Apply one shared random permutation to every array in *np_dict*,
    rebinding each value in place.

    Raises:
        ValueError: if the arrays do not all have the same first dimension.
    """
    if len(np_dict) == 0:
        return
    size = np_dict[next(iter(np_dict))].shape[0]
    if any(np_dict[key].shape[0] != size for key in np_dict):
        raise ValueError(
            'Invalid: dictionary contains variable length arrays')
    p = np.random.permutation(size)
    for key in np_dict:
        np_dict[key] = np_dict[key][p]
def split_dataset_tvt(dataset, split):
    """Split *dataset* by the split-code array: 0 -> training, 1 -> validation,
    2 -> test. Returns (training, test, validation)."""
    if 'split' in dataset:
        del dataset['split']
    training_set, validation_set, test_set = (
        split_dataset(dataset, split, value_to_split=v) for v in (0, 1, 2))
    return training_set, test_set, validation_set
def split_dataset(dataset, split, value_to_split=0):
    """Select, for every column, the rows whose split code equals
    *value_to_split*."""
    return {key: dataset[key][split == value_to_split] for key in dataset}
def collapse_rare_labels(labels, labels_limit):
    """Clamp every label id >= labels_limit down to labels_limit.

    No-op when labels_limit <= 0. Mutates *labels* in place and returns it.
    """
    if labels_limit > 0:
        np.minimum(labels, labels_limit, out=labels)
    return labels
def class_counts(dataset, labels_field):
    """Count the occurrences of each label id in dataset[labels_field]."""
    counts = np.bincount(dataset[labels_field].flatten())
    return counts.tolist()
def text_feature_data_field(text_feature):
    """Return the data-field key '<name>_<level>' for a text feature dict."""
    return '{}_{}'.format(text_feature['name'], text_feature['level'])
def load_from_file(file_name, field=None, dtype=int):
    """Load evaluation data, dispatching on the file extension.

    :param file_name: input path; the extension selects the loader
    :param field: hdf5 dataset name to read (required for .hdf5 files)
    :param dtype: element type for the fallback whitespace-matrix parser
    :return: numpy array / list / parsed JSON object / None for .model
    """
    if file_name.endswith('.hdf5') and field is not None:
        # Context manager guarantees the file is closed even on error.
        with h5py.File(file_name, 'r') as hdf5_data:
            # BUG FIX: Dataset.value was removed in h5py 3.0; [()] reads all.
            split = hdf5_data['split'][()]
            column = hdf5_data[field][()]
        return column[split == 2]  # rows of the test split (ground truth)
    if file_name.endswith('.npy'):
        return np.load(file_name)
    if file_name.endswith('.csv'):
        return read_csv(file_name, header=None)[0].tolist()
    if file_name.endswith('.json'):
        return load_json(file_name)
    if file_name.endswith('.model'):
        return None
    return load_matrix(file_name, dtype)
class NumpyEncoder(json.JSONEncoder):
    """JSONEncoder that also handles sets, tuples and numpy scalars/arrays."""

    def default(self, obj):
        """Convert non-JSON-native objects into serializable equivalents."""
        if isinstance(obj, (set, tuple)):
            return list(obj)
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Fall back to the base class (raises TypeError for unknown types).
        return json.JSONEncoder.default(self, obj)
def load_vocab(file_path) -> list:
    """Load a vocabulary file into a list of tokens.

    Supported formats:
      * ``.json``  — mapping token -> index, inverted into an index-ordered list
      * ``.model`` — a SentencePiece model
      * anything else — one token per non-empty line (first whitespace field)

    BUG FIX: the original checked ``not endswith(".model")`` first, which
    made the ``.json`` elif branch unreachable (.json files were read by the
    plain text loader). TensorFlow is now imported lazily, only when the
    plain-text branch actually needs it.
    """
    if file_path.endswith(".json"):
        tmp_vocab = json.load(open(file_path))
        tmp_vocab_reversed = {v: k for k, v in tmp_vocab.items()}
        return [tmp_vocab_reversed[idx] for idx in range(len(tmp_vocab_reversed))]
    if file_path.endswith(".model"):
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning("You need to install SentencePiece to use AlbertTokenizer: "
                           "https://github.com/google/sentencepiece pip install sentencepiece")
        tmp_vocab = spm.SentencePieceProcessor()
        tmp_vocab.Load(file_path)
        return [tmp_vocab.id_to_piece(idx) for idx in range(len(tmp_vocab))]
    import tensorflow as tf
    with tf.io.gfile.GFile(file_path) as vocab_file:
        # Converts to 'unicode' (Python 2) or 'str' (Python 3)
        return list(tf.compat.as_text(line.strip().split()[0])
                    for line in vocab_file
                    if line is not None and len(line.strip()) != 0)
def maybe_download(urls, path, filenames=None, extract=False):
    """Downloads a set of files.
    Args:
        urls: A (list of) urls to download files.
        path (str): The destination path to save the files.
        filenames: A (list of) strings of the file names. If given,
            must have the same length with :attr:`urls`. If `None`,
            filenames are extracted from :attr:`urls`.
        extract (bool): Whether to extract compressed files.
    Returns:
        A list of paths to the downloaded files.
    """
    maybe_create_dir(path)
    # Normalise the scalar forms to lists so the loop below is uniform.
    if not isinstance(urls, (list, tuple)):
        urls = [urls]
    if filenames is not None:
        if not isinstance(filenames, (list, tuple)):
            filenames = [filenames]
        if len(urls) != len(filenames):
            raise ValueError(
                '`filenames` must have the same number of elements as `urls`.')
    result = []
    for i, url in enumerate(urls):
        # Pick the local filename: explicit > Google-Drive id > URL basename.
        if filenames is not None:
            filename = filenames[i]
        elif 'drive.google.com' in url:
            filename = _extract_google_drive_file_id(url)
        else:
            filename = url.split('/')[-1]
        # If downloading from GitHub, remove suffix ?raw=True
        # from local filename
        if filename.endswith("?raw=true"):
            filename = filename[:-9]
        filepath = os.path.join(path, filename)
        result.append(filepath)
        # Skip files that are already present locally.
        if not tf.io.gfile.exists(filepath):
            if 'drive.google.com' in url:
                filepath = _download_from_google_drive(url, filename, path)
            else:
                filepath = _download(url, filename, path)
            if extract:
                logger.info('Extract %s', filepath)
                if tarfile.is_tarfile(filepath):
                    # Close the archive deterministically; the original
                    # leaked the open file handle here.
                    with tarfile.open(filepath, 'r') as archive:
                        archive.extractall(path)
                elif zipfile.is_zipfile(filepath):
                    with zipfile.ZipFile(filepath) as zfile:
                        zfile.extractall(path)
                else:
                    logger.info("Unknown compression type. Only .tar.gz, "
                                ".tar.bz2, .tar, and .zip are supported")
    return result
def _download(url, filename, path):
    """Fetch *url* into ``path/filename`` with a console progress bar.

    Returns the local file path of the downloaded file.
    """
    destination = os.path.join(path, filename)

    def _report(blocks_done, block_size, total_size):
        percent = float(blocks_done * block_size) / float(total_size) * 100.
        # pylint: disable=cell-var-from-loop
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, percent))
        sys.stdout.flush()

    destination, _ = urllib.request.urlretrieve(url, destination, _report)
    print()
    size_bytes = os.stat(destination).st_size
    print('Successfully downloaded {} {} bytes.'.format(filename, size_bytes))
    return destination
def _extract_google_drive_file_id(url):
# id is between `/d/` and '/'
url_suffix = url[url.find('/d/') + 3:]
file_id = url_suffix[:url_suffix.find('/')]
return file_id
def _download_from_google_drive(url, filename, path):
    """Adapted from `https://github.com/saurabhshri/gdrive-downloader`
    """
    # Google Drive interposes a confirmation page for large files; the
    # confirmation token comes back in a 'download_warning' cookie.
    def _get_confirm_token(response):
        for key, value in response.cookies.items():
            if key.startswith('download_warning'):
                return value
        return None
    file_id = _extract_google_drive_file_id(url)
    gurl = "https://docs.google.com/uc?export=download"
    # First request returns either the file itself or the confirmation page.
    sess = requests.Session()
    response = sess.get(gurl, params={'id': file_id}, stream=True)
    token = _get_confirm_token(response)
    if token:
        # Re-issue the request with the token to obtain the actual payload.
        params = {'id': file_id, 'confirm': token}
        response = sess.get(gurl, params=params, stream=True)
    filepath = os.path.join(path, filename)
    # Stream to disk in 32 KiB chunks to keep memory bounded.
    CHUNK_SIZE = 32768
    with tf.io.gfile.GFile(filepath, "wb") as f:
        for chunk in response.iter_content(CHUNK_SIZE):
            if chunk:
                f.write(chunk)
    print('Successfully downloaded {}.'.format(filename))
    return filepath | [
"logging.getLogger",
"tarfile.open",
"requests.Session",
"pandas.read_csv",
"json.JSONEncoder.default",
"zipfile.ZipFile",
"sentencepiece.SentencePieceProcessor",
"numpy.array",
"logging.WARNING",
"tensorflow.io.gfile.GFile",
"json.dumps",
"tarfile.is_tarfile",
"numpy.stack",
"sys.stdout.f... | [((501, 528), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (518, 528), False, 'import logging\n'), ((2188, 2242), 'json.dumps', 'json.dumps', (['data'], {'ensure_ascii': '(False)', 'cls': 'NumpyEncoder'}), '(data, ensure_ascii=False, cls=NumpyEncoder)\n', (2198, 2242), False, 'import json\n'), ((3643, 3661), 'numpy.array', 'np.array', (['list_num'], {}), '(list_num)\n', (3651, 3661), True, 'import numpy as np\n'), ((4372, 4397), 'numpy.zeros', 'np.zeros', (['embeddings_size'], {}), '(embeddings_size)\n', (4380, 4397), True, 'import numpy as np\n'), ((4915, 4943), 'numpy.stack', 'np.stack', (['embeddings_vectors'], {}), '(embeddings_vectors)\n', (4923, 4943), True, 'import numpy as np\n'), ((5653, 5673), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (5667, 5673), False, 'import random\n'), ((6862, 6889), 'numpy.random.permutation', 'np.random.permutation', (['size'], {}), '(size)\n', (6883, 6889), True, 'import numpy as np\n'), ((10640, 10662), 'aispace.utils.file_utils.maybe_create_dir', 'maybe_create_dir', (['path'], {}), '(path)\n', (10656, 10662), False, 'from aispace.utils.file_utils import default_download_dir, maybe_create_dir\n'), ((12724, 12776), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'filepath', '_progress'], {}), '(url, filepath, _progress)\n', (12750, 12776), False, 'from six.moves import urllib\n'), ((13566, 13584), 'requests.Session', 'requests.Session', ([], {}), '()\n', (13582, 13584), False, 'import requests\n'), ((1283, 1318), 'pandas.read_csv', 'pd.read_csv', (['data_fp'], {'header': 'header'}), '(data_fp, header=header)\n', (1294, 1318), True, 'import pandas as pd\n'), ((1874, 1895), 'json.load', 'json.load', (['input_file'], {}), '(input_file)\n', (1883, 1895), False, 'import json\n'), ((2050, 2157), 'json.dump', 'json.dump', (['data', 'output_file'], {'cls': 'NumpyEncoder', 'sort_keys': 'sort_keys', 'indent': 'indent', 'ensure_ascii': 
'(False)'}), '(data, output_file, cls=NumpyEncoder, sort_keys=sort_keys, indent=\n indent, ensure_ascii=False)\n', (2059, 2157), False, 'import json\n'), ((2489, 2512), 'h5py.File', 'h5py.File', (['data_fp', '"""r"""'], {}), "(data_fp, 'r')\n", (2498, 2512), False, 'import h5py\n'), ((2845, 2869), 'h5py.File', 'h5py.File', (['data_fp', 'mode'], {}), '(data_fp, mode)\n', (2854, 2869), False, 'import h5py\n'), ((3341, 3355), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3352, 3355), False, 'import pickle\n'), ((3436, 3455), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (3447, 3455), False, 'import pickle\n'), ((3892, 3910), 'numpy.array', 'np.array', (['list_num'], {}), '(list_num)\n', (3900, 3910), True, 'import numpy as np\n'), ((4007, 4023), 'numpy.nditer', 'np.nditer', (['array'], {}), '(array)\n', (4016, 4023), True, 'import numpy as np\n'), ((7993, 8018), 'h5py.File', 'h5py.File', (['file_name', '"""r"""'], {}), "(file_name, 'r')\n", (8002, 8018), False, 'import h5py\n'), ((12540, 12608), 'sys.stdout.write', 'sys.stdout.write', (["('\\r>> Downloading %s %.1f%%' % (filename, percent))"], {}), "('\\r>> Downloading %s %.1f%%' % (filename, percent))\n", (12556, 12608), False, 'import sys\n'), ((12642, 12660), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (12658, 12660), False, 'import sys\n'), ((13898, 13931), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['filepath', '"""wb"""'], {}), "(filepath, 'wb')\n", (13915, 13931), True, 'import tensorflow as tf\n'), ((961, 974), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (971, 974), False, 'import csv\n'), ((1351, 1459), 'logging.WARNING', 'logging.WARNING', (['"""Failed to parse the CSV with pandas default way, trying \\\\ as escape character."""'], {}), "(\n 'Failed to parse the CSV with pandas default way, trying \\\\ as escape character.'\n )\n", (1366, 1459), False, 'import logging\n'), ((1463, 1515), 'pandas.read_csv', 'pd.read_csv', (['data_fp'], {'header': 'header', 
'escapechar': '"""\\\\"""'}), "(data_fp, header=header, escapechar='\\\\')\n", (1474, 1515), True, 'import pandas as pd\n'), ((8230, 8248), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (8237, 8248), True, 'import numpy as np\n'), ((9156, 9184), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['file_path'], {}), '(file_path)\n', (9173, 9184), True, 'import tensorflow as tf\n'), ((9936, 9964), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {}), '()\n', (9962, 9964), True, 'import sentencepiece as spm\n'), ((11560, 11588), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['filepath'], {}), '(filepath)\n', (11578, 11588), True, 'import tensorflow as tf\n'), ((11880, 11908), 'tarfile.is_tarfile', 'tarfile.is_tarfile', (['filepath'], {}), '(filepath)\n', (11898, 11908), False, 'import tarfile\n'), ((4842, 4889), 'numpy.random.uniform', 'np.random.uniform', (['(-0.01)', '(0.01)', 'embeddings_size'], {}), '(-0.01, 0.01, embeddings_size)\n', (4859, 4889), True, 'import numpy as np\n'), ((11996, 12024), 'zipfile.is_zipfile', 'zipfile.is_zipfile', (['filepath'], {}), '(filepath)\n', (12014, 12024), False, 'import zipfile\n'), ((9001, 9036), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (9025, 9036), False, 'import json\n'), ((11930, 11957), 'tarfile.open', 'tarfile.open', (['filepath', '"""r"""'], {}), "(filepath, 'r')\n", (11942, 11957), False, 'import tarfile\n'), ((12051, 12076), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filepath'], {}), '(filepath)\n', (12066, 12076), False, 'import zipfile\n')] |
import numpy as np
def intensity(I, r, rad):
    """Segment an intensity map into sunspot/umbra/penumbra masks + filling factors.

    Args:
        I: 2-D continuum intensity image, normalised so the quiet Sun is ~1.0.
           NaNs (presumably off-disk pixels -- TODO confirm) are zeroed
           in place, so the caller's array is modified.
        r: Radius threshold (same units as *rad*).
        rad: 2-D array of radial distances, same shape as *I*.

    Returns:
        (mask_s, mask_u, mask_p, sff, uff, pff): sunspot, umbra and penumbra
        masks plus their filling factors relative to the on-disk area.
    """
    # Width of the quiet-Sun intensity distribution, from non-NaN pixels.
    # Must be computed before NaNs are zeroed below.
    sigma = np.sqrt(np.mean((1.0 - I[np.where(np.logical_not(np.isnan(I)))])**2.0))
    I[np.where(np.isnan(I))] = 0.0
    Iu = 0.53  # umbra/penumbra intensity boundary
    # Generalised: masks match the input shape instead of being hard-coded
    # to 4096x4096 (backward compatible for 4096x4096 inputs).
    mask_s = np.zeros(I.shape)
    mask_u = np.zeros(I.shape)
    mask_p = np.zeros(I.shape)
    mask_s[np.where((rad <= r) & (I <= 1.0 - 3.0 * sigma) & (I != 0.0))] = 1.0
    mask_u[np.where((rad <= r) & (I <= Iu) & (I != 0.0))] = 1.0
    mask_p[np.where((rad <= r) & (I > Iu) & (I <= 1.0 - 3.0 * sigma) & (I != 0.0))] = 0.5
    # Filling factors: fraction of on-disk pixels flagged by each mask.
    sff = len(mask_s[np.where(mask_s == 1.0)]) / len(mask_s[np.where(rad <= r)])
    uff = len(mask_u[np.where(mask_u == 1.0)]) / len(mask_u[np.where(rad <= r)])
    pff = len(mask_p[np.where(mask_p == 0.5)]) / len(mask_p[np.where(rad <= r)])
    return mask_s, mask_u, mask_p, sff, uff, pff
def magnetic_field(B, r, rad):
    """Mask on-disk pixels whose field strength exceeds the 3-sigma noise level.

    Args:
        B: 2-D magnetogram. NaNs (presumably off-disk pixels -- TODO confirm)
           are zeroed in place, so the caller's array is modified.
        r: Radius threshold (same units as *rad*).
        rad: 2-D array of radial distances, same shape as *B*.

    Returns:
        Binary mask: 1.0 where |B| >= 3*sigma inside the disk, else 0.0.
    """
    B[np.where(np.isnan(B))] = 0.0
    sigma = 10.0  # assumed fixed noise level -- TODO confirm units/source
    # Generalised: mask matches the input shape instead of being hard-coded
    # to 4096x4096 (backward compatible for 4096x4096 inputs).
    mask = np.zeros(B.shape)
    mask[np.where((rad <= r) & ((B <= -3.0 * sigma) | (B >= 3.0 * sigma)) & (B != 0.0))] = 1.0
    return mask
| [
"numpy.where",
"numpy.zeros",
"numpy.isnan"
] | [((259, 281), 'numpy.zeros', 'np.zeros', (['(4096, 4096)'], {}), '((4096, 4096))\n', (267, 281), True, 'import numpy as np\n'), ((295, 317), 'numpy.zeros', 'np.zeros', (['(4096, 4096)'], {}), '((4096, 4096))\n', (303, 317), True, 'import numpy as np\n'), ((331, 353), 'numpy.zeros', 'np.zeros', (['(4096, 4096)'], {}), '((4096, 4096))\n', (339, 353), True, 'import numpy as np\n'), ((982, 1004), 'numpy.zeros', 'np.zeros', (['(4096, 4096)'], {}), '((4096, 4096))\n', (990, 1004), True, 'import numpy as np\n'), ((366, 426), 'numpy.where', 'np.where', (['((rad <= r) & (I <= 1.0 - 3.0 * sigma) & (I != 0.0))'], {}), '((rad <= r) & (I <= 1.0 - 3.0 * sigma) & (I != 0.0))\n', (374, 426), True, 'import numpy as np\n'), ((446, 491), 'numpy.where', 'np.where', (['((rad <= r) & (I <= Iu) & (I != 0.0))'], {}), '((rad <= r) & (I <= Iu) & (I != 0.0))\n', (454, 491), True, 'import numpy as np\n'), ((511, 582), 'numpy.where', 'np.where', (['((rad <= r) & (I > Iu) & (I <= 1.0 - 3.0 * sigma) & (I != 0.0))'], {}), '((rad <= r) & (I > Iu) & (I <= 1.0 - 3.0 * sigma) & (I != 0.0))\n', (519, 582), True, 'import numpy as np\n'), ((1015, 1093), 'numpy.where', 'np.where', (['((rad <= r) & ((B <= -3.0 * sigma) | (B >= 3.0 * sigma)) & (B != 0.0))'], {}), '((rad <= r) & ((B <= -3.0 * sigma) | (B >= 3.0 * sigma)) & (B != 0.0))\n', (1023, 1093), True, 'import numpy as np\n'), ((210, 221), 'numpy.isnan', 'np.isnan', (['I'], {}), '(I)\n', (218, 221), True, 'import numpy as np\n'), ((932, 943), 'numpy.isnan', 'np.isnan', (['B'], {}), '(B)\n', (940, 943), True, 'import numpy as np\n'), ((612, 635), 'numpy.where', 'np.where', (['(mask_s == 1.0)'], {}), '(mask_s == 1.0)\n', (620, 635), True, 'import numpy as np\n'), ((651, 669), 'numpy.where', 'np.where', (['(rad <= r)'], {}), '(rad <= r)\n', (659, 669), True, 'import numpy as np\n'), ((693, 716), 'numpy.where', 'np.where', (['(mask_u == 1.0)'], {}), '(mask_u == 1.0)\n', (701, 716), True, 'import numpy as np\n'), ((732, 750), 'numpy.where', 'np.where', 
(['(rad <= r)'], {}), '(rad <= r)\n', (740, 750), True, 'import numpy as np\n'), ((774, 797), 'numpy.where', 'np.where', (['(mask_p == 0.5)'], {}), '(mask_p == 0.5)\n', (782, 797), True, 'import numpy as np\n'), ((813, 831), 'numpy.where', 'np.where', (['(rad <= r)'], {}), '(rad <= r)\n', (821, 831), True, 'import numpy as np\n'), ((171, 182), 'numpy.isnan', 'np.isnan', (['I'], {}), '(I)\n', (179, 182), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
import subprocess
import numpy as np
import pandas as pd
import os
import fire
from keras.layers import Input, Embedding
from keras.layers import Flatten, Dense, concatenate, Dropout, Reshape
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2, l1_l2
from sklearn.preprocessing import Imputer, PolynomialFeatures, LabelEncoder, MinMaxScaler
def _download():
    """
    Download the UCI Adult census data and return (train_df, test_df).

    Files are fetched once and cached as ``train.csv`` / ``test.csv`` in the
    working directory; subsequent calls reuse the local copies.
    """
    COLUMNS = [
        'age', 'workclass', 'fnlwgt', 'education', 'education_num',
        'marital_status', 'occupation', 'relationship', 'race', 'gender',
        'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
        'income_bracket']
    train_data = "train.csv"
    train_data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
    test_data = "test.csv"
    test_data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
    data_src = {train_data: train_data_url,
                test_data: test_data_url}
    for data_file, data_url in data_src.items():
        if not os.path.exists(data_file):
            print("Downloading {} ============".format(data_file))
            # Use the stdlib downloader instead of shelling out to `wget`,
            # which is not installed on every platform and whose exit status
            # was previously ignored. urlretrieve raises on failure.
            import urllib.request
            urllib.request.urlretrieve(data_url, data_file)
        else:
            print("{} found so we skip ==========".format(data_file))
    train_data_df = pd.read_csv(train_data,
                                names=COLUMNS,
                                skipinitialspace=True)
    # The test file has one junk header line, hence skiprows=1.
    test_data_df = pd.read_csv(test_data,
                               names=COLUMNS,
                               skipinitialspace=True,
                               skiprows=1)
    return train_data_df, test_data_df
def _categorical_input(n_in, n_out, reg):
    """Build an int64 input tensor and an L2-regularised embedding of it."""
    input_tensor = Input(shape=(1,), dtype='int64')
    embedding = Embedding(n_in, n_out, input_length=1,
                          embeddings_regularizer=l2(reg))
    return input_tensor, embedding(input_tensor)
def _numerical_input():
    """Build a scalar float32 input tensor reshaped to (1, 1) for concatenation."""
    input_tensor = Input(shape=(1,), dtype='float32')
    reshaped = Reshape((1, 1))(input_tensor)
    return input_tensor, reshaped
def _clean_target(x):
if x[-1] == ".":
return x[:-1]
else:
return x
def prepare_data():
    """
    Prepare data for training.

    Downloads the Adult dataset, keeps the feature columns of interest,
    label-encodes every categorical column (and the target), and merges
    train/test into one frame flagged by IS_TRAIN.

    Returns:
        (df_all, TRAIN_FLAG, TARGET, CAT_FEATURES, NUM_FEATURES)
    """
    train_data, test_data = _download()
    CAT_FEATURES = ["education", "workclass", "marital_status",
                    "occupation", "relationship", "gender", "native_country",
                    "race"]
    NUM_FEATURES = ["age", "hours_per_week", "capital_gain", "capital_loss"]
    TARGET = "income_bracket"
    columns_used = CAT_FEATURES + NUM_FEATURES
    columns_used.append(TARGET)
    TRAIN_FLAG = "IS_TRAIN"
    # .copy() so that adding the flag column below mutates an owned frame
    # instead of a slice (avoids pandas' SettingWithCopyWarning).
    train_data = train_data[columns_used].copy()
    test_data = test_data[columns_used].copy()
    train_data[TRAIN_FLAG] = 1
    test_data[TRAIN_FLAG] = 0
    df_all = pd.concat([train_data, test_data], ignore_index=True)
    # Test-split labels carry a trailing '.' (e.g. '>50K.') -- normalise.
    df_all[TARGET] = df_all[TARGET].apply(_clean_target)
    le = LabelEncoder()
    for col in CAT_FEATURES:
        df_all[col] = le.fit_transform(df_all[col])
    df_all[TARGET] = le.fit_transform(df_all[TARGET])
    return df_all, TRAIN_FLAG, TARGET, CAT_FEATURES, NUM_FEATURES
def deep_model():
    """Train and evaluate the "deep" half of wide&deep: embeddings for the
    categorical features, raw numeric inputs, and a small dense net on top."""
    MODEL_SETTING = {
        "DIM": 5,        # embedding dimension per categorical feature
        "REG": 1e-4,     # L2 strength on the embeddings
        "BATCH_SIZE": 64,
        "EPOCHS": 10
    }
    data, TRAIN_FLAG, TARGET, CAT_FEATURES, NUM_FEATURES = prepare_data()
    # Split back into train/test; the model takes one input array per feature.
    train_x_df = data.loc[data[TRAIN_FLAG] == 1].drop(columns=[TRAIN_FLAG, TARGET])
    train_x = [train_x_df[_] for _ in list(train_x_df.columns)]
    train_y = np.array(data.loc[data[TRAIN_FLAG] == 1][TARGET].values).reshape(-1, 1)
    test_x_df = data.loc[data[TRAIN_FLAG] == 0].drop(columns=[TRAIN_FLAG, TARGET])
    test_x = [test_x_df[_] for _ in list(test_x_df.columns)]
    test_y = np.array(data.loc[data[TRAIN_FLAG] == 0][TARGET].values).reshape(-1, 1)
    # One (input, embedding) pair per categorical feature; the embedding
    # width is fixed, the vocabulary size comes from the data.
    embedding_tensors = []
    for _ in CAT_FEATURES:
        number_input = data[_].nunique()
        tensor_input, tensor_build = _categorical_input(
            number_input, MODEL_SETTING["DIM"], MODEL_SETTING["REG"]
        )
        embedding_tensors.append((tensor_input, tensor_build))
    continuous_tensors = []
    for _ in NUM_FEATURES:
        tensor_input, tensor_build = _numerical_input()
        continuous_tensors.append((tensor_input, tensor_build))
    input_layer = [_[0] for _ in embedding_tensors]
    input_layer += [_[0] for _ in continuous_tensors]
    input_embed = [_[1] for _ in embedding_tensors]
    input_embed += [_[1] for _ in continuous_tensors]
    x = concatenate(input_embed, axis=-1)
    x = Flatten()(x)
    x = BatchNormalization()(x)
    x = Dense(200, activation='relu', kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(x)
    x = Dropout(0.5)(x)
    x = Dense(200, activation='relu')(x)
    x = Dense(1, activation="sigmoid")(x)
    # Local name 'model' instead of shadowing the enclosing function name.
    model = Model(input_layer, x)
    model.compile(optimizer="adam",
                  loss="binary_crossentropy",
                  metrics=["acc"])
    model.fit(train_x, train_y,
              batch_size=MODEL_SETTING["BATCH_SIZE"],
              epochs=MODEL_SETTING["EPOCHS"],
              validation_data=(test_x, test_y))
    eval_res = model.evaluate(test_x, test_y)
    print("eval results: ", eval_res)
def wide_model():
    """Train and evaluate the "wide" half of wide&deep: one-hot categorical
    features plus min-max scaled numerics feeding a single logistic output."""
    MODEL_SETTING = {
        "DIM": 5,
        "REG": 1e-4,
        "BATCH_SIZE": 64,
        "EPOCHS": 20}
    data, TRAIN_FLAG, TARGET, CAT_FEATURES, NUM_FEATURES = prepare_data()
    data = pd.get_dummies(data, columns=[_ for _ in CAT_FEATURES])
    train_x_df = data.loc[data[TRAIN_FLAG] == 1].drop(columns=[TRAIN_FLAG, TARGET])
    train_x = train_x_df.values
    train_y = np.array(data.loc[data[TRAIN_FLAG] == 1][TARGET].values).reshape(-1, 1)
    test_x_df = data.loc[data[TRAIN_FLAG] == 0].drop(columns=[TRAIN_FLAG, TARGET])
    test_x = test_x_df.values
    test_y = np.array(data.loc[data[TRAIN_FLAG] == 0][TARGET].values).reshape(-1, 1)
    scaler = MinMaxScaler()
    train_x = scaler.fit_transform(train_x)
    # Fit the scaler on the training data only; re-fitting on the test set
    # (as the original did) leaks test statistics into preprocessing.
    test_x = scaler.transform(test_x)
    input_layer = Input(shape=(train_x.shape[1],), dtype='float32')
    # A binary-crossentropy output must be a probability: sigmoid, not relu.
    x = Dense(train_y.shape[1], activation="sigmoid")(input_layer)
    model = Model(input_layer, x)
    model.compile(optimizer="adam",
                  loss="binary_crossentropy",
                  metrics=["acc"])
    model.fit(train_x, train_y,
              epochs=MODEL_SETTING["EPOCHS"],
              batch_size=MODEL_SETTING["BATCH_SIZE"],
              validation_data=(test_x, test_y))
    results = model.evaluate(test_x, test_y)
    print("\n", results)
# Expose the three entry points as CLI subcommands via python-fire,
# e.g. `python wide_deep.py deep`.
if __name__ == "__main__":
    fire.Fire({
        "prepare": prepare_data,
        "deep": deep_model,
        "wide": wide_model
    })
| [
"os.path.exists",
"sklearn.preprocessing.LabelEncoder",
"keras.regularizers.l1_l2",
"keras.layers.Flatten",
"pandas.read_csv",
"fire.Fire",
"keras.layers.normalization.BatchNormalization",
"keras.layers.Dense",
"keras.layers.Input",
"keras.layers.Dropout",
"keras.layers.concatenate",
"keras.mo... | [((1572, 1633), 'pandas.read_csv', 'pd.read_csv', (['train_data'], {'names': 'COLUMNS', 'skipinitialspace': '(True)'}), '(train_data, names=COLUMNS, skipinitialspace=True)\n', (1583, 1633), True, 'import pandas as pd\n'), ((1718, 1790), 'pandas.read_csv', 'pd.read_csv', (['test_data'], {'names': 'COLUMNS', 'skipinitialspace': '(True)', 'skiprows': '(1)'}), '(test_data, names=COLUMNS, skipinitialspace=True, skiprows=1)\n', (1729, 1790), True, 'import pandas as pd\n'), ((1978, 2010), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int64"""'}), "(shape=(1,), dtype='int64')\n", (1983, 2010), False, 'from keras.layers import Input, Embedding\n'), ((2139, 2173), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""float32"""'}), "(shape=(1,), dtype='float32')\n", (2144, 2173), False, 'from keras.layers import Input, Embedding\n'), ((3029, 3082), 'pandas.concat', 'pd.concat', (['[train_data, test_data]'], {'ignore_index': '(True)'}), '([train_data, test_data], ignore_index=True)\n', (3038, 3082), True, 'import pandas as pd\n'), ((3184, 3198), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3196, 3198), False, 'from sklearn.preprocessing import Imputer, PolynomialFeatures, LabelEncoder, MinMaxScaler\n'), ((4937, 4970), 'keras.layers.concatenate', 'concatenate', (['input_embed'], {'axis': '(-1)'}), '(input_embed, axis=-1)\n', (4948, 4970), False, 'from keras.layers import Flatten, Dense, concatenate, Dropout, Reshape\n'), ((5234, 5255), 'keras.models.Model', 'Model', (['input_layer', 'x'], {}), '(input_layer, x)\n', (5239, 5255), False, 'from keras.models import Model\n'), ((5897, 5952), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': '[_ for _ in CAT_FEATURES]'}), '(data, columns=[_ for _ in CAT_FEATURES])\n', (5911, 5952), True, 'import pandas as pd\n'), ((6507, 6521), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (6519, 6521), False, 'from 
sklearn.preprocessing import Imputer, PolynomialFeatures, LabelEncoder, MinMaxScaler\n'), ((6628, 6677), 'keras.layers.Input', 'Input', ([], {'shape': '(train_x.shape[1],)', 'dtype': '"""float32"""'}), "(shape=(train_x.shape[1],), dtype='float32')\n", (6633, 6677), False, 'from keras.layers import Input, Embedding\n'), ((6759, 6780), 'keras.models.Model', 'Model', (['input_layer', 'x'], {}), '(input_layer, x)\n', (6764, 6780), False, 'from keras.models import Model\n'), ((7224, 7300), 'fire.Fire', 'fire.Fire', (["{'prepare': prepare_data, 'deep': deep_model, 'wide': wide_model}"], {}), "({'prepare': prepare_data, 'deep': deep_model, 'wide': wide_model})\n", (7233, 7300), False, 'import fire\n'), ((4979, 4988), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4986, 4988), False, 'from keras.layers import Flatten, Dense, concatenate, Dropout, Reshape\n'), ((5000, 5020), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5018, 5020), False, 'from keras.layers.normalization import BatchNormalization\n'), ((5117, 5129), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5124, 5129), False, 'from keras.layers import Flatten, Dense, concatenate, Dropout, Reshape\n'), ((5141, 5170), 'keras.layers.Dense', 'Dense', (['(200)'], {'activation': '"""relu"""'}), "(200, activation='relu')\n", (5146, 5170), False, 'from keras.layers import Flatten, Dense, concatenate, Dropout, Reshape\n'), ((5182, 5212), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5187, 5212), False, 'from keras.layers import Flatten, Dense, concatenate, Dropout, Reshape\n'), ((6686, 6728), 'keras.layers.Dense', 'Dense', (['train_y.shape[1]'], {'activation': '"""relu"""'}), "(train_y.shape[1], activation='relu')\n", (6691, 6728), False, 'from keras.layers import Flatten, Dense, concatenate, Dropout, Reshape\n'), ((1183, 1208), 'os.path.exists', 'os.path.exists', (['data_file'], {}), 
'(data_file)\n', (1197, 1208), False, 'import os\n'), ((2190, 2205), 'keras.layers.Reshape', 'Reshape', (['(1, 1)'], {}), '((1, 1))\n', (2197, 2205), False, 'from keras.layers import Flatten, Dense, concatenate, Dropout, Reshape\n'), ((3917, 3973), 'numpy.array', 'np.array', (['data.loc[data[TRAIN_FLAG] == 1][TARGET].values'], {}), '(data.loc[data[TRAIN_FLAG] == 1][TARGET].values)\n', (3925, 3973), True, 'import numpy as np\n'), ((4146, 4202), 'numpy.array', 'np.array', (['data.loc[data[TRAIN_FLAG] == 0][TARGET].values'], {}), '(data.loc[data[TRAIN_FLAG] == 0][TARGET].values)\n', (4154, 4202), True, 'import numpy as np\n'), ((6159, 6215), 'numpy.array', 'np.array', (['data.loc[data[TRAIN_FLAG] == 1][TARGET].values'], {}), '(data.loc[data[TRAIN_FLAG] == 1][TARGET].values)\n', (6167, 6215), True, 'import numpy as np\n'), ((6421, 6477), 'numpy.array', 'np.array', (['data.loc[data[TRAIN_FLAG] == 0][TARGET].values'], {}), '(data.loc[data[TRAIN_FLAG] == 0][TARGET].values)\n', (6429, 6477), True, 'import numpy as np\n'), ((5081, 5104), 'keras.regularizers.l1_l2', 'l1_l2', ([], {'l1': '(0.01)', 'l2': '(0.01)'}), '(l1=0.01, l2=0.01)\n', (5086, 5104), False, 'from keras.regularizers import l2, l1_l2\n'), ((2089, 2096), 'keras.regularizers.l2', 'l2', (['reg'], {}), '(reg)\n', (2091, 2096), False, 'from keras.regularizers import l2, l1_l2\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
import numpy as np
from lib.model.mlp import MLP
class DropoutRegressor(nn.Module):
    """MLP reward proxy whose uncertainty estimate comes from MC-dropout:
    dropout stays active at inference and several forward passes are sampled.

    Fixes vs. the original: ``load`` now deserialises the checkpoint with
    ``torch.load`` before calling ``load_state_dict`` (it previously passed
    the file path, which ``load_state_dict`` cannot consume), and the
    one-hot featurization that was duplicated in three places lives in a
    single ``_featurize`` helper.
    """

    def __init__(self, args, tokenizer):
        super().__init__()
        self.args = args
        self.num_tokens = args.vocab_size
        self.max_len = args.max_len
        self.tokenizer = tokenizer
        self.init_model()
        self.sigmoid = nn.Sigmoid()
        self.proxy_num_iterations = args.proxy_num_iterations
        self.device = args.device
        # End-of-sequence token id depends on the task's vocabulary.
        if args.task == "amp":
            self.eos_tok = 0
        elif args.task == "tfbind":
            self.eos_tok = 4

    def init_model(self):
        """(Re)build the underlying MLP and its Adam optimizer."""
        if self.args.proxy_arch == "mlp":
            self.model = MLP(num_tokens=self.num_tokens,
                             num_outputs=1,
                             num_hid=self.args.proxy_num_hid,
                             num_layers=self.args.proxy_num_layers,
                             dropout=self.args.proxy_dropout,
                             max_len=self.max_len)
        self.model.to(self.args.device)
        self.opt = torch.optim.Adam(self.model.parameters(), self.args.proxy_learning_rate,
                                    weight_decay=self.args.proxy_L2)

    def _featurize(self, x):
        """One-hot encode a batch of token-id sequences and flatten for the MLP.

        The extra class in ``one_hot`` (then sliced off with ``[:, :, :-1]``)
        accounts for the padding id; sequences shorter than ``max_len`` are
        zero-padded on the right.
        """
        inp_x = F.one_hot(x, num_classes=self.num_tokens+1)[:, :, :-1].to(torch.float32)
        inp = torch.zeros(x.shape[0], self.max_len, self.num_tokens)
        inp[:, :inp_x.shape[1], :] = inp_x
        return inp.reshape(x.shape[0], -1).to(self.device).detach()

    def fit(self, data, reset=False):
        """Train the proxy on ``data`` with early stopping on its validation set.

        Args:
            data: Dataset object exposing ``sample(n)`` and ``validation_set()``.
            reset: When True, reinitialise the model before training.
        """
        losses = []
        test_losses = []
        best_params = None
        best_loss = 1e6
        early_stop_tol = self.args.proxy_early_stop_tol
        early_stop_count = 0
        epoch_length = 100
        if reset:
            self.init_model()
        for it in tqdm(range(self.proxy_num_iterations), disable=False):
            x, y = data.sample(self.args.proxy_num_per_minibatch)
            x = self.tokenizer.process(x).to(self.device)
            if self.args.proxy_arch == "mlp":
                x = self._featurize(x)
            y = torch.tensor(y, device=self.device, dtype=torch.float).reshape(-1)
            if self.args.proxy_arch == "mlp":
                output = self.model(x, None).squeeze(1)
            loss = (output - y).pow(2).mean()
            loss.backward()
            self.opt.step()
            self.opt.zero_grad()
            losses.append(loss.item())
            self.args.logger.add_scalar("proxy_train_loss", loss.item())
            if not it % epoch_length:
                # Periodic validation pass in chunks of 256 sequences.
                vx, vy = data.validation_set()
                vlosses = []
                for j in range(len(vx) // 256):
                    x = self.tokenizer.process(vx[j*256:(j+1)*256]).to(self.device)
                    if self.args.proxy_arch == "mlp":
                        x = self._featurize(x)
                    y = torch.tensor(vy[j*256:(j+1)*256], device=self.device, dtype=torch.float).reshape(-1)
                    if self.args.proxy_arch == "mlp":
                        output = self.model(x, None).squeeze(1)
                    loss = (output - y).pow(2)
                    vlosses.append(loss.sum().item())
                test_loss = np.sum(vlosses) / len(vx)
                test_losses.append(test_loss)
                self.args.logger.add_scalar("proxy_test_loss", test_loss)
                if test_loss < best_loss:
                    best_loss = test_loss
                    best_params = [i.data.cpu().numpy() for i in self.model.parameters()]
                    early_stop_count = 0
                else:
                    early_stop_count += 1
                if early_stop_count >= early_stop_tol:
                    print(best_loss)
                    print('early stopping')
                    break
        if self.args.proxy_early_stop_to_best_params:
            # Put best parameters back in
            for i, besti in zip(self.model.parameters(), best_params):
                i.data = torch.tensor(besti).to(self.device)
        self.args.logger.save(self.args.save_path, self.args)
        return {}

    def forward(self, curr_x, uncertainty_call=False):
        """Score a batch of sequences.

        With ``uncertainty_call`` the model is left in train mode (dropout
        active) and the result keeps a leading sample dimension, so each call
        yields one MC-dropout sample; otherwise a deterministic eval-mode
        prediction is returned.
        """
        x = self.tokenizer.process(curr_x).to(self.device)
        if self.args.proxy_arch == "mlp":
            x = self._featurize(x)
        if uncertainty_call:
            if self.args.proxy_arch == "mlp":
                ys = self.model(x, None).unsqueeze(0)
        else:
            self.model.eval()
            if self.args.proxy_arch == "mlp":
                ys = self.model(x, None)
            self.model.train()
        return ys

    def forward_with_uncertainty(self, x):
        """Mean and std over ``proxy_num_dropout_samples`` MC-dropout passes."""
        self.model.train()
        with torch.no_grad():
            outputs = torch.cat([self.forward(x, True) for _ in range(self.args.proxy_num_dropout_samples)])
        return outputs.mean(dim=0), outputs.std(dim=0)

    def save(self, path):
        """Serialise the module's state dict to ``path``."""
        torch.save(self.state_dict(), path)

    def load(self, path):
        """Restore a state dict previously written by ``save``."""
        # load_state_dict expects a state dict, not a path: deserialise first.
        self.load_state_dict(torch.load(path, map_location=self.device))
class EnsembleRegressor(nn.Module):
    """Reward proxy backed by an ensemble of MLPs; uncertainty is the
    across-ensemble standard deviation of the member predictions.

    Fixes vs. the original: the best-parameter restore loop reused the name
    ``i`` for both the model index and the parameter tensor, corrupting the
    index for every model after the first; and ``load`` now deserialises the
    checkpoint with ``torch.load`` before ``load_state_dict``.
    """

    def __init__(self, args, tokenizer):
        super().__init__()
        self.args = args
        self.num_tokens = args.vocab_size
        self.max_len = args.max_len
        self.tokenizer = tokenizer
        self.init_model()
        self.sigmoid = nn.Sigmoid()
        self.proxy_num_iterations = args.proxy_num_iterations
        self.device = args.device
        # End-of-sequence token id depends on the task's vocabulary.
        if args.task == "amp":
            self.eos_tok = 0
        elif args.task == "tfbind":
            self.eos_tok = 4

    def init_model(self):
        """(Re)build the ensemble members and one joint Adam optimizer."""
        if self.args.proxy_arch == "mlp":
            self.models = [MLP(num_tokens=self.num_tokens,
                               num_outputs=1,
                               num_hid=self.args.proxy_num_hid,
                               num_layers=self.args.proxy_num_layers,
                               dropout=self.args.proxy_dropout,
                               max_len=self.max_len) for _ in range(self.args.proxy_num_dropout_samples)]
        [model.to(self.args.device) for model in self.models]
        # One optimizer over the concatenated parameter lists of all members.
        self.params = sum([list(model.parameters()) for model in self.models], [])
        self.opt = torch.optim.Adam(self.params, self.args.proxy_learning_rate,
                                    weight_decay=self.args.proxy_L2)

    def fit(self, data, reset=False):
        """Train all ensemble members jointly on the MSE of the mean prediction.

        Args:
            data: Dataset object exposing ``sample(n)`` and ``validation_set()``.
            reset: When True, reinitialise the ensemble before training.
        """
        losses = []
        test_losses = []
        best_params = None
        best_loss = 1e6
        early_stop_tol = 100  # NOTE: hard-coded here, unlike DropoutRegressor
        early_stop_count = 0
        epoch_length = 100
        if reset:
            self.init_model()
        for it in tqdm(range(self.proxy_num_iterations), disable=True):
            x, y = data.sample(self.args.proxy_num_per_minibatch)
            x = self.tokenizer.process(x).to(self.device)
            y = torch.tensor(y, device=self.device, dtype=torch.float).reshape(-1)
            if self.args.proxy_arch == "mlp":
                output = self._call_models(x).mean(0)
            loss = (output - y).pow(2).mean()
            loss.backward()
            self.opt.step()
            self.opt.zero_grad()
            losses.append(loss.item())
            self.args.logger.add_scalar("proxy_train_loss", loss.item())
            if not it % epoch_length:
                # Periodic validation pass in chunks of 256 sequences.
                vx, vy = data.validation_set()
                vlosses = []
                for j in range(len(vx) // 256):
                    x = self.tokenizer.process(vx[j*256:(j+1)*256]).to(self.device)
                    y = torch.tensor(vy[j*256:(j+1)*256], device=self.device, dtype=torch.float).reshape(-1)
                    if self.args.proxy_arch == "mlp":
                        output = self._call_models(x).mean(0)
                    loss = (output - y).pow(2)
                    vlosses.append(loss.sum().item())
                test_loss = np.sum(vlosses) / len(vx)
                test_losses.append(test_loss)
                self.args.logger.add_scalar("proxy_test_loss", test_loss)
                if test_loss < best_loss:
                    best_loss = test_loss
                    best_params = [[i.data.cpu().numpy() for i in model.parameters()] for model in self.models]
                    early_stop_count = 0
                else:
                    early_stop_count += 1
                if early_stop_count >= early_stop_tol:
                    print(best_loss)
                    print('early stopping')
                    break
        if self.args.proxy_early_stop_to_best_params:
            # Restore the best snapshot. The original reused the loop
            # variable `i` for both the model index and the parameter,
            # breaking the restore for every model after the first.
            for m_idx, model in enumerate(self.models):
                for param, best in zip(model.parameters(), best_params[m_idx]):
                    param.data = torch.tensor(best).to(self.device)
        self.args.logger.save(self.args.save_path, self.args)
        return {}

    def _call_models(self, x):
        """Run every ensemble member on one-hot features of ``x``.

        Returns a tensor with a leading (n_models) dimension.
        """
        if self.args.proxy_arch == "mlp":
            inp_x = F.one_hot(x, num_classes=self.num_tokens+1)[:, :, :-1].to(torch.float32)
            inp = torch.zeros(x.shape[0], self.max_len, self.num_tokens)
            inp[:, :inp_x.shape[1], :] = inp_x
            x = inp.reshape(x.shape[0], -1).to(self.device).detach()
        if self.args.proxy_arch == "mlp":
            ys = torch.cat([model(x, None).unsqueeze(0) for model in self.models])
        return ys

    def forward_with_uncertainty(self, x):
        """Ensemble mean and standard deviation of the member predictions."""
        with torch.no_grad():
            outputs = self._call_models(x)
        return outputs.mean(dim=0), outputs.std(dim=0)

    def save(self, path):
        """Serialise the module's state dict to ``path``."""
        torch.save(self.state_dict(), path)

    def load(self, path):
        """Restore a state dict previously written by ``save``."""
        # load_state_dict expects a state dict, not a path: deserialise first.
        self.load_state_dict(torch.load(path, map_location=self.device))
| [
"torch.optim.Adam",
"torch.nn.Sigmoid",
"torch.tensor",
"numpy.sum",
"torch.nn.functional.one_hot",
"lib.model.mlp.MLP",
"torch.no_grad",
"torch.zeros"
] | [((431, 443), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (441, 443), True, 'import torch.nn as nn\n'), ((5975, 5987), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5985, 5987), True, 'import torch.nn as nn\n'), ((6865, 6963), 'torch.optim.Adam', 'torch.optim.Adam', (['self.params', 'self.args.proxy_learning_rate'], {'weight_decay': 'self.args.proxy_L2'}), '(self.params, self.args.proxy_learning_rate, weight_decay=\n self.args.proxy_L2)\n', (6881, 6963), False, 'import torch\n'), ((768, 951), 'lib.model.mlp.MLP', 'MLP', ([], {'num_tokens': 'self.num_tokens', 'num_outputs': '(1)', 'num_hid': 'self.args.proxy_num_hid', 'num_layers': 'self.args.proxy_num_layers', 'dropout': 'self.args.proxy_dropout', 'max_len': 'self.max_len'}), '(num_tokens=self.num_tokens, num_outputs=1, num_hid=self.args.\n proxy_num_hid, num_layers=self.args.proxy_num_layers, dropout=self.args\n .proxy_dropout, max_len=self.max_len)\n', (771, 951), False, 'from lib.model.mlp import MLP\n'), ((4791, 4845), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.max_len', 'self.num_tokens'], {}), '(x.shape[0], self.max_len, self.num_tokens)\n', (4802, 4845), False, 'import torch\n'), ((5363, 5378), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5376, 5378), False, 'import torch\n'), ((9698, 9752), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.max_len', 'self.num_tokens'], {}), '(x.shape[0], self.max_len, self.num_tokens)\n', (9709, 9752), False, 'import torch\n'), ((10073, 10088), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10086, 10088), False, 'import torch\n'), ((2001, 2055), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.max_len', 'self.num_tokens'], {}), '(x.shape[0], self.max_len, self.num_tokens)\n', (2012, 2055), False, 'import torch\n'), ((6314, 6497), 'lib.model.mlp.MLP', 'MLP', ([], {'num_tokens': 'self.num_tokens', 'num_outputs': '(1)', 'num_hid': 'self.args.proxy_num_hid', 'num_layers': 'self.args.proxy_num_layers', 'dropout': 
'self.args.proxy_dropout', 'max_len': 'self.max_len'}), '(num_tokens=self.num_tokens, num_outputs=1, num_hid=self.args.\n proxy_num_hid, num_layers=self.args.proxy_num_layers, dropout=self.args\n .proxy_dropout, max_len=self.max_len)\n', (6317, 6497), False, 'from lib.model.mlp import MLP\n'), ((2196, 2250), 'torch.tensor', 'torch.tensor', (['y'], {'device': 'self.device', 'dtype': 'torch.float'}), '(y, device=self.device, dtype=torch.float)\n', (2208, 2250), False, 'import torch\n'), ((3626, 3641), 'numpy.sum', 'np.sum', (['vlosses'], {}), '(vlosses)\n', (3632, 3641), True, 'import numpy as np\n'), ((7468, 7522), 'torch.tensor', 'torch.tensor', (['y'], {'device': 'self.device', 'dtype': 'torch.float'}), '(y, device=self.device, dtype=torch.float)\n', (7480, 7522), False, 'import torch\n'), ((8532, 8547), 'numpy.sum', 'np.sum', (['vlosses'], {}), '(vlosses)\n', (8538, 8547), True, 'import numpy as np\n'), ((3073, 3127), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'self.max_len', 'self.num_tokens'], {}), '(x.shape[0], self.max_len, self.num_tokens)\n', (3084, 3127), False, 'import torch\n'), ((4407, 4426), 'torch.tensor', 'torch.tensor', (['besti'], {}), '(besti)\n', (4419, 4426), False, 'import torch\n'), ((4700, 4745), 'torch.nn.functional.one_hot', 'F.one_hot', (['x'], {'num_classes': '(self.num_tokens + 1)'}), '(x, num_classes=self.num_tokens + 1)\n', (4709, 4745), True, 'import torch.nn.functional as F\n'), ((9607, 9652), 'torch.nn.functional.one_hot', 'F.one_hot', (['x'], {'num_classes': '(self.num_tokens + 1)'}), '(x, num_classes=self.num_tokens + 1)\n', (9616, 9652), True, 'import torch.nn.functional as F\n'), ((1906, 1951), 'torch.nn.functional.one_hot', 'F.one_hot', (['x'], {'num_classes': '(self.num_tokens + 1)'}), '(x, num_classes=self.num_tokens + 1)\n', (1915, 1951), True, 'import torch.nn.functional as F\n'), ((3292, 3370), 'torch.tensor', 'torch.tensor', (['vy[j * 256:(j + 1) * 256]'], {'device': 'self.device', 'dtype': 'torch.float'}), '(vy[j * 
256:(j + 1) * 256], device=self.device, dtype=torch.float)\n', (3304, 3370), False, 'import torch\n'), ((8179, 8257), 'torch.tensor', 'torch.tensor', (['vy[j * 256:(j + 1) * 256]'], {'device': 'self.device', 'dtype': 'torch.float'}), '(vy[j * 256:(j + 1) * 256], device=self.device, dtype=torch.float)\n', (8191, 8257), False, 'import torch\n'), ((9393, 9412), 'torch.tensor', 'torch.tensor', (['besti'], {}), '(besti)\n', (9405, 9412), False, 'import torch\n'), ((2970, 3015), 'torch.nn.functional.one_hot', 'F.one_hot', (['x'], {'num_classes': '(self.num_tokens + 1)'}), '(x, num_classes=self.num_tokens + 1)\n', (2979, 3015), True, 'import torch.nn.functional as F\n')] |
#!/usr/bin/env python3
"""
Websocket server forked from https://github.com/Bronkoknorb/PyImageStream
"""
import argparse
import os
import io
import tornado.ioloop
import tornado.web
import tornado.websocket
from PIL import Image
import pygame.camera
import pygame.image
import urllib
from matplotlib import cm
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Input, Dropout, concatenate, BatchNormalization, Activation, Conv2DTranspose
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
import keras.losses
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import sys
import time
import asyncio
# initiating kears model for image detection
def dice_loss(softmax_output, labels, ignore_background=False, square=False):
    """Soft Dice loss between one-hot *labels* and *softmax_output*.

    :param ignore_background: drop channel 0 (background) before computing
    :param square: square both tensors in the denominator only -- note the
        numerator is taken from the unsquared tensors, matching the original
        statement order.
    """
    if ignore_background:
        # Drop the background channel (index 0) on the class axis.
        labels, softmax_output = labels[..., 1:], softmax_output[..., 1:]
    reduce_axes = (0, 1, 2)
    smooth = 1e-7
    # Numerator is computed before any squaring takes place.
    numerator = 2 * tf.reduce_sum(labels * softmax_output, axis=reduce_axes) + smooth
    if square:
        labels = tf.square(labels)
        softmax_output = tf.square(softmax_output)
    denominator = (tf.reduce_sum(labels, axis=reduce_axes)
                   + tf.reduce_sum(softmax_output, axis=reduce_axes) + smooth)
    return 1 - tf.reduce_mean(numerator / denominator)
# Load the trained segmentation model; dice_loss must be registered as a
# custom object because the model was saved with it as its loss.
model = tf.keras.models.load_model('./tensor_model', custom_objects={'dice_loss': dice_loss})
# Command-line configuration for the streaming server.
parser = argparse.ArgumentParser(description='Start the SematicSegmentationDemo server.')
parser.add_argument('--port', default=8888, type=int, help='Web server port (default: 8888)')
parser.add_argument('--url', default="http://192.168.0.2:56000/jpeg", type=str, help='Get Image url')
parser.add_argument('--quality', default=70, type=int, help='JPEG Quality 1 (worst) to 100 (best) (default: 70)')
parser.add_argument('--snippet', default=2, type=int, help='Snippet length in seconds')
args = parser.parse_args()
# Camera class for fetching images
class Camera:
    """Fetches JPEG frames from a remote camera URL and runs the module-level
    segmentation ``model`` over a short snippet of frames."""

    def __init__(self, url, quality, snippet):
        self.camera_domain = url
        self.quality = quality
        self.snippet = snippet
        print("Camera initialized")

    def get_jpeg_image_bytes(self):
        """Capture frames for ``snippet`` seconds, segment them, and return
        ``(jpeg_frames, capture_times)`` where each entry of ``jpeg_frames``
        is a JPEG-encoded prediction mask."""
        start = time.time()
        pred_images, pred_times = [], []
        while True:
            start_gather = time.time()
            with urllib.request.urlopen(self.camera_domain) as url:
                resp = url.read()
            raw = np.asarray(bytearray(resp), dtype="uint8")
            frame = cv2.imdecode(raw, 0)  # decode as grayscale
            frame = cv2.resize(frame, (512, 512))
            frame = frame/255.
            pred_images.append(np.array(frame))
            end_gather = time.time()
            pred_times.append(end_gather - start_gather)
            # Stop once the snippet duration has elapsed.
            if (end_gather - start) > self.snippet:
                break
        pred_images = np.array(pred_images)
        end_gather = time.time()
        print("Total frames ", len(pred_images))
        print("Capture time ", end_gather - start)
        predicted = model.predict(pred_images)
        response = []
        for mask in predicted:
            # Scale the single-channel mask back to 0-255 and encode as JPEG.
            np_img = np.squeeze(mask*255., axis=2)
            pimg = Image.fromarray(np_img).convert('RGB')
            with io.BytesIO() as buffer:
                pimg.save(buffer, "JPEG", quality=self.quality, optimize=True)
                response.append(buffer.getvalue())
        end = time.time()
        print("Total time ", end - start)
        return response, pred_times
# Single shared camera instance configured from the CLI arguments.
camera = Camera(args.url, args.quality, args.snippet)
class ImageWebSocket(tornado.websocket.WebSocketHandler):
    """Streams segmented camera frames to connected websocket clients."""
    clients = set()

    def check_origin(self, origin):
        # Allow access from every origin
        return True

    def open(self):
        ImageWebSocket.clients.add(self)
        print("WebSocket opened from: " + self.request.remote_ip)

    # when a message is received, capture a snippet and stream it back
    def on_message(self, message):
        print(message)
        jpeg_bytes, times = camera.get_jpeg_image_bytes()
        # Prime the client with the first frame before replaying the snippet.
        self.write_message(jpeg_bytes[0], binary=True)
        for i in range(0, len(jpeg_bytes)):
            start = time.time()
            self.write_message(jpeg_bytes[i], binary=True)
            end = time.time()
            # Sleep only for the remaining frame interval.  time.sleep()
            # raises ValueError for negative arguments, which happened
            # whenever sending a frame took longer than its capture time.
            time.sleep(max(0.0, times[i] - (end - start)))

    def on_close(self):
        ImageWebSocket.clients.remove(self)
        print("WebSocket closed from: " + self.request.remote_ip)
# Serve static assets from the ./static directory next to this script.
script_path = os.path.dirname(os.path.realpath(__file__))
static_path = script_path + '/static/'
app = tornado.web.Application([
    (r"/websocket", ImageWebSocket),
    (r"/(.*)", tornado.web.StaticFileHandler, {'path': static_path, 'default_filename': 'index.html'}),
])
app.listen(args.port)
print("Starting server: http://localhost:" + str(args.port) + "/")
# Blocks forever servicing websocket and static-file requests.
tornado.ioloop.IOLoop.current().start()
| [
"PIL.Image.fromarray",
"argparse.ArgumentParser",
"tensorflow.reduce_sum",
"io.BytesIO",
"time.sleep",
"numpy.squeeze",
"os.path.realpath",
"numpy.array",
"tensorflow.keras.models.load_model",
"cv2.imdecode",
"tensorflow.square",
"tensorflow.reduce_mean",
"cv2.resize",
"time.time",
"urll... | [((1450, 1539), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""./tensor_model"""'], {'custom_objects': "{'dice_loss': dice_loss}"}), "('./tensor_model', custom_objects={'dice_loss':\n dice_loss})\n", (1476, 1539), True, 'import tensorflow as tf\n'), ((1547, 1632), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Start the SematicSegmentationDemo server."""'}), "(description='Start the SematicSegmentationDemo server.'\n )\n", (1570, 1632), False, 'import argparse\n'), ((4672, 4698), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4688, 4698), False, 'import os\n'), ((1236, 1253), 'tensorflow.square', 'tf.square', (['labels'], {}), '(labels)\n', (1245, 1253), True, 'import tensorflow as tf\n'), ((1277, 1302), 'tensorflow.square', 'tf.square', (['softmax_output'], {}), '(softmax_output)\n', (1286, 1302), True, 'import tensorflow as tf\n'), ((1412, 1439), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(nom / denom)'], {}), '(nom / denom)\n', (1426, 1439), True, 'import tensorflow as tf\n'), ((2339, 2350), 'time.time', 'time.time', ([], {}), '()\n', (2348, 2350), False, 'import time\n'), ((3008, 3029), 'numpy.array', 'np.array', (['pred_images'], {}), '(pred_images)\n', (3016, 3029), True, 'import numpy as np\n'), ((3052, 3063), 'time.time', 'time.time', ([], {}), '()\n', (3061, 3063), False, 'import time\n'), ((3588, 3599), 'time.time', 'time.time', ([], {}), '()\n', (3597, 3599), False, 'import time\n'), ((1149, 1198), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(labels * softmax_output)'], {'axis': 'axis'}), '(labels * softmax_output, axis=axis)\n', (1162, 1198), True, 'import tensorflow as tf\n'), ((1315, 1347), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['labels'], {'axis': 'axis'}), '(labels, axis=axis)\n', (1328, 1347), True, 'import tensorflow as tf\n'), ((1350, 1390), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['softmax_output'], {'axis': 'axis'}), 
'(softmax_output, axis=axis)\n', (1363, 1390), True, 'import tensorflow as tf\n'), ((2450, 2461), 'time.time', 'time.time', ([], {}), '()\n', (2459, 2461), False, 'import time\n'), ((2646, 2668), 'cv2.imdecode', 'cv2.imdecode', (['image', '(0)'], {}), '(image, 0)\n', (2658, 2668), False, 'import cv2\n'), ((2718, 2738), 'cv2.resize', 'cv2.resize', (['img', 'DIM'], {}), '(img, DIM)\n', (2728, 2738), False, 'import cv2\n'), ((2841, 2852), 'time.time', 'time.time', ([], {}), '()\n', (2850, 2852), False, 'import time\n'), ((3301, 3341), 'numpy.squeeze', 'np.squeeze', (['(predicted[i] * 255.0)'], {'axis': '(2)'}), '(predicted[i] * 255.0, axis=2)\n', (3311, 3341), True, 'import numpy as np\n'), ((4347, 4358), 'time.time', 'time.time', ([], {}), '()\n', (4356, 4358), False, 'import time\n'), ((4436, 4447), 'time.time', 'time.time', ([], {}), '()\n', (4445, 4447), False, 'import time\n'), ((4460, 4496), 'time.sleep', 'time.sleep', (['(times[i] - (end - start))'], {}), '(times[i] - (end - start))\n', (4470, 4496), False, 'import time\n'), ((2479, 2521), 'urllib.request.urlopen', 'urllib.request.urlopen', (['self.camera_domain'], {}), '(self.camera_domain)\n', (2501, 2521), False, 'import urllib\n'), ((2799, 2812), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2807, 2812), True, 'import numpy as np\n'), ((3417, 3429), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3427, 3429), False, 'import io\n'), ((3360, 3383), 'PIL.Image.fromarray', 'Image.fromarray', (['np_img'], {}), '(np_img)\n', (3375, 3383), False, 'from PIL import Image\n')] |
import numpy as np
import cv2
import pyzbar.pyzbar as pyzbar
# Used pyrealsense2 on License: Apache 2.0.
import pyrealsense2 as rs
# Stream resolution shared by the depth and color streams.
WIDTH = int(1280)
HEIGHT = int(720)
# Configure depth and color streams
pipeline = rs.pipeline()
realsense_config = rs.config()
realsense_config.enable_stream(rs.stream.depth, WIDTH, HEIGHT, rs.format.z16, 30)
realsense_config.enable_stream(rs.stream.color, WIDTH, HEIGHT, rs.format.bgr8, 30)
# Record stream (color and depth) to file
realsense_config.enable_record_to_file('lego_detection_test.bag')
# Create alignment primitive with color as its target stream:
alignment_stream = rs.align(rs.stream.color)
# Start streaming
profile = pipeline.start(realsense_config)
# Getting the depth sensor's depth scale
# (presumably converts raw depth units to meters -- confirm for this device)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
# Display barcode and QR code location
def display(frame, decoded_objects):
    """Draw the outline of each decoded barcode/QR code onto *frame*.

    :param frame: image (numpy array) to draw on, modified in place
    :param decoded_objects: results from ``pyzbar.decode``
    """
    # Loop over all decoded objects
    for decoded_object in decoded_objects:
        points = decoded_object.polygon
        # If the points do not form a quad, find convex hull
        if len(points) > 4:
            hull = cv2.convexHull(np.array([point for point in points], dtype=np.float32))
            # cv2.line() only accepts integer pixel coordinates; convexHull
            # returns float32 vertices, so convert each one to an int tuple
            # (the previous float tuples are rejected by OpenCV 4.x).
            hull = [tuple(int(c) for c in pt) for pt in np.squeeze(hull)]
        else:
            hull = points
        # Number of points in the convex hull
        n = len(hull)
        # Draw the closed polygon edge by edge
        for j in range(0, n):
            cv2.line(frame, hull[j], hull[(j + 1) % n], (255, 0, 0), 3)
try:
    while True:
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        # Align the depth frame to color frame
        aligned_frames = alignment_stream.process(frames)
        # Get aligned frames (depth images)
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        # Skip incomplete frame pairs
        if not aligned_depth_frame or not color_frame:
            continue
        # Convert images to numpy arrays
        # depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Convert to black and white to find QR-Codes
        # Threshold image to white in black
        mask = cv2.inRange(color_image, (0, 0, 0), (230, 230, 230))
        white_in_black = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        # Invert image it to black in white
        looking_for_qr_code_image = 255 - white_in_black
        # Find and display QR codes
        decoded_objects = pyzbar.decode(looking_for_qr_code_image)
        display(looking_for_qr_code_image, decoded_objects)
        # Show color images
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RealSense', color_image)
        # Show found QR-Codes
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('QR_Codes', looking_for_qr_code_image)
        # Poll the GUI event loop (~30 fps); returns the pressed key, if any
        key = cv2.waitKey(33)
        # Save video and break with Esc
        if key == 27:
            print("Save video")
            break
finally:
    # Stop streaming (also finalizes the recorded .bag file)
    pipeline.stop()
| [
"cv2.inRange",
"cv2.line",
"pyrealsense2.pipeline",
"cv2.imshow",
"numpy.squeeze",
"pyrealsense2.align",
"pyzbar.pyzbar.decode",
"numpy.array",
"cv2.cvtColor",
"pyrealsense2.config",
"cv2.waitKey",
"cv2.namedWindow"
] | [((216, 229), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (227, 229), True, 'import pyrealsense2 as rs\n'), ((249, 260), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (258, 260), True, 'import pyrealsense2 as rs\n'), ((617, 642), 'pyrealsense2.align', 'rs.align', (['rs.stream.color'], {}), '(rs.stream.color)\n', (625, 642), True, 'import pyrealsense2 as rs\n'), ((2287, 2339), 'cv2.inRange', 'cv2.inRange', (['color_image', '(0, 0, 0)', '(230, 230, 230)'], {}), '(color_image, (0, 0, 0), (230, 230, 230))\n', (2298, 2339), False, 'import cv2\n'), ((2365, 2403), 'cv2.cvtColor', 'cv2.cvtColor', (['mask', 'cv2.COLOR_GRAY2BGR'], {}), '(mask, cv2.COLOR_GRAY2BGR)\n', (2377, 2403), False, 'import cv2\n'), ((2568, 2608), 'pyzbar.pyzbar.decode', 'pyzbar.decode', (['looking_for_qr_code_image'], {}), '(looking_for_qr_code_image)\n', (2581, 2608), True, 'import pyzbar.pyzbar as pyzbar\n'), ((2706, 2755), 'cv2.namedWindow', 'cv2.namedWindow', (['"""RealSense"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('RealSense', cv2.WINDOW_AUTOSIZE)\n", (2721, 2755), False, 'import cv2\n'), ((2764, 2800), 'cv2.imshow', 'cv2.imshow', (['"""RealSense"""', 'color_image'], {}), "('RealSense', color_image)\n", (2774, 2800), False, 'import cv2\n'), ((2840, 2889), 'cv2.namedWindow', 'cv2.namedWindow', (['"""RealSense"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('RealSense', cv2.WINDOW_AUTOSIZE)\n", (2855, 2889), False, 'import cv2\n'), ((2898, 2947), 'cv2.imshow', 'cv2.imshow', (['"""QR_Codes"""', 'looking_for_qr_code_image'], {}), "('QR_Codes', looking_for_qr_code_image)\n", (2908, 2947), False, 'import cv2\n'), ((2963, 2978), 'cv2.waitKey', 'cv2.waitKey', (['(33)'], {}), '(33)\n', (2974, 2978), False, 'import cv2\n'), ((1465, 1524), 'cv2.line', 'cv2.line', (['frame', 'hull[j]', 'hull[(j + 1) % n]', '(255, 0, 0)', '(3)'], {}), '(frame, hull[j], hull[(j + 1) % n], (255, 0, 0), 3)\n', (1473, 1524), False, 'import cv2\n'), ((1171, 1226), 'numpy.array', 'np.array', (['[point for point in 
points]'], {'dtype': 'np.float32'}), '([point for point in points], dtype=np.float32)\n', (1179, 1226), True, 'import numpy as np\n'), ((1263, 1279), 'numpy.squeeze', 'np.squeeze', (['hull'], {}), '(hull)\n', (1273, 1279), True, 'import numpy as np\n')] |
import gevent.monkey
gevent.monkey.patch_all()
import csv
import json
from concurrent.futures import ProcessPoolExecutor
from time import time
import numpy as np
from taskqueue import GreenTaskQueue
from args import (get_aligner, get_argparser, get_bbox, get_provenance,
parse_args)
from cloudmanager import CloudManager
def make_range(block_range, part_num):
    """Split *block_range* into at most *part_num* contiguous slices.

    The first ``part - 1`` slices all have equal length and the final slice
    absorbs any remainder.  When there are fewer items than requested parts,
    each item becomes its own single-element slice.
    """
    total = len(block_range)
    if total < part_num:
        part, srange = total, 1
    else:
        part, srange = part_num, total // part_num
    pieces = [block_range[k * srange:(k + 1) * srange] for k in range(part - 1)]
    # Last slice takes everything that is left over.
    pieces.append(block_range[(part - 1) * srange:])
    return pieces
if __name__ == '__main__':
    # CLI: alignment-field composition and rendering over a z-range.
    parser = get_argparser()
    parser.add_argument('--downsample_shift', type=int, default=0,
        help='temporary hack to account for half pixel shifts caused by downsampling')
    parser.add_argument('--section_lookup', type=str,
        help='path to json file with section specific settings')
    parser.add_argument('--z_range_path', type=str,
        help='path to csv file with list of z indices to use')
    parser.add_argument('--src_path', type=str)
    parser.add_argument('--info_path', type=str,
        help='path to CloudVolume to use as template info file')
    parser.add_argument('--field_path', type=str)
    parser.add_argument('--fine_field_path', type=str)
    parser.add_argument('--coarse_field_path', type=str)
    parser.add_argument('--fine_mip', type=int)
    parser.add_argument('--coarse_mip', type=int)
    parser.add_argument('--dst_path', type=str)
    parser.add_argument('--src_mip', type=int)
    parser.add_argument('--bbox_start', nargs=3, type=int,
        help='bbox origin, 3-element int list')
    parser.add_argument('--bbox_stop', nargs=3, type=int,
        help='bbox origin+shape, 3-element int list')
    parser.add_argument('--bbox_mip', type=int, default=0,
        help='MIP level at which bbox_start & bbox_stop are specified')
    parser.add_argument('--max_mip', type=int, default=9)
    parser.add_argument('--pad',
        help='the size of the largest displacement expected; should be 2^high_mip',
        type=int, default=2048)
    args = parse_args(parser)
    # only compute matches to previous sections
    a = get_aligner(args)
    bbox = get_bbox(args)
    provenance = get_provenance(args)
    chunk_size = 1024
    src_mip = args.src_mip
    fine_mip = args.fine_mip
    coarse_mip = args.coarse_mip
    max_mip = args.max_mip
    pad = args.pad
    # Compile ranges: default is the bbox's z extent, optionally overridden
    # by an explicit list of (z_start, z_stop) rows in a CSV file.
    z_range = range(args.bbox_start[2], args.bbox_stop[2])
    if args.z_range_path:
        print('Compiling z_range from {}'.format(args.z_range_path))
        z_range = []
        with open(args.z_range_path) as f:
            reader = csv.reader(f, delimiter=',')
            for k, r in enumerate(reader):
                # Skip the header row (k == 0); each data row is a half-open
                # interval [z_start, z_stop).
                if k != 0:
                    z_start = int(r[0])
                    z_stop = int(r[1])
                    print('adding to z_range {}:{}'.format(z_start, z_stop))
                    z_range.extend(list(range(z_start, z_stop)))
    # Create CloudVolume Manager (info file template defaults to the source)
    template_path = args.src_path
    if args.info_path:
        template_path = args.info_path
    cm = CloudManager(template_path, max_mip, pad, provenance, batch_size=1,
                    size_chunk=chunk_size, batch_mip=src_mip)
    # Create src CloudVolumes: image inputs, displacement fields, and outputs
    src = cm.create(args.src_path, data_type='uint8', num_channels=1,
                    fill_missing=True, overwrite=False)
    fine_field = cm.create(args.fine_field_path, data_type='int16', num_channels=2,
                        fill_missing=True, overwrite=False)
    dst = cm.create(args.dst_path, data_type='uint8', num_channels=1,
                    fill_missing=True, overwrite=True)
    coarse_field = cm.create(args.coarse_field_path, data_type='int16', num_channels=2,
                        fill_missing=True, overwrite=False)
    field = cm.create(args.field_path, data_type='int16', num_channels=2,
                    fill_missing=True, overwrite=True)
    # Source Dict: maps a source path to its (cached) CloudVolume handle
    src_path_to_cv = {args.src_path: src}
    # compile model lookup per z index
    affine_lookup = None
    source_lookup = {}
    if args.section_lookup:
        affine_lookup = {}
        with open(args.section_lookup) as f:
            section_list = json.load(f)
            for section in section_list:
                z = section['z']
                # Per-section 2x3 affine; translation column is shifted to
                # compensate for downsampling (see --downsample_shift).
                affine_lookup[z] = np.array(section['transform'])
                affine_lookup[z][:, 2] += args.downsample_shift
                # Sections may override the source volume path.
                try:
                    src_path = section['src']
                except KeyError:
                    src_path = args.src_path
                if src_path not in src_path_to_cv:
                    src_path_to_cv[src_path] = cm.create(src_path,
                        data_type='uint8', num_channels=1, fill_missing=True,
                        overwrite=False)
                source_lookup[z] = src_path_to_cv[src_path]
    def remote_upload(tasks):
        # Push a batch of tasks onto the shared task queue for remote workers.
        with GreenTaskQueue(queue_name=args.queue_name) as tq:
            tq.insert_all(tasks)
    class ComposeTaskIterator(object):
        # Lazily yields field-composition tasks (fine field composed with
        # coarse field into the output field) for each section in zrange.
        def __init__(self, zrange):
            self.zrange = zrange
        def __iter__(self):
            print("range is ", self.zrange)
            for z in self.zrange:
                affine = None
                t = a.compose(cm, fine_field.path, coarse_field.path,
                              field.path, z, z, z, bbox, fine_mip,
                              coarse_mip, fine_mip, factor=1, affine=affine,
                              pad=pad)
                yield from t
    ptask = []
    range_list = make_range(z_range, a.threads)
    start = time()
    for irange in range_list:
        ptask.append(ComposeTaskIterator(irange))
    # Fan the per-range iterators out across processes to enqueue the tasks.
    with ProcessPoolExecutor(max_workers=a.threads) as executor:
        executor.map(remote_upload, ptask)
    end = time()
    diff = end - start
    print("Sending Compose Tasks use time:", diff)
    print('Running Compose Tasks')
    # wait until remote workers have drained the queue
    start = time()
    a.wait_for_sqs_empty()
    end = time()
    diff = end - start
    print("Executing Compose Tasks use time:", diff)
    class RenderTaskIterator(object):
        # Lazily yields render tasks that apply the composed field (plus an
        # optional per-section affine) to the source images.
        def __init__(self, zrange):
            self.zrange = zrange
        def __iter__(self):
            print("range is ", self.zrange)
            for z in self.zrange:
                affine = None
                if affine_lookup:
                    try:
                        affine = affine_lookup[z]
                    except KeyError:
                        # Sections without an entry get the identity affine.
                        affine = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
                # Sections may override the source volume path.
                try:
                    src_path = source_lookup[z].path
                    if src_path != src.path:
                        print("Overriding {} source dir with path {}".format(z, src_path))
                except KeyError:
                    src_path = src.path
                t = a.render(cm, src_path, field.path, dst.path, z, z, z, bbox,
                             src_mip, fine_mip, affine=affine)
                yield from t
    ptask = []
    start = time()
    for irange in range_list:
        ptask.append(RenderTaskIterator(irange))
    # Enqueue render tasks in parallel, mirroring the compose phase above.
    with ProcessPoolExecutor(max_workers=a.threads) as executor:
        executor.map(remote_upload, ptask)
    end = time()
    diff = end - start
    print("Sending Render Tasks use time:", diff)
    print('Running Render Tasks')
    # wait until remote workers have drained the queue
    start = time()
    a.wait_for_sqs_empty()
    end = time()
    diff = end - start
    print("Executing Render Tasks use time:", diff)
| [
"args.get_argparser",
"args.get_bbox",
"cloudmanager.CloudManager",
"time.time",
"taskqueue.GreenTaskQueue",
"json.load",
"numpy.array",
"concurrent.futures.ProcessPoolExecutor",
"args.get_aligner",
"args.get_provenance",
"csv.reader",
"args.parse_args"
] | [((778, 793), 'args.get_argparser', 'get_argparser', ([], {}), '()\n', (791, 793), False, 'from args import get_aligner, get_argparser, get_bbox, get_provenance, parse_args\n'), ((2199, 2217), 'args.parse_args', 'parse_args', (['parser'], {}), '(parser)\n', (2209, 2217), False, 'from args import get_aligner, get_argparser, get_bbox, get_provenance, parse_args\n'), ((2270, 2287), 'args.get_aligner', 'get_aligner', (['args'], {}), '(args)\n', (2281, 2287), False, 'from args import get_aligner, get_argparser, get_bbox, get_provenance, parse_args\n'), ((2297, 2311), 'args.get_bbox', 'get_bbox', (['args'], {}), '(args)\n', (2305, 2311), False, 'from args import get_aligner, get_argparser, get_bbox, get_provenance, parse_args\n'), ((2327, 2347), 'args.get_provenance', 'get_provenance', (['args'], {}), '(args)\n', (2341, 2347), False, 'from args import get_aligner, get_argparser, get_bbox, get_provenance, parse_args\n'), ((3130, 3243), 'cloudmanager.CloudManager', 'CloudManager', (['template_path', 'max_mip', 'pad', 'provenance'], {'batch_size': '(1)', 'size_chunk': 'chunk_size', 'batch_mip': 'src_mip'}), '(template_path, max_mip, pad, provenance, batch_size=1,\n size_chunk=chunk_size, batch_mip=src_mip)\n', (3142, 3243), False, 'from cloudmanager import CloudManager\n'), ((5453, 5459), 'time.time', 'time', ([], {}), '()\n', (5457, 5459), False, 'from time import time\n'), ((5653, 5659), 'time.time', 'time', ([], {}), '()\n', (5657, 5659), False, 'from time import time\n'), ((5783, 5789), 'time.time', 'time', ([], {}), '()\n', (5787, 5789), False, 'from time import time\n'), ((5823, 5829), 'time.time', 'time', ([], {}), '()\n', (5827, 5829), False, 'from time import time\n'), ((6733, 6739), 'time.time', 'time', ([], {}), '()\n', (6737, 6739), False, 'from time import time\n'), ((6929, 6935), 'time.time', 'time', ([], {}), '()\n', (6933, 6935), False, 'from time import time\n'), ((7057, 7063), 'time.time', 'time', ([], {}), '()\n', (7061, 7063), False, 'from time 
import time\n'), ((7097, 7103), 'time.time', 'time', ([], {}), '()\n', (7101, 7103), False, 'from time import time\n'), ((5546, 5588), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'a.threads'}), '(max_workers=a.threads)\n', (5565, 5588), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((6823, 6865), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'a.threads'}), '(max_workers=a.threads)\n', (6842, 6865), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((2731, 2759), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (2741, 2759), False, 'import csv\n'), ((4206, 4218), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4215, 4218), False, 'import json\n'), ((4795, 4837), 'taskqueue.GreenTaskQueue', 'GreenTaskQueue', ([], {'queue_name': 'args.queue_name'}), '(queue_name=args.queue_name)\n', (4809, 4837), False, 'from taskqueue import GreenTaskQueue\n'), ((4306, 4336), 'numpy.array', 'np.array', (["section['transform']"], {}), "(section['transform'])\n", (4314, 4336), True, 'import numpy as np\n'), ((6260, 6304), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])\n', (6268, 6304), True, 'import numpy as np\n')] |
import pennylane as qml
import numpy as np
def add_dummy_measurements_for_test(func):
    """Decorator for the encoder methods: when the module-level ``test``
    flag is truthy, append a dummy expectation value so the circuit can be
    executed/drawn standalone as a QNode.

    ``test`` is only defined when this file runs as a script, so it is read
    via ``globals().get`` -- referencing the bare name raised a NameError
    whenever the module was imported instead of executed.
    """
    def inner(*args, **kwargs):
        func(*args, **kwargs)
        if globals().get("test", False):
            return qml.expval(qml.PauliY(0))
    return inner
class EncodingCircuitsPennylane:
    """Data-encoding circuit templates for PennyLane.

    Each encoding scheme loads classical ``inputs`` into rotation angles on
    ``qubit`` wires.  Schemes 1-4 and 6 first put every wire into
    superposition with a Hadamard; scheme 5 uses plain RY rotations.
    """

    # Per-qubit rotation-gate sequence for every encoding scheme.
    _GATE_SEQUENCES = {
        1: ('RZ',),
        2: ('RZ', 'RX'),
        3: ('RZ', 'RX', 'RZ'),
        4: ('RZ', 'RX', 'RZ', 'RX'),
        5: ('RY',),
        6: ('RZ', 'RX') * 4,
    }
    # Schemes that prepend a Hadamard on every wire.
    _USES_HADAMARD = frozenset({1, 2, 3, 4, 6})

    def __init__(self, enc = None, qubit = None):
        """
        :param enc: encoding scheme id, one of 1..6
        :param qubit: number of wires the encoding acts on
        """
        self.choices = [1, 2, 3, 4, 5, 6]
        assert enc in self.choices
        self.enc = enc
        self.qubit = qubit

    def get_encoder(self, inputs):
        """Apply the selected encoding circuit for *inputs*."""
        return self.__encode(inputs)

    def max_inputs_length(self):
        """Maximum number of classical values this encoding can load
        (qubits x rotations per qubit)."""
        return self.qubit * len(self._GATE_SEQUENCES[self.enc])

    @add_dummy_measurements_for_test
    def __encode(self, inputs):
        # One table-driven implementation replaces the six near-identical
        # __encoder_N methods.  Gates are applied via getattr instead of
        # exec(): building source code with str.format() breaks for inputs
        # whose repr is not a valid Python literal and silently detaches
        # any autodifferentiable parameters from the computation.
        assert len(inputs) <= self.max_inputs_length()
        gates = self._GATE_SEQUENCES[self.enc]
        var_per_qubit = len(gates)
        for qub in range(self.qubit):
            if self.enc in self._USES_HADAMARD:
                qml.Hadamard(wires = qub)
            for i in range(var_per_qubit):
                idx = qub * var_per_qubit + i
                if idx < len(inputs):
                    getattr(qml, gates[i])(inputs[idx], wires = qub)
                # else: nothing left to load on this wire
if __name__ == '__main__':
    # Smoke test: draw encoding scheme 4 on 5 qubits with random inputs.
    test = True
    enc = EncodingCircuitsPennylane(enc = 4, qubit = 5)
    inputs_length = enc.max_inputs_length()
    inputs = np.random.random(inputs_length)
    dev = qml.device("default.qubit", wires = 10) #target pennylane device
    qnode = qml.QNode(enc.get_encoder, dev) #circuit
    qnode(inputs)
    print(qnode.draw())
else:
    # Imported as a module: disable the dummy test measurements.
    test = False
| [
"pennylane.PauliY",
"pennylane.QNode",
"numpy.random.random",
"pennylane.device",
"pennylane.Hadamard"
] | [((4506, 4537), 'numpy.random.random', 'np.random.random', (['inputs_length'], {}), '(inputs_length)\n', (4522, 4537), True, 'import numpy as np\n'), ((4548, 4585), 'pennylane.device', 'qml.device', (['"""default.qubit"""'], {'wires': '(10)'}), "('default.qubit', wires=10)\n", (4558, 4585), True, 'import pennylane as qml\n'), ((4625, 4656), 'pennylane.QNode', 'qml.QNode', (['enc.get_encoder', 'dev'], {}), '(enc.get_encoder, dev)\n', (4634, 4656), True, 'import pennylane as qml\n'), ((1511, 1534), 'pennylane.Hadamard', 'qml.Hadamard', ([], {'wires': 'qub'}), '(wires=qub)\n', (1523, 1534), True, 'import pennylane as qml\n'), ((1961, 1984), 'pennylane.Hadamard', 'qml.Hadamard', ([], {'wires': 'qub'}), '(wires=qub)\n', (1973, 1984), True, 'import pennylane as qml\n'), ((2514, 2537), 'pennylane.Hadamard', 'qml.Hadamard', ([], {'wires': 'qub'}), '(wires=qub)\n', (2526, 2537), True, 'import pennylane as qml\n'), ((3077, 3100), 'pennylane.Hadamard', 'qml.Hadamard', ([], {'wires': 'qub'}), '(wires=qub)\n', (3089, 3100), True, 'import pennylane as qml\n'), ((4044, 4067), 'pennylane.Hadamard', 'qml.Hadamard', ([], {'wires': 'qub'}), '(wires=qub)\n', (4056, 4067), True, 'import pennylane as qml\n'), ((204, 217), 'pennylane.PauliY', 'qml.PauliY', (['(0)'], {}), '(0)\n', (214, 217), True, 'import pennylane as qml\n')] |
from abc import ABC, abstractmethod

import numpy as np
from intervaltree import IntervalTree
from loguru import logger

from vimms.Common import ScanParameters
########################################################################################################################
# DEW Exclusions
########################################################################################################################
class ExclusionItem(object):
    """
    A single dynamic-exclusion entry: an m/z x RT bounding box plus the RT
    at which the precursor was fragmented.
    """
    def __init__(self, from_mz, to_mz, from_rt, to_rt, frag_at):
        """
        Creates a dynamic exclusion item
        :param from_mz: m/z lower bound of the box
        :param to_mz: m/z upper bound of the box
        :param from_rt: RT lower bound of the box
        :param to_rt: RT upper bound of the box
        :param frag_at: RT at which fragmentation occurred
        """
        self.from_mz = from_mz
        self.to_mz = to_mz
        self.from_rt = from_rt
        self.to_rt = to_rt
        self.frag_at = frag_at
        # Representative point of the box: centre m/z, fragmentation RT.
        self.mz = (self.from_mz + self.to_mz) / 2.
        self.rt = self.frag_at

    def peak_in(self, mz, rt):
        """Return True if the (mz, rt) point falls inside this box."""
        return self.rt_match(rt) and self.mz_match(mz)

    def rt_match(self, rt):
        """Return True if rt lies within [from_rt, to_rt] (inclusive)."""
        return self.from_rt <= rt <= self.to_rt

    def mz_match(self, mz):
        """Return True if mz lies within [from_mz, to_mz] (inclusive)."""
        return self.from_mz <= mz <= self.to_mz

    def __repr__(self):
        return 'ExclusionItem mz=(%f, %f) rt=(%f-%f)' % (self.from_mz, self.to_mz, self.from_rt, self.to_rt)

    def __lt__(self, other):
        # Strict less-than on the lower m/z bound.  The original used '<=',
        # which made 'x < x' evaluate to True and violated the ordering
        # contract expected by sorting.
        return self.from_mz < other.from_mz
class BoxHolder(object):
    """
    A class to allow quick lookup of boxes (e.g. exclusion items, targets, etc)
    Creates an interval tree on mz as this is likely to narrow things down quicker
    Also has a method for returning an rt interval tree for a particular mz
    and an mz interval tree for a particular rt
    """
    def __init__(self):
        self.boxes_mz = IntervalTree()
        self.boxes_rt = IntervalTree()

    def add_box(self, box):
        """
        Add a box to both interval trees (indexed by m/z and by RT).
        """
        mz_from = box.from_mz
        mz_to = box.to_mz
        rt_from = box.from_rt
        rt_to = box.to_rt
        self.boxes_mz.addi(mz_from, mz_to, box)
        self.boxes_rt.addi(rt_from, rt_to, box)

    def check_point(self, mz, rt):
        """
        Find the boxes that match this mz and rt value
        """
        regions = self.boxes_mz.at(mz)
        hits = set()
        for r in regions:
            if r.data.rt_match(rt):
                hits.add(r.data)
        return hits

    def check_point_2(self, mz, rt):
        """
        An alternative method that searches both trees
        Might be faster if there are lots of rt ranges that
        can map to a particular mz value
        """
        mz_regions = self.boxes_mz.at(mz)
        # BUG FIX: the original referenced self.boxed_rt, which does not exist
        # (AttributeError on first call); the RT tree is self.boxes_rt.
        rt_regions = self.boxes_rt.at(rt)
        inter = mz_regions.intersection(rt_regions)
        return [r.data for r in inter]

    def is_in_box(self, mz, rt):
        """
        Check if this mz and rt is in *any* box
        """
        hits = self.check_point(mz, rt)
        if len(hits) > 0:
            return True
        else:
            return False

    def is_in_box_mz(self, mz):
        """
        Check if an mz value is in any box
        """
        regions = self.boxes_mz.at(mz)
        if len(regions) > 0:
            return True
        else:
            return False

    def is_in_box_rt(self, rt):
        """
        Check if an rt value is in any box
        """
        regions = self.boxes_rt.at(rt)
        if len(regions) > 0:
            return True
        else:
            return False

    def get_subset_rt(self, rt):
        """
        Create an interval tree based upon mz for all boxes active at rt
        """
        regions = self.boxes_rt.at(rt)
        it = BoxHolder()
        for r in regions:
            box = r.data
            it.add_box(box)
        return it

    def get_subset_mz(self, mz):
        """
        Create an interval tree based upon rt for all boxes active at mz
        """
        regions = self.boxes_mz.at(mz)
        it = BoxHolder()
        for r in regions:
            box = r.data
            it.add_box(box)
        return it
class TopNExclusion(object):
    """
    Standard Top-N dynamic exclusion: each fragmented precursor is excluded
    inside a fixed m/z x RT window around the fragmentation event.
    """
    def __init__(self, initial_exclusion_list=None):
        """
        :param initial_exclusion_list: optional iterable of exclusion items used
               to seed the list (copied, not aliased, so the caller's list is safe)
        """
        self.exclusion_list = []
        if initial_exclusion_list is not None:  # copy initial list, if provided
            self.exclusion_list = list(initial_exclusion_list)

    def is_excluded(self, mz, rt):
        """
        Checks if a pair of (mz, rt) value is currently excluded by dynamic exclusion window
        :param mz: m/z value
        :param rt: RT value
        :return: True if excluded (with weight 0.0), False otherwise (weight 1.0)
        """
        # TODO: make this faster? (linear scan over all active windows)
        for x in self.exclusion_list:
            exclude_mz = x.from_mz <= mz <= x.to_mz
            exclude_rt = x.from_rt <= rt <= x.to_rt
            if exclude_mz and exclude_rt:
                logger.debug(
                    'Excluded precursor ion mz {:.4f} rt {:.2f} because of {}'.format(mz, rt, x))
                return True, 0.0
        return False, 1.0

    def update(self, current_scan, ms2_tasks):
        """
        Updates the state of this exclusion object based on the current ms1 scan and scheduled ms2 tasks
        :param current_scan: the current MS1 scan
        :param ms2_tasks: scheduled ms2 tasks
        """
        rt = current_scan.rt
        temp_exclusion_list = []
        for task in ms2_tasks:
            for precursor in task.get('precursor_mz'):
                mz = precursor.precursor_mz
                mz_tol = task.get(ScanParameters.DYNAMIC_EXCLUSION_MZ_TOL)
                rt_tol = task.get(ScanParameters.DYNAMIC_EXCLUSION_RT_TOL)
                # Build the exclusion window once; the original constructed an
                # identical item a second time (once just for logging) before
                # appending it.
                x = self._get_exclusion_item(mz, rt, mz_tol, rt_tol)
                logger.debug('Time {:.6f} Created dynamic temporary exclusion window mz ({}-{}) rt ({}-{})'.format(
                    rt,
                    x.from_mz, x.to_mz, x.from_rt, x.to_rt
                ))
                temp_exclusion_list.append(x)
        self.exclusion_list.extend(temp_exclusion_list)

    def cleanup(self, current_scan):
        """
        Clean-up dynamic exclusion list. Should typically be called once a scan has been processed
        :param current_scan: the newly generated scan
        :return: None
        """
        # current simulated time is scan start RT + scan duration
        # in the real data, scan.duration is not set, so we just use the scan rt as the current time
        current_time = current_scan.rt
        if current_scan.scan_duration is not None:
            current_time += current_scan.scan_duration
        # remove expired items from dynamic exclusion list
        self.exclusion_list = list(filter(lambda x: x.to_rt > current_time, self.exclusion_list))

    def _get_exclusion_item(self, mz, rt, mz_tol, rt_tol):
        """Construct an ExclusionItem covering mz +/- mz_tol (ppm) and rt +/- rt_tol."""
        mz_lower = mz * (1 - mz_tol / 1e6)
        mz_upper = mz * (1 + mz_tol / 1e6)
        rt_lower = rt - rt_tol
        rt_upper = rt + rt_tol
        x = ExclusionItem(from_mz=mz_lower, to_mz=mz_upper, from_rt=rt_lower, to_rt=rt_upper,
                          frag_at=rt)
        return x
class WeightedDEWExclusion(TopNExclusion):
    """
    Weighted dynamic exclusion: instead of a hard in/out decision, each
    previously fragmented precursor gets a weight that ramps back up over time
    (see compute_weight).
    """
    def __init__(self, rt_tol, exclusion_t_0):
        super().__init__()
        self.rt_tol = rt_tol
        self.exclusion_t_0 = exclusion_t_0
        # The hard-exclusion window must fit inside the total exclusion window.
        assert self.exclusion_t_0 <= self.rt_tol

    def is_excluded(self, mz, rt):
        """
        Checks if a pair of (mz, rt) value is currently excluded by the weighted dynamic exclusion window
        :param mz: m/z value
        :param rt: RT value
        :return: True if excluded, False otherwise
        """
        # TODO: make this faster?
        # Most recently created windows are examined first.
        self.exclusion_list.sort(key=lambda item: item.from_rt, reverse=True)
        for item in self.exclusion_list:
            if (item.from_mz <= mz <= item.to_mz) and (item.from_rt <= rt <= item.to_rt):
                logger.debug(
                    'Excluded precursor ion mz {:.4f} rt {:.2f} because of {}'.format(mz, rt, item))
                return compute_weight(rt, item.frag_at, self.rt_tol, self.exclusion_t_0)
        return False, 1.0
def compute_weight(current_rt, frag_at, rt_tol, exclusion_t_0):
    """Return an (is_excluded, weight) pair under the WeightedDEW scheme.

    :param current_rt: the current retention time
    :param frag_at: RT at which the precursor was last fragmented (None if never)
    :param rt_tol: total exclusion window length
    :param exclusion_t_0: hard-exclusion window length (weight 0 inside it)
    """
    if frag_at is None:
        # Never been fragmented before: always include with full weight.
        return False, 1.0
    if current_rt <= frag_at + exclusion_t_0:
        # Still inside the hard exclusion window: always exclude.
        return True, 0.0
    # Past the hard window: the weight ramps linearly from 0 towards 1 at rt_tol.
    w = (current_rt - (exclusion_t_0 + frag_at)) / (rt_tol - exclusion_t_0)
    if w > 1:
        logger.warning('exclusion weight %f is greater than 1 (current_rt %f exclusion_t_0 %f frag_at %f rt_tol %f)' %
                       (w, current_rt, exclusion_t_0, frag_at, rt_tol))
    # assert w <= 1, w
    return True, w
########################################################################################################################
# Filters
########################################################################################################################
class ScoreFilter(ABC):
    """Abstract base class for all precursor/ROI score filters.

    Inheriting from ABC is required for @abstractmethod to have any effect;
    in the original the decorator was inert because the class used the
    default metaclass, so subclasses were never forced to implement filter().
    """
    @abstractmethod
    def filter(self): pass
class MinIntensityFilter(ScoreFilter):
    """Keeps only precursors whose MS1 intensity exceeds a fixed threshold."""
    def __init__(self, min_ms1_intensity):
        self.min_ms1_intensity = min_ms1_intensity

    def filter(self, intensities):
        """Return a boolean array: True where intensity > min_ms1_intensity."""
        values = np.array(intensities)
        return values > self.min_ms1_intensity
class DEWFilter(ScoreFilter):
    """Classic dynamic-exclusion filter on time since last fragmentation."""
    def __init__(self, rt_tol):
        self.rt_tol = rt_tol

    def filter(self, current_rt, last_frag_rts):
        """Return a boolean array: True where the ROI is NOT inside its window.

        None entries (never fragmented) become NaN; every comparison with NaN
        is False, so those ROIs always pass the filter.
        """
        elapsed = current_rt - np.array(last_frag_rts, dtype=np.double)
        return np.logical_not(elapsed < self.rt_tol)
class WeightedDEWFilter(ScoreFilter):
    """Weighted dynamic-exclusion filter: returns a weight per ROI rather
    than a hard include/exclude decision (see compute_weight)."""
    def __init__(self, rt_tol, exclusion_t_0):
        self.rt_tol = rt_tol
        self.exclusion_t_0 = exclusion_t_0

    def filter(self, current_rt, last_frag_rts):
        """Return an array of weights, one per last-fragmentation RT."""
        return np.array([compute_weight(current_rt, frag_rt, self.rt_tol, self.exclusion_t_0)[1]
                         for frag_rt in last_frag_rts])
class LengthFilter(ScoreFilter):
    """Keeps only ROIs that are long enough to be worth fragmenting."""
    def __init__(self, min_roi_length_for_fragmentation):
        self.min_roi_length_for_fragmentation = min_roi_length_for_fragmentation

    def filter(self, roi_lengths):
        """Return a boolean array: True where the ROI reaches the minimum length."""
        threshold = self.min_roi_length_for_fragmentation
        return roi_lengths >= threshold
class SmartROIFilter(ScoreFilter):
    """Filter driven by each ROI's own fragmentation flag."""
    def filter(self, rois):
        """Return a boolean array from each ROI's get_can_fragment().

        Plain ROI objects always report True; SmartROI objects track their
        status according to the SmartROI rules.
        """
        flags = [roi.get_can_fragment() for roi in rois]
        return np.array(flags)
if __name__ == '__main__':
    # Small smoke test for BoxHolder lookups.
    box_a = ExclusionItem(1.1, 1.2, 3.4, 3.5, 3.45)
    box_b = ExclusionItem(1.0, 1.4, 3.3, 3.6, 3.45)
    box_c = ExclusionItem(2.1, 2.2, 3.2, 3.5, 3.45)
    holder = BoxHolder()
    for box in (box_a, box_b, box_c):
        holder.add_box(box)
    print(holder.is_in_box(1.15, 3.55))
    print(holder.is_in_box(1.15, 3.75))
| [
"intervaltree.IntervalTree",
"numpy.array",
"loguru.logger.warning"
] | [((2170, 2184), 'intervaltree.IntervalTree', 'IntervalTree', ([], {}), '()\n', (2182, 2184), False, 'from intervaltree import IntervalTree\n'), ((2209, 2223), 'intervaltree.IntervalTree', 'IntervalTree', ([], {}), '()\n', (2221, 2223), False, 'from intervaltree import IntervalTree\n'), ((10754, 10771), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (10762, 10771), True, 'import numpy as np\n'), ((9963, 9984), 'numpy.array', 'np.array', (['intensities'], {}), '(intensities)\n', (9971, 9984), True, 'import numpy as np\n'), ((9196, 9370), 'loguru.logger.warning', 'logger.warning', (["('exclusion weight %f is greater than 1 (current_rt %f exclusion_t_0 %f frag_at %f rt_tol %f)'\n % (weight, current_rt, exclusion_t_0, frag_at, rt_tol))"], {}), "(\n 'exclusion weight %f is greater than 1 (current_rt %f exclusion_t_0 %f frag_at %f rt_tol %f)'\n % (weight, current_rt, exclusion_t_0, frag_at, rt_tol))\n", (9210, 9370), False, 'from loguru import logger\n'), ((10282, 10322), 'numpy.array', 'np.array', (['last_frag_rts'], {'dtype': 'np.double'}), '(last_frag_rts, dtype=np.double)\n', (10290, 10322), True, 'import numpy as np\n')] |
import numpy as np
import os
from gym import utils
from gym.envs.mujoco import mujoco_env
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """MuJoCo Ant locomotion environment (variant without the contact cost)."""
    def __init__(self):
        # Resolve the XML model path relative to this file so the env works
        # regardless of the current working directory; frame_skip is 5.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        mujoco_env.MujocoEnv.__init__(self, '%s/assets/ant.xml' % dir_path, 5)
        utils.EzPickle.__init__(self)
    def step(self, a):
        # Reward = forward progress of the torso along x (per unit time),
        # minus a quadratic control cost, plus a constant survive bonus.
        # The usual contact cost is deliberately commented out below.
        xposbefore = self.get_body_com("torso")[0]
        self.do_simulation(a, self.frame_skip)
        xposafter = self.get_body_com("torso")[0]
        forward_reward = (xposafter - xposbefore) / self.dt
        ctrl_cost = .5 * np.square(a).sum()
        # print("forward_reward: ", forward_reward, xposbefore, xposafter, self.dt)
        # contact_cost = 0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
        survive_reward = 1.0
        reward = forward_reward - ctrl_cost + survive_reward
        state = self.state_vector()
        # Episode ends when the state blows up (non-finite) or state[2]
        # (presumably the torso height -- TODO confirm) leaves [0.2, 1.0].
        notdone = np.isfinite(state).all() and 0.2 <= state[2] <= 1.0
        done = not notdone
        ob = self._get_obs()
        return ob, reward, done, dict(
            reward_forward=forward_reward,
            reward_ctrl=-ctrl_cost,
            # reward_contact=-contact_cost,
            reward_survive=survive_reward)
    def mb_step(self, obs, action, next_obs):
        # Model-based reward: recompute the step reward purely from
        # (obs, action, next_obs).  Per _get_obs, obs[0] is the torso x
        # position and obs[1] corresponds to qpos[2].
        forward_reward = (next_obs[0] - obs[0]) / self.dt
        ctrl_reward = .5 * np.square(action).sum()
        rewards = forward_reward - ctrl_reward + 1.
        notdone = np.isfinite(obs).all() and 0.2 <= obs[1] <= 1.0
        done = not notdone
        return rewards, done
    def _get_obs(self):
        # Observation: torso x position (1 value), then qpos[2:] and qvel.
        return np.concatenate([
            self.get_body_com('torso').flat[:1],
            self.sim.data.qpos.flat[2:],
            self.sim.data.qvel.flat,
            # np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
        ])
    def reset_model(self):
        # Perturb the initial pose and velocities slightly for exploration.
        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
        self.set_state(qpos, qvel)
        return self._get_obs()
    def viewer_setup(self):
        # Zoom the camera in relative to the model's extent.
        self.viewer.cam.distance = self.model.stat.extent * 0.5
if __name__ == '__main__':
    from gym import register
    import gym
    # Roll out one random episode and compare the env's cumulative reward
    # with the model-based reward recomputed by mb_step.
    register(id='ant-v2', entry_point=AntEnv, max_episode_steps=1000)
    env = gym.make('ant-v2')
    state = env.reset()
    episode_return = 0
    mb_return = 0
    for step_idx in range(1000):
        action = env.action_space.sample()
        next_state, reward, done, _ = env.step(action)
        mb_reward, _mb_done = env.mb_step(state, action, next_state)
        mb_return += mb_reward
        state = next_state
        episode_return += reward
        if done:
            print(step_idx)
            exit()
    print(episode_return, mb_return)
| [
"gym.register",
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"numpy.square",
"os.path.realpath",
"gym.utils.EzPickle.__init__",
"numpy.isfinite",
"gym.make"
] | [((2280, 2345), 'gym.register', 'register', ([], {'id': '"""ant-v2"""', 'entry_point': 'AntEnv', 'max_episode_steps': '(1000)'}), "(id='ant-v2', entry_point=AntEnv, max_episode_steps=1000)\n", (2288, 2345), False, 'from gym import register\n'), ((2356, 2374), 'gym.make', 'gym.make', (['"""ant-v2"""'], {}), "('ant-v2')\n", (2364, 2374), False, 'import gym\n'), ((240, 310), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', "('%s/assets/ant.xml' % dir_path)", '(5)'], {}), "(self, '%s/assets/ant.xml' % dir_path, 5)\n", (269, 310), False, 'from gym.envs.mujoco import mujoco_env\n'), ((319, 348), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (342, 348), False, 'from gym import utils\n'), ((204, 230), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (220, 230), False, 'import os\n'), ((606, 618), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (615, 618), True, 'import numpy as np\n'), ((949, 967), 'numpy.isfinite', 'np.isfinite', (['state'], {}), '(state)\n', (960, 967), True, 'import numpy as np\n'), ((1395, 1412), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (1404, 1412), True, 'import numpy as np\n'), ((1489, 1505), 'numpy.isfinite', 'np.isfinite', (['obs'], {}), '(obs)\n', (1500, 1505), True, 'import numpy as np\n')] |
'''
@Description:yolov4_loss 损失计算
@Author:Zigar
@Date:2021/03/11 11:32:42
'''
import torch
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from PIL import Image
from utils.utils import box_iou, box_ciou, clip_by_tensor, smooth_labels
#---------------------------------------------------------------#
# 均方误差损失函数(没有用到)
# https://blog.csdn.net/oBrightLamp/article/details/85137756
#---------------------------------------------------------------#
def MSELoss(pred, target):
    """Element-wise squared error between prediction and target."""
    diff = pred - target
    return diff ** 2
#---------------------------------------------------------------#
# 二类交叉熵损失函数,torch.nn.BCELoss()
# https://blog.csdn.net/geter_CS/article/details/84747670
#---------------------------------------------------------------#
def BCELoss(pred, target):
    """Element-wise binary cross-entropy.

    Predictions are clipped to [eps, 1 - eps] first so that torch.log never
    sees an exact 0 or 1.
    """
    epsilon = 1e-7
    clipped = clip_by_tensor(pred, epsilon, 1.0 - epsilon)
    return - target * torch.log(clipped) - (1.0 - target) * torch.log(1.0 - clipped)
#---------------------------------------------------------------#
# YOLOLoss 损失计算
#---------------------------------------------------------------#
class YOLOLoss(nn.Module):
    """YOLOv4 loss for one feature level: CIoU box-regression loss + BCE
    objectness loss + BCE classification loss (with optional label smoothing).
    """
    def __init__(self, anchors, num_classes, img_size, label_smooth=0, cuda=True, normalize=True):
        """
        :param anchors: all anchor (w, h) pairs in input-image pixels
        :param num_classes: number of object classes
        :param img_size: network input size, e.g. (416, 416)
        :param label_smooth: label-smoothing factor applied to class targets
        :param cuda: if True, move tensors to the GPU inside forward()
        :param normalize: if True, normalise the loss by the positive count
        """
        super(YOLOLoss, self).__init__()
        self.anchors = anchors
        self.num_anchors = len(anchors)
        self.num_classes = num_classes
        self.bbox_attrs = 5 + num_classes # 5 = xywh + conf
        self.img_size = img_size
        # Feature-map widths at strides 32/16/8; used below to work out which
        # anchor group a given feature map belongs to.
        self.feature_length = [img_size[0]//32, img_size[0]//16, img_size[0]//8]
        self.label_smooth = label_smooth
        self.ignore_threshold = 0.5
        # Per-term loss weights.  NOTE: 'lamnda_cls' and 'normallize' are
        # misspelled but kept as-is (the names are referenced further down).
        self.lambda_conf = 1.0
        self.lamnda_cls = 1.0
        self.lambda_loc = 1.0
        self.cuda = cuda
        self.normallize = normalize
    def forward(self, input, targets=None):
        #--------------------------------------------------------------#
        # input shape is one of:
        #   batchsize, 3*(5+num_classes), 13, 13
        #   batchsize, 3*(5+num_classes), 26, 26
        #   batchsize, 3*(5+num_classes), 52, 52
        #--------------------------------------------------------------#
        # number of images per batch
        batch_size = input.size(0)
        # width and height of this feature map
        in_w, in_h = input.size(2), input.size(3)
        #----------------------------------------------------------------------#
        # Compute the stride: how many input-image pixels one feature point
        # covers.  A 13x13 feature map -> 32 pixels per point, 26x26 -> 16,
        # 52x52 -> 8, i.e. stride_h = stride_w = 32 / 16 / 8.
        #---------------------------------------------------------------#
        stride_h = self.img_size[0] / in_h
        stride_w = self.img_size[0] / in_w
        #---------------------------------------------------------------#
        # The raw anchors are sized for the original image; the scaled
        # anchors obtained here are expressed in feature-map units.
        #---------------------------------------------------------------#
        scaled_anchors = [(a_w / stride_w, a_h / stride_h) for a_w, a_h in self.anchors]
        #---------------------------------------------------------------#
        # Reshape the raw prediction to one of:
        #   batch_size, 3, 13, 13, 5 + num_classes
        #   batch_size, 3, 26, 26, 5 + num_classes
        #   batch_size, 3, 52, 52, 5 + num_classes
        # view() flattens/regroups dimensions, permute() reorders them and
        # contiguous() makes the result memory-contiguous again.
        #---------------------------------------------------------------#
        prediction = input.view(batch_size, int(self.num_anchors / 3),
                                self.bbox_attrs,in_h,in_w).permute(0, 1, 3, 4, 2).contiguous()
        # objectness confidence: is there an object at this location?
        pred_conf = torch.sigmoid(prediction[..., 4])
        # class confidence: does the object belong to each class?
        pred_cls = torch.sigmoid(prediction[..., 5:])
        #-----------------------------------------------------------------#
        # Find which anchors contain a target by matching ground-truth boxes
        # against the anchors (by IoU):
        #   obj_mask     batch_size, 3, in_h, in_w                feature points with a target
        #   no_obj_mask  batch_size, 3, in_h, in_w                feature points without a target
        #   t_box        batch_size, 3, in_h, in_w, 4             ground-truth centre/width/height
        #   t_conf       batch_size, 3, in_h, in_w                ground-truth objectness
        #   t_cls        batch_size, 3, in_h, in_w, num_classes   ground-truth class
        #-----------------------------------------------------------------#
        obj_mask, no_obj_mask, t_box, t_conf, t_cls, box_loss_scale_x, box_loss_scale_y = self.get_target(
            targets, scaled_anchors, in_w, in_h
        )
        #---------------------------------------------------------------#
        # Decode the predictions and compare them against the ground truth.
        # Predictions that already overlap a ground-truth box strongly are
        # dropped from the negatives: such accurate feature points make
        # poor negative samples.
        #---------------------------------------------------------------#
        no_obj_mask, pred_boxes_for_ciou = self.get_ignore(
            prediction, targets, scaled_anchors, in_w, in_h, no_obj_mask
        )
        #---------------------------------------------------------------#
        # Compute the total loss.
        #---------------------------------------------------------------#
        if self.cuda:
            obj_mask, no_obj_mask = obj_mask.cuda(), no_obj_mask.cuda()
            box_loss_scale_x, box_loss_scale_y = box_loss_scale_x.cuda(), box_loss_scale_y.cuda()
            t_conf, t_cls = t_conf.cuda(), t_cls.cuda()
            pred_boxes_for_ciou = pred_boxes_for_ciou.cuda()
            t_box = t_box.cuda()
        # Scale in [1, 2]: small boxes get a larger localisation weight.
        box_loss_scale = 2 - box_loss_scale_x * box_loss_scale_y
        # CIoU loss between decoded predictions and ground truth (positives only)
        ciou = (1 - box_ciou(pred_boxes_for_ciou[obj_mask.bool()], t_box[obj_mask.bool()])) * box_loss_scale[obj_mask.bool()]
        loss_loc = torch.sum(ciou)
        # objectness loss over both positives and (non-ignored) negatives
        loss_conf = torch.sum(BCELoss(pred_conf, obj_mask) * obj_mask) + \
                    torch.sum(BCELoss(pred_conf, obj_mask) * no_obj_mask)
        loss_cls = torch.sum(BCELoss(pred_cls[obj_mask == 1], smooth_labels(t_cls[obj_mask == 1], self.label_smooth, self.num_classes)))
        loss = loss_conf * self.lambda_conf + loss_cls * self.lamnda_cls + loss_loc * self.lambda_loc
        if self.normallize:
            num_pos = torch.sum(obj_mask)
            num_pos = torch.max(num_pos, torch.ones_like(num_pos))
        else:
            num_pos = batch_size / 3
        return loss, num_pos
    #---------------------------------------------------------------#
    # Build the target tensors the network should predict:
    # match each ground-truth box to its best anchor (by IoU) and to the
    # grid cell containing its centre.
    #---------------------------------------------------------------#
    def get_target(self, target, anchors, in_w, in_h):
        """
        @param:
        -------
        target: ground-truth boxes (normalised to [0, 1], per image)
        anchors: anchors scaled to feature-map units
        in_w, in_h: feature-map width and height
        @Returns:
        obj_mask: feature points that contain a target
        no_obj_mask: feature points without a target
        t_box: ground-truth xywh values
        t_conf: ground-truth objectness
        t_cls: ground-truth class
        box_loss_scale_x, box_loss_scale_y: localisation-loss scale so that
            small targets get a larger loss and large targets a smaller one
        -------
        """
        # number of images per batch
        batch_size = len(target)
        # indices of the anchors belonging to this feature level
        anchor_index = [[0, 1, 2], [3, 4, 5], [6, 7, 8]][self.feature_length.index(in_w)]
        subtract_index = [0, 3, 6][self.feature_length.index(in_w)]
        # allocate the all-zero / all-one target tensors
        obj_mask = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, requires_grad=False)
        no_obj_mask = torch.ones(batch_size, int(self.num_anchors/3), in_h, in_w, requires_grad=False)
        t_x = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, requires_grad=False)
        t_y = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, requires_grad=False)
        t_w = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, requires_grad=False)
        t_h = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, requires_grad=False)
        t_box = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, 4, requires_grad=False)
        t_conf = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, requires_grad=False)
        t_cls = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, self.num_classes, requires_grad=False)
        box_loss_scale_x = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, requires_grad=False)
        box_loss_scale_y = torch.zeros(batch_size, int(self.num_anchors/3), in_h, in_w, requires_grad=False)
        for b in range(batch_size):
            if len(target[b]) == 0:
                continue
            # targets are fractions of the image; convert centre and size
            # to feature-map coordinates
            gxs = target[b][:, 0:1] * in_w
            gys = target[b][:, 1:2] * in_h
            gws = target[b][:, 2:3] * in_w
            ghs = target[b][:, 3:4] * in_h
            # grid cell containing each target centre
            # (torch.floor() gives the cell's top-left corner)
            gis = torch.floor(gxs)
            gjs = torch.floor(gys)
            # ground-truth boxes as (0, 0, w, h): num_true_box, 4
            gt_box = torch.FloatTensor(torch.cat([torch.zeros_like(gws), torch.zeros_like(ghs) ,gws, ghs], 1))
            # anchors as (0, 0, w, h): 9, 4
            anchor_shapes = torch.FloatTensor(torch.cat((torch.zeros((self.num_anchors, 2)), torch.FloatTensor(anchors)), 1))
            # IoU between each ground-truth box and every anchor: num_true_box, 9
            anch_iou = box_iou(gt_box, anchor_shapes)
            #---------------------------------------------------------------#
            # For each ground-truth box find the anchor with the highest
            # overlap (torch.argmax returns the index of the maximum along
            # the given dimension).
            #---------------------------------------------------------------#
            best_anchs = torch.argmax(anch_iou, dim=-1)
            for i, best_anch in enumerate(best_anchs):
                if best_anch not in anchor_index:
                    continue
                #-------------------------------------------------------------#
                # Coordinates:
                #   gi, gj  grid-cell indices of the target centre
                #   gx, gy  target centre in feature-map coordinates
                #   gw, gh  target width/height in feature-map coordinates
                #-------------------------------------------------------------#
                gi = gis[i].long()
                gj = gjs[i].long()
                gx, gy, gw, gh = gxs[i], gys[i], gws[i], ghs[i]
                if (gj<in_h) and (gi<in_w):
                    best_anch = best_anch - subtract_index
                    # no_obj_mask marks feature points without a target
                    no_obj_mask[b, best_anch, gj, gi] = 0
                    # obj_mask marks feature points with a target
                    obj_mask[b, best_anch, gj, gi] = 1
                    # t_x, t_y: ground-truth centre
                    t_x[b, best_anch, gj, gi] = gx
                    t_y[b, best_anch, gj, gi] = gy
                    # t_w, t_h: ground-truth width/height
                    t_w[b, best_anch, gj, gi] = gw
                    t_h[b, best_anch, gj, gi] = gh
                    # xywh scale so large targets get a small loss weight
                    # and small targets a large one
                    box_loss_scale_x[b, best_anch, gj, gi] = target[b][i, 2]
                    box_loss_scale_y[b, best_anch, gj, gi] = target[b][i, 3]
                    # t_conf / t_cls: ground-truth objectness and class
                    t_conf[b, best_anch, gj, gi] = 1
                    t_cls[b, best_anch, gj, gi, target[b][i, 4].long()] = 1
                else:
                    print('Step {0} out of bound'.format(b))
                    print('gj: {0}, height: {1} | gi: {2}, width: {3}'.format(gj, in_h, gi, in_w))
                    continue
        t_box[..., 0] = t_x
        t_box[..., 1] = t_y
        t_box[..., 2] = t_w
        t_box[..., 3] = t_h
        return obj_mask, no_obj_mask, t_box, t_conf, t_cls, box_loss_scale_x, box_loss_scale_y
    #---------------------------------------------------------------#
    # Decode the predictions and drop from the negatives any prediction
    # that already overlaps a ground-truth box strongly: such accurate
    # feature points make poor negative samples.
    #---------------------------------------------------------------#
    def get_ignore(self, prediction, target, scaled_anchors, in_w, in_h, no_obj_mask):
        """
        @param:
        -------
        prediction: raw network output for this level
        target: ground-truth boxes
        scaled_anchors: anchors scaled to feature-map units
        in_w, in_h: feature-map width and height
        no_obj_mask: feature points currently marked as negatives
        @Returns:
        -------
        no_obj_mask: negatives with high-overlap predictions removed
        pred_boxes_for_ciou: decoded predicted boxes
        """
        # number of images per batch
        batch_size = len(target)
        # indices of the anchors belonging to this feature level
        anchor_index = [[0, 1, 2], [3, 4, 5], [6, 7, 8]][self.feature_length.index(in_w)]
        scaled_anchors = np.array(scaled_anchors)[anchor_index]
        # centre-offset adjustments for the anchors
        x = torch.sigmoid(prediction[..., 0])
        y = torch.sigmoid(prediction[..., 1])
        # width/height adjustments for the anchors
        w = prediction[..., 2] # width
        h = prediction[..., 3] # Height
        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
        LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
        #---------------------------------------------------------------#
        # Build the grid of anchor centres (top-left corner of each cell).
        # torch.linspace(start, end, steps) gives an evenly spaced 1-D
        # tensor; repeat() tiles it along the requested dimensions.
        #---------------------------------------------------------------#
        grid_x = torch.linspace(0, in_w - 1, in_w).repeat(in_h, 1).repeat(
            int(batch_size * self.num_anchors/3), 1, 1).view(x.shape).type(FloatTensor)
        grid_y = torch.linspace(0, in_h - 1, in_h).repeat(in_w, 1).t().repeat(
            int(batch_size * self.num_anchors/3), 1, 1).view(y.shape).type(FloatTensor)
        # anchor widths and heights broadcast over the grid
        anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0]))
        anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1]))
        anchor_w = anchor_w.repeat(batch_size, 1).repeat(1, 1, in_h * in_w).view(w.shape)
        anchor_h = anchor_h.repeat(batch_size, 1).repeat(1, 1, in_h * in_w).view(h.shape)
        # decode: adjusted centre and width/height of each predicted box
        pred_boxes = FloatTensor(prediction[..., :4].shape)
        pred_boxes[..., 0] = x + grid_x
        pred_boxes[..., 1] = y + grid_y
        pred_boxes[..., 2] = torch.exp(w) * anchor_w
        pred_boxes[..., 3] = torch.exp(h) * anchor_h
        for i in range(batch_size):
            pred_boxes_for_ignore = pred_boxes[i]
            # flatten the predictions to: num_anchors, 4
            pred_boxes_for_ignore = pred_boxes_for_ignore.view(-1, 4)
            # convert the ground truth to feature-map coordinates
            # gt_box: num_true_box, 4
            if len(target[i]) > 0:
                gx = target[i][:, 0:1] * in_w
                gy = target[i][:, 1:2] * in_h
                gw = target[i][:, 2:3] * in_w
                gh = target[i][:, 3:4] * in_h
                gt_box = torch.FloatTensor(torch.cat([gx, gy, gw, gh], -1)).type(FloatTensor)
                # IoU between ground truth and predictions: num_true_box, num_anchors
                anch_iou = box_iou(gt_box, pred_boxes_for_ignore)
                # per-prediction maximum overlap with any ground-truth box
                anch_iou_max, _ = torch.max(anch_iou, dim=0)
                anch_iou_max = anch_iou_max.view(pred_boxes[i].size()[:3])
                no_obj_mask[i][anch_iou_max > self.ignore_threshold] = 0
        return no_obj_mask, pred_boxes
| [
"torch.ones_like",
"torch.log",
"utils.utils.smooth_labels",
"torch.floor",
"torch.sigmoid",
"torch.argmax",
"torch.exp",
"torch.max",
"torch.FloatTensor",
"numpy.array",
"torch.sum",
"torch.linspace",
"torch.zeros",
"torch.zeros_like",
"utils.utils.box_iou",
"torch.cat",
"utils.util... | [((833, 877), 'utils.utils.clip_by_tensor', 'clip_by_tensor', (['pred', 'epsilon', '(1.0 - epsilon)'], {}), '(pred, epsilon, 1.0 - epsilon)\n', (847, 877), False, 'from utils.utils import box_iou, box_ciou, clip_by_tensor, smooth_labels\n'), ((4055, 4088), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 4]'], {}), '(prediction[..., 4])\n', (4068, 4088), False, 'import torch\n'), ((4134, 4168), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 5:]'], {}), '(prediction[..., 5:])\n', (4147, 4168), False, 'import torch\n'), ((6017, 6032), 'torch.sum', 'torch.sum', (['ciou'], {}), '(ciou)\n', (6026, 6032), False, 'import torch\n'), ((12933, 12966), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 0]'], {}), '(prediction[..., 0])\n', (12946, 12966), False, 'import torch\n'), ((12979, 13012), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 1]'], {}), '(prediction[..., 1])\n', (12992, 13012), False, 'import torch\n'), ((902, 917), 'torch.log', 'torch.log', (['pred'], {}), '(pred)\n', (911, 917), False, 'import torch\n'), ((937, 958), 'torch.log', 'torch.log', (['(1.0 - pred)'], {}), '(1.0 - pred)\n', (946, 958), False, 'import torch\n'), ((6496, 6515), 'torch.sum', 'torch.sum', (['obj_mask'], {}), '(obj_mask)\n', (6505, 6515), False, 'import torch\n'), ((9219, 9235), 'torch.floor', 'torch.floor', (['gxs'], {}), '(gxs)\n', (9230, 9235), False, 'import torch\n'), ((9254, 9270), 'torch.floor', 'torch.floor', (['gys'], {}), '(gys)\n', (9265, 9270), False, 'import torch\n'), ((9647, 9677), 'utils.utils.box_iou', 'box_iou', (['gt_box', 'anchor_shapes'], {}), '(gt_box, anchor_shapes)\n', (9654, 9677), False, 'from utils.utils import box_iou, box_ciou, clip_by_tensor, smooth_labels\n'), ((10017, 10047), 'torch.argmax', 'torch.argmax', (['anch_iou'], {'dim': '(-1)'}), '(anch_iou, dim=-1)\n', (10029, 10047), False, 'import torch\n'), ((12857, 12881), 'numpy.array', 'np.array', (['scaled_anchors'], {}), '(scaled_anchors)\n', (12865, 12881), True, 
'import numpy as np\n'), ((14547, 14559), 'torch.exp', 'torch.exp', (['w'], {}), '(w)\n', (14556, 14559), False, 'import torch\n'), ((14600, 14612), 'torch.exp', 'torch.exp', (['h'], {}), '(h)\n', (14609, 14612), False, 'import torch\n'), ((6267, 6339), 'utils.utils.smooth_labels', 'smooth_labels', (['t_cls[obj_mask == 1]', 'self.label_smooth', 'self.num_classes'], {}), '(t_cls[obj_mask == 1], self.label_smooth, self.num_classes)\n', (6280, 6339), False, 'from utils.utils import box_iou, box_ciou, clip_by_tensor, smooth_labels\n'), ((6557, 6581), 'torch.ones_like', 'torch.ones_like', (['num_pos'], {}), '(num_pos)\n', (6572, 6581), False, 'import torch\n'), ((15340, 15378), 'utils.utils.box_iou', 'box_iou', (['gt_box', 'pred_boxes_for_ignore'], {}), '(gt_box, pred_boxes_for_ignore)\n', (15347, 15378), False, 'from utils.utils import box_iou, box_ciou, clip_by_tensor, smooth_labels\n'), ((15475, 15501), 'torch.max', 'torch.max', (['anch_iou'], {'dim': '(0)'}), '(anch_iou, dim=0)\n', (15484, 15501), False, 'import torch\n'), ((9363, 9384), 'torch.zeros_like', 'torch.zeros_like', (['gws'], {}), '(gws)\n', (9379, 9384), False, 'import torch\n'), ((9386, 9407), 'torch.zeros_like', 'torch.zeros_like', (['ghs'], {}), '(ghs)\n', (9402, 9407), False, 'import torch\n'), ((9511, 9545), 'torch.zeros', 'torch.zeros', (['(self.num_anchors, 2)'], {}), '((self.num_anchors, 2))\n', (9522, 9545), False, 'import torch\n'), ((9547, 9573), 'torch.FloatTensor', 'torch.FloatTensor', (['anchors'], {}), '(anchors)\n', (9564, 9573), False, 'import torch\n'), ((15204, 15235), 'torch.cat', 'torch.cat', (['[gx, gy, gw, gh]', '(-1)'], {}), '([gx, gy, gw, gh], -1)\n', (15213, 15235), False, 'import torch\n'), ((13678, 13711), 'torch.linspace', 'torch.linspace', (['(0)', '(in_w - 1)', 'in_w'], {}), '(0, in_w - 1, in_w)\n', (13692, 13711), False, 'import torch\n'), ((13841, 13874), 'torch.linspace', 'torch.linspace', (['(0)', '(in_h - 1)', 'in_h'], {}), '(0, in_h - 1, in_h)\n', (13855, 13874), 
False, 'import torch\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
@version: python3.7
@author: 'sun'
@license: Apache Licence
@contact:
@software: PyCharm
@file: CNN.py
@time: 2019/4/7 15:51
"""
import os
import numpy as np
import pandas as pd
import torch
def set_seed_everywhere(seed, cuda):
    """Seed the numpy and torch RNGs for reproducibility.

    :param seed: integer seed value
    :param cuda: also seed all CUDA devices when True
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    if not cuda:
        return
    torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
    """Ensure that dirpath exists, creating intermediate directories as needed.

    Uses exist_ok=True instead of the original check-then-create pattern,
    which had a race: another process could create the directory between
    os.path.exists() and os.makedirs(), making makedirs raise.
    """
    os.makedirs(dirpath, exist_ok=True)
def load_glove_from_file(glove_filepath):
    """
    Load the GloVe embeddings
    Args:
        glove_filepath (str): path to the glove embeddings file
    Returns:
        word_to_index (dict), embeddings (numpy.ndarray)
    """
    word_to_index = {}
    embeddings = []
    # GloVe distributions are UTF-8; pin the encoding so loading does not
    # depend on the platform's default locale encoding (the original used
    # the locale default, which breaks on e.g. Windows cp1252).
    with open(glove_filepath, "r", encoding="utf-8") as fp:
        for index, line in enumerate(fp):
            fields = line.rstrip().split(" ")  # each line: word num1 num2 ...
            word_to_index[fields[0]] = index  # word = fields[0]
            embedding_i = np.array([float(val) for val in fields[1:]])
            embeddings.append(embedding_i)
    return word_to_index, np.stack(embeddings)
def make_embedding_matrix(glove_filepath, words):
    """
    Create embedding matrix for a specific set of words.
    Args:
        glove_filepath (str): file path to the glove embeddings
        words (list): list of words in the dataset
    """
    word_to_idx, glove_embeddings = load_glove_from_file(glove_filepath)
    embedding_size = glove_embeddings.shape[1]
    final_embeddings = np.zeros((len(words), embedding_size))
    for row, token in enumerate(words):
        idx = word_to_idx.get(token)
        if idx is not None:
            # Known word: copy its pretrained GloVe vector.
            final_embeddings[row, :] = glove_embeddings[idx]
        else:
            # Out-of-vocabulary word: Xavier-initialised random vector.
            random_vec = torch.ones(1, embedding_size)
            torch.nn.init.xavier_uniform_(random_vec)
            final_embeddings[row, :] = random_vec
    return final_embeddings
def make_train_state(args):
    """Build the initial bookkeeping dict used during training.

    Args:
        args: namespace providing ``learning_rate`` and ``model_state_file``.
    Returns:
        dict: fresh training-state record (loss/accuracy histories plus
        early-stopping and checkpoint fields).
    """
    fresh_state = dict(
        stop_early=False,
        early_stopping_step=0,
        early_stopping_best_val=1e8,
        learning_rate=args.learning_rate,
        epoch_index=0,
        train_loss=[],
        train_acc=[],
        val_loss=[],
        val_acc=[],
        test_loss=-1,
        test_acc=-1,
        model_filename=args.model_state_file,
    )
    return fresh_state
def update_train_state(args, model, train_state):
    """Handle the training state updates.
    Components:
     - Early Stopping: Prevent overfitting.
     - Model Checkpoint: Model is saved if the model is better
    :param args: main arguments (must provide ``early_stopping_criteria``)
    :param model: model to train
    :param train_state: a dictionary representing the training state values
    :returns:
        a new train_state
    """
    # Save one model at least
    if train_state['epoch_index'] == 0:
        torch.save(model.state_dict(), train_state['model_filename'])
        train_state['stop_early'] = False
    # Save model if performance improved
    elif train_state['epoch_index'] >= 1:
        loss_tm1, loss_t = train_state['val_loss'][-2:]
        # If loss worsened
        if loss_t >= train_state['early_stopping_best_val']:
            # Update step
            train_state['early_stopping_step'] += 1
        # Loss decreased
        else:
            # Save the best model
            if loss_t < train_state['early_stopping_best_val']:
                torch.save(model.state_dict(), train_state['model_filename'])
                # BUG FIX: track the improved loss. The original never updated
                # this value, so it stayed at its 1e8 sentinel forever, the
                # "worsened" branch was unreachable, and early stopping could
                # never trigger.
                train_state['early_stopping_best_val'] = loss_t
            # Reset early stopping step
            train_state['early_stopping_step'] = 0
        # Stop early ?
        train_state['stop_early'] = \
            train_state['early_stopping_step'] >= args.early_stopping_criteria
    return train_state
def compute_accuracy(y_pred, y_target):
    """Return the percentage of rows whose argmax prediction matches the target.

    Args:
        y_pred (torch.Tensor): logits/scores of shape (batch, n_classes).
        y_target (torch.Tensor): integer class labels of shape (batch,).
    """
    predicted = y_pred.argmax(dim=1)  # index of the max score per row
    n_correct = (predicted == y_target).sum().item()
    return n_correct / len(predicted) * 100
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"os.path.exists",
"os.makedirs",
"torch.nn.init.xavier_uniform_",
"torch.eq",
"numpy.stack",
"numpy.random.seed",
"torch.ones"
] | [((281, 301), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (295, 301), True, 'import numpy as np\n'), ((306, 329), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (323, 329), False, 'import torch\n'), ((351, 383), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (377, 383), False, 'import torch\n'), ((423, 446), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (437, 446), False, 'import os\n'), ((456, 476), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (467, 476), False, 'import os\n'), ((1107, 1127), 'numpy.stack', 'np.stack', (['embeddings'], {}), '(embeddings)\n', (1115, 1127), True, 'import numpy as np\n'), ((1745, 1774), 'torch.ones', 'torch.ones', (['(1)', 'embedding_size'], {}), '(1, embedding_size)\n', (1755, 1774), False, 'import torch\n'), ((1787, 1829), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['embedding_i'], {}), '(embedding_i)\n', (1816, 1829), False, 'import torch\n'), ((3836, 3870), 'torch.eq', 'torch.eq', (['y_pred_indices', 'y_target'], {}), '(y_pred_indices, y_target)\n', (3844, 3870), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Exploratory notebook-style script probing ConceptNet Numberbatch embeddings.

Created on Mon May 22 16:44:23 2017
@author: kcarnold
"""
import numpy as np
from suggestion import clustering
from suggestion import suggestion_generator
#%%
# Load the full ConceptNet Numberbatch embedding table.
cnnb = clustering.ConceptNetNumberBatch.load()
#%%
# Compose a query vector from two terms and score every vocabulary term
# against it by dot product with the full embedding matrix.
vec = cnnb['wait'] + cnnb['server']
sims = cnnb.vecs @ vec
#%%
# Top-50 most similar terms (argsort is ascending, so reverse then slice).
[cnnb.id2term[i] for i in np.argsort(sims)[::-1][:50]]
#%%
import pickle
# NOTE(review): loads a local word2vec pickle from a hard-coded user path —
# this cell only runs on the original author's machine.
gs_w2v = pickle.load(open('/Users/kcarnold/first_yelp_w2v.pkl', 'rb'))
#%%
(cnnb['empty']) @ (cnnb['full']) | [
"numpy.argsort",
"suggestion.clustering.ConceptNetNumberBatch.load"
] | [((197, 236), 'suggestion.clustering.ConceptNetNumberBatch.load', 'clustering.ConceptNetNumberBatch.load', ([], {}), '()\n', (234, 236), False, 'from suggestion import clustering\n'), ((330, 346), 'numpy.argsort', 'np.argsort', (['sims'], {}), '(sims)\n', (340, 346), True, 'import numpy as np\n')] |
# Copyright (c) 2020 <NAME>.
# Full license can be found in the top level "LICENSE" file.
import numpy as np
from toast.timing import function_timer, Timer
from toast.utils import Logger
import toast.qarray as qa
from ...core.hardware import LAT_COROTATOR_OFFSET_DEG
# Cartesian unit basis vectors (rows of the 3x3 identity), used as rotation axes.
XAXIS, YAXIS, ZAXIS = np.eye(3)
def add_corotator_args(parser):
    """Register the LAT co-rotator command line switches on *parser*.

    Adds a mutually overriding pair of flags that both write to the
    ``corotate_lat`` destination; the default is True (co-rotation enabled).
    """
    parser.add_argument(
        "--corotate-lat",
        required=False,
        action="store_true",
        dest="corotate_lat",
        help="Rotate LAT receiver to maintain focalplane orientation",
    )
    parser.add_argument(
        "--no-corotate-lat",
        required=False,
        action="store_false",
        dest="corotate_lat",
        help="Do not Rotate LAT receiver to maintain focalplane orientation",
    )
    parser.set_defaults(corotate_lat=True)
    return
def rotate_focalplane(args, data, comm):
    """ The LAT focalplane projected on the sky rotates as the cryostat
    (co-rotator) tilts. Usually the tilt is the same as the observing
    elevation to maintain constant angle between the mirror and the cryostat.
    This method must be called *before* expanding the detector pointing
    from boresight.

    :param args: parsed command line arguments (unused here).
    :param data: distributed TOAST data object; only "LAT" observations
        are modified, in place.
    :param comm: TOAST communicator; rank 0 reports timing.
    """
    # NOTE(review): `log` is created but never used in this function.
    log = Logger.get()
    timer = Timer()
    timer.start()
    for obs in data.obs:
        # Only the LAT telescope has a co-rotator; skip everything else.
        if obs["telescope"] != "LAT":
            continue
        tod = obs["tod"]
        cache_name = "corotator_angle_deg"
        if tod.cache.exists(cache_name):
            corotator_angle = tod.cache.reference(cache_name)
        else:
            # If a vector of co-rotator angles isn't already cached,
            # make one now from the observation metadata. This will
            # ensure they get recorded in the so3g files.
            corotator_angle = obs["corotator_angle_deg"]
            offset, nsample = tod.local_samples
            # Broadcast the scalar metadata angle to one value per sample.
            tod.cache.put(cache_name, np.zeros(nsample) + corotator_angle)
        el = np.degrees(tod.read_boresight_el())
        # Rotation about the boresight (Z axis) by the total co-rotator angle:
        # cached angle + elevation + fixed instrument offset, in radians.
        rot = qa.rotation(
            ZAXIS, np.radians(corotator_angle + el + LAT_COROTATOR_OFFSET_DEG)
        )
        quats = tod.read_boresight()
        # In-place update so the cached boresight quaternions are modified.
        quats[:] = qa.mult(quats, rot)
        try:
            # If there are horizontal boresight quaternions, they need
            # to be rotated as well.  Deliberately best-effort: TODs without
            # azel boresight support raise here and are simply skipped.
            quats = tod.read_boresight(azel=True)
            quats[:] = qa.mult(quats, rot)
        except Exception as e:
            pass
    if comm.comm_world is None or comm.comm_world.rank == 0:
        timer.report_clear("Rotate focalplane")
    return
| [
"toast.utils.Logger.get",
"numpy.radians",
"numpy.eye",
"toast.timing.Timer",
"toast.qarray.mult",
"numpy.zeros"
] | [((294, 303), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (300, 303), True, 'import numpy as np\n'), ((1198, 1210), 'toast.utils.Logger.get', 'Logger.get', ([], {}), '()\n', (1208, 1210), False, 'from toast.utils import Logger\n'), ((1223, 1230), 'toast.timing.Timer', 'Timer', ([], {}), '()\n', (1228, 1230), False, 'from toast.timing import function_timer, Timer\n'), ((2116, 2135), 'toast.qarray.mult', 'qa.mult', (['quats', 'rot'], {}), '(quats, rot)\n', (2123, 2135), True, 'import toast.qarray as qa\n'), ((1990, 2049), 'numpy.radians', 'np.radians', (['(corotator_angle + el + LAT_COROTATOR_OFFSET_DEG)'], {}), '(corotator_angle + el + LAT_COROTATOR_OFFSET_DEG)\n', (2000, 2049), True, 'import numpy as np\n'), ((2330, 2349), 'toast.qarray.mult', 'qa.mult', (['quats', 'rot'], {}), '(quats, rot)\n', (2337, 2349), True, 'import toast.qarray as qa\n'), ((1858, 1875), 'numpy.zeros', 'np.zeros', (['nsample'], {}), '(nsample)\n', (1866, 1875), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Testing suite for SkewStudent class.
"""
from __future__ import print_function, division
import unittest as ut
import numpy as np
from scipy.stats import t
from skewstudent import SkewStudent
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class SkewStudentTestCase(ut.TestCase):

    """Unit tests for the SkewStudent distribution class."""

    def test_init(self):
        """Constructor stores defaults and explicit parameters."""
        dist = SkewStudent()
        self.assertIsInstance(dist.eta, float)
        self.assertIsInstance(dist.lam, float)
        eta, lam = 5., -.2
        dist = SkewStudent(eta=eta, lam=lam)
        self.assertEqual(dist.eta, eta)
        self.assertEqual(dist.lam, lam)

    def test_pdf(self):
        """pdf is vectorised and returns a float for scalar input."""
        n_points = 50
        grid = np.linspace(-1, 1, n_points)
        dist = SkewStudent()
        density = dist.pdf(grid)
        self.assertEqual(density.shape[0], n_points)
        self.assertIsInstance(dist.pdf(0), float)

    def test_cdf(self):
        """cdf is vectorised and returns a float for scalar input."""
        n_points = 50
        grid = np.linspace(-1, 1, n_points)
        dist = SkewStudent()
        cumulative = dist.cdf(grid)
        self.assertEqual(cumulative.shape[0], n_points)
        self.assertIsInstance(dist.cdf(0), float)

    def test_ppf(self):
        """ppf is vectorised and returns a float for scalar input."""
        n_points = 50
        probs = np.linspace(.01, .99, n_points)
        dist = SkewStudent()
        quantiles = dist.ppf(probs)
        self.assertEqual(quantiles.shape[0], n_points)
        self.assertIsInstance(dist.ppf(.5), float)

    def test_rvs(self):
        """rvs honours scalar, int, and tuple size arguments."""
        dist = SkewStudent()
        self.assertIsInstance(dist.rvs(), float)
        draws = dist.rvs(size=2)
        self.assertIsInstance(draws, np.ndarray)
        self.assertEqual(draws.shape, (2, ))
        draws = dist.rvs(size=(2, 3))
        self.assertIsInstance(draws, np.ndarray)
        self.assertEqual(draws.shape, (2, 3))

    def test_compare_with_t(self):
        """With lam=0 the distribution matches a standardised Student's t."""
        eta = 5
        skewed = SkewStudent(eta=eta, lam=0)
        # Rescale Student's t to unit variance to match the SkewStudent
        # parameterisation.
        reference = t(eta, scale=1/(eta/(eta-2))**.5)
        grid = np.linspace(-2, 2, 100)
        np.testing.assert_array_almost_equal(skewed.pdf(grid), reference.pdf(grid))
        np.testing.assert_array_almost_equal(skewed.cdf(grid), reference.cdf(grid))
        probs = np.linspace(.01, .99, 100)
        np.testing.assert_array_almost_equal(skewed.ppf(probs), reference.ppf(probs))
if __name__ == '__main__':
    # Discover and run the test suite when executed directly.
    ut.main()
| [
"unittest.main",
"scipy.stats.t",
"numpy.linspace",
"skewstudent.SkewStudent"
] | [((2550, 2559), 'unittest.main', 'ut.main', ([], {}), '()\n', (2557, 2559), True, 'import unittest as ut\n'), ((451, 464), 'skewstudent.SkewStudent', 'SkewStudent', ([], {}), '()\n', (462, 464), False, 'from skewstudent import SkewStudent\n'), ((606, 635), 'skewstudent.SkewStudent', 'SkewStudent', ([], {'eta': 'eta', 'lam': 'lam'}), '(eta=eta, lam=lam)\n', (617, 635), False, 'from skewstudent import SkewStudent\n'), ((792, 805), 'skewstudent.SkewStudent', 'SkewStudent', ([], {}), '()\n', (803, 805), False, 'from skewstudent import SkewStudent\n'), ((838, 861), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'num'], {}), '(-1, 1, num)\n', (849, 861), True, 'import numpy as np\n'), ((1060, 1073), 'skewstudent.SkewStudent', 'SkewStudent', ([], {}), '()\n', (1071, 1073), False, 'from skewstudent import SkewStudent\n'), ((1106, 1129), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'num'], {}), '(-1, 1, num)\n', (1117, 1129), True, 'import numpy as np\n'), ((1328, 1341), 'skewstudent.SkewStudent', 'SkewStudent', ([], {}), '()\n', (1339, 1341), False, 'from skewstudent import SkewStudent\n'), ((1374, 1402), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.99)', 'num'], {}), '(0.01, 0.99, num)\n', (1385, 1402), True, 'import numpy as np\n'), ((1600, 1613), 'skewstudent.SkewStudent', 'SkewStudent', ([], {}), '()\n', (1611, 1613), False, 'from skewstudent import SkewStudent\n'), ((2100, 2127), 'skewstudent.SkewStudent', 'SkewStudent', ([], {'eta': 'eta', 'lam': '(0)'}), '(eta=eta, lam=0)\n', (2111, 2127), False, 'from skewstudent import SkewStudent\n'), ((2181, 2200), 'scipy.stats.t', 't', (['eta'], {'scale': 'scale'}), '(eta, scale=scale)\n', (2182, 2200), False, 'from scipy.stats import t\n'), ((2215, 2238), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(100)'], {}), '(-2, 2, 100)\n', (2226, 2238), True, 'import numpy as np\n'), ((2411, 2439), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.99)', '(100)'], {}), '(0.01, 0.99, 100)\n', (2422, 2439), True, 
'import numpy as np\n')] |
import gym
import numpy as np
env = gym.make("MountainCar-v0")
env.reset()
# Q-learning hyper-parameters.
LEARNING_RATE = 0.1
DISCOUNT = 0.95  # how much we value future actions over current
EPISODES = 25000
SHOW_EVERY = 2000  # report/render progress every this many episodes
print(env.observation_space.high)  # print maximum position, velocity
print(env.observation_space.low)  # print minimum position, velocity
print(env.action_space.n)
# Discretise the continuous observation space into a fixed grid so a
# tabular Q-table can be used (20 buckets per observation dimension).
DISCRETE_OS_SIZE = [20] * len(
    env.observation_space.high)  # these determine the number of discrete values within a range
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE
print(discrete_os_win_size)  # these are the step sizes
# Exploration schedule: epsilon decays linearly over the first half of training.
epsilon = 0.5  # higher number --> more random
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES // 2  # // is integer (floor) division
epsilon_decay_value = epsilon/(END_EPSILON_DECAYING - START_EPSILON_DECAYING)
# Q-table of optimistic-pessimistic random values (every step rewards -1).
q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))  # 20 x 20 x 3
ep_rewards = []
aggr_ep_rewards = {'ep': [], 'avg': [], 'min': [], 'max': []}
print(q_table.shape)
def get_discrete_state(state):
discrete_state = (state - env.observation_space.low) / discrete_os_win_size
return tuple(discrete_state.astype(np.int))
discrete_state = get_discrete_state(env.reset())
print(discrete_state)
print(np.argmax(q_table[discrete_state]))  # get the action value, by selecting the index with the maximum q value
'''
done = False
while not done:
    action = 2  # Push it right
    new_state, reward, done, _ = env.step(action)  # state is position and velocity
    print(new_state)
    env.render()
env.close()
'''
# Main Q-learning training loop.
for episode in range(EPISODES):
    if episode % SHOW_EVERY == 0:
        print(episode)
        render = True
    else:
        render = False
    discrete_state = get_discrete_state(env.reset())
    done = False
    while not done:
        # Epsilon-greedy action selection: exploit the Q-table, or explore.
        if np.random.random() > epsilon:
            action = np.argmax(q_table[discrete_state])  #
        else:
            action = np.random.randint(0, env.action_space.n)
        new_state, reward, done, _ = env.step(action)  # state is position and velocity
        new_discrete_state = get_discrete_state(new_state)
        # NOTE(review): this unconditional render defeats the SHOW_EVERY
        # throttling below and renders every single step of every episode.
        env.render()
        if render:  # rendering environment is really slow, so render every 2000
            env.render()
        if not done:
            # Standard Q-learning (Bellman) update for the taken action.
            max_future_q = np.max(q_table[new_discrete_state])
            current_q = q_table[discrete_state + (action,)]  # access the q-value entry from the 3D array
            new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
            q_table[discrete_state + (action,)] = new_q  # update the q_table with the new q value
        elif new_state[0] >= env.goal_position:
            print(f"We made it on episode {episode}")
            q_table[discrete_state + (action,)] = 0  # if it reaches goal, then the reward is 0
        discrete_state = new_discrete_state
    # Decay exploration only within the configured episode window.
    if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
        epsilon -= epsilon_decay_value
env.close()
| [
"numpy.random.random",
"numpy.argmax",
"numpy.max",
"numpy.random.randint",
"numpy.random.uniform",
"gym.make"
] | [((37, 63), 'gym.make', 'gym.make', (['"""MountainCar-v0"""'], {}), "('MountainCar-v0')\n", (45, 63), False, 'import gym\n'), ((896, 975), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-2)', 'high': '(0)', 'size': '(DISCRETE_OS_SIZE + [env.action_space.n])'}), '(low=-2, high=0, size=DISCRETE_OS_SIZE + [env.action_space.n])\n', (913, 975), True, 'import numpy as np\n'), ((1336, 1370), 'numpy.argmax', 'np.argmax', (['q_table[discrete_state]'], {}), '(q_table[discrete_state])\n', (1345, 1370), True, 'import numpy as np\n'), ((1897, 1915), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1913, 1915), True, 'import numpy as np\n'), ((1948, 1982), 'numpy.argmax', 'np.argmax', (['q_table[discrete_state]'], {}), '(q_table[discrete_state])\n', (1957, 1982), True, 'import numpy as np\n'), ((2021, 2061), 'numpy.random.randint', 'np.random.randint', (['(0)', 'env.action_space.n'], {}), '(0, env.action_space.n)\n', (2038, 2061), True, 'import numpy as np\n'), ((2384, 2419), 'numpy.max', 'np.max', (['q_table[new_discrete_state]'], {}), '(q_table[new_discrete_state])\n', (2390, 2419), True, 'import numpy as np\n')] |
# coding: utf-8
# # Mask R-CNN - Train on Shapes Dataset
#
#
# This notebook shows how to train Mask R-CNN on your own dataset. To keep things simple we use a synthetic dataset of shapes (squares, triangles, and circles) which enables fast training. You'd still need a GPU, though, because the network backbone is a Resnet101, which would be too slow to train on a CPU. On a GPU, you can start to get okay-ish results in a few minutes, and good results in less than an hour.
#
# The code of the *Shapes* dataset is included below. It generates images on the fly, so it doesn't require downloading any data. And it can generate images of any size, so we pick a small image size to train faster.
# In[1]:
import os
import pickle
import traceback
import tensorflow as tf
# Pin GPU selection before TensorFlow initialises the devices.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
set_session(tf.Session(config=config))
import sys
import random
import skimage.io
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
from config import Config
import utils
import model as modellib
import visualize
from model import log
from USDataset import USDataset
# get_ipython().run_line_magic('matplotlib', 'inline')
# Root directory of the project
ROOT_DIR = os.getcwd()
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Pick the most recently created run directory under logs/.
# NOTE(review): assumes logs/ is non-empty; max() raises ValueError otherwise.
dirs = os.listdir(MODEL_DIR)
dirs = [os.path.join(MODEL_DIR, filename) for filename in dirs]
ctimes = [os.path.getctime(filename) for filename in dirs]
MODEL_DIR = dirs[ctimes.index(max(ctimes))]
# Path to COCO trained weights
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
class ShapesConfig(Config):
    """Configuration for training on the toy shapes dataset.
    Derives from the base Config class and overrides values specific
    to the toy shapes dataset.
    """
    # Give the configuration a recognizable name
    NAME = "ultar_sound"
    # Train on 1 GPU and 8 images per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # background + 3 shapes
    # Use smaller anchors because our image and objects are small.
    # NOTE: the original assigned RPN_ANCHOR_SCALES twice; the first value,
    # (8, 16, 32, 64, 128), was dead code immediately shadowed by the second
    # assignment, which is the one kept here.
    RPN_ANCHOR_SCALES = (32, 64, 128)  # anchor side in pixels
    # # Reduce training ROIs per image because the images are small and have
    # # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 320
    #
    # # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100
    #
    # # use small validation steps since the epoch is small
    # VALIDATION_STEPS = 5
# Instantiate the training configuration defined above.
config = ShapesConfig()
# config.display()
# ## Notebook Preferences
# In[3]:
def get_ax(rows=1, cols=1, size=8):
    """Return a Matplotlib Axes array to be used in
    all visualizations in the notebook. Provide a
    central point to control graph sizes.
    Change the default size attribute to control the size
    of rendered images
    """
    figure_size = (size * cols, size * rows)
    _, ax = plt.subplots(rows, cols, figsize=figure_size)
    return ax
# In[5]:
# Training dataset
dataset_train = USDataset('train.txt')
dataset_train.prepare()
# Validation dataset
dataset_val = USDataset('data/label.txt')
dataset_val.prepare()
# In[6]:
# Load and display random samples
# image_ids = np.random.choice(dataset_train.image_ids, 4)
# for image_id in image_ids:
#     image = dataset_train.load_image(image_id)
#     mask, class_ids = dataset_train.load_mask(image_id)
#     visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
# ## Create Model
# In[ ]:
# ## Detection
# In[11]:
class InferenceConfig(ShapesConfig):
    """Training configuration specialised for single-image inference."""
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
print(MODEL_DIR)
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(MODEL_DIR, "mask_rcnn_ultar_sound_000%s.h5" % sys.argv[-1])
model_path = os.path.join(MODEL_DIR, "mask_rcnn_ultar_sound_000%s.h5" % 8)
# model_path = os.path.join("mask_rcnn_shapes.h5")
# model_path = model.find_last()[1]
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# In[12]:
# Test on a random image
# image_id = random.choice(dataset_val.image_ids)
# original_image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, inference_config,
#                                                                                    image_id, use_mini_mask=False)
#
# log("original_image", original_image)
# log("image_meta", image_meta)
# log("gt_class_id", gt_bbox)
# log("gt_bbox", gt_bbox)
# log("gt_mask", gt_mask)
#
# visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
#                             dataset_train.class_names, figsize=(8, 8))
# In[13]:
# results = model.detect([original_image], verbose=1)
#
# r = results[0]
# visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
#                             dataset_val.class_names, r['scores'], ax=get_ax())
# ## Evaluation
# In[14]:
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
# image_ids = np.random.choice(dataset_val.image_ids, 330)
image_ids = dataset_val.image_ids
APs = []
# For each validation image: run detection, keep at most the 5 largest-area,
# highest-scoring detections, and pickle (image, detections) next to the image.
for image_id in image_ids:
    try:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, inference_config,
                                                                                 image_id, use_mini_mask=False)
        # print(image.shape)
        molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        rois = r['rois']
        # Drop tiny detections: keep only boxes with area > 1000 pixels.
        area = (rois[:, 2] - rois[:, 0]) * (rois[:, 3] - rois[:, 1])
        ids = np.transpose(np.asarray(np.where(area > 1000)), (0, 1))[0]
        rois = np.asarray(r['rois'])[ids, :]
        class_ids = np.asarray(r['class_ids'])[ids]
        scores = np.asarray(r['scores'])[ids]
        masks = np.asarray(r['masks'])[:, :, ids]
        print(ids.shape[0])
        # Keep the top-5 detections by score (or all, if fewer remain).
        if ids.shape[0] < 5:
            ids = np.argsort(scores)[::-1][0: ids.shape[0]]
        else:
            ids = np.argsort(scores)[::-1][0: 5]
        rois = rois[ids, :]
        class_ids = class_ids[ids]
        scores = scores[ids]
        masks = masks[:, :, ids]
        r = {'rois': rois, 'scores': scores, 'masks': masks, 'class_ids': class_ids}
        IMAGE_DIR = ''
        file_names = dataset_val.image_info[image_id]['path']
        # print(file_names)
        # image = skimage.io.imread(file_names)
        # # Run detection
        # results = model.detect([image], verbose=1)
        # Visualize results
        # Persist the image and filtered detections beside the source file.
        result_name = file_names.split('.')
        fr = open(result_name[0] + '.dat', 'wb')
        pickle.dump([image, r, ['mass']], fr)
        fr.flush()
        fr.close()
        print(result_name[0] + '.dat')
        # Compute AP
        # AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id,
        #                                                      r["rois"], r["class_ids"], r["scores"])
        # print("************************************")
    except Exception as e:
        traceback.print_exc()
# print(AP)
# print(precisions)
# print(recalls)
# print(overlaps)
# APs.append(AP)
# NOTE(review): APs is never filled (the compute_ap call is commented out),
# so np.mean([]) below yields nan with a RuntimeWarning.
print("mAP: ", np.mean(APs))
| [
"numpy.mean",
"os.listdir",
"pickle.dump",
"USDataset.USDataset",
"numpy.where",
"os.path.getctime",
"tensorflow.Session",
"model.load_image_gt",
"os.path.join",
"numpy.asarray",
"os.getcwd",
"numpy.argsort",
"model.MaskRCNN",
"model.mold_image",
"tensorflow.ConfigProto",
"traceback.pr... | [((932, 948), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (946, 948), True, 'import tensorflow as tf\n'), ((1440, 1451), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1449, 1451), False, 'import os\n'), ((1508, 1538), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (1520, 1538), False, 'import os\n'), ((1547, 1568), 'os.listdir', 'os.listdir', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (1557, 1568), False, 'import os\n'), ((1786, 1829), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (1798, 1829), False, 'import os\n'), ((3480, 3502), 'USDataset.USDataset', 'USDataset', (['"""train.txt"""'], {}), "('train.txt')\n", (3489, 3502), False, 'from USDataset import USDataset\n'), ((3563, 3590), 'USDataset.USDataset', 'USDataset', (['"""data/label.txt"""'], {}), "('data/label.txt')\n", (3572, 3590), False, 'from USDataset import USDataset\n'), ((4180, 4266), 'model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'inference_config', 'model_dir': 'MODEL_DIR'}), "(mode='inference', config=inference_config, model_dir=\n MODEL_DIR)\n", (4197, 4266), True, 'import model as modellib\n'), ((4502, 4563), 'os.path.join', 'os.path.join', (['MODEL_DIR', "('mask_rcnn_ultar_sound_000%s.h5' % 8)"], {}), "(MODEL_DIR, 'mask_rcnn_ultar_sound_000%s.h5' % 8)\n", (4514, 4563), False, 'import os\n'), ((1020, 1045), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1030, 1045), True, 'import tensorflow as tf\n'), ((1577, 1610), 'os.path.join', 'os.path.join', (['MODEL_DIR', 'filename'], {}), '(MODEL_DIR, filename)\n', (1589, 1610), False, 'import os\n'), ((1643, 1669), 'os.path.getctime', 'os.path.getctime', (['filename'], {}), '(filename)\n', (1659, 1669), False, 'import os\n'), ((3357, 3417), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(size * cols, size * 
rows)'}), '(rows, cols, figsize=(size * cols, size * rows))\n', (3369, 3417), True, 'import matplotlib.pyplot as plt\n'), ((8137, 8149), 'numpy.mean', 'np.mean', (['APs'], {}), '(APs)\n', (8144, 8149), True, 'import numpy as np\n'), ((6048, 6136), 'model.load_image_gt', 'modellib.load_image_gt', (['dataset_val', 'inference_config', 'image_id'], {'use_mini_mask': '(False)'}), '(dataset_val, inference_config, image_id,\n use_mini_mask=False)\n', (6070, 6136), True, 'import model as modellib\n'), ((7560, 7597), 'pickle.dump', 'pickle.dump', (["[image, r, ['mass']]", 'fr'], {}), "([image, r, ['mass']], fr)\n", (7571, 7597), False, 'import pickle\n'), ((6283, 6327), 'model.mold_image', 'modellib.mold_image', (['image', 'inference_config'], {}), '(image, inference_config)\n', (6302, 6327), True, 'import model as modellib\n'), ((6620, 6641), 'numpy.asarray', 'np.asarray', (["r['rois']"], {}), "(r['rois'])\n", (6630, 6641), True, 'import numpy as np\n'), ((6670, 6696), 'numpy.asarray', 'np.asarray', (["r['class_ids']"], {}), "(r['class_ids'])\n", (6680, 6696), True, 'import numpy as np\n'), ((6719, 6742), 'numpy.asarray', 'np.asarray', (["r['scores']"], {}), "(r['scores'])\n", (6729, 6742), True, 'import numpy as np\n'), ((6764, 6786), 'numpy.asarray', 'np.asarray', (["r['masks']"], {}), "(r['masks'])\n", (6774, 6786), True, 'import numpy as np\n'), ((7975, 7996), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7994, 7996), False, 'import traceback\n'), ((6570, 6591), 'numpy.where', 'np.where', (['(area > 1000)'], {}), '(area > 1000)\n', (6578, 6591), True, 'import numpy as np\n'), ((6873, 6891), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (6883, 6891), True, 'import numpy as np\n'), ((6947, 6965), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (6957, 6965), True, 'import numpy as np\n')] |
import folium
import itertools
import math
import numpy as np
def _degree_to_zoom_level(l1, l2, margin = 0.0):
degree = abs(l1 - l2) * (1 + margin)
zoom_level_int = 0
if degree != 0:
zoom_level_float = math.log(360/degree)/math.log(2)
zoom_level_int = int(zoom_level_float)
else:
zoom_level_int = 18
return zoom_level_int
def display_map(latitude = None, longitude = None, resolution = None):
    """ Generates a folium map with a lat-lon bounded rectangle drawn on it. Folium maps can be
    displayed inline in Jupyter notebooks.
    Args:
        latitude   (float,float): a tuple of latitude bounds in (min,max) format
        longitude ((float, float)): a tuple of longitude bounds in (min,max) format
        resolution ((float, float)): tuple in (lat,lon) format used to draw a grid on your map. Values denote
                                     spacing of latitude and longitude lines.  Gridding starts at top left
                                     corner. Default displays no grid at all.
    Returns:
        folium.Map: A map centered on the lat lon bounds. A rectangle is drawn on this map detailing the
        perimeter of the lat,lon bounds.  A zoom level is calculated such that the resulting viewport is the
        closest it can possibly get to the centered bounding rectangle without clipping it.  An
        optional grid can be overlaid with primitive interpolation.
    .. _Folium
        https://github.com/python-visualization/folium
    """
    assert latitude is not None
    assert longitude is not None

    ###### ###### ######   CALC ZOOM LEVEL     ###### ###### ######
    # Negative margin tightens the fit; take the coarser of the two axes'
    # zoom levels so neither dimension gets clipped.
    margin = -0.5
    zoom_bias = 0
    lat_zoom_level = _degree_to_zoom_level(*latitude, margin = margin) + zoom_bias
    lon_zoom_level = _degree_to_zoom_level(*longitude, margin = margin) + zoom_bias
    zoom_level = min(lat_zoom_level, lon_zoom_level)
    ###### ###### ######   CENTER POINT        ###### ###### ######
    center = [np.mean(latitude), np.mean(longitude)]
    ###### ###### ######   CREATE MAP ###### ###### ######
    # Google hybrid (satellite + labels) tile layer.
    map_hybrid = folium.Map(
        location=center,
        zoom_start=zoom_level,
        tiles=" http://mt1.google.com/vt/lyrs=y&z={z}&x={x}&y={y}",
        attr="Google"
    )
    ###### ###### ######     RESOLUTION GRID    ###### ###### ######
    if resolution is not None:
        res_lat, res_lon = resolution
        lats = np.arange(*latitude, abs(res_lat))
        lons = np.arange(*longitude, abs(res_lon))
        # Build ((start_lat, lon), (end_lat, lon)) segments spanning the full
        # bounds for every grid line in each direction.
        vertical_grid = map(lambda x :([x[0][0],x[1]],[x[0][1],x[1]]),itertools.product([latitude],lons))
        horizontal_grid = map(lambda x :([x[1],x[0][0]],[x[1],x[0][1]]),itertools.product([longitude],lats))
        for segment in vertical_grid:
            folium.features.PolyLine(segment, color = 'white', opacity = 0.3).add_to(map_hybrid)
        for segment in horizontal_grid:
            folium.features.PolyLine(segment, color = 'white', opacity = 0.3).add_to(map_hybrid)
    ###### ###### ######     BOUNDING BOX       ###### ###### ######
    # Closed polygon tracing the perimeter of the lat/lon bounds.
    line_segments = [(latitude[0],longitude[0]),
                     (latitude[0],longitude[1]),
                     (latitude[1],longitude[1]),
                     (latitude[1],longitude[0]),
                     (latitude[0],longitude[0])
                    ]
    map_hybrid.add_child(
        folium.features.PolyLine(
            locations=line_segments,
            color='red',
            opacity=0.8)
    )
    # Clicking the map shows the lat/lon of the clicked point.
    map_hybrid.add_child(folium.features.LatLngPopup())
    return map_hybrid
| [
"numpy.mean",
"itertools.product",
"folium.features.LatLngPopup",
"math.log",
"folium.Map",
"folium.features.PolyLine"
] | [((2122, 2252), 'folium.Map', 'folium.Map', ([], {'location': 'center', 'zoom_start': 'zoom_level', 'tiles': '""" http://mt1.google.com/vt/lyrs=y&z={z}&x={x}&y={y}"""', 'attr': '"""Google"""'}), "(location=center, zoom_start=zoom_level, tiles=\n ' http://mt1.google.com/vt/lyrs=y&z={z}&x={x}&y={y}', attr='Google')\n", (2132, 2252), False, 'import folium\n'), ((1993, 2010), 'numpy.mean', 'np.mean', (['latitude'], {}), '(latitude)\n', (2000, 2010), True, 'import numpy as np\n'), ((2012, 2030), 'numpy.mean', 'np.mean', (['longitude'], {}), '(longitude)\n', (2019, 2030), True, 'import numpy as np\n'), ((3434, 3509), 'folium.features.PolyLine', 'folium.features.PolyLine', ([], {'locations': 'line_segments', 'color': '"""red"""', 'opacity': '(0.8)'}), "(locations=line_segments, color='red', opacity=0.8)\n", (3458, 3509), False, 'import folium\n'), ((3579, 3608), 'folium.features.LatLngPopup', 'folium.features.LatLngPopup', ([], {}), '()\n', (3606, 3608), False, 'import folium\n'), ((234, 256), 'math.log', 'math.log', (['(360 / degree)'], {}), '(360 / degree)\n', (242, 256), False, 'import math\n'), ((255, 266), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (263, 266), False, 'import math\n'), ((2608, 2643), 'itertools.product', 'itertools.product', (['[latitude]', 'lons'], {}), '([latitude], lons)\n', (2625, 2643), False, 'import itertools\n'), ((2716, 2752), 'itertools.product', 'itertools.product', (['[longitude]', 'lats'], {}), '([longitude], lats)\n', (2733, 2752), False, 'import itertools\n'), ((2804, 2865), 'folium.features.PolyLine', 'folium.features.PolyLine', (['segment'], {'color': '"""white"""', 'opacity': '(0.3)'}), "(segment, color='white', opacity=0.3)\n", (2828, 2865), False, 'import folium\n'), ((2954, 3015), 'folium.features.PolyLine', 'folium.features.PolyLine', (['segment'], {'color': '"""white"""', 'opacity': '(0.3)'}), "(segment, color='white', opacity=0.3)\n", (2978, 3015), False, 'import folium\n')] |
#!env python
import urllib.request
import re
import os
import tempfile
import numpy as np
import pandas as pd
import pdfminer
from pdfminer.high_level import extract_pages
def _extract_text(fn):
    """Return the stripped text of every horizontal text box on page 1 of *fn*.

    Boxes are returned in reading order: top-to-bottom, then left-to-right
    (two stable sorts on the bounding-box x and y coordinates).
    """
    boxes = [
        element
        for layout in extract_pages(fn, maxpages=1)
        for element in layout
        if isinstance(element, pdfminer.layout.LTTextBoxHorizontal)
    ]
    # Python's sort is stable: sorting by x first, then by descending y,
    # yields top-to-bottom, left-to-right ordering.
    boxes.sort(key=lambda box: box.bbox[0])
    boxes.sort(key=lambda box: box.bbox[1], reverse=True)
    return [box.get_text().strip() for box in boxes]
def _parse_numbers(idx, entries):
return [float(e.strip('*').replace(',', '.').strip()) if ',' in e else int(e.strip('*').strip()) for e in entries[idx+1:idx+3]]
def _calc_rolling_average(df):
mask = df['Daily new'] == 0
df.loc[mask, 'Daily new'] = df["Total"].diff()[mask]
df['Daily new 7-day average'] = df['Daily new'].rolling(window=7).mean()
return df
def fetch_stats(out_dir, pdf_dir="", verbose=True):
    """Download and parse the Rhein-Neckar-Kreis COVID-19 fact-sheet PDFs.

    Scrapes the district's web pages for links to the daily 'Faktenblatt'
    PDFs, downloads any sheet not yet parsed, extracts the case numbers and
    appends them to the cumulative JSON stat files in *out_dir*.

    Parameters
    ----------
    out_dir : str
        Directory holding 'hd_stats.json' and 'rnk_stats.json'; existing
        files are extended, missing ones are created.
    pdf_dir : str, optional
        Cache directory for the downloaded PDFs. If empty, a temporary
        directory is used and removed when done.
    verbose : bool, optional
        Print progress messages.
    """
    urls = [
        "https://www.rhein-neckar-kreis.de/start/landratsamt/coronavirus+fallzahlen+03-07.html",
        "https://www.rhein-neckar-kreis.de/start/landratsamt/coronavirus+fallzahlen+08-09.html",
        "https://www.rhein-neckar-kreis.de/start/landratsamt/coronavirus+fallzahlen.html"
    ]
    # Raw string so '\.' is a regex escape, not an (invalid) Python escape.
    p = re.compile(r'a href="(.+?Faktenblatt_Corona_RNK\.pdf)" title="" target="_blank">')
    if verbose:
        print("Checking updates...")
    pdf_urls = []
    for url in urls:
        with urllib.request.urlopen(url) as f:
            t = f.read().decode("utf-8")
        pdf_urls += list(p.findall(t))
    tmp_obj = None
    if pdf_dir == "":
        tmp_obj = tempfile.TemporaryDirectory()
        pdf_dir = tmp_obj.name
    elif not os.path.exists(pdf_dir):
        os.mkdir(pdf_dir)
    df_headers = ["Total", "Recovered", "Deaths", "Quarantined", "7 Day Incidents", "Daily new"]
    if os.path.exists(os.path.join(out_dir, 'hd_stats.json')):
        hd_stats = pd.read_json(os.path.join(out_dir, 'hd_stats.json'), orient="split").T
        rnk_stats = pd.read_json(os.path.join(out_dir, 'rnk_stats.json'), orient="split").T
    else:
        hd_stats = pd.DataFrame(columns=df_headers)
        rnk_stats = pd.DataFrame(columns=df_headers)
    url_root = "https://www.rhein-neckar-kreis.de/"
    for pdf_url in pdf_urls:
        pdf_fn = pdf_url.split('/')[-1]
        # The file name starts with the sheet date encoded as YYMMDD.
        date = pd.Timestamp("20%s-%s-%s"%(pdf_fn[:2], pdf_fn[2:4], pdf_fn[4:6]))
        if date in rnk_stats.index:
            if verbose:
                print(f"Found data on {date.strftime('%Y-%m-%d')}, skipping...")
            continue
        if not os.path.exists(os.path.join(pdf_dir, pdf_fn)):
            print("Downloading %s..."%pdf_fn)
            with urllib.request.urlopen(url_root + pdf_url) as f, open(os.path.join(pdf_dir, pdf_fn), "wb") as fo:
                fo.write(f.read())
        print("Parsing %s..."%pdf_fn)
        covid_numbers = np.zeros([2, 6], dtype=float)  # Rows: RNK, HD
        # Cols: positive, recovered, deaths, quarantined, 7-day-incidences, difference yesterday
        covid_numbers[:, 4] = np.nan  # np.NaN alias was removed in NumPy 2.0
        entries = _extract_text(os.path.join(pdf_dir, pdf_fn))
        flags = np.zeros(5, dtype=bool)
        for idx, e in enumerate(entries):
            # Each matched label is followed by two values: RNK then HD.
            if not flags[0] and ("Positive" in e or "Gesamtzahl" in e): # Positive
                covid_numbers[:, 0] = _parse_numbers(idx, entries)
                flags[0] = True
            if not flags[1] and "Genesene" in e: # Recovered
                if 'Datenbank-Fehlers' in entries[-3] and 'Genesene Personen' in entries[-3]:
                    # Recovered counts unavailable due to a database failure
                    covid_numbers[:, 1] = np.nan
                else:
                    covid_numbers[:, 1] = _parse_numbers(idx, entries)
                flags[1] = True
            if not flags[2] and "Verstorbene" in e: # Deaths
                covid_numbers[:, 2] = _parse_numbers(idx, entries)
                flags[2] = True
            if not flags[3] and "7-Tage-Inzidenz" in e: # 7-day-incidences
                covid_numbers[:, 4] = _parse_numbers(idx, entries)
                flags[3] = True
            if not flags[4] and e == "Veränderung zum Vortag": # Daily new
                covid_numbers[:, 5] = _parse_numbers(idx, entries)
                flags[4] = True
            if all(flags):
                # found all numbers
                break
        covid_numbers[:, 3] = covid_numbers[:, 0] - covid_numbers[:, 1] - covid_numbers[:, 2] # Calculate quarantined
        # DataFrame.append was removed in pandas 2.0; use pd.concat instead.
        rnk_stats = pd.concat([rnk_stats, pd.DataFrame([covid_numbers[0]], columns=df_headers, index=[date])])
        hd_stats = pd.concat([hd_stats, pd.DataFrame([covid_numbers[1]], columns=df_headers, index=[date])])
    rnk_stats = rnk_stats.sort_index()
    hd_stats = hd_stats.sort_index()
    rnk_stats = _calc_rolling_average(rnk_stats)
    rnk_stats.index = rnk_stats.index.strftime("%Y-%m-%d")
    rnk_stats.T.to_json(os.path.join(out_dir, "rnk_stats.json"), orient="split")
    hd_stats = _calc_rolling_average(hd_stats)
    hd_stats.index = hd_stats.index.strftime("%Y-%m-%d")
    hd_stats.T.to_json(os.path.join(out_dir, "hd_stats.json"), orient="split")
    if tmp_obj:
        tmp_obj.cleanup()
    if verbose:
        print("Done!")
if __name__ == "__main__":
    import sys
    # sys.argv[0] is the script path itself, not an argument; passing the
    # full argv would make out_dir the script name.
    fetch_stats(*sys.argv[1:])
| [
"tempfile.TemporaryDirectory",
"os.path.exists",
"pdfminer.high_level.extract_pages",
"re.compile",
"os.path.join",
"numpy.zeros",
"os.mkdir",
"pandas.DataFrame",
"pandas.Timestamp"
] | [((259, 288), 'pdfminer.high_level.extract_pages', 'extract_pages', (['fn'], {'maxpages': '(1)'}), '(fn, maxpages=1)\n', (272, 288), False, 'from pdfminer.high_level import extract_pages\n'), ((1382, 1469), 're.compile', 're.compile', (['"""a href="(.+?Faktenblatt_Corona_RNK\\\\.pdf)" title="" target="_blank">"""'], {}), '(\n \'a href="(.+?Faktenblatt_Corona_RNK\\\\.pdf)" title="" target="_blank">\')\n', (1392, 1469), False, 'import re\n'), ((1762, 1791), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1789, 1791), False, 'import tempfile\n'), ((2013, 2051), 'os.path.join', 'os.path.join', (['out_dir', '"""hd_stats.json"""'], {}), "(out_dir, 'hd_stats.json')\n", (2025, 2051), False, 'import os\n'), ((2269, 2301), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df_headers'}), '(columns=df_headers)\n', (2281, 2301), True, 'import pandas as pd\n'), ((2323, 2355), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df_headers'}), '(columns=df_headers)\n', (2335, 2355), True, 'import pandas as pd\n'), ((2500, 2567), 'pandas.Timestamp', 'pd.Timestamp', (["('20%s-%s-%s' % (pdf_fn[:2], pdf_fn[2:4], pdf_fn[4:6]))"], {}), "('20%s-%s-%s' % (pdf_fn[:2], pdf_fn[2:4], pdf_fn[4:6]))\n", (2512, 2567), True, 'import pandas as pd\n'), ((3070, 3099), 'numpy.zeros', 'np.zeros', (['[2, 6]'], {'dtype': 'float'}), '([2, 6], dtype=float)\n', (3078, 3099), True, 'import numpy as np\n'), ((3377, 3400), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'bool'}), '(5, dtype=bool)\n', (3385, 3400), True, 'import numpy as np\n'), ((5249, 5288), 'os.path.join', 'os.path.join', (['out_dir', '"""rnk_stats.json"""'], {}), "(out_dir, 'rnk_stats.json')\n", (5261, 5288), False, 'import os\n'), ((5438, 5476), 'os.path.join', 'os.path.join', (['out_dir', '"""hd_stats.json"""'], {}), "(out_dir, 'hd_stats.json')\n", (5450, 5476), False, 'import os\n'), ((1838, 1861), 'os.path.exists', 'os.path.exists', (['pdf_dir'], {}), '(pdf_dir)\n', (1852, 1861), False, 'import 
os\n'), ((1872, 1889), 'os.mkdir', 'os.mkdir', (['pdf_dir'], {}), '(pdf_dir)\n', (1880, 1889), False, 'import os\n'), ((3329, 3358), 'os.path.join', 'os.path.join', (['pdf_dir', 'pdf_fn'], {}), '(pdf_dir, pdf_fn)\n', (3341, 3358), False, 'import os\n'), ((4860, 4926), 'pandas.DataFrame', 'pd.DataFrame', (['[covid_numbers[0]]'], {'columns': 'df_headers', 'index': '[date]'}), '([covid_numbers[0]], columns=df_headers, index=[date])\n', (4872, 4926), True, 'import pandas as pd\n'), ((4964, 5030), 'pandas.DataFrame', 'pd.DataFrame', (['[covid_numbers[1]]'], {'columns': 'df_headers', 'index': '[date]'}), '([covid_numbers[1]], columns=df_headers, index=[date])\n', (4976, 5030), True, 'import pandas as pd\n'), ((2087, 2125), 'os.path.join', 'os.path.join', (['out_dir', '"""hd_stats.json"""'], {}), "(out_dir, 'hd_stats.json')\n", (2099, 2125), False, 'import os\n'), ((2179, 2218), 'os.path.join', 'os.path.join', (['out_dir', '"""rnk_stats.json"""'], {}), "(out_dir, 'rnk_stats.json')\n", (2191, 2218), False, 'import os\n'), ((2773, 2802), 'os.path.join', 'os.path.join', (['pdf_dir', 'pdf_fn'], {}), '(pdf_dir, pdf_fn)\n', (2785, 2802), False, 'import os\n'), ((2924, 2953), 'os.path.join', 'os.path.join', (['pdf_dir', 'pdf_fn'], {}), '(pdf_dir, pdf_fn)\n', (2936, 2953), False, 'import os\n')] |
import numpy as np
from pyspedas import tinterpol, tdeflag
from pyspedas.geopack.get_w_params import get_w
from pytplot import get_data, store_data
def get_tsy_params(dst_tvar, imf_tvar, Np_tvar, Vp_tvar, model, pressure_tvar=None, newname=None, speed=False, g_variables=None):
    """
    This procedure will interpolate inputs, generate
    Tsyganenko model parameters and store them in a tplot
    variable that can be passed directly to the model
    procedure.

    Input
    ------
        dst_tvar: str
            tplot variable containing the Dst index

        imf_tvar: str
            tplot variable containing the interplanetary
            magnetic field vector in GSM coordinates

        Np_tvar: str
            tplot variable containing the solar wind
            ion density (cm**-3)

        Vp_tvar: str
            tplot variable containing the proton velocity

        model: str
            Tsyganenko model; should be: 'T89', T96', 'T01','TS04'

    Parameters
    -----------
        newname: str
            name of the output variable; default: t96_par,
            't01_par' or 'ts04_par', depending on the
            model

        speed: bool
            Flag to indicate Vp_tvar is speed, and not velocity
            (defaults to False)

        pressure_tvar: str
            Set this to specify a tplot variable containing
            solar wind dynamic pressure data. If not supplied,
            it will be calculated internally from proton density
            and proton speed.

        g_variables: str, list or np.ndarray
            For the T01 model only: name of a tplot variable holding the
            G1/G2 indices, an (n, 2) array, or a [g1, g2] pair that is
            broadcast over all timestamps.

    Returns
    --------
        Name of the tplot variable containing the parameters.

        The parameters are:
        (1) solar wind pressure pdyn (nanopascals),
        (2) dst (nanotesla),
        (3) byimf,
        (4) bzimf (nanotesla)
        (5-10) indices w1 - w6, calculated as time integrals from the beginning of a storm
        see the reference (3) below, for a detailed definition of those variables
    """
    model = model.lower()

    if model not in ['t89', 't96', 't01', 'ts04']:
        print('Unknown model: ' + model)
        return

    # Remove NaNs before interpolating.
    tdeflag(Np_tvar, method='remove_nan', overwrite=True)
    tdeflag(dst_tvar, method='remove_nan', overwrite=True)
    tdeflag(Vp_tvar, method='remove_nan', overwrite=True)

    # interpolate the inputs to the Np timestamps
    tinterpol(imf_tvar, Np_tvar, newname=imf_tvar+'_interp')
    tinterpol(dst_tvar, Np_tvar, newname=dst_tvar+'_interp')
    tinterpol(Vp_tvar, Np_tvar, newname=Vp_tvar+'_interp')

    if pressure_tvar is not None:
        tdeflag(pressure_tvar, method='remove_nan', overwrite=True)
        tinterpol(pressure_tvar, Np_tvar, newname=pressure_tvar+'_interp')

    Np_data = get_data(Np_tvar)
    dst_data = get_data(dst_tvar+'_interp')
    imf_data = get_data(imf_tvar+'_interp')
    Vp_data = get_data(Vp_tvar+'_interp')

    # NOTE(review): the internal pressure calculation from Np and Vp that the
    # docstring promises is not implemented here; P_data (used by every model
    # branch below) is only defined when pressure_tvar is supplied — confirm.
    if pressure_tvar is not None:
        P_data = get_data(pressure_tvar+'_interp')

    if model == 't96':
        # T96 only needs pdyn, dst, By and Bz; the remaining slots are zero.
        out = np.array((P_data.y,
                    dst_data.y,
                    imf_data.y[:, 1],
                    imf_data.y[:, 2],
                    np.zeros(len(dst_data.y)),
                    np.zeros(len(dst_data.y)),
                    np.zeros(len(dst_data.y)),
                    np.zeros(len(dst_data.y)),
                    np.zeros(len(dst_data.y)),
                    np.zeros(len(dst_data.y))))
    elif model == 't01':
        if g_variables is None:
            print('G variables required for T01 model; create a tplot variable containing the G variables, and provide the name of that keyword to the g_variables keyword.')
            return
        else:
            if isinstance(g_variables, str):
                g_data = get_data(g_variables)

                # Fixed: previously this re-tested g_variables (a non-None
                # str here) instead of the loaded data.
                if g_data is None:
                    print('Problem reading G variable: ' + g_variables)
                    return

                g1 = g_data.y[:, 0]
                g2 = g_data.y[:, 1]
            else:
                if isinstance(g_variables, list):
                    g_variables = np.array(g_variables)

                if len(g_variables.shape) > 1:
                    g1 = g_variables[:, 0]
                    g2 = g_variables[:, 1]
                else:
                    # A single [g1, g2] pair: broadcast over all timestamps.
                    g1 = np.repeat(g_variables[0], len(dst_data.y))
                    g2 = np.repeat(g_variables[1], len(dst_data.y))

        out = np.array((P_data.y,
                    dst_data.y,
                    imf_data.y[:, 1],
                    imf_data.y[:, 2],
                    g1,
                    g2,
                    np.zeros(len(dst_data.y)),
                    np.zeros(len(dst_data.y)),
                    np.zeros(len(dst_data.y)),
                    np.zeros(len(dst_data.y))))
    elif model == 'ts04':
        # W1-W6 storm-time indices over the full input time range.
        params = get_w(trange=[np.nanmin(Np_data.times), np.nanmax(Np_data.times)], create_tvar=True)

        # interpolate the inputs to the Np timestamps
        tinterpol(params, Np_tvar, newname=params+'_interp')

        w_data = get_data(params+'_interp')

        if w_data is None:
            print('Problem loading W variables for TS04 model.')
            return

        out = np.array((P_data.y,
                    dst_data.y,
                    imf_data.y[:, 1],
                    imf_data.y[:, 2],
                    w_data.y[:, 0],
                    w_data.y[:, 1],
                    w_data.y[:, 2],
                    w_data.y[:, 3],
                    w_data.y[:, 4],
                    w_data.y[:, 5]))
    elif model == 't89':
        # Fixed: this branch previously re-tested model == 't01' and was
        # unreachable, leaving T89 to crash on an undefined `out`.
        print('not implemented yet')
        return

    if newname is None:
        newname = model + '_par'

    saved = store_data(newname, data={'x': dst_data.times, 'y': out.T})

    if saved:
        return newname
| [
"pytplot.get_data",
"pyspedas.tdeflag",
"numpy.array",
"pyspedas.tinterpol",
"pytplot.store_data",
"numpy.nanmax",
"numpy.nanmin"
] | [((2131, 2184), 'pyspedas.tdeflag', 'tdeflag', (['Np_tvar'], {'method': '"""remove_nan"""', 'overwrite': '(True)'}), "(Np_tvar, method='remove_nan', overwrite=True)\n", (2138, 2184), False, 'from pyspedas import tinterpol, tdeflag\n'), ((2189, 2243), 'pyspedas.tdeflag', 'tdeflag', (['dst_tvar'], {'method': '"""remove_nan"""', 'overwrite': '(True)'}), "(dst_tvar, method='remove_nan', overwrite=True)\n", (2196, 2243), False, 'from pyspedas import tinterpol, tdeflag\n'), ((2248, 2301), 'pyspedas.tdeflag', 'tdeflag', (['Vp_tvar'], {'method': '"""remove_nan"""', 'overwrite': '(True)'}), "(Vp_tvar, method='remove_nan', overwrite=True)\n", (2255, 2301), False, 'from pyspedas import tinterpol, tdeflag\n'), ((2357, 2415), 'pyspedas.tinterpol', 'tinterpol', (['imf_tvar', 'Np_tvar'], {'newname': "(imf_tvar + '_interp')"}), "(imf_tvar, Np_tvar, newname=imf_tvar + '_interp')\n", (2366, 2415), False, 'from pyspedas import tinterpol, tdeflag\n'), ((2418, 2476), 'pyspedas.tinterpol', 'tinterpol', (['dst_tvar', 'Np_tvar'], {'newname': "(dst_tvar + '_interp')"}), "(dst_tvar, Np_tvar, newname=dst_tvar + '_interp')\n", (2427, 2476), False, 'from pyspedas import tinterpol, tdeflag\n'), ((2479, 2535), 'pyspedas.tinterpol', 'tinterpol', (['Vp_tvar', 'Np_tvar'], {'newname': "(Vp_tvar + '_interp')"}), "(Vp_tvar, Np_tvar, newname=Vp_tvar + '_interp')\n", (2488, 2535), False, 'from pyspedas import tinterpol, tdeflag\n'), ((2727, 2744), 'pytplot.get_data', 'get_data', (['Np_tvar'], {}), '(Np_tvar)\n', (2735, 2744), False, 'from pytplot import get_data, store_data\n'), ((2760, 2790), 'pytplot.get_data', 'get_data', (["(dst_tvar + '_interp')"], {}), "(dst_tvar + '_interp')\n", (2768, 2790), False, 'from pytplot import get_data, store_data\n'), ((2804, 2834), 'pytplot.get_data', 'get_data', (["(imf_tvar + '_interp')"], {}), "(imf_tvar + '_interp')\n", (2812, 2834), False, 'from pytplot import get_data, store_data\n'), ((2847, 2876), 'pytplot.get_data', 'get_data', (["(Vp_tvar + '_interp')"], 
{}), "(Vp_tvar + '_interp')\n", (2855, 2876), False, 'from pytplot import get_data, store_data\n'), ((5877, 5936), 'pytplot.store_data', 'store_data', (['newname'], {'data': "{'x': dst_data.times, 'y': out.T}"}), "(newname, data={'x': dst_data.times, 'y': out.T})\n", (5887, 5936), False, 'from pytplot import get_data, store_data\n'), ((2577, 2636), 'pyspedas.tdeflag', 'tdeflag', (['pressure_tvar'], {'method': '"""remove_nan"""', 'overwrite': '(True)'}), "(pressure_tvar, method='remove_nan', overwrite=True)\n", (2584, 2636), False, 'from pyspedas import tinterpol, tdeflag\n'), ((2645, 2713), 'pyspedas.tinterpol', 'tinterpol', (['pressure_tvar', 'Np_tvar'], {'newname': "(pressure_tvar + '_interp')"}), "(pressure_tvar, Np_tvar, newname=pressure_tvar + '_interp')\n", (2654, 2713), False, 'from pyspedas import tinterpol, tdeflag\n'), ((2927, 2962), 'pytplot.get_data', 'get_data', (["(pressure_tvar + '_interp')"], {}), "(pressure_tvar + '_interp')\n", (2935, 2962), False, 'from pytplot import get_data, store_data\n'), ((5115, 5169), 'pyspedas.tinterpol', 'tinterpol', (['params', 'Np_tvar'], {'newname': "(params + '_interp')"}), "(params, Np_tvar, newname=params + '_interp')\n", (5124, 5169), False, 'from pyspedas import tinterpol, tdeflag\n'), ((5185, 5213), 'pytplot.get_data', 'get_data', (["(params + '_interp')"], {}), "(params + '_interp')\n", (5193, 5213), False, 'from pytplot import get_data, store_data\n'), ((5339, 5512), 'numpy.array', 'np.array', (['(P_data.y, dst_data.y, imf_data.y[:, 1], imf_data.y[:, 2], w_data.y[:, 0],\n w_data.y[:, 1], w_data.y[:, 2], w_data.y[:, 3], w_data.y[:, 4], w_data.\n y[:, 5])'], {}), '((P_data.y, dst_data.y, imf_data.y[:, 1], imf_data.y[:, 2], w_data.\n y[:, 0], w_data.y[:, 1], w_data.y[:, 2], w_data.y[:, 3], w_data.y[:, 4],\n w_data.y[:, 5]))\n', (5347, 5512), True, 'import numpy as np\n'), ((3789, 3810), 'pytplot.get_data', 'get_data', (['g_variables'], {}), '(g_variables)\n', (3797, 3810), False, 'from pytplot import get_data, 
store_data\n'), ((4126, 4147), 'numpy.array', 'np.array', (['g_variables'], {}), '(g_variables)\n', (4134, 4147), True, 'import numpy as np\n'), ((4982, 5006), 'numpy.nanmin', 'np.nanmin', (['Np_data.times'], {}), '(Np_data.times)\n', (4991, 5006), True, 'import numpy as np\n'), ((5008, 5032), 'numpy.nanmax', 'np.nanmax', (['Np_data.times'], {}), '(Np_data.times)\n', (5017, 5032), True, 'import numpy as np\n')] |
# Module version report.
# This script prints the versions of the modules the project depends on --
# a sort of requirements.txt, but produced by running the installed modules.
from __future__ import print_function
try:
    # for python 2
    import Tkinter as tkinter
except ImportError:
    # for python 3
    import tkinter as tkinter
import cv2
import PIL.Image, PIL.ImageTk
import time
try:
    # for python 2
    import tkMessageBox
except ImportError:
    # for python 3
    from tkinter import messagebox as tkMessageBox
import pandas as pd
import time
import numpy as np
# Collect the version strings in a dictionary.
# Several modules (tkinter, time, PIL) raise "module has no attribute
# __version__", so those entries are commented out.
x = {'python': __import__('sys').version,
     #'tkinter': tkinter.__version__,
     'cv2': cv2.__version__,
     'pandas': pd.__version__,
     #'time': time.__version__,
     'numpy': np.__version__}
# NOTE(review): np.array(x) on a dict builds a 0-d object array, so
# .transpose() is a no-op; this just prints the dict wrapped in an array.
print(np.array(x).transpose())
"numpy.array"
] | [((838, 849), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (846, 849), True, 'import numpy as np\n')] |
"""Implements the bilateral filter for images."""
from numpy import ceil, exp, dot, ogrid, arange
def bilateral_filter(im, size=None, sigma_r=None, sigma_d=1, **kwargs):
    """
    Bilaterally filter an image. Uses Gaussian kernels for the spatial and intensity filters.

    im is the image to filter, must be grayscale but can be any dimension
    size is the kernel size, must be odd and >=3, defaults to int(max(5, 2*ceil(3*sigma_d)+1)).
    sigma_r is the range/intensity standard deviation, defaults to image standard deviation.
    sigma_d is the domain/spatial standard deviation, default to 1.
    other keyword arguments are passed to scipy.ndimage.generic_filter.

    This attempts to use a Cython optimized function if possible. Additionally in common cases many
    parts are computed to greatly speed up the function.

    REFERENCES
      1. <NAME> and <NAME>, 1998, "Bilateral filtering for gray and color images". Sixth
         International Conference on Computer Vision. pp. 839–846.
      2. <NAME> and <NAME>, 2008, "Enhancing Contrast in Color Images Using Bilateral Filter and
         Histogram Equalization Using Wavelet Coefficients", 2008 Second International Conference on
         Future Generation Communication and Networking Symposia.
    """
    from scipy.ndimage import generic_filter
    # Default the intensity sigma to the image's own standard deviation.
    if sigma_r is None: sigma_r = im.std()
    if size is None:
        # Kernel wide enough to cover +/-3 sigma of the spatial Gaussian, at least 5.
        size = int(max(5, 2*ceil(3*sigma_d)+1))
    elif size < 3 or size%2 != 1:
        raise ValueError(size)

    # Calculate the kernels: flattened spatial Gaussian weights, the intensity
    # Gaussian's exponent scale, and (for small integer dtypes) an intensity LUT.
    spatial, scale, inten_lut = __bilateral_kernels(im.dtype, im.ndim, size, sigma_r, sigma_d)

    try:
        # Try to import Cython optimized code - 20 to 75x faster
        from scipy import LowLevelCallable
        import hist.exact.__bilateral_cy as cy
        _bilateral_filter = LowLevelCallable.from_cython(
            cy, 'bilateral_filter' if inten_lut is None else 'bilateral_filter_inten_lut',
            cy.get_user_data(spatial, scale, inten_lut)) # pylint: disable=c-extension-no-member
    except ImportError:
        # Fallback to pure Python function
        # Note: it seems the pure Python function actually gets slower with the intensity LUT
        def _bilateral_filter(data):
            # data is the flattened neighborhood; its middle element is the center pixel.
            diff = data - data[data.size // 2]
            # Combined weight = intensity Gaussian (on the fly) * precomputed spatial Gaussian.
            weight = exp(diff*diff*scale) * spatial
            return dot(data, weight) / weight.sum()
    return generic_filter(im, _bilateral_filter, size, **kwargs)
def __bilateral_kernels(dt, ndim, size, sigma_r, sigma_d):
    """
    Build the spatial Gaussian kernel and the intensity-kernel scale factor.

    Also precomputes a full intensity LUT when the image dtype makes that
    reasonable (integral types of at most 16 bits); otherwise None is
    returned in its place.
    """
    from ..util import get_dtype_min_max

    # Fixed spatial (domain) kernel: Gaussian of the squared distance from
    # the kernel center, flattened and normalized to sum to 1.
    domain_scale = -1/(2*sigma_d*sigma_d)
    half = size//2
    grid = ogrid[(slice(-half, half+1),)*ndim]
    squared_dist = sum(axis*axis for axis in grid)
    spatial = (squared_dist*domain_scale).ravel()
    exp(spatial, spatial)  # in-place exp
    spatial /= spatial.sum()

    # Intensity (range) kernel exponent scale; precompute the full LUT only
    # for small integral dtypes, never for 32-bit+ integers or floats.
    scale = -1/(2*sigma_r*sigma_r)
    intensity_lut = None
    if dt.kind in 'uib' and dt.itemsize <= 2:
        mn, mx = get_dtype_min_max(dt)
        intensity_lut = arange(0, mx-mn+1)
        intensity_lut *= intensity_lut          # squared intensity differences
        intensity_lut = intensity_lut * scale   # promotes to float
        exp(intensity_lut, intensity_lut)
    return spatial, scale, intensity_lut
| [
"numpy.ceil",
"scipy.ndimage.generic_filter",
"hist.exact.__bilateral_cy.get_user_data",
"numpy.exp",
"numpy.dot",
"numpy.arange"
] | [((2394, 2447), 'scipy.ndimage.generic_filter', 'generic_filter', (['im', '_bilateral_filter', 'size'], {}), '(im, _bilateral_filter, size, **kwargs)\n', (2408, 2447), False, 'from scipy.ndimage import generic_filter\n'), ((2913, 2934), 'numpy.exp', 'exp', (['spatial', 'spatial'], {}), '(spatial, spatial)\n', (2916, 2934), False, 'from numpy import ceil, exp, dot, ogrid, arange\n'), ((3275, 3297), 'numpy.arange', 'arange', (['(0)', '(mx - mn + 1)'], {}), '(0, mx - mn + 1)\n', (3281, 3297), False, 'from numpy import ceil, exp, dot, ogrid, arange\n'), ((3387, 3420), 'numpy.exp', 'exp', (['intensity_lut', 'intensity_lut'], {}), '(intensity_lut, intensity_lut)\n', (3390, 3420), False, 'from numpy import ceil, exp, dot, ogrid, arange\n'), ((1948, 1991), 'hist.exact.__bilateral_cy.get_user_data', 'cy.get_user_data', (['spatial', 'scale', 'inten_lut'], {}), '(spatial, scale, inten_lut)\n', (1964, 1991), True, 'import hist.exact.__bilateral_cy as cy\n'), ((2299, 2323), 'numpy.exp', 'exp', (['(diff * diff * scale)'], {}), '(diff * diff * scale)\n', (2302, 2323), False, 'from numpy import ceil, exp, dot, ogrid, arange\n'), ((2349, 2366), 'numpy.dot', 'dot', (['data', 'weight'], {}), '(data, weight)\n', (2352, 2366), False, 'from numpy import ceil, exp, dot, ogrid, arange\n'), ((1413, 1430), 'numpy.ceil', 'ceil', (['(3 * sigma_d)'], {}), '(3 * sigma_d)\n', (1417, 1430), False, 'from numpy import ceil, exp, dot, ogrid, arange\n')] |
# Import relevant packages
import pandas as pd # Necessary for opening csv files
import numpy as np
import itertools # Necessary for plotting confusion matrix
from sklearn.metrics import confusion_matrix # Necessary for computing confusion matrix
import matplotlib.pyplot as plt
from sklearn.naive_bayes import *
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    Render a confusion matrix as an image with per-cell value labels.

    If *normalize* is True, each row is divided by its sum before display
    so the cells show per-class rates instead of raw counts. The matrix
    (normalized or not) is also printed to stdout.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    # Label each cell, switching to white text on dark cells for contrast.
    threshold = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        cell_color = "white" if cm[row, col] > threshold else "black"
        plt.text(col, row, cm[row, col], horizontalalignment="center", color=cell_color)

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# ----------------------------------------------------------
print('Exercise: Naive Bayes on UCI Datasets')
print('=====================================')

# STEP 1: Load dataset (UCI banknote authentication data, local CSV)
banknote_datadset = pd.read_csv('data_banknote_authentication.csv')
X = banknote_datadset.iloc[:, 0:4].values # 4-dimensional input containing wavelet variance, skewness, curtosis and image entropy
y = banknote_datadset.iloc[:, 4].values # 1-dimensional output containing 'real' or 'fake' label

# STEP 2: Define classification model (Gaussian Naive Bayes)
nb = GaussianNB()

# STEP 3: Train classification model
# NOTE(review): the score below is evaluated on the training data itself,
# so it is an optimistic estimate of generalization performance.
nb.fit(X, y)
print("Training set score: %f" % nb.score(X, y))

# STEP 4: Display confusion matrix (also computed on the training data)
print('\n\nPrint confusion matrix')
class_names = ['fake', 'real']
y_pred = nb.predict(X)
cm = confusion_matrix(y, y_pred)
np.set_printoptions(precision=2)

# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cm, classes=class_names, normalize=True, title='Normalized confusion matrix')
plt.show()
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.text",
"sklearn.metrics.confusion_matrix",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.set_printoptions",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.... | [((1482, 1529), 'pandas.read_csv', 'pd.read_csv', (['"""data_banknote_authentication.csv"""'], {}), "('data_banknote_authentication.csv')\n", (1493, 1529), True, 'import pandas as pd\n'), ((2045, 2072), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'y_pred'], {}), '(y, y_pred)\n', (2061, 2072), False, 'from sklearn.metrics import confusion_matrix\n'), ((2073, 2105), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (2092, 2105), True, 'import numpy as np\n'), ((2142, 2154), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2152, 2154), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2264, 2266), True, 'import matplotlib.pyplot as plt\n'), ((555, 605), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (565, 605), True, 'import matplotlib.pyplot as plt\n'), ((610, 626), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (619, 626), True, 'import matplotlib.pyplot as plt\n'), ((631, 645), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (643, 645), True, 'import matplotlib.pyplot as plt\n'), ((691, 735), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (701, 735), True, 'import matplotlib.pyplot as plt\n'), ((740, 771), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (750, 771), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1217), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1215, 1217), True, 'import matplotlib.pyplot as plt\n'), ((1222, 1246), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (1232, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1251, 
1280), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (1261, 1280), True, 'import matplotlib.pyplot as plt\n'), ((1091, 1199), 'matplotlib.pyplot.text', 'plt.text', (['j', 'i', 'cm[i, j]'], {'horizontalalignment': '"""center"""', 'color': "('white' if cm[i, j] > thresh else 'black')"}), "(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[\n i, j] > thresh else 'black')\n", (1099, 1199), True, 'import matplotlib.pyplot as plt\n')] |
"""utilities"""
import os
import numpy as np
# def sep(default='-', nchar=80):
# print(default * nchar)
def get_pkg_filename(filename, path='data'):
    """Return the path of a data file shipped with this package.

    The file is resolved relative to the directory containing this module,
    under the sub-directory *path* (default ``'data'``).
    """
    pkg_dir = os.path.dirname(__file__)
    return os.path.join(pkg_dir, path, filename)
def flatten(list_):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    Via https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists
    """
    flat = []
    for sublist in list_:
        flat.extend(sublist)
    return flat
def isiterable(obj):
    """Return True iff *obj* is iterable, i.e. ``iter(obj)`` succeeds."""
    try:
        iter(obj)
    except TypeError:
        # iter() raises TypeError for non-iterables; the previous broad
        # `except Exception` could also mask unrelated errors.
        return False
    return True
def remove_none_values_from_dict(dict_):
    """Return a copy of *dict_* without entries whose value is None or NaN."""
    def _is_missing(value):
        if value is None:
            return True
        return isinstance(value, float) and np.isnan(value)
    return {key: value for key, value in dict_.items() if not _is_missing(value)}
| [
"os.path.dirname",
"os.path.join",
"numpy.isnan"
] | [((234, 259), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (249, 259), False, 'import os\n'), ((271, 310), 'os.path.join', 'os.path.join', (['directory', 'path', 'filename'], {}), '(directory, path, filename)\n', (283, 310), False, 'import os\n'), ((849, 860), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (857, 860), True, 'import numpy as np\n')] |
import numpy as np
def ask_sumxy(level):
    """Build a sum-and-difference word problem for *level*.

    Returns [question_text, [x, y]] or None when no problem can be
    generated at this level.
    """
    nums = level_sumxy(level)
    if not nums:
        return None
    question = '''Halle dos números enteros tales que la suma sea {} y la diferencia sea {}".'''.format(nums[2], nums[3])
    return [question, [nums[0], nums[1]]]
def level_sumxy(level):
    """Map a difficulty *level* to a target-sum range and draw a problem."""
    lower = 2 * level + 3
    upper = 5 * level + 15
    return _sumxy(lower, upper)
def _sumxy(min, max):
if min < 3 or max - min < 5:
return None
sum = np.random.randint(min, max)
y = np.random.randint(1, min - 1)
x = sum - y
return [x, y, sum, abs(x - y)]
## 1:E
## 2:S
def ask_multiplesxy(level):
    """Build a "one number is k times the other" word problem for *level*.

    Returns [question_text, [smaller, larger]] or None when no problem can
    be generated at this level.
    """
    nums = level_multiplesxy(level)
    if not nums:
        return None
    question = '''La suma de 2 números es {}. Si el mayor es {} veces el menor. Halle el número menor".'''.format(nums[2], nums[3])
    return [question, [nums[0], nums[1]]]
def level_multiplesxy(level):
    """Map a difficulty *level* to a sum range for the multiples problem."""
    lower = level + 30
    upper = 3 * level + 30
    return _multiplesxy(lower, upper)
# x + y = z
# x = k * y
# y = z / (k + 1)
def _multiplesxy(min, max):
if min < 30 or min > max:
return None
z = np.random.randint(min, max)
k = np.random.randint(2, 10)
y = z // (k + 1)
x = k * y
# lowest, highest, sum, factor
return [y, x, x + y, k]
## 2:E
# Smoke test: print generated problems for a range of difficulty levels.
print('level_sumxy')
for i in range(2, 30):
    print(ask_sumxy(i))
print('level_multiplesxy')
for i in range(2, 30):
    print(ask_multiplesxy(i))
| [
"numpy.random.randint"
] | [((389, 416), 'numpy.random.randint', 'np.random.randint', (['min', 'max'], {}), '(min, max)\n', (406, 416), True, 'import numpy as np\n'), ((422, 451), 'numpy.random.randint', 'np.random.randint', (['(1)', '(min - 1)'], {}), '(1, min - 1)\n', (439, 451), True, 'import numpy as np\n'), ((962, 989), 'numpy.random.randint', 'np.random.randint', (['min', 'max'], {}), '(min, max)\n', (979, 989), True, 'import numpy as np\n'), ((995, 1019), 'numpy.random.randint', 'np.random.randint', (['(2)', '(10)'], {}), '(2, 10)\n', (1012, 1019), True, 'import numpy as np\n')] |
import numpy as np
from one.api import ONE
def get_dlc_XYs(eid, video_type, query_type='remote'):
    """Load camera timestamps and DLC-tracked (x, y) traces for a session.

    NOTE(review): this relies on a module-level ``one`` (ONE API client)
    that is only created under ``if __name__ == "__main__"`` below, so
    importing this module and calling the function directly would raise
    NameError — confirm and consider passing the client in.

    Parameters
    ----------
    eid : session id passed to ONE
    video_type : camera label, e.g. 'left' or 'right'
    query_type : ONE query mode (default 'remote')

    Returns
    -------
    (Times, XYs) : camera frame times, and a dict mapping each tracked
        point name to a 2xN array of x/y positions where samples with
        DLC likelihood < 0.9 are replaced by NaN.
    """
    Times = one.load_dataset(eid,f'alf/_ibl_{video_type}Camera.times.npy',
                             query_type=query_type)
    cam = one.load_dataset(eid,f'alf/_ibl_{video_type}Camera.dlc.pqt',
                           query_type=query_type)
    # Column names look like '<point>_x', '<point>_y', '<point>_likelihood';
    # strip the suffix to recover the unique point names.
    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])
    # Set values to nan if likelyhood is too low # for pqt: .to_numpy()
    XYs = {}
    for point in points:
        x = np.ma.masked_where(
            cam[point + '_likelihood'] < 0.9, cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(
            cam[point + '_likelihood'] < 0.9, cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array(
            [x, y])
    return Times, XYs
def get_licks(XYs):
    """Detect lick frames from DLC tongue traces.

    A frame is counted as a lick frame when the frame-to-frame change of
    the x or y coordinate of either tongue point exceeds a quarter of the
    standard deviation of that trace's frame-to-frame differences.
    Returns the sorted frame indices.
    """
    lick_frames = []
    for point in ('tongue_end_l', 'tongue_end_r'):
        for coord in XYs[point]:
            deltas = np.diff(coord)
            threshold = np.nanstd(deltas) / 4
            lick_frames.append(set(np.where(abs(deltas) > threshold)[0]))
    return sorted(set.union(*lick_frames))
def get_lick_times(eid, combine=False, video_type='left'):
    """Return the timestamps of detected licks for a session.

    :param eid: session id
    :param combine: when True, merge lick events from the right and left
        cameras into a single sorted sequence
    :param video_type: camera to use when combine is False
    :return: array (or sorted list when combine=True) of lick timestamps

    Fixes vs. the original implementation:
    - off-by-one: ``r[:idx]`` dropped the last lick frame that still had a
      valid timestamp; all frames with ``frame < len(times)`` are now kept
    - no longer raises IndexError when no lick frame has a timestamp
    """
    def _session_lick_times(vtype):
        # One camera's lick timestamps; drops DLC frames beyond the
        # timestamp vector (there can be fewer times than DLC points).
        times, XYs = get_dlc_XYs(eid, vtype)
        frames = [f for f in get_licks(XYs) if f < len(times)]
        return times[frames]

    if combine:
        # combine licking events from left and right cam
        per_camera = [_session_lick_times(vtype) for vtype in ('right', 'left')]
        return sorted(np.concatenate(per_camera))
    return _session_lick_times(video_type)
if __name__ == "__main__":
    # There should be one pqt file per camera, e.g.
    # _ibl_leftCamera.features.pqt, containing columns named in Pascal case,
    # the same way you would name an ALF attribute, e.g. pupilDiameter_raw
    # and lick_times.
    one = ONE()  # module-level: get_dlc_XYs reads this global
    eid = '572a95d1-39ca-42e1-8424-5c9ffcb2df87'
    lick_times_left = get_lick_times(eid, video_type='left')
    lick_times_right = get_lick_times(eid, video_type='right')
| [
"numpy.diff",
"numpy.ma.masked_where",
"numpy.array",
"one.api.ONE",
"numpy.concatenate"
] | [((2467, 2472), 'one.api.ONE', 'ONE', ([], {}), '()\n', (2470, 2472), False, 'from one.api import ONE\n'), ((577, 648), 'numpy.ma.masked_where', 'np.ma.masked_where', (["(cam[point + '_likelihood'] < 0.9)", "cam[point + '_x']"], {}), "(cam[point + '_likelihood'] < 0.9, cam[point + '_x'])\n", (595, 648), True, 'import numpy as np\n'), ((703, 774), 'numpy.ma.masked_where', 'np.ma.masked_where', (["(cam[point + '_likelihood'] < 0.9)", "cam[point + '_y']"], {}), "(cam[point + '_likelihood'] < 0.9, cam[point + '_y'])\n", (721, 774), True, 'import numpy as np\n'), ((838, 854), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (846, 854), True, 'import numpy as np\n'), ((1840, 1866), 'numpy.concatenate', 'np.concatenate', (['lick_times'], {}), '(lick_times)\n', (1854, 1866), True, 'import numpy as np\n'), ((1191, 1201), 'numpy.diff', 'np.diff', (['c'], {}), '(c)\n', (1198, 1201), True, 'import numpy as np\n'), ((2054, 2065), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (2062, 2065), True, 'import numpy as np\n'), ((1714, 1725), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (1722, 1725), True, 'import numpy as np\n'), ((1246, 1256), 'numpy.diff', 'np.diff', (['c'], {}), '(c)\n', (1253, 1256), True, 'import numpy as np\n')] |
# BSD 3-Clause License
#
# Copyright (c) 2020, IPASC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from ipasc_tool.core import MetaDatum, MetadataDeviceTags, MetadataAcquisitionTags
class PAData:
    """
    Core class for accessing the information contained in IPASC HDF5 files.

    Using iohandler.file_reader.load_data yields an instance of this class.
    It bundles three parts:
      (1) ``binary_time_series_data``: numpy array with the binary data
      (2) ``meta_data_acquisition``: dictionary with the acquisition metadata
      (3) ``meta_data_device``: dictionary with the device metadata

    Convenience getters expose all fields of the HDF5 dictionaries without
    requiring knowledge of their internal structure. Unless stated
    otherwise, every getter returns None when the corresponding metadata
    key is absent.
    """

    def __init__(self, binary_time_series_data: np.ndarray = None,
                 meta_data_acquisition: dict = None,
                 meta_data_device: dict = None):
        """
        Creates an instance of the PAData class.

        :param binary_time_series_data: numpy array with the raw time series
            data (may be None).
        :param meta_data_acquisition: dictionary; initialized empty when None.
        :param meta_data_device: dictionary; initialized empty when None.
        """
        # fix: removed the original no-op reassignment of
        # binary_time_series_data to None when it already was None.
        if meta_data_acquisition is None:
            meta_data_acquisition = dict()
        if meta_data_device is None:
            meta_data_device = dict()
        self.binary_time_series_data = binary_time_series_data
        self.meta_data_acquisition = meta_data_acquisition
        self.meta_data_device = meta_data_device

    # ------------------------------------------------------------------
    # element ids and shared lookup helpers
    # ------------------------------------------------------------------

    def get_illuminator_ids(self) -> list:
        """Return a list of all ids of the illumination elements."""
        return list(self.meta_data_device[MetadataDeviceTags.ILLUMINATORS.tag].keys())

    def get_detector_ids(self) -> list:
        """Return a list of all ids of the detection elements."""
        # fix: return an actual list (as documented) instead of a dict key
        # view, consistent with get_illuminator_ids.
        return list(self.meta_data_device[MetadataDeviceTags.DETECTORS.tag].keys())

    def _get_general_device_metadatum(self, meta_datum) -> object:
        """Look up *meta_datum* in the 'general' device metadata section."""
        return self.meta_data_device[MetadataDeviceTags.GENERAL.tag].get(meta_datum.tag)

    def _get_attribute_for_tag(self, group_tag, element_count, element_ids,
                               element_name, metadatum_tag, identifier, check_dtype):
        """Shared per-element attribute lookup for illuminators/detectors.

        :param group_tag: device metadata key of the element group.
        :param element_count: number of elements declared for the group.
        :param element_ids: list of valid element id strings.
        :param element_name: human-readable element label for error messages.
        :param metadatum_tag: the MetaDatum whose value is requested.
        :param identifier: None (return the attribute of every element),
            an int element index, or a str element id.
        :param check_dtype: when True, the aggregate result is converted to a
            numpy array only if the metadatum declares an ndarray dtype
            (preserves the original illuminator/detector asymmetry).
        :raises ValueError: for an out-of-range index, an unknown id, or an
            identifier that is neither int nor str.
        """
        group = self.meta_data_device[group_tag]
        if identifier is None:
            values = [group[element_id][metadatum_tag.tag] for element_id in element_ids]
            if check_dtype and metadatum_tag.dtype != np.ndarray:
                return values
            return np.asarray(values)
        if isinstance(identifier, int):
            if identifier < 0 or identifier >= element_count:
                # fix: added the missing space before "was" in the message
                raise ValueError("The " + element_name + " position " + str(identifier) + " was out of range.")
            return list(group.values())[identifier][metadatum_tag.tag]
        if isinstance(identifier, str):
            if identifier not in element_ids:
                # fix: added the missing space before "was" in the message
                raise ValueError("The " + element_name + " id " + str(identifier) + " was not valid.")
            return group[identifier][metadatum_tag.tag]
        raise ValueError("identifier must be int or string.")

    # ------------------------------------------------------------------
    # acquisition metadata access
    # ------------------------------------------------------------------

    def get_acquisition_meta_datum(self, meta_data_tag: "MetaDatum") -> object:
        """Return the acquisition metadata value for *meta_data_tag* (or None)."""
        return self.meta_data_acquisition.get(meta_data_tag.tag)

    def get_custom_meta_datum(self, meta_data_tag: str) -> object:
        """Return the acquisition metadata value stored under the given string key (or None)."""
        return self.meta_data_acquisition.get(meta_data_tag)

    # ------------------------------------------------------------------
    # general device metadata
    # ------------------------------------------------------------------

    def get_device_uuid(self):
        """Universally unique identifier of the device description."""
        return self._get_general_device_metadatum(MetadataDeviceTags.UUID)

    def get_field_of_view(self):
        """Approximate detectable cube [x1, x2, x3], expanding from the coordinate origin."""
        return self._get_general_device_metadatum(MetadataDeviceTags.FIELD_OF_VIEW)

    def get_number_of_illuminators(self):
        """Number of illuminators of the PA imaging device."""
        return self._get_general_device_metadatum(MetadataDeviceTags.NUMBER_OF_ILLUMINATION_ELEMENTS)

    def get_number_of_detectors(self):
        """Number of transducer elements of the PA imaging device."""
        return self._get_general_device_metadatum(MetadataDeviceTags.NUMBER_OF_DETECTION_ELEMENTS)

    # ------------------------------------------------------------------
    # illuminator metadata
    # ------------------------------------------------------------------

    def get_illuminator_position(self, identifier=None):
        """Illuminator centroid position [x1, x2, x3] in cartesian coordinates."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.ILLUMINATOR_POSITION, identifier)

    def get_illuminator_orientation(self, identifier=None):
        """Illuminator rotation [r1, r2, r3]; the normal of the planar emitting surface."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.ILLUMINATOR_ORIENTATION, identifier)

    def get_illuminator_geometry(self, identifier=None):
        """Shape of the optical fibre output (interpretation depends on the geometry type)."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.ILLUMINATOR_GEOMETRY, identifier)

    def get_illuminator_geometry_type(self, identifier=None):
        """Geometry type of the fibre (bundle) output: "CIRCULAR", "SPHERE", "CUBOID" or "MESH"."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.ILLUMINATOR_GEOMETRY_TYPE, identifier)

    def get_wavelength_range(self, identifier=None):
        """Tunable wavelength range as (min, max, accuracy), e.g. (700, 900, 1.2) in nm."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.WAVELENGTH_RANGE, identifier)

    def get_energy_profile(self, identifier=None):
        """Discretized laser energy of the illuminator as a function of wavelength (nm)."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.LASER_ENERGY_PROFILE, identifier)

    def get_stability_profile(self, identifier=None):
        """Standard deviation of the pulse-to-pulse laser energy vs. wavelength (nm)."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.LASER_STABILITY_PROFILE, identifier)

    def get_pulse_width(self, identifier=None):
        """Pulse length measured between the half-power points of the pulse."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.PULSE_WIDTH, identifier)

    def get_beam_profile(self, identifier=None):
        """Relative laser beam intensity over the planar emitting surface."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.BEAM_INTENSITY_PROFILE, identifier)

    def get_beam_profile_distance(self):
        """Distance from the light source at which the beam intensity profile was measured."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.BEAM_INTENSITY_PROFILE_DISTANCE)

    def get_beam_divergence(self, identifier=None):
        """Beam opening angles (standard deviation) relative to the orientation vector."""
        return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.BEAM_DIVERGENCE_ANGLES, identifier)

    def get_illuminator_attribute_for_tag(self, metadatum_tag, identifier=None):
        """Look up an illuminator attribute by element index, id, or for all elements.

        :param metadatum_tag: the MetaDatum whose value is requested.
        :param identifier: None, int index, or str illuminator id.
        """
        return self._get_attribute_for_tag(
            MetadataDeviceTags.ILLUMINATORS.tag, self.get_number_of_illuminators(),
            self.get_illuminator_ids(), "illuminator", metadatum_tag, identifier,
            check_dtype=True)

    # ------------------------------------------------------------------
    # detector metadata
    # ------------------------------------------------------------------

    def get_detector_position(self, identifier=None):
        """Detection element centroid position [x1, x2, x3] in cartesian coordinates."""
        return self.get_detector_attribute_for_tag(MetadataDeviceTags.DETECTOR_POSITION, identifier)

    def get_detector_orientation(self, identifier=None):
        """Detection element rotation [r1, r2, r3] in radians."""
        return self.get_detector_attribute_for_tag(MetadataDeviceTags.DETECTOR_ORIENTATION, identifier)

    def get_detector_geometry(self, identifier=None):
        """Detection element size [x1, x2, x3] relative to its position and orientation."""
        return self.get_detector_attribute_for_tag(MetadataDeviceTags.DETECTOR_GEOMETRY, identifier)

    def get_detector_geometry_type(self, identifier=None):
        """Geometry type of the detector: "CIRCULAR", "SPHERE", "CUBOID" or "MESH"."""
        return self.get_detector_attribute_for_tag(MetadataDeviceTags.DETECTOR_GEOMETRY_TYPE, identifier)

    def get_frequency_response(self, identifier=None):
        """Response of the detection element vs. frequency of the incident pressure waves."""
        return self.get_detector_attribute_for_tag(MetadataDeviceTags.FREQUENCY_RESPONSE, identifier)

    def get_angular_response(self, identifier=None):
        """Angular sensitivity of the detection element vs. incident angle."""
        return self.get_detector_attribute_for_tag(MetadataDeviceTags.ANGULAR_RESPONSE, identifier)

    def get_detector_attribute_for_tag(self, metadatum_tag, identifier=None):
        """Look up a detector attribute by element index, id, or for all elements.

        :param metadatum_tag: the MetaDatum whose value is requested.
        :param identifier: None, int index, or str detector id.
        """
        # check_dtype=False preserves the original behavior: the aggregate
        # detector result is always converted to a numpy array.
        return self._get_attribute_for_tag(
            MetadataDeviceTags.DETECTORS.tag, self.get_number_of_detectors(),
            self.get_detector_ids(), "detector", metadatum_tag, identifier,
            check_dtype=False)

    # ------------------------------------------------------------------
    # acquisition metadata convenience getters
    # ------------------------------------------------------------------

    def get_encoding(self):
        """Character set used to encode the binary data and metadata, e.g. 'UTF-8'."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.ENCODING)

    def get_compression(self):
        """Compression method applied to the binary data, e.g. 'raw' or 'gzip'."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.COMPRESSION)

    def get_data_UUID(self):
        """Randomly generated UUID (version 4) of this data set."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.UUID)

    def get_data_type(self):
        """Datatype of the binary data in C++ naming convention, e.g. 'float'."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.DATA_TYPE)

    def get_dimensionality(self):
        """Spatiotemporal acquisition format of the binary data, e.g. '2D+t'."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.DIMENSIONALITY)

    def get_sizes(self):
        """Number of data points along each dimension of the dimensionality field."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.SIZES)

    def get_device_reference(self):
        """Reference to the UUID of the PA imaging device description."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.PHOTOACOUSTIC_IMAGING_DEVICE)

    def get_pulse_laser_energy(self):
        """Pulse-to-pulse laser energy measured during acquisition."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.PULSE_LASER_ENERGY)

    def get_time_stamps(self):
        """Frame acquisition timestamps of the acquisition system."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.FRAME_ACQUISITION_TIMESTAMPS)

    def get_wavelengths(self):
        """1D array of all optical wavelengths used for image acquisition."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.ACQUISITION_OPTICAL_WAVELENGTHS)

    def get_time_gain_compensation(self):
        """1D array of relative attenuation-correction factors applied to the data."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.TIME_GAIN_COMPENSATION)

    def get_overall_gain(self):
        """Single gain factor applied to all values of the raw time series data."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.OVERALL_GAIN)

    def get_element_dependent_gain(self):
        """2D array of relative apodization factors."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.ELEMENT_DEPENDENT_GAIN)

    def get_temperature(self):
        """Temperature during image acquisition."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.TEMPERATURE_CONTROL)

    def get_coupling_agent(self):
        """Acoustic coupling agent that was used, e.g. D2O, H2O or US-gel."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.ACOUSTIC_COUPLING_AGENT)

    def get_assumed_speed_of_sound(self):
        """Assumed speed of sound of the entire imaging medium including the coupling agent."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.ASSUMED_GLOBAL_SPEED_OF_SOUND)

    def get_scanning_method(self):
        """String representation of the scanning method."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.SCANNING_METHOD)

    def get_sampling_rate(self):
        """A/D sampling rate of the analog-to-digital conversion."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.AD_SAMPLING_RATE)

    def get_frequency_filter(self):
        """Frequency threshold levels applied to filter the raw time series data."""
        return self.get_acquisition_meta_datum(MetadataAcquisitionTags.FREQUENCY_DOMAIN_FILTER)
| [
"numpy.asarray"
] | [((18983, 19004), 'numpy.asarray', 'np.asarray', (['positions'], {}), '(positions)\n', (18993, 19004), True, 'import numpy as np\n'), ((14593, 14614), 'numpy.asarray', 'np.asarray', (['positions'], {}), '(positions)\n', (14603, 14614), True, 'import numpy as np\n')] |
# Reads data from QuickBooks Excel output, and combines two detail profit and loss files (from two time periods) into one
# file which keeps the detail from each time period but also shows the comparision between the two periods
import numpy as np
import pandas as pd
def read_db_from_file(filename):
    """Load a QuickBooks detail P&L Excel export and strip boilerplate.

    Drops the spurious 'Unnamed: 6' column and the first five header rows,
    returning the remaining rows as a DataFrame.
    """
    frame = pd.read_excel(filename)
    frame = frame.drop(columns=['Unnamed: 6'])
    frame = frame.drop(index=[0, 1, 2, 3, 4])
    return frame
def parse_QB_data(filename):
    """Convert a raw QuickBooks detail frame into tidy transaction rows.

    Category heading rows (those without an amount) set the category for the
    transaction rows that follow; each transaction row combines the two memo
    columns (skipping blanks) into a single Memo field.

    :param filename: despite the historical name, this is the DataFrame
        returned by read_db_from_file (callers pass it positionally).
    :return: DataFrame with columns Category/Date/Memo/Amount and Date
        parsed to datetime.

    Fixes vs. the original implementation:
    - the function now uses its argument instead of the module-level ``df``
    - fully blank rows no longer produce junk rows with NaN amounts
    - pd.isna replaces np.isnan, which raised TypeError on non-float cells
    - rows are collected in a list (DataFrame.append was removed in pandas 2)
    """
    df = filename  # misleading parameter name kept for backward compatibility
    rows = []
    last_category = 'start'
    for _, row in df.iterrows():
        descr = row["Business name"]
        amount = row['Unnamed: 5']
        if pd.isna(amount):
            # Rows without an amount carry the category heading.
            last_category = descr
        elif pd.isna(descr):
            # Transaction row: combine both memo columns, skipping blanks.
            memo_parts = [str(memo) for memo in (row['Unnamed: 3'], row['Unnamed: 4'])
                          if not pd.isna(memo)]
            rows.append({"Category": last_category,
                         "Date": row['Unnamed: 1'],
                         "Memo": ", ".join(memo_parts),
                         "Amount": amount})
    df1 = pd.DataFrame(rows, columns=['Category', 'Date', 'Memo', 'Amount'])
    df1['Date'] = pd.to_datetime(df1['Date'])
    return df1
def combine_year_data(df2019, df2020):
    """Interleave two periods' transaction details with per-category subtotals.

    For each category in df2019 (rows assumed grouped by category) the output
    contains: the category's df2019 detail rows, an "A:" subtotal row, the
    matching df2020 detail rows, a "B:" subtotal row, the difference (with a
    percentage only when the base total is nonzero), and a blank spacer row.

    :param df2019: tidy frame from parse_QB_data for the first period
    :param df2020: tidy frame from parse_QB_data for the second period
    :return: combined comparison DataFrame

    Fixes vs. the original implementation:
    - no spurious subtotal block is emitted before the first category
      (the original flushed the placeholder category "start")
    - the final category now also receives its subtotal/comparison block,
      which the original silently dropped
    - rows are collected in a list (DataFrame.append was removed in pandas 2)
    """
    columns = ['Category', 'Date', 'Memo', 'Amount', 'Total',
               'Difference', 'Difference_Percent']
    out_rows = []

    def _flush(category, total):
        # Emit the subtotal/comparison rows for one finished category.
        out_rows.append({"Memo": "A:", "Total": total})
        total2 = 0
        for _, row2 in df2020[df2020["Category"] == category].iterrows():
            out_rows.append(row2.to_dict())
            total2 += row2["Amount"]
        out_rows.append({"Memo": "B:", "Total": total2})
        if total == 0:
            # Avoid division by zero: report only the absolute difference.
            out_rows.append({"Difference": total2 - total})
        else:
            out_rows.append({"Difference": total2 - total,
                             "Difference_Percent": (total2 - total) / total})
        out_rows.append({"Category": ""})

    last_category = None
    total = 0
    for _, row in df2019.iterrows():
        category = row["Category"]
        if last_category is not None and category != last_category:
            _flush(last_category, total)
            total = 0
        out_rows.append({"Category": category, "Date": row["Date"],
                         "Memo": row["Memo"], "Amount": row["Amount"]})
        last_category = category
        total += row["Amount"]
    if last_category is not None:
        _flush(last_category, total)

    return pd.DataFrame(out_rows, columns=columns)
# Driver: read both yearly exports, tidy them, and write the comparison file.
# NOTE(review): parse_QB_data ignores its argument and reads the module-level
# ``df`` instead, so these ``df = ...`` assignments are load-bearing — confirm
# before refactoring.
df = read_db_from_file(r'2019.xlsx')
df2019 = parse_QB_data(df)
df = read_db_from_file(r'2020.xlsx')
df2020 = parse_QB_data(df)
dfout = combine_year_data(df2019, df2020)
# index=False keeps the synthetic row numbers out of the Excel output.
dfout.to_excel("out.xlsx", index=False)
| [
"pandas.DataFrame",
"numpy.isnan",
"pandas.to_datetime",
"pandas.read_excel"
] | [((372, 395), 'pandas.read_excel', 'pd.read_excel', (['filename'], {}), '(filename)\n', (385, 395), True, 'import pandas as pd\n'), ((544, 604), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Category', 'Date', 'Memo', 'Amount']"}), "(columns=['Category', 'Date', 'Memo', 'Amount'])\n", (556, 604), True, 'import pandas as pd\n'), ((1976, 2003), 'pandas.to_datetime', 'pd.to_datetime', (["df1['Date']"], {}), "(df1['Date'])\n", (1990, 2003), True, 'import pandas as pd\n'), ((2072, 2181), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Category', 'Date', 'Memo', 'Amount', 'Total', 'Difference',\n 'Difference_Percent']"}), "(columns=['Category', 'Date', 'Memo', 'Amount', 'Total',\n 'Difference', 'Difference_Percent'])\n", (2084, 2181), True, 'import pandas as pd\n'), ((889, 905), 'numpy.isnan', 'np.isnan', (['amount'], {}), '(amount)\n', (897, 905), True, 'import numpy as np\n'), ((989, 1004), 'numpy.isnan', 'np.isnan', (['descr'], {}), '(descr)\n', (997, 1004), True, 'import numpy as np\n'), ((1238, 1253), 'numpy.isnan', 'np.isnan', (['memo1'], {}), '(memo1)\n', (1246, 1253), True, 'import numpy as np\n'), ((1409, 1424), 'numpy.isnan', 'np.isnan', (['memo2'], {}), '(memo2)\n', (1417, 1424), True, 'import numpy as np\n'), ((1609, 1624), 'numpy.isnan', 'np.isnan', (['memo1'], {}), '(memo1)\n', (1617, 1624), True, 'import numpy as np\n'), ((1629, 1644), 'numpy.isnan', 'np.isnan', (['memo2'], {}), '(memo2)\n', (1637, 1644), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from scipy.interpolate import make_interp_spline
from scipy.interpolate import CubicSpline
import matplotlib.pyplot as plt
#
# Filename: interpolate.py
# Author: <NAME> <<EMAIL>>
# Copyright: 2021 <NAME>
# License: MIT license
#
def data(infile):
    """Read the 't' (time) and 'maf' (mutant allele fraction) columns from a CSV.

    :param infile: path or file-like object accepted by pandas.read_csv
    :return: (t_series, maf_series)
    """
    frame = pd.read_csv(infile)
    return frame["t"], frame["maf"]
# Fit a cubic spline through the empiric (t, MAF) points and plot it.
# Fixes vs. the original: removed the dead assignment X_Y_Spline = (x, y),
# the commented-out make_interp_spline call, and the unused locals xx/afont.
infile = "pz.csv"
x, y = data(infile)
x = np.array(x)
y = np.array(y)

X_Y_Spline = CubicSpline(x, y)
# Print the piecewise polynomial coefficients of the fitted spline.
print(X_Y_Spline.c)

# Evaluate the spline on a dense grid for a smooth curve.
X_ = np.linspace(x.min(), x.max(), 500)
Y_ = X_Y_Spline(X_)

# Plotting the Graph
fig, ax = plt.subplots()
tfont = {'fontname': 'Times New Roman'}
# NOTE(review): the tick labels assume pz.csv holds exactly five samples;
# also $t_c$ and $t_d$ below share the same "$t_1$-$t_2$" range — possible
# copy-paste typo, confirm with the author.
my_xticks = ['$t_a$', '$t_b$', '$t_c$', '$t_d$', '$t_e$']
plt.xticks(x, my_xticks)
ax.plot(x,y,"s", color= "brown", label ='MAF = "Mutant allele fraction"\n$t_a$ = "blood sample from $t_0$-$t_1$"\n$t_b$ = "blood sample from $t_1$" \n$t_c$ = "blood sample from $t_1$-$t_2$"\n$t_d$ = "blood sample from $t_1$-$t_2$"\n$t_e$ = "blood sample from $t_3$-$t_4$"')
ax.plot(X_, Y_, "y", label = "Spline interpolating empiric points")
legend = ax.legend(bbox_to_anchor=[0.4, 0.05], loc="lower center", shadow=True, fontsize='small')
plt.title("Title", style = 'italic', fontsize = 'medium', **tfont)
plt.xlabel("TIME", **tfont)
plt.ylabel("MAF(%)", **tfont)
plt.savefig("img.png")
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"scipy.interpolate.CubicSpline",
"matplotlib.pyplot.ylabel",
"pandas.read_csv",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((482, 493), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (490, 493), True, 'import numpy as np\n'), ((499, 510), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (507, 510), True, 'import numpy as np\n'), ((586, 603), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['x', 'y'], {}), '(x, y)\n', (597, 603), False, 'from scipy.interpolate import CubicSpline\n'), ((633, 655), 'numpy.linspace', 'np.linspace', (['(1)', '(2)', '(500)'], {}), '(1, 2, 500)\n', (644, 655), True, 'import numpy as np\n'), ((753, 767), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (765, 767), True, 'import matplotlib.pyplot as plt\n'), ((898, 922), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'my_xticks'], {}), '(x, my_xticks)\n', (908, 922), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1429), 'matplotlib.pyplot.title', 'plt.title', (['"""Title"""'], {'style': '"""italic"""', 'fontsize': '"""medium"""'}), "('Title', style='italic', fontsize='medium', **tfont)\n", (1376, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1435, 1462), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TIME"""'], {}), "('TIME', **tfont)\n", (1445, 1462), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1493), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MAF(%)"""'], {}), "('MAF(%)', **tfont)\n", (1474, 1493), True, 'import matplotlib.pyplot as plt\n'), ((1495, 1517), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""img.png"""'], {}), "('img.png')\n", (1506, 1517), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1529), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1527, 1529), True, 'import matplotlib.pyplot as plt\n'), ((314, 333), 'pandas.read_csv', 'pd.read_csv', (['infile'], {}), '(infile)\n', (325, 333), True, 'import pandas as pd\n')] |
import tensorflow as tf
import numpy as np
import os
from tqdm import tqdm
import argparse
import sys
def session(graph=None, allow_soft_placement=True,
            log_device_placement=False, allow_growth=True):
    """Create a tf.Session with a minimal, commonly-used configuration.

    Parameters
    ----------
    graph : tf.Graph, optional
        Graph to attach the session to (default graph when None).
    allow_soft_placement : bool
        Let TensorFlow fall back to another device when an op has no
        implementation on the requested one.
    log_device_placement : bool
        Log which device each op is placed on.
    allow_growth : bool
        Grow GPU memory usage on demand instead of grabbing it all upfront.
    """
    cfg = tf.ConfigProto(
        allow_soft_placement=allow_soft_placement,
        log_device_placement=log_device_placement)
    cfg.gpu_options.allow_growth = allow_growth
    return tf.Session(graph=graph, config=cfg)
def parse_tfrecord_tf(record):
    """Decode one serialized TFRecord example into a uint8 image tensor.

    Each record carries a raw byte string under 'data' and its 3-element
    shape under 'shape'; the bytes are reinterpreted as uint8 and reshaped.
    """
    feature_spec = {
        'shape': tf.FixedLenFeature([3], tf.int64),
        'data': tf.FixedLenFeature([], tf.string),
    }
    parsed = tf.parse_single_example(record, features=feature_spec)
    raw = tf.decode_raw(parsed['data'], tf.uint8)
    return tf.reshape(raw, parsed['shape'])
def adjust_dynamic_range(data, drange_in, drange_out):
if drange_in != drange_out:
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (
np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
data = data * scale + bias
return data
def get_images(data_dir, sess, batch_size):
    """Build a batched TFRecord input pipeline and return its next-element op.

    The dataset iterator is initialized on *sess* before returning, so the
    returned op can be run immediately to fetch successive image batches.
    """
    dataset = tf.data.TFRecordDataset(data_dir)
    dataset = dataset.map(parse_tfrecord_tf, num_parallel_calls=16)
    dataset = dataset.batch(batch_size)
    iterator = tf.data.Iterator.from_structure(dataset.output_types,
                                          dataset.output_shapes)
    init_op = iterator.make_initializer(dataset)
    next_batch = iterator.get_next()
    sess.run(init_op)
    return next_batch
def main(args):
    """Copy the slice [args.start, args.end) of a TFRecord dataset to a new file.

    Every record is fetched from the pipeline (so the iterator stays in sync
    with the running index) but only records whose index falls inside the
    slice are re-encoded and written to the output file.
    """
    print(args)
    sess = session()
    image_batch = get_images(data_dir=args.data_path, sess=sess,
                             batch_size=args.batch_size)
    os.makedirs(args.save_dir, exist_ok=True)
    tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
    out_path = args.save_dir + '/' + args.save_file_name
    writer = tf.python_io.TFRecordWriter(out_path, tfr_opt)
    for index in tqdm(range(args.total_nums)):
        batch = sess.run(image_batch)
        if args.start <= index < args.end:
            # First image of the batch, quantized to uint8.
            image = batch[0, :, :, :]
            quant = np.rint(image).clip(0, 255).astype(np.uint8)
            example = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
            writer.write(example.SerializeToString())
    writer.close()
if __name__ == "__main__":
    import signal
    # Exit cleanly (status 0) on Ctrl-C instead of raising KeyboardInterrupt.
    signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
    # Command-line interface: input/output locations plus the slice
    # [start, end) of record indices to copy out of `total_nums` records.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, required=True,
                        help='Location of the tfrecords images')
    parser.add_argument("--save_dir", type=str, required=True,
                        help="Location to save tfrecords file")
    parser.add_argument("--save_file_name", type=str, required=True,
                        help="save file name")
    parser.add_argument("--batch_size", type=int, default=1,
                        help="size of the input batch")
    parser.add_argument('--start', type=int, default=65000)
    parser.add_argument('--end', type=int, default=70000)
    parser.add_argument('--total_nums', type=int, default=70000)
    args = parser.parse_args()
    main(args)
"tensorflow.data.TFRecordDataset",
"sys.exit",
"os.makedirs",
"argparse.ArgumentParser",
"tensorflow.Session",
"tensorflow.decode_raw",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.Int64List",
"numpy.rint",
"tensorflow.reshape",
"tensorflow.ConfigProto",
"tensorflow.data.Iterator.f... | [((277, 381), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'allow_soft_placement', 'log_device_placement': 'log_device_placement'}), '(allow_soft_placement=allow_soft_placement,\n log_device_placement=log_device_placement)\n', (291, 381), True, 'import tensorflow as tf\n'), ((468, 506), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph', 'config': 'config'}), '(graph=graph, config=config)\n', (478, 506), True, 'import tensorflow as tf\n'), ((712, 753), 'tensorflow.decode_raw', 'tf.decode_raw', (["features['data']", 'tf.uint8'], {}), "(features['data'], tf.uint8)\n", (725, 753), True, 'import tensorflow as tf\n'), ((765, 800), 'tensorflow.reshape', 'tf.reshape', (['data', "features['shape']"], {}), "(data, features['shape'])\n", (775, 800), True, 'import tensorflow as tf\n'), ((1225, 1258), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['data_dir'], {}), '(data_dir)\n', (1248, 1258), True, 'import tensorflow as tf\n'), ((1376, 1446), 'tensorflow.data.Iterator.from_structure', 'tf.data.Iterator.from_structure', (['dset.output_types', 'dset.output_shapes'], {}), '(dset.output_types, dset.output_shapes)\n', (1407, 1446), True, 'import tensorflow as tf\n'), ((1834, 1872), 'os.makedirs', 'os.makedirs', (['tfr_prefix'], {'exist_ok': '(True)'}), '(tfr_prefix, exist_ok=True)\n', (1845, 1872), False, 'import os\n'), ((1888, 1959), 'tensorflow.python_io.TFRecordOptions', 'tf.python_io.TFRecordOptions', (['tf.python_io.TFRecordCompressionType.NONE'], {}), '(tf.python_io.TFRecordCompressionType.NONE)\n', (1916, 1959), True, 'import tensorflow as tf\n'), ((2031, 2077), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['tfr_file', 'tfr_opt'], {}), '(tfr_file, tfr_opt)\n', (2058, 2077), True, 'import tensorflow as tf\n'), ((2754, 2779), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2777, 2779), False, 'import argparse\n'), ((1055, 1080), 
'numpy.float32', 'np.float32', (['drange_out[0]'], {}), '(drange_out[0])\n', (1065, 1080), True, 'import numpy as np\n'), ((2728, 2739), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2736, 2739), False, 'import sys\n'), ((614, 647), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[3]', 'tf.int64'], {}), '([3], tf.int64)\n', (632, 647), True, 'import tensorflow as tf\n'), ((665, 698), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (683, 698), True, 'import tensorflow as tf\n'), ((907, 932), 'numpy.float32', 'np.float32', (['drange_out[1]'], {}), '(drange_out[1])\n', (917, 932), True, 'import numpy as np\n'), ((935, 960), 'numpy.float32', 'np.float32', (['drange_out[0]'], {}), '(drange_out[0])\n', (945, 960), True, 'import numpy as np\n'), ((986, 1010), 'numpy.float32', 'np.float32', (['drange_in[1]'], {}), '(drange_in[1])\n', (996, 1010), True, 'import numpy as np\n'), ((1013, 1037), 'numpy.float32', 'np.float32', (['drange_in[0]'], {}), '(drange_in[0])\n', (1023, 1037), True, 'import numpy as np\n'), ((1083, 1107), 'numpy.float32', 'np.float32', (['drange_in[0]'], {}), '(drange_in[0])\n', (1093, 1107), True, 'import numpy as np\n'), ((2248, 2261), 'numpy.rint', 'np.rint', (['img_'], {}), '(img_)\n', (2255, 2261), True, 'import numpy as np\n'), ((2416, 2453), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'quant.shape'}), '(value=quant.shape)\n', (2434, 2453), True, 'import tensorflow as tf\n')] |
import firedrake as fd
from firedrake import sqrt, inner, dx
from firedrake import sin, grad, pi, sym, div
from letop.physics import (
NavierStokesBrinkmannForm,
NavierStokesBrinkmannSolver,
mark_no_flow_regions,
InteriorBC,
)
import pytest
def test_solver_no_flow_region():
    """Solve Navier-Stokes-Brinkmann on a mesh with a marked no-flow region
    and check the velocity against a manufactured solution (MMS).

    The forcing terms f_mms_u / f_mms_p are built so that (u_mms, p_mms) is
    an exact solution; the discrete velocity must match it within 0.07 in
    the L2 norm.
    """
    mesh = fd.Mesh("./2D_mesh.msh")
    # Physical surface 2 of the mesh is flagged as a no-flow region and
    # re-marked with facet/region marker 1.
    no_flow = [2]
    no_flow_markers = [1]
    mesh = mark_no_flow_regions(mesh, no_flow, no_flow_markers)
    # Mixed space: vector CG1 velocity x scalar CG1 pressure
    # (equal-order pair; GLS stabilization is enabled below via beta_gls).
    P2 = fd.VectorElement("CG", mesh.ufl_cell(), 1)
    P1 = fd.FiniteElement("CG", mesh.ufl_cell(), 1)
    TH = P2 * P1
    W = fd.FunctionSpace(mesh, TH)
    (v, q) = fd.TestFunctions(W)
    # Stokes 1
    w_sol1 = fd.Function(W)
    nu = fd.Constant(0.05)
    F = NavierStokesBrinkmannForm(W, w_sol1, nu, beta_gls=2.0)
    x, y = fd.SpatialCoordinate(mesh)
    # Manufactured velocity/pressure fields and the corresponding momentum
    # and continuity source terms.
    u_mms = fd.as_vector(
        [sin(2.0 * pi * x) * sin(pi * y), sin(pi * x) * sin(2.0 * pi * y)]
    )
    p_mms = -0.5 * (u_mms[0] ** 2 + u_mms[1] ** 2)
    f_mms_u = (
        grad(u_mms) * u_mms + grad(p_mms) - 2.0 * nu * div(sym(grad(u_mms)))
    )
    f_mms_p = div(u_mms)
    F += -inner(f_mms_u, v) * dx - f_mms_p * q * dx
    # Dirichlet data from the manufactured solution on the outer boundary...
    bc1 = fd.DirichletBC(W.sub(0), u_mms, "on_boundary")
    bc2 = fd.DirichletBC(W.sub(1), p_mms, "on_boundary")
    # ...and zero velocity enforced inside the marked no-flow region.
    bc_no_flow = InteriorBC(W.sub(0), fd.Constant((0.0, 0.0)), no_flow_markers)
    solver_parameters = {"ksp_max_it": 500, "ksp_monitor": None}
    problem1 = fd.NonlinearVariationalProblem(
        F, w_sol1, bcs=[bc1, bc2, bc_no_flow]
    )
    solver1 = NavierStokesBrinkmannSolver(
        problem1,
        options_prefix="navier_stokes",
        solver_parameters=solver_parameters,
    )
    solver1.solve()
    u_sol, _ = w_sol1.split()
    u_mms_func = fd.interpolate(u_mms, W.sub(0))
    error = fd.errornorm(u_sol, u_mms_func)
    assert error < 0.07
def run_solver(r):
    """Solve Navier-Stokes-Brinkmann on a 2**r x 2**r unit square against a
    manufactured solution and return the L2 velocity error.

    Parameters
    ----------
    r : int
        Refinement level; the mesh has 2**r cells per side.
    """
    mesh = fd.UnitSquareMesh(2 ** r, 2 ** r)
    # Equal-order mixed space: vector CG1 velocity x scalar CG1 pressure.
    velocity_elem = fd.VectorElement("CG", mesh.ufl_cell(), 1)
    pressure_elem = fd.FiniteElement("CG", mesh.ufl_cell(), 1)
    W = fd.FunctionSpace(mesh, velocity_elem * pressure_elem)
    (v, q) = fd.TestFunctions(W)
    solution = fd.Function(W)
    nu = fd.Constant(0.05)
    F = NavierStokesBrinkmannForm(W, solution, nu, beta_gls=2.0)
    x, y = fd.SpatialCoordinate(mesh)
    # Manufactured (MMS) velocity/pressure fields plus the momentum and
    # continuity source terms that make them an exact solution.
    u_mms = fd.as_vector(
        [sin(2.0 * pi * x) * sin(pi * y), sin(pi * x) * sin(2.0 * pi * y)]
    )
    p_mms = -0.5 * (u_mms[0] ** 2 + u_mms[1] ** 2)
    f_mms_u = (
        grad(u_mms) * u_mms + grad(p_mms) - 2.0 * nu * div(sym(grad(u_mms)))
    )
    f_mms_p = div(u_mms)
    F += -inner(f_mms_u, v) * dx - f_mms_p * q * dx
    # Dirichlet data taken from the manufactured solution.
    bc_u = fd.DirichletBC(W.sub(0), u_mms, "on_boundary")
    bc_p = fd.DirichletBC(W.sub(1), p_mms, "on_boundary")
    problem = fd.NonlinearVariationalProblem(F, solution, bcs=[bc_u, bc_p])
    solver = NavierStokesBrinkmannSolver(
        problem,
        options_prefix="navier_stokes",
        solver_parameters={"ksp_max_it": 200},
    )
    solver.solve()
    u_sol, _ = solution.split()
    fd.File("test_u_sol.pvd").write(u_sol)
    u_mms_func = fd.interpolate(u_mms, W.sub(0))
    error = fd.errornorm(u_sol, u_mms_func)
    print(f"Error: {error}")
    return error
def run_convergence_test():
    """Estimate the L2 convergence order from three successive mesh refinements."""
    import numpy as np
    errors = np.array([run_solver(level) for level in range(4, 7)])
    # log2 of successive error ratios gives the observed order per refinement.
    return np.log2(errors[:-1] / errors[1:])
def test_l2_conv():
    """Velocity error must converge with order > 0.8 on every refinement step."""
    orders = run_convergence_test()
    assert (orders > 0.8).all()
| [
"firedrake.UnitSquareMesh",
"firedrake.Constant",
"firedrake.Mesh",
"firedrake.div",
"letop.physics.NavierStokesBrinkmannSolver",
"firedrake.NonlinearVariationalProblem",
"firedrake.FunctionSpace",
"firedrake.grad",
"firedrake.TestFunctions",
"firedrake.File",
"firedrake.errornorm",
"firedrake... | [((304, 328), 'firedrake.Mesh', 'fd.Mesh', (['"""./2D_mesh.msh"""'], {}), "('./2D_mesh.msh')\n", (311, 328), True, 'import firedrake as fd\n'), ((384, 436), 'letop.physics.mark_no_flow_regions', 'mark_no_flow_regions', (['mesh', 'no_flow', 'no_flow_markers'], {}), '(mesh, no_flow, no_flow_markers)\n', (404, 436), False, 'from letop.physics import NavierStokesBrinkmannForm, NavierStokesBrinkmannSolver, mark_no_flow_regions, InteriorBC\n'), ((566, 592), 'firedrake.FunctionSpace', 'fd.FunctionSpace', (['mesh', 'TH'], {}), '(mesh, TH)\n', (582, 592), True, 'import firedrake as fd\n'), ((606, 625), 'firedrake.TestFunctions', 'fd.TestFunctions', (['W'], {}), '(W)\n', (622, 625), True, 'import firedrake as fd\n'), ((655, 669), 'firedrake.Function', 'fd.Function', (['W'], {}), '(W)\n', (666, 669), True, 'import firedrake as fd\n'), ((679, 696), 'firedrake.Constant', 'fd.Constant', (['(0.05)'], {}), '(0.05)\n', (690, 696), True, 'import firedrake as fd\n'), ((705, 759), 'letop.physics.NavierStokesBrinkmannForm', 'NavierStokesBrinkmannForm', (['W', 'w_sol1', 'nu'], {'beta_gls': '(2.0)'}), '(W, w_sol1, nu, beta_gls=2.0)\n', (730, 759), False, 'from letop.physics import NavierStokesBrinkmannForm, NavierStokesBrinkmannSolver, mark_no_flow_regions, InteriorBC\n'), ((772, 798), 'firedrake.SpatialCoordinate', 'fd.SpatialCoordinate', (['mesh'], {}), '(mesh)\n', (792, 798), True, 'import firedrake as fd\n'), ((1070, 1080), 'firedrake.div', 'div', (['u_mms'], {}), '(u_mms)\n', (1073, 1080), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((1409, 1478), 'firedrake.NonlinearVariationalProblem', 'fd.NonlinearVariationalProblem', (['F', 'w_sol1'], {'bcs': '[bc1, bc2, bc_no_flow]'}), '(F, w_sol1, bcs=[bc1, bc2, bc_no_flow])\n', (1439, 1478), True, 'import firedrake as fd\n'), ((1507, 1617), 'letop.physics.NavierStokesBrinkmannSolver', 'NavierStokesBrinkmannSolver', (['problem1'], {'options_prefix': '"""navier_stokes"""', 'solver_parameters': 
'solver_parameters'}), "(problem1, options_prefix='navier_stokes',\n solver_parameters=solver_parameters)\n", (1534, 1617), False, 'from letop.physics import NavierStokesBrinkmannForm, NavierStokesBrinkmannSolver, mark_no_flow_regions, InteriorBC\n'), ((1756, 1787), 'firedrake.errornorm', 'fd.errornorm', (['u_sol', 'u_mms_func'], {}), '(u_sol, u_mms_func)\n', (1768, 1787), True, 'import firedrake as fd\n'), ((1844, 1877), 'firedrake.UnitSquareMesh', 'fd.UnitSquareMesh', (['(2 ** r)', '(2 ** r)'], {}), '(2 ** r, 2 ** r)\n', (1861, 1877), True, 'import firedrake as fd\n'), ((2007, 2033), 'firedrake.FunctionSpace', 'fd.FunctionSpace', (['mesh', 'TH'], {}), '(mesh, TH)\n', (2023, 2033), True, 'import firedrake as fd\n'), ((2047, 2066), 'firedrake.TestFunctions', 'fd.TestFunctions', (['W'], {}), '(W)\n', (2063, 2066), True, 'import firedrake as fd\n'), ((2096, 2110), 'firedrake.Function', 'fd.Function', (['W'], {}), '(W)\n', (2107, 2110), True, 'import firedrake as fd\n'), ((2120, 2137), 'firedrake.Constant', 'fd.Constant', (['(0.05)'], {}), '(0.05)\n', (2131, 2137), True, 'import firedrake as fd\n'), ((2146, 2200), 'letop.physics.NavierStokesBrinkmannForm', 'NavierStokesBrinkmannForm', (['W', 'w_sol1', 'nu'], {'beta_gls': '(2.0)'}), '(W, w_sol1, nu, beta_gls=2.0)\n', (2171, 2200), False, 'from letop.physics import NavierStokesBrinkmannForm, NavierStokesBrinkmannSolver, mark_no_flow_regions, InteriorBC\n'), ((2271, 2297), 'firedrake.SpatialCoordinate', 'fd.SpatialCoordinate', (['mesh'], {}), '(mesh)\n', (2291, 2297), True, 'import firedrake as fd\n'), ((2569, 2579), 'firedrake.div', 'div', (['u_mms'], {}), '(u_mms)\n', (2572, 2579), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((2807, 2864), 'firedrake.NonlinearVariationalProblem', 'fd.NonlinearVariationalProblem', (['F', 'w_sol1'], {'bcs': '[bc1, bc2]'}), '(F, w_sol1, bcs=[bc1, bc2])\n', (2837, 2864), True, 'import firedrake as fd\n'), ((2879, 2989), 'letop.physics.NavierStokesBrinkmannSolver', 
'NavierStokesBrinkmannSolver', (['problem1'], {'options_prefix': '"""navier_stokes"""', 'solver_parameters': 'solver_parameters'}), "(problem1, options_prefix='navier_stokes',\n solver_parameters=solver_parameters)\n", (2906, 2989), False, 'from letop.physics import NavierStokesBrinkmannForm, NavierStokesBrinkmannSolver, mark_no_flow_regions, InteriorBC\n'), ((3171, 3202), 'firedrake.errornorm', 'fd.errornorm', (['u_sol', 'u_mms_func'], {}), '(u_sol, u_mms_func)\n', (3183, 3202), True, 'import firedrake as fd\n'), ((3372, 3401), 'numpy.log2', 'np.log2', (['(diff[:-1] / diff[1:])'], {}), '(diff[:-1] / diff[1:])\n', (3379, 3401), True, 'import numpy as np\n'), ((1285, 1308), 'firedrake.Constant', 'fd.Constant', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (1296, 1308), True, 'import firedrake as fd\n'), ((1003, 1014), 'firedrake.grad', 'grad', (['p_mms'], {}), '(p_mms)\n', (1007, 1014), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((2502, 2513), 'firedrake.grad', 'grad', (['p_mms'], {}), '(p_mms)\n', (2506, 2513), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((3071, 3096), 'firedrake.File', 'fd.File', (['"""test_u_sol.pvd"""'], {}), "('test_u_sol.pvd')\n", (3078, 3096), True, 'import firedrake as fd\n'), ((834, 851), 'firedrake.sin', 'sin', (['(2.0 * pi * x)'], {}), '(2.0 * pi * x)\n', (837, 851), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((854, 865), 'firedrake.sin', 'sin', (['(pi * y)'], {}), '(pi * y)\n', (857, 865), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((867, 878), 'firedrake.sin', 'sin', (['(pi * x)'], {}), '(pi * x)\n', (870, 878), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((881, 898), 'firedrake.sin', 'sin', (['(2.0 * pi * y)'], {}), '(2.0 * pi * y)\n', (884, 898), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((981, 992), 'firedrake.grad', 'grad', (['u_mms'], {}), '(u_mms)\n', (985, 992), False, 'from firedrake import sin, 
grad, pi, sym, div, inner\n'), ((1091, 1108), 'firedrake.inner', 'inner', (['f_mms_u', 'v'], {}), '(f_mms_u, v)\n', (1096, 1108), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((2333, 2350), 'firedrake.sin', 'sin', (['(2.0 * pi * x)'], {}), '(2.0 * pi * x)\n', (2336, 2350), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((2353, 2364), 'firedrake.sin', 'sin', (['(pi * y)'], {}), '(pi * y)\n', (2356, 2364), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((2366, 2377), 'firedrake.sin', 'sin', (['(pi * x)'], {}), '(pi * x)\n', (2369, 2377), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((2380, 2397), 'firedrake.sin', 'sin', (['(2.0 * pi * y)'], {}), '(2.0 * pi * y)\n', (2383, 2397), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((2480, 2491), 'firedrake.grad', 'grad', (['u_mms'], {}), '(u_mms)\n', (2484, 2491), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((2590, 2607), 'firedrake.inner', 'inner', (['f_mms_u', 'v'], {}), '(f_mms_u, v)\n', (2595, 2607), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((1036, 1047), 'firedrake.grad', 'grad', (['u_mms'], {}), '(u_mms)\n', (1040, 1047), False, 'from firedrake import sin, grad, pi, sym, div, inner\n'), ((2535, 2546), 'firedrake.grad', 'grad', (['u_mms'], {}), '(u_mms)\n', (2539, 2546), False, 'from firedrake import sin, grad, pi, sym, div, inner\n')] |
"""
Code here stores automatic analysis routines for ABF files given their protocol.
There are several analysis routines which are general (show all sweeps
continuously, show sweeps stacked, show sweeps overlayed, etc) and can be used
for almost any protocol (or ABFs with unknown protocol).
Some analysis routines are specific for specific protocols.
These routines are highly specific to the nature of the scientific work I do,
and this document may not be useful to others beyond an example of how to use
pyABF to set-up an automatic analysis pipeline for electrophysiology data.
"""
import os
PATH_HERE = os.path.dirname(__file__)
PATH_DATA = os.path.abspath(os.path.dirname(__file__)+"/../../data/abfs/")
import sys
sys.path.insert(0, PATH_HERE+"/../../src/")
sys.path.append(R"C:\Users\swharden\Documents\GitHub\pyABF\src")
import pyabf
import os
import numpy as np
import matplotlib.pyplot as plt
import logging
log = logging.getLogger(__name__)
log.debug(f"autoabf imported")
log.setLevel(level=logging.WARN)
# default size of the images being made
FIGSIZE = (8, 6)
FIGSIZE_WIDE = (FIGSIZE[0]*1.6, FIGSIZE[1]*1)
# automatically generated figures are saved in this subfolder
from abfnav import DATAFOLDER
# Little operations to apply on graphs
def _secLookUp(abf, timeSec1, timeSec2, returnPoints=False):
    """Resolve possibly-None start/end times to concrete values.

    A None start becomes 0 and a None end becomes the sweep length.  With
    returnPoints=True the pair is returned as data-point indices instead
    of seconds.
    """
    assert isinstance(abf, pyabf.ABF)
    start = 0 if timeSec1 is None else timeSec1
    end = abf.sweepLengthSec if timeSec2 is None else timeSec2
    if returnPoints:
        return int(start * abf.dataRate), int(end * abf.dataRate)
    return start, end
def shadeDigitalOutput(abf, digitalOutputChannel=4, color='r'):
    """In sweep view, shade every epoch whose digital output is high."""
    log.debug("shading digital outputs")
    epochStates = pyabf.stimulus.digitalWaveformEpochs(abf)[digitalOutputChannel]
    epochPoints = pyabf.stimulus.epochPoints(abf)
    for epochNumber, state in enumerate(epochStates):
        if state != 1:
            continue
        # Convert the epoch's start/end points to seconds for axvspan.
        t1 = epochPoints[epochNumber] * abf.dataSecPerPoint
        t2 = epochPoints[epochNumber + 1] * abf.dataSecPerPoint
        plt.axvspan(t1, t2, color=color, alpha=.3, lw=0)
def shadeAllBackgrounds(color=(1.0, 1.0, 0.9)):
    """Set the face (background) color of every subplot in the current figure.

    Parameters
    ----------
    color : matplotlib color, optional
        Background color applied to each axes (default is a pale yellow).
    """
    # Fix: use lazy %-formatting.  The original passed `color` as an extra
    # argument with no placeholder in the message, which triggers a logging
    # formatting error whenever DEBUG logging is enabled.
    log.debug("shading all backgrounds %s", color)
    for ax in plt.gcf().axes:
        ax.set_facecolor(color)
def addComments(abf, minutes = False, showLabels = True):
    """Draw a dashed vertical line (and optional label) at every ABF tag.

    Call on a plot whose horizontal axis is time in seconds, or in minutes
    when minutes=True.
    """
    log.debug("adding comments to graphs")
    assert isinstance(abf, pyabf.ABF)
    if not abf.tagComments:
        return
    for comment, timeSec in zip(abf.tagComments, abf.tagTimesSec):
        xPos = timeSec / 60.0 if minutes else timeSec
        plt.axvline(xPos, color='r', lw=2, alpha=.5, ls='--')
        # Place the label just below the top of the current axes.
        X1, X2, Y1, Y2 = plt.axis()
        yTop = Y2 - abs(Y2 - Y1) * .02
        if showLabels:
            plt.text(xPos, yTop, comment, color='r', rotation='vertical',
                     ha='right', va='top', weight='bold', alpha=.5, size=8)
### Code here acts on the active matplotlib figure or subplot ###
def plotFigNew(abf, figsize=FIGSIZE):
    """Open a new matplotlib figure of the given size."""
    log.debug("creating new figure")
    plt.figure(figsize=figsize)
def plotFigSave(abf, tag="", tight=True, closeToo=True, grid=True,
                unknown=False, title=None, labelAxes=True):
    """Decorate the current figure and save it next to the ABF file.

    The image is written into the DATAFOLDER subfolder beside the ABF,
    named "<abfID>_<tag>.png" (with an extra "UNKNOWN" marker when
    unknown=True).

    Parameters
    ----------
    abf : pyabf.ABF
        ABF the figure belongs to (used for the output path and labels).
    tag : str
        Suffix appended to the output file name.
    tight : bool
        Apply plt.tight_layout() before saving.
    closeToo : bool
        Close the figure after saving.
    grid : bool
        Apply a dashed grid to every subplot.
    unknown : bool
        Mark the figure (red label, shaded background) as an
        unknown-protocol plot.  NOTE: also mutates abf.protocol.
    title : str, optional
        Title applied only to single-subplot figures.
    labelAxes : bool
        Apply the default sweep axis labels (single-subplot figures only).
    """
    log.debug("saving figure outputs")
    assert isinstance(abf, pyabf.ABF)
    # apply title only to single-subplot figures
    if len(plt.gcf().axes) == 1:
        if title:
            plt.title(title)
        if labelAxes:
            plt.ylabel(abf.sweepLabelY)
            plt.xlabel(abf.sweepLabelX)
    # apply a grid to all subplots
    if grid:
        for i, ax in enumerate(plt.gcf().axes):
            ax.grid(alpha=.5, ls="--")
    # decorate unknown plots in a special way
    shade_unknown_graphs = True
    if unknown:
        # WARNING: mutates the ABF object; later plots inherit the marker.
        abf.protocol = abf.protocol + "(UNKNOWN)"
        protocolColor = "r"
    if unknown and shade_unknown_graphs:
        for i, ax in enumerate(plt.gcf().axes):
            ax.set_facecolor((1.0, 0.9, 0.9))
    else:
        protocolColor = '.5'
    # optionally tight
    if tight:
        plt.tight_layout()
    # convert horizontal units to minutes on long (>= 2 minute) time axes
    for ax in plt.gcf().axes:
        if not "sec" in ax.get_xlabel():
            continue
        if ax.get_xticks()[-1] < 120:
            continue
        xticks = ["%.02f" % (x/60) for x in ax.get_xticks()]
        ax.set_xticklabels(xticks)
        ax.set_xlabel("time (minutes)")
    # add the ABF ID and protocol to the lower corner
    plt.gcf().text(0.005, 0.005, f"{abf.abfID}\n{abf.protocol}",
                   transform=plt.gca().transAxes, fontsize=10,
                   verticalalignment='bottom', family='monospace',
                   color=protocolColor)
    abfDir = os.path.dirname(abf.abfFilePath)
    if unknown:
        fnOut = abf.abfID+"_UNKNOWN_"+tag+".png"
    else:
        fnOut = abf.abfID+"_"+tag+".png"
    pathOut = os.path.join(abfDir, DATAFOLDER, fnOut)
    # Fix: makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair;
    # it also creates any missing intermediate directories.
    os.makedirs(os.path.dirname(pathOut), exist_ok=True)
    log.debug(f"saving {fnOut}")
    plt.savefig(pathOut)
    if closeToo:
        plt.close()
    return
# Code here indicates how to make common graphs
def generic_ap_steps(abf):
    """Create a plot for generic AP steps.

    Four panels: all sweeps overlayed, all sweeps stacked, the first sweep
    containing an action potential (zoomed to 0-1 s), and an AP gain curve
    (AP frequency vs applied current) for epochs 1 and 4.
    """
    log.debug("generic plot: AP steps")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    # all sweeps overlayed
    axOvr = plt.gcf().add_subplot(2, 2, 1)
    pyabf.plot.sweeps(abf, axis=axOvr, alpha=.5)
    axOvr.set_title(f"Sweep Overlay")
    # all sweeps stacked
    axStack = plt.gcf().add_subplot(2, 2, 2)
    pyabf.plot.sweeps(abf, axis=axStack, alpha=.5, offsetYunits=100)
    axStack.set_title(f"Sweeps Stacked")
    # first sweep with APs
    axAp = plt.gcf().add_subplot(2, 2, 3)
    p1, p2 = _secLookUp(abf, 0, 1, True)
    # Scan sweeps in order until one exceeds 10 (presumably mV, a simple
    # AP-detection threshold) in the first second; that sweep stays loaded.
    for sweep in abf.sweepList:
        abf.setSweep(sweep)
        if np.max(abf.sweepY[p1:p2]) > 10:
            break
    pyabf.plot.sweeps(abf, sweepNumbers=[abf.sweepNumber], axis=axAp, alpha=1)
    axAp.axis([p1/abf.dataRate, p2/abf.dataRate, None, None])
    axAp.set_title(f"First Action Potential")
    # AP gain curve
    axGain = plt.gcf().add_subplot(2, 2, 4)
    for epochNumber, color in zip([1, 4], ['C0', 'C1']):
        # Skip epochs the protocol does not define.
        if epochNumber >= len(pyabf.stimulus.epochValues(abf)[0]):
            continue
        currents = pyabf.stimulus.epochValues(abf)[:, epochNumber]
        epochSec1 = pyabf.stimulus.epochPoints(abf)[epochNumber]/abf.dataRate
        epochSec2 = pyabf.stimulus.epochPoints(abf)[epochNumber+1]/abf.dataRate
        # Mean AP frequency in the epoch (solid) and instantaneous frequency
        # of the first AP pair (dotted).
        [apFreqInBin, apFreqFirst] = pyabf.ap.ap_freq_per_sweep(
            abf, epochNumber)
        axGain.plot(currents, apFreqInBin, '.-', color=color)
        axGain.plot(currents, apFreqFirst, '.:', color=color)
        # Mark the analyzed epoch on the stacked-sweep panel.
        axStack.axvspan(epochSec1, epochSec2, color=color, alpha=.1)
    axGain.set_title(f"AP Gain Curve")
    axGain.set_ylabel("AP Frequency (Hz)")
    axGain.set_xlabel("Applied Current (pA)")
    # Reference line at 40 Hz (NOTE(review): significance not documented).
    axGain.axhline(40, color='r', alpha=.2, ls='--', lw=2)
    plotFigSave(abf, tag="generic-overlay", labelAxes=False)
def generic_iv(abf, timeSec1, timeSec2, sweepStepMv, firstSweepMv, filter=True):
    """Create a graph plotting the I/V between two points.

    Left panel: all sweeps overlayed with the measurement window shaded.
    Right panel: mean current in that window vs the command voltage of each
    sweep (firstSweepMv plus sweepStepMv per sweep).
    """
    log.debug("generic plot: IV curve")
    # enable lowpass filter
    if filter:
        pyabf.filter.gaussian(abf, 2)
    # measure currents for each step
    currentAvg = pyabf.stats.rangeAverage(abf, timeSec1, timeSec2)
    # NOTE(review): currentErr is computed but never used below.
    currentErr = pyabf.stats.rangeStdev(abf, timeSec1, timeSec2)
    voltage = np.arange(abf.sweepCount)*sweepStepMv+firstSweepMv
    plotFigNew(abf, figsize=FIGSIZE_WIDE)  # double wide
    ax1 = plt.gcf().add_subplot(1, 2, 1)
    ax2 = plt.gcf().add_subplot(1, 2, 2)
    # create the overlay figure
    pyabf.plot.sweeps(abf, axis=ax1, linewidth=2, alpha=.8)
    ax1.axvspan(timeSec1, timeSec2, color='r', alpha=.1)
    ax1.set_title(f"{abf.abfID} I/V Source Sweeps")
    # pad the vertical axis by 20% of the measured current span
    dY = (np.nanmax(currentAvg) - np.nanmin(currentAvg))*.2
    ax1.axis([None, None, np.nanmin(currentAvg)-dY, np.nanmax(currentAvg)+dY])
    # create the IV curve (reference lines at 0 pA and -70 mV)
    ax2.axhline(0, ls='--', alpha=.5, color='k')
    ax2.axvline(-70, ls='--', alpha=.5, color='k')
    ax2.plot(voltage, currentAvg, '.-', lw=2, ms=20)
    ax2.set_ylabel("Current (pA)")
    ax2.set_xlabel("Voltage (mV)")
    ax2.set_title(f"{abf.abfID} I/V Relationship")
    plotFigSave(abf, tag="IV", labelAxes=False)
def generic_overlay(abf, color=None, unknown=False, alpha=None):
    """Plot every sweep overlayed, one subplot per channel."""
    log.debug("generic plot: overlay")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    for channelNumber in abf.channelList:
        axis = plt.gcf().add_subplot(abf.channelCount, 1, channelNumber + 1)
        pyabf.plot.sweeps(abf, axis=axis, color=color,
                          channel=channelNumber, alpha=alpha)
        axis.set_title(f"{abf.abfID} (Ch{channelNumber+1}) Sweep Overlay")
    plotFigSave(abf, tag="generic-overlay", unknown=unknown)
def generic_overlay_average(abf, baselineSec1=None, baselineSec2=None):
    """Overlay all sweeps (faint) plus their average (black), one subplot
    per channel, optionally baseline-subtracted over the given range."""
    log.debug("generic plot: overlay average")
    assert isinstance(abf, pyabf.ABF)
    if baselineSec2:
        abf.sweepBaseline(baselineSec1, baselineSec2)
    plotFigNew(abf)
    for channelNumber in abf.channelList:
        axis = plt.gcf().add_subplot(abf.channelCount, 1, channelNumber + 1)
        if baselineSec2:
            # Mark the zero line when a baseline has been subtracted.
            axis.axhline(0, color='k', ls=':')
        pyabf.plot.sweeps(abf, axis=axis, color='C0', channel=channelNumber, alpha=.2)
        axis.set_title(f"{abf.abfID} (Ch{channelNumber+1}) Sweep Overlay")
        axis.plot(abf.sweepX, pyabf.sweep.averageTrace(abf, channelNumber), color='k')
    plotFigSave(abf, tag="generic-overlay")
def generic_continuous(abf, unknown=False, alpha=1):
    """Plot the whole recording (all sweeps end-to-end), one subplot per
    channel, with ABF tag comments marked."""
    log.debug("generic plot: continuous")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    for channelNumber in abf.channelList:
        axis = plt.gcf().add_subplot(abf.channelCount, 1, channelNumber + 1)
        pyabf.plot.sweeps(abf, axis=axis, continuous=True, color='b',
                          channel=channelNumber, alpha=alpha, linewidth=.5)
        axis.set_title(f"{abf.abfID} (Ch{channelNumber+1}) Continuous Signal")
    addComments(abf)
    plotFigSave(abf, tag="generic-continuous", unknown=unknown)
def generic_first_sweep(abf, timeSec1=None, timeSec2=None):
    """Plot only the first sweep (optionally a time slice of it), one
    subplot per channel."""
    log.debug("generic plot: first sweep")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    for channelNumber in abf.channelList:
        axis = plt.gcf().add_subplot(abf.channelCount, 1, channelNumber + 1)
        pyabf.plot.sweeps(abf, sweepNumbers=[0], axis=axis, color='b',
                          channel=channelNumber, alpha=1,
                          startAtSec=timeSec1, endAtSec=timeSec2)
        axis.set_title(f"{abf.abfID} (Ch{channelNumber+1}) First Sweep")
    plotFigSave(abf, tag="generic-first-sweep")
def generic_average_over_time(abf, timeSec1=None, timeSec2=None):
    """Plot each sweep's average (with stdev) over a time range across the
    whole experiment, one subplot per channel, with tag comments marked."""
    log.debug("generic plot: average over time")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    for channelNumber in abf.channelList:
        axis = plt.gcf().add_subplot(abf.channelCount, 1, channelNumber + 1)
        times = np.arange(abf.sweepCount) * abf.sweepLengthSec
        means = pyabf.stats.rangeAverage(
            abf, timeSec1, timeSec2, channel=channelNumber)
        stdevs = pyabf.stats.rangeStdev(
            abf, timeSec1, timeSec2, channel=channelNumber)
        if len(times) > 20:
            # many sweeps: faint error band plus a connected dot trace
            axis.errorbar(times, means, stdevs, alpha=.3)
            axis.plot(times, means, ".-", color='C0')
            axis.margins(0, .1)
        else:
            # few sweeps: a single bold errorbar trace with caps
            axis.errorbar(times, means, stdevs, alpha=1,
                          ms=10, marker='.', ls='-', capsize=5)
        rangeLabel = "%.02f - %.02f sec" % (_secLookUp(abf, timeSec1, timeSec2))
        axis.set_title(f"{abf.abfID} (Ch{channelNumber+1}) [{rangeLabel}]")
    addComments(abf)
    plotFigSave(abf, tag=f"generic-average-over-time")
def generic_memtest_over_time(abf):
    """The first epoch is a VC step, so show memtest properties over time.

    Draws a 2x2 grid: holding current (Ih), membrane resistance (Rm),
    access resistance (Ra), and membrane capacitance (Cm), each plotted
    per sweep against experiment time in minutes.
    """
    log.debug("generic plot: memtest analysis")
    assert isinstance(abf, pyabf.ABF)
    IhBySweep, RmBySweep, RaBySweep, CmBySweep = pyabf.memtest.step_valuesBySweep(abf)
    sweepTimesMin = np.arange(abf.sweepCount)*abf.sweepLengthSec/60
    # One entry per panel: (values, title, y-label, color, clamp y-min to 0).
    # Ih can legitimately be negative, so its y-axis is not clamped.
    panels = [
        (IhBySweep, "Holding Current (Ih)", "Clamp Current (pA)", 'C0', False),
        (RmBySweep, "Membrane Resistance (Rm)", "Resistance (MOhm)", 'C3', True),
        (RaBySweep, "Access Resistance (Ra)", "Resistance (MOhm)", 'C7', True),
        (CmBySweep, "Membrane Capacitance (Cm)", "Capacitance (pF)", 'C1', True),
    ]
    plotFigNew(abf)
    for panelIndex, (values, title, yLabel, color, clampY) in enumerate(panels):
        ax = plt.gcf().add_subplot(2, 2, panelIndex + 1)
        ax.plot(sweepTimesMin, values, '.', color=color)
        ax.set_title(title)
        ax.set_ylabel(yLabel)
        ax.set_xlabel("Experiment Time (min)")
        ax.margins(0, .4)
        if clampY:
            ax.axis([None, None, 0, None])
        addComments(abf, True, False)
    plotFigSave(abf, tag=f"generic-memtest", labelAxes=False)
    return
def generic_paired_pulse(abf, p1sec1, p1sec2, p2sec1, p2sec2):
    """single pulse or paired pulse analysis.

    Measures the mean current in two time windows (pulse 1: p1sec1-p1sec2,
    pulse 2: p2sec1-p2sec2) on every sweep, plots each pulse over
    experiment time, then plots the pulse2/pulse1 ratio.
    """
    log.debug("generic plot: pulse analysis")
    assert isinstance(abf, pyabf.ABF)
    sweepTimes = np.arange(abf.sweepCount)*abf.sweepLengthSec
    # PULSE 1
    plotFigNew(abf)
    ax = plt.gcf().add_subplot(2, 1, 1)
    sweepAvgs1 = pyabf.stats.rangeAverage(abf, p1sec1, p1sec2)
    sweepErr1 = pyabf.stats.rangeStdev(abf, p1sec1, p1sec2)
    ax.errorbar(sweepTimes, sweepAvgs1, sweepErr1, ms=10,
                marker='.', ls='-', capsize=5, color='r')
    timeNote = "%.02f - %.02f sec" % (_secLookUp(abf, p1sec1, p1sec2))
    ax.set_title(f"{abf.abfID} Pulse 1 [{timeNote}]")
    ax.set_ylabel(abf.sweepLabelY)
    ax.set_xlabel(abf.sweepLabelX)
    addComments(abf)
    # PULSE 2
    ax = plt.gcf().add_subplot(2, 1, 2)
    sweepAvgs2 = pyabf.stats.rangeAverage(abf, p2sec1, p2sec2)
    sweepErr2 = pyabf.stats.rangeStdev(abf, p2sec1, p2sec2)
    # BUGFIX: previously plotted sweepAvgs1 (pulse-1 means) with pulse-2 error bars
    ax.errorbar(sweepTimes, sweepAvgs2, sweepErr2, ms=10,
                marker='.', ls='-', capsize=5, color='r')
    timeNote = "%.02f - %.02f sec" % (_secLookUp(abf, p2sec1, p2sec2))
    # BUGFIX: title previously said "Pulse 1" on the pulse-2 panel
    ax.set_title(f"{abf.abfID} Pulse 2 [{timeNote}]")
    addComments(abf)
    ax.set_ylabel(abf.sweepLabelY)
    ax.set_xlabel(abf.sweepLabelX)
    plotFigSave(abf, tag=f"generic-paired-pulses", labelAxes=False)
    # RATIO
    plotFigNew(abf)
    ax = plt.gcf().add_subplot(1, 1, 1)  # pulse2/pulse1 ratio
    ratioAvg = sweepAvgs2/sweepAvgs1
    # how should this be measured? error bars suppressed (NaN) until decided
    ratioErr = sweepErr2*np.nan
    ax.errorbar(sweepTimes, ratioAvg, ratioErr, ms=20,
                marker='.', ls='-', capsize=5, color='r')
    ax.set_title(f"{abf.abfID} Paired Pulse Ratio [p2/p1]")
    addComments(abf)
    ax.set_ylabel(abf.sweepLabelY)
    ax.set_xlabel(abf.sweepLabelX)
    plotFigSave(abf, tag=f"generic-paired-pulse-ratio", labelAxes=False)
    return
def generic_memtest_ramp(abf, msg=False):
    """analyzes the ramp part of a sweep to calculate Cm

    Left panel: all sweeps overlaid (optionally annotated with *msg*).
    Top-right: the two legs of the Cm ramp of every sweep, overlaid.
    Bottom-right: per-sweep Cm values and their mean.
    """
    log.debug("generic plot: Cm ramp")
    assert(isinstance(abf,pyabf.ABF))
    plotFigNew(abf)
    # plot the memtest
    ax1 = plt.gcf().add_subplot(121)
    pyabf.plot.sweeps(abf, axis=ax1)
    ax1.set_title("All Sweeps (overlay)")
    if msg:
        # show the memtest summary text boxed in the panel corner
        bbox = dict(facecolor='white', edgecolor='black',
                    boxstyle='round,pad=.4')
        ax1.text(0.96, 0.96, msg, verticalalignment='top',
                 horizontalalignment='right', fontsize=12, bbox=bbox,
                 transform=plt.gca().transAxes, family='monospace')
    # plot the ramp
    ax2 = plt.gcf().add_subplot(222)
    ax2.set_title("Cm Ramp (phase)")
    for sweepNumber in abf.sweepList:
        abf.setSweep(sweepNumber)
        cmInfo = pyabf.memtest._cm_ramp_points_and_voltages(abf)
        if not cmInfo:
            continue  # this sweep has no detectable ramp
        rampPoints, rampVoltages = cmInfo
        rampData = abf.sweepY[rampPoints[0]:rampPoints[2]]
        # color each sweep along the "winter" colormap by sweep order
        color = plt.get_cmap("winter")(sweepNumber/abf.sweepCount)
        # split the ramp at its midpoint and reverse the first half so
        # both legs of the ramp overlay each other
        trace1 = rampData[:int(len(rampData)/2)][::-1]
        trace2 = rampData[int(len(rampData)/2):]
        ax2.plot(trace1, color=color, alpha=.2)
        ax2.plot(trace2, color=color, alpha=.2)
        ax2.set_ylabel("current (pA)")
        ax2.set_xlabel("data point (index)")
    # plot the cms
    cms = pyabf.memtest.cm_ramp_valuesBySweep(abf)
    cmAvg = np.mean(cms)
    cmErr = np.std(cms)
    ax4 = plt.gcf().add_subplot(224)
    ax4.set_title("Cm = %.02f +/- %.02f pF" % (cmAvg, cmErr))
    # NOTE(review): y-label unit looks like it should be pF (title says pF)
    ax4.set_ylabel("capacitance (pA)")
    ax4.set_xlabel("sweep number")
    ax4.plot(cms, '.', ms=10, alpha=.8)
    ax4.axhline(cmAvg, color='r', ls='--', lw=2, alpha=.5)
    plotFigSave(abf, tag="memtest", labelAxes=False)
def generic_ap_freqPerSweep(abf):
    """
    Plot the number of detected action potentials per sweep against
    experiment time.
    """
    log.debug("generic plot: AP Frequency Per Sweep")
    assert isinstance(abf, pyabf.ABF)
    sweepStartTimes = np.arange(abf.sweepCount)*abf.sweepLengthSec
    apCounts = [0]*abf.sweepCount
    for sweepIndex in abf.sweepList:
        abf.setSweep(sweepIndex)
        apCounts[sweepIndex] = len(pyabf.ap.ap_points_currentSweep(abf))
    plotFigNew(abf)
    plt.grid(alpha=.5, ls='--')
    plt.plot(sweepStartTimes, apCounts, '.-', ms=10)
    plt.ylabel("Sweep AP Count")
    plt.xlabel("Experiment Time (seconds)")
    addComments(abf)
    plotFigSave(abf, tag="apFreqBySweep", labelAxes=False)
def generic_trace_before_after_drug(abf, minAfterDrug = 2, minBeforeDrug = .5, isolateEpoch=3):
    """create a plot showing the average of n sweeps before and after the first drug.

    For each tag (drug application) plot a representative sweep from
    before and after the tag, plus their difference, optionally
    restricted to a single stimulus epoch.
    """
    assert isinstance(abf, pyabf.ABF)
    for drugNumber in range(len(abf.tagComments)):
        # sweep indices nearest the requested times, clamped to the recording
        minutesBefore = abf.tagTimesMin[drugNumber] - minBeforeDrug
        sweepBefore = max(0, int(minutesBefore*60/abf.sweepLengthSec))
        minutesAfter = abf.tagTimesMin[drugNumber] + minAfterDrug
        sweepAfter = min(int(minutesAfter*60/abf.sweepLengthSec), abf.sweepCount-1)
        # restrict analysis to one epoch, or use the whole sweep
        if (isolateEpoch):
            epochBounds = pyabf.stimulus.epochPoints(abf)
            i1 = epochBounds[isolateEpoch]
            i2 = epochBounds[isolateEpoch+1]
        else:
            i1 = 0
            i2 = abf.sweepPointCount
        # load (lowpass-filtered) traces from the chosen sweeps
        pyabf.filter.gaussian(abf, 3)
        abf.setSweep(sweepBefore)
        traceBefore = abf.sweepY[i1:i2]
        abf.setSweep(sweepAfter)
        traceAfter = abf.sweepY[i1:i2]
        traceDelta = traceAfter - traceBefore
        # create the plot
        plotFigNew(abf)
        fig = plt.gcf()
        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        ax1.set_title("Representative traces around drug %d (%s)"%(drugNumber, abf.tagComments[drugNumber]))
        ax1.plot(abf.sweepX[i1:i2], traceBefore, label="-%.02f min"%minBeforeDrug, lw=2, alpha=.7)
        ax1.plot(abf.sweepX[i1:i2], traceAfter, label="+%.02f min"%minAfterDrug, lw=2, alpha=.7)
        ax1.legend()
        pyabf.filter.gaussian(abf, 3) # apply lowpass filter
        ax2.set_title("Ramp Difference")
        ax2.plot(abf.sweepX[i1:i2], traceDelta, lw=2, alpha=.7, color='C3')
        ax2.axhline(0,color='k',ls='--')
        ax2.legend()
        plotFigSave(abf, tag="ramp-drug%02d"%drugNumber)
    return
# Code defines which routines or generic graphs to use for each protocol
def unknown(abf):
    """unknown protocol.

    Fallback plotting: short sweeps in a short recording are overlaid,
    anything longer is drawn continuously; both get an average-over-time plot.
    """
    log.debug("running method for unknown protocol")
    assert isinstance(abf, pyabf.ABF)
    shortSweeps = abf.sweepLengthSec < 10
    shortFile = abf.sweepCount*abf.sweepLengthSec < 60*2
    if shortSweeps and shortFile:
        generic_overlay(abf, unknown=True)
    else:
        generic_continuous(abf, unknown=True)
    generic_average_over_time(abf)
def protocol_0111(abf):
    """0111 continuous ramp.pro

    Finds the first action potential in the recording and plots its
    voltage, its first derivative (velocity), the full signal, and an
    AP phase plot.
    """
    assert isinstance(abf, pyabf.ABF)
    # display window around the AP (20 ms worth of data points)
    msToPlot = 20
    ptToPlot = msToPlot*abf.dataPointsPerMs
    abf.setSweep(0)
    # default segment (start of sweep 0) in case no AP is found
    segY = abf.sweepY[0:ptToPlot]
    timeAPsec = 0
    # isolate the 1st AP we find
    for sweep in abf.sweepList:
        abf.setSweep(sweep)
        apPoints = pyabf.ap.ap_points_currentSweep(abf)
        # ignore APs close to the start of the sweep
        apPoints = [x for x in apPoints if x > ptToPlot]
        if len(apPoints):
            # center the display window on the first acceptable AP
            pt1 = int(apPoints[0]-ptToPlot/2)
            segY = abf.sweepY[pt1:pt1+ptToPlot]
            # AP time expressed in continuous (whole-file) seconds
            timeAPsec = apPoints[0]/abf.dataRate+sweep*abf.sweepLengthSec
            break
    # prepare the first derivative and X units
    segYd = np.diff(segY)
    segYd = np.append(segYd, segYd[-1])  # pad so len(segYd) == len(segY)
    segYd = segYd * abf.dataRate / 1000  # scale derivative to mV/ms
    segX = np.arange(len(segYd))-len(segYd)/2  # points, centered on the AP
    segX = segX/abf.dataRate*1000  # convert points to ms
    plotFigNew(abf)
    # plot the first AP (mV)
    ax1 = plt.gcf().add_subplot(2, 2, 1)
    pyabf.plot.sweeps(abf, continuous=True, axis=ax1,
                      linewidth=1, color='C0', alpha=1)
    zoomSec = .25
    ax1.set_title("First AP: Voltage")
    ax1.axis([timeAPsec-zoomSec, timeAPsec+zoomSec, None, None])
    # plot the first AP (V/sec)
    ax2 = plt.gcf().add_subplot(2, 2, 2)
    ax2.set_title("First AP: Velocity")
    ax2.set_ylabel("Velocity (mV/ms)")
    ax2.set_xlabel("time (ms)")
    # faint reference line at -100 mV/ms
    ax2.axhline(-100, color='k', ls=':', lw=2, alpha=.2)
    ax2.plot(segX, segYd, color='r')
    ax2.margins(0, .05)
    # plot the whole ABF
    ax3 = plt.gcf().add_subplot(2, 2, 3)
    pyabf.plot.sweeps(abf, continuous=True, axis=ax3,
                      linewidth=1, color='C0', alpha=1)
    zoomSec = .25
    ax3.set_title("Full Signal")
    ax3.margins(0, .05)
    # plot the first AP (V/sec)
    ax4 = plt.gcf().add_subplot(2, 2, 4)
    ax4.set_title("First AP: Phase Plot")
    ax4.set_xlabel("Membrane Potential (mV)")
    ax4.set_ylabel("Velocity (mV/ms)")
    ax4.plot(segY, segYd, '.-', color='C1')
    ax4.margins(.1, .1)
    # lock phase-plot limits to the voltage (ax1 y) and velocity (ax2 y) panels
    ax4.axis([ax1.axis()[2], ax1.axis()[3], ax2.axis()[2], ax2.axis()[3]])
    plotFigSave(abf, tag=f"rampAP", labelAxes=False)
# Thin protocol dispatchers: each routine maps one acquisition protocol
# to the generic plotting functions above.
def protocol_0101(abf):
    """0112 0101 tau -10pA"""
    assert isinstance(abf, pyabf.ABF)
    # baseline-subtracted sweep average (first 100 ms as baseline)
    generic_overlay_average(abf, baselineSec1=0, baselineSec2=0.1)
    return
def protocol_0102(abf):
    """0102 IC sine sweep.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_overlay(abf)
    return
def protocol_0112(abf):
    """0112 steps dual -50 to 150 step 10.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_ap_steps(abf)
    protocol_0111(abf)  # also plot the first-AP analysis
    return
def protocol_0113(abf):
    """0113 steps dual -100 to 300 step 25.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_ap_steps(abf)
    protocol_0111(abf)  # also plot the first-AP analysis
    return
def protocol_0114(abf):
    """0114 steps dual -100 to 2000 step 100.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_ap_steps(abf)
    protocol_0111(abf)  # also plot the first-AP analysis
    return
def protocol_0121(abf):
    """0121 IC sine sweep 0 +- 20 pA.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_overlay(abf)
    return
def protocol_0122(abf):
    """0122 steps single -50 to 150 step 10.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_ap_steps(abf)
    return
def protocol_0201(abf):
    """0201 memtest.pro

    Membrane-test protocol: show the ramp-based Cm analysis when the
    protocol contains a ramp epoch, otherwise just overlay the sweeps
    with the memtest summary text.
    """
    assert isinstance(abf, pyabf.ABF)
    msg = pyabf.memtest.step_summary(abf)
    # epoch type 2 appears to denote a ramp epoch -- TODO confirm in pyabf
    if 2 in abf._epochPerDacSection.nEpochType:
        # there is a ramp and a step
        generic_memtest_ramp(abf, msg)
    else:
        # there is no ramp
        plotFigNew(abf)
        ax1 = plt.gcf().add_subplot(111)
        pyabf.plot.sweeps(abf, axis=ax1)
        ax1.set_title("MemTest (without ramp)")
        # memtest summary boxed in the panel corner
        bbox = dict(facecolor='white', edgecolor='black',
                    boxstyle='round,pad=.4')
        ax1.text(0.96, 0.96, msg, verticalalignment='top',
                 horizontalalignment='right',
                 transform=plt.gca().transAxes, fontsize=16,
                 bbox=bbox, family='monospace')
        plotFigSave(abf, tag="memtest")
    return
def protocol_0202(abf):
    """0202 IV dual"""
    assert isinstance(abf, pyabf.ABF)
    # measurement window .8-1 s; step/start values mirror the protocol name
    generic_iv(abf, .8, 1, 10, -110)
    return
def protocol_0203(abf):
    """0203 IV fast.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_iv(abf, .8, 1, 5, -110)
    return
def protocol_0204(abf):
    """0204 Cm ramp.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_memtest_ramp(abf)
    return
def protocol_0221(abf):
    """0221 VC sine sweep 70 +- 5 mV.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_overlay(abf)
    return
def protocol_0222(abf):
    """0222 VC sine sweep 70 +- 5 mV.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_overlay(abf)
    return
def protocol_0301(abf):
    """0301 ic gap free.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    return
def protocol_0302(abf):
    """0302 IC 10s IC ramp drug.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_ap_freqPerSweep(abf)
    # isolateEpoch=None: compare whole sweeps (no epoch restriction)
    generic_trace_before_after_drug(abf, isolateEpoch=None)
    return
def protocol_0303(abf):
    """0303 IC 10s opto.pro

    Overlay every sweep, shading the optogenetic stimulus (digital
    output 4) and coloring traces by clamp mode.
    """
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    shadeDigitalOutput(abf, 4, color='g')
    # NOTE(review): verticalOffset is 0, so sweeps are overlaid, not stacked,
    # despite the "opto-stacked" tag
    verticalOffset = 0
    for sweep in abf.sweepList:
        abf.setSweep(sweep)
        # blue for current clamp (mV), red otherwise
        if abf.sweepUnitsY == "mV":
            traceColor = 'b'
        else:
            traceColor = 'r'
        plt.plot(abf.sweepX, abf.sweepY + verticalOffset*sweep, color=traceColor, lw=.5, alpha=.5)
    plt.margins(0,.1)
    # BUGFIX: title previously read "OVerlay"
    plt.title(f"Overlay of {abf.sweepCount} sweeps")
    plotFigSave(abf, tag="opto-stacked", labelAxes=True)
    return
def protocol_0312(abf):
    """0312 ic cosine 10s.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    generic_ap_freqPerSweep(abf)
    # isolateEpoch=None: compare whole sweeps (no epoch restriction)
    generic_trace_before_after_drug(abf, isolateEpoch=None)
    return
def protocol_0401(abf):
    """0401 VC 2s MT-70.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    # average from 1 s onward (skips the memtest step at the sweep start)
    generic_average_over_time(abf, timeSec1=1)
    generic_memtest_over_time(abf)
    return
def protocol_0402(abf):
    """0402 VC 2s MT-50.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    generic_average_over_time(abf, timeSec1=1)
    generic_memtest_over_time(abf)
    return
def protocol_0403(abf):
    """0402 VC 2s MT-70.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    generic_average_over_time(abf, timeSec1=1)
    generic_memtest_over_time(abf)
    return
def protocol_0404(abf):
    """0404 VC 2s MT2-70 ramp -110-50.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_average_over_time(abf, timeSec1=1.5)
    generic_trace_before_after_drug(abf)
    generic_memtest_over_time(abf)
    return
def protocol_0405(abf):
    """0404 VC 2s MT2-70 ramp -110-50.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_first_sweep(abf)
    generic_continuous(abf)
    generic_average_over_time(abf, timeSec1=1)
    generic_memtest_over_time(abf)
    return
def protocol_0406(abf):
    """0406 VC 10s MT-50.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    generic_memtest_over_time(abf)
    return
def protocol_0408(abf):
    """0408 VC 10s two step.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    generic_memtest_over_time(abf)
    return
def protocol_0409(abf):
    """0406 VC 10s MT-50.pro"""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    # average the first 400 ms of each sweep
    generic_average_over_time(abf, 0, .4)
    generic_memtest_over_time(abf)
    return
def protocol_0501(abf):
    """0501 opto -50.pro

    Optogenetics plots: (1) every sweep plus the sweep average, shown
    only within the 1.10-1.30 s response window, and (2) the window of
    each sweep stacked vertically.
    """
    assert isinstance(abf, pyabf.ABF)
    timeSec1, timeSec2 = 1.10, 1.30
    # response window expressed in data-point indices
    p1, p2 = int(timeSec1*abf.dataRate), int(timeSec2*abf.dataRate)
    # plot every sweep and the average of all sweeps
    plotFigNew(abf)
    shadeDigitalOutput(abf, 4)
    for sweep in abf.sweepList:
        abf.setSweep(sweep)
        # blank everything outside the window so only the response is drawn
        abf.sweepY[:p1] = np.nan
        abf.sweepY[p2:] = np.nan
        plt.plot(abf.sweepX, abf.sweepY, alpha=.2, color='.5')
    avg = pyabf.sweep.averageTrace(abf, timeSec1=timeSec1, timeSec2=timeSec2)
    # overlay the average, again masked to the window
    abf.sweepY *= np.nan
    abf.sweepY[p1:p2] = avg
    plt.plot(abf.sweepX, abf.sweepY)
    plotFigSave(abf, tag="opto-avg", labelAxes=True)
    # make stacked graph
    plotFigNew(abf)
    shadeDigitalOutput(abf, 4)
    vertOffset = False
    for sweep in abf.sweepList:
        abf.setSweep(sweep)
        if not vertOffset:
            # offset derived from the first sweep's peak-to-peak amplitude
            vertOffset = np.max(abf.sweepY[p1:p2]) - np.min(abf.sweepY[p1:p2])
            vertOffset *= 1.2
        plt.plot(abf.sweepX[p1:p2], abf.sweepY[p1:p2] +
                 vertOffset*sweep, color='b', alpha=.7)
    plotFigSave(abf, tag="opto-stacked", labelAxes=True)
    return
def protocol_0502(abf):
    """0502 opto 0.pro

    Overlay every sweep, shading the optogenetic stimulus (digital
    output 4) and coloring traces by clamp mode.
    """
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    shadeDigitalOutput(abf, 4, color='g')
    # NOTE(review): verticalOffset is 0, so sweeps are overlaid, not stacked,
    # despite the "opto-stacked" tag
    verticalOffset = 0
    for sweep in abf.sweepList:
        abf.setSweep(sweep)
        # blue for current clamp (mV), red otherwise
        if abf.sweepUnitsY == "mV":
            traceColor = 'b'
        else:
            traceColor = 'r'
        plt.plot(abf.sweepX, abf.sweepY + verticalOffset*sweep, color=traceColor, lw=.5, alpha=.5)
    plt.margins(0,.1)
    # BUGFIX: title previously read "OVerlay"
    plt.title(f"Overlay of {abf.sweepCount} sweeps")
    plotFigSave(abf, tag="opto-stacked", labelAxes=True)
    return
def protocol_0912(abf):
    """0912 VC 20s stim PPR 40ms.pro"""
    assert isinstance(abf, pyabf.ABF)
    # stimulus timing: first pulse onset, second pulse 50 ms later,
    # each measured over a 40 ms window
    p1sec = 2.31703
    p2sec = p1sec + .05
    pulseWidth = .04
    generic_continuous(abf)
    generic_average_over_time(abf, timeSec1=5)
    generic_first_sweep(abf, 2, 3)
    generic_paired_pulse(abf, p1sec, p1sec+pulseWidth,
                         p2sec, p2sec+pulseWidth)
    generic_memtest_over_time(abf)
def protocol_0xxx(abf):
    """Protocols are tagged with this during development."""
    assert isinstance(abf, pyabf.ABF)
    # route known in-development protocols; everything else gets the fallback
    if abf.protocol in ["0xxx VC 10s MT-50 stim", "0xxx VC 10s MT-70 stim"]:
        protocol_0912(abf)
    else:
        unknown(abf)
### These protocols were made for Kyle and Haley's ABF1 aging project data
def protocol_KK01(abf):
    """Kyle's old experiments: memtest-like protocol."""
    assert isinstance(abf, pyabf.ABF)
    generic_overlay(abf)
    return
def protocol_KK02(abf):
    """Kyle's old experiments: VC memtest time course."""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    generic_average_over_time(abf, timeSec1=1)
    return
def protocol_KK03(abf):
    """Kyle's old experiments: AP gain."""
    assert isinstance(abf, pyabf.ABF)
    generic_ap_steps(abf)
    return
def protocol_KK04(abf):
    """Kyle's old experiments: VC evoked EPSC."""
    # fix tag times - why? 2-channels?
    # NOTE(review): tag times divided by 4; reason unknown -- verify against
    # how these ABF1 files store tags
    for i in range(len(abf.tagTimesMin)):
        divBy = 4
        abf.tagTimesMin[i] = abf.tagTimesMin[i]/divBy
        abf.tagTimesSec[i] = abf.tagTimesSec[i]/divBy
    generic_continuous(abf)
    # average the .65-.80 s window of each sweep
    generic_average_over_time(abf, timeSec1=.65, timeSec2=.65+.15)
    return
def protocol_OVLY(abf):
    """make overlay."""
    assert isinstance(abf, pyabf.ABF)
    generic_overlay(abf)
    return
def protocol_OVAP(abf):
    """make overlay and do AP analysis."""
    assert isinstance(abf, pyabf.ABF)
    generic_overlay(abf)
    generic_ap_steps(abf)
    protocol_0111(abf)
    return
def protocol_MTMN(abf):
    """genertic memtest probably while drug is applied."""
    assert isinstance(abf, pyabf.ABF)
    generic_continuous(abf)
    generic_memtest_over_time(abf)
    return
if __name__=="__main__":
log.critical("DO NOT RUN THIS FILE DIRECTLY")
log.setLevel(logging.DEBUG)
fileToTest = R"X:\Data\SD\Piriform Oxytocin\core ephys 2018\Sagittal Pilot\2018_12_04_ts_0032.abf"
abf = pyabf.ABF(fileToTest)
print("ABF is protocol",abf.protocol)
protocol_0312(abf) | [
"logging.getLogger",
"pyabf.ap.ap_points_currentSweep",
"sys.path.insert",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"pyabf.memtest._cm_ramp_points_and_voltages",
"numpy.nanmin",
"sys.path.append",
"matplotlib.pyplot.margins",
"pyabf.memtest.step_valuesBySweep",
"numpy.arange",
"nu... | [((614, 639), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (629, 639), False, 'import os\n'), ((726, 771), 'sys.path.insert', 'sys.path.insert', (['(0)', "(PATH_HERE + '/../../src/')"], {}), "(0, PATH_HERE + '/../../src/')\n", (741, 771), False, 'import sys\n'), ((770, 839), 'sys.path.append', 'sys.path.append', (['"""C:\\\\Users\\\\swharden\\\\Documents\\\\GitHub\\\\pyABF\\\\src"""'], {}), "('C:\\\\Users\\\\swharden\\\\Documents\\\\GitHub\\\\pyABF\\\\src')\n", (785, 839), False, 'import sys\n'), ((932, 959), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (949, 959), False, 'import logging\n'), ((1828, 1869), 'pyabf.stimulus.digitalWaveformEpochs', 'pyabf.stimulus.digitalWaveformEpochs', (['abf'], {}), '(abf)\n', (1864, 1869), False, 'import pyabf\n'), ((1888, 1919), 'pyabf.stimulus.epochPoints', 'pyabf.stimulus.epochPoints', (['abf'], {}), '(abf)\n', (1914, 1919), False, 'import pyabf\n'), ((3433, 3460), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3443, 3460), True, 'import matplotlib.pyplot as plt\n'), ((5086, 5118), 'os.path.dirname', 'os.path.dirname', (['abf.abfFilePath'], {}), '(abf.abfFilePath)\n', (5101, 5118), False, 'import os\n'), ((5249, 5288), 'os.path.join', 'os.path.join', (['abfDir', 'DATAFOLDER', 'fnOut'], {}), '(abfDir, DATAFOLDER, fnOut)\n', (5261, 5288), False, 'import os\n'), ((5479, 5499), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pathOut'], {}), '(pathOut)\n', (5490, 5499), True, 'import matplotlib.pyplot as plt\n'), ((5845, 5890), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'axis': 'axOvr', 'alpha': '(0.5)'}), '(abf, axis=axOvr, alpha=0.5)\n', (5862, 5890), False, 'import pyabf\n'), ((6003, 6068), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'axis': 'axStack', 'alpha': '(0.5)', 'offsetYunits': '(100)'}), '(abf, axis=axStack, alpha=0.5, offsetYunits=100)\n', (6020, 6068), False, 'import pyabf\n'), 
((6345, 6419), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'sweepNumbers': '[abf.sweepNumber]', 'axis': 'axAp', 'alpha': '(1)'}), '(abf, sweepNumbers=[abf.sweepNumber], axis=axAp, alpha=1)\n', (6362, 6419), False, 'import pyabf\n'), ((7823, 7872), 'pyabf.stats.rangeAverage', 'pyabf.stats.rangeAverage', (['abf', 'timeSec1', 'timeSec2'], {}), '(abf, timeSec1, timeSec2)\n', (7847, 7872), False, 'import pyabf\n'), ((7890, 7937), 'pyabf.stats.rangeStdev', 'pyabf.stats.rangeStdev', (['abf', 'timeSec1', 'timeSec2'], {}), '(abf, timeSec1, timeSec2)\n', (7912, 7937), False, 'import pyabf\n'), ((8180, 8236), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'axis': 'ax1', 'linewidth': '(2)', 'alpha': '(0.8)'}), '(abf, axis=ax1, linewidth=2, alpha=0.8)\n', (8197, 8236), False, 'import pyabf\n'), ((10097, 10135), 'pyabf.sweep.averageTrace', 'pyabf.sweep.averageTrace', (['abf', 'channel'], {}), '(abf, channel)\n', (10121, 10135), False, 'import pyabf\n'), ((12948, 12985), 'pyabf.memtest.step_valuesBySweep', 'pyabf.memtest.step_valuesBySweep', (['abf'], {}), '(abf)\n', (12980, 12985), False, 'import pyabf\n'), ((14740, 14785), 'pyabf.stats.rangeAverage', 'pyabf.stats.rangeAverage', (['abf', 'p1sec1', 'p1sec2'], {}), '(abf, p1sec1, p1sec2)\n', (14764, 14785), False, 'import pyabf\n'), ((14802, 14845), 'pyabf.stats.rangeStdev', 'pyabf.stats.rangeStdev', (['abf', 'p1sec1', 'p1sec2'], {}), '(abf, p1sec1, p1sec2)\n', (14824, 14845), False, 'import pyabf\n'), ((15250, 15295), 'pyabf.stats.rangeAverage', 'pyabf.stats.rangeAverage', (['abf', 'p2sec1', 'p2sec2'], {}), '(abf, p2sec1, p2sec2)\n', (15274, 15295), False, 'import pyabf\n'), ((15312, 15355), 'pyabf.stats.rangeStdev', 'pyabf.stats.rangeStdev', (['abf', 'p2sec1', 'p2sec2'], {}), '(abf, p2sec1, p2sec2)\n', (15334, 15355), False, 'import pyabf\n'), ((16641, 16673), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'axis': 'ax1'}), '(abf, axis=ax1)\n', (16658, 16673), False, 'import pyabf\n'), ((17778, 17818), 
'pyabf.memtest.cm_ramp_valuesBySweep', 'pyabf.memtest.cm_ramp_valuesBySweep', (['abf'], {}), '(abf)\n', (17813, 17818), False, 'import pyabf\n'), ((17831, 17843), 'numpy.mean', 'np.mean', (['cms'], {}), '(cms)\n', (17838, 17843), True, 'import numpy as np\n'), ((17856, 17867), 'numpy.std', 'np.std', (['cms'], {}), '(cms)\n', (17862, 17867), True, 'import numpy as np\n'), ((18687, 18715), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.5)', 'ls': '"""--"""'}), "(alpha=0.5, ls='--')\n", (18695, 18715), True, 'import matplotlib.pyplot as plt\n'), ((18718, 18767), 'matplotlib.pyplot.plot', 'plt.plot', (['sweepTimesSec', 'apsPerSweep', '""".-"""'], {'ms': '(10)'}), "(sweepTimesSec, apsPerSweep, '.-', ms=10)\n", (18726, 18767), True, 'import matplotlib.pyplot as plt\n'), ((18772, 18800), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sweep AP Count"""'], {}), "('Sweep AP Count')\n", (18782, 18800), True, 'import matplotlib.pyplot as plt\n'), ((18805, 18844), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Experiment Time (seconds)"""'], {}), "('Experiment Time (seconds)')\n", (18815, 18844), True, 'import matplotlib.pyplot as plt\n'), ((22222, 22235), 'numpy.diff', 'np.diff', (['segY'], {}), '(segY)\n', (22229, 22235), True, 'import numpy as np\n'), ((22248, 22275), 'numpy.append', 'np.append', (['segYd', 'segYd[-1]'], {}), '(segYd, segYd[-1])\n', (22257, 22275), True, 'import numpy as np\n'), ((22492, 22579), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'continuous': '(True)', 'axis': 'ax1', 'linewidth': '(1)', 'color': '"""C0"""', 'alpha': '(1)'}), "(abf, continuous=True, axis=ax1, linewidth=1, color='C0',\n alpha=1)\n", (22509, 22579), False, 'import pyabf\n'), ((23094, 23181), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'continuous': '(True)', 'axis': 'ax3', 'linewidth': '(1)', 'color': '"""C0"""', 'alpha': '(1)'}), "(abf, continuous=True, axis=ax3, linewidth=1, color='C0',\n alpha=1)\n", (23111, 23181), False, 'import pyabf\n'), ((24895, 
24926), 'pyabf.memtest.step_summary', 'pyabf.memtest.step_summary', (['abf'], {}), '(abf)\n', (24921, 24926), False, 'import pyabf\n'), ((27094, 27113), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (27105, 27113), True, 'import matplotlib.pyplot as plt\n'), ((27116, 27164), 'matplotlib.pyplot.title', 'plt.title', (['f"""OVerlay of {abf.sweepCount} sweeps"""'], {}), "(f'OVerlay of {abf.sweepCount} sweeps')\n", (27125, 27164), True, 'import matplotlib.pyplot as plt\n'), ((29665, 29732), 'pyabf.sweep.averageTrace', 'pyabf.sweep.averageTrace', (['abf'], {'timeSec1': 'timeSec1', 'timeSec2': 'timeSec2'}), '(abf, timeSec1=timeSec1, timeSec2=timeSec2)\n', (29689, 29732), False, 'import pyabf\n'), ((29790, 29822), 'matplotlib.pyplot.plot', 'plt.plot', (['abf.sweepX', 'abf.sweepY'], {}), '(abf.sweepX, abf.sweepY)\n', (29798, 29822), True, 'import matplotlib.pyplot as plt\n'), ((30798, 30817), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (30809, 30817), True, 'import matplotlib.pyplot as plt\n'), ((30820, 30868), 'matplotlib.pyplot.title', 'plt.title', (['f"""OVerlay of {abf.sweepCount} sweeps"""'], {}), "(f'OVerlay of {abf.sweepCount} sweeps')\n", (30829, 30868), True, 'import matplotlib.pyplot as plt\n'), ((33323, 33344), 'pyabf.ABF', 'pyabf.ABF', (['fileToTest'], {}), '(fileToTest)\n', (33332, 33344), False, 'import pyabf\n'), ((668, 693), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (683, 693), False, 'import os\n'), ((2968, 3022), 'matplotlib.pyplot.axvline', 'plt.axvline', (['xPos'], {'color': '"""r"""', 'lw': '(2)', 'alpha': '(0.5)', 'ls': '"""--"""'}), "(xPos, color='r', lw=2, alpha=0.5, ls='--')\n", (2979, 3022), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3057), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (3055, 3057), True, 'import matplotlib.pyplot as plt\n'), ((3859, 3886), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['abf.sweepLabelY'], {}), 
'(abf.sweepLabelY)\n', (3869, 3886), True, 'import matplotlib.pyplot as plt\n'), ((3895, 3922), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['abf.sweepLabelX'], {}), '(abf.sweepLabelX)\n', (3905, 3922), True, 'import matplotlib.pyplot as plt\n'), ((4452, 4470), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4468, 4470), True, 'import matplotlib.pyplot as plt\n'), ((4528, 4537), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4535, 4537), True, 'import matplotlib.pyplot as plt\n'), ((5525, 5536), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5534, 5536), True, 'import matplotlib.pyplot as plt\n'), ((7001, 7045), 'pyabf.ap.ap_freq_per_sweep', 'pyabf.ap.ap_freq_per_sweep', (['abf', 'epochNumber'], {}), '(abf, epochNumber)\n', (7027, 7045), False, 'import pyabf\n'), ((7738, 7767), 'pyabf.filter.gaussian', 'pyabf.filter.gaussian', (['abf', '(2)'], {}), '(abf, 2)\n', (7759, 7767), False, 'import pyabf\n'), ((9173, 9247), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'axis': 'ax', 'color': 'color', 'channel': 'channel', 'alpha': 'alpha'}), '(abf, axis=ax, color=color, channel=channel, alpha=alpha)\n', (9190, 9247), False, 'import pyabf\n'), ((9940, 10011), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'axis': 'ax', 'color': '"""C0"""', 'channel': 'channel', 'alpha': '(0.2)'}), "(abf, axis=ax, color='C0', channel=channel, alpha=0.2)\n", (9957, 10011), False, 'import pyabf\n'), ((10560, 10668), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'axis': 'ax', 'continuous': '(True)', 'channel': 'channel', 'color': '"""b"""', 'alpha': 'alpha', 'linewidth': '(0.5)'}), "(abf, axis=ax, continuous=True, channel=channel, color='b',\n alpha=alpha, linewidth=0.5)\n", (10577, 10668), False, 'import pyabf\n'), ((11215, 11346), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'sweepNumbers': '[0]', 'axis': 'ax', 'channel': 'channel', 'color': '"""b"""', 'alpha': '(1)', 'startAtSec': 'timeSec1', 'endAtSec': 'timeSec2'}), 
"(abf, sweepNumbers=[0], axis=ax, channel=channel, color=\n 'b', alpha=1, startAtSec=timeSec1, endAtSec=timeSec2)\n", (11232, 11346), False, 'import pyabf\n'), ((11951, 12017), 'pyabf.stats.rangeAverage', 'pyabf.stats.rangeAverage', (['abf', 'timeSec1', 'timeSec2'], {'channel': 'channel'}), '(abf, timeSec1, timeSec2, channel=channel)\n', (11975, 12017), False, 'import pyabf\n'), ((12050, 12114), 'pyabf.stats.rangeStdev', 'pyabf.stats.rangeStdev', (['abf', 'timeSec1', 'timeSec2'], {'channel': 'channel'}), '(abf, timeSec1, timeSec2, channel=channel)\n', (12072, 12114), False, 'import pyabf\n'), ((14603, 14628), 'numpy.arange', 'np.arange', (['abf.sweepCount'], {}), '(abf.sweepCount)\n', (14612, 14628), True, 'import numpy as np\n'), ((17212, 17259), 'pyabf.memtest._cm_ramp_points_and_voltages', 'pyabf.memtest._cm_ramp_points_and_voltages', (['abf'], {}), '(abf)\n', (17254, 17259), False, 'import pyabf\n'), ((18448, 18473), 'numpy.arange', 'np.arange', (['abf.sweepCount'], {}), '(abf.sweepCount)\n', (18457, 18473), True, 'import numpy as np\n'), ((18577, 18613), 'pyabf.ap.ap_points_currentSweep', 'pyabf.ap.ap_points_currentSweep', (['abf'], {}), '(abf)\n', (18608, 18613), False, 'import pyabf\n'), ((19987, 20016), 'pyabf.filter.gaussian', 'pyabf.filter.gaussian', (['abf', '(3)'], {}), '(abf, 3)\n', (20008, 20016), False, 'import pyabf\n'), ((20692, 20721), 'pyabf.filter.gaussian', 'pyabf.filter.gaussian', (['abf', '(3)'], {}), '(abf, 3)\n', (20713, 20721), False, 'import pyabf\n'), ((21803, 21839), 'pyabf.ap.ap_points_currentSweep', 'pyabf.ap.ap_points_currentSweep', (['abf'], {}), '(abf)\n', (21834, 21839), False, 'import pyabf\n'), ((25161, 25193), 'pyabf.plot.sweeps', 'pyabf.plot.sweeps', (['abf'], {'axis': 'ax1'}), '(abf, axis=ax1)\n', (25178, 25193), False, 'import pyabf\n'), ((26999, 27097), 'matplotlib.pyplot.plot', 'plt.plot', (['abf.sweepX', '(abf.sweepY + verticalOffset * sweep)'], {'color': 'traceColor', 'lw': '(0.5)', 'alpha': '(0.5)'}), '(abf.sweepX, 
abf.sweepY + verticalOffset * sweep, color=traceColor,\n lw=0.5, alpha=0.5)\n', (27007, 27097), True, 'import matplotlib.pyplot as plt\n'), ((29600, 29655), 'matplotlib.pyplot.plot', 'plt.plot', (['abf.sweepX', 'abf.sweepY'], {'alpha': '(0.2)', 'color': '""".5"""'}), "(abf.sweepX, abf.sweepY, alpha=0.2, color='.5')\n", (29608, 29655), True, 'import matplotlib.pyplot as plt\n'), ((30180, 30274), 'matplotlib.pyplot.plot', 'plt.plot', (['abf.sweepX[p1:p2]', '(abf.sweepY[p1:p2] + vertOffset * sweep)'], {'color': '"""b"""', 'alpha': '(0.7)'}), "(abf.sweepX[p1:p2], abf.sweepY[p1:p2] + vertOffset * sweep, color=\n 'b', alpha=0.7)\n", (30188, 30274), True, 'import matplotlib.pyplot as plt\n'), ((30703, 30801), 'matplotlib.pyplot.plot', 'plt.plot', (['abf.sweepX', '(abf.sweepY + verticalOffset * sweep)'], {'color': 'traceColor', 'lw': '(0.5)', 'alpha': '(0.5)'}), '(abf.sweepX, abf.sweepY + verticalOffset * sweep, color=traceColor,\n lw=0.5, alpha=0.5)\n', (30711, 30801), True, 'import matplotlib.pyplot as plt\n'), ((2218, 2267), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['t1', 't2'], {'color': 'color', 'alpha': '(0.3)', 'lw': '(0)'}), '(t1, t2, color=color, alpha=0.3, lw=0)\n', (2229, 2267), True, 'import matplotlib.pyplot as plt\n'), ((2463, 2472), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2470, 2472), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3244), 'matplotlib.pyplot.text', 'plt.text', (['xPos', 'Y2', 'comment'], {'color': '"""r"""', 'rotation': '"""vertical"""', 'ha': '"""right"""', 'va': '"""top"""', 'weight': '"""bold"""', 'alpha': '(0.5)', 'size': '(8)'}), "(xPos, Y2, comment, color='r', rotation='vertical', ha='right', va=\n 'top', weight='bold', alpha=0.5, size=8)\n", (3132, 3244), True, 'import matplotlib.pyplot as plt\n'), ((3815, 3831), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3824, 3831), True, 'import matplotlib.pyplot as plt\n'), ((4841, 4850), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4848, 
4850), True, 'import matplotlib.pyplot as plt\n'), ((5315, 5339), 'os.path.dirname', 'os.path.dirname', (['pathOut'], {}), '(pathOut)\n', (5330, 5339), False, 'import os\n'), ((5416, 5440), 'os.path.dirname', 'os.path.dirname', (['pathOut'], {}), '(pathOut)\n', (5431, 5440), False, 'import os\n'), ((5810, 5819), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5817, 5819), True, 'import matplotlib.pyplot as plt\n'), ((5968, 5977), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5975, 5977), True, 'import matplotlib.pyplot as plt\n'), ((6148, 6157), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6155, 6157), True, 'import matplotlib.pyplot as plt\n'), ((6291, 6316), 'numpy.max', 'np.max', (['abf.sweepY[p1:p2]'], {}), '(abf.sweepY[p1:p2])\n', (6297, 6316), True, 'import numpy as np\n'), ((6562, 6571), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6569, 6571), True, 'import matplotlib.pyplot as plt\n'), ((6758, 6789), 'pyabf.stimulus.epochValues', 'pyabf.stimulus.epochValues', (['abf'], {}), '(abf)\n', (6784, 6789), False, 'import pyabf\n'), ((7952, 7977), 'numpy.arange', 'np.arange', (['abf.sweepCount'], {}), '(abf.sweepCount)\n', (7961, 7977), True, 'import numpy as np\n'), ((8071, 8080), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8078, 8080), True, 'import matplotlib.pyplot as plt\n'), ((8112, 8121), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8119, 8121), True, 'import matplotlib.pyplot as plt\n'), ((8355, 8376), 'numpy.nanmax', 'np.nanmax', (['currentAvg'], {}), '(currentAvg)\n', (8364, 8376), True, 'import numpy as np\n'), ((8379, 8400), 'numpy.nanmin', 'np.nanmin', (['currentAvg'], {}), '(currentAvg)\n', (8388, 8400), True, 'import numpy as np\n'), ((11886, 11911), 'numpy.arange', 'np.arange', (['abf.sweepCount'], {}), '(abf.sweepCount)\n', (11895, 11911), True, 'import numpy as np\n'), ((13006, 13031), 'numpy.arange', 'np.arange', (['abf.sweepCount'], {}), '(abf.sweepCount)\n', (13015, 13031), True, 'import 
numpy as np\n'), ((13086, 13095), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13093, 13095), True, 'import matplotlib.pyplot as plt\n'), ((13367, 13376), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13374, 13376), True, 'import matplotlib.pyplot as plt\n'), ((13688, 13697), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13695, 13697), True, 'import matplotlib.pyplot as plt\n'), ((14003, 14012), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (14010, 14012), True, 'import matplotlib.pyplot as plt\n'), ((14692, 14701), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (14699, 14701), True, 'import matplotlib.pyplot as plt\n'), ((15202, 15211), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (15209, 15211), True, 'import matplotlib.pyplot as plt\n'), ((15798, 15807), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (15805, 15807), True, 'import matplotlib.pyplot as plt\n'), ((15947, 15969), 'numpy.power', 'np.power', (['sweepErr1', '(2)'], {}), '(sweepErr1, 2)\n', (15955, 15969), True, 'import numpy as np\n'), ((15970, 15992), 'numpy.power', 'np.power', (['sweepErr2', '(2)'], {}), '(sweepErr2, 2)\n', (15978, 15992), True, 'import numpy as np\n'), ((16610, 16619), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (16617, 16619), True, 'import matplotlib.pyplot as plt\n'), ((17059, 17068), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (17066, 17068), True, 'import matplotlib.pyplot as plt\n'), ((17421, 17443), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""winter"""'], {}), "('winter')\n", (17433, 17443), True, 'import matplotlib.pyplot as plt\n'), ((17878, 17887), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (17885, 17887), True, 'import matplotlib.pyplot as plt\n'), ((22457, 22466), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22464, 22466), True, 'import matplotlib.pyplot as plt\n'), ((22763, 22772), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22770, 22772), True, 'import 
matplotlib.pyplot as plt\n'), ((23059, 23068), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (23066, 23068), True, 'import matplotlib.pyplot as plt\n'), ((23318, 23327), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (23325, 23327), True, 'import matplotlib.pyplot as plt\n'), ((3763, 3772), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3770, 3772), True, 'import matplotlib.pyplot as plt\n'), ((4003, 4012), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4010, 4012), True, 'import matplotlib.pyplot as plt\n'), ((4304, 4313), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4311, 4313), True, 'import matplotlib.pyplot as plt\n'), ((4931, 4940), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4938, 4940), True, 'import matplotlib.pyplot as plt\n'), ((6826, 6857), 'pyabf.stimulus.epochPoints', 'pyabf.stimulus.epochPoints', (['abf'], {}), '(abf)\n', (6852, 6857), False, 'import pyabf\n'), ((6904, 6935), 'pyabf.stimulus.epochPoints', 'pyabf.stimulus.epochPoints', (['abf'], {}), '(abf)\n', (6930, 6935), False, 'import pyabf\n'), ((8431, 8452), 'numpy.nanmin', 'np.nanmin', (['currentAvg'], {}), '(currentAvg)\n', (8440, 8452), True, 'import numpy as np\n'), ((8457, 8478), 'numpy.nanmax', 'np.nanmax', (['currentAvg'], {}), '(currentAvg)\n', (8466, 8478), True, 'import numpy as np\n'), ((9111, 9120), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9118, 9120), True, 'import matplotlib.pyplot as plt\n'), ((9808, 9817), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9815, 9817), True, 'import matplotlib.pyplot as plt\n'), ((10498, 10507), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10505, 10507), True, 'import matplotlib.pyplot as plt\n'), ((11153, 11162), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11160, 11162), True, 'import matplotlib.pyplot as plt\n'), ((11811, 11820), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11818, 11820), True, 'import matplotlib.pyplot as plt\n'), ((19759, 
19790), 'pyabf.stimulus.epochPoints', 'pyabf.stimulus.epochPoints', (['abf'], {}), '(abf)\n', (19785, 19790), False, 'import pyabf\n'), ((19822, 19853), 'pyabf.stimulus.epochPoints', 'pyabf.stimulus.epochPoints', (['abf'], {}), '(abf)\n', (19848, 19853), False, 'import pyabf\n'), ((20273, 20282), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20280, 20282), True, 'import matplotlib.pyplot as plt\n'), ((20314, 20323), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20321, 20323), True, 'import matplotlib.pyplot as plt\n'), ((25126, 25135), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (25133, 25135), True, 'import matplotlib.pyplot as plt\n'), ((30088, 30113), 'numpy.max', 'np.max', (['abf.sweepY[p1:p2]'], {}), '(abf.sweepY[p1:p2])\n', (30094, 30113), True, 'import numpy as np\n'), ((30116, 30141), 'numpy.min', 'np.min', (['abf.sweepY[p1:p2]'], {}), '(abf.sweepY[p1:p2])\n', (30122, 30141), True, 'import numpy as np\n'), ((5371, 5395), 'os.path.dirname', 'os.path.dirname', (['pathOut'], {}), '(pathOut)\n', (5386, 5395), False, 'import os\n'), ((6681, 6712), 'pyabf.stimulus.epochValues', 'pyabf.stimulus.epochValues', (['abf'], {}), '(abf)\n', (6707, 6712), False, 'import pyabf\n'), ((16987, 16996), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16994, 16996), True, 'import matplotlib.pyplot as plt\n'), ((25477, 25486), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (25484, 25486), True, 'import matplotlib.pyplot as plt\n')] |
# =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
import torch
from nemo import logging
__all__ = ['eval_iter_callback', 'eval_epochs_done_callback']
# Keys accumulated across evaluation batches in `global_vars`.
GLOBAL_KEYS = [
    "loss",
    "per_example_loss",
    "beam_results",
    "src_ids",
    "src_first_tokens",
    "pred",
    "labels",
    "labels_mask",
]


def eval_iter_callback(tensors, global_vars, tokenizer):
    """Collect per-batch losses, predictions, labels and masks into `global_vars`.

    Args:
        tensors: mapping from tensor names to lists of torch tensors produced
            during one evaluation step.
        global_vars: dict accumulating results across steps; any key from
            GLOBAL_KEYS that is missing is initialized to an empty list.
        tokenizer: unused here, kept for callback-signature compatibility.
    """
    for key in GLOBAL_KEYS:
        global_vars.setdefault(key, [])

    for name, tensor_list in tensors.items():
        if "crossentropylossnm1" in name:
            for loss_tensor in tensor_list:
                global_vars["per_example_loss"].extend(loss_tensor.cpu().numpy().tolist())
        if "logits" in name:
            for logit_tensor in tensor_list:
                # Greedy decoding: highest-scoring class along the last axis.
                predicted = torch.argmax(logit_tensor, dim=-1).int().cpu().numpy().tolist()
                global_vars["pred"].extend(predicted)
        if "labels~" in name:
            for label_tensor in tensor_list:
                global_vars["labels"].extend(label_tensor.cpu().numpy().tolist())
        if "labels_mask" in name:
            for mask_tensor in tensor_list:
                global_vars["labels_mask"].extend(mask_tensor.cpu().numpy().tolist())
def eval_epochs_done_callback(global_vars, validation_dataset=None):
    """Compute validation loss and sentence-level accuracy, then reset state.

    Args:
        global_vars: dict filled by eval_iter_callback with per-example losses,
            predictions, labels and label masks. All GLOBAL_KEYS entries are
            cleared before returning.
        validation_dataset: unused, kept for callback-signature compatibility.

    Returns:
        dict with a single entry 'eval_loss' (mean per-example loss).
    """
    losses = np.array(global_vars["per_example_loss"])
    eval_loss = np.mean(losses)
    global_vars["per_example_loss"] = []
    labels = np.array([np.array(n) for n in global_vars["labels"]])
    predictions = np.array([np.array(n) for n in global_vars["pred"]])
    labels_mask = np.array([np.array(n) for n in global_vars["labels_mask"]])
    for key in GLOBAL_KEYS:
        global_vars[key] = []
    # A position counts as correct when the prediction matches the label or the
    # position is padding (mask == 0). Use the builtin `bool` here: `np.bool`
    # was a deprecated alias for it and is removed in NumPy >= 1.24.
    lor = np.logical_or(labels == predictions, ~labels_mask.astype(bool))
    # A sentence is accurate only if every non-padding token is correct.
    accuracy = np.mean(np.all(lor, axis=1).astype(np.float32))
    logging.info("------------------------------------------------------------")
    logging.info("Validation loss: {0}".format(np.round(eval_loss, 3)))
    logging.info("Sentence level accuracy: {0}".format(accuracy))
    logging.info("------------------------------------------------------------")
    return dict({"eval_loss": eval_loss})
| [
"numpy.mean",
"numpy.array",
"nemo.logging.info",
"numpy.all",
"numpy.round",
"torch.argmax"
] | [((2009, 2050), 'numpy.array', 'np.array', (["global_vars['per_example_loss']"], {}), "(global_vars['per_example_loss'])\n", (2017, 2050), True, 'import numpy as np\n'), ((2067, 2082), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2074, 2082), True, 'import numpy as np\n'), ((2546, 2622), 'nemo.logging.info', 'logging.info', (['"""------------------------------------------------------------"""'], {}), "('------------------------------------------------------------')\n", (2558, 2622), False, 'from nemo import logging\n'), ((2765, 2841), 'nemo.logging.info', 'logging.info', (['"""------------------------------------------------------------"""'], {}), "('------------------------------------------------------------')\n", (2777, 2841), False, 'from nemo import logging\n'), ((2148, 2159), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (2156, 2159), True, 'import numpy as np\n'), ((2221, 2232), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (2229, 2232), True, 'import numpy as np\n'), ((2292, 2303), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (2300, 2303), True, 'import numpy as np\n'), ((2670, 2692), 'numpy.round', 'np.round', (['eval_loss', '(3)'], {}), '(eval_loss, 3)\n', (2678, 2692), True, 'import numpy as np\n'), ((2501, 2520), 'numpy.all', 'np.all', (['lor'], {'axis': '(1)'}), '(lor, axis=1)\n', (2507, 2520), True, 'import numpy as np\n'), ((1508, 1534), 'torch.argmax', 'torch.argmax', (['pred'], {'dim': '(-1)'}), '(pred, dim=-1)\n', (1520, 1534), False, 'import torch\n')] |
import os
from typing import List
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
from typing import Optional
import numpy as np
import pandas as pd
import scanpy as sc
from anndata import AnnData
from rich import print
# Absolute directory of this module; used to locate bundled marker files.
WORKING_DIRECTORY = os.path.dirname(__file__)
def generate_expression_table(
    adata,
    cluster: str = "all",
    subset_by: str = "cell_type",
    xlabel: str = "days",
    hue: str = None,
    use_raw: bool = None,
):
    """Build a per-cell gene expression table, optionally restricted to clusters.

    Args:
        adata: AnnData object
        cluster: Which label of the subsets to generate the table for; 'all' keeps every cell
        subset_by: Observation column matched against `cluster`
        xlabel: Observation column copied into the table as the x-axis value
        hue: Optional observation column copied into the table for coloring
        use_raw: Whether to read expression values from adata.raw.X

    Returns:
        pandas DataFrame with one row per cell: gene expression values plus
        'identifier', `xlabel` and (optionally) `hue` columns
    """
    if cluster == "all":
        cells = adata.obs_names
    else:
        cells = [value in cluster for value in adata.obs[subset_by]]

    selection = adata[cells]
    if use_raw:
        expression_table = pd.DataFrame(
            selection.raw.X.todense(), index=selection.obs_names, columns=selection.raw.var_names
        )
    else:
        expression_table = pd.DataFrame(selection.X, index=selection.obs_names, columns=selection.var_names)

    expression_table["identifier"] = selection.obs["identifier"]
    expression_table[xlabel] = selection.obs[xlabel]
    if hue:
        # For multiple clusters, split internally per condition.
        if isinstance(cluster, list) and len(cluster) > 1 and subset_by != hue:
            expression_table[hue] = [
                f"{t}_{c}" for t, c in zip(selection.obs[hue], selection.obs[subset_by])
            ]
        else:
            expression_table[hue] = selection.obs[hue]
    return expression_table
def relative_frequencies(adata, group_by: str = "cell_type", xlabel: str = "days", condition: str = "batch"):
    """Calculate per-sample relative frequencies of an observation grouping.

    Args:
        adata: AnnData object containing the data
        group_by: Observation column whose categories are counted per sample
        xlabel: x-axis label column attached to the result
        condition: Optional extra observation column attached to the result

    Returns:
        pandas DataFrame: one row per sample ('identifier'), one column per
        `group_by` category, plus the `xlabel` and `condition` columns
    """
    counts = adata.obs.groupby(["identifier", group_by]).size()
    samples = np.unique(adata.obs["identifier"])
    categories = adata.obs[group_by].cat.categories
    rows = [counts[sample] / sum(counts[sample]) for sample in samples]
    rel_freqs = pd.DataFrame(rows, columns=categories, index=samples).fillna(0)
    # relFreqs[xlabel] = grouping.loc[samples, xlabel] ## when using Grouping Table
    # Map each sample to its observed xlabel value.
    sample_to_label = {}
    for sample, label in adata.obs.groupby(["identifier", xlabel]).groups.keys():
        sample_to_label[sample] = label
    rel_freqs[xlabel] = [sample_to_label[sample] for sample in rel_freqs.index]  # type: ignore
    if condition:
        for sample, label in adata.obs.groupby(["identifier", condition]).groups.keys():
            sample_to_label[sample] = label
        rel_freqs[condition] = [sample_to_label[sample] for sample in rel_freqs.index]  # type: ignore
    return rel_freqs
def relative_frequency_per_cluster(adata, group_by: str = "cell_type", xlabel: str = "days", condition=None):
    """Calculate relative frequencies of `xlabel` values within each cluster.

    Args:
        adata: AnnData object containing the data
        group_by: Observation column that defines the clusters
        xlabel: Observation column whose categories are counted per cluster
        condition: Optional extra observation column attached to the result

    Returns:
        pandas DataFrame: one row per cluster, one column per `xlabel`
        category, plus `group_by` and (optionally) `condition` columns
    """
    counts = adata.obs.groupby([group_by, xlabel]).size()
    celltypes = np.unique(adata.obs[group_by])
    columns = adata.obs[xlabel].cat.categories
    rows = [counts[celltype] / sum(counts[celltype]) for celltype in celltypes]
    rel_freqs = pd.DataFrame(rows, columns=columns, index=celltypes).fillna(0)
    # Map each cluster to its observed label (reused below for `condition`).
    cluster_to_label = {}
    for cluster, label in adata.obs.groupby([group_by, xlabel]).groups.keys():
        cluster_to_label[cluster] = label
    rel_freqs[group_by] = rel_freqs.index  # type: ignore
    if condition:
        for cluster, label in adata.obs.groupby([group_by, condition]).groups.keys():
            cluster_to_label[cluster] = label
        rel_freqs[condition] = [cluster_to_label[cluster] for cluster in rel_freqs.index]  # type: ignore
    return rel_freqs
def correlate_to_signature(
    adata,
    marker: pd.DataFrame,
    log_fc_threshold: float = 0.7,
    cell_type: str = "AT2 cells",
    cell_type_label: str = "cell_type",
    log_fc_label: str = "logfoldchange",
    gene_label: str = "gene",
    use_raw: bool = True,
):
    """Score each cell by correlating its expression against a cell-type signature.

    Alternative to sc.tl.score: correlates per-cell expression of signature
    genes (logFC above threshold) with the signature's log fold changes.

    Args:
        adata: AnnData object containing the data
        marker: Pandas DataFrame containing marker genes
        log_fc_threshold: Minimum log fold change for a marker to enter the signature
        cell_type: Cell type to calculate the correlation for
        cell_type_label: Column of `marker` holding cell types
        log_fc_label: Column of `marker` holding log fold changes
        gene_label: Column of `marker` holding gene names
        use_raw: Whether to read expression from adata.raw.X

    Returns:
        List with one correlation value per cell
    """
    from scipy.sparse import issparse

    signature = marker[marker.loc[:, cell_type_label] == cell_type]
    signature = signature.loc[signature.loc[:, log_fc_label] > log_fc_threshold, [gene_label, log_fc_label]]
    # Restrict the signature to genes actually present in the data.
    shared_genes = list(np.intersect1d(adata.var_names, signature.loc[:, gene_label].astype(str)))
    signature = signature[signature.loc[:, gene_label].isin(shared_genes)]
    print(f"[bold blue]{len(shared_genes)} genes used for correlation score to {cell_type}")

    matrix = adata.raw[:, shared_genes].X if use_raw else adata[:, shared_genes].X
    if issparse(matrix):
        matrix = matrix.todense()

    expression = pd.DataFrame(matrix.T, index=shared_genes)
    expression = pd.DataFrame.fillna(expression, value=0)
    # Correlate each cell (column) with the signature logFC vector.
    return [
        np.correlate(signature.loc[:, log_fc_label], expression.iloc[:, cell])[0]
        for cell in range(expression.shape[1])
    ]
def remove_outliers(cords, eps: int = 1, min_samples: int = 2):
    """Cluster UMAP coordinates with DBSCAN to flag outlying cells.

    Call as: sub.obs["d_cluster"] = remove_outliers(sub.obsm["X_umap"], min_samples = 10)

    Args:
        cords: adata UMAP coordinates, typically adata.obsm["X_umap"]
        eps: Maximum distance between two points to still be considered neighbors
        min_samples: Minimum samples of a cluster

    Returns:
        pandas Categorical of DBSCAN cluster labels (DBSCAN labels noise as '-1')
    """
    from natsort import natsorted
    from sklearn.cluster import DBSCAN

    labels = DBSCAN(eps=eps, min_samples=min_samples).fit(cords).labels_.astype("U")
    return pd.Categorical(labels, categories=natsorted(np.unique(labels)))
def add_percentages(adata, table, ids, group_by: str, threshold: int = 0, gene_label: str = "gene"):
    """Append per-group expression percentages to an existing diffxpy table.

    Args:
        adata: AnnData object containing the data
        table: Table as generated by diffxpy
        ids: Group identifiers to compute percentages for
        group_by: Observation column that defines the groups
        threshold: Count a cell must exceed to be considered expressing
        gene_label: Column of `table` holding gene names ('index' to use the table index)

    Returns:
        The input table with one new 'pct.<id>s' column per entry in `ids`
    """
    for ident in ids:
        group_cells = adata.obs_names[adata.obs[group_by] == ident]
        counts = adata[group_cells].layers["counts"]
        # Fraction of cells in the group expressing each gene above `threshold`.
        fractions = pd.DataFrame(((counts > threshold).sum(0) / counts.shape[0]).T, index=adata.var_names)
        genes = table.index.values if gene_label == "index" else table.loc[:, gene_label]
        table[f"pct.{ident}s"] = fractions.reindex(genes).values
    return table
def ranksums_between_groups(
    table, id1: str = "bystander", id2: str = "infected", xlabel: str = "condition", cells=None, score: str = "Axin2"
):
    """Perform a Wilcoxon rank-sum test on `score` between two groups of rows.

    Args:
        table: DataFrame with a group column (`xlabel`) and a score column
        id1: Value of `xlabel` selecting the first group
        id2: Value of `xlabel` selecting the second group
        xlabel: Column holding the group labels
        cells: Optional row selection applied before testing
        score: Column holding the values to compare

    Returns:
        One-row pandas DataFrame with columns 'wilcoxon_ranksum' and 'pval'
    """
    from scipy import stats

    if cells is not None:
        table = table.loc[cells].copy()
    first_group = table[table.loc[:, xlabel] == id1].copy()
    second_group = table[table.loc[:, xlabel] == id2].copy()
    statistic, pvalue = stats.ranksums(first_group.loc[:, score], second_group.loc[:, score])
    result = pd.DataFrame(columns=["wilcoxon_ranksum", "pval"])
    result.loc[0] = [statistic, pvalue]
    return result
def generate_count_object(
    adata,
    hue: str = "disease",
    cell_type_label: str = "cell_type",
    cell_type: List[str] = None,
    min_samples: int = 2,
    min_cells: int = 5,
    ref: str = "healthy",
    subset: List[str] = None,
    layer: str = "counts",
    outliers_removal: bool = False,
):
    """
    Build a raw-count AnnData object for selected cell types and groupings.

    Subsets `adata` to observations whose `grouping` is in `subset` and whose
    cell type (from `cell_type_label`) is in `cell_type`, copies the requested
    count `layer` into a fresh AnnData (e.g. as input for diffxpy), optionally
    removes UMAP outliers via DBSCAN, filters rarely expressed genes, and
    reorders the `hue` categories so `ref` comes first.

    Args:
        adata: AnnData object
        hue: Observation column used for grouping/coloring (default: 'disease')
        cell_type_label: Observation column containing cell types
        cell_type: Cell types to keep
        min_samples: Minimum samples for outlier removal with DBScan
        min_cells: Minimum number of cells a gene must be expressed in to be kept
        ref: Reference category of `hue`; placed first in the category order
        subset: Values of `adata.obs.grouping` to keep
        layer: Layer holding the raw counts to copy (default: 'counts')
        outliers_removal: Whether to remove UMAP outliers with DBSCAN

    Returns:
        AnnData object containing counts

    Example Call:
        subset = ['3d PI-KO', '3d PI-WT']

        raw_counts = generate_count_object(adata,
                                           condition = "grouping",
                                           cell_type_label = "celltype_refined", cell_type = ["AT2"],
                                           ref = "3d PI-WT",
                                           subset = subset)
    """
    adata_subset = adata[adata.obs.grouping.isin(subset)]
    cells = [
        True if (adata_subset.obs[cell_type_label][i] in cell_type) else False for i in range(adata_subset.n_obs)  # type: ignore
    ]
    # Raw count data for diffxpy
    obs = adata_subset[cells].obs.copy()
    var = adata_subset.var_names.copy()
    adata_raw = sc.AnnData(X=adata_subset[cells].layers[layer].copy())
    adata_raw.obs = obs
    adata_raw.var.index = var
    adata_raw.obsm = adata_subset[cells].obsm.copy()
    # Also automate tidy up with DBScan :)
    if outliers_removal:
        adata_raw.obs["dcluster"] = remove_outliers(adata_raw.obsm["X_umap"], min_samples=min_samples)
        sc.pl.umap(adata_raw, color=[hue, "dcluster"])
        # Keep only DBSCAN cluster "0"; all other labels are treated as outliers.
        adata_raw = adata_raw[adata_raw.obs.dcluster == "0"].copy()
    sc.pp.filter_genes(adata_raw, min_cells=min_cells)
    # Set reference as first column
    # NOTE(review): `inplace=True` on `.cat.reorder_categories` applied to a
    # `.loc` selection relies on deprecated behavior and is removed in
    # pandas >= 2.0 — verify against the pinned pandas version.
    adata_raw.obs.loc[:, hue].cat.reorder_categories([ref, np.setdiff1d(subset, ref)[0]], inplace=True)
    pal = adata_subset.uns[f"{hue}_colors"]
    sc.pl.umap(adata_raw, color=[hue], palette=list(pal))
    return adata_raw
def tidy_de_table(de_test, adata, cells, ids=None, qval_thresh: float = 0.9, group_by: str = "treatment", cols=None):
    """Sort a diffxpy result by q-value and attach per-group expression percentages.

    Args:
        de_test: diffxpy differential expression test object
        adata: AnnData object
        cells: Cell selection applied to `adata` before computing percentages
        ids: Group identifiers for the percentage columns
        qval_thresh: Keep only rows with qval strictly below this threshold
        group_by: Observation column defining the groups
        cols: Columns of the diffxpy summary to keep

    Returns:
        Pandas Dataframe of the filtered diffxpy table with percentages
    """
    summary = de_test.summary().sort_values(by=["qval"], ascending=True)
    filtered = summary[summary.qval < qval_thresh].loc[:, cols].copy()
    # Add percentages of expressing cells per group.
    return add_percentages(adata[cells], filtered, ids=ids, group_by=group_by)
def correlate_means_to_gene(means: pd.DataFrame, corr_gene: str = "EOMES"):
    """Spearman-correlate every gene's mean expression against one reference gene.

    Args:
        means: Mean expression table (rows: groups/cells, columns: genes)
        corr_gene: Reference gene column to correlate against

    Returns:
        pandas DataFrame indexed by gene with 'spearman_corr' and 'pvalue'
        columns, sorted by correlation in descending order (NaN rows dropped)
    """
    import scipy.stats

    genes = means.columns.values
    correlations = pd.DataFrame(index=genes, columns=["spearman_corr", "pvalue"])
    # tab = sc.get.obs_df(sub, keys = [corr_gene], layer = None, use_raw = True)
    reference = means.loc[:, [corr_gene]].values
    # Correlate each gene against the reference (Spearman's rho).
    for gene in genes:
        rho_and_p = scipy.stats.spearmanr(reference, means.loc[:, [gene]])
        correlations.loc[gene, :] = rho_and_p[0:2]
    correlations.dropna(axis=0, inplace=True)
    correlations.sort_values("spearman_corr", ascending=False, inplace=True)
    return correlations
def extended_marker_table(
    adata: AnnData,
    qval_thresh: float = 0.05,
    cell_type_label: str = "cell_type",
    gene_ranks_key: str = "rank_genes_groups",
):
    """
    Generate an extended marker table with per-cluster expression percentages.

    Run scanpy.tl.rank_genes_groups before using this function.

    Args:
        adata: AnnData object containing ranked genes
        qval_thresh: Threshold on the adjusted p-value
        cell_type_label: Label containing all cell types
        gene_ranks_key: Key for the ranked gene groups (generated by sc.tl.rank_genes_groups)

    Returns:
        A Pandas DataFrame
    """
    ranked = adata.uns[gene_ranks_key]
    cluster_tables = []
    for cluster in ranked["names"].dtype.names:
        cluster_table = pd.DataFrame(
            {
                "gene": ranked["names"][cluster],
                "score": ranked["scores"][cluster],
                "log_FC": ranked["logfoldchanges"][cluster],
                "pval": ranked["pvals"][cluster],
                "pval_adj": ranked["pvals_adj"][cluster],
                "cell_type": cluster,
            }
        )
        # Flag cells as inside/outside the current cluster, then attach the
        # percentage of expressing cells for both groups.
        adata.obs["group"] = ["within" if ct == cluster else "outside" for ct in adata.obs.loc[:, cell_type_label]]
        cluster_table = add_percentages(
            adata, table=cluster_table, group_by="group", gene_label="gene", ids=["within", "outside"]
        )
        cluster_tables.append(cluster_table)
    combined = pd.concat(cluster_tables)
    return combined[combined.pval_adj < qval_thresh].copy()
def generate_pseudobulk(adata: AnnData, group_key: str = "identifier", sep="\t", save: str = None) -> pd.DataFrame:
    """
    Generate a pseudobulk table with one column per category of `group_key`.

    The result has a 'Genes' column followed by one column per group member,
    containing the per-gene sum of counts over all cells of that member:

    +------------+------------------+------------------+
    | Genes      |   Group Member 1 |   Group Member 2 |
    +============+==================+==================+
    | Gene 1     |   Value 1        |   Value 2        |
    +------------+------------------+------------------+

    Args:
        adata: AnnData object
        group_key: The key to group by. E.g. by mice, by condition, ... (default: 'identifier')
        sep: Separator to use when saving the pseudobulk table (default: tab)
        save: Path to save the pseudobulk table to (default: None)

    Returns:
        A Pandas DataFrame containing the pseudobulk table
    """
    pseudobulk = pd.DataFrame(data=adata.var_names.values, columns=["Genes"])
    for member in adata.obs.loc[:, group_key].cat.categories:
        mask = adata.obs.loc[:, group_key] == member
        pseudobulk[member] = adata[mask].X.sum(0, dtype=int)  # column sums (genes)
    if save:
        pseudobulk.to_csv(save, sep=sep, index=False)
    return pseudobulk
def automated_marker_annotation(
    adata: AnnData,
    organism: str,
    tissue: str,
    marker_file: str,
    key: str = "rank_genes_groups",
    normalize: Optional[Literal["reference", "data"]] = "reference",
    p_value: float = 0.05,
    log_fold_change: float = 2,
):
    """
    Calculate a marker gene overlap against bundled, pre-existing annotations.

    Currently supported marker files:

    +------------+------------+------------------------------+
    | Organism   | Tissue     | Marker File                  |
    +============+============+==============================+
    | Mouse      | Lung       | lung_particle_markers.txt    |
    +------------+------------+------------------------------+

    Args:
        adata: AnnData object containing ranked genes
        organism: Currently supported: 'mouse'
        tissue: Currently supported: 'lung'
        marker_file: Name of the marker file to be used - refer to table
        key: Key of ranked genes in adata (default: 'rank_genes_groups')
        normalize: Normalization option for the marker gene overlap output (default: 'reference')
        p_value: p-value threshold for existing marker genes (default: 0.05)
        log_fold_change: log fold change threshold for existing marker genes (default: 2)

    Returns:
        Pandas DataFrame of overlapping genes, or None if the inputs are unsupported.
        Visualize with a Seaborn Heatmap.
    """
    supported_organisms = {"mouse"}
    supported_tissues = {"lung"}
    supported_marker_files = {"lung_particle_markers.txt"}

    # Guard clauses: report and bail out on unsupported input.
    if organism not in supported_organisms:
        print(f"[bold red]Unfortunately organism {organism} is not yet supported.")
        return
    if tissue not in supported_tissues:
        print(f"[bold red]Unfortunately tissue {tissue} is not yet supported.")
        return
    if marker_file not in supported_marker_files:
        print(f"[bold red]Unfortunately marker file {marker_file} could not be found. Please check your spelling.")
        return

    markers = pd.read_csv(f"{WORKING_DIRECTORY}/markers/{marker_file}", sep="\t", index_col=None)
    markers = markers[(markers.logfoldchange > log_fold_change) & (markers.pval_adj < p_value)].copy()
    # One entry per cell type: the genes annotated for it.
    marker_dict = {
        annotated_type: markers.loc[markers["cell_type"] == annotated_type, "gene"].values
        for annotated_type in markers["cell_type"].unique()
    }
    return sc.tl.marker_gene_overlap(adata, marker_dict, key=key, normalize=normalize)
| [
"pandas.DataFrame.fillna",
"numpy.unique",
"pandas.read_csv",
"scanpy.tl.marker_gene_overlap",
"scipy.sparse.issparse",
"os.path.dirname",
"scanpy.pp.filter_genes",
"rich.print",
"numpy.correlate",
"numpy.setdiff1d",
"scipy.stats.ranksums",
"scanpy.pl.umap",
"pandas.DataFrame",
"pandas.con... | [((310, 335), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (325, 335), False, 'import os\n'), ((2375, 2409), 'numpy.unique', 'np.unique', (["adata.obs['identifier']"], {}), "(adata.obs['identifier'])\n", (2384, 2409), True, 'import numpy as np\n'), ((3824, 3854), 'numpy.unique', 'np.unique', (['adata.obs[group_by]'], {}), '(adata.obs[group_by])\n', (3833, 3854), True, 'import numpy as np\n'), ((6485, 6534), 'pandas.DataFrame', 'pd.DataFrame', (['gene_expression.T'], {'index': 'gene_names'}), '(gene_expression.T, index=gene_names)\n', (6497, 6534), True, 'import pandas as pd\n'), ((6589, 6634), 'pandas.DataFrame.fillna', 'pd.DataFrame.fillna', (['gene_expression'], {'value': '(0)'}), '(gene_expression, value=0)\n', (6608, 6634), True, 'import pandas as pd\n'), ((9219, 9277), 'scipy.stats.ranksums', 'stats.ranksums', (['group1.loc[:, score]', 'group2.loc[:, score]'], {}), '(group1.loc[:, score], group2.loc[:, score])\n', (9233, 9277), False, 'from scipy import stats\n'), ((9291, 9341), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['wilcoxon_ranksum', 'pval']"}), "(columns=['wilcoxon_ranksum', 'pval'])\n", (9303, 9341), True, 'import pandas as pd\n'), ((11392, 11442), 'scanpy.pp.filter_genes', 'sc.pp.filter_genes', (['adata_raw'], {'min_cells': 'min_cells'}), '(adata_raw, min_cells=min_cells)\n', (11410, 11442), True, 'import scanpy as sc\n'), ((12738, 12800), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'genes', 'columns': "['spearman_corr', 'pvalue']"}), "(index=genes, columns=['spearman_corr', 'pvalue'])\n", (12750, 12800), True, 'import pandas as pd\n'), ((14736, 14758), 'pandas.concat', 'pd.concat', (['all_markers'], {}), '(all_markers)\n', (14745, 14758), True, 'import pandas as pd\n'), ((15867, 15927), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'adata.var_names.values', 'columns': "['Genes']"}), "(data=adata.var_names.values, columns=['Genes'])\n", (15879, 15927), True, 'import pandas as 
pd\n'), ((18324, 18411), 'pandas.read_csv', 'pd.read_csv', (['f"""{WORKING_DIRECTORY}/markers/{marker_file}"""'], {'sep': '"""\t"""', 'index_col': 'None'}), "(f'{WORKING_DIRECTORY}/markers/{marker_file}', sep='\\t',\n index_col=None)\n", (18335, 18411), True, 'import pandas as pd\n'), ((18725, 18795), 'scanpy.tl.marker_gene_overlap', 'sc.tl.marker_gene_overlap', (['adata', 'marker'], {'key': 'key', 'normalize': 'normalize'}), '(adata, marker, key=key, normalize=normalize)\n', (18750, 18795), True, 'import scanpy as sc\n'), ((1265, 1360), 'pandas.DataFrame', 'pd.DataFrame', (['adata[cells].X'], {'index': 'adata[cells].obs_names', 'columns': 'adata[cells].var_names'}), '(adata[cells].X, index=adata[cells].obs_names, columns=adata[\n cells].var_names)\n', (1277, 1360), True, 'import pandas as pd\n'), ((6132, 6153), 'scipy.sparse.issparse', 'issparse', (['adata.raw.X'], {}), '(adata.raw.X)\n', (6140, 6153), False, 'from scipy.sparse import issparse\n'), ((6314, 6331), 'scipy.sparse.issparse', 'issparse', (['adata.X'], {}), '(adata.X)\n', (6322, 6331), False, 'from scipy.sparse import issparse\n'), ((11272, 11318), 'scanpy.pl.umap', 'sc.pl.umap', (['adata_raw'], {'color': "[hue, 'dcluster']"}), "(adata_raw, color=[hue, 'dcluster'])\n", (11282, 11318), True, 'import scanpy as sc\n'), ((14030, 14271), 'pandas.DataFrame', 'pd.DataFrame', (["{'gene': result['names'][cluster], 'score': result['scores'][cluster],\n 'log_FC': result['logfoldchanges'][cluster], 'pval': result['pvals'][\n cluster], 'pval_adj': result['pvals_adj'][cluster], 'cell_type': cluster}"], {}), "({'gene': result['names'][cluster], 'score': result['scores'][\n cluster], 'log_FC': result['logfoldchanges'][cluster], 'pval': result[\n 'pvals'][cluster], 'pval_adj': result['pvals_adj'][cluster],\n 'cell_type': cluster})\n", (14042, 14271), True, 'import pandas as pd\n'), ((17895, 17970), 'rich.print', 'print', (['f"""[bold red]Unfortunately organism {organism} is not yet supported."""'], {}), "(f'[bold 
red]Unfortunately organism {organism} is not yet supported.')\n", (17900, 17970), False, 'from rich import print\n'), ((18035, 18106), 'rich.print', 'print', (['f"""[bold red]Unfortunately tissue {tissue} is not yet supported."""'], {}), "(f'[bold red]Unfortunately tissue {tissue} is not yet supported.')\n", (18040, 18106), False, 'from rich import print\n'), ((18181, 18298), 'rich.print', 'print', (['f"""[bold red]Unfortunately marker file {marker_file} could not be found. Please check your spelling."""'], {}), "(\n f'[bold red]Unfortunately marker file {marker_file} could not be found. Please check your spelling.'\n )\n", (18186, 18298), False, 'from rich import print\n'), ((2566, 2628), 'pandas.DataFrame', 'pd.DataFrame', (['relative_frequencies'], {'columns': 'ind', 'index': 'samples'}), '(relative_frequencies, columns=ind, index=samples)\n', (2578, 2628), True, 'import pandas as pd\n'), ((4023, 4087), 'pandas.DataFrame', 'pd.DataFrame', (['relative_frequencies'], {'columns': 'ind', 'index': 'celltypes'}), '(relative_frequencies, columns=ind, index=celltypes)\n', (4035, 4087), True, 'import pandas as pd\n'), ((6655, 6727), 'numpy.correlate', 'np.correlate', (['topmarker.loc[:, log_fc_label]', 'gene_expression.iloc[:, c]'], {}), '(topmarker.loc[:, log_fc_label], gene_expression.iloc[:, c])\n', (6667, 6727), True, 'import numpy as np\n'), ((7422, 7462), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': 'eps', 'min_samples': 'min_samples'}), '(eps=eps, min_samples=min_samples)\n', (7428, 7462), False, 'from sklearn.cluster import DBSCAN\n'), ((7576, 7594), 'numpy.unique', 'np.unique', (['cluster'], {}), '(cluster)\n', (7585, 7594), True, 'import numpy as np\n'), ((11539, 11564), 'numpy.setdiff1d', 'np.setdiff1d', (['subset', 'ref'], {}), '(subset, ref)\n', (11551, 11564), True, 'import numpy as np\n')] |
import typing
import sys
import numpy as np
def dfs(
  l: int,
  r: int,
  a: np.array,
  cache: np.array,
) -> int:
  """Memoized game search over the take-from-either-end game.

  `l` items are already taken from the left, `r` from the right.
  Even turns (first player) maximize by adding the chosen end value,
  odd turns (second player) minimize by subtracting it.  `cache` is a
  (n+1, n+1) int64 table pre-filled with 1 << 50 as the "unset" marker.
  """
  if l + r == a.size:
    return 0
  memo = cache[l, r]
  if memo != 1 << 50:
    return memo
  take_left = dfs(l + 1, r, a, cache)
  take_right = dfs(l, r + 1, a, cache)
  left_val = a[l]
  right_val = a[-1 - r]
  if (l + r) % 2 == 0:
    best = max(take_left + left_val, take_right + right_val)
  else:
    best = min(take_left - left_val, take_right - right_val)
  cache[l, r] = best
  return best
def solve(
  n: int,
  a: np.array,
) -> typing.NoReturn:
  """Print the optimal score difference of the two-player
  take-from-either-end game on the length-`n` array `a`."""
  INF = 1 << 50
  memo = np.full((n + 1, n + 1), INF, np.int64)
  answer = dfs(0, 0, a, memo)
  print(answer)
def main() -> typing.NoReturn:
  """Read n and the integer array from stdin, then run solve."""
  n = int(input())
  values = np.array(
    sys.stdin.readline().split(),
    dtype=np.int64,
  )
  solve(n, values)
# When invoked with 'ONLINE_JUDGE' as the last CLI argument, ahead-of-time
# compile solve (and njit-compile dfs, which it calls) into an extension
# module named 'my_module' via numba.pycc, then exit.  The judged run then
# imports the compiled solve below instead of the pure-Python one.
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
  from numba import njit, i8
  from numba.pycc import CC
  cc = CC('my_module')
  # dfs is called from solve, so it must be JIT-compiled as well
  dfs = njit(dfs)
  fn = solve
  # exported signature: solve(n: int64, a: int64[:])
  signature = (i8, i8[:])
  cc.export(
    fn.__name__,
    signature,
  )(fn)
  cc.compile()
  exit(0)
from my_module import solve
main() | [
"numba.njit",
"numba.pycc.CC",
"sys.stdin.readline",
"my_module.solve",
"numpy.full"
] | [((518, 556), 'numpy.full', 'np.full', (['(n + 1, n + 1)', 'inf', 'np.int64'], {}), '((n + 1, n + 1), inf, np.int64)\n', (525, 556), True, 'import numpy as np\n'), ((737, 748), 'my_module.solve', 'solve', (['n', 'a'], {}), '(n, a)\n', (742, 748), False, 'from my_module import solve\n'), ((859, 874), 'numba.pycc.CC', 'CC', (['"""my_module"""'], {}), "('my_module')\n", (861, 874), False, 'from numba.pycc import CC\n'), ((883, 892), 'numba.njit', 'njit', (['dfs'], {}), '(dfs)\n', (887, 892), False, 'from numba import njit, i8\n'), ((676, 696), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (694, 696), False, 'import sys\n')] |
import hashlib
import re
import requests
import warnings
from io import StringIO
import urllib
from typing import Any, List, Tuple, Union
import pyopenms as oms
import numpy as np
import pronto
from Bio import SeqIO, SeqRecord
"""
Utility functions that do not contribute directly to QC calculations
"""
def sha256fromfile(abs_file_path: str) -> str:
    """
    sha256fromfile will create a sha256 digest from the file at given path.

    The file is read in fixed-size binary chunks so arbitrarily large files
    can be digested without loading them into memory at once.

    Parameters
    ----------
    abs_file_path : str
        The absolute path to the file to digest

    Returns
    -------
    str
        The hex digest of the file contents

    Raises
    ------
    FileNotFoundError
        If abs_file_path is not a file
    """
    digest = hashlib.sha256()
    with open(abs_file_path, 'rb', buffering=0) as handle:
        while True:
            chunk = handle.read(128 * 1024)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def cast_if_int(pot_int: Any) -> Union[int, Any]:
    """
    cast_if_int convenience function to cast to int

    Due to the frequent use of numpy.dtypes and pyOpenMS return of binary encode strings,
    this function will ease the level of verbosity.

    Parameters
    ----------
    pot_int : Any
        The potential int value

    Returns
    -------
    Union[int,Any]
        In case the argument is cast-able into int, will return that int, unchanged argument otherwise.
    """
    try:
        return int(pot_int)
    except (ValueError, TypeError):
        # BUG FIX: int(None) / int([...]) raise TypeError, not ValueError;
        # the original let those propagate despite the documented
        # "unchanged argument otherwise" contract.
        return pot_int
def spec_native_id(spec: oms.MSSpectrum) -> Union[int, None]:
    """
    Retrieve the numeric scan id from a spectrum's native id string.

    The native id carries much redundant information; only the trailing
    'scan=<number>' part is extracted and returned as an int.

    Parameters
    ----------
    spec : oms.MSSpectrum
        Spectrum to get the native id from

    Returns
    -------
    Union[int,None]
        None when the native id is empty or not of the scan=number format
    """
    native_id = spec.getNativeID()
    if not native_id:
        return None
    scan_numbers = re.findall("scan=(\d+)$", native_id)
    # the `$` anchor guarantees at most one match
    if len(scan_numbers) == 1:
        return cast_if_int(scan_numbers[0])
    return None
def pep_native_id(p: oms.Peptide) -> Union[int, None]:
    """
    Retrieve the numeric scan id from an identification's spectrum reference.

    Counterpart to spec_native_id: identifications loaded from mzid et al.
    carry the native id of the spectrum they identify as the
    'spectrum_reference' meta value; only the trailing 'scan=<number>'
    part is extracted and returned as an int.

    Parameters
    ----------
    p : oms.Peptide
        PeptideIdentification from which to get the native id of the involved spectrum

    Returns
    -------
    Union[int,None]
        None when no reference is set or it is not of the scan=number format
    """
    reference = p.getMetaValue('spectrum_reference')
    if not reference:
        return None
    scan_numbers = re.findall("scan=(\d+)$", reference)
    # the `$` anchor guarantees at most one match
    if len(scan_numbers) == 1:
        return cast_if_int(scan_numbers[0])
    return None
def getMassDifference(theo_mz: float, exp_mz: float, use_ppm: bool = True) -> float:
    """
    Compute the (signed) mass difference between two masses.

    The result is exp_mz - theo_mz, expressed in [ppm] relative to theo_mz
    when use_ppm is True, otherwise in [Da].  The difference is **not**
    absolute.

    Parameters
    ----------
    theo_mz : float
        First (theoretical) mass
    exp_mz : float
        Second (experimental) mass
    use_ppm : bool, optional
        switch from simple [Da] difference to [ppm], by default True

    Returns
    -------
    float
        Signed mass difference in [ppm] or [Da]
    """
    delta = exp_mz - theo_mz
    if not use_ppm:
        return delta
    return delta / (theo_mz * 1e-06)
def getTrapTime(spec: "oms.MSSpectrum", acqusition_unavailable=False) -> float:
    """
    getTrapTime for a given MSn spectrum, return the ion trap collection time applied during acquisition.

    The value is sourced from cvTerm 'MS:1000927' and returned as a negative
    value (-1.0) if no such cvTerm was available (not mandatory in mzML).
    This means in particular that MS1 spectra will all have negative values
    returned.  In case pyopenms < 2.5.0, use the acqusition_unavailable flag
    and execute TrapTimeTool before QCCalculator execution.

    Parameters
    ----------
    spec : oms.MSSpectrum
        Spectrum to get the trap time from
    acqusition_unavailable : bool, optional
        In case access to AcquisitionInfo through pyopenms is unavailable, by default False

    Returns
    -------
    float
        Ion trap collection time in [ms] for given MSn, -1.0 if unavailable
    """
    tt = -1.0
    if acqusition_unavailable:
        if spec.metaValueExists('MS:1000927'):
            tt = spec.getMetaValue('MS:1000927')
    else:
        # BUG FIX: the original guard was `if not spec.getAcquisitionInfo():`,
        # which only entered the loop when the acquisition info was
        # empty/falsy, making the loop dead code.  Iterate when info exists.
        if spec.getAcquisitionInfo():
            for j in spec.getAcquisitionInfo():
                if j.metaValueExists("MS:1000927"):
                    tt = j.getMetaValue("MS:1000927")
                    break
    return tt
def extractDistributionStats(value_array: np.array) -> Tuple:
    """
    Pull descriptive distribution statistics from a 1-D numpy array.

    Extracted are the three quartiles, the standard deviation, the mean,
    and all values lying more than 1.5*IQR outside [Q1, Q3].

    Parameters
    ----------
    value_array : np.array
        numpy array of ndim=1, all values of one type

    Returns
    -------
    Tuple
        (Q1, Q2, Q3, sigma, mean, outliers)
    """
    quart1, median, quart3 = np.quantile(value_array, [.25, .5, .75])
    iqr = quart3 - quart1
    lower_fence = quart1 - (1.5 * iqr)
    upper_fence = quart3 + (1.5 * iqr)
    outliers = value_array[(value_array < lower_fence) | (value_array > upper_fence)]
    return quart1, median, quart3, np.std(value_array), np.mean(value_array), outliers
def getUniProtSequences(accessions: List[str]) -> Union[List[SeqRecord.SeqRecord], None]:
    """
    Query UniProt for the sequences of the given accessions.

    A single query is sent to uniprot.org and the FASTA response is parsed
    into Bio.SeqRecord objects.  No completeness check is done here;
    however, no isoforms are reported, so the number of SeqRecord s should
    equal the number of accessions queried.

    Parameters
    ----------
    accessions : List[str]
        The list of uniprot accessions to query for their sequence

    Returns
    -------
    Union[List[SeqRecord.SeqRecord],None]
        The resulting list of SeqRecord accessions

    # https://docs.python.org/3/library/xml.etree.elementtree.html#pull-api-for-non-blocking-parsing
    """
    query = '+OR+'.join('id:' + accession for accession in accessions)
    request_params = {"query": query, "format": "fasta", "include": "no"}  # no isoforms
    # NOTE: https certificate verification does not work OOTB with uniprot.org
    response = requests.get("https://www.uniprot.org/uniprot/", params=request_params,
                            verify=False)
    if not response.ok:
        # raise_for_status raises for non-ok responses, so the fallback
        # warn/return below is best-effort only (kept from the original)
        response.raise_for_status()
        warnings.warn("UniProt query for sequences unsuccessful.")
        return None
    return list(SeqIO.parse(StringIO(response.text), format='fasta'))
def obtainOntology(url: str) -> pronto.Ontology:
    """
    Provide a pronto ontology object fetched from a URL or a common name.

    An ontology can be requested by URL, or by one of the common names
    'psi-ms', 'psi-qc', 'units', 'pride', 'MS', 'QC', 'UO', which resolve
    to the latest available version of the respective ontology.

    Parameters
    ----------
    url : str
        Either the url or a common name

    Returns
    -------
    pronto.Ontology
        pronto ontology

    Raises
    ------
    NameError
        When the url cannot be fetched or does not contain a parseable ontology
    """
    # TODO move hardcoded urls to file
    known_ontologies = {
        "psi-ms": "https://github.com/HUPO-PSI/psi-ms-CV/releases/download/4.1.31/psi-ms.obo",
        "psi-qc": "https://raw.githubusercontent.com/HUPO-PSI/mzQC/master/cv/qc-cv.obo",
        "units": "http://purl.obolibrary.org/obo/uo.owl",
        "pride": "http://purl.obolibrary.org/obo/pride_cv.obo",
    }
    known_ontologies["MS"] = known_ontologies["psi-ms"]
    known_ontologies["QC"] = known_ontologies["psi-qc"]
    known_ontologies["UO"] = known_ontologies["units"]
    url = known_ontologies.get(url, url)
    try:
        with urllib.request.urlopen(url, timeout=10) as obo_stream:
            obo = pronto.Ontology(obo_stream)
    except Exception as e:
        # TODO separate 404 from connection/transmission error
        raise NameError(
            "Unable to obtain {}; please make sure the url exists, is available, and contains a parseable ontology.".format(
                url)) from e
    return obo
| [
"numpy.mean",
"hashlib.sha256",
"pronto.Ontology",
"io.StringIO",
"requests.get",
"numpy.quantile",
"numpy.std",
"warnings.warn",
"re.findall",
"numpy.extract",
"urllib.request.urlopen"
] | [((858, 874), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (872, 874), False, 'import hashlib\n'), ((6092, 6135), 'numpy.quantile', 'np.quantile', (['value_array', '[0.25, 0.5, 0.75]'], {}), '(value_array, [0.25, 0.5, 0.75])\n', (6103, 6135), True, 'import numpy as np\n'), ((6139, 6158), 'numpy.std', 'np.std', (['value_array'], {}), '(value_array)\n', (6145, 6158), True, 'import numpy as np\n'), ((6165, 6185), 'numpy.mean', 'np.mean', (['value_array'], {}), '(value_array)\n', (6172, 6185), True, 'import numpy as np\n'), ((6265, 6340), 'numpy.extract', 'np.extract', (['((value_array < low_out) | (value_array > high_out))', 'value_array'], {}), '((value_array < low_out) | (value_array > high_out), value_array)\n', (6275, 6340), True, 'import numpy as np\n'), ((7366, 7443), 'requests.get', 'requests.get', (['"""https://www.uniprot.org/uniprot/"""'], {'params': 'params', 'verify': '(False)'}), "('https://www.uniprot.org/uniprot/', params=params, verify=False)\n", (7378, 7443), False, 'import requests\n'), ((2297, 2329), 're.findall', 're.findall', (['"""scan=(\\\\d+)$"""', 'spre'], {}), "('scan=(\\\\d+)$', spre)\n", (2307, 2329), False, 'import re\n'), ((3382, 3414), 're.findall', 're.findall', (['"""scan=(\\\\d+)$"""', 'spre'], {}), "('scan=(\\\\d+)$', spre)\n", (3392, 3414), False, 'import re\n'), ((7603, 7661), 'warnings.warn', 'warnings.warn', (['"""UniProt query for sequences unsuccessful."""'], {}), "('UniProt query for sequences unsuccessful.')\n", (7616, 7661), False, 'import warnings\n'), ((8988, 9027), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {'timeout': '(10)'}), '(url, timeout=10)\n', (9010, 9027), False, 'import urllib\n'), ((9051, 9074), 'pronto.Ontology', 'pronto.Ontology', (['obo_in'], {}), '(obo_in)\n', (9066, 9074), False, 'import pronto\n'), ((7711, 7734), 'io.StringIO', 'StringIO', (['response.text'], {}), '(response.text)\n', (7719, 7734), False, 'from io import StringIO\n')] |
import os
from time import perf_counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
from sparse import sparse
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
from util import *
class MoleculeDataset(Dataset):
    """
    PyTorch Dataset class to load molecular images and InChIs

    Images come from sparse .npz shards on disk; InChI labels (when not in
    'eval' mode) come from one pre-encoded numpy array shared by all shards
    of the split, addressed as i + shard_id * shard_size.
    """
    def __init__(self, mode, shard_id, source_dir, img_size, prerotated=False,
                 unrotated=False, rotate=True, p=0.5):
        # mode selects both the shard folder ('<mode>_shards') and the
        # hard-coded number of samples per shard (used to index labels)
        self.mode = mode
        self.shard_id = shard_id
        if self.mode == 'eval':
            self.shard_size = 200000
        elif self.mode == 'train':
            self.shard_size = 175000
        elif self.mode == 'val' or self.mode == 'test':
            self.shard_size = 25000
        self.img_size = img_size
        self.prerotated = prerotated
        self.unrotated = unrotated
        # shards already rotated on disk must not be rotated again at load time
        if self.prerotated:
            self.rotate = False
        else:
            self.rotate = rotate
        # probability that any random rotation is applied in __getitem__
        self.p = p
        # pick the shard variant directory matching the rotation flags
        if self.prerotated and mode != 'eval':
            self.sparse_path = os.path.join(source_dir, '{}_shards/prerotated'.format(self.mode), 'shard{}.npz'.format(shard_id))
        elif self.unrotated and mode == 'eval':
            self.sparse_path = os.path.join(source_dir, '{}_shards/unrotated'.format(self.mode), 'shard{}.npz'.format(shard_id))
        else:
            self.sparse_path = os.path.join(source_dir, '{}_shards'.format(self.mode), 'shard{}.npz'.format(shard_id))
        self.sparse_imgs = sparse.load_npz(self.sparse_path)
        if mode != 'eval':
            # labels: one encoded-InChI array for the whole split
            self.inchi_path = os.path.join(source_dir, '{}_shards'.format(self.mode), 'encoded_inchis.npy')
            self.encoded_inchis = np.load(self.inchi_path)
        else:
            # eval mode has no labels; keep the image ids of this shard instead
            self.img_id_path = os.path.join(source_dir, '{}_shards'.format(self.mode), 'img_id_shard{}.csv'.format(shard_id))
            self.img_ids = pd.read_csv(self.img_id_path)

    def __getitem__(self, i):
        """Return (img, encoded_inchi, inchi_length) in train/val/test mode,
        or (img, img_idx) in eval mode."""
        ### grab image
        # start = perf_counter()
        sparse_img = self.sparse_imgs[i,:,:,:]
        # stop = perf_counter()
        # grab_sparse_img = stop - start
        # start = perf_counter()
        img = sparse_img.todense().astype(np.float32)
        # stop = perf_counter()
        # cast_to_dense = stop - start
        img = torch.tensor(img)
        # resize when a non-default target size is requested
        # (assumes shards are stored at 256x256 -- TODO confirm)
        if self.img_size != 256:
            img = img.unsqueeze(0)
            img = F.interpolate(img, size=(self.img_size, self.img_size))
            img = img.squeeze(0)
        # start = perf_counter()
        if self.rotate:
            # with probability p rotate by 90/180/270 degrees, equally likely
            angles = [0, 90, 180, 270]
            angle = np.random.choice(angles, size=1, p=[1 - self.p, self.p / 3, self.p / 3, self.p / 3])
            if angle == 0:
                pass
            elif angle == 90:
                img = torch.rot90(img, 1, [1,2])
            elif angle == 180:
                # two successive 90-degree rotations
                img = torch.rot90(img, 1, [1,2])
                img = torch.rot90(img, 1, [1,2])
            elif angle == 270:
                img = torch.rot90(img, -1, [1,2])
        # stop = perf_counter()
        # rotate_img = stop - start
        ### grab inchi
        if self.mode != 'eval':
            # start = perf_counter()
            # global label index: shards are contiguous slices of one array
            inchi_idx = i + (self.shard_size*self.shard_id)
            inchi_data = torch.tensor(self.encoded_inchis[inchi_idx]).long()
            # last element stores the sequence length, the rest the tokens
            encoded_inchi = inchi_data[:-1]
            inchi_length = inchi_data[-1]
            # stop = perf_counter()
            # grab_inchi = stop - start
            # log_file = open('logs/log_dataloader_times.txt', 'a')
            # log_file.write('{},{},{},{}\n'.format(grab_sparse_img, cast_to_dense, rotate_img, grab_inchi))
            # log_file.close()
            return img, encoded_inchi, inchi_length
        else:
            img_idx = i
            return img, torch.tensor(img_idx).long()

    def __len__(self):
        """Number of images in this shard."""
        return self.sparse_imgs.shape[0]
| [
"pandas.read_csv",
"numpy.random.choice",
"torch.rot90",
"torch.tensor",
"torch.nn.functional.interpolate",
"sparse.sparse.load_npz",
"numpy.load"
] | [((1591, 1624), 'sparse.sparse.load_npz', 'sparse.load_npz', (['self.sparse_path'], {}), '(self.sparse_path)\n', (1606, 1624), False, 'from sparse import sparse\n'), ((2396, 2413), 'torch.tensor', 'torch.tensor', (['img'], {}), '(img)\n', (2408, 2413), False, 'import torch\n'), ((1795, 1819), 'numpy.load', 'np.load', (['self.inchi_path'], {}), '(self.inchi_path)\n', (1802, 1819), True, 'import numpy as np\n'), ((1987, 2016), 'pandas.read_csv', 'pd.read_csv', (['self.img_id_path'], {}), '(self.img_id_path)\n', (1998, 2016), True, 'import pandas as pd\n'), ((2500, 2555), 'torch.nn.functional.interpolate', 'F.interpolate', (['img'], {'size': '(self.img_size, self.img_size)'}), '(img, size=(self.img_size, self.img_size))\n', (2513, 2555), True, 'import torch.nn.functional as F\n'), ((2705, 2794), 'numpy.random.choice', 'np.random.choice', (['angles'], {'size': '(1)', 'p': '[1 - self.p, self.p / 3, self.p / 3, self.p / 3]'}), '(angles, size=1, p=[1 - self.p, self.p / 3, self.p / 3, \n self.p / 3])\n', (2721, 2794), True, 'import numpy as np\n'), ((2890, 2917), 'torch.rot90', 'torch.rot90', (['img', '(1)', '[1, 2]'], {}), '(img, 1, [1, 2])\n', (2901, 2917), False, 'import torch\n'), ((3373, 3417), 'torch.tensor', 'torch.tensor', (['self.encoded_inchis[inchi_idx]'], {}), '(self.encoded_inchis[inchi_idx])\n', (3385, 3417), False, 'import torch\n'), ((2970, 2997), 'torch.rot90', 'torch.rot90', (['img', '(1)', '[1, 2]'], {}), '(img, 1, [1, 2])\n', (2981, 2997), False, 'import torch\n'), ((3019, 3046), 'torch.rot90', 'torch.rot90', (['img', '(1)', '[1, 2]'], {}), '(img, 1, [1, 2])\n', (3030, 3046), False, 'import torch\n'), ((3909, 3930), 'torch.tensor', 'torch.tensor', (['img_idx'], {}), '(img_idx)\n', (3921, 3930), False, 'import torch\n'), ((3099, 3127), 'torch.rot90', 'torch.rot90', (['img', '(-1)', '[1, 2]'], {}), '(img, -1, [1, 2])\n', (3110, 3127), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
import cv2
import mahotas
def plot_histogram(image, title, mask = None):
    """Display a per-channel (B, G, R) 256-bin intensity histogram of
    `image`, optionally restricted to the pixels selected by `mask`."""
    channels = cv2.split(image)
    plt.figure()
    plt.title(title)
    plt.xlabel("Bins")
    plt.ylabel("# of Pixels")
    for channel, line_color in zip(channels, ("b", "g", "r")):
        histogram = cv2.calcHist([channel], [0], mask, [256], [0, 256])
        plt.plot(histogram, color=line_color)
        plt.xlim([0, 256])
    plt.show()
def plot_histogram_green(image, title, mask = None):
    """Display a per-channel histogram of `image` over [0, 255) with 255
    bins, so the saturated white pixels (value 255) are excluded.

    With the full [256] range everything concentrated at the white value
    (most of the masked image is white), hiding the green distribution;
    reducing the range to [255] makes the green pixels visible.
    """
    channels = cv2.split(image)
    plt.figure()
    plt.title(title)
    plt.xlabel("Bins")
    plt.ylabel("# of Pixels")
    for channel, line_color in zip(channels, ("b", "g", "r")):
        histogram = cv2.calcHist([channel], [0], mask, [255], [0, 255])
        plt.plot(histogram, color=line_color)
        plt.xlim([0, 255])
    plt.show()
def otsu_thresh(image):
    """
    Show the inverted Otsu-threshold binarization of a BGR image.

    The image is converted to gray, Gaussian-blurred for the threshold
    estimate, binarized at Otsu's threshold T, inverted and displayed.
    """
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    #cv2.imshow("Image gray", image)
    T = mahotas.thresholding.otsu(blurred)
    print('Otsu’s threshold: %d' % T)
    thresh = image.copy()
    thresh[thresh > T] = 255
    # BUG FIX: the original used `thresh < T`, leaving pixels exactly equal
    # to T at their original gray value, so the result was not truly binary.
    thresh[thresh <= T] = 0
    thresh = cv2.bitwise_not(thresh)
    cv2.imshow("Otsu", thresh)
def rid_cav_thresh(image):
    """Show the inverted Riddler-Calvard-threshold binarization of a BGR
    image (gray conversion, Gaussian blur for the estimate, then binarize)."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    T = mahotas.thresholding.rc(blurred)
    print('Riddler-Calvard: %d' % T)
    binary = gray.copy()
    binary[binary > T] = 255
    binary[binary < 255] = 0
    cv2.imshow("Riddler-Calvard", cv2.bitwise_not(binary))
def combined(img):
    """Compute the weighted greenness index of a BGR image.

    Each channel is first scaled by its own maximum, then chromatically
    normalized (so r + g + b is about 1); three vegetation indices (ExG,
    ExGR, CIVE) are then mixed with fixed weights (0.28, 0.34, 0.38 -- the
    weights redistributed without the VEG index).
    """
    blue = img[:, :, 0]
    green = img[:, :, 1]
    red = img[:, :, 2]
    # per-channel scaling by the channel maximum
    red_norm = red / np.amax(red)
    green_norm = green / np.amax(green)
    blue_norm = blue / np.amax(blue)
    # chromatic coordinates; small_num guards against division by zero
    norm = red_norm + blue_norm + green_norm
    small_num = 0.0001
    r = red_norm / (norm + small_num)
    g = green_norm / (norm + small_num)
    b = blue_norm / (norm + small_num)
    ExG = 2 * g - r - b                                      # excess green
    ExGR = ExG - 1.4 * r - g                                 # excess green minus red
    CIVE = -(0.441 * r - 0.811 * g + 0.385 * b + 18.78745)   # color index of vegetation extraction
    return 0.28 * ExG + 0.34 * ExGR + 0.38 * CIVE
def linear_map(image):
    """
    Linearly map the combined greenness index of `image` onto [0, 255]
    and return it as uint8, ready for Otsu thresholding.

    PERF FIX: the original called combined(image) three times (for max,
    min and the mapping itself); it is computed once here.
    """
    greenness = combined(image)
    max_value = np.max(greenness)
    min_value = np.min(greenness)
    #print(min_value, max_value)
    # map the combined index values (mostly negative) onto 0-255
    new_min = 0
    new_max = 255
    old_range = max_value - min_value
    new_range = new_max - new_min
    lin_map = (((greenness.astype(np.float64) - min_value) * new_range) / old_range) + new_min
    image_map = lin_map.astype(np.uint8)
    #cv2.imshow("Green Extracted Image", image_map)
    return image_map
# CLAHE operates on single-channel data only, so clahe_bgr converts
# BGR -> LAB, equalizes just the lightness channel, then converts back to
# BGR so the other functions (histograms, combined(), ...) keep receiving
# BGR input.
# Clip limit used by the demo run below.
# NOTE(review): semantics of clipLimit=0 in cv2.createCLAHE should be confirmed.
cliplimit = 0
def clahe_bgr(image, cliplimit, gridsize = 8):
    """Apply CLAHE to the lightness (L) channel of a BGR image and return
    the contrast-enhanced image converted back to BGR."""
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    channels = cv2.split(lab)
    equalizer = cv2.createCLAHE(cliplimit, tileGridSize=(gridsize, gridsize))
    channels[0] = equalizer.apply(channels[0])
    merged = cv2.merge(channels)
    return cv2.cvtColor(merged, cv2.COLOR_LAB2BGR)
# Demo run: load an image, binarize its greenness index with Otsu, mask out
# the non-green pixels and plot the histogram of the green-only image.
# Earlier experiment inputs are kept commented out.
#img = cv2.imread('/home/sayem/Desktop/cs557_project/Final_project/low_quality_image/coc01.jpg')
#img = cv2.imread('/home/sayem/Desktop/cs557_project/Final_project/brightness_control/val_rag_01_middark.jpg')
img = cv2.imread('/home/sayem/Desktop/cs557_project/Final_project/rag053.jpg')
#cv2.imshow("Image", img)
rows, cols, _ = img.shape
print(img.shape)
#showing otsu threshold of green extracted image
T = mahotas.thresholding.otsu(linear_map(img)) #input original image
#T = mahotas.thresholding.otsu(linear_map(clahe_im)) #input CLAHE image
print ('Otsu’s threshold combined: %d' % T)
# binarize the mapped greenness: values above T -> 0, remaining nonzero -> 255
thresh_com = linear_map(img).copy()
thresh_com[thresh_com > T] = 0
thresh_com[thresh_com > 0] = 255
thresh_com_mask= cv2.bitwise_not(thresh_com)
#cv2.imshow("Otsu Green Extracted", thresh_com_mask) #showing otsu threshold of green extracted image
# convert single channel mask back into 3 channels
mask_rgb = cv2.cvtColor(thresh_com_mask, cv2.COLOR_GRAY2RGB)
# perform bitwise and on mask to obtain cut-out image that is not green
masked_img = cv2.bitwise_and(img, mask_rgb)
# replace the cut-out parts (black) with white
masked_replace_white = cv2.addWeighted(masked_img, 1, cv2.cvtColor(thresh_com, cv2.COLOR_GRAY2RGB), 1, 0)
plt.imshow(cv2.cvtColor(masked_replace_white, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
#plot_histogram(img, "Histogram for Original Image")
plot_histogram_green(masked_replace_white, "Histogram of extracted green image")
'''where m is the mean value of a, i.e. the average gray level
m = summation (a . pa)
The mean determines the average level of brightness, where low, high and
medium values indicate the degree of light which has impacted
the device.
'''
""" Let a be a random variable denoting gray levels, the nth moment of a about the mean is
defined as (Gonzalez & Woods, 2008):
meu_n(a) = summation ((a-m)^n * p(a))
Variance is a measure of gray-level contrast, where high values indicate dispersion of values around
the mean and low values are indicative of a high concentration
of values around the mean.
"""
"""The skewness measures the asymmetry in the distribution. A right skewness is presented when the histo-
gram displays a large tail oriented towards high brightness values
and high concentration in the part of low brightness values (posi-
tive skewness). In the opposite case the skewness is negative.
skew = mu_3/(mu_2)^1.5
"""
"""kurtosis provides information about the peakedness in the distri-
bution; low kurtosis indicates flat top parts in the histogram
around the mean but high values are indicative of peaks around
the mean with high slopes and large tails. Skewness and kurtosis
are both zero for Gaussian distributions.
kurtosis = mu4/mu2^2
"""
"""An image with sufficient contrast should be identified by mean val-
ues in the central part of histogram, high variance, low skewness
(positive or negative) and high kurtosis. On the contrary, an image
with insufficient contrast is identified by mean values either low or
high, high skewness (positive or negative) and low kurtosis.
"""
# input of the stat parameters are "masked_replace_white". The hypothesis is, as we are calculating stat parameters
#only on green pixels, how light affects the weeds can be determined from the stat parameters. stat parameters can be input of
# adaptive CLAHE. This way contrast of the image is only changed according to the effect of the light on weeds. Other Objects in the image
# will not be increased randomly (noise). Even if noise increases, it will affect the weed classification.
def stat_blue(img):
    """Print distribution statistics (mean, variance, abs. skewness,
    kurtosis) of the blue-channel histogram, excluding pure-white pixels."""
    # 255 bins over [0, 255) so the saturated white pixels are excluded
    hist_blue = cv2.calcHist([img], [0], None, [255], [0, 255])
    rows, cols, _ = img.shape
    # p(a): probability of each gray level a
    probability_pa = hist_blue / (rows * cols)
    # gray levels matching the 255 histogram bins (column vector to align
    # with the (255, 1) histogram shape)
    a = np.arange(255).reshape(-1, 1)
    # BUG FIX: mean gray level is m = sum(a * p(a)); the original computed
    # sum(hist * p(a)), weighting bin COUNTS instead of gray levels, and
    # left `a` unused -- contradicting the documented formula above.
    m = np.sum(a * probability_pa)
    print("Blue channel mean", m)
    # nth central moment: mu_n = sum((a - m)^n * p(a))
    mu_2 = np.sum(np.power(a - m, 2) * probability_pa)
    print("Blue channel 2nd moment mu2 (Variance)", mu_2)
    mu_3 = np.sum(np.power(a - m, 3) * probability_pa)
    mu_4 = np.sum(np.power(a - m, 4) * probability_pa)
    skewness = mu_3 / np.power(mu_2, 1.5)
    print("Blue channel abs. skewness", np.absolute(skewness))
    kurtosis = mu_4 / np.power(mu_2, 2)
    print("Blue channel Kurtosis", kurtosis)
def stat_green(img):
    """Print distribution statistics (mean, variance, abs. skewness,
    kurtosis) of the green-channel histogram, excluding pure-white pixels."""
    # 255 bins over [0, 255) so the saturated white pixels are excluded
    hist_green = cv2.calcHist([img], [1], None, [255], [0, 255])
    rows, cols, _ = img.shape
    # p(a): probability of each gray level a
    probability_pa = hist_green / (rows * cols)
    # gray levels matching the 255 histogram bins (column vector to align
    # with the (255, 1) histogram shape)
    a = np.arange(255).reshape(-1, 1)
    # BUG FIX: mean gray level is m = sum(a * p(a)); the original computed
    # sum(hist * p(a)), weighting bin COUNTS instead of gray levels, and
    # left `a` unused -- contradicting the documented formula above.
    m = np.sum(a * probability_pa)
    print("Green channel mean", m)
    # nth central moment: mu_n = sum((a - m)^n * p(a))
    mu_2 = np.sum(np.power(a - m, 2) * probability_pa)
    print("Green channel 2nd moment mu2 (Variance)", mu_2)
    mu_3 = np.sum(np.power(a - m, 3) * probability_pa)
    mu_4 = np.sum(np.power(a - m, 4) * probability_pa)
    skewness = mu_3 / np.power(mu_2, 1.5)
    print("Green channel abs. skewness", np.absolute(skewness))
    kurtosis = mu_4 / np.power(mu_2, 2)
    print("Green channel Kurtosis", kurtosis)
def stat_red(img):
    """Print distribution statistics (mean, variance, abs. skewness,
    kurtosis) of the red-channel histogram, excluding pure-white pixels."""
    # 255 bins over [0, 255) so the saturated white pixels are excluded
    hist_red = cv2.calcHist([img], [2], None, [255], [0, 255])
    rows, cols, _ = img.shape
    # p(a): probability of each gray level a
    probability_pa = hist_red / (rows * cols)
    # gray levels matching the 255 histogram bins (column vector to align
    # with the (255, 1) histogram shape)
    a = np.arange(255).reshape(-1, 1)
    # BUG FIX: mean gray level is m = sum(a * p(a)); the original computed
    # sum(hist * p(a)), weighting bin COUNTS instead of gray levels, and
    # left `a` unused -- contradicting the documented formula above.
    m = np.sum(a * probability_pa)
    print("Red channel mean", m)
    # nth central moment: mu_n = sum((a - m)^n * p(a))
    mu_2 = np.sum(np.power(a - m, 2) * probability_pa)
    print("Red channel 2nd moment mu2 (Variance)", mu_2)
    mu_3 = np.sum(np.power(a - m, 3) * probability_pa)
    mu_4 = np.sum(np.power(a - m, 4) * probability_pa)
    skewness = mu_3 / np.power(mu_2, 1.5)
    print("Red channel abs. skewness", np.absolute(skewness))
    kurtosis = mu_4 / np.power(mu_2, 2)
    print("Red channel Kurtosis", kurtosis)
clahe_img = clahe_bgr(img, cliplimit, gridsize=8)
#cv2.imshow("Clahe applied image",clahe_img)
# show original and CLAHE-enhanced image side by side (hstack and
# concatenate along axis=1 produce the same layout; both kept from the
# original run)
numpy_horizontal = np.hstack((img, clahe_img))
numpy_horizontal_concat = np.concatenate((img, clahe_img), axis=1)
image_resized = cv2.resize(numpy_horizontal_concat, (0, 0), None, .25, .25)
cv2.imshow('Original (LHS) & CLAHE (RHS)', image_resized)
plot_histogram(clahe_img, "Clahe histogram")
# check how application of CLAHE changed image stat
print("Green masked image stat:")
stat_green(masked_replace_white) #showing results of the green channel for green segmented image without the white pixel results
print("CLAHE modified image stat:")
stat_green(clahe_img) #stat of green channel for clahe applied image
"""
For the following input image:
img = cv2.imread('/home/sayem/Desktop/cs557_project/Final_project/brightness_control/val_rag_01_midbright.jpg')
cliplimit = 18 at CLAHE made the histogram more gaussian (by looking). Calculate the stat parameters for "clahe_img"
to see skewness and kurtosis. Skewness and kurtosis are both zero for Gaussian distributions. Goal is to go towards
gaussian distribution.
"""
cv2.waitKey(0) | [
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"cv2.imshow",
"mahotas.thresholding.rc",
"numpy.arange",
"cv2.calcHist",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"matplotlib.pyplot.axis",
"cv2.waitKey",
"cv2.merge",
"cv2.cvtColor",
"cv2.split",
"matplotlib.p... | [((4250, 4322), 'cv2.imread', 'cv2.imread', (['"""/home/sayem/Desktop/cs557_project/Final_project/rag053.jpg"""'], {}), "('/home/sayem/Desktop/cs557_project/Final_project/rag053.jpg')\n", (4260, 4322), False, 'import cv2\n'), ((4745, 4772), 'cv2.bitwise_not', 'cv2.bitwise_not', (['thresh_com'], {}), '(thresh_com)\n', (4760, 4772), False, 'import cv2\n'), ((4939, 4988), 'cv2.cvtColor', 'cv2.cvtColor', (['thresh_com_mask', 'cv2.COLOR_GRAY2RGB'], {}), '(thresh_com_mask, cv2.COLOR_GRAY2RGB)\n', (4951, 4988), False, 'import cv2\n'), ((5075, 5105), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'mask_rgb'], {}), '(img, mask_rgb)\n', (5090, 5105), False, 'import cv2\n'), ((5327, 5342), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5335, 5342), True, 'import matplotlib.pyplot as plt\n'), ((5343, 5353), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5351, 5353), True, 'import matplotlib.pyplot as plt\n'), ((10705, 10732), 'numpy.hstack', 'np.hstack', (['(img, clahe_img)'], {}), '((img, clahe_img))\n', (10714, 10732), True, 'import numpy as np\n'), ((10759, 10799), 'numpy.concatenate', 'np.concatenate', (['(img, clahe_img)'], {'axis': '(1)'}), '((img, clahe_img), axis=1)\n', (10773, 10799), True, 'import numpy as np\n'), ((10816, 10877), 'cv2.resize', 'cv2.resize', (['numpy_horizontal_concat', '(0, 0)', 'None', '(0.25)', '(0.25)'], {}), '(numpy_horizontal_concat, (0, 0), None, 0.25, 0.25)\n', (10826, 10877), False, 'import cv2\n'), ((10876, 10933), 'cv2.imshow', 'cv2.imshow', (['"""Original (LHS) & CLAHE (RHS)"""', 'image_resized'], {}), "('Original (LHS) & CLAHE (RHS)', image_resized)\n", (10886, 10933), False, 'import cv2\n'), ((11714, 11728), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (11725, 11728), False, 'import cv2\n'), ((137, 153), 'cv2.split', 'cv2.split', (['image'], {}), '(image)\n', (146, 153), False, 'import cv2\n'), ((187, 199), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (197, 
199), True, 'import matplotlib.pyplot as plt\n'), ((204, 220), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (213, 220), True, 'import matplotlib.pyplot as plt\n'), ((225, 243), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {}), "('Bins')\n", (235, 243), True, 'import matplotlib.pyplot as plt\n'), ((248, 273), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# of Pixels"""'], {}), "('# of Pixels')\n", (258, 273), True, 'import matplotlib.pyplot as plt\n'), ((454, 464), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (462, 464), True, 'import matplotlib.pyplot as plt\n'), ((532, 548), 'cv2.split', 'cv2.split', (['image'], {}), '(image)\n', (541, 548), False, 'import cv2\n'), ((582, 594), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (592, 594), True, 'import matplotlib.pyplot as plt\n'), ((599, 615), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (608, 615), True, 'import matplotlib.pyplot as plt\n'), ((620, 638), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {}), "('Bins')\n", (630, 638), True, 'import matplotlib.pyplot as plt\n'), ((643, 668), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# of Pixels"""'], {}), "('# of Pixels')\n", (653, 668), True, 'import matplotlib.pyplot as plt\n'), ((1092, 1102), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1100, 1102), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1182), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1155, 1182), False, 'import cv2\n'), ((1197, 1231), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(5, 5)', '(0)'], {}), '(image, (5, 5), 0)\n', (1213, 1231), False, 'import cv2\n'), ((1279, 1313), 'mahotas.thresholding.otsu', 'mahotas.thresholding.otsu', (['blurred'], {}), '(blurred)\n', (1304, 1313), False, 'import mahotas\n'), ((1448, 1471), 'cv2.bitwise_not', 'cv2.bitwise_not', (['thresh'], {}), '(thresh)\n', (1463, 1471), False, 
'import cv2\n'), ((1476, 1502), 'cv2.imshow', 'cv2.imshow', (['"""Otsu"""', 'thresh'], {}), "('Otsu', thresh)\n", (1486, 1502), False, 'import cv2\n'), ((1543, 1582), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1555, 1582), False, 'import cv2\n'), ((1597, 1631), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(5, 5)', '(0)'], {}), '(image, (5, 5), 0)\n', (1613, 1631), False, 'import cv2\n'), ((1641, 1673), 'mahotas.thresholding.rc', 'mahotas.thresholding.rc', (['blurred'], {}), '(blurred)\n', (1664, 1673), False, 'import mahotas\n'), ((1809, 1832), 'cv2.bitwise_not', 'cv2.bitwise_not', (['thresh'], {}), '(thresh)\n', (1824, 1832), False, 'import cv2\n'), ((1837, 1874), 'cv2.imshow', 'cv2.imshow', (['"""Riddler-Calvard"""', 'thresh'], {}), "('Riddler-Calvard', thresh)\n", (1847, 1874), False, 'import cv2\n'), ((2034, 2044), 'numpy.amax', 'np.amax', (['r'], {}), '(r)\n', (2041, 2044), True, 'import numpy as np\n'), ((2057, 2067), 'numpy.amax', 'np.amax', (['g'], {}), '(g)\n', (2064, 2067), True, 'import numpy as np\n'), ((2080, 2090), 'numpy.amax', 'np.amax', (['b'], {}), '(b)\n', (2087, 2090), True, 'import numpy as np\n'), ((3749, 3787), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2LAB'], {}), '(image, cv2.COLOR_BGR2LAB)\n', (3761, 3787), False, 'import cv2\n'), ((3805, 3819), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (3814, 3819), False, 'import cv2\n'), ((3832, 3893), 'cv2.createCLAHE', 'cv2.createCLAHE', (['cliplimit'], {'tileGridSize': '(gridsize, gridsize)'}), '(cliplimit, tileGridSize=(gridsize, gridsize))\n', (3847, 3893), False, 'import cv2\n'), ((3951, 3972), 'cv2.merge', 'cv2.merge', (['lab_planes'], {}), '(lab_planes)\n', (3960, 3972), False, 'import cv2\n'), ((3983, 4019), 'cv2.cvtColor', 'cv2.cvtColor', (['lab', 'cv2.COLOR_LAB2BGR'], {}), '(lab, cv2.COLOR_LAB2BGR)\n', (3995, 4019), False, 'import cv2\n'), ((5208, 5252), 'cv2.cvtColor', 'cv2.cvtColor', 
(['thresh_com', 'cv2.COLOR_GRAY2RGB'], {}), '(thresh_com, cv2.COLOR_GRAY2RGB)\n', (5220, 5252), False, 'import cv2\n'), ((5272, 5325), 'cv2.cvtColor', 'cv2.cvtColor', (['masked_replace_white', 'cv2.COLOR_BGR2RGB'], {}), '(masked_replace_white, cv2.COLOR_BGR2RGB)\n', (5284, 5325), False, 'import cv2\n'), ((7672, 7719), 'cv2.calcHist', 'cv2.calcHist', (['[img]', '[0]', 'None', '[255]', '[0, 255]'], {}), '([img], [0], None, [255], [0, 255])\n', (7684, 7719), False, 'import cv2\n'), ((7805, 7819), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (7814, 7819), True, 'import numpy as np\n'), ((7839, 7873), 'numpy.sum', 'np.sum', (['(hist_blue * probability_pa)'], {}), '(hist_blue * probability_pa)\n', (7845, 7873), True, 'import numpy as np\n'), ((7968, 8008), 'numpy.sum', 'np.sum', (['((hist_blue - m) * probability_pa)'], {}), '((hist_blue - m) * probability_pa)\n', (7974, 8008), True, 'import numpy as np\n'), ((8674, 8721), 'cv2.calcHist', 'cv2.calcHist', (['[img]', '[1]', 'None', '[255]', '[0, 255]'], {}), '([img], [1], None, [255], [0, 255])\n', (8686, 8721), False, 'import cv2\n'), ((8808, 8822), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (8817, 8822), True, 'import numpy as np\n'), ((8843, 8878), 'numpy.sum', 'np.sum', (['(hist_green * probability_pa)'], {}), '(hist_green * probability_pa)\n', (8849, 8878), True, 'import numpy as np\n'), ((8977, 9018), 'numpy.sum', 'np.sum', (['((hist_green - m) * probability_pa)'], {}), '((hist_green - m) * probability_pa)\n', (8983, 9018), True, 'import numpy as np\n'), ((9689, 9736), 'cv2.calcHist', 'cv2.calcHist', (['[img]', '[2]', 'None', '[255]', '[0, 255]'], {}), '([img], [2], None, [255], [0, 255])\n', (9701, 9736), False, 'import cv2\n'), ((9821, 9835), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (9830, 9835), True, 'import numpy as np\n'), ((9856, 9889), 'numpy.sum', 'np.sum', (['(hist_red * probability_pa)'], {}), '(hist_red * probability_pa)\n', (9862, 9889), True, 'import numpy as 
np\n'), ((9984, 10023), 'numpy.sum', 'np.sum', (['((hist_red - m) * probability_pa)'], {}), '((hist_red - m) * probability_pa)\n', (9990, 10023), True, 'import numpy as np\n'), ((335, 383), 'cv2.calcHist', 'cv2.calcHist', (['[chan]', '[0]', 'mask', '[256]', '[0, 256]'], {}), '([chan], [0], mask, [256], [0, 256])\n', (347, 383), False, 'import cv2\n'), ((392, 419), 'matplotlib.pyplot.plot', 'plt.plot', (['hist'], {'color': 'color'}), '(hist, color=color)\n', (400, 419), True, 'import matplotlib.pyplot as plt\n'), ((430, 448), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 256]'], {}), '([0, 256])\n', (438, 448), True, 'import matplotlib.pyplot as plt\n'), ((973, 1021), 'cv2.calcHist', 'cv2.calcHist', (['[chan]', '[0]', 'mask', '[255]', '[0, 255]'], {}), '([chan], [0], mask, [255], [0, 255])\n', (985, 1021), False, 'import cv2\n'), ((1030, 1057), 'matplotlib.pyplot.plot', 'plt.plot', (['hist'], {'color': 'color'}), '(hist, color=color)\n', (1038, 1057), True, 'import matplotlib.pyplot as plt\n'), ((1068, 1086), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 255]'], {}), '([0, 255])\n', (1076, 1086), True, 'import matplotlib.pyplot as plt\n'), ((8372, 8391), 'numpy.power', 'np.power', (['mu_2', '(1.5)'], {}), '(mu_2, 1.5)\n', (8380, 8391), True, 'import numpy as np\n'), ((8466, 8487), 'numpy.absolute', 'np.absolute', (['skewness'], {}), '(skewness)\n', (8477, 8487), True, 'import numpy as np\n'), ((8513, 8530), 'numpy.power', 'np.power', (['mu_2', '(2)'], {}), '(mu_2, 2)\n', (8521, 8530), True, 'import numpy as np\n'), ((9387, 9406), 'numpy.power', 'np.power', (['mu_2', '(1.5)'], {}), '(mu_2, 1.5)\n', (9395, 9406), True, 'import numpy as np\n'), ((9483, 9504), 'numpy.absolute', 'np.absolute', (['skewness'], {}), '(skewness)\n', (9494, 9504), True, 'import numpy as np\n'), ((9530, 9547), 'numpy.power', 'np.power', (['mu_2', '(2)'], {}), '(mu_2, 2)\n', (9538, 9547), True, 'import numpy as np\n'), ((10384, 10403), 'numpy.power', 'np.power', (['mu_2', '(1.5)'], {}), '(mu_2, 
1.5)\n', (10392, 10403), True, 'import numpy as np\n'), ((10478, 10499), 'numpy.absolute', 'np.absolute', (['skewness'], {}), '(skewness)\n', (10489, 10499), True, 'import numpy as np\n'), ((10525, 10542), 'numpy.power', 'np.power', (['mu_2', '(2)'], {}), '(mu_2, 2)\n', (10533, 10542), True, 'import numpy as np\n'), ((8079, 8105), 'numpy.power', 'np.power', (['(hist_blue - m)', '(2)'], {}), '(hist_blue - m, 2)\n', (8087, 8105), True, 'import numpy as np\n'), ((8219, 8245), 'numpy.power', 'np.power', (['(hist_blue - m)', '(3)'], {}), '(hist_blue - m, 3)\n', (8227, 8245), True, 'import numpy as np\n'), ((8301, 8327), 'numpy.power', 'np.power', (['(hist_blue - m)', '(4)'], {}), '(hist_blue - m, 4)\n', (8309, 8327), True, 'import numpy as np\n'), ((9090, 9117), 'numpy.power', 'np.power', (['(hist_green - m)', '(2)'], {}), '(hist_green - m, 2)\n', (9098, 9117), True, 'import numpy as np\n'), ((9232, 9259), 'numpy.power', 'np.power', (['(hist_green - m)', '(3)'], {}), '(hist_green - m, 3)\n', (9240, 9259), True, 'import numpy as np\n'), ((9315, 9342), 'numpy.power', 'np.power', (['(hist_green - m)', '(4)'], {}), '(hist_green - m, 4)\n', (9323, 9342), True, 'import numpy as np\n'), ((10095, 10120), 'numpy.power', 'np.power', (['(hist_red - m)', '(2)'], {}), '(hist_red - m, 2)\n', (10103, 10120), True, 'import numpy as np\n'), ((10233, 10258), 'numpy.power', 'np.power', (['(hist_red - m)', '(3)'], {}), '(hist_red - m, 3)\n', (10241, 10258), True, 'import numpy as np\n'), ((10314, 10339), 'numpy.power', 'np.power', (['(hist_red - m)', '(4)'], {}), '(hist_red - m, 4)\n', (10322, 10339), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Find cuts of a page and annotate them based on the table separators
Copyright Naver Labs Europe 2018
<NAME>
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
from optparse import OptionParser
import operator
from collections import defaultdict
from lxml import etree
import numpy as np
import shapely.geometry as geom
import shapely.affinity
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
from xml_formats.PageXml import MultiPageXml, PageXml
from util.Polygon import Polygon
from util.Shape import ShapeLoader, PolygonPartition
from tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator import _isBaselineNotO, _isBaselineInTable,\
computePRF
from tasks.DU_Table.DU_ABPTableRCAnnotation import computeMaxRowSpan
from util.partitionEvaluation import evalPartitions
from util.jaccard import jaccard_distance
class CutAnnotator:
    """
    Annotate a PageXml page with straight horizontal/vertical "cut" separators.

    Cuts are found from the projection profile of the TextLine objects of each
    page; when groundtruth table separators are available, each cut is labelled
    "S" (matches a separator) or "O" (other).
    """
    # horizontal shrink ratio applied to text boxes before projection
    fRATIO = 0.66

    def __init__(self):
        pass

    def get_separator_YX_from_DOM(self, root, fMinPageCoverage):
        """
        Collect the coordinates of the GT table separators, page per page.

        A separator is kept only if it crosses at least fMinPageCoverage of
        the page width (horizontal) or height (vertical).
        Return [(y_list, x_list), ...] with one pair of lists per page:
        y_list contains (y1, y2) of horizontal separators,
        x_list contains (x1, x2) of vertical separators.
        """
        ltlYlX = []
        for ndPage in MultiPageXml.getChildByName(root, 'Page'):
            w, h = int(ndPage.get("imageWidth")), int(ndPage.get("imageHeight"))

            lYi, lXi = [], []

            l = MultiPageXml.getChildByName(ndPage, 'TableRegion')
            if len(l) != 1:
                if l:
                    traceln("** warning ** %d TableRegion instead of expected 1" % len(l))
                else:
                    traceln("** warning ** no TableRegion, expected 1")
            if l:
                for ndTR in l:
                    # enumerate the table separators
                    for ndSep in MultiPageXml.getChildByName(ndTR, 'SeparatorRegion'):
                        sPoints = MultiPageXml.getChildByName(ndSep, 'Coords')[0].get('points')
                        [(x1, y1), (x2, y2)] = Polygon.parsePoints(sPoints).lXY

                        dx, dy = abs(x2 - x1), abs(y2 - y1)
                        if dx > dy:
                            # horizontal table line: keep it only if long enough
                            if dx > (fMinPageCoverage * w):
                                lYi.append((y1, y2))
                        else:
                            # vertical table line
                            if dy > (fMinPageCoverage * h):
                                lXi.append((x1, x2))
            ltlYlX.append((lYi, lXi))

        return ltlYlX

    def getHisto(self, lNd, w, _fMinHorizProjection, h, _fMinVertiProjection
                 , fRatio=1.0
                 , fMinHLen=None):
        """
        Return two numpy arrays with the projection profile of the given
        nodes: first along the Y axis (horizontal projection, length h),
        second along the X axis (vertical projection, length w).

        Each text box contributes its width (resp. height) normalized by the
        page width (resp. height).
        The boxes are shrunk by fRatio before being projected; when fMinHLen
        is given, text shorter than fMinHLen is NOT shrunk horizontally.
        _fMinHorizProjection and _fMinVertiProjection are currently unused.
        """
        # NOTE: builtin float here -- np.float was deprecated in NumPy 1.20
        # and removed in NumPy 1.24 (it was a plain alias of builtin float).
        hy = np.zeros((h,), float)
        hx = np.zeros((w,), float)

        for nd in lNd:
            sPoints = MultiPageXml.getChildByName(nd, 'Coords')[0].get('points')
            try:
                x1, y1, x2, y2 = Polygon.parsePoints(sPoints).fitRectangle()
                if fMinHLen is None or abs(x2 - x1) > fMinHLen:
                    _x1, _x2 = self.scale(x1, x2, fRatio)
                else:
                    _x1, _x2 = x1, x2
                _y1, _y2 = self.scale(y1, y2, fRatio)
                hy[_y1:_y2 + 1] += float(x2 - x1) / w
                hx[_x1:_x2 + 1] += float(y2 - y1) / h
            except ZeroDivisionError:
                pass  # degenerate polygon: ignore it
            except ValueError:
                pass  # unparsable points: ignore it
        return hy, hx

    @classmethod
    def scale(cls, a, b, fRatio):
        """
        a, b are integers delimiting a segment.
        Apply the scaling factor to the segment, keeping it centred and
        making sure its length remains non-zero.
        Return 2 integers.
        """
        if fRatio == 1.0:
            return (a, b)   # the code below does it, but no need...
        l = b - a                       # signed length
        ll = int(round(l * fRatio))     # new signed length
        dl2 = (l - ll) / 2.0
        ll2a = int(round(dl2))
        ll2b = (l - ll) - ll2a
        return a + ll2a, b - ll2b

    # labels...
    def _getLabel(self, i, j, liGT):
        """
        i, j are the indices of the start and end of an interval of zeros.
        liGT is a list of pairs of pixel coordinates (GT separators).
        An interval of zeros is positive ("S") if it contains either end of
        a separator, or its middle; "O" otherwise.
        """
        for iGT, jGT in liGT:
            mGT = (iGT + jGT) // 2
            if i <= iGT and iGT <= j:
                return "S"
            elif i <= jGT and jGT <= j:
                return "S"
            elif i <= mGT and mGT <= j:
                return "S"
        return "O"

    def getCentreOfZeroAreas(self, h, liGT=None):
        """
        h is a 1D histogram (numpy array); liGT the groundtruth indices.
        Return the list of centres of the areas of consecutive 0s (or
        negative values), plus their labels ("S"/"O", or None when no GT).
        """
        lij = []    # list of (start, end) of each zero area
        i0 = None   # index of start of a 0 area, None while in non-zero area
        imax = h.shape[0]
        i = 0
        while i < imax:
            if i0 is None:  # we were in a non-zero area
                if h[i] <= 0:
                    i0 = i  # start of an area of 0s
            else:           # we were in a zero area
                if h[i] > 0:
                    # end of area of 0s
                    lij.append((i0, i - 1))
                    i0 = None
            i += 1
        if not i0 is None:
            # trailing zero area reaching the end of the histogram
            lij.append((i0, imax - 1))

        if liGT is None:
            liLbl = [None] * len(lij)
        else:
            liLbl = [self._getLabel(i, j, liGT) for (i, j) in lij]

        # take middle of each area
        li = [(j + i) // 2 for (i, j) in lij]

        return li, liLbl

    def getLowestOfZeroAreas(self, h, liGT=None):
        """
        h is a 1D histogram (numpy array); liGT the groundtruth indices.
        Return the list of lowest points of the areas of consecutive 0s
        (rightmost minimum within each area), plus their labels.
        """
        lijm = []   # list of (start, end, argmin) of each zero area
        i0 = None   # index of start of a 0 area
        imax = h.shape[0]
        i = 0
        minV, minI = None, None
        while i < imax:
            if i0 is None:  # we were in a non-zero area
                if h[i] <= 0:
                    i0 = i  # start of an area of 0s
                    minV, minI = h[i0], i0
            else:           # we were in a zero area
                if h[i] > 0:
                    # end of area of 0s
                    lijm.append((i0, i - 1, minI))
                    i0 = None
                else:
                    if h[i] <= minV:  # take rightmost minimum
                        minV, minI = h[i], i
            i += 1
        if not i0 is None:
            # trailing zero area: take leftmost minimum in it
            minV, minI = h[i0], i0
            i = i0 + 1
            while i < imax:
                if h[i] < minV:
                    minV, minI = h[i], i
                i += 1
            lijm.append((i0, imax - 1, minI))

        if liGT is None:
            liLbl = [None] * len(lijm)
        else:
            liLbl = [self._getLabel(i, j, liGT) for (i, j, _m) in lijm]

        # take the lowest point of each area
        li = [m for (_i, _j, m) in lijm]

        return li, liLbl

    def add_cut_to_DOM(self, root,
                       fMinHorizProjection=0.05,
                       fMinVertiProjection=0.05,
                       ltlYlX=None
                       , fRatio=1.0
                       , fMinHLen=None):
        """
        For each page, compute the histogram of projection of text on Y then
        X axis and, from it, find cuts (centres/lowest points of empty areas).

        fMin*Projection determine the thresholds as a percentage of width
        (resp. height) of the page: any bin lower than it is considered zero.
        ltlYlX, when given, is a list of (ltY1Y2, ltX1X2) per page (GT
        separators as returned by get_separator_YX_from_DOM) used to label
        the cuts.
        Modify the XML DOM by adding one CutSeparator node per cut, annotated
        if GT is given; return (llY, llX) the cut positions per page.
        """
        # None instead of a mutable default argument
        if ltlYlX is None:
            ltlYlX = []

        domid = 0  # to add unique separator id
        llX, llY = [], []
        for iPage, ndPage in enumerate(MultiPageXml.getChildByName(root, 'Page')):
            # missing or malformed GT entry => no labels for this page
            try:
                lYi, lXi = ltlYlX[iPage]
            except (IndexError, TypeError, ValueError):
                lYi, lXi = [], []

            w, h = int(ndPage.get("imageWidth")), int(ndPage.get("imageHeight"))

            # histograms of projections of the text lines
            lndTexLine = MultiPageXml.getChildByName(ndPage, 'TextLine')
            aYHisto, aXHisto = self.getHisto(lndTexLine,
                                             w, fMinHorizProjection,
                                             h, fMinVertiProjection
                                             , fRatio
                                             , fMinHLen=fMinHLen)

            # anything below the threshold counts as zero
            aYHisto = aYHisto - fMinHorizProjection
            aXHisto = aXHisto - fMinVertiProjection

            # find the centre of each area of 0s and its label
            lY, lYLbl = self.getCentreOfZeroAreas(aYHisto, lYi)
            # for vertical cuts, prefer the lowest point of each area
            lX, lXLbl = self.getLowestOfZeroAreas(aXHisto, lXi)

            traceln(lY)
            traceln(lX)
            traceln(" - %d horizontal cuts" % len(lY))
            traceln(" - %d vertical cuts" % len(lX))

            # horizontal grid lines
            for y, ylbl in zip(lY, lYLbl):
                domid += 1
                self.addPageXmlSeparator(ndPage, ylbl, 0, y, w, y, domid)

            # vertical grid lines
            for x, xlbl in zip(lX, lXLbl):
                domid += 1
                self.addPageXmlSeparator(ndPage, xlbl, x, 0, x, h, domid)

            llX.append(lX)
            llY.append(lY)

        return (llY, llX)

    @classmethod
    def addPageXmlSeparator(cls, nd, sLabel, x1, y1, x2, y2, domid):
        """
        Append a CutSeparator element under nd, from (x1,y1) to (x2,y2),
        with unique id "s_<domid>". Carries the GT label in @type when
        given, and @orient "0" (horizontal) or "90" (vertical).
        Return the new separator node.
        """
        ndSep = MultiPageXml.createPageXmlNode("CutSeparator")
        if not sLabel is None:
            # propagate the groundtruth info we have
            ndSep.set("type", sLabel)
        if abs(x2 - x1) > abs(y2 - y1):
            ndSep.set("orient", "0")
        else:
            ndSep.set("orient", "90")
        ndSep.set("id", "s_%d" % domid)
        nd.append(ndSep)
        ndCoord = MultiPageXml.createPageXmlNode("Coords")
        MultiPageXml.setPoints(ndCoord, [(x1, y1), (x2, y2)])
        ndSep.append(ndCoord)
        return ndSep

    def remove_cuts_from_dom(self, root):
        """
        Clean the DOM from any existing CutSeparator.
        Return the number of removed cut lines.
        """
        lnd = MultiPageXml.getChildByName(root, 'CutSeparator')
        n = len(lnd)
        for nd in lnd:
            nd.getparent().remove(nd)
        # check...
        lnd = MultiPageXml.getChildByName(root, 'CutSeparator')
        assert len(lnd) == 0
        return n

    def loadPageCol(self, ndPage, fRatio
                    , shaper_fun=ShapeLoader.node_to_Point
                    , funIndex=lambda x: x._du_index):
        """
        Load the page, looking for Baseline elements.

        Each Baseline gets a unique du_index attribute and is turned into a
        shapely object (by shaper_fun), scaled by fRatio.
        Return:
        - the list of Baseline shapely objects,
        - a dict of sets of object keys (funIndex(o)), by table column,
        - the same restricted to data rows (below the header rows),
        - the maximum header row span.
        GT BUG: some Baseline are assigned to the wrong Cell
        => Baselines outside any TableCell are also collected here.
        """
        loBaseline = []                     # list of Baseline shapes
        i = 0

        dsetTableByCol = defaultdict(set)       # sets of object ids, by col
        dsetTableDataByCol = defaultdict(set)   # sets of object ids, by col

        dNodeSeen = {}

        # first associate a unique id to each baseline and list them
        lshapeCell = []  # kept for debugging: cell polygons with _row/_col/_xmin/_xmax
        lCells = MultiPageXml.getChildByName(ndPage, "TableCell")
        maxHeaderRowSpan = computeMaxRowSpan(lCells)
        traceln(" - maxHeaderRowSpan=", maxHeaderRowSpan)
        for ndCell in lCells:
            row, col = int(ndCell.get("row")), int(ndCell.get("col"))
            rowSpan = int(ndCell.get("rowSpan"))
            plg = ShapeLoader.node_to_Polygon(ndCell)
            # xmin, xmax of the cell polygon
            lx = [_x for _x, _y in plg.exterior.coords]
            xmin, xmax = min(lx), max(lx)
            plg._row = row
            plg._col = col
            plg._xmin, plg._xmax = xmin, xmax
            lshapeCell.append(plg)

            for nd in MultiPageXml.getChildByName(ndCell, "Baseline"):
                nd.set("du_index", "%d" % i)
                ndParent = nd.getparent()
                dNodeSeen[ndParent.get('id')] = True

                # Baseline as a shapely object
                try:
                    o = shaper_fun(nd)  # make a LineString
                except Exception as e:
                    traceln("ERROR: id=", nd.getparent().get("id"))
                    raise e

                # scale the objects, as done when cutting!!
                # useless currently since we make a Point...
                o = shapely.affinity.scale(o, xfact=fRatio, yfact=fRatio)

                o._du_index = i
                o._du_nd = nd
                o._dom_id = nd.getparent().get("id")
                loBaseline.append(o)

                # NO CHECK FOR COLUMNS: we trust the cell's @col attribute
                dsetTableByCol[col].add(funIndex(o))
                # below the header rows => table data
                if (row + rowSpan) > maxHeaderRowSpan:
                    dsetTableDataByCol[col].add(funIndex(o))

                i += 1

        # and (UGLY) process all Baseline outside any TableCell...
        for nd in MultiPageXml.getChildByName(ndPage, "Baseline"):
            try:
                dNodeSeen[nd.getparent().get('id')]
            except KeyError:
                # this Baseline was not seen in any TableCell above
                nd.set("du_index", "%d" % i)

                # Baseline as a shapely object
                o = shaper_fun(nd)  # make a LineString

                # scale the objects, as done when cutting!!
                o = shapely.affinity.scale(o, xfact=fRatio)

                o._du_index = i
                o._du_nd = nd
                o._dom_id = nd.getparent().get("id")
                loBaseline.append(o)

                i += 1

        return loBaseline, dsetTableByCol, dsetTableDataByCol, maxHeaderRowSpan
class NoSeparatorException(Exception):
    """Raised when a page provides no groundtruth table separator."""
class BaselineCutAnnotator(CutAnnotator):
    """
    Much simpler approach:
    - a block is defined by its baseline.
    - the baseline of each block defines a possible cut
    - a parameter defines if the corresponding block is above or below the cut
    - so a cut defines a partition of the page blocks

    We use the table annotation to determine the baseline that is on top
    or bottom of each table line (or column).
    """
    bSIO = False  # by default, we use S/O as labels (S/I/O when True)

    def __init__(self, bCutIsBeforeText=True):
        CutAnnotator.__init__(self)
        # True  => a cut is placed just before (above/left of) the text
        # False => a cut is placed just after (below/right of) the text
        self.bCutIsBeforeText = bCutIsBeforeText

    @classmethod
    def setLabelScheme_SIO(cls):
        """Switch from the S/O labelling scheme to S/I/O (class-wide)."""
        cls.bSIO = True
        return True

    @classmethod
    def getDomBaselineXY(cls, domNode):
        """
        Find the Baseline descendant node of domNode and return its
        "central" (weighted-average) point as (x, y).
        Raise IndexError when there is no Baseline child.
        """
        try:
            ndBaseline = MultiPageXml.getChildByName(domNode, 'Baseline')[0]
        except IndexError as e:
            traceln("WARNING: No Baseline child in ", domNode.get('id'))
            raise e
        x, y = cls.getPolylineAverageXY(ndBaseline)
        # modulo should be done only after the GT assigns labels.
        return (x, y)

    @classmethod
    def getPolylineAverageXY(cls, ndPolyline):
        """
        Weighted average X and average Y of a polyline.
        The weight indicates for how long the polyline stayed at a given X,
        or Y (i.e. each segment's X is weighted by its |dy|, its Y by |dx|).
        Return integer (Xavg, Yavg); (0, 0) components when degenerate.
        """
        sPoints = ndPolyline.get('points')
        lXY = Polygon.parsePoints(sPoints).lXY

        # list of X and Y values and respective weights
        lXYWxWy = [((x1 + x2) / 2.0, abs(y2 - y1),  # for how long at this X?
                    (y1 + y2) / 2.0, abs(x2 - x1)) \
                   for (x1, y1), (x2, y2) in zip(lXY, lXY[1:])]
        fWeightedSumX = sum(x * wx for x, wx, _, _ in lXYWxWy)
        fWeightedSumY = sum(y * wy for _, _, y, wy in lXYWxWy)
        fSumWeightX = sum(wx for _, wx, _, _ in lXYWxWy)
        fSumWeightY = sum(wy for _, _, _, wy in lXYWxWy)

        Xavg = int(round(fWeightedSumX / fSumWeightX)) if fSumWeightX > 0 else 0
        Yavg = int(round(fWeightedSumY / fSumWeightY)) if fSumWeightY > 0 else 0
        return (Xavg, Yavg)

    def _getLabelFromSeparator(self, ltXY, tlYlX, w, h):
        """
        ltXY is the list of (X, Y) of the "central" point of each baseline.
        tlYlX are the coordinates of the GT separators:
            ltY1Y2 is the list of (Y1, Y2) of horizontal separators,
            ltX1X2 is the list of (X1, X2) of vertical separators.
        w, h are the page width and height.

        If self.bCutIsBeforeText is True, we look for the highest baseline
        below or on each separator (which is possibly not horizontal);
        if False, for the lowest baseline above or on each separator.
        Same idea for vertical separators.

        Raise NoSeparatorException when there is no GT separator at all.
        Return lY, lYLbl, lX, lXLbl (sorted positions and their labels).
        """
        ltY1Y2, ltX1X2 = tlYlX

        # rough horizontal and vertical bounds of the table
        try:
            ymin = operator.add(*min(ltY1Y2)) / 2.0  # ~~ (miny1+miny2)/2.0
            ymax = operator.add(*max(ltY1Y2)) / 2.0
            xmin = operator.add(*min(ltX1X2)) / 2.0
            xmax = operator.add(*max(ltX1X2)) / 2.0
        except ValueError:
            raise NoSeparatorException("No groundtruth")

        # find best baseline for each horizontal table separator
        setBestY = set()
        for (y1, y2) in ltY1Y2:
            bestY = 999999 if self.bCutIsBeforeText else -1
            bFound = False
            for x, y in ltXY:
                if x < xmin or xmax < x:  # text outside table, ignore it
                    continue
                # y of the (possibly skewed) separator at x
                ysep = int(round(y1 + float(y2 - y1) * x / w))
                if self.bCutIsBeforeText:
                    if ysep <= y and y < bestY and y < ymax:
                        # separator is above and baseline is above all others
                        bestY, bFound = y, True
                else:
                    if ysep >= y and y > bestY and y > ymin:
                        bestY, bFound = y, True
            if bFound:
                setBestY.add(bestY)

        # find best baseline for each vertical table separator
        setBestX = set()
        for (x1, x2) in ltX1X2:
            bestX = 999999 if self.bCutIsBeforeText else -1
            bFound = False
            for x, y in ltXY:
                if y < ymin or ymax < y:  # text outside table, ignore it
                    continue
                # x of separator at Y
                # NOTE(review): interpolates with x/h, not y/h -- looks like a
                # copy-paste from the horizontal case; kept as-is to preserve
                # the historical labelling. TODO confirm intended behaviour.
                xsep = int(round(x1 + float(x2 - x1) * x / h))
                if self.bCutIsBeforeText:
                    if xsep <= x and x < bestX and x < xmax:
                        # separator is left and baseline is left of all others
                        bestX, bFound = x, True
                else:
                    if xsep >= x and x > bestX and x > xmin:
                        bestX, bFound = x, True
            if bFound:
                setBestX.add(bestX)

        # zero or one cut given a position
        lY = list(set(y for _, y in ltXY))  # zero or 1 cut per Y
        lY.sort()
        lX = list(set(x for x, _ in ltXY))  # zero or 1 cut per X
        lX.sort()

        if self.bSIO:
            # O*, S, (S|I)*, O*
            if setBestY:
                lYLbl = [("S" if y in setBestY \
                          else ("I" if ymin <= y and y <= ymax else "O")) \
                         for y in lY]
            else:
                lYLbl = ["O"] * len(lY)  # should never happen...
            if setBestX:
                lXLbl = [("S" if x in setBestX \
                          else ("I" if xmin <= x and x <= xmax else "O")) \
                         for x in lX]
            else:
                lXLbl = ["O"] * len(lX)  # should never happen...
        else:
            # annotate the best baseline-based separator
            lYLbl = [("S" if y in setBestY else "O") for y in lY]
            lXLbl = [("S" if x in setBestX else "O") for x in lX]

        return lY, lYLbl, lX, lXLbl

    def add_cut_to_DOM(self, root, ltlYlX=None):
        """
        For each page:
        - compute the "central" point of each block's baseline;
        - the sorted distinct Ys (and Xs) define the cuts.

        ltlYlX, when given, is a list of (ltY1Y2, ltX1X2) per page (GT
        separators) used to label the cuts; otherwise labels are None.
        Modify the XML DOM by adding one CutSeparator per cut under the
        first TableRegion; return the list of ([Y...], [X...]) per page.
        """
        # None instead of a mutable default argument
        if ltlYlX is None:
            ltlYlX = []

        domid = 0  # to add unique separator id

        ltlYCutXCut = []
        for iPage, ndPage in enumerate(MultiPageXml.getChildByName(root, 'Page')):
            w, h = int(ndPage.get("imageWidth")), int(ndPage.get("imageHeight"))

            # list of (X, Y) of the baselines of the page
            ltXY = []
            lndTexLine = MultiPageXml.getChildByName(ndPage, 'TextLine')
            for ndBlock in lndTexLine:
                try:
                    ltXY.append(self.getDomBaselineXY(ndBlock))
                except:
                    pass  # block without a usable Baseline: skip it

            # label the cuts from the GT separators, if any
            try:
                lY, lYLbl, lX, lXLbl = self._getLabelFromSeparator(ltXY,
                                                                   ltlYlX[iPage], w, h)
            except NoSeparatorException:
                lX = list(set(x for x, _ in ltXY))  # zero or 1 cut per X
                lY = list(set(y for _, y in ltXY))  # zero or 1 cut per Y
                lX.sort()  # to have a nice XML
                lY.sort()
                lXLbl = [None] * len(lX)
                lYLbl = [None] * len(lY)

            ndTR = MultiPageXml.getChildByName(root, 'TableRegion')[0]

            # horizontal grid lines
            for y, ylbl in zip(lY, lYLbl):
                domid += 1
                self.addPageXmlSeparator(ndTR, ylbl, 0, y, w, y, domid)
            # BUGFIX: report the number of horizontal cuts actually added
            # (was len(lX), i.e. the vertical count, and vice versa below)
            traceln(" - added %d horizontal cuts" % len(lY))

            # vertical grid lines
            for x, xlbl in zip(lX, lXLbl):
                domid += 1
                self.addPageXmlSeparator(ndTR, xlbl, x, 0, x, h, domid)
            traceln(" - added %d vertical cuts" % len(lX))

            ltlYCutXCut.append(([y for _, y in ltXY],
                                [x for x, _ in ltXY]))

        return ltlYCutXCut
# ------------------------------------------------------------------
def main(sFilename, sOutFilename, fMinHorizProjection=0.05, fMinVertiProjection=0.05
         , bBaselineFirst=False
         , bBaselineLast=False
         , bSIO=False):
    """
    Annotate one PageXml file with cut separators and write the result.

    Depending on the flags, cuts come either from the baselines
    (bBaselineFirst / bBaselineLast, optionally with S/I/O labels when
    bSIO is set) or from the projection-profile CutAnnotator.
    """
    print("- cutting: %s --> %s"%(sFilename, sOutFilename))

    # Some grid line will be O or I simply because they are too short:
    # minimum proportion of the page crossed by a grid line.
    # We want to ignore col- and row- spans.
    fMinPageCoverage = 0.5

    # remove_blank_text lets the pretty printer format better
    xml_parser = etree.XMLParser(remove_blank_text=True)
    xml_doc = etree.parse(sFilename, xml_parser)
    nd_root = xml_doc.getroot()

    bBaselineCut = bBaselineFirst or bBaselineLast
    if bBaselineCut:
        # cut before the text when bBaselineFirst, after it otherwise
        doer = BaselineCutAnnotator(bCutIsBeforeText=bBaselineFirst)
        if bSIO:
            doer.setLabelScheme_SIO()
    else:
        doer = CutAnnotator()
    print("doer=%s"%doer)

    # map the groundtruth table separators to our grid, per page (1 in tABP)
    ltlYlX = doer.get_separator_YX_from_DOM(nd_root, fMinPageCoverage)

    # find cuts and map them to GT
    if bBaselineCut:
        doer.add_cut_to_DOM(nd_root, ltlYlX=ltlYlX)
    else:
        doer.add_cut_to_DOM(nd_root, ltlYlX=ltlYlX,
                            fMinHorizProjection=fMinHorizProjection,
                            fMinVertiProjection=fMinVertiProjection,)

    xml_doc.write(sOutFilename, encoding='utf-8',pretty_print=True,xml_declaration=True)
    print('Annotated cut separators added into %s'%sOutFilename)
global_maxHeaderRowSpan = None
def _isBaselineInTableData(nd):
"""
a Baseline in a TableRegion belongs to a TableCell element
"""
global global_maxHeaderRowSpan
v = nd.getparent().getparent().get("row")
if v is None:
return False
else:
return int(v) >= global_maxHeaderRowSpan
def get_col_partition(doer, sxpCut, dNS
                        , sFilename, lFilterFun
                        , fRatio
                        , bVerbose=False
                        , funIndex=lambda x: x._du_index
                        ):
    """
    Load sFilename (a single-page PageXml file) and compute column partitions.

    doer       -- a CutAnnotator (provides loadPageCol)
    sxpCut     -- XPath selecting the cut separator nodes; dNS its namespaces
    lFilterFun -- predicates on a Baseline DOM node; one run partition is
                  produced per predicate
    fRatio     -- scaling ratio applied to the baselines when loading
    funIndex   -- key function identifying each baseline object in the sets

    Side effect: sets the module-level global_maxHeaderRowSpan.
    Return (lsetGT, lsetDataGT, llsetRun):
    - the GT partition in columns (list of sets of object keys),
    - the same restricted to data rows,
    - one cut-induced partition per filter function.
    """
    global global_maxHeaderRowSpan
    if bVerbose: traceln("- loading %s"%sFilename)
    parser = etree.XMLParser()
    doc = etree.parse(sFilename, parser)
    root=doc.getroot()
    llsetRun = []
    pnum = 0
    lndPage = MultiPageXml.getChildByName(root, 'Page')
    assert len(lndPage) == 1, "NOT SUPPORTED: file has many pages - soorry"
    for ndPage in lndPage:
        pnum += 1
        if bVerbose: traceln("   - page %s - loading table GT" % pnum)
        # loadPageCol also computes the header row span (kept in a global
        # for use by the _isBaselineInTableData filter)
        loBaseline, dsetTableByCol, dsetTableDataByCol, global_maxHeaderRowSpan = doer.loadPageCol(ndPage, fRatio
                                                        , funIndex=funIndex)
        if bVerbose: traceln("   - found %d objects on page" % (len(loBaseline)))
        # make a dictionary of cumulative sets, and the set of all objects
        lTableColK = sorted(dsetTableByCol.keys())
        lTableDataColK = sorted(dsetTableDataByCol.keys())
        if bVerbose:
            traceln("   - found %d cols" % (len(lTableColK)))
            traceln("   - found %d objects in the table" % (sum(len(v) for v in dsetTableByCol.values())))
            traceln("   - found %d objects in the table data" % (sum(len(v) for v in dsetTableDataByCol.values())))
        lNdCut = ndPage.xpath(sxpCut, namespaces=dNS)
        if bVerbose:
            traceln("   - found %d cuts" % (len(lNdCut)))
        else:
            traceln("- loaded %40s " % sFilename
                    , " %6d cols %6d 'S' cuts" % (  len(lTableColK)
                                                  , len(lNdCut))
                    , " %6d objects %6d table objects" % (
                          len(loBaseline)
                        , sum(len(v) for v in dsetTableByCol.values())
                        )
                    )
        loCut = []
        for ndCut in lNdCut:
            #now we need to infer the bounding box of that object
            (x1, y1), (x2, y2) = PageXml.getPointList(ndCut)  #the polygon
            # Create the shapely shape
            loCut.append(geom.LineString([(x1, y1), (x2, y2)]))
        w,h = float(ndPage.get("imageWidth")), float(ndPage.get("imageHeight"))
        # # Add a fictive cut at top of page
        # loCut.append(geom.LineString([(0, 0), (w, 0)]))
        # # Add a fictive cut at end of page
        # loCut.append(geom.LineString([(0, h), (w, h)]))
        # order the cuts by line centroid x
        loCut.sort(key=lambda o: o.centroid.x)
        # dcumset is the GT!!
        lsetGT = [dsetTableByCol[k] for k in lTableColK] # list of set of du_index
        lsetDataGT = [dsetTableDataByCol[k] for k in lTableDataColK]
        # NOW, look at predictions
        for filterFun in lFilterFun:
            # keep only the baselines this predicate places in the table
            loBaselineInTable = [o for o in loBaseline if filterFun(o._du_nd)]
            if bVerbose: traceln("  - %d objects on page predicted in table (%d out)" % (
                len(loBaselineInTable)
                , len(loBaseline) - len(loBaselineInTable)))
            # Now create the list of partitions created by the Cuts
            lsetRun = []
            partition = PolygonPartition(loBaselineInTable)
            if True: # or bCutOnLeft:
                #cut if above the text that led to its creation
                setAllPrevIds = set([]) # cumulative set of what was already taken
                # sweep the cuts left to right: the objects that drop out of
                # the right-hand side between two cuts form one column
                for oCut in loCut:
                    lo = partition.getObjectOnRightOfLine(oCut)
                    setIds = set(funIndex(o) for o in lo)
                    #print(oCut.centroid.x, setIds)
                    if setAllPrevIds:
                        prevColIds = setAllPrevIds.difference(setIds) # content of previous row
                        if prevColIds:
                            #an empty set is denoting alternative cuts leading to same partition
                            lsetRun.append(prevColIds)
                    setAllPrevIds = setIds
            else:
                assert False, "look at this code..."
#                 #cut if below the text that led to its creation
#                 cumSetIds = set([]) # cumulative set
#                 for oCut in loCut:
#                     lo = partition.getObjectAboveLine(oCut)
#                     setIds = set(o._du_index for o in lo)
#                     rowIds = setIds.difference(cumSetIds) # only last row!
#                     if rowIds:
#                         #an empty set is denoting alternative cuts leading to same partition
#                         lsetRun.append(rowIds)
#                     cumSetIds = setIds
#             _debugPartition("run", lsetRun)
#             _debugPartition("ref", lsetGT)
            llsetRun.append(lsetRun)
    return lsetGT, lsetDataGT, llsetRun
def op_eval_col(lsFilename, fSimil, fRatio, bVerbose=False):
    """
    Evaluate predicted column cuts against the table ground truth.

    We load the XML
    - get the CutSeparator elements
    - get the text objects (geometry=Baseline)
    - partition the baselines by the cuts and score the partitions against
      the GT column partitions at one or several similarity thresholds.

    Three evaluations are accumulated in parallel, differing only in which
    baselines are kept: all non-'O' baselines, baselines inside the table,
    and baselines inside the table data (header excluded).

    Returns the (nOk, nErr, nMiss) counts of the last evaluated
    configuration/threshold (mainly useful when a single fSimil is given).
    """
    global global_maxHeaderRowSpan
    nOk, nErr, nMiss = 0, 0, 0
    if fSimil is None:
        # no threshold given: sweep a range of similarity thresholds
        #lfSimil = [ i / 100 for i in range(75, 101, 5)]
        lfSimil = [ i / 100 for i in range(70, 101, 10)]
    else:
        lfSimil = [fSimil]
    # each dict maps a similarity threshold to its cumulative (ok, err, miss)
    # we use only BIO + separators
    dOkErrMissOnlyCol = { fSimil:(0,0,0) for fSimil in lfSimil }
    dOkErrMissOnlyCol.update({'name':'OnlyCol'
                              , 'FilterFun':_isBaselineNotO})
    # we use the TableRegion + separators
    dOkErrMissTableCol = { fSimil:(0,0,0) for fSimil in lfSimil }
    dOkErrMissTableCol.update({'name':'TableCol'
                               , 'FilterFun':_isBaselineInTable})
    # we use the TableRegion excluding the header + separators
    dOkErrMissTableDataCol = { fSimil:(0,0,0) for fSimil in lfSimil }
    dOkErrMissTableDataCol.update({'name':'TableDataCol'
                                   , 'FilterFun':_isBaselineInTableData})
    ldOkErrMiss = [dOkErrMissOnlyCol, dOkErrMissTableCol, dOkErrMissTableDataCol]
    lFilterFun = [d['FilterFun'] for d in ldOkErrMiss]
    # sxpCut = './/pc:CutSeparator[@orient="0" and @DU_type="S"]' #how to find the cuts
    sxpCut = './/pc:CutSeparator[@orient="90"]' #how to find the cuts
    dNS = {"pc":PageXml.NS_PAGE_XML}
    doer = CutAnnotator()
    traceln(" - Cut selector = ", sxpCut)
    # load objects: Baseline and Cuts
    for n, sFilename in enumerate(lsFilename):
        # llsetRun has one partition list per filter function, same order as ldOkErrMiss
        lsetGT, lsetDataGT, llsetRun = get_col_partition(doer, sxpCut, dNS
                       , sFilename, lFilterFun
                       , fRatio
                       , bVerbose=False
                       , funIndex=lambda x: x._du_index # simpler to view
                       # , funIndex=lambda x: x._dom_id # more precise
                       )
        pnum = 1 # only support single-page file...
        for dOkErrMiss, lsetRun in zip(ldOkErrMiss, llsetRun):
            if dOkErrMiss['name'] == "TableDataCol":
                # we need to filter also the GT to discard the header from the column
                _lsetGT = lsetDataGT
            else:
                _lsetGT = lsetGT
            if bVerbose:
                traceln("----- RUN ----- ")
                for s in lsetRun: traceln("run ", sorted(s))
                traceln("----- REF ----- ")
                for s in _lsetGT: traceln("ref ", sorted(s))
            for fSimil in lfSimil:
                # accumulate this page's counts into the running totals
                nOk, nErr, nMiss = dOkErrMiss[fSimil]
                _nOk, _nErr, _nMiss, _lFound, _lErr, _lMissed = evalPartitions(lsetRun, _lsetGT, fSimil, jaccard_distance)
                nOk += _nOk
                nErr += _nErr
                nMiss += _nMiss
                if bVerbose or fSimil == 1.0:
                    # NOTE: P/R/F computed on this page only (per-page report line)
                    _fP, _fR, _fF = computePRF(_nOk, _nErr, _nMiss)
                    traceln("%4d %8s simil:%.2f P %5.1f R %5.1f F1 %5.1f ok=%6d err=%6d miss=%6d %s page=%d" %(
                        n+1, dOkErrMiss['name'], fSimil
                        , _fP, _fR, _fF
                        , _nOk, _nErr, _nMiss
                        , os.path.basename(sFilename), pnum))
                dOkErrMiss[fSimil] = (nOk, nErr, nMiss)
    # final summary: global P/R/F per configuration and per threshold
    for dOkErrMiss in [dOkErrMissOnlyCol, dOkErrMissTableCol, dOkErrMissTableDataCol]:
        traceln()
        name = dOkErrMiss['name']
        for fSimil in lfSimil:
            nOk, nErr, nMiss = dOkErrMiss[fSimil]
            fP, fR, fF = computePRF(nOk, nErr, nMiss)
            traceln("ALL %8s simil:%.2f P %5.1f R %5.1f F1 %5.1f " % (name, fSimil, fP, fR, fF )
                    , " "
                    ,"ok=%d err=%d miss=%d" %(nOk, nErr, nMiss))
    return (nOk, nErr, nMiss)
def test_scale():
    """Unit checks for CutAnnotator.scale: identity ratio and 0.5 shrink."""
    assert CutAnnotator.scale(1, 3, 1.0) == (1, 3)
    assert CutAnnotator.scale(3, 1, 1.0) == (3, 1)

    def check_sym(a, b, r, aa, bb):
        # scale must give the mirrored result when the endpoints are swapped
        assert CutAnnotator.scale(a, b, r) == (aa, bb), (a, b, r, aa, bb)
        assert CutAnnotator.scale(b, a, r) == (bb, aa), (b, a, r, bb, aa)

    check_sym(1, 2, 1.0, 1, 2)
    check_sym(1, 1, 1.0, 1, 1)
    check_sym(1, 10, 1.0, 1, 10)
    assert CutAnnotator.scale(0, 10, 0.5) == (2, 7)
    assert CutAnnotator.scale(10, 0, 0.5) == (8, 3)
    assert CutAnnotator.scale(-0, -10, 0.5) == (-2, -7)
    assert CutAnnotator.scale(-10, -0, 0.5) == (-8, -3)
    assert CutAnnotator.scale(1, 1, 0.33) == (1, 1)
# ------------------------------------------------------------------
if __name__ == "__main__":
    # Command line: input.mpxml [output.mpxml [fMinH [fMinV]]]
    usage = ""
    parser = OptionParser(usage=usage, version="0.1")
    parser.add_option("--baseline_first", dest='bBaselineFirst', action="store_true",
                      help="Cut based on first baeline of row or column")
    parser.add_option("--SIO", dest='bSIO', action="store_true", help="SIO labels")
    # ---
    #parse the command line
    (options, args) = parser.parse_args()
    #load mpxml
    sFilename = args[0]
    try:
        sOutFilename = args[1]
    except IndexError:
        # default output name: "cut-" prefix next to the input file
        sp, sf = os.path.split(sFilename)
        sOutFilename = os.path.join(sp, "cut-" + sf)
    try:
        # optional minimal horizontal threshold (None if absent or unparsable)
        fMinH = float(args[2])
    except (IndexError, ValueError):
        fMinH = None
    if fMinH is None:
        main(sFilename, sOutFilename, bBaselineFirst=options.bBaselineFirst, bSIO=options.bSIO)
    else:
        # fMinH and fMinV must be given together (specify none or both).
        # Bug fix: fMinV is the 4th positional argument, i.e. args[3] (was args[4],
        # which always raised IndexError when exactly 4 arguments were given).
        fMinV = float(args[3])
        main(sFilename, sOutFilename, fMinH, fMinV,
             bBaselineFirst=options.bBaselineFirst, bSIO=options.bSIO)
| [
"xml_formats.PageXml.MultiPageXml.createPageXmlNode",
"util.Polygon.Polygon.parsePoints",
"os.path.split",
"util.partitionEvaluation.evalPartitions",
"xml_formats.PageXml.PageXml.getPointList",
"xml_formats.PageXml.MultiPageXml.getChildByName",
"xml_formats.PageXml.MultiPageXml.setPoints",
"shapely.ge... | [((29376, 29415), 'lxml.etree.XMLParser', 'etree.XMLParser', ([], {'remove_blank_text': '(True)'}), '(remove_blank_text=True)\n', (29391, 29415), False, 'from lxml import etree\n'), ((29426, 29456), 'lxml.etree.parse', 'etree.parse', (['sFilename', 'parser'], {}), '(sFilename, parser)\n', (29437, 29456), False, 'from lxml import etree\n'), ((31268, 31285), 'lxml.etree.XMLParser', 'etree.XMLParser', ([], {}), '()\n', (31283, 31285), False, 'from lxml import etree\n'), ((31296, 31326), 'lxml.etree.parse', 'etree.parse', (['sFilename', 'parser'], {}), '(sFilename, parser)\n', (31307, 31326), False, 'from lxml import etree\n'), ((31401, 31442), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['root', '"""Page"""'], {}), "(root, 'Page')\n", (31428, 31442), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((37666, 37703), 'common.trace.traceln', 'traceln', (['""" - Cut selector = """', 'sxpCut'], {}), "(' - Cut selector = ', sxpCut)\n", (37673, 37703), False, 'from common.trace import traceln\n'), ((40855, 40895), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': '"""0.1"""'}), "(usage=usage, version='0.1')\n", (40867, 40895), False, 'from optparse import OptionParser\n'), ((1698, 1739), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['root', '"""Page"""'], {}), "(root, 'Page')\n", (1725, 1739), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((3624, 3648), 'numpy.zeros', 'np.zeros', (['(h,)', 'np.float'], {}), '((h,), np.float)\n', (3632, 3648), True, 'import numpy as np\n'), ((3662, 3686), 'numpy.zeros', 'np.zeros', (['(w,)', 'np.float'], {}), '((w,), np.float)\n', (3670, 3686), True, 'import numpy as np\n'), ((11142, 11188), 'xml_formats.PageXml.MultiPageXml.createPageXmlNode', 'MultiPageXml.createPageXmlNode', (['"""CutSeparator"""'], {}), "('CutSeparator')\n", (11172, 11188), False, 'from xml_formats.PageXml import 
MultiPageXml, PageXml\n'), ((11517, 11557), 'xml_formats.PageXml.MultiPageXml.createPageXmlNode', 'MultiPageXml.createPageXmlNode', (['"""Coords"""'], {}), "('Coords')\n", (11547, 11557), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((11566, 11619), 'xml_formats.PageXml.MultiPageXml.setPoints', 'MultiPageXml.setPoints', (['ndCoord', '[(x1, y1), (x2, y2)]'], {}), '(ndCoord, [(x1, y1), (x2, y2)])\n', (11588, 11619), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((11864, 11913), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['root', '"""CutSeparator"""'], {}), "(root, 'CutSeparator')\n", (11891, 11913), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((12027, 12076), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['root', '"""CutSeparator"""'], {}), "(root, 'CutSeparator')\n", (12054, 12076), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((12728, 12744), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (12739, 12744), False, 'from collections import defaultdict\n'), ((12803, 12819), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (12814, 12819), False, 'from collections import defaultdict\n'), ((13050, 13098), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['ndPage', '"""TableCell"""'], {}), "(ndPage, 'TableCell')\n", (13077, 13098), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((13126, 13151), 'tasks.DU_Table.DU_ABPTableRCAnnotation.computeMaxRowSpan', 'computeMaxRowSpan', (['lCells'], {}), '(lCells)\n', (13143, 13151), False, 'from tasks.DU_Table.DU_ABPTableRCAnnotation import computeMaxRowSpan\n'), ((13160, 13211), 'common.trace.traceln', 'traceln', (['""" - maxHeaderRowSpan="""', 'maxHeaderRowSpan'], {}), "(' - maxHeaderRowSpan=', maxHeaderRowSpan)\n", (13167, 13211), False, 'from common.trace import traceln\n'), 
((16520, 16567), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['ndPage', '"""Baseline"""'], {}), "(ndPage, 'Baseline')\n", (16547, 16567), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((31221, 31256), 'common.trace.traceln', 'traceln', (["('- loading %s' % sFilename)"], {}), "('- loading %s' % sFilename)\n", (31228, 31256), False, 'from common.trace import traceln\n'), ((39630, 39639), 'common.trace.traceln', 'traceln', ([], {}), '()\n', (39637, 39639), False, 'from common.trace import traceln\n'), ((1874, 1924), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['ndPage', '"""TableRegion"""'], {}), "(ndPage, 'TableRegion')\n", (1901, 1924), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((9192, 9233), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['root', '"""Page"""'], {}), "(root, 'Page')\n", (9219, 9233), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((9541, 9588), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['ndPage', '"""TextLine"""'], {}), "(ndPage, 'TextLine')\n", (9568, 9588), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((10303, 10314), 'common.trace.traceln', 'traceln', (['lY'], {}), '(lY)\n', (10310, 10314), False, 'from common.trace import traceln\n'), ((10327, 10338), 'common.trace.traceln', 'traceln', (['lX'], {}), '(lX)\n', (10334, 10338), False, 'from common.trace import traceln\n'), ((13379, 13414), 'util.Shape.ShapeLoader.node_to_Polygon', 'ShapeLoader.node_to_Polygon', (['ndCell'], {}), '(ndCell)\n', (13406, 13414), False, 'from util.Shape import ShapeLoader, PolygonPartition\n'), ((13718, 13765), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['ndCell', '"""Baseline"""'], {}), "(ndCell, 'Baseline')\n", (13745, 13765), False, 'from xml_formats.PageXml import MultiPageXml, 
PageXml\n'), ((19294, 19322), 'util.Polygon.Polygon.parsePoints', 'Polygon.parsePoints', (['sPoints'], {}), '(sPoints)\n', (19313, 19322), False, 'from util.Polygon import Polygon\n'), ((26860, 26901), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['root', '"""Page"""'], {}), "(root, 'Page')\n", (26887, 26901), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((27130, 27177), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['ndPage', '"""TextLine"""'], {}), "(ndPage, 'TextLine')\n", (27157, 27177), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((31586, 31635), 'common.trace.traceln', 'traceln', (["(' - page %s - loading table GT' % pnum)"], {}), "(' - page %s - loading table GT' % pnum)\n", (31593, 31635), False, 'from common.trace import traceln\n'), ((33262, 33289), 'xml_formats.PageXml.PageXml.getPointList', 'PageXml.getPointList', (['ndCut'], {}), '(ndCut)\n', (33282, 33289), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((34502, 34537), 'util.Shape.PolygonPartition', 'PolygonPartition', (['loBaselineInTable'], {}), '(loBaselineInTable)\n', (34518, 34537), False, 'from util.Shape import ShapeLoader, PolygonPartition\n'), ((39780, 39808), 'tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator.computePRF', 'computePRF', (['nOk', 'nErr', 'nMiss'], {}), '(nOk, nErr, nMiss)\n', (39790, 39808), False, 'from tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator import _isBaselineNotO, _isBaselineInTable, computePRF\n'), ((39821, 39972), 'common.trace.traceln', 'traceln', (["('ALL %8s simil:%.2f P %5.1f R %5.1f F1 %5.1f ' % (name, fSimil, fP, fR,\n fF))", '""" """', "('ok=%d err=%d miss=%d' % (nOk, nErr, nMiss))"], {}), "('ALL %8s simil:%.2f P %5.1f R %5.1f F1 %5.1f ' % (name, fSimil,\n fP, fR, fF), ' ', 'ok=%d err=%d miss=%d' % (nOk, nErr, nMiss))\n", (39828, 39972), False, 'from common.trace import traceln\n'), ((41340, 41364), 'os.path.split', 
'os.path.split', (['sFilename'], {}), '(sFilename)\n', (41353, 41364), False, 'import sys, os\n'), ((41388, 41417), 'os.path.join', 'os.path.join', (['sp', "('cut-' + sf)"], {}), "(sp, 'cut-' + sf)\n", (41400, 41417), False, 'import sys, os\n'), ((18695, 18743), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['domNode', '"""Baseline"""'], {}), "(domNode, 'Baseline')\n", (18722, 18743), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((28067, 28115), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['root', '"""TableRegion"""'], {}), "(root, 'TableRegion')\n", (28094, 28115), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((33368, 33405), 'shapely.geometry.LineString', 'geom.LineString', (['[(x1, y1), (x2, y2)]'], {}), '([(x1, y1), (x2, y2)])\n', (33383, 33405), True, 'import shapely.geometry as geom\n'), ((38535, 38562), 'common.trace.traceln', 'traceln', (['"""----- RUN ----- """'], {}), "('----- RUN ----- ')\n", (38542, 38562), False, 'from common.trace import traceln\n'), ((38640, 38667), 'common.trace.traceln', 'traceln', (['"""----- REF ----- """'], {}), "('----- REF ----- ')\n", (38647, 38667), False, 'from common.trace import traceln\n'), ((38882, 38940), 'util.partitionEvaluation.evalPartitions', 'evalPartitions', (['lsetRun', '_lsetGT', 'fSimil', 'jaccard_distance'], {}), '(lsetRun, _lsetGT, fSimil, jaccard_distance)\n', (38896, 38940), False, 'from util.partitionEvaluation import evalPartitions\n'), ((738, 766), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (753, 766), False, 'import sys, os\n'), ((2107, 2158), 'common.trace.traceln', 'traceln', (['"""** warning ** no TableRegion, expected 1"""'], {}), "('** warning ** no TableRegion, expected 1')\n", (2114, 2158), False, 'from common.trace import traceln\n'), ((2293, 2345), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['ndTR', 
'"""SeparatorRegion"""'], {}), "(ndTR, 'SeparatorRegion')\n", (2320, 2345), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((39116, 39147), 'tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator.computePRF', 'computePRF', (['_nOk', '_nErr', '_nMiss'], {}), '(_nOk, _nErr, _nMiss)\n', (39126, 39147), False, 'from tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator import _isBaselineNotO, _isBaselineInTable, computePRF\n'), ((3739, 3780), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['nd', '"""Coords"""'], {}), "(nd, 'Coords')\n", (3766, 3780), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n'), ((3844, 3872), 'util.Polygon.Polygon.parsePoints', 'Polygon.parsePoints', (['sPoints'], {}), '(sPoints)\n', (3863, 3872), False, 'from util.Polygon import Polygon\n'), ((2483, 2511), 'util.Polygon.Polygon.parsePoints', 'Polygon.parsePoints', (['sPoints'], {}), '(sPoints)\n', (2502, 2511), False, 'from util.Polygon import Polygon\n'), ((39438, 39465), 'os.path.basename', 'os.path.basename', (['sFilename'], {}), '(sFilename)\n', (39454, 39465), False, 'import sys, os\n'), ((2378, 2422), 'xml_formats.PageXml.MultiPageXml.getChildByName', 'MultiPageXml.getChildByName', (['ndSep', '"""Coords"""'], {}), "(ndSep, 'Coords')\n", (2405, 2422), False, 'from xml_formats.PageXml import MultiPageXml, PageXml\n')] |
import json
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
def _load_runs(file_path):
    """Load and echo the list of run dicts stored in a losses_and_nfes.json file."""
    with open(file_path) as f:
        d = json.load(f)
        print(d)
    return d


def _histories(run):
    """Return (accuracy, nfe, loss) per-epoch history arrays for one run dict."""
    return (np.array(run['epoch_accuracy_history']),
            np.array(run['epoch_total_nfe_history']),
            np.array(run['epoch_loss_history']))


mainPath = Path('/yourpathhere/')
# one result folder per number of augmented dimensions p
folderPath5 = mainPath.joinpath('MNIST_5Dim')
folderPath50 = mainPath.joinpath('MNIST_50Dim')
folderPath75 = mainPath.joinpath('MNIST_75Dim')
folderPath100 = mainPath.joinpath('MNIST_100Dim')
filePath5 = folderPath5.joinpath('losses_and_nfes.json')
filePath50 = folderPath50.joinpath('losses_and_nfes.json')
filePath75 = folderPath75.joinpath('losses_and_nfes.json')
filePath100 = folderPath100.joinpath('losses_and_nfes.json')

# The 5-dim file holds both runs: ANODE at index 0 and NODE at index 1;
# the other files are only read for their ANODE run.
d5 = _load_runs(filePath5)
accuracyAnode5, nfeAnode5, lossAnode5 = _histories(d5[0])
accuracyNode, nfeNode, lossNode = _histories(d5[1])
accuracyAnode50, nfeAnode50, lossAnode50 = _histories(_load_runs(filePath50)[0])
accuracyAnode75, nfeAnode75, lossAnode75 = _histories(_load_runs(filePath75)[0])
accuracyAnode100, nfeAnode100, lossAnode100 = _histories(_load_runs(filePath100)[0])

epochs = np.arange(1, len(np.squeeze(accuracyAnode5)) + 1)

# recreating figures from the json files saved by the experimental runs:
# ANODE vs NODE with 5 augmented dimensions
fig1, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3))
ax1.plot(epochs, np.squeeze(accuracyAnode5), label='ANODE')
ax1.plot(epochs, np.squeeze(accuracyNode), label='NODE')
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Accuracy')
ax1.set_ylim([0.75, 1.05])
ax1.legend()
ax2.scatter(nfeAnode5, accuracyAnode5, label='ANODE')
ax2.scatter(nfeNode, accuracyNode, label='NODE')
ax2.set_xlabel('NFE')
ax2.set_xlim([20, 130])
ax2.set_ylim([0.75, 1.05])
ax2.legend()
ax3.plot(epochs, np.squeeze(lossAnode5), label='ANODE')
ax3.plot(epochs, np.squeeze(lossNode), label='NODE')
ax3.set_xlabel('Epochs')
ax3.set_ylabel('loss')
# plt.xlim([0, 130])
ax3.legend()
plt.tight_layout()
plt.savefig('test5Dim.png', format='png', dpi=400, bbox_inches='tight')

# ANODE runs compared across augmented dimensions p = 5, 50, 75, 100
fig2, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3))
ax1.plot(epochs, np.squeeze(accuracyAnode5), label='p=5')
ax1.plot(epochs, np.squeeze(accuracyAnode50), label='p=50')
ax1.plot(epochs, np.squeeze(accuracyAnode75), label='p=75')
ax1.plot(epochs, np.squeeze(accuracyAnode100), label='p=100')
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Accuracy')
ax1.set_ylim([0.75, 1.05])
ax1.legend()
ax2.scatter(nfeAnode5, accuracyAnode5, label='p=5')
ax2.scatter(nfeAnode50, accuracyAnode50, label='p=50')
ax2.scatter(nfeAnode75, accuracyAnode75, label='p=75')
ax2.scatter(nfeAnode100, accuracyAnode100, label='p=100')
ax2.set_xlabel('NFE')
ax2.set_xlim([20, 130])
ax2.set_ylim([0.75, 1.05])
ax2.legend()
ax3.plot(epochs, np.squeeze(lossAnode5), label='p=5')
ax3.plot(epochs, np.squeeze(lossAnode50), label='p=50')
ax3.plot(epochs, np.squeeze(lossAnode75), label='p=75')
ax3.plot(epochs, np.squeeze(lossAnode100), label='p=100')
ax3.set_xlabel('Epochs')
ax3.set_ylabel('loss')
# plt.xlim([0, 130])
ax3.legend()
plt.tight_layout()
plt.savefig('augmentedDimensionComparison.png', format='png', dpi=400, bbox_inches='tight')
| [
"matplotlib.pyplot.savefig",
"pathlib.Path",
"numpy.squeeze",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"json.load",
"matplotlib.pyplot.subplots"
] | [((105, 127), 'pathlib.Path', 'Path', (['"""/yourpathhere/"""'], {}), "('/yourpathhere/')\n", (109, 127), False, 'from pathlib import Path\n'), ((667, 711), 'numpy.array', 'np.array', (["dicAnode['epoch_accuracy_history']"], {}), "(dicAnode['epoch_accuracy_history'])\n", (675, 711), True, 'import numpy as np\n'), ((724, 769), 'numpy.array', 'np.array', (["dicAnode['epoch_total_nfe_history']"], {}), "(dicAnode['epoch_total_nfe_history'])\n", (732, 769), True, 'import numpy as np\n'), ((783, 823), 'numpy.array', 'np.array', (["dicAnode['epoch_loss_history']"], {}), "(dicAnode['epoch_loss_history'])\n", (791, 823), True, 'import numpy as np\n'), ((921, 965), 'numpy.array', 'np.array', (["dicAnode['epoch_accuracy_history']"], {}), "(dicAnode['epoch_accuracy_history'])\n", (929, 965), True, 'import numpy as np\n'), ((979, 1024), 'numpy.array', 'np.array', (["dicAnode['epoch_total_nfe_history']"], {}), "(dicAnode['epoch_total_nfe_history'])\n", (987, 1024), True, 'import numpy as np\n'), ((1039, 1079), 'numpy.array', 'np.array', (["dicAnode['epoch_loss_history']"], {}), "(dicAnode['epoch_loss_history'])\n", (1047, 1079), True, 'import numpy as np\n'), ((1177, 1221), 'numpy.array', 'np.array', (["dicAnode['epoch_accuracy_history']"], {}), "(dicAnode['epoch_accuracy_history'])\n", (1185, 1221), True, 'import numpy as np\n'), ((1235, 1280), 'numpy.array', 'np.array', (["dicAnode['epoch_total_nfe_history']"], {}), "(dicAnode['epoch_total_nfe_history'])\n", (1243, 1280), True, 'import numpy as np\n'), ((1295, 1335), 'numpy.array', 'np.array', (["dicAnode['epoch_loss_history']"], {}), "(dicAnode['epoch_loss_history'])\n", (1303, 1335), True, 'import numpy as np\n'), ((1435, 1479), 'numpy.array', 'np.array', (["dicAnode['epoch_accuracy_history']"], {}), "(dicAnode['epoch_accuracy_history'])\n", (1443, 1479), True, 'import numpy as np\n'), ((1494, 1539), 'numpy.array', 'np.array', (["dicAnode['epoch_total_nfe_history']"], {}), "(dicAnode['epoch_total_nfe_history'])\n", 
(1502, 1539), True, 'import numpy as np\n'), ((1555, 1595), 'numpy.array', 'np.array', (["dicAnode['epoch_loss_history']"], {}), "(dicAnode['epoch_loss_history'])\n", (1563, 1595), True, 'import numpy as np\n'), ((1612, 1655), 'numpy.array', 'np.array', (["dicNode['epoch_accuracy_history']"], {}), "(dicNode['epoch_accuracy_history'])\n", (1620, 1655), True, 'import numpy as np\n'), ((1666, 1710), 'numpy.array', 'np.array', (["dicNode['epoch_total_nfe_history']"], {}), "(dicNode['epoch_total_nfe_history'])\n", (1674, 1710), True, 'import numpy as np\n'), ((1722, 1761), 'numpy.array', 'np.array', (["dicNode['epoch_loss_history']"], {}), "(dicNode['epoch_loss_history'])\n", (1730, 1761), True, 'import numpy as np\n'), ((1941, 1975), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(9, 3)'}), '(1, 3, figsize=(9, 3))\n', (1953, 1975), True, 'from matplotlib import pyplot as plt\n'), ((2567, 2585), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2583, 2585), True, 'from matplotlib import pyplot as plt\n'), ((2586, 2657), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test5Dim.png"""'], {'format': '"""png"""', 'dpi': '(400)', 'bbox_inches': '"""tight"""'}), "('test5Dim.png', format='png', dpi=400, bbox_inches='tight')\n", (2597, 2657), True, 'from matplotlib import pyplot as plt\n'), ((2780, 2814), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(9, 3)'}), '(1, 3, figsize=(9, 3))\n', (2792, 2814), True, 'from matplotlib import pyplot as plt\n'), ((3777, 3795), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3793, 3795), True, 'from matplotlib import pyplot as plt\n'), ((3796, 3891), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""augmentedDimensionComparison.png"""'], {'format': '"""png"""', 'dpi': '(400)', 'bbox_inches': '"""tight"""'}), "('augmentedDimensionComparison.png', format='png', dpi=400,\n bbox_inches='tight')\n", (3807, 3891), True, 'from matplotlib 
import pyplot as plt\n'), ((593, 605), 'json.load', 'json.load', (['f'], {}), '(f)\n', (602, 605), False, 'import json\n'), ((861, 873), 'json.load', 'json.load', (['f'], {}), '(f)\n', (870, 873), False, 'import json\n'), ((1117, 1129), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1126, 1129), False, 'import json\n'), ((1374, 1386), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1383, 1386), False, 'import json\n'), ((1993, 2019), 'numpy.squeeze', 'np.squeeze', (['accuracyAnode5'], {}), '(accuracyAnode5)\n', (2003, 2019), True, 'import numpy as np\n'), ((2053, 2077), 'numpy.squeeze', 'np.squeeze', (['accuracyNode'], {}), '(accuracyNode)\n', (2063, 2077), True, 'import numpy as np\n'), ((2393, 2415), 'numpy.squeeze', 'np.squeeze', (['lossAnode5'], {}), '(lossAnode5)\n', (2403, 2415), True, 'import numpy as np\n'), ((2449, 2469), 'numpy.squeeze', 'np.squeeze', (['lossNode'], {}), '(lossNode)\n', (2459, 2469), True, 'import numpy as np\n'), ((2832, 2858), 'numpy.squeeze', 'np.squeeze', (['accuracyAnode5'], {}), '(accuracyAnode5)\n', (2842, 2858), True, 'import numpy as np\n'), ((2893, 2920), 'numpy.squeeze', 'np.squeeze', (['accuracyAnode50'], {}), '(accuracyAnode50)\n', (2903, 2920), True, 'import numpy as np\n'), ((2955, 2982), 'numpy.squeeze', 'np.squeeze', (['accuracyAnode75'], {}), '(accuracyAnode75)\n', (2965, 2982), True, 'import numpy as np\n'), ((3017, 3045), 'numpy.squeeze', 'np.squeeze', (['accuracyAnode100'], {}), '(accuracyAnode100)\n', (3027, 3045), True, 'import numpy as np\n'), ((3488, 3510), 'numpy.squeeze', 'np.squeeze', (['lossAnode5'], {}), '(lossAnode5)\n', (3498, 3510), True, 'import numpy as np\n'), ((3542, 3565), 'numpy.squeeze', 'np.squeeze', (['lossAnode50'], {}), '(lossAnode50)\n', (3552, 3565), True, 'import numpy as np\n'), ((3598, 3621), 'numpy.squeeze', 'np.squeeze', (['lossAnode75'], {}), '(lossAnode75)\n', (3608, 3621), True, 'import numpy as np\n'), ((3654, 3678), 'numpy.squeeze', 'np.squeeze', (['lossAnode100'], {}), 
'(lossAnode100)\n', (3664, 3678), True, 'import numpy as np\n'), ((1789, 1815), 'numpy.squeeze', 'np.squeeze', (['accuracyAnode5'], {}), '(accuracyAnode5)\n', (1799, 1815), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
import math
import numpy as np
from sensor_msgs.msg import LaserScan
#######################################
# Laser Scan:
# Header: Seq, Stamp, frame_id
# Angle_min, Angle_max, Angle_Increment, Time_Increment
# Scan time, range_min, range_max, ranges, intensities
#######################################
class Laser_Filter:
    """ROS node helper: republishes /base_scan on /laser_scan with a sliding
    median filter applied to the range readings."""

    def __init__(self):
        # Subscribe to the raw scan and republish the filtered scan.
        self.laser_sub = rospy.Subscriber('/base_scan', LaserScan, self.laser_callback)
        self.scan_pub = rospy.Publisher('/laser_scan', LaserScan, queue_size=1)

    def laser_callback(self, msg):
        """Apply a median filter of half-width `median_filter_size` (a ROS
        parameter) to msg.ranges and publish the result.

        Samples too close to either edge of the scan to get a full window are
        left at 0, as are filtered values outside [0, msg.range_max].
        """
        filtered_values = LaserScan()
        distance = np.array(msg.ranges)
        # Copy the scan metadata unchanged.
        filtered_values.header = msg.header
        filtered_values.angle_increment = msg.angle_increment
        filtered_values.time_increment = msg.time_increment
        filtered_values.scan_time = msg.scan_time
        filtered_values.range_min = msg.range_min
        filtered_values.range_max = msg.range_max
        filtered_values.intensities = msg.intensities
        filtered_values.angle_min = msg.angle_min
        filtered_values.angle_max = msg.angle_max
        # Half-width of the median window, clamped to a usable range.
        median_filter_size = rospy.get_param('median_filter_size')
        if median_filter_size < 1:
            median_filter_size = 1
        elif median_filter_size > len(distance)/2 - 1:
            median_filter_size = int(len(distance)/2 - 1)
        filtered_ranges = np.zeros(len(distance))
        # Filter every index that has a full window; both edges stay 0.
        # (Bug fix: the original loop stopped at len - size - 1, leaving one
        # extra unfiltered sample at the tail; this range is symmetric.)
        for i in range(median_filter_size, len(distance) - median_filter_size):
            value = np.median(distance[i - median_filter_size:i + median_filter_size + 1])
            # Discard readings outside the sensor's valid range.
            if value > msg.range_max or value < 0:
                value = 0
            filtered_ranges[i] = value
        filtered_values.ranges = filtered_ranges
        self.scan_pub.publish(filtered_values)
if __name__ == '__main__':
    # Register the node, start the filter, and spin until shutdown.
    rospy.init_node('median_filter', anonymous=True)
    node = Laser_Filter()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
| [
"sensor_msgs.msg.LaserScan",
"numpy.median",
"rospy.Subscriber",
"rospy.init_node",
"rospy.get_param",
"numpy.array",
"rospy.spin",
"rospy.Publisher"
] | [((1932, 1980), 'rospy.init_node', 'rospy.init_node', (['"""median_filter"""'], {'anonymous': '(True)'}), "('median_filter', anonymous=True)\n", (1947, 1980), False, 'import rospy\n'), ((442, 504), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_scan"""', 'LaserScan', 'self.laser_callback'], {}), "('/base_scan', LaserScan, self.laser_callback)\n", (458, 504), False, 'import rospy\n'), ((523, 578), 'rospy.Publisher', 'rospy.Publisher', (['"""/laser_scan"""', 'LaserScan'], {'queue_size': '(1)'}), "('/laser_scan', LaserScan, queue_size=1)\n", (538, 578), False, 'import rospy\n'), ((634, 645), 'sensor_msgs.msg.LaserScan', 'LaserScan', ([], {}), '()\n', (643, 645), False, 'from sensor_msgs.msg import LaserScan\n'), ((659, 679), 'numpy.array', 'np.array', (['msg.ranges'], {}), '(msg.ranges)\n', (667, 679), True, 'import numpy as np\n'), ((1132, 1169), 'rospy.get_param', 'rospy.get_param', (['"""median_filter_size"""'], {}), "('median_filter_size')\n", (1147, 1169), False, 'import rospy\n'), ((2020, 2032), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2030, 2032), False, 'import rospy\n'), ((1539, 1609), 'numpy.median', 'np.median', (['distance[i - median_filter_size:i + median_filter_size + 1]'], {}), '(distance[i - median_filter_size:i + median_filter_size + 1])\n', (1548, 1609), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
def four_dim_scatter(data, year, x_name, y_name, c_name, s_name, smin=1,
                     smax=10, figsize = (16, 10), index_name = "Year",
                     color_bar = "Dark2",discrete_color = False, pp = None):
    """Scatter plot encoding four variables: x/y position, color, and dot size.

    data:        DataFrame whose (Multi)Index has a level named `index_name`.
    year:        index value used to select the rows to plot.
    x_name, y_name, c_name, s_name: column names for position, color, size.
    smin, smax:  dot sizes used for the size legend.
    color_bar:   matplotlib colormap name.
    discrete_color: when True, quantize the colormap to the number of
                 distinct values of `c_name`.
    pp:          optional PdfPages; when given, the figure is saved into it.
    """
    # select the rows of this year, restricted to the four plotted columns
    # (bug fix: the original indexed with the module-global `plot_vars`,
    # silently ignoring the column names passed as arguments)
    data_year = data[(data.index.get_level_values(index_name) == year)][
        [x_name, y_name, c_name, s_name]]
    # remove any row with a nan value in a plotted column
    data_year = data_year.dropna(thresh=len(data_year.columns))
    # Create a figure and axis; choose size of the figure
    fig, ax = plt.subplots(figsize=figsize)
    # set plot values beforehand, easier to interpret
    x = data_year[x_name]
    y = data_year[y_name]
    c = data_year[c_name]
    s = data_year[s_name]
    # number of distinct color values: needed by the colorbar in both cases
    # (bug fix: the original only bound color_divisions when discrete_color
    # was True, raising NameError at the setup_cbar call otherwise)
    color_divisions = len(set(data[c_name].dropna()))
    if discrete_color:
        # quantized colormap with one band per distinct value
        cmap = plt.cm.get_cmap(color_bar, color_divisions)
    else:
        cmap = plt.cm.get_cmap(color_bar)
    # use ax to plot scatter() and draw the dot-size legend
    scatter = scatter_legend(ax, x, y, s, c, cmap, s_name, smin, smax)
    setup_cbar(scatter, color_divisions, c_name)
    # set tick line length to 0
    ax.tick_params(axis=u'both', which=u'both', length=0)
    # set axis value labels fontsize
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    # set axis labels and range
    plt.xlabel(x_name, fontsize=20)
    plt.ylabel(y_name, fontsize=20)
    plt.xlim(3, 10)
    plt.ylim(3, 10)
    # Make title year for each scatter plot
    plt.title(str(year)[:4], fontsize=30)
    plt.show()
    if pp is not None:
        pp.savefig(fig, bbox_inches="tight")
    plt.close()
def scatter_legend(ax, x, y, s, c, cmap, s_name, smin, smax):
    """Plot the scatter on *ax* and attach a marker-size legend.

    Returns the PathCollection so the caller can build a colorbar from it.
    """
    points = ax.scatter(x=x, y=y, s=s, c=c, cmap=cmap)
    # Invisible proxy scatters whose only purpose is to appear in the
    # legend with small / medium / large marker sizes.
    smid = (smin + smax) / 2
    legend_sizes = (10, smid / 4, smax / 4)
    handles = [plt.scatter([], [], s=size, marker='o', color='#555555')
               for size in legend_sizes]
    # bbox_to_anchor and loc place the legend under the axes.
    ax.legend(tuple(handles),
              ("", s_name, ""),
              bbox_to_anchor=(0, -0.17),
              loc='lower left',
              fontsize=12)
    return points
def setup_cbar(scatter, color_divisions, c_name):
    """Attach a labelled colorbar for *scatter* with `color_divisions` ticks."""
    cbar = plt.colorbar(scatter)
    # Center the numeric tick labels inside each color band; adjust the
    # constants to see how the alignment shifts.
    labels = np.arange(1, color_divisions + 1)
    tick_locs = (labels + .82) * (color_divisions - 1) / color_divisions
    cbar.set_ticks(tick_locs)
    # Label the bands 1..color_divisions.
    cbar.set_ticklabels(labels)
    cbar.ax.tick_params(labelsize=20)
    # General label for the whole colorbar.
    cbar.set_label(c_name, size=20)
# Use ggplot styling for every figure produced by this script.
plt.style.use('ggplot')

# Import the data. `DataFrame.from_csv` was removed in pandas 1.0;
# `read_csv` with explicit index_col/parse_dates is the documented
# replacement and matches from_csv's old defaults.
data = pd.read_csv("fraserDataWithRGDPPC.csv", index_col=[0, 1], parse_dates=True)

# Distinct values of each MultiIndex level.
years = list(sorted(set(data.index.get_level_values('Year'))))
country = list(sorted(set(data.index.get_level_values('ISO_Code'))))

# Variables plotted for each year in the scatter.
plot_vars = ["Sound Money", "Government Consumption",
             "RGDP Per Capita", "Quartile"]

# Normalize income so the maximum RGDP Per Capita maps to 1000;
# this keeps marker sizes in an easily adjustable range.
data["RGDP Per Capita"] = data["RGDP Per Capita"] /\
    max(data["RGDP Per Capita"]) * 1000
smin = min(data["RGDP Per Capita"])
smax = max(data["RGDP Per Capita"])

pp = PdfPages("Scatter Plots.pdf")
x = "Sound Money"
y = "Government Consumption"
c = "Quartile"
s = "RGDP Per Capita"
for year in years:
    four_dim_scatter(data, year, x, y, c, s, smin, smax, index_name="Year",
                     discrete_color=True, pp=pp)
pp.close() | [
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame.from_csv",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.scatter",
"matplot... | [((3065, 3088), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3078, 3088), True, 'import matplotlib.pyplot as plt\n'), ((3110, 3177), 'pandas.DataFrame.from_csv', 'pd.DataFrame.from_csv', (['"""fraserDataWithRGDPPC.csv"""'], {'index_col': '[0, 1]'}), "('fraserDataWithRGDPPC.csv', index_col=[0, 1])\n", (3131, 3177), True, 'import pandas as pd\n'), ((3815, 3844), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['"""Scatter Plots.pdf"""'], {}), "('Scatter Plots.pdf')\n", (3823, 3844), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((640, 669), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (652, 669), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1386), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (1373, 1386), True, 'import matplotlib.pyplot as plt\n'), ((1393, 1416), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (1403, 1416), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1475), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_name'], {'fontsize': '(20)'}), '(x_name, fontsize=20)\n', (1454, 1475), True, 'import matplotlib.pyplot as plt\n'), ((1481, 1512), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_name'], {'fontsize': '(20)'}), '(y_name, fontsize=20)\n', (1491, 1512), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1534), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(3)', '(10)'], {}), '(3, 10)\n', (1527, 1534), True, 'import matplotlib.pyplot as plt\n'), ((1538, 1553), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(3)', '(10)'], {}), '(3, 10)\n', (1546, 1553), True, 'import matplotlib.pyplot as plt\n'), ((1650, 1660), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1658, 1660), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1742), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1740, 1742), True, 'import 
matplotlib.pyplot as plt\n'), ((2006, 2060), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'s': '(10)', 'marker': '"""o"""', 'color': '"""#555555"""'}), "([], [], s=10, marker='o', color='#555555')\n", (2017, 2060), True, 'import matplotlib.pyplot as plt\n'), ((2071, 2131), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'s': '(smid / 4)', 'marker': '"""o"""', 'color': '"""#555555"""'}), "([], [], s=smid / 4, marker='o', color='#555555')\n", (2082, 2131), True, 'import matplotlib.pyplot as plt\n'), ((2160, 2220), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'s': '(smax / 4)', 'marker': '"""o"""', 'color': '"""#555555"""'}), "([], [], s=smax / 4, marker='o', color='#555555')\n", (2171, 2220), True, 'import matplotlib.pyplot as plt\n'), ((2554, 2575), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['scatter'], {}), '(scatter)\n', (2566, 2575), True, 'import matplotlib.pyplot as plt\n'), ((975, 1018), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['color_bar', 'color_divisions'], {}), '(color_bar, color_divisions)\n', (990, 1018), True, 'import matplotlib.pyplot as plt\n'), ((1044, 1070), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['color_bar'], {}), '(color_bar)\n', (1059, 1070), True, 'import matplotlib.pyplot as plt\n'), ((2894, 2927), 'numpy.arange', 'np.arange', (['(1)', '(color_divisions + 1)'], {}), '(1, color_divisions + 1)\n', (2903, 2927), True, 'import numpy as np\n'), ((2703, 2736), 'numpy.arange', 'np.arange', (['(1)', '(color_divisions + 1)'], {}), '(1, color_divisions + 1)\n', (2712, 2736), True, 'import numpy as np\n')] |
import numpy as np
# Maximum top-to-bottom path sum through a number triangle
# (Project Euler problem 18 style dynamic programming).

# Small sample triangle (unused; kept for manual experiments).
input_s1 = \
'''3
7 4
2 4 6
8 5 9 3'''

input_s = \
'''75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23'''

# Parse each line of the triangle into a list of ints.
input_l = []
for elem in input_s.split('\n'):
    temp_ind = list(map(int, elem.lstrip().split(' ')))
    input_l.append(temp_ind)

depth = len(input_l)
# Rows have different lengths, so force an object array of row lists;
# np.array on a ragged nested list raises ValueError on NumPy >= 1.24.
mmap = np.array(input_l, dtype=object)
print(mmap)
# Top-down DP: each cell accumulates the best path sum reaching it.
for i in range(1, depth):
    mmap[i][0] += mmap[i - 1][0]          # left edge: only one parent
    mmap[i][i] += mmap[i - 1][i - 1]      # right edge: only one parent
    for j in range(1, i):
        mmap[i][j] += max(mmap[i - 1][j], mmap[i - 1][j - 1])
print(mmap)

# Best path ends at the maximum of the bottom row.
max_val = max(mmap[depth - 1])
print(max_val)
| [
"numpy.array"
] | [((704, 721), 'numpy.array', 'np.array', (['input_l'], {}), '(input_l)\n', (712, 721), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import pybullet as p
from ravens.tasks import Task
from ravens import utils
class Stacking(Task):
    """Ravens task: stack six colored blocks into a pyramid on a stand."""

    def __init__(self):
        super().__init__()
        self.ee = 'suction'
        self.max_steps = 12
        self.metric = 'pose'
        self.primitive = 'pick_place'
        self.rotation_eps = np.deg2rad(45)

    def reset(self, env):
        # Stand that serves as the pyramid base.
        stand_size = (0.05, 0.15, 0.005)
        stand_pose = self.random_pose(env, stand_size)
        env.add_object('assets/stacking/stand.urdf', stand_pose, fixed=True)

        # One block per color, spawned at a random pose and tinted.
        color_names = ['purple', 'blue', 'green', 'yellow', 'orange', 'red']
        block_size = (0.04, 0.04, 0.04)
        block_ids = []
        for name in color_names:
            pose = self.random_pose(env, block_size)
            body = env.add_object('assets/stacking/block.urdf', pose)
            p.changeVisualShape(body, -1, rgbaColor=utils.COLORS[name] + [1])
            block_ids.append(body)

        # Placement slots relative to the stand: three on the bottom row,
        # two in the middle, one on top.
        self.num_steps = 6
        offsets = [(0, -0.05, 0.03), (0, 0, 0.03), (0, 0.05, 0.03),
                   (0, -0.025, 0.08), (0, 0.025, 0.08), (0, 0, 0.13)]
        places = {i: (self.apply(stand_pose, off), stand_pose[1])
                  for i, off in enumerate(offsets)}
        sym = np.pi / 2  # blocks are square, so 90-degree symmetric
        steps = [
            {block_ids[0]: (sym, [0, 1, 2]),
             block_ids[1]: (sym, [0, 1, 2]),
             block_ids[2]: (sym, [0, 1, 2])},
            {block_ids[3]: (sym, [3, 4]),
             block_ids[4]: (sym, [3, 4])},
            {block_ids[5]: (sym, [5])},
        ]
        self.goal = {'places': places, 'steps': steps}
| [
"numpy.deg2rad",
"pybullet.changeVisualShape"
] | [((346, 360), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (356, 360), True, 'import numpy as np\n'), ((1175, 1235), 'pybullet.changeVisualShape', 'p.changeVisualShape', (['block_id', '(-1)'], {'rgbaColor': '(colors[i] + [1])'}), '(block_id, -1, rgbaColor=colors[i] + [1])\n', (1194, 1235), True, 'import pybullet as p\n')] |
import os
import sys
import time
import json
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import gensim.downloader as api
from transformers import AutoTokenizer
from src.model.MyLSTM import MyLSTM
from src.Util.CSVLogger import CSVLogger
from src.model.ModelBertLSTM import MyLSTM_Bert
from src.MyDataset import MyDataset_WithPreprocess as MyDataset, _batch_to_tensor
# Run configuration for the GloVe-embedding LSTM experiment.
# Structure mirrors word2vec_settings / bert_settings below and is
# consumed by ModelTrainer(settings).
glove_settings = {
    "run_name": "GloveLSTM",
    "output_dir": "./output/GloveRun",
    # Path of a checkpoint to resume from; empty string means a fresh run.
    "load_checkpoint": "",
    "mode": "train",  # train, eval
    "embedding_settings": {
        "embedding_type": "gensim",  # gensim, bert
        ## embeding_model: ["glove-wiki-gigaword-300", "word2vec-google-news-300", "distilbert-base-uncased"]
        "embedding_model": "glove-wiki-gigaword-300"
    },
    "data_settings": {
        "train_data": "./data/glove_ids_train.csv.zip",
        "val_data": "./data/glove_ids_val.csv.zip",
        "test_data": "./data/glove_ids_test.csv.zip",
        "max_seq_len": 200,
        "input_type": "index",  # index, clean, unclean
        "store_processed": False,
        "batch_size": 96
    },
    "model_settings": {
        "lstm_layer": 2,
        "hidden_dim": 128,
        "target_size": 7,
        "dropout_prob": 0.2
    },
    "train_settings": {
        "epochs": 100,
        "learning_rate": 0.000005,
        "grad_clip": 5
    }
}
# Run configuration for the word2vec-embedding LSTM experiment.
# Same layout as glove_settings; only run_name/output_dir/embedding_model
# differ.
word2vec_settings = {
    "run_name": "Word2VecLSTM",
    "output_dir": "./output/Word2VecRun",
    # Path of a checkpoint to resume from; empty string means a fresh run.
    "load_checkpoint": "",
    "mode": "train",  # train, eval
    "embedding_settings": {
        "embedding_type": "gensim",  # gensim, bert
        ## embeding_model: ["glove-wiki-gigaword-300", "word2vec-google-news-300", "distilbert-base-uncased"]
        "embedding_model": "word2vec-google-news-300"
    },
    "data_settings": {
        "train_data": "./data/glove_ids_train.csv.zip",
        "val_data": "./data/glove_ids_val.csv.zip",
        "test_data": "./data/glove_ids_test.csv.zip",
        "max_seq_len": 200,
        "input_type": "index",  # index, clean, unclean
        "store_processed": False,
        "batch_size": 96
    },
    "model_settings": {
        "lstm_layer": 2,
        "hidden_dim": 128,
        "target_size": 7,
        "dropout_prob": 0.2
    },
    "train_settings": {
        "epochs": 100,
        "learning_rate": 0.000005,
        "grad_clip": 5
    }
}
# Run configuration for the DistilBERT-embedding LSTM experiment.
# Same layout as glove_settings/word2vec_settings, plus "train_bert" to
# optionally fine-tune the transformer weights.
bert_settings = {
    "run_name": "BertLSTM",
    "output_dir": "./output/BertRun",
    # if load checkpoint is not empty (""), trainer will try to import checkpoint!
    "load_checkpoint": "",
    "mode": "train",  # train, eval
    "embedding_settings": {
        ## embedding_type can be: ["gensim" | "bert"]
        "embedding_type": "bert",
        ## embeding_model can be: ["glove-wiki-gigaword-300" | "word2vec-google-news-300" | "distilbert-base-uncased"]
        "embedding_model": "distilbert-base-uncased"
    },
    "data_settings": {
        "train_data": "./data/bert_ids_train.csv.zip",
        "val_data": "./data/bert_ids_val.csv.zip",
        "test_data": "./data/bert_ids_test.csv.zip",
        "max_seq_len": 200,
        "input_type": "index",
        "store_processed": False,
        "batch_size": 96
    },
    "model_settings": {
        "lstm_layer": 2,
        "hidden_dim": 128,
        "target_size": 7,
        "dropout_prob": 0.2,
        # Freeze BERT by default; only the LSTM head is trained.
        "train_bert": False
    },
    "train_settings": {
        "epochs": 100,
        "learning_rate": 0.000005,
        "grad_clip": 5
    }
}
class ModelTrainer:
    def __init__(self, settings, emb_model=None):
        """Build datasets, loaders, the LSTM model, and training state.

        Args:
            settings: nested configuration dict (see the *_settings literals
                in this module) describing embeddings, data, model and
                training hyper-parameters.
            emb_model: optional pre-loaded embedding model / tokenizer; when
                None it is loaded according to settings["embedding_settings"].

        Depending on settings["mode"] and settings["load_checkpoint"], this
        starts a fresh run, resumes training from a checkpoint, or runs an
        immediate evaluation on the test loader.
        """
        # ################################ #
        # IDENTIFY THE DEVICE FOR TRAINING #
        # ################################ #
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.load = settings["load_checkpoint"]
        self.run_mode = settings["mode"]
        if self.load != "" and self.run_mode == "train":
            # Resume: reuse the settings and output dir of the stored run.
            checkpoint = self.get_chkp(settings["load_checkpoint"])
            self.output_dir = os.path.abspath("/".join(self.load.split("/")[:-2]))
            self.settings = checkpoint["settings"]
            # NOTE(review): the rest of __init__ keeps reading the *settings*
            # argument rather than self.settings from the checkpoint --
            # confirm both are expected to agree when resuming.
        else:
            # Fresh run: create a timestamped output directory.
            self.settings = settings
            self.output_dir = settings["output_dir"]
            self.output_dir = os.path.join(self.output_dir,
                                           f"{datetime.now().strftime('%y-%m-%d_%H%M%S')}_{settings['run_name']}")
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        self.embedding_model_name = settings["embedding_settings"]["embedding_model"]
        if isinstance(emb_model, type(None)):
            # No embedding model supplied: load gensim vectors or the
            # HuggingFace tokenizer, depending on the configured name.
            if self.embedding_model_name != "distilbert-base-uncased":
                print(f"embedding model ({self.embedding_model_name}) is loading...")
                self.emb_model = api.load(self.embedding_model_name)
            elif self.embedding_model_name == "distilbert-base-uncased":
                print(f"embedding model ({self.embedding_model_name}) is loading...")
                self.emb_model = AutoTokenizer.from_pretrained(self.embedding_model_name)
        else:
            print(f"embedding model is loaded from the argument.")
            self.emb_model = emb_model
        # ########### #
        # IMPORT DATA #
        # ########### #
        print("Importing Data...")
        self.data_train = MyDataset(path_data=settings["data_settings"]["train_data"],
                                   emb_model=self.emb_model,
                                   emb_type=settings["embedding_settings"]["embedding_type"],
                                   max_seq_len=settings["data_settings"]["max_seq_len"],
                                   input_type=settings["data_settings"]["input_type"],
                                   store_processed=settings["data_settings"]["store_processed"],
                                   output_dir=self.output_dir)
        self.data_val = MyDataset(path_data=settings["data_settings"]["val_data"],
                                 emb_model=self.emb_model,
                                 emb_type=settings["embedding_settings"]["embedding_type"],
                                 max_seq_len=settings["data_settings"]["max_seq_len"],
                                 input_type=settings["data_settings"]["input_type"],
                                 store_processed=settings["data_settings"]["store_processed"],
                                 output_dir=self.output_dir)
        self.data_test = MyDataset(path_data=settings["data_settings"]["test_data"],
                                  emb_model=self.emb_model,
                                  emb_type=settings["embedding_settings"]["embedding_type"],
                                  max_seq_len=settings["data_settings"]["max_seq_len"],
                                  input_type=settings["data_settings"]["input_type"],
                                  store_processed=settings["data_settings"]["store_processed"],
                                  output_dir=self.output_dir)
        # ############ #
        # Data Loaders #
        # ############ #
        print("Creating DataLoaders...")
        self.batch_size = settings["data_settings"]["batch_size"]
        # drop_last=True keeps every batch the same size; the fixed-size
        # hidden-state initialization in train()/eval() relies on this.
        self.dataloader_train = DataLoader(self.data_train,
                                           batch_size=self.batch_size,
                                           shuffle=True,
                                           collate_fn=_batch_to_tensor,
                                           drop_last=True)
        self.dataloader_val = DataLoader(self.data_val,
                                         batch_size=self.batch_size,
                                         shuffle=True,
                                         collate_fn=_batch_to_tensor,
                                         drop_last=True)
        self.dataloader_test = DataLoader(self.data_test,
                                          batch_size=self.batch_size,
                                          shuffle=True,
                                          collate_fn=_batch_to_tensor,
                                          drop_last=True)
        # #################### #
        # INITIALIZE THE MODEL #
        # #################### #
        print("Initializing Model...")
        if settings["embedding_settings"]["embedding_type"]=="bert":
            self.model = MyLSTM_Bert(lstm_layers=settings["model_settings"]["lstm_layer"],
                                     hidden_dim=settings["model_settings"]["hidden_dim"],
                                     target_size=settings["model_settings"]["target_size"],
                                     dropout_prob=settings["model_settings"]["dropout_prob"],
                                     device=self.device,
                                     bert_pretrained=settings["embedding_settings"]["embedding_model"],
                                     seq_len=settings["data_settings"]["max_seq_len"],
                                     train_bert=settings["model_settings"]["train_bert"])
        else:
            self.model = MyLSTM(emb_vectors=self.emb_model.vectors,
                                lstm_layers=settings["model_settings"]["lstm_layer"],
                                hidden_dim=settings["model_settings"]["hidden_dim"],
                                target_size=settings["model_settings"]["target_size"],
                                dropout_prob=settings["model_settings"]["dropout_prob"],
                                device=self.device,
                                seq_len=settings["data_settings"]["max_seq_len"])
        self.model = self.model.to(self.device)
        self.loss_function = torch.nn.NLLLoss()
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=settings["train_settings"]["learning_rate"])
        # ################# #
        # TRAINING SETTINGS #
        # ################# #
        if self.load != "" and self.run_mode == "eval":
            # Evaluation-only run: score the test set right away.
            self.eval(data_loader=self.dataloader_test)
        elif self.load != "" and self.run_mode == "train":
            # load the last training
            print(f"The Model is loading... {self.load}")
            self.model.load_state_dict(checkpoint["state_dict"])
            self.epochs = settings["train_settings"]["epochs"]
            self.batch_counter = 0
            self.epoch_counter = checkpoint["epoch"]
            self.grad_clip = settings["train_settings"]["grad_clip"]
            self.valid_loss_min = checkpoint["valid_loss_min"]
            self.valid_acc_max = checkpoint["valid_acc_max"]
            self.metrics = checkpoint["metrics"]
            self.csv_logger = CSVLogger(self.metrics.keys())
            # Replay the stored history into a fresh CSV log so the file
            # is contiguous after resuming.
            for i in range(len(self.metrics["train_loss"])):
                self.csv_logger.log({
                    "train_loss": self.metrics["train_loss"][i],
                    "val_loss": self.metrics["val_loss"][i],
                    "train_acc": self.metrics["train_acc"][i],
                    "val_acc": self.metrics["val_acc"][i]
                })
        elif self.load == "":
            # Fresh training run: zeroed counters and extreme best-so-far values.
            self.epochs = settings["train_settings"]["epochs"]
            self.batch_counter = 0
            self.epoch_counter = 0
            self.grad_clip = settings["train_settings"]["grad_clip"]
            self.valid_loss_min = np.Inf
            self.valid_acc_max = np.NINF
            # Initialize metric container
            self.metrics = {
                "train_loss": [],
                "val_loss": [],
                "train_acc": [],
                "val_acc": []
            }
            self.csv_logger = CSVLogger(self.metrics.keys())
            # Persist the configuration next to the run outputs.
            with open(os.path.join(self.output_dir, "settings.json"), "w") as f:
                json.dump(settings, f)
        else:
            sys.exit("load and mode Settings are not identified!")
    def train(self):
        """Main training loop over self.epochs epochs.

        Logs running loss/accuracy every 100 batches, validates near the
        end of each epoch, and checkpoints both on a new best validation
        loss ("minLoss_epoch") and after every epoch ("last_epoch").
        """
        print_step_every = 100
        self.model.switch_train()
        while self.epoch_counter < self.epochs:
            self.epoch_counter += 1
            epoch = self.epoch_counter
            # initialize intermediate metrics
            self.batch_counter = 0
            training_loss = 0
            training_acc = 0
            # time vars
            ep_start_time = time.time()
            temp_time = time.time()
            # initialize hiddens
            h = self.model.init_hidden(self.batch_size)
            for inputs, labels in self.dataloader_train:
                self.batch_counter += 1
                # Detach the hidden state so gradients do not flow
                # across batch boundaries.
                h = tuple([e.data for e in h])
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                self.optimizer.zero_grad()
                output, h = self.model.forward(inputs, h)
                preds = torch.max(output, dim=1)[1]
                loss = self.loss_function(output, labels)
                training_loss += loss.item()
                training_acc += torch.sum(preds == labels).item() / labels.shape[0]
                loss.backward()
                # Clip gradients to stabilize LSTM training.
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)
                self.optimizer.step()
                if self.batch_counter % print_step_every == 0 or self.batch_counter == 1:
                    print("Epoch: {}/{}...".format(epoch + 1, self.epochs),
                          "Step: {}...".format(self.batch_counter),
                          "Loss: {:.6f}...".format(training_loss / self.batch_counter),
                          "Acc: {:.2f}".format(training_acc / self.batch_counter),
                          "Time: {}... secs".format(time.time() - temp_time))
                    temp_time = time.time()
                # ##### END OF EPOCH ##### #
                if self.batch_counter == len(self.dataloader_train) - 1:
                    val_loss, val_acc = self.eval(self.dataloader_val)
                    row = {
                        "train_loss": training_loss / len(self.dataloader_train),
                        "train_acc": training_acc / len(self.dataloader_train),
                        "val_loss": val_loss,
                        "val_acc": val_acc
                    }
                    self._log_metrics(row)
                    print("Epoch: {}/{}...".format(epoch + 1, self.epochs),
                          "Step: {}...".format(self.batch_counter),
                          "Loss: {:.6f}...".format(row["train_loss"]),
                          "Acc: {:.2f}".format(row["train_acc"]),
                          "Val Loss: {:.6f}".format(row["val_loss"]),
                          "Val Acc: {:.2f}".format(row["val_acc"]))
                    # ## SAVE MODEL ## #
                    # save best validation accuracy
                    if self.valid_acc_max < row["val_acc"]:
                        self.valid_acc_max = row["val_acc"]
                        # self.save_model(fname=f"minAcc_epoch")
                    # save best validation loss
                    if self.valid_loss_min > row["val_loss"]:
                        self.valid_loss_min = row["val_loss"]
                        self.save_model(fname=f"minLoss_epoch")
                    # save last epoch
                    self.save_model(fname="last_epoch")
            ep_end_time = time.time()
            print("Epoch: {} Ended in {} secs...".format(epoch, ep_end_time - ep_start_time))
def _log_metrics(self, row):
self.metrics["train_loss"].append(row["train_loss"])
self.metrics["train_acc"].append(row["train_acc"])
self.metrics["val_loss"].append(row["val_loss"])
self.metrics["val_acc"].append(row["val_acc"])
self.csv_logger.log(row)
def eval(self, data_loader=None):
test_loss = 0
test_acc = 0
test_h = self.model.init_hidden(self.batch_size)
self.model.eval()
data_loader = self.dataloader_test if isinstance(data_loader, type(None)) else data_loader
for inp, lab in data_loader:
test_h = tuple([each.data for each in test_h])
inp, lab = inp.to(self.device), lab.to(self.device)
out, test_h = self.model(inp, test_h)
preds = torch.max(out, dim=1)[1]
loss = self.loss_function(out, lab)
test_loss += loss.item()
test_acc += torch.sum(preds == lab).item() / lab.shape[0]
self.model.switch_train()
loss = test_loss / len(data_loader)
acc = test_acc / len(data_loader)
return loss, acc
def predict(self, data_loader=None):
all_preds = []
all_y = []
test_h = self.model.init_hidden(self.batch_size)
self.model.eval()
data_loader = self.dataloader_test if isinstance(data_loader, type(None)) else data_loader
for inp, lab in data_loader:
test_h = tuple([each.data for each in test_h])
inp, lab = inp.to(self.device), lab.to(self.device)
out, test_h = self.model(inp, test_h)
preds = torch.max(out, dim=1)[1]
all_preds.extend(preds.detach().numpy())
all_y.extend(lab.detach().numpy())
self.model.switch_train()
return np.array(all_preds), np.array(all_y)
def save_model(self, fname="last_epoch"):
state = {
"settings": self.settings,
"epoch": self.epoch_counter,
"metrics": self.metrics,
"valid_acc_max": self.valid_acc_max,
"valid_loss_min": self.valid_loss_min,
"optimizer": self.optimizer.state_dict(),
"state_dict": self.model.state_dict()
}
o_dir = os.path.join(self.output_dir, "checkpoints")
if not os.path.exists(o_dir):
os.makedirs(o_dir)
path_out = os.path.join(o_dir, f"{fname}.chkp")
torch.save(state, path_out)
def load_model(self, chkp_path):
checkpoint = self.get_chkp(chkp_path)
print(f"The Model is loading... {self.load}")
self.output_dir = os.path.abspath("/".join(chkp_path.split("/")[:-2]))
self.settings = checkpoint["settings"]
self.model.load_state_dict(checkpoint["state_dict"])
self.epoch_counter = checkpoint["epoch"]
self.valid_loss_min = checkpoint["valid_loss_min"]
self.valid_acc_max = checkpoint["valid_acc_max"]
self.metrics = checkpoint["metrics"]
def get_chkp(self, chkp_path):
return torch.load(chkp_path, map_location=self.device)
def save_plots(self, fname="learningCurve"):
x = np.arange(len(self.metrics["train_loss"])) + 1
fig, ax = plt.subplots()
ax.plot(x, self.metrics["train_loss"], label="train")
ax.plot(x, self.metrics["val_loss"], label="val")
ax.set_title('Learning Curve - Loss')
plt.legend(loc='lower right')
plt.savefig(os.path.join(self.output_dir, f"{fname}_Loss.png"))
fig, ax = plt.subplots()
ax.plot(x, self.metrics["train_acc"], label="train")
ax.plot(x, self.metrics["val_acc"], label="val")
ax.set_title('Learning Curve - Accuracy')
plt.legend(loc='lower right')
plt.savefig(os.path.join(self.output_dir, f"{fname}_Acc.png")) | [
"torch.max",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"transformers.AutoTokenizer.from_pretrained",
"sys.exit",
"os.path.exists",
"src.model.MyLSTM.MyLSTM",
"src.MyDataset.MyDataset_WithPreprocess",
"torch.nn.NLLLoss",
"torch.save",
"time.time",
"matplotlib.pyplot.legend",
"t... | [((5498, 5858), 'src.MyDataset.MyDataset_WithPreprocess', 'MyDataset', ([], {'path_data': "settings['data_settings']['train_data']", 'emb_model': 'self.emb_model', 'emb_type': "settings['embedding_settings']['embedding_type']", 'max_seq_len': "settings['data_settings']['max_seq_len']", 'input_type': "settings['data_settings']['input_type']", 'store_processed': "settings['data_settings']['store_processed']", 'output_dir': 'self.output_dir'}), "(path_data=settings['data_settings']['train_data'], emb_model=self\n .emb_model, emb_type=settings['embedding_settings']['embedding_type'],\n max_seq_len=settings['data_settings']['max_seq_len'], input_type=\n settings['data_settings']['input_type'], store_processed=settings[\n 'data_settings']['store_processed'], output_dir=self.output_dir)\n", (5507, 5858), True, 'from src.MyDataset import MyDataset_WithPreprocess as MyDataset, _batch_to_tensor\n'), ((6080, 6438), 'src.MyDataset.MyDataset_WithPreprocess', 'MyDataset', ([], {'path_data': "settings['data_settings']['val_data']", 'emb_model': 'self.emb_model', 'emb_type': "settings['embedding_settings']['embedding_type']", 'max_seq_len': "settings['data_settings']['max_seq_len']", 'input_type': "settings['data_settings']['input_type']", 'store_processed': "settings['data_settings']['store_processed']", 'output_dir': 'self.output_dir'}), "(path_data=settings['data_settings']['val_data'], emb_model=self.\n emb_model, emb_type=settings['embedding_settings']['embedding_type'],\n max_seq_len=settings['data_settings']['max_seq_len'], input_type=\n settings['data_settings']['input_type'], store_processed=settings[\n 'data_settings']['store_processed'], output_dir=self.output_dir)\n", (6089, 6438), True, 'from src.MyDataset import MyDataset_WithPreprocess as MyDataset, _batch_to_tensor\n'), ((6649, 7008), 'src.MyDataset.MyDataset_WithPreprocess', 'MyDataset', ([], {'path_data': "settings['data_settings']['test_data']", 'emb_model': 'self.emb_model', 'emb_type': 
"settings['embedding_settings']['embedding_type']", 'max_seq_len': "settings['data_settings']['max_seq_len']", 'input_type': "settings['data_settings']['input_type']", 'store_processed': "settings['data_settings']['store_processed']", 'output_dir': 'self.output_dir'}), "(path_data=settings['data_settings']['test_data'], emb_model=self.\n emb_model, emb_type=settings['embedding_settings']['embedding_type'],\n max_seq_len=settings['data_settings']['max_seq_len'], input_type=\n settings['data_settings']['input_type'], store_processed=settings[\n 'data_settings']['store_processed'], output_dir=self.output_dir)\n", (6658, 7008), True, 'from src.MyDataset import MyDataset_WithPreprocess as MyDataset, _batch_to_tensor\n'), ((7415, 7533), 'torch.utils.data.DataLoader', 'DataLoader', (['self.data_train'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'collate_fn': '_batch_to_tensor', 'drop_last': '(True)'}), '(self.data_train, batch_size=self.batch_size, shuffle=True,\n collate_fn=_batch_to_tensor, drop_last=True)\n', (7425, 7533), False, 'from torch.utils.data import DataLoader\n'), ((7733, 7849), 'torch.utils.data.DataLoader', 'DataLoader', (['self.data_val'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'collate_fn': '_batch_to_tensor', 'drop_last': '(True)'}), '(self.data_val, batch_size=self.batch_size, shuffle=True,\n collate_fn=_batch_to_tensor, drop_last=True)\n', (7743, 7849), False, 'from torch.utils.data import DataLoader\n'), ((8042, 8159), 'torch.utils.data.DataLoader', 'DataLoader', (['self.data_test'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'collate_fn': '_batch_to_tensor', 'drop_last': '(True)'}), '(self.data_test, batch_size=self.batch_size, shuffle=True,\n collate_fn=_batch_to_tensor, drop_last=True)\n', (8052, 8159), False, 'from torch.utils.data import DataLoader\n'), ((9877, 9895), 'torch.nn.NLLLoss', 'torch.nn.NLLLoss', ([], {}), '()\n', (9893, 9895), False, 'import torch\n'), ((17831, 17875), 'os.path.join', 
'os.path.join', (['self.output_dir', '"""checkpoints"""'], {}), "(self.output_dir, 'checkpoints')\n", (17843, 17875), False, 'import os\n'), ((17964, 18000), 'os.path.join', 'os.path.join', (['o_dir', 'f"""{fname}.chkp"""'], {}), "(o_dir, f'{fname}.chkp')\n", (17976, 18000), False, 'import os\n'), ((18009, 18036), 'torch.save', 'torch.save', (['state', 'path_out'], {}), '(state, path_out)\n', (18019, 18036), False, 'import torch\n'), ((18623, 18670), 'torch.load', 'torch.load', (['chkp_path'], {'map_location': 'self.device'}), '(chkp_path, map_location=self.device)\n', (18633, 18670), False, 'import torch\n'), ((18798, 18812), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (18810, 18812), True, 'import matplotlib.pyplot as plt\n'), ((18987, 19016), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (18997, 19016), True, 'import matplotlib.pyplot as plt\n'), ((19108, 19122), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19120, 19122), True, 'import matplotlib.pyplot as plt\n'), ((19299, 19328), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (19309, 19328), True, 'import matplotlib.pyplot as plt\n'), ((3860, 3885), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3883, 3885), False, 'import torch\n'), ((3836, 3856), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3848, 3856), False, 'import torch\n'), ((3891, 3910), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3903, 3910), False, 'import torch\n'), ((8557, 9006), 'src.model.ModelBertLSTM.MyLSTM_Bert', 'MyLSTM_Bert', ([], {'lstm_layers': "settings['model_settings']['lstm_layer']", 'hidden_dim': "settings['model_settings']['hidden_dim']", 'target_size': "settings['model_settings']['target_size']", 'dropout_prob': "settings['model_settings']['dropout_prob']", 'device': 'self.device', 'bert_pretrained': 
"settings['embedding_settings']['embedding_model']", 'seq_len': "settings['data_settings']['max_seq_len']", 'train_bert': "settings['model_settings']['train_bert']"}), "(lstm_layers=settings['model_settings']['lstm_layer'],\n hidden_dim=settings['model_settings']['hidden_dim'], target_size=\n settings['model_settings']['target_size'], dropout_prob=settings[\n 'model_settings']['dropout_prob'], device=self.device, bert_pretrained=\n settings['embedding_settings']['embedding_model'], seq_len=settings[\n 'data_settings']['max_seq_len'], train_bert=settings['model_settings'][\n 'train_bert'])\n", (8568, 9006), False, 'from src.model.ModelBertLSTM import MyLSTM_Bert\n'), ((9276, 9626), 'src.model.MyLSTM.MyLSTM', 'MyLSTM', ([], {'emb_vectors': 'self.emb_model.vectors', 'lstm_layers': "settings['model_settings']['lstm_layer']", 'hidden_dim': "settings['model_settings']['hidden_dim']", 'target_size': "settings['model_settings']['target_size']", 'dropout_prob': "settings['model_settings']['dropout_prob']", 'device': 'self.device', 'seq_len': "settings['data_settings']['max_seq_len']"}), "(emb_vectors=self.emb_model.vectors, lstm_layers=settings[\n 'model_settings']['lstm_layer'], hidden_dim=settings['model_settings'][\n 'hidden_dim'], target_size=settings['model_settings']['target_size'],\n dropout_prob=settings['model_settings']['dropout_prob'], device=self.\n device, seq_len=settings['data_settings']['max_seq_len'])\n", (9282, 9626), False, 'from src.model.MyLSTM import MyLSTM\n'), ((12486, 12497), 'time.time', 'time.time', ([], {}), '()\n', (12495, 12497), False, 'import time\n'), ((12522, 12533), 'time.time', 'time.time', ([], {}), '()\n', (12531, 12533), False, 'import time\n'), ((17382, 17401), 'numpy.array', 'np.array', (['all_preds'], {}), '(all_preds)\n', (17390, 17401), True, 'import numpy as np\n'), ((17403, 17418), 'numpy.array', 'np.array', (['all_y'], {}), '(all_y)\n', (17411, 17418), True, 'import numpy as np\n'), ((17891, 17912), 'os.path.exists', 
'os.path.exists', (['o_dir'], {}), '(o_dir)\n', (17905, 17912), False, 'import os\n'), ((17926, 17944), 'os.makedirs', 'os.makedirs', (['o_dir'], {}), '(o_dir)\n', (17937, 17944), False, 'import os\n'), ((19037, 19087), 'os.path.join', 'os.path.join', (['self.output_dir', 'f"""{fname}_Loss.png"""'], {}), "(self.output_dir, f'{fname}_Loss.png')\n", (19049, 19087), False, 'import os\n'), ((19349, 19398), 'os.path.join', 'os.path.join', (['self.output_dir', 'f"""{fname}_Acc.png"""'], {}), "(self.output_dir, f'{fname}_Acc.png')\n", (19361, 19398), False, 'import os\n'), ((4558, 4589), 'os.path.exists', 'os.path.exists', (['self.output_dir'], {}), '(self.output_dir)\n', (4572, 4589), False, 'import os\n'), ((4607, 4635), 'os.makedirs', 'os.makedirs', (['self.output_dir'], {}), '(self.output_dir)\n', (4618, 4635), False, 'import os\n'), ((4959, 4994), 'gensim.downloader.load', 'api.load', (['self.embedding_model_name'], {}), '(self.embedding_model_name)\n', (4967, 4994), True, 'import gensim.downloader as api\n'), ((16381, 16402), 'torch.max', 'torch.max', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (16390, 16402), False, 'import torch\n'), ((17206, 17227), 'torch.max', 'torch.max', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (17215, 17227), False, 'import torch\n'), ((5187, 5243), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.embedding_model_name'], {}), '(self.embedding_model_name)\n', (5216, 5243), False, 'from transformers import AutoTokenizer\n'), ((12029, 12083), 'sys.exit', 'sys.exit', (['"""load and mode Settings are not identified!"""'], {}), "('load and mode Settings are not identified!')\n", (12037, 12083), False, 'import sys\n'), ((12975, 12999), 'torch.max', 'torch.max', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (12984, 12999), False, 'import torch\n'), ((13860, 13871), 'time.time', 'time.time', ([], {}), '()\n', (13869, 13871), False, 'import time\n'), ((15470, 15481), 'time.time', 'time.time', ([], {}), 
'()\n', (15479, 15481), False, 'import time\n'), ((11980, 12002), 'json.dump', 'json.dump', (['settings', 'f'], {}), '(settings, f)\n', (11989, 12002), False, 'import json\n'), ((16516, 16539), 'torch.sum', 'torch.sum', (['(preds == lab)'], {}), '(preds == lab)\n', (16525, 16539), False, 'import torch\n'), ((11905, 11951), 'os.path.join', 'os.path.join', (['self.output_dir', '"""settings.json"""'], {}), "(self.output_dir, 'settings.json')\n", (11917, 11951), False, 'import os\n'), ((13139, 13165), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (13148, 13165), False, 'import torch\n'), ((4470, 4484), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4482, 4484), False, 'from datetime import datetime\n'), ((13802, 13813), 'time.time', 'time.time', ([], {}), '()\n', (13811, 13813), False, 'import time\n')] |
import numpy as np
from pathlib import Path
from sklearn import datasets
from unittest.mock import patch
from pyclustertend import vat, ivat, compute_ordered_dissimilarity_matrix, compute_ivat_ordered_dissimilarity_matrix
TEST_DIR = Path(__file__).resolve().parent
def test_compute_ordered_dissimilarity_matrix():
# given
iris = datasets.load_iris()
iris_dataset = iris.data
expected_ordered_matrix = np.load(TEST_DIR / 'data/iris_vat.npy')
# when
ordered_matrix = compute_ordered_dissimilarity_matrix(iris_dataset)
# then
np.testing.assert_allclose(ordered_matrix, expected_ordered_matrix, atol=0.1)
def test_compute_ivat_ordered_dissimilarity_matrix():
# given
iris = datasets.load_iris()
iris_dataset = iris.data
expected_ordered_matrix = np.load(TEST_DIR / 'data/iris_ivat.npy')
# when
ordered_matrix = compute_ivat_ordered_dissimilarity_matrix(iris_dataset)
# then
np.testing.assert_allclose(ordered_matrix, expected_ordered_matrix, atol=0.1)
@patch('pyclustertend.visual_assessment_of_tendency.compute_ivat_ordered_dissimilarity_matrix')
def test_ivat_call_compute_ivat_ordered_dissimilarity_matrix_to_obtain_the_ordered_matrix(mock_compute_ivat):
# given
iris = datasets.load_iris()
iris_dataset = iris.data
mock_compute_ivat.return_value = np.ones((3, 3))
# when
ivat(iris_dataset)
# then
mock_compute_ivat.assert_called_once_with(iris_dataset)
@patch('pyclustertend.visual_assessment_of_tendency.compute_ordered_dissimilarity_matrix')
def test_vat_call_compute_ivat_ordered_dissimilarity_matrix_to_obtain_the_ordered_matrix(mock_compute_vat):
# given
iris = datasets.load_iris()
iris_dataset = iris.data
mock_compute_vat.return_value = np.ones((3, 3))
# when
ivat(iris_dataset)
# then
mock_compute_vat.assert_called_once_with(iris_dataset)
@patch('pyclustertend.visual_assessment_of_tendency.compute_ivat_ordered_dissimilarity_matrix')
def test_ivat_does_not_return_the_matrix_by_default(mock_compute_ivat):
# given
iris = datasets.load_iris()
iris_dataset = iris.data
mock_compute_ivat.return_value = np.ones((3, 3))
# when
output_result = ivat(iris_dataset)
# then
assert output_result is None
@patch('pyclustertend.visual_assessment_of_tendency.compute_ordered_dissimilarity_matrix')
def test_vat_does_not_return_the_matrix_by_default(mock_compute_vat):
# given
iris = datasets.load_iris()
iris_dataset = iris.data
mock_compute_vat.return_value = np.ones((3, 3))
# when
output_result = vat(iris_dataset)
# then
assert output_result is None
| [
"sklearn.datasets.load_iris",
"pyclustertend.compute_ordered_dissimilarity_matrix",
"pyclustertend.compute_ivat_ordered_dissimilarity_matrix",
"numpy.ones",
"pathlib.Path",
"numpy.testing.assert_allclose",
"pyclustertend.vat",
"pyclustertend.ivat",
"numpy.load",
"unittest.mock.patch"
] | [((1025, 1129), 'unittest.mock.patch', 'patch', (['"""pyclustertend.visual_assessment_of_tendency.compute_ivat_ordered_dissimilarity_matrix"""'], {}), "(\n 'pyclustertend.visual_assessment_of_tendency.compute_ivat_ordered_dissimilarity_matrix'\n )\n", (1030, 1129), False, 'from unittest.mock import patch\n'), ((1466, 1565), 'unittest.mock.patch', 'patch', (['"""pyclustertend.visual_assessment_of_tendency.compute_ordered_dissimilarity_matrix"""'], {}), "(\n 'pyclustertend.visual_assessment_of_tendency.compute_ordered_dissimilarity_matrix'\n )\n", (1471, 1565), False, 'from unittest.mock import patch\n'), ((1898, 2002), 'unittest.mock.patch', 'patch', (['"""pyclustertend.visual_assessment_of_tendency.compute_ivat_ordered_dissimilarity_matrix"""'], {}), "(\n 'pyclustertend.visual_assessment_of_tendency.compute_ivat_ordered_dissimilarity_matrix'\n )\n", (1903, 2002), False, 'from unittest.mock import patch\n'), ((2290, 2389), 'unittest.mock.patch', 'patch', (['"""pyclustertend.visual_assessment_of_tendency.compute_ordered_dissimilarity_matrix"""'], {}), "(\n 'pyclustertend.visual_assessment_of_tendency.compute_ordered_dissimilarity_matrix'\n )\n", (2295, 2389), False, 'from unittest.mock import patch\n'), ((341, 361), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (359, 361), False, 'from sklearn import datasets\n'), ((421, 460), 'numpy.load', 'np.load', (["(TEST_DIR / 'data/iris_vat.npy')"], {}), "(TEST_DIR / 'data/iris_vat.npy')\n", (428, 460), True, 'import numpy as np\n'), ((494, 544), 'pyclustertend.compute_ordered_dissimilarity_matrix', 'compute_ordered_dissimilarity_matrix', (['iris_dataset'], {}), '(iris_dataset)\n', (530, 544), False, 'from pyclustertend import vat, ivat, compute_ordered_dissimilarity_matrix, compute_ivat_ordered_dissimilarity_matrix\n'), ((561, 638), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ordered_matrix', 'expected_ordered_matrix'], {'atol': '(0.1)'}), '(ordered_matrix, 
expected_ordered_matrix, atol=0.1)\n', (587, 638), True, 'import numpy as np\n'), ((718, 738), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (736, 738), False, 'from sklearn import datasets\n'), ((798, 838), 'numpy.load', 'np.load', (["(TEST_DIR / 'data/iris_ivat.npy')"], {}), "(TEST_DIR / 'data/iris_ivat.npy')\n", (805, 838), True, 'import numpy as np\n'), ((872, 927), 'pyclustertend.compute_ivat_ordered_dissimilarity_matrix', 'compute_ivat_ordered_dissimilarity_matrix', (['iris_dataset'], {}), '(iris_dataset)\n', (913, 927), False, 'from pyclustertend import vat, ivat, compute_ordered_dissimilarity_matrix, compute_ivat_ordered_dissimilarity_matrix\n'), ((944, 1021), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ordered_matrix', 'expected_ordered_matrix'], {'atol': '(0.1)'}), '(ordered_matrix, expected_ordered_matrix, atol=0.1)\n', (970, 1021), True, 'import numpy as np\n'), ((1253, 1273), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (1271, 1273), False, 'from sklearn import datasets\n'), ((1340, 1355), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (1347, 1355), True, 'import numpy as np\n'), ((1372, 1390), 'pyclustertend.ivat', 'ivat', (['iris_dataset'], {}), '(iris_dataset)\n', (1376, 1390), False, 'from pyclustertend import vat, ivat, compute_ordered_dissimilarity_matrix, compute_ivat_ordered_dissimilarity_matrix\n'), ((1687, 1707), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (1705, 1707), False, 'from sklearn import datasets\n'), ((1773, 1788), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (1780, 1788), True, 'import numpy as np\n'), ((1805, 1823), 'pyclustertend.ivat', 'ivat', (['iris_dataset'], {}), '(iris_dataset)\n', (1809, 1823), False, 'from pyclustertend import vat, ivat, compute_ordered_dissimilarity_matrix, compute_ivat_ordered_dissimilarity_matrix\n'), ((2088, 2108), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', 
(2106, 2108), False, 'from sklearn import datasets\n'), ((2175, 2190), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2182, 2190), True, 'import numpy as np\n'), ((2223, 2241), 'pyclustertend.ivat', 'ivat', (['iris_dataset'], {}), '(iris_dataset)\n', (2227, 2241), False, 'from pyclustertend import vat, ivat, compute_ordered_dissimilarity_matrix, compute_ivat_ordered_dissimilarity_matrix\n'), ((2473, 2493), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (2491, 2493), False, 'from sklearn import datasets\n'), ((2559, 2574), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2566, 2574), True, 'import numpy as np\n'), ((2607, 2624), 'pyclustertend.vat', 'vat', (['iris_dataset'], {}), '(iris_dataset)\n', (2610, 2624), False, 'from pyclustertend import vat, ivat, compute_ordered_dissimilarity_matrix, compute_ivat_ordered_dissimilarity_matrix\n'), ((235, 249), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (239, 249), False, 'from pathlib import Path\n')] |
from pommerman.agents import BaseAgent
import numpy as np
from . import dqn_agent_utilities
from . import pytorch_dqn_learner
debug_dqn_agent = False
class DqnAgent(BaseAgent):
def __init__(self, *args, **kwargs):
super(DqnAgent, self).__init__(*args, **kwargs)
self.game_agent_id = None # set this from board observation
#self.boardSize = None
self.testing = False
self.currentTimeStep = 0
self.currentEpisode = 0
self.dqn_engine = pytorch_dqn_learner.dqn_learning() # this is the interfacing class between dqnAgent and general DqnLearning algorithm (domain agnostic hopefully)
self.dqn_engine.episode_durations = []
self.dqn_engine.episode_rewards = []
try:
self.dqn_engine.load_trained_model(moreTraining=True) # if a model file exists
except:
print("no model file found")
def act(self, obs, action_space): # can exploit or explore based on dqn algorithm
# set current state - action is set from dqn_engine act method
if self.currentTimeStep == 0: # set agentId from game board
our_agent_pos = np.array(obs['position'])
board = np.array(obs['board'])
self.game_agent_id = board[our_agent_pos[0]][our_agent_pos[1]]
self.dqn_engine.current_state = dqn_agent_utilities.generate_NN_input(self.game_agent_id, obs, self.currentTimeStep)
else:
self.dqn_engine.next_state = dqn_agent_utilities.generate_NN_input(self.game_agent_id, obs, self.currentTimeStep)
self.dqn_engine.reward = 0
self.dqn_engine.save_to_buffer_learn(self.currentEpisode)
# prepare for the next interaction
self.dqn_engine.current_state = dqn_agent_utilities.generate_NN_input(self.game_agent_id, obs, self.currentTimeStep) # for the next transition
self.currentTimeStep += 1
if debug_dqn_agent:
print("passed to dqn learner to act")
return self.dqn_engine.act(self.dqn_engine.current_state, action_space.n, self.testing) # 6 is the action_space for Pommerman
def episode_end(self, reward):
self.dqn_engine.next_state = None # To make sure NN approximation for V(terminal) = 0
self.dqn_engine.reward = reward
if debug_dqn_agent:
print(f"episode reward is {reward}")
self.dqn_engine.save_to_buffer_learn(self.currentEpisode)
self.dqn_engine.episode_rewards.append(reward)
self.dqn_engine.episode_durations.append(self.currentTimeStep)
self.dqn_engine.plot_durations()
if self.currentEpisode % 1000 == 0: # save model every 1000 time - might save multiple models in a run
self.dqn_engine.save_trained_model(self.currentEpisode)
self.currentTimeStep = 0
self.currentEpisode += 1 | [
"numpy.array"
] | [((1156, 1181), 'numpy.array', 'np.array', (["obs['position']"], {}), "(obs['position'])\n", (1164, 1181), True, 'import numpy as np\n'), ((1202, 1224), 'numpy.array', 'np.array', (["obs['board']"], {}), "(obs['board'])\n", (1210, 1224), True, 'import numpy as np\n')] |
'''
Created by <NAME>
ISIA Lab, Faculty of Engineering University of Mons, Mons (Belgium)
<EMAIL>
Source: SEEN SOON
Copyright (C) 2019 - UMons
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''
import numpy as np
import math as m
import torch.nn as nn
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.interpolate import griddata
from torch.utils.data.dataset import Dataset
import torch.optim as optim
from sklearn.preprocessing import scale
from scipy.signal import resample
from torch.utils.data import DataLoader, random_split, Subset
'''
def array_to_epochs(data, channels, sampling_frequency, montage='standard_1020', channel_type=['eeg']):
channel_type = channel_type * len(channels)
info = mne.create_info(ch_names=channels, sfreq=sampling_frequency, ch_types=channel_type,
montage=mne.channels.make_standard_montage(montage), verbose=50)
event_id, tmin, tmax = 1, -1., data.shape[1] / sampling_frequency + 0.5
baseline = (None, 0)
events = np.array([[100, 0, event_id]])
epochs = mne.EpochsArray(data.reshape(1, data.shape[0], data.shape[1]), info=info, events=events,
event_id={'arbitrary': 1}, verbose=50)
return epochs
def compute_psd(epoch, fmin=-1., fmax=60.):
psds, freqs = psd_multitaper(epoch, fmin=fmin, fmax=fmax, n_jobs=10, verbose=50)
return resample(psds, num=1500, axis=2)[0, :]
'''
def image_generation(feature_matrix, electrodes_loc, n_gridpoints):
n_electrodes = electrodes_loc.shape[0] # number of electrodes
n_bands = feature_matrix.shape[1] // n_electrodes # number of frequency bands considered in the feature matrix
n_samples = feature_matrix.shape[0] # number of samples to consider in the feature matrix.
# Checking the dimension of the feature matrix
if feature_matrix.shape[1] % n_electrodes != 0:
print('The combination feature matrix - electrodes locations is not working.')
assert feature_matrix.shape[1] % n_electrodes == 0
new_feat = []
# Reshape a novel feature matrix with a list of array with shape [n_samples x n_electrodes] for each frequency band
for bands in range(n_bands):
new_feat.append(feature_matrix[:, bands * n_electrodes: (bands + 1) * n_electrodes])
# Creation of a meshgrid data interpolation
# Creation of an empty grid
grid_x, grid_y = np.mgrid[
np.min(electrodes_loc[:, 0]): np.max(electrodes_loc[:, 0]): n_gridpoints * 1j, # along x_axis
np.min(electrodes_loc[:, 1]): np.max(electrodes_loc[:, 1]): n_gridpoints * 1j # along y_axis
]
interpolation_img = []
# Interpolation
# Creation of the empty interpolated feature matrix
for bands in range(n_bands):
interpolation_img.append(np.zeros([n_samples, n_gridpoints, n_gridpoints]))
# Interpolation between the points
print('Signals interpolations.')
for sample in range(n_samples):
for bands in range(n_bands):
interpolation_img[bands][sample, :, :] = griddata(electrodes_loc, new_feat[bands][sample, :], (grid_x, grid_y), method='cubic', fill_value=np.nan)
# Normalization - replacing the nan values by interpolation
for bands in range(n_bands):
interpolation_img[bands][~np.isnan(interpolation_img[bands])] = scale(interpolation_img[bands][~np.isnan(interpolation_img[bands])])
interpolation_img[bands] = np.nan_to_num(interpolation_img[bands])
return np.swapaxes(np.asarray(interpolation_img), 0, 1) # swap axes to have [samples, colors, W, H]
class EEGImagesDataset(Dataset):
"""EEGLearn Images Dataset from EEG."""
def __init__(self, label, image):
self.label = label
self.Images = image
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
image = self.Images[idx]
label = self.label[idx]
sample = (image, label)
return sample
class CombDataset(Dataset):
"""EEGLearn Images Dataset from EEG."""
def __init__(self, label, image, array):
self.label = label
self.array = array
self.Images = image
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
image = self.Images[idx]
label = self.label[idx]
array = self.array[idx]
sample = (image, array, label)
return sample
def Test_Model(net, Testloader, criterion, is_cuda=True):
running_loss = 0.0
evaluation = []
criterion_ae = nn.MSELoss()
# y_pred = []
# y_true = []
for i, data in enumerate(Testloader, 0):
input_img, labels = data
input_img = input_img.to(torch.float32)
if is_cuda:
input_img = input_img.cuda()
outputs = net(input_img)
_, predicted = torch.max(outputs.cpu().data, 1)
evaluation.append((predicted == labels).tolist())
# y_pred.extend(predicted)
# y_true.extend(labels)
loss = criterion(outputs, labels.long().cuda()) # + criterion_ae(out_ae, input_img)
running_loss += loss.item()
running_loss = running_loss / (i + 1)
evaluation = [item for sublist in evaluation for item in sublist]
running_acc = sum(evaluation) / len(evaluation)
# plt.hist(y_pred, bins=4, rwidth=0.5, alpha=0.5)
# plt.hist(y_true, bins=4, rwidth=0.5, alpha=0.5)
# plt.show()
return running_loss, running_acc
def TrainTest_Model(model, trainloader, testloader, n_epoch=30, opti='SGD', learning_rate=0.0001, is_cuda=True,
print_epoch=5, verbose=False):
if is_cuda:
net = model().cuda()
else:
net = model()
criterion = nn.CrossEntropyLoss()
criterion_AE = nn.MSELoss()
if opti == 'SGD':
optimizer = optim.SGD(net.parameters(), lr=learning_rate)
elif opti == 'Adam':
optimizer = optim.Adam(net.parameters(), lr=learning_rate)
else:
print("Optimizer: " + optim + " not implemented.")
for epoch in range(n_epoch):
running_loss = 0.0
evaluation = []
net.train()
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs.to(torch.float32).cuda())
_, predicted = torch.max(outputs.cpu().data, 1)
evaluation.append((predicted == labels).tolist())
loss = criterion(outputs, labels.long().cuda()) # + criterion_AE(out_ae, inputs.to(torch.float32).cuda())
loss.backward()
optimizer.step()
running_loss += loss.item()
running_loss = running_loss / (i + 1)
evaluation = [item for sublist in evaluation for item in sublist]
running_acc = sum(evaluation) / len(evaluation)
net.eval()
validation_loss, validation_acc = Test_Model(net, testloader, criterion, True)
if epoch % print_epoch == (print_epoch - 1):
print('[%d, %3d]\tloss: %.3f\tAccuracy : %.3f\t\tval-loss: %.3f\tval-Accuracy : %.3f' %
(epoch + 1, n_epoch, running_loss, running_acc, validation_loss, validation_acc))
if verbose:
print('Finished Training \n loss: %.3f\tAccuracy : %.3f\t\tval-loss: %.3f\tval-Accuracy : %.3f' %
(running_loss, running_acc, validation_loss, validation_acc))
return (running_loss, running_acc, validation_loss, validation_acc)
def set_requires_grad(model, requires_grad=True):
for param in model.parameters():
param.requires_grad = requires_grad
def iter_over(train_loader, test_loader):
iter_test_loader = iter(test_loader)
for i, data_train in enumerate(Trainloader, 0):
try:
data_test = next(iter_test_loader)
except StopIteration:
iter_test_loader = iter(test_loader)
data_test = next(iter_test_loader)
yield i, data_train, data_test
| [
"torch.nn.CrossEntropyLoss",
"scipy.interpolate.griddata",
"numpy.asarray",
"numpy.max",
"torch.nn.MSELoss",
"torch.is_tensor",
"numpy.zeros",
"numpy.isnan",
"numpy.min",
"numpy.nan_to_num"
] | [((5355, 5367), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5365, 5367), True, 'import torch.nn as nn\n'), ((6518, 6539), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6537, 6539), True, 'import torch.nn as nn\n'), ((6559, 6571), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6569, 6571), True, 'import torch.nn as nn\n'), ((4132, 4171), 'numpy.nan_to_num', 'np.nan_to_num', (['interpolation_img[bands]'], {}), '(interpolation_img[bands])\n', (4145, 4171), True, 'import numpy as np\n'), ((4195, 4224), 'numpy.asarray', 'np.asarray', (['interpolation_img'], {}), '(interpolation_img)\n', (4205, 4224), True, 'import numpy as np\n'), ((4548, 4568), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (4563, 4568), False, 'import torch\n'), ((5021, 5041), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (5036, 5041), False, 'import torch\n'), ((3496, 3545), 'numpy.zeros', 'np.zeros', (['[n_samples, n_gridpoints, n_gridpoints]'], {}), '([n_samples, n_gridpoints, n_gridpoints])\n', (3504, 3545), True, 'import numpy as np\n'), ((3751, 3860), 'scipy.interpolate.griddata', 'griddata', (['electrodes_loc', 'new_feat[bands][sample, :]', '(grid_x, grid_y)'], {'method': '"""cubic"""', 'fill_value': 'np.nan'}), "(electrodes_loc, new_feat[bands][sample, :], (grid_x, grid_y),\n method='cubic', fill_value=np.nan)\n", (3759, 3860), False, 'from scipy.interpolate import griddata\n'), ((3085, 3113), 'numpy.min', 'np.min', (['electrodes_loc[:, 0]'], {}), '(electrodes_loc[:, 0])\n', (3091, 3113), True, 'import numpy as np\n'), ((3115, 3143), 'numpy.max', 'np.max', (['electrodes_loc[:, 0]'], {}), '(electrodes_loc[:, 0])\n', (3121, 3143), True, 'import numpy as np\n'), ((3201, 3229), 'numpy.min', 'np.min', (['electrodes_loc[:, 1]'], {}), '(electrodes_loc[:, 1])\n', (3207, 3229), True, 'import numpy as np\n'), ((3231, 3259), 'numpy.max', 'np.max', (['electrodes_loc[:, 1]'], {}), '(electrodes_loc[:, 1])\n', (3237, 3259), True, 
'import numpy as np\n'), ((3990, 4024), 'numpy.isnan', 'np.isnan', (['interpolation_img[bands]'], {}), '(interpolation_img[bands])\n', (3998, 4024), True, 'import numpy as np\n'), ((4060, 4094), 'numpy.isnan', 'np.isnan', (['interpolation_img[bands]'], {}), '(interpolation_img[bands])\n', (4068, 4094), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import io
import requests
import subprocess
import time
import numpy as np
from PIL import Image
from PIL import ImageEnhance
import tflite_runtime.interpreter as tflite
#TODO: use a context_mananger to dispose the stream
def raspi_capture():
cmd = "raspistill -w 224 -h 224 -o /home/pi/images/capture.jpg -rot 270 -n -ss 2000000 -br 70 -co 50"
subprocess.call(cmd, shell=True)
img = Image.open("/home/pi/images/capture.jpg")
#img = img.rotate(90)
# brighten for now
#enhancer = ImageEnhance.Brightness(img)
#img = enhancer.enhance(brightness)
img.save("/home/pi/images/pil.jpg")
return img
# from: https://support.pushover.app/i44-example-code-and-pushover-libraries#python-image
def send_message(args, img, result, old):
stream = io.BytesIO()
img.save(stream, format="jpeg")
stream.seek(0)
msg = "%s [%s]" % (result[2], ','.join([x[1][0] for x in old]))
r = requests.post("https://api.pushover.net/1/messages.json", data = {
"token": args.app_token,
"user": args.user_token,
"message": msg
},
files = {
"attachment": ("image.jpg", stream, "image/jpeg")
})
print("sent message: %s" % result[2])
print(r.text)
class Scorer:
def __init__(self, model_file, label_file, input_mean, input_std):
labels = self._load_labels(label_file)
interpreter = tflite.Interpreter(model_path=model_file)
interpreter.allocate_tensors()
# NxHxWxC, H:1, W:2
self.labels = labels
self.input_mean = input_mean
self.input_std = input_std
self.input_details = interpreter.get_input_details()
self.output_details = interpreter.get_output_details()
self.height = self.input_details[0]['shape'][1]
self.width = self.input_details[0]['shape'][2]
self.interpreter = interpreter
print("init %s x %s" % (self.width, self.height))
def _load_labels(self, filename):
with open(filename, 'r') as f:
return [line.strip() for line in f.readlines()]
def score(img):
# add N dim
input_data = np.expand_dims(img, axis=0)
# check the type of the input tensor
floating_model = self.input_details[0]['dtype'] == np.float32
if floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
self.interpreter.invoke()
output_data = self.interpreter.get_tensor(self.output_details[0]['index'])
results = np.squeeze(output_data)
top_k = results.argsort()[-5:][::-1]
result = None
for i in top_k:
msg = ""
if floating_model:
msg = '{:08.6f}: {}'.format(float(results[i]), self.labels[i])
else:
msg = '{:08.6f}: {}'.format(float(results[i] / 255.0), self.labels[i])
if result is None:
result = (results[i], self.labels[i], msg)
print(msg)
print("---")
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-m',
'--model_file',
default='/tmp/mobilenet_v1_1.0_224_quant.tflite',
help='.tflite model to be executed')
parser.add_argument(
'-l',
'--label_file',
default='/tmp/labels.txt',
help='name of file containing labels')
parser.add_argument(
'--input_mean',
default=127.5, type=float,
help='input_mean')
parser.add_argument(
'--input_std',
default=127.5, type=float,
help='input standard deviation')
parser.add_argument(
'--brightness',
default=100, type=int,
help='brightness')
parser.add_argument(
'--user_token',
default='<PASSWORD>',
help='pushover user token')
parser.add_argument(
'--app_token',
default='some<PASSWORD>',
help='pushover app token')
args = parser.parse_args()
scorer = Scorer(args.model_file, args.label_file, args.input_mean, args.input_std)
old_results = []
last_result = ""
count = 0
while True:
#time.sleep(5)
img = raspi_capture()
result = scorer.score(img)
old_results.append(result)
if len(old_results) > 10:
old_results = old_results[-10:]
# ignore low confidence
if result[0] < 0.6:
print("ignore low confidence %s" % result[2])
continue
# increment and reset on change
count = count + 1
if result[1] != last_result:
count = 0
last_result = result[1]
print("reset to %s" % last_result)
# send message when stable for 30s
msg = "%s [%s]" % (result[2], ','.join([x[1][0] for x in old_results]))
print(msg)
if count == 6:
send_message(args, img, result, old_results)
| [
"tflite_runtime.interpreter.Interpreter",
"requests.post",
"PIL.Image.open",
"argparse.ArgumentParser",
"io.BytesIO",
"numpy.squeeze",
"subprocess.call",
"numpy.expand_dims",
"numpy.float32"
] | [((479, 511), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (494, 511), False, 'import subprocess\n'), ((520, 561), 'PIL.Image.open', 'Image.open', (['"""/home/pi/images/capture.jpg"""'], {}), "('/home/pi/images/capture.jpg')\n", (530, 561), False, 'from PIL import Image\n'), ((883, 895), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (893, 895), False, 'import io\n'), ((1020, 1218), 'requests.post', 'requests.post', (['"""https://api.pushover.net/1/messages.json"""'], {'data': "{'token': args.app_token, 'user': args.user_token, 'message': msg}", 'files': "{'attachment': ('image.jpg', stream, 'image/jpeg')}"}), "('https://api.pushover.net/1/messages.json', data={'token':\n args.app_token, 'user': args.user_token, 'message': msg}, files={\n 'attachment': ('image.jpg', stream, 'image/jpeg')})\n", (1033, 1218), False, 'import requests\n'), ((3052, 3077), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3075, 3077), False, 'import argparse\n'), ((1442, 1483), 'tflite_runtime.interpreter.Interpreter', 'tflite.Interpreter', ([], {'model_path': 'model_file'}), '(model_path=model_file)\n', (1460, 1483), True, 'import tflite_runtime.interpreter as tflite\n'), ((2125, 2152), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (2139, 2152), True, 'import numpy as np\n'), ((2576, 2599), 'numpy.squeeze', 'np.squeeze', (['output_data'], {}), '(output_data)\n', (2586, 2599), True, 'import numpy as np\n'), ((2309, 2331), 'numpy.float32', 'np.float32', (['input_data'], {}), '(input_data)\n', (2319, 2331), True, 'import numpy as np\n')] |
"""Functions to fit MRI SPGR signal to obtain T1.
Created 28 September 2020
@authors: <NAME>
@email: <EMAIL>
@institution: University of Edinburgh, UK
Functions:
fit_vfa_2_point: obtain T1 using analytical formula based on two images
fit_vfa_linear: obtain T1 using linear regression
fit_vfa_nonlinear: obtain T1 using non-linear least squares fit
fit_hifi: obtain T1 by fitting a combination of SPGR and IR-SPGR scans
spgr_signal: get SPGR signal
irspgr_signal: get IR-SPGR signal
"""
import numpy as np
from scipy.optimize import curve_fit, least_squares
from fitting import calculator
class vfa_2points(calculator):
def __init__(self, fa, tr):
self.fa = np.asarray(fa)
self.tr = tr
self.fa_rad = np.pi*self.fa/180
def proc(self, s, k_fa=1):
with np.errstate(divide='ignore', invalid='ignore'):
fa_true = k_fa * self.fa_rad
sr = s[0] / s[1]
t1 = self.tr / np.log(
(sr*np.sin(fa_true[1])*np.cos(fa_true[0]) -
np.sin(fa_true[0])*np.cos(fa_true[1])) /
(sr*np.sin(fa_true[1]) - np.sin(fa_true[0])))
s0 = s[0] * ((1-np.exp(-self.tr/t1)*np.cos(fa_true[0])) /
((1-np.exp(-self.tr/t1))*np.sin(fa_true[0])))
t1 = np.nan if ~np.isreal(t1) | (t1 <= 0) | np.isinf(t1) else t1
s0 = np.nan if (s0 <= 0) | np.isinf(s0) else s0
return {'s0': s0, 't1': t1}
class vfa_linear(calculator):
def __init__(self, fa, tr):
self.fa = np.asarray(fa)
self.tr = tr
self.fa_rad = np.pi*self.fa/180
def proc(self, s, k_fa=1):
fa_true = k_fa * self.fa_rad
y = s / np.sin(fa_true)
x = s / np.tan(fa_true)
A = np.stack([x, np.ones(x.shape)], axis=1)
slope, intercept = np.linalg.lstsq(A, y, rcond=None)[0]
is_valid = (intercept > 0) and (0. < slope < 1.)
t1, s0 = (-self.tr/np.log(slope),
intercept/(1-slope)) if is_valid else (np.nan, np.nan)
return {'s0': s0, 't1': t1}
class vfa_nonlinear(calculator):
def __init__(self, fa, tr):
self.fa = np.asarray(fa)
self.tr = tr
self.fa_rad = np.pi*self.fa/180
self.linear_fitter = vfa_linear(fa, tr)
def proc(self, s, k_fa=1):
# use linear fit to obtain initial guess
result_linear = self.linear_fitter.proc(s, k_fa=k_fa)
x_linear = np.array((result_linear['s0'], result_linear['t1']))
if (~np.isnan(x_linear[0]) & ~np.isnan(x_linear[1])):
x0 = x_linear
else:
x0 = np.array([s[0] / spgr_signal(1., 1., self.tr, k_fa*self.fa[0]), 1.])
result = least_squares(self.__residuals, x0, args=(s, k_fa), bounds=((1e-8,1e-8),(np.inf,np.inf)), method='trf',
x_scale=x0
)
if result.success is False:
raise ArithmeticError(f'Unable to fit VFA data'
f': {result.message}')
s0, t1 = result.x
return {'s0': s0, 't1': t1}
def __residuals(self, x, s, k_fa):
s0, t1 = x
s_est = spgr_signal(s0, t1, self.tr, k_fa*self.fa)
return s - s_est
class hifi(calculator):
def __init__(self, esp, ti, n, b, td, centre):
self.esp = esp
self.ti = ti
self.n = n
self.b = b
self.td = td
self.centre = centre
# get information about the scans
self.n_scans = len(esp)
self.is_ir = ~np.isnan(ti)
self.is_spgr = ~self.is_ir
self.idx_spgr = np.where(self.is_spgr)[0]
self.n_spgr = self.idx_spgr.size
self.get_linear_estimate = self.n_spgr > 1 and np.all(
np.isclose(esp[self.idx_spgr], esp[self.idx_spgr[0]]))
self.linear_fitter = vfa_linear( b[self.is_spgr], esp[self.idx_spgr[0]])
def proc(self, s, k_fa_fixed=None):
# First get a quick linear T1 estimate
if self.get_linear_estimate: # If >1 SPGR, use linear VFA fit
result_lin = self.linear_fitter.proc(s[self.is_spgr])
if ~np.isnan(result_lin['s0']) and ~np.isnan(result_lin['t1']):
s0_init, t1_init = result_lin['s0'], result_lin['t1']
else: # if result invalid, assume T1=1
t1_init = 1
s0_init = s[self.idx_spgr[0]] / spgr_signal(1, t1_init,
self.esp[self.idx_spgr[0]],
self.b[self.idx_spgr[0]])
elif self.n_spgr == 1: # If 1 SPGR, assume T1=1 and estimate s0 based on this scan
t1_init = 1
s0_init = s[self.idx_spgr[0]] / spgr_signal(1, t1_init,
self.esp[self.idx_spgr[0]],
self.b[self.idx_spgr[0]])
else: # If 0 SPGR, assume T1=1 and estimate s0 based on 1st scan
t1_init = 1
s0_init = s[0] / irspgr_signal(1, t1_init, self.esp[0], self.ti[0], self.n[0], self.b[0],
180, self.td[0], self.centre[0])
# Non-linear fit
if k_fa_fixed is None:
k_init = 1
bounds = ([0, 0, 0], [np.inf, np.inf, np.inf])
else:
k_init = k_fa_fixed
bounds = ([0, 0, 1], [np.inf, np.inf, 1])
x_0 = np.array([t1_init, s0_init, k_init])
result = least_squares(self.__residuals, x_0, args=(s,), bounds=bounds, method='trf',
x_scale=(t1_init, s0_init, k_init)
)
x_opt = result.x if result.success else (np.nan, np.nan, np.nan)
t1_opt, s0_opt, k_fa_opt = x_opt
s_opt = self.__signal(x_opt)
return {'t1': t1_opt, 's0': s0_opt, 'k_fa': k_fa_opt, 's_opt': s_opt}
def __residuals(self, x, s):
return s - self.__signal(x)
def __signal(self, x):
t1, s0, k_fa = x
s = np.zeros(self.n_scans)
s[self.is_ir] = irspgr_signal(s0, t1, self.esp[self.is_ir], self.ti[self.is_ir],
self.n[self.is_ir], k_fa*self.b[self.is_ir], self.td[self.is_ir],
self.centre[self.is_ir])
s[self.is_spgr] = spgr_signal(s0, t1, self.esp[self.is_spgr],
k_fa*self.b[self.is_spgr])
return s
def spgr_signal(s0, t1, tr, fa):
"""Return signal for SPGR sequence.
Parameters
----------
s0 : float
Equilibrium signal.
t1 : float
T1 value (s).
tr : float
TR value (s).
fa : float
Flip angle (deg).
Returns
-------
s : float
Steady-state SPGR signal.
"""
fa_rad = np.pi*fa/180
e = np.exp(-tr/t1)
s = s0 * (((1-e)*np.sin(fa_rad)) /
(1-e*np.cos(fa_rad)))
return s
def irspgr_signal(s0, t1, esp, ti, n, b, td=0, centre=0.5):
    """Return signal for IR-SPGR sequence.

    Uses the formula by Deichmann et al. (2000) to account for the modified
    apparent relaxation rate (T1*) during the pulse train. The inversion
    pulse is assumed to be ideal.

    Parameters
    ----------
    s0 : float
        Equilibrium signal.
    t1 : float
        T1 value (s).
    esp : float
        Echo spacing (s). For SPGR, this is the TR.
    ti : float
        Inversion time (s). Note this is the actual time delay between the
        inversion pulse and the start of the echo train. The effective TI
        may be different, e.g for linear phase encoding of the echo train.
    n : int
        Number of excitation pulses per inversion pulse.
    b : float
        Readout pulse flip angle (deg).
    td : float
        Delay between end of readout train and the next inversion (s).
    centre : float
        Time in readout train when centre of k-space is acquired,
        expressed as a fraction of the readout duration. e.g. = 0 for
        centric phase encoding, = 0.5 for linear phase encoding.

    Returns
    -------
    s : float
        Steady-state IR-SPGR signal.
    """
    b_rad = np.pi*b/180
    tau = esp * n  # total duration of the readout train
    # Apparent relaxation time during the pulse train (Deichmann 2000).
    t1_star = (1/t1 - 1/esp*np.log(np.cos(b_rad)))**-1
    m0_star = s0 * ((1-np.exp(-esp/t1)) / (1-np.exp(-esp/t1_star)))
    r1 = -tau/t1_star
    e1 = np.exp(r1)      # decay over the readout train
    e2 = np.exp(-td/t1)  # recovery during the post-train delay
    e3 = np.exp(-ti/t1)  # recovery during the inversion delay
    a1 = m0_star * (1-e1)
    a2 = s0 * (1 - e2)
    a3 = s0 * (1 - e3)
    a = a3 - a2*e3 - a1*e2*e3
    # Renamed from ``b`` to avoid shadowing the flip-angle parameter above.
    geom = -e1*e2*e3
    m1 = a/(1-geom)
    # Fix: the original final line was fused with a dataset-extraction
    # artefact ("return s | ["), which made the function uncompilable.
    s = np.abs((
        m0_star + (m1-m0_star)*np.exp(centre*r1))*np.sin(b_rad))
    return s
"scipy.optimize.least_squares",
"numpy.isclose",
"numpy.tan",
"numpy.ones",
"numpy.where",
"numpy.log",
"numpy.asarray",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.errstate",
"numpy.isnan",
"numpy.cos",
"numpy.linalg.lstsq",
"numpy.isreal",
"numpy.sin",
"numpy.isinf"
] | [((6908, 6924), 'numpy.exp', 'np.exp', (['(-tr / t1)'], {}), '(-tr / t1)\n', (6914, 6924), True, 'import numpy as np\n'), ((8518, 8528), 'numpy.exp', 'np.exp', (['r1'], {}), '(r1)\n', (8524, 8528), True, 'import numpy as np\n'), ((8538, 8554), 'numpy.exp', 'np.exp', (['(-td / t1)'], {}), '(-td / t1)\n', (8544, 8554), True, 'import numpy as np\n'), ((8562, 8578), 'numpy.exp', 'np.exp', (['(-ti / t1)'], {}), '(-ti / t1)\n', (8568, 8578), True, 'import numpy as np\n'), ((698, 712), 'numpy.asarray', 'np.asarray', (['fa'], {}), '(fa)\n', (708, 712), True, 'import numpy as np\n'), ((1550, 1564), 'numpy.asarray', 'np.asarray', (['fa'], {}), '(fa)\n', (1560, 1564), True, 'import numpy as np\n'), ((2182, 2196), 'numpy.asarray', 'np.asarray', (['fa'], {}), '(fa)\n', (2192, 2196), True, 'import numpy as np\n'), ((2468, 2520), 'numpy.array', 'np.array', (["(result_linear['s0'], result_linear['t1'])"], {}), "((result_linear['s0'], result_linear['t1']))\n", (2476, 2520), True, 'import numpy as np\n'), ((2727, 2851), 'scipy.optimize.least_squares', 'least_squares', (['self.__residuals', 'x0'], {'args': '(s, k_fa)', 'bounds': '((1e-08, 1e-08), (np.inf, np.inf))', 'method': '"""trf"""', 'x_scale': 'x0'}), "(self.__residuals, x0, args=(s, k_fa), bounds=((1e-08, 1e-08),\n (np.inf, np.inf)), method='trf', x_scale=x0)\n", (2740, 2851), False, 'from scipy.optimize import curve_fit, least_squares\n'), ((5462, 5498), 'numpy.array', 'np.array', (['[t1_init, s0_init, k_init]'], {}), '([t1_init, s0_init, k_init])\n', (5470, 5498), True, 'import numpy as np\n'), ((5516, 5632), 'scipy.optimize.least_squares', 'least_squares', (['self.__residuals', 'x_0'], {'args': '(s,)', 'bounds': 'bounds', 'method': '"""trf"""', 'x_scale': '(t1_init, s0_init, k_init)'}), "(self.__residuals, x_0, args=(s,), bounds=bounds, method='trf',\n x_scale=(t1_init, s0_init, k_init))\n", (5529, 5632), False, 'from scipy.optimize import curve_fit, least_squares\n'), ((6052, 6074), 'numpy.zeros', 'np.zeros', 
(['self.n_scans'], {}), '(self.n_scans)\n', (6060, 6074), True, 'import numpy as np\n'), ((827, 873), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (838, 873), True, 'import numpy as np\n'), ((1719, 1734), 'numpy.sin', 'np.sin', (['fa_true'], {}), '(fa_true)\n', (1725, 1734), True, 'import numpy as np\n'), ((1751, 1766), 'numpy.tan', 'np.tan', (['fa_true'], {}), '(fa_true)\n', (1757, 1766), True, 'import numpy as np\n'), ((1846, 1879), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y'], {'rcond': 'None'}), '(A, y, rcond=None)\n', (1861, 1879), True, 'import numpy as np\n'), ((3574, 3586), 'numpy.isnan', 'np.isnan', (['ti'], {}), '(ti)\n', (3582, 3586), True, 'import numpy as np\n'), ((3646, 3668), 'numpy.where', 'np.where', (['self.is_spgr'], {}), '(self.is_spgr)\n', (3654, 3668), True, 'import numpy as np\n'), ((8785, 8798), 'numpy.sin', 'np.sin', (['b_rad'], {}), '(b_rad)\n', (8791, 8798), True, 'import numpy as np\n'), ((1354, 1366), 'numpy.isinf', 'np.isinf', (['t1'], {}), '(t1)\n', (1362, 1366), True, 'import numpy as np\n'), ((1410, 1422), 'numpy.isinf', 'np.isinf', (['s0'], {}), '(s0)\n', (1418, 1422), True, 'import numpy as np\n'), ((1792, 1808), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (1799, 1808), True, 'import numpy as np\n'), ((2534, 2555), 'numpy.isnan', 'np.isnan', (['x_linear[0]'], {}), '(x_linear[0])\n', (2542, 2555), True, 'import numpy as np\n'), ((2559, 2580), 'numpy.isnan', 'np.isnan', (['x_linear[1]'], {}), '(x_linear[1])\n', (2567, 2580), True, 'import numpy as np\n'), ((3788, 3841), 'numpy.isclose', 'np.isclose', (['esp[self.idx_spgr]', 'esp[self.idx_spgr[0]]'], {}), '(esp[self.idx_spgr], esp[self.idx_spgr[0]])\n', (3798, 3841), True, 'import numpy as np\n'), ((6944, 6958), 'numpy.sin', 'np.sin', (['fa_rad'], {}), '(fa_rad)\n', (6950, 6958), True, 'import numpy as np\n'), ((8441, 8458), 'numpy.exp', 'np.exp', (['(-esp / t1)'], {}), 
'(-esp / t1)\n', (8447, 8458), True, 'import numpy as np\n'), ((8463, 8485), 'numpy.exp', 'np.exp', (['(-esp / t1_star)'], {}), '(-esp / t1_star)\n', (8469, 8485), True, 'import numpy as np\n'), ((1972, 1985), 'numpy.log', 'np.log', (['slope'], {}), '(slope)\n', (1978, 1985), True, 'import numpy as np\n'), ((4169, 4195), 'numpy.isnan', 'np.isnan', (["result_lin['s0']"], {}), "(result_lin['s0'])\n", (4177, 4195), True, 'import numpy as np\n'), ((4201, 4227), 'numpy.isnan', 'np.isnan', (["result_lin['t1']"], {}), "(result_lin['t1'])\n", (4209, 4227), True, 'import numpy as np\n'), ((6981, 6995), 'numpy.cos', 'np.cos', (['fa_rad'], {}), '(fa_rad)\n', (6987, 6995), True, 'import numpy as np\n'), ((8398, 8411), 'numpy.cos', 'np.cos', (['b_rad'], {}), '(b_rad)\n', (8404, 8411), True, 'import numpy as np\n'), ((8766, 8785), 'numpy.exp', 'np.exp', (['(centre * r1)'], {}), '(centre * r1)\n', (8772, 8785), True, 'import numpy as np\n'), ((1280, 1298), 'numpy.sin', 'np.sin', (['fa_true[0]'], {}), '(fa_true[0])\n', (1286, 1298), True, 'import numpy as np\n'), ((1326, 1339), 'numpy.isreal', 'np.isreal', (['t1'], {}), '(t1)\n', (1335, 1339), True, 'import numpy as np\n'), ((1139, 1157), 'numpy.sin', 'np.sin', (['fa_true[0]'], {}), '(fa_true[0])\n', (1145, 1157), True, 'import numpy as np\n'), ((1188, 1209), 'numpy.exp', 'np.exp', (['(-self.tr / t1)'], {}), '(-self.tr / t1)\n', (1194, 1209), True, 'import numpy as np\n'), ((1208, 1226), 'numpy.cos', 'np.cos', (['fa_true[0]'], {}), '(fa_true[0])\n', (1214, 1226), True, 'import numpy as np\n'), ((1259, 1280), 'numpy.exp', 'np.exp', (['(-self.tr / t1)'], {}), '(-self.tr / t1)\n', (1265, 1280), True, 'import numpy as np\n'), ((1019, 1037), 'numpy.cos', 'np.cos', (['fa_true[0]'], {}), '(fa_true[0])\n', (1025, 1037), True, 'import numpy as np\n'), ((1057, 1075), 'numpy.sin', 'np.sin', (['fa_true[0]'], {}), '(fa_true[0])\n', (1063, 1075), True, 'import numpy as np\n'), ((1076, 1094), 'numpy.cos', 'np.cos', (['fa_true[1]'], {}), 
'(fa_true[1])\n', (1082, 1094), True, 'import numpy as np\n'), ((1118, 1136), 'numpy.sin', 'np.sin', (['fa_true[1]'], {}), '(fa_true[1])\n', (1124, 1136), True, 'import numpy as np\n'), ((1000, 1018), 'numpy.sin', 'np.sin', (['fa_true[1]'], {}), '(fa_true[1])\n', (1006, 1018), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def increment_blinker(blink_counter, blink_flag, blink_duration=8):
    """Advance a blink phase counter by one tick.

    The counter runs over a full cycle of ``2 * blink_duration`` ticks:
    during the second half the flag is forced off, and once the cycle
    completes the flag turns on and the counter wraps back to zero.

    Returns the ``(counter, flag)`` pair for the next tick.
    """
    next_count = blink_counter + 1
    full_cycle = blink_duration * 2
    if next_count >= full_cycle:
        return 0, True
    if next_count >= blink_duration:
        return next_count, False
    return next_count, blink_flag
def round_up_game(canvas, particles, blink_flag, mouse_loc):
    """Run one frame of the particle round-up mini game.

    Draws the player-controlled catcher circle at ``mouse_loc``, lets every
    particle react to it, and overlays the score text. ``canvas`` and the
    particle objects are mutated in place.

    Parameters
    ----------
    canvas : image array drawn on in place (colour order per cv2 -- BGR assumed,
        TODO confirm against the capture pipeline).
    particles : iterable of particle objects exposing ``check_hit``,
        ``update``, ``show`` and the ``is_hit``/``location``/``color``/
        ``game_target`` attributes.
    blink_flag : bool toggled by the caller; selects the highlight colour of
        particles that just escaped to their target.
    mouse_loc : (x, y) pixel tuple; ``float('inf')`` components signal that
        the pointer is currently not detected.

    Returns
    -------
    tuple
        ``(canvas, game_mode)`` where ``game_mode`` is False once every
        particle has been caught and True while the game is still running.
    """
    game_mouse_size = 100
    mouse_x, mouse_y = mouse_loc
    game_mouse_color = (150, 100, 30)
    # Bug fix: initialise game_mode up front.  Previously it was assigned
    # only inside the branch below, so an undetected mouse (inf, inf) made
    # the return statement raise NameError.
    game_mode = True
    if mouse_x != float('inf') and mouse_y != float('inf'):
        # Draw the filled catcher circle at the pointer position.
        cv2.circle(canvas, (mouse_x, mouse_y),
                   radius=game_mouse_size,
                   color=game_mouse_color,
                   thickness=-1)
        particle_hit_count = 0
        for particle in particles:
            particle.check_hit((mouse_x, mouse_y), game_mouse_size)
            if not particle.is_hit and np.linalg.norm(particle.location - particle.game_target) <= 30:
                # Escaped particle reached its target: blink it and send it
                # off to a fresh random target inside the canvas.
                if blink_flag:
                    particle.color = (30, 30, 200)
                else:
                    particle.color = (100, 100, 150)
                game_target = \
                    tuple(np.random.randint(1, canvas.shape[1], 1)) + \
                    tuple(np.random.randint(1, canvas.shape[0], 1))
                particle.game_target = np.array(game_target)
            elif particle.is_hit:
                particle_hit_count += 1
            particle.update(canvas,
                            mouse_loc=None,
                            target=particle.game_target)
            particle.show(canvas)
        if particle_hit_count == len(particles):
            # Every particle has been caught: the round is over.
            game_mode = False
        else:
            cv2.putText(canvas,
                        'Caught {} of {}'.format(particle_hit_count, len(particles)),
                        (10, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), thickness=1)
            cv2.putText(canvas,
                        '(mouse over the particles)',
                        (10, 65),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (150, 150, 150), thickness=1)
    return canvas, game_mode
| [
"cv2.putText",
"numpy.array",
"cv2.circle",
"numpy.random.randint",
"numpy.linalg.norm"
] | [((555, 660), 'cv2.circle', 'cv2.circle', (['canvas', '(mouse_x, mouse_y)'], {'radius': 'game_mouse_size', 'color': 'game_mouse_color', 'thickness': '(-1)'}), '(canvas, (mouse_x, mouse_y), radius=game_mouse_size, color=\n game_mouse_color, thickness=-1)\n', (565, 660), False, 'import cv2\n'), ((1846, 1971), 'cv2.putText', 'cv2.putText', (['canvas', '"""(mouse over the particles)"""', '(10, 65)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(150, 150, 150)'], {'thickness': '(1)'}), "(canvas, '(mouse over the particles)', (10, 65), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (150, 150, 150), thickness=1)\n", (1857, 1971), False, 'import cv2\n'), ((1271, 1292), 'numpy.array', 'np.array', (['game_target'], {}), '(game_target)\n', (1279, 1292), True, 'import numpy as np\n'), ((871, 927), 'numpy.linalg.norm', 'np.linalg.norm', (['(particle.location - particle.game_target)'], {}), '(particle.location - particle.game_target)\n', (885, 927), True, 'import numpy as np\n'), ((1126, 1166), 'numpy.random.randint', 'np.random.randint', (['(1)', 'canvas.shape[1]', '(1)'], {}), '(1, canvas.shape[1], 1)\n', (1143, 1166), True, 'import numpy as np\n'), ((1194, 1234), 'numpy.random.randint', 'np.random.randint', (['(1)', 'canvas.shape[0]', '(1)'], {}), '(1, canvas.shape[0], 1)\n', (1211, 1234), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Automates the SSVEP experiment
"""
from mules import MulesClient
from experiment import *
import subprocess
import numpy as np
if __name__ == "__main__":
    ###################
    ## Parameters
    ###################
    # Fixed seed so every run presents the same pseudo-random video order.
    np.random.seed(40)
    videos = np.arange(6) + 1
    videos = np.random.permutation(videos)
    time_between = 5  # rest period between videos (s); stray ';' removed
    mules_path = r'C:\Program Files (x86)\MuSAE_Lab\MuLES\mules.exe'
    # Number of DEVICE in MuLES config.ini file
    mules_eeg_device = 'DEVICE01'
    mules_ecg_device = "DEVICE07"
    ###################
    ## Execute other software
    ###################
    # Launch MuLES for the EEG device (TCP server on port 30000)
    subprocess.Popen(mules_path + ' -- "' + mules_eeg_device + '"' + ' PORT=30000 LOG=T TCP=T')
    # Launch MuLES for the ECG device (TCP server on port 31000)
    subprocess.Popen(mules_path + ' -- "' + mules_ecg_device + '"' + ' PORT=31000 LOG=T TCP=T')
    # Pause for the Experimenter to confirm the quality of the Epoc electrodes
    pause(20)
    ###################
    ## TCP/IP clients
    ###################
    # TCP client for MuLES (EEG)
    mules_eeg = MulesClient('localhost', 30000)
    pause(2)
    # TCP client for MuLES (ECG)
    mules_ecg = MulesClient('localhost', 31000)
    # TCP Client for Unity
    unity = TcpClient('localhost', 40000)
    unity.connect()
    # Wait for Unity App
    pause(3)
    for video in videos:
        # Tone start
        tone(500, 500)
        pause(1)
        # Send command to Unity
        unity.writeInt32(video)
        # Send trigger to MuLES
        mules_eeg.sendtrigger(video)
        mules_ecg.sendtrigger(video)
        # Receive command that video ended (blocking handshake)
        video_end = unity.readInt32()
        # Send trigger to MuLES
        # NOTE(review): the same trigger value marks both video start and end;
        # confirm this is intended for later epoch segmentation.
        mules_eeg.sendtrigger(video)
        mules_ecg.sendtrigger(video)
        # Pause between videos
        pause(time_between)
    ###################
    ## End Experiment
    ###################
    mules_eeg.kill()
    mules_ecg.kill()
    unity.writeInt32(66)
    # Fix: removed dataset-extraction artefact (" | [") fused to this line.
    unity.close()
"subprocess.Popen",
"mules.MulesClient",
"numpy.random.seed",
"numpy.arange",
"numpy.random.permutation"
] | [((258, 276), 'numpy.random.seed', 'np.random.seed', (['(40)'], {}), '(40)\n', (272, 276), True, 'import numpy as np\n'), ((320, 349), 'numpy.random.permutation', 'np.random.permutation', (['videos'], {}), '(videos)\n', (341, 349), True, 'import numpy as np\n'), ((670, 765), 'subprocess.Popen', 'subprocess.Popen', (['(mules_path + \' -- "\' + mules_eeg_device + \'"\' + \' PORT=30000 LOG=T TCP=T\')'], {}), '(mules_path + \' -- "\' + mules_eeg_device + \'"\' +\n \' PORT=30000 LOG=T TCP=T\')\n', (686, 765), False, 'import subprocess\n'), ((786, 881), 'subprocess.Popen', 'subprocess.Popen', (['(mules_path + \' -- "\' + mules_ecg_device + \'"\' + \' PORT=31000 LOG=T TCP=T\')'], {}), '(mules_path + \' -- "\' + mules_ecg_device + \'"\' +\n \' PORT=31000 LOG=T TCP=T\')\n', (802, 881), False, 'import subprocess\n'), ((1095, 1126), 'mules.MulesClient', 'MulesClient', (['"""localhost"""', '(30000)'], {}), "('localhost', 30000)\n", (1106, 1126), False, 'from mules import MulesClient\n'), ((1156, 1187), 'mules.MulesClient', 'MulesClient', (['"""localhost"""', '(31000)'], {}), "('localhost', 31000)\n", (1167, 1187), False, 'from mules import MulesClient\n'), ((290, 302), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (299, 302), True, 'import numpy as np\n')] |
import numpy as np
# Output path of the generated C header.
outfilename = 'Inc/windowfunctions.h'
# keep both following lines in sync: size must equal 2**bits (checked in main())!
size = 32768
bits = 15
def Hann(i, N):
    """Value of the Hann window at sample *i* of an *N*-sample window."""
    phase = 2*np.pi*i/N
    return 0.5*(1.0-np.cos(phase))
def Barlett(i, N):
    """Value of the Bartlett (triangular) window at sample *i* of *N*.

    (The name keeps the original spelling because it is referenced by name
    in the ``windows`` table.)
    """
    half = N/2
    return 1.0 - np.abs((i-half) / half)
def Welch(i, N):
    """Value of the Welch (parabolic) window at sample *i* of *N*."""
    half = N/2
    u = (i-half) / half
    return 1.0 - np.square(u)
windows = [Hann, Barlett, Welch]
def generate_window_table_entries(window):
    """Render the first half of *window*'s lookup table as C initializer text.

    Emits ``size / 2`` samples (the second half of the window is symmetric),
    16 values per line, each line indented four spaces.  The trailing comma
    of the very last entry is stripped.
    """
    n_rows = int(pow(2, bits-4-1))  # 16 (=2**4) values per row, half the table size
    length = float(size)  # evaluate the window with float arithmetic
    lines = []
    for row in range(n_rows):
        values = [str(window(row * 16 + col, length)) for col in range(16)]
        lines.append('    ' + ', '.join(values) + ', ')
    # Join the rows and drop the final ", " so the last entry has no comma.
    return '\n'.join(lines)[:-2]
def gen_table():
    """Build the C definition of the window lookup tables (one block per window)."""
    header = 'const FFT_DATATYPE windows[WINDOW_FUNCTIONS][WINDOW_FUNCTION_TABLE_SIZE/2] = {\n'
    blocks = []
    for window in windows:
        blocks.append('    {\n' + generate_window_table_entries(window) + '\n    },\n')
    body = ''.join(blocks)
    # Drop the trailing ",\n" of the last block before closing the array.
    return header + body[:-2] + '\n};\n'
def gen_wss_table_entries(window):
    """Render *window*'s sum-of-squares for every FFT size 2**1 .. 2**bits.

    Returns a C brace-initializer string, e.g. ``"{w1, w2, ...}"``.
    """
    sums = []
    for exponent in range(1, bits+1):
        length = int(pow(2, exponent))
        samples = window(np.linspace(0, length, num=length), length)
        sums.append(str(np.sum(np.square(samples))))
    return '{' + ', '.join(sums) + '}'
def gen_wss_table():
    """Build the C definition of the per-window sum-of-squares table."""
    header = 'const float windows_ss[%s][%s] = {\n' % (len(windows), str(bits))
    rows = ['    ' + gen_wss_table_entries(window) + ',\n' for window in windows]
    # Strip the trailing ",\n" of the last row before closing the braces.
    return header + ''.join(rows)[:-2] + '\n};'
def main():
    """Validate the size/bits configuration and write the C header file."""
    if int(pow(2, bits)) != size:
        raise ValueError("size and bits settings doesn't match")
    with open(outfilename, 'w') as f:
        # Assemble all header pieces and write them in one call.
        f.writelines([
            '/* Generated by "generate_window_functions.py". Consider changing the script for modifications. */\n',
            '#ifndef WINDOWFUNCTIONS_H_\n',
            '#define WINDOWFUNCTIONS_H_\n\n',
            '#include "fft.h"\n',
            '#if (WINDOW_FUNCTION_TABLE_SIZE != {})\n'.format(size),
            '#error "The windowfunctiontable must have the resolution of the doubled max fft size"\n',
            '#endif\n\n',
            '#if (WINDOW_FUNCTIONS != {})\n'.format(len(windows)),
            '#error "The windowfunctiontable must have as much window functiond data as defined in the fft.h"\n',
            '#endif\n\n',
            gen_table() + '\n\n',
            gen_wss_table() + '\n',
            '#endif\n',
        ])
# Entry point: regenerate the header file when run as a script.
if __name__ == '__main__':
    main()
| [
"numpy.abs",
"numpy.linspace",
"numpy.cos",
"numpy.square"
] | [((213, 242), 'numpy.abs', 'np.abs', (['((i - N / 2) / (N / 2))'], {}), '((i - N / 2) / (N / 2))\n', (219, 242), True, 'import numpy as np\n'), ((272, 304), 'numpy.square', 'np.square', (['((i - N / 2) / (N / 2))'], {}), '((i - N / 2) / (N / 2))\n', (281, 304), True, 'import numpy as np\n'), ((155, 180), 'numpy.cos', 'np.cos', (['(2 * np.pi * i / N)'], {}), '(2 * np.pi * i / N)\n', (161, 180), True, 'import numpy as np\n'), ((1397, 1421), 'numpy.linspace', 'np.linspace', (['(0)', 'N'], {'num': 'N'}), '(0, N, num=N)\n', (1408, 1421), True, 'import numpy as np\n')] |
"""Unit test for DataLoader (public methods only)"""
import unittest
import numpy as np
import os
from dicom_data_preprocess.loader import DataLoader
__author__ = '<NAME>'
class TestLoader(unittest.TestCase):
    """Unit tests for DataLoader's public mini-batch API."""

    @classmethod
    def setUpClass(cls):
        """Load the shared test fixtures once for the whole class.

        Bug fix: ``setUpClass`` must be decorated with ``@classmethod``;
        without the decorator unittest's class-level setup invokes it with
        no arguments and fails with a TypeError before any test runs.
        """
        cls.output_dir = 'tests/data/output_data/'
        cls.images = np.load('tests/data/output_data/dicom_images.npy')
        cls.masks = np.load('tests/data/output_data/mask_images.npy')
        cls.metadata = np.load('tests/data/output_data/meta_images.npy')
        cls.mini_batch_size = 2

    def test_random_mini_batches(self):
        """random_mini_batches should yield 4 shuffled partitions of size 2."""
        print('Testing the shuffling and partitioning of the loader')
        loader = DataLoader(output_dir=TestLoader.output_dir,
                            images=TestLoader.images,
                            masks=TestLoader.masks,
                            metadata=TestLoader.metadata,
                            mini_batch_size=TestLoader.mini_batch_size)
        minibatches = loader.random_mini_batches()
        x, y, z = minibatches[0]
        self.assertEqual(len(minibatches), 4)
        self.assertEqual(np.shape(x)[0], 2)
        self.assertEqual(np.shape(y)[0], 2)
        self.assertEqual(np.shape(z)[1], 2)
# Run the tests when executed directly.
if __name__ == '__main__':
    # Fix: removed dataset-extraction artefact (" | [") fused to this line.
    unittest.main()
"unittest.main",
"numpy.shape",
"numpy.load",
"dicom_data_preprocess.loader.DataLoader"
] | [((1108, 1123), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1121, 1123), False, 'import unittest\n'), ((329, 379), 'numpy.load', 'np.load', (['"""tests/data/output_data/dicom_images.npy"""'], {}), "('tests/data/output_data/dicom_images.npy')\n", (336, 379), True, 'import numpy as np\n'), ((401, 450), 'numpy.load', 'np.load', (['"""tests/data/output_data/mask_images.npy"""'], {}), "('tests/data/output_data/mask_images.npy')\n", (408, 450), True, 'import numpy as np\n'), ((475, 524), 'numpy.load', 'np.load', (['"""tests/data/output_data/meta_images.npy"""'], {}), "('tests/data/output_data/meta_images.npy')\n", (482, 524), True, 'import numpy as np\n'), ((671, 848), 'dicom_data_preprocess.loader.DataLoader', 'DataLoader', ([], {'output_dir': 'TestLoader.output_dir', 'images': 'TestLoader.images', 'masks': 'TestLoader.masks', 'metadata': 'TestLoader.metadata', 'mini_batch_size': 'TestLoader.mini_batch_size'}), '(output_dir=TestLoader.output_dir, images=TestLoader.images,\n masks=TestLoader.masks, metadata=TestLoader.metadata, mini_batch_size=\n TestLoader.mini_batch_size)\n', (681, 848), False, 'from dicom_data_preprocess.loader import DataLoader\n'), ((984, 995), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (992, 995), True, 'import numpy as np\n'), ((1022, 1033), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (1030, 1033), True, 'import numpy as np\n'), ((1060, 1071), 'numpy.shape', 'np.shape', (['z'], {}), '(z)\n', (1068, 1071), True, 'import numpy as np\n')] |
import numpy as np
from rockets.rocket import Rocket
class Starship(Rocket):
    """Starship-style upper stage with actuated fore/aft flaps.

    The numeric constants below look like approximate published vehicle
    figures (presumably metres, kilograms, newtons, seconds and degrees) --
    NOTE(review): units are not stated anywhere in this file; confirm
    against the ``Rocket`` base class.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Vehicle geometry and mass budget (consumed by Rocket -- confirm).
        self.radius = 4.5
        self.height = 50
        self.propellant_mass = 3.3e6
        self.dry_mass = 120e3
        self.max_thrust = 2e5 * 3
        self.Isp = 386
        # Engine gimbal limits / rate.
        self.max_v_gimbal = 30
        self.max_gimbal = 20
        self.min_throttle = 0#0.1 / 3
        self.fuel_level = 0.2
        self.reenters_engine_first = False
        self.rated_for_human_reentry = True
        # Flap areas and thickness used by the aero model in update().
        self.front_wing_area = 19.4
        self.rear_wing_area = 38.6
        self.wing_thickness = 0.5
        # Control vector size depends on whether the simulation is 3D.
        if self.planet.is3D:
            self.control_vec = np.zeros(7)
        else:
            self.control_vec = np.zeros(4)
    def update(self, dt):
        """Accumulate one step of flap aerodynamic forces/torques.

        Adds drag and flap-fold torque contributions to ``self.acc`` and
        ``self.ddrot``.  Flap angles, gimbal and throttle are currently
        hard-coded placeholders (flaps folded flat, zero throttle).
        NOTE(review): ``pitch_torque``/``roll_torque``/``yaw_torque``,
        ``gimbal_cntrl`` and ``throttle`` are computed but never applied to
        ``force_sum``/``torque_sum`` -- confirm whether this is intentional.
        Relies on ``self.attackAngle``, ``self.mass`` and ``self.inertia``,
        presumably provided by the ``Rocket`` base class.
        """
        wing_angs = np.zeros((2,2)) #* np.pi/2 # ang=0 is perpendicular to hull
        # [[front_left, front_right],
        #  [rear_left, rear_right]]
        gimbal_cntrl = np.ones(2) * np.pi/2
        throttle = 0
        wing_angs = np.clip(wing_angs, 0, np.pi/2)
        # Body axes as unit vectors: x along the hull, y/z lateral.
        x, y, z = np.eye(3)
        # Dynamic-pressure factor: 0.5 * rho(altitude) * speed**2
        common_factor = 0.5* self.planet.rho(self.altitude) * self.speed**2
        # Starship drags can be decomposed to those perpendicular to the hull
        # and those parallel to the hull.
        front_drag = 0.1 * self.wing_thickness*2 * np.cos(self.attackAngle) * common_factor
        rear_drag = 0.1 * self.wing_thickness*3 * np.cos(self.attackAngle) * common_factor
        # assume front and rear drags behave as a linear system
        folded_wing_torque_effect = (2*front_drag*np.cos(np.mean(wing_angs[0]))
                                    + 2.5*rear_drag*np.cos(np.mean(wing_angs[1])))*y
        total_drag = (front_drag + rear_drag) * -x
        # Clearly perpendicular lift forces dominate
        front_lift = 0.6 * self.front_wing_area* np.cos(wing_angs[0]) * np.sin(self.attackAngle) * common_factor
        rear_lift = 0.6 * self.rear_wing_area * np.cos(wing_angs[1]) * np.sin(self.attackAngle) * common_factor
        # model pitch torque as difference between lateral drags with some restoring torque
        pitch_torque = (22*np.mean(front_lift) - 18*np.mean(rear_lift))*y
        # assumed to only depend on forces perpendicular to hull
        roll_torque = (front_lift[0]+rear_lift[0] -front_lift[1]-rear_lift[1])*6*x
        yaw_torque = (front_lift[0]+rear_lift[1] -front_lift[1]-rear_lift[0])*z
        # TODO: compute lift forces
        force_sum = total_drag
        torque_sum = folded_wing_torque_effect
        self.acc = self.acc + force_sum / self.mass
        self.ddrot = self.ddrot + torque_sum / self.inertia
class F9(Rocket):
    """Falcon-9-style booster with four grid fins.

    Constants look like approximate published figures (presumably metres,
    kilograms, newtons, seconds, degrees) -- NOTE(review): units are not
    stated in this file; confirm against the ``Rocket`` base class.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Vehicle geometry and mass budget.
        self.radius = 3.7/2
        self.height = 40
        self.propellant_mass = 420e3
        self.dry_mass = 30e3
        self.max_thrust = 854e3 * 3
        self.Isp = 282
        self.max_v_gimbal = 30
        self.max_gimbal = 20
        self.min_throttle = 0#0.1 / 3
        self.fuel_level = 0.15
        # Effective aerodynamic area of a single grid fin.
        self.gf_eff_area = 1.8
        # Control vector size depends on whether the simulation is 3D.
        if self.planet.is3D:
            self.control_vec = np.zeros(7)
        else:
            self.control_vec = np.zeros(4)
    def update(self, dt):
        """Accumulate one step of grid-fin aerodynamic forces/torques.

        NOTE(review): ``grid_fin_angs`` clips a scalar constant (about
        -0.35 rad) into [0, 0.35] -- i.e. it is always 0.0 -- and is never
        used; the fin deflections actually used are ``gf_angs`` (fixed at
        1 rad).  ``gimbal_cntrl`` and ``throttle`` are also unused.
        Confirm the intended control path.
        """
        gf_angs = np.ones((2,2))
        grid_fin_angs = np.clip(-20*np.pi/180, 0, 20*np.pi/180)
        # [[y, -y, z, -z]]
        gimbal_cntrl = np.ones(2)
        throttle = 0
        # Body axes: x along the hull, y/z lateral.
        x, y, z = np.eye(3)
        # Mounting axis of each fin and the direction its force acts in.
        gf_axes = np.array([y, -y, z, -z])
        force_dir = np.array([z, -z, -y, y])
        # Dynamic pressure times fin area: 0.5 * rho * A * v**2
        common_factor = 0.5* self.planet.rho(self.altitude) * self.gf_eff_area * self.speed**2
        gf_forces = 0.4 * common_factor * np.sin(gf_angs.ravel())
        f = np.asarray([gf_forces[i] * force_dir[i] for i in range(4)])
        # Torque = r x F, with fins offset to the top of the booster.
        gf_torques = np.cross(x*self.height/2 + gf_axes*(self.radius+1), f)
        gf_drag = 4 *0.5 * common_factor * x
        force_sum = gf_drag
        torque_sum = np.sum(gf_torques, axis=0)
        self.acc = self.acc + force_sum / self.mass
        self.ddrot = self.ddrot + torque_sum / self.inertia
    def typicalEntry(self):
        """Interactively place the booster at a typical reentry start state."""
        num = int(input('Enter 1 for RTLS, 2 for ADSL: '))
        if num == 1:
            self.startAt([0, 120e3+self.planet.R, 0], [200, 0, 0])
        elif num == 2: #TODO: these are guesstimates based on online graphs
            # Fix: removed dataset-extraction artefact (" | [") fused here.
            self.startAt([0, 120e3+self.planet.R, 0], [200, 0, 0])
"numpy.clip",
"numpy.mean",
"numpy.eye",
"numpy.ones",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.cos",
"numpy.sin"
] | [((836, 852), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (844, 852), True, 'import numpy as np\n'), ((1055, 1087), 'numpy.clip', 'np.clip', (['wing_angs', '(0)', '(np.pi / 2)'], {}), '(wing_angs, 0, np.pi / 2)\n', (1062, 1087), True, 'import numpy as np\n'), ((1105, 1114), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1111, 1114), True, 'import numpy as np\n'), ((3313, 3328), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3320, 3328), True, 'import numpy as np\n'), ((3353, 3400), 'numpy.clip', 'np.clip', (['(-20 * np.pi / 180)', '(0)', '(20 * np.pi / 180)'], {}), '(-20 * np.pi / 180, 0, 20 * np.pi / 180)\n', (3360, 3400), True, 'import numpy as np\n'), ((3444, 3454), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (3451, 3454), True, 'import numpy as np\n'), ((3503, 3512), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3509, 3512), True, 'import numpy as np\n'), ((3531, 3555), 'numpy.array', 'np.array', (['[y, -y, z, -z]'], {}), '([y, -y, z, -z])\n', (3539, 3555), True, 'import numpy as np\n'), ((3576, 3600), 'numpy.array', 'np.array', (['[z, -z, -y, y]'], {}), '([z, -z, -y, y])\n', (3584, 3600), True, 'import numpy as np\n'), ((3857, 3919), 'numpy.cross', 'np.cross', (['(x * self.height / 2 + gf_axes * (self.radius + 1))', 'f'], {}), '(x * self.height / 2 + gf_axes * (self.radius + 1), f)\n', (3865, 3919), True, 'import numpy as np\n'), ((4008, 4034), 'numpy.sum', 'np.sum', (['gf_torques'], {'axis': '(0)'}), '(gf_torques, axis=0)\n', (4014, 4034), True, 'import numpy as np\n'), ((720, 731), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (728, 731), True, 'import numpy as np\n'), ((777, 788), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (785, 788), True, 'import numpy as np\n'), ((3199, 3210), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (3207, 3210), True, 'import numpy as np\n'), ((3256, 3267), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (3264, 3267), True, 'import numpy as np\n'), ((993, 1003), 
'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1000, 1003), True, 'import numpy as np\n'), ((1373, 1397), 'numpy.cos', 'np.cos', (['self.attackAngle'], {}), '(self.attackAngle)\n', (1379, 1397), True, 'import numpy as np\n'), ((1465, 1489), 'numpy.cos', 'np.cos', (['self.attackAngle'], {}), '(self.attackAngle)\n', (1471, 1489), True, 'import numpy as np\n'), ((1913, 1937), 'numpy.sin', 'np.sin', (['self.attackAngle'], {}), '(self.attackAngle)\n', (1919, 1937), True, 'import numpy as np\n'), ((2026, 2050), 'numpy.sin', 'np.sin', (['self.attackAngle'], {}), '(self.attackAngle)\n', (2032, 2050), True, 'import numpy as np\n'), ((1890, 1910), 'numpy.cos', 'np.cos', (['wing_angs[0]'], {}), '(wing_angs[0])\n', (1896, 1910), True, 'import numpy as np\n'), ((2003, 2023), 'numpy.cos', 'np.cos', (['wing_angs[1]'], {}), '(wing_angs[1])\n', (2009, 2023), True, 'import numpy as np\n'), ((2192, 2211), 'numpy.mean', 'np.mean', (['front_lift'], {}), '(front_lift)\n', (2199, 2211), True, 'import numpy as np\n'), ((2217, 2235), 'numpy.mean', 'np.mean', (['rear_lift'], {}), '(rear_lift)\n', (2224, 2235), True, 'import numpy as np\n'), ((1627, 1648), 'numpy.mean', 'np.mean', (['wing_angs[0]'], {}), '(wing_angs[0])\n', (1634, 1648), True, 'import numpy as np\n'), ((1710, 1731), 'numpy.mean', 'np.mean', (['wing_angs[1]'], {}), '(wing_angs[1])\n', (1717, 1731), True, 'import numpy as np\n')] |
#!/usr/bin/env python
'''This example compares recurrent layer performance on a sine-generation task.
The task is to generate a complex sine wave that is constructed as a
superposition of a small set of pure frequencies. All networks are constructed
with one input (which receives all zero values), one recurrent hidden layer, and
one output (which is tasked with matching the target sine wave). Each model is
trained and then its predicted output is plotted for easy visual comparison of
the behavior of the different layer models.
For this task, the clockwork RNN layer tends to perform the best of the layer
models, even though the clockwork layer uses the simplest activation (linear)
and has the fewest parameters (~2000 for a 64-node hidden layer, versus ~4000
for a vanilla RNN and ~17000 for an LSTM). The vanilla RNN layer tends to do the
worst, or at the least is the most sensitive to the initialization of the
parameters. The other layer models fall somewhere in the middle but tend only to
match the dominant frequency in the target wave.
'''
import matplotlib.pyplot as plt
import numpy as np
import theanets
# Matplotlib colours used to tell the layer types apart in both plots.
COLORS = ['#d62728', '#1f77b4', '#2ca02c', '#9467bd', '#ff7f0e',
          '#e377c2', '#8c564b', '#bcbd22', '#7f7f7f', '#17becf']
BATCH_SIZE = 2
# Construct a complex sine wave as a sum of pure-frequency waves.
TAU = 2 * np.pi
T = np.linspace(0, TAU, 256)
SIN = sum(c * np.sin(TAU * f * T) for c, f in ((2, 1.5), (3, 1.8), (4, 1.1)))
# Create an input dataset consisting of all zeros, and an output dataset
# containing the target sine wave. We have to stack the target sine wave here
# because recurrent models expect a tensor with three dimensions, and the batch
# size for recurrent networks must be greater than 1.
# Shapes are (batch, time, features) = (BATCH_SIZE, 256, 1).
ZERO = np.zeros((BATCH_SIZE, len(T), 1), 'f')
WAVES = np.concatenate([SIN[None, :, None]] * BATCH_SIZE, axis=0).astype('f')
# Set up plotting axes to show the output result and learning curves.
_, (wave_ax, learn_ax) = plt.subplots(2, 1)
# Plot the target wave.
wave_ax.plot(T, SIN, ':', label='Target', alpha=0.7, color='#111111')
# For each layer type, train a model containing that layer, and plot its
# predicted output.
for i, layer in enumerate((
        dict(form='rnn', activation='linear', diagonal=0.5),
        dict(form='rnn', activation='relu', diagonal=0.5),
        dict(form='rrnn', activation='relu', rate='vector', diagonal=0.5),
        dict(form='scrn', activation='elu'),
        dict(form='gru', activation='relu'),
        dict(form='lstm', activation='tanh'),
        dict(form='clockwork', activation='linear', periods=(1, 4, 16, 64)))):
    name = '{form}+{activation}'.format(**layer)
    layer['size'] = 64  # every model gets a 64-node hidden layer
    theanets.log('training {} model', name)
    # Network topology: one input -> recurrent hidden layer -> one output.
    net = theanets.recurrent.Regressor([1, layer, 1])
    losses = []
    # Train until the loss stops improving by at least 1%.
    for tm, _ in net.itertrain([ZERO, WAVES],
                               monitor_gradients=True,
                               batch_size=BATCH_SIZE,
                               algorithm='rmsprop',
                               learning_rate=0.0001,
                               momentum=0.9,
                               min_improvement=0.01):
        losses.append(tm['loss'])
    prd = net.predict(ZERO)
    # Plot the first sequence of the predicted batch against the target.
    wave_ax.plot(T, prd[0, :, 0].flatten(), label=name, alpha=0.7, color=COLORS[i])
    learn_ax.plot(losses, label=name, alpha=0.7, color=COLORS[i])
# Make the plots look nice.
for ax in [wave_ax, learn_ax]:
    ax.xaxis.tick_bottom()
    ax.yaxis.tick_left()
    ax.spines['top'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.spines['bottom'].set_position(('outward', 6))
    ax.spines['left'].set_position(('outward', 6))
wave_ax.set_ylabel('Amplitude')
wave_ax.set_xlabel('Time')
learn_ax.set_ylabel('Loss')
learn_ax.set_xlabel('Training Epoch')
learn_ax.grid(True)
# NOTE(review): plt.legend() attaches to the current axes only -- confirm the
# wave plot is meant to go without its own legend.
plt.legend()
plt.show()
| [
"theanets.log",
"theanets.recurrent.Regressor",
"numpy.linspace",
"numpy.concatenate",
"numpy.sin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1362, 1386), 'numpy.linspace', 'np.linspace', (['(0)', 'TAU', '(256)'], {}), '(0, TAU, 256)\n', (1373, 1386), True, 'import numpy as np\n'), ((1973, 1991), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (1985, 1991), True, 'import matplotlib.pyplot as plt\n'), ((3822, 3834), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3832, 3834), True, 'import matplotlib.pyplot as plt\n'), ((3835, 3845), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3843, 3845), True, 'import matplotlib.pyplot as plt\n'), ((2696, 2735), 'theanets.log', 'theanets.log', (['"""training {} model"""', 'name'], {}), "('training {} model', name)\n", (2708, 2735), False, 'import theanets\n'), ((2746, 2789), 'theanets.recurrent.Regressor', 'theanets.recurrent.Regressor', (['[1, layer, 1]'], {}), '([1, layer, 1])\n', (2774, 2789), False, 'import theanets\n'), ((1806, 1863), 'numpy.concatenate', 'np.concatenate', (['([SIN[None, :, None]] * BATCH_SIZE)'], {'axis': '(0)'}), '([SIN[None, :, None]] * BATCH_SIZE, axis=0)\n', (1820, 1863), True, 'import numpy as np\n'), ((1401, 1420), 'numpy.sin', 'np.sin', (['(TAU * f * T)'], {}), '(TAU * f * T)\n', (1407, 1420), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import skimage.io as io
import skimage.filters as flt
#%matplotlib inline
# since we can't use imports
import numpy as np
import scipy.ndimage.filters as flt
import warnings
from sklearn.mixture import GaussianMixture as GM
from scipy import interpolate, ndimage
from tqdm import tqdm
import skimage.measure as measure
from joblib import Parallel, delayed
from skimage.io import imread, imsave
import skimage as sk
import os
def anisodiff(img,niter=1,kappa=50,gamma=0.1,step=(1.,1.),sigma=0, option=1,ploton=False):
    """
    Anisotropic diffusion.
    Usage:
    imgout = anisodiff(im, niter, kappa, gamma, option)
    Arguments:
            img    - input image
            niter  - number of iterations
            kappa  - conduction coefficient 20-100 ?
            gamma  - max value of .25 for stability
            step   - tuple, the distance between adjacent pixels in (y,x)
            sigma  - std. dev. of an optional Gaussian smoothing applied to the
                     gradients before computing conduction (0 disables it)
            option - 1 Perona Malik diffusion equation No 1
                     2 Perona Malik diffusion equation No 2
            ploton - if True, the image will be plotted on every iteration
    Returns:
            imgout   - diffused image.
    kappa controls conduction as a function of gradient.  If kappa is low
    small intensity gradients are able to block conduction and hence diffusion
    across step edges.  A large value reduces the influence of intensity
    gradients on conduction.
    gamma controls speed of diffusion (you usually want it at a maximum of
    0.25)
    step is used to scale the gradients in case the spacing between adjacent
    pixels differs in the x and y axes
    Diffusion equation 1 favours high contrast edges over low contrast ones.
    Diffusion equation 2 favours wide regions over smaller ones.
    Reference:
    P. Perona and J. Malik.
    Scale-space and edge detection using ansotropic diffusion.
    IEEE Transactions on Pattern Analysis and Machine Intelligence,
    12(7):629-639, July 1990.
    Original MATLAB code by Peter Kovesi
    School of Computer Science & Software Engineering
    The University of Western Australia
    Translated to Python and optimised by Alistair Muldal
    June 2000  original version.
    March 2002 corrected diffusion eqn No 2.
    July 2012 translated to Python
    """
    # ...you could always diffuse each color channel independently if you
    # really want
    print("performing 2D anisotropic filtering...")
    if img.ndim == 3:
        warnings.warn("Only grayscale images allowed, converting to 2D matrix")
        img = img.mean(2)
    # initialize output array
    img = img.astype('float32')
    imgout = img.copy()
    # initialize some internal variables
    # deltaS/deltaE hold the south/east finite differences; NS/EW accumulate
    # the divergence terms each iteration.
    deltaS = np.zeros_like(imgout)
    deltaE = deltaS.copy()
    NS = deltaS.copy()
    EW = deltaS.copy()
    gS = np.ones_like(imgout)
    gE = gS.copy()
    # create the plot figure, if requested
    if ploton:
        import pylab as pl
        from time import sleep
        fig = pl.figure(figsize=(20,5.5),num="Anisotropic diffusion")
        ax1,ax2 = fig.add_subplot(1,2,1),fig.add_subplot(1,2,2)
        ax1.imshow(img,interpolation='nearest')
        ih = ax2.imshow(imgout,interpolation='nearest',animated=True)
        ax1.set_title("Original image")
        ax2.set_title("Iteration 0")
        fig.canvas.draw()
    # NOTE(review): np.arange(1, niter) runs niter-1 iterations — confirm intended
    for ii in tqdm(np.arange(1,niter)):
        # calculate the diffs (forward differences along each axis)
        deltaS[:-1,: ] = np.diff(imgout,axis=0)
        deltaE[:,:-1] = np.diff(imgout,axis=1)
        # optionally smooth the gradients before the conduction estimate
        if 0<sigma:
            deltaSf=flt.gaussian_filter(deltaS,sigma)
            deltaEf=flt.gaussian_filter(deltaE,sigma)
        else:
            deltaSf=deltaS
            deltaEf=deltaE
        # conduction gradients (only need to compute one per dim!)
        if option == 1:
            gS = np.exp(-(deltaSf/kappa)**2.)/step[0]
            gE = np.exp(-(deltaEf/kappa)**2.)/step[1]
        elif option == 2:
            gS = 1./(1.+(deltaSf/kappa)**2.)/step[0]
            gE = 1./(1.+(deltaEf/kappa)**2.)/step[1]
        # update matrices: conduction-weighted fluxes
        E = gE*deltaE
        S = gS*deltaS
        # subtract a copy that has been shifted 'North/West' by one
        # pixel. don't as questions. just do it. trust me.
        NS[:] = S
        EW[:] = E
        NS[1:,:] -= S[:-1,:]
        EW[:,1:] -= E[:,:-1]
        # update the image (explicit Euler step on the divergence)
        imgout += gamma*(NS+EW)
        if ploton:
            iterstring = "Iteration %i" %(ii+1)
            ih.set_data(imgout)
            ax2.set_title(iterstring)
            fig.canvas.draw()
            # sleep(0.01)
    return imgout
def anisodiff3(stack,niter=1,kappa=50,gamma=0.1,step=(1.,1.,1.), sigma=0, option=1,ploton=False):
    """
    3D Anisotropic diffusion.
    Usage:
    stackout = anisodiff(stack, niter, kappa, gamma, option)
    Arguments:
            stack  - input stack
            niter  - number of iterations
            kappa  - conduction coefficient 20-100 ?
            gamma  - max value of .25 for stability
            step   - tuple, the distance between adjacent pixels in (z,y,x)
            sigma  - std. dev. of an optional Gaussian smoothing applied to the
                     gradients before computing conduction (0 disables it)
            option - 1 Perona Malik diffusion equation No 1
                     2 Perona Malik diffusion equation No 2
            ploton - if True, the middle z-plane will be plotted on every
                     iteration
    Returns:
            stackout   - diffused stack.
    kappa controls conduction as a function of gradient.  If kappa is low
    small intensity gradients are able to block conduction and hence diffusion
    across step edges.  A large value reduces the influence of intensity
    gradients on conduction.
    gamma controls speed of diffusion (you usually want it at a maximum of
    0.25)
    step is used to scale the gradients in case the spacing between adjacent
    pixels differs in the x,y and/or z axes
    Diffusion equation 1 favours high contrast edges over low contrast ones.
    Diffusion equation 2 favours wide regions over smaller ones.
    Reference:
    P. Perona and J. Malik.
    Scale-space and edge detection using ansotropic diffusion.
    IEEE Transactions on Pattern Analysis and Machine Intelligence,
    12(7):629-639, July 1990.
    """
    # ...you could always diffuse each color channel independently if you
    # really want
    print("performing 3D anisotropic filtering...")
    if stack.ndim == 4:
        warnings.warn("Only grayscale stacks allowed, converting to 3D matrix")
        stack = stack.mean(3)
    # initialize output array
    stack = stack.astype('float32')
    stackout = stack.copy()
    # initialize some internal variables
    deltaS = np.zeros_like(stackout)
    deltaE = deltaS.copy()
    deltaD = deltaS.copy()
    NS = deltaS.copy()
    EW = deltaS.copy()
    UD = deltaS.copy()
    gS = np.ones_like(stackout)
    gE = gS.copy()
    gD = gS.copy()
    # create the plot figure, if requested
    if ploton:
        import pylab as pl
        from time import sleep
        showplane = stack.shape[0]//2
        fig = pl.figure(figsize=(20,5.5),num="Anisotropic diffusion")
        ax1,ax2 = fig.add_subplot(1,2,1),fig.add_subplot(1,2,2)
        ax1.imshow(stack[showplane,...].squeeze(),interpolation='nearest')
        ih = ax2.imshow(stackout[showplane,...].squeeze(),interpolation='nearest',animated=True)
        ax1.set_title("Original stack (Z = %i)" %showplane)
        ax2.set_title("Iteration 0")
        fig.canvas.draw()
    # NOTE: np.arange(1, niter) runs niter-1 iterations (same as the 2D version)
    for ii in tqdm(np.arange(1,niter)):
        # calculate the diffs (forward differences along each axis)
        deltaD[:-1,: ,: ] = np.diff(stackout,axis=0)
        deltaS[: ,:-1,: ] = np.diff(stackout,axis=1)
        deltaE[: ,: ,:-1] = np.diff(stackout,axis=2)
        if 0<sigma:
            deltaDf=flt.gaussian_filter(deltaD,sigma)
            deltaSf=flt.gaussian_filter(deltaS,sigma)
            deltaEf=flt.gaussian_filter(deltaE,sigma)
        else:
            deltaDf=deltaD
            deltaSf=deltaS
            deltaEf=deltaE
        # conduction gradients (only need to compute one per dim!)
        # BUGFIX: use the (optionally) smoothed gradients deltaDf/deltaSf/deltaEf
        # here.  The original used the raw deltaD/deltaS/deltaE, so the sigma
        # parameter had no effect in the 3D version; the 2D anisodiff already
        # uses the filtered gradients.  Behaviour is unchanged for sigma=0.
        if option == 1:
            gD = np.exp(-(deltaDf/kappa)**2.)/step[0]
            gS = np.exp(-(deltaSf/kappa)**2.)/step[1]
            gE = np.exp(-(deltaEf/kappa)**2.)/step[2]
        elif option == 2:
            gD = 1./(1.+(deltaDf/kappa)**2.)/step[0]
            gS = 1./(1.+(deltaSf/kappa)**2.)/step[1]
            gE = 1./(1.+(deltaEf/kappa)**2.)/step[2]
        # update matrices: conduction-weighted fluxes
        D = gD*deltaD
        E = gE*deltaE
        S = gS*deltaS
        # subtract a copy that has been shifted 'Up/North/West' by one
        # pixel. don't as questions. just do it. trust me.
        UD[:] = D
        NS[:] = S
        EW[:] = E
        UD[1:,: ,: ] -= D[:-1,: ,: ]
        NS[: ,1:,: ] -= S[: ,:-1,: ]
        EW[: ,: ,1:] -= E[: ,: ,:-1]
        # update the image (explicit Euler step on the divergence)
        stackout += gamma*(UD+NS+EW)
        if ploton:
            iterstring = "Iteration %i" %(ii+1)
            ih.set_data(stackout[showplane,...].squeeze())
            ax2.set_title(iterstring)
            fig.canvas.draw()
            # sleep(0.01)
    return stackout
def get_phase_frac(im, phase_val, ax):
    """Return the volume fraction of voxels equal to ``phase_val``.

    Parameters
    ----------
    im : 2-D or 3-D array; a 2-D image is promoted to a single-slice stack
        of shape (1, rows, cols).  Axis order is assumed (z, y, x).
    phase_val : label value of the phase of interest
    ax : 'x', 'y' or 'z' for a per-position fraction profile along that
        axis, or None for the global fraction.

    Returns
    -------
    1-D profile array for 'x'/'y'/'z', scalar for None.

    Raises
    ------
    ValueError for any other ``ax`` (previously an UnboundLocalError).

    Notes
    -----
    Bug fix: the 'x' and 'y' normalizations previously divided by the wrong
    axis product (dim[0]*dim[2] and dim[0]*dim[1] respectively), which only
    gave true fractions for square cross-sections and could yield values
    above 1 otherwise.  Each profile bin now divides by the exact number of
    voxels collapsed into it.  Results on square data are unchanged.
    """
    dim = im.shape
    if len(dim) == 2:
        im = im.reshape(-1, dim[0], dim[1])
        dim = im.shape
    im_ = np.where(im == phase_val, 1, 0)
    if ax == 'x':
        # collapse z then y -> profile along x; dim[0]*dim[1] voxels per bin
        result = np.sum(im_, axis=0).sum(axis=0) / (dim[0] * dim[1])
    elif ax == 'y':
        # collapse z then x -> profile along y; dim[0]*dim[2] voxels per bin
        result = np.sum(im_, axis=0).sum(axis=1) / (dim[0] * dim[2])
    elif ax == 'z':
        # collapse y then x -> profile along z; dim[1]*dim[2] voxels per bin
        result = np.sum(im_, axis=1).sum(axis=1) / (dim[1] * dim[2])
    elif ax is None:
        result = np.mean(im_)
    else:
        raise ValueError("ax must be one of 'x', 'y', 'z' or None, got %r" % (ax,))
    return result
def get_saturation(im, air_phase_val, sat_phase_val, ax):
    """Liquid saturation: saturated fraction over (saturated + air) fraction.

    ``ax`` is forwarded to get_phase_frac ('x'/'y'/'z' for a profile,
    None for a scalar).
    """
    frac_sat = get_phase_frac(im, phase_val=sat_phase_val, ax=ax)
    frac_air = get_phase_frac(im, phase_val=air_phase_val, ax=ax)
    return frac_sat / (frac_sat + frac_air)
def get_z_aixs_profile(im_stack, agg_func=None, phase_val=None):
    """Aggregate every slice of a 3-D stack into one number.

    If ``phase_val`` is given, the stack is first binarized to 1 where it
    equals that value.  ``agg_func`` (default ``np.mean``) is applied to
    each flattened slice along axis 1, yielding one value per z-slice.
    """
    if phase_val is not None:
        im_stack = np.where(im_stack == phase_val, 1, 0)
    n_slices, n_rows, n_cols = im_stack.shape[0], im_stack.shape[1], im_stack.shape[2]
    flat_slices = np.reshape(im_stack, (n_slices, n_rows * n_cols))
    agg = np.mean if agg_func is None else agg_func
    return agg(flat_slices, axis=1)
def norm_stack(im_stack, normalizer=None, how=None):
    """Normalize a stack slice-wise and Gaussian-smooth the result.

    ``normalizer`` defaults to the first slice; ``how`` is "ratio"
    (division, the default) or "diff" (subtraction).  Each normalized
    slice is smoothed with a sigma-10 Gaussian; returns float32.
    """
    if normalizer is None:
        normalizer = im_stack[0]
    mode = "ratio" if how is None else how
    if mode == "ratio":
        normed = im_stack / normalizer
    elif mode == "diff":
        normed = im_stack - normalizer
    smoothed = np.zeros_like(im_stack, dtype=np.float32)
    for idx, slc in enumerate(normed):
        smoothed[idx, :, :] = flt.gaussian_filter(slc, sigma=10)
    return smoothed
def get_porosity(im, phase_val):
    """Return the fraction of voxels whose value equals ``phase_val``."""
    phase_mask = np.where(im == phase_val, 1, 0)
    return np.mean(phase_mask)
def gaussian(x, mu, var):
    """Evaluate the normal pdf with mean ``mu`` and variance ``var`` at ``x``."""
    std = var ** (1 / 2)
    norm_const = 1.0 / (std * np.sqrt(2 * np.pi))
    return norm_const * np.exp(-0.5 * ((x - mu) / std) ** 2)
#return np.exp(-(1/2) * ((x - mu)/sig)**2)
def gauss_fit1D(data, n_comp=1, comps=[''], bins=50, n_sample=100, title='', rand_state=42):
    """Fit a 1-D Gaussian mixture to ``data`` and plot the fitted components
    over a density histogram.

    ``comps`` supplies a text label per component.  Returns
    (fig, ax, mus, stds, ws) from the fitted mixture.
    """
    gmm = GM(n_components=n_comp, covariance_type='full', tol=0.0001, random_state=rand_state).fit(data)
    variances = gmm.covariances_.flatten()
    stds = np.sqrt(variances)
    mus = gmm.means_.flatten()
    ws = gmm.weights_.flatten()
    x_vals = np.linspace(np.min(data), np.max(data), num=n_sample)
    # one pdf column per component, evaluated on the common grid
    g_hats = np.zeros((len(x_vals), n_comp))
    for comp_idx in range(n_comp):
        g_hats[:, comp_idx] = gaussian(x_vals, mus[comp_idx], variances[comp_idx])
    # weighted sum of the component pdfs = mixture density
    g_hats_total = np.dot(g_hats, ws)
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.hist(data, bins=bins, density=True)
    for comp in range(n_comp):
        ax.plot(x_vals, ws[comp] * g_hats[:, comp], label="mu = %.2f, std = %.2f, %s" % \
                (mus[comp], stds[comp], comps[comp]))
    ax.plot(x_vals, g_hats_total, label='fit')
    ax.set_xlabel('Grey scale value')
    ax.set_ylabel('density')
    ax.set_title(title, fontsize=19)
    ax.legend()
    return fig, ax, mus, stds, ws
def get_cl_boundary(im, layer_to_keep='top', offset=0, connect=False):
    """Locate the catalyst-layer boundary surface in a segmented stack.

    ``im`` is a 3-D segmented outline image (nonzero on the outline).
    ``layer_to_keep`` selects the first ('top') or last ('bottom') nonzero
    slice per (y, x) column.  With ``connect=True`` the sparse boundary
    points are interpolated into a continuous, smoothed surface shifted by
    ``offset`` slices.  Returns a uint8 stack with 255 on the boundary.
    """
    dim = im.shape
    if layer_to_keep == "bottom":
        # argmax over the reversed stack finds the LAST nonzero slice per column
        boundary = dim[0] - 1 - np.argmax(im[::-1], axis=0)
        # columns with no outline at all map back to 0
        boundary = np.where(boundary == dim[0] - 1, 0, boundary)
    elif layer_to_keep == "top":
        boundary = np.argmax(im, axis=0)
    # nonzero entries of `boundary` give the slice index of the border at (y, x)
    rr, cc = np.where(boundary > 0)
    zz = boundary[rr, cc]
    out = np.zeros(dim, dtype=np.uint8)
    if connect:
        # interpolate a full surface through the sparse points, then smooth
        ny = np.arange(0, dim[1])
        nx = np.arange(0, dim[2])
        xx, yy = np.meshgrid(nx, ny)
        surface = interpolate.griddata((cc, rr), zz, (xx, yy), method='nearest')
        surface = ndimage.uniform_filter(surface, size=10) + offset
        out[surface, yy, xx] = 255
    else:
        out[zz, rr, cc] = 255
    return out
def fill_boundary(im, side_to_keep='top', offset=None):
    """Turn a boundary-surface stack into a filled binary mask.

    The boundary slice per (y, x) column is found with argmax; the mask is
    255 on every voxel strictly above ('top') or strictly below ('bottom')
    that boundary, shifted by ``offset`` slices.  In this setup the cathode
    is up and the anode down, so 'top' fills from slice 0 down to the
    cathode CL and 'bottom' fills from the anode CL to the last slice.
    """
    shift = 0 if offset is None else offset
    dim = im.shape
    # first nonzero slice per (y, x) column
    bndry = np.argmax(im, axis=0)
    if side_to_keep == 'top':
        bndry = bndry - shift
    if side_to_keep == 'bottom':
        bndry = bndry + shift
    del im
    mask = np.zeros(dim, dtype=np.uint8)
    # fill each slice with its own index and subtract the boundary height:
    # negative -> above the boundary, positive -> below it
    slice_idx = np.arange(dim[0]).reshape(-1, 1, 1)
    decision = mask + slice_idx - bndry
    del bndry
    if side_to_keep == 'top':
        selected = np.where(decision < 0)
    elif side_to_keep == 'bottom':
        selected = np.where(decision > 0)
    mask[selected] = 255
    return mask
def sep_cathode(ccseg_path, full_im_path, offset, trim=True):
    """Extract the cathode-GDL region from the full tomogram.

    ``ccseg_path`` points at the segmented cathode-CL outline image and
    ``full_im_path`` at the full grayscale stack.  Everything below the
    bottom edge of the cathode CL (shifted by ``offset``) is kept; with
    ``trim=True`` the empty slices above the first GDL voxel are dropped.
    Returns the masked stack cast back to the original dtype.
    """
    full_im = imread(full_im_path)
    ccl_seg = imread(ccseg_path)
    im_dtype = str(full_im.dtype)
    # bottom boundary of the cathode CL, then the region below it
    boundary = get_cl_boundary(ccl_seg, layer_to_keep='bottom', offset=offset)
    gdl_mask = fill_boundary(boundary, side_to_keep='bottom')
    del boundary, ccl_seg
    # apply the binary mask to the grayscale data
    masked = np.int32(np.int32(gdl_mask / 255) * np.int32(full_im))
    del gdl_mask, full_im
    if trim:
        # drop the slices above the topmost GDL voxel
        z_idx, _, _ = np.nonzero(masked)
        cut_slice = np.min(z_idx)
        masked = masked[cut_slice:, :, :].copy()
    return np.array(masked, dtype=im_dtype)
def gauss_filter(image, sigma):
    """Gaussian-smooth ``image`` while keeping its original value range."""
    smoothed = sk.filters.gaussian(image, sigma, preserve_range=True)
    return smoothed
def correct_illum(im, sigma=10, ref_region_spec=(15, 747, 100)):
    """
    Corrects illumination across the stack of radiographs for quantitative information.
    The algorithm uses a reference patch which gives a sort of basis vector.
    The relevant scaling is obtained for the whole plane.
    The basis vector is further decomposed into pattern, and trend component modelled by the change in GSV

    :param im: 3-D stack of radiographs (z, y, x)
    :param sigma: Gaussian smoothing applied to every slice before analysis
    :param ref_region_spec: (left, top, width) of the square reference patch,
        in pixel coordinates -- presumably a region that should not change
        through the stack; verify against the acquisition setup
    :return: illumination-corrected stack as float32
    """
    shape = im.shape
    # per-slice Gaussian smoothing, parallelized across 5 workers
    im_G = []
    im_G.extend(Parallel(n_jobs=5)(delayed(gauss_filter)(slc, sigma) for slc in im))
    im_G = np.array(im_G, dtype=np.float32)
    # pick the edge that would serve as reference where image does not change
    # in the through plane direction
    #l=15; r=150; t=747; b=1080
    l = ref_region_spec[0]; t = ref_region_spec[1]; w = ref_region_spec[2]
    r = l + w; b = t + w
    #ref_centre_idx = (l+(r-l)//2, t+(b-l)//2)
    im_G_ref_patch = im_G[:, t:b, l:r]
    # compute the throughplane change
    ref_patch_mean = np.mean(im_G_ref_patch, axis=1).mean(axis=1)
    # subtract mean appears to do what I just want see calc in one note
    ref_patch_mean0 = ref_patch_mean - np.mean(ref_patch_mean)
    im_G_mean0 = im_G - np.mean(im_G, axis=0)
    del im_G
    # Derive trend to be removed from signal. This improves the computation of the scale field
    p = np.polyfit(np.arange(shape[0]), ref_patch_mean0, deg=1) # degree 1 works well for now
    trend = np.polyval(p, np.arange(shape[0]))
    # NOTE(review): trend_ is computed but never used below
    trend_ = trend + np.abs(np.min(trend))
    ref_patch_detrend = ref_patch_mean0 - trend
    im_G_detrend = im_G_mean0 - trend.reshape(-1, 1, 1)
    del im_G_mean0
    # unit-norm basis vector along z; the scale field is its projection
    # coefficient at every (y, x) position
    ref_patch_norm = ref_patch_detrend / np.linalg.norm(ref_patch_detrend)
    im_scale_field = np.sum(ref_patch_norm.reshape(-1, 1, 1) * im_G_detrend, axis=0, dtype=np.float32)
    # recreate: we would use the pattern on the throu plane change at this reference and scale it accordingly
    # accross the plane
    im_correction = im_scale_field * np.float32(ref_patch_norm.reshape(-1, 1, 1)) + np.float32(trend.reshape(-1, 1, 1))
    #del im_scale_field
    im_corrected = im - im_correction
    del im_correction
    return np.float32(im_corrected)#, im_scale_field
def correct_illum_m2(im, sigma=5, ref_region_spec=(15, 747, 100)):
    """Illumination correction, variant 2 (no trend removal).

    Projects every (y, x) position of the smoothed stack onto the
    mean-centred, unit-norm through-plane profile of a reference patch,
    then subtracts the reconstructed pattern from the raw stack.

    :param im: 3-D stack of radiographs (z, y, x)
    :param sigma: per-slice Gaussian smoothing before analysis
    :param ref_region_spec: (left, top, width) of the square reference patch
    :return: corrected stack (same dtype rules as ``im - float array``)
    """
    #im_G = np.zeros_like(im)
    # for i, slc in enumerate(im):
    #     im_G[i] = sk.filters.gaussian(slc, sigma=sigma, preserve_range=True)
    # NOTE(review): `shape` and `ref_centre_idx` below are computed but unused
    shape = im.shape
    im_G = []
    im_G.extend(Parallel(n_jobs=5)(delayed(gauss_filter)(slc, sigma) for slc in im))
    im_G = np.float32(np.array(im_G))
    # pick the edge that would serve as reference where image does not change
    # in the through plane direction
    #l=15; r=150; t=747; b=1080
    l = ref_region_spec[0]; t = ref_region_spec[1]; w = ref_region_spec[2]
    r = l + w; b = t + w
    ref_centre_idx = (l+(r-l)//2, t+(b-l)//2)
    im_G_ref_patch = im_G[:, t:b, l:r].copy()
    # compute the throughplane change
    ref_patch_mean = np.mean(im_G_ref_patch, axis=1).mean(axis=1)
    # subtract mean appears to do what I just want see calc in one note
    ref_patch_mean = ref_patch_mean - np.mean(ref_patch_mean)
    # we would use the pattern on the throu plane change at this reference and scale it accordingly
    # accross the plane
    ref_patch_norm = ref_patch_mean / np.linalg.norm(ref_patch_mean)
    # projection coefficient of each (y, x) column onto the reference profile
    im_scale_field = np.sum(ref_patch_norm.reshape(-1, 1, 1) * im_G, axis=0)
    im_correction = im_scale_field * ref_patch_norm.reshape(-1, 1, 1)
    #im_corrected = im - im_correction + im[0] # this one takes the image to typical values
    im_corrected = im - im_correction
    return im_corrected#, im_scale_field
def correct_illum_m3(im, sigma=250, ref_region_spec=(15, 747, 100)):
    """Illumination correction, variant 3 (incremental / cumulative-sum form).

    Scales the slice-to-slice change observed in a reference patch across the
    whole plane, then subtracts the cumulative correction from the raw stack.

    :param im: 3-D stack of radiographs (z, y, x)
    :param sigma: per-slice Gaussian smoothing before analysis (large by default)
    :param ref_region_spec: (left, top, width) of the square reference patch
    :return: corrected stack
    """
    #im_G = np.zeros_like(im)
    # for i, slc in enumerate(im):
    #     im_G[i] = sk.filters.gaussian(slc, sigma=sigma, preserve_range=True)
    shape = im.shape
    im_G = []
    im_G.extend(Parallel(n_jobs=5)(delayed(gauss_filter)(slc, sigma) for slc in im))
    im_G = np.float32(np.array(im_G))
    # pick the edge that would serve as reference where image does not change
    # in the through plane direction
    # NOTE(review): `crop_centre_idx` is computed but only used in the
    # commented-out alternative scale field below
    l = ref_region_spec[0]; t = ref_region_spec[1]; w = ref_region_spec[2]
    r = l + w; b = t + w
    #l=15; r=150; t=747; b=1080
    crop_centre_idx = (l+(r-l)//2, t+(b-l)//2)
    im_G_edge_crop = im_G[:, t:b, l:r]
    # compute the throughplane change
    z_sum = np.mean(im_G_edge_crop, axis=1).mean(axis=1)
    z_sum_diff = np.diff(z_sum)
    # we would use the patter on the throu plane change at this reference and scale it accordingly
    # accross the plane
    # first pick a notable point in the through-plane direction: that max
    max_delta_idx = np.argmax(z_sum_diff)  # index of max change
    # find the corresponding changes aacross the plane
    im_max_diff = im_G[max_delta_idx+1] - im_G[max_delta_idx]
    # find the scaling factor: how that point scales across the field
    # this scaling will be used to generate other points from the reference through-plane change
    im_scale_field = im_max_diff / z_sum_diff[max_delta_idx]
    #im_scale_field = im_max_diff / im_max_diff[crop_centre_idx[1], crop_centre_idx[0]]
    # generate full stack changes and include initial zero
    im_correction_delta = im_scale_field * z_sum_diff.reshape(-1, 1, 1)
    im_correction_delta = np.insert(im_correction_delta, 0,
                                     np.zeros((shape[1],shape[2])), axis=0)
    # the corrected image will be the raw image with the cumulative sum removed
    im_corrected = im - np.cumsum(im_correction_delta, axis=0)
    return im_corrected #, im_scale_field
def filter_particle_area(label, thresh):
    """
    Filter labelled particles by size.

    :param label: labelled image (integer component labels, 0 = background)
    :param thresh: minimum component area; smaller components are removed
    :return: f_label - image where each surviving component's pixels hold
        that component's area and components smaller than thresh are 0

    Bug fix: the original rewrote ``f_label`` in place while iterating,
    so a pixel already overwritten with ``region.area`` could later match
    a different region's label (``f_label == region.label``) and be
    clobbered again.  Masks are now taken from the untouched ``label``
    image, so written area values can never alias a label id.
    """
    f_label = np.zeros_like(label)
    for region in measure.regionprops(label):
        # keep only components at least `thresh` pixels large
        if region.area >= thresh:
            f_label[label == region.label] = region.area
    return f_label
def crop_from_centre(x, width):
    """Crop a centred ``width`` x ``width`` window out of every slice of a 3-D stack."""
    half = width // 2
    centre_y = x.shape[1] // 2
    centre_x = x.shape[2] // 2
    return x[:, centre_y - half:centre_y + half, centre_x - half:centre_x + half]
def save_figure(fname, obj, figdir=None):
    """Save a matplotlib figure ``obj`` as ``fname`` inside ``figdir``
    (defaults to the project write-up directory)."""
    if figdir is None:
        figdir = r"..\\..\\..\\..\\..\\..\sfu\phd\research_in_motion\development\write-up\misc"
    target = os.path.join(os.path.normpath(figdir), fname)
    obj.savefig(target, bbox_inches='tight')
def save_image(fname, obj, save_dir=None):
    """Save image array ``obj`` as ``fname`` inside ``save_dir``
    (defaults to the current working directory).

    Bug fix: the original assigned the fallback to an unused ``figdir``
    variable, so calling without ``save_dir`` crashed with a TypeError in
    ``os.path.normpath(None)``.
    """
    if save_dir is None:
        save_dir = ""
    path = os.path.join(os.path.normpath(save_dir), fname)
    imsave(path, obj)
"numpy.sqrt",
"scipy.ndimage.filters.gaussian_filter",
"numpy.int32",
"numpy.array",
"numpy.linalg.norm",
"joblib.delayed",
"numpy.arange",
"numpy.mean",
"numpy.reshape",
"numpy.where",
"scipy.ndimage.uniform_filter",
"numpy.diff",
"numpy.max",
"numpy.exp",
"os.path.normpath",
"numpy.d... | [((2875, 2896), 'numpy.zeros_like', 'np.zeros_like', (['imgout'], {}), '(imgout)\n', (2888, 2896), True, 'import numpy as np\n'), ((2979, 2999), 'numpy.ones_like', 'np.ones_like', (['imgout'], {}), '(imgout)\n', (2991, 2999), True, 'import numpy as np\n'), ((7202, 7225), 'numpy.zeros_like', 'np.zeros_like', (['stackout'], {}), '(stackout)\n', (7215, 7225), True, 'import numpy as np\n'), ((7358, 7380), 'numpy.ones_like', 'np.ones_like', (['stackout'], {}), '(stackout)\n', (7370, 7380), True, 'import numpy as np\n'), ((9836, 9867), 'numpy.where', 'np.where', (['(im == phase_val)', '(1)', '(0)'], {}), '(im == phase_val, 1, 0)\n', (9844, 9867), True, 'import numpy as np\n'), ((10879, 10932), 'numpy.reshape', 'np.reshape', (['im_stack', '(shape[0], shape[1] * shape[2])'], {}), '(im_stack, (shape[0], shape[1] * shape[2]))\n', (10889, 10932), True, 'import numpy as np\n'), ((11523, 11564), 'numpy.zeros_like', 'np.zeros_like', (['im_stack'], {'dtype': 'np.float32'}), '(im_stack, dtype=np.float32)\n', (11536, 11564), True, 'import numpy as np\n'), ((11584, 11603), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (11593, 11603), True, 'import numpy as np\n'), ((12404, 12418), 'numpy.sqrt', 'np.sqrt', (['vars_'], {}), '(vars_)\n', (12411, 12418), True, 'import numpy as np\n'), ((12745, 12763), 'numpy.dot', 'np.dot', (['g_hats', 'ws'], {}), '(g_hats, ws)\n', (12751, 12763), True, 'import numpy as np\n'), ((12779, 12808), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (12791, 12808), True, 'import matplotlib.pyplot as plt\n'), ((14124, 14147), 'numpy.where', 'np.where', (['(bndry_slc > 0)'], {}), '(bndry_slc > 0)\n', (14132, 14147), True, 'import numpy as np\n'), ((14343, 14372), 'numpy.zeros', 'np.zeros', (['dim'], {'dtype': 'np.uint8'}), '(dim, dtype=np.uint8)\n', (14351, 14372), True, 'import numpy as np\n'), ((14383, 14403), 'numpy.arange', 'np.arange', (['(0)', 'dim[1]'], {}), '(0, 
dim[1])\n', (14392, 14403), True, 'import numpy as np\n'), ((14413, 14433), 'numpy.arange', 'np.arange', (['(0)', 'dim[2]'], {}), '(0, dim[2])\n', (14422, 14433), True, 'import numpy as np\n'), ((14453, 14472), 'numpy.meshgrid', 'np.meshgrid', (['nx', 'ny'], {}), '(nx, ny)\n', (14464, 14472), True, 'import numpy as np\n'), ((15677, 15698), 'numpy.argmax', 'np.argmax', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (15686, 15698), True, 'import numpy as np\n'), ((16077, 16106), 'numpy.zeros', 'np.zeros', (['dim'], {'dtype': 'np.uint8'}), '(dim, dtype=np.uint8)\n', (16085, 16106), True, 'import numpy as np\n'), ((16628, 16648), 'skimage.io.imread', 'imread', (['full_im_path'], {}), '(full_im_path)\n', (16634, 16648), False, 'from skimage.io import imread, imsave\n'), ((16660, 16678), 'skimage.io.imread', 'imread', (['ccseg_path'], {}), '(ccseg_path)\n', (16666, 16678), False, 'from skimage.io import imread, imsave\n'), ((17457, 17490), 'numpy.array', 'np.array', (['im_cgdl'], {'dtype': 'im_dtype'}), '(im_cgdl, dtype=im_dtype)\n', (17465, 17490), True, 'import numpy as np\n'), ((17540, 17594), 'skimage.filters.gaussian', 'sk.filters.gaussian', (['image', 'sigma'], {'preserve_range': '(True)'}), '(image, sigma, preserve_range=True)\n', (17559, 17594), True, 'import skimage as sk\n'), ((18147, 18179), 'numpy.array', 'np.array', (['im_G'], {'dtype': 'np.float32'}), '(im_G, dtype=np.float32)\n', (18155, 18179), True, 'import numpy as np\n'), ((19759, 19783), 'numpy.float32', 'np.float32', (['im_corrected'], {}), '(im_corrected)\n', (19769, 19783), True, 'import numpy as np\n'), ((22122, 22136), 'numpy.diff', 'np.diff', (['z_sum'], {}), '(z_sum)\n', (22129, 22136), True, 'import numpy as np\n'), ((22356, 22377), 'numpy.argmax', 'np.argmax', (['z_sum_diff'], {}), '(z_sum_diff)\n', (22365, 22377), True, 'import numpy as np\n'), ((23512, 23538), 'skimage.measure.regionprops', 'measure.regionprops', (['label'], {}), '(label)\n', (23531, 23538), True, 'import skimage.measure as 
measure\n'), ((24481, 24498), 'skimage.io.imsave', 'imsave', (['path', 'obj'], {}), '(path, obj)\n', (24487, 24498), False, 'from skimage.io import imread, imsave\n'), ((2635, 2706), 'warnings.warn', 'warnings.warn', (['"""Only grayscale images allowed, converting to 2D matrix"""'], {}), "('Only grayscale images allowed, converting to 2D matrix')\n", (2648, 2706), False, 'import warnings\n'), ((3151, 3208), 'pylab.figure', 'pl.figure', ([], {'figsize': '(20, 5.5)', 'num': '"""Anisotropic diffusion"""'}), "(figsize=(20, 5.5), num='Anisotropic diffusion')\n", (3160, 3208), True, 'import pylab as pl\n'), ((3514, 3533), 'numpy.arange', 'np.arange', (['(1)', 'niter'], {}), '(1, niter)\n', (3523, 3533), True, 'import numpy as np\n'), ((3591, 3614), 'numpy.diff', 'np.diff', (['imgout'], {'axis': '(0)'}), '(imgout, axis=0)\n', (3598, 3614), True, 'import numpy as np\n'), ((3638, 3661), 'numpy.diff', 'np.diff', (['imgout'], {'axis': '(1)'}), '(imgout, axis=1)\n', (3645, 3661), True, 'import numpy as np\n'), ((6950, 7021), 'warnings.warn', 'warnings.warn', (['"""Only grayscale stacks allowed, converting to 3D matrix"""'], {}), "('Only grayscale stacks allowed, converting to 3D matrix')\n", (6963, 7021), False, 'import warnings\n'), ((7590, 7647), 'pylab.figure', 'pl.figure', ([], {'figsize': '(20, 5.5)', 'num': '"""Anisotropic diffusion"""'}), "(figsize=(20, 5.5), num='Anisotropic diffusion')\n", (7599, 7647), True, 'import pylab as pl\n'), ((8027, 8046), 'numpy.arange', 'np.arange', (['(1)', 'niter'], {}), '(1, niter)\n', (8036, 8046), True, 'import numpy as np\n'), ((8108, 8133), 'numpy.diff', 'np.diff', (['stackout'], {'axis': '(0)'}), '(stackout, axis=0)\n', (8115, 8133), True, 'import numpy as np\n'), ((8162, 8187), 'numpy.diff', 'np.diff', (['stackout'], {'axis': '(1)'}), '(stackout, axis=1)\n', (8169, 8187), True, 'import numpy as np\n'), ((8216, 8241), 'numpy.diff', 'np.diff', (['stackout'], {'axis': '(2)'}), '(stackout, axis=2)\n', (8223, 8241), True, 'import numpy 
as np\n'), ((10805, 10842), 'numpy.where', 'np.where', (['(im_stack == phase_val)', '(1)', '(0)'], {}), '(im_stack == phase_val, 1, 0)\n', (10813, 10842), True, 'import numpy as np\n'), ((11675, 11709), 'scipy.ndimage.filters.gaussian_filter', 'flt.gaussian_filter', (['slc'], {'sigma': '(10)'}), '(slc, sigma=10)\n', (11694, 11709), True, 'import scipy.ndimage.filters as flt\n'), ((11796, 11827), 'numpy.where', 'np.where', (['(im == phase_val)', '(1)', '(0)'], {}), '(im == phase_val, 1, 0)\n', (11804, 11827), True, 'import numpy as np\n'), ((11940, 11980), 'numpy.exp', 'np.exp', (['(-(1 / 2) * ((x - mu) / sig) ** 2)'], {}), '(-(1 / 2) * ((x - mu) / sig) ** 2)\n', (11946, 11980), True, 'import numpy as np\n'), ((12508, 12520), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (12514, 12520), True, 'import numpy as np\n'), ((12522, 12534), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (12528, 12534), True, 'import numpy as np\n'), ((13768, 13815), 'numpy.where', 'np.where', (['(bndry_slc == dim[0] - 1)', '(0)', 'bndry_slc'], {}), '(bndry_slc == dim[0] - 1, 0, bndry_slc)\n', (13776, 13815), True, 'import numpy as np\n'), ((14546, 14609), 'scipy.interpolate.griddata', 'interpolate.griddata', (['(col, row)', 'z', '(xx, yy)'], {'method': '"""nearest"""'}), "((col, row), z, (xx, yy), method='nearest')\n", (14566, 14609), False, 'from scipy import interpolate, ndimage\n'), ((16303, 16328), 'numpy.where', 'np.where', (['(decision_im < 0)'], {}), '(decision_im < 0)\n', (16311, 16328), True, 'import numpy as np\n'), ((17160, 17179), 'numpy.nonzero', 'np.nonzero', (['im_cgdl'], {}), '(im_cgdl)\n', (17170, 17179), True, 'import numpy as np\n'), ((17206, 17215), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (17212, 17215), True, 'import numpy as np\n'), ((18737, 18760), 'numpy.mean', 'np.mean', (['ref_patch_mean'], {}), '(ref_patch_mean)\n', (18744, 18760), True, 'import numpy as np\n'), ((18788, 18809), 'numpy.mean', 'np.mean', (['im_G'], {'axis': '(0)'}), '(im_G, 
axis=0)\n', (18795, 18809), True, 'import numpy as np\n'), ((18938, 18957), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (18947, 18957), True, 'import numpy as np\n'), ((19039, 19058), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (19048, 19058), True, 'import numpy as np\n'), ((19267, 19300), 'numpy.linalg.norm', 'np.linalg.norm', (['ref_patch_detrend'], {}), '(ref_patch_detrend)\n', (19281, 19300), True, 'import numpy as np\n'), ((20162, 20176), 'numpy.array', 'np.array', (['im_G'], {}), '(im_G)\n', (20170, 20176), True, 'import numpy as np\n'), ((20740, 20763), 'numpy.mean', 'np.mean', (['ref_patch_mean'], {}), '(ref_patch_mean)\n', (20747, 20763), True, 'import numpy as np\n'), ((20931, 20961), 'numpy.linalg.norm', 'np.linalg.norm', (['ref_patch_mean'], {}), '(ref_patch_mean)\n', (20945, 20961), True, 'import numpy as np\n'), ((21651, 21665), 'numpy.array', 'np.array', (['im_G'], {}), '(im_G)\n', (21659, 21665), True, 'import numpy as np\n'), ((23067, 23097), 'numpy.zeros', 'np.zeros', (['(shape[1], shape[2])'], {}), '((shape[1], shape[2]))\n', (23075, 23097), True, 'import numpy as np\n'), ((23211, 23249), 'numpy.cumsum', 'np.cumsum', (['im_correction_delta'], {'axis': '(0)'}), '(im_correction_delta, axis=0)\n', (23220, 23249), True, 'import numpy as np\n'), ((24197, 24221), 'os.path.normpath', 'os.path.normpath', (['figdir'], {}), '(figdir)\n', (24213, 24221), False, 'import os\n'), ((24442, 24468), 'os.path.normpath', 'os.path.normpath', (['save_dir'], {}), '(save_dir)\n', (24458, 24468), False, 'import os\n'), ((3702, 3736), 'scipy.ndimage.filters.gaussian_filter', 'flt.gaussian_filter', (['deltaS', 'sigma'], {}), '(deltaS, sigma)\n', (3721, 3736), True, 'import scipy.ndimage.filters as flt\n'), ((3756, 3790), 'scipy.ndimage.filters.gaussian_filter', 'flt.gaussian_filter', (['deltaE', 'sigma'], {}), '(deltaE, sigma)\n', (3775, 3790), True, 'import scipy.ndimage.filters as flt\n'), ((8282, 8316), 
'scipy.ndimage.filters.gaussian_filter', 'flt.gaussian_filter', (['deltaD', 'sigma'], {}), '(deltaD, sigma)\n', (8301, 8316), True, 'import scipy.ndimage.filters as flt\n'), ((8336, 8370), 'scipy.ndimage.filters.gaussian_filter', 'flt.gaussian_filter', (['deltaS', 'sigma'], {}), '(deltaS, sigma)\n', (8355, 8370), True, 'import scipy.ndimage.filters as flt\n'), ((8390, 8424), 'scipy.ndimage.filters.gaussian_filter', 'flt.gaussian_filter', (['deltaE', 'sigma'], {}), '(deltaE, sigma)\n', (8409, 8424), True, 'import scipy.ndimage.filters as flt\n'), ((12258, 12347), 'sklearn.mixture.GaussianMixture', 'GM', ([], {'n_components': 'n_comp', 'covariance_type': '"""full"""', 'tol': '(0.0001)', 'random_state': 'rand_state'}), "(n_components=n_comp, covariance_type='full', tol=0.0001, random_state=\n rand_state)\n", (12260, 12347), True, 'from sklearn.mixture import GaussianMixture as GM\n'), ((13711, 13738), 'numpy.argmax', 'np.argmax', (['im[::-1]'], {'axis': '(0)'}), '(im[::-1], axis=0)\n', (13720, 13738), True, 'import numpy as np\n'), ((13878, 13899), 'numpy.argmax', 'np.argmax', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (13887, 13899), True, 'import numpy as np\n'), ((14627, 14666), 'scipy.ndimage.uniform_filter', 'ndimage.uniform_filter', (['interp'], {'size': '(10)'}), '(interp, size=10)\n', (14649, 14666), False, 'from scipy import interpolate, ndimage\n'), ((16421, 16446), 'numpy.where', 'np.where', (['(decision_im > 0)'], {}), '(decision_im > 0)\n', (16429, 16446), True, 'import numpy as np\n'), ((17003, 17029), 'numpy.int32', 'np.int32', (['(c_gdl_mask / 255)'], {}), '(c_gdl_mask / 255)\n', (17011, 17029), True, 'import numpy as np\n'), ((17032, 17044), 'numpy.int32', 'np.int32', (['im'], {}), '(im)\n', (17040, 17044), True, 'import numpy as np\n'), ((18066, 18084), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(5)'}), '(n_jobs=5)\n', (18074, 18084), False, 'from joblib import Parallel, delayed\n'), ((18581, 18612), 'numpy.mean', 'np.mean', 
(['im_G_ref_patch'], {'axis': '(1)'}), '(im_G_ref_patch, axis=1)\n', (18588, 18612), True, 'import numpy as np\n'), ((19088, 19101), 'numpy.min', 'np.min', (['trend'], {}), '(trend)\n', (19094, 19101), True, 'import numpy as np\n'), ((20070, 20088), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(5)'}), '(n_jobs=5)\n', (20078, 20088), False, 'from joblib import Parallel, delayed\n'), ((20585, 20616), 'numpy.mean', 'np.mean', (['im_G_ref_patch'], {'axis': '(1)'}), '(im_G_ref_patch, axis=1)\n', (20592, 20616), True, 'import numpy as np\n'), ((21559, 21577), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(5)'}), '(n_jobs=5)\n', (21567, 21577), False, 'from joblib import Parallel, delayed\n'), ((22060, 22091), 'numpy.mean', 'np.mean', (['im_G_edge_crop'], {'axis': '(1)'}), '(im_G_edge_crop, axis=1)\n', (22067, 22091), True, 'import numpy as np\n'), ((3968, 4001), 'numpy.exp', 'np.exp', (['(-(deltaSf / kappa) ** 2.0)'], {}), '(-(deltaSf / kappa) ** 2.0)\n', (3974, 4001), True, 'import numpy as np\n'), ((4022, 4055), 'numpy.exp', 'np.exp', (['(-(deltaEf / kappa) ** 2.0)'], {}), '(-(deltaEf / kappa) ** 2.0)\n', (4028, 4055), True, 'import numpy as np\n'), ((8628, 8660), 'numpy.exp', 'np.exp', (['(-(deltaD / kappa) ** 2.0)'], {}), '(-(deltaD / kappa) ** 2.0)\n', (8634, 8660), True, 'import numpy as np\n'), ((8681, 8713), 'numpy.exp', 'np.exp', (['(-(deltaS / kappa) ** 2.0)'], {}), '(-(deltaS / kappa) ** 2.0)\n', (8687, 8713), True, 'import numpy as np\n'), ((8734, 8766), 'numpy.exp', 'np.exp', (['(-(deltaE / kappa) ** 2.0)'], {}), '(-(deltaE / kappa) ** 2.0)\n', (8740, 8766), True, 'import numpy as np\n'), ((9908, 9927), 'numpy.sum', 'np.sum', (['im_'], {'axis': '(0)'}), '(im_, axis=0)\n', (9914, 9927), True, 'import numpy as np\n'), ((10191, 10203), 'numpy.mean', 'np.mean', (['im_'], {}), '(im_)\n', (10198, 10203), True, 'import numpy as np\n'), ((16159, 16176), 'numpy.arange', 'np.arange', (['dim[0]'], {}), '(dim[0])\n', (16168, 16176), True, 'import numpy as np\n'), 
((18085, 18106), 'joblib.delayed', 'delayed', (['gauss_filter'], {}), '(gauss_filter)\n', (18092, 18106), False, 'from joblib import Parallel, delayed\n'), ((20089, 20110), 'joblib.delayed', 'delayed', (['gauss_filter'], {}), '(gauss_filter)\n', (20096, 20110), False, 'from joblib import Parallel, delayed\n'), ((21578, 21599), 'joblib.delayed', 'delayed', (['gauss_filter'], {}), '(gauss_filter)\n', (21585, 21599), False, 'from joblib import Parallel, delayed\n'), ((10006, 10025), 'numpy.sum', 'np.sum', (['im_'], {'axis': '(0)'}), '(im_, axis=0)\n', (10012, 10025), True, 'import numpy as np\n'), ((10100, 10119), 'numpy.sum', 'np.sum', (['im_'], {'axis': '(1)'}), '(im_, axis=1)\n', (10106, 10119), True, 'import numpy as np\n')] |
import os
import sys
import argparse
from keras.models import load_model
from scipy.misc import imresize
from skimage import filters, img_as_ubyte, io
import numpy as np
import tensorflow as tf
import cv2
# Load the trained foreground-segmentation network once at import time;
# all predictions are served from this single shared model.
model = load_model('extract_foreground_model.hdf5')
# Capture the default TF graph now so predict() can re-enter the same graph
# later (required by Keras on TF1 when predicting outside the loading context).
graph = tf.get_default_graph()
def predict(image):
    """
    Run the segmentation network on a single image.

    :param image: image (H x W x C array) to run the prediction on
    :return: prediction mask reshaped to 224 x 224
    """
    with graph.as_default():
        # The model expects a batch dimension; add one for the single image.
        batch = image[None, :, :, :]
        raw = model.predict(batch)
        return raw.reshape((224, 224, -1))
def overlay_transparent(background, overlay):
    """
    Alpha-blend *overlay* onto *background* in place.

    Both images are assumed to be the same size. An overlay without an alpha
    channel is treated as fully opaque.

    :param background: background picture (modified in place)
    :param overlay: picture to be put on top of the background
    :return: the blended background
    """
    rows, cols = overlay.shape[:2]
    if overlay.shape[2] < 4:
        # Append a fully-opaque alpha channel to an RGB overlay.
        opaque = np.ones((rows, cols, 1), dtype=overlay.dtype) * 255
        overlay = np.concatenate([overlay, opaque], axis=2)
    colour = overlay[..., :3]
    alpha = overlay[..., 3:] / 255.0
    blended = (1.0 - alpha) * background[:rows, :cols] + alpha * colour
    background[:rows, :cols] = blended
    return background
def reduce_background(img, saturation, brightness_factor):
    """
    Wash out the image: push brightness toward white and strip saturation.

    :param img: BGR image to adjust
    :param saturation: amount subtracted from every pixel's saturation;
        pixels below this threshold become fully desaturated
    :param brightness_factor: factor by which the brightness shall be changed
        (each pixel moves 1/brightness_factor of the way toward maximum)
    :return: adjusted BGR image
    """
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    # Move every pixel a fixed fraction of the way toward full brightness.
    val += np.uint8((255 - val) / brightness_factor)
    # Zero out weak saturation, shift everything else down uniformly.
    sat[sat < saturation] = 0
    sat[sat >= saturation] -= saturation
    return cv2.cvtColor(cv2.merge((hue, sat, val)), cv2.COLOR_HSV2BGR)
def process_background(image):
    """
    Produce a softened version of *image*: heavy Gaussian blur followed by
    desaturation and brightening via reduce_background().

    :param image: image to be processed
    :return: blurred, washed-out copy of the image
    """
    blurred = filters.gaussian(
        np.array(image), sigma=80, multichannel=True, mode='reflect')
    return reduce_background(img_as_ubyte(blurred), 120, 1.5)
def retrieve_person(image):
    """
    Uses neural network to find a person on a photo.

    The image is downscaled to the network's 224x224 input, segmented by
    predict(), and the resulting mask is scaled back to the original
    resolution and attached as an alpha channel.

    :param image: image to retrieve the person
    :return: retrieved person (numpy array with transparency set to the areas
    that person was not discovered)
    """
    img = image.copy()
    # Network input: 224x224, channel values scaled to [0, 1].
    head = np.array(cv2.resize(img, (224, 224))) / 255.0
    head_prediction_small = predict(head[:, :, 0:3])
    # Upscale channel 1 of the prediction (presumably the "person" class —
    # confirm against the model) back to the source resolution.
    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; this
    # script requires an old SciPy version.
    head_prediction = imresize(
        head_prediction_small[:, :, 1], (image.shape[0], image.shape[1]))
    # prediction almost always returns ~1/40 lowest rows as not being part of
    # person, but it is almost always part of it. Correct it by broadcasting
    # the single row just above that band over the bottom rows.
    to_correct = int(image.shape[0]/40)
    head_prediction[image.shape[0] - to_correct:, :] = \
        head_prediction[image.shape[0] - to_correct:
                        image.shape[0] - (to_correct - 1), :]
    # Attach the mask as a fourth (alpha) channel.
    person = np.append(img, head_prediction[:, :, None], axis=-1)
    return person
def process_img_background(image, name, location):
    """
    Composite the detected person over a softened background and write the
    result to disk as ``<location>/<name>.png``.

    :param image: image to be processed
    :param name: name under which the image shall be saved (no extension)
    :param location: directory where the image shall be saved
    """
    person = retrieve_person(image)
    softened = process_background(image)
    combined = overlay_transparent(softened, person)
    out_path = os.path.join(location, name + '.png')
    cv2.imwrite(out_path, cv2.cvtColor(combined, cv2.COLOR_BGR2RGB))
def cli_argument_parser(argv):
    """
    Parse command-line options for the background softener.

    :param argv: commandline arguments (without the program name)
    :return: parsed namespace with photos_folder, photos_format and
        output_folder attributes
    """
    parser = argparse.ArgumentParser(description='Soften background.')
    options = (
        ('--photos_folder', 'folder containing photos to edit', 'sample'),
        ('--photos_format', 'format of photos to be edited, i.e.: jpg', 'jpg'),
        ('--output_folder', 'folder where to store result', 'out'),
    )
    for flag, help_text, default in options:
        parser.add_argument(flag, help=help_text, default=default)
    return parser.parse_args(argv)
def main(argv):
    """
    Entry point: validate folders, then soften the background of every photo
    in the input folder, saving results to the output folder.

    :param argv: commandline arguments (without the program name)
    :raises FileNotFoundError: if the provided photos folder does not exist
    """
    p = cli_argument_parser(argv)
    if not os.path.exists(p.photos_folder):
        # Bug fix: a *missing* path is FileNotFoundError, not FileExistsError.
        raise FileNotFoundError("Provided photos folder does not exist.")
    if not os.path.exists(p.output_folder):
        os.makedirs(p.output_folder)
    ic = io.ImageCollection(p.photos_folder + '/*.' + p.photos_format)
    for i in range(len(ic)):
        try:
            name = os.path.basename(ic.files[i][:-4])
            print("processing ", name)
            process_img_background(ic[i], name, p.output_folder)
        except Exception as e:
            # Best-effort batch processing: log and keep going when a single
            # image fails.
            print("Exception happened!", e)
# Script entry point: forward all CLI arguments except the program name.
if __name__ == '__main__':
    main(sys.argv[1:])
| [
"numpy.uint8",
"numpy.array",
"scipy.misc.imresize",
"os.path.exists",
"argparse.ArgumentParser",
"skimage.io.ImageCollection",
"skimage.img_as_ubyte",
"tensorflow.get_default_graph",
"cv2.merge",
"numpy.ones",
"cv2.cvtColor",
"cv2.split",
"cv2.resize",
"skimage.filters.gaussian",
"cv2.i... | [((215, 258), 'keras.models.load_model', 'load_model', (['"""extract_foreground_model.hdf5"""'], {}), "('extract_foreground_model.hdf5')\n", (225, 258), False, 'from keras.models import load_model\n'), ((268, 290), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (288, 290), True, 'import tensorflow as tf\n'), ((1761, 1797), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (1773, 1797), False, 'import cv2\n'), ((1812, 1826), 'cv2.split', 'cv2.split', (['hsv'], {}), '(hsv)\n', (1821, 1826), False, 'import cv2\n'), ((1837, 1876), 'numpy.uint8', 'np.uint8', (['((255 - v) / brightness_factor)'], {}), '((255 - v) / brightness_factor)\n', (1845, 1876), True, 'import numpy as np\n'), ((1957, 1977), 'cv2.merge', 'cv2.merge', (['(h, s, v)'], {}), '((h, s, v))\n', (1966, 1977), False, 'import cv2\n'), ((1988, 2030), 'cv2.cvtColor', 'cv2.cvtColor', (['final_hsv', 'cv2.COLOR_HSV2BGR'], {}), '(final_hsv, cv2.COLOR_HSV2BGR)\n', (2000, 2030), False, 'import cv2\n'), ((2276, 2291), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2284, 2291), True, 'import numpy as np\n'), ((2307, 2380), 'skimage.filters.gaussian', 'filters.gaussian', (['background'], {'sigma': '(80)', 'multichannel': '(True)', 'mode': '"""reflect"""'}), "(background, sigma=80, multichannel=True, mode='reflect')\n", (2323, 2380), False, 'from skimage import filters, img_as_ubyte, io\n'), ((2921, 2995), 'scipy.misc.imresize', 'imresize', (['head_prediction_small[:, :, 1]', '(image.shape[0], image.shape[1])'], {}), '(head_prediction_small[:, :, 1], (image.shape[0], image.shape[1]))\n', (2929, 2995), False, 'from scipy.misc import imresize\n'), ((3370, 3422), 'numpy.append', 'np.append', (['img', 'head_prediction[:, :, None]'], {'axis': '(-1)'}), '(img, head_prediction[:, :, None], axis=-1)\n', (3379, 3422), True, 'import numpy as np\n'), ((3954, 3999), 'cv2.cvtColor', 'cv2.cvtColor', (['joined_image', 'cv2.COLOR_BGR2RGB'], 
{}), '(joined_image, cv2.COLOR_BGR2RGB)\n', (3966, 3999), False, 'import cv2\n'), ((4016, 4053), 'os.path.join', 'os.path.join', (['location', "(name + '.png')"], {}), "(location, name + '.png')\n", (4028, 4053), False, 'import os\n'), ((4058, 4091), 'cv2.imwrite', 'cv2.imwrite', (['file_path', 'final_img'], {}), '(file_path, final_img)\n', (4069, 4091), False, 'import cv2\n'), ((4245, 4302), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Soften background."""'}), "(description='Soften background.')\n", (4268, 4302), False, 'import argparse\n'), ((5044, 5105), 'skimage.io.ImageCollection', 'io.ImageCollection', (["(p.photos_folder + '/*.' + p.photos_format)"], {}), "(p.photos_folder + '/*.' + p.photos_format)\n", (5062, 5105), False, 'from skimage import filters, img_as_ubyte, io\n'), ((2433, 2455), 'skimage.img_as_ubyte', 'img_as_ubyte', (['smoothed'], {}), '(smoothed)\n', (2445, 2455), False, 'from skimage import filters, img_as_ubyte, io\n'), ((4849, 4880), 'os.path.exists', 'os.path.exists', (['p.photos_folder'], {}), '(p.photos_folder)\n', (4863, 4880), False, 'import os\n'), ((4965, 4996), 'os.path.exists', 'os.path.exists', (['p.output_folder'], {}), '(p.output_folder)\n', (4979, 4996), False, 'import os\n'), ((5006, 5034), 'os.makedirs', 'os.makedirs', (['p.output_folder'], {}), '(p.output_folder)\n', (5017, 5034), False, 'import os\n'), ((2809, 2836), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (2819, 2836), False, 'import cv2\n'), ((5181, 5215), 'os.path.basename', 'os.path.basename', (['ic.files[i][:-4]'], {}), '(ic.files[i][:-4])\n', (5197, 5215), False, 'import os\n'), ((5276, 5310), 'os.path.basename', 'os.path.basename', (['ic.files[i][:-4]'], {}), '(ic.files[i][:-4])\n', (5292, 5310), False, 'import os\n'), ((1146, 1185), 'numpy.ones', 'np.ones', (['(h, w, 1)'], {'dtype': 'overlay.dtype'}), '((h, w, 1), dtype=overlay.dtype)\n', (1153, 1185), True, 'import numpy as np\n')] |
from utils.bert_utils import get_bert_layer_representations
import time as tm
import numpy as np
import torch
import os
import argparse
def save_layer_representations(model_layer_dict, model_name, seq_len, save_dir):
    """
    Write each layer's stacked feature arrays to its own .npy file.

    Files are named ``<save_dir>/<model_name>_length_<seq_len>_layer_<k>.npy``
    where ``k`` is the 1-based layer index.

    :param model_layer_dict: mapping of layer index -> list of feature arrays
    :param model_name: name used as the file-name prefix
    :param seq_len: context length encoded into the file name
    :param save_dir: destination directory
    :return: 1 on completion (kept for caller compatibility)
    """
    template = '{}/{}_length_{}_layer_{}.npy'
    for layer, features in model_layer_dict.items():
        target = template.format(save_dir, model_name, seq_len, layer + 1)
        np.save(target, np.vstack(features))
    print('Saved extracted features to {}'.format(save_dir))
    return 1
if __name__ == '__main__':
    # Bug fix: model_options was referenced in choices= but never defined,
    # which raises NameError before argument parsing even starts. Only
    # 'bert' is handled by this script; extend the list as support grows.
    model_options = ['bert']
    parser = argparse.ArgumentParser()
    parser.add_argument("--nlp_model", default='bert', choices=model_options)
    parser.add_argument("--sequence_length", type=int, default=1, help='length of context to provide to NLP model (default: 1)')
    parser.add_argument("--output_dir", required=True, help='directory to save extracted representations to')
    args = parser.parse_args()
    print(args)
    # Stimulus words are expected under ./data relative to the working dir.
    text_array = np.load(os.getcwd() + '/data/stimuli_words.npy')
    remove_chars = [",","\"","@"]
    # -2: presumably extract the representation of the second-to-last word
    # in each context window — confirm against get_bert_layer_representations.
    word_ind_to_extract = -2
    nlp_features = get_bert_layer_representations(args.sequence_length, text_array, remove_chars, word_ind_to_extract)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    save_layer_representations(nlp_features, args.nlp_model, args.sequence_length, args.output_dir)
| [
"os.path.exists",
"os.makedirs",
"argparse.ArgumentParser",
"utils.bert_utils.get_bert_layer_representations",
"os.getcwd",
"numpy.vstack"
] | [((552, 577), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (575, 577), False, 'import argparse\n'), ((1117, 1220), 'utils.bert_utils.get_bert_layer_representations', 'get_bert_layer_representations', (['args.sequence_length', 'text_array', 'remove_chars', 'word_ind_to_extract'], {}), '(args.sequence_length, text_array,\n remove_chars, word_ind_to_extract)\n', (1147, 1220), False, 'from utils.bert_utils import get_bert_layer_representations\n'), ((1237, 1268), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (1251, 1268), False, 'import os\n'), ((1278, 1306), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (1289, 1306), False, 'import os\n'), ((382, 416), 'numpy.vstack', 'np.vstack', (['model_layer_dict[layer]'], {}), '(model_layer_dict[layer])\n', (391, 416), True, 'import numpy as np\n'), ((989, 1000), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (998, 1000), False, 'import os\n')] |
import numpy
from matchms import Fragments, Spectrum
from matchms.filtering import normalize_intensities
def _create_test_spectrum():
    """Build the standard 11-peak test spectrum with preset intensities."""
    peak_heights = numpy.array([1, 1, 5, 5, 5, 5, 7, 7, 7, 9, 9], dtype="float")
    return _create_test_spectrum_with_intensities(peak_heights)
def _create_test_spectrum_with_intensities(intensities):
    """Build a Spectrum with m/z values 10, 20, ..., 110 and the given intensities."""
    mz_values = numpy.array(
        [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0])
    return Spectrum(mz=mz_values, intensities=intensities)
def test_peak_comments_after_filter():
    """Peak comments must survive normalize_intensities()."""
    spectrum_in = _create_test_spectrum()
    spectrum_in.set("peak_comments", {10: "blub"})
    normalized = normalize_intensities(spectrum_in)
    assert normalized.get("peak_comments")[10] == "blub"
def test_reiterating_peak_comments():
    """Replacing peaks merges the comments of m/z values that collapse together."""
    mz_values = numpy.array([100.0003, 100.0004, 100.0005, 110., 200., 300., 400.0176], dtype='float')
    heights = numpy.array([1, 2, 3, 4, 5, 6, 7], dtype='float')
    labels = ["m/z 100.0003", None, "m/z 100.0005", "m/z 110.",
              "m/z 200.", "m/z 300.", "m/z 400.0176"]
    comments = dict(zip(mz_values, labels))
    spectrum = Spectrum(mz=mz_values, intensities=heights,
                        metadata={"peak_comments": comments})
    new_mz = numpy.array([100.0004, 110., 400.018], dtype='float')
    new_heights = numpy.array([5, 4, 7], dtype='float')
    spectrum.peaks = Fragments(mz=new_mz, intensities=new_heights)
    expected = {100.0004: "m/z 100.0003; m/z 100.0005",
                110.: "m/z 110.",
                400.018: "m/z 400.0176"}
    assert spectrum.peak_comments == expected
| [
"numpy.array",
"matchms.filtering.normalize_intensities",
"matchms.Spectrum"
] | [((160, 221), 'numpy.array', 'numpy.array', (['[1, 1, 5, 5, 5, 5, 7, 7, 7, 9, 9]'], {'dtype': '"""float"""'}), "([1, 1, 5, 5, 5, 5, 7, 7, 7, 9, 9], dtype='float')\n", (171, 221), False, 'import numpy\n'), ((358, 432), 'numpy.array', 'numpy.array', (['[10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110]'], {'dtype': '"""float"""'}), "([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110], dtype='float')\n", (369, 432), False, 'import numpy\n'), ((445, 485), 'matchms.Spectrum', 'Spectrum', ([], {'mz': 'mz', 'intensities': 'intensities'}), '(mz=mz, intensities=intensities)\n', (453, 485), False, 'from matchms import Fragments, Spectrum\n'), ((653, 687), 'matchms.filtering.normalize_intensities', 'normalize_intensities', (['spectrum_in'], {}), '(spectrum_in)\n', (674, 687), False, 'from matchms.filtering import normalize_intensities\n'), ((797, 890), 'numpy.array', 'numpy.array', (['[100.0003, 100.0004, 100.0005, 110.0, 200.0, 300.0, 400.0176]'], {'dtype': '"""float"""'}), "([100.0003, 100.0004, 100.0005, 110.0, 200.0, 300.0, 400.0176],\n dtype='float')\n", (808, 890), False, 'import numpy\n'), ((903, 952), 'numpy.array', 'numpy.array', (['[1, 2, 3, 4, 5, 6, 7]'], {'dtype': '"""float"""'}), "([1, 2, 3, 4, 5, 6, 7], dtype='float')\n", (914, 952), False, 'import numpy\n'), ((1152, 1239), 'matchms.Spectrum', 'Spectrum', ([], {'mz': 'mz', 'intensities': 'intensities', 'metadata': "{'peak_comments': peak_comments}"}), "(mz=mz, intensities=intensities, metadata={'peak_comments':\n peak_comments})\n", (1160, 1239), False, 'from matchms import Fragments, Spectrum\n'), ((1298, 1352), 'numpy.array', 'numpy.array', (['[100.0004, 110.0, 400.018]'], {'dtype': '"""float"""'}), "([100.0004, 110.0, 400.018], dtype='float')\n", (1309, 1352), False, 'import numpy\n'), ((1397, 1434), 'numpy.array', 'numpy.array', (['[5, 4, 7]'], {'dtype': '"""float"""'}), "([5, 4, 7], dtype='float')\n", (1408, 1434), False, 'import numpy\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.