code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import torch
import numpy as np
import torch.nn as nn
import cv2
from scipy import signal
import imageio
from PIL import Image
import os
import os.path as osp
import numbers
import math
from torch.nn import functional as F
'''
convert image to tensor and back
'''
img = imageio.imread(pth, pilmode='RGB')  # H W C uint8 array; assumes `pth` is defined above — TODO confirm
np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1)))  # C H W
tensor = torch.from_numpy(np_transpose.copy()).float()  # to torch.Tensor
tensor = tensor.unsqueeze(0)  # 1 C H W, add the batch dim before feeding it to a model
tensor.mul_(rgb_range / 255.)  # optionally rescale the value range
# Next step: convert back from torch.Tensor to a saveable image.
img = tensor.squeeze(0).permute(1, 2, 0)  # back to H W C (fixed: original operated on `img`, which is still the numpy input)
img = img.detach().cpu().numpy()  # (fixed: torch.Tensor has no .copy(); detach/cpu must come before .numpy())
# (fixed: PIL has no Image.from_numpy — use Image.fromarray; it needs uint8 data,
# which assumes the values are back in [0, 255] — rescale first if rgb_range != 255)
img = Image.fromarray(img.round().clip(0, 255).astype(np.uint8))
img.save('path')
'''
Add blur to a tensor using Gaussian distribution
'''
class GaussianSmoothing(nn.Module):
    """Apply depthwise Gaussian smoothing to a 1d, 2d or 3d tensor.

    The input must be shaped (batch, channels, *spatial); each channel is
    filtered independently via a grouped convolution with a fixed,
    normalized Gaussian kernel.

    Args:
        channels: number of input (and output) channels.
        kernel_size: kernel size, an int or a per-dimension sequence.
        sigma: Gaussian standard deviation, a number or per-dimension sequence.
        dim: number of spatial dimensions (1, 2 or 3).

    Raises:
        RuntimeError: if ``dim`` is not 1, 2 or 3.
    """

    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # The gaussian kernel is the product of the
        # gaussian function of each dimension.
        kernel = 1
        meshgrids = torch.meshgrid(
            [torch.arange(size, dtype=torch.float32) for size in kernel_size]
        )
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
                torch.exp(-((mgrid - mean) / std) ** 2 / 2)
        # Make sure sum of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)
        # Reshape to depthwise convolutional weight: (channels, 1, *kernel_size).
        # (fixed: removed leftover debug print() calls that spammed stdout)
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
        self.register_buffer('weight', kernel)
        self.groups = channels
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError(
                'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
            )

    def forward(self, input):
        """Filter ``input`` with the fixed Gaussian kernel.

        No padding is applied here: the caller must pad (e.g. F.pad with
        mode='reflect') to preserve the spatial size.
        """
        return self.conv(input, weight=self.weight, groups=self.groups)
kernel = GaussianSmoothing(3, 7, 1.6)  # channels, kernel_size and sigma value
# At this time I assume u alr have an image which is in torch.Tensor form, C W H
tensor = F.pad(tensor, (3, 3, 3, 3), mode='reflect')
'''
The reason u MUST have a padded tensor is because you will want to
keep ur image the original size after convolutional operation
The padding value is depend on your blur kernel size above. I set it 7 so
my padding value would be 3
'''
blur = kernel(tensor)  # very easy
'''
In this step, we are going to add noise to a tensor or even a batch of a lot of tensors
I would use noise generated according to Gaussian distribution
And I will add to a batch of tensors
'''
names = []  # (fixed: was never initialised before being appended to)
batch = []  # (fixed: was never initialised before being appended to)
for name in folder:  # assumes `folder` iterates file names in directory `pth` — TODO confirm
    names.append(name)
    img = imageio.imread(osp.join(pth, name), pilmode='RGB')
    img_t = np.ascontiguousarray(img).astype(np.uint8)
    img_t = torch.from_numpy(img_t).permute(2, 0, 1).unsqueeze(0)
    batch.append(img_t)
# (fixed: `batch` was left as a Python list, but .shape/.short() below require a
# tensor — stack the 1 C H W images into one N C H W tensor)
batch = torch.cat(batch, dim=0)
'''
K now I have a batch of tensors, nxt we will "make some noise go go"
'''
noises = np.random.normal(scale=30, size=batch.shape)  # edit this scale
noises = noises.round()
noises = torch.from_numpy(noises).short()  # better to represent this noise with int16
batch = batch.short() + noises  # so convert the images too
batch = torch.clamp(batch, min=0, max=255).type(torch.uint8)  # remember to change it back to uint8
for i in range(batch.shape[0]):
    img = batch[i].permute(1, 2, 0).detach().cpu().numpy()
    img = Image.fromarray(img, mode='RGB')
    img.save(osp.join(pth, names[i]))
| [
"numpy.random.normal",
"torch.nn.functional.pad",
"PIL.Image.fromarray",
"os.path.join",
"math.sqrt",
"torch.from_numpy",
"numpy.ascontiguousarray",
"torch.exp",
"PIL.Image.from_numpy",
"torch.arange",
"torch.sum",
"imageio.imread",
"torch.clamp"
] | [((289, 323), 'imageio.imread', 'imageio.imread', (['pth'], {'pilmode': '"""RGB"""'}), "(pth, pilmode='RGB')\n", (303, 323), False, 'import imageio\n'), ((804, 825), 'PIL.Image.from_numpy', 'Image.from_numpy', (['img'], {}), '(img)\n', (820, 825), False, 'from PIL import Image\n'), ((2836, 2879), 'torch.nn.functional.pad', 'F.pad', (['tensor', '(3, 3, 3, 3)'], {'mode': '"""reflect"""'}), "(tensor, (3, 3, 3, 3), mode='reflect')\n", (2841, 2879), True, 'from torch.nn import functional as F\n'), ((3703, 3747), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(30)', 'size': 'batch.shape'}), '(scale=30, size=batch.shape)\n', (3719, 3747), True, 'import numpy as np\n'), ((4139, 4171), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {'mode': '"""RGB"""'}), "(img, mode='RGB')\n", (4154, 4171), False, 'from PIL import Image\n'), ((3431, 3450), 'os.path.join', 'osp.join', (['pth', 'name'], {}), '(pth, name)\n', (3439, 3450), True, 'import os.path as osp\n'), ((3802, 3826), 'torch.from_numpy', 'torch.from_numpy', (['noises'], {}), '(noises)\n', (3818, 3826), False, 'import torch\n'), ((3944, 3978), 'torch.clamp', 'torch.clamp', (['batch'], {'min': '(0)', 'max': '(255)'}), '(batch, min=0, max=255)\n', (3955, 3978), False, 'import torch\n'), ((4186, 4209), 'os.path.join', 'osp.join', (['pth', 'names[i]'], {}), '(pth, names[i])\n', (4194, 4209), True, 'import os.path as osp\n'), ((1898, 1915), 'torch.sum', 'torch.sum', (['kernel'], {}), '(kernel)\n', (1907, 1915), False, 'import torch\n'), ((3480, 3505), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (3500, 3505), True, 'import numpy as np\n'), ((1465, 1504), 'torch.arange', 'torch.arange', (['size'], {'dtype': 'torch.float32'}), '(size, dtype=torch.float32)\n', (1477, 1504), False, 'import torch\n'), ((1761, 1804), 'torch.exp', 'torch.exp', (['(-((mgrid - mean) / std) ** 2 / 2)'], {}), '(-((mgrid - mean) / std) ** 2 / 2)\n', (1770, 1804), False, 'import torch\n'), ((3536, 3559), 
'torch.from_numpy', 'torch.from_numpy', (['img_t'], {}), '(img_t)\n', (3552, 3559), False, 'import torch\n'), ((1710, 1732), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (1719, 1732), False, 'import math\n')] |
"""
Sugarscape Constant Growback Model
================================
Replication of the model found in Netlogo:
<NAME>. and <NAME>. (2009). NetLogo Sugarscape 2 Constant Growback model.
http://ccl.northwestern.edu/netlogo/models/Sugarscape2ConstantGrowback.
Center for Connected Learning and Computer-Based Modeling,
Northwestern University, Evanston, IL.
"""
from datetime import datetime
from mesa import Model
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
from .agents import SsAgent, Sugar
from .schedule import RandomActivationByBreed
from .moving_stats import MovingStats
class SugarscapeCg(Model):
"""
Sugarscape 2 Constant Growback
"""
# Class-level flag; set True to print per-step agent counts.
verbose = False # Print-monitoring
def __init__(
self, height=50, width=50, initial_population=100,
reproduce_prob=0.5, growback_factor=1,
):
"""
Create a new Constant Growback model with the given parameters.
Args:
initial_population: Number of population to start with
reproduce_prob: Probability that an ant reproduces in one
step of the simulation
growback_factor: Amount that every sugarcane grows by each step
"""
# Set parameters
self.height = height
self.width = width
self.initial_population = initial_population
self.reproduce_prob = reproduce_prob
self.growback_factor = growback_factor
self.stats = MovingStats()
self.schedule = RandomActivationByBreed(self)
self.grid = MultiGrid(self.height, self.width, torus=False)
# Model-level reporters sampled once per collect() call.
self.datacollector = DataCollector(
model_reporters = {
"SsAgent" : lambda m: m.schedule.get_breed_count(SsAgent),
"Average" : lambda m: m.stats.avg(),
"Oscillation" : lambda m: m.stats.osc(),
},
)
# Births/deaths requested during a step are buffered here and applied
# after schedule.step() completes (see step()).
self.children = []
self.removals = []
# Create sugar
# NOTE(review): function-local import; the map path "src/sugar-map.txt"
# is relative to the working directory — confirm it matches deployment.
import numpy as np
sugar_distribution = np.genfromtxt("src/sugar-map.txt")
self.sugar_agent_at = np.ndarray((self.height, self.width), dtype=Sugar)
for _, x, y in self.grid.coord_iter():
max_sugar = sugar_distribution[x, y]
sugar = Sugar((x, y), self, max_sugar)
self.sugar_agent_at[x, y] = sugar
self.grid.place_agent(sugar, (x, y))
self.schedule.add(sugar)
# Create agent:
for i in range(self.initial_population):
x = self.random.randrange(self.width)
y = self.random.randrange(self.height)
sugar = self.random.randrange(6, 25)
metabolism = self.random.randrange(2, 4)
vision = self.random.randrange(1, 6)
ssa = SsAgent((x, y), self, False, sugar, metabolism, vision, reproduce_prob)
self.grid.place_agent(ssa, (x, y))
self.schedule.add(ssa)
self.running = True
self.datacollector.collect(self)
def schedule_add_child(self, child):
# Buffer a newborn agent; it is placed and scheduled in step().
self.children.append(child)
def schedule_removal(self, agent):
# Buffer an agent death; it is removed from grid/schedule in step().
self.removals.append(agent)
def step(self):
# Advance one tick: run agents, update stats, collect data, then apply
# the buffered births and deaths.
self.schedule.step()
self.stats.update_step(self.schedule.get_breed_count(SsAgent))
self.datacollector.collect(self)
if self.verbose:
print([self.schedule.time, self.schedule.get_breed_count(SsAgent)])
for child in self.children:
self.grid.place_agent(child, child.pos)
self.schedule.add(child)
for agent in self.removals:
# NOTE(review): _remove_agent is a private mesa API — may break on
# mesa upgrades; confirm against the pinned mesa version.
self.grid._remove_agent(agent.pos, agent)
self.schedule.remove(agent)
self.children = []
self.removals = []
if len(self.schedule.agents_by_breed[SsAgent]) == 0:
self.running = False
# Dump CSVs exactly once: at step 100, or earlier if the run died out.
# NOTE(review): the 100-step cutoff is a magic number.
if self.schedule.steps == 100 or (self.schedule.steps < 100 and not self.running):
timestamp = datetime.now().strftime("%Y-%m-%d_%H%M%S")
suffix = "_data_rp{}_gf{}_t{}.csv".format(
self.reproduce_prob,
self.growback_factor,
timestamp
)
import os
os.makedirs("csv", exist_ok=True)
self.datacollector \
.get_agent_vars_dataframe() \
.to_csv("csv/agent" + suffix)
self.datacollector \
.get_model_vars_dataframe() \
.to_csv("csv/model" + suffix)
def run_model(self, step_count=200):
# Run up to step_count steps, stopping early if all agents die.
if self.verbose:
print(
"Initial number Sugarscape Agent: ",
self.schedule.get_breed_count(SsAgent),
)
for i in range(step_count):
self.step()
if not self.running:
break
if self.verbose:
print("")
print(
"Final number Sugarscape Agent: ",
self.schedule.get_breed_count(SsAgent),
)
| [
"os.makedirs",
"mesa.space.MultiGrid",
"datetime.datetime.now",
"numpy.ndarray",
"numpy.genfromtxt"
] | [((1655, 1702), 'mesa.space.MultiGrid', 'MultiGrid', (['self.height', 'self.width'], {'torus': '(False)'}), '(self.height, self.width, torus=False)\n', (1664, 1702), False, 'from mesa.space import MultiGrid\n'), ((2133, 2167), 'numpy.genfromtxt', 'np.genfromtxt', (['"""src/sugar-map.txt"""'], {}), "('src/sugar-map.txt')\n", (2146, 2167), True, 'import numpy as np\n'), ((2198, 2248), 'numpy.ndarray', 'np.ndarray', (['(self.height, self.width)'], {'dtype': 'Sugar'}), '((self.height, self.width), dtype=Sugar)\n', (2208, 2248), True, 'import numpy as np\n'), ((4287, 4320), 'os.makedirs', 'os.makedirs', (['"""csv"""'], {'exist_ok': '(True)'}), "('csv', exist_ok=True)\n", (4298, 4320), False, 'import os\n'), ((4040, 4054), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4052, 4054), False, 'from datetime import datetime\n')] |
from collections.abc import Callable, Iterable, Mapping
from scipy.sparse import csr_matrix, csc_matrix
from scipy.sparse.linalg import expm
import cupy as cp
import math
import neuwon.species.voronoi
import numba.cuda
import numpy as np
F = 96485.3321233100184 # Faraday's constant, Coulombs per Mole of electrons
R = 8.31446261815324 # Universal gas constant, Joules per (Mole * Kelvin)
zero_c = 273.15 # 0 degrees Celsius expressed in Kelvin.
class Species:
"""A chemical species (ion or molecule) tracked by the simulation.

Holds per-compartment concentration, diffusion and decay parameters for
the intracellular ("inside") and extracellular ("outside") spaces, plus
the electrical properties (charge, reversal potential) used to compute
trans-membrane currents.
"""
def __init__(self, name,
charge = 0,
reversal_potential = None,
inside_concentration: "millimolar" = None,
outside_concentration: "millimolar" = None,
inside_diffusivity = None,
outside_diffusivity = None,
inside_decay_period = float("inf"),
outside_decay_period = float("inf"),
use_shells = False,
outside_grid = None,):
"""
Arguments
* inside_concentration: initial value.
* outside_concentration: initial value.
* reversal_potential: is one of: number, "nerst", "goldman_hodgkin_katz"
If diffusivity is not given, then the concentration is a global constant.
"""
self.name = str(name)
self.charge = int(charge)
# reversal_potential may be a fixed number (mV) or the name of a method.
# NOTE(review): "nerst" misspells "Nernst", but it is part of the accepted
# string API and must not be changed here.
if reversal_potential is None:
self.reversal_potential = None
else:
try:
self.reversal_potential = float(reversal_potential)
except ValueError:
self.reversal_potential = str(reversal_potential)
assert self.reversal_potential in ("nerst", "goldman_hodgkin_katz")
# A species is "electric" if it carries charge or has a reversal potential.
self.electric = (self.charge != 0) or (self.reversal_potential is not None)
if self.electric: assert self.reversal_potential is not None
# Omitting a diffusivity makes that compartment a global constant.
self.inside_global_const = inside_diffusivity is None
self.outside_global_const = outside_diffusivity is None
self.inside_diffusivity = float(inside_diffusivity) if not self.inside_global_const else 0.0
self.outside_diffusivity = float(outside_diffusivity) if not self.outside_global_const else 0.0
self.inside_decay_period = float(inside_decay_period)
self.outside_decay_period = float(outside_decay_period)
self.use_shells = bool(use_shells)
self.inside_archetype = "Inside" if self.use_shells else "Segment"
self.outside_grid = tuple(float(x) for x in outside_grid) if outside_grid is not None else None
if inside_concentration is not None:
self.inside_concentration = float(inside_concentration)
assert self.inside_concentration >= 0.0
else:
self.inside_concentration = None
assert self.inside_global_const
assert self.inside_decay_period == float('inf')
if outside_concentration is not None:
self.outside_concentration = float(outside_concentration)
assert self.outside_concentration >= 0.0
else:
self.outside_concentration = None
assert self.outside_global_const
assert self.outside_decay_period == float('inf')
# Sanity checks over the combined parameter set.
assert self.inside_diffusivity >= 0
assert self.outside_diffusivity >= 0
assert self.inside_decay_period > 0.0
assert self.outside_decay_period > 0.0
if self.inside_global_const: assert self.inside_decay_period == np.inf
if self.inside_global_const: assert not self.use_shells
if self.outside_global_const: assert self.outside_decay_period == np.inf
def get_name(self) -> str:
# Public accessor for the species' name.
return self.name
def __repr__(self):
return "neuwon.species.Species(%s)"%self.name
def _initialize(self, database, time_step, celsius, input_clock):
# Register this species' attributes on the database schema.
# NOTE(review): the time_step argument is not referenced in this body.
inside_cls = database.get(self.inside_archetype)
if self.inside_concentration is not None:
# Global constants become class attributes; diffusing species get
# per-entity attributes plus a delta buffer for accumulation.
if self.inside_global_const:
inside_cls.add_class_attribute(f"{self.name}_concentration",
initial_value=self.inside_concentration,
units="millimolar")
else:
inside_cls.add_attribute(f"{self.name}_concentration",
initial_value=self.inside_concentration,
units="millimolar")
inside_cls.add_attribute(f"{self.name}_delta_concentration",
initial_value=0.0,
units="millimolar / timestep")
if self.outside_concentration is not None:
outside_cls = database.get("Outside")
if self.outside_global_const:
outside_cls.add_class_attribute(f"{self.name}_concentration",
self.outside_concentration,
units="millimolar")
else:
outside_cls.add_attribute(f"{self.name}_concentration",
initial_value=self.outside_concentration,
units="millimolar")
outside_cls.add_attribute(f"{self.name}_delta_concentration",
initial_value=0.0,
units="millimolar / timestep")
if self.electric:
segment_cls = database.get("Segment")
segment_cls.add_attribute(f"{self.name}_conductance",
initial_value=0.0,
valid_range=(0, np.inf),
units="Siemens")
# Fixed reversal potentials are class-wide; computed ones ("nerst",
# "goldman_hodgkin_katz") are per-segment and start as NaN.
if isinstance(self.reversal_potential, float):
segment_cls.add_class_attribute(f"{self.name}_reversal_potential",
initial_value=self.reversal_potential,
units="mV")
else:
segment_cls.add_attribute(f"{self.name}_reversal_potential",
initial_value=np.nan,
units="mV")
# Re-accumulate this species' conductance every time inputs advance.
input_clock.register_callback(lambda: self._accumulate_conductance(database, celsius) or True)
def _compute_reversal_potential(self, database, celsius):
x = database.get_data(f"Segment.{self.name}_reversal_potential")
if isinstance(x, float): return x
# NOTE(review): deliberate crash marker left by the author — everything
# below still uses the legacy `access(...)` API and is currently dead code.
1/0 # The following code needs to be rewritten for the new database & schema.
inside = access(self.inside_archetype+"/concentrations/"+self.name)
outside = access("outside/concentrations/"+self.name)
if not isinstance(inside, float) and self.use_shells:
inside = inside[access("membrane/inside")]
if not isinstance(outside, float):
outside = outside[access("membrane/outside")]
T = access("T")
if self.reversal_potential == "nerst":
x[:] = self._nerst_potential(self.charge, T, inside, outside)
elif self.reversal_potential == "goldman_hodgkin_katz":
voltages = access("membrane/voltages")
x[:] = self._goldman_hodgkin_katz(self.charge, T, inside, outside, voltages)
else: raise NotImplementedError(self.reversal_potential)
return x
def _zero_accumulators(self, database):
# Reset all of this species' accumulation buffers to zero.
def zero(component_name):
database.get_data(component_name).fill(0.0)
if self.electric:
zero(f"Segment.{self.name}_conductance")
# NOTE(review): the two branches below still use old slash-path component
# names — presumably stale; verify against the current database schema.
if self.inside_diffusivity != 0.0:
zero(self.inside_archetype + "/delta_concentrations/"+self.name)
if self.outside_diffusivity != 0.0:
zero("outside/delta_concentrations/"+self.name)
def _accumulate_conductance(self, database, celsius):
# Add this species' contribution to the membrane totals:
# sum_conductance += g and driving_voltage += g * E_reversal.
sum_conductance = database.get_data("Segment.sum_conductance")
driving_voltage = database.get_data("Segment.driving_voltage")
species_conductance = database.get_data(f"Segment.{self.name}_conductance")
reversal_potential = self._compute_reversal_potential(database, celsius)
sum_conductance += species_conductance
driving_voltage += species_conductance * reversal_potential
def _advance(self, database):
# Calculate the transmembrane ion flows.
# NOTE(review): this body references names (`access`, `dt`, `integral_v`)
# that are not defined in this scope — legacy code awaiting the same
# rewrite flagged in _compute_reversal_potential.
if self.inside_global_const and self.outside_global_const: return
if not (self.electric and self.charge != 0): return
reversal_potential = access("membrane/reversal_potentials/"+self.name)
g = access("membrane/conductances/"+self.name)
millimoles = g * (dt * reversal_potential - integral_v) / (self.charge * F)
if self.inside_diffusivity != 0:
if self.use_shells:
1/0
else:
volumes = access("membrane/inside/volumes")
concentrations = access("membrane/inside/concentrations/"+self.name)
concentrations += millimoles / volumes
if self.outside_diffusivity != 0:
volumes = access("outside/volumes")
self.outside.concentrations -= millimoles / self.geometry.outside_volumes
# Update chemical concentrations with local changes and diffusion.
if not self.inside_global_const:
x = access("membrane/inside/concentrations/"+self.name)
rr = access("membrane/inside/delta_concentrations/"+self.name)
irm = access("membrane/inside/diffusions/"+self.name)
x[:] = irm.dot(cp.maximum(0, x + rr * 0.5))
if not self.outside_global_const:
x = access("outside/concentrations/"+self.name)
rr = access("outside/delta_concentrations/"+self.name)
irm = access("outside/diffusions/"+self.name)
x[:] = irm.dot(cp.maximum(0, x + rr * 0.5))
def _inside_diffusion_coefficients(self, access):
# Build (coefficient, (row, col)) triplets for intracellular diffusion
# along the parent/child segment tree, plus a decay term on the diagonal.
dt = access("time_step") / 1000 / _ITERATIONS_PER_TIMESTEP
parents = access("membrane/parents").get()
lengths = access("membrane/lengths").get()
xareas = access("membrane/cross_sectional_areas").get()
volumes = access("membrane/inside/volumes").get()
if self.use_shells: raise NotImplementedError
src = []; dst = []; coef = []
for location in range(len(parents)):
parent = parents[location]
# NOTE(review): NULL is not defined in this file — presumably a
# module-level sentinel meaning "no parent"; confirm.
if parent == NULL: continue
flux = self.inside_diffusivity * xareas[location] / lengths[location]
src.append(location)
dst.append(parent)
coef.append(+dt * flux / volumes[parent])
src.append(location)
dst.append(location)
coef.append(-dt * flux / volumes[location])
src.append(parent)
dst.append(location)
coef.append(+dt * flux / volumes[location])
src.append(parent)
dst.append(parent)
coef.append(-dt * flux / volumes[parent])
for location in range(len(parents)):
src.append(location)
dst.append(location)
coef.append(-dt / self.inside_decay_period)
return (coef, (dst, src))
def _outside_diffusion_coefficients(self, access):
# Build sparse-matrix triplets for extracellular diffusion between
# neighboring voronoi cells, with tortuosity-corrected diffusivity.
extracellular_tortuosity = 1.4 # TODO: FIXME: put this one back in the db?
D = self.outside_diffusivity / extracellular_tortuosity ** 2
dt = access("time_step") / 1000 / _ITERATIONS_PER_TIMESTEP
decay = -dt / self.outside_decay_period
recip_vol = (1.0 / access("outside/volumes")).get()
area = access("outside/neighbor_border_areas")
dist = access("outside/neighbor_distances")
flux_data = D * area.data / dist.data
# Two entries (one off-diagonal, one diagonal) per neighbor pair.
src = np.empty(2*len(flux_data))
dst = np.empty(2*len(flux_data))
coef = np.empty(2*len(flux_data))
write_idx = 0
for location in range(len(recip_vol)):
for ii in range(area.indptr[location], area.indptr[location+1]):
neighbor = area.indices[ii]
flux = flux_data[ii]
src[write_idx] = location
dst[write_idx] = neighbor
coef[write_idx] = +dt * flux * recip_vol[neighbor]
write_idx += 1
src[write_idx] = location
dst[write_idx] = location
coef[write_idx] = -dt * flux * recip_vol[location] + decay
write_idx += 1
return (coef, (dst, src))
def _nerst_potential(charge, T, inside_concentration, outside_concentration):
    """Nernst equation: reversal potential in millivolts.

    Dispatches to numpy or cupy depending on where the concentration arrays
    live; NaNs (e.g. from zero concentrations) are replaced by zero.
    (The "nerst" spelling is kept because callers use this exact name.)
    """
    array_module = cp.get_array_module(inside_concentration)
    concentration_ratio = array_module.divide(
        outside_concentration, inside_concentration)
    scale = 1e3 * R * T / F / charge
    return array_module.nan_to_num(scale * array_module.log(concentration_ratio))
def _goldman_hodgkin_katz(charge, T, inside_concentration, outside_concentration, voltages):
    """Goldman-Hodgkin-Katz flux equation.

    Concentrations arrive in millimolar and are converted to molar; the
    leading 1e3 factor scales the result back to milli-units.
    (fixed: removed the dead local ``xp = cp.get_array_module(...)`` which
    was computed but never used.)
    """
    molar_inside = inside_concentration * 1e-3   # millimolar -> molar
    molar_outside = outside_concentration * 1e-3 # millimolar -> molar
    z = (charge * F / (R * T)) * voltages        # dimensionless driving term
    return ((1e3 * charge * F) *
            (molar_inside * Species._efun(-z) - molar_outside * Species._efun(z)))
@cp.fuse()
def _efun(z):
    """Helper for the GHK equation: z / (exp(z) - 1).

    Near z == 0 the expression is 0/0, so the first-order Taylor
    expansion 1 - z/2 is used instead.
    """
    if abs(z) >= 1e-4:
        return z / (math.exp(z) - 1)
    return 1 - z / 2
class InsideMethods:
# Schema-registration mixin for the intracellular ("inside") archetype.
# NOTE(review): `db` is not defined in this scope (and `cls` is unused) —
# this looks like refactor residue; confirm how `db` is meant to be bound.
@classmethod
def _initialize(cls):
db.add_archetype("inside", doc="Intracellular space.")
db.add_attribute("membrane/inside", dtype="inside", doc="""
A reference to the outermost shell.
The shells and the innermost core are allocated in a contiguous block
with this referencing the start of range of length "membrane/shells" + 1.
""")
db.add_attribute("membrane/shells", dtype=np.uint8)
db.add_attribute("inside/membrane", dtype="membrane")
db.add_attribute("inside/shell_radius", units="μm")
db.add_attribute("inside/volumes",
# bounds=(epsilon * (1e-6)**3, None),
allow_invalid=True,
units="Liters")
db.add_sparse_matrix("inside/neighbor_distances", "inside")
db.add_sparse_matrix("inside/neighbor_border_areas", "inside")
class OutsideMethods:
# Schema registration and geometry initialization for the extracellular
# ("outside") archetype, modeled as a voronoi diagram.
# NOTE(review): `db` is not defined in this scope — refactor residue; confirm.
@classmethod
def _initialize(cls):
db.add_archetype("outside", doc="Extracellular space using a voronoi diagram.")
db.add_attribute("membrane/outside", dtype="outside", doc="")
db.add_attribute("outside/coordinates", shape=(3,), units="μm")
db.add_kd_tree( "outside/tree", "outside/coordinates")
db.add_attribute("outside/volumes", units="Liters")
db.add_sparse_matrix("outside/neighbor_distances", "outside")
db.add_sparse_matrix("outside/neighbor_border_areas", "outside")
def _initialize_outside(self, locations):
# Recompute the voronoi geometry at `locations`, then at every cell that
# neighbors them (their geometry changed too), then apply the
# Frankenhaeuser-Hodgkin space override.
# NOTE(review): `access`, `s_areas` and `membrane_idx` below are not
# defined in this scope — legacy code; verify before relying on it.
self._initialize_outside_inner(locations)
touched = set()
for neighbors in self.db.access("outside/neighbor_distances")[locations]:
touched.update(neighbors.indices)
touched.difference_update(set(locations))
self._initialize_outside_inner(list(touched))
outside_volumes = access("outside/volumes")
fh_space = self.fh_space * s_areas[membrane_idx] * 1000
outside_volumes[access("membrane/outside")[membrane_idx]] = fh_space
def _initialize_outside_inner(self, locations):
# TODO: Consider https://en.wikipedia.org/wiki/Power_diagram
# Compute each location's voronoi cell (volume + neighbor list) and
# write the neighbor distances/areas into the sparse matrices.
# NOTE(review): `Pointer` (dtype below) is not defined in this file;
# the computed `volume` is discarded — confirm both are intentional.
coordinates = self.db.access("outside/coordinates").get()
tree = self.db.access("outside/tree")
write_neighbor_cols = []
write_neighbor_dist = []
write_neighbor_area = []
for location in locations:
coords = coordinates[location]
potential_neighbors = tree.query_ball_point(coords, 2 * self.max_outside_radius)
potential_neighbors.remove(location)
volume, neighbors = neuwon.species.voronoi.voronoi_cell(location,
self.max_outside_radius, np.array(potential_neighbors, dtype=Pointer), coordinates)
write_neighbor_cols.append(list(neighbors['location']))
write_neighbor_dist.append(list(neighbors['distance']))
write_neighbor_area.append(list(neighbors['border_surface_area']))
self.db.access("outside/neighbor_distances",
sparse_matrix_write=(locations, write_neighbor_cols, write_neighbor_dist))
self.db.access("outside/neighbor_border_areas",
sparse_matrix_write=(locations, write_neighbor_cols, write_neighbor_area))
class _Linear_System:
# NOTE(review): this class references several names that this file does not
# bind: `_Component`, bare `scipy` (only `from scipy.sparse import ...` /
# `from scipy.sparse.linalg import expm` appear above, which do NOT bind
# the `scipy` name), `cupyx`, and `Real`. Presumably these come from the
# module this was extracted from — verify before use.
def __init__(self, class_type, name, function, epsilon, doc="", allow_invalid=False,):
""" Add a system of linear & time-invariant differential equations.
Argument function(database_access) -> coefficients
For equations of the form: dX/dt = C * X
Where X is a component, of the same archetype as this linear system.
Where C is a matrix of coefficients, returned by the argument "function".
The database computes the propagator matrix but does not apply it.
The matrix is updated after any of the entity are created or destroyed.
"""
_Component.__init__(self, class_type, name, doc, allow_invalid=allow_invalid)
self.function = function
self.epsilon = float(epsilon)
self.data = None
coef = self.function(self.cls.database)
coef = scipy.sparse.csc_matrix(coef, shape=(self.archetype.size, self.archetype.size))
# Note: always use double precision floating point for building the impulse response matrix.
# TODO: Detect if the user returns f32 and auto-convert it to f64.
matrix = scipy.sparse.linalg.expm(coef)
# Prune the impulse response matrix.
matrix.data[np.abs(matrix.data) < self.epsilon] = 0
matrix.eliminate_zeros()
self.data = cupyx.scipy.sparse.csr_matrix(matrix, dtype=Real)
| [
"numpy.abs",
"cupy.get_array_module",
"numpy.array",
"cupy.maximum",
"cupy.fuse",
"math.exp"
] | [((13008, 13017), 'cupy.fuse', 'cp.fuse', ([], {}), '()\n', (13015, 13017), True, 'import cupy as cp\n'), ((12327, 12368), 'cupy.get_array_module', 'cp.get_array_module', (['inside_concentration'], {}), '(inside_concentration)\n', (12346, 12368), True, 'import cupy as cp\n'), ((12606, 12647), 'cupy.get_array_module', 'cp.get_array_module', (['inside_concentration'], {}), '(inside_concentration)\n', (12625, 12647), True, 'import cupy as cp\n'), ((9239, 9266), 'cupy.maximum', 'cp.maximum', (['(0)', '(x + rr * 0.5)'], {}), '(0, x + rr * 0.5)\n', (9249, 9266), True, 'import cupy as cp\n'), ((9528, 9555), 'cupy.maximum', 'cp.maximum', (['(0)', '(x + rr * 0.5)'], {}), '(0, x + rr * 0.5)\n', (9538, 9555), True, 'import cupy as cp\n'), ((13109, 13120), 'math.exp', 'math.exp', (['z'], {}), '(z)\n', (13117, 13120), False, 'import math\n'), ((15859, 15903), 'numpy.array', 'np.array', (['potential_neighbors'], {'dtype': 'Pointer'}), '(potential_neighbors, dtype=Pointer)\n', (15867, 15903), True, 'import numpy as np\n'), ((17679, 17698), 'numpy.abs', 'np.abs', (['matrix.data'], {}), '(matrix.data)\n', (17685, 17698), True, 'import numpy as np\n')] |
"""
Module for running FaIR
"""
import logging
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
import numpy as np
from scmdata import ScmRun, run_append
from ...settings import config
from ..utils._parallel_process import _parallel_process
from ._compat import fair_scm
LOGGER = logging.getLogger(__name__)
def run_fair(cfgs, output_vars):  # pylint: disable=R0914
    """
    Run FaIR

    Parameters
    ----------
    cfgs : list[dict]
        List of configurations with which to run FaIR

    output_vars : list[str]
        Variables to output

    Returns
    -------
    :obj:`ScmRun`
        :obj:`ScmRun` instance with all results.
    """
    # Normalise each configuration: lists become numpy arrays, and the
    # requested output variables are attached to every config.
    prepared_cfgs = []
    for cfg in cfgs:
        prepared = {
            key: np.asarray(value) if isinstance(value, list) else value
            for key, value in cfg.items()
        }
        prepared["output_vars"] = output_vars
        prepared_cfgs.append(prepared)

    ncpu = int(config.get("FAIR_WORKER_NUMBER", multiprocessing.cpu_count()))
    LOGGER.info("Running FaIR with %s workers", ncpu)

    parallel_kwargs = dict(
        func=_single_fair_iteration,
        configuration=prepared_cfgs,
        config_are_kwargs=False,
    )
    # Fan out across processes only when more than one worker is configured.
    if ncpu > 1:
        with ProcessPoolExecutor(ncpu) as pool:
            return run_append(_parallel_process(**parallel_kwargs, pool=pool))
    return run_append(_parallel_process(**parallel_kwargs))
def _single_fair_iteration(cfg):  # pylint: disable=R0914
    """Run a single FaIR configuration and return its results as an :obj:`ScmRun`.

    Note: ``cfg`` is consumed — the run-metadata keys are popped off and the
    remainder is forwarded to ``fair_scm`` as keyword arguments.
    """
    scenario = cfg.pop("scenario")
    model = cfg.pop("model")
    run_id = cfg.pop("run_id")
    factors = {"gmst": cfg.pop("gmst_factor"), "ohu": cfg.pop("ohu_factor")}
    startyear = cfg.pop("startyear")
    output_vars = cfg.pop("output_vars")

    data, unit, nt = _process_output(fair_scm(**cfg), output_vars, factors)

    # Assemble the per-variable rows in the dict's (insertion) order.
    variables = list(data)
    data_scmrun = [data[key] for key in variables]
    units = [unit[key] for key in variables]

    return ScmRun(
        np.vstack(data_scmrun).T,
        index=np.arange(startyear, startyear + nt),
        columns={
            "scenario": scenario,
            "model": model,
            "region": "World",
            "variable": variables,
            "unit": units,
            "run_id": run_id,
        },
    )
def _process_output(fair_output, output_vars, factors): # pylint: disable=R0915
"""
Make sense of FaIR1.6 output
Parameters
----------
fair_output : tuple
7-tuple of concentrations, forcing, temperature, lambda_eff, ohc, heatflux, airborne_emissions:
c : np.ndarray
(nt, 31) array of greenhouse gas concentrations
f : np.ndarray
(nt, 41) array of effective radiative forcings
t : np.ndarray
(nt,) array of temperature
lambda_eff: np.ndarray
effective climate feedback
ohc : np.ndarray
total ocean heat uptake
heatflux:
heat transfer into the ocean
airborne_emissions:
atmospheric carbon content
output_vars : list[str]
List of output variables
factors : dict[(Union[float, numpy.ndarray])]
ohu : ratio of ocean heat uptake to total Earth energy uptake
gmst : ratio of GMST to GSAT
Returns
-------
data : dict
dict of climate model output
unit : dict
dict of units corresponding to data
nt : int
number of timesteps modelled
"""
(
concentrations,
forcing,
temperature,
lambda_eff,
ohc,
heatflux,
airborne_emissions,
) = fair_output
data = {}
unit = {}
data["Atmospheric Concentrations|CO2"] = concentrations[:, 0]
data["Atmospheric Concentrations|CH4"] = concentrations[:, 1]
data["Atmospheric Concentrations|N2O"] = concentrations[:, 2]
data["Atmospheric Concentrations|CF4"] = concentrations[:, 3]
data["Atmospheric Concentrations|C2F6"] = concentrations[:, 4]
data["Atmospheric Concentrations|C6F14"] = concentrations[:, 5]
data["Atmospheric Concentrations|HFC23"] = concentrations[:, 6]
data["Atmospheric Concentrations|HFC32"] = concentrations[:, 7]
data["Atmospheric Concentrations|HFC125"] = concentrations[:, 8]
data["Atmospheric Concentrations|HFC134a"] = concentrations[:, 9]
data["Atmospheric Concentrations|HFC143a"] = concentrations[:, 10]
data["Atmospheric Concentrations|HFC227ea"] = concentrations[:, 11]
data["Atmospheric Concentrations|HFC245fa"] = concentrations[:, 12]
data["Atmospheric Concentrations|HFC4310mee"] = concentrations[:, 13]
data["Atmospheric Concentrations|SF6"] = concentrations[:, 14]
data["Atmospheric Concentrations|CFC11"] = concentrations[:, 15]
data["Atmospheric Concentrations|CFC12"] = concentrations[:, 16]
data["Atmospheric Concentrations|CFC113"] = concentrations[:, 17]
data["Atmospheric Concentrations|CFC114"] = concentrations[:, 18]
data["Atmospheric Concentrations|CFC115"] = concentrations[:, 19]
data["Atmospheric Concentrations|CCl4"] = concentrations[:, 20]
data["Atmospheric Concentrations|CH3CCl3"] = concentrations[:, 21]
data["Atmospheric Concentrations|HCFC22"] = concentrations[:, 22]
data["Atmospheric Concentrations|HCFC141b"] = concentrations[:, 23]
data["Atmospheric Concentrations|HCFC142b"] = concentrations[:, 24]
data["Atmospheric Concentrations|Halon1211"] = concentrations[:, 25]
data["Atmospheric Concentrations|Halon1202"] = concentrations[:, 26]
data["Atmospheric Concentrations|Halon1301"] = concentrations[:, 27]
data["Atmospheric Concentrations|Halon2402"] = concentrations[:, 28]
data["Atmospheric Concentrations|CH3Br"] = concentrations[:, 29]
data["Atmospheric Concentrations|CH3Cl"] = concentrations[:, 30]
data["Effective Radiative Forcing|CO2"] = forcing[:, 0]
data["Effective Radiative Forcing|CH4"] = forcing[:, 1]
data["Effective Radiative Forcing|N2O"] = forcing[:, 2]
data["Effective Radiative Forcing|CF4"] = forcing[:, 3]
data["Effective Radiative Forcing|C2F6"] = forcing[:, 4]
data["Effective Radiative Forcing|C6F14"] = forcing[:, 5]
data["Effective Radiative Forcing|HFC23"] = forcing[:, 6]
data["Effective Radiative Forcing|HFC32"] = forcing[:, 7]
data["Effective Radiative Forcing|HFC125"] = forcing[:, 8]
data["Effective Radiative Forcing|HFC134a"] = forcing[:, 9]
data["Effective Radiative Forcing|HFC143a"] = forcing[:, 10]
data["Effective Radiative Forcing|HFC227ea"] = forcing[:, 11]
data["Effective Radiative Forcing|HFC245fa"] = forcing[:, 12]
data["Effective Radiative Forcing|HFC4310mee"] = forcing[:, 13]
data["Effective Radiative Forcing|SF6"] = forcing[:, 14]
data["Effective Radiative Forcing|CFC11"] = forcing[:, 15]
data["Effective Radiative Forcing|CFC12"] = forcing[:, 16]
data["Effective Radiative Forcing|CFC113"] = forcing[:, 17]
data["Effective Radiative Forcing|CFC114"] = forcing[:, 18]
data["Effective Radiative Forcing|CFC115"] = forcing[:, 19]
data["Effective Radiative Forcing|CCl4"] = forcing[:, 20]
data["Effective Radiative Forcing|CH3CCl3"] = forcing[:, 21]
data["Effective Radiative Forcing|HCFC22"] = forcing[:, 22]
data["Effective Radiative Forcing|HCFC141b"] = forcing[:, 23]
data["Effective Radiative Forcing|HCFC142b"] = forcing[:, 24]
data["Effective Radiative Forcing|Halon1211"] = forcing[:, 25]
data["Effective Radiative Forcing|Halon1202"] = forcing[:, 26]
data["Effective Radiative Forcing|Halon1301"] = forcing[:, 27]
data["Effective Radiative Forcing|Halon2402"] = forcing[:, 28]
data["Effective Radiative Forcing|CH3Br"] = forcing[:, 29]
data["Effective Radiative Forcing|CH3Cl"] = forcing[:, 30]
data["Effective Radiative Forcing|Tropospheric Ozone"] = forcing[:, 31]
data["Effective Radiative Forcing|Stratospheric Ozone"] = forcing[:, 32]
data["Effective Radiative Forcing|CH4 Oxidation Stratospheric H2O"] = forcing[:, 33]
data["Effective Radiative Forcing|Contrails"] = forcing[:, 34]
data["Effective Radiative Forcing|Aerosols|Direct Effect|SOx"] = forcing[:, 35]
data[
"Effective Radiative Forcing|Aerosols|Direct Effect|Secondary Organic Aerosol"
] = forcing[:, 36]
data["Effective Radiative Forcing|Aerosols|Direct Effect|Nitrate"] = forcing[:, 37]
data["Effective Radiative Forcing|Aerosols|Direct Effect|BC"] = forcing[:, 38]
data["Effective Radiative Forcing|Aerosols|Direct Effect|OC"] = forcing[:, 39]
data["Effective Radiative Forcing|Aerosols|Indirect Effect"] = forcing[:, 40]
data["Effective Radiative Forcing|Black Carbon on Snow"] = forcing[:, 41]
data["Effective Radiative Forcing|Land-use Change"] = forcing[:, 42]
data["Effective Radiative Forcing|Volcanic"] = forcing[:, 43]
data["Effective Radiative Forcing|Solar"] = forcing[:, 44]
data["Effective Radiative Forcing"] = np.sum(forcing, axis=1)
data["Effective Radiative Forcing|Anthropogenic"] = np.sum(forcing[:, :43], axis=1)
data["Effective Radiative Forcing|Greenhouse Gases"] = np.sum(
forcing[:, :31], axis=1
)
# This definition does not include ozone and H2O from CH4 oxidation
data["Effective Radiative Forcing|Kyoto Gases"] = np.sum(forcing[:, :15], axis=1)
data["Effective Radiative Forcing|CO2, CH4 and N2O"] = np.sum(
forcing[:, :3], axis=1
)
# What is the rigorous definition here? CFCs are not included but contain F
data["Effective Radiative Forcing|F-Gases"] = np.sum(forcing[:, 3:15], axis=1)
data["Effective Radiative Forcing|Montreal Protocol Halogen Gases"] = np.sum(
forcing[:, 15:31], axis=1
)
data["Effective Radiative Forcing|Aerosols|Direct Effect"] = np.sum(
forcing[:, 35:40], axis=1
)
data["Effective Radiative Forcing|Aerosols"] = np.sum(forcing[:, 35:41], axis=1)
data["Effective Radiative Forcing|Ozone"] = np.sum(forcing[:, 31:33], axis=1)
data["Surface Air Temperature Change"] = temperature
data["Surface Air Ocean Blended Temperature Change"] = temperature * factors["gmst"]
data["Airborne Fraction"] = airborne_emissions
data["Effective Climate Feedback"] = lambda_eff
data["Heat Content"] = ohc
data["Heat Content|Ocean"] = ohc * factors["ohu"]
data["Net Energy Imbalance"] = heatflux
data["Heat Uptake"] = heatflux
data["Heat Uptake|Ocean"] = heatflux * factors["ohu"]
unit["Atmospheric Concentrations|CO2"] = "ppm"
unit["Atmospheric Concentrations|CH4"] = "ppb"
unit["Atmospheric Concentrations|N2O"] = "ppb"
unit["Atmospheric Concentrations|CF4"] = "ppt"
unit["Atmospheric Concentrations|C2F6"] = "ppt"
unit["Atmospheric Concentrations|C6F14"] = "ppt"
unit["Atmospheric Concentrations|HFC23"] = "ppt"
unit["Atmospheric Concentrations|HFC32"] = "ppt"
unit["Atmospheric Concentrations|HFC125"] = "ppt"
unit["Atmospheric Concentrations|HFC134a"] = "ppt"
unit["Atmospheric Concentrations|HFC143a"] = "ppt"
unit["Atmospheric Concentrations|HFC227ea"] = "ppt"
unit["Atmospheric Concentrations|HFC245fa"] = "ppt"
unit["Atmospheric Concentrations|HFC4310mee"] = "ppt"
unit["Atmospheric Concentrations|SF6"] = "ppt"
unit["Atmospheric Concentrations|CFC11"] = "ppt"
unit["Atmospheric Concentrations|CFC12"] = "ppt"
unit["Atmospheric Concentrations|CFC113"] = "ppt"
unit["Atmospheric Concentrations|CFC114"] = "ppt"
unit["Atmospheric Concentrations|CFC115"] = "ppt"
unit["Atmospheric Concentrations|CCl4"] = "ppt"
unit["Atmospheric Concentrations|CH3CCl3"] = "ppt"
unit["Atmospheric Concentrations|HCFC22"] = "ppt"
unit["Atmospheric Concentrations|HCFC141b"] = "ppt"
unit["Atmospheric Concentrations|HCFC142b"] = "ppt"
unit["Atmospheric Concentrations|Halon1211"] = "ppt"
unit["Atmospheric Concentrations|Halon1202"] = "ppt"
unit["Atmospheric Concentrations|Halon1301"] = "ppt"
unit["Atmospheric Concentrations|Halon2402"] = "ppt"
unit["Atmospheric Concentrations|CH3Br"] = "ppt"
unit["Atmospheric Concentrations|CH3Cl"] = "ppt"
unit["Effective Radiative Forcing|CO2"] = "W/m**2"
unit["Effective Radiative Forcing|CH4"] = "W/m**2"
unit["Effective Radiative Forcing|N2O"] = "W/m**2"
unit["Effective Radiative Forcing|CF4"] = "W/m**2"
unit["Effective Radiative Forcing|C2F6"] = "W/m**2"
unit["Effective Radiative Forcing|C6F14"] = "W/m**2"
unit["Effective Radiative Forcing|HFC23"] = "W/m**2"
unit["Effective Radiative Forcing|HFC32"] = "W/m**2"
unit["Effective Radiative Forcing|HFC125"] = "W/m**2"
unit["Effective Radiative Forcing|HFC134a"] = "W/m**2"
unit["Effective Radiative Forcing|HFC143a"] = "W/m**2"
unit["Effective Radiative Forcing|HFC227ea"] = "W/m**2"
unit["Effective Radiative Forcing|HFC245fa"] = "W/m**2"
unit["Effective Radiative Forcing|HFC4310mee"] = "W/m**2"
unit["Effective Radiative Forcing|SF6"] = "W/m**2"
unit["Effective Radiative Forcing|CFC11"] = "W/m**2"
unit["Effective Radiative Forcing|CFC12"] = "W/m**2"
unit["Effective Radiative Forcing|CFC113"] = "W/m**2"
unit["Effective Radiative Forcing|CFC114"] = "W/m**2"
unit["Effective Radiative Forcing|CFC115"] = "W/m**2"
unit["Effective Radiative Forcing|CCl4"] = "W/m**2"
unit["Effective Radiative Forcing|CH3CCl3"] = "W/m**2"
unit["Effective Radiative Forcing|HCFC22"] = "W/m**2"
unit["Effective Radiative Forcing|HCFC141b"] = "W/m**2"
unit["Effective Radiative Forcing|HCFC142b"] = "W/m**2"
unit["Effective Radiative Forcing|Halon1211"] = "W/m**2"
unit["Effective Radiative Forcing|Halon1202"] = "W/m**2"
unit["Effective Radiative Forcing|Halon1301"] = "W/m**2"
unit["Effective Radiative Forcing|Halon2402"] = "W/m**2"
unit["Effective Radiative Forcing|CH3Br"] = "W/m**2"
unit["Effective Radiative Forcing|CH3Cl"] = "W/m**2"
unit["Effective Radiative Forcing|Tropospheric Ozone"] = "W/m**2"
unit["Effective Radiative Forcing|Stratospheric Ozone"] = "W/m**2"
unit["Effective Radiative Forcing|CH4 Oxidation Stratospheric H2O"] = "W/m**2"
unit["Effective Radiative Forcing|Contrails"] = "W/m**2"
unit["Effective Radiative Forcing|Aerosols|Direct Effect|SOx"] = "W/m**2"
unit[
"Effective Radiative Forcing|Aerosols|Direct Effect|Secondary Organic Aerosol"
] = "W/m**2"
unit["Effective Radiative Forcing|Aerosols|Direct Effect|Nitrate"] = "W/m**2"
unit["Effective Radiative Forcing|Aerosols|Direct Effect|BC"] = "W/m**2"
unit["Effective Radiative Forcing|Aerosols|Direct Effect|OC"] = "W/m**2"
unit["Effective Radiative Forcing|Aerosols|Indirect Effect"] = "W/m**2"
unit["Effective Radiative Forcing|Black Carbon on Snow"] = "W/m**2"
unit["Effective Radiative Forcing|Land-use Change"] = "W/m**2"
unit["Effective Radiative Forcing|Volcanic"] = "W/m**2"
unit["Effective Radiative Forcing|Solar"] = "W/m**2"
unit["Effective Radiative Forcing"] = "W/m**2"
unit["Effective Radiative Forcing|Anthropogenic"] = "W/m**2"
unit["Effective Radiative Forcing|Greenhouse Gases"] = "W/m**2"
unit["Effective Radiative Forcing|Kyoto Gases"] = "W/m**2"
unit["Effective Radiative Forcing|CO2, CH4 and N2O"] = "W/m**2"
unit["Effective Radiative Forcing|F-Gases"] = "W/m**2"
unit["Effective Radiative Forcing|Montreal Protocol Halogen Gases"] = "W/m**2"
unit["Effective Radiative Forcing|Aerosols|Direct Effect"] = "W/m**2"
unit["Effective Radiative Forcing|Aerosols"] = "W/m**2"
unit["Effective Radiative Forcing|Ozone"] = "W/m**2"
unit["Surface Air Temperature Change"] = "K"
unit["Surface Air Ocean Blended Temperature Change"] = "K"
unit["Airborne Fraction"] = "dimensionless"
unit["Effective Climate Feedback"] = "W/m**2/K"
unit["Heat Content"] = "J"
unit["Heat Content|Ocean"] = "J"
unit["Net Energy Imbalance"] = "W/m**2"
unit["Heat Uptake"] = "W/m**2"
unit["Heat Uptake|Ocean"] = "W/m**2"
nt = len(temperature)
out = ({}, {}, nt)
for key in output_vars:
if key not in data:
LOGGER.warning("%s not available from FaIR", key)
continue
out[0][key] = data[key]
out[1][key] = unit[key]
return out
| [
"logging.getLogger",
"numpy.asarray",
"multiprocessing.cpu_count",
"numpy.sum",
"scmdata.run_append",
"numpy.vstack",
"concurrent.futures.ProcessPoolExecutor",
"numpy.arange"
] | [((308, 335), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (325, 335), False, 'import logging\n'), ((1550, 1565), 'scmdata.run_append', 'run_append', (['res'], {}), '(res)\n', (1560, 1565), False, 'from scmdata import ScmRun, run_append\n'), ((9283, 9306), 'numpy.sum', 'np.sum', (['forcing'], {'axis': '(1)'}), '(forcing, axis=1)\n', (9289, 9306), True, 'import numpy as np\n'), ((9363, 9394), 'numpy.sum', 'np.sum', (['forcing[:, :43]'], {'axis': '(1)'}), '(forcing[:, :43], axis=1)\n', (9369, 9394), True, 'import numpy as np\n'), ((9454, 9485), 'numpy.sum', 'np.sum', (['forcing[:, :31]'], {'axis': '(1)'}), '(forcing[:, :31], axis=1)\n', (9460, 9485), True, 'import numpy as np\n'), ((9626, 9657), 'numpy.sum', 'np.sum', (['forcing[:, :15]'], {'axis': '(1)'}), '(forcing[:, :15], axis=1)\n', (9632, 9657), True, 'import numpy as np\n'), ((9717, 9747), 'numpy.sum', 'np.sum', (['forcing[:, :3]'], {'axis': '(1)'}), '(forcing[:, :3], axis=1)\n', (9723, 9747), True, 'import numpy as np\n'), ((9892, 9924), 'numpy.sum', 'np.sum', (['forcing[:, 3:15]'], {'axis': '(1)'}), '(forcing[:, 3:15], axis=1)\n', (9898, 9924), True, 'import numpy as np\n'), ((9999, 10032), 'numpy.sum', 'np.sum', (['forcing[:, 15:31]'], {'axis': '(1)'}), '(forcing[:, 15:31], axis=1)\n', (10005, 10032), True, 'import numpy as np\n'), ((10112, 10145), 'numpy.sum', 'np.sum', (['forcing[:, 35:40]'], {'axis': '(1)'}), '(forcing[:, 35:40], axis=1)\n', (10118, 10145), True, 'import numpy as np\n'), ((10211, 10244), 'numpy.sum', 'np.sum', (['forcing[:, 35:41]'], {'axis': '(1)'}), '(forcing[:, 35:41], axis=1)\n', (10217, 10244), True, 'import numpy as np\n'), ((10293, 10326), 'numpy.sum', 'np.sum', (['forcing[:, 31:33]'], {'axis': '(1)'}), '(forcing[:, 31:33], axis=1)\n', (10299, 10326), True, 'import numpy as np\n'), ((1094, 1121), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1119, 1121), False, 'import multiprocessing\n'), ((1360, 1385), 
'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', (['ncpu'], {}), '(ncpu)\n', (1379, 1385), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((2222, 2244), 'numpy.vstack', 'np.vstack', (['data_scmrun'], {}), '(data_scmrun)\n', (2231, 2244), True, 'import numpy as np\n'), ((2262, 2298), 'numpy.arange', 'np.arange', (['startyear', '(startyear + nt)'], {}), '(startyear, startyear + nt)\n', (2271, 2298), True, 'import numpy as np\n'), ((907, 924), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (917, 924), True, 'import numpy as np\n')] |
"""Uses Mk4py, aka metakit.
Python bindings are native-compiled: http://equi4.com/metakit/python.html
"""
from __future__ import print_function, absolute_import
import Mk4py
import numpy as np
import os
import struct
import zlib
from argparse import ArgumentParser
from construct import (
Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul,
Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek,
Embedded, ExprAdapter, this
)
from matplotlib import pyplot as plt
from .construct_utils import LazyField
def parse_wxd(f):
    """Parse an open ``.wxd`` file object and return its primary spectrum."""
    wxd = WXDFile(f)
    return wxd.load_spectrum(all_trajs=False, verbose=False)
class WXDFile(object):
    '''Renishaw WiRE (*.wxd) file format parser.

    A .wxd container is a Metakit database whose rows are named,
    zlib-compressed sub-files; the payloads are decoded with the
    `construct` schemas defined below in this module.
    '''
    def __init__(self, file_or_filepath):
        # Note: we keep self.db around to avoid the db getting GC'd
        # An object with a 'mode' attribute is treated as an open file handle.
        if hasattr(file_or_filepath, 'mode'):
            self.db = Mk4py.storage(file_or_filepath)
        else:
            # Second argument 0 presumably opens read-only — confirm
            # against the Mk4py storage() documentation.
            self.db = Mk4py.storage(file_or_filepath, 0)
        dirs = self.db.view('dirs')
        # The sub-file table hangs off the first directory's first file.
        self.table = dirs[0].files[0]._B
    def print_info(self):
        # Print a plain-text table of every sub-file's name, size and date.
        print('Subfile Name \tSize\tDate')
        print('-' * 34)
        for row in self.table:
            print('\t'.join(map(str, (row.name, row.size, row.date))))
        print('-' * 34)
    def _row_data(self, row_name):
        # Return the decompressed contents of the uniquely-named sub-file.
        # The single-element unpack raises if the name is missing/ambiguous.
        row, = self.table.select(name=row_name)
        return zlib.decompress(row.contents)
    def _last_row_data(self, row_name_prefix):
        # Return the decompressed contents of the LAST sub-file whose name
        # starts with the given prefix (e.g. the final 'DataSet*' revision).
        row = [r for r in self.table if r.name.startswith(row_name_prefix)][-1]
        return zlib.decompress(row.contents)
    def extract_xml(self, outfile):
        # Dump the embedded XML document (stored as UTF-16) to `outfile`
        # re-encoded as UTF-8.
        data = self._row_data('XMLDocument')
        # NOTE(review): native 'li' is 12 bytes only where C long is 8 bytes
        # (e.g. 64-bit Linux); on platforms with 4-byte long this unpack of
        # data[:12] will fail — confirm the intended struct format.
        a, size = struct.unpack('li', data[:12])
        assert a == 0
        assert size == len(data[12:])
        text = data[12:].decode('utf16').encode('utf8')
        with open(outfile, 'wb') as fh:
            fh.write(text)
    def extract_properties(self, outfile):
        # Write each labelled property value to `outfile`, one per line.
        data = self._row_data('Properties')
        props = Properties.parse(data)
        with open(outfile, 'w') as fh:
            for p in props:
                if 'TaggedData' in p:
                    # NOTE(review): '%r' applied to repr(...) double-quotes
                    # the value; one of the two is probably redundant.
                    print('%s\t%r' % (p.label, repr(p.TaggedData.value)), file=fh)
    def extract_analysis(self, outfile):
        # TODO: figure out how to parse this part
        # data = self._row_data('AnalysisResults')
        raise NotImplementedError('AnalysisResults parser is NYI')
    def load_spectrum(self, all_trajs=False, verbose=False):
        """Return the spectrum as an (n, 2) array of (band, intensity).

        With all_trajs=True, return a dict mapping each trajectory label to
        its own (n, 2) array instead.
        """
        # load bands from the last DataList* row
        dlist = DataList.parse(self._last_row_data('DataList'))
        bands = np.array(dlist.data.value, dtype=float)
        assert dlist.size == bands.shape[0]
        # load intensities from the last DataSet* row
        dset = DataSet.parse(self._last_row_data('DataSet'))
        if verbose:
            for p in dset.Property:
                if 'TaggedData' in p:
                    print(p.label, '=>', repr(p.TaggedData.value))
        if not all_trajs:
            # Single-trajectory case: pair bands with the first data list.
            dlist = dset.LabeledDataList[0]
            intensities = np.array(dlist.data.value, dtype=float)
            return np.column_stack((bands, intensities))
        trajs = {}
        for dlist in dset.LabeledDataList:
            intensities = np.array(dlist.data.value, dtype=float)
            trajs[dlist.label] = np.column_stack((bands, intensities))
        return trajs
class VBStringAdapter(Adapter):
    """construct Adapter for VB-style length-prefixed UTF-16 strings.

    Wire layout: uint32 byte length, then (length - 2) bytes of UTF-16
    text, then a mandatory 2-byte null terminator.
    """
    def __init__(self):
        # TODO: replace this with construct.PascalString
        vbs = Struct('length'/Int32ul,
                     'value'/Bytes(this.length - 2),
                     Const(b'\x00\x00')) # There's always an ending null
        Adapter.__init__(self, vbs)
    def _decode(self, obj, ctx):
        # NOTE(review): decode->encode yields UTF-8 *bytes*; looks like
        # Python 2-era code where callers treat that as text — confirm
        # behaviour under Python 3.
        return obj.value.decode('utf16').encode('utf8')
    def _encode(self, obj, ctx):
        # NOTE(review): on Python 3 this concatenates str onto bytes and
        # would raise TypeError; presumably only ever run under Python 2.
        x = obj.encode('utf16') + '\x00\x00'
        return Container(length=len(x), value=x)
# Shared singleton instance used by all the schemas below.
VBString = VBStringAdapter()
# 24-byte record of unknown purpose; only the trailing uint64 is kept.
SomeKindOfEnumMaybe = Struct(
    Padding(16), # XXX: almost certainly useful somehow
    Int64ul
)
# Tagged value: a uint16 type tag selects how the payload is decoded.
TaggedData = Struct(
    'tag'/Int16ul,
    'value'/Switch(this.tag, {
        3: Int32sl,
        5: Float64l,
        7: Int64ul, # timestamp?
        8: VBString,
        9: SomeKindOfEnumMaybe,
        11: Const(b'\x00' * 2), # null?
        35: Const(b'\x00' * 6), # EOF
    })
)
# Specialization for loading float data faster
# (assumes tag 5 / little-endian double, skipping the generic Switch).
TaggedFloat64 = ExprAdapter(
    Struct(Const(5, Int16ul), 'value'/Float64l),
    encoder=lambda obj, ctx: Container(value=obj.value),
    decoder=lambda obj, ctx: obj.value)
# A counted array of tagged doubles followed by a fixed sentinel.
DataList = Struct(
    'size'/Int64ul,
    LazyField(Array(lambda ctx: ctx.size, TaggedFloat64)),
    Const('\xc0\xff\xee\x01') # XXX: probably useful
)
# XXX: hacks
# Sentinel byte sequences that mark the end of a property list.
bad_strings = ('\xc0\xff\xee\x01\x00\x00', '\x01#Eg\x00\x00')
# One labelled property, or 6 bytes of padding if a sentinel is next.
Property = Struct(
    'peek'/Peek(Bytes(6)),
    Embedded(IfThenElse(
        this.peek in bad_strings,
        Padding(6),
        Struct('label'/VBString, 'TaggedData'/TaggedData)))
)
Properties = GreedyRange(Property)
# A named DataList (label + 18 unknown bytes + the list itself).
LabeledDataList = Struct(
    'label'/VBString,
    Padding(18),
    'DataList'/Embedded(DataList)
)
# Top-level data set: two labelled lists plus trailing properties.
DataSet = Struct(
    'number'/Int64ul,
    # XXX: may have more than two. Might use ctx.number to decide?
    'LabeledDataList'/Array(2, LabeledDataList),
    'Properties'/Properties
)
if __name__ == '__main__':
    def main():
        """Command-line driver: extract XML/properties and/or plot spectra
        from one or more .wxd files."""
        ap = ArgumentParser()
        ap.add_argument('-v', '--verbose', action='store_true')
        ap.add_argument('--xml', action='store_true',
                        help='Extract the associated XML document.')
        ap.add_argument('--props', action='store_true', help='Extract properties.')
        ap.add_argument('--analysis', action='store_true',
                        help='Extract analysis results.')
        ap.add_argument('--plot', action='store_true', help='Plot all spectra.')
        # NOTE(review): type=open leaves the file handles open for the whole
        # run; they are never explicitly closed.
        ap.add_argument('files', nargs='+', type=open)
        args = ap.parse_args()
        if args.analysis:
            # ap.error() exits the process with status 2.
            ap.error('--analysis is NYI at this point')
        for f in args.files:
            wxd = WXDFile(f)
            if args.verbose:
                wxd.print_info()
            if args.xml:
                wxd.extract_xml(f.name + '.xml')
            if args.props:
                wxd.extract_properties(f.name + '.props.txt')
            # NOTE(review): unreachable — args.analysis already triggered
            # ap.error() above, which raises SystemExit.
            if args.analysis:
                wxd.extract_analysis(f.name + '.analysis.txt')
            if args.plot:
                # One figure per input file, one line per trajectory.
                spectra = wxd.load_spectrum(all_trajs=True, verbose=args.verbose)
                plt.figure()
                plt.title(os.path.basename(f.name))
                for label, traj in spectra.items():
                    plt.plot(*traj.T, label=label)
                plt.legend()
        if args.plot:
            plt.show()
    main()
| [
"numpy.column_stack",
"numpy.array",
"construct.Const",
"construct.Adapter.__init__",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"construct.Array",
"zlib.decompress",
"struct.unpack",
"construct.Struct",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"construct.GreedyRange",... | [((4705, 4726), 'construct.GreedyRange', 'GreedyRange', (['Property'], {}), '(Property)\n', (4716, 4726), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((3699, 3710), 'construct.Padding', 'Padding', (['(16)'], {}), '(16)\n', (3706, 3710), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4378, 4394), 'construct.Const', 'Const', (['"""Àÿî\x01"""'], {}), "('Àÿî\\x01')\n", (4383, 4394), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4779, 4790), 'construct.Padding', 'Padding', (['(18)'], {}), '(18)\n', (4786, 4790), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((1322, 1351), 'zlib.decompress', 'zlib.decompress', (['row.contents'], {}), '(row.contents)\n', (1337, 1351), False, 'import zlib\n'), ((1485, 1514), 'zlib.decompress', 'zlib.decompress', (['row.contents'], {}), '(row.contents)\n', (1500, 1514), False, 'import zlib\n'), ((1605, 1635), 'struct.unpack', 'struct.unpack', (['"""li"""', 'data[:12]'], {}), "('li', data[:12])\n", (1618, 1635), False, 'import struct\n'), ((2447, 2486), 'numpy.array', 'np.array', (['dlist.data.value'], {'dtype': 'float'}), '(dlist.data.value, dtype=float)\n', (2455, 2486), True, 'import numpy as np\n'), ((3405, 3432), 'construct.Adapter.__init__', 'Adapter.__init__', (['self', 'vbs'], {}), '(self, vbs)\n', (3421, 3432), False, 'from construct import Struct, Int64ul, Array, Const, 
Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4141, 4158), 'construct.Const', 'Const', (['(5)', 'Int16ul'], {}), '(5, Int16ul)\n', (4146, 4158), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4329, 4371), 'construct.Array', 'Array', (['(lambda ctx: ctx.size)', 'TaggedFloat64'], {}), '(lambda ctx: ctx.size, TaggedFloat64)\n', (4334, 4371), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4807, 4825), 'construct.Embedded', 'Embedded', (['DataList'], {}), '(DataList)\n', (4815, 4825), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4957, 4982), 'construct.Array', 'Array', (['(2)', 'LabeledDataList'], {}), '(2, LabeledDataList)\n', (4962, 4982), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((5066, 5082), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (5080, 5082), False, 'from argparse import ArgumentParser\n'), ((874, 905), 'Mk4py.storage', 'Mk4py.storage', (['file_or_filepath'], {}), '(file_or_filepath)\n', (887, 905), False, 'import Mk4py\n'), ((932, 966), 'Mk4py.storage', 'Mk4py.storage', (['file_or_filepath', '(0)'], {}), '(file_or_filepath, 0)\n', (945, 966), False, 'import Mk4py\n'), ((2850, 2889), 'numpy.array', 'np.array', (['dlist.data.value'], {'dtype': 'float'}), '(dlist.data.value, dtype=float)\n', (2858, 2889), 
True, 'import numpy as np\n'), ((2903, 2940), 'numpy.column_stack', 'np.column_stack', (['(bands, intensities)'], {}), '((bands, intensities))\n', (2918, 2940), True, 'import numpy as np\n'), ((3016, 3055), 'numpy.array', 'np.array', (['dlist.data.value'], {'dtype': 'float'}), '(dlist.data.value, dtype=float)\n', (3024, 3055), True, 'import numpy as np\n'), ((3083, 3120), 'numpy.column_stack', 'np.column_stack', (['(bands, intensities)'], {}), '((bands, intensities))\n', (3098, 3120), True, 'import numpy as np\n'), ((3348, 3366), 'construct.Const', 'Const', (["b'\\x00\\x00'"], {}), "(b'\\x00\\x00')\n", (3353, 3366), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4208, 4234), 'construct.Container', 'Container', ([], {'value': 'obj.value'}), '(value=obj.value)\n', (4217, 4234), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4540, 4548), 'construct.Bytes', 'Bytes', (['(6)'], {}), '(6)\n', (4545, 4548), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4618, 4628), 'construct.Padding', 'Padding', (['(6)'], {}), '(6)\n', (4625, 4628), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4638, 4691), 'construct.Struct', 'Struct', (["('label' / VBString)", "('TaggedData' / TaggedData)"], {}), "('label' / VBString, 'TaggedData' / TaggedData)\n", (4644, 4691), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, 
Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((6273, 6283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6281, 6283), True, 'from matplotlib import pyplot as plt\n'), ((3307, 3329), 'construct.Bytes', 'Bytes', (['(this.length - 2)'], {}), '(this.length - 2)\n', (3312, 3329), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((3977, 3995), 'construct.Const', 'Const', (["(b'\\x00' * 2)"], {}), "(b'\\x00' * 2)\n", (3982, 3995), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((4018, 4036), 'construct.Const', 'Const', (["(b'\\x00' * 6)"], {}), "(b'\\x00' * 6)\n", (4023, 4036), False, 'from construct import Struct, Int64ul, Array, Const, Float64l, Int32sl, Int16ul, Switch, Int32ul, Bytes, Padding, Adapter, Container, GreedyRange, IfThenElse, Peek, Embedded, ExprAdapter, this\n'), ((6086, 6098), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6096, 6098), True, 'from matplotlib import pyplot as plt\n'), ((6236, 6248), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6246, 6248), True, 'from matplotlib import pyplot as plt\n'), ((6117, 6141), 'os.path.basename', 'os.path.basename', (['f.name'], {}), '(f.name)\n', (6133, 6141), False, 'import os\n'), ((6197, 6227), 'matplotlib.pyplot.plot', 'plt.plot', (['*traj.T'], {'label': 'label'}), '(*traj.T, label=label)\n', (6205, 6227), True, 'from matplotlib import pyplot as plt\n')] |
'''
This program is not working completely; I'm sorry about that — it's my fault.
It was supposed to count the green pixels in a webcam image, but it does not work very well.
Thank you.
Sincerely,
<NAME>
'''
import cv2
import numpy as np
class greenfinder():
    """Grab one webcam frame and display a binary mask of its green pixels.

    Fixes over the original (which its author noted did not work):

    * the frame is actually read from the capture device — the original
      passed the ``VideoCapture`` object itself to ``cv2.inRange``;
    * the frame is converted BGR -> HSV so the [45, 75] hue thresholds are
      meaningful;
    * the work runs when an instance is created (matching the
      ``greenfinder()`` call under the ``__main__`` guard) instead of at
      class-definition / import time.
    """

    def __init__(self):
        cap = cv2.VideoCapture(0)
        try:
            # A single frame is enough for a still mask.
            ok, frame = cap.read()
        finally:
            # Always release the camera device, even if the read fails.
            cap.release()
        if not ok:
            raise RuntimeError("could not read a frame from the webcam")
        # Hue thresholds only make sense in HSV space; OpenCV frames are BGR.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_green = np.array([45, 100, 100])
        upper_green = np.array([75, 255, 255])
        mask = cv2.inRange(hsv, lower_green, upper_green)
        cv2.imshow("Green Pixel", mask)
        cv2.waitKey(0)
if __name__ == "__main__":
greenfinder() | [
"cv2.inRange",
"cv2.imshow",
"numpy.array",
"cv2.bitwise_or",
"cv2.VideoCapture",
"cv2.waitKey"
] | [((286, 305), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (302, 305), False, 'import cv2\n'), ((329, 353), 'numpy.array', 'np.array', (['[45, 100, 100]'], {}), '([45, 100, 100])\n', (337, 353), True, 'import numpy as np\n'), ((377, 401), 'numpy.array', 'np.array', (['[75, 255, 255]'], {}), '([75, 255, 255])\n', (385, 401), True, 'import numpy as np\n'), ((452, 475), 'numpy.array', 'np.array', (['[0, 100, 100]'], {}), '([0, 100, 100])\n', (460, 475), True, 'import numpy as np\n'), ((501, 526), 'numpy.array', 'np.array', (['[180, 255, 255]'], {}), '([180, 255, 255])\n', (509, 526), True, 'import numpy as np\n'), ((545, 589), 'cv2.inRange', 'cv2.inRange', (['img', 'lower_green_0', 'lower_green'], {}), '(img, lower_green_0, lower_green)\n', (556, 589), False, 'import cv2\n'), ((609, 653), 'cv2.inRange', 'cv2.inRange', (['img', 'upper_green', 'upper_green_1'], {}), '(img, upper_green, upper_green_1)\n', (620, 653), False, 'import cv2\n'), ((672, 702), 'cv2.bitwise_or', 'cv2.bitwise_or', (['mask_0', 'mask_1'], {}), '(mask_0, mask_1)\n', (686, 702), False, 'import cv2\n'), ((712, 743), 'cv2.imshow', 'cv2.imshow', (['"""Green Pixel"""', 'mask'], {}), "('Green Pixel', mask)\n", (722, 743), False, 'import cv2\n'), ((753, 767), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (764, 767), False, 'import cv2\n')] |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
# For reproducibility
# For reproducibility
np.random.seed(1000)

if __name__ == '__main__':
    # Load the scikit-learn digits dataset (1797 8x8 grayscale images).
    digits = load_digits()

    # Show 100 random digits on a 10x10 grid.
    selection = np.random.randint(0, 1797, size=100)
    fig, ax = plt.subplots(10, 10, figsize=(10, 10))
    samples = [digits.data[x].reshape((8, 8)) for x in selection]
    for i in range(10):
        for j in range(10):
            ax[i, j].set_axis_off()
            # Index with the grid width (10). The original used i*8, which
            # displayed only samples 0..81 and repeated several of them.
            ax[i, j].imshow(samples[(i * 10) + j], cmap='gray')
    plt.show()

    # Perform a PCA on the digits dataset.
    # NOTE(review): digits.data lies in [0, 16], so /255 is just a rescale;
    # with whiten=True the absolute scale does not affect the components.
    pca = PCA(n_components=36, whiten=True)
    X_pca = pca.fit_transform(digits.data / 255)
    print('Explained variance ratio')
    print(pca.explained_variance_ratio_)

    # Plot per-component and cumulative explained variance (both in %).
    fig, ax = plt.subplots(1, 2, figsize=(16, 6))
    ax[0].set_xlabel('Component')
    ax[0].set_ylabel('Variance ratio (%)')
    ax[0].bar(np.arange(36), pca.explained_variance_ratio_ * 100.0)
    ax[1].set_xlabel('Component')
    ax[1].set_ylabel('Cumulative variance (%)')
    # Cumulative percentage of the variance *ratio*. The original plotted a
    # reversed cumsum of the raw variances, which did not match the label.
    ax[1].bar(np.arange(36), np.cumsum(pca.explained_variance_ratio_) * 100.0)
    plt.show()

    # Rebuild the images from the 36-component projection and show the result.
    fig, ax = plt.subplots(10, 10, figsize=(10, 10))
    samples = [pca.inverse_transform(X_pca[x]).reshape((8, 8)) for x in selection]
    for i in range(10):
        for j in range(10):
            ax[i, j].set_axis_off()
            ax[i, j].imshow(samples[(i * 10) + j], cmap='gray')
    plt.show()
| [
"sklearn.decomposition.PCA",
"sklearn.datasets.load_digits",
"numpy.random.randint",
"numpy.random.seed",
"numpy.cumsum",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((193, 213), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (207, 213), True, 'import numpy as np\n'), ((279, 292), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (290, 292), False, 'from sklearn.datasets import load_digits\n'), ((340, 376), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1797)'], {'size': '(100)'}), '(0, 1797, size=100)\n', (357, 376), True, 'import numpy as np\n'), ((392, 430), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(10)', '(10)'], {'figsize': '(10, 10)'}), '(10, 10, figsize=(10, 10))\n', (404, 430), True, 'import matplotlib.pyplot as plt\n'), ((655, 665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (663, 665), True, 'import matplotlib.pyplot as plt\n'), ((719, 752), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(36)', 'whiten': '(True)'}), '(n_components=36, whiten=True)\n', (722, 752), False, 'from sklearn.decomposition import PCA\n'), ((937, 972), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 6)'}), '(1, 2, figsize=(16, 6))\n', (949, 972), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1288), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1286, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1347, 1385), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(10)', '(10)'], {'figsize': '(10, 10)'}), '(10, 10, figsize=(10, 10))\n', (1359, 1385), True, 'import matplotlib.pyplot as plt\n'), ((1627, 1637), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1635, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1078), 'numpy.arange', 'np.arange', (['(36)'], {}), '(36)\n', (1074, 1078), True, 'import numpy as np\n'), ((1216, 1229), 'numpy.arange', 'np.arange', (['(36)'], {}), '(36)\n', (1225, 1229), True, 'import numpy as np\n'), ((1231, 1265), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_'], {}), '(pca.explained_variance_)\n', (1240, 1265), True, 'import numpy as np\n')] |
import time
import cv2
import numpy as np
from ..openvino_base.base_model import Base
EMOTION_STATES = ("neutral", "happy", "sad", "surprise", "anger")


class Emotions(Base):
    """Emotion-recognition model wrapper built on the shared OpenVINO base."""

    def __init__(self, model_name, source_width=None, source_height=None,
                 device="CPU", threshold=0.60, extensions=None, **kwargs):
        super().__init__(model_name, source_width, source_height,
                         device, threshold, extensions, **kwargs)

    def preprocess_output(self, inference_results, image, show_bbox, **kwargs):
        """Map raw per-class scores to an emotion label.

        Returns a dict with the winning ``emotional_state`` and the (possibly
        annotated) ``image``.
        """
        scores = np.vstack(inference_results).ravel()
        results = {"emotional_state": EMOTION_STATES[np.argmax(scores)]}
        if show_bbox:
            self.draw_output(results, image, **kwargs)
        results["image"] = image
        return results

    @staticmethod
    def draw_output(results, image, **kwargs):
        """No-op drawing hook: emotions have no bounding box to render."""
        pass
| [
"numpy.vstack",
"numpy.argmax"
] | [((846, 866), 'numpy.argmax', 'np.argmax', (['emo_state'], {}), '(emo_state)\n', (855, 866), True, 'import numpy as np\n'), ((757, 785), 'numpy.vstack', 'np.vstack', (['inference_results'], {}), '(inference_results)\n', (766, 785), True, 'import numpy as np\n')] |
from manimlib.imports import *
import numpy as np
import bats
# slide scene
from manim_reveal import SlideScene
# manim interface with bats
from manimtda import *
import matplotlib
from scipy.spatial import ConvexHull
def gen_circle2(n, r=1.0):
    """Sample ``n`` points spaced evenly on a circle of radius ``r``.

    Parameters
    ----------
    n : int
        Number of points.
    r : float, optional
        Circle radius (default 1.0).

    Returns
    -------
    ndarray of shape (n, 2)
    """
    theta = np.linspace(0, 2*np.pi*(1 - 1./n), n)
    # `np.float` was removed in NumPy 1.24; the builtin `float` is equivalent.
    pts = np.array([r*np.cos(theta), r*np.sin(theta)], dtype=float)
    return pts.T
def gen_point_cloud(n, scale=1.0):
    """Draw ``n`` points uniformly at random from the square
    [-scale, scale] x [-scale, scale]."""
    return np.random.uniform(-scale, scale, (n, 2))
def get_dimension_filt(C):
    """Extract (simplices, times) from a bats complex, where every simplex
    enters the filtration at a time equal to its dimension."""
    spxs, ts = [], []
    for dim in range(C.maxdim() + 1):
        ts += [dim] * C.ncells(dim)
        spxs += list(C.get_simplices(dim))
    return spxs, ts
def dimension_filtration_from_bats(C, pts, **kwargs):
    """Build a manimtda SimplicialFiltration from a bats complex, filtered by
    simplex dimension; ``kwargs`` are forwarded to the filtration."""
    simplices, times = get_dimension_filt(C)
    return SimplicialFiltration(pts, simplices, times, **kwargs)
def get_convex_hull(s, pts, color=BLUE, **kwargs):
    """Return a Manim Polygon tracing the planar convex hull of ``pts[i]``
    for ``i in s``, lightly filled with ``color``."""
    selected = np.array([pts[i] for i in s])
    # Hull is computed on the x/y coordinates only (last column dropped).
    hull = ConvexHull(selected[:, :-1])
    polygon = Polygon(*[selected[v] for v in hull.vertices],
                      color=color,
                      **kwargs)
    polygon.set_fill(color, opacity=0.2)
    return polygon
def get_zzbar(k, color=BLUE):
    """Build a zigzag barcode glyph: three dots joined by two segments, with
    an H_k label placed to its left."""
    dots = [Dot(pos, color=color) for pos in (3*LEFT, ORIGIN, 3*RIGHT)]
    segments = [Line(3*LEFT, ORIGIN, color=color),
                Line(ORIGIN, 3*RIGHT, color=color)]
    bar = VGroup(*dots, *segments)
    label = TexMobject("H_{}".format(k), color=color)
    label.next_to(bar, LEFT)
    return VGroup(bar, label)
def get_convex_hull_barycenter(s, pts):
    """Return the barycenter (vertex mean) of the convex hull of ``pts[i]``
    for ``i in s``; the hull is computed in the plane (last column dropped)."""
    selected = np.array([pts[i] for i in s])
    hull = ConvexHull(selected[:, :-1])
    return np.mean(selected[hull.vertices], axis=0)
class ZigzagNerve(SlideScene):
    """Slide scene: build two landmark covers U and V of a noisy circle,
    form their bivariate cover, apply the nerve construction to each, and
    finish with the induced maps on homology plus a zigzag barcode."""
    CONFIG={
        "camera_config":{"background_color":"#F0F1EB"},
        "video_slides_dir":"../video_slides"
    }
    def construct(self):
        """Animate the full cover -> nerve -> homology zigzag pipeline."""
        # Noisy circle sample: 60 points of radius 2.5 plus Gaussian jitter.
        pts = gen_circle2(60, r=2.5)
        pts = pts + np.random.normal(scale=0.15, size=pts.shape)
        # pts = gen_point_cloud(100, scale=2.5)
        # put points into bats
        pbats = bats.DataSet(bats.Matrix(pts))
        # get landmarks
        lbats = bats.greedy_landmarks(pbats, 3, bats.Euclidean(), 0)
        lbats2 = bats.greedy_landmarks(pbats, 3, bats.Euclidean(), 5)
        lms = np.array(lbats.data()) # numpy array
        # get cover based on landmarks - assign each point to nearest landmarks
        coverU = bats.landmark_eps_cover(pbats, lbats, bats.LInfDist(), 1.0)
        coverV = bats.landmark_eps_cover(pbats, lbats2, bats.LInfDist(), 1.0)
        # NOTE(review): the cover maps fU/fV are unused in this scene.
        coverUV, fU, fV = bats.bivariate_cover(coverU, coverV)
        # Fbats = WeakAlphaFiltration(pts)
        Fbats = bats.RipsFiltration(pbats, bats.Euclidean(), 0.0, 2)
        # NOTE(review): RFC is computed but never used below.
        RFC = bats.ReducedFilteredChainComplex(Fbats, bats.F2())
        # Embed the 2-D points/landmarks in 3-D (z = 0) for manim.
        pts = np.hstack((pts, np.zeros((pts.shape[0], 1))))
        lms = np.hstack((lms, np.zeros((lms.shape[0], 1))))
        # compute barycenters of hulls
        bcU = np.array([get_convex_hull_barycenter(s, pts) for s in coverU])
        bcV = np.array([get_convex_hull_barycenter(s, pts) for s in coverV])
        bcUV = np.array([get_convex_hull_barycenter(s, pts) for s in coverUV])
        # this is just for point cloud
        F = filtration_from_bats(Fbats, pts, color=BLACK)
        F.shift(3*RIGHT)
        #title = TextMobject(r"Comparing Covers", color=BLACK).shift(3.5*UP)
        anim = []
        # this just generates the point cloud
        Pcloud = F.step_to(0)
        anim.append(FadeIn(Pcloud))
        self.play(*anim)
        self.slide_break()
        # show the open sets in U
        line0 = TexMobject(r"\mathcal{U}", r"=\{U_0, U_1, U_2\}", color=BLACK).shift(3*LEFT + 1*UP)
        anim = [FadeIn(line0)]
        hulls = [get_convex_hull(coverU[i], pts).shift(3*RIGHT) for i in range(len(coverU))]
        hullsU = VGroup(*hulls)
        anim.extend([FadeInFrom(h, DOWN) for h in hulls])
        self.play(*anim)
        self.slide_break()
        # show the other cover V
        line1 = TexMobject(r"\mathcal{V}", r"=\{V_0, V_1, V_2\}", color=BLACK).shift(3*LEFT + 1*UP)
        line1.next_to(line0, DOWN)
        anim = [FadeIn(line1)]
        hulls = [get_convex_hull(coverV[i], pts, color=RED).shift(3*RIGHT) for i in range(len(coverV))]
        hullsV = VGroup(*hulls)
        anim.extend([FadeInFrom(h, DOWN) for h in hulls])
        self.play(*anim)
        self.slide_break()
        # show the bivariate cover, replacing U and V on screen
        line2 = TexMobject(r"\mathcal{U} \times_X \mathcal{V}", color=BLACK).shift(3*LEFT + 1*UP)
        line2.next_to(line1, DOWN)
        anim = [FadeIn(line2)]
        hulls = [get_convex_hull(coverUV[i], pts, color=PURPLE).shift(3*RIGHT) for i in range(len(coverUV))]
        hullsUV = VGroup(*hulls)
        anim.extend([FadeOut(hullsU), FadeOut(hullsV), FadeInFrom(hullsUV, UP)])
        self.play(*anim)
        self.slide_break()
        # move the three labels into a single diagram row
        anim = [FadeOut(Pcloud), FadeOut(hullsUV)]
        line0a = TexMobject(r"\mathcal{U}", color=BLACK).move_to((-3,2.5,0))
        line1a = TexMobject(r"\mathcal{V}", color=BLACK).move_to((3,2.5,0))
        # anim.append(ApplyMethod(line0.move_to, (-3,2.5,0)))
        # anim.append(ApplyMethod(line1.move_to, (3,2.5,0)))
        anim.append(Transform(line0, line0a))
        anim.append(Transform(line1, line1a))
        anim.append(ApplyMethod(line2.move_to, (0,2.5,0)))
        self.play(*anim)
        self.slide_break()
        # shift and scale hulls
        hullsU.shift(6*LEFT)
        hullsUV.shift(3*LEFT)
        hullsU.scale(0.3)
        hullsV.scale(0.3)
        hullsUV.scale(0.3)
        self.play(FadeIn(hullsU), FadeIn(hullsV), FadeIn(hullsUV))
        self.slide_break()
        # Nerve Functor
        # construct nerves
        NU = bats.Nerve(coverU, 2)
        NV = bats.Nerve(coverV, 2)
        NUV = bats.Nerve(coverUV, 2)
        # construct filtration on Nerve
        FU = dimension_filtration_from_bats(NU, bcU, color=BLUE, tri_opacity=0.4)
        FU.shift(3*LEFT)
        FU.scale(0.3)
        CU = FU.step_to(2)
        labelU = TexMobject(r"\mathcal{N}(\mathcal{U})", color=BLACK).move_to((-3,2.5,0))
        # construct filtration on Nerve
        FV = dimension_filtration_from_bats(NV, bcV, color=RED, tri_opacity=0.4)
        FV.shift(3*RIGHT)
        FV.scale(0.3)
        CV = FV.step_to(2)
        labelV = TexMobject(r"\mathcal{N}(\mathcal{V})", color=BLACK).move_to((3,2.5,0))
        # construct filtration on Nerve
        FUV = dimension_filtration_from_bats(NUV, bcUV, color=PURPLE, tri_opacity=0.4)
        FUV.scale(0.3)
        CUV = FUV.step_to(2)
        labelUV = TexMobject(r"\mathcal{N}(\mathcal{U}, \mathcal{V})", color=BLACK).move_to((0,2.5,0))
        # morph the cover hulls into their nerves, and relabel
        anim = [Transform(hullsU, CU), Transform(hullsV, CV), Transform(hullsUV, CUV)]
        anim.extend([Transform(line0, labelU), Transform(line1, labelV), Transform(line2, labelUV)])
        self.play(*anim)
        self.slide_break()
        # arrows for maps
        arU = TexMobject(r"\xleftarrow{p_U}",color=BLACK).move_to((-1.5,2.5,0))
        arV = TexMobject(r"\xrightarrow{p_V}",color=BLACK).move_to((1.5,2.5,0))
        arUc = TexMobject(r"\xleftarrow{}",color=BLACK).move_to((-1.5,0,0))
        arVc = TexMobject(r"\xrightarrow{}",color=BLACK).move_to((1.5,0,0))
        self.play(*[FadeIn(ar) for ar in [arU, arV, arUc, arVc]])
        self.slide_break()
        # apply homology functor
        homtxt = TexMobject(
            r"H_k(\mathcal{N}(\mathcal{U}))",
            r"\xleftarrow{H_k(p_U)}",
            r"H_k(\mathcal{N}(\mathcal{U}, \mathcal{V}))",
            r"\xrightarrow{H_k(p_V)}",
            r"H_k(\mathcal{N}(\mathcal{V}))",
            color=BLACK
        ).shift(2.5*UP)
        homtxt1 = TexMobject(r"H_k(\mathcal{N}(\mathcal{U}))", color=BLACK)
        homtxt2 = TexMobject(r"\xleftarrow{H_k(p_U)}", color=BLACK)
        homtxt3 = TexMobject(r"H_k(\mathcal{N}(\mathcal{U}, \mathcal{V}))", color=BLACK).shift(2.5*UP)
        homtxt4 = TexMobject(r"\xrightarrow{H_k(p_V)}", color=BLACK)
        homtxt5 = TexMobject(r"H_k(\mathcal{N}(\mathcal{V}))", color=BLACK)
        homtxt2.next_to(homtxt3, LEFT)
        homtxt1.next_to(homtxt2, LEFT)
        homtxt4.next_to(homtxt3, RIGHT)
        homtxt5.next_to(homtxt4, RIGHT)
        # arUh = TexMobject(r"\xleftarrow{H_k(p_U)}",color=BLACK).move_to((-1.5,2.5,0))
        # arVh = TexMobject(r"\xrightarrow{H_k(p_V)}",color=BLACK).move_to((1.5,2.5,0))
        # labelUh = TexMobject(r"H_k(\mathcal{N}(\mathcal{U}))", color=BLACK).move_to((-3,2.5,0))
        # labelVh = TexMobject(r"H_k(\mathcal{N}(\mathcal{V}))", color=BLACK).move_to((3,2.5,0))
        # labelUVh = TexMobject(r"H_k(\mathcal{N}(\mathcal{U}, \mathcal{V}))", color=BLACK).move_to((0,2.5,0))
        # produce zigzag barcode
        ZZ0 = get_zzbar(0, color=BLUE)
        ZZ0.shift(2*DOWN)
        ZZ1 = get_zzbar(1, color=RED)
        ZZ1.shift(3*DOWN)
        dgrp =VGroup(line0, arU, line2, arV, line1)
        anim = [ Transform( dgrp, homtxt), ShowCreation(ZZ0), ShowCreation(ZZ1)]
        # anim = [ FadeOut( dgrp), FadeIn(homtxt), ShowCreation(ZZ0), ShowCreation(ZZ1)]
        #anim.extend([FadeOut(l) for l in [arU, arV, label0, label1, label2]])
        self.play(*anim)
        self.wait()
| [
"numpy.random.normal",
"numpy.mean",
"bats.bivariate_cover",
"bats.Matrix",
"bats.Nerve",
"bats.LInfDist",
"scipy.spatial.ConvexHull",
"numpy.array",
"numpy.linspace",
"bats.Euclidean",
"bats.F2",
"numpy.cos",
"numpy.zeros",
"numpy.random.uniform",
"numpy.sin"
] | [((260, 304), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi * (1 - 1.0 / n))', 'n'], {}), '(0, 2 * np.pi * (1 - 1.0 / n), n)\n', (271, 304), True, 'import numpy as np\n'), ((476, 530), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-scale)', 'high': 'scale', 'size': '(n, 2)'}), '(low=-scale, high=scale, size=(n, 2))\n', (493, 530), True, 'import numpy as np\n'), ((1085, 1114), 'numpy.array', 'np.array', (['[pts[i] for i in s]'], {}), '([pts[i] for i in s])\n', (1093, 1114), True, 'import numpy as np\n'), ((1126, 1150), 'scipy.spatial.ConvexHull', 'ConvexHull', (['spts[:, :-1]'], {}), '(spts[:, :-1])\n', (1136, 1150), False, 'from scipy.spatial import ConvexHull\n'), ((1727, 1756), 'numpy.array', 'np.array', (['[pts[i] for i in s]'], {}), '([pts[i] for i in s])\n', (1735, 1756), True, 'import numpy as np\n'), ((1768, 1792), 'scipy.spatial.ConvexHull', 'ConvexHull', (['spts[:, :-1]'], {}), '(spts[:, :-1])\n', (1778, 1792), False, 'from scipy.spatial import ConvexHull\n'), ((1835, 1860), 'numpy.mean', 'np.mean', (['hull_pts'], {'axis': '(0)'}), '(hull_pts, axis=0)\n', (1842, 1860), True, 'import numpy as np\n'), ((2660, 2696), 'bats.bivariate_cover', 'bats.bivariate_cover', (['coverU', 'coverV'], {}), '(coverU, coverV)\n', (2680, 2696), False, 'import bats\n'), ((5413, 5434), 'bats.Nerve', 'bats.Nerve', (['coverU', '(2)'], {}), '(coverU, 2)\n', (5423, 5434), False, 'import bats\n'), ((5442, 5463), 'bats.Nerve', 'bats.Nerve', (['coverV', '(2)'], {}), '(coverV, 2)\n', (5452, 5463), False, 'import bats\n'), ((5472, 5494), 'bats.Nerve', 'bats.Nerve', (['coverUV', '(2)'], {}), '(coverUV, 2)\n', (5482, 5494), False, 'import bats\n'), ((2079, 2123), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.15)', 'size': 'pts.shape'}), '(scale=0.15, size=pts.shape)\n', (2095, 2123), True, 'import numpy as np\n'), ((2215, 2231), 'bats.Matrix', 'bats.Matrix', (['pts'], {}), '(pts)\n', (2226, 2231), False, 'import bats\n'), ((2293, 2309), 
'bats.Euclidean', 'bats.Euclidean', ([], {}), '()\n', (2307, 2309), False, 'import bats\n'), ((2357, 2373), 'bats.Euclidean', 'bats.Euclidean', ([], {}), '()\n', (2371, 2373), False, 'import bats\n'), ((2546, 2561), 'bats.LInfDist', 'bats.LInfDist', ([], {}), '()\n', (2559, 2561), False, 'import bats\n'), ((2618, 2633), 'bats.LInfDist', 'bats.LInfDist', ([], {}), '()\n', (2631, 2633), False, 'import bats\n'), ((2772, 2788), 'bats.Euclidean', 'bats.Euclidean', ([], {}), '()\n', (2786, 2788), False, 'import bats\n'), ((2846, 2855), 'bats.F2', 'bats.F2', ([], {}), '()\n', (2853, 2855), False, 'import bats\n'), ((320, 333), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (326, 333), True, 'import numpy as np\n'), ((337, 350), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (343, 350), True, 'import numpy as np\n'), ((2882, 2909), 'numpy.zeros', 'np.zeros', (['(pts.shape[0], 1)'], {}), '((pts.shape[0], 1))\n', (2890, 2909), True, 'import numpy as np\n'), ((2936, 2963), 'numpy.zeros', 'np.zeros', (['(lms.shape[0], 1)'], {}), '((lms.shape[0], 1))\n', (2944, 2963), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph
# 3-node toy graph: node 0 connected to nodes 1 and 2 (symmetric, unit weights).
MAT = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]], dtype=float)
# Expected CSR (compressed sparse row) encoding of MAT.
INDPTR = np.array([0, 2, 3, 4], dtype=np.uint32)
INDICES = np.array([1, 2, 0, 0], dtype=np.uint32)
DATA = np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32)
# Expected adjacency-list encoding of MAT: one {neighbor: weight} dict per node.
ADJLST = [{1: 1.0, 2: 1.0}, {0: 1}, {0: 1}]
# Node IDs and the expected ID -> index mapping.
IDS = ["a", "b", "c"]
IDMAP = {"a": 0, "b": 1, "c": 2}
class TestBaseGraph(unittest.TestCase):
    """Checks for the plain BaseGraph container."""

    @classmethod
    def setUpClass(cls):
        graph = BaseGraph()
        graph.set_ids(IDS)
        cls.g = graph

    def test_set_ids(self):
        self.assertEqual(self.g.IDlst, IDS)
        self.assertEqual(self.g.IDmap, IDMAP)

    def test_properties(self):
        graph = self.g
        self.assertEqual(graph.num_nodes, 3)
        # Edge-dependent properties are not implemented on the base class.
        with self.assertRaises(NotImplementedError):
            _ = graph.num_edges
        with self.assertRaises(NotImplementedError):
            _ = graph.density
class TestAdjlstGraph(unittest.TestCase):
    """Checks for the adjacency-list graph backend."""

    def test_from_mat(self):
        graph = AdjlstGraph.from_mat(MAT, IDS)
        self.assertEqual(graph._data, ADJLST)
        self.assertEqual(graph.IDlst, IDS)

    def test_properties(self):
        graph = AdjlstGraph.from_mat(MAT, IDS)
        self.assertEqual(graph.num_nodes, 3)
        self.assertEqual(graph.num_edges, 4)
        self.assertEqual(graph.density, 2 / 3)
class TestSparseGraph(unittest.TestCase):
    """Checks for the CSR (sparse) graph backend."""

    def tearDown(self):
        del self.g

    def validate(self):
        # The CSR arrays and the node IDs must match the fixtures exactly.
        self.assertTrue(np.array_equal(self.g.indptr, INDPTR))
        self.assertTrue(np.array_equal(self.g.indices, INDICES))
        self.assertTrue(np.array_equal(self.g.data, DATA))
        self.assertEqual(self.g.IDlst, IDS)

    def test_from_mat(self):
        self.g = SparseGraph.from_mat(MAT, IDS)
        self.validate()

    def test_from_adjlst_graph(self):
        self.g = SparseGraph.from_adjlst_graph(AdjlstGraph.from_mat(MAT, IDS))
        self.validate()

    def test_properties(self):
        self.g = SparseGraph.from_mat(MAT, IDS)
        self.assertEqual(self.g.num_nodes, 3)
        self.assertEqual(self.g.num_edges, 4)
        self.assertEqual(self.g.density, 2 / 3)
class TestDenseGraph(unittest.TestCase):
    """Checks for the dense (full adjacency matrix) graph backend."""

    def tearDown(self):
        del self.g

    def validate(self):
        # The stored matrix must round-trip exactly, along with the node IDs.
        self.assertTrue(np.array_equal(self.g.data, MAT))
        self.assertEqual(self.g.IDlst, IDS)

    def test_from_mat(self):
        self.g = DenseGraph.from_mat(MAT, IDS)
        self.validate()

    def test_from_adjlst_graph(self):
        self.g = DenseGraph.from_adjlst_graph(AdjlstGraph.from_mat(MAT, IDS))
        self.validate()

    def test_properties(self):
        self.g = DenseGraph.from_mat(MAT, IDS)
        self.assertEqual(self.g.num_nodes, 3)
        self.assertEqual(self.g.num_edges, 4)
        self.assertEqual(self.g.density, 2 / 3)
# Run the suite when the module is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"numpy.all",
"pecanpy.graph.DenseGraph.from_mat",
"pecanpy.graph.AdjlstGraph.from_mat",
"numpy.array",
"pecanpy.graph.BaseGraph",
"unittest.main",
"pecanpy.graph.SparseGraph.from_mat"
] | [((117, 173), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 0], [1, 0, 0]]'], {'dtype': 'float'}), '([[0, 1, 1], [1, 0, 0], [1, 0, 0]], dtype=float)\n', (125, 173), True, 'import numpy as np\n'), ((183, 222), 'numpy.array', 'np.array', (['[0, 2, 3, 4]'], {'dtype': 'np.uint32'}), '([0, 2, 3, 4], dtype=np.uint32)\n', (191, 222), True, 'import numpy as np\n'), ((233, 272), 'numpy.array', 'np.array', (['[1, 2, 0, 0]'], {'dtype': 'np.uint32'}), '([1, 2, 0, 0], dtype=np.uint32)\n', (241, 272), True, 'import numpy as np\n'), ((280, 328), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {'dtype': 'np.float32'}), '([1.0, 1.0, 1.0, 1.0], dtype=np.float32)\n', (288, 328), True, 'import numpy as np\n'), ((2870, 2885), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2883, 2885), False, 'import unittest\n'), ((528, 539), 'pecanpy.graph.BaseGraph', 'BaseGraph', ([], {}), '()\n', (537, 539), False, 'from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph\n'), ((1055, 1085), 'pecanpy.graph.AdjlstGraph.from_mat', 'AdjlstGraph.from_mat', (['MAT', 'IDS'], {}), '(MAT, IDS)\n', (1075, 1085), False, 'from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph\n'), ((1216, 1246), 'pecanpy.graph.AdjlstGraph.from_mat', 'AdjlstGraph.from_mat', (['MAT', 'IDS'], {}), '(MAT, IDS)\n', (1236, 1246), False, 'from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph\n'), ((1757, 1787), 'pecanpy.graph.SparseGraph.from_mat', 'SparseGraph.from_mat', (['MAT', 'IDS'], {}), '(MAT, IDS)\n', (1777, 1787), False, 'from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph\n'), ((2003, 2033), 'pecanpy.graph.SparseGraph.from_mat', 'SparseGraph.from_mat', (['MAT', 'IDS'], {}), '(MAT, IDS)\n', (2023, 2033), False, 'from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph\n'), ((2425, 2454), 'pecanpy.graph.DenseGraph.from_mat', 'DenseGraph.from_mat', (['MAT', 'IDS'], {}), '(MAT, IDS)\n', (2444, 2454), False, 'from 
pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph\n'), ((2669, 2698), 'pecanpy.graph.DenseGraph.from_mat', 'DenseGraph.from_mat', (['MAT', 'IDS'], {}), '(MAT, IDS)\n', (2688, 2698), False, 'from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph\n'), ((1521, 1552), 'numpy.all', 'np.all', (['(self.g.indptr == INDPTR)'], {}), '(self.g.indptr == INDPTR)\n', (1527, 1552), True, 'import numpy as np\n'), ((1578, 1611), 'numpy.all', 'np.all', (['(self.g.indices == INDICES)'], {}), '(self.g.indices == INDICES)\n', (1584, 1611), True, 'import numpy as np\n'), ((1637, 1664), 'numpy.all', 'np.all', (['(self.g.data == DATA)'], {}), '(self.g.data == DATA)\n', (1643, 1664), True, 'import numpy as np\n'), ((1898, 1928), 'pecanpy.graph.AdjlstGraph.from_mat', 'AdjlstGraph.from_mat', (['MAT', 'IDS'], {}), '(MAT, IDS)\n', (1918, 1928), False, 'from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph\n'), ((2306, 2332), 'numpy.all', 'np.all', (['(self.g.data == MAT)'], {}), '(self.g.data == MAT)\n', (2312, 2332), True, 'import numpy as np\n'), ((2564, 2594), 'pecanpy.graph.AdjlstGraph.from_mat', 'AdjlstGraph.from_mat', (['MAT', 'IDS'], {}), '(MAT, IDS)\n', (2584, 2594), False, 'from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph\n')] |
from matplotlib import pyplot as plt
import numpy as np
from math import cos, sin, atan
class Neuron():
    """A single drawable neuron at position (x, y).

    The ``weight`` attribute is only set when a truthy weight is supplied;
    callers that read ``neuron.weight`` rely on Layer always passing one.
    """

    def __init__(self, x, y, weight=None):
        # The original default was a mutable [] (shared across calls); None
        # is the safe equivalent -- both are falsy, so behavior is unchanged.
        self.x = x
        self.y = y
        if weight: self.weight = weight

    def draw(self, texter):
        """Draw the neuron circle with *texter* centred inside it."""
        circle = plt.Circle((self.x, self.y), radius=neuron_radius, fill=False)
        plt.annotate(texter, (self.x, self.y), size='large', ha='center', va='center')
        plt.gca().add_patch(circle)
class Layer():
    """One horizontal row of neurons plus the connections back to the
    previous layer (lines, optionally annotated with weight labels)."""

    def __init__(self, network, number_of_neurons, text, weights=None):
        # `weights` defaults to None instead of a mutable [] (same truthiness).
        self.previous_layer = self.__get_previous_layer(network)
        self.y = self.__calculate_layer_y_position()
        self.neurons = self.__initialise_neurons(number_of_neurons, weights)
        self.text = text

    def __initialise_neurons(self, number_of_neurons, weights):
        """Create the layer's neurons, evenly spaced and centred horizontally."""
        neurons = []
        x = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons)
        # `xrange` (Python 2 only) replaced with `range` for Python 3.
        for iteration in range(number_of_neurons):
            if weights:
                neuron = Neuron(x, self.y, weight=weights[iteration])
            else:
                neuron = Neuron(x, self.y, weight='no')
            neurons.append(neuron)
            x += horizontal_distance_between_neurons
        return neurons

    def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons):
        """Left offset so this layer is centred under the widest layer."""
        return horizontal_distance_between_neurons * (number_of_neurons_in_widest_layer - number_of_neurons) / 2

    def __calculate_layer_y_position(self):
        """Stack this layer a fixed vertical distance above the previous one."""
        if self.previous_layer:
            return self.previous_layer.y + vertical_distance_between_layers
        else:
            return 0

    def __get_previous_layer(self, network):
        """Return the most recently added layer, or None for the first layer."""
        if len(network.layers) > 0:
            return network.layers[-1]
        else:
            return None

    def __line_between_two_neurons(self, neuron1, neuron2, weight=None):
        """Draw the connecting line; when `weight` is truthy, place the weight
        label at a position that depends on which side of x=8 the neurons sit."""
        angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y))
        x_adjustment = neuron_radius * sin(angle)
        y_adjustment = neuron_radius * cos(angle)
        line = plt.Line2D((neuron1.x - x_adjustment, neuron2.x + x_adjustment), (neuron1.y - y_adjustment, neuron2.y + y_adjustment), color='k')
        plt.gca().add_line(line)
        if weight:
            if neuron1.x < 8 and neuron1.x != neuron2.x:
                plt.text((neuron1.x + neuron2.x) / 2.5, (neuron1.y + neuron2.y) / 4, weight[0], size='large', ha='center')
            elif neuron1.x > 8 and neuron1.x != neuron2.x:
                plt.text((neuron1.x + neuron2.x) / 1.7, (neuron1.y + neuron2.y) / 4, weight[1], size='large', ha='center')
            elif neuron2.x < 8 and neuron1.x == neuron2.x:
                plt.text((neuron1.x + neuron2.x) / 2, (neuron1.y + neuron2.y) / 2, weight[0], size='large', ha='right')
            elif neuron2.x > 8 and neuron1.x == neuron2.x:
                plt.text((neuron1.x + neuron2.x) / 2, (neuron1.y + neuron2.y) / 2, weight[1], size='large', ha='left')
            elif neuron2.x < 8:
                plt.text((neuron1.x + neuron2.x) / 2, (neuron1.y + neuron2.y) / 2, weight, size='large', ha='right')
            elif neuron2.x > 8:
                plt.text((neuron1.x + neuron2.x) / 2, (neuron1.y + neuron2.y) / 2, weight, size='large', ha='left')

    def draw(self, text):
        """Draw each neuron with its label and connect it to every neuron of
        the previous layer."""
        for neuron, texter in zip(self.neurons, text):
            neuron.draw(texter)
            if self.previous_layer:
                for previous_layer_neuron in self.previous_layer.neurons:
                    if previous_layer_neuron.weight == 'no':
                        self.__line_between_two_neurons(neuron, previous_layer_neuron)
                    else:
                        # The original had identical branches for
                        # np.size(weight) < 2 and the general case; collapsed.
                        self.__line_between_two_neurons(neuron, previous_layer_neuron, weight=previous_layer_neuron.weight)
class NeuralNetwork():
    """Container that stacks Layer objects and renders the whole diagram."""

    def __init__(self):
        self.layers = []

    def add_layer(self, number_of_neurons, text, weights=None):
        """Append a layer of `number_of_neurons` neurons with the given labels.

        `weights` defaults to None instead of a mutable [] (same truthiness,
        so the original behavior is preserved).
        """
        layer = Layer(self, number_of_neurons, text, weights=weights)
        self.layers.append(layer)

    def draw(self):
        """Render every layer on a square matplotlib figure without axes."""
        plt.figure(figsize=(8, 8))
        for layer in self.layers:
            layer.draw(layer.text)
        plt.axis('scaled')
        plt.xticks([])
        plt.yticks([])
        plt.show()
# Global layout parameters for the network diagram (plot units).
vertical_distance_between_layers = 6
horizontal_distance_between_neurons = 8
neuron_radius = 1.5
number_of_neurons_in_widest_layer = 3
"matplotlib.pyplot.text",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"numpy.size",
"math.cos",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.Line2D",
"matplotlib.pyplot.axis",
"math.sin",
"matplo... | [((269, 331), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(self.x, self.y)'], {'radius': 'neuron_radius', 'fill': '(False)'}), '((self.x, self.y), radius=neuron_radius, fill=False)\n', (279, 331), True, 'from matplotlib import pyplot as plt\n'), ((347, 425), 'matplotlib.pyplot.annotate', 'plt.annotate', (['texter', '(self.x, self.y)'], {'size': '"""large"""', 'ha': '"""center"""', 'va': '"""center"""'}), "(texter, (self.x, self.y), size='large', ha='center', va='center')\n", (359, 425), True, 'from matplotlib import pyplot as plt\n'), ((2060, 2193), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(neuron1.x - x_adjustment, neuron2.x + x_adjustment)', '(neuron1.y - y_adjustment, neuron2.y + y_adjustment)'], {'color': '"""k"""'}), "((neuron1.x - x_adjustment, neuron2.x + x_adjustment), (neuron1.y -\n y_adjustment, neuron2.y + y_adjustment), color='k')\n", (2070, 2193), True, 'from matplotlib import pyplot as plt\n'), ((4299, 4325), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (4309, 4325), True, 'from matplotlib import pyplot as plt\n'), ((4402, 4420), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (4410, 4420), True, 'from matplotlib import pyplot as plt\n'), ((4429, 4443), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4439, 4443), True, 'from matplotlib import pyplot as plt\n'), ((4452, 4466), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4462, 4466), True, 'from matplotlib import pyplot as plt\n'), ((4475, 4485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4483, 4485), True, 'from matplotlib import pyplot as plt\n'), ((1984, 1994), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (1987, 1994), False, 'from math import cos, sin, atan\n'), ((2034, 2044), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (2037, 2044), False, 'from math import cos, sin, atan\n'), ((429, 438), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), 
'()\n', (436, 438), True, 'from matplotlib import pyplot as plt\n'), ((2197, 2206), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2204, 2206), True, 'from matplotlib import pyplot as plt\n'), ((2323, 2434), 'matplotlib.pyplot.text', 'plt.text', (['((neuron1.x + neuron2.x) / 2.5)', '((neuron1.y + neuron2.y) / 4)', 'weight[0]'], {'size': '"""large"""', 'ha': '"""center"""'}), "((neuron1.x + neuron2.x) / 2.5, (neuron1.y + neuron2.y) / 4, weight\n [0], size='large', ha='center')\n", (2331, 2434), True, 'from matplotlib import pyplot as plt\n'), ((2500, 2611), 'matplotlib.pyplot.text', 'plt.text', (['((neuron1.x + neuron2.x) / 1.7)', '((neuron1.y + neuron2.y) / 4)', 'weight[1]'], {'size': '"""large"""', 'ha': '"""center"""'}), "((neuron1.x + neuron2.x) / 1.7, (neuron1.y + neuron2.y) / 4, weight\n [1], size='large', ha='center')\n", (2508, 2611), True, 'from matplotlib import pyplot as plt\n'), ((2678, 2786), 'matplotlib.pyplot.text', 'plt.text', (['((neuron1.x + neuron2.x) / 2)', '((neuron1.y + neuron2.y) / 2)', 'weight[0]'], {'size': '"""large"""', 'ha': '"""right"""'}), "((neuron1.x + neuron2.x) / 2, (neuron1.y + neuron2.y) / 2, weight[0\n ], size='large', ha='right')\n", (2686, 2786), True, 'from matplotlib import pyplot as plt\n'), ((2852, 2959), 'matplotlib.pyplot.text', 'plt.text', (['((neuron1.x + neuron2.x) / 2)', '((neuron1.y + neuron2.y) / 2)', 'weight[1]'], {'size': '"""large"""', 'ha': '"""left"""'}), "((neuron1.x + neuron2.x) / 2, (neuron1.y + neuron2.y) / 2, weight[1\n ], size='large', ha='left')\n", (2860, 2959), True, 'from matplotlib import pyplot as plt\n'), ((3660, 3697), 'numpy.size', 'np.size', (['previous_layer_neuron.weight'], {}), '(previous_layer_neuron.weight)\n', (3667, 3697), True, 'import numpy as np\n'), ((2998, 3102), 'matplotlib.pyplot.text', 'plt.text', (['((neuron1.x + neuron2.x) / 2)', '((neuron1.y + neuron2.y) / 2)', 'weight'], {'size': '"""large"""', 'ha': '"""right"""'}), "((neuron1.x + neuron2.x) / 2, (neuron1.y + 
neuron2.y) / 2, weight,\n size='large', ha='right')\n", (3006, 3102), True, 'from matplotlib import pyplot as plt\n'), ((3142, 3245), 'matplotlib.pyplot.text', 'plt.text', (['((neuron1.x + neuron2.x) / 2)', '((neuron1.y + neuron2.y) / 2)', 'weight'], {'size': '"""large"""', 'ha': '"""left"""'}), "((neuron1.x + neuron2.x) / 2, (neuron1.y + neuron2.y) / 2, weight,\n size='large', ha='left')\n", (3150, 3245), True, 'from matplotlib import pyplot as plt\n')] |
"""
.. module:: magneticSensor
:synopsis: Magnetic sensor reader
.. moduleauthor:: <NAME> <<EMAIL>>
Data reader for Arduino controlled magnetic field sensor
"""
__author__ = '<NAME>'
import time
import numpy as np
from serial.serialutil import SerialException
from labtools.log import create_logger
from labtools.utils.instr import BaseDevice, InstrError, process_if_initialized
from .conf import TIMEOUT, LOGLEVEL, SIMULATE, BAUDRATE
# Use the fake serial backend when running in simulation mode; otherwise talk
# to real hardware through pyserial.
if SIMULATE:
    from labtools.pi._test.serial_test import Serial, comports
else:
    from serial import Serial
    from serial.tools.list_ports import comports
# Module-wide logger at the level configured in conf.
logger = create_logger(__name__, LOGLEVEL)
def findPort():
    """Scan all serial ports and return the first one whose device greets
    with "Hello" followed by "init".

    Returns
    -------
    port : str
        Port name, or None if no matching device is found.

    Examples
    --------
    >>> findPort()
    'COM7'
    """
    for port, desc, dev in comports():
        candidate = Serial(timeout=TIMEOUT, baudrate=BAUDRATE)
        try:
            candidate.port = port
            candidate.open()
        except SerialException:
            logger.info('Could not open port {}'.format(port))
        else:
            if _checkPort(candidate):
                return port
        finally:
            candidate.close()
    logger.warn('Could not find any port')
def _checkPort(serial):
    """Return True when *serial* sends the expected "Hello"/"init" greeting.

    The device is given one second to boot before the first two lines are
    read and compared against the handshake.

    Parameters
    ----------
    serial : Serial
        An already-open serial connection.

    Returns
    -------
    bool
    """
    # Typo fixed in the log message ('Checkinig' -> 'Checking').
    logger.info('Checking port {} for init lines.'.format(serial.port))
    time.sleep(1) # timeout 1 second
    line1 = serial.readline().strip()
    line2 = serial.readline().strip()
    if line1 == b'Hello' and line2 == b'init':
        logger.debug('Sensor found on port {}'.format(serial.port))
        return True
    else:
        logger.debug('Got {} instead of "Hello" and {} instead of "init" on port {}.'.format(line1, line2, serial.port))
        return False
def _serial_default():
return Serial(timeout=TIMEOUT, baudrate=BAUDRATE)
class Magsensor(BaseDevice):
"""
Sensor for reading magnetic field in (x,y,z)
"""
def __init__(self, serial=None):
self._initialized = False
if serial is not None:
self.serial = serial
else:
self.serial = _serial_default()
def init(self, port=None, baudrate=None):
"""Opens connection to a device. If port is not given and serial has
not yet been configured, it will automatically open a valid port.
:param port: str
port number or None for default (search for port)
:param baudrate: int
"""
logger.info('Initializing Magsensor.')
self._initialized = False
self._info = 'Unknown'
if baudrate is not None:
self.serial.baudrate = baudrate
if port is not None:
self.serial.port = port
if not self.serial.is_open:
self.serial.open()
if not _checkPort(self.serial):
# if self.serial does not meet requrements
raise InstrError('Port {} does not meet requirements.'.format(self.serial.port))
else:
port = findPort()
if port is None:
raise InstrError('Sensor not found in any port.')
self.serial.port = port
self.serial.open()
if not _checkPort(self.serial):
# if self.serial does not meet requrements
raise InstrError('Port {} does not meet requirements in second init.'.format(self.serial.port))
self._info = 'MicroteslaSensor by <NAME>'
self._initialized = True
@process_if_initialized
def readData(self, nAvg=1, sigmaQ=False):
""" Flushes input and reads lines of data. Averages over nAvg
:param nAvg: int
number of averages
:param sigmaQ: bool
return sigma
:return: ndarray
magnetic field in micro tesla
"""
x, y, z = _read(self.serial)
tab = np.array([[x, y, z]])
for i in range(nAvg - 1):
x, y, z = _read(self.serial, flushQ=False)
tab = np.append(tab, [[x, y, z]], axis=0)
if sigmaQ:
return np.concatenate((tab.mean(axis=0), tab.std(axis=0)))
return np.mean(tab, axis=0)
@process_if_initialized
def readTimeAndData(self, nAvg=1, sigmaQ=False):
""" Flushes input and reads lines of data. Averages over nAvg
:param nAvg: int
number of averages
:param sigmaQ: bool
return sigma
:return: ndarray
magnetic field in micro tesla
"""
tab = np.empty((0, 3))
t0 = time.time()
for i in range(nAvg):
x, y, z = _read(self.serial, flushQ=False)
tab = np.append(tab, [[x, y, z]], axis=0)
t1 = time.time()
if sigmaQ:
return (t0 + t1) / 2, np.concatenate((tab.mean(axis=0), tab.std(axis=0)))
return (t0 + t1) / 2, np.mean(tab, axis=0)
def close(self):
"""Closes connection to the device
"""
logger.info('Closing port {}'.format(self.serial.port))
self._initialized = False
self._info = 'Unknown'
self.serial.close()
def __del__(self):
self.close()
def _flush(serial):
"""
Flushes serial input
"""
logger.debug('Flushing serial input on port {}'.format(serial.port))
serial.reset_input_buffer()
def _read(serial, flushQ=True):
"""
Flushes serial input and reads one line of data
:return: float x, float y, float z
magnetic field in micro tesla
"""
if flushQ:
_flush(serial)
logger.debug('Reading output from serial port %s' % serial.port)
t = serial.readline()
try:
return _formatInput(t)
except InstrError:
logger.warn('Not able to split input "{}".'.format(t))
return _read(serial, flushQ=False)
def _formatInput(line):
"""
Formats input to get Bx, By, Bz
:param line: string line
:return: float Bx, float By, float Bz or None
"""
logger.debug('Formatting output {}'.format(line))
line = line.split(b'(')[-1]
line = line.split(b')')[0]
try:
x, y, z = [float(k.decode()) for k in line.split()]
return x, y, z
except ValueError:
raise InstrError('Not able to split input "{}".'.format(line))
# if __name__ == '__main__':
# m = Magsensor()
# m.init()
| [
"numpy.mean",
"serial.tools.list_ports.comports",
"time.sleep",
"numpy.append",
"numpy.array",
"serial.Serial",
"numpy.empty",
"labtools.utils.instr.InstrError",
"time.time",
"labtools.log.create_logger"
] | [((615, 648), 'labtools.log.create_logger', 'create_logger', (['__name__', 'LOGLEVEL'], {}), '(__name__, LOGLEVEL)\n', (628, 648), False, 'from labtools.log import create_logger\n'), ((923, 933), 'serial.tools.list_ports.comports', 'comports', ([], {}), '()\n', (931, 933), False, 'from serial.tools.list_ports import comports\n'), ((1437, 1450), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1447, 1450), False, 'import time\n'), ((1872, 1914), 'serial.Serial', 'Serial', ([], {'timeout': 'TIMEOUT', 'baudrate': 'BAUDRATE'}), '(timeout=TIMEOUT, baudrate=BAUDRATE)\n', (1878, 1914), False, 'from serial import Serial\n'), ((982, 1024), 'serial.Serial', 'Serial', ([], {'timeout': 'TIMEOUT', 'baudrate': 'BAUDRATE'}), '(timeout=TIMEOUT, baudrate=BAUDRATE)\n', (988, 1024), False, 'from serial import Serial\n'), ((3917, 3938), 'numpy.array', 'np.array', (['[[x, y, z]]'], {}), '([[x, y, z]])\n', (3925, 3938), True, 'import numpy as np\n'), ((4189, 4209), 'numpy.mean', 'np.mean', (['tab'], {'axis': '(0)'}), '(tab, axis=0)\n', (4196, 4209), True, 'import numpy as np\n'), ((4514, 4530), 'numpy.empty', 'np.empty', (['(0, 3)'], {}), '((0, 3))\n', (4522, 4530), True, 'import numpy as np\n'), ((4544, 4555), 'time.time', 'time.time', ([], {}), '()\n', (4553, 4555), False, 'import time\n'), ((4709, 4720), 'time.time', 'time.time', ([], {}), '()\n', (4718, 4720), False, 'import time\n'), ((4047, 4082), 'numpy.append', 'np.append', (['tab', '[[x, y, z]]'], {'axis': '(0)'}), '(tab, [[x, y, z]], axis=0)\n', (4056, 4082), True, 'import numpy as np\n'), ((4660, 4695), 'numpy.append', 'np.append', (['tab', '[[x, y, z]]'], {'axis': '(0)'}), '(tab, [[x, y, z]], axis=0)\n', (4669, 4695), True, 'import numpy as np\n'), ((4857, 4877), 'numpy.mean', 'np.mean', (['tab'], {'axis': '(0)'}), '(tab, axis=0)\n', (4864, 4877), True, 'import numpy as np\n'), ((3123, 3166), 'labtools.utils.instr.InstrError', 'InstrError', (['"""Sensor not found in any port."""'], {}), "('Sensor not found in any 
port.')\n", (3133, 3166), False, 'from labtools.utils.instr import BaseDevice, InstrError, process_if_initialized\n')] |
from __future__ import division
import numpy as np
from scipy.fftpack import fft, ifft
from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from math import sqrt, pi
def initialize_all(y0, t0, t1, n):
""" An initialization routine for the different ODE solving
methods in the lab. This initializes Y, T, and h. """
if isinstance(y0, np.ndarray):
Y = np.empty((n, y0.size),dtype=complex).squeeze()
else:
Y = np.empty(n,dtype=complex)
Y[0] = y0
T = np.linspace(t0, t1, n)
h = float(t1 - t0) / (n - 1)
return Y, T, h
def RK4(f, y0, t0, t1, n):
""" Use the RK4 method to compute an approximate solution
to the ODE y' = f(t, y) at n equispaced parameter values from t0 to t
with initial conditions y(t0) = y0.
'y0' is assumed to be either a constant or a one-dimensional numpy array.
't0' and 't1' are assumed to be constants.
'f' is assumed to accept two arguments.
The first is a constant giving the current value of t.
The second is a one-dimensional numpy array of the same size as y.
This function returns an array Y of shape (n,) if
y is a constant or an array of size 1.
It returns an array of shape (n, y.size) otherwise.
In either case, Y[i] is the approximate value of y at
the i'th value of np.linspace(t0, t, n).
"""
Y, T, h = initialize_all(y0, t0, t1, n)
for i in xrange(1, n):
K1 = f(T[i-1], Y[i-1])
# print "Y[i-1].shape = ", Y[i-1].shape
tplus = (T[i] + T[i-1]) * .5
K2 = f(tplus, Y[i-1] + .5 * h * K1)
K3 = f(tplus, Y[i-1] + .5 * h * K2)
K4 = f(T[i], Y[i-1] + h * K3)
# print "K1 + 2 * K2 + 2 * K3 + K4.shape = ", (K1 + 2 * K2 + 2 * K3 + K4).shape
Y[i] = Y[i-1] + (h / 6.) * (K1 + 2 * K2 + 2 * K3 + K4)
return T, Y
| [
"numpy.linspace",
"numpy.empty"
] | [((524, 546), 'numpy.linspace', 'np.linspace', (['t0', 't1', 'n'], {}), '(t0, t1, n)\n', (535, 546), True, 'import numpy as np\n'), ((482, 508), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'complex'}), '(n, dtype=complex)\n', (490, 508), True, 'import numpy as np\n'), ((422, 459), 'numpy.empty', 'np.empty', (['(n, y0.size)'], {'dtype': 'complex'}), '((n, y0.size), dtype=complex)\n', (430, 459), True, 'import numpy as np\n')] |
import numpy as np
import os, sys, random, copy
import torch
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except:
pass
from detectron2.structures import BoxMode
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.utils.visualizer import Visualizer
from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
from detectron2.data.build import build_detection_train_loader, build_batch_data_loader
from data.cgrcnn_dataset_as_torch_loader import cgrcnn_dataset_torch
def get_grasp_dicts(root, mode="train"):
img_path = root + "images/{}/".format(mode)
bbx_path = root + "pruned_rbbxs/"
image_filenames = os.listdir(img_path)
dataset_dicts = []
for idx, filename in enumerate(image_filenames):
record = {}
record["file_name"] = img_path + filename
height, width = np.load(record["file_name"]).astype(np.float32).shape
record["image_id"] = idx
record["height"] = height
record["width"] = width
rbbxs = np.load(bbx_path + filename, allow_pickle=True)
grasps = []
for rbbx in rbbxs:
rbox = rbbx[[0, 1, 4, 3, 5]]
grasp = {
"bbox": rbox.tolist(),
"bbox_mode": BoxMode.XYWHA_ABS,
"category_id": 1,
"metric": rbbx[8],
"tilt": rbbx[6],
"z": rbbx[2],
}
grasps.append(grasp)
record["annotations"] = grasps
dataset_dicts.append(record)
return dataset_dicts
def cgrcnn_mapper(dataset_dict):
dataset_dict = copy.deepcopy(dataset_dict)
depth = np.load(dataset_dict["file_name"]).astype(np.float32)
inst = Instances(depth.shape)
depth = torch.from_numpy(np.tile(depth, (3, 1, 1)))
grasps = dataset_dict["annotations"]
gt_boxes, gt_tilts, gt_z, gt_metric = None, None, None, None
for grasp in grasps:
box, z, tilt, metric = np.array(grasp["bbox"]), np.array(grasp["z"]), np.array(grasp["tilt"]), np.array(grasp["metric"])
if gt_boxes is None:
gt_boxes, gt_tilts, gt_z, gt_metric = box, tilt, z, metric
else:
gt_boxes = np.vstack((gt_boxes, box))
gt_tilts = np.hstack((gt_tilts, tilt))
gt_z = np.hstack((gt_z, z))
gt_metric = np.hstack((gt_metric, metric))
inst.gt_boxes = RotatedBoxes(torch.from_numpy(gt_boxes.astype(np.float32).reshape(-1, 5)))
# inst.gt_tilts = torch.from_numpy(gt_tilts.astype(np.float32))
# inst.gt_z = torch.from_numpy(gt_z.astype(np.float32))
# inst.gt_metric = torch.from_numpy(gt_metric.astype(np.float32))
inst.gt_classes = torch.ones(gt_boxes.shape[0], dtype=torch.int64)
return {"image": depth, "instances": inst}
def build_as_detection_loader(cfg, root):
# d = "train"
# dataset_dicts = get_grasp_dicts(root, mode=d)
# inputs = cgrcnn_mapper(dataset_dicts[0])
for d in ["train", "test"]:
DatasetCatalog.register("grasp_" + d, lambda d=d: get_grasp_dicts(root, mode=d))
MetadataCatalog.get("grasp_" + d).set(thing_classes=["grasps"])
grasp_metadata = MetadataCatalog.get("grasp_train")
trainloader = build_detection_train_loader(cfg, mapper=cgrcnn_mapper)
return trainloader
def build_as_torch_loader(root, mode="train", batch_size=16, num_workers=0):
if mode == "train":
train_dataset = cgrcnn_dataset_torch(root, mode=mode)
train_sampler = torch.utils.data.RandomSampler(train_dataset, replacement=False, num_samples=None, generator=None)
trainloader = build_batch_data_loader(dataset=train_dataset, sampler=train_sampler, total_batch_size=batch_size, aspect_ratio_grouping=False, num_workers=num_workers)
return trainloader
elif mode == "test":
test_dataset = cgrcnn_dataset_torch(root, mode=mode)
test_sampler = torch.utils.data.RandomSampler(test_dataset, replacement=False, num_samples=None, generator=None)
testloader = build_batch_data_loader(dataset=test_dataset, sampler=test_sampler, total_batch_size=batch_size, aspect_ratio_grouping=False, num_workers=num_workers)
return testloader
| [
"numpy.tile",
"os.listdir",
"detectron2.data.build.build_detection_train_loader",
"data.cgrcnn_dataset_as_torch_loader.cgrcnn_dataset_torch",
"numpy.hstack",
"torch.utils.data.RandomSampler",
"sys.path.remove",
"detectron2.structures.Instances",
"numpy.array",
"detectron2.data.build.build_batch_da... | [((71, 134), 'sys.path.remove', 'sys.path.remove', (['"""/opt/ros/kinetic/lib/python2.7/dist-packages"""'], {}), "('/opt/ros/kinetic/lib/python2.7/dist-packages')\n", (86, 134), False, 'import os, sys, random, copy\n'), ((696, 716), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (706, 716), False, 'import os, sys, random, copy\n'), ((1659, 1686), 'copy.deepcopy', 'copy.deepcopy', (['dataset_dict'], {}), '(dataset_dict)\n', (1672, 1686), False, 'import os, sys, random, copy\n'), ((1764, 1786), 'detectron2.structures.Instances', 'Instances', (['depth.shape'], {}), '(depth.shape)\n', (1773, 1786), False, 'from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes\n'), ((2746, 2794), 'torch.ones', 'torch.ones', (['gt_boxes.shape[0]'], {'dtype': 'torch.int64'}), '(gt_boxes.shape[0], dtype=torch.int64)\n', (2756, 2794), False, 'import torch\n'), ((3231, 3265), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['"""grasp_train"""'], {}), "('grasp_train')\n", (3250, 3265), False, 'from detectron2.data import MetadataCatalog, DatasetCatalog\n'), ((3289, 3344), 'detectron2.data.build.build_detection_train_loader', 'build_detection_train_loader', (['cfg'], {'mapper': 'cgrcnn_mapper'}), '(cfg, mapper=cgrcnn_mapper)\n', (3317, 3344), False, 'from detectron2.data.build import build_detection_train_loader, build_batch_data_loader\n'), ((1068, 1115), 'numpy.load', 'np.load', (['(bbx_path + filename)'], {'allow_pickle': '(True)'}), '(bbx_path + filename, allow_pickle=True)\n', (1075, 1115), True, 'import numpy as np\n'), ((1816, 1841), 'numpy.tile', 'np.tile', (['depth', '(3, 1, 1)'], {}), '(depth, (3, 1, 1))\n', (1823, 1841), True, 'import numpy as np\n'), ((3494, 3531), 'data.cgrcnn_dataset_as_torch_loader.cgrcnn_dataset_torch', 'cgrcnn_dataset_torch', (['root'], {'mode': 'mode'}), '(root, mode=mode)\n', (3514, 3531), False, 'from data.cgrcnn_dataset_as_torch_loader import 
cgrcnn_dataset_torch\n'), ((3556, 3658), 'torch.utils.data.RandomSampler', 'torch.utils.data.RandomSampler', (['train_dataset'], {'replacement': '(False)', 'num_samples': 'None', 'generator': 'None'}), '(train_dataset, replacement=False,\n num_samples=None, generator=None)\n', (3586, 3658), False, 'import torch\n'), ((3677, 3838), 'detectron2.data.build.build_batch_data_loader', 'build_batch_data_loader', ([], {'dataset': 'train_dataset', 'sampler': 'train_sampler', 'total_batch_size': 'batch_size', 'aspect_ratio_grouping': '(False)', 'num_workers': 'num_workers'}), '(dataset=train_dataset, sampler=train_sampler,\n total_batch_size=batch_size, aspect_ratio_grouping=False, num_workers=\n num_workers)\n', (3700, 3838), False, 'from detectron2.data.build import build_detection_train_loader, build_batch_data_loader\n'), ((1699, 1733), 'numpy.load', 'np.load', (["dataset_dict['file_name']"], {}), "(dataset_dict['file_name'])\n", (1706, 1733), True, 'import numpy as np\n'), ((2010, 2033), 'numpy.array', 'np.array', (["grasp['bbox']"], {}), "(grasp['bbox'])\n", (2018, 2033), True, 'import numpy as np\n'), ((2035, 2055), 'numpy.array', 'np.array', (["grasp['z']"], {}), "(grasp['z'])\n", (2043, 2055), True, 'import numpy as np\n'), ((2057, 2080), 'numpy.array', 'np.array', (["grasp['tilt']"], {}), "(grasp['tilt'])\n", (2065, 2080), True, 'import numpy as np\n'), ((2082, 2107), 'numpy.array', 'np.array', (["grasp['metric']"], {}), "(grasp['metric'])\n", (2090, 2107), True, 'import numpy as np\n'), ((2245, 2271), 'numpy.vstack', 'np.vstack', (['(gt_boxes, box)'], {}), '((gt_boxes, box))\n', (2254, 2271), True, 'import numpy as np\n'), ((2295, 2322), 'numpy.hstack', 'np.hstack', (['(gt_tilts, tilt)'], {}), '((gt_tilts, tilt))\n', (2304, 2322), True, 'import numpy as np\n'), ((2342, 2362), 'numpy.hstack', 'np.hstack', (['(gt_z, z)'], {}), '((gt_z, z))\n', (2351, 2362), True, 'import numpy as np\n'), ((2387, 2417), 'numpy.hstack', 'np.hstack', (['(gt_metric, metric)'], {}), 
'((gt_metric, metric))\n', (2396, 2417), True, 'import numpy as np\n'), ((3905, 3942), 'data.cgrcnn_dataset_as_torch_loader.cgrcnn_dataset_torch', 'cgrcnn_dataset_torch', (['root'], {'mode': 'mode'}), '(root, mode=mode)\n', (3925, 3942), False, 'from data.cgrcnn_dataset_as_torch_loader import cgrcnn_dataset_torch\n'), ((3966, 4068), 'torch.utils.data.RandomSampler', 'torch.utils.data.RandomSampler', (['test_dataset'], {'replacement': '(False)', 'num_samples': 'None', 'generator': 'None'}), '(test_dataset, replacement=False, num_samples\n =None, generator=None)\n', (3996, 4068), False, 'import torch\n'), ((4085, 4244), 'detectron2.data.build.build_batch_data_loader', 'build_batch_data_loader', ([], {'dataset': 'test_dataset', 'sampler': 'test_sampler', 'total_batch_size': 'batch_size', 'aspect_ratio_grouping': '(False)', 'num_workers': 'num_workers'}), '(dataset=test_dataset, sampler=test_sampler,\n total_batch_size=batch_size, aspect_ratio_grouping=False, num_workers=\n num_workers)\n', (4108, 4244), False, 'from detectron2.data.build import build_detection_train_loader, build_batch_data_loader\n'), ((3146, 3179), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (["('grasp_' + d)"], {}), "('grasp_' + d)\n", (3165, 3179), False, 'from detectron2.data import MetadataCatalog, DatasetCatalog\n'), ((892, 920), 'numpy.load', 'np.load', (["record['file_name']"], {}), "(record['file_name'])\n", (899, 920), True, 'import numpy as np\n')] |
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import model
import simulation
import complexity
import rateless
import plot
import stats
from math import log2
from evaluation.binsearch import SampleEvaluator
from evaluation import analytic
from solvers.heuristicsolver import HeuristicSolver
from functools import partial
# pyplot setup
plt.style.use('ggplot')
plt.rc('pgf', texsystem='pdflatex')
plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = [r'\usepackage{lmodern}']
plt.rcParams['figure.figsize'] = (6, 6)
plt.rcParams['figure.dpi'] = 200
def get_parameters_size_10():
'''Get a list of parameters for the size plot.'''
rows_per_server = 2000
rows_per_partition = 10
code_rate = 2/3
muq = 2
num_columns = None
num_outputs_factor = 10
parameters = list()
num_servers = [5, 8, 20, 50, 80, 125, 200]
for servers in num_servers:
par = model.SystemParameters.fixed_complexity_parameters(
rows_per_server=rows_per_server,
rows_per_partition=rows_per_partition,
min_num_servers=servers,
code_rate=code_rate,
muq=muq,
num_columns=num_columns,
num_outputs_factor=num_outputs_factor
)
parameters.append(par)
return parameters
def get_parameters_size_20():
'''Get a list of parameters for the size plot.'''
rows_per_server = 2000
rows_per_partition = 20
code_rate = 2/3
muq = 2
num_columns = None
num_outputs_factor = 10
parameters = list()
num_servers = [5, 8, 20, 50, 80, 125, 200]
for servers in num_servers:
par = model.SystemParameters.fixed_complexity_parameters(
rows_per_server=rows_per_server,
rows_per_partition=rows_per_partition,
min_num_servers=servers,
code_rate=code_rate,
muq=muq,
num_columns=num_columns,
num_outputs_factor=num_outputs_factor
)
parameters.append(par)
return parameters
def r10_pdf(overhead_levels, num_inputs=None, **kwargs):
'''approximate completion PDF for R10 codes'''
assert num_inputs is not None
overhead_levels = np.fromiter(overhead_levels, dtype=float)
overhead_levels.sort()
if overhead_levels[1]-1 >= 0:
overhead_levels -= 1
pdf = np.zeros(len(overhead_levels))
# evaluate the CDF at discrete points and extrapolate between them
pf = [1e-1, 1e-2, 1e-3]
a = [5.43476844e-03, 9.24066372e-03, 1.36168053e-02]
# average of 1e-1 and 1e-2 values since the distance to the measured pf is
# much smaller for these points.
b = (8.09230267e-06 + 8.01977332e-06) / 2
f = lambda t,a,b: a+b*t
eps = np.finfo(float).eps
# compute the overhead required for a failure probability of 1e-1, 1e-2,
# 1e-3, and close to 0.
y1 = 1-1e-1
y2 = 1-1e-2
y3 = 1-1e-3
y4 = 1
x1 = f(num_inputs, a[0], b) # 1e-1
x2 = f(num_inputs, a[1], b) # 1e-2
x3 = f(num_inputs, a[2], b) # 1e-3
x4 = 2*x3 # assume failure probability 0 here
# find the break points
i1 = np.searchsorted(overhead_levels, x1)
i2 = np.searchsorted(overhead_levels, x2)
i3 = np.searchsorted(overhead_levels, x3)
i4 = np.searchsorted(overhead_levels, x4)
# assign the derivative of the cdf
pdf[i1-1] = y1
pdf[i1:i2] = (y2-y1) / (i2-i1)
pdf[i2:i3] = (y3-y2) / (i3-i2)
pdf[i3:i4] = (y4-y3) / (i4-i3)
pdf[i4:] = 0
print('pdf', pdf)
assert np.allclose(pdf.sum(), 1), "sum of pdf should be 1, but is {}".format(pdf.sum())
return pdf
def rq_pdf(overhead_levels, num_inputs=None, **kwargs):
'''approximate completion PDF for RQ codes'''
assert num_inputs is not None
overhead_levels = np.fromiter(overhead_levels, dtype=float)
overhead_levels.sort()
if overhead_levels[1]-1 >= 0:
overhead_levels -= 1
cdf = np.zeros(len(overhead_levels))
cdf[overhead_levels >= 0] = 1-1/100
cdf[overhead_levels >= 1/num_inputs] = 1-1/1e4
cdf[overhead_levels >= 2/num_inputs] = 1-1/1e6
cdf[overhead_levels >= 3/num_inputs] = 1 # assume zero failure probability here
pdf = np.zeros(len(overhead_levels))
pdf[1:] = np.diff(cdf)
pdf[0] = cdf[0]
assert np.allclose(pdf.sum(), 1), "sum of pdf should be 1, but is {}".format(pdf.sum())
return pdf
def R10_decoding_complexity(parameters):
assert isinstance(parameters, model.SystemParameters)
# linear regression of complexity at failure probability 1e-1
a = 2.22413913e+02
b = 3.64861777e-02
# complexity as fp=1e-1 as a function of num_inputs
f = lambda t,a,b: a+b*t
return f(parameters.num_source_rows, a, b)
def RQ_decoding_complexity(parameters):
assert isinstance(parameters, model.SystemParameters)
# TODO: fix
# linear regression of complexity at failure probability 1e-1
a = 2.22413913e+02
b = 3.64861777e-02
# complexity as fp=1e-1 as a function of num_inputs
f = lambda t,a,b: a+b*t
return f(parameters.num_source_rows, a, b)
def rateless_evaluate(parameters, code='R10', pdf_fun=None, cachedir=None, partitioned=False):
'''evaluate LT code performance.
args:
parameters: system parameters.
returns: dict with performance results.
'''
assert isinstance(parameters, model.SystemParameters)
assert code in ['R10', 'RQ']
result = dict()
# we encode each column of the input matrix separately
if code == 'R10':
result['encoding_multiplications'] = R10_encoding_complexity(parameters)
elif code == 'RQ':
result['encoding_multiplications'] = RQ_encoding_complexity(parameters)
result['encoding_multiplications'] *= parameters.num_columns
# we decode each output vector separately
if code == 'R10':
result['decoding_multiplications'] = R10_decoding_complexity(parameters)
elif code == 'RQ':
result['decoding_multiplications'] = RQ_decoding_complexity(parameters)
result['decoding_multiplications'] *= parameters.num_outputs
# each coded row is encoded by server_storage * q = muq servers.
result['encoding_multiplications'] *= parameters.muq
# compute encoding delay
result['encode'] = stats.order_mean_shiftexp(
parameters.num_servers,
parameters.num_servers,
parameter=result['encoding_multiplications'] / parameters.num_servers,
)
# compute decoding delay
result['reduce'] = stats.order_mean_shiftexp(
parameters.q,
parameters.q,
parameter=result['decoding_multiplications'] / parameters.q,
)
# simulate the map phase load/delay. this simulation takes into account the
# probability of decoding at various levels of overhead.
simulated = rateless.performance_integral(
parameters=parameters,
num_inputs=parameters.num_source_rows,
target_overhead=1,
mode=0,
delta=0,
pdf_fun=pdf_fun,
max_overhead=1.1,
cachedir=cachedir,
)
result['delay'] = simulated['delay']
result['load'] = simulated['load']
return result
# Setup the evaluators
sample_100 = SampleEvaluator(num_samples=100)
sample_1000 = SampleEvaluator(num_samples=1000)
# evaluator functions
uncoded_fun = partial(
simulation.simulate,
directory='./results/Uncoded/',
samples=1,
parameter_eval=analytic.uncoded_performance,
)
heuristic_fun = partial(
simulation.simulate,
directory='./results/Heuristic/',
samples=1,
solver=HeuristicSolver(),
assignment_eval=sample_1000,
)
lt_fun = partial(
simulation.simulate,
directory='./results/LT_3_1/',
samples=1,
parameter_eval=partial(
rateless.evaluate,
target_overhead=1.3,
target_failure_probability=1e-1,
),
)
r10_fun = partial(
simulation.simulate,
directory='./results/R10/',
samples=1,
parameter_eval=partial(
rateless_evaluate,
code='R10',
pdf_fun=r10_pdf,
cachedir='./results/R10',
),
)
rq_fun = partial(
simulation.simulate,
directory='./results/RQ/',
samples=1,
parameter_eval=partial(
rateless_evaluate,
code='RQ',
pdf_fun=rq_pdf,
),
)
rs_fun = partial(
simulation.simulate,
directory='./results/RS/',
samples=1,
parameter_eval=analytic.mds_performance,
)
heuristic_plot_settings = {
'label': r'BDC',
'color': 'r',
'marker': 'd-'}
heuristic_fft_plot_settings = {
'label': r'BDC FFT',
'color': 'g',
'marker': 's-'}
r10_plot_settings = {
'label': r'R10',
'color': 'g',
'marker': 's-',
'linewidth': 4,
'size': 2}
rq_plot_settings = {
'label': r'RQ',
'color': 'b',
'marker': 'd-',
'linewidth': 4,
'size': 2}
lt_plot_settings = {
'label': r'LT',
'color': 'c',
'marker': 'v-',
'linewidth': 4,
'size': 2}
rs_plot_settings = {
'label': r'RS BM',
'color': 'k',
'marker': 'v-'}
rs_fft_plot_settings = {
'label': r'RS FFT',
'color': 'k',
'marker': '-o'}
def complexity_from_df(df):
'''assuming GF256 source symbols'''
# each binary operations means adding two GF256 symbols
df['complexity'] = 8*df['b']
# each GF256-addition means multiplying by a GF256-symbols and adding then
# adding.
df['complexity'] += (8*log2(8)+8)*df['f']
return df
def R10_encoding_complexity(p, partitioned=True):
'''return the encoding complexity per source matrix column'''
assert isinstance(p, model.SystemParameters)
df = pd.read_csv("./R10.csv")
complexity_from_df(df)
K = p.num_source_rows
if partitioned:
K /= p.rows_per_batch
i = df['K'].searchsorted(K)
if i >= len(df):
return np.inf
K1, K2 = df['K'].values[i-1], df['K'].values[i]
c1, c2 = df['complexity'].values[i-1], df['complexity'].values[i]
precode = ((K-K1)*c1 + (K2-K)*c2) / (K2-K1)
if partitioned:
precode *= p.rows_per_batch
outer = p.num_coded_rows * 4.631353378295898 * 8
return float(precode + outer)
def RQ_encoding_complexity(p, partitioned=True):
'''return the encoding complexity per source matrix column'''
assert isinstance(p, model.SystemParameters)
df = pd.read_csv("./RQ.csv")
complexity_from_df(df)
K = p.num_source_rows
if partitioned:
K /= p.rows_per_batch
i = df['K'].searchsorted(K)
if i >= len(df):
return np.inf
K1, K2 = df['K'].values[i-1], df['K'].values[i]
c1, c2 = df['complexity'].values[i-1], df['complexity'].values[i]
precode = ((K-K1)*c1 + (K2-K)*c2) / (K2-K1)
if partitioned:
precode *= p.rows_per_batch
outer = p.num_coded_rows * 7.152566 * 8
return float(precode + outer)
def precode_complexity_plot():
R10 = pd.read_csv("./R10.csv")
complexity_from_df(R10)
RQ = pd.read_csv("./RQ.csv")
complexity_from_df(RQ)
print(R10)
print(RQ)
plt.figure()
plt.semilogx(R10['K'], R10['complexity'] / R10['K'], label='R10')
plt.semilogx(RQ['K'], RQ['complexity'] / RQ['K'], label='RQ')
plt.title("Raptor Precode Complexity")
plt.xlabel('Source Symbols')
plt.ylabel('Complexity Per Source Symbol')
plt.tight_layout()
plt.xlim(0, 1e5)
plt.ylim(0, 600)
plt.savefig('./plots/180419/precode_complexity.png', dpi='figure')
plt.show()
return
def encoding_complexity_plot():
parameters = get_parameters_size_10()
x = [p.num_source_rows for p in parameters]
plt.figure()
R10 = [R10_encoding_complexity(p) for p in parameters]
RQ = [RQ_encoding_complexity(p) for p in parameters]
plt.semilogy(x, R10, label='R10 Partitioned')
plt.semilogy(x, RQ, label='RQ Partitioned')
R10 = [R10_encoding_complexity(p, partitioned=False) for p in parameters]
RQ = [RQ_encoding_complexity(p, partitioned=False) for p in parameters]
plt.semilogy(x, R10, label='R10')
plt.semilogy(x, RQ, label='RQ')
plt.title("Raptor Encoding Complexity")
plt.xlabel('Source Symbols')
plt.ylabel('Complexity')
plt.legend()
plt.tight_layout()
plt.xlim(0, 140000)
plt.ylim(1e5, 1e10)
plt.savefig('./plots/180419/encode_complexity.png', dpi='figure')
plt.show()
def partitioning_plot():
# num_inputs = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000]
# overhead_levels = np.linspace(0, 0.1)
# plt.figure()
# for K in num_inputs:
# plt.plot(
# overhead_levels,
# r10_pdf(overhead_levels, num_inputs=K),
# label="R10 $K={}$".format(K),
# )
# plt.plot(
# overhead_levels,
# RQ_pdf(overhead_levels, num_inputs=K),
# label="RQ $K={}$".format(K),
# )
# plt.title("R10 Completion PDF")
# plt.xlabel("Relative Overhead")
# plt.ylabel("Probability")
# plt.xlim((0, 0.1))
# plt.ylim((0, 1))
# plt.legend()
# plt.tight_layout()
# plt.show()
# return
parameters = plot.get_parameters_partitioning()
uncoded = simulation.simulate_parameter_list(
parameter_list=parameters,
simulate_fun=uncoded_fun,
map_complexity_fun=complexity.map_complexity_uncoded,
encode_delay_fun=lambda x: 0,
reduce_delay_fun=lambda x: 0,
)
heuristic = simulation.simulate_parameter_list(
parameter_list=parameters,
simulate_fun=heuristic_fun,
map_complexity_fun=complexity.map_complexity_unified,
encode_delay_fun=complexity.partitioned_encode_delay,
reduce_delay_fun=complexity.partitioned_reduce_delay,
)
r10 = simulation.simulate_parameter_list(
parameter_list=parameters,
simulate_fun=r10_fun,
map_complexity_fun=complexity.map_complexity_unified,
encode_delay_fun=False,
reduce_delay_fun=False,
)
rq = simulation.simulate_parameter_list(
parameter_list=parameters,
simulate_fun=rq_fun,
map_complexity_fun=complexity.map_complexity_unified,
encode_delay_fun=False,
reduce_delay_fun=False,
)
plot.load_delay_plot(
[heuristic,
r10,
rq],
[heuristic_plot_settings,
r10_plot_settings,
rq_plot_settings],
'num_partitions',
xlabel=r'Partitions $T$',
normalize=uncoded,
show=False,
xlim_bot=(10, 3000),
ylim_top=(0.51, 0.58),
ylim_bot=(0, 200),
)
plt.savefig("./plots/180419/load_delay.png", dpi='figure')
plot.encode_decode_plot(
[heuristic,
r10,
rq],
[heuristic_plot_settings,
r10_plot_settings,
rq_plot_settings],
'num_partitions',
xlabel=r'Partitions $T$',
normalize=None,
show=False,
xlim_bot=(2, 3000),
ylim_top=(0.2, 1),
ylim_mid=(0, 0.00035),
ylim_bot=(0, 0.8),
)
# plt.savefig("./plots/180328/phases.png", dpi='figure')
plt.show()
def size_plot():
parameters = get_parameters_size_20()[:-1]
# parameters = plot.get_parameters_partitioning_2()
uncoded = simulation.simulate_parameter_list(
parameter_list=parameters,
simulate_fun=uncoded_fun,
map_complexity_fun=complexity.map_complexity_uncoded,
encode_delay_fun=lambda x: 0,
reduce_delay_fun=lambda x: 0,
)
lt = simulation.simulate_parameter_list(
parameter_list=parameters,
simulate_fun=lt_fun,
map_complexity_fun=complexity.map_complexity_unified,
encode_delay_fun=False,
reduce_delay_fun=False,
)
r10 = simulation.simulate_parameter_list(
parameter_list=parameters,
simulate_fun=r10_fun,
map_complexity_fun=complexity.map_complexity_unified,
encode_delay_fun=False,
reduce_delay_fun=False,
)
rq = simulation.simulate_parameter_list(
parameter_list=parameters,
simulate_fun=rq_fun,
map_complexity_fun=complexity.map_complexity_unified,
encode_delay_fun=False,
reduce_delay_fun=False,
)
parameters = get_parameters_size_10()[:-1]
parameters[-2:] = get_parameters_size_20()[-3:-1]
heuristic = simulation.simulate_parameter_list(
parameter_list=parameters,
simulate_fun=heuristic_fun,
map_complexity_fun=complexity.map_complexity_unified,
encode_delay_fun=complexity.partitioned_encode_delay,
reduce_delay_fun=complexity.partitioned_reduce_delay,
)
plot.load_delay_plot(
[heuristic,
lt,
r10,
rq],
[heuristic_plot_settings,
lt_plot_settings,
r10_plot_settings,
rq_plot_settings],
'num_servers',
xlabel=r'Servers $K$',
normalize=uncoded,
show=False,
xlim_bot=(6, 201),
ylim_top=(0.4, 1),
ylim_bot=(0.8, 2.4),
)
# plt.savefig("./plots/180309/load_delay.png")
plot.encode_decode_plot(
[heuristic,
lt,
r10,
rq],
[heuristic_plot_settings,
lt_plot_settings,
r10_plot_settings,
rq_plot_settings],
'num_servers',
xlabel=r'Servers $K$',
normalize=None,
show=False,
xlim_bot=(6, 201),
ylim_top=(0, 0.3),
ylim_bot=(0, 0.001),
)
# plt.savefig("./plots/180309/encode_decode.png")
plt.show()
return
def rs_plot():
    """Compare the heuristic solver and RS codes (with the 'bm' and the
    default FFT-based delay models) against LT codes over system size.

    Renders a load/delay plot (normalized by the uncoded baseline, saved
    to disk) and an encode/decode plot.
    """
    parameters = get_parameters_size_20()
    for par in parameters:
        print(par)

    # Uncoded baseline used for normalization; no encode/reduce delay.
    uncoded = simulation.simulate_parameter_list(
        parameter_list=parameters,
        simulate_fun=uncoded_fun,
        map_complexity_fun=complexity.map_complexity_uncoded,
        encode_delay_fun=lambda x: 0,
        reduce_delay_fun=lambda x: 0,
    )

    # RS with the 'bm' delay algorithm and a single partition.
    bm_encode_delay = partial(
        complexity.partitioned_encode_delay,
        partitions=1,
        algorithm='bm',
    )
    bm_reduce_delay = partial(
        complexity.partitioned_reduce_delay,
        partitions=1,
        algorithm='bm',
    )
    rs = simulation.simulate_parameter_list(
        parameter_list=parameters,
        simulate_fun=rs_fun,
        map_complexity_fun=complexity.map_complexity_unified,
        encode_delay_fun=bm_encode_delay,
        reduce_delay_fun=bm_reduce_delay,
    )

    # RS with the default (non-'bm') delay model, single partition.
    rs_fft = simulation.simulate_parameter_list(
        parameter_list=parameters,
        simulate_fun=rs_fun,
        map_complexity_fun=complexity.map_complexity_unified,
        encode_delay_fun=partial(
            complexity.partitioned_encode_delay,
            partitions=1
        ),
        reduce_delay_fun=partial(
            complexity.partitioned_reduce_delay,
            partitions=1,
        ),
    )

    # LT codes; delays are computed within the simulation itself.
    lt = simulation.simulate_parameter_list(
        parameter_list=parameters,
        simulate_fun=lt_fun,
        map_complexity_fun=complexity.map_complexity_unified,
        encode_delay_fun=False,
        reduce_delay_fun=False,
    )

    # The heuristic uses the size-10 parameters, with the last three
    # entries swapped for size-20 systems.
    parameters = get_parameters_size_10()
    parameters[-3:] = get_parameters_size_20()[-3:]
    heuristic = simulation.simulate_parameter_list(
        parameter_list=parameters,
        simulate_fun=heuristic_fun,
        map_complexity_fun=complexity.map_complexity_unified,
        encode_delay_fun=partial(
            complexity.partitioned_encode_delay,
            algorithm='bm',
        ),
        reduce_delay_fun=partial(
            complexity.partitioned_reduce_delay,
            algorithm='bm',
        ),
    )
    heuristic_fft = simulation.simulate_parameter_list(
        parameter_list=parameters,
        simulate_fun=heuristic_fun,
        map_complexity_fun=complexity.map_complexity_unified,
        encode_delay_fun=complexity.partitioned_encode_delay,
        reduce_delay_fun=complexity.partitioned_reduce_delay,
    )

    # Load/delay plot; rs (the 'bm' variant) is deliberately left out.
    plot.load_delay_plot(
        [heuristic,
         heuristic_fft,
         rs_fft,
         lt],
        [heuristic_plot_settings,
         heuristic_fft_plot_settings,
         rs_fft_plot_settings,
         lt_plot_settings],
        'num_servers',
        xlabel=r'Servers $K$',
        normalize=uncoded,
        show=False,
        xlim_bot=(6, 201),
        ylim_top=(0.4, 0.7),
        ylim_bot=(0.5, 4.5),
    )
    plt.savefig("./plots/180419/fft_8_load_delay.png", dpi='figure')

    # Encode/decode plot comparing both RS variants with the heuristic.
    plot.encode_decode_plot(
        [heuristic,
         rs,
         rs_fft],
        [heuristic_plot_settings,
         rs_plot_settings,
         rs_fft_plot_settings],
        'num_servers',
        xlabel=r'Servers $K$',
        normalize=None,
        show=False,
        # xlim_bot=(6, 201),
        # ylim_top=(0, 0.3),
        # ylim_bot=(0, 0.001),
    )
    plt.show()
    return
if __name__ == '__main__':
    # Emit INFO-level progress messages while the simulations run.
    logging.basicConfig(level=logging.INFO)
    # Exactly one plotting entry point is enabled at a time; the others
    # are kept around, commented out, for quick switching.
    # partitioning_plot()
    # size_plot()
    rs_plot()
    # encoding_complexity_plot()
    # parameters = get_parameters_size_10()
    # precode_complexity_plot()
    # c = RQ_encoding_complexity(parameters[2])
    # print(c)
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"math.log2",
"matplotlib.pyplot.semilogy",
"numpy.fromiter",
"plot.encode_decode_plot",
"numpy.searchsorted",
"matplotlib.pyplot.xlabel",
"stats.order_mean_shiftexp",
"matplotlib.pyplot.style.use",
"numpy.diff",
"simulation.simulate_parameter_list... | [((378, 401), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (391, 401), True, 'import matplotlib.pyplot as plt\n'), ((402, 437), 'matplotlib.pyplot.rc', 'plt.rc', (['"""pgf"""'], {'texsystem': '"""pdflatex"""'}), "('pgf', texsystem='pdflatex')\n", (408, 437), True, 'import matplotlib.pyplot as plt\n'), ((439, 466), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (445, 466), True, 'import matplotlib.pyplot as plt\n'), ((7191, 7223), 'evaluation.binsearch.SampleEvaluator', 'SampleEvaluator', ([], {'num_samples': '(100)'}), '(num_samples=100)\n', (7206, 7223), False, 'from evaluation.binsearch import SampleEvaluator\n'), ((7238, 7271), 'evaluation.binsearch.SampleEvaluator', 'SampleEvaluator', ([], {'num_samples': '(1000)'}), '(num_samples=1000)\n', (7253, 7271), False, 'from evaluation.binsearch import SampleEvaluator\n'), ((7309, 7429), 'functools.partial', 'partial', (['simulation.simulate'], {'directory': '"""./results/Uncoded/"""', 'samples': '(1)', 'parameter_eval': 'analytic.uncoded_performance'}), "(simulation.simulate, directory='./results/Uncoded/', samples=1,\n parameter_eval=analytic.uncoded_performance)\n", (7316, 7429), False, 'from functools import partial\n'), ((8279, 8390), 'functools.partial', 'partial', (['simulation.simulate'], {'directory': '"""./results/RS/"""', 'samples': '(1)', 'parameter_eval': 'analytic.mds_performance'}), "(simulation.simulate, directory='./results/RS/', samples=1,\n parameter_eval=analytic.mds_performance)\n", (8286, 8390), False, 'from functools import partial\n'), ((2228, 2269), 'numpy.fromiter', 'np.fromiter', (['overhead_levels'], {'dtype': 'float'}), '(overhead_levels, dtype=float)\n', (2239, 2269), True, 'import numpy as np\n'), ((3150, 3186), 'numpy.searchsorted', 'np.searchsorted', (['overhead_levels', 'x1'], {}), '(overhead_levels, x1)\n', (3165, 3186), True, 'import numpy as 
np\n'), ((3196, 3232), 'numpy.searchsorted', 'np.searchsorted', (['overhead_levels', 'x2'], {}), '(overhead_levels, x2)\n', (3211, 3232), True, 'import numpy as np\n'), ((3242, 3278), 'numpy.searchsorted', 'np.searchsorted', (['overhead_levels', 'x3'], {}), '(overhead_levels, x3)\n', (3257, 3278), True, 'import numpy as np\n'), ((3288, 3324), 'numpy.searchsorted', 'np.searchsorted', (['overhead_levels', 'x4'], {}), '(overhead_levels, x4)\n', (3303, 3324), True, 'import numpy as np\n'), ((3798, 3839), 'numpy.fromiter', 'np.fromiter', (['overhead_levels'], {'dtype': 'float'}), '(overhead_levels, dtype=float)\n', (3809, 3839), True, 'import numpy as np\n'), ((4252, 4264), 'numpy.diff', 'np.diff', (['cdf'], {}), '(cdf)\n', (4259, 4264), True, 'import numpy as np\n'), ((6269, 6417), 'stats.order_mean_shiftexp', 'stats.order_mean_shiftexp', (['parameters.num_servers', 'parameters.num_servers'], {'parameter': "(result['encoding_multiplications'] / parameters.num_servers)"}), "(parameters.num_servers, parameters.num_servers,\n parameter=result['encoding_multiplications'] / parameters.num_servers)\n", (6294, 6417), False, 'import stats\n'), ((6498, 6617), 'stats.order_mean_shiftexp', 'stats.order_mean_shiftexp', (['parameters.q', 'parameters.q'], {'parameter': "(result['decoding_multiplications'] / parameters.q)"}), "(parameters.q, parameters.q, parameter=result[\n 'decoding_multiplications'] / parameters.q)\n", (6523, 6617), False, 'import stats\n'), ((6802, 6992), 'rateless.performance_integral', 'rateless.performance_integral', ([], {'parameters': 'parameters', 'num_inputs': 'parameters.num_source_rows', 'target_overhead': '(1)', 'mode': '(0)', 'delta': '(0)', 'pdf_fun': 'pdf_fun', 'max_overhead': '(1.1)', 'cachedir': 'cachedir'}), '(parameters=parameters, num_inputs=parameters.\n num_source_rows, target_overhead=1, mode=0, delta=0, pdf_fun=pdf_fun,\n max_overhead=1.1, cachedir=cachedir)\n', (6831, 6992), False, 'import rateless\n'), ((9594, 9618), 'pandas.read_csv', 
'pd.read_csv', (['"""./R10.csv"""'], {}), "('./R10.csv')\n", (9605, 9618), True, 'import pandas as pd\n'), ((10284, 10307), 'pandas.read_csv', 'pd.read_csv', (['"""./RQ.csv"""'], {}), "('./RQ.csv')\n", (10295, 10307), True, 'import pandas as pd\n'), ((10832, 10856), 'pandas.read_csv', 'pd.read_csv', (['"""./R10.csv"""'], {}), "('./R10.csv')\n", (10843, 10856), True, 'import pandas as pd\n'), ((10894, 10917), 'pandas.read_csv', 'pd.read_csv', (['"""./RQ.csv"""'], {}), "('./RQ.csv')\n", (10905, 10917), True, 'import pandas as pd\n'), ((10979, 10991), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10989, 10991), True, 'import matplotlib.pyplot as plt\n'), ((10996, 11061), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (["R10['K']", "(R10['complexity'] / R10['K'])"], {'label': '"""R10"""'}), "(R10['K'], R10['complexity'] / R10['K'], label='R10')\n", (11008, 11061), True, 'import matplotlib.pyplot as plt\n'), ((11066, 11127), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (["RQ['K']", "(RQ['complexity'] / RQ['K'])"], {'label': '"""RQ"""'}), "(RQ['K'], RQ['complexity'] / RQ['K'], label='RQ')\n", (11078, 11127), True, 'import matplotlib.pyplot as plt\n'), ((11132, 11170), 'matplotlib.pyplot.title', 'plt.title', (['"""Raptor Precode Complexity"""'], {}), "('Raptor Precode Complexity')\n", (11141, 11170), True, 'import matplotlib.pyplot as plt\n'), ((11175, 11203), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Source Symbols"""'], {}), "('Source Symbols')\n", (11185, 11203), True, 'import matplotlib.pyplot as plt\n'), ((11208, 11250), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Complexity Per Source Symbol"""'], {}), "('Complexity Per Source Symbol')\n", (11218, 11250), True, 'import matplotlib.pyplot as plt\n'), ((11255, 11273), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11271, 11273), True, 'import matplotlib.pyplot as plt\n'), ((11278, 11299), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(100000.0)'], {}), '(0, 
100000.0)\n', (11286, 11299), True, 'import matplotlib.pyplot as plt\n'), ((11299, 11315), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(600)'], {}), '(0, 600)\n', (11307, 11315), True, 'import matplotlib.pyplot as plt\n'), ((11320, 11386), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./plots/180419/precode_complexity.png"""'], {'dpi': '"""figure"""'}), "('./plots/180419/precode_complexity.png', dpi='figure')\n", (11331, 11386), True, 'import matplotlib.pyplot as plt\n'), ((11391, 11401), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11399, 11401), True, 'import matplotlib.pyplot as plt\n'), ((11540, 11552), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11550, 11552), True, 'import matplotlib.pyplot as plt\n'), ((11674, 11719), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x', 'R10'], {'label': '"""R10 Partitioned"""'}), "(x, R10, label='R10 Partitioned')\n", (11686, 11719), True, 'import matplotlib.pyplot as plt\n'), ((11724, 11767), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x', 'RQ'], {'label': '"""RQ Partitioned"""'}), "(x, RQ, label='RQ Partitioned')\n", (11736, 11767), True, 'import matplotlib.pyplot as plt\n'), ((11927, 11960), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x', 'R10'], {'label': '"""R10"""'}), "(x, R10, label='R10')\n", (11939, 11960), True, 'import matplotlib.pyplot as plt\n'), ((11965, 11996), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x', 'RQ'], {'label': '"""RQ"""'}), "(x, RQ, label='RQ')\n", (11977, 11996), True, 'import matplotlib.pyplot as plt\n'), ((12002, 12041), 'matplotlib.pyplot.title', 'plt.title', (['"""Raptor Encoding Complexity"""'], {}), "('Raptor Encoding Complexity')\n", (12011, 12041), True, 'import matplotlib.pyplot as plt\n'), ((12046, 12074), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Source Symbols"""'], {}), "('Source Symbols')\n", (12056, 12074), True, 'import matplotlib.pyplot as plt\n'), ((12079, 12103), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""Complexity"""'], {}), "('Complexity')\n", (12089, 12103), True, 'import matplotlib.pyplot as plt\n'), ((12108, 12120), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12118, 12120), True, 'import matplotlib.pyplot as plt\n'), ((12125, 12143), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12141, 12143), True, 'import matplotlib.pyplot as plt\n'), ((12148, 12167), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(140000)'], {}), '(0, 140000)\n', (12156, 12167), True, 'import matplotlib.pyplot as plt\n'), ((12172, 12205), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(100000.0)', '(10000000000.0)'], {}), '(100000.0, 10000000000.0)\n', (12180, 12205), True, 'import matplotlib.pyplot as plt\n'), ((12196, 12261), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./plots/180419/encode_complexity.png"""'], {'dpi': '"""figure"""'}), "('./plots/180419/encode_complexity.png', dpi='figure')\n", (12207, 12261), True, 'import matplotlib.pyplot as plt\n'), ((12266, 12276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12274, 12276), True, 'import matplotlib.pyplot as plt\n'), ((13029, 13063), 'plot.get_parameters_partitioning', 'plot.get_parameters_partitioning', ([], {}), '()\n', (13061, 13063), False, 'import plot\n'), ((13078, 13288), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'uncoded_fun', 'map_complexity_fun': 'complexity.map_complexity_uncoded', 'encode_delay_fun': '(lambda x: 0)', 'reduce_delay_fun': '(lambda x: 0)'}), '(parameter_list=parameters, simulate_fun=\n uncoded_fun, map_complexity_fun=complexity.map_complexity_uncoded,\n encode_delay_fun=lambda x: 0, reduce_delay_fun=lambda x: 0)\n', (13112, 13288), False, 'import simulation\n'), ((13343, 13608), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'heuristic_fun', 'map_complexity_fun': 
'complexity.map_complexity_unified', 'encode_delay_fun': 'complexity.partitioned_encode_delay', 'reduce_delay_fun': 'complexity.partitioned_reduce_delay'}), '(parameter_list=parameters, simulate_fun=\n heuristic_fun, map_complexity_fun=complexity.map_complexity_unified,\n encode_delay_fun=complexity.partitioned_encode_delay, reduce_delay_fun=\n complexity.partitioned_reduce_delay)\n', (13377, 13608), False, 'import simulation\n'), ((13652, 13846), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'r10_fun', 'map_complexity_fun': 'complexity.map_complexity_unified', 'encode_delay_fun': '(False)', 'reduce_delay_fun': '(False)'}), '(parameter_list=parameters, simulate_fun=\n r10_fun, map_complexity_fun=complexity.map_complexity_unified,\n encode_delay_fun=False, reduce_delay_fun=False)\n', (13686, 13846), False, 'import simulation\n'), ((13894, 14087), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'rq_fun', 'map_complexity_fun': 'complexity.map_complexity_unified', 'encode_delay_fun': '(False)', 'reduce_delay_fun': '(False)'}), '(parameter_list=parameters, simulate_fun=\n rq_fun, map_complexity_fun=complexity.map_complexity_unified,\n encode_delay_fun=False, reduce_delay_fun=False)\n', (13928, 14087), False, 'import simulation\n'), ((14131, 14387), 'plot.load_delay_plot', 'plot.load_delay_plot', (['[heuristic, r10, rq]', '[heuristic_plot_settings, r10_plot_settings, rq_plot_settings]', '"""num_partitions"""'], {'xlabel': '"""Partitions $T$"""', 'normalize': 'uncoded', 'show': '(False)', 'xlim_bot': '(10, 3000)', 'ylim_top': '(0.51, 0.58)', 'ylim_bot': '(0, 200)'}), "([heuristic, r10, rq], [heuristic_plot_settings,\n r10_plot_settings, rq_plot_settings], 'num_partitions', xlabel=\n 'Partitions $T$', normalize=uncoded, show=False, xlim_bot=(10, 3000),\n ylim_top=(0.51, 0.58), ylim_bot=(0, 200))\n", (14151, 
14387), False, 'import plot\n'), ((14495, 14553), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./plots/180419/load_delay.png"""'], {'dpi': '"""figure"""'}), "('./plots/180419/load_delay.png', dpi='figure')\n", (14506, 14553), True, 'import matplotlib.pyplot as plt\n'), ((14559, 14833), 'plot.encode_decode_plot', 'plot.encode_decode_plot', (['[heuristic, r10, rq]', '[heuristic_plot_settings, r10_plot_settings, rq_plot_settings]', '"""num_partitions"""'], {'xlabel': '"""Partitions $T$"""', 'normalize': 'None', 'show': '(False)', 'xlim_bot': '(2, 3000)', 'ylim_top': '(0.2, 1)', 'ylim_mid': '(0, 0.00035)', 'ylim_bot': '(0, 0.8)'}), "([heuristic, r10, rq], [heuristic_plot_settings,\n r10_plot_settings, rq_plot_settings], 'num_partitions', xlabel=\n 'Partitions $T$', normalize=None, show=False, xlim_bot=(2, 3000),\n ylim_top=(0.2, 1), ylim_mid=(0, 0.00035), ylim_bot=(0, 0.8))\n", (14582, 14833), False, 'import plot\n'), ((15010, 15020), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15018, 15020), True, 'import matplotlib.pyplot as plt\n'), ((15157, 15367), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'uncoded_fun', 'map_complexity_fun': 'complexity.map_complexity_uncoded', 'encode_delay_fun': '(lambda x: 0)', 'reduce_delay_fun': '(lambda x: 0)'}), '(parameter_list=parameters, simulate_fun=\n uncoded_fun, map_complexity_fun=complexity.map_complexity_uncoded,\n encode_delay_fun=lambda x: 0, reduce_delay_fun=lambda x: 0)\n', (15191, 15367), False, 'import simulation\n'), ((15415, 15608), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'lt_fun', 'map_complexity_fun': 'complexity.map_complexity_unified', 'encode_delay_fun': '(False)', 'reduce_delay_fun': '(False)'}), '(parameter_list=parameters, simulate_fun=\n lt_fun, map_complexity_fun=complexity.map_complexity_unified,\n 
encode_delay_fun=False, reduce_delay_fun=False)\n', (15449, 15608), False, 'import simulation\n'), ((15657, 15851), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'r10_fun', 'map_complexity_fun': 'complexity.map_complexity_unified', 'encode_delay_fun': '(False)', 'reduce_delay_fun': '(False)'}), '(parameter_list=parameters, simulate_fun=\n r10_fun, map_complexity_fun=complexity.map_complexity_unified,\n encode_delay_fun=False, reduce_delay_fun=False)\n', (15691, 15851), False, 'import simulation\n'), ((15899, 16092), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'rq_fun', 'map_complexity_fun': 'complexity.map_complexity_unified', 'encode_delay_fun': '(False)', 'reduce_delay_fun': '(False)'}), '(parameter_list=parameters, simulate_fun=\n rq_fun, map_complexity_fun=complexity.map_complexity_unified,\n encode_delay_fun=False, reduce_delay_fun=False)\n', (15933, 16092), False, 'import simulation\n'), ((16248, 16513), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'heuristic_fun', 'map_complexity_fun': 'complexity.map_complexity_unified', 'encode_delay_fun': 'complexity.partitioned_encode_delay', 'reduce_delay_fun': 'complexity.partitioned_reduce_delay'}), '(parameter_list=parameters, simulate_fun=\n heuristic_fun, map_complexity_fun=complexity.map_complexity_unified,\n encode_delay_fun=complexity.partitioned_encode_delay, reduce_delay_fun=\n complexity.partitioned_reduce_delay)\n', (16282, 16513), False, 'import simulation\n'), ((16552, 16819), 'plot.load_delay_plot', 'plot.load_delay_plot', (['[heuristic, lt, r10, rq]', '[heuristic_plot_settings, lt_plot_settings, r10_plot_settings, rq_plot_settings\n ]', '"""num_servers"""'], {'xlabel': '"""Servers $K$"""', 'normalize': 'uncoded', 'show': '(False)', 'xlim_bot': '(6, 
201)', 'ylim_top': '(0.4, 1)', 'ylim_bot': '(0.8, 2.4)'}), "([heuristic, lt, r10, rq], [heuristic_plot_settings,\n lt_plot_settings, r10_plot_settings, rq_plot_settings], 'num_servers',\n xlabel='Servers $K$', normalize=uncoded, show=False, xlim_bot=(6, 201),\n ylim_top=(0.4, 1), ylim_bot=(0.8, 2.4))\n", (16572, 16819), False, 'import plot\n'), ((16998, 17265), 'plot.encode_decode_plot', 'plot.encode_decode_plot', (['[heuristic, lt, r10, rq]', '[heuristic_plot_settings, lt_plot_settings, r10_plot_settings, rq_plot_settings\n ]', '"""num_servers"""'], {'xlabel': '"""Servers $K$"""', 'normalize': 'None', 'show': '(False)', 'xlim_bot': '(6, 201)', 'ylim_top': '(0, 0.3)', 'ylim_bot': '(0, 0.001)'}), "([heuristic, lt, r10, rq], [heuristic_plot_settings,\n lt_plot_settings, r10_plot_settings, rq_plot_settings], 'num_servers',\n xlabel='Servers $K$', normalize=None, show=False, xlim_bot=(6, 201),\n ylim_top=(0, 0.3), ylim_bot=(0, 0.001))\n", (17021, 17265), False, 'import plot\n'), ((17447, 17457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17455, 17457), True, 'import matplotlib.pyplot as plt\n'), ((17577, 17787), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'uncoded_fun', 'map_complexity_fun': 'complexity.map_complexity_uncoded', 'encode_delay_fun': '(lambda x: 0)', 'reduce_delay_fun': '(lambda x: 0)'}), '(parameter_list=parameters, simulate_fun=\n uncoded_fun, map_complexity_fun=complexity.map_complexity_uncoded,\n encode_delay_fun=lambda x: 0, reduce_delay_fun=lambda x: 0)\n', (17611, 17787), False, 'import simulation\n'), ((18728, 18921), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'lt_fun', 'map_complexity_fun': 'complexity.map_complexity_unified', 'encode_delay_fun': '(False)', 'reduce_delay_fun': '(False)'}), '(parameter_list=parameters, simulate_fun=\n lt_fun, 
map_complexity_fun=complexity.map_complexity_unified,\n encode_delay_fun=False, reduce_delay_fun=False)\n', (18762, 18921), False, 'import simulation\n'), ((19509, 19774), 'simulation.simulate_parameter_list', 'simulation.simulate_parameter_list', ([], {'parameter_list': 'parameters', 'simulate_fun': 'heuristic_fun', 'map_complexity_fun': 'complexity.map_complexity_unified', 'encode_delay_fun': 'complexity.partitioned_encode_delay', 'reduce_delay_fun': 'complexity.partitioned_reduce_delay'}), '(parameter_list=parameters, simulate_fun=\n heuristic_fun, map_complexity_fun=complexity.map_complexity_unified,\n encode_delay_fun=complexity.partitioned_encode_delay, reduce_delay_fun=\n complexity.partitioned_reduce_delay)\n', (19543, 19774), False, 'import simulation\n'), ((19812, 20115), 'plot.load_delay_plot', 'plot.load_delay_plot', (['[heuristic, heuristic_fft, rs_fft, lt]', '[heuristic_plot_settings, heuristic_fft_plot_settings, rs_fft_plot_settings,\n lt_plot_settings]', '"""num_servers"""'], {'xlabel': '"""Servers $K$"""', 'normalize': 'uncoded', 'show': '(False)', 'xlim_bot': '(6, 201)', 'ylim_top': '(0.4, 0.7)', 'ylim_bot': '(0.5, 4.5)'}), "([heuristic, heuristic_fft, rs_fft, lt], [\n heuristic_plot_settings, heuristic_fft_plot_settings,\n rs_fft_plot_settings, lt_plot_settings], 'num_servers', xlabel=\n 'Servers $K$', normalize=uncoded, show=False, xlim_bot=(6, 201),\n ylim_top=(0.4, 0.7), ylim_bot=(0.5, 4.5))\n", (19832, 20115), False, 'import plot\n'), ((20280, 20344), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./plots/180419/fft_8_load_delay.png"""'], {'dpi': '"""figure"""'}), "('./plots/180419/fft_8_load_delay.png', dpi='figure')\n", (20291, 20344), True, 'import matplotlib.pyplot as plt\n'), ((20350, 20539), 'plot.encode_decode_plot', 'plot.encode_decode_plot', (['[heuristic, rs, rs_fft]', '[heuristic_plot_settings, rs_plot_settings, rs_fft_plot_settings]', '"""num_servers"""'], {'xlabel': '"""Servers $K$"""', 'normalize': 'None', 'show': 
'(False)'}), "([heuristic, rs, rs_fft], [heuristic_plot_settings,\n rs_plot_settings, rs_fft_plot_settings], 'num_servers', xlabel=\n 'Servers $K$', normalize=None, show=False)\n", (20373, 20539), False, 'import plot\n'), ((20716, 20726), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20724, 20726), True, 'import matplotlib.pyplot as plt\n'), ((20771, 20810), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (20790, 20810), False, 'import logging\n'), ((944, 1199), 'model.SystemParameters.fixed_complexity_parameters', 'model.SystemParameters.fixed_complexity_parameters', ([], {'rows_per_server': 'rows_per_server', 'rows_per_partition': 'rows_per_partition', 'min_num_servers': 'servers', 'code_rate': 'code_rate', 'muq': 'muq', 'num_columns': 'num_columns', 'num_outputs_factor': 'num_outputs_factor'}), '(rows_per_server=\n rows_per_server, rows_per_partition=rows_per_partition, min_num_servers\n =servers, code_rate=code_rate, muq=muq, num_columns=num_columns,\n num_outputs_factor=num_outputs_factor)\n', (994, 1199), False, 'import model\n'), ((1673, 1928), 'model.SystemParameters.fixed_complexity_parameters', 'model.SystemParameters.fixed_complexity_parameters', ([], {'rows_per_server': 'rows_per_server', 'rows_per_partition': 'rows_per_partition', 'min_num_servers': 'servers', 'code_rate': 'code_rate', 'muq': 'muq', 'num_columns': 'num_columns', 'num_outputs_factor': 'num_outputs_factor'}), '(rows_per_server=\n rows_per_server, rows_per_partition=rows_per_partition, min_num_servers\n =servers, code_rate=code_rate, muq=muq, num_columns=num_columns,\n num_outputs_factor=num_outputs_factor)\n', (1723, 1928), False, 'import model\n'), ((2760, 2775), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2768, 2775), True, 'import numpy as np\n'), ((7559, 7576), 'solvers.heuristicsolver.HeuristicSolver', 'HeuristicSolver', ([], {}), '()\n', (7574, 7576), False, 'from solvers.heuristicsolver import 
HeuristicSolver\n'), ((7725, 7804), 'functools.partial', 'partial', (['rateless.evaluate'], {'target_overhead': '(1.3)', 'target_failure_probability': '(0.1)'}), '(rateless.evaluate, target_overhead=1.3, target_failure_probability=0.1)\n', (7732, 7804), False, 'from functools import partial\n'), ((7950, 8036), 'functools.partial', 'partial', (['rateless_evaluate'], {'code': '"""R10"""', 'pdf_fun': 'r10_pdf', 'cachedir': '"""./results/R10"""'}), "(rateless_evaluate, code='R10', pdf_fun=r10_pdf, cachedir=\n './results/R10')\n", (7957, 8036), False, 'from functools import partial\n'), ((8182, 8235), 'functools.partial', 'partial', (['rateless_evaluate'], {'code': '"""RQ"""', 'pdf_fun': 'rq_pdf'}), "(rateless_evaluate, code='RQ', pdf_fun=rq_pdf)\n", (8189, 8235), False, 'from functools import partial\n'), ((18022, 18096), 'functools.partial', 'partial', (['complexity.partitioned_encode_delay'], {'partitions': '(1)', 'algorithm': '"""bm"""'}), "(complexity.partitioned_encode_delay, partitions=1, algorithm='bm')\n", (18029, 18096), False, 'from functools import partial\n'), ((18170, 18244), 'functools.partial', 'partial', (['complexity.partitioned_reduce_delay'], {'partitions': '(1)', 'algorithm': '"""bm"""'}), "(complexity.partitioned_reduce_delay, partitions=1, algorithm='bm')\n", (18177, 18244), False, 'from functools import partial\n'), ((18499, 18557), 'functools.partial', 'partial', (['complexity.partitioned_encode_delay'], {'partitions': '(1)'}), '(complexity.partitioned_encode_delay, partitions=1)\n', (18506, 18557), False, 'from functools import partial\n'), ((18618, 18676), 'functools.partial', 'partial', (['complexity.partitioned_reduce_delay'], {'partitions': '(1)'}), '(complexity.partitioned_reduce_delay, partitions=1)\n', (18625, 18676), False, 'from functools import partial\n'), ((19264, 19324), 'functools.partial', 'partial', (['complexity.partitioned_encode_delay'], {'algorithm': '"""bm"""'}), "(complexity.partitioned_encode_delay, algorithm='bm')\n", 
(19271, 19324), False, 'from functools import partial\n'), ((19386, 19446), 'functools.partial', 'partial', (['complexity.partitioned_reduce_delay'], {'algorithm': '"""bm"""'}), "(complexity.partitioned_reduce_delay, algorithm='bm')\n", (19393, 19446), False, 'from functools import partial\n'), ((9385, 9392), 'math.log2', 'log2', (['(8)'], {}), '(8)\n', (9389, 9392), False, 'from math import log2\n')] |
import numpy
import json
import os
import sys
import time
import sh_common
# Expect exactly one argument: the path to a waifu2x vgg_7 model JSON file.
if len(sys.argv) != 2:
    print("import_vgg7.py JSONPATH")
    print("  i.e. import_vgg7.py /home/you/Documents/External/waifu2x/models/vgg_7/art/scale2.0x_model.json")
    sys.exit(1)

# Create the output directory if needed. The original wrapped os.mkdir in a
# bare `except: pass`, which silently swallowed *every* error (e.g.
# permission denied); exist_ok=True ignores only the already-exists case.
os.makedirs("model-kipper", exist_ok=True)

# Load the list of layer dicts, closing the file deterministically instead
# of leaking the handle from json.load(open(...)).
with open(sys.argv[1], "rb") as f:
    data_list = json.load(f)

# NOTE(review): the original code contained
#     w.reshape((-1, 3, 3)).transpose((0, 2, 1))
# whose result was discarded (numpy reshape/transpose return new arrays),
# so it had no effect on the saved parameters. The dead statement is
# removed here to preserve behavior; if the transposed layout was actually
# intended, assign the result back to `w` instead.
idx = 0
for layer in data_list[:7]:
    # Each layer contributes a weight and a bias parameter, saved under
    # consecutive indices.
    w = numpy.array(layer["weight"])
    b = numpy.array(layer["bias"])
    sh_common.save_param("kipper", idx, w)
    idx += 1
    sh_common.save_param("kipper", idx, b)
    idx += 1
| [
"numpy.array",
"sh_common.save_param",
"os.mkdir",
"sys.exit"
] | [((249, 260), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (257, 260), False, 'import sys\n'), ((271, 295), 'os.mkdir', 'os.mkdir', (['"""model-kipper"""'], {}), "('model-kipper')\n", (279, 295), False, 'import os\n'), ((422, 450), 'numpy.array', 'numpy.array', (["layer['weight']"], {}), "(layer['weight'])\n", (433, 450), False, 'import numpy\n'), ((506, 532), 'numpy.array', 'numpy.array', (["layer['bias']"], {}), "(layer['bias'])\n", (517, 532), False, 'import numpy\n'), ((537, 575), 'sh_common.save_param', 'sh_common.save_param', (['"""kipper"""', 'idx', 'w'], {}), "('kipper', idx, w)\n", (557, 575), False, 'import sh_common\n'), ((593, 631), 'sh_common.save_param', 'sh_common.save_param', (['"""kipper"""', 'idx', 'b'], {}), "('kipper', idx, b)\n", (613, 631), False, 'import sh_common\n')] |
# Import the standard modules
import sqlite3
import spiceypy
# Import the installed modules
import pandas as pd
import numpy as np
# Import matplotlib for plotting
from matplotlib import pyplot as plt
# Import scipy for the Kernel Density Estimator functionality
from scipy import stats
#%%
# Open the comet database (built in tutorial part 7; shipped on GitHub due
# to its small size) and create a cursor on it
con = sqlite3.connect('../_databases/_comets/mpc_comets.db')
cur = con.cursor()

# Query the comet name (needed later), semi-major axis, inclination and
# eccentricity into a pandas dataframe, once for the P type comets ...
P_TYPE_DF = pd.read_sql(
    'SELECT NAME, SEMI_MAJOR_AXIS_AU, INCLINATION_DEG, '
    'ECCENTRICITY FROM comets_main WHERE ORBIT_TYPE="P"',
    con)

# ... and once for the C type comets, restricted to bound orbits
# (eccentricity strictly below 1)
C_TYPE_DF = pd.read_sql(
    'SELECT NAME, SEMI_MAJOR_AXIS_AU, INCLINATION_DEG, '
    'ECCENTRICITY FROM comets_main WHERE ORBIT_TYPE="C" '
    'AND ECCENTRICITY<1', con)
#%%
# The Tisserand parameter will help us to distinguish between Jupiter Family
# Comets (JFCs) and Non-JFCs more easily. For this parameter (next block) we
# need the semi-major axis of Jupiter
# Load all SPICE kernels listed in the meta file
spiceypy.furnsh('kernel_meta.txt')
# Set any Ephemeris time (ET); the exact epoch is arbitrary here since we
# only need a representative state of Jupiter
SAMPLE_ET = spiceypy.utc2et('2000-001T00:00:00')
# Compute the state vector of Jupiter in ECLIPJ2000 (Jupiter (599) is not
# available in the kernel, we use the barycentre (5)); the second return
# value (light time) is discarded
STATE_VEC_JUPITER, _ = spiceypy.spkgeo(targ=5, \
                                       et=SAMPLE_ET, \
                                       ref='ECLIPJ2000', \
                                       obs=10)
# Get the G*M value of the Sun (first return value, the dimension, is
# discarded; bodvcd returns the values as an array, hence [0] below)
_, GM_SUN_PRE = spiceypy.bodvcd(bodyid=10, item='GM', maxn=1)
GM_SUN = GM_SUN_PRE[0]
# Compute the osculating orbital elements of Jupiter from its state vector
ORB_ELEM_JUPITER = spiceypy.oscltx(STATE_VEC_JUPITER, SAMPLE_ET, GM_SUN)
# Get the semi-major axis value (second-to-last element of oscltx's output)
A_JUPITER_KM = ORB_ELEM_JUPITER[-2]
# Convert the semi-major axis value from km to AU
A_JUPITER_AU = spiceypy.convrt(A_JUPITER_KM, 'km', 'AU')
#%%
# PEP 8 discourages assigning a lambda to a name, so the Tisserand
# parameter is defined as a regular function instead.
def tisr_jup(a, i, e):
    """Compute the Tisserand parameter of an orbit w.r.t. Jupiter.

    Uses the module-level constant A_JUPITER_AU (Jupiter's semi-major
    axis in AU) computed above.

    Parameters
    ----------
    a : float
        Semi-major axis in AU.
    i : float
        Inclination in radians.
    e : float
        Eccentricity.

    Returns
    -------
    float
        Tisserand parameter with respect to Jupiter.
    """
    return (A_JUPITER_AU / a) + 2 * np.cos(i) \
        * np.sqrt((a / A_JUPITER_AU) * (1 - (e**2.0)))

# Add a TISSERAND_JUP column to both dataframes. The same row-wise apply
# was previously duplicated for each dataframe; a loop removes the
# copy-paste. The inclination is stored in degrees and must be converted
# to radians for the cosine above.
for _comet_df in (P_TYPE_DF, C_TYPE_DF):
    _comet_df.loc[:, 'TISSERAND_JUP'] = \
        _comet_df.apply(lambda x: tisr_jup(a=x['SEMI_MAJOR_AXIS_AU'],
                                           i=np.radians(x['INCLINATION_DEG']),
                                           e=x['ECCENTRICITY']), axis=1)
#%%
# Descriptive statistics of the Tisserand parameter of the P type comets
print('Descriptive statistics of the Tisserand parameter of P type comets')
print(f'{P_TYPE_DF["TISSERAND_JUP"].describe()}')
print('\n')

# Share of P type comets whose Tisserand parameter lies strictly between
# 2 and 3 (computed from a boolean mask over the dataframe)
JFC_MASK = (P_TYPE_DF["TISSERAND_JUP"] > 2) \
           & (P_TYPE_DF["TISSERAND_JUP"] < 3)
PERC_P_TYPE_JFCS = JFC_MASK.sum() / len(P_TYPE_DF.index) * 100
PERC_P_TYPE_JFCS = round(PERC_P_TYPE_JFCS, 0)
print('Percentage of P type comets with a Tisserand parameter between ' \
      f'2 and 3: {PERC_P_TYPE_JFCS}%')
print('\n')

# Descriptive statistics of the Tisserand parameter of the C type comets
print('Descriptive statistics of the Tisserand parameter of C type comets')
print(f'{C_TYPE_DF["TISSERAND_JUP"].describe()}')
print('\n')
# We define a function to add a new column in an already existing database
# table. This code snippet may be helpful in the future
def add_col2tab(con_db, cur_db, tab_name, col_name, col_type):
    """
    Add a new column to an already existing SQLite table.

    The column is created only if it is not present yet.  Setting a new or
    editing an existing key (primary or foreign) is not possible.

    Parameters
    ----------
    con_db : sqlite3.Connection
        Connection object to the SQLite database.
    cur_db : sqlite3.Cursor
        Connection corresponding cursor.
    tab_name : str
        Table name.
    col_name : str
        New column name that shall be added.
    col_type : str
        New column name corresponding SQLite column type.

    Returns
    -------
    None.
    """
    # Collect the names of all columns currently present in the table;
    # the column name is the second field of each PRAGMA table_info row
    present_cols = [descr[1] for descr in
                    cur_db.execute(f'PRAGMA table_info({tab_name})')]
    # Alter the table (and commit) only when the column is missing.
    # NOTE: SQL identifiers cannot be bound as parameters, hence the
    # f-string; tab_name / col_name must come from trusted code.
    if col_name not in present_cols:
        cur_db.execute(f'ALTER TABLE {tab_name} '
                       f'ADD COLUMN {col_name} {col_type}')
        con_db.commit()
# Add a new column in the comets_main table for the Tisserand parameters
add_col2tab(con_db=con, \
cur_db=cur, \
tab_name='comets_main', \
col_name='TISSERAND_JUP', \
col_type='REAL')
#%%
# Add the Tisserand parameter results to the database
cur.executemany('UPDATE comets_main SET TISSERAND_JUP=? WHERE NAME=?', \
P_TYPE_DF[['TISSERAND_JUP', 'NAME']].values)
con.commit()
cur.executemany('UPDATE comets_main SET TISSERAND_JUP=? WHERE NAME=?', \
C_TYPE_DF[['TISSERAND_JUP', 'NAME']].values)
con.commit()
#%%
# Compute the KDE distribution for the Tisserand values, ranging from -1 to
# 5
TISSERAND_RANGE = np.linspace(0, 5, 1000)
# Kernel and distribution computation for the P type comets
P_TYPE_TISR_KERNEL = stats.gaussian_kde(P_TYPE_DF['TISSERAND_JUP'])
P_TYPE_TISR_DISTR = P_TYPE_TISR_KERNEL(TISSERAND_RANGE)
# Kernel and distribution computation for the C type comets
C_TYPE_TISR_KERNEL = stats.gaussian_kde(C_TYPE_DF['TISSERAND_JUP'])
C_TYPE_TISR_DISTR = C_TYPE_TISR_KERNEL(TISSERAND_RANGE)
#%%
# Square-root choice for the histograms number of bins
nr_of_bins = lambda data_array: int(np.floor(np.sqrt(len(data_array))))
# Let's set a dark background
plt.style.use('dark_background')
# Set a default font size for better readability
plt.rcParams.update({'font.size': 14})
# Create a figure and axis
fig, ax = plt.subplots(figsize=(12, 8))
# Histogram of the P and C type comets' Tisserand parameter.
ax.hist(P_TYPE_DF['TISSERAND_JUP'], \
bins=nr_of_bins(P_TYPE_DF['TISSERAND_JUP']), \
density=True, color='tab:orange', alpha=0.5, label='P Type')
ax.hist(C_TYPE_DF['TISSERAND_JUP'], \
bins=nr_of_bins(C_TYPE_DF['TISSERAND_JUP']), \
density=True, color='tab:blue', alpha=0.5, label='C Type')
# Plot the KDE of the P type comets
ax.plot(TISSERAND_RANGE, P_TYPE_TISR_DISTR, color='tab:orange', alpha=1, linestyle='solid')
# Plot the KDE of the C type comets
ax.plot(TISSERAND_RANGE, C_TYPE_TISR_DISTR, color='tab:blue', alpha=1, linestyle='solid')
# Set an x axis limits
ax.set_xlim(0, 5)
# Add a grid for better readability
ax.grid(axis='both', linestyle='dashed', alpha=0.2)
# Set an x and y label
ax.set_xlabel('Tisserand Parameter w.r.t. Jupiter')
ax.set_ylabel('Normalised Distribution')
# Re-define the opacity (alpha value) of the markers / lines in the
# legend for better visibility
leg = ax.legend(fancybox=True, loc='upper right', framealpha=1)
for lh in leg.legendHandles:
lh.set_alpha(1)
# Save the figure
plt.savefig('comets_kde_tisserand_jup.png', dpi=300)
| [
"spiceypy.convrt",
"numpy.radians",
"spiceypy.oscltx",
"scipy.stats.gaussian_kde",
"sqlite3.connect",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"spiceypy.spkgeo",
"spiceypy.utc2et",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.rcParams.update",
"numpy.linspace",
"spiceypy.bodvcd",
"n... | [((454, 508), 'sqlite3.connect', 'sqlite3.connect', (['"""../_databases/_comets/mpc_comets.db"""'], {}), "('../_databases/_comets/mpc_comets.db')\n", (469, 508), False, 'import sqlite3\n'), ((706, 836), 'pandas.read_sql', 'pd.read_sql', (['"""SELECT NAME, SEMI_MAJOR_AXIS_AU, INCLINATION_DEG, ECCENTRICITY FROM comets_main WHERE ORBIT_TYPE="P\\""""', 'con'], {}), '(\n \'SELECT NAME, SEMI_MAJOR_AXIS_AU, INCLINATION_DEG, ECCENTRICITY FROM comets_main WHERE ORBIT_TYPE="P"\'\n , con)\n', (717, 836), True, 'import pandas as pd\n'), ((983, 1132), 'pandas.read_sql', 'pd.read_sql', (['"""SELECT NAME, SEMI_MAJOR_AXIS_AU, INCLINATION_DEG, ECCENTRICITY FROM comets_main WHERE ORBIT_TYPE="C" AND ECCENTRICITY<1"""', 'con'], {}), '(\n \'SELECT NAME, SEMI_MAJOR_AXIS_AU, INCLINATION_DEG, ECCENTRICITY FROM comets_main WHERE ORBIT_TYPE="C" AND ECCENTRICITY<1\'\n , con)\n', (994, 1132), True, 'import pandas as pd\n'), ((1409, 1443), 'spiceypy.furnsh', 'spiceypy.furnsh', (['"""kernel_meta.txt"""'], {}), "('kernel_meta.txt')\n", (1424, 1443), False, 'import spiceypy\n'), ((1487, 1523), 'spiceypy.utc2et', 'spiceypy.utc2et', (['"""2000-001T00:00:00"""'], {}), "('2000-001T00:00:00')\n", (1502, 1523), False, 'import spiceypy\n'), ((1676, 1739), 'spiceypy.spkgeo', 'spiceypy.spkgeo', ([], {'targ': '(5)', 'et': 'SAMPLE_ET', 'ref': '"""ECLIPJ2000"""', 'obs': '(10)'}), "(targ=5, et=SAMPLE_ET, ref='ECLIPJ2000', obs=10)\n", (1691, 1739), False, 'import spiceypy\n'), ((1911, 1956), 'spiceypy.bodvcd', 'spiceypy.bodvcd', ([], {'bodyid': '(10)', 'item': '"""GM"""', 'maxn': '(1)'}), "(bodyid=10, item='GM', maxn=1)\n", (1926, 1956), False, 'import spiceypy\n'), ((2042, 2095), 'spiceypy.oscltx', 'spiceypy.oscltx', (['STATE_VEC_JUPITER', 'SAMPLE_ET', 'GM_SUN'], {}), '(STATE_VEC_JUPITER, SAMPLE_ET, GM_SUN)\n', (2057, 2095), False, 'import spiceypy\n'), ((2215, 2256), 'spiceypy.convrt', 'spiceypy.convrt', (['A_JUPITER_KM', '"""km"""', '"""AU"""'], {}), "(A_JUPITER_KM, 'km', 'AU')\n", (2230, 2256), 
False, 'import spiceypy\n'), ((5985, 6008), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(1000)'], {}), '(0, 5, 1000)\n', (5996, 6008), True, 'import numpy as np\n'), ((6091, 6137), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (["P_TYPE_DF['TISSERAND_JUP']"], {}), "(P_TYPE_DF['TISSERAND_JUP'])\n", (6109, 6137), False, 'from scipy import stats\n'), ((6276, 6322), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (["C_TYPE_DF['TISSERAND_JUP']"], {}), "(C_TYPE_DF['TISSERAND_JUP'])\n", (6294, 6322), False, 'from scipy import stats\n'), ((6543, 6575), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (6556, 6575), True, 'from matplotlib import pyplot as plt\n'), ((6626, 6664), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (6645, 6664), True, 'from matplotlib import pyplot as plt\n'), ((6703, 6732), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (6715, 6732), True, 'from matplotlib import pyplot as plt\n'), ((7854, 7906), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""comets_kde_tisserand_jup.png"""'], {'dpi': '(300)'}), "('comets_kde_tisserand_jup.png', dpi=300)\n", (7865, 7906), True, 'from matplotlib import pyplot as plt\n'), ((2510, 2552), 'numpy.sqrt', 'np.sqrt', (['(a / A_JUPITER_AU * (1 - e ** 2.0))'], {}), '(a / A_JUPITER_AU * (1 - e ** 2.0))\n', (2517, 2552), True, 'import numpy as np\n'), ((2469, 2478), 'numpy.cos', 'np.cos', (['i'], {}), '(i)\n', (2475, 2478), True, 'import numpy as np\n'), ((2776, 2808), 'numpy.radians', 'np.radians', (["x['INCLINATION_DEG']"], {}), "(x['INCLINATION_DEG'])\n", (2786, 2808), True, 'import numpy as np\n'), ((3024, 3056), 'numpy.radians', 'np.radians', (["x['INCLINATION_DEG']"], {}), "(x['INCLINATION_DEG'])\n", (3034, 3056), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
.. module:: hindex
:synopsis: Calculate the hindex.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import sys
import pandas as pd
import numpy as np
# determine if we are loading from a jupyter notebook (to make pretty progress bars)
if 'ipykernel' in sys.modules:
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
from pyscisci.utils import zip2dict
### H index
def hindex(a):
    """
    Calculate the h index for the array of citation values. See :cite:`hirsch2005index` for the definition.

    Parameters
    ----------
    :param a : numpy array
        An array of citation counts for each publication by the Author.

    Returns
    -------
    int
        The Hindex
    """
    # Rank citation counts from most to least cited; the h index is the
    # number of 0-based ranks r whose citation count exceeds r (i.e. at
    # least r+1 publications have at least r+1 citations).
    ranked = np.sort(a)[::-1]
    surplus = ranked - np.arange(ranked.shape[0])
    return (surplus > 0).sum()
def compute_hindex(df, colgroupby, colcountby, show_progress=False):
    """
    Calculate the h index for each group in the DataFrame. See :cite:`hirsch2005index` for the definition.

    The algorithmic implementation for each author can be found in :py:func:`citationanalysis.author_hindex`.

    Parameters
    ----------
    :param df : DataFrame
        A DataFrame with the citation information for each Author.

    :param colgroupby : str
        The DataFrame column with Author Ids.

    :param colcountby : str
        The DataFrame column with Citation counts for each publication.

    Returns
    -------
    DataFrame
        DataFrame with 2 columns: colgroupby, 'Hindex'
    """
    # Register the pandas apply with tqdm so a progress bar can be shown
    tqdm.pandas(desc='Hindex', disable=not show_progress)
    # Map both possible result-column labels onto '<colgroupby>Hindex'
    rename_map = zip2dict([str(colcountby), '0'], [str(colgroupby) + 'Hindex'] * 2)
    grouped_counts = df.groupby(colgroupby, sort=False)[colcountby]
    hindex_df = grouped_counts.progress_apply(hindex).to_frame().reset_index()
    return hindex_df.rename(columns=rename_map)
| [
"numpy.sort",
"tqdm.tqdm.pandas",
"numpy.arange"
] | [((1577, 1630), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {'desc': '"""Hindex"""', 'disable': '(not show_progress)'}), "(desc='Hindex', disable=not show_progress)\n", (1588, 1630), False, 'from tqdm import tqdm\n'), ((763, 784), 'numpy.arange', 'np.arange', (['a.shape[0]'], {}), '(a.shape[0])\n', (772, 784), True, 'import numpy as np\n'), ((744, 754), 'numpy.sort', 'np.sort', (['a'], {}), '(a)\n', (751, 754), True, 'import numpy as np\n')] |
# author: <NAME>
# date: 2022-03-25
"""
Usage: single_linear_regression.py --xtrainpath=<xtrainpath> --ytrainpath=<ytrainpath> --preprocessorpath=<preprocessorpath> --bestalpha=<bestalpha> --path=<path>
Options:
--xtrainpath=<xtrainpath>: csv file previously saved in the previous script the training data for the x-axis of ridge regression
--ytrainpath=<ytrainpath>: csv file previously saved int the previous script the training data for the y-axis of ridge regression
--preprocessorpath=<preprocessorpath>: path for the preprocessor pickle file saved.
--bestalpha=<bestalpha>: path for best alphs pickle
--path=<path>: path for the downloading data
"""
from docopt import docopt
import pickle
import pandas as pd
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
import matplotlib.pyplot as plt
# Parse the command line arguments declared in the module docstring (docopt)
opt = docopt(__doc__)
# Fix the NumPy random seed so the cross-validation folds are reproducible
np.random.seed(12)
def ridge_pipline(processor, alpha):
    """
    Build a scikit-learn pipeline that preprocesses the data and then fits
    a Ridge regression.

    Parameters
    ----------
    processor :
        sklearn transformer (e.g. a ColumnTransformer) used as the
        preprocessing step of the pipeline.
    alpha : float
        Regularization strength passed to sklearn.linear_model.Ridge.

    Returns
    -------
    sklearn.pipeline.Pipeline
        Two-step pipeline: preprocessing followed by Ridge(alpha=alpha).
    """
    # Return the pipeline directly; the previous intermediate binding and
    # truncated comment block added nothing.
    return make_pipeline(processor, Ridge(alpha=alpha))
def cross_validation(ridgepip, xtrain, ytrain):
    """
    Run 10-fold cross validation for the given ridge pipeline.

    Parameters
    ----------
    ridgepip : sklearn.pipeline.Pipeline
        Pipeline ending in a Ridge regressor.
    xtrain :
        Training features.
    ytrain :
        Training targets.

    Returns
    -------
    dict
        The sklearn cross_validate result (fit/score times plus train and
        test scores).  The result was previously discarded, making the
        function a no-op; it is now returned so callers can inspect it.
    """
    return cross_validate(ridgepip, xtrain, ytrain, cv=10, return_train_score=True)
def write_csv(df, out_dir):
    """
    Write a DataFrame to a CSV file, keeping the index.

    The first parameter was renamed from ``pd`` to ``df``: it previously
    shadowed the module-level pandas alias ``pd``.  All call sites in this
    script pass the DataFrame positionally, so the rename is safe here.

    Parameters
    ----------
    df : pandas.DataFrame
        The DataFrame to persist.
    out_dir : str
        Destination CSV file path.
    """
    df.to_csv(out_dir, index=True)
def make_plot(cv_ridge, path):
    """
    Plot the cross-validation test score per fold and save the figure.

    Parameters
    ----------
    cv_ridge : pandas.DataFrame
        Cross-validation results; must contain a 'test_score' column with
        one row per CV fold.
    path : str
        Directory prefix the file name 'cv_plot.png' is appended to
        (expected to end with a path separator).

    Returns
    -------
    None.
    """
    # Draws on pyplot's implicit current figure; the return value of plot()
    # is not needed (the previous unused `ridge` binding was removed).
    # NOTE(review): the format string '-0' renders a plain line; '-o' (line
    # with circle markers) may have been intended -- confirm before changing.
    plt.plot(np.arange(len(cv_ridge)),
             cv_ridge['test_score'],
             '-0')
    plt.title('Figure 3: RidgeCV Folds = 10')
    plt.xlabel('CV Fold Iterations')
    plt.ylabel('CV Accuracy')
    # Plain concatenation kept on purpose: `path` carries its own separator
    plt.savefig(path + "cv_plot.png")
def main(xtrainpath, ytrainpath, preprocessorpath, bestalpha, path):
    """
    Load the training data, preprocessor and tuned alpha, cross validate a
    ridge pipeline, and persist/plot the results.

    Parameters
    ----------
    xtrainpath : str
        Path to the pickled training features.
    ytrainpath : str
        Path to the pickled training targets.
    preprocessorpath : str
        Path to the pickled sklearn preprocessor.
    bestalpha : str
        Path to the pickled best alpha value found during tuning.
    path : str
        Output directory prefix for the CV plot.

    Returns
    -------
    None.
    """
    xtrain = pd.read_pickle(xtrainpath)
    ytrain = pd.read_pickle(ytrainpath)
    # Use context managers so the pickle files are closed deterministically;
    # the previous bare open(...) calls leaked the file handles.
    with open(preprocessorpath, "rb") as preproc_file:
        preprocessor = pickle.load(preproc_file)
    with open(bestalpha, "rb") as alpha_file:
        best_alpha = pickle.load(alpha_file)
    ridge_pipeline = make_pipeline(preprocessor, Ridge(alpha=best_alpha))
    cv_ridge = pd.DataFrame(cross_validate(ridge_pipeline, xtrain, ytrain, cv=10, return_train_score=True))
    write_csv(cv_ridge, "data/processed/cv_ridge.csv")
    make_plot(cv_ridge, path)


if __name__ == "__main__":
    # The docopt keys match the option names declared in the module docstring
    main(opt["--xtrainpath"], opt["--ytrainpath"], opt["--preprocessorpath"], opt["--bestalpha"], opt["--path"])
| [
"pandas.read_pickle",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.cross_validate",
"matplotlib.pyplot.xlabel",
"sklearn.linear_model.Ridge",
"numpy.random.seed",
"matplotlib.pyplot.title",
"pandas.to_csv",
"docopt.docopt"
] | [((962, 977), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (968, 977), False, 'from docopt import docopt\n'), ((979, 997), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (993, 997), True, 'import numpy as np\n'), ((1320, 1392), 'sklearn.model_selection.cross_validate', 'cross_validate', (['ridgepip', 'xtrain', 'ytrain'], {'cv': '(10)', 'return_train_score': '(True)'}), '(ridgepip, xtrain, ytrain, cv=10, return_train_score=True)\n', (1334, 1392), False, 'from sklearn.model_selection import cross_validate\n'), ((1425, 1455), 'pandas.to_csv', 'pd.to_csv', (['out_dir'], {'index': '(True)'}), '(out_dir, index=True)\n', (1434, 1455), True, 'import pandas as pd\n'), ((1610, 1651), 'matplotlib.pyplot.title', 'plt.title', (['"""Figure 3: RidgeCV Folds = 10"""'], {}), "('Figure 3: RidgeCV Folds = 10')\n", (1619, 1651), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1688), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""CV Fold Iterations"""'], {}), "('CV Fold Iterations')\n", (1666, 1688), True, 'import matplotlib.pyplot as plt\n'), ((1693, 1718), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CV Accuracy"""'], {}), "('CV Accuracy')\n", (1703, 1718), True, 'import matplotlib.pyplot as plt\n'), ((1723, 1756), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + 'cv_plot.png')"], {}), "(path + 'cv_plot.png')\n", (1734, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1835, 1861), 'pandas.read_pickle', 'pd.read_pickle', (['xtrainpath'], {}), '(xtrainpath)\n', (1849, 1861), True, 'import pandas as pd\n'), ((1875, 1901), 'pandas.read_pickle', 'pd.read_pickle', (['ytrainpath'], {}), '(ytrainpath)\n', (1889, 1901), True, 'import pandas as pd\n'), ((1223, 1241), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'alpha'}), '(alpha=alpha)\n', (1228, 1241), False, 'from sklearn.linear_model import Ridge\n'), ((2063, 2086), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'best_alpha'}), '(alpha=best_alpha)\n', (2068, 2086), 
False, 'from sklearn.linear_model import Ridge\n'), ((2116, 2194), 'sklearn.model_selection.cross_validate', 'cross_validate', (['ridge_pipeline', 'xtrain', 'ytrain'], {'cv': '(10)', 'return_train_score': '(True)'}), '(ridge_pipeline, xtrain, ytrain, cv=10, return_train_score=True)\n', (2130, 2194), False, 'from sklearn.model_selection import cross_validate\n')] |
from matplotlib import pyplot
from mpl_toolkits import mplot3d
import struct
import numpy
class stl_object:
    '''
    stl_object() -> new empty stl_object
    stl_object(triangles,normals,header,triangle_numbers) -> new stl_object

    triangles:  List of triangles (List) of 3 tupules, each tupule (x,y,z) is a vertex of the triangle
                [[(xa,ya,za),(xb,yb,zb),(xc,yc,zc)],..]
    normals:    List of tupules (x,y,z) representing normals of the corresponding triangle in list
                [(x,y,z),...]
    header:     string
    triangle_numbers: int

    *Assign x_max,x_min, y_max... after creation of stl_object using parameters
    '''
    def __init__(self, triangles=None, normals=None, header=None, triangle_numbers=None):
        # Geometry payload; may instead be filled later via read_from_file.
        self.triangles = triangles
        self.normals = normals
        self.header = header
        self.triangle_numbers = triangle_numbers
        # Bounding-box extremes; populated by read_from_file.
        self.x_max = None
        self.x_min = None
        self.y_max = None
        self.y_min = None
        self.z_max = None
        self.z_min = None

    def read_from_file(self, file_name):
        '''
        stl_object.read_from_file('file_path/file_name') -> None -- fill an empty stl_object

        Reads a *binary* STL file: an 80-byte header, a 4-byte little-endian
        triangle count, then 50 bytes per triangle (normal, three vertices,
        2-byte attribute byte count).  Also tracks the bounding-box extremes.
        '''
        # Use +/- infinity as sentinels so models with coordinates beyond
        # 1e6 units are handled correctly (the old +/-1e6 sentinels silently
        # produced wrong extremes for such models).
        self.x_max = -numpy.inf
        self.x_min = numpy.inf
        self.y_max = -numpy.inf
        self.y_min = numpy.inf
        self.z_max = -numpy.inf
        self.z_min = numpy.inf
        self.triangles = []
        self.normals = []
        with open(file_name, 'rb') as f:
            # STL headers are arbitrary 80 bytes; errors='replace' avoids a
            # UnicodeDecodeError on non-UTF-8 headers.
            self.header = f.read(80).decode(errors='replace')
            self.triangle_numbers = int.from_bytes(f.read(4), 'little')
            for i in range(self.triangle_numbers):
                [xn, yn, zn] = struct.unpack('fff', f.read(12))  # normal
                [x1, y1, z1] = struct.unpack('fff', f.read(12))  # vertex 1
                [x2, y2, z2] = struct.unpack('fff', f.read(12))  # vertex 2
                [x3, y3, z3] = struct.unpack('fff', f.read(12))  # vertex 3
                f.read(2)  # skip the 2-byte attribute byte count
                self.normals.append([xn, yn, zn])
                self.triangles.append([(x1, y1, z1), (x2, y2, z2), (x3, y3, z3)])  # store the triangle coordinates
                # Track the extreme coordinates to adjust the axes later.
                self.x_max = max(self.x_max, x1, x2, x3)
                self.x_min = min(self.x_min, x1, x2, x3)
                self.y_max = max(self.y_max, y1, y2, y3)
                self.y_min = min(self.y_min, y1, y2, y3)
                self.z_max = max(self.z_max, z1, z2, z3)
                self.z_min = min(self.z_min, z1, z2, z3)
        print('{} triangles read from {} with header {}'.format(self.triangle_numbers, file_name, self.header))

    def display(self, axis='off'):
        '''
        stl_object.display() -> None -- display an stl object

        axis: 'on' or 'off' -- whether to draw and label the coordinate axes.
        '''
        figure = pyplot.figure()
        ax = figure.add_subplot(111, projection='3d')
        # Use the same half-range on every axis so the model is not distorted.
        x_range = self.x_max - self.x_min
        y_range = self.y_max - self.y_min
        z_range = self.z_max - self.z_min
        max_range = max(x_range, y_range, z_range)
        half_range = max_range / 2.0
        x_mean = 0.5 * (self.x_max + self.x_min)
        y_mean = 0.5 * (self.y_max + self.y_min)
        z_mean = 0.5 * (self.z_max + self.z_min)
        poly3d = mplot3d.art3d.Poly3DCollection(self.triangles)
        poly3d.set_edgecolor('green')
        ax.add_collection3d(poly3d)
        ax.auto_scale_xyz([x_mean - half_range, x_mean + half_range],
                          [y_mean - half_range, y_mean + half_range],
                          [z_mean - half_range, z_mean + half_range])
        # NOTE(review): set_aspect('equal') on 3D axes is only supported by
        # recent Matplotlib versions -- confirm against the pinned version.
        ax.set_aspect('equal', adjustable='box')
        pyplot.axis(axis)
        if axis == 'on':
            ax.set_xlabel('X')
            ax.set_ylabel('Y')
            ax.set_zlabel('Z')
        pyplot.show()

    def rotated_max_min(self, direction):
        '''
        stl_object.rotated_max_min(direction) -> float,float,float,float,float,float,numpy.array
        -- returns x_max_new,x_min_new,y_max_new,y_min_new,z_max_new,z_min_new,rot_mat with 'direction' towards z axis

        direction: unit-length numpy array (dx,dy,dz) that becomes the new z axis.
        NOTE(review): a direction parallel to the x axis (dy == dz == 0) makes
        new_x the zero vector and yields NaNs -- callers must avoid it.
        '''
        x_max_new = -numpy.inf
        x_min_new = numpy.inf
        y_max_new = -numpy.inf
        y_min_new = numpy.inf
        z_max_new = -numpy.inf
        z_min_new = numpy.inf
        # Build an orthonormal basis whose third axis is `direction`.
        new_x = numpy.array([0, direction[2], -direction[1]])
        new_x = new_x / numpy.linalg.norm(new_x)
        new_y = numpy.cross(direction, new_x)
        rot_mat = numpy.array([new_x, new_y, direction])
        for i in range(self.triangle_numbers):
            # Rotate the three vertices of triangle i into the new frame and
            # fold them into the running extremes (order does not matter).
            for vertex in self.triangles[i]:
                x, y, z = rot_mat.dot(numpy.array(vertex))
                x_max_new = max(x_max_new, x)
                x_min_new = min(x_min_new, x)
                y_max_new = max(y_max_new, y)
                y_min_new = min(y_min_new, y)
                z_max_new = max(z_max_new, z)
                z_min_new = min(z_min_new, z)
        return x_max_new, x_min_new, y_max_new, y_min_new, z_max_new, z_min_new, rot_mat
| [
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"numpy.cross",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.dot",
"numpy.linalg.norm",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((2856, 2871), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (2869, 2871), False, 'from matplotlib import pyplot\n'), ((3275, 3321), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'mplot3d.art3d.Poly3DCollection', (['self.triangles'], {}), '(self.triangles)\n', (3305, 3321), False, 'from mpl_toolkits import mplot3d\n'), ((3605, 3622), 'matplotlib.pyplot.axis', 'pyplot.axis', (['axis'], {}), '(axis)\n', (3616, 3622), False, 'from matplotlib import pyplot\n'), ((3747, 3760), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (3758, 3760), False, 'from matplotlib import pyplot\n'), ((4217, 4262), 'numpy.array', 'numpy.array', (['[0, direction[2], -direction[1]]'], {}), '([0, direction[2], -direction[1]])\n', (4228, 4262), False, 'import numpy\n'), ((4320, 4349), 'numpy.cross', 'numpy.cross', (['direction', 'new_x'], {}), '(direction, new_x)\n', (4331, 4349), False, 'import numpy\n'), ((4365, 4403), 'numpy.array', 'numpy.array', (['[new_x, new_y, direction]'], {}), '([new_x, new_y, direction])\n', (4376, 4403), False, 'import numpy\n'), ((4281, 4305), 'numpy.linalg.norm', 'numpy.linalg.norm', (['new_x'], {}), '(new_x)\n', (4298, 4305), False, 'import numpy\n'), ((4466, 4559), 'numpy.array', 'numpy.array', (['[self.triangles[i][0][0], self.triangles[i][0][1], self.triangles[i][0][2]]'], {}), '([self.triangles[i][0][0], self.triangles[i][0][1], self.\n triangles[i][0][2]])\n', (4477, 4559), False, 'import numpy\n'), ((4569, 4662), 'numpy.array', 'numpy.array', (['[self.triangles[i][1][0], self.triangles[i][1][1], self.triangles[i][1][2]]'], {}), '([self.triangles[i][1][0], self.triangles[i][1][1], self.\n triangles[i][1][2]])\n', (4580, 4662), False, 'import numpy\n'), ((4672, 4765), 'numpy.array', 'numpy.array', (['[self.triangles[i][2][0], self.triangles[i][2][1], self.triangles[i][2][2]]'], {}), '([self.triangles[i][2][0], self.triangles[i][2][1], self.\n triangles[i][2][2]])\n', (4683, 4765), False, 'import numpy\n'), ((4791, 4814), 
'numpy.dot', 'numpy.dot', (['rot_mat', 'X_1'], {}), '(rot_mat, X_1)\n', (4800, 4814), False, 'import numpy\n'), ((4830, 4853), 'numpy.dot', 'numpy.dot', (['rot_mat', 'X_2'], {}), '(rot_mat, X_2)\n', (4839, 4853), False, 'import numpy\n'), ((4869, 4892), 'numpy.dot', 'numpy.dot', (['rot_mat', 'X_3'], {}), '(rot_mat, X_3)\n', (4878, 4892), False, 'import numpy\n')] |
import carla
import os
import sys
import cv2
import json
import numpy as np
# Resolve the CARLA installation root from the environment so the PythonAPI
# examples shipped with CARLA can be imported.
CARLA_ROOT = os.getenv("CARLA_ROOT")
if CARLA_ROOT is None:
    raise ValueError("CARLA_ROOT must be defined.")
# NOTE(review): assumes CARLA_ROOT ends with a path separator -- confirm.
scriptdir = CARLA_ROOT + "PythonAPI/"
sys.path.append(scriptdir)
from examples.synchronous_mode import CarlaSyncMode
# Also make the project's scripts/carla/ package importable.
scriptdir = os.path.abspath(__file__).split('scripts')[0] + 'scripts/carla/'
sys.path.append(scriptdir)
from scenarios.run_intersection_scenario import CarlaParams, DroneVizParams, VehicleParams, PredictionParams, RunIntersectionScenario
def setup_intersection_scenario(scenario_dict, ego_init_dict, savedir):
    """Build a scenario runner whose vehicles determine the route.

    The route is only queried for overlays -- the actual policies are
    never run here.
    """
    carla_params = CarlaParams(**scenario_dict["carla_params"])
    drone_viz_params = DroneVizParams(**scenario_dict["drone_viz_params"])
    pred_params = PredictionParams()

    policy_type = "lk_pi"
    vehicles_params_list = []
    for vp_dict in scenario_dict["vehicle_params"]:
        role = vp_dict["role"]
        if role == "static":
            # Static vehicles are not generated for the route overlay.
            continue
        if role == "ego":
            # The ego vehicle additionally takes its initial-state overrides.
            vp_dict.update(ego_init_dict)
        elif role != "target":
            raise ValueError(f"Invalid vehicle role: {vp_dict['role']}")
        vp_dict["policy_type"] = policy_type
        vehicles_params_list.append(VehicleParams(**vp_dict))

    return RunIntersectionScenario(carla_params,
                                   drone_viz_params,
                                   vehicles_params_list,
                                   pred_params,
                                   savedir)
def get_drone_snapshot(runner):
    """Capture one drone frame and return it as a resized 3-channel image."""
    # Tick the simulator once in synchronous mode to obtain a single frame.
    with CarlaSyncMode(runner.world, runner.drone, fps=runner.carla_fps) as sync_mode:
        _, frame = sync_mode.tick(timeout=runner.timeout)
        pixels = np.frombuffer(frame.raw_data, dtype=np.uint8)
        # Drop the 4th channel, then scale to the visualization resolution.
        pixels = np.reshape(pixels, (frame.height, frame.width, 4))[:, :, :3]
        snapshot = cv2.resize(pixels,
                              (runner.viz_params.img_width, runner.viz_params.img_height),
                              interpolation=cv2.INTER_AREA)
    return snapshot
def overlay_trajectories(img, runner, line_thickness=5, goal_radius=10):
    """Draw every agent's reference trajectory and goal point onto img (in place)."""
    def world_to_pixel(xy):
        # Apply the affine world -> drone-image transform and round to ints.
        pixel = runner.A_world_to_drone @ xy + runner.b_world_to_drone
        return int(pixel[0]), int(pixel[1])

    for policy, color in zip(runner.vehicle_policies, runner.vehicle_colors):
        bgr = color[::-1]  # colors are stored RGB; OpenCV expects BGR
        waypoints = [world_to_pixel(xy) for xy in policy._frenet_traj.trajectory[:, 1:3]]
        for seg_start, seg_end in zip(waypoints[:-1], waypoints[1:]):
            cv2.line(img, seg_start, seg_end, bgr, thickness=line_thickness)
        # Mark the final waypoint (the goal) with a filled circle.
        cv2.circle(img, waypoints[-1], goal_radius, bgr, thickness=-1)
if __name__ == '__main__':
    # Select which CARLA town's scenarios to render; only towns 5 and 7
    # have scenario files.
    TOWN_NUM = 7 # 5 or 7
    if TOWN_NUM == 5:
        scenario_suffix = ""
        scenarios_to_overlay = [1, 2, 3]
    elif TOWN_NUM == 7:
        scenario_suffix = "_t7"
        scenarios_to_overlay = [1, 2, 3, 4]
    else:
        raise ValueError(TOWN_NUM)
    img = None
    for scenario_num in scenarios_to_overlay:
        # Loading + Setup.
        scenario_path = os.path.join(scriptdir, f"scenarios/scenario_{scenario_num:02d}{scenario_suffix}.json")
        ego_init_path = os.path.join(scriptdir, "scenarios/ego_init_00.json")
        # NOTE(review): these open() handles are never closed explicitly.
        scenario_dict = json.load(open(scenario_path, "r"))
        ego_init_dict = json.load(open(ego_init_path, "r"))
        scenario_name = scenario_path.split("/")[-1].split('.json')[0]
        savedir = os.path.join( os.path.abspath(__file__).split("scripts")[0], "results/route_viz/" )
        runner = None
        try:
            runner = setup_intersection_scenario(scenario_dict, ego_init_dict, savedir)
            img = get_drone_snapshot(runner)
            overlay_trajectories(img, runner)
        except Exception as e:
            # NOTE(review): broad catch keeps later scenarios running, but if
            # the snapshot failed, `img` below is stale (or None on the first
            # iteration) and cv2.imwrite would then fail -- confirm intended.
            print(e)
        finally:
            # Always tear down the spawned actors so repeated runs do not
            # leak vehicles/drones into the simulator.
            if runner:
                for actor in runner.vehicle_actors:
                    actor.destroy()
                runner.drone.destroy()
                cv2.destroyAllWindows()
        cv2.imwrite(os.path.join(savedir, f"scenario_route{scenario_suffix}_{scenario_num}.png"), img)
| [
"scenarios.run_intersection_scenario.CarlaParams",
"scenarios.run_intersection_scenario.DroneVizParams",
"numpy.reshape",
"os.getenv",
"scenarios.run_intersection_scenario.RunIntersectionScenario",
"cv2.line",
"os.path.join",
"scenarios.run_intersection_scenario.PredictionParams",
"examples.synchron... | [((90, 113), 'os.getenv', 'os.getenv', (['"""CARLA_ROOT"""'], {}), "('CARLA_ROOT')\n", (99, 113), False, 'import os\n'), ((228, 254), 'sys.path.append', 'sys.path.append', (['scriptdir'], {}), '(scriptdir)\n', (243, 254), False, 'import sys\n'), ((385, 411), 'sys.path.append', 'sys.path.append', (['scriptdir'], {}), '(scriptdir)\n', (400, 411), False, 'import sys\n'), ((820, 864), 'scenarios.run_intersection_scenario.CarlaParams', 'CarlaParams', ([], {}), "(**scenario_dict['carla_params'])\n", (831, 864), False, 'from scenarios.run_intersection_scenario import CarlaParams, DroneVizParams, VehicleParams, PredictionParams, RunIntersectionScenario\n'), ((888, 939), 'scenarios.run_intersection_scenario.DroneVizParams', 'DroneVizParams', ([], {}), "(**scenario_dict['drone_viz_params'])\n", (902, 939), False, 'from scenarios.run_intersection_scenario import CarlaParams, DroneVizParams, VehicleParams, PredictionParams, RunIntersectionScenario\n'), ((963, 981), 'scenarios.run_intersection_scenario.PredictionParams', 'PredictionParams', ([], {}), '()\n', (979, 981), False, 'from scenarios.run_intersection_scenario import CarlaParams, DroneVizParams, VehicleParams, PredictionParams, RunIntersectionScenario\n'), ((1729, 1832), 'scenarios.run_intersection_scenario.RunIntersectionScenario', 'RunIntersectionScenario', (['carla_params', 'drone_viz_params', 'vehicles_params_list', 'pred_params', 'savedir'], {}), '(carla_params, drone_viz_params,\n vehicles_params_list, pred_params, savedir)\n', (1752, 1832), False, 'from scenarios.run_intersection_scenario import CarlaParams, DroneVizParams, VehicleParams, PredictionParams, RunIntersectionScenario\n'), ((2123, 2186), 'examples.synchronous_mode.CarlaSyncMode', 'CarlaSyncMode', (['runner.world', 'runner.drone'], {'fps': 'runner.carla_fps'}), '(runner.world, runner.drone, fps=runner.carla_fps)\n', (2136, 2186), False, 'from examples.synchronous_mode import CarlaSyncMode\n'), ((2277, 2320), 'numpy.frombuffer', 
'np.frombuffer', (['img.raw_data'], {'dtype': 'np.uint8'}), '(img.raw_data, dtype=np.uint8)\n', (2290, 2320), True, 'import numpy as np\n'), ((2341, 2390), 'numpy.reshape', 'np.reshape', (['img_drone', '(img.height, img.width, 4)'], {}), '(img_drone, (img.height, img.width, 4))\n', (2351, 2390), True, 'import numpy as np\n'), ((2451, 2568), 'cv2.resize', 'cv2.resize', (['img_drone', '(runner.viz_params.img_width, runner.viz_params.img_height)'], {'interpolation': 'cv2.INTER_AREA'}), '(img_drone, (runner.viz_params.img_width, runner.viz_params.\n img_height), interpolation=cv2.INTER_AREA)\n', (2461, 2568), False, 'import cv2\n'), ((3314, 3376), 'cv2.circle', 'cv2.circle', (['img', 'pts[-1]', 'goal_radius', 'veh_color'], {'thickness': '(-1)'}), '(img, pts[-1], goal_radius, veh_color, thickness=-1)\n', (3324, 3376), False, 'import cv2\n'), ((3784, 3875), 'os.path.join', 'os.path.join', (['scriptdir', 'f"""scenarios/scenario_{scenario_num:02d}{scenario_suffix}.json"""'], {}), "(scriptdir,\n f'scenarios/scenario_{scenario_num:02d}{scenario_suffix}.json')\n", (3796, 3875), False, 'import os\n'), ((3896, 3949), 'os.path.join', 'os.path.join', (['scriptdir', '"""scenarios/ego_init_00.json"""'], {}), "(scriptdir, 'scenarios/ego_init_00.json')\n", (3908, 3949), False, 'import os\n'), ((3226, 3311), 'cv2.line', 'cv2.line', (['img', 'pts[px_ind]', 'pts[px_ind + 1]', 'veh_color'], {'thickness': 'line_thickness'}), '(img, pts[px_ind], pts[px_ind + 1], veh_color, thickness=line_thickness\n )\n', (3234, 3311), False, 'import cv2\n'), ((4694, 4717), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4715, 4717), False, 'import cv2\n'), ((4738, 4814), 'os.path.join', 'os.path.join', (['savedir', 'f"""scenario_route{scenario_suffix}_{scenario_num}.png"""'], {}), "(savedir, f'scenario_route{scenario_suffix}_{scenario_num}.png')\n", (4750, 4814), False, 'import os\n'), ((320, 345), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (335, 345), 
False, 'import os\n'), ((1403, 1427), 'scenarios.run_intersection_scenario.VehicleParams', 'VehicleParams', ([], {}), '(**vp_dict)\n', (1416, 1427), False, 'from scenarios.run_intersection_scenario import CarlaParams, DroneVizParams, VehicleParams, PredictionParams, RunIntersectionScenario\n'), ((1601, 1625), 'scenarios.run_intersection_scenario.VehicleParams', 'VehicleParams', ([], {}), '(**vp_dict)\n', (1614, 1625), False, 'from scenarios.run_intersection_scenario import CarlaParams, DroneVizParams, VehicleParams, PredictionParams, RunIntersectionScenario\n'), ((4174, 4199), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4189, 4199), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 00:01:29 2021
@author: lukepinkel
"""
import tqdm
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib as mpl
import scipy.sparse as sps
import matplotlib.pyplot as plt
from .model_matrices import (construct_model_matrices, make_theta, make_gcov,
get_jacmats, transform_theta, get_d2_chol, lndet_gmat,
inverse_transform_theta, vcrepara_grad,
VarCorrReparam, RestrictedModel)
from ..utilities.data_utils import _check_shape
from ..utilities.linalg_operations import invech_chol, invech
from ..utilities.special_mats import lmat, nmat
from ..utilities.numerical_derivs import so_gc_cd, so_fc_cd, fo_fc_cd
from ..pyglm.families import (Binomial, ExponentialFamily, Poisson, NegativeBinomial)
from ..utilities.output import get_param_table
from sksparse.cholmod import cholesky
class LMM:
    def __init__(self, formula, data, weights=None, rcov=None):
        """
        Linear mixed model estimated by (restricted) maximum likelihood.

        Parameters
        ----------
        formula : string
            lme4 style formula with random effects specified by terms in
            parentheses with a bar
        data : dataframe
            Dataframe containing data. Missing values should be dropped
            manually before passing the dataframe.
        weights : ndarray, optional
            Array of model weights. The default is None, which sets the
            weights to one internally.
        rcov : optional
            Residual covariance specification.  None selects the identity
            residual structure, which allows crossproducts to be precomputed.
        Returns
        -------
        None.
        """
        indices = {}
        # Build fixed-effect (X) and random-effect (Z) design matrices from the formula
        X, Z, y, dims, levels, fe_vars = construct_model_matrices(formula, data, return_fe=True)
        theta, theta_indices = make_theta(dims)
        indices['theta'] = theta_indices
        G, g_indices = make_gcov(theta, indices, dims)
        indices['g'] = g_indices
        # Augmented crossproduct matrix M = [X Z y]'[X Z y] used by the mixed model equations
        XZ, Xty, Zty, yty = np.hstack([X, Z]), X.T.dot(y), Z.T.dot(y), y.T.dot(y)
        XZ = sp.sparse.csc_matrix(XZ)
        C, m = sps.csc_matrix(XZ.T.dot(XZ)), sps.csc_matrix(np.vstack([Xty, Zty]))
        M = sps.bmat([[C, m], [m.T, yty]])
        M = M.tocsc()
        self.fe_vars = fe_vars
        self.X, self.Z, self.y, self.dims, self.levels = X, Z, y, dims, levels
        self.XZ, self.Xty, self.Zty, self.yty = XZ, Xty, Zty, yty
        self.C, self.m, self.M = C, m, M
        self.theta, self.theta_chol = theta, transform_theta(theta, dims, indices)
        self.G = G
        self.indices = indices
        self.R = sps.eye(Z.shape[0])
        self.Zs = sps.csc_matrix(Z)
        # Sparse derivative matrices dG/dtheta_i and per-level column indices into Z
        self.g_derivs, self.jac_inds = get_jacmats(self.Zs, self.dims,
                                                   self.indices['theta'],
                                                   self.indices['g'], self.theta)
        self.t_indices = list(zip(*np.triu_indices(len(theta))))
        self.elim_mats, self.symm_mats, self.iden_mats = {}, {}, {}
        self.d2g_dchol = {}
        for key in self.levels:
            p = self.dims[key]['n_vars']
            # Elimination (L), symmetrizer (N) and identity matrices used in the
            # chain rule between the cholesky and covariance parameterizations
            self.elim_mats[key] = lmat(p).A
            self.symm_mats[key] = nmat(p).A
            self.iden_mats[key] = np.eye(p)
            self.d2g_dchol[key] = get_d2_chol(self.dims[key])
        # NOTE(review): x==1 appears to tag variance entries of the initial theta,
        # which get a nonnegativity bound -- confirm against make_theta's convention
        self.bounds = [(0, None) if x==1 else (None, None) for x in self.theta[:-1]]+[(None, None)]
        self.bounds_2 = [(1e-6, None) if x==1 else (None, None) for x in self.theta[:-1]]+[(None, None)]
        self.zero_mat = sp.sparse.eye(self.X.shape[1])*0.0
        self.zero_mat2 = sp.sparse.eye(1)*0.0
        self.rcov = rcov
        if rcov is None:
            # Identity residual structure: crossproducts are theta-independent
            self.XtX = self.X.T.dot(self.X)
            self.ZtZ = self.Zs.T.dot(self.Zs)
            self.ZtX = self.Zs.T.dot(self.X)
def update_mme(self, Ginv, Rinv):
"""
Parameters
----------
Ginv: sparse matrix
scipy sparse matrix with inverse covariance block diagonal
s: float
resid covariance
Returns
-------
M: sparse matrix
updated mixed model matrix
"""
if type(Rinv) in [float, int, np.float64, np.float32, np.float16,
np.int, np.int16, np.int32, np.int64]:
M = self.M.copy()/Rinv
else:
RZX = Rinv.dot(self.XZ)
C = sps.csc_matrix(RZX.T.dot(self.XZ))
Ry = Rinv.dot(self.y)
m = sps.csc_matrix(np.vstack([self.X.T.dot(Ry), self.Zs.T.dot(Ry)]))
M = sps.bmat([[C, m], [m.T, self.y.T.dot(Ry)]]).tocsc()
Omega = sp.sparse.block_diag([self.zero_mat, Ginv, self.zero_mat2])
M+=Omega
return M
    def update_gmat(self, theta, inverse=False):
        """
        Parameters
        ----------
        theta: ndarray
            covariance parameters on the original scale
        inverse: bool
            whether or not to inverse G
        Returns
        -------
        G: sparse matrix
            updated random effects covariance
        """
        # NOTE(review): this writes into self.G.data in place and returns the
        # same object, so any previously returned G is mutated as well.
        G = self.G
        for key in self.levels:
            ng = self.dims[key]['n_groups']
            theta_i = theta[self.indices['theta'][key]]
            if inverse:
                # invert the per-group covariance block before tiling
                theta_i = np.linalg.inv(invech(theta_i)).reshape(-1, order='F')
            else:
                theta_i = invech(theta_i).reshape(-1, order='F')
            # the same block repeats once per group at this level
            G.data[self.indices['g'][key]] = np.tile(theta_i, ng)
        return G
def loglike(self, theta, reml=True, use_sw=False, use_sparse=True):
"""
Parameters
----------
theta: array_like
The original parameterization of the model parameters
Returns
-------
loglike: scalar
Log likelihood of the model
"""
Ginv = self.update_gmat(theta, inverse=True)
M = self.update_mme(Ginv, theta[-1])
if (M.nnz / np.product(M.shape) < 0.05) and use_sparse:
L = cholesky(M.tocsc()).L().A
else:
L = np.linalg.cholesky(M.A)
ytPy = np.diag(L)[-1]**2
logdetG = lndet_gmat(theta, self.dims, self.indices)
logdetR = np.log(theta[-1]) * self.Z.shape[0]
if reml:
logdetC = np.sum(2*np.log(np.diag(L))[:-1])
ll = logdetR + logdetC + logdetG + ytPy
else:
Rinv = self.R / theta[-1]
RZ = Rinv.dot(self.Zs)
Q = Ginv + self.Zs.T.dot(RZ)
_, logdetV = cholesky(Q).slogdet()
ll = logdetR + logdetV + logdetG + ytPy
return ll
    def vinvcrossprod(self, A, B, theta):
        """
        Compute A' V^{-1} B without forming V^{-1} explicitly.

        Parameters
        ----------
        A : ndarray
            Array with first dimension equal to number of observations.
        B : ndarray
            Array with first dimension equal to number of observations.
        theta : ndarray
            covariance parameters.
        Returns
        -------
        AtVB : ndarray
            A' V^{-1} B.
        """
        Rinv = self.R / theta[-1]
        Ginv = self.update_gmat(theta, inverse=True)
        RZ = Rinv.dot(self.Zs)
        # Woodbury identity: V^{-1} = R^{-1} - R^{-1} Z Q^{-1} Z' R^{-1}
        Q = Ginv + self.Zs.T.dot(RZ)
        M = cholesky(Q).inv()
        AtRB = ((Rinv.dot(B)).T.dot(A)).T
        AtRZ = (RZ.T.dot(A)).T
        ZtRB = RZ.T.dot(B)
        AtVB = AtRB - (M.dot(ZtRB)).T.dot(AtRZ.T).T
        return AtVB
    def gradient(self, theta, reml=True, use_sw=False):
        """
        Parameters
        ----------
        theta: array_like
            The original parameterization of the components
        Returns
        -------
        gradient: array_like
            The gradient of the log likelihood with respect to the covariance
            parameterization
        Notes
        -----
        Uses the Woodbury identity so that only the (much smaller) system
        Q = G^{-1} + Z'R^{-1}Z is factorized.
        """
        s = theta[-1]
        Rinv = self.R / s
        Ginv = self.update_gmat(theta, inverse=True)
        if self.rcov is None:
            # Identity residual structure: reuse the precomputed crossproducts,
            # scaled by the residual precision.
            RZ = self.Zs / s
            RX = self.X / s
            Ry = self.y / s
            ZtRZ = self.ZtZ / s
            XtRX = self.XtX / s
            ZtRX = self.ZtX / s
            ZtRy = self.Zty / s
        else:
            RZ = Rinv.dot(self.Zs)
            RX = Rinv.dot(self.X)
            Ry = Rinv.dot(self.y)
            ZtRZ = RZ.T.dot(self.Zs)
            XtRX = self.X.T.dot(RX)
            ZtRX = RZ.T.dot(self.X)
            ZtRy = RZ.T.dot(self.y)
        Q = Ginv + ZtRZ
        M = cholesky(Q).inv()
        # W terms implement V^{-1} = R^{-1} - R^{-1}Z Q^{-1} Z'R^{-1}
        ZtWZ = ZtRZ - ZtRZ.dot(M).dot(ZtRZ)
        MZtRX = M.dot(ZtRX)
        XtWX = XtRX - ZtRX.T.dot(MZtRX)
        XtWX_inv = np.linalg.inv(XtWX)
        ZtWX = ZtRX - ZtRZ.dot(MZtRX)
        WX = RX - RZ.dot(MZtRX)
        U = XtWX_inv.dot(WX.T)
        Vy = Ry - RZ.dot(M.dot(ZtRy))
        # Py projects out the fixed effects from V^{-1}y
        Py = Vy - WX.dot(U.dot(self.y))
        ZtPy = self.Zs.T.dot(Py)
        grad = []
        for key in (self.levels):
            ind = self.jac_inds[key]
            ZtWZi = ZtWZ[ind][:, ind]
            ZtWXi = ZtWX[ind]
            ZtPyi = ZtPy[ind]
            for dGdi in self.g_derivs[key]:
                # trace term, quadratic-form term and (for REML) the
                # fixed-effect correction for each dG/dtheta_i
                g1 = dGdi.dot(ZtWZi).diagonal().sum()
                g2 = ZtPyi.T.dot(dGdi.dot(ZtPyi))
                if reml:
                    g3 = np.trace(XtWX_inv.dot(ZtWXi.T.dot(dGdi.dot(ZtWXi))))
                else:
                    g3 = 0
                gi = g1 - g2 - g3
                grad.append(gi)
        for dR in self.g_derivs['resid']:
            # residual-variance component of the gradient
            g1 = Rinv.diagonal().sum() - (M.dot((RZ.T).dot(dR).dot(RZ))).diagonal().sum()
            g2 = Py.T.dot(Py)
            if reml:
                g3 = np.trace(XtWX_inv.dot(WX.T.dot(WX)))
            else:
                g3 = 0
            gi = g1 - g2 - g3
            grad.append(gi)
        grad = np.concatenate(grad)
        grad = _check_shape(np.array(grad))
        return grad
    def hessian(self, theta, reml=True, use_sw=False):
        """
        Parameters
        ----------
        theta: array_like
            The original parameterization of the components
        Returns
        -------
        H: array_like
            The hessian of the log likelihood with respect to the covariance
            parameterization
        Notes
        -----
        This function has the infrastructure to support more complex residual
        covariances that are yet to be implemented.
        """
        Ginv = self.update_gmat(theta, inverse=True)
        Rinv = self.R / theta[-1]
        RZ = Rinv.dot(self.Zs)
        Q = Ginv + self.Zs.T.dot(RZ)
        M = cholesky(Q).inv()
        # W = V^{-1} via the Woodbury identity
        W = Rinv - RZ.dot(M).dot(RZ.T)
        WZ = W.dot(self.Zs)
        WX = W.dot(self.X)
        XtWX = WX.T.dot(self.X)
        ZtWX = self.Zs.T.dot(WX)
        U = np.linalg.solve(XtWX, WX.T)
        # NOTE(review): the solve below recomputes U; it could reuse it
        ZtP = WZ.T - ZtWX.dot(np.linalg.solve(XtWX, WX.T))
        ZtPZ = self.Zs.T.dot(ZtP.T)
        Py = W.dot(self.y) - WX.dot(U.dot(self.y))
        ZtPy = self.Zs.T.dot(Py)
        PPy = W.dot(Py) - WX.dot(U.dot(Py))
        ZtPPy =  self.Zs.T.dot(PPy)
        H = np.zeros((len(self.theta), len(self.theta)))
        PJ, yPZJ, ZPJ = [], [], []
        ix = []
        # cache per-parameter products J_i = dG/dtheta_i applied to the P terms
        for key in (self.levels):
            ind = self.jac_inds[key]
            ZtPZi = ZtPZ[ind]
            ZtPyi = ZtPy[ind]
            ZtPi = ZtP[ind]
            for i in range(len(self.g_derivs[key])):
                Gi = self.g_derivs[key][i]
                PJ.append(Gi.dot(ZtPZi))
                yPZJ.append(Gi.dot(ZtPyi))
                ZPJ.append((Gi.dot(ZtPi)).T)
                ix.append(ind)
        # upper triangle of the covariance-parameter block, filled symmetrically
        t_indices = list(zip(*np.triu_indices(len(self.theta)-1)))
        for i, j in t_indices:
            ZtPZij = ZtPZ[ix[i]][:, ix[j]]
            PJi, PJj = PJ[i][:, ix[j]], PJ[j][:, ix[i]]
            yPZJi, JjZPy = yPZJ[i], yPZJ[j]
            Hij = -np.einsum('ij,ji->', PJi, PJj)\
                    + (2 * (yPZJi.T.dot(ZtPZij)).dot(JjZPy))[0]
            H[i, j] = H[j, i] = Hij
        # cross derivatives with the residual parameter (last row/column)
        dR = self.g_derivs['resid'][0]
        dRZtP = (dR.dot(ZtP.T))
        for i in range(len(self.theta)-1):
            yPZJi = yPZJ[i]
            ZPJi = ZPJ[i]
            ZtPPyi = ZtPPy[ix[i]]
            H[i, -1] = H[-1, i] = 2*yPZJi.T.dot(ZtPPyi) - np.einsum('ij,ji->', ZPJi.T, dRZtP[:, ix[i]])
        P = W - WX.dot(U)
        H[-1, -1] = Py.T.dot(PPy)*2 - np.einsum("ij,ji->", P, P)
        return H
def update_chol(self, theta, inverse=False):
"""
Parameters
----------
theta: array_like
array containing the lower triangular components of the cholesky
for each random effect covariance
inverse: bool
Returns
-------
L_dict: dict of array_like
Dictionary whose keys and values correspond to level names
and the corresponding cholesky of the level's random effects
covariance
"""
L_dict = {}
for key in self.levels:
theta_i = theta[self.indices['theta'][key]]
L_i = invech_chol(theta_i)
L_dict[key] = L_i
return L_dict
    def dg_dchol(self, L_dict):
        """
        Parameters
        ----------
        L_dict: dict of array_like
            Dictionary whose keys and values correspond to level names
            and the corresponding cholesky of the level's random effects
            covariance
        Returns
        -------
        Jf: dict of array_like
            For each level contains the derivative of the cholesky parameters
            with respect to the covariance
        Notes
        -----
        Function evaluates the derivative of the cholesky parameterization
        with respect to the lower triangular components of the covariance
        """
        Jf = {}
        for key in self.levels:
            L = L_dict[key]
            E = self.elim_mats[key]
            N = self.symm_mats[key]
            I = self.iden_mats[key]
            # Jacobian of vech(LL') w.r.t. vech(L): E N (L kron I) E'
            Jf[key] = E.dot(N.dot(np.kron(L, I))).dot(E.T)
        return Jf
def loglike_c(self, theta_chol, reml=True, use_sw=False):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
loglike: scalar
Log likelihood of the model
"""
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return self.loglike(theta, reml, use_sw)
def gradient_c(self, theta_chol, reml=True, use_sw=False):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
gradient: array_like
The gradient of the log likelihood with respect to the covariance
parameterization
"""
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return self.gradient(theta, reml, use_sw)
def hessian_c(self, theta_chol, reml=True):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
hessian: array_like
The hessian of the log likelihood with respect to the covariance
parameterization
"""
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return self.hessian(theta, reml)
    def gradient_chol(self, theta_chol, reml=True, use_sw=False):
        """
        Parameters
        ----------
        theta_chol: array_like
            The cholesky parameterization of the components
        Returns
        -------
        gradient: array_like
            The gradient of the log likelihood with respect to the cholesky
            parameterization
        """
        L_dict = self.update_chol(theta_chol)
        Jf_dict = self.dg_dchol(L_dict)
        Jg = self.gradient_c(theta_chol, reml, use_sw)
        # chain rule: per-level Jacobians stacked block diagonally
        Jf = sp.linalg.block_diag(*Jf_dict.values())
        Jf = np.pad(Jf, [[0, 1]])
        # derivative of the exp mapping used for the residual parameter
        Jf[-1, -1] = np.exp(theta_chol[-1])
        return Jg.dot(Jf)
    def hessian_chol(self, theta_chol, reml=True):
        """
        Parameters
        ----------
        theta_chol: array_like
            The cholesky parameterization of the components
        Returns
        -------
        hessian: array_like
            The hessian of the log likelihood with respect to the cholesky
            parameterization
        """
        L_dict = self.update_chol(theta_chol)
        Jf_dict = self.dg_dchol(L_dict)
        Hq = self.hessian_c(theta_chol, reml)
        Jg = self.gradient_c(theta_chol, reml)
        Hf = self.d2g_dchol
        Jf = sp.linalg.block_diag(*Jf_dict.values())
        Jf = np.pad(Jf, [[0, 1]])
        Jf[-1, -1] = np.exp(theta_chol[-1])
        # second-order chain rule: H = J'Hq J + sum_i g_i d2g_i
        A = Jf.T.dot(Hq).dot(Jf)
        B = np.zeros_like(Hq)
        for key in self.levels:
            ix = self.indices['theta'][key]
            Jg_i = Jg[ix]
            Hf_i = Hf[key]
            # contract the gradient with the second derivative tensor
            C = np.einsum('i,ijk->jk', Jg_i, Hf_i)
            B[ix, ix[:, None]] += C
        B[-1, -1] = Jg[-1] * np.exp(theta_chol[-1])
        H = A + B
        return H
    def _compute_effects(self, theta=None):
        """
        Parameters
        ----------
        theta : ndarray, optional
            Model parameters in the covariance form
        Returns
        -------
        beta : ndarray
            Fixed effects estimated at theta.
        XtVinvX_inv : ndarray
            Fixed effects covariance matrix.
        u : ndarray
            Random effect estimate at theta.
        """
        theta = self.theta if theta is None else theta
        Ginv = self.update_gmat(theta, inverse=True)
        M = self.update_mme(Ginv, theta[-1])
        XZy = self.XZ.T.dot(self.y) / theta[-1]
        # solve the mixed model equations (excluding the y'y border) for [beta; u]
        chol_fac = cholesky(M[:-1, :-1].tocsc())
        betau = chol_fac.solve_A(XZy)
        u = betau[self.X.shape[1]:].reshape(-1)
        beta = betau[:self.X.shape[1]].reshape(-1)
        # fixed effects covariance (X'V^{-1}X)^{-1} via the Woodbury identity
        Rinv = self.R / theta[-1]
        RZ = Rinv.dot(self.Zs)
        Q = Ginv + self.Zs.T.dot(RZ)
        M = cholesky(Q).inv()
        XtRinvX = self.X.T.dot(Rinv.dot(self.X))
        XtRinvZ = self.X.T.dot(Rinv.dot(self.Z))
        XtVinvX = XtRinvX - XtRinvZ.dot(M.dot(XtRinvZ.T))
        XtVinvX_inv = np.linalg.inv(XtVinvX)
        return beta, XtVinvX_inv, u
def _optimize(self, reml=True, use_grad=True, use_hess=False, approx_hess=False,
opt_kws={}):
"""
Parameters
----------
use_grad : bool, optional
If true, the analytic gradient is used during optimization.
The default is True.
use_hess : bool, optional
If true, the analytic hessian is used during optimization.
The default is False.
approx_hess: bool, optional
If true, uses the gradient to approximate the hessian
opt_kws : dict, optional
Dictionary of options to use in scipy.optimize.minimize.
The default is {}.
Returns
-------
None.
"""
default_opt_kws = dict(verbose=0, gtol=1e-6, xtol=1e-6)
for key, value in default_opt_kws.items():
if key not in opt_kws.keys():
opt_kws[key] = value
if use_grad:
if use_hess:
hess = self.hessian_chol
elif approx_hess:
hess = lambda x, reml: so_gc_cd(self.gradient_chol, x, args=(reml,))
else:
hess = None
optimizer = sp.optimize.minimize(self.loglike_c, self.theta, args=(reml,),
jac=self.gradient_chol, hess=hess,
options=opt_kws, bounds=self.bounds,
method='trust-constr')
else:
jac = lambda x, reml: fo_fc_cd(self.loglike_c, x, args=(reml,))
hess = lambda x, reml: so_fc_cd(self.loglike_c, x, args=(reml,))
optimizer = sp.optimize.minimize(self.loglike_c, self.theta, args=(reml,),
jac=jac, hess=hess, bounds=self.bounds,
method='trust-constr', options=opt_kws)
theta_chol = optimizer.x
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return theta, theta_chol, optimizer
    def _post_fit(self, theta, theta_chol, optimizer, reml=True,
                  use_grad=True, analytic_se=False):
        """
        Parameters
        ----------
        use_grad : bool, optional
            If true and analytic_se is False, the gradient is used in the
            numerical approximation of the hessian. The default is True.
        analytic_se : bool, optional
            If true, then the hessian is used to compute standard errors.
            The default is False.
        Returns
        -------
        None.
        """
        beta, XtWX_inv, u = self._compute_effects(theta)
        params = np.concatenate([beta, theta])
        re_covs, re_corrs = {}, {}
        for key, value in self.dims.items():
            # recover each level's covariance matrix and its correlation form
            re_covs[key] = invech(theta[self.indices['theta'][key]].copy())
            C = re_covs[key]
            v = np.diag(np.sqrt(1/np.diag(C)))
            re_corrs[key] = v.dot(C).dot(v)
        if analytic_se:
            Htheta = self.hessian(theta)
        elif use_grad:
            Htheta = so_gc_cd(self.gradient, theta)
        else:
            Htheta = so_fc_cd(self.loglike, theta)
        self.theta, self.beta, self.u, self.params = theta, beta, u, params
        self.Hinv_beta = XtWX_inv
        # Htheta is the hessian of the deviance (-2 log L), hence the /2
        self.Hinv_theta = np.linalg.pinv(Htheta/2.0)
        self.se_beta = np.sqrt(np.diag(XtWX_inv))
        self.se_theta = np.sqrt(np.diag(self.Hinv_theta))
        self.se_params = np.concatenate([self.se_beta, self.se_theta])
        self.optimizer = optimizer
        self.theta_chol = theta_chol
        # additive normalizing constant omitted from the optimized objective
        if reml:
            self.llconst = (self.X.shape[0] - self.X.shape[1])*np.log(2*np.pi)
        else:
            self.llconst = self.X.shape[0] * np.log(2*np.pi)
        self.lltheta = self.optimizer.fun
        self.ll = (self.llconst + self.lltheta)
        self.llf = self.ll / -2.0
        self.re_covs = re_covs
        self.re_corrs = re_corrs
        # information criteria; n and d depend on the (RE)ML convention
        if reml:
            n = self.X.shape[0] - self.X.shape[1]
            d = len(self.theta)
        else:
            n = self.X.shape[0]
            d = self.X.shape[1] + len(self.theta)
        self.AIC = self.ll + 2.0 * d
        self.AICC = self.ll + 2 * d * n / (n-d-1)
        self.BIC = self.ll + d * np.log(n)
        self.CAIC = self.ll + d * (np.log(n) + 1)
        sumstats = np.array([self.ll, self.llf, self.AIC, self.AICC,
                             self.BIC, self.CAIC])
        self.sumstats = pd.DataFrame(sumstats, index=['ll', 'llf', 'AIC', 'AICC',
                                                      'BIC', 'CAIC'], columns=['value'])
def predict(self, X=None, Z=None):
"""
Parameters
----------
X : ndarray, optional
Model matrix for fixed effects. The default is None.
Z : ndarray, optional
Model matrix from random effects. The default is None.
Returns
-------
yhat : ndarray
Model predictions evaluated at X and Z.
"""
if X is None:
X = self.X
if Z is None:
Z = self.Z
yhat = X.dot(self.beta)+Z.dot(self.u)
return yhat
    def fit(self, reml=True, use_grad=True, use_hess=False, approx_hess=False,
            analytic_se=False, adjusted_pvals=True, opt_kws={}):
        """
        Parameters
        ----------
        use_grad : bool, optional
            If true, the analytic gradient is used during optimization.
            The default is True.
        use_hess : bool, optional
            If true, the analytic hessian is used during optimization.
            The default is False.
        approx_hess: bool, optional
            If true, uses the gradient to approximate the hessian
        analytic_se : bool, optional
            If true, then the hessian is used to compute standard errors.
            The default is False.
        opt_kws : dict, optional
            Dictionary of options to use in scipy.optimize.minimize.
            The default is {}.
        Returns
        -------
        None.
        """
        theta, theta_chol, optimizer = self._optimize(reml, use_grad, use_hess,
                                                      approx_hess, opt_kws)
        self._post_fit(theta, theta_chol, optimizer, reml, use_grad,
                       analytic_se)
        # parameter labels: fixed effects, then vech(G) per level, then residual
        param_names = list(self.fe_vars)
        for level in self.levels:
            for i, j in list(zip(*np.triu_indices(self.dims[level]['n_vars']))):
                param_names.append(f"{level}:G[{i}][{j}]")
        param_names.append("resid_cov")
        self.param_names = param_names
        res = np.vstack((self.params, self.se_params)).T
        res = pd.DataFrame(res, index=param_names, columns=['estimate', 'SE'])
        res['t'] = res['estimate'] / res['SE']
        # NOTE(review): this is a one-tailed sf of |t|; a two-tailed p-value
        # would be 2*sf -- confirm intended
        res['p'] = sp.stats.t(self.X.shape[0]-self.X.shape[1]).sf(np.abs(res['t']))
        res['degfree'] = self.X.shape[0] - self.X.shape[1]
        if adjusted_pvals:
            # replace fixed-effect tests with Satterthwaite-adjusted versions
            L = np.eye(self.X.shape[1])
            L_list = [L[[i]] for i in range(self.X.shape[1])]
            adj_table = pd.DataFrame(self.approx_degfree(L_list), index=self.fe_vars)
            res.loc[self.fe_vars, 't'] = adj_table['F']**0.5
            res.loc[self.fe_vars, 'degfree'] = adj_table['df2']
            res.loc[self.fe_vars, 'p'] = adj_table['p']
        self.res = res
def _restricted_ll_grad(self, theta_chol_f, free_ix, theta_chol_r, reml=True):
theta_chol_r[free_ix] = theta_chol_f
ll = self.loglike_c(theta_chol_r.copy(), reml)
g = self.gradient_chol(theta_chol_r.copy(), reml)[free_ix]
return ll, g
    def profile(self, n_points=40, tb=3):
        """
        Compute profile-likelihood traces (zeta curves) for each covariance
        parameter on the variance-correlation reparameterized scale.

        Parameters
        ----------
        n_points : int, optional
            Number of grid points per parameter. The default is 40.
        tb : int, optional
            Half-width of the profiling grid in standard errors.
            The default is 3.

        Returns
        -------
        thetas : ndarray
            Profiled parameter vectors, one row per grid point.
        zetas : ndarray
            Signed square-root likelihood-ratio statistics.
        ix : ndarray
            Index of the profiled parameter for each row of thetas.
        """
        theta = self.theta.copy()
        free_ix = np.ones_like(theta).astype(bool)
        reparam = VarCorrReparam(self.dims, self.indices)
        rmodel = RestrictedModel(self, reparam)
        tau = reparam.transform(theta)
        n_theta = len(theta)
        llmax = self.loglike(self.theta.copy())
        # standard errors on the tau scale from the reparameterized hessian
        H = so_gc_cd(vcrepara_grad, tau, args=(self.gradient, reparam,))
        se = np.diag(np.linalg.inv(H/2.0))**0.5
        thetas, zetas = np.zeros((n_theta*n_points, n_theta)), np.zeros(n_theta*n_points)
        k = 0
        pbar = tqdm.tqdm(total=n_theta*n_points, smoothing=0.001)
        for i in range(n_theta):
            # fix parameter i on a grid, re-optimize all others
            free_ix[i] = False
            t_mle = tau[i]
            tau_r = tau.copy()
            if self.bounds[i][0]==0:
                # keep the grid's lower end away from the boundary
                lb = np.maximum(0.01, t_mle-tb*se[i])
            else:
                lb = t_mle - tb * se[i]
            ub = t_mle + tb * se[i]
            tspace = np.linspace(lb, ub, n_points)
            for t0 in tspace:
                x = tau[free_ix]
                func = lambda x: rmodel.llgrad(x, free_ix, t0)
                bounds = rmodel.get_bounds(free_ix)
                opt = sp.optimize.minimize(func, x, jac=True, bounds=bounds,
                                           method='trust-constr',
                                           options=dict(initial_tr_radius=0.5))
                tau_r[free_ix] = opt.x
                tau_r[~free_ix] = t0
                # signed sqrt of the likelihood ratio relative to the MLE
                LR = (opt.fun - llmax)
                zeta = np.sqrt(LR) * np.sign(t0 - tau[~free_ix])
                zetas[k] = zeta
                thetas[k] = reparam.inverse_transform(tau_r)
                k+=1
                pbar.update(1)
            free_ix[i] = True
        pbar.close()
        ix = np.repeat(np.arange(n_theta), n_points)
        return thetas, zetas, ix
    def plot_profile(self, thetas, zetas, ix, quantiles=None, figsize=(16, 8)):
        """
        Plot profile zeta curves as produced by :meth:`profile`, with
        normal-quantile reference markers.

        Parameters
        ----------
        thetas : ndarray
            Profiled parameter vectors from :meth:`profile`.
        zetas : ndarray
            Signed square-root likelihood-ratio statistics.
        ix : ndarray
            Index of the profiled parameter for each row.
        quantiles : ndarray, optional
            Percent levels to mark; defaults to 60..99.9.
        figsize : tuple, optional
            NOTE(review): currently unused -- the figure size is hard
            coded to (14, 4) below.

        Returns
        -------
        fig, axes : matplotlib figure and axes
        """
        if quantiles is None:
            quantiles = np.array([60, 70, 80, 90, 95, 99, 99.9])
        # convert central coverage levels to two-sided tail percentiles
        quantiles = np.concatenate([(100-quantiles[::-1])/2, 100-(100-quantiles)/2])
        theta = self.theta.copy()
        se_theta = self.se_theta.copy()
        n_thetas = thetas.shape[1]
        q = sp.stats.norm(0, 1).ppf(np.array(quantiles)/100)
        fig, axes = plt.subplots(figsize=(14, 4), ncols=n_thetas, sharey=True)
        plt.subplots_adjust(wspace=0.05, left=0.05, right=0.95)
        for i in range(n_thetas):
            ax = axes[i]
            x = thetas[ix==i, i]
            y = zetas[ix==i]
            # clip the curve to |zeta| < 5 for display
            trunc = (y>-5)&(y<5)
            x, y = x[trunc], y[trunc]
            # invert the profile curve to locate parameter values at each quantile
            f_interp = sp.interpolate.interp1d(y, x, fill_value="extrapolate")
            xq = f_interp(q)
            ax.plot(x,y)
            ax.set_xlim(x.min(), x.max())
            ax.axhline(0, color='k')
            # vertical segments from the axis up to each quantile level
            sgs = np.zeros((len(q), 2, 2))
            sgs[:, 0, 0] = sgs[:, 1, 0] = xq
            sgs[:, 1, 1] = q
            xqt = theta[i] + q * se_theta[i]
            ax.axvline(theta[i], color='k')
            norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=q.min(), vmax=q.max())
            lc = mpl.collections.LineCollection(sgs, cmap=plt.cm.bwr, norm=norm)
            lc.set_array(q)
            lc.set_linewidth(2)
            ax.add_collection(lc)
            # Wald-interval endpoints for comparison with the profile quantiles
            ax.scatter(xqt, np.zeros_like(xqt), c=q, cmap=plt.cm.bwr, norm=norm,
                       s=20)
            ax.set_xlabel(f"$\\theta$[{i}]")
            ax.set_ylim(-5, 5)
        fig.suptitle("Profile Zeta Plots")
        return fig, axes
    def approx_degfree(self, L_list=None, theta=None, beta=None, method='satterthwaite'):
        """
        Approximate denominator degrees of freedom for fixed-effect contrasts.

        Parameters
        ----------
        L_list : list of ndarray, optional
            Contrast matrices; defaults to the identity (one contrast per
            fixed effect).
        theta : ndarray, optional
            Covariance parameters; defaults to the fitted values.
        beta : ndarray, optional
            Fixed effects; defaults to the fitted values.
        method : str, optional
            NOTE(review): currently unused -- only the Satterthwaite-style
            computation below is implemented.

        Returns
        -------
        res : list of dict
            For each contrast, the F statistic, numerator df, approximate
            denominator df, and p-value.
        """
        L_list = [np.eye(self.X.shape[1])] if L_list is None else L_list
        theta = self.theta if theta is None else theta
        beta = self.beta if beta is None else beta
        # C = (X'V^{-1}X)^{-1}; Vtheta = asymptotic covariance of theta
        C = np.linalg.inv(self.vinvcrossprod(self.X, self.X, theta))
        Vtheta = np.linalg.inv(so_gc_cd(self.gradient, theta))
        J = []
        # derivatives of C with respect to each covariance parameter
        for key in self.levels:
            ind = self.jac_inds[key]
            XtVZ = self.vinvcrossprod(self.X, self.Z[:, ind], theta)
            CXtVZ = C.dot(XtVZ)
            for dGdi in self.g_derivs[key]:
                dC = CXtVZ.dot(dGdi.dot(CXtVZ.T))
                J.append(dC)
        XtVi = self.vinvcrossprod(self.X, self.R.copy(), theta)
        CXtVi = C.dot(XtVi)
        J.append(CXtVi.dot(CXtVi.T))
        res = []
        for L in L_list:
            # eigendecompose the contrast covariance, largest eigenvalues first
            u, Q = np.linalg.eigh(L.dot(C).dot(L.T))
            order = np.argsort(u)[::-1]
            u, Q = u[order], Q[:, order]
            q = np.linalg.matrix_rank(L)
            P = Q.T.dot(L)
            t2 = (P.dot(beta))**2 / u
            f = np.sum(t2) / q
            D = []
            for i in range(q):
                x = P[i]
                D.append([np.dot(x, Ji).dot(x) for Ji in J])
            D = np.asarray(D)
            # per-component Satterthwaite degrees of freedom
            nu_d = np.array([D[i].T.dot(Vtheta).dot(D[i]) for i in range(q)])
            nu_m = u**2 / nu_d
            E = np.sum(nu_m[nu_m>2] / (nu_m[nu_m>2] - 2.0))
            nu = 2.0 * E / (E - q)
            res.append(dict(F=f, df1=q, df2=nu, p=sp.stats.f(q, nu).sf(f)))
        return res
class WLMM:
    def __init__(self, formula, data, weights=None, fixed_resid_cov=False):
        """
        Weighted linear mixed model with a fixed weight matrix applied to
        the residual covariance.

        Parameters
        ----------
        formula : string
            lme4 style formula with random effects specified by terms in
            parentheses with a bar
        data : dataframe
            Dataframe containing data. Missing values should be dropped
            manually before passing the dataframe.
        weights : ndarray, optional
            Array of model weights. The default is None, which sets the
            weights to one internally.
        fixed_resid_cov : bool, optional
            If True the residual scale is held fixed at one.
        Returns
        -------
        None.
        """
        if weights is None:
            weights = np.eye(len(data))
        # store both the weight matrix and its inverse as sparse matrices
        self.weights = sps.csc_matrix(weights)
        self.weights_inv = sps.csc_matrix(np.linalg.inv(weights))
        indices = {}
        X, Z, y, dims, levels, fe_vars = construct_model_matrices(formula, data, return_fe=True)
        theta, theta_indices = make_theta(dims)
        indices['theta'] = theta_indices
        G, g_indices = make_gcov(theta, indices, dims)
        indices['g'] = g_indices
        # Augmented crossproduct matrix M = [X Z y]'[X Z y] used by the mixed model equations
        XZ, Xty, Zty, yty = np.hstack([X, Z]), X.T.dot(y), Z.T.dot(y), y.T.dot(y)
        XZ = sp.sparse.csc_matrix(XZ)
        C, m = sps.csc_matrix(XZ.T.dot(XZ)), sps.csc_matrix(np.vstack([Xty, Zty]))
        M = sps.bmat([[C, m], [m.T, yty]])
        M = M.tocsc()
        self.fe_vars = fe_vars
        self.X, self.Z, self.y, self.dims, self.levels = X, Z, y, dims, levels
        self.XZ, self.Xty, self.Zty, self.yty = XZ, Xty, Zty, yty
        self.C, self.m, self.M = C, m, M
        self.theta, self.theta_chol = theta, transform_theta(theta, dims, indices)
        self.G = G
        self.indices = indices
        self.R = sps.eye(Z.shape[0])
        self.Zs = sps.csc_matrix(Z)
        # Sparse derivative matrices dG/dtheta_i and per-level column indices into Z
        self.g_derivs, self.jac_inds = get_jacmats(self.Zs, self.dims,
                                                   self.indices['theta'],
                                                   self.indices['g'], self.theta)
        self.t_indices = list(zip(*np.triu_indices(len(theta))))
        self.elim_mats, self.symm_mats, self.iden_mats = {}, {}, {}
        self.d2g_dchol = {}
        for key in self.levels:
            p = self.dims[key]['n_vars']
            # matrices used in the cholesky/covariance chain rule
            self.elim_mats[key] = lmat(p).A
            self.symm_mats[key] = nmat(p).A
            self.iden_mats[key] = np.eye(p)
            self.d2g_dchol[key] = get_d2_chol(self.dims[key])
        # NOTE(review): x==1 appears to tag variance entries of the initial theta,
        # which get a nonnegativity bound -- confirm against make_theta's convention
        self.bounds = [(0, None) if x==1 else (None, None) for x in self.theta[:-1]]+[(None, None)]
        self.bounds_2 = [(1e-6, None) if x==1 else (None, None) for x in self.theta[:-1]]+[(None, None)]
        self.zero_mat = sp.sparse.eye(self.X.shape[1])*0.0
        self.zero_mat2 = sp.sparse.eye(1)*0.0
        self.rcov = self.weights
        self.fixed_resid_cov = fixed_resid_cov
def update_mme(self, Ginv, Rinv):
"""
Parameters
----------
Ginv: sparse matrix
scipy sparse matrix with inverse covariance block diagonal
s: float
resid covariance
Returns
-------
M: sparse matrix
updated mixed model matrix
"""
if type(Rinv) in [float, int, np.float64, np.float32, np.float16,
np.int, np.int16, np.int32, np.int64]:
M = self.M.copy()/Rinv
else:
RZX = Rinv.dot(self.XZ)
C = sps.csc_matrix(RZX.T.dot(self.XZ))
Ry = Rinv.dot(self.y)
m = sps.csc_matrix(np.vstack([self.X.T.dot(Ry), self.Zs.T.dot(Ry)]))
M = sps.bmat([[C, m], [m.T, self.y.T.dot(Ry)]]).tocsc()
Omega = sp.sparse.block_diag([self.zero_mat, Ginv, self.zero_mat2])
M+=Omega
return M
    def update_gmat(self, theta, inverse=False):
        """
        Parameters
        ----------
        theta: ndarray
            covariance parameters on the original scale
        inverse: bool
            whether or not to inverse G
        Returns
        -------
        G: sparse matrix
            updated random effects covariance
        """
        # NOTE(review): this writes into self.G.data in place and returns the
        # same object, so any previously returned G is mutated as well.
        G = self.G
        for key in self.levels:
            ng = self.dims[key]['n_groups']
            theta_i = theta[self.indices['theta'][key]]
            if inverse:
                # invert the per-group covariance block before tiling
                theta_i = np.linalg.inv(invech(theta_i)).reshape(-1, order='F')
            else:
                theta_i = invech(theta_i).reshape(-1, order='F')
            # the same block repeats once per group at this level
            G.data[self.indices['g'][key]] = np.tile(theta_i, ng)
        return G
def loglike(self, theta, reml=True, use_sw=False, use_sparse=True):
"""
Parameters
----------
theta: array_like
The original parameterization of the model parameters
Returns
-------
loglike: scalar
Log likelihood of the model
"""
s = 1.0 if self.fixed_resid_cov else theta[-1]
Rinv = self.weights_inv.dot(self.R / s).dot(self.weights_inv)
Ginv = self.update_gmat(theta, inverse=True)
M = self.update_mme(Ginv, Rinv)
if (M.nnz / np.product(M.shape) < 0.05) and use_sparse:
L = cholesky(M.tocsc()).L().A
else:
L = np.linalg.cholesky(M.A)
ytPy = np.diag(L)[-1]**2
logdetG = lndet_gmat(theta, self.dims, self.indices)
logdetR = np.log(theta[-1]) * self.Z.shape[0]
if reml:
logdetC = np.sum(2*np.log(np.diag(L))[:-1])
ll = logdetR + logdetC + logdetG + ytPy
else:
Rinv = self.R / theta[-1]
RZ = Rinv.dot(self.Zs)
Q = Ginv + self.Zs.T.dot(RZ)
_, logdetV = cholesky(Q).slogdet()
ll = logdetR + logdetV + logdetG + ytPy
return ll
    def vinvcrossprod(self, X, theta):
        """
        Parameters
        ----------
        X : ndarray
            Array with first dimension equal to number of observations.
        theta : ndarray
            covariance parameters.
        Returns
        -------
        XtVX : ndarray
            X' V^{-1} X.
        """
        s = 1.0 if self.fixed_resid_cov else theta[-1]
        # weighted residual precision
        Rinv = self.weights_inv.dot(self.R / s).dot(self.weights_inv)
        Ginv = self.update_gmat(theta, inverse=True)
        RZ = Rinv.dot(self.Zs)
        # Woodbury identity: V^{-1} = R^{-1} - R^{-1} Z Q^{-1} Z' R^{-1}
        Q = Ginv + self.Zs.T.dot(RZ)
        M = cholesky(Q).inv()
        XtRX = X.T.dot(Rinv.dot(X))
        XtRZ = X.T.dot(Rinv.dot(self.Z))
        XtVX = XtRX - XtRZ.dot(M.dot(XtRZ.T))
        return XtVX
    def gradient(self, theta, reml=True, use_sw=False):
        """
        Parameters
        ----------
        theta: array_like
            The original parameterization of the components
        Returns
        -------
        gradient: array_like
            The gradient of the log likelihood with respect to the covariance
            parameterization
        Notes
        -----
        Uses the Woodbury identity with the weighted residual precision so
        that only Q = G^{-1} + Z'R^{-1}Z is factorized.
        """
        s = 1.0 if self.fixed_resid_cov else theta[-1]
        # weighted residual precision
        Rinv = self.weights_inv.dot(self.R / s).dot(self.weights_inv)
        Ginv = self.update_gmat(theta, inverse=True)
        RZ = Rinv.dot(self.Zs)
        RX = Rinv.dot(self.X)
        Ry = Rinv.dot(self.y)
        ZtRZ = RZ.T.dot(self.Zs)
        XtRX = self.X.T.dot(RX)
        ZtRX = RZ.T.dot(self.X)
        ZtRy = RZ.T.dot(self.y)
        Q = Ginv + ZtRZ
        M = cholesky(Q).inv()
        # W terms implement V^{-1} = R^{-1} - R^{-1}Z Q^{-1} Z'R^{-1}
        ZtWZ = ZtRZ - ZtRZ.dot(M).dot(ZtRZ)
        MZtRX = M.dot(ZtRX)
        XtWX = XtRX - ZtRX.T.dot(MZtRX)
        XtWX_inv = np.linalg.inv(XtWX)
        ZtWX = ZtRX - ZtRZ.dot(MZtRX)
        WX = RX - RZ.dot(MZtRX)
        U = XtWX_inv.dot(WX.T)
        Vy = Ry - RZ.dot(M.dot(ZtRy))
        # Py projects out the fixed effects from V^{-1}y
        Py = Vy - WX.dot(U.dot(self.y))
        ZtPy = self.Zs.T.dot(Py)
        grad = []
        for key in (self.levels):
            ind = self.jac_inds[key]
            ZtWZi = ZtWZ[ind][:, ind]
            ZtWXi = ZtWX[ind]
            ZtPyi = ZtPy[ind]
            for dGdi in self.g_derivs[key]:
                # trace term, quadratic-form term and (for REML) the
                # fixed-effect correction for each dG/dtheta_i
                g1 = dGdi.dot(ZtWZi).diagonal().sum()
                g2 = ZtPyi.T.dot(dGdi.dot(ZtPyi))
                if reml:
                    g3 = np.trace(XtWX_inv.dot(ZtWXi.T.dot(dGdi.dot(ZtWXi))))
                else:
                    g3 = 0
                gi = g1 - g2 - g3
                grad.append(gi)
        for dR in self.g_derivs['resid']:
            # residual-variance component of the gradient
            g1 = Rinv.diagonal().sum() - (M.dot((RZ.T).dot(dR).dot(RZ))).diagonal().sum()
            g2 = Py.T.dot(Py)
            if reml:
                g3 = np.trace(XtWX_inv.dot(WX.T.dot(WX)))
            else:
                g3 = 0
            gi = g1 - g2 - g3
            grad.append(gi)
        grad = np.concatenate(grad)
        grad = _check_shape(np.array(grad))
        return grad
    def hessian(self, theta, reml=True, use_sw=False):
        """
        Parameters
        ----------
        theta: array_like
            The original parameterization of the components
        Returns
        -------
        H: array_like
            The hessian of the log likelihood with respect to the covariance
            parameterization
        Notes
        -----
        This function has the infrastructure to support more complex residual
        covariances that are yet to be implemented.
        """
        s = 1.0 if self.fixed_resid_cov else theta[-1]
        # weighted residual precision
        Rinv = self.weights_inv.dot(self.R / s).dot(self.weights_inv)
        Ginv = self.update_gmat(theta, inverse=True)
        RZ = Rinv.dot(self.Zs)
        Q = Ginv + self.Zs.T.dot(RZ)
        M = cholesky(Q).inv()
        # W = V^{-1} via the Woodbury identity
        W = Rinv - RZ.dot(M).dot(RZ.T)
        WZ = W.dot(self.Zs)
        WX = W.dot(self.X)
        XtWX = WX.T.dot(self.X)
        ZtWX = self.Zs.T.dot(WX)
        U = np.linalg.solve(XtWX, WX.T)
        # NOTE(review): the solve below recomputes U; it could reuse it
        ZtP = WZ.T - ZtWX.dot(np.linalg.solve(XtWX, WX.T))
        ZtPZ = self.Zs.T.dot(ZtP.T)
        Py = W.dot(self.y) - WX.dot(U.dot(self.y))
        ZtPy = self.Zs.T.dot(Py)
        PPy = W.dot(Py) - WX.dot(U.dot(Py))
        ZtPPy =  self.Zs.T.dot(PPy)
        H = np.zeros((len(self.theta), len(self.theta)))
        PJ, yPZJ, ZPJ = [], [], []
        ix = []
        # cache per-parameter products J_i = dG/dtheta_i applied to the P terms
        for key in (self.levels):
            ind = self.jac_inds[key]
            ZtPZi = ZtPZ[ind]
            ZtPyi = ZtPy[ind]
            ZtPi = ZtP[ind]
            for i in range(len(self.g_derivs[key])):
                Gi = self.g_derivs[key][i]
                PJ.append(Gi.dot(ZtPZi))
                yPZJ.append(Gi.dot(ZtPyi))
                ZPJ.append((Gi.dot(ZtPi)).T)
                ix.append(ind)
        # upper triangle of the covariance-parameter block, filled symmetrically
        t_indices = list(zip(*np.triu_indices(len(self.theta)-1)))
        for i, j in t_indices:
            ZtPZij = ZtPZ[ix[i]][:, ix[j]]
            PJi, PJj = PJ[i][:, ix[j]], PJ[j][:, ix[i]]
            yPZJi, JjZPy = yPZJ[i], yPZJ[j]
            Hij = -np.einsum('ij,ji->', PJi, PJj)\
                    + (2 * (yPZJi.T.dot(ZtPZij)).dot(JjZPy))[0]
            H[i, j] = H[j, i] = Hij
        # cross derivatives with the residual parameter (last row/column)
        dR = self.g_derivs['resid'][0]
        dRZtP = (dR.dot(ZtP.T))
        for i in range(len(self.theta)-1):
            yPZJi = yPZJ[i]
            ZPJi = ZPJ[i]
            ZtPPyi = ZtPPy[ix[i]]
            H[i, -1] = H[-1, i] = 2*yPZJi.T.dot(ZtPPyi) - np.einsum('ij,ji->', ZPJi.T, dRZtP[:, ix[i]])
        P = W - WX.dot(U)
        H[-1, -1] = Py.T.dot(PPy)*2 - np.einsum("ij,ji->", P, P)
        return H
def update_chol(self, theta, inverse=False):
"""
Parameters
----------
theta: array_like
array containing the lower triangular components of the cholesky
for each random effect covariance
inverse: bool
Returns
-------
L_dict: dict of array_like
Dictionary whose keys and values correspond to level names
and the corresponding cholesky of the level's random effects
covariance
"""
L_dict = {}
for key in self.levels:
theta_i = theta[self.indices['theta'][key]]
L_i = invech_chol(theta_i)
L_dict[key] = L_i
return L_dict
    def dg_dchol(self, L_dict):
        """
        Parameters
        ----------
        L_dict: dict of array_like
            Dictionary whose keys and values correspond to level names
            and the corresponding cholesky of the level's random effects
            covariance
        Returns
        -------
        Jf: dict of array_like
            For each level contains the derivative of the cholesky parameters
            with respect to the covariance
        Notes
        -----
        Function evaluates the derivative of the cholesky parameterization
        with respect to the lower triangular components of the covariance
        """
        Jf = {}
        for key in self.levels:
            L = L_dict[key]
            E = self.elim_mats[key]
            N = self.symm_mats[key]
            I = self.iden_mats[key]
            # Jacobian of vech(LL') w.r.t. vech(L): E N (L kron I) E'
            Jf[key] = E.dot(N.dot(np.kron(L, I))).dot(E.T)
        return Jf
def loglike_c(self, theta_chol, reml=True, use_sw=False):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
loglike: scalar
Log likelihood of the model
"""
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return self.loglike(theta, reml, use_sw)
def gradient_c(self, theta_chol, reml=True, use_sw=False):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
gradient: array_like
The gradient of the log likelihood with respect to the covariance
parameterization
"""
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return self.gradient(theta, reml, use_sw)
def hessian_c(self, theta_chol, reml=True):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
hessian: array_like
The hessian of the log likelihood with respect to the covariance
parameterization
"""
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return self.hessian(theta, reml)
def gradient_chol(self, theta_chol, reml=True, use_sw=False):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
gradient: array_like
The gradient of the log likelihood with respect to the cholesky
parameterization
"""
L_dict = self.update_chol(theta_chol)
Jf_dict = self.dg_dchol(L_dict)
Jg = self.gradient_c(theta_chol, reml, use_sw)
Jf = sp.linalg.block_diag(*Jf_dict.values())
Jf = np.pad(Jf, [[0, 1]])
Jf[-1, -1] = np.exp(theta_chol[-1])
return Jg.dot(Jf)
def hessian_chol(self, theta_chol, reml=True):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
hessian: array_like
The hessian of the log likelihood with respect to the cholesky
parameterization
"""
L_dict = self.update_chol(theta_chol)
Jf_dict = self.dg_dchol(L_dict)
Hq = self.hessian_c(theta_chol, reml)
Jg = self.gradient_c(theta_chol, reml)
Hf = self.d2g_dchol
Jf = sp.linalg.block_diag(*Jf_dict.values())
Jf = np.pad(Jf, [[0, 1]])
Jf[-1, -1] = np.exp(theta_chol[-1])
A = Jf.T.dot(Hq).dot(Jf)
B = np.zeros_like(Hq)
for key in self.levels:
ix = self.indices['theta'][key]
Jg_i = Jg[ix]
Hf_i = Hf[key]
C = np.einsum('i,ijk->jk', Jg_i, Hf_i)
B[ix, ix[:, None]] += C
B[-1, -1] = Jg[-1] * np.exp(theta_chol[-1])
H = A + B
return H
def _compute_effects(self, theta=None):
"""
Parameters
----------
theta : ndarray, optional
Model parameters in the covariance form
Returns
-------
beta : ndarray
Fixed effects estimated at theta.
XtViX_inv : ndarray
Fixed effects covariance matrix.
u : ndarray
Random effect estimate at theta.
G : csc_matrix
Random effects covariance matrix.
R : dia_matrix
Matrix of residual covariance.
V : csc_matrix
Model covariance matrix given fixed effects.
"""
s = 1.0 if self.fixed_resid_cov else theta[-1]
Rinv = self.weights_inv.dot(self.R / s).dot(self.weights_inv)
theta = self.theta if theta is None else theta
Ginv = self.update_gmat(theta, inverse=True)
M = self.update_mme(Ginv, Rinv)
XZy = self.XZ.T.dot(Rinv.dot(self.y))
chol_fac = cholesky(M[:-1, :-1].tocsc())
betau = chol_fac.solve_A(XZy)
u = betau[self.X.shape[1]:].reshape(-1)
beta = betau[:self.X.shape[1]].reshape(-1)
RZ = Rinv.dot(self.Zs)
Q = Ginv + self.Zs.T.dot(RZ)
M = cholesky(Q).inv()
XtRinvX = self.X.T.dot(Rinv.dot(self.X))
XtRinvZ = self.X.T.dot(Rinv.dot(self.Z))
XtVinvX = XtRinvX - XtRinvZ.dot(M.dot(XtRinvZ.T))
XtVinvX_inv = np.linalg.inv(XtVinvX)
return beta, XtVinvX_inv, u
def _optimize(self, reml=True, use_grad=True, use_hess=False, approx_hess=False,
opt_kws={}):
"""
Parameters
----------
use_grad : bool, optional
If true, the analytic gradient is used during optimization.
The default is True.
use_hess : bool, optional
If true, the analytic hessian is used during optimization.
The default is False.
approx_hess: bool, optional
If true, uses the gradient to approximate the hessian
opt_kws : dict, optional
Dictionary of options to use in scipy.optimize.minimize.
The default is {}.
Returns
-------
None.
"""
default_opt_kws = dict(verbose=0, gtol=1e-6, xtol=1e-6)
for key, value in default_opt_kws.items():
if key not in opt_kws.keys():
opt_kws[key] = value
if use_grad:
if use_hess:
hess = self.hessian_chol
elif approx_hess:
hess = lambda x, reml: so_gc_cd(self.gradient_chol, x, args=(reml,))
else:
hess = None
optimizer = sp.optimize.minimize(self.loglike_c, self.theta, args=(reml,),
jac=self.gradient_chol, hess=hess,
options=opt_kws, bounds=self.bounds,
method='trust-constr')
else:
jac = lambda x, reml: fo_fc_cd(self.loglike_c, x, args=(reml,))
hess = lambda x, reml: so_fc_cd(self.loglike_c, x, args=(reml,))
optimizer = sp.optimize.minimize(self.loglike_c, self.theta, args=(reml,),
jac=jac, hess=hess, bounds=self.bounds,
method='trust-constr', options=opt_kws)
theta_chol = optimizer.x
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return theta, theta_chol, optimizer
def _post_fit(self, theta, theta_chol, optimizer, reml=True,
use_grad=True, analytic_se=False):
"""
Parameters
----------
use_grad : bool, optional
If true and analytic_se is False, the gradient is used in the
numerical approximation of the hessian. The default is True.
analytic_se : bool, optional
If true, then the hessian is used to compute standard errors.
The default is False.
Returns
-------
None.
"""
beta, XtWX_inv, u = self._compute_effects(theta)
params = np.concatenate([beta, theta])
re_covs, re_corrs = {}, {}
for key, value in self.dims.items():
re_covs[key] = invech(theta[self.indices['theta'][key]].copy())
C = re_covs[key]
v = np.diag(np.sqrt(1/np.diag(C)))
re_corrs[key] = v.dot(C).dot(v)
if analytic_se:
Htheta = self.hessian(theta)
elif use_grad:
Htheta = so_gc_cd(self.gradient, theta)
else:
Htheta = so_fc_cd(self.loglike, theta)
self.theta, self.beta, self.u, self.params = theta, beta, u, params
self.Hinv_beta = XtWX_inv
self.Hinv_theta = np.linalg.pinv(Htheta/2.0)
self.se_beta = np.sqrt(np.diag(XtWX_inv))
self.se_theta = np.sqrt(np.diag(self.Hinv_theta))
self.se_params = np.concatenate([self.se_beta, self.se_theta])
self.optimizer = optimizer
self.theta_chol = theta_chol
if reml:
self.llconst = (self.X.shape[0] - self.X.shape[1])*np.log(2*np.pi)
else:
self.llconst = self.X.shape[0] * np.log(2*np.pi)
self.lltheta = self.optimizer.fun
self.ll = (self.llconst + self.lltheta)
self.llf = self.ll / -2.0
self.re_covs = re_covs
self.re_corrs = re_corrs
if reml:
n = self.X.shape[0] - self.X.shape[1]
d = len(self.theta)
else:
n = self.X.shape[0]
d = self.X.shape[1] + len(self.theta)
self.AIC = self.ll + 2.0 * d
self.AICC = self.ll + 2 * d * n / (n-d-1)
self.BIC = self.ll + d * np.log(n)
self.CAIC = self.ll + d * (np.log(n) + 1)
sumstats = np.array([self.ll, self.llf, self.AIC, self.AICC,
self.BIC, self.CAIC])
self.sumstats = pd.DataFrame(sumstats, index=['ll', 'llf', 'AIC', 'AICC',
'BIC', 'CAIC'], columns=['value'])
def predict(self, X=None, Z=None):
"""
Parameters
----------
X : ndarray, optional
Model matrix for fixed effects. The default is None.
Z : ndarray, optional
Model matrix from random effects. The default is None.
Returns
-------
yhat : ndarray
Model predictions evaluated at X and Z.
"""
if X is None:
X = self.X
if Z is None:
Z = self.Z
yhat = X.dot(self.beta)+Z.dot(self.u)
return yhat
def fit(self, reml=True, use_grad=True, use_hess=False, approx_hess=False,
analytic_se=False, opt_kws={}):
"""
Parameters
----------
use_grad : bool, optional
If true, the analytic gradient is used during optimization.
The default is True.
use_hess : bool, optional
If true, the analytic hessian is used during optimization.
The default is False.
approx_hess: bool, optional
If true, uses the gradient to approximate the hessian
analytic_se : bool, optional
If true, then the hessian is used to compute standard errors.
The default is False.
opt_kws : dict, optional
Dictionary of options to use in scipy.optimize.minimize.
The default is {}.
Returns
-------
None.
"""
theta, theta_chol, optimizer = self._optimize(reml, use_grad, use_hess,
approx_hess, opt_kws)
self._post_fit(theta, theta_chol, optimizer, reml, use_grad,
analytic_se)
param_names = list(self.fe_vars)
for level in self.levels:
for i, j in list(zip(*np.triu_indices(self.dims[level]['n_vars']))):
param_names.append(f"{level}:G[{i}][{j}]")
param_names.append("resid_cov")
self.param_names = param_names
res = np.vstack((self.params, self.se_params)).T
res = pd.DataFrame(res, index=param_names, columns=['estimate', 'SE'])
res['t'] = res['estimate'] / res['SE']
res['p'] = sp.stats.t(self.X.shape[0]-self.X.shape[1]).sf(np.abs(res['t']))
self.res = res
def _restricted_ll_grad(self, theta_chol_f, free_ix, theta_chol_r, reml=True):
theta_chol_r[free_ix] = theta_chol_f
ll = self.loglike_c(theta_chol_r.copy(), reml)
g = self.gradient_chol(theta_chol_r.copy(), reml)[free_ix]
return ll, g
    def profile(self, n_points=40, par_ind=None, reml=True):
        """
        Profile the cholesky-scale parameters one at a time.

        Parameters
        ----------
        n_points: int
            Number of grid points per parameter profile.
        par_ind: array_like, optional
            NOTE(review): assigned below but never read afterwards -- every
            parameter is always profiled.  Confirm whether subsetting by
            par_ind was intended.
        reml: bool
            If true, the restricted likelihood is profiled.

        Returns
        -------
        thetas: ndarray
            (n_theta * n_points, n_theta) array of re-optimized parameter
            vectors along each profile.
        zetas: ndarray
            Signed square root of the likelihood ratio statistic at each
            grid point.
        ix: ndarray
            Maps each row of thetas/zetas to the index of the profiled
            parameter.
        """
        par_ind = np.ones_like(self.theta_chol) if par_ind is None else par_ind
        theta_chol = self.theta_chol.copy()
        n_theta = len(theta_chol)
        # Unconstrained maximum -- reference value for the LR statistic.
        llmax = self.loglike(self.theta.copy())
        free_ix = np.ones_like(theta_chol, dtype=bool)
        # Numerical hessian on the cholesky scale; the implied standard
        # errors set the width of each profiling grid.
        Hchol = so_gc_cd(self.gradient_chol, theta_chol, args=(reml,))
        se_chol = np.diag(np.linalg.inv(Hchol/2.0))**0.5
        thetas, zetas = np.zeros((n_theta*n_points, n_theta)), np.zeros(n_theta*n_points)
        k = 0
        pbar = tqdm.tqdm(total=n_theta*n_points, smoothing=0.001)
        for i in range(n_theta):
            # Hold parameter i fixed; re-optimize the rest at each grid point.
            free_ix[i] = False
            t_mle = theta_chol[i]
            theta_chol_r = theta_chol.copy()
            if self.bounds[i][0]==0:
                # Bounded-below (variance-type) parameter: keep the grid
                # strictly positive.
                lb = np.maximum(0.01, t_mle-4.5*se_chol[i])
            else:
                lb = t_mle - 4.5 * se_chol[i]
            ub = t_mle + 4.5 * se_chol[i]
            tspace = np.linspace(lb, ub, n_points)
            for t0 in tspace:
                theta_chol_r = theta_chol.copy()
                theta_chol_r[~free_ix] = t0
                theta_chol_f = theta_chol[free_ix]
                # The lambda mutates theta_chol_r in place via
                # _restricted_ll_grad each evaluation.
                func = lambda x : self._restricted_ll_grad(x, free_ix, theta_chol_r,
                                                           reml)
                bounds = np.array(self.bounds)[free_ix].tolist()
                opt = sp.optimize.minimize(func, theta_chol_f, jac=True,
                                           bounds=bounds,
                                           method='trust-constr')
                theta_chol_f = opt.x
                theta_chol_r[free_ix] = theta_chol_f
                # Signed root of the likelihood ratio statistic; sign marks
                # which side of the MLE the grid point lies on.
                LR = 2.0 * (opt.fun - llmax)
                zeta = np.sqrt(LR) * np.sign(t0 - theta_chol[~free_ix])
                zetas[k] = zeta
                thetas[k] = theta_chol_r
                k+=1
                pbar.update(1)
            free_ix[i] = True
        pbar.close()
        ix = np.repeat(np.arange(n_theta), n_points)
        return thetas, zetas, ix
    def plot_profile(self, n_points=40, par_ind=None, reml=True, quantiles=None):
        """
        Plot the signed-root profile likelihood of each parameter.

        Parameters
        ----------
        n_points: int
            Number of grid points per parameter profile.
        par_ind: array_like, optional
            Passed through to profile.
        reml: bool
            If true, the restricted likelihood is profiled.
        quantiles: list of float, optional
            Percentile levels (0-100) at which reference lines are drawn.

        Returns
        -------
        thetas, zetas, ix
            Profiling results, as returned by ``profile``.
        fig: Figure
            Matplotlib figure with one panel per parameter.
        ax: Axes
            NOTE(review): only the axes of the *last* panel is returned,
            not the full axes array -- confirm intent.
        """
        if quantiles is None:
            quantiles = [0.001, 0.05, 1, 5, 10, 20, 50, 80, 90, 95, 99, 99.5, 99.999]
        thetas, zetas, ix = self.profile(n_points, par_ind, reml)
        n_thetas = thetas.shape[1]
        # Standard normal quantiles matching the requested percentiles.
        q = sp.stats.norm(0, 1).ppf(np.array(quantiles)/100)
        fig, axes = plt.subplots(figsize=(14, 4), ncols=n_thetas, sharey=True)
        plt.subplots_adjust(wspace=0.05, left=0.05, right=0.95)
        for i in range(n_thetas):
            ax = axes[i]
            x = thetas[ix==i, i]
            y = zetas[ix==i]
            # Clip the profile to |zeta| < 5 to keep the panels readable.
            trunc = (y>-5)&(y<5)
            x, y = x[trunc], y[trunc]
            # Invert the profile to locate parameter values at the
            # reference quantiles.
            f_interp = sp.interpolate.interp1d(y, x, fill_value="extrapolate")
            xq = f_interp(q)
            ax.plot(x,y)
            ax.set_xlim(x.min(), x.max())
            ax.axhline(0, color='k')
            # Vertical reference segments from the axis to each quantile.
            for a, b in list(zip(xq, q)):
                ax.plot((a, a), (0, b), color='k')
            ax.set_ylim(-5, 5)
        return thetas, zetas, ix, fig, ax
class GLMM(WLMM):
    '''
    Currently an inefficient implementation of a GLMM, mostly done
    for fun. A variety of implementations for GLMMs have been proposed in the
    literature, and a variety of names have been used to refer to each model;
    the implementation here is based of off linearization using a taylor
    approximation of the error (assumed to be gaussian) around the current
    estimates of fixed and random effects. This type of approach may be
    referred to as penalized quasi-likelihood, or pseudo-likelihood, and
    may be abbreviated PQL, REPL, RPL, or RQL.
    '''
    def __init__(self, formula, data, weights=None, fam=None):
        """
        Parameters
        ----------
        formula: str
            Model formula with random effect terms, passed to WLMM.
        data: DataFrame
            Data to fit the model to.
        weights: ndarray, optional
            Observation weights, passed to WLMM.
        fam: ExponentialFamily instance or class
            Response distribution; a class is instantiated with no
            arguments.
        """
        super().__init__(formula=formula, data=data, weights=weights)
        # Accept either an instance or a class for the family; a class is
        # instantiated with its defaults.
        if isinstance(fam, ExponentialFamily) == False:
            fam = fam()
        self.f = fam
        # Keep pristine copies: the PQL loop repeatedly overwrites
        # self.theta and self.y with pseudo-data.
        self.theta_init = self.theta.copy()
        self.y_original = self.y.copy()
        self.non_continuous = [isinstance(self.f, Binomial),
                               isinstance(self.f, NegativeBinomial),
                               isinstance(self.f, Poisson)]
        if np.any(self.non_continuous):
            # For discrete families the residual variance is pinned at its
            # fixed value by constraining its bound to (0, 0).
            self.bounds = self.bounds[:-1]+[(0, 0)]
            # NOTE(review): other methods (e.g. _compute_effects, fit) read
            # self.fixed_resid_cov -- this attribute name looks like a typo;
            # confirm against the parent class.
            self.fix_resid_cov=True
        # Initial fit on the raw response to seed the PQL iterations.
        self.theta, self.theta_chol, self.optimizer = self._optimize()
        self.beta, _, self.u = self._compute_effects(self.theta)
        if isinstance(self.f, Binomial):
            # Normalize the random effect vector for the binomial case.
            self.u /= np.linalg.norm(self.u)
        # Degrees of freedom bookkeeping for the pseudo-likelihood stats.
        self._nfixed_params = self.X.shape[1]
        self._n_obs = self.X.shape[0]
        self._n_cov_params = len(self.bounds)
        self._df1 = self._n_obs - self._nfixed_params
        self._df2 = self._n_obs - self._nfixed_params - self._n_cov_params - 1
        self._ll_const = self._df1 / 2 * np.log(2*np.pi)
    def _update_model(self, W, nu):
        # Replace the working response and weights with the linearized
        # pseudo-data (W, nu) and reset theta for the inner optimization.
        nu = _check_shape(nu, 2)
        self.weights = sps.csc_matrix(W)
        self.weights_inv = sps.csc_matrix(np.diag(1.0/np.diag((W))))
        self.y = nu
        self.Xty = self.X.T.dot(nu)
        self.Zty = self.Z.T.dot(nu)
        self.theta = self.theta_init
        self.yty = nu.T.dot(nu)
    def _get_pseudovar(self):
        # First-order (taylor) linearization of the model around the
        # current estimates: working response nu and weight matrix W.
        eta = self.predict()
        mu = self.f.inv_link(eta)
        var_mu = _check_shape(self.f.var_func(mu=mu), 1)
        gp = self.f.dlink(mu)
        # Working response: eta + g'(mu) * (y - mu)
        nu = eta + gp * (_check_shape(self.y_original, 1) - mu)
        W = np.diag(np.sqrt(var_mu * (self.f.dlink(mu)**2)))
        return W, nu
    def fit(self, n_iters=200, tol=1e-3, optimizer_kwargs={}, verbose_outer=True):
        """
        Fit by iterating the PQL linearization to a fixed point.

        Parameters
        ----------
        n_iters: int
            Maximum number of outer (linearization) iterations.
        tol: float
            Relative change in theta below which the loop stops.
        optimizer_kwargs: dict
            Keyword arguments forwarded to the inner _optimize call.
            NOTE(review): mutable default argument -- safe only as long
            as _optimize does not mutate it; consider a None default.
        verbose_outer: bool
            If true, the relative parameter change is printed per iteration.

        Returns
        -------
        None.
        """
        theta, theta_chol, optimizer = self.theta, self.theta_chol, self.optimizer
        fit_hist = {}
        for i in range(n_iters):
            # Re-linearize at the current estimates and re-fit the LMM.
            W, nu = self._get_pseudovar()
            self._update_model(W, nu)
            theta_new, theta_chol_new, optimizer_new = self._optimize(**optimizer_kwargs)
            # Relative change in theta as the convergence criterion.
            tvar = (np.linalg.norm(theta)+np.linalg.norm(theta_new))
            eps = np.linalg.norm(theta - theta_new) / tvar
            fit_hist[i] = dict(param_change=eps, theta=theta_new, nu=nu)
            if verbose_outer:
                print(eps)
            if eps < tol:
                break
            theta, theta_chol, optimizer = theta_new, theta_chol_new, optimizer_new
        self.beta, _, self.u = self._compute_effects(theta)
        self._post_fit(theta, theta_chol, optimizer)
        self.res = get_param_table(self.params, self.se_params,
                                   self.X.shape[0]-len(self.params))
        # Linear predictor with and without the random effects.
        eta_fe = self.X.dot(self.beta)
        eta = self.X.dot(self.beta)+self.Z.dot(self.u)
        mu = self.f.inv_link(eta)
        gp = self.f.dlink(mu)
        var_mu = _check_shape(self.f.var_func(mu=mu), 1)
        r_eta_fe = _check_shape(self.y, 1) - eta_fe
        generalized_chi2 = self.vinvcrossprod(r_eta_fe, theta)
        # Raw residuals on the linear and the mean (response) scale.
        resids_raw_linear = _check_shape(self.y, 1) - eta
        resids_raw_mean = _check_shape(self.y_original, 1) - mu
        s = 1.0 if self.fixed_resid_cov else theta[-1]
        R = self.weights.dot(self.R * s).dot(self.weights)
        var_pearson_linear = R.diagonal() / gp**2
        var_pearson_mean = var_mu
        resids_pearson_linear = resids_raw_linear / np.sqrt(var_pearson_linear)
        resids_pearson_mean = resids_raw_mean / np.sqrt(var_pearson_mean)
        # Pseudo-likelihood based fit statistics.
        pll = self.loglike(self.theta) / -2.0 - self._ll_const
        aicc = -2 * pll + 2 * self._n_cov_params * self._df1 / self._df2
        bic = -2 * pll + self._n_cov_params * np.log(self._df1)
        self.sumstats = dict(generalized_chi2=generalized_chi2,
                             pseudo_loglike=pll,
                             AICC=aicc,
                             BIC=bic)
        self.resids = dict(resids_raw_linear=resids_raw_linear,
                           resids_raw_mean=resids_raw_mean,
                           resids_pearson_linear=resids_pearson_linear,
                           resids_pearson_mean=resids_pearson_mean)
        # Parameter names mirroring WLMM.fit.
        param_names = list(self.fe_vars)
        for level in self.levels:
            for i, j in list(zip(*np.triu_indices(self.dims[level]['n_vars']))):
                param_names.append(f"{level}:G[{i}][{j}]")
        param_names.append("resid_cov")
        self.param_names = param_names
        self.res.index = param_names
"""
from pystats.utilities.random_corr import vine_corr
from pystats.tests.test_data import generate_data
from pylmm.pylmm.lmm import LME
from pylmm.pylmm.glmm import WLME, GLMM
from pystats.utilities import numerical_derivs
np.set_printoptions(precision=3, suppress=True, linewidth=200)
formula = "y~1+x1+x2+(1+x3|id1)+(1+x4|id2)"
model_dict = {}
model_dict['gcov'] = {'id1':invech(np.array([2., 0.4, 2.])),
'id2':invech(np.array([2.,-0.4, 2.]))}
model_dict['ginfo'] = {'id1':dict(n_grp=200, n_per=10),
'id2':dict(n_grp=400, n_per=5)}
model_dict['mu'] = np.zeros(4)
model_dict['vcov'] = vine_corr(4, 20)
model_dict['beta'] = np.array([1, -1, 1])
model_dict['n_obs'] = 2000
data, formula = generate_data(formula, model_dict, r=0.6**0.5)
model_original = LME(formula, data)
model_cholesky = LME3(formula, data)
model_original._fit()
model_cholesky._fit(opt_kws=dict(verbose=3))
model_cholesky._post_fit()
model_original.se_params
model_cholesky.se_params
"""
| [
"numpy.product",
"numpy.linalg.matrix_rank",
"numpy.sqrt",
"numpy.linalg.pinv",
"numpy.hstack",
"sksparse.cholmod.cholesky",
"numpy.log",
"scipy.interpolate.interp1d",
"matplotlib.collections.LineCollection",
"numpy.array",
"numpy.argsort",
"numpy.einsum",
"numpy.linalg.norm",
"numpy.arang... | [((2005, 2029), 'scipy.sparse.csc_matrix', 'sp.sparse.csc_matrix', (['XZ'], {}), '(XZ)\n', (2025, 2029), True, 'import scipy as sp\n'), ((2125, 2155), 'scipy.sparse.bmat', 'sps.bmat', (['[[C, m], [m.T, yty]]'], {}), '([[C, m], [m.T, yty]])\n', (2133, 2155), True, 'import scipy.sparse as sps\n'), ((2545, 2564), 'scipy.sparse.eye', 'sps.eye', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (2552, 2564), True, 'import scipy.sparse as sps\n'), ((2583, 2600), 'scipy.sparse.csc_matrix', 'sps.csc_matrix', (['Z'], {}), '(Z)\n', (2597, 2600), True, 'import scipy.sparse as sps\n'), ((4623, 4682), 'scipy.sparse.block_diag', 'sp.sparse.block_diag', (['[self.zero_mat, Ginv, self.zero_mat2]'], {}), '([self.zero_mat, Ginv, self.zero_mat2])\n', (4643, 4682), True, 'import scipy as sp\n'), ((8626, 8645), 'numpy.linalg.inv', 'np.linalg.inv', (['XtWX'], {}), '(XtWX)\n', (8639, 8645), True, 'import numpy as np\n'), ((9768, 9788), 'numpy.concatenate', 'np.concatenate', (['grad'], {}), '(grad)\n', (9782, 9788), True, 'import numpy as np\n'), ((10757, 10784), 'numpy.linalg.solve', 'np.linalg.solve', (['XtWX', 'WX.T'], {}), '(XtWX, WX.T)\n', (10772, 10784), True, 'import numpy as np\n'), ((16331, 16351), 'numpy.pad', 'np.pad', (['Jf', '[[0, 1]]'], {}), '(Jf, [[0, 1]])\n', (16337, 16351), True, 'import numpy as np\n'), ((16374, 16396), 'numpy.exp', 'np.exp', (['theta_chol[-1]'], {}), '(theta_chol[-1])\n', (16380, 16396), True, 'import numpy as np\n'), ((17101, 17121), 'numpy.pad', 'np.pad', (['Jf', '[[0, 1]]'], {}), '(Jf, [[0, 1]])\n', (17107, 17121), True, 'import numpy as np\n'), ((17143, 17165), 'numpy.exp', 'np.exp', (['theta_chol[-1]'], {}), '(theta_chol[-1])\n', (17149, 17165), True, 'import numpy as np\n'), ((17213, 17230), 'numpy.zeros_like', 'np.zeros_like', (['Hq'], {}), '(Hq)\n', (17226, 17230), True, 'import numpy as np\n'), ((18907, 18929), 'numpy.linalg.inv', 'np.linalg.inv', (['XtVinvX'], {}), '(XtVinvX)\n', (18920, 18929), True, 'import numpy as np\n'), ((21711, 
21740), 'numpy.concatenate', 'np.concatenate', (['[beta, theta]'], {}), '([beta, theta])\n', (21725, 21740), True, 'import numpy as np\n'), ((22376, 22404), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(Htheta / 2.0)'], {}), '(Htheta / 2.0)\n', (22390, 22404), True, 'import numpy as np\n'), ((22536, 22581), 'numpy.concatenate', 'np.concatenate', (['[self.se_beta, self.se_theta]'], {}), '([self.se_beta, self.se_theta])\n', (22550, 22581), True, 'import numpy as np\n'), ((23409, 23480), 'numpy.array', 'np.array', (['[self.ll, self.llf, self.AIC, self.AICC, self.BIC, self.CAIC]'], {}), '([self.ll, self.llf, self.AIC, self.AICC, self.BIC, self.CAIC])\n', (23417, 23480), True, 'import numpy as np\n'), ((23534, 23630), 'pandas.DataFrame', 'pd.DataFrame', (['sumstats'], {'index': "['ll', 'llf', 'AIC', 'AICC', 'BIC', 'CAIC']", 'columns': "['value']"}), "(sumstats, index=['ll', 'llf', 'AIC', 'AICC', 'BIC', 'CAIC'],\n columns=['value'])\n", (23546, 23630), True, 'import pandas as pd\n'), ((25799, 25863), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'index': 'param_names', 'columns': "['estimate', 'SE']"}), "(res, index=param_names, columns=['estimate', 'SE'])\n", (25811, 25863), True, 'import pandas as pd\n'), ((27372, 27424), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': '(n_theta * n_points)', 'smoothing': '(0.001)'}), '(total=n_theta * n_points, smoothing=0.001)\n', (27381, 27424), False, 'import tqdm\n'), ((29126, 29184), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 4)', 'ncols': 'n_thetas', 'sharey': '(True)'}), '(figsize=(14, 4), ncols=n_thetas, sharey=True)\n', (29138, 29184), True, 'import matplotlib.pyplot as plt\n'), ((29193, 29248), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.05)', 'left': '(0.05)', 'right': '(0.95)'}), '(wspace=0.05, left=0.05, right=0.95)\n', (29212, 29248), True, 'import matplotlib.pyplot as plt\n'), ((32767, 32790), 'scipy.sparse.csc_matrix', 'sps.csc_matrix', (['weights'], {}), 
'(weights)\n', (32781, 32790), True, 'import scipy.sparse as sps\n'), ((33274, 33298), 'scipy.sparse.csc_matrix', 'sp.sparse.csc_matrix', (['XZ'], {}), '(XZ)\n', (33294, 33298), True, 'import scipy as sp\n'), ((33394, 33424), 'scipy.sparse.bmat', 'sps.bmat', (['[[C, m], [m.T, yty]]'], {}), '([[C, m], [m.T, yty]])\n', (33402, 33424), True, 'import scipy.sparse as sps\n'), ((33814, 33833), 'scipy.sparse.eye', 'sps.eye', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (33821, 33833), True, 'import scipy.sparse as sps\n'), ((33852, 33869), 'scipy.sparse.csc_matrix', 'sps.csc_matrix', (['Z'], {}), '(Z)\n', (33866, 33869), True, 'import scipy.sparse as sps\n'), ((35781, 35840), 'scipy.sparse.block_diag', 'sp.sparse.block_diag', (['[self.zero_mat, Ginv, self.zero_mat2]'], {}), '([self.zero_mat, Ginv, self.zero_mat2])\n', (35801, 35840), True, 'import scipy as sp\n'), ((39757, 39776), 'numpy.linalg.inv', 'np.linalg.inv', (['XtWX'], {}), '(XtWX)\n', (39770, 39776), True, 'import numpy as np\n'), ((40899, 40919), 'numpy.concatenate', 'np.concatenate', (['grad'], {}), '(grad)\n', (40913, 40919), True, 'import numpy as np\n'), ((41980, 42007), 'numpy.linalg.solve', 'np.linalg.solve', (['XtWX', 'WX.T'], {}), '(XtWX, WX.T)\n', (41995, 42007), True, 'import numpy as np\n'), ((47554, 47574), 'numpy.pad', 'np.pad', (['Jf', '[[0, 1]]'], {}), '(Jf, [[0, 1]])\n', (47560, 47574), True, 'import numpy as np\n'), ((47597, 47619), 'numpy.exp', 'np.exp', (['theta_chol[-1]'], {}), '(theta_chol[-1])\n', (47603, 47619), True, 'import numpy as np\n'), ((48324, 48344), 'numpy.pad', 'np.pad', (['Jf', '[[0, 1]]'], {}), '(Jf, [[0, 1]])\n', (48330, 48344), True, 'import numpy as np\n'), ((48366, 48388), 'numpy.exp', 'np.exp', (['theta_chol[-1]'], {}), '(theta_chol[-1])\n', (48372, 48388), True, 'import numpy as np\n'), ((48436, 48453), 'numpy.zeros_like', 'np.zeros_like', (['Hq'], {}), '(Hq)\n', (48449, 48453), True, 'import numpy as np\n'), ((50215, 50237), 'numpy.linalg.inv', 'np.linalg.inv', 
(['XtVinvX'], {}), '(XtVinvX)\n', (50228, 50237), True, 'import numpy as np\n'), ((53010, 53039), 'numpy.concatenate', 'np.concatenate', (['[beta, theta]'], {}), '([beta, theta])\n', (53024, 53039), True, 'import numpy as np\n'), ((53675, 53703), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(Htheta / 2.0)'], {}), '(Htheta / 2.0)\n', (53689, 53703), True, 'import numpy as np\n'), ((53835, 53880), 'numpy.concatenate', 'np.concatenate', (['[self.se_beta, self.se_theta]'], {}), '([self.se_beta, self.se_theta])\n', (53849, 53880), True, 'import numpy as np\n'), ((54708, 54779), 'numpy.array', 'np.array', (['[self.ll, self.llf, self.AIC, self.AICC, self.BIC, self.CAIC]'], {}), '([self.ll, self.llf, self.AIC, self.AICC, self.BIC, self.CAIC])\n', (54716, 54779), True, 'import numpy as np\n'), ((54833, 54929), 'pandas.DataFrame', 'pd.DataFrame', (['sumstats'], {'index': "['ll', 'llf', 'AIC', 'AICC', 'BIC', 'CAIC']", 'columns': "['value']"}), "(sumstats, index=['ll', 'llf', 'AIC', 'AICC', 'BIC', 'CAIC'],\n columns=['value'])\n", (54845, 54929), True, 'import pandas as pd\n'), ((57077, 57141), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'index': 'param_names', 'columns': "['estimate', 'SE']"}), "(res, index=param_names, columns=['estimate', 'SE'])\n", (57089, 57141), True, 'import pandas as pd\n'), ((57878, 57914), 'numpy.ones_like', 'np.ones_like', (['theta_chol'], {'dtype': 'bool'}), '(theta_chol, dtype=bool)\n', (57890, 57914), True, 'import numpy as np\n'), ((58171, 58223), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': '(n_theta * n_points)', 'smoothing': '(0.001)'}), '(total=n_theta * n_points, smoothing=0.001)\n', (58180, 58223), False, 'import tqdm\n'), ((60062, 60120), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 4)', 'ncols': 'n_thetas', 'sharey': '(True)'}), '(figsize=(14, 4), ncols=n_thetas, sharey=True)\n', (60074, 60120), True, 'import matplotlib.pyplot as plt\n'), ((60129, 60184), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', 
([], {'wspace': '(0.05)', 'left': '(0.05)', 'right': '(0.95)'}), '(wspace=0.05, left=0.05, right=0.95)\n', (60148, 60184), True, 'import matplotlib.pyplot as plt\n'), ((61893, 61920), 'numpy.any', 'np.any', (['self.non_continuous'], {}), '(self.non_continuous)\n', (61899, 61920), True, 'import numpy as np\n'), ((62658, 62675), 'scipy.sparse.csc_matrix', 'sps.csc_matrix', (['W'], {}), '(W)\n', (62672, 62675), True, 'import scipy.sparse as sps\n'), ((1938, 1955), 'numpy.hstack', 'np.hstack', (['[X, Z]'], {}), '([X, Z])\n', (1947, 1955), True, 'import numpy as np\n'), ((3185, 3194), 'numpy.eye', 'np.eye', (['p'], {}), '(p)\n', (3191, 3194), True, 'import numpy as np\n'), ((3486, 3516), 'scipy.sparse.eye', 'sp.sparse.eye', (['self.X.shape[1]'], {}), '(self.X.shape[1])\n', (3499, 3516), True, 'import scipy as sp\n'), ((3546, 3562), 'scipy.sparse.eye', 'sp.sparse.eye', (['(1)'], {}), '(1)\n', (3559, 3562), True, 'import scipy as sp\n'), ((5496, 5516), 'numpy.tile', 'np.tile', (['theta_i', 'ng'], {}), '(theta_i, ng)\n', (5503, 5516), True, 'import numpy as np\n'), ((6117, 6140), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['M.A'], {}), '(M.A)\n', (6135, 6140), True, 'import numpy as np\n'), ((6253, 6270), 'numpy.log', 'np.log', (['theta[-1]'], {}), '(theta[-1])\n', (6259, 6270), True, 'import numpy as np\n'), ((9817, 9831), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (9825, 9831), True, 'import numpy as np\n'), ((12342, 12368), 'numpy.einsum', 'np.einsum', (['"""ij,ji->"""', 'P', 'P'], {}), "('ij,ji->', P, P)\n", (12351, 12368), True, 'import numpy as np\n'), ((17385, 17419), 'numpy.einsum', 'np.einsum', (['"""i,ijk->jk"""', 'Jg_i', 'Hf_i'], {}), "('i,ijk->jk', Jg_i, Hf_i)\n", (17394, 17419), True, 'import numpy as np\n'), ((17487, 17509), 'numpy.exp', 'np.exp', (['theta_chol[-1]'], {}), '(theta_chol[-1])\n', (17493, 17509), True, 'import numpy as np\n'), ((20185, 20352), 'scipy.optimize.minimize', 'sp.optimize.minimize', (['self.loglike_c', 'self.theta'], 
{'args': '(reml,)', 'jac': 'self.gradient_chol', 'hess': 'hess', 'options': 'opt_kws', 'bounds': 'self.bounds', 'method': '"""trust-constr"""'}), "(self.loglike_c, self.theta, args=(reml,), jac=self.\n gradient_chol, hess=hess, options=opt_kws, bounds=self.bounds, method=\n 'trust-constr')\n", (20205, 20352), True, 'import scipy as sp\n'), ((20670, 20816), 'scipy.optimize.minimize', 'sp.optimize.minimize', (['self.loglike_c', 'self.theta'], {'args': '(reml,)', 'jac': 'jac', 'hess': 'hess', 'bounds': 'self.bounds', 'method': '"""trust-constr"""', 'options': 'opt_kws'}), "(self.loglike_c, self.theta, args=(reml,), jac=jac,\n hess=hess, bounds=self.bounds, method='trust-constr', options=opt_kws)\n", (20690, 20816), True, 'import scipy as sp\n'), ((22434, 22451), 'numpy.diag', 'np.diag', (['XtWX_inv'], {}), '(XtWX_inv)\n', (22441, 22451), True, 'import numpy as np\n'), ((22485, 22509), 'numpy.diag', 'np.diag', (['self.Hinv_theta'], {}), '(self.Hinv_theta)\n', (22492, 22509), True, 'import numpy as np\n'), ((25742, 25782), 'numpy.vstack', 'np.vstack', (['(self.params, self.se_params)'], {}), '((self.params, self.se_params))\n', (25751, 25782), True, 'import numpy as np\n'), ((25977, 25993), 'numpy.abs', 'np.abs', (["res['t']"], {}), "(res['t'])\n", (25983, 25993), True, 'import numpy as np\n'), ((26097, 26120), 'numpy.eye', 'np.eye', (['self.X.shape[1]'], {}), '(self.X.shape[1])\n', (26103, 26120), True, 'import numpy as np\n'), ((27277, 27316), 'numpy.zeros', 'np.zeros', (['(n_theta * n_points, n_theta)'], {}), '((n_theta * n_points, n_theta))\n', (27285, 27316), True, 'import numpy as np\n'), ((27316, 27344), 'numpy.zeros', 'np.zeros', (['(n_theta * n_points)'], {}), '(n_theta * n_points)\n', (27324, 27344), True, 'import numpy as np\n'), ((27751, 27780), 'numpy.linspace', 'np.linspace', (['lb', 'ub', 'n_points'], {}), '(lb, ub, n_points)\n', (27762, 27780), True, 'import numpy as np\n'), ((28590, 28608), 'numpy.arange', 'np.arange', (['n_theta'], {}), '(n_theta)\n', 
(28599, 28608), True, 'import numpy as np\n'), ((28806, 28846), 'numpy.array', 'np.array', (['[60, 70, 80, 90, 95, 99, 99.9]'], {}), '([60, 70, 80, 90, 95, 99, 99.9])\n', (28814, 28846), True, 'import numpy as np\n'), ((28871, 28945), 'numpy.concatenate', 'np.concatenate', (['[(100 - quantiles[::-1]) / 2, 100 - (100 - quantiles) / 2]'], {}), '([(100 - quantiles[::-1]) / 2, 100 - (100 - quantiles) / 2])\n', (28885, 28945), True, 'import numpy as np\n'), ((29464, 29519), 'scipy.interpolate.interp1d', 'sp.interpolate.interp1d', (['y', 'x'], {'fill_value': '"""extrapolate"""'}), "(y, x, fill_value='extrapolate')\n", (29487, 29519), True, 'import scipy as sp\n'), ((29958, 30021), 'matplotlib.collections.LineCollection', 'mpl.collections.LineCollection', (['sgs'], {'cmap': 'plt.cm.bwr', 'norm': 'norm'}), '(sgs, cmap=plt.cm.bwr, norm=norm)\n', (29988, 30021), True, 'import matplotlib as mpl\n'), ((31415, 31439), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['L'], {}), '(L)\n', (31436, 31439), True, 'import numpy as np\n'), ((31688, 31701), 'numpy.asarray', 'np.asarray', (['D'], {}), '(D)\n', (31698, 31701), True, 'import numpy as np\n'), ((31828, 31875), 'numpy.sum', 'np.sum', (['(nu_m[nu_m > 2] / (nu_m[nu_m > 2] - 2.0))'], {}), '(nu_m[nu_m > 2] / (nu_m[nu_m > 2] - 2.0))\n', (31834, 31875), True, 'import numpy as np\n'), ((32833, 32855), 'numpy.linalg.inv', 'np.linalg.inv', (['weights'], {}), '(weights)\n', (32846, 32855), True, 'import numpy as np\n'), ((33207, 33224), 'numpy.hstack', 'np.hstack', (['[X, Z]'], {}), '([X, Z])\n', (33216, 33224), True, 'import numpy as np\n'), ((34454, 34463), 'numpy.eye', 'np.eye', (['p'], {}), '(p)\n', (34460, 34463), True, 'import numpy as np\n'), ((34755, 34785), 'scipy.sparse.eye', 'sp.sparse.eye', (['self.X.shape[1]'], {}), '(self.X.shape[1])\n', (34768, 34785), True, 'import scipy as sp\n'), ((34815, 34831), 'scipy.sparse.eye', 'sp.sparse.eye', (['(1)'], {}), '(1)\n', (34828, 34831), True, 'import scipy as sp\n'), ((36654, 
36674), 'numpy.tile', 'np.tile', (['theta_i', 'ng'], {}), '(theta_i, ng)\n', (36661, 36674), True, 'import numpy as np\n'), ((37395, 37418), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['M.A'], {}), '(M.A)\n', (37413, 37418), True, 'import numpy as np\n'), ((37531, 37548), 'numpy.log', 'np.log', (['theta[-1]'], {}), '(theta[-1])\n', (37537, 37548), True, 'import numpy as np\n'), ((40948, 40962), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (40956, 40962), True, 'import numpy as np\n'), ((43565, 43591), 'numpy.einsum', 'np.einsum', (['"""ij,ji->"""', 'P', 'P'], {}), "('ij,ji->', P, P)\n", (43574, 43591), True, 'import numpy as np\n'), ((48608, 48642), 'numpy.einsum', 'np.einsum', (['"""i,ijk->jk"""', 'Jg_i', 'Hf_i'], {}), "('i,ijk->jk', Jg_i, Hf_i)\n", (48617, 48642), True, 'import numpy as np\n'), ((48710, 48732), 'numpy.exp', 'np.exp', (['theta_chol[-1]'], {}), '(theta_chol[-1])\n', (48716, 48732), True, 'import numpy as np\n'), ((51493, 51660), 'scipy.optimize.minimize', 'sp.optimize.minimize', (['self.loglike_c', 'self.theta'], {'args': '(reml,)', 'jac': 'self.gradient_chol', 'hess': 'hess', 'options': 'opt_kws', 'bounds': 'self.bounds', 'method': '"""trust-constr"""'}), "(self.loglike_c, self.theta, args=(reml,), jac=self.\n gradient_chol, hess=hess, options=opt_kws, bounds=self.bounds, method=\n 'trust-constr')\n", (51513, 51660), True, 'import scipy as sp\n'), ((51978, 52124), 'scipy.optimize.minimize', 'sp.optimize.minimize', (['self.loglike_c', 'self.theta'], {'args': '(reml,)', 'jac': 'jac', 'hess': 'hess', 'bounds': 'self.bounds', 'method': '"""trust-constr"""', 'options': 'opt_kws'}), "(self.loglike_c, self.theta, args=(reml,), jac=jac,\n hess=hess, bounds=self.bounds, method='trust-constr', options=opt_kws)\n", (51998, 52124), True, 'import scipy as sp\n'), ((53733, 53750), 'numpy.diag', 'np.diag', (['XtWX_inv'], {}), '(XtWX_inv)\n', (53740, 53750), True, 'import numpy as np\n'), ((53784, 53808), 'numpy.diag', 'np.diag', 
(['self.Hinv_theta'], {}), '(self.Hinv_theta)\n', (53791, 53808), True, 'import numpy as np\n'), ((57020, 57060), 'numpy.vstack', 'np.vstack', (['(self.params, self.se_params)'], {}), '((self.params, self.se_params))\n', (57029, 57060), True, 'import numpy as np\n'), ((57255, 57271), 'numpy.abs', 'np.abs', (["res['t']"], {}), "(res['t'])\n", (57261, 57271), True, 'import numpy as np\n'), ((57661, 57690), 'numpy.ones_like', 'np.ones_like', (['self.theta_chol'], {}), '(self.theta_chol)\n', (57673, 57690), True, 'import numpy as np\n'), ((58076, 58115), 'numpy.zeros', 'np.zeros', (['(n_theta * n_points, n_theta)'], {}), '((n_theta * n_points, n_theta))\n', (58084, 58115), True, 'import numpy as np\n'), ((58115, 58143), 'numpy.zeros', 'np.zeros', (['(n_theta * n_points)'], {}), '(n_theta * n_points)\n', (58123, 58143), True, 'import numpy as np\n'), ((58589, 58618), 'numpy.linspace', 'np.linspace', (['lb', 'ub', 'n_points'], {}), '(lb, ub, n_points)\n', (58600, 58618), True, 'import numpy as np\n'), ((59611, 59629), 'numpy.arange', 'np.arange', (['n_theta'], {}), '(n_theta)\n', (59620, 59629), True, 'import numpy as np\n'), ((60400, 60455), 'scipy.interpolate.interp1d', 'sp.interpolate.interp1d', (['y', 'x'], {'fill_value': '"""extrapolate"""'}), "(y, x, fill_value='extrapolate')\n", (60423, 60455), True, 'import scipy as sp\n'), ((62209, 62231), 'numpy.linalg.norm', 'np.linalg.norm', (['self.u'], {}), '(self.u)\n', (62223, 62231), True, 'import numpy as np\n'), ((62536, 62553), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (62542, 62553), True, 'import numpy as np\n'), ((65038, 65065), 'numpy.sqrt', 'np.sqrt', (['var_pearson_linear'], {}), '(var_pearson_linear)\n', (65045, 65065), True, 'import numpy as np\n'), ((65114, 65139), 'numpy.sqrt', 'np.sqrt', (['var_pearson_mean'], {}), '(var_pearson_mean)\n', (65121, 65139), True, 'import numpy as np\n'), ((2090, 2111), 'numpy.vstack', 'np.vstack', (['[Xty, Zty]'], {}), '([Xty, Zty])\n', (2099, 2111), True, 
'import numpy as np\n'), ((6156, 6166), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (6163, 6166), True, 'import numpy as np\n'), ((7165, 7176), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (7173, 7176), False, 'from sksparse.cholmod import cholesky\n'), ((8450, 8461), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (8458, 8461), False, 'from sksparse.cholmod import cholesky\n'), ((10567, 10578), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (10575, 10578), False, 'from sksparse.cholmod import cholesky\n'), ((10815, 10842), 'numpy.linalg.solve', 'np.linalg.solve', (['XtWX', 'WX.T'], {}), '(XtWX, WX.T)\n', (10830, 10842), True, 'import numpy as np\n'), ((12232, 12277), 'numpy.einsum', 'np.einsum', (['"""ij,ji->"""', 'ZPJi.T', 'dRZtP[:, ix[i]]'], {}), "('ij,ji->', ZPJi.T, dRZtP[:, ix[i]])\n", (12241, 12277), True, 'import numpy as np\n'), ((18709, 18720), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (18717, 18720), False, 'from sksparse.cholmod import cholesky\n'), ((22736, 22753), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (22742, 22753), True, 'import numpy as np\n'), ((22811, 22828), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (22817, 22828), True, 'import numpy as np\n'), ((23330, 23339), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (23336, 23339), True, 'import numpy as np\n'), ((25930, 25975), 'scipy.stats.t', 'sp.stats.t', (['(self.X.shape[0] - self.X.shape[1])'], {}), '(self.X.shape[0] - self.X.shape[1])\n', (25940, 25975), True, 'import scipy as sp\n'), ((26862, 26881), 'numpy.ones_like', 'np.ones_like', (['theta'], {}), '(theta)\n', (26874, 26881), True, 'import numpy as np\n'), ((27226, 27248), 'numpy.linalg.inv', 'np.linalg.inv', (['(H / 2.0)'], {}), '(H / 2.0)\n', (27239, 27248), True, 'import numpy as np\n'), ((27603, 27639), 'numpy.maximum', 'np.maximum', (['(0.01)', '(t_mle - tb * se[i])'], {}), '(0.01, t_mle - tb * se[i])\n', (27613, 27639), 
True, 'import numpy as np\n'), ((29057, 29076), 'scipy.stats.norm', 'sp.stats.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (29070, 29076), True, 'import scipy as sp\n'), ((29081, 29100), 'numpy.array', 'np.array', (['quantiles'], {}), '(quantiles)\n', (29089, 29100), True, 'import numpy as np\n'), ((30144, 30162), 'numpy.zeros_like', 'np.zeros_like', (['xqt'], {}), '(xqt)\n', (30157, 30162), True, 'import numpy as np\n'), ((30493, 30516), 'numpy.eye', 'np.eye', (['self.X.shape[1]'], {}), '(self.X.shape[1])\n', (30499, 30516), True, 'import numpy as np\n'), ((31338, 31351), 'numpy.argsort', 'np.argsort', (['u'], {}), '(u)\n', (31348, 31351), True, 'import numpy as np\n'), ((31521, 31531), 'numpy.sum', 'np.sum', (['t2'], {}), '(t2)\n', (31527, 31531), True, 'import numpy as np\n'), ((33359, 33380), 'numpy.vstack', 'np.vstack', (['[Xty, Zty]'], {}), '([Xty, Zty])\n', (33368, 33380), True, 'import numpy as np\n'), ((37434, 37444), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (37441, 37444), True, 'import numpy as np\n'), ((38531, 38542), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (38539, 38542), False, 'from sksparse.cholmod import cholesky\n'), ((39581, 39592), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (39589, 39592), False, 'from sksparse.cholmod import cholesky\n'), ((41790, 41801), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (41798, 41801), False, 'from sksparse.cholmod import cholesky\n'), ((42038, 42065), 'numpy.linalg.solve', 'np.linalg.solve', (['XtWX', 'WX.T'], {}), '(XtWX, WX.T)\n', (42053, 42065), True, 'import numpy as np\n'), ((43455, 43500), 'numpy.einsum', 'np.einsum', (['"""ij,ji->"""', 'ZPJi.T', 'dRZtP[:, ix[i]]'], {}), "('ij,ji->', ZPJi.T, dRZtP[:, ix[i]])\n", (43464, 43500), True, 'import numpy as np\n'), ((50017, 50028), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (50025, 50028), False, 'from sksparse.cholmod import cholesky\n'), ((54035, 54052), 'numpy.log', 'np.log', 
(['(2 * np.pi)'], {}), '(2 * np.pi)\n', (54041, 54052), True, 'import numpy as np\n'), ((54110, 54127), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (54116, 54127), True, 'import numpy as np\n'), ((54629, 54638), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (54635, 54638), True, 'import numpy as np\n'), ((57208, 57253), 'scipy.stats.t', 'sp.stats.t', (['(self.X.shape[0] - self.X.shape[1])'], {}), '(self.X.shape[0] - self.X.shape[1])\n', (57218, 57253), True, 'import scipy as sp\n'), ((58021, 58047), 'numpy.linalg.inv', 'np.linalg.inv', (['(Hchol / 2.0)'], {}), '(Hchol / 2.0)\n', (58034, 58047), True, 'import numpy as np\n'), ((58423, 58465), 'numpy.maximum', 'np.maximum', (['(0.01)', '(t_mle - 4.5 * se_chol[i])'], {}), '(0.01, t_mle - 4.5 * se_chol[i])\n', (58433, 58465), True, 'import numpy as np\n'), ((59030, 59123), 'scipy.optimize.minimize', 'sp.optimize.minimize', (['func', 'theta_chol_f'], {'jac': '(True)', 'bounds': 'bounds', 'method': '"""trust-constr"""'}), "(func, theta_chol_f, jac=True, bounds=bounds, method=\n 'trust-constr')\n", (59050, 59123), True, 'import scipy as sp\n'), ((59993, 60012), 'scipy.stats.norm', 'sp.stats.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (60006, 60012), True, 'import scipy as sp\n'), ((60017, 60036), 'numpy.array', 'np.array', (['quantiles'], {}), '(quantiles)\n', (60025, 60036), True, 'import numpy as np\n'), ((63667, 63688), 'numpy.linalg.norm', 'np.linalg.norm', (['theta'], {}), '(theta)\n', (63681, 63688), True, 'import numpy as np\n'), ((63689, 63714), 'numpy.linalg.norm', 'np.linalg.norm', (['theta_new'], {}), '(theta_new)\n', (63703, 63714), True, 'import numpy as np\n'), ((63734, 63767), 'numpy.linalg.norm', 'np.linalg.norm', (['(theta - theta_new)'], {}), '(theta - theta_new)\n', (63748, 63767), True, 'import numpy as np\n'), ((65332, 65349), 'numpy.log', 'np.log', (['self._df1'], {}), '(self._df1)\n', (65338, 65349), True, 'import numpy as np\n'), ((6001, 6020), 'numpy.product', 'np.product', 
(['M.shape'], {}), '(M.shape)\n', (6011, 6020), True, 'import numpy as np\n'), ((6567, 6578), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (6575, 6578), False, 'from sksparse.cholmod import cholesky\n'), ((11840, 11870), 'numpy.einsum', 'np.einsum', (['"""ij,ji->"""', 'PJi', 'PJj'], {}), "('ij,ji->', PJi, PJj)\n", (11849, 11870), True, 'import numpy as np\n'), ((23375, 23384), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (23381, 23384), True, 'import numpy as np\n'), ((28329, 28340), 'numpy.sqrt', 'np.sqrt', (['LR'], {}), '(LR)\n', (28336, 28340), True, 'import numpy as np\n'), ((28343, 28370), 'numpy.sign', 'np.sign', (['(t0 - tau[~free_ix])'], {}), '(t0 - tau[~free_ix])\n', (28350, 28370), True, 'import numpy as np\n'), ((37279, 37298), 'numpy.product', 'np.product', (['M.shape'], {}), '(M.shape)\n', (37289, 37298), True, 'import numpy as np\n'), ((37845, 37856), 'sksparse.cholmod.cholesky', 'cholesky', (['Q'], {}), '(Q)\n', (37853, 37856), False, 'from sksparse.cholmod import cholesky\n'), ((43063, 43093), 'numpy.einsum', 'np.einsum', (['"""ij,ji->"""', 'PJi', 'PJj'], {}), "('ij,ji->', PJi, PJj)\n", (43072, 43093), True, 'import numpy as np\n'), ((54674, 54683), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (54680, 54683), True, 'import numpy as np\n'), ((59363, 59374), 'numpy.sqrt', 'np.sqrt', (['LR'], {}), '(LR)\n', (59370, 59374), True, 'import numpy as np\n'), ((59377, 59411), 'numpy.sign', 'np.sign', (['(t0 - theta_chol[~free_ix])'], {}), '(t0 - theta_chol[~free_ix])\n', (59384, 59411), True, 'import numpy as np\n'), ((62730, 62740), 'numpy.diag', 'np.diag', (['W'], {}), '(W)\n', (62737, 62740), True, 'import numpy as np\n'), ((21960, 21970), 'numpy.diag', 'np.diag', (['C'], {}), '(C)\n', (21967, 21970), True, 'import numpy as np\n'), ((25543, 25586), 'numpy.triu_indices', 'np.triu_indices', (["self.dims[level]['n_vars']"], {}), "(self.dims[level]['n_vars'])\n", (25558, 25586), True, 'import numpy as np\n'), ((53259, 53269), 'numpy.diag', 
'np.diag', (['C'], {}), '(C)\n', (53266, 53269), True, 'import numpy as np\n'), ((56821, 56864), 'numpy.triu_indices', 'np.triu_indices', (["self.dims[level]['n_vars']"], {}), "(self.dims[level]['n_vars'])\n", (56836, 56864), True, 'import numpy as np\n'), ((65914, 65957), 'numpy.triu_indices', 'np.triu_indices', (["self.dims[level]['n_vars']"], {}), "(self.dims[level]['n_vars'])\n", (65929, 65957), True, 'import numpy as np\n'), ((6344, 6354), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (6351, 6354), True, 'import numpy as np\n'), ((14105, 14118), 'numpy.kron', 'np.kron', (['L', 'I'], {}), '(L, I)\n', (14112, 14118), True, 'import numpy as np\n'), ((37622, 37632), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (37629, 37632), True, 'import numpy as np\n'), ((45328, 45341), 'numpy.kron', 'np.kron', (['L', 'I'], {}), '(L, I)\n', (45335, 45341), True, 'import numpy as np\n'), ((58968, 58989), 'numpy.array', 'np.array', (['self.bounds'], {}), '(self.bounds)\n', (58976, 58989), True, 'import numpy as np\n'), ((31637, 31650), 'numpy.dot', 'np.dot', (['x', 'Ji'], {}), '(x, Ji)\n', (31643, 31650), True, 'import numpy as np\n'), ((31957, 31974), 'scipy.stats.f', 'sp.stats.f', (['q', 'nu'], {}), '(q, nu)\n', (31967, 31974), True, 'import scipy as sp\n')] |
import librosa
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
layer_name = 'global_max_pooling2d'
model = tf.keras.models.load_model('models/resnet.h5')
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
# 读取音频数据
def load_data(data_path):
wav, sr = librosa.load(data_path, sr=16000)
intervals = librosa.effects.split(wav, top_db=20)
wav_output = []
for sliced in intervals:
wav_output.extend(wav[sliced[0]:sliced[1]])
assert len(wav_output) >= 8000, "有效音频小于0.5s"
wav_output = np.array(wav_output)
ps = librosa.feature.melspectrogram(y=wav_output, sr=sr, hop_length=256).astype(np.float32)
ps = ps[np.newaxis, ..., np.newaxis]
return ps
def infer(audio_path):
data = load_data(audio_path)
feature = intermediate_layer_model.predict(data)
return feature
if __name__ == '__main__':
# 要预测的两个人的音频文件
person1 = 'dataset/ST-CMDS-20170001_1-OS/20170001P00001A0001.wav'
person2 = 'dataset/ST-CMDS-20170001_1-OS/20170001P00001A0101.wav'
feature1 = infer(person1)[0]
feature2 = infer(person2)[0]
# 对角余弦值
dist = np.dot(feature1, feature2) / (np.linalg.norm(feature1) * np.linalg.norm(feature2))
if dist > 0.7:
print("%s 和 %s 为同一个人,相似度为:%f" % (person1, person2, dist))
else:
print("%s 和 %s 不是同一个人,相似度为:%f" % (person1, person2, dist))
| [
"librosa.feature.melspectrogram",
"numpy.array",
"numpy.dot",
"tensorflow.keras.models.load_model",
"numpy.linalg.norm",
"librosa.effects.split",
"librosa.load"
] | [((146, 192), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""models/resnet.h5"""'], {}), "('models/resnet.h5')\n", (172, 192), True, 'import tensorflow as tf\n'), ((341, 374), 'librosa.load', 'librosa.load', (['data_path'], {'sr': '(16000)'}), '(data_path, sr=16000)\n', (353, 374), False, 'import librosa\n'), ((391, 428), 'librosa.effects.split', 'librosa.effects.split', (['wav'], {'top_db': '(20)'}), '(wav, top_db=20)\n', (412, 428), False, 'import librosa\n'), ((596, 616), 'numpy.array', 'np.array', (['wav_output'], {}), '(wav_output)\n', (604, 616), True, 'import numpy as np\n'), ((1175, 1201), 'numpy.dot', 'np.dot', (['feature1', 'feature2'], {}), '(feature1, feature2)\n', (1181, 1201), True, 'import numpy as np\n'), ((626, 693), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'y': 'wav_output', 'sr': 'sr', 'hop_length': '(256)'}), '(y=wav_output, sr=sr, hop_length=256)\n', (656, 693), False, 'import librosa\n'), ((1205, 1229), 'numpy.linalg.norm', 'np.linalg.norm', (['feature1'], {}), '(feature1)\n', (1219, 1229), True, 'import numpy as np\n'), ((1232, 1256), 'numpy.linalg.norm', 'np.linalg.norm', (['feature2'], {}), '(feature2)\n', (1246, 1256), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
from spt3g import core
from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median
from scipy.stats import skew, kurtosis
# Sparse extension operators
m = FlatSkyMap(500, 20, core.G3Units.arcmin)
m[7345] = 4
m[7345-500] = 4 # Backward
m[7345+500] = 4 # Forward
assert(m.sparse)
assert(m.npix_allocated == 3)
assert(m.npix_nonzero == 3)
m[7345-4*500] = 4 # Several steps back
assert(m.npix_allocated == 6)
assert(m.npix_nonzero == 4)
m[7345+3*500] = 4 # Several steps forward
assert(m.npix_allocated == 8)
assert(m.npix_nonzero == 5)
# Simple in-place operators
m = FlatSkyMap(500, 20, core.G3Units.arcmin)
m[15] = 10
assert(m.sparse)
m *= 5
m /= 25
assert(m[15] == 2)
assert(m.sparse)
assert(m[16] == 0)
assert((-m).sparse)
assert((-m)[15] == -2)
m += 3
m -= 14
assert(m[15] == -9)
assert(not m.sparse)
assert(m[16] == -11)
n = 1 - m
assert(n[15] == 10)
assert(n[16] == 12)
a = -11 * np.ones(m.shape)
a[0, 15] += 2
assert((m == a).all()) # Implicitly tests numpy conversions too
assert((m*0).npix_allocated == 0)
m += 11
m.sparse = True
assert(m.npix_allocated == 1)
n = 2. / m
assert(n[15] == 1)
assert(np.isinf(n[16]))
assert(n.npix_allocated == n.size)
# compactification
np.asarray(n)[np.isinf(n)] = np.nan
n.compact(zero_nans=True)
assert(n[16] == 0)
assert(n.npix_allocated == 1)
np.asarray(n)[np.isinf(n)] = np.nan
n.sparse = True
n.compact(zero_nans=True)
assert(n[16] == 0)
assert(n.npix_allocated == 1)
n = m ** 2.
assert(n[15] == 4)
assert(n.npix_allocated == 1)
# Map-by-map operations, with two sparse maps, one dense and one sparse,
# and two dense
m *= 2 # Get numbers bigger
assert((m == n).all())
assert((m > 0).any())
assert((m > 0).npix_allocated == 1)
m1 = m
m2 = m.copy()
m2.sparse = False
nm1 = m1.npix_allocated
nm2 = m2.npix_allocated
for pair in [(m1, m1), (m1, m2), (m2, m1), (m2, m2)]:
t = pair[0] * pair[1]
assert(t.sparse == pair[0].sparse)
assert(t.npix_allocated == pair[0].npix_allocated)
assert(t[15] == 16)
assert(m1.npix_allocated == nm1)
assert(m2.npix_allocated == nm2)
t = pair[0] + pair[1]
assert(t.sparse == pair[0].sparse)
assert(t.npix_allocated == pair[0].npix_allocated)
assert(t[15] == 8)
assert(m1.npix_allocated == nm1)
assert(m2.npix_allocated == nm2)
t = pair[0] - 2 * pair[1]
assert(t.sparse == pair[0].sparse)
assert(t.npix_allocated == pair[0].npix_allocated)
assert(t[15] == -4)
assert(m1.npix_allocated == nm1)
assert(m2.npix_allocated == nm2)
t = pair[0] / pair[1]
assert(t.sparse == pair[0].sparse)
assert(t.npix_allocated == t.size)
assert(t[15] == 1)
assert(not np.isfinite(t[12]))
assert(m1.npix_allocated == nm1)
assert(m2.npix_allocated == nm2)
# With a null map
m3 = m.clone(False)
for pair in [(m1, m3), (m2, m3), (m3, m2), (m3, m1)]:
nonnull = pair[1] if pair[0] is m3 else pair[0]
t = pair[0] * pair[1]
assert(t.sparse == True)
assert(t.npix_allocated == 0)
assert(m1.npix_allocated == nm1)
assert(m2.npix_allocated == nm2)
assert(m3.npix_allocated == 0)
t = pair[0] + pair[1]
assert(t.sparse == nonnull.sparse)
assert(t.npix_allocated == nonnull.npix_allocated)
assert(t[15] == 4)
assert(m1.npix_allocated == nm1)
assert(m2.npix_allocated == nm2)
assert(m3.npix_allocated == 0)
t = pair[0] - pair[1]
assert(t.sparse == nonnull.sparse)
assert(t.npix_allocated == nonnull.npix_allocated)
assert(t[15] == -4 or t[15] == 4)
assert(m1.npix_allocated == nm1)
assert(m2.npix_allocated == nm2)
assert(m3.npix_allocated == 0)
t = pair[0] / pair[1]
assert(not np.isfinite(t[12]))
if pair[0] is m3:
assert(t[15] == 0)
assert(m1.npix_allocated == nm1)
assert(m2.npix_allocated == nm2)
assert(m3.npix_allocated == 0)
for shape in [(20, 500), (21, 501)]:
print('shape', shape)
# patch extraction / insertion
m = FlatSkyMap(shape[1], shape[0], core.G3Units.arcmin, proj=MapProjection.ProjZEA)
np.asarray(m)[:] = np.random.randn(*m.shape)
malpha, mdelta = get_ra_dec_map(m)
x0 = 45
y0 = 13
for dy, dx in [[10, 50], [11, 51]]:
print(' patch', (dy, dx))
p = m.extract_patch(45, 13, dx, dy)
palpha, pdelta = get_ra_dec_map(p)
x1 = x0 - dx // 2
y1 = y0 - dy // 2
sx = slice(x1, x1 + dx)
sy = slice(y1, y1 + dy)
assert(np.allclose(np.asarray(malpha)[sy, sx], palpha))
assert(np.allclose(np.asarray(mdelta)[sy, sx], pdelta))
assert(np.allclose(np.asarray(m)[sy, sx], p))
m2 = m.clone(False)
m2.insert_patch(p)
assert(np.allclose(np.asarray(m)[sy, sx], np.asarray(m2)[sy, sx]))
# Slice operators: make sure they work like numpy slicing
assert((np.asarray(m[10:17,320:482]) == np.asarray(m.copy())[10:17,320:482]).all())
# But give the right type...
assert(m[10:17,320:482].__class__ == m.__class__)
# Try setting things
old_chunk = m[10:17,320:482]
m[10:17,320:482] = old_chunk*2
assert((np.asarray(m.copy())[10:17,320:482] == np.asarray(old_chunk)*2).all())
m[10:17,320:482] = np.asarray(old_chunk*3)
assert((np.asarray(m.copy())[10:17,320:482] == np.asarray(old_chunk)*3).all())
m[10:17,320:482] = old_chunk
mcopy = m.copy()
m[:] = np.asarray(m * 2)
assert((np.asarray(m) == np.asarray(mcopy * 2)).all())
m[:] = np.asarray(mcopy)
# Make sure inserting it in the wrong place (where coordinates don't make sense, but numpy
# would allow it) fails
failed = False
try:
m[11:18,320:482] = old_chunk
except ValueError:
failed = True
assert(failed)
# negative slice indices
assert((np.asarray(m.copy())[-10:-3, -180:-18] == np.asarray(m.copy())[-10:-3, -180:-18]).all())
# padding / cropping, with even and odd changes in dimension
pad = 10
for off in [0, 1]:
print(' padding', 2 * pad + off)
mpad = m.reshape(m.shape[1] + 2 * pad + off, m.shape[0] + 2 * pad + off)
assert(mpad.npix_allocated == m.npix_allocated)
a0 = np.array([m.alpha_center, m.delta_center])
a1 = np.array([mpad.alpha_center, mpad.delta_center])
assert(np.allclose(a0, a1))
x0 = np.array([m.y_center, m.x_center])
x1 = np.array([mpad.y_center, mpad.x_center])
v0 = m[m.angle_to_pixel(*a0)]
v1 = mpad[mpad.angle_to_pixel(*a1)]
assert(v0 == v1)
palpha, pdelta = get_ra_dec_map(mpad)
sx = slice(mpad.shape[1] // 2 - m.shape[1] // 2, mpad.shape[1] // 2 - m.shape[1] // 2 + m.shape[1])
sy = slice(mpad.shape[0] // 2 - m.shape[0] // 2, mpad.shape[0] // 2 - m.shape[0] // 2 + m.shape[0])
assert(np.allclose(np.asarray(palpha)[sy, sx], malpha))
assert(np.allclose(np.asarray(pdelta)[sy, sx], mdelta))
assert(np.allclose(np.asarray(mpad)[sy, sx], np.asarray(m)))
mcrop = mpad.reshape(m.shape[1], m.shape[0])
calpha, cdelta = get_ra_dec_map(mcrop)
a2 = np.array([mcrop.alpha_center, mcrop.delta_center])
assert(np.allclose(a0, a2))
x2 = np.array([mcrop.y_center, mcrop.x_center])
assert(np.allclose(x0, x2))
v2 = mcrop[mcrop.angle_to_pixel(*a2)]
assert(v0 == v2)
assert(np.allclose(calpha, malpha))
assert(np.allclose(cdelta, mdelta))
assert(np.allclose(mcrop, m))
assert(m.compatible(mcrop))
# statistics
m1 = np.asarray(m).ravel()
stats0 = [np.mean(m1), np.var(m1), skew(m1), kurtosis(m1)]
stats1 = get_map_stats(m, order=4)
assert(np.allclose(stats1, stats0))
med0 = np.median(m1)
med1 = get_map_median(m)
assert(np.allclose(med1, med0))
stats2 = get_map_stats(mpad, order=4, ignore_zeros=True)
assert(np.allclose(stats2, stats0))
med2 = get_map_median(mpad, ignore_zeros=True)
assert(np.allclose(med2, med0))
np.asarray(mpad)[np.asarray(mpad) == 0] = np.nan
stats3 = get_map_stats(mpad, order=4, ignore_nans=True)
assert(np.allclose(stats3, stats0))
med3 = get_map_median(mpad, ignore_nans=True)
assert(np.allclose(med3, med0))
# convolution
from scipy.signal import convolve2d
from spt3g.maps import convolve_map
kernel = FlatSkyMap(np.random.randn(5, 5), m.res)
mconv = convolve2d(m, kernel, mode='same')
mconv2 = convolve_map(m, kernel)
assert(np.allclose(mconv, mconv2))
| [
"scipy.signal.convolve2d",
"numpy.mean",
"numpy.allclose",
"numpy.median",
"numpy.ones",
"spt3g.maps.get_map_median",
"scipy.stats.kurtosis",
"spt3g.maps.get_ra_dec_map",
"numpy.asarray",
"spt3g.maps.convolve_map",
"spt3g.maps.FlatSkyMap",
"scipy.stats.skew",
"numpy.array",
"numpy.isfinite... | [((234, 274), 'spt3g.maps.FlatSkyMap', 'FlatSkyMap', (['(500)', '(20)', 'core.G3Units.arcmin'], {}), '(500, 20, core.G3Units.arcmin)\n', (244, 274), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((645, 685), 'spt3g.maps.FlatSkyMap', 'FlatSkyMap', (['(500)', '(20)', 'core.G3Units.arcmin'], {}), '(500, 20, core.G3Units.arcmin)\n', (655, 685), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((1192, 1207), 'numpy.isinf', 'np.isinf', (['n[16]'], {}), '(n[16])\n', (1200, 1207), True, 'import numpy as np\n'), ((8443, 8477), 'scipy.signal.convolve2d', 'convolve2d', (['m', 'kernel'], {'mode': '"""same"""'}), "(m, kernel, mode='same')\n", (8453, 8477), False, 'from scipy.signal import convolve2d\n'), ((8487, 8510), 'spt3g.maps.convolve_map', 'convolve_map', (['m', 'kernel'], {}), '(m, kernel)\n', (8499, 8510), False, 'from spt3g.maps import convolve_map\n'), ((8518, 8544), 'numpy.allclose', 'np.allclose', (['mconv', 'mconv2'], {}), '(mconv, mconv2)\n', (8529, 8544), True, 'import numpy as np\n'), ((969, 985), 'numpy.ones', 'np.ones', (['m.shape'], {}), '(m.shape)\n', (976, 985), True, 'import numpy as np\n'), ((1264, 1277), 'numpy.asarray', 'np.asarray', (['n'], {}), '(n)\n', (1274, 1277), True, 'import numpy as np\n'), ((1278, 1289), 'numpy.isinf', 'np.isinf', (['n'], {}), '(n)\n', (1286, 1289), True, 'import numpy as np\n'), ((1376, 1389), 'numpy.asarray', 'np.asarray', (['n'], {}), '(n)\n', (1386, 1389), True, 'import numpy as np\n'), ((1390, 1401), 'numpy.isinf', 'np.isinf', (['n'], {}), '(n)\n', (1398, 1401), True, 'import numpy as np\n'), ((4000, 4079), 'spt3g.maps.FlatSkyMap', 'FlatSkyMap', (['shape[1]', 'shape[0]', 'core.G3Units.arcmin'], {'proj': 'MapProjection.ProjZEA'}), '(shape[1], shape[0], core.G3Units.arcmin, proj=MapProjection.ProjZEA)\n', (4010, 4079), False, 'from spt3g.maps import FlatSkyMap, MapProjection, 
get_ra_dec_map, get_map_stats, get_map_median\n'), ((4103, 4128), 'numpy.random.randn', 'np.random.randn', (['*m.shape'], {}), '(*m.shape)\n', (4118, 4128), True, 'import numpy as np\n'), ((4150, 4167), 'spt3g.maps.get_ra_dec_map', 'get_ra_dec_map', (['m'], {}), '(m)\n', (4164, 4167), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((5225, 5250), 'numpy.asarray', 'np.asarray', (['(old_chunk * 3)'], {}), '(old_chunk * 3)\n', (5235, 5250), True, 'import numpy as np\n'), ((5397, 5414), 'numpy.asarray', 'np.asarray', (['(m * 2)'], {}), '(m * 2)\n', (5407, 5414), True, 'import numpy as np\n'), ((5485, 5502), 'numpy.asarray', 'np.asarray', (['mcopy'], {}), '(mcopy)\n', (5495, 5502), True, 'import numpy as np\n'), ((8405, 8426), 'numpy.random.randn', 'np.random.randn', (['(5)', '(5)'], {}), '(5, 5)\n', (8420, 8426), True, 'import numpy as np\n'), ((2711, 2729), 'numpy.isfinite', 'np.isfinite', (['t[12]'], {}), '(t[12])\n', (2722, 2729), True, 'import numpy as np\n'), ((3713, 3731), 'numpy.isfinite', 'np.isfinite', (['t[12]'], {}), '(t[12])\n', (3724, 3731), True, 'import numpy as np\n'), ((4084, 4097), 'numpy.asarray', 'np.asarray', (['m'], {}), '(m)\n', (4094, 4097), True, 'import numpy as np\n'), ((4340, 4357), 'spt3g.maps.get_ra_dec_map', 'get_ra_dec_map', (['p'], {}), '(p)\n', (4354, 4357), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((6184, 6226), 'numpy.array', 'np.array', (['[m.alpha_center, m.delta_center]'], {}), '([m.alpha_center, m.delta_center])\n', (6192, 6226), True, 'import numpy as np\n'), ((6240, 6288), 'numpy.array', 'np.array', (['[mpad.alpha_center, mpad.delta_center]'], {}), '([mpad.alpha_center, mpad.delta_center])\n', (6248, 6288), True, 'import numpy as np\n'), ((6304, 6323), 'numpy.allclose', 'np.allclose', (['a0', 'a1'], {}), '(a0, a1)\n', (6315, 6323), True, 'import numpy as np\n'), ((6338, 6372), 'numpy.array', 
'np.array', (['[m.y_center, m.x_center]'], {}), '([m.y_center, m.x_center])\n', (6346, 6372), True, 'import numpy as np\n'), ((6386, 6426), 'numpy.array', 'np.array', (['[mpad.y_center, mpad.x_center]'], {}), '([mpad.y_center, mpad.x_center])\n', (6394, 6426), True, 'import numpy as np\n'), ((6559, 6579), 'spt3g.maps.get_ra_dec_map', 'get_ra_dec_map', (['mpad'], {}), '(mpad)\n', (6573, 6579), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((7072, 7093), 'spt3g.maps.get_ra_dec_map', 'get_ra_dec_map', (['mcrop'], {}), '(mcrop)\n', (7086, 7093), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((7107, 7157), 'numpy.array', 'np.array', (['[mcrop.alpha_center, mcrop.delta_center]'], {}), '([mcrop.alpha_center, mcrop.delta_center])\n', (7115, 7157), True, 'import numpy as np\n'), ((7173, 7192), 'numpy.allclose', 'np.allclose', (['a0', 'a2'], {}), '(a0, a2)\n', (7184, 7192), True, 'import numpy as np\n'), ((7207, 7249), 'numpy.array', 'np.array', (['[mcrop.y_center, mcrop.x_center]'], {}), '([mcrop.y_center, mcrop.x_center])\n', (7215, 7249), True, 'import numpy as np\n'), ((7265, 7284), 'numpy.allclose', 'np.allclose', (['x0', 'x2'], {}), '(x0, x2)\n', (7276, 7284), True, 'import numpy as np\n'), ((7372, 7399), 'numpy.allclose', 'np.allclose', (['calpha', 'malpha'], {}), '(calpha, malpha)\n', (7383, 7399), True, 'import numpy as np\n'), ((7416, 7443), 'numpy.allclose', 'np.allclose', (['cdelta', 'mdelta'], {}), '(cdelta, mdelta)\n', (7427, 7443), True, 'import numpy as np\n'), ((7460, 7481), 'numpy.allclose', 'np.allclose', (['mcrop', 'm'], {}), '(mcrop, m)\n', (7471, 7481), True, 'import numpy as np\n'), ((7660, 7685), 'spt3g.maps.get_map_stats', 'get_map_stats', (['m'], {'order': '(4)'}), '(m, order=4)\n', (7673, 7685), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((7701, 7728), 
'numpy.allclose', 'np.allclose', (['stats1', 'stats0'], {}), '(stats1, stats0)\n', (7712, 7728), True, 'import numpy as np\n'), ((7745, 7758), 'numpy.median', 'np.median', (['m1'], {}), '(m1)\n', (7754, 7758), True, 'import numpy as np\n'), ((7774, 7791), 'spt3g.maps.get_map_median', 'get_map_median', (['m'], {}), '(m)\n', (7788, 7791), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((7807, 7830), 'numpy.allclose', 'np.allclose', (['med1', 'med0'], {}), '(med1, med0)\n', (7818, 7830), True, 'import numpy as np\n'), ((7850, 7897), 'spt3g.maps.get_map_stats', 'get_map_stats', (['mpad'], {'order': '(4)', 'ignore_zeros': '(True)'}), '(mpad, order=4, ignore_zeros=True)\n', (7863, 7897), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((7913, 7940), 'numpy.allclose', 'np.allclose', (['stats2', 'stats0'], {}), '(stats2, stats0)\n', (7924, 7940), True, 'import numpy as np\n'), ((7957, 7996), 'spt3g.maps.get_map_median', 'get_map_median', (['mpad'], {'ignore_zeros': '(True)'}), '(mpad, ignore_zeros=True)\n', (7971, 7996), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((8012, 8035), 'numpy.allclose', 'np.allclose', (['med2', 'med0'], {}), '(med2, med0)\n', (8023, 8035), True, 'import numpy as np\n'), ((8112, 8158), 'spt3g.maps.get_map_stats', 'get_map_stats', (['mpad'], {'order': '(4)', 'ignore_nans': '(True)'}), '(mpad, order=4, ignore_nans=True)\n', (8125, 8158), False, 'from spt3g.maps import FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((8174, 8201), 'numpy.allclose', 'np.allclose', (['stats3', 'stats0'], {}), '(stats3, stats0)\n', (8185, 8201), True, 'import numpy as np\n'), ((8218, 8256), 'spt3g.maps.get_map_median', 'get_map_median', (['mpad'], {'ignore_nans': '(True)'}), '(mpad, ignore_nans=True)\n', (8232, 8256), False, 'from spt3g.maps import 
FlatSkyMap, MapProjection, get_ra_dec_map, get_map_stats, get_map_median\n'), ((8272, 8295), 'numpy.allclose', 'np.allclose', (['med3', 'med0'], {}), '(med3, med0)\n', (8283, 8295), True, 'import numpy as np\n'), ((6977, 6990), 'numpy.asarray', 'np.asarray', (['m'], {}), '(m)\n', (6987, 6990), True, 'import numpy as np\n'), ((7594, 7605), 'numpy.mean', 'np.mean', (['m1'], {}), '(m1)\n', (7601, 7605), True, 'import numpy as np\n'), ((7607, 7617), 'numpy.var', 'np.var', (['m1'], {}), '(m1)\n', (7613, 7617), True, 'import numpy as np\n'), ((7619, 7627), 'scipy.stats.skew', 'skew', (['m1'], {}), '(m1)\n', (7623, 7627), False, 'from scipy.stats import skew, kurtosis\n'), ((7629, 7641), 'scipy.stats.kurtosis', 'kurtosis', (['m1'], {}), '(m1)\n', (7637, 7641), False, 'from scipy.stats import skew, kurtosis\n'), ((8046, 8062), 'numpy.asarray', 'np.asarray', (['mpad'], {}), '(mpad)\n', (8056, 8062), True, 'import numpy as np\n'), ((4501, 4519), 'numpy.asarray', 'np.asarray', (['malpha'], {}), '(malpha)\n', (4511, 4519), True, 'import numpy as np\n'), ((4565, 4583), 'numpy.asarray', 'np.asarray', (['mdelta'], {}), '(mdelta)\n', (4575, 4583), True, 'import numpy as np\n'), ((4629, 4642), 'numpy.asarray', 'np.asarray', (['m'], {}), '(m)\n', (4639, 4642), True, 'import numpy as np\n'), ((4739, 4752), 'numpy.asarray', 'np.asarray', (['m'], {}), '(m)\n', (4749, 4752), True, 'import numpy as np\n'), ((4762, 4776), 'numpy.asarray', 'np.asarray', (['m2'], {}), '(m2)\n', (4772, 4776), True, 'import numpy as np\n'), ((4862, 4891), 'numpy.asarray', 'np.asarray', (['m[10:17, 320:482]'], {}), '(m[10:17, 320:482])\n', (4872, 4891), True, 'import numpy as np\n'), ((5427, 5440), 'numpy.asarray', 'np.asarray', (['m'], {}), '(m)\n', (5437, 5440), True, 'import numpy as np\n'), ((5444, 5465), 'numpy.asarray', 'np.asarray', (['(mcopy * 2)'], {}), '(mcopy * 2)\n', (5454, 5465), True, 'import numpy as np\n'), ((6823, 6841), 'numpy.asarray', 'np.asarray', (['palpha'], {}), '(palpha)\n', (6833, 
6841), True, 'import numpy as np\n'), ((6887, 6905), 'numpy.asarray', 'np.asarray', (['pdelta'], {}), '(pdelta)\n', (6897, 6905), True, 'import numpy as np\n'), ((6951, 6967), 'numpy.asarray', 'np.asarray', (['mpad'], {}), '(mpad)\n', (6961, 6967), True, 'import numpy as np\n'), ((7554, 7567), 'numpy.asarray', 'np.asarray', (['m'], {}), '(m)\n', (7564, 7567), True, 'import numpy as np\n'), ((8063, 8079), 'numpy.asarray', 'np.asarray', (['mpad'], {}), '(mpad)\n', (8073, 8079), True, 'import numpy as np\n'), ((5170, 5191), 'numpy.asarray', 'np.asarray', (['old_chunk'], {}), '(old_chunk)\n', (5180, 5191), True, 'import numpy as np\n'), ((5300, 5321), 'numpy.asarray', 'np.asarray', (['old_chunk'], {}), '(old_chunk)\n', (5310, 5321), True, 'import numpy as np\n')] |
# Copyright 2018 The GamePad Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from enum import Enum
import json
import numpy as np
import scipy.stats as sps
from lib.myutil import inc_update
from coq.tactics import TACTIC_HIST, TACTICS_INFO_EQUIV
from coq.constr_util import COQEXP_HIST
# -------------------------------------------------
# Utility
def load_tactr_stats(filename):
    """Parse a tactic-tree statistics log file.

    Each line of the log is one of:
      * a tagged scalar count, e.g. ``UNIQUE-CONST 12``;
      * a tag whose payload we do not keep (``LEMMA INFO``, ``TOTAL``,
        ``NUM_IARG``, ``NUM_ARGS``);
      * a JSON record ``{"lemma": ..., "info": ...}`` with per-lemma stats.

    Args:
        filename: path to the log file.

    Returns:
        (my_stats, unique): ``my_stats`` maps lemma name -> its info record;
        ``unique`` maps kind ('sort', 'const', 'ind', 'conid', 'evar', 'fix')
        -> unique count.  As in the original implementation, only 'const',
        'ind' and 'conid' are guaranteed present; the others appear only if
        seen in the log.
    """
    # Tags that are recognized but intentionally ignored.
    skip_prefixes = ("LEMMA INFO", "TOTAL", "NUM_IARG", "NUM_ARGS")
    # Tagged counts folded into `unique`, checked in the original order.
    unique_prefixes = [("UNIQUE-SORT", "sort"),
                       ("UNIQUE-CONST", "const"),
                       ("UNIQUE-IND", "ind"),
                       ("UNIQUE-CONID", "conid"),
                       ("UNIQUE-EVAR", "evar"),
                       ("UNIQUE-FIX", "fix")]
    my_stats = {}
    unique = {'const': 0, 'ind': 0, 'conid': 0}
    with open(filename, 'r') as f:
        for line in f:
            if line.startswith(skip_prefixes):
                continue
            for prefix, key in unique_prefixes:
                if line.startswith(prefix):
                    # Second whitespace-separated token is the count.
                    unique[key] = int(line.split()[1].strip())
                    break
            else:
                # Not a tagged line: a JSON record of per-lemma statistics.
                x = json.loads(line.strip())
                my_stats[x['lemma']] = x['info']
    return my_stats, unique
class DepthMode(Enum):
    """Which per-depth size measure `TacTrStats.avg_depth_size` plots:
    character counts vs AST node counts, for either the local context
    ('CTX') or the goal ('GOAL').  Values are fixed; do not renumber."""
    CHAR_CTX = 0
    CHAR_GOAL = 1
    AST_CTX = 2
    AST_GOAL = 3
# -------------------------------------------------
# Tactic Tree Statistics
class TacTrStats(object):
    """Summary statistics over per-lemma tactic-tree info records.

    `stats` is the dict produced by `load_tactr_stats`: lemma name ->
    info record with keys such as 'hist', 'num_tacs', 'num_goals',
    'num_term', 'num_err', 'term_path_lens', 'err_path_lens',
    'have_info', 'avg_depth_*_size', and 'hist_coqexp'.
    """
    def __init__(self, stats):
        self.stats = stats
    def _mylog(self, msg):
        # Single logging choke-point so output destination is easy to change.
        print(msg)
    def _descrip_stats(self, ls):
        # Print min/max/mean/sdev/kurtosis of `ls`; kurtosis is printed
        # but (by design of the original API) not returned.
        _min = min(ls)
        _max = max(ls)
        mean = np.mean(ls)
        sdev = np.sqrt(sps.moment(ls, moment=2))
        kurt = sps.kurtosis(ls)
        self._mylog("Min: {}".format(_min))
        self._mylog("Max: {}".format(_max))
        self._mylog("Mean: {}".format(mean))
        self._mylog("Sdev: {}".format(sdev))
        self._mylog("Kurtosis: {}".format(kurt))
        return _min, _max, mean, sdev
    def avg_hist(self, f_sort=True):
        """Histogram of tactic vs count"""
        # Merge the per-lemma tactic histograms into one global histogram.
        hist = TACTIC_HIST.empty()
        for lemma, info in self.stats.items():
            hist = TACTIC_HIST.merge(hist, info['hist'])
        # total = len(self.stats)
        # avg = TACTIC_HIST.map(hist, lambda x: x / total)
        # return TACTIC_HIST.view(avg, f_sort)
        # Collapse equivalent tactic names into their pretty-printed group.
        # NOTE(review): a tactic not listed in TACTICS_INFO_EQUIV is
        # silently dropped from the result.
        acc = []
        for tac, cnt in TACTIC_HIST.view(hist, f_sort):
            for pp_tac, eq_tacs_kinds in TACTICS_INFO_EQUIV:
                eq_tacs = set(tac for tac, _ in eq_tacs_kinds)
                if tac in eq_tacs:
                    acc += [(pp_tac, cnt)]
                    break
        return acc
    def descrip_tacs(self):
        """Descriptive statistics on number of tactics used per lemma"""
        self._mylog("Statistics on number of tactics used (across lemmas)")
        ls = [info['num_tacs'] for _, info in self.stats.items()]
        return self._descrip_stats(ls)
    def descrip_tacsts(self):
        """Descriptive statistics on number of tactic states present in each lemma"""
        self._mylog("Statistics on number of tactic states present (across lemmas)")
        ls = [info['num_goals'] for _, info in self.stats.items()]
        return self._descrip_stats(ls)
    def descrip_term(self):
        """Descriptive statistics on number of terminal states in each lemma"""
        self._mylog("Statistics on number of terminal states (across lemmas)")
        ls = [info['num_term'] for _, info in self.stats.items()]
        return self._descrip_stats(ls)
    def descrip_deadend(self):
        """Descriptive number of deadend states in each lemma"""
        self._mylog("Statistics on number of deadend states (across lemmas)")
        ls = [info['num_err'] for _, info in self.stats.items()]
        return self._descrip_stats(ls)
    def gather_term_path_lens(self):
        """Histogram of terminal path-length vs count"""
        # One histogram per lemma; lengths >= max_len are discarded.
        max_len = 100
        term_path_lens = []
        for lemma, info in self.stats.items():
            hist = [0 for _ in range(max_len)]
            for l in info['term_path_lens']:
                if l < max_len:
                    hist[l] += 1
            term_path_lens += [hist]
        return term_path_lens
    def gather_err_path_lens(self):
        """Histogram of error path-length vs count"""
        # Same shape as gather_term_path_lens, over error paths.
        max_len = 100
        err_path_lens = []
        for lemma, info in self.stats.items():
            hist = [0 for _ in range(max_len)]
            for l in info['err_path_lens']:
                if l < max_len:
                    hist[l] += 1
            err_path_lens += [hist]
        return err_path_lens
    def gather_have_info(self):
        """Average tactic size and length of path per lemma"""
        # Each have_info entry looks like (ftac, size_ftac, path) — inferred
        # from the indexing below; TODO confirm against the producer.
        acc_size_ftac = []
        acc_size_path = []
        for lemma, info in self.stats.items():
            hinfos = info['have_info']
            for hinfo in hinfos:
                # ftac = hinfo[0]
                size_ftac = hinfo[1]
                size_path = len(hinfo[2])
                acc_size_ftac += [size_ftac]
                acc_size_path += [size_path]
        self._mylog("Statistics on size of haves (across lemmas)")
        self._descrip_stats(acc_size_ftac)
        self._mylog("Statistics on length of have paths (across lemmas)")
        self._descrip_stats(acc_size_path)
        return acc_size_ftac, acc_size_path
    def avg_depth_size(self, mode):
        """Histogram of depth vs context/goal size"""
        # Select which per-depth size series to aggregate (see DepthMode).
        if mode == DepthMode.CHAR_CTX:
            projfn = lambda info: info['avg_depth_ctx_size']
        elif mode == DepthMode.CHAR_GOAL:
            projfn = lambda info: info['avg_depth_goal_size']
        elif mode == DepthMode.AST_CTX:
            projfn = lambda info: info['avg_depth_astctx_size']
        elif mode == DepthMode.AST_GOAL:
            projfn = lambda info: info['avg_depth_astgoal_size']
        else:
            raise NameError("Mode {} not supported".format(mode))
        max_depth = max([max([depth for depth, _ in projfn(info)]) for _, info in self.stats.items()]) + 1
        hist = {}
        for depth in range(max_depth):
            hist[depth] = 0
        norm = [0 for _ in range(0, max_depth)]
        # Sum sizes per depth across all lemmas, then normalize to averages.
        for lemma, info in self.stats.items():
            for depth, dsize in projfn(info):
                hist[depth] += dsize
                norm[depth] += 1
        for depth in range(1, max_depth):
            if norm[depth] > 0:
                hist[depth] /= norm[depth]
        # Depth 0 is excluded from the result (never normalized above).
        del hist[0]
        return hist
    def coqexp_hist(self, f_sort=True):
        # Merged histogram of Coq expression constructors across lemmas.
        hists = [info['hist_coqexp'] for lemma, info in self.stats.items()]
        hist = COQEXP_HIST.merges(hists)
        return COQEXP_HIST.view(hist, f_sort)
    def coqexp_comp_p(self, hist_key, f_avg=True, f_trunc=True):
        # Distribution over the integer-keyed histogram `hist_key`;
        # optionally truncated at 95% cumulative mass and/or averaged
        # per lemma.  Returns (distribution, largest key seen).
        hist = {}
        for lemma, info in self.stats.items():
            for x in info[hist_key]:
                inc_update(hist, x, 1.0)
        maxsize = max([k for k, v in hist.items()])
        if f_trunc:
            hist_p = {}
            total = np.sum([v for k, v in hist.items()])
            acc = 0.0
            # Keep keys in increasing order until 95% of the mass is covered.
            for k in range(maxsize + 1):
                if k in hist:
                    acc += hist[k]
                    hist_p[k] = hist[k]
                else:
                    hist_p[k] = 0.0
                if acc / total > 0.95:
                    break
        else:
            hist_p = hist
        if f_avg:
            num_lemmas = len(self.stats)
            for k, v in hist_p.items():
                hist_p[k] /= num_lemmas
        return hist_p, maxsize
if __name__ == "__main__":
    # Bug fix: load_tactr_stats returns a (stats, unique) pair; the original
    # passed the whole tuple to TacTrStats, whose methods would then crash on
    # self.stats.items().  Unpack it so TacTrStats gets the per-lemma dict.
    stats, unique = load_tactr_stats("data/feit-tactr.log")
    tactr_stats = TacTrStats(stats)
| [
"numpy.mean",
"json.loads",
"coq.constr_util.COQEXP_HIST.merges",
"scipy.stats.kurtosis",
"scipy.stats.moment",
"coq.tactics.TACTIC_HIST.view",
"lib.myutil.inc_update",
"coq.tactics.TACTIC_HIST.merge",
"coq.tactics.TACTIC_HIST.empty",
"coq.constr_util.COQEXP_HIST.view"
] | [((2785, 2796), 'numpy.mean', 'np.mean', (['ls'], {}), '(ls)\n', (2792, 2796), True, 'import numpy as np\n'), ((2861, 2877), 'scipy.stats.kurtosis', 'sps.kurtosis', (['ls'], {}), '(ls)\n', (2873, 2877), True, 'import scipy.stats as sps\n'), ((3264, 3283), 'coq.tactics.TACTIC_HIST.empty', 'TACTIC_HIST.empty', ([], {}), '()\n', (3281, 3283), False, 'from coq.tactics import TACTIC_HIST, TACTICS_INFO_EQUIV\n'), ((3570, 3600), 'coq.tactics.TACTIC_HIST.view', 'TACTIC_HIST.view', (['hist', 'f_sort'], {}), '(hist, f_sort)\n', (3586, 3600), False, 'from coq.tactics import TACTIC_HIST, TACTICS_INFO_EQUIV\n'), ((7860, 7885), 'coq.constr_util.COQEXP_HIST.merges', 'COQEXP_HIST.merges', (['hists'], {}), '(hists)\n', (7878, 7885), False, 'from coq.constr_util import COQEXP_HIST\n'), ((7901, 7931), 'coq.constr_util.COQEXP_HIST.view', 'COQEXP_HIST.view', (['hist', 'f_sort'], {}), '(hist, f_sort)\n', (7917, 7931), False, 'from coq.constr_util import COQEXP_HIST\n'), ((2820, 2844), 'scipy.stats.moment', 'sps.moment', (['ls'], {'moment': '(2)'}), '(ls, moment=2)\n', (2830, 2844), True, 'import scipy.stats as sps\n'), ((3350, 3387), 'coq.tactics.TACTIC_HIST.merge', 'TACTIC_HIST.merge', (['hist', "info['hist']"], {}), "(hist, info['hist'])\n", (3367, 3387), False, 'from coq.tactics import TACTIC_HIST, TACTICS_INFO_EQUIV\n'), ((8116, 8140), 'lib.myutil.inc_update', 'inc_update', (['hist', 'x', '(1.0)'], {}), '(hist, x, 1.0)\n', (8126, 8140), False, 'from lib.myutil import inc_update\n'), ((2291, 2307), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2301, 2307), False, 'import json\n')] |
# Renishaw wdf Raman spectroscopy file reader
# Code inspired by Henderson, Alex DOI:10.5281/zenodo.495477
from __future__ import print_function
import struct
import numpy
import io
from .types import LenType, DataType, MeasurementType
from .types import ScanType, UnitType, DataType
from .types import Offsets, ExifTags
from .utils import convert_wl, convert_attr_name
from sys import stderr
try:
import PIL
from PIL import Image
from PIL.TiffImagePlugin import IFDRational
except ImportError:
PIL = None
class WDFReader(object):
    """Reader for Renishaw(TM) WiRE Raman spectroscopy files (.wdf format)
    The wdf file format is separated into several DataBlocks, with starting 4-char
    strings such as (incomplete list):
    `WDF1`: File header for information
    `DATA`: Spectra data
    `XLST`: Data for X-axis of data, usually the Raman shift or wavelength
    `YLST`: Data for Y-axis of data, possibly not important
    `WMAP`: Information for mapping, e.g. StreamLine or StreamLineHR mapping
    `MAP `: Mapping information(?)
    `ORGN`: Data for stage origin
    `TEXT`: Annotation text etc
    `WXDA`: ? TODO
    `WXDM`: ? TODO
    `ZLDC`: ? TODO
    `BKXL`: ? TODO
    `WXCS`: ? TODO
    `WXIS`: ? TODO
    `WHTL`: White light image
    Following the block name, there are two indicators:
    Block uid: int32
    Block size: int64
    Args:
    file_name (file) : File object for the wdf file
    Attributes:
    title (str) : Title of measurement
    username (str) : Username
    application_name (str) : Default WiRE
    application_version (int,) * 4 : Version number, e.g. [4, 4, 0, 6602]
    measurement_type (int) : Type of measurement
    0=unknown, 1=single, 2=multi, 3=mapping
    scan_type (int) : Scan of type, see values in scan_types
    laser_wavenumber (float32) : Wavenumber in cm^-1
    count (int) : Numbers of experiments (same type), can be smaller than capacity
    spectral_units (int) : Unit of spectra, see unit_types
    xlist_type (int) : See unit_types
    xlist_unit (int) : See unit_types
    xlist_length (int): Size for the xlist
    xdata (numpy.array): x-axis data
    ylist_type (int): Same as xlist_type
    ylist_unit (int): Same as xlist_unit
    ylist_length (int): Same as xlist_length
    ydata (numpy.array): y-data, possibly not used
    point_per_spectrum (int): Should be identical to xlist_length
    data_origin_count (int) : Number of rows in data origin list
    capacity (int) : Max number of spectra
    accumulation_count (int) : Single or multiple measurements
    block_info (dict) : Info block at least with following keys
    DATA, XLST, YLST, ORGN
    # TODO types?
    """
    def __init__(self, file_name, debug=False):
        try:
            self.file_obj = open(str(file_name), "rb")
        except IOError:
            # NOTE(review): "does noe" typo kept — this is a runtime message.
            raise IOError("File {0} does noe exist!".format(file_name))
        # Initialize the properties for the wdfReader class
        self.title = ""
        self.username = ""
        self.measurement_type = None
        self.scan_type = None
        self.laser_length = None
        self.count = None
        self.spectral_unit = None
        self.xlist_type = None
        self.xlist_unit = None
        self.ylist_type = None
        self.ylist_unit = None
        self.point_per_spectrum = None
        self.data_origin_count = None
        self.capacity = None
        self.application_name = ""
        self.application_version = [None]*4
        self.xlist_length = 0
        self.ylist_length = 0
        self.accumulation_count = None
        self.block_info = {}    # each key has value (uid, offset, size)
        self.is_completed = False
        self.debug = debug
        # Parse the header section in the wdf file
        self.__locate_all_blocks()
        # Parse individual blocks (order matters: ORGN/WMAP are needed
        # before __reshape_spectra can interpret the raw spectra array).
        self.__treat_block_data("WDF1")
        self.__treat_block_data("DATA")
        self.__treat_block_data("XLST")
        self.__treat_block_data("YLST")
        self.__treat_block_data("ORGN")
        self.__treat_block_data("WMAP")
        self.__treat_block_data("WHTL")
        # Reshape spectra after reading mapping information
        self.__reshape_spectra()
        # self._parse_wmap()
        # Finally print the information
        if self.debug:
            print(("File Metadata").center(80, "="),
                  file=stderr)
            self.print_info(file=stderr)
            print("=" * 80, file=stderr)
    def close(self):
        # Release the underlying file handle and the optional image buffer.
        self.file_obj.close()
        if hasattr(self, "img"):
            self.img.close()
    def __get_type_string(self, attr, data_type):
        """Get the enumerated-data_type as string
        """
        val = getattr(self, attr)  # No error checking
        if data_type is None:
            return val
        else:
            return data_type(val).name
    def __read_type(self, type, size=1):
        """ Unpack struct data for certain type
        """
        if type in ["int16", "int32", "int64", "float", "double"]:
            if size > 1:
                raise NotImplementedError(
                    "Does not support read number type with size >1")
            # unpack into unsigned values
            fmt_out = LenType["s_" + type].value
            fmt_in = LenType["l_" + type].value
            return struct.unpack(fmt_out, self.file_obj.read(fmt_in * size))[0]
        elif type == "utf8":
            # Read utf8 string with determined size block
            return self.file_obj.read(size).decode("utf8").replace("\x00", "")
        else:
            raise ValueError("Unknown data length format!")
    def __locate_single_block(self, pos):
        """Get block information starting at pos
        """
        # Layout: 4-char ascii name, int32 uid, int64 size (see class doc).
        self.file_obj.seek(pos)
        block_name = self.file_obj.read(0x4).decode("ascii")
        if len(block_name) < 4:
            raise EOFError
        block_uid = self.__read_type("int32")
        block_size = self.__read_type("int64")
        return block_name, block_uid, block_size
    def __locate_all_blocks(self):
        """Get information for all data blocks and store them inside self.block_info
        """
        # Walk the file block-by-block until EOF (or a non-ascii name,
        # which also terminates the scan).
        curpos = 0
        finished = False
        while not finished:
            try:
                block_name, block_uid, block_size = self.__locate_single_block(
                    curpos)
                self.block_info[block_name] = (block_uid, curpos, block_size)
                curpos += block_size
            except (EOFError, UnicodeDecodeError):
                finished = True
    def __treat_block_data(self, block_name):
        """Get data according to specific block name
        """
        if block_name not in self.block_info.keys():
            if self.debug:
                print("Block name {0} not present in current measurement".
                      format(block_name), file=stderr)
            return
        # parse individual blocks with names
        # NOTE(review): ("X") is just the string "X" (not a tuple); the
        # *val unpack below works only because the string has length 1.
        actions = {
            "WDF1": ("_parse_header", ()),
            "DATA": ("_parse_spectra", ()),
            "XLST": ("_parse_xylist", ("X")),
            "YLST": ("_parse_xylist", ("Y")),
            "ORGN": ("_parse_orgin_list", ()),
            "WMAP": ("_parse_wmap", ()),
            "WHTL": ("_parse_img", ()),
        }
        func_name, val = actions[block_name]
        getattr(self, func_name)(*val)
    # The method for reading the info in the file header
    def _parse_header(self):
        """Solve block WDF1
        """
        self.file_obj.seek(0)   # return to the head
        # Must make the conversion under python3
        block_ID = self.file_obj.read(Offsets.block_id).decode("ascii")
        block_UID = self.__read_type("int32")
        block_len = self.__read_type("int64")
        # First block must be "WDF1"
        if (block_ID != "WDF1") \
           or (block_UID != 0 and block_UID != 1) \
           or (block_len != Offsets.data_block):
            raise ValueError("The wdf file format is incorrect!")
        # TODO what are the digits in between?
        # The keys from the header
        self.file_obj.seek(Offsets.measurement_info)  # space
        self.point_per_spectrum = self.__read_type("int32")
        self.capacity = self.__read_type("int64")
        self.count = self.__read_type("int64")
        # If count < capacity, this measurement is not completed
        self.is_completed = (self.count == self.capacity)
        self.accumulation_count = self.__read_type("int32")
        self.ylist_length = self.__read_type("int32")
        self.xlist_length = self.__read_type("int32")
        self.data_origin_count = self.__read_type("int32")
        self.application_name = self.__read_type("utf8", 24)  # Must be "WiRE"
        for i in range(4):
            self.application_version[i] = self.__read_type("int16")
        self.scan_type = ScanType(self.__read_type("int32"))
        self.measurement_type = MeasurementType(self.__read_type("int32"))
        # For the units
        self.file_obj.seek(Offsets.spectral_info)
        self.spectral_unit = UnitType(self.__read_type("int32"))
        self.laser_length = convert_wl(self.__read_type("float"))  # in nm
        # Username and title
        self.file_obj.seek(Offsets.file_info)
        self.username = self.__read_type("utf8",
                                         Offsets.usr_name -
                                         Offsets.file_info)
        self.title = self.__read_type("utf8",
                                      Offsets.data_block -
                                      Offsets.usr_name)
    def _parse_xylist(self, dir):
        """Get information from XLST or YLST blocks
        """
        if not dir.upper() in ["X", "Y"]:
            raise ValueError("Direction argument `dir` must be X or Y!")
        name = dir.upper() + "LST"
        uid, pos, size = self.block_info[name]
        offset = Offsets.block_data
        self.file_obj.seek(pos + offset)
        # Sets self.xlist_type / self.ylist_type etc. dynamically.
        setattr(self, "{0}list_type".format(dir.lower()),
                DataType(self.__read_type("int32")))
        setattr(self, "{0}list_unit".format(dir.lower()),
                UnitType(self.__read_type("int32")))
        size = getattr(self, "{0}list_length".format(dir.lower()))
        if size == 0:           # Possibly not started
            raise ValueError("{0}-List possibly not initialized!".
                             format(dir.upper()))
        # self.file_obj.seek(pos + offset)
        data = numpy.fromfile(self.file_obj, dtype="float32", count=size)
        setattr(self, "{0}data".format(dir.lower()), data)
        return
    def _parse_spectra(self, start=0, end=-1):
        """Get information from DATA block
        """
        if end == -1:           # take all spectra
            end = self.count - 1
        if (start not in range(self.count)) or (end not in range(self.count)):
            raise ValueError("Wrong start and end indices of spectra!")
        if start > end:
            raise ValueError("Start cannot be larger than end!")
        # Determine start position
        uid, pos, size = self.block_info["DATA"]
        pos_start = pos + Offsets.block_data + LenType["l_float"].value * \
            start * self.point_per_spectrum
        n_row = end - start + 1
        self.file_obj.seek(pos_start)
        spectra_data = numpy.fromfile(
            self.file_obj, dtype="float32",
            count=n_row * self.point_per_spectrum)
        # if len(spectra_data.shape) > 1:
        # The spectra is only 1D array
        # spectra_data = spectra_data.reshape(
        # n_row, spectra_data.size // n_row)
        self.spectra = spectra_data
        return
    def _parse_orgin_list(self):
        """Get information from OriginList
        Set the following attributes:
        `self.origin_list_header`: 2D-array
        `self.origin_list`: origin list
        """
        # First confirm origin list type
        uid, pos, size = self.block_info["ORGN"]
        self.origin_list_header = [[None, ] * 5
                                   for i in range(self.data_origin_count)]
        # All possible to have x y and z positions!
        self.xpos = numpy.zeros(self.count)
        self.ypos = numpy.zeros(self.count)
        self.zpos = numpy.zeros(self.count)
        list_increment = Offsets.origin_increment + \
            LenType.l_double.value * self.capacity
        curpos = pos + Offsets.origin_info
        for i in range(self.data_origin_count):
            self.file_obj.seek(curpos)
            p1 = self.__read_type("int32")
            p2 = self.__read_type("int32")
            s = self.__read_type("utf8", 0x10)
            # First index: is the list x, or y pos?
            self.origin_list_header[i][0] = (p1 >> 31 & 0b1) == 1
            # Second: Data type of the row
            self.origin_list_header[i][1] = DataType(p1 & ~(0b1 << 31))
            # Third: Unit
            self.origin_list_header[i][2] = UnitType(p2)
            # Fourth: annotation
            self.origin_list_header[i][3] = s
            # Last: the actual data
            # array = numpy.empty(self.count)
            # Time appears to be recorded as int64 in 100 nanosecond intervals
            # Possibly using the .NET DateTime epoch
            # Reference does not appear to be Unix Epoch time
            # Set time[0] = 0 until timestamp reference can be determined
            # Resulting array will have unit of `FileTime` in seconds
            if self.origin_list_header[i][1] == DataType.Time:
                array = numpy.array([self.__read_type("int64")
                                     for i in range(self.count)]) / 1e7
                array = array - array[0]
            else:
                array = numpy.array([self.__read_type("double")
                                     for i in range(self.count)])
            self.origin_list_header[i][4] = array
            # Set self.xpos or self.ypos
            if self.origin_list_header[i][1] == DataType.Spatial_X:
                self.xpos = array
                self.xpos_unit = self.origin_list_header[i][2]
            elif self.origin_list_header[i][1] == DataType.Spatial_Y:
                self.ypos = array
                self.ypos_unit = self.origin_list_header[i][2]
            elif self.origin_list_header[i][1] == DataType.Spatial_Z:
                self.zpos = array
                self.zpos_unit = self.origin_list_header[i][2]
            else:
                pass
            curpos += list_increment
    def _parse_wmap(self):
        """Get information about mapping in StreamLine and StreamLineHR
        """
        try:
            uid, pos, size = self.block_info["WMAP"]
        except KeyError:
            if self.debug:
                print(("Current measurement does not"
                       " contain mapping information!"),
                      file=stderr)
            return
        self.file_obj.seek(pos + Offsets.wmap_origin)
        x_start = self.__read_type("float")
        if not numpy.isclose(x_start, self.xpos[0], rtol=1e-4):
            raise ValueError("WMAP Xpos is not same as in ORGN!")
        y_start = self.__read_type("float")
        if not numpy.isclose(y_start, self.ypos[0], rtol=1e-4):
            raise ValueError("WMAP Ypos is not same as in ORGN!")
        unknown1 = self.__read_type("float")
        x_pad = self.__read_type("float")
        y_pad = self.__read_type("float")
        unknown2 = self.__read_type("float")
        spectra_w = self.__read_type("int32")
        spectra_h = self.__read_type("int32")
        # Determine if the xy-grid spacing is same as in x_pad and y_pad
        # NOTE(review): x_pad_grid / y_pad_grid are computed below but
        # never used afterwards — dead computation kept as-is.
        if (len(self.xpos) > 1) and (len(self.ypos) > 1):
            xdist = numpy.abs(self.xpos - self.xpos[0])
            ydist = numpy.abs(self.ypos - self.ypos[0])
            xdist = xdist[numpy.nonzero(xdist)]
            ydist = ydist[numpy.nonzero(ydist)]
            # Get minimal non-zero padding in the grid
            try:
                x_pad_grid = numpy.min(xdist)
            except ValueError:
                x_pad_grid = 0
            try:
                y_pad_grid = numpy.min(ydist)
            except ValueError:
                y_pad_grid = 0
        self.map_shape = (spectra_w, spectra_h)
        self.map_info = dict(x_start=x_start,
                             y_start=y_start,
                             x_pad=x_pad,
                             y_pad=y_pad,
                             x_span=spectra_w * x_pad,
                             y_span=spectra_h * y_pad,
                             x_unit=self.xpos_unit,
                             y_unit=self.ypos_unit)
    def _parse_img(self):
        """Extract the white-light JPEG image
        The size of while-light image is coded in its EXIF
        Use PIL to parse the EXIF information
        """
        try:
            uid, pos, size = self.block_info["WHTL"]
        except KeyError:
            if self.debug:
                print("The wdf file does not contain an image",
                      file=stderr)
            return
        # Read the bytes. `self.img` is a wrapped IO object mimicking a file
        self.file_obj.seek(pos + Offsets.jpeg_header)
        img_bytes = self.file_obj.read(size - Offsets.jpeg_header)
        self.img = io.BytesIO(img_bytes)
        # Handle image dimension if PIL is present
        if PIL is not None:
            pil_img = Image.open(self.img)
            # Weird missing header keys when Pillow >= 8.2.0.
            # see https://pillow.readthedocs.io/en/stable/releasenotes/8.2.0.html#image-getexif-exif-and-gps-ifd
            # Use fall-back _getexif method instead
            exif_header = dict(pil_img._getexif())
            try:
                # Get the width and height of image
                w_ = exif_header[ExifTags.FocalPlaneXResolution]
                h_ = exif_header[ExifTags.FocalPlaneYResolution]
                x_org_, y_org_ = exif_header[ExifTags.FocalPlaneXYOrigins]
                def rational2float(v):
                    """ Pillow<7.2.0 returns tuple, Pillow>=7.2.0 returns IFDRational """
                    if not isinstance(v, IFDRational):
                        return v[0] / v[1]
                    return float(v)
                w_, h_ = rational2float(w_), rational2float(h_)
                x_org_, y_org_ = rational2float(x_org_), rational2float(y_org_)
                # The dimensions (width, height)
                # with unit `img_dimension_unit`
                self.img_dimensions = numpy.array([w_,
                                                   h_])
                # Origin of image is at upper right corner
                self.img_origins = numpy.array([x_org_,
                                                y_org_])
                # Default is microns (5)
                self.img_dimension_unit = UnitType(
                    exif_header[ExifTags.FocalPlaneResolutionUnit])
                # Give the box for cropping
                # Following the PIL manual
                # (left, upper, right, lower)
                self.img_cropbox = self.__calc_crop_box()
            except KeyError:
                if self.debug:
                    print(("Some keys in white light image header"
                           " cannot be read!"),
                          file=stderr)
        return
    def __calc_crop_box(self):
        """Helper function to calculate crop box
        """
        def _proportion(x, minmax, pixels):
            """Get proportional pixels"""
            min, max = minmax
            return int(pixels * (x - min) / (max - min))
        pil_img = PIL.Image.open(self.img)
        w_, h_ = self.img_dimensions
        x0_, y0_ = self.img_origins
        pw = pil_img.width
        ph = pil_img.height
        # Map the stage-coordinate extent of the scan onto image pixels.
        map_xl = self.xpos.min()
        map_xr = self.xpos.max()
        map_yt = self.ypos.min()
        map_yb = self.ypos.max()
        left = _proportion(map_xl, (x0_, x0_ + w_), pw)
        right = _proportion(map_xr, (x0_, x0_ + w_), pw)
        top = _proportion(map_yt, (y0_, y0_ + h_), ph)
        bottom = _proportion(map_yb, (y0_, y0_ + h_), ph)
        return (left, top, right, bottom)
    def __reshape_spectra(self):
        """Reshape spectra into w * h * self.point_per_spectrum
        """
        if not self.is_completed:
            if self.debug:
                print(("The measurement is not completed, "
                       "will try to reshape spectra into count * pps."),
                      file=stderr)
            try:
                self.spectra = numpy.reshape(self.spectra,
                                              (self.count,
                                               self.point_per_spectrum))
            except ValueError:
                if self.debug:
                    print("Reshaping spectra array failed. Please check.",
                          file=stderr)
            return
        elif hasattr(self, "map_shape"):
            # Is a mapping
            spectra_w, spectra_h = self.map_shape
            if spectra_w * spectra_h != self.count:
                if self.debug:
                    print(("Mapping information from WMAP not"
                           " corresponding to ORGN! "
                           "Will not reshape the spectra"),
                          file=stderr)
                return
            elif spectra_w * spectra_h * self.point_per_spectrum \
                    != len(self.spectra):
                if self.debug:
                    print(("Mapping information from WMAP"
                           " not corresponding to DATA! "
                           "Will not reshape the spectra"),
                          file=stderr)
                return
            else:
                # Should be h rows * w columns. numpy.ndarray is row first
                # Reshape to 3D matrix when doing 2D mapping
                if (spectra_h > 1) and (spectra_w > 1):
                    self.spectra = numpy.reshape(self.spectra,
                                                  (spectra_h, spectra_w,
                                                   self.point_per_spectrum))
                # otherwise it is a line scan
                else:
                    self.spectra = numpy.reshape(self.spectra,
                                                  (self.count,
                                                   self.point_per_spectrum))
        # For any other type of measurement, reshape into (counts, point_per_spectrum)
        # example: series scan
        elif self.count > 1:
            self.spectra = numpy.reshape(self.spectra,
                                          (self.count,
                                           self.point_per_spectrum))
        else:
            return
    def print_info(self, **params):
        """Print information of the wdf file
        """
        s = []
        s.append(u"{0:>24s}:\t{1}".format("Title", self.title))
        s.append(u"{0:>17s} version:\t{1}.{2}.{3}.{4}".
                 format(self.application_name,
                        *self.application_version))
        s.append(u"{0:>24s}:\t{1} nm".format("Laser Wavelength",
                                             self.laser_length))
        for a in ("count", "capacity", "point_per_spectrum",
                  "scan_type", "measurement_type",
                  "spectral_unit",
                  "xlist_unit", "xlist_length",
                  "ylist_unit", "ylist_length",
                  "xpos_unit", "ypos_unit"):
            sname = convert_attr_name(a)
            # Use explicit string conversion to replace
            try:
                val = str(getattr(self, a))
            except AttributeError:
                continue
            s.append("{0:>24s}:\t{1}".format(sname, val))
        text = u"\n".join(s)
        print(text, **params)
# This module is a library; running it directly is an explicit error.
if __name__ == '__main__':
    raise NotImplementedError("Please dont run this module as a script!")
| [
"numpy.abs",
"numpy.fromfile",
"PIL.Image.open",
"numpy.isclose",
"numpy.reshape",
"io.BytesIO",
"numpy.array",
"numpy.zeros",
"numpy.nonzero",
"numpy.min"
] | [((10511, 10569), 'numpy.fromfile', 'numpy.fromfile', (['self.file_obj'], {'dtype': '"""float32"""', 'count': 'size'}), "(self.file_obj, dtype='float32', count=size)\n", (10525, 10569), False, 'import numpy\n'), ((11369, 11459), 'numpy.fromfile', 'numpy.fromfile', (['self.file_obj'], {'dtype': '"""float32"""', 'count': '(n_row * self.point_per_spectrum)'}), "(self.file_obj, dtype='float32', count=n_row * self.\n point_per_spectrum)\n", (11383, 11459), False, 'import numpy\n'), ((12200, 12223), 'numpy.zeros', 'numpy.zeros', (['self.count'], {}), '(self.count)\n', (12211, 12223), False, 'import numpy\n'), ((12244, 12267), 'numpy.zeros', 'numpy.zeros', (['self.count'], {}), '(self.count)\n', (12255, 12267), False, 'import numpy\n'), ((12288, 12311), 'numpy.zeros', 'numpy.zeros', (['self.count'], {}), '(self.count)\n', (12299, 12311), False, 'import numpy\n'), ((17387, 17408), 'io.BytesIO', 'io.BytesIO', (['img_bytes'], {}), '(img_bytes)\n', (17397, 17408), False, 'import io\n'), ((19741, 19765), 'PIL.Image.open', 'PIL.Image.open', (['self.img'], {}), '(self.img)\n', (19755, 19765), False, 'import PIL\n'), ((15097, 15146), 'numpy.isclose', 'numpy.isclose', (['x_start', 'self.xpos[0]'], {'rtol': '(0.0001)'}), '(x_start, self.xpos[0], rtol=0.0001)\n', (15110, 15146), False, 'import numpy\n'), ((15271, 15320), 'numpy.isclose', 'numpy.isclose', (['y_start', 'self.ypos[0]'], {'rtol': '(0.0001)'}), '(y_start, self.ypos[0], rtol=0.0001)\n', (15284, 15320), False, 'import numpy\n'), ((15804, 15839), 'numpy.abs', 'numpy.abs', (['(self.xpos - self.xpos[0])'], {}), '(self.xpos - self.xpos[0])\n', (15813, 15839), False, 'import numpy\n'), ((15860, 15895), 'numpy.abs', 'numpy.abs', (['(self.ypos - self.ypos[0])'], {}), '(self.ypos - self.ypos[0])\n', (15869, 15895), False, 'import numpy\n'), ((17510, 17530), 'PIL.Image.open', 'Image.open', (['self.img'], {}), '(self.img)\n', (17520, 17530), False, 'from PIL import Image\n'), ((15922, 15942), 'numpy.nonzero', 'numpy.nonzero', 
(['xdist'], {}), '(xdist)\n', (15935, 15942), False, 'import numpy\n'), ((15970, 15990), 'numpy.nonzero', 'numpy.nonzero', (['ydist'], {}), '(ydist)\n', (15983, 15990), False, 'import numpy\n'), ((16093, 16109), 'numpy.min', 'numpy.min', (['xdist'], {}), '(xdist)\n', (16102, 16109), False, 'import numpy\n'), ((16219, 16235), 'numpy.min', 'numpy.min', (['ydist'], {}), '(ydist)\n', (16228, 16235), False, 'import numpy\n'), ((18629, 18650), 'numpy.array', 'numpy.array', (['[w_, h_]'], {}), '([w_, h_])\n', (18640, 18650), False, 'import numpy\n'), ((18796, 18825), 'numpy.array', 'numpy.array', (['[x_org_, y_org_]'], {}), '([x_org_, y_org_])\n', (18807, 18825), False, 'import numpy\n'), ((20681, 20747), 'numpy.reshape', 'numpy.reshape', (['self.spectra', '(self.count, self.point_per_spectrum)'], {}), '(self.spectra, (self.count, self.point_per_spectrum))\n', (20694, 20747), False, 'import numpy\n'), ((22717, 22783), 'numpy.reshape', 'numpy.reshape', (['self.spectra', '(self.count, self.point_per_spectrum)'], {}), '(self.spectra, (self.count, self.point_per_spectrum))\n', (22730, 22783), False, 'import numpy\n'), ((22098, 22174), 'numpy.reshape', 'numpy.reshape', (['self.spectra', '(spectra_h, spectra_w, self.point_per_spectrum)'], {}), '(self.spectra, (spectra_h, spectra_w, self.point_per_spectrum))\n', (22111, 22174), False, 'import numpy\n'), ((22377, 22443), 'numpy.reshape', 'numpy.reshape', (['self.spectra', '(self.count, self.point_per_spectrum)'], {}), '(self.spectra, (self.count, self.point_per_spectrum))\n', (22390, 22443), False, 'import numpy\n')] |
import argparse
import numpy as np
import torch
import os
class AverageMeter(object):
    """Track a running (weighted) average of a scalar metric.

    Attributes:
        val: most recent value passed to ``update``.
        sum: weighted sum of all values since the last ``reset``.
        count: total weight since the last ``reset``.
        avg: running average, ``sum / count``.
    """

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        """Zero out all tracked statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val: float, n: int = 1) -> None:
        """Record ``val`` with weight ``n`` and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def set_seed(seed: int) -> None:
    """Make runs reproducible: pin the Python hash seed, seed the numpy and
    torch (CPU + CUDA) RNGs, and force cuDNN into deterministic mode."""
    # Stable str/bytes hashing across interpreter runs.
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Trade cuDNN autotuning speed for bitwise-reproducible kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def parse_training_args() -> argparse.Namespace:
    """Build and parse the command-line arguments for a training run.

    Only --group_id is required; everything else has a default.
    """
    parser = argparse.ArgumentParser()
    # Optimization / schedule.
    parser.add_argument("--accumulation_steps", type=int, default=1)
    # Model checkpoint to fine-tune (HuggingFace hub name or local path).
    parser.add_argument("--checkpoint", type=str, default="roberta-base")
    parser.add_argument("--dataloader_workers", type=int, default=2)
    # 0 disables early stopping.
    parser.add_argument("--early_stopping_patience", type=int, default=0)
    parser.add_argument(
        "--extra_data_dir", type=str, default="data/extra_training_data"
    )
    parser.add_argument("--epochs", type=int, default=1)
    # Cross-validation fold; None means no CV split.
    parser.add_argument("--fold", type=int, default=None)
    parser.add_argument("--group_id", type=int, required=True)
    parser.add_argument("--learning_rate", type=float, default=1e-5)
    parser.add_argument("--log_interval", type=int, default=100)
    # Loss configuration (margin only used by margin-based losses).
    parser.add_argument("--loss_margin", type=float, default=0.5)
    parser.add_argument("--loss_type", type=str, default="mse")
    parser.add_argument("--max_length", type=int, default=128)
    parser.add_argument("--num_labels", type=int, default=1)
    # Checkpointing / output.
    parser.add_argument("--save_all", dest="save_all", action="store_true")
    parser.add_argument("--save_dir", type=str, default=".")
    parser.add_argument("--scheduler", type=str, default="constant")
    parser.add_argument("--seed", type=int, default=666)
    # Data paths and batch sizes.
    parser.add_argument("--train_batch_size", type=int, default=16)
    parser.add_argument("--train_path", type=str, default=None)
    parser.add_argument("--use_extra_data", dest="use_extra_data", action="store_true")
    parser.add_argument("--valid_batch_size", type=int, default=128)
    parser.add_argument("--validation_steps", type=int, default=None)
    parser.add_argument("--valid_path", type=str, default=None)
    # Warmup fraction (0 disables) and regularization.
    parser.add_argument("--warmup", type=float, default=0)
    parser.add_argument("--weight_decay", type=float, default=1e-2)
    parser.add_argument("--weights_path", type=str, default=None)
    return parser.parse_args() | [
"torch.manual_seed",
"torch.cuda.manual_seed",
"numpy.random.seed",
"argparse.ArgumentParser"
] | [((467, 487), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (481, 487), True, 'import numpy as np\n'), ((492, 515), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (509, 515), False, 'import torch\n'), ((520, 548), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (542, 548), False, 'import torch\n'), ((747, 772), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (770, 772), False, 'import argparse\n')] |
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
import os
import math
# Print NumPy arrays with 3-digit precision (scientific notation still allowed).
np.set_printoptions(precision=3, suppress=False)
# Make sure the output directory for saved figures exists ("Slike" = "images").
if not os.path.exists("Slike"):
    os.mkdir("Slike")
def line_fit(x, k, l):
    """Evaluate the straight line y = k*x + l at x (scalar or array)."""
    return x * k + l
def rolling_median(mase, br_dana):
    """Return per-window averages of `mase` over windows of `br_dana` samples.

    Despite the historical name, this computes the *mean* of each consecutive
    window of `br_dana` entries (e.g. weekly averages of daily weights).
    The final window may be shorter than `br_dana`; if the split leaves an
    empty tail, its mean is NaN (the caller filters NaN values out).
    """
    br_mj = int(len(mase)/br_dana)+1
    average_br_dana = []
    for i in range(br_mj):
        if(len(mase[i*br_dana:]) <= br_dana):
            # Last (possibly short or empty) window: average whatever remains.
            average_br_dana.append(np.mean(mase[i*br_dana:]))
        else:
            # BUG FIX: the original sliced mase[i*br_mj : i*br_mj+br_dana],
            # striding by the number of windows instead of the window size,
            # so interior windows overlapped/skipped data.
            average_br_dana.append(np.mean(mase[i*br_dana:(i+1)*br_dana]))
    return np.array(average_br_dana)
# Subject's height in metres, used for the BMI computation below.
height = 1.76
# df = pd.read_csv('masa_09-04-2022.csv')
# Daily weight log; expected columns: "Dan" (day index) and "Masa" (mass, kg).
df = pd.read_csv('masa.csv')
x, y = df["Dan"], df["Masa"]
masa = np.array([mass for mass in df["Masa"]])
# BMI = mass / height^2 for every logged day.
BMI = np.array([mass/(height**2) for mass in masa])
df["BMI"] = BMI
# Least-squares straight-line fit of mass vs. day: y = k*x + l.
k, l = np.polyfit(x, y, 1)
print(f"k = {k}\n\nl = {l}")
# Figure 1: mass over time, overlaid with its linear fit.
name_m = "Promjena mase u vremenu za A.U."
fig = go.Figure()
fig.add_trace(go.Scatter( x = x, y = y, name = name_m, mode = 'lines'))
fig.add_trace(go.Scatter( x = x, y = line_fit(x, k, l),name = name_m + " linear fit" ,mode = 'lines+markers'))
# Figure 2: BMI over time.
name_BMI = "Promjena BMI u vremenu za A.U."
fig2 = go.Figure()
fig2.add_trace(go.Scatter( x=x, y=BMI, name=name_BMI))
pio.write_image(fig, "Slike/" + name_m + ".png", width=2000, height=2000)
pio.write_image(fig2, "Slike/" + name_BMI + ".png", width=2000, height=2000)
# Console report (all messages below are in Croatian).
print(f"Prvi dan ti je BMI bio {round(BMI[0],2)} kg/m^2.\n")
print(f"Danas ti je BMI jednak {round(BMI[-1],2)} kg/m^2.\n")
razlika_BMI = BMI[0] - BMI[-1]
br_dana = len(BMI)
print(f"Razlika u {br_dana} dana je {round(razlika_BMI,2)} kg/m^2.\n")
# Day-over-day deltas: positive values mean today's value dropped.
razlika_BMI_jucer = BMI[-2] - BMI[-1]
razlika_mase = masa[0] - masa[-1]
razlika_mase_jucer = masa[-2] - masa[-1]
print(f"Početna masa je {round(masa[0],2)}.\n")
print(f"Današnja masa je {round(masa[-1],2)}.\n")
print(f"Ukupna razlika mase je {round(razlika_mase,2)}.\n")
print(f"Najniža masa je {min(masa)} kg.\n")
print(f"Najniži BMI je {round(min(BMI),2)} kg/m^2.\n")
# Pick a message depending on whether today's BMI is lower than, equal to,
# or higher than yesterday's.
if(razlika_BMI_jucer > 0):
    print(f"Bravo, razlika od jučer je {round(razlika_BMI_jucer,2)} kg/m^2.\n")
    print(f"Razlika mase od jučer je {round(razlika_mase_jucer,2)} kg.\n")
elif(razlika_BMI_jucer == 0):
    print(f"Smrade, skidaj kile ili umri. Nisi se udeblja danas al u pičku materinu, vježbaj konju glupi.\n")
    print(f"Razlika BMI od jučer je {round(razlika_BMI_jucer,2)} kg/m^2\n")
else:
    print(f"Smrade, skidaj kile ili umri. Udeblja si se od jučer za {round(razlika_mase_jucer,2)} kg.\n")
    print(f"Razlika BMI od jučer je loša stari: {round(razlika_BMI_jucer,2)} kg/m^2, smanji hranu pederu. Vježbaj, pička ti materina.\n")
# The fit above gives y = k*x + l, so the day on which mass y = X is reached
# is x = (X - l) / k; subtract the days already logged to get days remaining.
X = 85
br_d_do_X = (X - l)/k - br_dana
print(f"Broj dana do {X} kg je cca.: {round(br_d_do_X,2)} ({round(br_d_do_X/7,2)} tjedana.)\n")
# Weekly (7-day) window averages; drop any NaN from an empty last window.
average_br_dana = rolling_median(masa, 7)
average_br_dana = [x for x in average_br_dana if math.isnan(x) == False]
print(f"Srednja masa prvi misec je {round(average_br_dana[0],2)}kg.\n")
if len(masa) >= 60:
    print(f"Srednja masa prošli misec je {round(average_br_dana[-2],2)}kg.\n")
elif len(masa) >= 30:
    print(f"Srednja masa ovaj misec je {round(average_br_dana[-1],2)}kg.\n")
| [
"os.path.exists",
"plotly.io.write_image",
"numpy.mean",
"pandas.read_csv",
"numpy.polyfit",
"math.isnan",
"numpy.array",
"plotly.graph_objects.Figure",
"plotly.graph_objects.Scatter",
"os.mkdir",
"numpy.set_printoptions"
] | [((120, 168), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(False)'}), '(precision=3, suppress=False)\n', (139, 168), True, 'import numpy as np\n'), ((688, 711), 'pandas.read_csv', 'pd.read_csv', (['"""masa.csv"""'], {}), "('masa.csv')\n", (699, 711), True, 'import pandas as pd\n'), ((749, 788), 'numpy.array', 'np.array', (["[mass for mass in df['Masa']]"], {}), "([mass for mass in df['Masa']])\n", (757, 788), True, 'import numpy as np\n'), ((795, 844), 'numpy.array', 'np.array', (['[(mass / height ** 2) for mass in masa]'], {}), '([(mass / height ** 2) for mass in masa])\n', (803, 844), True, 'import numpy as np\n'), ((865, 884), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (875, 884), True, 'import numpy as np\n'), ((974, 985), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (983, 985), True, 'import plotly.graph_objects as go\n'), ((1231, 1242), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (1240, 1242), True, 'import plotly.graph_objects as go\n'), ((1299, 1372), 'plotly.io.write_image', 'pio.write_image', (['fig', "('Slike/' + name_m + '.png')"], {'width': '(2000)', 'height': '(2000)'}), "(fig, 'Slike/' + name_m + '.png', width=2000, height=2000)\n", (1314, 1372), True, 'import plotly.io as pio\n'), ((1373, 1449), 'plotly.io.write_image', 'pio.write_image', (['fig2', "('Slike/' + name_BMI + '.png')"], {'width': '(2000)', 'height': '(2000)'}), "(fig2, 'Slike/' + name_BMI + '.png', width=2000, height=2000)\n", (1388, 1449), True, 'import plotly.io as pio\n'), ((177, 200), 'os.path.exists', 'os.path.exists', (['"""Slike"""'], {}), "('Slike')\n", (191, 200), False, 'import os\n'), ((206, 223), 'os.mkdir', 'os.mkdir', (['"""Slike"""'], {}), "('Slike')\n", (214, 223), False, 'import os\n'), ((600, 625), 'numpy.array', 'np.array', (['average_br_dana'], {}), '(average_br_dana)\n', (608, 625), True, 'import numpy as np\n'), ((1000, 1047), 
'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'y', 'name': 'name_m', 'mode': '"""lines"""'}), "(x=x, y=y, name=name_m, mode='lines')\n", (1010, 1047), True, 'import plotly.graph_objects as go\n'), ((1258, 1295), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'BMI', 'name': 'name_BMI'}), '(x=x, y=BMI, name=name_BMI)\n', (1268, 1295), True, 'import plotly.graph_objects as go\n'), ((3069, 3082), 'math.isnan', 'math.isnan', (['x'], {}), '(x)\n', (3079, 3082), False, 'import math\n'), ((473, 500), 'numpy.mean', 'np.mean', (['mase[i * br_dana:]'], {}), '(mase[i * br_dana:])\n', (480, 500), True, 'import numpy as np\n'), ((549, 593), 'numpy.mean', 'np.mean', (['mase[i * br_mj:i * br_mj + br_dana]'], {}), '(mase[i * br_mj:i * br_mj + br_dana])\n', (556, 593), True, 'import numpy as np\n')] |
"""
Q3: Fourier filtering and smoothing
Plot the actual data, then calculate Fourier coefficients
Then plot the transformed data
"""
import numpy as np
import matplotlib.pyplot as plt
# Load the daily Dow closing values (one float per line).
dow = np.loadtxt("dow.txt", float)
days = np.arange(len(dow))

# --- Figure 1: raw data, later overlaid with the 10% reconstruction ---
plt.figure(1)
plt.title('Closing values of DOW index per day')
plt.xlabel('Number of days since start of data set')
plt.ylabel('Closing value')
plt.xlim(0, len(dow))
plt.plot(days, dow, 'g', label='Unprocessed data')
# Plot is shown later, after the smoothed curve has been added.

# (B) Real-valued FFT of the series; the coefficients are complex.
coeffs = np.fft.rfft(dow)

# (C) Keep only the first 10% of the coefficients, zero out the rest.
n_coeffs = len(coeffs)
keep_10 = n_coeffs // 10  # integer cutoff index; works on zero-based indices
smooth_10 = np.copy(coeffs)
smooth_10[keep_10:n_coeffs] = 0.0

# (D) Inverse transform of the truncated spectrum, overlaid on the raw data.
recon_10 = np.fft.irfft(smooth_10)
plt.plot(days, recon_10, 'r', label='Inverse transform, 10% of coefficients')
plt.legend(loc=0)
plt.show()

# (E) Same smoothing, but keeping only the first 2% of the coefficients.
keep_2 = n_coeffs // 50
smooth_2 = np.copy(coeffs)
smooth_2[keep_2:n_coeffs] = 0.0
recon_2 = np.fft.irfft(smooth_2)

# --- Figure 2: raw data vs. the 2% reconstruction ---
plt.figure(2)
plt.plot(days, dow, 'g', label='Unprocessed data')
plt.plot(days, recon_2, 'b', label='Inverse transform, 2% of coefficients')
plt.title('Closing values of DOW index per day')
plt.xlabel('Number of days since start of data set')
plt.ylabel('Closing value')
plt.xlim(0, len(dow))
plt.legend(loc=0)
plt.show()

# --- Figure 3: everything on one set of axes ---
plt.figure(3)
plt.plot(days, dow, 'g', label='Unprocessed data')
plt.plot(days, recon_10, 'r', label='Inverse transform, 10% of coefficients')
plt.plot(days, recon_2, 'b', label='Inverse transform, 2% of coefficients')
plt.title('Closing values of DOW index per day')
plt.xlabel('Number of days since start of data set')
plt.ylabel('Closing value')
plt.legend(loc=0)
plt.xlim(0, len(dow))
plt.show() | [
"numpy.copy",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.fft.irfft",
"numpy.fft.rfft",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((214, 242), 'numpy.loadtxt', 'np.loadtxt', (['"""dow.txt"""', 'float'], {}), "('dow.txt', float)\n", (224, 242), True, 'import numpy as np\n'), ((300, 313), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (310, 313), True, 'import matplotlib.pyplot as plt\n'), ((314, 362), 'matplotlib.pyplot.title', 'plt.title', (['"""Closing values of DOW index per day"""'], {}), "('Closing values of DOW index per day')\n", (323, 362), True, 'import matplotlib.pyplot as plt\n'), ((363, 415), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of days since start of data set"""'], {}), "('Number of days since start of data set')\n", (373, 415), True, 'import matplotlib.pyplot as plt\n'), ((416, 443), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Closing value"""'], {}), "('Closing value')\n", (426, 443), True, 'import matplotlib.pyplot as plt\n'), ((467, 518), 'matplotlib.pyplot.plot', 'plt.plot', (['dow_l', 'dow', '"""g"""'], {'label': '"""Unprocessed data"""'}), "(dow_l, dow, 'g', label='Unprocessed data')\n", (475, 518), True, 'import matplotlib.pyplot as plt\n'), ((634, 650), 'numpy.fft.rfft', 'np.fft.rfft', (['dow'], {}), '(dow)\n', (645, 650), True, 'import numpy as np\n'), ((870, 884), 'numpy.copy', 'np.copy', (['dowft'], {}), '(dowft)\n', (877, 884), True, 'import numpy as np\n'), ((1017, 1040), 'numpy.fft.irfft', 'np.fft.irfft', (['dowft_f10'], {}), '(dowft_f10)\n', (1029, 1040), True, 'import numpy as np\n'), ((1048, 1136), 'matplotlib.pyplot.plot', 'plt.plot', (['dow_l', 'inv_dowft_f10', '"""r"""'], {'label': '"""Inverse transform, 10% of coefficients"""'}), "(dow_l, inv_dowft_f10, 'r', label=\n 'Inverse transform, 10% of coefficients')\n", (1056, 1136), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1187), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (1180, 1187), True, 'import matplotlib.pyplot as plt\n'), ((1188, 1198), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1196, 1198), True, 'import 
matplotlib.pyplot as plt\n'), ((1300, 1314), 'numpy.copy', 'np.copy', (['dowft'], {}), '(dowft)\n', (1307, 1314), True, 'import numpy as np\n'), ((1444, 1466), 'numpy.fft.irfft', 'np.fft.irfft', (['dowft_f2'], {}), '(dowft_f2)\n', (1456, 1466), True, 'import numpy as np\n'), ((1475, 1488), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (1485, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1540), 'matplotlib.pyplot.plot', 'plt.plot', (['dow_l', 'dow', '"""g"""'], {'label': '"""Unprocessed data"""'}), "(dow_l, dow, 'g', label='Unprocessed data')\n", (1497, 1540), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1625), 'matplotlib.pyplot.plot', 'plt.plot', (['dow_l', 'inv_dowft_f2', '"""b"""'], {'label': '"""Inverse transform, 2% of coefficients"""'}), "(dow_l, inv_dowft_f2, 'b', label=\n 'Inverse transform, 2% of coefficients')\n", (1547, 1625), True, 'import matplotlib.pyplot as plt\n'), ((1620, 1668), 'matplotlib.pyplot.title', 'plt.title', (['"""Closing values of DOW index per day"""'], {}), "('Closing values of DOW index per day')\n", (1629, 1668), True, 'import matplotlib.pyplot as plt\n'), ((1669, 1721), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of days since start of data set"""'], {}), "('Number of days since start of data set')\n", (1679, 1721), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1749), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Closing value"""'], {}), "('Closing value')\n", (1732, 1749), True, 'import matplotlib.pyplot as plt\n'), ((1773, 1790), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (1783, 1790), True, 'import matplotlib.pyplot as plt\n'), ((1791, 1801), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1799, 1801), True, 'import matplotlib.pyplot as plt\n'), ((1827, 1840), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (1837, 1840), True, 'import matplotlib.pyplot as plt\n'), ((1841, 1892), 'matplotlib.pyplot.plot', 
'plt.plot', (['dow_l', 'dow', '"""g"""'], {'label': '"""Unprocessed data"""'}), "(dow_l, dow, 'g', label='Unprocessed data')\n", (1849, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1891, 1979), 'matplotlib.pyplot.plot', 'plt.plot', (['dow_l', 'inv_dowft_f10', '"""r"""'], {'label': '"""Inverse transform, 10% of coefficients"""'}), "(dow_l, inv_dowft_f10, 'r', label=\n 'Inverse transform, 10% of coefficients')\n", (1899, 1979), True, 'import matplotlib.pyplot as plt\n'), ((2013, 2099), 'matplotlib.pyplot.plot', 'plt.plot', (['dow_l', 'inv_dowft_f2', '"""b"""'], {'label': '"""Inverse transform, 2% of coefficients"""'}), "(dow_l, inv_dowft_f2, 'b', label=\n 'Inverse transform, 2% of coefficients')\n", (2021, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2142), 'matplotlib.pyplot.title', 'plt.title', (['"""Closing values of DOW index per day"""'], {}), "('Closing values of DOW index per day')\n", (2103, 2142), True, 'import matplotlib.pyplot as plt\n'), ((2143, 2195), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of days since start of data set"""'], {}), "('Number of days since start of data set')\n", (2153, 2195), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2223), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Closing value"""'], {}), "('Closing value')\n", (2206, 2223), True, 'import matplotlib.pyplot as plt\n'), ((2224, 2241), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (2234, 2241), True, 'import matplotlib.pyplot as plt\n'), ((2265, 2275), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2273, 2275), True, 'import matplotlib.pyplot as plt\n')] |
import os.path as osp
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from bounding_box_utils.bounding_box_utils import convert_coordinates
import numpy as np
import cv2
# Build the training data generator; images are streamed from disk rather
# than pre-loaded into memory.
train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
images_dir = '/home/adam/.keras/datasets/udacity_self_driving_car/object-detection-crowdai-480-300'
# Ground truth
train_labels_filepath = osp.join(images_dir, 'train.csv')
train_dataset.parse_csv(images_dir=images_dir,
                        labels_filename=train_labels_filepath,
                        # This is the order of the first six columns in the CSV file that contains the labels for your
                        # dataset. If your labels are in XML format, maybe the XML parser will be helpful.
                        input_format=['image_name', 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'],
                        include_classes='all')
# Display names indexed by (class_id - 1); class_id 0 is background.
class_names = ['car', 'truck', 'pedestrian']
# Photometric + geometric augmentation chain that keeps the input size fixed.
data_augmentation_chain = DataAugmentationConstantInputSize(random_brightness=(-48, 48, 0.5),
                                                    random_contrast=(0.5, 1.8, 0.5),
                                                    random_saturation=(0.5, 1.8, 0.5),
                                                    random_hue=(18, 0.5),
                                                    random_flip=0.5,
                                                    random_translate=((0.03, 0.5), (0.03, 0.5), 0.5),
                                                    random_scale=(0.5, 2.0, 0.5),
                                                    n_trials_max=3,
                                                    # Here, clipping applies to the ground-truth boxes
                                                    clip_boxes=True,
                                                    overlap_criterion_box_filter='area',
                                                    overlap_criterion_validator='area',
                                                    bounds_box_filter=(0.3, 1.0),
                                                    bounds_validator=(0.5, 1.0),
                                                    n_boxes_min=1,
                                                    background=(0, 0, 0))
batch_size = 4
img_height = 300
img_width = 480
n_classes = 3
# Anchor-box scales per predictor layer (fractions of the shorter image side).
scales = [0.08, 0.16, 0.32, 0.64, 0.96]
aspect_ratios = [0.5, 1.0, 2.0]
two_boxes_for_ar1 = True
steps = None
offsets = None
clip_boxes = False
variances = [1.0, 1.0, 1.0, 1.0]
normalize_coords = True
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
# [(img_height // 8, img_width // 8), (img_height // 16, img_width // 16), (img_height // 32, img_width // 32)
# (img_height // 64, img_width // 64)]
predictor_sizes = [(150, 240), (75, 120), (37, 60), (18, 30)]
# Encodes ground-truth boxes into the model's training target format
# (anchor matching + offset encoding, centroid coordinates).
ssd_input_encoder = SSDInputEncoder(img_height=img_height,
                                    img_width=img_width,
                                    n_classes=n_classes,
                                    predictor_sizes=predictor_sizes,
                                    scales=scales,
                                    aspect_ratios_global=aspect_ratios,
                                    two_boxes_for_ar1=two_boxes_for_ar1,
                                    steps=steps,
                                    offsets=offsets,
                                    # Here, clipping applies to the anchor boxes
                                    clip_boxes=clip_boxes,
                                    variances=variances,
                                    matching_type='multi',
                                    pos_iou_threshold=0.5,
                                    neg_iou_limit=0.3,
                                    normalize_coords=normalize_coords,
                                    coords='centroids',
                                    )
# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
train_generator = train_dataset.generate(batch_size=batch_size,
                                         shuffle=True,
                                         transformations=(data_augmentation_chain,),
                                         label_encoder=ssd_input_encoder,
                                         returns=('processed_images',
                                                  'encoded_labels', 'original_images', 'original_labels'),
                                         keep_images_without_gt=False)
def preview_gt_boxes():
    """Show each original batch image with its ground-truth boxes drawn on.

    Iterates the (endless) training generator; press any key in the OpenCV
    window to advance to the next image.
    """
    for _, _, original_images, original_labels in train_generator:
        for image, boxes in zip(original_images, original_labels):
            for class_id, xmin, ymin, xmax, ymax in boxes:
                # class_id is 1-based (0 = background), hence the -1 lookup.
                cv2.putText(image, class_names[class_id - 1], (xmin, ymin),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
            cv2.namedWindow('image', cv2.WINDOW_NORMAL)
            cv2.imshow('image', image)
            cv2.waitKey(0)
def preview_anchor_boxes():
    """Show each processed batch image with its encoded anchor boxes drawn on.

    The generator yields (processed_images, encoded_labels, original_images,
    original_labels); columns -8:-4 of each encoded label row hold the anchor
    geometry in normalized centroid coordinates. Press any key in the OpenCV
    window to advance to the next image.
    """
    for processed_images, encoded_labels, _, _ in train_generator:
        for i in range(len(processed_images)):
            # BUG FIX: the original referenced undefined names `batch_item_x`
            # and `batch_item_y` (NameError); bind them from the generator's
            # processed images / encoded labels instead.
            batch_item_x = processed_images[i]
            batch_item_y = encoded_labels[i]
            anchor_boxes = batch_item_y[:, -8:-4]
            anchor_boxes = convert_coordinates(anchor_boxes,
                                                start_index=0,
                                                conversion='centroids2corners',
                                                border_pixels='half')
            # De-normalize from [0, 1] back to pixel coordinates.
            anchor_boxes[:, [0, 2]] *= img_width
            anchor_boxes[:, [1, 3]] *= img_height
            anchor_boxes = np.round(anchor_boxes).astype('int')
            print(anchor_boxes[0])
            # NOTE(review): 'int8' overflows for pixel values > 127 — this
            # looks like it should be 'uint8'; left unchanged pending confirmation.
            image = batch_item_x.astype('int8')
            for anchor_box in anchor_boxes:
                cv2.rectangle(image, (anchor_box[0], anchor_box[1]), (anchor_box[2], anchor_box[3]), (0, 255, 0), 2)
            cv2.namedWindow('image', cv2.WINDOW_NORMAL)
            cv2.imshow('image', image)
            cv2.waitKey(0)


preview_gt_boxes()
| [
"cv2.rectangle",
"data_generator.data_augmentation_chain_constant_input_size.DataAugmentationConstantInputSize",
"data_generator.object_detection_2d_data_generator.DataGenerator",
"numpy.round",
"os.path.join",
"cv2.imshow",
"cv2.putText",
"bounding_box_utils.bounding_box_utils.convert_coordinates",
... | [((386, 454), 'data_generator.object_detection_2d_data_generator.DataGenerator', 'DataGenerator', ([], {'load_images_into_memory': '(False)', 'hdf5_dataset_path': 'None'}), '(load_images_into_memory=False, hdf5_dataset_path=None)\n', (399, 454), False, 'from data_generator.object_detection_2d_data_generator import DataGenerator\n'), ((594, 627), 'os.path.join', 'osp.join', (['images_dir', '"""train.csv"""'], {}), "(images_dir, 'train.csv')\n", (602, 627), True, 'import os.path as osp\n'), ((1179, 1660), 'data_generator.data_augmentation_chain_constant_input_size.DataAugmentationConstantInputSize', 'DataAugmentationConstantInputSize', ([], {'random_brightness': '(-48, 48, 0.5)', 'random_contrast': '(0.5, 1.8, 0.5)', 'random_saturation': '(0.5, 1.8, 0.5)', 'random_hue': '(18, 0.5)', 'random_flip': '(0.5)', 'random_translate': '((0.03, 0.5), (0.03, 0.5), 0.5)', 'random_scale': '(0.5, 2.0, 0.5)', 'n_trials_max': '(3)', 'clip_boxes': '(True)', 'overlap_criterion_box_filter': '"""area"""', 'overlap_criterion_validator': '"""area"""', 'bounds_box_filter': '(0.3, 1.0)', 'bounds_validator': '(0.5, 1.0)', 'n_boxes_min': '(1)', 'background': '(0, 0, 0)'}), "(random_brightness=(-48, 48, 0.5),\n random_contrast=(0.5, 1.8, 0.5), random_saturation=(0.5, 1.8, 0.5),\n random_hue=(18, 0.5), random_flip=0.5, random_translate=((0.03, 0.5), (\n 0.03, 0.5), 0.5), random_scale=(0.5, 2.0, 0.5), n_trials_max=3,\n clip_boxes=True, overlap_criterion_box_filter='area',\n overlap_criterion_validator='area', bounds_box_filter=(0.3, 1.0),\n bounds_validator=(0.5, 1.0), n_boxes_min=1, background=(0, 0, 0))\n", (1212, 1660), False, 'from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\n'), ((3170, 3588), 'ssd_encoder_decoder.ssd_input_encoder.SSDInputEncoder', 'SSDInputEncoder', ([], {'img_height': 'img_height', 'img_width': 'img_width', 'n_classes': 'n_classes', 'predictor_sizes': 'predictor_sizes', 'scales': 'scales', 
'aspect_ratios_global': 'aspect_ratios', 'two_boxes_for_ar1': 'two_boxes_for_ar1', 'steps': 'steps', 'offsets': 'offsets', 'clip_boxes': 'clip_boxes', 'variances': 'variances', 'matching_type': '"""multi"""', 'pos_iou_threshold': '(0.5)', 'neg_iou_limit': '(0.3)', 'normalize_coords': 'normalize_coords', 'coords': '"""centroids"""'}), "(img_height=img_height, img_width=img_width, n_classes=\n n_classes, predictor_sizes=predictor_sizes, scales=scales,\n aspect_ratios_global=aspect_ratios, two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps, offsets=offsets, clip_boxes=clip_boxes, variances=\n variances, matching_type='multi', pos_iou_threshold=0.5, neg_iou_limit=\n 0.3, normalize_coords=normalize_coords, coords='centroids')\n", (3185, 3588), False, 'from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\n'), ((5354, 5397), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""', 'cv2.WINDOW_NORMAL'], {}), "('image', cv2.WINDOW_NORMAL)\n", (5369, 5397), False, 'import cv2\n'), ((5410, 5436), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (5420, 5436), False, 'import cv2\n'), ((5449, 5463), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5460, 5463), False, 'import cv2\n'), ((5780, 5887), 'bounding_box_utils.bounding_box_utils.convert_coordinates', 'convert_coordinates', (['anchor_boxes'], {'start_index': '(0)', 'conversion': '"""centroids2corners"""', 'border_pixels': '"""half"""'}), "(anchor_boxes, start_index=0, conversion=\n 'centroids2corners', border_pixels='half')\n", (5799, 5887), False, 'from bounding_box_utils.bounding_box_utils import convert_coordinates\n'), ((6443, 6486), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""', 'cv2.WINDOW_NORMAL'], {}), "('image', cv2.WINDOW_NORMAL)\n", (6458, 6486), False, 'import cv2\n'), ((6499, 6525), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (6509, 6525), False, 'import cv2\n'), ((6538, 6552), 'cv2.waitKey', 'cv2.waitKey', 
(['(0)'], {}), '(0)\n', (6549, 6552), False, 'import cv2\n'), ((5097, 5217), 'cv2.putText', 'cv2.putText', (['image', 'class_names[gt_box[0] - 1]', '(gt_box[1], gt_box[2])', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), '(image, class_names[gt_box[0] - 1], (gt_box[1], gt_box[2]), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n', (5108, 5217), False, 'import cv2\n'), ((5257, 5346), 'cv2.rectangle', 'cv2.rectangle', (['image', '(gt_box[1], gt_box[2])', '(gt_box[3], gt_box[4])', '(0, 255, 0)', '(2)'], {}), '(image, (gt_box[1], gt_box[2]), (gt_box[3], gt_box[4]), (0, \n 255, 0), 2)\n', (5270, 5346), False, 'import cv2\n'), ((6330, 6434), 'cv2.rectangle', 'cv2.rectangle', (['image', '(anchor_box[0], anchor_box[1])', '(anchor_box[2], anchor_box[3])', '(0, 255, 0)', '(2)'], {}), '(image, (anchor_box[0], anchor_box[1]), (anchor_box[2],\n anchor_box[3]), (0, 255, 0), 2)\n', (6343, 6434), False, 'import cv2\n'), ((6150, 6172), 'numpy.round', 'np.round', (['anchor_boxes'], {}), '(anchor_boxes)\n', (6158, 6172), True, 'import numpy as np\n')] |
from __future__ import print_function
import numpy as np
import regreg.api as rr
from selection.tests.decorators import wait_for_return_value, register_report, set_sampling_params_iftrue
import selection.tests.reports as reports
from selection.tests.flags import SMALL_SAMPLES
from selection.api import multiple_queries, glm_target
from selection.randomized.glm import split_glm_group_lasso
from selection.tests.instance import logistic_instance
from selection.randomized.query import naive_confidence_intervals
@register_report(['mle', 'truth', 'pvalue', 'cover', 'naive_cover', 'active'])
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
@wait_for_return_value()
def test_split(s=3,
               n=200,
               p=50,
               signal=7,
               rho=0.1,
               split_frac=0.8,
               lam_frac=0.7,
               ndraw=10000,
               burnin=2000,
               bootstrap=True,
               solve_args={'min_its':50, 'tol':1.e-10},
               reference_known=False):
    """Data-splitting selective inference on a random logistic-lasso instance.

    Simulates a sparse logistic regression, fits a randomized (data-split)
    group lasso, and — when the true support is recovered — computes selective
    pivots, p-values and confidence intervals for the selected coefficients.

    Returns None if nothing is selected; otherwise a tuple
    (pivots_mle, pivots_truth, pvalues, covered, naive_covered, active_var)
    of per-coefficient diagnostics consumed by the report decorators.
    """
    X, y, beta, _ = logistic_instance(n=n, p=p, s=s, rho=rho, signal=signal)
    # Size of the "selection" half of the split.
    m = int(split_frac * n)
    nonzero = np.where(beta)[0]
    loss = rr.glm.logistic(X, y)
    epsilon = 1. / np.sqrt(n)
    # Tuning parameter: scaled mean of max correlations with random signs.
    lam = lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.binomial(1, 1. / 2, (n, 2000)))).max(0))
    W = np.ones(p)*lam
    W[0] = 0 # use at least some unpenalized
    penalty = rr.group_lasso(np.arange(p),
                              weights=dict(zip(np.arange(p), W)), lagrange=1.)
    M_est = split_glm_group_lasso(loss, epsilon, m, penalty)
    mv = multiple_queries([M_est])
    mv.solve()
    # NOTE(review): self-assignment kept from the original; it is a no-op.
    M_est.selection_variable['variables'] = M_est.selection_variable['variables']
    nactive = np.sum(M_est.selection_variable['variables'])
    if nactive==0:
        return None
    # Only proceed when the selected set contains the true support.
    if set(nonzero).issubset(np.nonzero(M_est.selection_variable['variables'])[0]):
        active_set = np.nonzero(M_est.selection_variable['variables'])[0]
        # NOTE(review): branch ordering looks inverted (the `bootstrap=True`
        # keyword is passed on the *else* path) — confirm against glm_target's
        # default before changing.
        if bootstrap:
            target_sampler, target_observed = glm_target(loss,
                                                         M_est.selection_variable['variables'],
                                                         mv)
        else:
            target_sampler, target_observed = glm_target(loss,
                                                         M_est.selection_variable['variables'],
                                                         mv,
                                                         bootstrap=True)
            reference_known = True
        if reference_known:
            reference = beta[M_est.selection_variable['variables']]
        else:
            reference = target_observed
        target_sampler.reference = reference
        target_sample = target_sampler.sample(ndraw=ndraw,
                                              burnin=burnin)
        LU = target_sampler.confidence_intervals(target_observed,
                                                 sample=target_sample).T
        LU_naive = naive_confidence_intervals(target_sampler, target_observed)
        pivots_mle = target_sampler.coefficient_pvalues(target_observed,
                                                        parameter=target_sampler.reference,
                                                        sample=target_sample)
        pivots_truth = target_sampler.coefficient_pvalues(target_observed,
                                                          parameter=beta[M_est.selection_variable['variables']],
                                                          sample=target_sample)
        true_vec = beta[M_est.selection_variable['variables']]
        pvalues = target_sampler.coefficient_pvalues(target_observed,
                                                     parameter=np.zeros_like(true_vec),
                                                     sample=target_sample)
        L, U = LU
        # BUG FIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `bool` is the documented replacement and is equivalent.
        covered = np.zeros(nactive, bool)
        naive_covered = np.zeros(nactive, bool)
        active_var = np.zeros(nactive, bool)
        for j in range(nactive):
            # Coverage of the selective and the naive intervals.
            if (L[j] <= true_vec[j]) and (U[j] >= true_vec[j]):
                covered[j] = 1
            if (LU_naive[j,0] <= true_vec[j]) and (LU_naive[j,1] >= true_vec[j]):
                naive_covered[j] = 1
            active_var[j] = active_set[j] in nonzero
        return pivots_mle, pivots_truth, pvalues, covered, naive_covered, active_var
def report(niter=50, **kwargs):
    """Run `test_split` `niter` times for CLT and bootstrap targets and plot.

    Writes 'split_pivots.pdf' with both pivot curves on the same axes.
    """
    spec = reports.reports['test_split']

    def _collect():
        # Re-reads `kwargs` on every call, so mutations below take effect.
        return reports.collect_multiple_runs(spec['test'],
                                             spec['columns'],
                                             niter,
                                             reports.summarize_all,
                                             **kwargs)

    clt_runs = _collect()
    kwargs['bootstrap'] = False
    fig = reports.pivot_plot(clt_runs, color='b', label='CLT')

    kwargs['bootstrap'] = True
    bootstrap_runs = _collect()
    fig = reports.pivot_plot(bootstrap_runs, color='g', label='Bootstrap', fig=fig)
    # The saved figure carries both the bootstrap and the CLT curves.
    fig.savefig('split_pivots.pdf')
| [
"numpy.sqrt",
"selection.api.glm_target",
"numpy.arange",
"numpy.random.binomial",
"selection.tests.reports.pivot_plot",
"selection.api.multiple_queries",
"selection.randomized.glm.split_glm_group_lasso",
"numpy.where",
"regreg.api.glm.logistic",
"selection.tests.decorators.wait_for_return_value",... | [((518, 595), 'selection.tests.decorators.register_report', 'register_report', (["['mle', 'truth', 'pvalue', 'cover', 'naive_cover', 'active']"], {}), "(['mle', 'truth', 'pvalue', 'cover', 'naive_cover', 'active'])\n", (533, 595), False, 'from selection.tests.decorators import wait_for_return_value, register_report, set_sampling_params_iftrue\n'), ((597, 659), 'selection.tests.decorators.set_sampling_params_iftrue', 'set_sampling_params_iftrue', (['SMALL_SAMPLES'], {'ndraw': '(10)', 'burnin': '(10)'}), '(SMALL_SAMPLES, ndraw=10, burnin=10)\n', (623, 659), False, 'from selection.tests.decorators import wait_for_return_value, register_report, set_sampling_params_iftrue\n'), ((661, 684), 'selection.tests.decorators.wait_for_return_value', 'wait_for_return_value', ([], {}), '()\n', (682, 684), False, 'from selection.tests.decorators import wait_for_return_value, register_report, set_sampling_params_iftrue\n'), ((1064, 1120), 'selection.tests.instance.logistic_instance', 'logistic_instance', ([], {'n': 'n', 'p': 'p', 's': 's', 'rho': 'rho', 'signal': 'signal'}), '(n=n, p=p, s=s, rho=rho, signal=signal)\n', (1081, 1120), False, 'from selection.tests.instance import logistic_instance\n'), ((1194, 1215), 'regreg.api.glm.logistic', 'rr.glm.logistic', (['X', 'y'], {}), '(X, y)\n', (1209, 1215), True, 'import regreg.api as rr\n'), ((1549, 1597), 'selection.randomized.glm.split_glm_group_lasso', 'split_glm_group_lasso', (['loss', 'epsilon', 'm', 'penalty'], {}), '(loss, epsilon, m, penalty)\n', (1570, 1597), False, 'from selection.randomized.glm import split_glm_group_lasso\n'), ((1607, 1632), 'selection.api.multiple_queries', 'multiple_queries', (['[M_est]'], {}), '([M_est])\n', (1623, 1632), False, 'from selection.api import multiple_queries, glm_target\n'), ((1745, 1790), 'numpy.sum', 'np.sum', (["M_est.selection_variable['variables']"], {}), "(M_est.selection_variable['variables'])\n", (1751, 1790), True, 'import 
numpy as np\n'), ((4591, 4711), 'selection.tests.reports.collect_multiple_runs', 'reports.collect_multiple_runs', (["split_report['test']", "split_report['columns']", 'niter', 'reports.summarize_all'], {}), "(split_report['test'], split_report['columns'],\n niter, reports.summarize_all, **kwargs)\n", (4620, 4711), True, 'import selection.tests.reports as reports\n'), ((4930, 4982), 'selection.tests.reports.pivot_plot', 'reports.pivot_plot', (['CLT_runs'], {'color': '"""b"""', 'label': '"""CLT"""'}), "(CLT_runs, color='b', label='CLT')\n", (4948, 4982), True, 'import selection.tests.reports as reports\n'), ((5036, 5156), 'selection.tests.reports.collect_multiple_runs', 'reports.collect_multiple_runs', (["split_report['test']", "split_report['columns']", 'niter', 'reports.summarize_all'], {}), "(split_report['test'], split_report['columns'],\n niter, reports.summarize_all, **kwargs)\n", (5065, 5156), True, 'import selection.tests.reports as reports\n'), ((5367, 5440), 'selection.tests.reports.pivot_plot', 'reports.pivot_plot', (['bootstrap_runs'], {'color': '"""g"""', 'label': '"""Bootstrap"""', 'fig': 'fig'}), "(bootstrap_runs, color='g', label='Bootstrap', fig=fig)\n", (5385, 5440), True, 'import selection.tests.reports as reports\n'), ((1164, 1178), 'numpy.where', 'np.where', (['beta'], {}), '(beta)\n', (1172, 1178), True, 'import numpy as np\n'), ((1235, 1245), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1242, 1245), True, 'import numpy as np\n'), ((1355, 1365), 'numpy.ones', 'np.ones', (['p'], {}), '(p)\n', (1362, 1365), True, 'import numpy as np\n'), ((1444, 1456), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (1453, 1456), True, 'import numpy as np\n'), ((3055, 3114), 'selection.randomized.query.naive_confidence_intervals', 'naive_confidence_intervals', (['target_sampler', 'target_observed'], {}), '(target_sampler, target_observed)\n', (3081, 3114), False, 'from selection.randomized.query import naive_confidence_intervals\n'), ((3980, 4006), 
'numpy.zeros', 'np.zeros', (['nactive', 'np.bool'], {}), '(nactive, np.bool)\n', (3988, 4006), True, 'import numpy as np\n'), ((4031, 4057), 'numpy.zeros', 'np.zeros', (['nactive', 'np.bool'], {}), '(nactive, np.bool)\n', (4039, 4057), True, 'import numpy as np\n'), ((4079, 4105), 'numpy.zeros', 'np.zeros', (['nactive', 'np.bool'], {}), '(nactive, np.bool)\n', (4087, 4105), True, 'import numpy as np\n'), ((1861, 1910), 'numpy.nonzero', 'np.nonzero', (["M_est.selection_variable['variables']"], {}), "(M_est.selection_variable['variables'])\n", (1871, 1910), True, 'import numpy as np\n'), ((1938, 1987), 'numpy.nonzero', 'np.nonzero', (["M_est.selection_variable['variables']"], {}), "(M_est.selection_variable['variables'])\n", (1948, 1987), True, 'import numpy as np\n'), ((2060, 2119), 'selection.api.glm_target', 'glm_target', (['loss', "M_est.selection_variable['variables']", 'mv'], {}), "(loss, M_est.selection_variable['variables'], mv)\n", (2070, 2119), False, 'from selection.api import multiple_queries, glm_target\n'), ((2296, 2371), 'selection.api.glm_target', 'glm_target', (['loss', "M_est.selection_variable['variables']", 'mv'], {'bootstrap': '(True)'}), "(loss, M_est.selection_variable['variables'], mv, bootstrap=True)\n", (2306, 2371), False, 'from selection.api import multiple_queries, glm_target\n'), ((3842, 3865), 'numpy.zeros_like', 'np.zeros_like', (['true_vec'], {}), '(true_vec)\n', (3855, 3865), True, 'import numpy as np\n'), ((1504, 1516), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (1513, 1516), True, 'import numpy as np\n'), ((1296, 1337), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(1.0 / 2)', '(n, 2000)'], {}), '(1, 1.0 / 2, (n, 2000))\n', (1314, 1337), True, 'import numpy as np\n')] |
import random
import cv2
from torchvision import transforms
import torchvision.transforms.functional as ttf
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
def makecon():
    """Compare two DRIVE manual segmentations pixel by pixel and save an
    overlay image: pixels present only in the first map are painted red,
    pixels present only in the second map green; matching pixels keep
    their original value.  Also prints the number of differing pixels.
    """
    path1 = "./dataset/DRIVE/test/1st_manual/"
    path2 = "./dataset/DRIVE/test/1st_manual_tv_resized/"
    save_path = "./dataset/DRIVE/test/contrast/"
    for idx in range(1, 21):
        print("doing " + str(idx))
        # Zero-padded two-digit file prefix (01..20).
        prefix = str(idx) if idx >= 10 else "0" + str(idx)
        first_img = PIL.Image.open(path1 + prefix + "_manual1.gif")
        second_img = PIL.Image.open(path2 + prefix + "_manual1.gif")
        # `overlay` starts as a copy of the first image and gets painted below.
        overlay = np.array(first_img)
        first = np.array(first_img)
        second = np.array(second_img)
        count = 0
        # Pixel-wise comparison over the (assumed) 512x512 label maps.
        for row in range(512):
            for col in range(512):
                if first[row][col] > second[row][col]:
                    overlay[row][col][0] = 255
                    overlay[row][col][1] = 0
                    overlay[row][col][2] = 0
                    count = count + 1
                if first[row][col] < second[row][col]:
                    overlay[row][col][0] = 0
                    overlay[row][col][1] = 255
                    overlay[row][col][2] = 0
                    count = count + 1
        print(str(count))
        result = Image.fromarray(overlay)
        result.save(save_path + prefix + "_" + "contrast.jpg")
makecon()
"numpy.array",
"PIL.Image.fromarray",
"PIL.Image.open"
] | [((706, 731), 'PIL.Image.open', 'PIL.Image.open', (['img1_path'], {}), '(img1_path)\n', (720, 731), False, 'import PIL\n'), ((756, 781), 'PIL.Image.open', 'PIL.Image.open', (['img2_path'], {}), '(img2_path)\n', (770, 781), False, 'import PIL\n'), ((1313, 1327), 'numpy.array', 'np.array', (['img1'], {}), '(img1)\n', (1321, 1327), True, 'import numpy as np\n'), ((1346, 1360), 'numpy.array', 'np.array', (['img1'], {}), '(img1)\n', (1354, 1360), True, 'import numpy as np\n'), ((1377, 1391), 'numpy.array', 'np.array', (['img2'], {}), '(img2)\n', (1385, 1391), True, 'import numpy as np\n'), ((2020, 2043), 'PIL.Image.fromarray', 'Image.fromarray', (['result'], {}), '(result)\n', (2035, 2043), False, 'from PIL import Image\n')] |
from sklearn.externals import joblib
import numpy as np
np.random.seed(1337)  # fix the global RNG so bootstrap resamples are reproducible


def gen_data(pos, neg, niter=100):
    """Draw `niter` bootstrap resamples, each of size 2*len(pos), with
    replacement from both the positive and the negative pools.

    Returns a list of (pos_sample, neg_sample) tuples.
    """
    n = pos.shape[0]
    nn = neg.shape[0]
    samples = []
    for _ in range(niter):
        # Note: both resamples are sized by the positive pool (n * 2).
        pos_idx = np.random.choice(range(n), size=n * 2, replace=True)
        neg_idx = np.random.choice(range(nn), size=n * 2, replace=True)
        samples.append((pos[pos_idx], neg[neg_idx]))
    return samples
# Load the per-subgroup (positive, negative) arrays and the list of subgroup ids.
data_dt = joblib.load('data/sg_div.pkl')
sgids = joblib.load('sgids.pkl')
# Pre-seed the output with every key present in data_dt; keys that are not
# listed in sgids deliberately stay mapped to None rather than being dropped.
dt = dict.fromkeys(data_dt.keys())
# sgid = sgids[0]
for sgid in sgids:
    print(sgid)
    ot, neg = data_dt[sgid]  # presumably (positive, negative) sample arrays -- confirm against the sg_div.pkl writer
    lst = gen_data(ot, neg)  # 100 bootstrap resamples per subgroup (gen_data default niter)
    dt[sgid] = lst
# Persist all bootstrap resamples for downstream analysis.
joblib.dump(dt, 'data/sg_bs_div.pkl')
| [
"sklearn.externals.joblib.load",
"numpy.random.seed",
"sklearn.externals.joblib.dump"
] | [((57, 77), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (71, 77), True, 'import numpy as np\n'), ((487, 517), 'sklearn.externals.joblib.load', 'joblib.load', (['"""data/sg_div.pkl"""'], {}), "('data/sg_div.pkl')\n", (498, 517), False, 'from sklearn.externals import joblib\n'), ((526, 550), 'sklearn.externals.joblib.load', 'joblib.load', (['"""sgids.pkl"""'], {}), "('sgids.pkl')\n", (537, 550), False, 'from sklearn.externals import joblib\n'), ((715, 752), 'sklearn.externals.joblib.dump', 'joblib.dump', (['dt', '"""data/sg_bs_div.pkl"""'], {}), "(dt, 'data/sg_bs_div.pkl')\n", (726, 752), False, 'from sklearn.externals import joblib\n')] |
import json
from twisted.logger import Logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from bokeh.client import push_session
from bokeh.plotting import figure, curdoc
from bokeh.models.widgets import Panel, Tabs
from bokeh.models import Range1d
import numpy as np
import pandas as pd
class test_bokeh_wamp(ApplicationSession):
    """WAMP session that live-plots 4-beam amplitude profiles with Bokeh.

    Subscribes to ensemble JSON messages on "com.rti.data.ens", keeps the
    latest amplitude DataFrame in self.df, and `callback` pushes it into
    the four Bokeh line glyphs (one per beam).
    """
    def __init__(self, config=None):
        ApplicationSession.__init__(self, config)
        self.df = None  # latest amplitude DataFrame; None until first message arrives
        # Placeholder data so the line glyphs exist before any ensemble arrives.
        x = np.array([1])
        y = np.array([1])
        TOOLS = 'pan,box_zoom,wheel_zoom,box_select,crosshair,resize,reset,save,hover'
        # Amplitude (dB) on x, bin number on y -- the profile is drawn sideways.
        ampPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 140))
        ampPlot.legend.location = "top_left"
        ampPlot.legend.click_policy = "hide"
        ampPlot.xaxis[0].axis_label = "dB"
        ampPlot.yaxis[0].axis_label = "Bin"
        # One line glyph per beam (B0..B3); handles kept for callback() updates.
        self.ampB0 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='red', legend="B0")
        self.ampB1 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='green', legend="B1")
        self.ampB2 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='blue', legend="B2")
        self.ampB3 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='orange', legend="B3")
        curdoc().add_root(ampPlot)
    @inlineCallbacks
    def onJoin(self, details):
        """
        Initialize the WAMP settings. This is called before everything is setup to ensure
        the WAMP settings are initialized.  Subscribes the JSON handler to the
        ensemble data topic.
        :return:
        """
        self.log.info("WAMP connected")
        yield self.subscribe(self.on_ens_json_data, u"com.rti.data.ens")
        self.log.info("test Amplitude Bokeh WAMP init")
    def on_ens_json_data(self, data):
        """
        Called when JSON Ensemble data is received from WAMP.
        Stores the raw amplitude dict and a per-beam DataFrame on self.
        :param data: JSON string containing the ensemble data.
        :return:
        """
        json_data = json.loads(data)  # convert to JSON
        self.amp = json_data['Amplitude']  # Get the amplitude data
        amp_np = np.array(json_data['Amplitude']['Amplitude'])  # Create a numpy array from the amplitude data
        self.df = pd.DataFrame(columns=['AmpB0', 'AmpB1', 'AmpB2', 'AmpB3'], data=amp_np)  # Create a description(name) for the columns
        print("-")
    def callback(self):
        # Periodic Bokeh callback: push the latest DataFrame into the glyphs.
        # x/y are intentionally swapped: amplitude on x, bin index on y
        # (matches the axis labels set in __init__).
        # NOTE(review): raises AttributeError if called before the first
        # ensemble arrives (self.df is still None).
        self.ampB0.data_source.data["y"] = self.df.index
        self.ampB0.data_source.data["x"] = self.df.loc[:, 'AmpB0']
        self.ampB1.data_source.data["y"] = self.df.index
        self.ampB1.data_source.data["x"] = self.df.loc[:, 'AmpB1']
        self.ampB2.data_source.data["y"] = self.df.index
        self.ampB2.data_source.data["x"] = self.df.loc[:, 'AmpB2']
        self.ampB3.data_source.data["y"] = self.df.index
        self.ampB3.data_source.data["x"] = self.df.loc[:, 'AmpB3']
        print(".")
#x = np.array([1])
#y = np.array([1])
#TOOLS = 'pan,box_zoom,wheel_zoom,box_select,crosshair,resize,reset,save,hover'
#ampPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 140))
#ampPlot.legend.location = "top_left"
#ampPlot.legend.click_policy = "hide"
#ampPlot.xaxis[0].axis_label="dB"
#ampPlot.yaxis[0].axis_label = "Bin"
#ampB0 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='red', legend="B0")
#ampB1 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='green', legend="B1")
#ampB2 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='blue', legend="B2")
#ampB3 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='orange', legend="B3")
# open a session to keep our local document in sync with server
#session = push_session(curdoc())
#session.show(ampPlot) # open the document in a browser
#tbw = test_bokeh_wamp()
#curdoc().add_root(ampPlot)
#curdoc().add_periodic_callback(tbw.callback, 1000)
# Start the WAMP connection
# Connect the main window to the WAMP connection
#runner = ApplicationRunner(url=u"ws://localhost:55058/ws", realm=u"realm1",
# extra={'ampB0': ampB0, 'ampB1': ampB1, 'ampB2': ampB2, 'ampB3': ampB3})
# Connect to the local WAMP router; run() blocks and drives the twisted reactor.
runner = ApplicationRunner(url=u"ws://localhost:55058/ws", realm=u"realm1")
runner.run(test_bokeh_wamp, start_reactor=True)
#from twisted.internet import reactor
#reactor.run()
#session.loop_until_closed() # run forever
| [
"json.loads",
"bokeh.models.Range1d",
"autobahn.twisted.wamp.ApplicationRunner",
"numpy.array",
"autobahn.twisted.wamp.ApplicationSession.__init__",
"pandas.DataFrame",
"bokeh.plotting.curdoc"
] | [((4273, 4339), 'autobahn.twisted.wamp.ApplicationRunner', 'ApplicationRunner', ([], {'url': 'u"""ws://localhost:55058/ws"""', 'realm': 'u"""realm1"""'}), "(url=u'ws://localhost:55058/ws', realm=u'realm1')\n", (4290, 4339), False, 'from autobahn.twisted.wamp import ApplicationRunner\n'), ((492, 533), 'autobahn.twisted.wamp.ApplicationSession.__init__', 'ApplicationSession.__init__', (['self', 'config'], {}), '(self, config)\n', (519, 533), False, 'from autobahn.twisted.wamp import ApplicationSession\n'), ((570, 583), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (578, 583), True, 'import numpy as np\n'), ((596, 609), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (604, 609), True, 'import numpy as np\n'), ((2008, 2024), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2018, 2024), False, 'import json\n'), ((2208, 2253), 'numpy.array', 'np.array', (["json_data['Amplitude']['Amplitude']"], {}), "(json_data['Amplitude']['Amplitude'])\n", (2216, 2253), True, 'import numpy as np\n'), ((2356, 2427), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['AmpB0', 'AmpB1', 'AmpB2', 'AmpB3']", 'data': 'amp_np'}), "(columns=['AmpB0', 'AmpB1', 'AmpB2', 'AmpB3'], data=amp_np)\n", (2368, 2427), True, 'import pandas as pd\n'), ((776, 791), 'bokeh.models.Range1d', 'Range1d', (['(0)', '(140)'], {}), '(0, 140)\n', (783, 791), False, 'from bokeh.models import Range1d\n'), ((1364, 1372), 'bokeh.plotting.curdoc', 'curdoc', ([], {}), '()\n', (1370, 1372), False, 'from bokeh.plotting import figure, curdoc\n')] |
from __future__ import division, absolute_import, print_function
import unittest
import numpy.testing as testing
import numpy as np
import healpy as hp
import healsparse
class CoverageMapTestCase(unittest.TestCase):
    """Unit tests for HealSparseMap.coverage_map, checked against an
    independently computed reference (see compute_cov_map)."""
    def test_coverage_map_float(self):
        """
        Test coverage_map functionality for floats
        """
        nside_coverage = 16
        nside_map = 512
        # Number of non-masked pixels in the coverage map resolution
        non_masked_px = 10.5
        nfine = (nside_map//nside_coverage)**2
        # Fill the first non_masked_px coverage pixels with random values in (1, 2).
        full_map = np.zeros(hp.nside2npix(nside_map)) + hp.UNSEEN
        full_map[0: int(non_masked_px*nfine)] = 1 + np.random.random(size=int(non_masked_px*nfine))
        # Generate sparse map
        sparse_map = healsparse.HealSparseMap(healpix_map=full_map, nside_coverage=nside_coverage)
        # Build the "original" coverage map
        cov_map_orig = self.compute_cov_map(nside_coverage, non_masked_px, nfine,
                                            sparse_map._cov_map.bit_shift)
        # Get the built coverage map
        cov_map = sparse_map.coverage_map
        # Test the coverage map generation and lookup
        testing.assert_array_almost_equal(cov_map_orig, cov_map)
    def test_coverage_map_int(self):
        """
        Test coverage_map functionality for ints
        """
        nside_coverage = 16
        nside_map = 512
        # Number of non-masked pixels in the coverage map resolution
        non_masked_px = 10.5
        nfine = (nside_map//nside_coverage)**2
        # Integer maps use a sentinel instead of hp.UNSEEN for masked pixels.
        sentinel = healsparse.utils.check_sentinel(np.int32, None)
        full_map = np.zeros(hp.nside2npix(nside_map), dtype=np.int32) + sentinel
        full_map[0: int(non_masked_px*nfine)] = 1
        sparse_map = healsparse.HealSparseMap(healpix_map=full_map,
                                              nside_coverage=nside_coverage,
                                              sentinel=sentinel)
        cov_map_orig = self.compute_cov_map(nside_coverage, non_masked_px, nfine,
                                            sparse_map._cov_map.bit_shift)
        cov_map = sparse_map.coverage_map
        testing.assert_array_almost_equal(cov_map_orig, cov_map)
    def test_coverage_map_recarray(self):
        """
        Test coverage_map functionality for a recarray
        """
        nside_coverage = 16
        nside_map = 512
        # Number of non-masked pixels in the coverage map resolution
        non_masked_px = 10.5
        nfine = (nside_map//nside_coverage)**2
        # Two-field record dtype; 'a' is the primary (coverage-defining) field.
        dtype = [('a', np.float64),
                 ('b', np.int32)]
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
                                                        dtype, primary='a')
        sparse_map.update_values_pix(np.arange(int(non_masked_px*nfine)),
                                     np.ones(1, dtype=dtype))
        cov_map_orig = self.compute_cov_map(nside_coverage, non_masked_px, nfine,
                                            sparse_map._cov_map.bit_shift)
        cov_map = sparse_map.coverage_map
        testing.assert_array_almost_equal(cov_map_orig, cov_map)
    def test_coverage_map_widemask(self):
        """
        Test coverage_map functionality for wide masks
        """
        nside_coverage = 16
        nside_map = 512
        # Number of non-masked pixels in the coverage map resolution
        non_masked_px = 10.5
        nfine = (nside_map//nside_coverage)**2
        # Do a 1-byte wide
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
                                                        healsparse.WIDE_MASK,
                                                        wide_mask_maxbits=2)
        # Set bits in different columns
        sparse_map.set_bits_pix(np.arange(int(non_masked_px*nfine)), [1])
        cov_map_orig = self.compute_cov_map(nside_coverage, non_masked_px, nfine,
                                            sparse_map._cov_map.bit_shift)
        cov_map = sparse_map.coverage_map
        testing.assert_array_almost_equal(cov_map_orig, cov_map)
        # Do a 3-byte wide
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
                                                        healsparse.WIDE_MASK,
                                                        wide_mask_maxbits=24)
        # Set bits in different columns
        sparse_map.set_bits_pix(np.arange(int(2*nfine)), [2])
        sparse_map.set_bits_pix(np.arange(int(non_masked_px*nfine)), [20])
        cov_map_orig = self.compute_cov_map(nside_coverage, non_masked_px, nfine,
                                            sparse_map._cov_map.bit_shift)
        cov_map = sparse_map.coverage_map
        testing.assert_array_almost_equal(cov_map_orig, cov_map)
    def compute_cov_map(self, nside_coverage, non_masked_px, nfine, bit_shift):
        # Reference implementation: fraction of fine pixels set per coverage pixel.
        cov_map_orig = np.zeros(hp.nside2npix(nside_coverage), dtype=np.float64)
        # Right-shifting by bit_shift collapses each fine-pixel index to its
        # coverage-resolution index (nest ordering assumed -- per healsparse).
        idx_cov = np.right_shift(np.arange(int(non_masked_px*nfine)), bit_shift)
        unique_idx_cov = np.unique(idx_cov)
        idx_counts = np.bincount(idx_cov, minlength=hp.nside2npix(nside_coverage)).astype(np.float64)
        cov_map_orig[unique_idx_cov] = idx_counts[unique_idx_cov]/nfine
        return cov_map_orig
    def test_large_coverage_map_warning(self):
        """
        Test coverage_map raises warning for large
        values of nside_coverage
        """
        nside_coverage = 256
        nside_map = 512
        # Generate sparse map and check that it rasises a warning
        testing.assert_warns(ResourceWarning, healsparse.HealSparseMap.make_empty, nside_sparse=nside_map,
                             nside_coverage=nside_coverage, dtype=np.float32)
if __name__ == '__main__':
    unittest.main()
| [
"numpy.testing.assert_warns",
"healsparse.utils.check_sentinel",
"numpy.testing.assert_array_almost_equal",
"numpy.unique",
"healsparse.HealSparseMap",
"numpy.ones",
"healsparse.HealSparseMap.make_empty",
"healpy.nside2npix",
"unittest.main"
] | [((5839, 5854), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5852, 5854), False, 'import unittest\n'), ((750, 827), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (774, 827), False, 'import healsparse\n'), ((1175, 1231), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['cov_map_orig', 'cov_map'], {}), '(cov_map_orig, cov_map)\n', (1208, 1231), True, 'import numpy.testing as testing\n'), ((1559, 1606), 'healsparse.utils.check_sentinel', 'healsparse.utils.check_sentinel', (['np.int32', 'None'], {}), '(np.int32, None)\n', (1590, 1606), False, 'import healsparse\n'), ((1760, 1861), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage', 'sentinel': 'sentinel'}), '(healpix_map=full_map, nside_coverage=\n nside_coverage, sentinel=sentinel)\n', (1784, 1861), False, 'import healsparse\n'), ((2159, 2215), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['cov_map_orig', 'cov_map'], {}), '(cov_map_orig, cov_map)\n', (2192, 2215), True, 'import numpy.testing as testing\n'), ((2627, 2713), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'dtype'], {'primary': '"""a"""'}), "(nside_coverage, nside_map, dtype,\n primary='a')\n", (2662, 2713), False, 'import healsparse\n'), ((3113, 3169), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['cov_map_orig', 'cov_map'], {}), '(cov_map_orig, cov_map)\n', (3146, 3169), True, 'import numpy.testing as testing\n'), ((3538, 3648), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'healsparse.WIDE_MASK'], {'wide_mask_maxbits': '(2)'}), '(nside_coverage, nside_map, healsparse.\n WIDE_MASK, 
wide_mask_maxbits=2)\n', (3573, 3648), False, 'import healsparse\n'), ((4082, 4138), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['cov_map_orig', 'cov_map'], {}), '(cov_map_orig, cov_map)\n', (4115, 4138), True, 'import numpy.testing as testing\n'), ((4188, 4299), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'healsparse.WIDE_MASK'], {'wide_mask_maxbits': '(24)'}), '(nside_coverage, nside_map, healsparse.\n WIDE_MASK, wide_mask_maxbits=24)\n', (4223, 4299), False, 'import healsparse\n'), ((4796, 4852), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['cov_map_orig', 'cov_map'], {}), '(cov_map_orig, cov_map)\n', (4829, 4852), True, 'import numpy.testing as testing\n'), ((5121, 5139), 'numpy.unique', 'np.unique', (['idx_cov'], {}), '(idx_cov)\n', (5130, 5139), True, 'import numpy as np\n'), ((5629, 5780), 'numpy.testing.assert_warns', 'testing.assert_warns', (['ResourceWarning', 'healsparse.HealSparseMap.make_empty'], {'nside_sparse': 'nside_map', 'nside_coverage': 'nside_coverage', 'dtype': 'np.float32'}), '(ResourceWarning, healsparse.HealSparseMap.make_empty,\n nside_sparse=nside_map, nside_coverage=nside_coverage, dtype=np.float32)\n', (5649, 5780), True, 'import numpy.testing as testing\n'), ((2878, 2901), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (2885, 2901), True, 'import numpy as np\n'), ((4966, 4995), 'healpy.nside2npix', 'hp.nside2npix', (['nside_coverage'], {}), '(nside_coverage)\n', (4979, 4995), True, 'import healpy as hp\n'), ((559, 583), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (572, 583), True, 'import healpy as hp\n'), ((1635, 1659), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (1648, 1659), True, 'import healpy as hp\n'), ((5192, 5221), 'healpy.nside2npix', 'hp.nside2npix', (['nside_coverage'], {}), 
'(nside_coverage)\n', (5205, 5221), True, 'import healpy as hp\n')] |
# -*- coding: utf-8 -*-
"""
Spyder Editor
DATE:19/04/2020
Information theory and coding
Title:(7,4) systematic cyclic codes Encoder
Author:<NAME>
17BEC02
IIIT Dharwad
"""
######################### ENCODER ######################################################
import numpy as np
import pandas as pd
n = 4
gen_p = [1, 1, 0, 1]  # generator polynomial used x^3+x^2+1

# Syndrome (3-bit string) -> single-bit error pattern for the (7,4) code.
syndromedict = {'110': '1000000', '011': '0100000', '111': '0010000',
                '101': '0001000', '100': '0000100', '010': '0000010',
                '001': '0000001', '000': '0000000'}


def encode_cyclic(msg_bits, gen=gen_p):
    """Systematically encode 4 message bits into a 7-bit cyclic codeword.

    The message is shifted by x^(n-k) = x^3 and the remainder of division by
    the generator (taken mod 2) is appended, so the first 4 bits of the
    codeword are the message itself.
    """
    shifted = np.polymul([1, 0, 0, 0], msg_bits)  # msg(x) * x^3
    _, remainder = np.polydiv(shifted, gen)
    return np.mod(np.polyadd(shifted, np.mod(remainder, 2)), 2)


def decode_cyclic(rcvd, gen=gen_p):
    """Correct up to one bit error in a received 7-bit word and return it.

    Bug fix vs. the original script: the error pattern is now looked up
    directly with the computed syndrome.  The old code iterated over every
    key of syndromedict, shadowing the syndrome string, so the error `e`
    always ended up as the pattern of the last key ('000') and no error was
    ever corrected.
    """
    _, raw_syndrome = np.polydiv(rcvd, gen)
    syndrome = np.mod(raw_syndrome, 2)
    print("syndrome is: ", syndrome)
    # np.polydiv trims leading zero coefficients, so left-pad to 3 bits.
    key = ''.join(str(int(b)) for b in syndrome).zfill(3)
    error = [int(c) for c in syndromedict.get(key, '0000000')]
    return np.mod(np.polyadd(rcvd, error), 2)


if __name__ == "__main__":
    # Guarded so that importing this module does not block on stdin.
    ip = list(map(int, input("\nEnter four message bits seperated by space : ").strip().split()))[:n]
    print("data is- ", ip)
    code = encode_cyclic(ip)
    print("generated code is: ", code)
    rcvd_code = [1, 0, 1, 1, 1, 1, 0]
    decoded_code = decode_cyclic(rcvd_code)
    print("DECODED CODE IS: ", decoded_code)
    m = decoded_code[:4]
    print("Message sent is ", m)
| [
"numpy.polymul",
"numpy.polydiv",
"numpy.polyadd",
"numpy.poly1d",
"numpy.mod"
] | [((583, 596), 'numpy.poly1d', 'np.poly1d', (['ip'], {}), '(ip)\n', (592, 596), True, 'import numpy as np\n'), ((603, 619), 'numpy.poly1d', 'np.poly1d', (['gen_p'], {}), '(gen_p)\n', (612, 619), True, 'import numpy as np\n'), ((719, 747), 'numpy.polymul', 'np.polymul', (['[1, 0, 0, 0]', 'ip'], {}), '([1, 0, 0, 0], ip)\n', (729, 747), True, 'import numpy as np\n'), ((812, 835), 'numpy.poly1d', 'np.poly1d', (['intermediate'], {}), '(intermediate)\n', (821, 835), True, 'import numpy as np\n'), ((918, 946), 'numpy.polydiv', 'np.polydiv', (['intermediate', 'gp'], {}), '(intermediate, gp)\n', (928, 946), True, 'import numpy as np\n'), ((1038, 1058), 'numpy.mod', 'np.mod', (['remainder', '(2)'], {}), '(remainder, 2)\n', (1044, 1058), True, 'import numpy as np\n'), ((1114, 1151), 'numpy.polyadd', 'np.polyadd', (['intermediate', 'remainder_2'], {}), '(intermediate, remainder_2)\n', (1124, 1151), True, 'import numpy as np\n'), ((1350, 1375), 'numpy.polydiv', 'np.polydiv', (['rcvd_code', 'gp'], {}), '(rcvd_code, gp)\n', (1360, 1375), True, 'import numpy as np\n'), ((1802, 1830), 'numpy.polyadd', 'np.polyadd', (['rcvd_code', 'error'], {}), '(rcvd_code, error)\n', (1812, 1830), True, 'import numpy as np\n')] |
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
from detection_functions.feature_extraction import *
from toolbox.draw_on_image import *
from detection_functions.sliding_window import *
# Define a function to extract features from a single image window
# This function is very similar to extract_features()
# just for a single image rather than list of images
def single_img_features(img, color_space=None, spatial_size=(32, 32),
                        hist_bins=32, orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_feat=True, hist_feat=True, hog_feat=True):
    """Extract the enabled feature groups from a single image window.

    Returns a list containing, in order and only when enabled: spatial
    binning features, color-histogram features, and HOG features.
    """
    # Color conversion table; None / 'RGB' fall through to a plain copy.
    conversions = {
        'HSV': cv2.COLOR_RGB2HSV,
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'YUV': cv2.COLOR_RGB2YUV,
        'YCrCb': cv2.COLOR_RGB2YCrCb,
    }
    if color_space is None or color_space == 'RGB':
        feature_image = np.copy(img)
    elif color_space in conversions:
        feature_image = cv2.cvtColor(img, conversions[color_space])
    img_features = []
    if spatial_feat == True:
        # Down-sampled raw pixel values.
        img_features.append(bin_spatial(feature_image, size=spatial_size))
    if hist_feat == True:
        # Per-channel color histograms.
        img_features.append(color_hist(feature_image, nbins=hist_bins))
    if hog_feat == True:
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(get_hog_features(feature_image[:, :, channel],
                                                     orient, pix_per_cell, cell_per_block,
                                                     vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
                                             pix_per_cell, cell_per_block, vis=False, feature_vec=True)
        img_features.append(hog_features)
    # Returned as separate vectors (not concatenated) -- callers concatenate.
    return img_features
# Define a function you will pass an image
# and the list of windows to be searched (output of slide_windows())
def search_windows(img, windows, clf, scaler, color_space='RGB',
                   spatial_size=(32, 32), hist_bins=32,
                   hist_range=(0, 256), orient=9,
                   pix_per_cell=8, cell_per_block=2,
                   hog_channel=0, spatial_feat=True,
                   hist_feat=True, hog_feat=True):
    """Classify every sliding window and return the positive ones.

    `windows` is a list of horizontal stripes; each stripe is a list of
    ((x1, y1), (x2, y2)) windows.  HOG features are computed once per
    stripe (after resizing it to 64 px height) and sub-windows are sliced
    out of that shared HOG array -- assumes all windows in a stripe share
    the stripe's vertical extent; TODO confirm against slide_window().
    NOTE(review): `hist_range` and `current_window_area` are unused.
    """
    # 1) Create an empty list to receive positive detection windows
    on_windows = []
    current_window_area = ((0,0),(0,0))
    if color_space != 'RGB':
        if color_space == 'HSV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        elif color_space == 'LUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
        elif color_space == 'HLS':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        elif color_space == 'YUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
        elif color_space == 'YCrCb':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    else:
        feature_image = np.copy(img)
    # 2) Iterate over all windows in the list
    for stripe in windows:
        stripe_on_windows = []
        # Bounding box of the whole stripe: first window's top-left to last window's bottom-right.
        current_stripe_area = ((stripe[0][0][0], stripe[0][0][1]), (stripe[-1][1][0], stripe[-1][1][1]))
        test_stripe = feature_image[current_stripe_area[0][1]:current_stripe_area[1][1], current_stripe_area[0][0]:current_stripe_area[1][0]]
        scale = min(test_stripe.shape[0], test_stripe.shape[1]) / 64 # at most 64 rows and columns
        # NOTE(review): np.int was removed in NumPy 1.24 -- replace with int when touching this.
        resized_test_stripe = cv2.resize(test_stripe,(np.int(test_stripe.shape[1] / scale), np.int(test_stripe.shape[0] / scale)))
        if hog_feat:
            # Compute HOG once for the whole stripe (feature_vec=False keeps the block grid).
            if hog_channel == 'ALL':
                hog_features = []
                for channel in range(resized_test_stripe.shape[2]):
                    hog_features.extend(get_hog_features(resized_test_stripe[:, :, channel],
                                                         orient, pix_per_cell, cell_per_block,
                                                         vis=False, feature_vec=False))
            else:
                hog_features = get_hog_features(resized_test_stripe[:, :, hog_channel], orient,
                                                pix_per_cell, cell_per_block, vis=False, feature_vec=False)
        for window in stripe:
            # 3) Extract the test window from original image
            resized_window_start = int(window[0][0] / scale)
            # Clamp so the 64-px crop stays inside the resized stripe.
            if (resized_window_start + 64) > (resized_test_stripe.shape[1]):
                resized_window_start = resized_test_stripe.shape[1] - 64
            test_img = np.array(resized_test_stripe)[:, resized_window_start:(resized_window_start + 64)]
            #test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
            # 4) Extract features for that window using single_img_features()
            # (color_space=None: the stripe is already converted; HOG handled below.)
            features = single_img_features(test_img, color_space=None,
                                           spatial_size=spatial_size, hist_bins=hist_bins,
                                           orient=orient, pix_per_cell=pix_per_cell,
                                           cell_per_block=cell_per_block,
                                           hog_channel=hog_channel, spatial_feat=spatial_feat,
                                           hist_feat=hist_feat, hog_feat=False)
            if hog_feat:
                #print(window,scale,resized_window_start)
                # Slice this window's block columns out of the stripe-wide HOG array.
                hog_feature_window_start = int(resized_window_start / pix_per_cell)
                blocks_per_image = int(64/pix_per_cell) - (cell_per_block-1)
                extracted_hog_feature = np.array(hog_features)[:, hog_feature_window_start:hog_feature_window_start + blocks_per_image]
                extracted_hog_feature = extracted_hog_feature.ravel()
                features.append(extracted_hog_feature)
            features = np.concatenate(features)
            # 5) Scale extracted features to be fed to classifier
            test_features = scaler.transform(np.array(features).reshape(1, -1))
            # 6) Predict using your classifier
            prediction = clf.predict(test_features)
            # 7) If positive (prediction == 1) then save the window
            if prediction == 1:
                stripe_on_windows.append(window)
        on_windows.append(stripe_on_windows)
    # 8) Return windows for positive detections
    return on_windows
def add_heat(heatmap, bbox_list):
    """Increment `heatmap` by one inside every box.

    Each box takes the form ((x1, y1), (x2, y2)); the heatmap is mutated
    in place and also returned.
    """
    for (x1, y1), (x2, y2) in np.array(bbox_list):
        heatmap[y1:y2, x1:x2] += 1
    return heatmap
def add_heat_labels(heatmap, bbox_list, labels):
    """Accumulate per-car, blur-smoothed heat from detection boxes.

    NOTE(review): several apparent defects are flagged inline below; the
    code is left untouched because the intended control flow is ambiguous.
    """
    bbox_list = np.array(bbox_list)
    label_windows = return_labeled_windows(labels)
    for car_number in range(0, labels[1]):
        box_index = 0  # number of distinct box heights (scale levels) seen
        delta_Y = 0    # height of the most recent box
        car_heatmap = np.zeros_like(heatmap)
        # Iterate through list of bboxes
        for box in bbox_list:
            box_heatmap = np.zeros_like(heatmap)
            # Keep boxes whose x-extent lies inside this car's labeled window (+/- 2 px).
            if (box[1][0] <= (label_windows[car_number][1][0]+2)) & (box[0][0] >= (label_windows[car_number][0][0]-2)):
                if delta_Y != box[1][1] - box[0][1]:
                    box_index = box_index + 1
                    delta_Y = box[1][1] - box[0][1]
                # Add += 1 for all pixels inside each bbox
                # Assuming each "box" takes the form ((x1, y1), (x2, y2))
                box_heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
                car_heatmap = car_heatmap + box_heatmap
            # delta_Y is made odd because GaussianBlur requires odd kernel sizes.
            # NOTE(review): the blur/normalize/accumulate below run once per *box*
            # iteration, not once per car -- likely meant to sit outside this loop.
            if delta_Y%2 == 0:
                delta_Y = delta_Y + 1
            car_heatmap = cv2.GaussianBlur(car_heatmap,(delta_Y*2+1,delta_Y),0)
            if box_index > 0:
                car_heatmap = car_heatmap / box_index
            heatmap = heatmap + car_heatmap
        # NOTE(review): returning inside the car loop means only the first car
        # (car_number == 0) is ever processed.
        return heatmap
    # NOTE(review): unreachable unless labels[1] == 0, in which case
    # temp_heatmap is undefined and this line raises NameError.
    heatmap = heatmap + temp_heatmap
    # Return updated heatmap
    return heatmap
def apply_threshold(heatmap, threshold):
    """Zero every heatmap cell at or below `threshold` (in place) and
    return the thresholded heatmap."""
    below = heatmap <= threshold
    heatmap[below] = 0
    return heatmap
| [
"numpy.copy",
"numpy.array",
"numpy.int",
"numpy.concatenate",
"cv2.cvtColor",
"cv2.GaussianBlur",
"numpy.zeros_like"
] | [((7344, 7363), 'numpy.array', 'np.array', (['bbox_list'], {}), '(bbox_list)\n', (7352, 7363), True, 'import numpy as np\n'), ((7723, 7742), 'numpy.array', 'np.array', (['bbox_list'], {}), '(bbox_list)\n', (7731, 7742), True, 'import numpy as np\n'), ((854, 866), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (861, 866), True, 'import numpy as np\n'), ((3884, 3896), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (3891, 3896), True, 'import numpy as np\n'), ((7903, 7925), 'numpy.zeros_like', 'np.zeros_like', (['heatmap'], {}), '(heatmap)\n', (7916, 7925), True, 'import numpy as np\n'), ((1434, 1446), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (1441, 1446), True, 'import numpy as np\n'), ((3409, 3445), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HSV'], {}), '(img, cv2.COLOR_RGB2HSV)\n', (3421, 3445), False, 'import cv2\n'), ((6758, 6782), 'numpy.concatenate', 'np.concatenate', (['features'], {}), '(features)\n', (6772, 6782), True, 'import numpy as np\n'), ((8023, 8045), 'numpy.zeros_like', 'np.zeros_like', (['heatmap'], {}), '(heatmap)\n', (8036, 8045), True, 'import numpy as np\n'), ((8678, 8738), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['car_heatmap', '(delta_Y * 2 + 1, delta_Y)', '(0)'], {}), '(car_heatmap, (delta_Y * 2 + 1, delta_Y), 0)\n', (8694, 8738), False, 'import cv2\n'), ((959, 995), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HSV'], {}), '(img, cv2.COLOR_RGB2HSV)\n', (971, 995), False, 'import cv2\n'), ((3509, 3545), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2LUV'], {}), '(img, cv2.COLOR_RGB2LUV)\n', (3521, 3545), False, 'import cv2\n'), ((4403, 4439), 'numpy.int', 'np.int', (['(test_stripe.shape[1] / scale)'], {}), '(test_stripe.shape[1] / scale)\n', (4409, 4439), True, 'import numpy as np\n'), ((4441, 4477), 'numpy.int', 'np.int', (['(test_stripe.shape[0] / scale)'], {}), '(test_stripe.shape[0] / scale)\n', (4447, 4477), True, 'import numpy as np\n'), ((5466, 5495), 'numpy.array', 'np.array', 
(['resized_test_stripe'], {}), '(resized_test_stripe)\n', (5474, 5495), True, 'import numpy as np\n'), ((1059, 1095), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2LUV'], {}), '(img, cv2.COLOR_RGB2LUV)\n', (1071, 1095), False, 'import cv2\n'), ((3609, 3645), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (3621, 3645), False, 'import cv2\n'), ((6512, 6534), 'numpy.array', 'np.array', (['hog_features'], {}), '(hog_features)\n', (6520, 6534), True, 'import numpy as np\n'), ((1159, 1195), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (1171, 1195), False, 'import cv2\n'), ((3709, 3745), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2YUV'], {}), '(img, cv2.COLOR_RGB2YUV)\n', (3721, 3745), False, 'import cv2\n'), ((6894, 6912), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (6902, 6912), True, 'import numpy as np\n'), ((1259, 1295), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2YUV'], {}), '(img, cv2.COLOR_RGB2YUV)\n', (1271, 1295), False, 'import cv2\n'), ((3811, 3849), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2YCrCb'], {}), '(img, cv2.COLOR_RGB2YCrCb)\n', (3823, 3849), False, 'import cv2\n'), ((1361, 1399), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2YCrCb'], {}), '(img, cv2.COLOR_RGB2YCrCb)\n', (1373, 1399), False, 'import cv2\n')] |
import math
import copy
import warnings
import numpy as np
from itertools import product
from analysis.abstract_interpretation import AbstractInterpretation
import parse.parse_format_text as parse_format_text
from solver import Range, Array
from utils import OVERFLOW_LIMIT, UNDERFLOW_LIMIT, resolve_type
turn_on_bool = False
length_unknown = 1e3
# infer size from a and b under assumption that a = b, even though one of them might be unknown, i.e., equals to ?
def real_size(a, b):
    """Resolve a concrete dimension from two sizes assumed to be equal.

    Either operand may be the unknown marker "?"; the known one wins.
    Raises AssertionError when both are unknown.
    """
    a_unknown = str(a) == "?"
    b_unknown = str(b) == "?"
    if a_unknown and b_unknown:
        raise AssertionError("cannot infer ? size")
    return int(b) if a_unknown else int(a)
# the abstract interpretation of identity.
def identity(args, node=None):
    """Abstract interpretation of identity: lift args[0].value into a Range.

    A Range passes through unchanged; a concrete array is abstracted by its
    min/max. Returns None when no range can be derived (e.g. np.min/np.max
    raise on a zero-size array).
    """
    try:
        value = args[0].value
        if isinstance(value, Range):
            return value
        return Range(left=resolve_type(np.min(value)),
                     right=resolve_type(np.max(value)))
    except:
        return None
# the abstract interpretation of joining of a list of interval abstractions.
def packtorange(args, node):
    """Join the interval abstractions of several values into one Range.

    Range values contribute their endpoints directly; non-empty concrete
    arrays contribute their min/max; zero-size arrays are skipped.
    Returns None if any collected endpoint is unknown (None).
    """
    lowers = []
    uppers = []
    for arg in args:
        value = arg.value
        if isinstance(value, Range):
            lowers.append(value.left)
            uppers.append(value.right)
        elif value.size > 0:
            lowers.append(resolve_type(np.min(value)))
            uppers.append(resolve_type(np.max(value)))
    if None in uppers or None in lowers:
        return None
    return Range(left=np.min(lowers), right=np.max(uppers))
# returns an unbounded interval abstraction with [-inf, +inf]
def dumy():
    """Return the unbounded interval [-OVERFLOW_LIMIT, +OVERFLOW_LIMIT]."""
    return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
def safeexp(X):
    """exp with the argument clamped to 100 and the result capped at OVERFLOW_LIMIT.

    Accepts a scalar or an iterable; iterables come back as an np.array.
    """
    UPPER_BOUND = 100

    def bounded_exp(v):
        return min(math.exp(min(v, UPPER_BOUND)), OVERFLOW_LIMIT)

    try:
        return np.array([bounded_exp(x) for x in X])
    except:
        return bounded_exp(X)
def safesqrt(X):
    """sqrt that maps negative inputs to 0 instead of raising.

    Accepts a scalar or an iterable; iterables come back as an np.array.
    """
    def clamped_sqrt(v):
        return 0 if v < 0 else math.sqrt(v)

    try:
        return np.array([clamped_sqrt(x) for x in X])
    except:
        return clamped_sqrt(X)
def safepow(X, Y):
    """x ** y capped at OVERFLOW_LIMIT; overflow/domain errors also yield OVERFLOW_LIMIT.

    Accepts scalars or parallel iterables; iterables come back as an np.array.
    """
    def bounded_pow(a, b):
        try:
            return min(math.pow(a, b), OVERFLOW_LIMIT)
        except:
            return OVERFLOW_LIMIT

    try:
        return np.array([bounded_pow(x, y) for (x, y) in zip(X, Y)])
    except:
        return bounded_pow(X, Y)
def safelgamma(X):
    """lgamma that maps inputs at or below UNDERFLOW_LIMIT to OVERFLOW_LIMIT.

    Accepts a scalar or an iterable; iterables come back as an np.array.
    """
    def guarded_lgamma(v):
        return OVERFLOW_LIMIT if v <= UNDERFLOW_LIMIT else math.lgamma(v)

    try:
        return np.array([guarded_lgamma(x) for x in X])
    except:
        return guarded_lgamma(X)
def safesoftplus(X):
    """Numerically safe softplus: log(1 + exp(x)).

    For x > UPPER_BOUND the result is approximated by x itself, which is
    exact to float precision and avoids overflow in exp. Accepts a scalar
    or an iterable; iterables come back as an np.array.

    Fix: the element-wise branch previously tested and used the whole input
    X instead of the element x, so any multi-element input raised (the
    truth value of `array > 100` is ambiguous) and the except fallback
    re-raised for the same reason.
    """
    UPPER_BOUND = 100

    def softplus_one(v):
        # For large v, log1p(exp(v)) == v within float precision.
        return v if v > UPPER_BOUND else np.log1p(np.exp(v))

    try:
        return np.array([softplus_one(x) for x in X])
    except:
        return softplus_one(X)
# contains the abstract interpretations of TensorFlow APIs used in interval abstraction + tensor smashing.
class InferValue:
@staticmethod
def abs(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
left_sq = np.abs(args[0].value.left)
right_sq = np.abs(args[0].value.right)
min_sq = min(left_sq, right_sq)
max_sq = max(left_sq, right_sq)
cond = args[0].value.left <= 0 and args[0].value.right >= 0
return Range(left=0 if cond else min_sq, right=max_sq)
else:
return np.abs(args[0].value)
@staticmethod
def add(args: list, node):
assert len(args) == 2
if args[0].value is None or args[1].value is None:
return None
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range):
x = identity([args[0]], node)
y = identity([args[1]], node)
return Range(left=x.left + y.left, right=x.right + y.right)
else:
return args[0].value + args[1].value
@staticmethod
def addn(args: list, node):
assert len(args) > 0
if len(args) == 1:
return args[0].value
else:
s = InferValue.add([args[0], args[1]], node)
for i in range(2, len(args)):
s = InferValue.add([AbstractInterpretation(value=s), args[i]], node)
return s
@staticmethod
def all(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def any(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def argmax(args: list, node):
assert len(args) == 2
try:
return Range(left=0, right=int(args[0].size[int(args[1].value)]) - 1)
except:
return Range(left=0, right=length_unknown)
@staticmethod
def assign(args: list, node):
assert len(args) == 2
if args[0].value is None:
return args[1].value
else:
return args[0].value
def assignadd(args: list, node):
y = identity([args[1]], node)
tmp = dumy()
if y.left >= 0:
tmp.left = args[0].value.left
if y.right <= 0:
tmp.right = args[0].value.right
return tmp
def assignsub(args: list, node):
y = identity([args[1]], node)
tmp = dumy()
if y.left <= 0:
tmp.left = args[0].value.left
if y.right >= 0:
tmp.right = args[0].value.right
return tmp
@staticmethod
def avgpool(args: list, node):
assert len(args) == 1
return identity(args, node)
@staticmethod
def batchmatmul(args: list, node):
assert len(args) == 2
x = copy.deepcopy(args[0])
y = copy.deepcopy(args[1])
x.size = x.size[1:]
y.size = y.size[1:]
return InferValue.matmul([x, y], node)
@staticmethod
def batchtospacend(args: list, node):
assert len(args) == 3
return args[0].value
@staticmethod
def spacetobatchnd(args: list, node):
assert len(args) == 3
return args[0].value
@staticmethod
def biasadd(args: list, node):
assert len(args) == 2 and len(args[1].size) == 1 and (
str(args[0].size[-1]) == "?" or str(args[1].size[0]) or args[0].size[-1] == args[1].size[0])
return Range(left=args[0].value.left + args[1].value.left,
right=args[0].value.right + args[1].value.right)
@staticmethod
def broadcastargs(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def cast(args: list, node):
# tf.int64: 9; tf.int32: 3; tf.int16: 5; tf.int8: 6;
# tf.uint64: 23; tf.uint32: 22; tf.uint16: 17; tf.uint8: 4;
# tf.float64 2; tf.float32: 1; tf.float16: 19;
# tf.bool: 10;
assert len(args) == 1
bool_proto = [10]
int_proto = [9, 3, 5, 6] + [23, 22, 17, 4]
float_proto = [2, 1, 19]
attrs = node.attr
if int(attrs['SrcT'].type) in bool_proto and int(attrs['DstT'].type) in int_proto + float_proto:
return Range(left=0, right=1)
elif int(attrs['SrcT'].type) in int_proto + float_proto and int(attrs['DstT'].type) in [10]:
return Range(left=False, right=True)
elif int(attrs['SrcT'].type) in int_proto and int(attrs['DstT'].type) in int_proto:
return args[0].value
elif int(attrs['SrcT'].type) in float_proto and int(attrs['DstT'].type) in float_proto:
return args[0].value
elif int(attrs['SrcT'].type) in int_proto and int(attrs['DstT'].type) in float_proto:
return args[0].value
elif int(attrs['SrcT'].type) in float_proto and int(attrs['DstT'].type) in int_proto:
return InferValue.floor(args, node)
else:
raise NotImplementedError("%s -> %s not implemented!" % (attrs['SrcT'].type, attrs['DstT'].type))
@staticmethod
def checknumerics(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def cholesky(args: list, node):
return dumy()
@staticmethod
def clipbyvalue(args: list, node):
assert len(args) == 3
if isinstance(args[0].value, Range):
return Range(left=max(args[0].value.left,
float(args[1].value) if not isinstance(args[1].value, Range) else args[1].value.left),
right=min(args[0].value.right,
float(args[2].value) if not isinstance(args[2].value, Range) else args[
2].value.right))
else:
return np.minimum(np.maximum(args[0].value, args[1].value), args[2].value)
@staticmethod
def concatv2(args: list, node):
any_range = False
for x in args:
if isinstance(x.value, Range):
any_range = True
break
if not any_range:
return np.concatenate([x.value for x in args[:-1]], axis=np.int32(args[-1].value))
else:
return packtorange(args[:-1], node)
@staticmethod
def const(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def conv2d(args: list, node):
assert len(args) == 2
ind = 1
for x in args[1].size[:-1]:
ind *= int(x)
x = identity([args[0]], node)
y = identity([args[1]], node)
ends = [x.left * y.left * ind, x.left * y.right * ind,
x.right * y.left * ind, x.right * y.right * ind]
return Range(left=min(ends), right=max(ends))
@staticmethod
def conv2dbackpropinput(args: list, node):
return Range(left=-1, right=1)
return getattr(parse_format_text, "variablev2")(node)
@staticmethod
def depthwiseconv2dnative(args: list, node):
assert len(args) == 2
ind = 1
for x in args[1].size[:2]:
ind *= int(x)
ends = [args[0].value.left * args[1].value.left * ind, args[0].value.left * args[1].value.right * ind,
args[0].value.right * args[1].value.left * ind, args[0].value.right * args[1].value.right * ind]
return Range(left=min(ends), right=max(ends))
@staticmethod
def diag(args: list, node):
assert len(args) == 1
tmp = packtorange(args, node)
return Range(left=min(0, tmp.left), right=max(0, tmp.right))
@staticmethod
def dynamicstitch(args: list, node):
assert len(args) % 2 == 0
datas = args[len(args) // 2:]
return packtorange(datas, node)
@staticmethod
def enter(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def equal(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def exit(args: list, node):
return InferValue.identity(args, node)
@staticmethod
def expanddims(args: list, node):
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
return np.expand_dims(args[0].value, axis=np.int32(args[1].value))
else:
return identity(args, node)
@staticmethod
def fifoqueuev2(args: list, node):
return InferValue.randomshufflequeuev2(args, node)
@staticmethod
def fill(args: list, node):
assert len(args) == 2
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
ret = np.empty(args[0].value)
ret.fill(args[1].value)
return ret
else:
return identity([args[1]])
@staticmethod
def floor(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=math.floor(args[0].value.left), right=math.floor(args[0].value.right))
else:
return np.floor(args[0].value)
    @staticmethod
    def fusedbatchnorm(args: list, node):
        """Abstract interpretation of FusedBatchNorm.

        Returns a 5-element list: the normalized output's Range followed by
        four unbounded dummy ranges for the auxiliary outputs.

        NOTE(review): the comment below lists TF's input order
        (x, scale, offset, mean, variance), but the code binds mean=args[1],
        variance=args[2], offset=args[3], scale=args[4] — confirm against the
        graph construction before relying on the names.
        """
        assert len(args) == 5
        # x, scale, offset, mean, variance
        epsilon = float(node.attr['epsilon'].f)
        is_training = node.attr["is_training"].b
        x = identity([args[0]], node)
        mean = identity([args[1]], node)
        # epsilon keeps the divisor interval away from zero.
        variance = identity([args[2]], node) + epsilon
        if not is_training:
            offset = identity([args[3]], node)
            scale = identity([args[4]], node)
            # All endpoint combinations of scale / variance ...
            ends_scale_variance = [scale.left / variance.left, scale.right / variance.left,
                                   scale.left / variance.right,
                                   scale.right / variance.right]
            # ... multiplied by both endpoints of (x - mean).
            ends = [(x.left - mean.right) * end for end in ends_scale_variance] + [
                (x.right - mean.left) * end for end in ends_scale_variance]
            return [Range(left=min(ends) + offset.left, right=max(ends) + offset.right),
                    dumy(), dumy(), dumy(), dumy()]
        else:
            # Training mode: scale/offset are not applied here; only 1/variance.
            ends_scale_variance = [1 / variance.left, 1 / variance.right]
            ends = [(x.left - mean.right) * end for end in ends_scale_variance] + [
                (x.right - mean.left) * end for end in ends_scale_variance]
            return [Range(left=min(ends), right=max(ends)), dumy(), dumy(), dumy(), dumy()]
@staticmethod
def gathernd(args: list, node):
assert len(args) == 2
return identity(args, node)
@staticmethod
def gatherv2(args: list, node):
assert len(args) == 3
return identity(args, node)
@staticmethod
def greater(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def greaterequal(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def identity(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def isfinite(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def iteratorgetnext(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def iteratorv2(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def leakyrelu(args: list, node):
assert len(args) == 1
alpha = node.attr["alpha"].f
def leaky_relu(x):
if x >= 0:
return x
else:
return alpha * x
if isinstance(args[0].value, Range):
return Range(left=leaky_relu(args[0].value.left), right=leaky_relu(args[0].value.right))
else:
return leaky_relu(args[0].value)
@staticmethod
def l2loss(args: list, node):
assert len(args) == 1
return InferValue.square(args, node) * 0.5
@staticmethod
def less(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def lessequal(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def lgamma(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
ends = [safelgamma(args[0].value.left), safelgamma(args[0].value.right)]
return Range(left=min(ends), right=max(ends))
else:
return safelgamma(args[0].value)
@staticmethod
def linspace(args: list, node):
assert len(args) == 3
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range) or isinstance(args[2].value, Range):
return packtorange(args[:-1], node)
else:
return np.linspace(args[0].value, args[1].value, args[2].value)
@staticmethod
def logicaland(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def logicalnot(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def logicalor(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def loguniformcandidatesampler(args: list, node):
assert len(args) == 1
ind = int(node.attr["range_max"].i)
num = int(node.attr["num_sampled"].i)
return [Range(left=0, right=ind - 1), Range(left=UNDERFLOW_LIMIT * 10, right=num),
Range(left=UNDERFLOW_LIMIT * 10, right=num)]
@staticmethod
def loopcond(args: list, node):
return InferValue.identity(args, node)
@staticmethod
def matmul(args: list, node):
assert len(args) == 2
try:
len(args[0].size) == len(args[1].size)
except:
return dumy()
assert len(args[0].size) == len(args[1].size)
for i in range(len(args[0].size) - 2):
assert str(args[0].size[i]) == "?" or str(args[1].size[i]) == "?" or args[0].size[i] == args[1].size[i]
ind = real_size(args[0].size[-1], args[1].size[-2])
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
return np.matmul(args[0].value, args[1].value)
else:
x = identity([args[0]], node)
y = identity([args[1]], node)
ends = [x.left * y.left * ind, x.left * y.right * ind, x.right * y.left * ind, x.right * y.right * ind]
return Range(left=min(ends), right=max(ends))
@staticmethod
def matrixdiag(args: list, node):
assert len(args) == 1
tmp = packtorange(args, node)
return Range(left=min(0, tmp.left), right=max(0, tmp.right))
@staticmethod
def matrixbandpart(args: list, node):
assert len(args) == 3
tmp = packtorange(args[:1], node)
return Range(left=min(tmp.left, 0), right=max(tmp.right, 0))
@staticmethod
def matrixdiagpart(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def max(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def maxpool(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def maximum(args: list, node):
assert len(args) == 2
x = args[0].value
y = args[1].value
if isinstance(x, Range) and isinstance(y, Range):
return Range(left=max(x.left, y.left), right=max(x.right, y.right))
elif not isinstance(x, Range) and not isinstance(y, Range):
return np.maximum(x, y)
else:
if isinstance(y, Range):
x, y = y, x
y = resolve_type(np.max(y))
return Range(left=max(x.left, y), right=max(x.right, y))
@staticmethod
def mean(args: list, node):
assert len(args) == 2
return identity(args, node)
@staticmethod
def merge(args: list, node):
tmp = packtorange(args, node)
max_index = int(node.attr["N"].i)
return_index = Range(left=0, right=max_index - 1)
if isinstance(tmp, tuple):
raise AssertionError
else:
return [tmp, return_index]
@staticmethod
def min(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def minimum(args: list, node):
assert len(args) == 2
x = args[0].value
y = args[1].value
if isinstance(x, Range) and isinstance(y, Range):
return Range(left=min(x.left, y.left), right=min(x.right, y.right))
elif not isinstance(x, Range) and not isinstance(y, Range):
return np.minimum(x, y)
else:
if isinstance(y, Range):
x, y = y, x
y = resolve_type(np.min(y))
return Range(left=min(x.left, y), right=min(x.right, y))
@staticmethod
def mul(args: list, node):
assert len(args) == 2
if args[0].value is None or args[1].value is None:
return None
if isinstance(args[1].value, Range) or isinstance(args[0].value, Range):
x = identity([args[0]], node)
y = identity([args[1]], node)
ends = [x.left * y.left, x.left * y.right, x.right * y.left, x.right * y.right]
return Range(left=min(ends), right=max(ends))
else:
return args[0].value * args[1].value
def multinomial(args: list, node):
assert len(args) == 2
return Range(left=0, right=1)
@staticmethod
def neg(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=-args[0].value.right, right=-args[0].value.left)
else:
return -args[0].value
@staticmethod
def nonmaxsuppressionv3(args: list, node):
assert len(args) == 5
try:
ind = int(args[1].size[0])
return Range(left=0, right=ind - 1)
except:
return Range(left=0, right=length_unknown)
@staticmethod
def notequal(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def onehot(args: list, node):
assert len(args) == 4
return Range(left=min([args[2].value, args[3].value]),
right=max([args[2].value, args[3].value]))
@staticmethod
def oneshotiterator(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def pack(args: list, node):
any_range = False
for x in args:
if isinstance(x.value, Range):
any_range = True
break
if not any_range:
return np.stack([x.value for x in args], axis=int(node.attr["axis"].i))
else:
return packtorange(args, node)
@staticmethod
def pad(args: list, node):
return identity(args, node)
@staticmethod
def paddingfifoqueuev2(args: list, node):
return InferValue.randomshufflequeuev2(args, node)
@staticmethod
def parsesingleexample(args: list, node):
assert len(args) == 3
return [Range(left=0, right=length_unknown) for _ in range(20)]
@staticmethod
def placeholder(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def placeholderwithdefault(args: list, node):
assert len(args) == 1
tmp = getattr(parse_format_text, 'placeholder')(node)
if isinstance(args[0].value, Range):
return Range(left=min(args[0].value.left, tmp.left), right=max(args[0].value.right, tmp.right))
else:
return Range(left=min(args[0].value, tmp.left), right=max(args[0].value, tmp.right))
@staticmethod
def pow(args: list, node):
assert len(args) == 2
if isinstance(args[0].value, Range) and isinstance(args[1].value, Range):
return Range(left=safepow(args[0].value.left, args[1].value.left),
right=safepow(args[0].value.right, args[1].value.right))
elif isinstance(args[0].value, Range):
return Range(left=safepow(args[0].value.left, args[1].value),
right=safepow(args[0].value.right, args[1].value))
elif isinstance(args[1].value, Range):
return Range(left=safepow(args[0].value, args[1].value.left),
right=safepow(args[0].value, args[1].value.right))
else:
return safepow(args[0].value, args[1].value)
@staticmethod
def prod(args: list, node):
assert len(args) == 2
if args[0].value is None:
return None
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range):
try:
ind = int(args[0].size[int(args[1].value)])
return Range(left=safepow(args[0].value.left, ind), right=safepow(args[0].value.right, ind))
except:
ind = Range(left=0, right=length_unknown)
t = InferValue.pow([args[0], AbstractInterpretation(value=ind, dtype=3, size=[])], node)
if isinstance(t, tuple):
raise AssertionError
else:
return t
else:
axises = np.int32(args[1].value)
return np.prod(args[0].value, axis=tuple(axises) if len(axises.shape) > 0 else axises)
@staticmethod
def queuedequeuemanyv2(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def randomshuffle(args: list, node):
assert len(args) == 1
return identity(args, node)
@staticmethod
def randomshufflequeuev2(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, "oneshotiterator")(node)
@staticmethod
def randomstandardnormal(args: list, node):
assert len(args) == 1
return Range(left=UNDERFLOW_LIMIT * 10, right=1)
@staticmethod
def randomuniform(args: list, node):
assert len(args) == 1
return Range(left=UNDERFLOW_LIMIT * 10, right=1)
@staticmethod
def range(args: list, node):
assert len(args) == 3
all_single_np = True
for arg in args:
if isinstance(arg.value, Range) or len(np.array(arg.value).shape) > 0:
all_single_np = False
break
if not all_single_np:
left = args[0].value.left if isinstance(args[0].value, Range) else np.min(args[0].value)
right = args[1].value.right if isinstance(args[1].value, Range) else np.max(args[1].value)
return Range(left=left, right=right)
else:
return np.arange(args[0].value, args[1].value, args[2].value)
@staticmethod
def rank(args: list, node):
assert len(args) == 1
try:
return int(args[0].size)
except:
return Range(left=1, right=length_unknown)
@staticmethod
def readvariableop(args: list, node):
assert len(args) == 1
return args[0].value
    @staticmethod
    def realdiv(args: list, node):
        """Abstract interpretation of element-wise division x / y.

        Concrete operands are flattened to 1-D; whenever y's interval may
        contain 0 the result is widened to [-OVERFLOW_LIMIT, OVERFLOW_LIMIT].
        """
        assert len(args) == 2
        x = args[0].value
        y = args[1].value
        if not isinstance(x, Range):
            x = np.reshape(x, -1)
        if not isinstance(y, Range):
            y = np.reshape(y, -1)
        if isinstance(x, Range) and isinstance(y, Range):
            if y.left > 0 or y.right < 0:
                # y cannot be 0: the quotient's extrema lie at endpoint pairs.
                ends = [x.left / y.left, x.left / y.right, x.right / y.left, x.right / y.right]
                return Range(left=np.min(ends), right=np.max(ends))
            else:
                # y may contain 0: the quotient is unbounded.
                return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
        elif not isinstance(y, Range):  # x can be a Range or a np.array
            if isinstance(x, Range):
                ends = [x.left / yy for yy in y] + [x.right / yy for yy in y]
                return Range(left=np.min(ends), right=np.max(ends))
            else:
                # Both concrete: exact element-wise division.
                return x * (1 / y)
        else:  # if y is a Range, whatever x is, we have to end up with a Range, but we can do it precisely when x is a float
            if y.left > 0 or y.right < 0:
                ends = [xx / y.left for xx in x] + [xx / y.right for xx in x]
                return Range(left=np.min(ends), right=np.max(ends))
            else:
                return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
@staticmethod
def relu(args: list, node):
assert len(args) == 1
return Range(left=max([args[0].value.left, 0]),
right=max([args[0].value.right, 0]))
@staticmethod
def relu6(args: list, node):
assert len(args) == 1
return Range(left=min(max(args[0].value.left, 0), 6),
right=min(max(args[0].value.right, 0), 6))
@staticmethod
def reshape(args: list, node):
assert len(args) == 2
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
return np.reshape(args[0].value, np.int32(args[1].value))
else:
return identity(args, node)
@staticmethod
def resizearea(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def resizebilinear(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def resizenearestneighbor(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def resourcegather(args: list, node):
assert len(args) == 2
return identity(args, node)
@staticmethod
def reversev2(args: list, node):
assert len(args) == 2
return identity(args, node)
@staticmethod
def round(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=np.round(args[0].value.left), right=np.round(args[0].value.right))
return np.round(args[0].value)
@staticmethod
def rsqrt(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
left = safesqrt(args[0].value.left)
right = safesqrt(args[0].value.right)
if left == 0 or right == 0:
return dumy()
else:
return Range(left=1 / right, right=1 / left)
else:
return 1 / safesqrt(args[0].value)
@staticmethod
def select(args: list, node):
assert len(args) == 3
if not isinstance(args[0].value, Range):
raise NotImplementedError("not implemented when the condition is known")
x = identity([args[1]], node)
y = identity([args[2]], node)
if not turn_on_bool:
return Range(left=min(x.left, y.left), right=max(x.right, y.right))
raise NotImplementedError
@staticmethod
def shape(args: list, node):
assert len(args) == 1
try:
return [int(x) for x in args[0].size]
except:
return Range(left=1, right=length_unknown)
@staticmethod
def sign(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=np.sign(args[0].value.left), right=np.sign(args[0].value.right))
else:
return np.sign(args[0].value)
@staticmethod
def size(args: list, node):
assert len(args) == 1
try:
ele = 1
for x in args[0].size:
ele *= int(x)
if ele < 0:
return Range(left=0, right=length_unknown)
else:
return ele
except:
return Range(left=0, right=length_unknown)
@staticmethod
def slice(args: list, node):
assert len(args) == 3
try:
return args[0].value[
tuple(slice(a, a + b) if b >= 0 else slice(a, None) for a, b in zip(args[1].value, args[2].value))]
except:
return identity(args, node)
@staticmethod
def sparsetodense(args: list, node):
assert len(args) == 4
return Range(left=0, right=1)
@staticmethod
def split(args: list, node):
assert len(args) == 2
nums = int(node.attr["num_split"].i)
if nums == 1:
return identity(args[1:], node)
else:
return [identity(args[1:], node) for _ in range(nums)]
@staticmethod
def sqrt(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
left = safesqrt(args[0].value.left)
right = safesqrt(args[0].value.right)
return Range(left=left, right=right)
else:
return safesqrt(args[0].value)
@staticmethod
def square(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
abs_value = InferValue.abs(args, node)
return Range(left=abs_value.left * abs_value.left, right=abs_value.right * abs_value.right)
else:
return args[0].value * args[0].value
@staticmethod
def squareddifference(args: list, node):
assert len(args) == 2
value1 = (args[0].value.left - args[1].value.right) * (args[0].value.left - args[1].value.right)
value2 = (args[0].value.right - args[1].value.left) * (args[0].value.right - args[1].value.left)
return InferValue.square([AbstractInterpretation(value=Range(left=value1, right=value2))], node)
@staticmethod
def squeeze(args: list, node):
assert len(args) == 1
return identity(args, node)
@staticmethod
def stopgradient(args: list, node):
return InferValue.identity(args, node)
@staticmethod
def stridedslice(args: list, node):
return identity(args, node)
@staticmethod
def sub(args: list, node):
assert len(args) == 2
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range):
x = identity([args[0]], node)
y = identity([args[1]], node)
return Range(left=x.left - y.right, right=x.right - y.left)
else:
return args[0].value - args[1].value
@staticmethod
def sum(args: list, node):
assert len(args) == 2
if args[0].value is None:
return None
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range):
try:
ind = int(args[0].size[int(args[1].value)])
return Range(left=args[0].value.left * ind, right=args[0].value.right * ind)
except:
ind = Range(left=1, right=1e6)
t = InferValue.mul([args[0], AbstractInterpretation(value=ind, dtype=3, size=[])], node)
if isinstance(t, tuple):
raise AssertionError
else:
return t
else:
axises = np.int32(args[1].value)
return np.sum(args[0].value, axis=tuple(axises) if len(axises.shape) > 0 else axises)
@staticmethod
def switch(args: list, node):
assert len(args) == 2
return [args[0].value, args[0].value]
@staticmethod
def tensorarraygatherv3(args: list, node):
assert len(args) == 3
return args[0].value
@staticmethod
def tensorarrayv3(args: list, node):
assert len(args) == 1
return [dumy(), dumy()]
@staticmethod
def tensorarrayreadv3(args: list, node):
assert len(args) == 3
return args[0].value
@staticmethod
def tensorarrayscatterv3(args: list, node):
assert len(args) == 4
if isinstance(args[2].value, Range):
return args[0].value
else:
return args[0].value
@staticmethod
def tensorarraysizev3(args: list, node):
assert len(args) == 2
return int(args[0].size[0])
@staticmethod
def tensorarraywritev3(args: list, node):
assert len(args) == 4
return InferValue.tensorarrayscatterv3(args, node)
@staticmethod
def tile(args: list, node):
assert len(args) == 2
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
return np.tile(args[0].value, np.int32(args[1].value))
else:
return identity(args, node)
@staticmethod
def topkv2(args: list, node):
assert len(args) == 2
try:
ind = int(args[0].size[-1])
value = Range(left=0, right=ind - 1)
except:
value = Range(left=0, right=length_unknown)
return [identity(args, node), value]
@staticmethod
def transpose(args: list, node):
assert len(args) == 2
try:
return np.transpose(args[0].value, np.int32(args[1].value))
except:
return identity(args, node)
@staticmethod
def unpack(args: list, node):
assert len(args) == 1
nums = int(node.attr["num"].i)
axis = int(node.attr["axis"].i)
if not isinstance(args[0].value, Range):
assert args[0].value.shape[axis] == nums
if nums == 1:
index = [slice(None) for _ in range(len(args[0].value.shape))]
index[axis] = 0
return args[0].value[index]
else:
ret = []
for i in range(nums):
index = [slice(None) for _ in range(len(args[0].value.shape))]
index[axis] = i
ret.append(args[0].value[index])
return ret
else:
if nums == 1:
return identity(args, node)
else:
return [identity(args, node) for _ in range(nums)]
@staticmethod
def varhandleop(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, "variablev2")(node)
@staticmethod
def variable(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, "variablev2")(node)
@staticmethod
def variablev2(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def where(args: list, node):
assert len(args) == 1
try:
x = np.max(args[0].size)
return Range(left=0, right=x - 1)
except:
return Range(left=0, right=length_unknown - 1)
@staticmethod
def zeroslike(args: list, node):
assert len(args) == 1
try:
if len(args[0].size) == 0:
return 0
except:
pass
return Range(left=0, right=0)
@staticmethod
def floormod(args: list, node):
def mod(x, y):
return x - math.floor(x / y) * y
assert len(args) == 2
try:
x = float(args[0].value)
except:
x = identity([args[0]], node)
try:
y = float(args[1].value)
except:
y = identity([args[1]], node)
if isinstance(x, Range) and isinstance(y, Range):
if y.left > 0 or y.right < 0:
ends = [mod(x.left, y.left), mod(x.left, y.right), mod(x.right, y.left), mod(x.right, y.right)]
return Range(left=min(ends), right=max(ends))
else:
return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
elif not isinstance(y, Range):
return x * (1 / y)
else:
if y.left > 0 or y.right < 0:
ends = [mod(x, y.left), mod(x, y.right)]
return Range(left=min(ends), right=max(ends))
else:
return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
@staticmethod
def iteratortostringhandle(args: list, node):
warnings.warn("iteratortostringhandle not implemented", RuntimeWarning)
@staticmethod
def noop(args: list, node):
warnings.warn("noop not implemented", RuntimeWarning)
@staticmethod
def restorev2(args: list, node):
warnings.warn("restorev2 not implemented", RuntimeWarning)
@staticmethod
def savev2(args: list, node):
warnings.warn("savev2 not implemented", RuntimeWarning)
# non linear operations:
@staticmethod
def sin(args: list, node):
assert len(args) == 1
return Range(left=-1, right=1)
def cos(args: list, node):
assert len(args) == 1
return Range(left=-1, right=1)
@staticmethod
def log(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
if args[0].value.left <= 0:
return Range(left=-OVERFLOW_LIMIT, right=math.log(args[0].value.right))
else:
return Range(left=math.log(args[0].value.left), right=math.log(args[0].value.right))
else:
return np.log(args[0].value)
@staticmethod
def log1p(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
if args[0].value.left <= -1:
return Range(left=-OVERFLOW_LIMIT, right=np.log1p(args[0].value.right))
else:
return Range(left=np.log1p(args[0].value.left), right=np.log1p(args[0].value.right))
else:
return np.log1p(args[0].value)
@staticmethod
def softplus(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=safesoftplus(args[0].value.left), right=safesoftplus(args[0].value.right))
else:
return safesoftplus(args[0].value)
@staticmethod
def exp(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=safeexp(args[0].value.left), right=safeexp(args[0].value.right))
else:
return safeexp(args[0].value)
    @staticmethod
    def softmax(args: list, node):
        # Interval bound for softmax along the last axis.
        # ind = static size of the softmax axis, or None if unknown.
        assert len(args) == 1
        try:
            ind = int(args[0].size[-1])
        except:
            ind = None
        if isinstance(args[0].value, Range):
            min_ele = safeexp(args[0].value.left)
            max_ele = safeexp(args[0].value.right)
            # Lower bound: smallest exp element competing with (ind - 1)
            # largest ones; degenerate exp values force the trivial bound 0.
            if max_ele >= OVERFLOW_LIMIT or min_ele == 0:
                left = 0
            elif ind is not None:
                left = min_ele / ((ind - 1) * max_ele + min_ele)
            else:
                left = min_ele / ((length_unknown - 1) * max_ele + min_ele)
            # Upper bound: largest exp element competing with (ind - 1)
            # smallest ones; degenerate exp values force the trivial bound 1.
            if max_ele >= OVERFLOW_LIMIT or min_ele == 0:
                right = 1
            elif ind is not None:
                right = max_ele / ((ind - 1) * min_ele + max_ele)
            else:
                # NOTE(review): uses a single competing element (ind = 2)
                # rather than length_unknown — confirm this is intentional.
                right = max_ele / (min_ele + max_ele)
            return Range(left=left, right=right)
        else:
            tmp_exp = np.exp(args[0].value)
            return tmp_exp / np.sum(tmp_exp)
@staticmethod
def sigmoid(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=1 / (1 + safeexp(-args[0].value.left)), right=1 / (1 + safeexp(-args[0].value.right)))
else:
return 1 / (1 + safeexp(-args[0].value))
@staticmethod
def tanh(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=np.tanh(args[0].value.left), right=np.tanh(args[0].value.right))
else:
return np.tanh(args[0].value)
# contains the abstract interpretations of TensorFlow APIs used in the tensor partition and the linear affine relation.
class InferArray:
    """Abstract interpretations of TensorFlow ops over partitioned symbolic
    arrays (`Array` values tracking linear affine relations per block).

    Every handler is a static method taking (args, node) and returning an
    `Array` (or a list of them for unpack), or None when the op cannot be
    modeled precisely.
    """
    @staticmethod
    def add(args: list, node):
        """Element-wise addition of two equally-shaped partitioned arrays."""
        try:
            len(args[0].size) == len(args[1].size)
        except:
            return None
        assert len(args) == 2 and len(args[0].size) == len(args[1].size)
        ind = len(args[0].size)
        for i in range(ind):
            try:
                l1 = int(args[0].size[i])
            except:
                l1 = -1
            try:
                l2 = int(args[1].size[i])
            except:
                l2 = -1
            assert l1 == l2
        ret = Array("tmp", args[0].size)
        ret.block_to_symbol = dict()
        # Refine both partitions to a common set of index slices, then add
        # the corresponding symbolic blocks pairwise.
        ret.index_slices = Array.join_index_slices(args[0].array.index_slices, args[1].array.index_slices)
        keys0 = args[0].array.get_corresponding_keys(ret.index_slices)
        keys1 = args[1].array.get_corresponding_keys(ret.index_slices)
        i = 0
        for indexes in product(*ret.index_slices):
            ret.block_to_symbol[tuple(indexes)] = keys0[i] + keys1[i]
            i += 1
        return ret
    @staticmethod
    def sub(args: list, node):
        """Element-wise subtraction of two equally-shaped partitioned arrays.

        BUGFIX: the @staticmethod decorator was missing, unlike every other
        method of this class.
        """
        try:
            len(args[0].size) == len(args[1].size)
        except:
            return None
        assert len(args) == 2 and len(args[0].size) == len(args[1].size)
        ind = len(args[0].size)
        for i in range(ind):
            try:
                l1 = int(args[0].size[i])
            except:
                l1 = -1
            try:
                l2 = int(args[1].size[i])
            except:
                l2 = -1
            assert l1 == l2
        ret = Array("tmp", args[0].size)
        ret.block_to_symbol = dict()
        ret.index_slices = Array.join_index_slices(args[0].array.index_slices, args[1].array.index_slices)
        keys0 = args[0].array.get_corresponding_keys(ret.index_slices)
        keys1 = args[1].array.get_corresponding_keys(ret.index_slices)
        i = 0
        for indexes in product(*ret.index_slices):
            ret.block_to_symbol[tuple(indexes)] = keys0[i] - keys1[i]
            i += 1
        return ret
    @staticmethod
    def concatv2(args: list, node):
        """Concatenation along args[-1].value; gives up (None) beyond 10 inputs
        or when any static dimension is unavailable.
        """
        assert len(args) > 1
        if len(args) - 1 > 10:
            return None
        try:
            len(args[0].size) == len(args[1].size)
        except:
            return None
        concat_ind = int(args[-1].value)
        for i in range(1, len(args) - 1):
            assert len(args[0].size) == len(args[i].size)
            for j in range(len(args[i].size)):
                try:
                    int(args[0].size[j])
                    int(args[i].size[j])
                except:
                    return None
                if j != concat_ind:
                    # All non-concat dimensions must agree.
                    assert int(args[0].size[j]) == int(args[i].size[j])
        ret = Array("tmp", args[0].size)
        ret.block_to_symbol = dict()
        index_slices = []
        for arg in args[:-1]:
            index_slices.append(copy.deepcopy(arg.array.index_slices))
            # Neutralize the concat dimension before joining the partitions.
            index_slices[-1][concat_ind] = [None]
        ret.index_slices = index_slices[0]
        for i in range(1, len(args) - 1):
            ret.index_slices = Array.join_index_slices(ret.index_slices, index_slices[i])
        tmp_ret_index_slices = copy.deepcopy(ret.index_slices)
        ret.index_slices[concat_ind] = []
        split_point = 0
        # Copy each input's blocks, shifting its concat-axis slices by the
        # accumulated offset of the inputs placed before it.
        for i in range(len(args) - 1):
            tmp_ret_index_slices[concat_ind] = args[i].array.index_slices[concat_ind]
            ret.index_slices[concat_ind] += list(np.array(args[i].array.index_slices[concat_ind]) + split_point)
            tmp_keys = args[i].array.get_corresponding_keys(tmp_ret_index_slices)
            tmp_ret_index_slices[concat_ind] = list(np.array(args[i].array.index_slices[concat_ind]) + split_point)
            split_point += int(args[i].array.index_slices[concat_ind][-1])
            ii = 0
            for indexes in product(*tmp_ret_index_slices):
                ret.block_to_symbol[tuple(indexes)] = tmp_keys[ii]
                ii += 1
        return ret
    @staticmethod
    def identity(args: list, node):
        """Identity: pass the partitioned array through unchanged."""
        assert len(args) == 1
        return args[0].array
    @staticmethod
    def zeroslike(args: list, node):
        """zeros_like: a fresh single-block array with empty affine relation."""
        assert len(args) == 1
        ret = Array("tmp", args[0].size)
        if len(ret.block_to_symbol.keys()) == 0:
            return None
        x = list(ret.block_to_symbol.keys())[0]
        ret.block_to_symbol[x].value = {}
        ret.block_to_symbol[x].map_to_index = {}
        return ret
    @staticmethod
    def relu(args: list, node):
        """Apply relu to every symbolic block of the partitioned array."""
        # right now it will abort when it encounters relu(z=x-y).
        # A better approach is to set it to relu(z) instead of aborting.
        assert len(args) == 1
        ret = copy.deepcopy(args[0].array)
        ret.block_to_symbol = {}
        for x in args[0].array.block_to_symbol:
            ret.block_to_symbol[x] = args[0].array.block_to_symbol[x].relu()
        return ret
    @staticmethod
    def maximum(args: list, node):
        """maximum(x, y): modeled only when one operand is the zero array,
        in which case it reduces to relu of the other operand. Otherwise
        falls through and implicitly returns None.
        """
        try:
            len(args[0].size) == len(args[1].size)
        except:
            return None
        assert len(args) == 2 and len(args[0].size) == len(args[1].size)
        one_value = list(args[1].array.block_to_symbol.values())
        if len(one_value) == 1 and len(one_value[0].value) == 0:
            return InferArray.relu([args[0]], node)
        one_value = list(args[0].array.block_to_symbol.values())
        if len(one_value) == 1 and len(one_value[0].value) == 0:
            return InferArray.relu([args[1]], node)
    @staticmethod
    def neg(args: list, node):
        """Negate every symbolic block (in place on a deep copy)."""
        assert len(args) == 1
        ret = copy.deepcopy(args[0].array)
        for x in ret.block_to_symbol:
            ret.block_to_symbol[x].neg()
        return ret
    @staticmethod
    def pack(args: list, node):
        """tf.stack along node.attr["axis"]; gives up (None) beyond 10 inputs
        or when any static dimension is unavailable.
        """
        assert len(args) >= 1
        if len(args) > 10:
            return None
        pack_ind = int(node.attr["axis"].i)
        for i in range(1, len(args)):
            try:
                len(args[0].size) == len(args[i].size)
            except:
                return None
            assert len(args[0].size) == len(args[i].size)
            for j in range(len(args[i].size)):
                try:
                    int(args[0].size[j])
                    int(args[i].size[j])
                except:
                    return None
                assert int(args[0].size[j]) == int(args[i].size[j])
        ret = Array("tmp", args[0].size)
        ret.block_to_symbol = dict()
        index_slices = []
        for arg in args:
            index_slices.append(copy.deepcopy(arg.array.index_slices))
        ret.index_slices = index_slices[0]
        for i in range(1, len(args)):
            ret.index_slices = Array.join_index_slices(ret.index_slices, index_slices[i])
        tmp_ret_index_slices = copy.deepcopy(ret.index_slices)
        # Insert the new packed axis at pack_ind.
        ret.index_slices = ret.index_slices[:pack_ind] + [[]] + ret.index_slices[pack_ind:]
        for i in range(len(args)):
            ret.index_slices[pack_ind] += [i + 1]
            tmp_keys = args[i].array.get_corresponding_keys(tmp_ret_index_slices)
            ii = 0
            for indexes in product(*tmp_ret_index_slices):
                tmp_key = list(indexes)
                tmp_key = tmp_key[:pack_ind] + [i + 1] + tmp_key[pack_ind:]
                ret.block_to_symbol[tuple(tmp_key)] = tmp_keys[ii].add_pack_ind(pack_ind)
                ii += 1
        return ret
    @staticmethod
    def transpose(args: list, node):
        """Transpose by the concrete permutation in args[1].value."""
        assert len(args) == 2
        assert not isinstance(args[1].value, Range)
        ret = Array("tmp", args[0].size)
        ret.index_slices = []
        ret.block_to_symbol = {}
        perm = np.array(args[1].value)
        for x in perm:
            ret.index_slices.append(args[0].array.index_slices[x])
        for indexes in product(*args[0].array.index_slices):
            # Permute each block's index tuple and its symbolic contents.
            new_indexes = ()
            for x in perm:
                new_indexes += (indexes[x],)
            ret.block_to_symbol[new_indexes] = args[0].array.block_to_symbol[tuple(indexes)].transpose(perm)
        return ret
    @staticmethod
    def unpack(args: list, node):
        """tf.unstack along node.attr["axis"]; gives up (None) when the axis
        size is unknown or exceeds 10. Returns a list of Arrays (or a single
        Array when the axis has length 1).
        """
        assert len(args) == 1
        axis = int(node.attr["axis"].i)
        index_slices = copy.deepcopy(args[0].array.index_slices)
        try:
            if int(args[0].size[axis]) > 10:
                return None
        except:
            return None
        rets = []
        for i in range(int(args[0].size[axis])):
            rets.append(Array("tmp", args[0].size))
            rets[-1].index_slices = index_slices[:axis] + index_slices[axis + 1:]
            rets[-1].block_to_symbol = {}
        length = index_slices[axis][-1]
        index_slices[axis] = list(range(1, length + 1))  # e.g., 4 -> [1,2,3,4]
        tmp_keys = args[0].array.get_corresponding_keys(index_slices)
        ii = 0
        for indexes in product(*index_slices):
            tmp_key = list(indexes)
            which = indexes[axis] - 1
            tmp_key = tmp_key[:axis] + tmp_key[axis + 1:]
            rets[which].block_to_symbol[tuple(tmp_key)] = tmp_keys[ii].remove_unpack_axis(axis)
            ii += 1
        return rets if len(rets) > 1 else rets[0]
| [
"math.floor",
"solver.Range",
"numpy.int32",
"numpy.log",
"math.sqrt",
"math.log",
"numpy.array",
"copy.deepcopy",
"analysis.abstract_interpretation.AbstractInterpretation",
"numpy.arange",
"numpy.reshape",
"itertools.product",
"numpy.tanh",
"numpy.max",
"numpy.exp",
"numpy.linspace",
... | [((1708, 1757), 'solver.Range', 'Range', ([], {'left': '(-OVERFLOW_LIMIT)', 'right': 'OVERFLOW_LIMIT'}), '(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)\n', (1713, 1757), False, 'from solver import Range, Array\n'), ((1934, 1947), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (1942, 1947), True, 'import numpy as np\n'), ((2217, 2230), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (2225, 2230), True, 'import numpy as np\n'), ((2589, 2602), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (2597, 2602), True, 'import numpy as np\n'), ((2956, 2969), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (2964, 2969), True, 'import numpy as np\n'), ((3331, 3344), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (3339, 3344), True, 'import numpy as np\n'), ((6406, 6428), 'copy.deepcopy', 'copy.deepcopy', (['args[0]'], {}), '(args[0])\n', (6419, 6428), False, 'import copy\n'), ((6441, 6463), 'copy.deepcopy', 'copy.deepcopy', (['args[1]'], {}), '(args[1])\n', (6454, 6463), False, 'import copy\n'), ((7048, 7153), 'solver.Range', 'Range', ([], {'left': '(args[0].value.left + args[1].value.left)', 'right': '(args[0].value.right + args[1].value.right)'}), '(left=args[0].value.left + args[1].value.left, right=args[0].value.\n right + args[1].value.right)\n', (7053, 7153), False, 'from solver import Range, Array\n'), ((10518, 10541), 'solver.Range', 'Range', ([], {'left': '(-1)', 'right': '(1)'}), '(left=-1, right=1)\n', (10523, 10541), False, 'from solver import Range, Array\n'), ((20238, 20272), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(max_index - 1)'}), '(left=0, right=max_index - 1)\n', (20243, 20272), False, 'from solver import Range, Array\n'), ((21695, 21717), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(1)'}), '(left=0, right=1)\n', (21700, 21717), False, 'from solver import Range, Array\n'), ((26286, 26327), 'solver.Range', 'Range', ([], {'left': '(UNDERFLOW_LIMIT * 10)', 'right': '(1)'}), '(left=UNDERFLOW_LIMIT * 10, 
right=1)\n', (26291, 26327), False, 'from solver import Range, Array\n'), ((26433, 26474), 'solver.Range', 'Range', ([], {'left': '(UNDERFLOW_LIMIT * 10)', 'right': '(1)'}), '(left=UNDERFLOW_LIMIT * 10, right=1)\n', (26438, 26474), False, 'from solver import Range, Array\n'), ((30347, 30370), 'numpy.round', 'np.round', (['args[0].value'], {}), '(args[0].value)\n', (30355, 30370), True, 'import numpy as np\n'), ((32520, 32542), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(1)'}), '(left=0, right=1)\n', (32525, 32542), False, 'from solver import Range, Array\n'), ((39090, 39112), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(0)'}), '(left=0, right=0)\n', (39095, 39112), False, 'from solver import Range, Array\n'), ((40262, 40333), 'warnings.warn', 'warnings.warn', (['"""iteratortostringhandle not implemented"""', 'RuntimeWarning'], {}), "('iteratortostringhandle not implemented', RuntimeWarning)\n", (40275, 40333), False, 'import warnings\n'), ((40393, 40446), 'warnings.warn', 'warnings.warn', (['"""noop not implemented"""', 'RuntimeWarning'], {}), "('noop not implemented', RuntimeWarning)\n", (40406, 40446), False, 'import warnings\n'), ((40511, 40569), 'warnings.warn', 'warnings.warn', (['"""restorev2 not implemented"""', 'RuntimeWarning'], {}), "('restorev2 not implemented', RuntimeWarning)\n", (40524, 40569), False, 'import warnings\n'), ((40631, 40686), 'warnings.warn', 'warnings.warn', (['"""savev2 not implemented"""', 'RuntimeWarning'], {}), "('savev2 not implemented', RuntimeWarning)\n", (40644, 40686), False, 'import warnings\n'), ((40811, 40834), 'solver.Range', 'Range', ([], {'left': '(-1)', 'right': '(1)'}), '(left=-1, right=1)\n', (40816, 40834), False, 'from solver import Range, Array\n'), ((40912, 40935), 'solver.Range', 'Range', ([], {'left': '(-1)', 'right': '(1)'}), '(left=-1, right=1)\n', (40917, 40935), False, 'from solver import Range, Array\n'), ((44640, 44666), 'solver.Array', 'Array', (['"""tmp"""', 'args[0].size'], {}), 
"('tmp', args[0].size)\n", (44645, 44666), False, 'from solver import Range, Array\n'), ((44731, 44810), 'solver.Array.join_index_slices', 'Array.join_index_slices', (['args[0].array.index_slices', 'args[1].array.index_slices'], {}), '(args[0].array.index_slices, args[1].array.index_slices)\n', (44754, 44810), False, 'from solver import Range, Array\n'), ((44990, 45016), 'itertools.product', 'product', (['*ret.index_slices'], {}), '(*ret.index_slices)\n', (44997, 45016), False, 'from itertools import product\n'), ((45646, 45672), 'solver.Array', 'Array', (['"""tmp"""', 'args[0].size'], {}), "('tmp', args[0].size)\n", (45651, 45672), False, 'from solver import Range, Array\n'), ((45737, 45816), 'solver.Array.join_index_slices', 'Array.join_index_slices', (['args[0].array.index_slices', 'args[1].array.index_slices'], {}), '(args[0].array.index_slices, args[1].array.index_slices)\n', (45760, 45816), False, 'from solver import Range, Array\n'), ((45996, 46022), 'itertools.product', 'product', (['*ret.index_slices'], {}), '(*ret.index_slices)\n', (46003, 46022), False, 'from itertools import product\n'), ((46846, 46872), 'solver.Array', 'Array', (['"""tmp"""', 'args[0].size'], {}), "('tmp', args[0].size)\n", (46851, 46872), False, 'from solver import Range, Array\n'), ((47294, 47325), 'copy.deepcopy', 'copy.deepcopy', (['ret.index_slices'], {}), '(ret.index_slices)\n', (47307, 47325), False, 'import copy\n'), ((48306, 48332), 'solver.Array', 'Array', (['"""tmp"""', 'args[0].size'], {}), "('tmp', args[0].size)\n", (48311, 48332), False, 'from solver import Range, Array\n'), ((48800, 48828), 'copy.deepcopy', 'copy.deepcopy', (['args[0].array'], {}), '(args[0].array)\n', (48813, 48828), False, 'import copy\n'), ((49695, 49723), 'copy.deepcopy', 'copy.deepcopy', (['args[0].array'], {}), '(args[0].array)\n', (49708, 49723), False, 'import copy\n'), ((50504, 50530), 'solver.Array', 'Array', (['"""tmp"""', 'args[0].size'], {}), "('tmp', args[0].size)\n", (50509, 50530), False, 
'from solver import Range, Array\n'), ((50892, 50923), 'copy.deepcopy', 'copy.deepcopy', (['ret.index_slices'], {}), '(ret.index_slices)\n', (50905, 50923), False, 'import copy\n'), ((51664, 51690), 'solver.Array', 'Array', (['"""tmp"""', 'args[0].size'], {}), "('tmp', args[0].size)\n", (51669, 51690), False, 'from solver import Range, Array\n'), ((51769, 51792), 'numpy.array', 'np.array', (['args[1].value'], {}), '(args[1].value)\n', (51777, 51792), True, 'import numpy as np\n'), ((51906, 51942), 'itertools.product', 'product', (['*args[0].array.index_slices'], {}), '(*args[0].array.index_slices)\n', (51913, 51942), False, 'from itertools import product\n'), ((52321, 52362), 'copy.deepcopy', 'copy.deepcopy', (['args[0].array.index_slices'], {}), '(args[0].array.index_slices)\n', (52334, 52362), False, 'import copy\n'), ((52962, 52984), 'itertools.product', 'product', (['*index_slices'], {}), '(*index_slices)\n', (52969, 52984), False, 'from itertools import product\n'), ((1587, 1599), 'numpy.min', 'np.min', (['mins'], {}), '(mins)\n', (1593, 1599), True, 'import numpy as np\n'), ((1607, 1619), 'numpy.max', 'np.max', (['maxs'], {}), '(maxs)\n', (1613, 1619), True, 'import numpy as np\n'), ((3732, 3758), 'numpy.abs', 'np.abs', (['args[0].value.left'], {}), '(args[0].value.left)\n', (3738, 3758), True, 'import numpy as np\n'), ((3782, 3809), 'numpy.abs', 'np.abs', (['args[0].value.right'], {}), '(args[0].value.right)\n', (3788, 3809), True, 'import numpy as np\n'), ((3989, 4036), 'solver.Range', 'Range', ([], {'left': '(0 if cond else min_sq)', 'right': 'max_sq'}), '(left=0 if cond else min_sq, right=max_sq)\n', (3994, 4036), False, 'from solver import Range, Array\n'), ((4070, 4091), 'numpy.abs', 'np.abs', (['args[0].value'], {}), '(args[0].value)\n', (4076, 4091), True, 'import numpy as np\n'), ((4439, 4491), 'solver.Range', 'Range', ([], {'left': '(x.left + y.left)', 'right': '(x.right + y.right)'}), '(left=x.left + y.left, right=x.right + y.right)\n', (4444, 
4491), False, 'from solver import Range, Array\n'), ((5012, 5041), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (5017, 5041), False, 'from solver import Range, Array\n'), ((5174, 5203), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (5179, 5203), False, 'from solver import Range, Array\n'), ((7841, 7863), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(1)'}), '(left=0, right=1)\n', (7846, 7863), False, 'from solver import Range, Array\n'), ((11628, 11657), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (11633, 11657), False, 'from solver import Range, Array\n'), ((12376, 12399), 'numpy.empty', 'np.empty', (['args[0].value'], {}), '(args[0].value)\n', (12384, 12399), True, 'import numpy as np\n'), ((12773, 12796), 'numpy.floor', 'np.floor', (['args[0].value'], {}), '(args[0].value)\n', (12781, 12796), True, 'import numpy as np\n'), ((14495, 14524), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (14500, 14524), False, 'from solver import Range, Array\n'), ((14666, 14695), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (14671, 14695), False, 'from solver import Range, Array\n'), ((15920, 15949), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (15925, 15949), False, 'from solver import Range, Array\n'), ((16088, 16117), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (16093, 16117), False, 'from solver import Range, Array\n'), ((16765, 16821), 'numpy.linspace', 'np.linspace', (['args[0].value', 'args[1].value', 'args[2].value'], {}), '(args[0].value, args[1].value, args[2].value)\n', (16776, 16821), True, 'import numpy as np\n'), ((16927, 16956), 'solver.Range', 'Range', ([], {'left': 
'(False)', 'right': '(True)'}), '(left=False, right=True)\n', (16932, 16956), False, 'from solver import Range, Array\n'), ((17096, 17125), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (17101, 17125), False, 'from solver import Range, Array\n'), ((17264, 17293), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (17269, 17293), False, 'from solver import Range, Array\n'), ((17537, 17565), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(ind - 1)'}), '(left=0, right=ind - 1)\n', (17542, 17565), False, 'from solver import Range, Array\n'), ((17567, 17610), 'solver.Range', 'Range', ([], {'left': '(UNDERFLOW_LIMIT * 10)', 'right': 'num'}), '(left=UNDERFLOW_LIMIT * 10, right=num)\n', (17572, 17610), False, 'from solver import Range, Array\n'), ((17628, 17671), 'solver.Range', 'Range', ([], {'left': '(UNDERFLOW_LIMIT * 10)', 'right': 'num'}), '(left=UNDERFLOW_LIMIT * 10, right=num)\n', (17633, 17671), False, 'from solver import Range, Array\n'), ((18350, 18389), 'numpy.matmul', 'np.matmul', (['args[0].value', 'args[1].value'], {}), '(args[0].value, args[1].value)\n', (18359, 18389), True, 'import numpy as np\n'), ((21862, 21921), 'solver.Range', 'Range', ([], {'left': '(-args[0].value.right)', 'right': '(-args[0].value.left)'}), '(left=-args[0].value.right, right=-args[0].value.left)\n', (21867, 21921), False, 'from solver import Range, Array\n'), ((22137, 22165), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(ind - 1)'}), '(left=0, right=ind - 1)\n', (22142, 22165), False, 'from solver import Range, Array\n'), ((22340, 22369), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (22345, 22369), False, 'from solver import Range, Array\n'), ((23458, 23493), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': 'length_unknown'}), '(left=0, right=length_unknown)\n', (23463, 23493), False, 'from solver 
import Range, Array\n'), ((25637, 25660), 'numpy.int32', 'np.int32', (['args[1].value'], {}), '(args[1].value)\n', (25645, 25660), True, 'import numpy as np\n'), ((27008, 27037), 'solver.Range', 'Range', ([], {'left': 'left', 'right': 'right'}), '(left=left, right=right)\n', (27013, 27037), False, 'from solver import Range, Array\n'), ((27071, 27125), 'numpy.arange', 'np.arange', (['args[0].value', 'args[1].value', 'args[2].value'], {}), '(args[0].value, args[1].value, args[2].value)\n', (27080, 27125), True, 'import numpy as np\n'), ((27637, 27654), 'numpy.reshape', 'np.reshape', (['x', '(-1)'], {}), '(x, -1)\n', (27647, 27654), True, 'import numpy as np\n'), ((27708, 27725), 'numpy.reshape', 'np.reshape', (['y', '(-1)'], {}), '(y, -1)\n', (27718, 27725), True, 'import numpy as np\n'), ((31713, 31735), 'numpy.sign', 'np.sign', (['args[0].value'], {}), '(args[0].value)\n', (31720, 31735), True, 'import numpy as np\n'), ((33061, 33090), 'solver.Range', 'Range', ([], {'left': 'left', 'right': 'right'}), '(left=left, right=right)\n', (33066, 33090), False, 'from solver import Range, Array\n'), ((33346, 33434), 'solver.Range', 'Range', ([], {'left': '(abs_value.left * abs_value.left)', 'right': '(abs_value.right * abs_value.right)'}), '(left=abs_value.left * abs_value.left, right=abs_value.right *\n abs_value.right)\n', (33351, 33434), False, 'from solver import Range, Array\n'), ((34488, 34540), 'solver.Range', 'Range', ([], {'left': '(x.left - y.right)', 'right': '(x.right - y.left)'}), '(left=x.left - y.right, right=x.right - y.left)\n', (34493, 34540), False, 'from solver import Range, Array\n'), ((35333, 35356), 'numpy.int32', 'np.int32', (['args[1].value'], {}), '(args[1].value)\n', (35341, 35356), True, 'import numpy as np\n'), ((36908, 36936), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(ind - 1)'}), '(left=0, right=ind - 1)\n', (36913, 36936), False, 'from solver import Range, Array\n'), ((38736, 38756), 'numpy.max', 'np.max', (['args[0].size'], 
{}), '(args[0].size)\n', (38742, 38756), True, 'import numpy as np\n'), ((38776, 38802), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(x - 1)'}), '(left=0, right=x - 1)\n', (38781, 38802), False, 'from solver import Range, Array\n'), ((41341, 41362), 'numpy.log', 'np.log', (['args[0].value'], {}), '(args[0].value)\n', (41347, 41362), True, 'import numpy as np\n'), ((41771, 41794), 'numpy.log1p', 'np.log1p', (['args[0].value'], {}), '(args[0].value)\n', (41779, 41794), True, 'import numpy as np\n'), ((43241, 43270), 'solver.Range', 'Range', ([], {'left': 'left', 'right': 'right'}), '(left=left, right=right)\n', (43246, 43270), False, 'from solver import Range, Array\n'), ((43307, 43328), 'numpy.exp', 'np.exp', (['args[0].value'], {}), '(args[0].value)\n', (43313, 43328), True, 'import numpy as np\n'), ((43941, 43963), 'numpy.tanh', 'np.tanh', (['args[0].value'], {}), '(args[0].value)\n', (43948, 43963), True, 'import numpy as np\n'), ((47204, 47262), 'solver.Array.join_index_slices', 'Array.join_index_slices', (['ret.index_slices', 'index_slices[i]'], {}), '(ret.index_slices, index_slices[i])\n', (47227, 47262), False, 'from solver import Range, Array\n'), ((47949, 47979), 'itertools.product', 'product', (['*tmp_ret_index_slices'], {}), '(*tmp_ret_index_slices)\n', (47956, 47979), False, 'from itertools import product\n'), ((50802, 50860), 'solver.Array.join_index_slices', 'Array.join_index_slices', (['ret.index_slices', 'index_slices[i]'], {}), '(ret.index_slices, index_slices[i])\n', (50825, 50860), False, 'from solver import Range, Array\n'), ((51230, 51260), 'itertools.product', 'product', (['*tmp_ret_index_slices'], {}), '(*tmp_ret_index_slices)\n', (51237, 51260), False, 'from itertools import product\n'), ((2315, 2327), 'math.sqrt', 'math.sqrt', (['X'], {}), '(X)\n', (2324, 2327), False, 'import math\n'), ((3082, 3096), 'math.lgamma', 'math.lgamma', (['X'], {}), '(X)\n', (3093, 3096), False, 'import math\n'), ((5451, 5486), 'solver.Range', 'Range', 
([], {'left': '(0)', 'right': 'length_unknown'}), '(left=0, right=length_unknown)\n', (5456, 5486), False, 'from solver import Range, Array\n'), ((7984, 8013), 'solver.Range', 'Range', ([], {'left': '(False)', 'right': '(True)'}), '(left=False, right=True)\n', (7989, 8013), False, 'from solver import Range, Array\n'), ((9428, 9468), 'numpy.maximum', 'np.maximum', (['args[0].value', 'args[1].value'], {}), '(args[0].value, args[1].value)\n', (9438, 9468), True, 'import numpy as np\n'), ((19761, 19777), 'numpy.maximum', 'np.maximum', (['x', 'y'], {}), '(x, y)\n', (19771, 19777), True, 'import numpy as np\n'), ((20864, 20880), 'numpy.minimum', 'np.minimum', (['x', 'y'], {}), '(x, y)\n', (20874, 20880), True, 'import numpy as np\n'), ((22201, 22236), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': 'length_unknown'}), '(left=0, right=length_unknown)\n', (22206, 22236), False, 'from solver import Range, Array\n'), ((26864, 26885), 'numpy.min', 'np.min', (['args[0].value'], {}), '(args[0].value)\n', (26870, 26885), True, 'import numpy as np\n'), ((26967, 26988), 'numpy.max', 'np.max', (['args[1].value'], {}), '(args[1].value)\n', (26973, 26988), True, 'import numpy as np\n'), ((27292, 27327), 'solver.Range', 'Range', ([], {'left': '(1)', 'right': 'length_unknown'}), '(left=1, right=length_unknown)\n', (27297, 27327), False, 'from solver import Range, Array\n'), ((28031, 28080), 'solver.Range', 'Range', ([], {'left': '(-OVERFLOW_LIMIT)', 'right': 'OVERFLOW_LIMIT'}), '(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)\n', (28036, 28080), False, 'from solver import Range, Array\n'), ((29417, 29440), 'numpy.int32', 'np.int32', (['args[1].value'], {}), '(args[1].value)\n', (29425, 29440), True, 'import numpy as np\n'), ((30707, 30744), 'solver.Range', 'Range', ([], {'left': '(1 / right)', 'right': '(1 / left)'}), '(left=1 / right, right=1 / left)\n', (30712, 30744), False, 'from solver import Range, Array\n'), ((31423, 31458), 'solver.Range', 'Range', ([], {'left': '(1)', 
'right': 'length_unknown'}), '(left=1, right=length_unknown)\n', (31428, 31458), False, 'from solver import Range, Array\n'), ((31962, 31997), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': 'length_unknown'}), '(left=0, right=length_unknown)\n', (31967, 31997), False, 'from solver import Range, Array\n'), ((32078, 32113), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': 'length_unknown'}), '(left=0, right=length_unknown)\n', (32083, 32113), False, 'from solver import Range, Array\n'), ((34923, 34992), 'solver.Range', 'Range', ([], {'left': '(args[0].value.left * ind)', 'right': '(args[0].value.right * ind)'}), '(left=args[0].value.left * ind, right=args[0].value.right * ind)\n', (34928, 34992), False, 'from solver import Range, Array\n'), ((36673, 36696), 'numpy.int32', 'np.int32', (['args[1].value'], {}), '(args[1].value)\n', (36681, 36696), True, 'import numpy as np\n'), ((36973, 37008), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': 'length_unknown'}), '(left=0, right=length_unknown)\n', (36978, 37008), False, 'from solver import Range, Array\n'), ((37200, 37223), 'numpy.int32', 'np.int32', (['args[1].value'], {}), '(args[1].value)\n', (37208, 37223), True, 'import numpy as np\n'), ((38838, 38877), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': '(length_unknown - 1)'}), '(left=0, right=length_unknown - 1)\n', (38843, 38877), False, 'from solver import Range, Array\n'), ((39799, 39848), 'solver.Range', 'Range', ([], {'left': '(-OVERFLOW_LIMIT)', 'right': 'OVERFLOW_LIMIT'}), '(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)\n', (39804, 39848), False, 'from solver import Range, Array\n'), ((43358, 43373), 'numpy.sum', 'np.sum', (['tmp_exp'], {}), '(tmp_exp)\n', (43364, 43373), True, 'import numpy as np\n'), ((46998, 47035), 'copy.deepcopy', 'copy.deepcopy', (['arg.array.index_slices'], {}), '(arg.array.index_slices)\n', (47011, 47035), False, 'import copy\n'), ((50651, 50688), 'copy.deepcopy', 'copy.deepcopy', (['arg.array.index_slices'], 
{}), '(arg.array.index_slices)\n', (50664, 50688), False, 'import copy\n'), ((52581, 52607), 'solver.Array', 'Array', (['"""tmp"""', 'args[0].size'], {}), "('tmp', args[0].size)\n", (52586, 52607), False, 'from solver import Range, Array\n'), ((2188, 2200), 'math.sqrt', 'math.sqrt', (['x'], {}), '(x)\n', (2197, 2200), False, 'import math\n'), ((2651, 2665), 'math.pow', 'math.pow', (['X', 'Y'], {}), '(X, Y)\n', (2659, 2665), False, 'import math\n'), ((2925, 2939), 'math.lgamma', 'math.lgamma', (['x'], {}), '(x)\n', (2936, 2939), False, 'import math\n'), ((3448, 3457), 'numpy.exp', 'np.exp', (['X'], {}), '(X)\n', (3454, 3457), True, 'import numpy as np\n'), ((9783, 9807), 'numpy.int32', 'np.int32', (['args[-1].value'], {}), '(args[-1].value)\n', (9791, 9807), True, 'import numpy as np\n'), ((11991, 12014), 'numpy.int32', 'np.int32', (['args[1].value'], {}), '(args[1].value)\n', (11999, 12014), True, 'import numpy as np\n'), ((12669, 12699), 'math.floor', 'math.floor', (['args[0].value.left'], {}), '(args[0].value.left)\n', (12679, 12699), False, 'import math\n'), ((12707, 12738), 'math.floor', 'math.floor', (['args[0].value.right'], {}), '(args[0].value.right)\n', (12717, 12738), False, 'import math\n'), ((19886, 19895), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (19892, 19895), True, 'import numpy as np\n'), ((20989, 20998), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (20995, 20998), True, 'import numpy as np\n'), ((25328, 25363), 'solver.Range', 'Range', ([], {'left': '(0)', 'right': 'length_unknown'}), '(left=0, right=length_unknown)\n', (25333, 25363), False, 'from solver import Range, Array\n'), ((28745, 28794), 'solver.Range', 'Range', ([], {'left': '(-OVERFLOW_LIMIT)', 'right': 'OVERFLOW_LIMIT'}), '(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)\n', (28750, 28794), False, 'from solver import Range, Array\n'), ((30265, 30293), 'numpy.round', 'np.round', (['args[0].value.left'], {}), '(args[0].value.left)\n', (30273, 30293), True, 'import numpy as np\n'), 
((30301, 30330), 'numpy.round', 'np.round', (['args[0].value.right'], {}), '(args[0].value.right)\n', (30309, 30330), True, 'import numpy as np\n'), ((31615, 31642), 'numpy.sign', 'np.sign', (['args[0].value.left'], {}), '(args[0].value.left)\n', (31622, 31642), True, 'import numpy as np\n'), ((31650, 31678), 'numpy.sign', 'np.sign', (['args[0].value.right'], {}), '(args[0].value.right)\n', (31657, 31678), True, 'import numpy as np\n'), ((35035, 35065), 'solver.Range', 'Range', ([], {'left': '(1)', 'right': '(1000000.0)'}), '(left=1, right=1000000.0)\n', (35040, 35065), False, 'from solver import Range, Array\n'), ((39214, 39231), 'math.floor', 'math.floor', (['(x / y)'], {}), '(x / y)\n', (39224, 39231), False, 'import math\n'), ((40135, 40184), 'solver.Range', 'Range', ([], {'left': '(-OVERFLOW_LIMIT)', 'right': 'OVERFLOW_LIMIT'}), '(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)\n', (40140, 40184), False, 'from solver import Range, Array\n'), ((43843, 43870), 'numpy.tanh', 'np.tanh', (['args[0].value.left'], {}), '(args[0].value.left)\n', (43850, 43870), True, 'import numpy as np\n'), ((43878, 43906), 'numpy.tanh', 'np.tanh', (['args[0].value.right'], {}), '(args[0].value.right)\n', (43885, 43906), True, 'import numpy as np\n'), ((47566, 47614), 'numpy.array', 'np.array', (['args[i].array.index_slices[concat_ind]'], {}), '(args[i].array.index_slices[concat_ind])\n', (47574, 47614), True, 'import numpy as np\n'), ((47764, 47812), 'numpy.array', 'np.array', (['args[i].array.index_slices[concat_ind]'], {}), '(args[i].array.index_slices[concat_ind])\n', (47772, 47812), True, 'import numpy as np\n'), ((836, 857), 'numpy.min', 'np.min', (['args[0].value'], {}), '(args[0].value)\n', (842, 857), True, 'import numpy as np\n'), ((955, 976), 'numpy.max', 'np.max', (['args[0].value'], {}), '(args[0].value)\n', (961, 976), True, 'import numpy as np\n'), ((1430, 1447), 'numpy.max', 'np.max', (['arg.value'], {}), '(arg.value)\n', (1436, 1447), True, 'import numpy as np\n'), 
((1487, 1504), 'numpy.min', 'np.min', (['arg.value'], {}), '(arg.value)\n', (1493, 1504), True, 'import numpy as np\n'), ((2478, 2492), 'math.pow', 'math.pow', (['x', 'y'], {}), '(x, y)\n', (2486, 2492), False, 'import math\n'), ((3304, 3313), 'numpy.exp', 'np.exp', (['X'], {}), '(X)\n', (3310, 3313), True, 'import numpy as np\n'), ((4844, 4875), 'analysis.abstract_interpretation.AbstractInterpretation', 'AbstractInterpretation', ([], {'value': 's'}), '(value=s)\n', (4866, 4875), False, 'from analysis.abstract_interpretation import AbstractInterpretation\n'), ((27956, 27968), 'numpy.min', 'np.min', (['ends'], {}), '(ends)\n', (27962, 27968), True, 'import numpy as np\n'), ((27976, 27988), 'numpy.max', 'np.max', (['ends'], {}), '(ends)\n', (27982, 27988), True, 'import numpy as np\n'), ((33861, 33893), 'solver.Range', 'Range', ([], {'left': 'value1', 'right': 'value2'}), '(left=value1, right=value2)\n', (33866, 33893), False, 'from solver import Range, Array\n'), ((41158, 41187), 'math.log', 'math.log', (['args[0].value.right'], {}), '(args[0].value.right)\n', (41166, 41187), False, 'import math\n'), ((41241, 41269), 'math.log', 'math.log', (['args[0].value.left'], {}), '(args[0].value.left)\n', (41249, 41269), False, 'import math\n'), ((41277, 41306), 'math.log', 'math.log', (['args[0].value.right'], {}), '(args[0].value.right)\n', (41285, 41306), False, 'import math\n'), ((41588, 41617), 'numpy.log1p', 'np.log1p', (['args[0].value.right'], {}), '(args[0].value.right)\n', (41596, 41617), True, 'import numpy as np\n'), ((41671, 41699), 'numpy.log1p', 'np.log1p', (['args[0].value.left'], {}), '(args[0].value.left)\n', (41679, 41699), True, 'import numpy as np\n'), ((41707, 41736), 'numpy.log1p', 'np.log1p', (['args[0].value.right'], {}), '(args[0].value.right)\n', (41715, 41736), True, 'import numpy as np\n'), ((25409, 25460), 'analysis.abstract_interpretation.AbstractInterpretation', 'AbstractInterpretation', ([], {'value': 'ind', 'dtype': '(3)', 'size': '[]'}), 
'(value=ind, dtype=3, size=[])\n', (25431, 25460), False, 'from analysis.abstract_interpretation import AbstractInterpretation\n'), ((26662, 26681), 'numpy.array', 'np.array', (['arg.value'], {}), '(arg.value)\n', (26670, 26681), True, 'import numpy as np\n'), ((28303, 28315), 'numpy.min', 'np.min', (['ends'], {}), '(ends)\n', (28309, 28315), True, 'import numpy as np\n'), ((28323, 28335), 'numpy.max', 'np.max', (['ends'], {}), '(ends)\n', (28329, 28335), True, 'import numpy as np\n'), ((28670, 28682), 'numpy.min', 'np.min', (['ends'], {}), '(ends)\n', (28676, 28682), True, 'import numpy as np\n'), ((28690, 28702), 'numpy.max', 'np.max', (['ends'], {}), '(ends)\n', (28696, 28702), True, 'import numpy as np\n'), ((35105, 35156), 'analysis.abstract_interpretation.AbstractInterpretation', 'AbstractInterpretation', ([], {'value': 'ind', 'dtype': '(3)', 'size': '[]'}), '(value=ind, dtype=3, size=[])\n', (35127, 35156), False, 'from analysis.abstract_interpretation import AbstractInterpretation\n')] |
"""Create an icosphere from convex regular polyhedron.
Adapted from:
https://gist.github.com/AbhilashReddyM/aed58c60438bf4c313831718013ce48f
Thank you <NAME> (abhilashreddy.com)!
Original authorship:
Author: <NAME>
(<EMAIL> where cu=columbia.edu) (github.com/wgm2111)
copyright (c) 2010
liscence: BSD style
Modified by <NAME> (abhilashreddy.com):
* made to work with Python 3+
* made to work with recent versions of matplotlib
"""
from matplotlib import tri
import numpy
class Polyhedron(object):
"""Contain information about a polyhedron."""
def __init__(self):
"""Define a normalized convex regular polyhedron."""
self.triangle_centers = self._get_triangle_centers()
self.edges = self._get_edges()
self.edge_centers = self._get_edge_centers()
def __repr__(self):
"""Return a representation of the object."""
return (f'Polyhedron(faces={len(self.triangles)}, '
f'vertices={len(self.vertices)})')
def _get_vertices(self):
raise NotImplementedError()
def _get_triangles(self):
raise NotImplementedError()
def _get_triangle_centers(self):
centers = numpy.empty_like(self.triangles, dtype=float)
for i, triangle in enumerate(self.triangles):
points = [self.vertices[idx] for idx in triangle]
centers[i] = numpy.mean(points)
return centers
def _get_edges(self):
edges = []
for triangle in self.triangles:
i1, i2, i3 = triangle
edges.append([[i1, i2], [i1, i3], [i2, i3]])
return numpy.array(edges)
def _get_edge_centers(self):
centers = numpy.empty_like(self.edges, dtype=float)
for i, edge in enumerate(self.edges):
points = [self.vertices[idx] for idx in edge]
centers[i] = numpy.mean(points)
return centers
class Icosahedron(Polyhedron):
"""Contain information about an icosahedron (polyhedron with 20 faces)."""
def __init__(self):
"""Define a normalized convex regular icosahedron."""
self.vertices = self._get_vertices()
self.triangles = self._get_triangles()
super().__init__()
def _get_vertices(self):
# Define vertices based on the golden ratio.
a = 0.5 * (1 + numpy.sqrt(5)) # golden ratio
vertices = numpy.array([[a, 0, 1],
[-a, 0, 1],
[-a, 0, -1],
[a, 0, -1],
[1, a, 0],
[1, -a, 0],
[-1, -a, 0],
[-1, a, 0],
[0, 1, a],
[0, 1, -a],
[0, -1, -a],
[0, -1, a]])
# Normalize vertices
# (note that all vertices are equidistant from origin).
vertices /= numpy.linalg.norm(vertices[0])
# Rotate vertices so that top point is located on z-axis.
alpha = numpy.arctan2(vertices[0, 0], vertices[0, 2])
ca, sa = numpy.cos(alpha), numpy.sin(alpha)
R = numpy.array([[ca, 0, -sa],
[0, 1, 0],
[sa, 0, ca]])
vertices = numpy.inner(R, vertices).transpose()
# Reorder vertices in a downward-spiral fashion.
new_order = [0, 3, 4, 8, 11, 5, 10, 9, 7, 1, 6, 2]
return vertices[new_order]
def _get_triangles(self):
indices = numpy.array([[1, 0, 2],
[2, 0, 3],
[3, 0, 4],
[4, 0, 5],
[5, 0, 1],
[6, 1, 7],
[2, 7, 1],
[7, 2, 8],
[2, 3, 8],
[8, 3, 9],
[3, 4, 9],
[9, 4, 10],
[10, 4, 5],
[10, 5, 6],
[6, 5, 1],
[6, 7, 11],
[7, 8, 11],
[8, 9, 11],
[9, 10, 11],
[10, 6, 11]])
return indices
polyhedrons = {'icosahedron': Icosahedron}
class Icosphere(object):
"""Contain information about an icosphere."""
def __init__(self, n, polyhedron):
"""Create an icosphere."""
self.n = n
self.base = polyhedron
self.vertices, self.triangles, self.edges = self._triangulate()
def __repr__(self):
"""Return a representation of the object."""
return (f'Icosphere(faces={len(self.triangles)}, '
f'vertices={len(self.vertices)})')
def print_info(self):
"""Print information about the icosphere."""
print(self)
min_lengths = numpy.empty(len(self.triangles))
max_lengths = numpy.empty(len(self.triangles))
mean_lengths = numpy.empty(len(self.triangles))
for i, triangle in enumerate(self.triangles):
a, b, c = [self.vertices[idx] for idx in triangle]
lengths = [numpy.linalg.norm(a - b),
numpy.linalg.norm(a - c),
numpy.linalg.norm(b - c)]
min_lengths[i] = numpy.min(lengths)
max_lengths[i] = numpy.max(lengths)
mean_lengths[i] = numpy.mean(lengths)
print('Min edge length:', numpy.min(min_lengths))
print('Max edge length:', numpy.max(max_lengths))
print('Mean edge length:', numpy.mean(mean_lengths))
def _triangulate(self):
"""Compute the triangulation of a unit sphere.
The function refines each face of an icosahedron to a n-th order
barycentric triangle.
This function addresses two key issues:
1. calculate the triangles (unique by construction)
2. remove non-unique nodes and edges
"""
vertices = numpy.array([self.base.vertices[self.base.triangles[:, 0]],
self.base.vertices[self.base.triangles[:, 1]],
self.base.vertices[self.base.triangles[:, 2]]])
M = self._get_barycenter_matrix(self.n + 1)
vertices = numpy.tensordot(vertices, M,
axes=([0, ], [-1, ])).transpose(0, 2, 1)
num_vertices = vertices.shape[1]
assert vertices.size / 3 < 1e6, 'Number of vertices too high!'
num = 20
flat_coords = numpy.arange(vertices.size / 3).reshape(num,
num_vertices)
edges_, triangles_ = self._triangulate_barycentric(M)
triangles = numpy.zeros((num, triangles_.shape[0], 3), dtype=int)
edges = numpy.zeros((num, edges_.shape[0], 2), dtype=int)
for i in range(num):
for j in range(3):
triangles[i, :, j] = flat_coords[i, triangles_[:, j]]
if j < 2:
edges[i, :, j] = flat_coords[i, edges_[:, j]]
vertices = vertices.reshape(vertices.size // 3, 3)
triangles = triangles.reshape(triangles.size // 3, 3)
edges = edges.reshape(edges.size // 2, 2)
# Normalize vertices.
scalars = numpy.linalg.norm(vertices, axis=-1)
vertices = (vertices.T / scalars).T
# Remove repeated vertices.
_, iu, irep = numpy.unique(numpy.dot(vertices // 1e-8,
100 * numpy.arange(1, 4, 1)),
return_index=True, return_inverse=True)
vertices = vertices[iu]
triangles = irep[triangles]
edges = irep[edges]
mid = (vertices[edges[:, 0]] + vertices[edges[:, 1]]) / 2
_, iu = numpy.unique(numpy.dot(mid // 1e-8,
100 * numpy.arange(1, 4, 1)),
return_index=True)
edges = edges[iu, :]
return vertices, triangles, edges
def _get_barycenter_matrix(self, n):
"""Create the matrix that will refine points on a triangle."""
nrows = n * (n + 1) // 2
M = numpy.zeros((nrows, 3))
vals = numpy.arange(n) / (n - 1)
start = 0
for i, offset in enumerate(range(n, 0, -1)):
stop = start + offset
M[start:stop, 0] = vals[offset - 1::-1]
M[start:stop, 1] = vals[:offset]
M[start:stop, 2] = vals[i]
start = stop
return M
def _triangulate_barycentric(self, points):
"""Triangulate a barycentric triangle using Matplotlib tri module."""
x = (numpy.cos(-numpy.pi / 4) * points[:, 0] +
numpy.sin(-numpy.pi / 4) * points[:, 1])
y = points[:, 2]
triangulation = tri.Triangulation(x, y)
return triangulation.edges, triangulation.triangles
def create_polyhedron(ptype):
"""Create a polyhedron."""
ptype = ptype.lower()
if ptype not in polyhedrons.keys():
raise ValueError(f'ptype should be one of {list(polyhedrons.keys())}')
return polyhedrons[ptype]()
def create_icosphere(n, polyhedron='icosahedron'):
"""Create an icosphere by recursively refining faces."""
polyhedron = create_polyhedron(polyhedron)
return Icosphere(n, polyhedron)
| [
"numpy.mean",
"numpy.sqrt",
"matplotlib.tri.Triangulation",
"numpy.tensordot",
"numpy.min",
"numpy.max",
"numpy.inner",
"numpy.array",
"numpy.zeros",
"numpy.empty_like",
"numpy.arctan2",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.arange"
] | [((1175, 1220), 'numpy.empty_like', 'numpy.empty_like', (['self.triangles'], {'dtype': 'float'}), '(self.triangles, dtype=float)\n', (1191, 1220), False, 'import numpy\n'), ((1596, 1614), 'numpy.array', 'numpy.array', (['edges'], {}), '(edges)\n', (1607, 1614), False, 'import numpy\n'), ((1667, 1708), 'numpy.empty_like', 'numpy.empty_like', (['self.edges'], {'dtype': 'float'}), '(self.edges, dtype=float)\n', (1683, 1708), False, 'import numpy\n'), ((2354, 2519), 'numpy.array', 'numpy.array', (['[[a, 0, 1], [-a, 0, 1], [-a, 0, -1], [a, 0, -1], [1, a, 0], [1, -a, 0], [-1,\n -a, 0], [-1, a, 0], [0, 1, a], [0, 1, -a], [0, -1, -a], [0, -1, a]]'], {}), '([[a, 0, 1], [-a, 0, 1], [-a, 0, -1], [a, 0, -1], [1, a, 0], [1,\n -a, 0], [-1, -a, 0], [-1, a, 0], [0, 1, a], [0, 1, -a], [0, -1, -a], [0,\n -1, a]])\n', (2365, 2519), False, 'import numpy\n'), ((2977, 3007), 'numpy.linalg.norm', 'numpy.linalg.norm', (['vertices[0]'], {}), '(vertices[0])\n', (2994, 3007), False, 'import numpy\n'), ((3090, 3135), 'numpy.arctan2', 'numpy.arctan2', (['vertices[0, 0]', 'vertices[0, 2]'], {}), '(vertices[0, 0], vertices[0, 2])\n', (3103, 3135), False, 'import numpy\n'), ((3200, 3251), 'numpy.array', 'numpy.array', (['[[ca, 0, -sa], [0, 1, 0], [sa, 0, ca]]'], {}), '([[ca, 0, -sa], [0, 1, 0], [sa, 0, ca]])\n', (3211, 3251), False, 'import numpy\n'), ((3558, 3814), 'numpy.array', 'numpy.array', (['[[1, 0, 2], [2, 0, 3], [3, 0, 4], [4, 0, 5], [5, 0, 1], [6, 1, 7], [2, 7, 1\n ], [7, 2, 8], [2, 3, 8], [8, 3, 9], [3, 4, 9], [9, 4, 10], [10, 4, 5],\n [10, 5, 6], [6, 5, 1], [6, 7, 11], [7, 8, 11], [8, 9, 11], [9, 10, 11],\n [10, 6, 11]]'], {}), '([[1, 0, 2], [2, 0, 3], [3, 0, 4], [4, 0, 5], [5, 0, 1], [6, 1, \n 7], [2, 7, 1], [7, 2, 8], [2, 3, 8], [8, 3, 9], [3, 4, 9], [9, 4, 10],\n [10, 4, 5], [10, 5, 6], [6, 5, 1], [6, 7, 11], [7, 8, 11], [8, 9, 11],\n [9, 10, 11], [10, 6, 11]])\n', (3569, 3814), False, 'import numpy\n'), ((6148, 6312), 'numpy.array', 'numpy.array', 
(['[self.base.vertices[self.base.triangles[:, 0]], self.base.vertices[self.\n base.triangles[:, 1]], self.base.vertices[self.base.triangles[:, 2]]]'], {}), '([self.base.vertices[self.base.triangles[:, 0]], self.base.\n vertices[self.base.triangles[:, 1]], self.base.vertices[self.base.\n triangles[:, 2]]])\n', (6159, 6312), False, 'import numpy\n'), ((6899, 6952), 'numpy.zeros', 'numpy.zeros', (['(num, triangles_.shape[0], 3)'], {'dtype': 'int'}), '((num, triangles_.shape[0], 3), dtype=int)\n', (6910, 6952), False, 'import numpy\n'), ((6969, 7018), 'numpy.zeros', 'numpy.zeros', (['(num, edges_.shape[0], 2)'], {'dtype': 'int'}), '((num, edges_.shape[0], 2), dtype=int)\n', (6980, 7018), False, 'import numpy\n'), ((7462, 7498), 'numpy.linalg.norm', 'numpy.linalg.norm', (['vertices'], {'axis': '(-1)'}), '(vertices, axis=-1)\n', (7479, 7498), False, 'import numpy\n'), ((8353, 8376), 'numpy.zeros', 'numpy.zeros', (['(nrows, 3)'], {}), '((nrows, 3))\n', (8364, 8376), False, 'import numpy\n'), ((8986, 9009), 'matplotlib.tri.Triangulation', 'tri.Triangulation', (['x', 'y'], {}), '(x, y)\n', (9003, 9009), False, 'from matplotlib import tri\n'), ((1362, 1380), 'numpy.mean', 'numpy.mean', (['points'], {}), '(points)\n', (1372, 1380), False, 'import numpy\n'), ((1838, 1856), 'numpy.mean', 'numpy.mean', (['points'], {}), '(points)\n', (1848, 1856), False, 'import numpy\n'), ((3153, 3169), 'numpy.cos', 'numpy.cos', (['alpha'], {}), '(alpha)\n', (3162, 3169), False, 'import numpy\n'), ((3171, 3187), 'numpy.sin', 'numpy.sin', (['alpha'], {}), '(alpha)\n', (3180, 3187), False, 'import numpy\n'), ((5480, 5498), 'numpy.min', 'numpy.min', (['lengths'], {}), '(lengths)\n', (5489, 5498), False, 'import numpy\n'), ((5528, 5546), 'numpy.max', 'numpy.max', (['lengths'], {}), '(lengths)\n', (5537, 5546), False, 'import numpy\n'), ((5577, 5596), 'numpy.mean', 'numpy.mean', (['lengths'], {}), '(lengths)\n', (5587, 5596), False, 'import numpy\n'), ((5631, 5653), 'numpy.min', 'numpy.min', 
(['min_lengths'], {}), '(min_lengths)\n', (5640, 5653), False, 'import numpy\n'), ((5689, 5711), 'numpy.max', 'numpy.max', (['max_lengths'], {}), '(max_lengths)\n', (5698, 5711), False, 'import numpy\n'), ((5748, 5772), 'numpy.mean', 'numpy.mean', (['mean_lengths'], {}), '(mean_lengths)\n', (5758, 5772), False, 'import numpy\n'), ((8392, 8407), 'numpy.arange', 'numpy.arange', (['n'], {}), '(n)\n', (8404, 8407), False, 'import numpy\n'), ((2304, 2317), 'numpy.sqrt', 'numpy.sqrt', (['(5)'], {}), '(5)\n', (2314, 2317), False, 'import numpy\n'), ((3321, 3345), 'numpy.inner', 'numpy.inner', (['R', 'vertices'], {}), '(R, vertices)\n', (3332, 3345), False, 'import numpy\n'), ((5327, 5351), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (5344, 5351), False, 'import numpy\n'), ((5376, 5400), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(a - c)'], {}), '(a - c)\n', (5393, 5400), False, 'import numpy\n'), ((5425, 5449), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(b - c)'], {}), '(b - c)\n', (5442, 5449), False, 'import numpy\n'), ((6438, 6484), 'numpy.tensordot', 'numpy.tensordot', (['vertices', 'M'], {'axes': '([0], [-1])'}), '(vertices, M, axes=([0], [-1]))\n', (6453, 6484), False, 'import numpy\n'), ((6694, 6725), 'numpy.arange', 'numpy.arange', (['(vertices.size / 3)'], {}), '(vertices.size / 3)\n', (6706, 6725), False, 'import numpy\n'), ((8841, 8865), 'numpy.cos', 'numpy.cos', (['(-numpy.pi / 4)'], {}), '(-numpy.pi / 4)\n', (8850, 8865), False, 'import numpy\n'), ((8896, 8920), 'numpy.sin', 'numpy.sin', (['(-numpy.pi / 4)'], {}), '(-numpy.pi / 4)\n', (8905, 8920), False, 'import numpy\n'), ((7693, 7714), 'numpy.arange', 'numpy.arange', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (7705, 7714), False, 'import numpy\n'), ((8052, 8073), 'numpy.arange', 'numpy.arange', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (8064, 8073), False, 'import numpy\n')] |
import json
import time
import copy
import checkpoint as loader
import argparse
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable
from torch import nn,optim
import torch
import torchvision
import torch.nn.functional as F
from torch import nn
from PIL import Image
from torchvision import datasets,transforms,models
from collections import OrderedDict
import torch.optim as optim
from torch.optim import lr_scheduler
def imshow(image, ax=None, title=None):
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
def process_image(image_path):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
#image_pil=Image.open(image_path)
loader = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()])
image_pl = Image.open(image_path)
imagepl_ft = loader(image_pl).float()
np_image=np.array(imagepl_ft)
#np_image=np_image/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np.transpose(np_image, (1, 2, 0)) - mean)/std
np_image = np.transpose(np_image, (2, 0, 1))
return np_image
def predict(image_path, model_name, topk=10, categories='', device='cuda'):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
if(not torch.cuda.is_available() and device=='cuda'):
device='cpu'
# TODO: Implement the code to predict the class from an image file
with open('cat_to_name.json', 'r') as f:
label_mapper = json.load(f)
gpu=(device=='cuda')
model=loader.load_checkpoint(model_name,gpu=gpu)
model.to('cpu')
img=process_image(image_path)
img=torch.from_numpy(img).type(torch.FloatTensor)
inpt=img.unsqueeze(0)
model_result=model.forward(inpt)
expResult=torch.exp(model_result)
firstTopX,SecondTopX=expResult.topk(topk)
probs = torch.nn.functional.softmax(firstTopX.data, dim=1).numpy()[0]
#classes = SecondTopX.data.numpy()[0]
#probs = firstTopX.detach().numpy().tolist()[0]
classes = SecondTopX.detach().numpy().tolist()[0]
# Convert indices to classes
idx_to_class = {val: key for key, val in
model.class_to_idx.items()}
#labels = [label_mapper[str(lab)] for lab in SecondTopX]
labels = [idx_to_class[y] for y in classes]
flowers=[categories[idx_to_class[i]] for i in classes]
return probs,flowers
def show_prediction(image_path,probabilities,labels, categories):
plt.figure(figsize=(6,10))
ax=plt.subplot(2,1,1)
flower_index=image_path.split('/')[2]
name=categories[flower_index]
img=process_image(image_path)
imshow(img,ax)
plt.subplot(2,1,2)
sns.barplot(x=probabilities,y=labels,color=sns.color_palette()[0])
plt.show()
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str)
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--epochs', type=int)
parser.add_argument('--checkpoint', type=str)
parser.add_argument('--category_names', type=str)
parser.add_argument('--top_k', type=int)
args, _ = parser.parse_known_args()
if (args.input):
input_name=args.input
else:
input_name='flowers/test/28/image_05230.jpg'
if(args.checkpoint):
checkpoint=args.checkpoint
else:
checkpoint='ic-model.pth'
if(args.category_names):
category_names=args.category_names
else:
category_names='cat_to_name.json'
if(args.gpu):
device='cuda'
else:
device='cpu'
# show_prediction(image_path=input_name,model=checkpoint,category_names=category_names)
with open(category_names, 'r') as f:
categories = json.load(f)
# run the prediction
probabilities,labels=predict(input_name,checkpoint,topk=5,categories=categories,device=device)
# show prediction
print('predict results')
print(probabilities)
print(labels)
show_prediction(image_path=input_name,probabilities=probabilities,labels= labels, categories= categories)
| [
"numpy.clip",
"torch.exp",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"argparse.ArgumentParser",
"seaborn.color_palette",
"checkpoint",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Resize",
"numpy.transpose",
"matplotlib.pyplot.... | [((784, 815), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (792, 815), True, 'import numpy as np\n'), ((826, 857), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (834, 857), True, 'import numpy as np\n'), ((992, 1012), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (999, 1012), True, 'import numpy as np\n'), ((1395, 1417), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1405, 1417), False, 'from PIL import Image\n'), ((1478, 1498), 'numpy.array', 'np.array', (['imagepl_ft'], {}), '(imagepl_ft)\n', (1486, 1498), True, 'import numpy as np\n'), ((1547, 1578), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1555, 1578), True, 'import numpy as np\n'), ((1590, 1621), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1598, 1621), True, 'import numpy as np\n'), ((1707, 1740), 'numpy.transpose', 'np.transpose', (['np_image', '(2, 0, 1)'], {}), '(np_image, (2, 0, 1))\n', (1719, 1740), True, 'import numpy as np\n'), ((2217, 2260), 'checkpoint.load_checkpoint', 'loader.load_checkpoint', (['model_name'], {'gpu': 'gpu'}), '(model_name, gpu=gpu)\n', (2239, 2260), True, 'import checkpoint as loader\n'), ((2467, 2490), 'torch.exp', 'torch.exp', (['model_result'], {}), '(model_result)\n', (2476, 2490), False, 'import torch\n'), ((3195, 3222), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 10)'}), '(figsize=(6, 10))\n', (3205, 3222), True, 'import matplotlib.pyplot as plt\n'), ((3229, 3249), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (3240, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3391, 3411), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3402, 3411), True, 'import matplotlib.pyplot as plt\n'), ((3485, 3495), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3493, 3495), True, 'import matplotlib.pyplot as plt\n'), ((3538, 3563), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3561, 3563), False, 'import argparse\n'), ((562, 576), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (574, 576), True, 'import matplotlib.pyplot as plt\n'), ((2165, 2177), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2174, 2177), False, 'import json\n'), ((4481, 4493), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4490, 4493), False, 'import json\n'), ((1281, 1303), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1298, 1303), False, 'from torchvision import datasets, transforms, models\n'), ((1314, 1340), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1335, 1340), False, 'from torchvision import datasets, transforms, models\n'), ((1351, 1372), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1370, 1372), False, 'from torchvision import datasets, transforms, models\n'), ((1435, 1451), 'checkpoint', 'loader', (['image_pl'], {}), '(image_pl)\n', (1441, 1451), True, 'import checkpoint as loader\n'), ((1642, 1675), 'numpy.transpose', 'np.transpose', (['np_image', '(1, 2, 0)'], {}), '(np_image, (1, 2, 0))\n', (1654, 1675), True, 'import numpy as np\n'), ((1952, 1977), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1975, 1977), False, 'import torch\n'), ((2329, 2350), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (2345, 2350), False, 'import torch\n'), ((2559, 2609), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['firstTopX.data'], {'dim': '(1)'}), '(firstTopX.data, dim=1)\n', (2586, 2609), False, 'import torch\n'), ((3457, 3476), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (3474, 3476), True, 'import seaborn as sns\n')] |
import scipy.io
import numpy as np
import os
import random
import json
import pdb
def split_voxel_then_image(cls):
    """Write train/val/test split files for one 3D-PRNN class.

    Reads precomputed model ids from ``random_sample_id_mulfive.mat``, crops
    them to the id range of ``cls`` (via the module-level ``start_end_id``),
    then writes, per split, one file listing voxel/model ids and one listing
    image filenames (5 rendered images per model).  Also saves an
    image-index -> voxel-index mapping as a .mat file.

    Relies on module-level globals ``start_end_id`` and ``obj_num`` being set
    by the caller (see the __main__ block).
    """
    # , 'depth_render_{}'.format(cls[7:])
    root = os.path.abspath('.')
    in_dir = os.path.join(root, '../input/3dprnn/depth_map')
    pre_match_id_file = os.path.join(in_dir, '../random_sample_id_mulfive.mat')
    pre_match_id = scipy.io.loadmat(pre_match_id_file) # 5555, 1850, 0-888, 0-391, 0-199
    # Crop the global train/val id lists down to this class's id range.
    pre_match_id_crop = {}
    start_id = start_end_id[cls]['train'][0]
    end_id = start_end_id[cls]['train'][1] + 1
    pre_match_id_crop['train'] = list(pre_match_id['ind_train_mulfive'][0])[start_id:end_id]
    start_id = start_end_id[cls]['val'][0]
    end_id = start_end_id[cls]['val'][1] + 1
    pre_match_id_crop['val'] = list(pre_match_id['ind_val_mulfive'][0])[start_id:end_id]
    # NOTE(review): split_by_model is set but never read below.
    split_by_model = True # True-split by 216 models, False-split by 34 images
    out_dir = os.path.join(root, '../output', cls)
    voxel_txt_dir = os.path.join(out_dir, 'voxeltxt')
    if not os.path.exists(voxel_txt_dir):
        os.makedirs(voxel_txt_dir)
    # f lists one model id per model (written once per 5 images).
    f = open(os.path.join(out_dir, 'voxels_dir_{}.txt'.format(cls)), 'w')
    # Per-split voxel (model-id) list files and their counters.
    voxel_train_txtpath = os.path.join(voxel_txt_dir, 'voxel_train.txt')
    voxel_val_txtpath = os.path.join(voxel_txt_dir, 'voxel_val.txt')
    voxel_test_txtpath = os.path.join(voxel_txt_dir, 'voxel_test.txt')
    voxel_ftrain = open(voxel_train_txtpath, 'w')
    voxel_fval = open(voxel_val_txtpath, 'w')
    voxel_ftest = open(voxel_test_txtpath, 'w')
    voxel_ctrain = 0
    voxel_cval = 0
    voxel_ctest = 0
    img_idxs = []
    voxel_idxs = []
    # Per-split image-filename list files and their counters.
    train_txtpath = os.path.join(voxel_txt_dir, 'train.txt')
    val_txtpath = os.path.join(voxel_txt_dir, 'val.txt')
    test_txtpath = os.path.join(voxel_txt_dir, 'test.txt')
    ftrain = open(train_txtpath, 'w')
    fval = open(val_txtpath, 'w')
    ftest = open(test_txtpath, 'w')
    ctrain = 0
    cval = 0
    ctest = 0
    # 5 rendered images per model, over all train+test models of this class.
    im_sum = (obj_num[cls]['train'] + obj_num[cls]['test']) * 5
    for i in range(im_sum):
        model_i = i // 5 # start from 0
        img_file = ('0000' + str(i + 1) + '.mat')[-8:] # start from 1
        img_idxs.append(i + 1)
        voxel_idxs.append(model_i + 1)
        # Write each model id once (on its first of 5 images).
        if i % 5 == 0:
            f.write(str(model_i) + '\n')
        # Route the model (and its 5 images) to train/val/test.
        if model_i in pre_match_id_crop['train']:
            if i % 5 == 0:
                voxel_ftrain.write(str(model_i) + '\n')
                voxel_ctrain += 1
            ftrain.write(img_file + '\n')
            ctrain += 1
        elif model_i in pre_match_id_crop['val']:
            if i % 5 == 0:
                voxel_fval.write(str(model_i) + '\n')
                voxel_cval += 1
            fval.write(img_file + '\n')
            cval += 1
        else:
            if i % 5 == 0:
                voxel_ftest.write(str(model_i) + '\n')
                voxel_ctest += 1
            ftest.write(img_file + '\n')
            ctest += 1
    voxel_ftrain.close()
    voxel_fval.close()
    voxel_ftest.close()
    ftrain.close()
    fval.close()
    ftest.close()
    f.close()
    # Persist the 1-based image-index -> voxel-index mapping.
    scipy.io.savemat(os.path.join(out_dir, 'img_voxel_idxs.mat'),
                     {'img_idxs': np.array(img_idxs), 'voxel_idxs': np.array(voxel_idxs)})
    # Sanity-check totals: voxel counts, image counts, and total images.
    print(voxel_ctrain + voxel_cval + voxel_ctest, voxel_ctrain, voxel_cval, voxel_ctest)
    print(ctrain + cval + ctest, ctrain, cval, ctest)
    print(len(img_idxs))
if __name__ == '__main__':
    # NOTE(review): intervals is defined but never used below.
    intervals = {'train': [3335, 4805, 5555], 'val': [1110, 1600, 1850]}
    # Per-class index ranges into the global train/val id lists; consumed as a
    # module-level global by split_voxel_then_image().
    start_end_id = {'3dprnnchair': {'train': [0, 3334], 'val': [0, 1109]},
                    '3dprnntable': {'train': [3335, 4804], 'val': [1110, 1599]},
                    '3dprnnnight_stand': {'train': [4805, 5554], 'val': [1600, 1849]}}
    cls_all = ['3dprnnchair', '3dprnntable', '3dprnnnight_stand']
    cls = '3dprnnnight_stand'
    # Per-class model counts; also consumed as a global by the split function.
    obj_num = {'3dprnnchair': {}, '3dprnntable': {}, '3dprnnnight_stand': {}}
    obj_num['3dprnnnight_stand'] = {'train': 200, 'test': 86}
    obj_num['3dprnnchair'] = {'train': 889, 'test': 100}
    obj_num['3dprnntable'] = {'train': 392, 'test': 100}
    # for cls in cls_all:
    split_voxel_then_image(cls)
| [
"os.path.exists",
"os.makedirs",
"os.path.join",
"numpy.array",
"os.path.abspath"
] | [((170, 190), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (185, 190), False, 'import os\n'), ((204, 251), 'os.path.join', 'os.path.join', (['root', '"""../input/3dprnn/depth_map"""'], {}), "(root, '../input/3dprnn/depth_map')\n", (216, 251), False, 'import os\n'), ((276, 331), 'os.path.join', 'os.path.join', (['in_dir', '"""../random_sample_id_mulfive.mat"""'], {}), "(in_dir, '../random_sample_id_mulfive.mat')\n", (288, 331), False, 'import os\n'), ((905, 941), 'os.path.join', 'os.path.join', (['root', '"""../output"""', 'cls'], {}), "(root, '../output', cls)\n", (917, 941), False, 'import os\n'), ((962, 995), 'os.path.join', 'os.path.join', (['out_dir', '"""voxeltxt"""'], {}), "(out_dir, 'voxeltxt')\n", (974, 995), False, 'import os\n'), ((1173, 1219), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""voxel_train.txt"""'], {}), "(voxel_txt_dir, 'voxel_train.txt')\n", (1185, 1219), False, 'import os\n'), ((1244, 1288), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""voxel_val.txt"""'], {}), "(voxel_txt_dir, 'voxel_val.txt')\n", (1256, 1288), False, 'import os\n'), ((1314, 1359), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""voxel_test.txt"""'], {}), "(voxel_txt_dir, 'voxel_test.txt')\n", (1326, 1359), False, 'import os\n'), ((1622, 1662), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""train.txt"""'], {}), "(voxel_txt_dir, 'train.txt')\n", (1634, 1662), False, 'import os\n'), ((1681, 1719), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""val.txt"""'], {}), "(voxel_txt_dir, 'val.txt')\n", (1693, 1719), False, 'import os\n'), ((1739, 1778), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""test.txt"""'], {}), "(voxel_txt_dir, 'test.txt')\n", (1751, 1778), False, 'import os\n'), ((1007, 1036), 'os.path.exists', 'os.path.exists', (['voxel_txt_dir'], {}), '(voxel_txt_dir)\n', (1021, 1036), False, 'import os\n'), ((1046, 1072), 'os.makedirs', 'os.makedirs', (['voxel_txt_dir'], {}), '(voxel_txt_dir)\n', 
(1057, 1072), False, 'import os\n'), ((3081, 3124), 'os.path.join', 'os.path.join', (['out_dir', '"""img_voxel_idxs.mat"""'], {}), "(out_dir, 'img_voxel_idxs.mat')\n", (3093, 3124), False, 'import os\n'), ((3160, 3178), 'numpy.array', 'np.array', (['img_idxs'], {}), '(img_idxs)\n', (3168, 3178), True, 'import numpy as np\n'), ((3194, 3214), 'numpy.array', 'np.array', (['voxel_idxs'], {}), '(voxel_idxs)\n', (3202, 3214), True, 'import numpy as np\n')] |
""" fivethirtyeight baseball puzzle
This code computes exact runs-scored probabilities, using the
negative binomial distribution and convolutions of the runs-scored
distributions
"""
import argparse
import numpy as np
import pandas as pd
from collections import defaultdict
from scipy.stats import distributions
from functools import partial
import copy
import sys
class ProbDist:
    """Per-inning runs-scored distribution for one team.

    The number of non-out plate appearances in an inning is modelled as a
    negative binomial with ``n=3`` (failures are outs, with probability
    ``1 - success_prob``); runs only start scoring once ``offset`` non-out
    appearances have occurred.  The distribution is truncated at
    ``max_value`` plate appearances.
    """

    def __init__(self, success_prob, offset, max_value=20):
        self.success_prob = success_prob
        self.failure_prob = 1 - success_prob
        self.offset = offset
        self.max_value = max_value
        self._prob = None  # lazy cache for the per-inning run distribution
        # P(exactly k non-out plate appearances before the 3rd out).
        self.non_out_pa_prob = partial(
            distributions.nbinom.pmf, n=3, p=self.failure_prob
        )

    @staticmethod
    def prob_stats(prob_dict):
        """Return mean, standard deviation and total mass of a {value: prob} dict."""
        first_moment = 0.0
        second_moment = 0.0
        total_mass = 0.0
        for value, weight in prob_dict.items():
            first_moment += value * weight
            second_moment += value * value * weight
            total_mass += weight
        return {
            "mean": first_moment,
            "std": np.sqrt(second_moment - first_moment * first_moment),
            "prob_norm": total_mass,
        }

    def _make_prob(self):
        """Build the per-inning run distribution from the plate-appearance model."""
        run_probs = defaultdict(float)
        for non_out_pa in range(self.max_value):
            runs = non_out_pa - self.offset
            if runs < 0:
                runs = 0
            run_probs[runs] += self.non_out_pa_prob(non_out_pa)
        return run_probs

    @property
    def prob(self):
        """Per-inning run distribution, computed lazily and cached."""
        if not self._prob:
            self._prob = self._make_prob()
        return self._prob

    def multi_prob(self, n, wrk=None, max_value=100):
        """Runs distribution over ``n`` innings (n-fold convolution of ``prob``).

        If ``wrk`` is supplied it is used as the starting distribution and a
        further ``n - 1`` convolutions are applied to it.
        """
        if n == 0:
            return wrk
        accumulated = self.prob if wrk is None else wrk
        while n > 1:
            accumulated = self.convolve_probs(
                self.prob, accumulated, max_value=max_value
            )
            n -= 1
        return accumulated

    @staticmethod
    def convolve_probs(prob_dict1, prob_dict2, max_value=100):
        """Convolve two run distributions, discarding totals above ``max_value``."""
        combined = defaultdict(float)
        for runs1, prob1 in prob_dict1.items():
            for runs2, prob2 in prob_dict2.items():
                total = runs1 + runs2
                if total <= max_value:
                    combined[total] += prob1 * prob2
        return combined

    @staticmethod
    def generate_rvs(prob_dict, rng=None):
        """Draw one sample from a {value: probability} dict."""
        sampler = np.random if rng is None else rng
        values = list(prob_dict.keys())
        weights = list(prob_dict.values())
        return sampler.choice(values, p=weights)

    @staticmethod
    def compute_win_prob(prob_dict1, prob_dict2):
        """Cross two run distributions into {1: win, -1: loss, 0: tie} probabilities."""
        outcome = defaultdict(float)
        for runs1, prob1 in prob_dict1.items():
            for runs2, prob2 in prob_dict2.items():
                # sign(runs1 - runs2) without importing anything extra
                outcome[(runs1 > runs2) - (runs1 < runs2)] += prob1 * prob2
        return outcome
def overall_win_pct(team1, team2, max_value=30, baseline_innings=9):
    """Compute cumulative win/tie/loss probabilities for team1 vs team2.

    Plays ``baseline_innings`` regulation innings, then adds one extra inning
    at a time until the remaining tie probability drops below a small epsilon.

    Args:
        team1, team2: ProbDist instances for the two teams.
        max_value: Truncation limit passed to the runs-scored convolutions.
        baseline_innings: Number of regulation innings.

    Returns:
        DataFrame with one row per inning count (cumulative ``win_prob``,
        ``loss_prob``, ``tie_prob``) plus ``relative_prob``, the win
        probability conditioned on the game being decided.
    """
    epsilon = 1e-6
    d1 = team1.multi_prob(baseline_innings, max_value=max_value)
    d2 = team2.multi_prob(baseline_innings, max_value=max_value)
    win_pct = ProbDist.compute_win_prob(d1, d2)
    tie_prob = win_pct[0]
    extra_inning_count = 0
    result = [
        {
            "innings": baseline_innings + extra_inning_count,
            "tie_prob": tie_prob,
            "win_prob": win_pct[1],
            "loss_prob": win_pct[-1],
        }
    ]
    while tie_prob > epsilon:
        extra_inning_count += 1
        # BUG FIX: these single-inning distributions previously hard-coded
        # max_value=30, silently ignoring the max_value argument.
        d1 = team1.multi_prob(1, max_value=max_value)
        d2 = team2.multi_prob(1, max_value=max_value)
        w = ProbDist.compute_win_prob(d1, d2)
        # The game can only be decided now if it was tied so far, hence the
        # extra-inning outcome is weighted by the running tie probability.
        win_pct[1] += w[1] * tie_prob
        win_pct[-1] += w[-1] * tie_prob
        tie_prob *= w[0]
        result.append(
            {
                "innings": baseline_innings + extra_inning_count,
                "tie_prob": tie_prob,
                "win_prob": win_pct[1],
                "loss_prob": win_pct[-1],
            }
        )
    return pd.DataFrame(result).assign(
        relative_prob=lambda x: x.win_prob / (x.win_prob + x.loss_prob)
    )
def _parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--success-prob", required=True, nargs=2, type=float)
parser.add_argument("--offset", required=True, nargs=2, type=int)
parser.add_argument("--baseline-innings", default=9, type=int)
parser.add_argument("--max-value", default=30, type=int)
args = parser.parse_args(args)
return args
def main():
    """Entry point: parse sys.argv, build both team models and return the
    win-percentage DataFrame produced by overall_win_pct."""
    args = _parse_args(sys.argv[1:])
    # Each nargs=2 option supplies one value per team: index 0 -> team 1,
    # index 1 -> team 2.
    team1 = ProbDist(args.success_prob[0], args.offset[0])
    team2 = ProbDist(args.success_prob[1], args.offset[1])
    return overall_win_pct(
        team1, team2, max_value=args.max_value, baseline_innings=args.baseline_innings
    )
if __name__ == "__main__":
res = main()
print(res)
| [
"numpy.sqrt",
"argparse.ArgumentParser",
"collections.defaultdict",
"functools.partial",
"copy.deepcopy",
"pandas.DataFrame"
] | [((4194, 4219), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4217, 4219), False, 'import argparse\n'), ((652, 711), 'functools.partial', 'partial', (['distributions.nbinom.pmf'], {'n': '(3)', 'p': 'self.failure_prob'}), '(distributions.nbinom.pmf, n=3, p=self.failure_prob)\n', (659, 711), False, 'from functools import partial\n'), ((1128, 1146), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (1139, 1146), False, 'from collections import defaultdict\n'), ((2069, 2087), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (2080, 2087), False, 'from collections import defaultdict\n'), ((2591, 2609), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (2602, 2609), False, 'from collections import defaultdict\n'), ((1010, 1041), 'numpy.sqrt', 'np.sqrt', (['(_mean2 - _mean * _mean)'], {}), '(_mean2 - _mean * _mean)\n', (1017, 1041), True, 'import numpy as np\n'), ((1726, 1750), 'copy.deepcopy', 'copy.deepcopy', (['self.prob'], {}), '(self.prob)\n', (1739, 1750), False, 'import copy\n'), ((1785, 1803), 'copy.deepcopy', 'copy.deepcopy', (['wrk'], {}), '(wrk)\n', (1798, 1803), False, 'import copy\n'), ((4049, 4069), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (4061, 4069), True, 'import pandas as pd\n')] |
#!../../../../virtualenv/bin/python3
# -*- coding: utf-8 -*-
# NB: The shebang line above assumes you've installed a python virtual environment alongside your working copy of the
# <4most-4gp-scripts> git repository. It also only works if you invoke this python script from the directory where it
# is located. If these two assumptions are incorrect (e.g. you're using Conda), you can still use this script by typing
# <python list_exposure_times_from_fits.py>, but <./list_exposure_times_from_fits.py> will not work.
"""
Take a bunch of FITS template spectra, and list their intrinsic magnitudes (as saved in the FITS file), and the
exposure times needed to observe them if they were at some particular reference magnitude.
"""
import argparse
import glob
import logging
import os
from os import path as os_path
import numpy as np
from astropy.io import fits
from fourgp_fourfs import FourFS
from fourgp_speclib import SpectrumLibrarySqlite, Spectrum
# Locate this script's directory and the repository root relative to it.
our_path = os_path.split(os_path.abspath(__file__))[0]
root_path = os_path.join(our_path, "../../../..")

# Read input parameters
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--input',
                    required=True,
                    dest="input",
                    help="A filename wildcard where we can find the template spectra to operate on.")
parser.add_argument('--library',
                    dest="library",
                    default="louise_templates",
                    help="The spectrum library to import the templates into.")
parser.add_argument('--workspace', dest='workspace', default="",
                    help="Directory where we expect to find spectrum libraries.")
parser.add_argument('--binary-path',
                    required=False,
                    default=root_path,
                    dest="binary_path",
                    help="Specify a directory where 4FS package is installed.")
parser.add_argument('--snr-definition',
                    action="append",
                    dest="snr_definitions",
                    help="Specify a way of defining SNR, in the form 'name,minimum,maximum', meaning we calculate the "
                         "median SNR per pixel between minimum and maximum wavelengths in Angstrom.")
parser.add_argument('--snr-list',
                    required=False,
                    default="10,12,14,16,18,20,23,26,30,35,40,45,50,80,100,130,180,250",
                    dest="snr_list",
                    help="Specify a comma-separated list of the SNRs that 4FS is to degrade spectra to.")
parser.add_argument('--snr-definitions-lrs',
                    required=False,
                    default="",
                    dest="snr_definitions_lrs",
                    help="Specify the SNR definition to use for LRS. For example, 'GalDiskHR_536NM' to use the S4 "
                         "green definition of SNR. You can even specify three comma-separated definitions, e.g. "
                         "'GalDiskHR_536NM,GalDiskHR_536NM,GalDiskHR_536NM' to use different SNR metrics for the "
                         "RGB arms within 4MOST LRS, though this is a pretty weird thing to want to do.")
parser.add_argument('--snr-definitions-hrs',
                    required=False,
                    default="",
                    dest="snr_definitions_hrs",
                    help="Specify the SNR definition to use for HRS. For example, 'GalDiskHR_536NM' to use the S4 "
                         "green definition of SNR. You can even specify three comma-separated definitions, e.g. "
                         "'GalDiskHR_536NM,GalDiskHR_536NM,GalDiskHR_536NM' to use different SNR metrics for the "
                         "RGB arms within 4MOST HRS, though this is a pretty weird thing to want to do.")
# Paired store_true / store_false flags sharing one destination, with an
# explicit default set via parser.set_defaults below.
parser.add_argument('--run-hrs',
                    action='store_true',
                    dest="run_hrs",
                    help="Set 4FS to produce output for 4MOST HRS [default].")
parser.add_argument('--no-run-hrs',
                    action='store_false',
                    dest="run_hrs",
                    help="Set 4FS not to produce output for 4MOST HRS. Setting this will make us run quicker.")
parser.set_defaults(run_hrs=True)
parser.add_argument('--run-lrs',
                    action='store_true',
                    dest="run_lrs",
                    help="Set 4FS to produce output for 4MOST LRS [default].")
parser.add_argument('--no-run-lrs',
                    action='store_false',
                    dest="run_lrs",
                    help="Set 4FS not to produce output for 4MOST LRS. Setting this will make us run quicker.")
parser.set_defaults(run_lrs=True)
parser.add_argument('--photometric-band',
                    required=False,
                    default="SDSS_r",
                    dest="photometric_band",
                    help="The name of the photometric band in which the magnitudes in --mag-list are specified. This "
                         "must match a band which is recognised by the pyphot python package.")
parser.add_argument('--mag-list',
                    required=False,
                    default="15",
                    dest="mag_list",
                    help="Specify a comma-separated list of the magnitudes to assume when simulating observations "
                         "of each object. If multiple magnitudes are specified, than each input spectrum we be "
                         "output multiple times, once at each magnitude.")
args = parser.parse_args()
# Start logger
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
                    datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info("Calculating magnitudes and exposure times for templates")

# Set path to workspace where we create libraries of spectra
workspace = args.workspace if args.workspace else os_path.join(our_path, "../../../workspace")
# NOTE(review): "mkdir -p" via os.system is POSIX-only; os.makedirs(...,
# exist_ok=True) would be portable.
os.system("mkdir -p {}".format(workspace))

# Turn set of templates into a spectrum library with path specified above
library_path = os_path.join(workspace, args.library)
library = SpectrumLibrarySqlite(path=library_path, create=True)

# Fetch a list of all the input template spectra which match the supplied filename wildcard
templates = glob.glob(args.input)
templates.sort()
# Parse any definitions of SNR we were supplied on the command line.
# Each --snr-definition has the form "name,min_wavelength,max_wavelength".
if (args.snr_definitions is None) or (len(args.snr_definitions) < 1):
    snr_definitions = None
else:
    snr_definitions = []
    for snr_definition in args.snr_definitions:
        words = snr_definition.split(",")
        snr_definitions.append([words[0], float(words[1]), float(words[2])])


def _arm_snr_definitions(specification):
    """Turn a comma-separated SNR-definition string for one spectrograph mode
    (LRS or HRS) into a list of three definition names, one per arm.

    An empty string means "use the default" (returns None); a single name is
    replicated across all three arms; exactly three names are used as given.
    """
    if len(specification) < 1:
        # Case 1: None was specified, so we use default
        return None
    names = specification.split(",")
    # Case 2: A single definition was supplied which we use for all three arms
    if len(names) == 1:
        names *= 3
    # Case 3: Three definitions were supplied, one for each arm
    assert len(names) == 3, "Expected one or three comma-separated SNR definitions"
    return names


# Look up which definitions of SNR we should use for 4MOST LRS and HRS.
# (The two modes previously used duplicated copy-paste parsing logic.)
snr_definitions_lrs = _arm_snr_definitions(args.snr_definitions_lrs)
snr_definitions_hrs = _arm_snr_definitions(args.snr_definitions_hrs)

# Parse the list of SNRs that the user specified on the command line
snr_list = [float(item.strip()) for item in args.snr_list.split(",")]

# Parse the list of magnitudes that the user specified on the command line
mag_list = [float(item.strip()) for item in args.mag_list.split(",")]
# Loop over all the magnitudes we are to simulate for each object
for magnitude in mag_list:
    # Instantiate 4FS wrapper
    # NB: Here we are requiring SNR/pixel=100 in GalDiskHR_545NM
    etc_wrapper = FourFS(
        path_to_4fs=os_path.join(args.binary_path, "OpSys/ETC"),
        snr_definitions=snr_definitions,
        magnitude=magnitude,
        magnitude_unreddened=False,
        photometric_band=args.photometric_band,
        run_lrs=args.run_lrs,
        run_hrs=args.run_hrs,
        lrs_use_snr_definitions=snr_definitions_lrs,
        hrs_use_snr_definitions=snr_definitions_hrs,
        snr_list=snr_list,
        snr_per_pixel=False
    )

    for template_index, template in enumerate(templates):
        name = "template_{:08d}".format(template_index)

        # Open fits spectrum
        # NOTE(review): the FITS handle is never closed; a
        # "with fits.open(...)" block (copying the arrays out) would avoid
        # accumulating open file handles over many templates.
        f = fits.open(template)
        data = f[1].data
        wavelengths = data['LAMBDA']
        fluxes = data['FLUX']

        # Open ASCII spectrum
        # f = np.loadtxt(template).T
        # wavelengths = f[0]
        # fluxes = f[1]

        # Create 4GP spectrum object
        spectrum = Spectrum(wavelengths=wavelengths,
                            values=fluxes,
                            value_errors=np.zeros_like(wavelengths),
                            metadata={
                                "Starname": name,
                                "imported_from": template
                            })

        # Work out magnitude
        mag_intrinsic = spectrum.photometry(args.photometric_band)

        # Pass template to 4FS
        degraded_spectra = etc_wrapper.process_spectra(
            spectra_list=((spectrum, None),)
        )

        # Loop over LRS and HRS
        for mode in degraded_spectra:
            # Loop over the spectra we simulated (there was only one!)
            for index in degraded_spectra[mode]:
                # Loop over the various SNRs we simulated
                for snr in degraded_spectra[mode][index]:
                    # Extract the exposure time returned by 4FS from the metadata associated with this Spectrum object
                    # The exposure time is recorded in seconds
                    exposure_time = degraded_spectra[mode][index][snr]["spectrum"].metadata["exposure"]

                    # Print output
                    print("{name:100s} {mode:6s} {snr:6.1f} {magnitude:6.3f} {exposure:6.3f}". \
                          format(name=name,
                                 mode=mode,
                                 snr=snr,
                                 magnitude=mag_intrinsic,
                                 exposure=exposure_time))

        # Insert spectrum object into spectrum library
        library.insert(spectra=spectrum, filenames=os_path.split(template)[1])
| [
"logging.basicConfig",
"logging.getLogger",
"fourgp_speclib.SpectrumLibrarySqlite",
"argparse.ArgumentParser",
"os.path.join",
"os.path.split",
"astropy.io.fits.open",
"os.path.abspath",
"numpy.zeros_like",
"glob.glob"
] | [((1024, 1061), 'os.path.join', 'os_path.join', (['our_path', '"""../../../.."""'], {}), "(our_path, '../../../..')\n", (1036, 1061), True, 'from os import path as os_path\n'), ((1096, 1140), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (1119, 1140), False, 'import argparse\n'), ((5549, 5690), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""[%(asctime)s] %(levelname)s:%(filename)s:%(message)s"""', 'datefmt': '"""%d/%m/%Y %H:%M:%S"""'}), "(level=logging.INFO, format=\n '[%(asctime)s] %(levelname)s:%(filename)s:%(message)s', datefmt=\n '%d/%m/%Y %H:%M:%S')\n", (5568, 5690), False, 'import logging\n'), ((5710, 5737), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5727, 5737), False, 'import logging\n'), ((6099, 6136), 'os.path.join', 'os_path.join', (['workspace', 'args.library'], {}), '(workspace, args.library)\n', (6111, 6136), True, 'from os import path as os_path\n'), ((6147, 6200), 'fourgp_speclib.SpectrumLibrarySqlite', 'SpectrumLibrarySqlite', ([], {'path': 'library_path', 'create': '(True)'}), '(path=library_path, create=True)\n', (6168, 6200), False, 'from fourgp_speclib import SpectrumLibrarySqlite, Spectrum\n'), ((6306, 6327), 'glob.glob', 'glob.glob', (['args.input'], {}), '(args.input)\n', (6315, 6327), False, 'import glob\n'), ((5921, 5965), 'os.path.join', 'os_path.join', (['our_path', '"""../../../workspace"""'], {}), "(our_path, '../../../workspace')\n", (5933, 5965), True, 'from os import path as os_path\n'), ((982, 1007), 'os.path.abspath', 'os_path.abspath', (['__file__'], {}), '(__file__)\n', (997, 1007), True, 'from os import path as os_path\n'), ((8862, 8881), 'astropy.io.fits.open', 'fits.open', (['template'], {}), '(template)\n', (8871, 8881), False, 'from astropy.io import fits\n'), ((8279, 8322), 'os.path.join', 'os_path.join', (['args.binary_path', '"""OpSys/ETC"""'], {}), "(args.binary_path, 
'OpSys/ETC')\n", (8291, 8322), True, 'from os import path as os_path\n'), ((9270, 9296), 'numpy.zeros_like', 'np.zeros_like', (['wavelengths'], {}), '(wavelengths)\n', (9283, 9296), True, 'import numpy as np\n'), ((10795, 10818), 'os.path.split', 'os_path.split', (['template'], {}), '(template)\n', (10808, 10818), True, 'from os import path as os_path\n')] |
# This example is written for the new interface
import StateModeling as stm
import numpy as np
import matplotlib.pyplot as plt
import fetch_data
import pandas as pd
import tensorflow as tf
basePath = r"C:\Users\pi96doc\Documents\Programming\PythonScripts\StateModeling"
if False:
data = fetch_data.DataFetcher().fetch_german_data()
data_np = data.to_numpy()
df = pd.read_excel(basePath + r"\Examples\bev_lk.xlsx") # support information about the population
MeasDetected, MeasDead, SupportingInfo = stm.cumulate(data, df)
np.save(basePath + r'\Data\MeasDetected', MeasDetected)
np.save(basePath + r'\Data\MeasDead', MeasDead)
np.save(basePath + r'\Data\SupportingInfo', SupportingInfo)
else:
MeasDetected = np.load(basePath + r'\Data\MeasDetected.npy')
MeasDead = np.load(basePath + r'\Data\MeasDead.npy')
SupportingInfo = np.load(basePath + r'\Data\SupportingInfo.npy', allow_pickle=True)
(IDs, LKs, PopM, PopW, Area, Ages, Gender) = SupportingInfo
# fit,data = stm.DataLoader().get_new_data()
# axes = data.keys()
# datp = data.pivot_table(values=['cases','deaths'], index=['id','day'], aggfunc=np.sum, fill_value=0)
# data_np = datp.to_numpy()
# NumIDs = data['id'].unique().shape
# NumDays = data['day'].unique().shape

# Optionally subsample the districts (every DistrictStride-th one) to keep the
# fit below fast; all district-indexed arrays must be sliced consistently.
ReduceDistricts = True
if ReduceDistricts:
    DistrictStride = 200
    # NOTE(review): the 0:-1 slice drops the final district before striding —
    # confirm this is intentional rather than a plain ::DistrictStride.
    MeasDetected = MeasDetected[:, 0:-1:DistrictStride, :, :]
    PopM = PopM[0:-1:DistrictStride]
    PopW = PopW[0:-1:DistrictStride]
    IDs = IDs[0:-1:DistrictStride]
# Number of simulated time steps (days) for the model fit below.
Tmax = 120
# Build the compartment model: axes are Gender, Age, District and a queued
# "Disease Progression" axis (20 days of infection history).
M = stm.Model()
M.addAxis("Gender", entries=len(Gender) - 1)
M.addAxis("Age", entries=len(Ages))
M.addAxis("District", entries=len(IDs))
M.addAxis("Disease Progression", entries=20, queue=True)
# German population by age bin (in millions); normalised to a distribution.
Pop = 1e6 * np.array([(3.88 + 0.78), 6.62, 2.31 + 2.59 + 3.72 + 15.84, 23.9, 15.49, 7.88, 1.0], stm.CalcFloatStr)
AgeDist = (Pop / np.sum(Pop))
InitAge = M.Axes['Age'].init(AgeDist)
# All populations are normalised by the total population PopSum.
PopSum = np.sum(PopM) + np.sum(PopW)
InitPopulM = M.Axes['District'].init(PopM / PopSum)
InitPopulW = M.Axes['District'].init(PopW / PopSum)
InitPopul = InitPopulM + InitPopulW
MRatio = np.sum(PopM) / PopSum
# States: S susceptible, Sq quarantined susceptible (currently unused in rates).
M.newState(name='S', axesInit={"Age": InitAge, "District": InitPopul, "Gender": [MRatio, 1 - MRatio]})
M.newState(name='Sq', axesInit={"Age": InitAge, "District": InitPopul, "Gender": [MRatio, 1 - MRatio]})
#I0 = M.newVariables({'I0': 0.000055 * InitPopulM}, forcePos=False) # a district dependent variable of initially infected
I0 = M.newVariables({'I0': 0.01 * InitPopulM}, forcePos=False) # a district dependent variable of initially infected
# InitProgression = lambda: I0 * M.Axes['Disease Progression'].initDelta() # variables to fit have to always be packed in lambda functions!
#M.newState(name='I', axesInit={"Disease Progression": InitProgression, "District": None, "Age": None, "Gender": None})
M.newState(name='I', axesInit={"Disease Progression": 0, "District": 0, "Age": 0, "Gender": 0})
T0 = M.newVariables({"T0": 64.5}, forcePos=False) # time at which a delta is injected into the progression
# Seed infections: a Gaussian pulse in time (centred on fitted T0) moves
# I0-weighted susceptibles into the head of the progression queue.
M.addRate('S', 'I', lambda t: I0 * M.initGaussianT0(T0, t), queueDst='Disease Progression', hasTime=True) # When you made it though the queue, you are recovered
# H hospitalised (detected), R undetected recovered, Rd detected recovered.
M.newState(name='H', axesInit={"Disease Progression": 0, "District": 0, "Age": 0, "Gender": 0})
M.newState(name='R', axesInit={"District": 0, "Age": 0, "Gender": 0}) # undetected recovered
M.newState(name='Rd', axesInit={"District": 0, "Age": 0, "Gender": 0}) # detected recovered
ht0 = M.newVariables({'ht0': 3.0}, forcePos=False)
hr = M.newVariables({'hr': 0.02}) # rate of hospitalization
# Hospitalisation probability peaks hr() at progression day ht0 (width 3).
hospitalization = lambda: hr() * M.Axes['Disease Progression'].initGaussian(ht0, 3.0)
influx = M.newVariables({'influx': 0.0001}) # a district dependent variable of initially infected
# infectionRate = lambda I: (I + influx) * M.Var['r0']
r0 = M.newVariables({'r0': 0.11 / InitPopul}, forcePos=True)
M.addRate(('S', 'I'), 'I', r0, queueDst="Disease Progression") # S ==> I[0]
M.addRate('I', 'H', hospitalization) # I[t] -> H[t]
M.addRate('H', 'Rd', 1.0, queueSrc="Disease Progression") # H[t] -> R[t] this is a dequeueing operation and thus the rate needs to be one!
M.addRate('I', 'R', 1.0, queueSrc="Disease Progression") # H[t] -> R[t] this is a dequeueing operation and thus the rate needs to be one!
# Observable compared to the measured data: hospitalised (summed over the
# progression axis) plus detected-recovered.
M.addResult('detected', lambda State: tf.reduce_sum(State['H'], 1) + State['Rd']) # ('I', 'S')
# M.toFit(['r0', 'hr', 'ht0', 'I0'])
# M.toFit(['r0', 'I0'])
M.toFit(['T0', 'r0'])
# M.toFit(['r0'])
# simulated = M.simulate('simulated', {'detected': None}, Tmax=Tmax)
# M.showResults(ylabel='occupancy')
# M.showStates(MinusOne=('S'))
# Optimiser selection: the "if True" branch picks L-BFGS; the dead branch
# keeps the settings for first-order alternatives.
if True:
    otype = "L-BFGS"
    lossScale = 1 # 1e4
    oparam = {"normFac": 'max'}
else:
    # ToDo the local normFac is not yet recognized for the below methods
    lossScale = None
    otype = "nesterov" # "adagrad" "adadelta" "SGD" "nesterov" "adam"
    learnrate = {"nesterov": 1e-10, "adam": 7e-7}
    oparam = {"learning_rate": learnrate[otype]}
# oparam['noiseModel'] = 'Poisson'
oparam['noiseModel'] = 'Gaussian'
# oparam['noiseModel'] = 'ScaledGaussian' # is buggy? Why the NaNs?
NIter = 150
# Fit the model to the (normalised) detected-case measurements and plot.
fittedVars, fittedRes = M.fit({'detected': MeasDetected[:, np.newaxis, :, :, 0:1] / PopSum}, Tmax, otype=otype, oparam=oparam, NIter=NIter, verbose=True, lossScale=lossScale)
M.showResults(ylabel='occupancy', dims=("District"))
M.showStates(MinusOne=('S'))
| [
"fetch_data.DataFetcher",
"StateModeling.Model",
"StateModeling.cumulate",
"tensorflow.reduce_sum",
"numpy.array",
"numpy.sum",
"pandas.read_excel",
"numpy.load",
"numpy.save"
] | [((1521, 1532), 'StateModeling.Model', 'stm.Model', ([], {}), '()\n', (1530, 1532), True, 'import StateModeling as stm\n'), ((376, 427), 'pandas.read_excel', 'pd.read_excel', (["(basePath + '\\\\Examples\\\\bev_lk.xlsx')"], {}), "(basePath + '\\\\Examples\\\\bev_lk.xlsx')\n", (389, 427), True, 'import pandas as pd\n'), ((516, 538), 'StateModeling.cumulate', 'stm.cumulate', (['data', 'df'], {}), '(data, df)\n', (528, 538), True, 'import StateModeling as stm\n'), ((543, 599), 'numpy.save', 'np.save', (["(basePath + '\\\\Data\\\\MeasDetected')", 'MeasDetected'], {}), "(basePath + '\\\\Data\\\\MeasDetected', MeasDetected)\n", (550, 599), True, 'import numpy as np\n'), ((603, 651), 'numpy.save', 'np.save', (["(basePath + '\\\\Data\\\\MeasDead')", 'MeasDead'], {}), "(basePath + '\\\\Data\\\\MeasDead', MeasDead)\n", (610, 651), True, 'import numpy as np\n'), ((655, 715), 'numpy.save', 'np.save', (["(basePath + '\\\\Data\\\\SupportingInfo')", 'SupportingInfo'], {}), "(basePath + '\\\\Data\\\\SupportingInfo', SupportingInfo)\n", (662, 715), True, 'import numpy as np\n'), ((740, 786), 'numpy.load', 'np.load', (["(basePath + '\\\\Data\\\\MeasDetected.npy')"], {}), "(basePath + '\\\\Data\\\\MeasDetected.npy')\n", (747, 786), True, 'import numpy as np\n'), ((801, 843), 'numpy.load', 'np.load', (["(basePath + '\\\\Data\\\\MeasDead.npy')"], {}), "(basePath + '\\\\Data\\\\MeasDead.npy')\n", (808, 843), True, 'import numpy as np\n'), ((864, 931), 'numpy.load', 'np.load', (["(basePath + '\\\\Data\\\\SupportingInfo.npy')"], {'allow_pickle': '(True)'}), "(basePath + '\\\\Data\\\\SupportingInfo.npy', allow_pickle=True)\n", (871, 931), True, 'import numpy as np\n'), ((1724, 1827), 'numpy.array', 'np.array', (['[3.88 + 0.78, 6.62, 2.31 + 2.59 + 3.72 + 15.84, 23.9, 15.49, 7.88, 1.0]', 'stm.CalcFloatStr'], {}), '([3.88 + 0.78, 6.62, 2.31 + 2.59 + 3.72 + 15.84, 23.9, 15.49, 7.88,\n 1.0], stm.CalcFloatStr)\n', (1732, 1827), True, 'import numpy as np\n'), ((1843, 1854), 'numpy.sum', 
'np.sum', (['Pop'], {}), '(Pop)\n', (1849, 1854), True, 'import numpy as np\n'), ((1905, 1917), 'numpy.sum', 'np.sum', (['PopM'], {}), '(PopM)\n', (1911, 1917), True, 'import numpy as np\n'), ((1920, 1932), 'numpy.sum', 'np.sum', (['PopW'], {}), '(PopW)\n', (1926, 1932), True, 'import numpy as np\n'), ((2083, 2095), 'numpy.sum', 'np.sum', (['PopM'], {}), '(PopM)\n', (2089, 2095), True, 'import numpy as np\n'), ((292, 316), 'fetch_data.DataFetcher', 'fetch_data.DataFetcher', ([], {}), '()\n', (314, 316), False, 'import fetch_data\n'), ((4326, 4354), 'tensorflow.reduce_sum', 'tf.reduce_sum', (["State['H']", '(1)'], {}), "(State['H'], 1)\n", (4339, 4354), True, 'import tensorflow as tf\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops
from gpflow import settings
# Module-level dtype pulled from the GPflow configuration so the tensors
# created below match the library's float precision. `jitter_level` is
# exposed for callers; it is not used inside this class.
float_type = settings.float_type
jitter_level = settings.jitter
class EulerMaruyama:
    """Euler-Maruyama SDE integrator built on TensorFlow 1.x graph ops.

    Integrates dy = mu(y, t) dt + sqrt(var(y, t)) dW on a fixed time grid,
    where ``f`` returns the drift ``mu`` and diagonal diffusion ``var``.
    """
    def __init__(self,f,total_time,nsteps):
        # f: callable (y, t) -> (mu, var); ts: equally spaced time grid.
        self.ts = np.linspace(0,total_time,nsteps)
        self.f = f
    def forward(self,y0,save_intermediate=False):
        """Integrate from initial state ``y0`` across the whole time grid.

        Returns ``(y_final, y_path)``; ``y_path`` is the full trajectory
        (with ``y0`` prepended) when ``save_intermediate`` is True, else None.
        """
        time_grid = ops.convert_to_tensor(self.ts, preferred_dtype=float_type, name='t')
        y0 = ops.convert_to_tensor(y0, name='y0')
        # Pair each step's end time with its step size: shape (nsteps-1, 2).
        time_delta_grid = time_grid[1:] - time_grid[:-1]
        time_grid = time_grid[1:]
        time_combined = tf.concat([time_grid[:,None],time_delta_grid[:,None]],axis=1)
        scan_func = self._make_scan_func(self.f)
        if save_intermediate:
            # scan keeps every intermediate state; prepend y0 to the path.
            y_grid = functional_ops.scan(scan_func, time_combined,y0)
            y_s = array_ops.concat([[y0], y_grid], axis=0)
            y_t = y_s[-1,:,:,:]
            return y_t, y_s
        else:
            # foldl only carries the running state, cheaper when the
            # trajectory itself is not needed.
            y_t = functional_ops.foldl(scan_func, time_combined,y0)
            return y_t, None
    def _step_func(self, evol_func, t_and_dt, y):
        # One Euler-Maruyama increment: mu*dt + sqrt(var)*sqrt(dt)*eps.
        t = t_and_dt[0];dt = t_and_dt[1]
        mu,var = evol_func(y, t) #NXD, NXD
        if var.get_shape().ndims == 3:
            # A rank-3 `var` would be a full covariance per point; only a
            # diagonal (element-wise) diffusion is supported here.
            raise NotImplementedError
        dt_cast = math_ops.cast(dt, y.dtype)
        dy = mu*dt_cast + tf.sqrt(dt_cast)*tf.sqrt(var)*tf.random_normal(shape=[tf.shape(y)[0],tf.shape(y)[1]], dtype=y.dtype) #NXD
        return dy
    def _make_scan_func(self, evol_func):
        # Wrap _step_func into the (accumulator, element) signature that
        # tf.scan / tf.foldl expect.
        def scan_func(y, t_and_dt):
            dy = self._step_func(evol_func, t_and_dt, y)
            dy = math_ops.cast(dy, dtype=y.dtype)
            return y + dy
        return scan_func
| [
"tensorflow.shape",
"tensorflow.python.ops.functional_ops.scan",
"tensorflow.concat",
"numpy.linspace",
"tensorflow.sqrt",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.functional_ops.foldl... | [((400, 434), 'numpy.linspace', 'np.linspace', (['(0)', 'total_time', 'nsteps'], {}), '(0, total_time, nsteps)\n', (411, 434), True, 'import numpy as np\n'), ((523, 591), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self.ts'], {'preferred_dtype': 'float_type', 'name': '"""t"""'}), "(self.ts, preferred_dtype=float_type, name='t')\n", (544, 591), False, 'from tensorflow.python.framework import ops\n'), ((605, 641), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['y0'], {'name': '"""y0"""'}), "(y0, name='y0')\n", (626, 641), False, 'from tensorflow.python.framework import ops\n'), ((757, 822), 'tensorflow.concat', 'tf.concat', (['[time_grid[:, None], time_delta_grid[:, None]]'], {'axis': '(1)'}), '([time_grid[:, None], time_delta_grid[:, None]], axis=1)\n', (766, 822), True, 'import tensorflow as tf\n'), ((1429, 1455), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['dt', 'y.dtype'], {}), '(dt, y.dtype)\n', (1442, 1455), False, 'from tensorflow.python.ops import math_ops\n'), ((920, 969), 'tensorflow.python.ops.functional_ops.scan', 'functional_ops.scan', (['scan_func', 'time_combined', 'y0'], {}), '(scan_func, time_combined, y0)\n', (939, 969), False, 'from tensorflow.python.ops import functional_ops\n'), ((987, 1027), 'tensorflow.python.ops.array_ops.concat', 'array_ops.concat', (['[[y0], y_grid]'], {'axis': '(0)'}), '([[y0], y_grid], axis=0)\n', (1003, 1027), False, 'from tensorflow.python.ops import array_ops\n'), ((1120, 1170), 'tensorflow.python.ops.functional_ops.foldl', 'functional_ops.foldl', (['scan_func', 'time_combined', 'y0'], {}), '(scan_func, time_combined, y0)\n', (1140, 1170), False, 'from tensorflow.python.ops import functional_ops\n'), ((1759, 1791), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['dy'], {'dtype': 'y.dtype'}), '(dy, dtype=y.dtype)\n', (1772, 1791), False, 'from tensorflow.python.ops import 
math_ops\n'), ((1482, 1498), 'tensorflow.sqrt', 'tf.sqrt', (['dt_cast'], {}), '(dt_cast)\n', (1489, 1498), True, 'import tensorflow as tf\n'), ((1499, 1511), 'tensorflow.sqrt', 'tf.sqrt', (['var'], {}), '(var)\n', (1506, 1511), True, 'import tensorflow as tf\n'), ((1536, 1547), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (1544, 1547), True, 'import tensorflow as tf\n'), ((1551, 1562), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (1559, 1562), True, 'import tensorflow as tf\n')] |
import numpy as np
class ReplayMemory(object):
    """Fixed-capacity circular buffer of environment transitions.

    Stores (obs, action, reward, next_obs, terminal) tuples for off-policy
    RL training; once full, new transitions overwrite the oldest slots.
    """

    def __init__(self, max_size, obs_dim, act_dim):
        """Allocate storage for at most ``max_size`` transitions.

        Arguments:
            max_size (int): Capacity of the buffer.
            obs_dim (tuple): Shape of a single observation.
            act_dim (int): Dimensionality of a single action.
        """
        self.max_size = int(max_size)
        self.obs = np.zeros((max_size, ) + obs_dim, dtype='float32')
        self.action = np.zeros((max_size, act_dim), dtype='float32')
        self.reward = np.zeros((max_size,), dtype='float32')
        self.terminal = np.zeros((max_size,), dtype='bool')
        self.next_obs = np.zeros((max_size, ) + obs_dim, dtype='float32')
        self._curr_size = 0
        self._curr_pos = 0

    def sample_batch(self, batch_size):
        """Draw ``batch_size`` uniformly random transitions.

        Indices are drawn from ``[0, _curr_size - 301)`` — the most recent
        ~300 slots are deliberately excluded from sampling.
        """
        chosen = np.random.randint(self._curr_size - 300 - 1, size=batch_size)
        return (self.obs[chosen], self.action[chosen], self.reward[chosen],
                self.next_obs[chosen], self.terminal[chosen])

    def append(self, obs, act, reward, next_obs, terminal):
        """Insert one transition, overwriting the oldest slot when full."""
        if self._curr_size < self.max_size:
            self._curr_size += 1
        pos = self._curr_pos
        self.obs[pos] = obs
        self.action[pos] = act
        self.reward[pos] = reward
        self.next_obs[pos] = next_obs
        self.terminal[pos] = terminal
        self._curr_pos = (pos + 1) % self.max_size

    def size(self):
        """Return the number of transitions currently stored."""
        return self._curr_size
| [
"numpy.zeros",
"numpy.random.randint"
] | [((159, 207), 'numpy.zeros', 'np.zeros', (['((max_size,) + obs_dim)'], {'dtype': '"""float32"""'}), "((max_size,) + obs_dim, dtype='float32')\n", (167, 207), True, 'import numpy as np\n'), ((231, 277), 'numpy.zeros', 'np.zeros', (['(max_size, act_dim)'], {'dtype': '"""float32"""'}), "((max_size, act_dim), dtype='float32')\n", (239, 277), True, 'import numpy as np\n'), ((300, 338), 'numpy.zeros', 'np.zeros', (['(max_size,)'], {'dtype': '"""float32"""'}), "((max_size,), dtype='float32')\n", (308, 338), True, 'import numpy as np\n'), ((363, 398), 'numpy.zeros', 'np.zeros', (['(max_size,)'], {'dtype': '"""bool"""'}), "((max_size,), dtype='bool')\n", (371, 398), True, 'import numpy as np\n'), ((423, 471), 'numpy.zeros', 'np.zeros', (['((max_size,) + obs_dim)'], {'dtype': '"""float32"""'}), "((max_size,) + obs_dim, dtype='float32')\n", (431, 471), True, 'import numpy as np\n'), ((590, 651), 'numpy.random.randint', 'np.random.randint', (['(self._curr_size - 300 - 1)'], {'size': 'batch_size'}), '(self._curr_size - 300 - 1, size=batch_size)\n', (607, 651), True, 'import numpy as np\n')] |
import os
import numpy as np
import matplotlib.pyplot as plt
import PIL
import cv2
import scipy.stats
import torch
import torch.nn as nn
from torch import optim
from torch.autograd.variable import Variable
import torch.nn.functional as F
from skimage.util import montage
from time import time
import warnings
warnings.filterwarnings('ignore')  # silence library deprecation noise during training
gpu = torch.cuda.is_available()  # single global switch: tensors/modules move to CUDA when True
class Hps():
    """Container for the default training hyperparameters.

    Locates the first ``.npz`` dataset inside the ``data`` folder and
    exposes all model/optimizer settings as plain attributes.

    Raises:
        FileNotFoundError: If no ``.npz`` file exists in ``data``.
    """
    def __init__(self):
        npz_files = [name for name in os.listdir('data') if name.endswith('.npz')]
        if not npz_files:
            raise FileNotFoundError("No .npz files found in data folder")
        self.data_location = 'data/{}'.format(npz_files[0])
        # Network sizes
        self.enc_hidden_size = 256
        self.dec_hidden_size = 512
        self.Nz = 128
        self.M = 20
        self.dropout = 0.9
        self.batch_size = 100
        # KL annealing / loss weighting
        self.eta_min = 0.01
        self.R = 0.99995
        self.KL_min = 0.2
        self.wKL = 0.5
        # Optimizer
        self.lr = 0.001
        self.lr_decay = 0.9999
        self.min_lr = 0.00001
        self.grad_clip = 1.
        # Sampling / data limits
        self.temperature = 0.4
        self.max_seq_length = 200
hp = Hps()  # module-level hyperparameter object shared by every function/class below
def clean_strokes(strokes):
    """Filter out stroke sequences that are too long or too short.

    Sequences with more than ``hp.max_seq_length`` points or 10 points or
    fewer are dropped; survivors are clipped to [-1000, 1000] and cast to
    float32.

    Arguments:
        strokes (np.array): Sequence of strokes to clean
    """
    kept = []
    for seq in strokes:
        n_points = seq.shape[0]
        if 10 < n_points <= hp.max_seq_length:
            clipped = np.maximum(np.minimum(seq, 1000), -1000)
            kept.append(np.array(clipped, dtype=np.float32))
    return kept
def max_size(data):
    """Return the length of the longest sequence in ``data``.

    Arguments:
        data (np.array): Array of stroke sequences
    """
    return max(len(seq) for seq in data)
def calculate_normalizing_scale_factor(strokes):
    """Compute the standard deviation of all (dx, dy) offsets in the dataset.

    This is the normalizing scale factor described in section 1 (Dataset
    Details) of the supplementary material for "A Neural Representation of
    Sketch Drawings".

    Arguments:
        strokes (np.array): Array of strokes to normalize
    """
    offsets = []
    for seq in strokes:
        for row in range(len(seq)):
            offsets.append(seq[row, 0])
            offsets.append(seq[row, 1])
    return np.std(np.array(offsets))
def normalize(strokes):
    """Scale every (dx, dy) offset in the dataset by the normalizing factor.

    Note: each sequence is modified in place (columns 0 and 1 are divided
    by the scale factor) and the same array objects are collected into the
    returned list.

    Arguments:
        strokes (np.array): Array of strokes to normalize
    """
    scale_factor = calculate_normalizing_scale_factor(strokes)
    normalized = []
    for seq in strokes:
        seq[:, 0:2] /= scale_factor
        normalized.append(seq)
    return normalized
# Load the stroke dataset referenced by the hyperparameters and pre-process
# the training split: drop bad sequences, then scale offsets to unit std.
dataset = np.load(hp.data_location, encoding='latin1')
data = dataset['train']
data = clean_strokes(data)
data = normalize(data)
Nmax = max_size(data)  # longest surviving sequence; batches below are padded to this length
def make_batch(batch_size):
    """Assemble one padded training batch in stroke-5 format.

    Randomly picks ``batch_size`` sequences from the module-level ``data``,
    converts each from stroke-3 (dx, dy, pen-lifted) to the 5-column format
    (dx, dy, p1, p2, p3) and zero-pads it to ``Nmax`` rows.

    Arguments:
        batch_size (int): Size of batch for training

    Returns:
        (Variable of shape (Nmax, batch_size, 5), list of sequence lengths)
    """
    batch_idx = np.random.choice(len(data),batch_size)
    strokes = []
    lengths = []
    for seq in (data[idx] for idx in batch_idx):
        n = len(seq[:, 0])
        padded = np.zeros((Nmax, 5))
        padded[:n, :2] = seq[:, :2]
        # p1 = pen touching paper for all but the last real point.
        padded[:n - 1, 2] = 1 - seq[:-1, 2]
        padded[:n, 3] = seq[:, 2]
        # p3 = end-of-sequence marker from the last real point onward.
        padded[(n - 1):, 4] = 1
        padded[n - 1, 2:4] = 0
        lengths.append(n)
        strokes.append(padded)
    stacked = torch.from_numpy(np.stack(strokes, 1))
    if gpu:
        batch = Variable(stacked.cuda().float())
    else:
        batch = Variable(stacked.float())
    return batch, lengths
def lr_decay(optimizer):
    """Multiply each parameter group's learning rate by ``hp.lr_decay``.

    Groups whose rate has already reached ``hp.min_lr`` are left untouched.

    Arguments:
        optimizer (torch.optim.Optimizer): Pytorch optimizer to decay
    """
    for group in optimizer.param_groups:
        if group['lr'] > hp.min_lr:
            group['lr'] = group['lr'] * hp.lr_decay
    return optimizer
class EncoderRNN(nn.Module):
    """
    Bidirectional-LSTM encoder: maps a stroke-5 batch to a latent code z
    via a reparameterized Gaussian (mu, sigma_hat).
    """
    def __init__(self):
        super(EncoderRNN, self).__init__()
        # 5 input features (dx, dy, p1, p2, p3); bidirectional, so the
        # concatenated final hidden state has size 2 * enc_hidden_size.
        self.lstm = nn.LSTM(5, hp.enc_hidden_size, \
            dropout=hp.dropout, bidirectional=True)
        self.fc_mu = nn.Linear(2*hp.enc_hidden_size, hp.Nz)
        self.fc_sigma = nn.Linear(2*hp.enc_hidden_size, hp.Nz)
        self.train()
    def forward(self, inputs, batch_size, hidden_cell=None):
        """
        Forward pass through encoder.

        Arguments:
            inputs (torch.Tensor): (seq_len, batch, 5) stroke batch
            batch_size (int): Size of batch for model training
            hidden_cell (tuple): Optional initial (hidden, cell) states

        Returns:
            (z, mu, sigma_hat): reparameterized latent sample and the
            Gaussian parameters it was drawn from.
        """
        if hidden_cell is None:
            # Zero-initialize both directions' hidden/cell states.
            if gpu:
                hidden = torch.zeros(2, batch_size, hp.enc_hidden_size).cuda()
                cell = torch.zeros(2, batch_size, hp.enc_hidden_size).cuda()
            else:
                hidden = torch.zeros(2, batch_size, hp.enc_hidden_size)
                cell = torch.zeros(2, batch_size, hp.enc_hidden_size)
            hidden_cell = (hidden, cell)
        _, (hidden,cell) = self.lstm(inputs.float(), hidden_cell)
        # Concatenate the forward and backward final hidden states.
        hidden_forward, hidden_backward = torch.split(hidden,1,0)
        hidden_cat = torch.cat([hidden_forward.squeeze(0), hidden_backward.squeeze(0)],1)
        mu = self.fc_mu(hidden_cat)
        sigma_hat = self.fc_sigma(hidden_cat)
        # sigma_hat is log-variance; exp(sigma_hat/2) is the std dev.
        sigma = torch.exp(sigma_hat/2.)
        z_size = mu.size()
        # Reparameterization trick: z = mu + sigma * N(0, I).
        if gpu:
            N = torch.normal(torch.zeros(z_size),torch.ones(z_size)).cuda()
        else:
            N = torch.normal(torch.zeros(z_size),torch.ones(z_size))
        z = mu + sigma*N
        return z, mu, sigma_hat
class DecoderRNN(nn.Module):
    """
    LSTM decoder: conditioned on the latent code z, emits parameters of a
    GMM over (dx, dy) plus a categorical over the three pen states.
    """
    def __init__(self):
        super(DecoderRNN, self).__init__()
        # fc_hc projects z into the decoder's initial (hidden, cell) pair.
        self.fc_hc = nn.Linear(hp.Nz, 2*hp.dec_hidden_size)
        self.lstm = nn.LSTM(hp.Nz+5, hp.dec_hidden_size, dropout=hp.dropout)
        # Per mixture component: pi, mu_x, mu_y, sigma_x, sigma_y, rho_xy
        # (6*M values) plus 3 pen-state logits.
        self.fc_params = nn.Linear(hp.dec_hidden_size,6*hp.M+3)
    def forward(self, inputs, z, hidden_cell=None):
        """
        Forward pass through decoder.

        Arguments:
            inputs (torch.Tensor): Inputs to the decoder model
            z (torch.Tensor): Latent code produced by the encoder
            hidden_cell (tuple): Optional (hidden, cell) carried between steps

        Returns:
            GMM parameters (pi, mu_x, mu_y, sigma_x, sigma_y, rho_xy), the
            pen-state distribution q, and the LSTM (hidden, cell) states.
        """
        if hidden_cell is None:
            # Initial states come from z via a tanh projection.
            hidden,cell = torch.split(F.tanh(self.fc_hc(z)),hp.dec_hidden_size,1)
            hidden_cell = (hidden.unsqueeze(0).contiguous(), cell.unsqueeze(0).contiguous())
        outputs,(hidden,cell) = self.lstm(inputs, hidden_cell)
        # Training uses all timesteps; generation only the last hidden state.
        if self.training:
            y = self.fc_params(outputs.view(-1, hp.dec_hidden_size))
        else:
            y = self.fc_params(hidden.view(-1, hp.dec_hidden_size))
        params = torch.split(y,6,1)
        params_mixture = torch.stack(params[:-1])
        params_pen = params[-1]
        pi,mu_x,mu_y,sigma_x,sigma_y,rho_xy = torch.split(params_mixture,1,2)
        if self.training:
            len_out = Nmax+1
        else:
            len_out = 1
        # NOTE(review): F.softmax is called without an explicit `dim`,
        # relying on the (deprecated) implicit dimension choice of older
        # torch versions — confirm before upgrading torch.
        pi = F.softmax(pi.transpose(0,1).squeeze()).view(len_out,-1,hp.M)
        # sigmas are emitted as logs; exp() makes them positive.
        sigma_x = torch.exp(sigma_x.transpose(0,1).squeeze()).view(len_out,-1,hp.M)
        sigma_y = torch.exp(sigma_y.transpose(0,1).squeeze()).view(len_out,-1,hp.M)
        # tanh keeps the correlation in (-1, 1).
        rho_xy = torch.tanh(rho_xy.transpose(0,1).squeeze()).view(len_out,-1,hp.M)
        mu_x = mu_x.transpose(0,1).squeeze().contiguous().view(len_out,-1,hp.M)
        mu_y = mu_y.transpose(0,1).squeeze().contiguous().view(len_out,-1,hp.M)
        q = F.softmax(params_pen).view(len_out,-1,3)
        return pi,mu_x,mu_y,sigma_x,sigma_y,rho_xy,q,hidden,cell
class Model():
    """
    Full VAE model (Encoder + Decoder) with its optimizers, losses,
    checkpointing and conditional generation.
    """
    def __init__(self):
        # Build both halves, on CUDA when the module-level `gpu` flag is set.
        if gpu:
            self.encoder = EncoderRNN().cuda()
            self.decoder = DecoderRNN().cuda()
        else:
            self.encoder = EncoderRNN()
            self.decoder = DecoderRNN()
        self.encoder_optimizer = optim.Adam(self.encoder.parameters(), hp.lr)
        self.decoder_optimizer = optim.Adam(self.decoder.parameters(), hp.lr)
        # Annealing weight applied to the KL term (updated in train()).
        self.eta_step = hp.eta_min
    def make_target(self, batch, lengths):
        """
        Create training targets and the padding mask from a batch.

        Arguments:
            batch (torch.Tensor): Batch to create targets from
            lengths (list): lengths of each of the inputs

        Returns:
            (mask, dx, dy, p): mask zeroes out padded timesteps; dx/dy are
            the offsets replicated across the M mixture components; p holds
            the three pen-state targets.
        """
        # Append an end-of-sequence row [0,0,0,0,1] to every sequence.
        if gpu:
            eos = torch.stack([torch.Tensor([0,0,0,0,1])]*batch.size()[1]).cuda().unsqueeze(0)
        else:
            eos = torch.stack([torch.Tensor([0,0,0,0,1])]*batch.size()[1]).unsqueeze(0)
        batch = torch.cat([batch, eos], 0)
        mask = torch.zeros(Nmax+1, batch.size()[1])
        for indice,length in enumerate(lengths):
            mask[:length,indice] = 1
        if gpu:
            mask = mask.cuda()
        # Replicate offsets across mixture components for the GMM loss.
        dx = torch.stack([batch.data[:,:,0]]*hp.M,2)
        dy = torch.stack([batch.data[:,:,1]]*hp.M,2)
        p1 = batch.data[:,:,2]
        p2 = batch.data[:,:,3]
        p3 = batch.data[:,:,4]
        p = torch.stack([p1,p2,p3],2)
        return mask,dx,dy,p
    def train(self, iteration):
        """
        Run one optimization step (forward, loss, backward, clip, step).

        Arguments:
            iteration (int): The current iteration number
        """
        self.encoder.train()
        self.decoder.train()
        iteration += 1
        batch, lengths = make_batch(hp.batch_size)
        z, self.mu, self.sigma = self.encoder(batch, hp.batch_size)
        # Prepend the start-of-sequence token [0,0,1,0,0] to every sequence.
        if gpu:
            sos = torch.stack([torch.Tensor([0,0,1,0,0])]*hp.batch_size).cuda().unsqueeze(0)
        else:
            sos = torch.stack([torch.Tensor([0,0,1,0,0])]*hp.batch_size).unsqueeze(0)
        batch_init = torch.cat([sos, batch],0)
        # Feed z alongside the stroke input at every timestep.
        z_stack = torch.stack([z]*(Nmax+1))
        inputs = torch.cat([batch_init, z_stack],2)
        self.pi, self.mu_x, self.mu_y, self.sigma_x, self.sigma_y, \
        self.rho_xy, self.q, _, _ = self.decoder(inputs, z)
        mask,dx,dy,p = self.make_target(batch, lengths)
        self.encoder_optimizer.zero_grad()
        self.decoder_optimizer.zero_grad()
        # NOTE(review): this assigns the same constant every call; the
        # Sketch-RNN paper anneals eta as 1-(1-eta_min)*R^step — confirm
        # whether R was meant to be exponentiated per step.
        self.eta_step = 1-(1-hp.eta_min)*hp.R
        LKL = self.kullback_leibler_loss()
        LR = self.reconstruction_loss(mask,dx,dy,p)
        loss = LR + LKL
        loss.backward()
        nn.utils.clip_grad_norm(self.encoder.parameters(), hp.grad_clip)
        nn.utils.clip_grad_norm(self.decoder.parameters(), hp.grad_clip)
        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        # `iteration % 1` is always 0, so the LR decays on every step.
        if not iteration % 1:
            self.encoder_optimizer = lr_decay(self.encoder_optimizer)
            self.decoder_optimizer = lr_decay(self.decoder_optimizer)
        if not iteration % 1000:
            print(f'Iteration: {iteration}\n{"-" * 30}\nFull loss: {loss.item() :.3f}\nReconstruction loss: {LR.item() :.3f}\nKL loss: {LKL.item() :.3f}\n')
            self.save(iteration)
    def bivariate_normal_pdf(self, dx, dy):
        """
        Bivariate normal pdf of the GMM components at the target offsets.

        Arguments:
            dx (torch.Tensor): Delta x offset term to parameterize the bivariate normal distribution
            dy (torch.Tensor): Delta y offset term to parameterize the bivariate normal distribution
        """
        z_x = ((dx-self.mu_x)/self.sigma_x)**2
        z_y = ((dy-self.mu_y)/self.sigma_y)**2
        z_xy = (dx-self.mu_x)*(dy-self.mu_y)/(self.sigma_x*self.sigma_y)
        z = z_x + z_y -2*self.rho_xy*z_xy
        exp = torch.exp(-z/(2*(1-self.rho_xy**2)))
        norm = 2*np.pi*self.sigma_x*self.sigma_y*torch.sqrt(1-self.rho_xy**2)
        return exp/norm
    def reconstruction_loss(self, mask, dx, dy, p):
        """
        Reconstruction loss: masked GMM log-likelihood of the offsets (LS)
        plus cross-entropy of the pen states (LP).

        Arguments:
            mask (torch.Tensor): Mask for LS portion of reconstruction loss
            dx (torch.Tensor): Delta x that parameterizes the bivariate normal distribution
            dy (torch.Tensor): Delta y that parameterizes the bivariate normal distribution
            p (torch.Tensor): Pen state terms for LP portion of reconstruction loss
        """
        pdf = self.bivariate_normal_pdf(dx, dy)
        # 1e-5 guards the log against zero-probability mixtures.
        LS = -torch.sum(mask*torch.log(1e-5+torch.sum(self.pi * pdf, 2)))\
            /float(Nmax*hp.batch_size)
        LP = -torch.sum(p*torch.log(self.q))/float(Nmax*hp.batch_size)
        return LS+LP
    def kullback_leibler_loss(self):
        """
        KL divergence of the approximate posterior from N(0, I), floored at
        hp.KL_min and weighted by wKL * eta_step.
        """
        LKL = -0.5*torch.sum(1+self.sigma-self.mu**2-torch.exp(self.sigma))\
            /float(hp.Nz*hp.batch_size)
        if gpu:
            KL_min = Variable(torch.Tensor([hp.KL_min]).cuda()).detach()
        else:
            KL_min = Variable(torch.Tensor([hp.KL_min])).detach()
        return hp.wKL*self.eta_step * torch.max(LKL,KL_min)
    def save(self, iteration):
        """
        Save state dicts of both halves under checkpoints/.

        Arguments:
            iteration (int): Iteration number
        """
        torch.save(self.encoder.state_dict(), 'checkpoints/encoderRNN_iter_{}.pth'.format(iteration))
        torch.save(self.decoder.state_dict(), 'checkpoints/decoderRNN_iter_{}.pth'.format(iteration))
    def load(self, encoder_name, decoder_name):
        """
        Load saved encoder/decoder weights from .pth files.

        Arguments:
            encoder_name (str): Path to the saved encoder weights
            decoder_name (str): Path to the saved decoder weights
        """
        saved_encoder = torch.load(encoder_name)
        saved_decoder = torch.load(decoder_name)
        self.encoder.load_state_dict(saved_encoder)
        self.decoder.load_state_dict(saved_decoder)
    def conditional_generation(self, iteration):
        """
        Encode one random training sequence and autoregressively sample a
        new drawing conditioned on its latent code; saves it as an image.

        Arguments:
            iteration (int): Iteration number
        """
        batch,lengths = make_batch(1)
        self.encoder.train(False)
        self.decoder.train(False)
        z, _, _ = self.encoder(batch, 1)
        if gpu:
            sos = Variable(torch.Tensor([0,0,1,0,0]).view(1,1,-1).cuda())
        else:
            sos = Variable(torch.Tensor([0,0,1,0,0]).view(1,1,-1))
        s = sos
        seq_x = []
        seq_y = []
        seq_z = []
        hidden_cell = None
        for i in range(Nmax):
            input = torch.cat([s,z.unsqueeze(0)],2)
            self.pi, self.mu_x, self.mu_y, self.sigma_x, self.sigma_y, \
            self.rho_xy, self.q, hidden, cell = \
                self.decoder(input, z, hidden_cell)
            hidden_cell = (hidden, cell)
            s, dx, dy, pen_down, eos = self.sample_next_state()
            seq_x.append(dx)
            seq_y.append(dy)
            seq_z.append(pen_down)
            if eos:
                # The model emitted the end-of-sequence pen state.
                break
        # Cumulative sums turn the (dx, dy) offsets into absolute coords.
        x_sample = np.cumsum(seq_x, 0)
        y_sample = np.cumsum(seq_y, 0)
        z_sample = np.array(seq_z)
        sequence = np.stack([x_sample,y_sample,z_sample]).T
        make_image(sequence, iteration)
    def sample_next_state(self):
        """
        Sample the next stroke-5 state from the decoder's current output
        distribution (temperature-adjusted GMM + pen-state categorical).
        """
        def adjust_temp(pi_pdf):
            """
            Adjust temperature to control randomness
            Arguments:
                pi_pdf (torch.Tensor): Probability density function
            """
            pi_pdf = np.log(pi_pdf)/hp.temperature
            pi_pdf -= pi_pdf.max()
            pi_pdf = np.exp(pi_pdf)
            pi_pdf /= pi_pdf.sum()
            return pi_pdf
        # Pick a mixture component, then a pen state.
        pi = self.pi.data[0,0,:].cpu().numpy()
        pi = adjust_temp(pi)
        pi_idx = np.random.choice(hp.M, p=pi)
        q = self.q.data[0,0,:].cpu().numpy()
        q = adjust_temp(q)
        q_idx = np.random.choice(3, p=q)
        mu_x = self.mu_x.data[0,0,pi_idx]
        mu_y = self.mu_y.data[0,0,pi_idx]
        sigma_x = self.sigma_x.data[0,0,pi_idx]
        sigma_y = self.sigma_y.data[0,0,pi_idx]
        rho_xy = self.rho_xy.data[0,0,pi_idx]
        x,y = sample_bivariate_normal(mu_x,mu_y,sigma_x,sigma_y,rho_xy,greedy=False)
        next_state = torch.zeros(5)
        next_state[0] = x
        next_state[1] = y
        next_state[q_idx+2] = 1
        # Returns (state, dx, dy, pen_down?, end_of_sequence?).
        if gpu:
            return Variable(next_state.cuda()).view(1,1,-1),x,y,q_idx==1,q_idx==2
        else:
            return Variable(next_state).view(1,1,-1),x,y,q_idx==1,q_idx==2
def sample_bivariate_normal(mu_x,mu_y,sigma_x,sigma_y,rho_xy, greedy=False):
    """
    Draw a single (x, y) point from the bivariate normal parameterized by
    the decoder's outputs.

    Arguments:
        mu_x (torch.Tensor): Mean x for parameterizing bivariate normal distribution
        mu_y (torch.Tensor): Mean y for parameterizing bivariate normal distribution
        sigma_x (torch.Tensor): Standard deviation x for parameterizing bivariate normal distribution
        sigma_y (torch.Tensor): Standard deviation y for parameterizing bivariate normal distribution
        rho_xy (torch.Tensor): Correlation parameter for bivariate normal distribution
        greedy (boolean): When True, skip sampling and return the means
    """
    if greedy:
        return mu_x,mu_y
    mean = [mu_x, mu_y]
    # Scale the spread by sqrt(temperature) to control randomness.
    sigma_x *= np.sqrt(hp.temperature)
    sigma_y *= np.sqrt(hp.temperature)
    off_diag = rho_xy * sigma_x * sigma_y
    cov = [[sigma_x * sigma_x, off_diag],
           [off_diag, sigma_y * sigma_y]]
    sample = np.random.multivariate_normal(mean, cov, 1)
    return sample[0][0], sample[0][1]
def make_image(sequence, iteration, name='generated_'):
    """
    Plot strokes as image and save as JPEG
    Arguments:
        sequence (np.array): Numpy array of strokes from conditional generation
        iteration (int): Iteration number
        name (str): Prefix to use when saving generated image
    """
    # Split the absolute-coordinate sequence at every pen-lift (column 2 > 0).
    strokes = np.split(sequence, np.where(sequence[:,2]>0)[0]+1)
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    plt.axis('off')
    for s in strokes:
        # y is negated so the drawing is not rendered upside-down.
        plt.plot(s[:,0],-s[:,1])
    canvas = plt.get_current_fig_manager().canvas
    canvas.draw()
    # Rasterize the matplotlib canvas straight into a PIL image.
    # NOTE(review): canvas.tostring_rgb() was removed in newer matplotlib;
    # this relies on an older Agg backend API — confirm before upgrading.
    pil_image = PIL.Image.frombytes('RGB', canvas.get_width_height(),
                    canvas.tostring_rgb())
    name = 'style_transfer/contents/' + name + str(iteration) + '.jpg'
    pil_image.save(name,"JPEG")
    plt.close("all")
# Deprecated
def stitch_images_old(directory='assets', width=480, length=640):
    """
    Stitch generated images together in a grid (legacy version).

    Arguments:
        directory (str): Directory where generated images are located
        width (int): Width of images
        length (int): Length of images
    """
    generated = [f for f in os.listdir(directory) if 'generated' in f]
    side = int(len(generated) ** 0.5)
    grid = np.zeros((width * side, length * side, 3))
    row, col = 0, 0
    for fname in generated:
        # Fill column by column: wrap to the top when a column is full.
        if row == grid.shape[0]:
            row = 0
            col += length
        grid[row: row + width, col: col + length, :] = plt.imread(os.path.join(directory, fname))
        row += width
    return grid
def stitch_images(directory='assets', out_dir='style_transfer/data'):
    """
    Stitch nine generated images together in a 3x3 grid and write the
    result to ``out_dir`` as stitched_img.jpg.

    Arguments:
        directory (str): Directory where generated images are located
        out_dir (str): Directory the stitched JPEG is written to

    Raises:
        ValueError: If ``directory`` does not contain exactly nine
            generated images.
    """
    img_paths = [f for f in os.listdir(directory) if 'generated' in f]
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if len(img_paths) != 9:
        raise ValueError('expected exactly 9 generated images, found {}'.format(len(img_paths)))
    raw_arr = [plt.imread(os.path.join(directory, im)) for im in img_paths]
    raw_arr = np.stack(raw_arr, axis=0)
    stitched = montage(raw_arr, grid_shape=(3, 3), multichannel=True)
    cv2.imwrite(os.path.join(out_dir, 'stitched_img.jpg'), stitched)
if __name__=="__main__":
start = time()
model = Model()
iters = 10000
print("Starting training run...\n")
for iteration in range(iters):
model.train(iteration)
model.load('checkpoints/encoderRNN_iter_{}.pth'.format(iters),'checkpoints/decoderRNN_iter_{}.pth'.format(iters))
print("Generating images...\n")
for i in range(9):
model.conditional_generation(i)
print(f'Finished model training in {(time() - start) / 60 :.3f} minutes')
| [
"numpy.sqrt",
"torch.max",
"torch.sqrt",
"numpy.log",
"torch.exp",
"numpy.array",
"skimage.util.montage",
"torch.cuda.is_available",
"torch.sum",
"torch.nn.functional.softmax",
"os.listdir",
"torch.nn.LSTM",
"numpy.where",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.sta... | [((311, 344), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (334, 344), False, 'import warnings\n'), ((352, 377), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (375, 377), False, 'import torch\n'), ((2792, 2836), 'numpy.load', 'np.load', (['hp.data_location'], {'encoding': '"""latin1"""'}), "(hp.data_location, encoding='latin1')\n", (2799, 2836), True, 'import numpy as np\n'), ((2374, 2388), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2382, 2388), True, 'import numpy as np\n'), ((2400, 2412), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (2406, 2412), True, 'import numpy as np\n'), ((17342, 17365), 'numpy.sqrt', 'np.sqrt', (['hp.temperature'], {}), '(hp.temperature)\n', (17349, 17365), True, 'import numpy as np\n'), ((17381, 17404), 'numpy.sqrt', 'np.sqrt', (['hp.temperature'], {}), '(hp.temperature)\n', (17388, 17404), True, 'import numpy as np\n'), ((17583, 17626), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', '(1)'], {}), '(mean, cov, 1)\n', (17612, 17626), True, 'import numpy as np\n'), ((18036, 18048), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18046, 18048), True, 'import matplotlib.pyplot as plt\n'), ((18084, 18099), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (18092, 18099), True, 'import matplotlib.pyplot as plt\n'), ((18440, 18456), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (18449, 18456), True, 'import matplotlib.pyplot as plt\n'), ((19594, 19619), 'numpy.stack', 'np.stack', (['raw_arr'], {'axis': '(0)'}), '(raw_arr, axis=0)\n', (19602, 19619), True, 'import numpy as np\n'), ((19635, 19689), 'skimage.util.montage', 'montage', (['raw_arr'], {'grid_shape': '(3, 3)', 'multichannel': '(True)'}), '(raw_arr, grid_shape=(3, 3), multichannel=True)\n', (19642, 19689), False, 'from skimage.util import montage\n'), ((19801, 19807), 'time.time', 
'time', ([], {}), '()\n', (19805, 19807), False, 'from time import time\n'), ((3321, 3340), 'numpy.zeros', 'np.zeros', (['(Nmax, 5)'], {}), '((Nmax, 5))\n', (3329, 3340), True, 'import numpy as np\n'), ((4322, 4392), 'torch.nn.LSTM', 'nn.LSTM', (['(5)', 'hp.enc_hidden_size'], {'dropout': 'hp.dropout', 'bidirectional': '(True)'}), '(5, hp.enc_hidden_size, dropout=hp.dropout, bidirectional=True)\n', (4329, 4392), True, 'import torch.nn as nn\n'), ((4428, 4468), 'torch.nn.Linear', 'nn.Linear', (['(2 * hp.enc_hidden_size)', 'hp.Nz'], {}), '(2 * hp.enc_hidden_size, hp.Nz)\n', (4437, 4468), True, 'import torch.nn as nn\n'), ((4491, 4531), 'torch.nn.Linear', 'nn.Linear', (['(2 * hp.enc_hidden_size)', 'hp.Nz'], {}), '(2 * hp.enc_hidden_size, hp.Nz)\n', (4500, 4531), True, 'import torch.nn as nn\n'), ((5406, 5431), 'torch.split', 'torch.split', (['hidden', '(1)', '(0)'], {}), '(hidden, 1, 0)\n', (5417, 5431), False, 'import torch\n'), ((5618, 5644), 'torch.exp', 'torch.exp', (['(sigma_hat / 2.0)'], {}), '(sigma_hat / 2.0)\n', (5627, 5644), False, 'import torch\n'), ((6068, 6108), 'torch.nn.Linear', 'nn.Linear', (['hp.Nz', '(2 * hp.dec_hidden_size)'], {}), '(hp.Nz, 2 * hp.dec_hidden_size)\n', (6077, 6108), True, 'import torch.nn as nn\n'), ((6127, 6185), 'torch.nn.LSTM', 'nn.LSTM', (['(hp.Nz + 5)', 'hp.dec_hidden_size'], {'dropout': 'hp.dropout'}), '(hp.Nz + 5, hp.dec_hidden_size, dropout=hp.dropout)\n', (6134, 6185), True, 'import torch.nn as nn\n'), ((6209, 6252), 'torch.nn.Linear', 'nn.Linear', (['hp.dec_hidden_size', '(6 * hp.M + 3)'], {}), '(hp.dec_hidden_size, 6 * hp.M + 3)\n', (6218, 6252), True, 'import torch.nn as nn\n'), ((7051, 7071), 'torch.split', 'torch.split', (['y', '(6)', '(1)'], {}), '(y, 6, 1)\n', (7062, 7071), False, 'import torch\n'), ((7095, 7119), 'torch.stack', 'torch.stack', (['params[:-1]'], {}), '(params[:-1])\n', (7106, 7119), False, 'import torch\n'), ((7200, 7233), 'torch.split', 'torch.split', (['params_mixture', '(1)', '(2)'], {}), 
'(params_mixture, 1, 2)\n', (7211, 7233), False, 'import torch\n'), ((8927, 8953), 'torch.cat', 'torch.cat', (['[batch, eos]', '(0)'], {}), '([batch, eos], 0)\n', (8936, 8953), False, 'import torch\n'), ((9152, 9196), 'torch.stack', 'torch.stack', (['([batch.data[:, :, 0]] * hp.M)', '(2)'], {}), '([batch.data[:, :, 0]] * hp.M, 2)\n', (9163, 9196), False, 'import torch\n'), ((9205, 9249), 'torch.stack', 'torch.stack', (['([batch.data[:, :, 1]] * hp.M)', '(2)'], {}), '([batch.data[:, :, 1]] * hp.M, 2)\n', (9216, 9249), False, 'import torch\n'), ((9350, 9378), 'torch.stack', 'torch.stack', (['[p1, p2, p3]', '(2)'], {}), '([p1, p2, p3], 2)\n', (9361, 9378), False, 'import torch\n'), ((10006, 10032), 'torch.cat', 'torch.cat', (['[sos, batch]', '(0)'], {}), '([sos, batch], 0)\n', (10015, 10032), False, 'import torch\n'), ((10050, 10079), 'torch.stack', 'torch.stack', (['([z] * (Nmax + 1))'], {}), '([z] * (Nmax + 1))\n', (10061, 10079), False, 'import torch\n'), ((10093, 10128), 'torch.cat', 'torch.cat', (['[batch_init, z_stack]', '(2)'], {}), '([batch_init, z_stack], 2)\n', (10102, 10128), False, 'import torch\n'), ((11789, 11833), 'torch.exp', 'torch.exp', (['(-z / (2 * (1 - self.rho_xy ** 2)))'], {}), '(-z / (2 * (1 - self.rho_xy ** 2)))\n', (11798, 11833), False, 'import torch\n'), ((13805, 13829), 'torch.load', 'torch.load', (['encoder_name'], {}), '(encoder_name)\n', (13815, 13829), False, 'import torch\n'), ((13854, 13878), 'torch.load', 'torch.load', (['decoder_name'], {}), '(decoder_name)\n', (13864, 13878), False, 'import torch\n'), ((15100, 15119), 'numpy.cumsum', 'np.cumsum', (['seq_x', '(0)'], {}), '(seq_x, 0)\n', (15109, 15119), True, 'import numpy as np\n'), ((15139, 15158), 'numpy.cumsum', 'np.cumsum', (['seq_y', '(0)'], {}), '(seq_y, 0)\n', (15148, 15158), True, 'import numpy as np\n'), ((15178, 15193), 'numpy.array', 'np.array', (['seq_z'], {}), '(seq_z)\n', (15186, 15193), True, 'import numpy as np\n'), ((15814, 15842), 'numpy.random.choice', 
'np.random.choice', (['hp.M'], {'p': 'pi'}), '(hp.M, p=pi)\n', (15830, 15842), True, 'import numpy as np\n'), ((15931, 15955), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'p': 'q'}), '(3, p=q)\n', (15947, 15955), True, 'import numpy as np\n'), ((16288, 16302), 'torch.zeros', 'torch.zeros', (['(5)'], {}), '(5)\n', (16299, 16302), False, 'import torch\n'), ((18130, 18157), 'matplotlib.pyplot.plot', 'plt.plot', (['s[:, 0]', '(-s[:, 1])'], {}), '(s[:, 0], -s[:, 1])\n', (18138, 18157), True, 'import matplotlib.pyplot as plt\n'), ((18168, 18197), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (18195, 18197), True, 'import matplotlib.pyplot as plt\n'), ((19706, 19747), 'os.path.join', 'os.path.join', (['out_dir', '"""stitched_img.jpg"""'], {}), "(out_dir, 'stitched_img.jpg')\n", (19718, 19747), False, 'import os\n'), ((1533, 1554), 'numpy.minimum', 'np.minimum', (['seq', '(1000)'], {}), '(seq, 1000)\n', (1543, 1554), True, 'import numpy as np\n'), ((1573, 1595), 'numpy.maximum', 'np.maximum', (['seq', '(-1000)'], {}), '(seq, -1000)\n', (1583, 1595), True, 'import numpy as np\n'), ((1614, 1645), 'numpy.array', 'np.array', (['seq'], {'dtype': 'np.float32'}), '(seq, dtype=np.float32)\n', (1622, 1645), True, 'import numpy as np\n'), ((11875, 11907), 'torch.sqrt', 'torch.sqrt', (['(1 - self.rho_xy ** 2)'], {}), '(1 - self.rho_xy ** 2)\n', (11885, 11907), False, 'import torch\n'), ((13129, 13151), 'torch.max', 'torch.max', (['LKL', 'KL_min'], {}), '(LKL, KL_min)\n', (13138, 13151), False, 'import torch\n'), ((15213, 15253), 'numpy.stack', 'np.stack', (['[x_sample, y_sample, z_sample]'], {}), '([x_sample, y_sample, z_sample])\n', (15221, 15253), True, 'import numpy as np\n'), ((15644, 15658), 'numpy.exp', 'np.exp', (['pi_pdf'], {}), '(pi_pdf)\n', (15650, 15658), True, 'import numpy as np\n'), ((18781, 18802), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (18791, 18802), False, 'import os\n'), ((19113, 
19141), 'os.path.join', 'os.path.join', (['directory', 'img'], {}), '(directory, img)\n', (19125, 19141), False, 'import os\n'), ((19430, 19451), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (19440, 19451), False, 'import os\n'), ((19530, 19557), 'os.path.join', 'os.path.join', (['directory', 'im'], {}), '(directory, im)\n', (19542, 19557), False, 'import os\n'), ((517, 535), 'os.listdir', 'os.listdir', (['"""data"""'], {}), "('data')\n", (527, 535), False, 'import os\n'), ((5140, 5186), 'torch.zeros', 'torch.zeros', (['(2)', 'batch_size', 'hp.enc_hidden_size'], {}), '(2, batch_size, hp.enc_hidden_size)\n', (5151, 5186), False, 'import torch\n'), ((5210, 5256), 'torch.zeros', 'torch.zeros', (['(2)', 'batch_size', 'hp.enc_hidden_size'], {}), '(2, batch_size, hp.enc_hidden_size)\n', (5221, 5256), False, 'import torch\n'), ((5804, 5823), 'torch.zeros', 'torch.zeros', (['z_size'], {}), '(z_size)\n', (5815, 5823), False, 'import torch\n'), ((5824, 5842), 'torch.ones', 'torch.ones', (['z_size'], {}), '(z_size)\n', (5834, 5842), False, 'import torch\n'), ((7858, 7879), 'torch.nn.functional.softmax', 'F.softmax', (['params_pen'], {}), '(params_pen)\n', (7867, 7879), True, 'import torch.nn.functional as F\n'), ((15558, 15572), 'numpy.log', 'np.log', (['pi_pdf'], {}), '(pi_pdf)\n', (15564, 15572), True, 'import numpy as np\n'), ((17994, 18022), 'numpy.where', 'np.where', (['(sequence[:, 2] > 0)'], {}), '(sequence[:, 2] > 0)\n', (18002, 18022), True, 'import numpy as np\n'), ((3770, 3790), 'numpy.stack', 'np.stack', (['strokes', '(1)'], {}), '(strokes, 1)\n', (3778, 3790), True, 'import numpy as np\n'), ((4966, 5012), 'torch.zeros', 'torch.zeros', (['(2)', 'batch_size', 'hp.enc_hidden_size'], {}), '(2, batch_size, hp.enc_hidden_size)\n', (4977, 5012), False, 'import torch\n'), ((5043, 5089), 'torch.zeros', 'torch.zeros', (['(2)', 'batch_size', 'hp.enc_hidden_size'], {}), '(2, batch_size, hp.enc_hidden_size)\n', (5054, 5089), False, 'import torch\n'), 
((5714, 5733), 'torch.zeros', 'torch.zeros', (['z_size'], {}), '(z_size)\n', (5725, 5733), False, 'import torch\n'), ((5734, 5752), 'torch.ones', 'torch.ones', (['z_size'], {}), '(z_size)\n', (5744, 5752), False, 'import torch\n'), ((12608, 12625), 'torch.log', 'torch.log', (['self.q'], {}), '(self.q)\n', (12617, 12625), False, 'import torch\n'), ((12858, 12879), 'torch.exp', 'torch.exp', (['self.sigma'], {}), '(self.sigma)\n', (12867, 12879), False, 'import torch\n'), ((13055, 13080), 'torch.Tensor', 'torch.Tensor', (['[hp.KL_min]'], {}), '([hp.KL_min])\n', (13067, 13080), False, 'import torch\n'), ((14436, 14465), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 1, 0, 0]'], {}), '([0, 0, 1, 0, 0])\n', (14448, 14465), False, 'import torch\n'), ((16518, 16538), 'torch.autograd.variable.Variable', 'Variable', (['next_state'], {}), '(next_state)\n', (16526, 16538), False, 'from torch.autograd.variable import Variable\n'), ((20210, 20216), 'time.time', 'time', ([], {}), '()\n', (20214, 20216), False, 'from time import time\n'), ((3681, 3701), 'numpy.stack', 'np.stack', (['strokes', '(1)'], {}), '(strokes, 1)\n', (3689, 3701), True, 'import numpy as np\n'), ((8854, 8883), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 1])\n', (8866, 8883), False, 'import torch\n'), ((9930, 9959), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 1, 0, 0]'], {}), '([0, 0, 1, 0, 0])\n', (9942, 9959), False, 'import torch\n'), ((12512, 12539), 'torch.sum', 'torch.sum', (['(self.pi * pdf)', '(2)'], {}), '(self.pi * pdf, 2)\n', (12521, 12539), False, 'import torch\n'), ((12968, 12993), 'torch.Tensor', 'torch.Tensor', (['[hp.KL_min]'], {}), '([hp.KL_min])\n', (12980, 12993), False, 'import torch\n'), ((14348, 14377), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 1, 0, 0]'], {}), '([0, 0, 1, 0, 0])\n', (14360, 14377), False, 'import torch\n'), ((8745, 8774), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 1])\n', (8757, 8774), False, 'import torch\n'), 
((9823, 9852), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 1, 0, 0]'], {}), '([0, 0, 1, 0, 0])\n', (9835, 9852), False, 'import torch\n')] |
# --------------
# Importing header files
import numpy as np

# Path of the file has been stored in variable called 'path'
# NOTE(review): `path` is injected by the hosting platform before this
# script runs; it is not defined anywhere in this file.
data=np.genfromtxt(path, delimiter=",", skip_header=1)

#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]

#Code starts here
# Append the new record as one extra row to the census table.
census = np.concatenate((data, new_record))
# --------------
#Code starts here
# Summary statistics of the age column (column 0).
age = census[:,0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
# --------------
#Code starts here
# Split the table by race code (column 2, values 0-4) and find the
# least-represented race (argmin gives its code).
race_0 = census[census[:,2] == 0]
race_1 = census[census[:,2] == 1]
race_2 = census[census[:,2] == 2]
race_3 = census[census[:,2] == 3]
race_4 = census[census[:,2] == 4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
all_len = np.array([len_0,len_1,len_2,len_3, len_4])
minority_race = np.argmin(all_len)
print(minority_race)
# --------------
#Code starts here
# Average weekly working hours (column 6) of citizens older than 60.
senior_citizens = census[census[:,0] > 60]
working_hours_sum = np.sum(senior_citizens[:,6])
senior_citizens_len = len(senior_citizens)
# NOTE(review): working_hours_sum is already a scalar, so np.mean() here is
# a no-op; the division by the count is what produces the average.
avg_working_hours = np.mean(working_hours_sum)/senior_citizens_len
print("Avg. Working Hours: ",avg_working_hours)
# --------------
#Code starts here
# Mean of the income indicator (last column, 0/1) grouped by years of
# education (column 1), i.e. the fraction earning >50k in each group.
high = census[census[:,1] > 10]
low = census[census[:,1] <= 10]
avg_pay_high = np.mean(high[:,-1])
avg_pay_low = np.mean(low[:,-1])
print("% of people having with education yrs > 10 income > 50k: ",avg_pay_high)
print("% of people having with education yrs <= 10 income > 50k: ",avg_pay_low)
| [
"numpy.mean",
"numpy.std",
"numpy.max",
"numpy.array",
"numpy.sum",
"numpy.concatenate",
"numpy.min",
"numpy.argmin",
"numpy.genfromtxt"
] | [((132, 181), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (145, 181), True, 'import numpy as np\n'), ((275, 309), 'numpy.concatenate', 'np.concatenate', (['(data, new_record)'], {}), '((data, new_record))\n', (289, 309), True, 'import numpy as np\n'), ((381, 392), 'numpy.max', 'np.max', (['age'], {}), '(age)\n', (387, 392), True, 'import numpy as np\n'), ((404, 415), 'numpy.min', 'np.min', (['age'], {}), '(age)\n', (410, 415), True, 'import numpy as np\n'), ((428, 440), 'numpy.mean', 'np.mean', (['age'], {}), '(age)\n', (435, 440), True, 'import numpy as np\n'), ((452, 463), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (458, 463), True, 'import numpy as np\n'), ((800, 845), 'numpy.array', 'np.array', (['[len_0, len_1, len_2, len_3, len_4]'], {}), '([len_0, len_1, len_2, len_3, len_4])\n', (808, 845), True, 'import numpy as np\n'), ((860, 878), 'numpy.argmin', 'np.argmin', (['all_len'], {}), '(all_len)\n', (869, 878), True, 'import numpy as np\n'), ((1010, 1039), 'numpy.sum', 'np.sum', (['senior_citizens[:, 6]'], {}), '(senior_citizens[:, 6])\n', (1016, 1039), True, 'import numpy as np\n'), ((1323, 1343), 'numpy.mean', 'np.mean', (['high[:, -1]'], {}), '(high[:, -1])\n', (1330, 1343), True, 'import numpy as np\n'), ((1358, 1377), 'numpy.mean', 'np.mean', (['low[:, -1]'], {}), '(low[:, -1])\n', (1365, 1377), True, 'import numpy as np\n'), ((1104, 1130), 'numpy.mean', 'np.mean', (['working_hours_sum'], {}), '(working_hours_sum)\n', (1111, 1130), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
__author__ = '<NAME>'
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
class OscParams(object):
    """Container for neutrino oscillation parameters and derived matrices.

    Stores the mass splittings and mixing angles and exposes the PMNS
    mixing matrix (``M_pmns``) and the vacuum mass-splitting matrix
    (``M_mass``) as properties.
    """

    def __init__(self, dm_solar, dm_atm, x12, x13, x23, deltacp):
        # NOTE: docstring is a raw string — the original used "\delta",
        # an invalid escape sequence in a normal string literal
        # (DeprecationWarning, and a SyntaxWarning on newer Pythons).
        r"""
        Expects dm_solar and dm_atm to be in [eV^2], and x_{ij} to be
        sin^2(theta_{ij})

        params:
          * xij - sin^2(theta_{ij}) values to use in oscillation calc.
          * dm_solar - delta M_{21}^2 value [eV^2]
          * dm_atm - delta M_{32}^2 value [eV^2] if Normal hierarchy, or
                     delta M_{31}^2 value if Inverted Hierarchy (following
                     BargerPropagator class).
          * deltacp - \delta_{cp} value to use.
        """
        # sin^2(theta) inputs must be physical (<= 1)
        assert x12 <= 1
        assert x13 <= 1
        assert x23 <= 1
        self.sin12 = np.sqrt(x12)
        self.sin13 = np.sqrt(x13)
        self.sin23 = np.sqrt(x23)
        self.deltacp = deltacp
        # Comment BargerPropagator.cc:
        # "For the inverted Hierarchy, adjust the input
        # by the solar mixing (should be positive)
        # to feed the core libraries the correct value of m32."
        self.dm_solar = dm_solar
        if dm_atm < 0.0:
            self.dm_atm = dm_atm - dm_solar
        else:
            self.dm_atm = dm_atm

    @property
    def M_pmns(self):
        """PMNS mixing matrix as a (3, 3, 2) array.

        The trailing axis separates real ([..., 0]) and imaginary
        ([..., 1]) parts of each matrix element.
        """
        # real part [...,0]
        # imaginary part [...,1]
        Mix = np.zeros((3, 3, 2))
        sd = np.sin(self.deltacp)
        cd = np.cos(self.deltacp)
        # cosines recovered from the stored sines
        c12 = np.sqrt(1.0 - self.sin12 * self.sin12)
        c23 = np.sqrt(1.0 - self.sin23 * self.sin23)
        c13 = np.sqrt(1.0 - self.sin13 * self.sin13)
        # standard PDG parameterization of the PMNS matrix
        Mix[0][0][0] = c12 * c13
        Mix[0][0][1] = 0.0
        Mix[0][1][0] = self.sin12 * c13
        Mix[0][1][1] = 0.0
        Mix[0][2][0] = self.sin13 * cd
        Mix[0][2][1] = -self.sin13 * sd
        Mix[1][0][0] = -self.sin12 * c23 - c12 * self.sin23 * self.sin13 * cd
        Mix[1][0][1] = -c12 * self.sin23 * self.sin13 * sd
        Mix[1][1][0] = c12 * c23 - self.sin12 * self.sin23 * self.sin13 * cd
        Mix[1][1][1] = -self.sin12 * self.sin23 * self.sin13 * sd
        Mix[1][2][0] = self.sin23 * c13
        Mix[1][2][1] = 0.0
        Mix[2][0][0] = self.sin12 * self.sin23 - c12 * c23 * self.sin13 * cd
        Mix[2][0][1] = -c12 * c23 * self.sin13 * sd
        Mix[2][1][0] = -c12 * self.sin23 - self.sin12 * c23 * self.sin13 * cd
        Mix[2][1][1] = -self.sin12 * c23 * self.sin13 * sd
        Mix[2][2][0] = c23 * c13
        Mix[2][2][1] = 0.0
        return Mix

    @property
    def M_mass(self):
        """Antisymmetric (3, 3) matrix of vacuum mass splittings.

        Entry [i][j] is mVac[i] - mVac[j] in eV^2, with the diagonal zero.
        """
        dmVacVac = np.zeros((3, 3))
        mVac = np.zeros(3)
        delta = 5.0e-9
        mVac[0] = 0.0
        mVac[1] = self.dm_solar
        mVac[2] = self.dm_solar + self.dm_atm
        # Break any degeneracies (avoid exactly equal mass eigenvalues)
        if self.dm_solar == 0.0:
            mVac[0] -= delta
        if self.dm_atm == 0.0:
            mVac[2] += delta
        dmVacVac[0][0] = 0.
        dmVacVac[1][1] = 0.
        dmVacVac[2][2] = 0.
        dmVacVac[0][1] = mVac[0] - mVac[1]
        dmVacVac[1][0] = -dmVacVac[0][1]
        dmVacVac[0][2] = mVac[0] - mVac[2]
        dmVacVac[2][0] = -dmVacVac[0][2]
        dmVacVac[1][2] = mVac[1] - mVac[2]
        dmVacVac[2][1] = -dmVacVac[1][2]
        return dmVacVac
| [
"numpy.sin",
"numpy.zeros",
"numpy.sqrt",
"numpy.cos"
] | [((1359, 1371), 'numpy.sqrt', 'np.sqrt', (['x12'], {}), '(x12)\n', (1366, 1371), True, 'import numpy as np\n'), ((1393, 1405), 'numpy.sqrt', 'np.sqrt', (['x13'], {}), '(x13)\n', (1400, 1405), True, 'import numpy as np\n'), ((1427, 1439), 'numpy.sqrt', 'np.sqrt', (['x23'], {}), '(x23)\n', (1434, 1439), True, 'import numpy as np\n'), ((1945, 1964), 'numpy.zeros', 'np.zeros', (['(3, 3, 2)'], {}), '((3, 3, 2))\n', (1953, 1964), True, 'import numpy as np\n'), ((1977, 1997), 'numpy.sin', 'np.sin', (['self.deltacp'], {}), '(self.deltacp)\n', (1983, 1997), True, 'import numpy as np\n'), ((2011, 2031), 'numpy.cos', 'np.cos', (['self.deltacp'], {}), '(self.deltacp)\n', (2017, 2031), True, 'import numpy as np\n'), ((2047, 2085), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.sin12 * self.sin12)'], {}), '(1.0 - self.sin12 * self.sin12)\n', (2054, 2085), True, 'import numpy as np\n'), ((2096, 2134), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.sin23 * self.sin23)'], {}), '(1.0 - self.sin23 * self.sin23)\n', (2103, 2134), True, 'import numpy as np\n'), ((2145, 2183), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.sin13 * self.sin13)'], {}), '(1.0 - self.sin13 * self.sin13)\n', (2152, 2183), True, 'import numpy as np\n'), ((3060, 3076), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3068, 3076), True, 'import numpy as np\n'), ((3091, 3102), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3099, 3102), True, 'import numpy as np\n')] |
# calculate_ICEO.py
"""
Notes
"""
# import modules
import numpy as np
import matplotlib.pyplot as plt
def calculate_ICEO(testSetup, testCol, plot_figs=False, savePath=None):
    # write script to calculate and output all of the below terms using the testSetup class
    """
    Compute ICEO (induced-charge electro-osmosis) statistics for every
    (E, f) test in the collection; optionally plot them and save a CSV.

    Required Inputs:
    # physical constants
    eps_fluid: permittivity of water (F/m2)         CurlypivTestSetup.chip.material_fluid.permittivity
    eps_dielectric: permittivity of sio2 ()         CurlypivTestSetup.chip.bpe.dielectric_coating.permittivity
    T: temperature (K)                              CurlypivTestSetup.chip.material_fluid.temperature
    # material properties
    rho: density (kg/m3)                            depends on the instance
    mu: dynamic viscosity (m2/s)                    CurlypivTestSetup.chip.material_fluid.viscosity
    sigma: electrical conductivity (S/m)            CurlypivTestSetup.chip.material_fluid.conductivity
    zeta: zeta potential (V)                        depends on the instance
    Ns: surface site density (#/nm2)                CurlypivTestSetup.chip.material_fluid.reaction_site_density
    Ka: reaction equilibrium constant ()            CurlypivTestSetup.chip.material_fluid.Ka
    a_h: bulk concentration of protons (mmols)      (I think this is just pH) CurlypivTestSetup.chip.material_fluid.pH
    # geometries
    l: characteristic length scale (m)              CurlypivTestSetup.chip.channel.height
    l_bpe: length of bpe (m)                        CurlypivTestSetup.chip.bpe.length
    d: thickness of sio2 dielectric (m)             CurlypivTestSetup.chip.bpe.dielectric_coating.thickness
    # experimental inputs
    x: location (m)         * need to write *       array of locations along BPE length for instanteous induced zeta calc.
    t: time (s)             * need to write *       array of times in a periodic cycle for instanteous zeta calc.
    f: frequency (1/s)      * need to write *       CurlypivTestCollection.locs.tests.test_id[1]
    E: electric field strength (V/m)  * need to write *  CurlypivTestCollection.locs.tests.test_id[0]
    # outputs
    lamb: debye length (m)
    Cd: capacitance of dielectric (F/m2)            # needs to be scaled by BPE surface area
    Cdl_linear: linear double layer capacitance (F/m2)      # needs to be scaled by BPE surface area
    Cdl_nonlinear: nonlinear double layer cap. (F/m2)       # needs to be scaled by BPE surface area
    Cbuff: buffer capacitance (F/m2)  * = 0.024 from Squires *  # needs to be scaled by BPE surface area
    Ctotal: total capacitance (F/m2)                # needs to be scaled by BPE surface area
    U: characteristic velocity (m/s)
    Re: Reynolds number ()
    U_HS: Helmholtz Smoluchowski velocity (m/s)
    U_slip: slip velocity (m/s)
    tau: Double layer charging time (s)
    zeta_qu_steady: quasi-steady induced zeta (V)
    U_quasi_steady: quasi-steady slip velocity (m/s)
    zeta_highfreq: high-frequency induced zeta (V)
    U_highfreq: high-frequency slip velocity (m/s)

    Returns:
        (iceo_stats, header): a 2-D numpy array (one row per test) and the
        matching CSV header string.
    """
    # define variables here to simplify
    # identities of components
    dielectric_material = testSetup.chip.bpe.dielectric_coating.name
    electrolyte_material = testSetup.chip.channel.material_fluid.name
    # mechanical
    mu = testSetup.chip.channel.material_fluid.viscosity
    rho = testSetup.chip.channel.material_fluid.density
    T = testSetup.chip.channel.material_fluid.temperature
    # electro/chemical
    eps_fluid = testSetup.chip.channel.material_fluid.permittivity
    eps_dielectric = testSetup.chip.bpe.dielectric_coating.permittivity
    reaction_site_density = testSetup.chip.bpe.dielectric_coating.reaction_site_density
    Ka = testSetup.chip.bpe.dielectric_coating.Ka
    Kb = testSetup.chip.bpe.dielectric_coating.Kb  # NOTE(review): read but never used below
    pH = testSetup.chip.channel.material_fluid.pH
    zeta_wall = testSetup.chip.channel.material_bottom_wall_surface.zeta
    c = testSetup.chip.channel.material_fluid.concentration
    sigma = testSetup.chip.channel.material_fluid.conductivity
    # geometry
    L = testSetup.chip.channel.length
    L_bpe = testSetup.chip.bpe.length
    x = testSetup.chip.bpe.linspace_x
    channel_height = testSetup.chip.channel.height
    dielectric_thickness = testSetup.chip.bpe.dielectric_coating.thickness
    # PIV
    img_acq_rate = testSetup.optics.microscope.ccd.img_acq_rate
    dt = 1 / img_acq_rate  # (s) time between images
    p_d = testSetup.optics.fluorescent_particles.diameter
    microns_to_pixels = 1/testSetup.optics.microscope.objective.pixel_to_micron
    u_slip_error_scale = 0.3  # fraction of U_slip used as the displacement uncertainty
    # print PIV stats
    dx_brownian = calc_brownian_displacement(dt, mu, p_d, T)
    print("Brownian displacement: {} for {} particle diameter and {} time step".format(dx_brownian, p_d, dt))
    print("Squires recommended: U_min_acceptable > {} um/s ({} pix/frame) or 20% of Brownian motion".format(np.round(dx_brownian*1e6/dt*0.2,2), np.round(microns_to_pixels*dx_brownian*1e6/(dt*img_acq_rate)*0.2,2)))
    # extract the test collection test parameters: one (E, f) pair per test
    test_params = []
    for key in testCol.locs:
        loc = testCol.locs[key]
        loc_tests = loc.tests
        for ky in loc_tests:
            test_keys = loc_tests[ky]
            test_params.append((test_keys._E, test_keys._f))
    # initialize output data arrays (one entry appended per test below)
    electric_fields = []
    frequencys = []
    dielectrics = []
    buffers = []
    UbyUo = []
    raw_uvel_max = []
    raw_slope = []
    betas = []
    deltas = []
    taus = []
    d_eps = []
    d_pKa = []
    d_Ns = []
    d_thick = []
    b_conc = []
    b_conduct = []
    b_pH = []
    b_viscosity = []
    b_eps = []
    b_debye = []
    voltages = []
    electrode_spacings = []
    # Non-Squires terms
    # NOTE(review): uvel_brownian_error_steady is initialized but never
    # appended to or output — only the quasi-steady and high-frequency
    # error lists make it into the results.
    uvel_brownian_error_steady = []
    uvel_brownian_error_quasisteady = []
    uvel_brownian_error_highfreq = []
    # iterate through test parameters
    for i in range(len(test_params)):
        # iterables
        V_channel = test_params[i][0]
        f = test_params[i][1]
        # calculate intermediaries
        E = V_channel/L
        t = np.linspace(0, 1/f, num=100)  # one driving period, 100 samples
        w = calc_w(f=f)  # angular frequency (defined elsewhere in this module)
        lamb = calc_lamb(eps_fluid=eps_fluid, c=c, T=T)
        Cd = calc_dielectric_capacitance(eps=eps_dielectric, d=dielectric_thickness)
        Cdl_linear = calc_linear_doublelayer_capacitance(eps=eps_fluid, lamb=lamb)
        Cbuff = calc_buffer_capacitance(Cbuff_input=0.024)
        C_bare_metal = Cdl_linear + Cbuff  # NOTE(review): computed but unused below
        total_capacitance, beta, delta = calc_total_capacitance(eps_fluid=eps_fluid, lamb=lamb, Cdl=Cdl_linear, Cd=Cd, Cbuff=Cbuff)
        tau = calc_RC_via_bulk_time(capacitance=Cdl_linear, L=L_bpe, sigma=sigma)
        # calculate background flow
        u_HS = calc_U_HS(eps=eps_fluid, zeta=zeta_wall, E=E, mu=mu)
        Re = calc_Re(rho=rho, U=u_HS, l=channel_height, mu=mu)
        # calculate slip flow (DC)
        zeta_induced = calc_zeta_induced(E=E, x=x)
        u_slip = calc_U_slip(eps=eps_fluid, E=E, x=x, mu=mu)
        slope_x = 40  # trim 40 points at each end before estimating the slope
        u_slip_slope = u_slip[slope_x:len(u_slip)-slope_x]
        # calculate the Brownian error for quasi-steady slip flow
        error_brownian_steady = calc_brownian_error(U_estimated=u_slip, u_scale=u_slip_error_scale, dt=dt, viscosity=mu, particle_diameter=p_d, temperature=T)
        # calculate slip flow (quasi-steady)
        zeta_induced_quasisteady = calc_zeta_induced_quasisteady(E=E, x=x)
        u_slip_quasisteady = calc_U_slip_quasisteady(eps=eps_fluid, E=E, x=x, mu=mu)
        # calculate the Brownian error for quasi-steady slip flow
        error_brownian_quasisteady = calc_brownian_error(U_estimated=u_slip_quasisteady, u_scale=u_slip_error_scale, dt=dt, viscosity=mu, particle_diameter=p_d, temperature=T)
        # calculate slip flow (high-frequency)
        zeta_induced_highfreq = calc_zeta_induced_highfreq(Re=Re, E=E, x=x, w=w, t=t, tau=tau)
        u_slip_highfreq = calc_U_slip_highfreq(eps=eps_fluid, E=E, x=x, mu=mu, tau=tau, f=f)
        # calculate the Brownian error for quasi-steady slip flow
        # NOTE(review): this reuses u_slip_quasisteady (not u_slip_highfreq)
        # and a hard-coded u_scale=0.1 — confirm that is intended.
        error_brownian_highfreq = calc_brownian_error(U_estimated=u_slip_quasisteady, u_scale=0.1, dt=dt, viscosity=mu, particle_diameter=p_d, temperature=T)
        # calculate induced zeta with linear zeta and dielectric coating
        zeta_induced_Clamb_Cd_linear = calc_zeta_induced_Clamb_Cd(E=E, x=x, Cdl=Cdl_linear, Cd=Cd)
        # calculate induced zeta with nonlinear zeta and dielectric coating
        Cdl_nonlinear = calc_nonlinear_doublelayer_capacitance(eps_fluid, lamb=lamb, zeta=zeta_induced_Clamb_Cd_linear)
        zeta_induced_Clamb_Cd_nonlinear = calc_zeta_induced_Clamb_Cd(E, x, Cdl=Cdl_nonlinear, Cd=Cd)
        # calculate induced zeta with total capacitance
        zeta_induced_total_capacitance = calc_zeta_induced_total_capacitance(E=E, x=x, Cdl=Cdl_linear, Cd=Cd, Cbuff=Cbuff)
        u_ratio_slip_to_HS = U_ratio_slip_to_HS(Cdl=Cdl_linear, Cd=Cd, Cbuff=Cbuff)
        # calculate some Squires specific data
        slope_x = 40
        u_slope = (u_slip_quasisteady[-slope_x]-u_slip_quasisteady[slope_x]) / (x[-slope_x]-x[slope_x])
        u_UbyUo = -u_slope / np.max(u_slip_slope)
        # plot important metrics
        if plot_figs is True:
            import matplotlib as mpl
            from cycler import cycler
            mpl.rc('lines', linewidth=4, linestyle='-')
            mpl.rcParams['axes.prop_cycle'] = cycler(color=['r', 'g', 'b', 'y'])
            fig, axes = plt.subplots(nrows=3, sharex=True, figsize=(13,10))
            ax = axes.ravel()
            ax[0].plot(x*1e6, zeta_induced*1e3, label=r'$steady$')
            ax[0].plot(x*1e6, zeta_induced_quasisteady*1e3, label=r'$quasi-steady$')
            ax[0].plot(x * 1e6, zeta_induced_highfreq * 1e3, label=r'$high-frequency$')
            ax[0].plot(x * 1e6, zeta_induced_Clamb_Cd_linear * 1e3, label=r'$C_{\lambda}+C_d (linear)$')
            ax[0].plot(x * 1e6, zeta_induced_Clamb_Cd_nonlinear * 1e3, label=r'$C_{\lambda}+C_d (non linear)$')
            ax[0].plot(x * 1e6, zeta_induced_total_capacitance * 1e3, label=r'$C_{total}$')
            ax[1].plot(x*1e6, u_slip*1e6, label=r'$steady$')
            ax[1].plot(x*1e6, u_slip_quasisteady*1e6, label=r'$quasi-steady$')
            ax[1].plot(x * 1e6, u_slip_highfreq * 1e6, label=r'$high frequency$')
            ax[2].plot(x*1e6, error_brownian_steady, label=r'$error_{steady}$')
            ax[2].plot(x*1e6, error_brownian_quasisteady, label=r'$error_{quasi-steady}$')
            ax[2].plot(x*1e6, error_brownian_highfreq, label=r'$error_{high-frequency}$')
            ax[2].axhline(y=-0.2, xmin=x[0]*1e6, xmax=x[-1]*1e6, color='gray', linestyle='dashed', linewidth=2, alpha=0.65, label=r'$error_{max-acceptable}$')
            ax[2].axhline(y=0.2, xmin=x[0] * 1e6, xmax=x[-1] * 1e6, color='gray', linestyle='dashed', linewidth=2, alpha=0.65,)
            ax[0].set_ylabel(r'$\zeta_{induced} (mV)$')
            ax[0].legend(fancybox=True, loc="upper left", bbox_to_anchor=[1.01, 1])
            ax[1].set_ylabel(r'$U_{slip, induced} (\mu m/s)$')
            ax[1].legend(fancybox=True, loc="upper left", bbox_to_anchor=[1.01, 1])
            ax[2].set_ylim(bottom=-0.5, top=0.5)
            ax[2].set_ylabel(r'$\epsilon_{x} (\frac{\sigma_{x}}{\Delta x})$')
            ax[2].set_xlabel(r'$x (\mu m)$')
            ax[2].set_title((r'Relative Error $(\Delta x = $')+str(u_slip_error_scale*100)+(r'% of $\frac{U_{slip}}{\Delta t})$'))
            ax[2].legend(fancybox=True, loc="upper left", bbox_to_anchor=[1.01, 1])
            plt.suptitle('BPE-ICEO: E={} V/mm, f={} Hz'.format(E*1e-3, int(f)))
            plt.tight_layout()
            plt.show()
        # compile into dictionary
        # NOTE(review): this dict is rebuilt every iteration but never stored
        # or returned — only the lists appended below reach the output.
        iceo_stats_dict = {
            'electric_field_strength': E,
            'frequency': f,
            'fluid': electrolyte_material,
            'fluid_viscosity': mu,
            'fluid_density': rho,
            'fluid_temperature': T,
            'fluid_pH': pH,
            'fluid_concentration': c,
            'fluid_conductivity': sigma,
            'fluid_permittivity': eps_fluid,
            'l_bpe': L_bpe,
            'dielectric': dielectric_material,
            'dielectric_thickness': dielectric_thickness,
            'solid_permittivity': eps_dielectric,
            'solid_reaction_site_density': reaction_site_density,
            'solid_Ka': Ka,
            'solid_zeta': zeta_wall,
            'channel_height': channel_height,
            'u_HS': u_HS,
            'flow_Re': Re,
            'capacitance_dielectric': Cd,
            'capacitance_Cdl_linear': Cdl_linear,
            #'capacitance_Cdl_nonlinear': Cdl_nonlinear, # should be plotted
            'capacitance_Cbuff': Cbuff,
            'capacitance_total': total_capacitance,
            'beta': beta,
            'delta': delta,
            'tau': tau,
            'debye_length': lamb,
            'max_zeta_induced': np.max(zeta_induced),
            'max_zeta_induced_quasisteady': np.max(zeta_induced_quasisteady), # should be plotted
            'max_zeta_induced_highfreq': np.max(zeta_induced_highfreq), # should be plotted
            'max_zeta_induced_Clamb_Cd_linear': np.max(zeta_induced_Clamb_Cd_linear), # should be plotted
            'max_zeta_induced_Clamb_Cd_nonlinear': np.max(zeta_induced_Clamb_Cd_nonlinear), # should be plotted
            'max_zeta_induced_total_capacitance': np.max(zeta_induced_total_capacitance), # should be plotted
            'max_u_slip': np.max(u_slip), # should be plotted
            'u_UbyUo': u_UbyUo,
            'max_u_slip_quasisteady': np.max(u_slip_quasisteady), # should be plotted
            'max_u_slip_highfreq': np.max(u_slip_highfreq), # should be plotted
            'u_ratio_slip_to_HS': u_ratio_slip_to_HS
        }
        # append to storage list
        electric_fields.append(E)
        frequencys.append(f)
        dielectrics.append(dielectric_material)
        buffers.append(electrolyte_material)
        UbyUo.append(u_UbyUo)
        raw_uvel_max.append(np.max(u_slip))
        uvel_brownian_error_quasisteady.append(error_brownian_quasisteady)
        uvel_brownian_error_highfreq.append(error_brownian_highfreq)
        raw_slope.append(u_slope)
        betas.append(beta)
        deltas.append(delta)
        taus.append(tau)
        d_eps.append(eps_dielectric)
        d_pKa.append(Ka)
        d_Ns.append(reaction_site_density)
        d_thick.append(dielectric_thickness)
        b_conc.append(c)
        b_conduct.append(sigma)
        b_pH.append(pH)
        b_viscosity.append(mu)
        b_eps.append(eps_fluid)
        b_debye.append(lamb)
        voltages.append(V_channel)
        electrode_spacings.append(L)
    # make numpy arrays of correct datatype
    # append to storage list
    electric_fields = np.array(electric_fields, dtype=float)
    frequencys = np.array(frequencys, dtype=float)
    dielectrics = np.array(dielectrics, dtype=str)
    buffers = np.array(buffers, dtype=str)
    UbyUo = np.array(UbyUo, dtype=float)
    raw_uvel_max = np.array(raw_uvel_max, dtype=float)
    uvel_brownian_error_quasisteady = np.array(uvel_brownian_error_quasisteady, dtype=float)
    uvel_brownian_error_highfreq = np.array(uvel_brownian_error_highfreq, dtype=float)
    raw_slope = np.array(raw_slope, dtype=float)
    betas = np.array(betas, dtype=float)
    deltas = np.array(deltas, dtype=float)
    taus = np.array(taus, dtype=float)
    d_eps = np.array(d_eps, dtype=float)
    d_pKa = np.array(d_pKa, dtype=float)
    d_Ns = np.array(d_Ns, dtype=float)
    d_thick = np.array(d_thick, dtype=float)
    b_conc = np.array(b_conc, dtype=float)
    b_conduct = np.array(b_conduct, dtype=float)
    b_pH = np.array(b_pH, dtype=float)
    b_viscosity = np.array(b_viscosity, dtype=float)
    b_eps = np.array(b_eps, dtype=float)
    b_debye = np.array(b_debye, dtype=float)
    voltages = np.array(voltages, dtype=float)
    electrode_spacings = np.array(electrode_spacings, dtype=float)
    # NOTE(review): stacking the string columns (dielectrics, buffers) with
    # float columns promotes the whole array to a unicode dtype — every
    # value in iceo_stats is a string; fmt='%s' in savetxt relies on this.
    iceo_stats = np.vstack((electric_fields, frequencys, dielectrics, buffers,
                            UbyUo, raw_uvel_max, raw_slope, betas, deltas, taus,
                            d_eps, d_pKa, d_Ns, d_thick,
                            b_conc, b_conduct, b_pH, b_viscosity, b_eps, b_debye,
                            voltages, electrode_spacings)).T
    header = "electric_fields,frequencys,dielectrics,buffers,UbyUo,raw_uvel_max,raw_slope,beta,delta,tau,d_eps,d_pKa,d_Ns,d_thick,b_conc,b_conduct,b_pH,b_viscosity,b_eps,b_debye,voltages,electrode_spacings"
    if savePath:
        # Write to .csv file
        np.savetxt(savePath, iceo_stats, fmt='%s', delimiter=',', header=header)
    return iceo_stats, header
# ---------- INDUCED ZETA AND SLIP VELOCITIES ----------------
def calc_lamb(eps_fluid, T, c):
    """Debye length (m) of a symmetric, monovalent electrolyte.

    Inputs:
        eps_fluid: fluid permittivity (F/m)
        T: temperature (K)
        c: bulk ion concentration (mol/m^3)
    """
    e = -1.602e-19   # (C) charge of an electron
    kb = 1.3806e-23  # (J/K) Boltzmann constant
    Na = 6.022e23    # (1/mol) Avogadro's number
    z = 1            # () valence of electrolyte
    ionic_strength_term = 2 * (z ** 2 * Na * c) * e ** 2
    return np.sqrt(eps_fluid * kb * T / ionic_strength_term)
def calc_zeta_induced(E, x):
    """Steady induced zeta potential at position x under field E.

    Reference: Squires 2010.
    """
    return E * x
def calc_zeta_induced_Clamb_Cd(E, x, Cdl, Cd):
    """Induced zeta reduced by the double-layer/dielectric capacitance ratio (delta)."""
    capacitance_ratio = Cdl / Cd  # 'delta' in Squires 2010
    return E * x / (1 + capacitance_ratio)
def calc_zeta_induced_quasisteady(E, x):
    """Induced zeta potential in the quasi-steady limit (same E*x form as steady)."""
    return E * x
def calc_zeta_induced_total_capacitance(E, x, Cdl, Cd, Cbuff):
    """Induced zeta accounting for buffer (beta) and double-layer (delta) capacitance ratios."""
    beta = Cbuff / Cd
    delta = Cdl / Cd
    return E * x / (1 + delta + beta)
def calc_U_slip_quasisteady(eps, E, x, mu):
    """ICEO slip velocity in the quasi-steady limit (half the DC slip)."""
    return -(eps * x / (2 * mu)) * E ** 2
def calc_zeta_induced_highfreq(Re, E, x, w, t, tau):
    """Induced zeta potential at high driving frequency.

    NOTE(review): the *t* argument is immediately overwritten with the
    quarter period 1/(4f) = pi/(2w), so the caller's t is ignored and
    exp(w*t) evaluates to the constant exp(pi/2) — confirm this is intended.
    """
    t = 1 / (4 * (w / (2 * np.pi)))  # quarter period; shadows the t argument
    attenuation = 1 + tau * w
    return Re * E * x * np.exp(w * t) / attenuation
def calc_U_HS(eps, zeta, E, mu):
    """Helmholtz-Smoluchowski electro-osmotic velocity (m/s)."""
    return -(eps * zeta / mu) * E
def calc_U_slip(eps, E, x, mu):
    """ICEO slip velocity under a DC field (m/s)."""
    return -(eps / mu) * E ** 2 * x
def calc_U_slip_highfreq(eps, E, x, mu, tau, f):
    """ICEO slip velocity at driving frequency f (Hz), attenuated by (tau*w)^2."""
    omega = calc_w(f)  # angular frequency; calc_w is defined elsewhere in this module
    damping = 2 * mu * (1 + tau ** 2 * omega ** 2)
    return -eps * E ** 2 * x / damping
def U_ratio_slip_to_HS(Cdl, Cd, Cbuff):
    """Ratio of ICEO slip to Helmholtz-Smoluchowski velocity: 1 + delta + beta."""
    beta = Cbuff / Cd
    delta = Cdl / Cd
    return 1 + delta + beta
# ---------- CAPACITANCES ----------------
def calc_linear_doublelayer_capacitance(eps, lamb):
    """
    Linearized electric double-layer (Debye) capacitance, F/m^2.

    Notes:
        Squires, 2010 - "linearized electric double layer capacitance"
        Adjari, 2006 - "Debye layer capacitance (per unit area) at low voltage"
    Inputs:
        eps = permittivity of the fluid (F/m)
        lamb = linearized Debye length (m)
    """
    return eps / lamb
def calc_nonlinear_doublelayer_capacitance(eps_fluid, lamb, zeta):
    """
    Nonlinear electric double-layer capacitance (zeta above the thermal voltage), F/m^2.

    Notes:
        Squires, 2010.
    Inputs:
        eps_fluid = permittivity of fluid (F/m)
        lamb = Debye length (m)
        zeta = zeta potential (V)
    """
    debye_capacitance = eps_fluid / lamb
    return debye_capacitance * np.cosh(zeta / 2)
def calc_dielectric_capacitance(eps, d):
    """
    Per-unit-area capacitance of the dielectric coating on the BPE, F/m^2.

    Notes:
        Squires, 2010 - "additional capacitance due to dielectric layer"
          --> a dielectric layer (instead of the Stern layer) gives direct
              experimental control over this capacitance.
        Adjari, 2006 - "additional surface capacitance due to dielectric layer"
    Inputs:
        eps = permittivity of dielectric layer (F/m)
        d = thickness of the dielectric layer (m)
    """
    return eps / d
def calc_buffer_capacitance(Cbuff_input=0.024, Ns=None, T=None, Ka=None, a_h=None, zeta=None):
    """
    Equilibrium buffer capacitance, F/m^2.

    Notes:
        Squires, 2010 - counterion binding via an equilibrium reaction at the
        charged electrode surface; acts in parallel with the double-layer
        capacitance.
    Inputs:
        Cbuff_input: fixed buffer capacitance (F/m^2); pass None to compute
                     from the surface-reaction model instead
        Ns: surface density of reactive groups (#/m^2)
        T: temperature (K)
        Ka: reaction equilibrium constant (typically 2-6)
        a_h: bulk concentration of protons (#/m^3)
        zeta: zeta potential at the surface (V)
    """
    e = -1.602e-19   # (C) charge of an electron
    kb = 1.3806e-23  # (J/K) Boltzmann constant
    if Cbuff_input is not None:
        # fixed value (0.024 F/m^2 from Squires) — should be fit to data
        return Cbuff_input
    boltzmann_factor = np.exp(-e * zeta / (kb * T))
    return (e ** 2 * Ns / (kb * T)) * (Ka * a_h * boltzmann_factor / (Ka + a_h * boltzmann_factor))
def calc_doublelayer_dielectric_capacitance(eps_fluid, lamb, Cdl):
    """
    Series capacitance of the Debye layer and a surface layer (Stern or
    dielectric coating), F/m^2.

    Notes:
        Adjari, 2006 - "overall capacitance per unit area in the Debye-Huckel limit"
    Inputs:
        eps_fluid = permittivity of the fluid (F/m)
        lamb = Debye length (m)
        Cdl = capacitance of the Stern/dielectric layer (F/m^2)
    """
    C_debye = eps_fluid / lamb
    return (1 / (1 + C_debye / Cdl)) * C_debye
def calc_total_capacitance(eps_fluid, lamb, Cdl, Cd, Cbuff):
    """Total capacitance from Debye layer, dielectric layer, and buffer.

    Returns (total_capacitance, beta, delta) where beta = Cbuff/Cd and
    delta = Cdl/Cd.
    """
    beta = Cbuff / Cd
    delta = Cdl / Cd
    C_debye = eps_fluid / lamb
    total = C_debye * ((1 + beta / delta) / (1 + delta + beta))
    return total, beta, delta
def calc_delta_capacitance_ratio(capacitance_doublelayer, capacitance_dielectric):
    """
    Ratio (delta) of double-layer to dielectric capacitance, dimensionless.

    Notes:
        Squires, 2010 - "ratio of the double layer to dielectric capacitance"
        Adjari, 2006 - "surface capacitance ratio"
          --> at larger potentials the Debye-layer capacitance grows and the
              total is dominated by the Stern layer alone
          --> capacitance ordering: C_total < C_debye < C_dielectric
    """
    return capacitance_doublelayer / capacitance_dielectric
def calc_beta_capacitance_ratio(capacitance_buffer, capacitance_dielectric):
    """
    Ratio (beta) of buffer to dielectric capacitance, dimensionless.

    Notes:
        Squires, 2010.
    """
    return capacitance_buffer / capacitance_dielectric
# ------------ TIME SCALES ------------------
def calc_Debye_charging_time(eps_fluid, sigma):
    """
    Debye charging time (s): time required to charge the Debye layer.

    Notes:
        Adjari, 2006 - "Debye time scale"
    """
    return eps_fluid / sigma
def calc_RC_via_bulk_time(capacitance, L, sigma):
    """
    Characteristic RC time (s) for the induced double layer to form,
    charging through the bulk electrolyte.

    Notes:
        Squires, 2010 - "characteristic time for induced double layer to form"
    Inputs:
        capacitance: total double-layer capacitance per area (F/m^2)
        L: characteristic BPE length (m)
        sigma: electrolyte/buffer conductivity (S/m)
    Outputs:
        tau: s
    """
    return capacitance * L / sigma
def calc_RC_via_bulk_HV_time(capacitance, L, sigma):
    """
    Characteristic charging/relaxation time (s) of the double layer through
    the bulk electrolyte at high voltage.

    Notes:
        Adjari, 2006 - "relaxation time at high voltages"
          --> at high voltages the Stern/dielectric layer dominates the
              double-layer capacitance, changing the relaxation time.
    """
    return capacitance * L / sigma
def calc_Debye_charging_via_Faradaic_time(charge_transfer_resistance, capacitance_dielectric):
    """
    Characteristic time (s) for (de)charging the Debye layer through
    Faradaic reactions.

    Notes:
        Adjari, 2006 --> when Rct << R0 this can be much faster than Ohmic
        charging, effectively "short circuiting" the Debye layer.
    """
    return charge_transfer_resistance * capacitance_dielectric
def calculate_Debye_frequency(sigma, eps_fluid):
    """
    Debye frequency (Hz): inverse of the Debye-layer charging time.

    Notes:
        Adjari, 2006 - maximum frequency at which the Debye layer can fully
        charge; any driving frequency should be well below this.
    Inputs:
        sigma = conductivity of buffer/electrolyte (S/m)
        eps_fluid = permittivity of buffer/electrolyte (F/m)
    """
    return sigma / eps_fluid
# ----------- CURRENT AND CHARGE TRANSFER -----------------
def calc_channel_current(E, sigma, A):
    """Ohmic current through the channel: field * conductivity * cross-section area."""
    return E * sigma * A
def calculate_q_debye_linear(eps_fluid, lambda_d, zeta):
    """
    Charge accumulated in the Debye layer, linearized form (Adjari, 2006), Coulombs.
    """
    return -eps_fluid * zeta / lambda_d
def calculate_q_debye_nonlinear(eps_fluid, zeta, c, T):
    """
    Charge accumulated in the Debye layer, nonlinear form (Adjari, 2006), Coulombs.

    Notes:
        For voltages below the thermal voltage (Debye-Huckel limit).
    """
    kb = 1.3806e-23  # (J/K) Boltzmann constant
    e = -1.602e-19   # (C) charge of an electron
    Na = 6.022e23    # (1/mol) Avogadro's number
    z = 1            # () valence of electrolyte
    # excess number densities of each ion species relative to the bulk
    counter_ion_excess = c * Na * (np.exp(-z * e * zeta / kb / T) - 1)
    co_ion_excess = c * Na * (np.exp(-z * -e * zeta / kb / T) - 1)
    return -1 * np.sign(zeta) * np.sqrt(2 * eps_fluid * kb * T * (counter_ion_excess + co_ion_excess))
def calculate_q_debye_nonlinear_hv(eps_fluid, lambda_d, zeta, T):
    """
    Charge accumulated in the Debye layer at larger voltages (Adjari, 2006), Coulombs.
    """
    kb = 1.3806e-23  # (J/K) Boltzmann constant
    e = -1.602e-19   # (C) charge of an electron
    prefactor = (-eps_fluid / lambda_d) * (2 * kb * T / e)
    return prefactor * np.sinh(e * zeta / (2 * kb * T))
# ----------- CHANNEL-WISE VARIABLES -----------------
# Bulk fluid potential due to externally applied electric field
def calc_channel_fluid_potential(E, L_channel, L_bpe):
    """
    Bulk fluid potential (phi, V) as a function of channel location.

    The channel axis is sampled with 100 points upstream of the BPE, 100
    points across the BPE (centered in the channel), and 50 points
    downstream (numpy.linspace default num); the potential then falls
    linearly from E at the inlet toward 0 at the outlet:
    phi = E * (1 - L / L_channel).

    Inputs:
        E: potential scale (V)  # NOTE(review): used as a potential here, not a field (V/m) — confirm with callers
        L_channel: channel length (m)
        L_bpe: BPE length (m)
    Returns:
        phi_channel: 1-D numpy array of potentials along the channel (250 points).
    """
    channel_start = 0
    channel_stop = L_channel
    bpe_start = L_channel/2 - L_bpe/2
    bpe_stop = L_channel/2 + L_bpe/2
    channel_step = (L_channel - bpe_stop) / 100
    L_pre_bpe = np.linspace(channel_start, bpe_start, num=100, endpoint=True)
    L_bpe = np.linspace(bpe_start, bpe_stop, num=100, endpoint=True)
    L_post_bpe = np.linspace(bpe_stop+channel_step, channel_stop, endpoint=False)
    # BUG FIX: the three segments are 1-D, so they must be joined along
    # axis 0 — np.concatenate(..., axis=1) raises AxisError for 1-D inputs.
    L = np.concatenate((L_pre_bpe, L_bpe, L_post_bpe), axis=0)
    phi_channel = E * (1 - L/L_channel)
    return phi_channel
def calc_V_drop_lamb_dielectric(zeta, q_debye, C_dl, phi_channel):
    """
    THIS IS WRONG - THIS IS WRONG - THIS IS WRONG
    Voltage drop (V) across the Stern/dielectric and Debye layers along the
    channel. The original author flagged this formula as incorrect; the
    computation is preserved unchanged.
    """
    return zeta - q_debye / C_dl + phi_channel
# ----------- ELECTROCHEMISTRY ----------------
# Exchange current denisty through the BPE
def calc_bpe_exchange_current(K_standard_rate_constant, c_bulk_oxidized, c_bulk_reduced, alpha_transfer_coefficient=0.5):
    """
    Exchange current density at a specific BPE location.

    units: Coulombs/(s*m^2)
    Notes:
        K_standard_rate_constant: usually between 2 and 6.
        The bulk concentrations can be taken as the bulk species concentrations.
        alpha defaults to 1/2 as in Adjari, 2006.
    """
    e = -1.602e-19  # (C) charge of an electron
    alpha = alpha_transfer_coefficient
    return (
        e
        * K_standard_rate_constant
        * c_bulk_reduced ** alpha
        * c_bulk_oxidized ** (1 - alpha)
    )
# Charge transfer resistance through the BPE
def calc_bpe_charge_transfer_resistance(j_0_bpe, T):
    """
    Area-specific charge-transfer resistance through the BPE.

    R_ct = kb * T / (j_0 * e)
    units: Ohm*m^2
    """
    kb = 1.3806e-23  # (J/K) Boltzmann constant
    e = -1.602e-19   # (C) charge of an electron
    return kb * T / j_0_bpe / e
# Charge transfer resistance through bulk electrolyte
def calc_bpe_bulk_electrolyte_resistance(characteristic_length, sigma):
    """
    Area-specific resistance through the bulk electrolyte.

    Adjari, 2006 — "(area specific) bulk electrolyte resistance";
    Squires, 2010 uses the same expression without naming it.

    Inputs:
        characteristic_length: (m) length of the BPE
        sigma: (S/m) conductivity of the electrolyte/buffer
    Output:
        Resistance (Ohm*m^2)
    """
    return characteristic_length / sigma
# Faradaic conductance
def calc_faradaic_conductance(R_0, R_ct, tau_0, tau_ct):
    """
    Faradaic conductance — a measure of the facility of the electrode
    reaction (Adjari, 2006).

    Returns the pair (R_0/R_ct, tau_0/tau_ct); the two ratios are expected
    to be equal when the inputs are consistent.
    """
    return R_0 / R_ct, tau_0 / tau_ct
# ----- CALCULATE PIV SPECIFIC QUANTITIES -----
def calc_brownian_displacement(dt, viscosity, particle_diameter, temperature):
    """
    Characteristic Brownian displacement of a tracer particle over one
    time step dt (units: m).
    """
    kb = 1.3806e-23  # (J/K) Boltzmann constant
    # mean-squared displacement per dt from Stokes-Einstein diffusivity
    msd = 2 * kb * temperature * dt / (3 * np.pi * viscosity * particle_diameter)
    return np.sqrt(msd)
def calc_Re(rho, U, l, mu):
    """Reynolds number: Re = rho * U * l / mu."""
    return rho * U * l / mu
def calc_w(f):
    """Angular frequency w = 2*pi*f (rad/s) for a frequency f in Hz."""
    return 2 * np.pi * f
def calc_particle_image_diameter(magnification, particle_diameter, wavelength, numerical_aperture,
                                 index_of_refraction):
    """
    Particle image diameter on the camera sensor: combination of geometric
    magnification and diffraction-limited spot size.
    Recommended to be ~2-3 pixels (Westerweel et al., 2009).
    """
    geometric = magnification ** 2 * particle_diameter ** 2
    diffraction = 5.95 * (magnification + 1) ** 2 * wavelength ** 2 * (index_of_refraction / (2 * numerical_aperture)) ** 2
    return np.sqrt(geometric + diffraction)
def calc_brownian_error(U_estimated, u_scale, dt, viscosity, particle_diameter, temperature):
    """
    Error due to Brownian motion relative to the mean squared displacement
    (Santiago & Devasenathipathy, 2001).
    """
    kb = 1.3806e-23  # (J/K) Boltzmann constant
    # Stokes-Einstein diffusivity of the tracer particle
    diffusivity = kb * temperature / (3 * np.pi * viscosity * particle_diameter)
    velocity = U_estimated * u_scale
    return (1 / velocity) * np.sqrt(2 * diffusivity / dt)
def calc_random_piv_error(particle_image_diameter):
    """
    Random-error amplitude, proportional to the diameter of the displacement
    correlation peak (Westerweel et al., 2009).
    """
    c = 0.1  # proportionality constant from the reference
    # NOTE: the sqrt(2) factors cancel algebraically; kept so the formula
    # matches the form given in the reference.
    return c * np.sqrt(2) * particle_image_diameter / np.sqrt(2)
"numpy.sqrt",
"numpy.sinh",
"numpy.max",
"numpy.array",
"numpy.linspace",
"numpy.exp",
"numpy.vstack",
"numpy.concatenate",
"numpy.savetxt",
"numpy.cosh",
"matplotlib.rc",
"cycler.cycler",
"matplotlib.pyplot.tight_layout",
"numpy.sign",
"matplotlib.pyplot.subplots",
"numpy.round",
"m... | [((15600, 15638), 'numpy.array', 'np.array', (['electric_fields'], {'dtype': 'float'}), '(electric_fields, dtype=float)\n', (15608, 15638), True, 'import numpy as np\n'), ((15656, 15689), 'numpy.array', 'np.array', (['frequencys'], {'dtype': 'float'}), '(frequencys, dtype=float)\n', (15664, 15689), True, 'import numpy as np\n'), ((15708, 15740), 'numpy.array', 'np.array', (['dielectrics'], {'dtype': 'str'}), '(dielectrics, dtype=str)\n', (15716, 15740), True, 'import numpy as np\n'), ((15755, 15783), 'numpy.array', 'np.array', (['buffers'], {'dtype': 'str'}), '(buffers, dtype=str)\n', (15763, 15783), True, 'import numpy as np\n'), ((15796, 15824), 'numpy.array', 'np.array', (['UbyUo'], {'dtype': 'float'}), '(UbyUo, dtype=float)\n', (15804, 15824), True, 'import numpy as np\n'), ((15844, 15879), 'numpy.array', 'np.array', (['raw_uvel_max'], {'dtype': 'float'}), '(raw_uvel_max, dtype=float)\n', (15852, 15879), True, 'import numpy as np\n'), ((15918, 15972), 'numpy.array', 'np.array', (['uvel_brownian_error_quasisteady'], {'dtype': 'float'}), '(uvel_brownian_error_quasisteady, dtype=float)\n', (15926, 15972), True, 'import numpy as np\n'), ((16008, 16059), 'numpy.array', 'np.array', (['uvel_brownian_error_highfreq'], {'dtype': 'float'}), '(uvel_brownian_error_highfreq, dtype=float)\n', (16016, 16059), True, 'import numpy as np\n'), ((16076, 16108), 'numpy.array', 'np.array', (['raw_slope'], {'dtype': 'float'}), '(raw_slope, dtype=float)\n', (16084, 16108), True, 'import numpy as np\n'), ((16121, 16149), 'numpy.array', 'np.array', (['betas'], {'dtype': 'float'}), '(betas, dtype=float)\n', (16129, 16149), True, 'import numpy as np\n'), ((16163, 16192), 'numpy.array', 'np.array', (['deltas'], {'dtype': 'float'}), '(deltas, dtype=float)\n', (16171, 16192), True, 'import numpy as np\n'), ((16204, 16231), 'numpy.array', 'np.array', (['taus'], {'dtype': 'float'}), '(taus, dtype=float)\n', (16212, 16231), True, 'import numpy as np\n'), ((16244, 16272), 'numpy.array', 
'np.array', (['d_eps'], {'dtype': 'float'}), '(d_eps, dtype=float)\n', (16252, 16272), True, 'import numpy as np\n'), ((16285, 16313), 'numpy.array', 'np.array', (['d_pKa'], {'dtype': 'float'}), '(d_pKa, dtype=float)\n', (16293, 16313), True, 'import numpy as np\n'), ((16325, 16352), 'numpy.array', 'np.array', (['d_Ns'], {'dtype': 'float'}), '(d_Ns, dtype=float)\n', (16333, 16352), True, 'import numpy as np\n'), ((16367, 16397), 'numpy.array', 'np.array', (['d_thick'], {'dtype': 'float'}), '(d_thick, dtype=float)\n', (16375, 16397), True, 'import numpy as np\n'), ((16411, 16440), 'numpy.array', 'np.array', (['b_conc'], {'dtype': 'float'}), '(b_conc, dtype=float)\n', (16419, 16440), True, 'import numpy as np\n'), ((16457, 16489), 'numpy.array', 'np.array', (['b_conduct'], {'dtype': 'float'}), '(b_conduct, dtype=float)\n', (16465, 16489), True, 'import numpy as np\n'), ((16501, 16528), 'numpy.array', 'np.array', (['b_pH'], {'dtype': 'float'}), '(b_pH, dtype=float)\n', (16509, 16528), True, 'import numpy as np\n'), ((16547, 16581), 'numpy.array', 'np.array', (['b_viscosity'], {'dtype': 'float'}), '(b_viscosity, dtype=float)\n', (16555, 16581), True, 'import numpy as np\n'), ((16594, 16622), 'numpy.array', 'np.array', (['b_eps'], {'dtype': 'float'}), '(b_eps, dtype=float)\n', (16602, 16622), True, 'import numpy as np\n'), ((16637, 16667), 'numpy.array', 'np.array', (['b_debye'], {'dtype': 'float'}), '(b_debye, dtype=float)\n', (16645, 16667), True, 'import numpy as np\n'), ((16683, 16714), 'numpy.array', 'np.array', (['voltages'], {'dtype': 'float'}), '(voltages, dtype=float)\n', (16691, 16714), True, 'import numpy as np\n'), ((16740, 16781), 'numpy.array', 'np.array', (['electrode_spacings'], {'dtype': 'float'}), '(electrode_spacings, dtype=float)\n', (16748, 16781), True, 'import numpy as np\n'), ((17892, 17954), 'numpy.sqrt', 'np.sqrt', (['(eps_fluid * kb * T / (2 * (z ** 2 * Na * c) * e ** 2))'], {}), '(eps_fluid * kb * T / (2 * (z ** 2 * Na * c) * e ** 2))\n', 
(17899, 17954), True, 'import numpy as np\n'), ((28837, 28898), 'numpy.linspace', 'np.linspace', (['channel_start', 'bpe_start'], {'num': '(100)', 'endpoint': '(True)'}), '(channel_start, bpe_start, num=100, endpoint=True)\n', (28848, 28898), True, 'import numpy as np\n'), ((28911, 28967), 'numpy.linspace', 'np.linspace', (['bpe_start', 'bpe_stop'], {'num': '(100)', 'endpoint': '(True)'}), '(bpe_start, bpe_stop, num=100, endpoint=True)\n', (28922, 28967), True, 'import numpy as np\n'), ((28985, 29051), 'numpy.linspace', 'np.linspace', (['(bpe_stop + channel_step)', 'channel_stop'], {'endpoint': '(False)'}), '(bpe_stop + channel_step, channel_stop, endpoint=False)\n', (28996, 29051), True, 'import numpy as np\n'), ((29059, 29113), 'numpy.concatenate', 'np.concatenate', (['(L_pre_bpe, L_bpe, L_post_bpe)'], {'axis': '(1)'}), '((L_pre_bpe, L_bpe, L_post_bpe), axis=1)\n', (29073, 29113), True, 'import numpy as np\n'), ((31923, 32008), 'numpy.sqrt', 'np.sqrt', (['(2 * kb * temperature * dt / (3 * np.pi * viscosity * particle_diameter))'], {}), '(2 * kb * temperature * dt / (3 * np.pi * viscosity * particle_diameter)\n )\n', (31930, 32008), True, 'import numpy as np\n'), ((32480, 32648), 'numpy.sqrt', 'np.sqrt', (['(magnification ** 2 * particle_diameter ** 2 + 5.95 * (magnification + 1) **\n 2 * wavelength ** 2 * (index_of_refraction / (2 * numerical_aperture)) ** 2\n )'], {}), '(magnification ** 2 * particle_diameter ** 2 + 5.95 * (magnification +\n 1) ** 2 * wavelength ** 2 * (index_of_refraction / (2 *\n numerical_aperture)) ** 2)\n', (32487, 32648), True, 'import numpy as np\n'), ((6671, 6701), 'numpy.linspace', 'np.linspace', (['(0)', '(1 / f)'], {'num': '(100)'}), '(0, 1 / f, num=100)\n', (6682, 6701), True, 'import numpy as np\n'), ((16800, 17040), 'numpy.vstack', 'np.vstack', (['(electric_fields, frequencys, dielectrics, buffers, UbyUo, raw_uvel_max,\n raw_slope, betas, deltas, taus, d_eps, d_pKa, d_Ns, d_thick, b_conc,\n b_conduct, b_pH, b_viscosity, b_eps, 
b_debye, voltages, electrode_spacings)'], {}), '((electric_fields, frequencys, dielectrics, buffers, UbyUo,\n raw_uvel_max, raw_slope, betas, deltas, taus, d_eps, d_pKa, d_Ns,\n d_thick, b_conc, b_conduct, b_pH, b_viscosity, b_eps, b_debye, voltages,\n electrode_spacings))\n', (16809, 17040), True, 'import numpy as np\n'), ((17397, 17469), 'numpy.savetxt', 'np.savetxt', (['savePath', 'iceo_stats'], {'fmt': '"""%s"""', 'delimiter': '""","""', 'header': 'header'}), "(savePath, iceo_stats, fmt='%s', delimiter=',', header=header)\n", (17407, 17469), True, 'import numpy as np\n'), ((20618, 20635), 'numpy.cosh', 'np.cosh', (['(zeta / 2)'], {}), '(zeta / 2)\n', (20625, 20635), True, 'import numpy as np\n'), ((28297, 28329), 'numpy.sinh', 'np.sinh', (['(e * zeta / (2 * kb * T))'], {}), '(e * zeta / (2 * kb * T))\n', (28304, 28329), True, 'import numpy as np\n'), ((33054, 33092), 'numpy.sqrt', 'np.sqrt', (['(2 * diffusivity_particle / dt)'], {}), '(2 * diffusivity_particle / dt)\n', (33061, 33092), True, 'import numpy as np\n'), ((33385, 33395), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (33392, 33395), True, 'import numpy as np\n'), ((5465, 5512), 'numpy.round', 'np.round', (['(dx_brownian * 1000000.0 / dt * 0.2)', '(2)'], {}), '(dx_brownian * 1000000.0 / dt * 0.2, 2)\n', (5473, 5512), True, 'import numpy as np\n'), ((5501, 5589), 'numpy.round', 'np.round', (['(microns_to_pixels * dx_brownian * 1000000.0 / (dt * img_acq_rate) * 0.2)', '(2)'], {}), '(microns_to_pixels * dx_brownian * 1000000.0 / (dt * img_acq_rate) *\n 0.2, 2)\n', (5509, 5589), True, 'import numpy as np\n'), ((9732, 9752), 'numpy.max', 'np.max', (['u_slip_slope'], {}), '(u_slip_slope)\n', (9738, 9752), True, 'import numpy as np\n'), ((9904, 9947), 'matplotlib.rc', 'mpl.rc', (['"""lines"""'], {'linewidth': '(4)', 'linestyle': '"""-"""'}), "('lines', linewidth=4, linestyle='-')\n", (9910, 9947), True, 'import matplotlib as mpl\n'), ((9994, 10028), 'cycler.cycler', 'cycler', ([], {'color': "['r', 'g', 
'b', 'y']"}), "(color=['r', 'g', 'b', 'y'])\n", (10000, 10028), False, 'from cycler import cycler\n'), ((10054, 10106), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'sharex': '(True)', 'figsize': '(13, 10)'}), '(nrows=3, sharex=True, figsize=(13, 10))\n', (10066, 10106), True, 'import matplotlib.pyplot as plt\n'), ((12228, 12246), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12244, 12246), True, 'import matplotlib.pyplot as plt\n'), ((12259, 12269), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12267, 12269), True, 'import matplotlib.pyplot as plt\n'), ((13549, 13569), 'numpy.max', 'np.max', (['zeta_induced'], {}), '(zeta_induced)\n', (13555, 13569), True, 'import numpy as np\n'), ((13615, 13647), 'numpy.max', 'np.max', (['zeta_induced_quasisteady'], {}), '(zeta_induced_quasisteady)\n', (13621, 13647), True, 'import numpy as np\n'), ((13727, 13756), 'numpy.max', 'np.max', (['zeta_induced_highfreq'], {}), '(zeta_induced_highfreq)\n', (13733, 13756), True, 'import numpy as np\n'), ((13849, 13885), 'numpy.max', 'np.max', (['zeta_induced_Clamb_Cd_linear'], {}), '(zeta_induced_Clamb_Cd_linear)\n', (13855, 13885), True, 'import numpy as np\n'), ((13967, 14006), 'numpy.max', 'np.max', (['zeta_induced_Clamb_Cd_nonlinear'], {}), '(zeta_induced_Clamb_Cd_nonlinear)\n', (13973, 14006), True, 'import numpy as np\n'), ((14081, 14119), 'numpy.max', 'np.max', (['zeta_induced_total_capacitance'], {}), '(zeta_induced_total_capacitance)\n', (14087, 14119), True, 'import numpy as np\n'), ((14172, 14186), 'numpy.max', 'np.max', (['u_slip'], {}), '(u_slip)\n', (14178, 14186), True, 'import numpy as np\n'), ((14331, 14357), 'numpy.max', 'np.max', (['u_slip_quasisteady'], {}), '(u_slip_quasisteady)\n', (14337, 14357), True, 'import numpy as np\n'), ((14443, 14466), 'numpy.max', 'np.max', (['u_slip_highfreq'], {}), '(u_slip_highfreq)\n', (14449, 14466), True, 'import numpy as np\n'), ((14834, 14848), 'numpy.max', 'np.max', 
(['u_slip'], {}), '(u_slip)\n', (14840, 14848), True, 'import numpy as np\n'), ((18999, 19012), 'numpy.exp', 'np.exp', (['(w * t)'], {}), '(w * t)\n', (19005, 19012), True, 'import numpy as np\n'), ((27687, 27700), 'numpy.sign', 'np.sign', (['zeta'], {}), '(zeta)\n', (27694, 27700), True, 'import numpy as np\n'), ((33350, 33360), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (33357, 33360), True, 'import numpy as np\n'), ((22215, 22243), 'numpy.exp', 'np.exp', (['(-e * zeta / (kb * T))'], {}), '(-e * zeta / (kb * T))\n', (22221, 22243), True, 'import numpy as np\n'), ((22246, 22274), 'numpy.exp', 'np.exp', (['(-e * zeta / (kb * T))'], {}), '(-e * zeta / (kb * T))\n', (22252, 22274), True, 'import numpy as np\n'), ((27734, 27764), 'numpy.exp', 'np.exp', (['(-z * e * zeta / kb / T)'], {}), '(-z * e * zeta / kb / T)\n', (27740, 27764), True, 'import numpy as np\n'), ((27768, 27799), 'numpy.exp', 'np.exp', (['(-z * -e * zeta / kb / T)'], {}), '(-z * -e * zeta / kb / T)\n', (27774, 27799), True, 'import numpy as np\n')] |
import os
import numpy as np
from glob import glob
import skimage.measure as meas
from skimage.util import pad
import xml.etree.ElementTree as ET
from skimage import draw
from class_data import options, BaseData
# Maps a TCGA patient-id prefix (the part of a MoNuSAC sample filename before
# "-01Z") to the organ the tissue was taken from; consumed by get_organ() below.
mapping_dict = {
    # lung
    "TCGA-55-1594": "lung",
    "TCGA-69-7760": "lung",
    "TCGA-69-A59K": "lung",
    "TCGA-73-4668": "lung",
    "TCGA-78-7220": "lung",
    "TCGA-86-7713": "lung",
    "TCGA-86-8672": "lung",
    "TCGA-L4-A4E5": "lung",
    "TCGA-MP-A4SY": "lung",
    "TCGA-MP-A4T7": "lung",
    # kidney
    "TCGA-5P-A9K0": "kidney",
    "TCGA-B9-A44B": "kidney",
    "TCGA-B9-A8YI": "kidney",
    "TCGA-DW-7841": "kidney",
    "TCGA-EV-5903": "kidney",
    "TCGA-F9-A97G": "kidney",
    "TCGA-G7-A8LD": "kidney",
    "TCGA-MH-A560": "kidney",
    "TCGA-P4-AAVK": "kidney",
    "TCGA-SX-A7SR": "kidney",
    "TCGA-UZ-A9PO": "kidney",
    "TCGA-UZ-A9PU": "kidney",
    # breast
    "TCGA-A2-A0CV": "breast",
    "TCGA-A2-A0ES": "breast",
    "TCGA-B6-A0WZ": "breast",
    "TCGA-BH-A18T": "breast",
    "TCGA-D8-A1X5": "breast",
    "TCGA-E2-A154": "breast",
    "TCGA-E9-A22B": "breast",
    "TCGA-E9-A22G": "breast",
    "TCGA-EW-A6SD": "breast",
    "TCGA-S3-AA11": "breast",
    # prostate
    "TCGA-EJ-5495": "prostate",
    "TCGA-EJ-5505": "prostate",
    "TCGA-EJ-5517": "prostate",
    "TCGA-G9-6342": "prostate",
    "TCGA-G9-6499": "prostate",
    "TCGA-J4-A67Q": "prostate",
    "TCGA-J4-A67T": "prostate",
    "TCGA-KK-A59X": "prostate",
    "TCGA-KK-A6E0": "prostate",
    "TCGA-KK-A7AW": "prostate",
    "TCGA-V1-A8WL": "prostate",
    "TCGA-V1-A9O9": "prostate",
    "TCGA-X4-A8KQ": "prostate",
    "TCGA-YL-A9WY": "prostate",
}
def get_organ(path):
    """Return the organ for a sample path, keyed by its TCGA patient-id prefix."""
    filename = os.path.basename(path)
    patient_id, _, _ = filename.partition("-01Z")
    return mapping_dict[patient_id]
def square_padding(raw, gt, size):
    """
    Reflect-pad `raw` (H x W x C) and `gt` (H x W) so both spatial dimensions
    become a multiple of `size`.

    Padding is split as evenly as possible between the two sides of each
    axis (the extra pixel goes to the far side when the total is odd).
    Note: when a dimension is already a multiple of `size`, a full extra
    tile of padding is added (n_tiles + 1 tiles total) — behavior preserved
    from the original implementation.

    Returns the padded (raw, gt) pair.
    """
    x, y = gt.shape
    pad_width = []
    s_size = (size, size)
    for axe, val in enumerate([x, y]):
        n_tiles = val // s_size[axe]
        diff = s_size[axe] * (n_tiles + 1) - val
        m = diff / 2
        if m.is_integer():
            m = int(m)
            m = (m, m)
        else:
            m = int(m)
            m = (m, m + 1)
        pad_width.append(m)
    # BUGFIX: skimage.util.pad was a deprecated alias of numpy.pad and was
    # removed in scikit-image >= 0.19; numpy.pad is the drop-in replacement.
    raw = np.pad(raw, pad_width + [(0, 0)], mode="reflect")
    gt = np.pad(gt, pad_width, mode="reflect")
    return raw, gt
def xml_parser(xml_file_name, raw_img):
    """
    Build an instance-segmentation mask from an XML annotation file.

    Each polygonal "Region" in the XML is rasterized into `binary_mask`
    with a unique integer id (1, 2, 3, ...), so the result is an instance
    mask of shape (nx, ny) matching the first two dimensions of `raw_img`.
    """
    # nz (channel count) is unpacked but not used — mask is 2-D only
    nx, ny, nz = raw_img.shape
    tree = ET.parse(xml_file_name)
    root = tree.getroot()
    binary_mask = np.zeros(shape=(nx, ny))
    cell_count = 1  # running instance id written into the mask
    for k in range(len(root)):
        # NOTE(review): `label` is read here and possibly overwritten below,
        # but it is never used to fill the mask — only cell_count is. It looks
        # like leftover class-label handling; confirm before removing.
        label = [x.attrib["Name"] for x in root[k][0]]
        label = label[0]
        for child in root[k]:
            for x in child:
                r = x.tag
                if r == "Attribute":
                    label = x.attrib["Name"]
                if r == "Region":
                    # `regions` is re-created per Region, so regions[0] is
                    # always the polygon just parsed
                    regions = []
                    vertices = x[1]
                    coords = np.zeros((len(vertices), 2))
                    for i, vertex in enumerate(vertices):
                        coords[i][0] = vertex.attrib["X"]
                        coords[i][1] = vertex.attrib["Y"]
                    regions.append(coords)
                    vertex_row_coords = regions[0][:, 0]
                    vertex_col_coords = regions[0][:, 1]
                    # X/Y are swapped into (col, row) order for skimage.draw;
                    # pixels outside binary_mask.shape are clipped
                    row_coords, col_coords = draw.polygon(
                        vertex_col_coords, vertex_row_coords, binary_mask.shape
                    )
                    binary_mask[row_coords, col_coords] = cell_count
                    cell_count += 1
    return binary_mask
class monusac(BaseData):
    def generate_filename(self):
        """
        Yield (raw_path, gt_path, organ) triples, pairing each .tif image
        with its .xml annotation inside every TCGA-* sample directory.
        """
        sample_pattern = os.path.join(self.path, "TCGA-*")
        for sample_dir in glob(sample_pattern):
            organ = get_organ(sample_dir)
            tif_files = glob(os.path.join(sample_dir, "*.tif"))
            for raw_path in tif_files:
                gt_path = raw_path.replace(".tif", ".xml")
                yield raw_path, gt_path, organ
    def gt_read(self, filename, raw_img):
        """Read the ground-truth instance mask from an XML annotation file."""
        return xml_parser(filename, raw_img)
    def post_process(self, raw, gt):
        """
        Reflect-pad both arrays to a multiple of self.size, relabel the
        mask, and drop any alpha channel from the raw image.
        """
        raw, gt = square_padding(raw, gt, self.size)
        relabeled = meas.label(gt)
        return raw[:, :, :3], relabeled
def main():
    """Entry point: build the monusac dataset from command-line options."""
    opt = options()
    dataset = monusac(opt.path, opt.size, "monusac")
    dataset.create_dataset()
if __name__ == "__main__":
    main()
| [
"xml.etree.ElementTree.parse",
"os.path.join",
"skimage.util.pad",
"numpy.zeros",
"os.path.basename",
"skimage.measure.label",
"class_data.options",
"glob.glob",
"skimage.draw.polygon"
] | [((2154, 2200), 'skimage.util.pad', 'pad', (['raw', '(pad_width + [(0, 0)])'], {'mode': '"""reflect"""'}), "(raw, pad_width + [(0, 0)], mode='reflect')\n", (2157, 2200), False, 'from skimage.util import pad\n'), ((2210, 2244), 'skimage.util.pad', 'pad', (['gt', 'pad_width'], {'mode': '"""reflect"""'}), "(gt, pad_width, mode='reflect')\n", (2213, 2244), False, 'from skimage.util import pad\n'), ((2348, 2371), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_file_name'], {}), '(xml_file_name)\n', (2356, 2371), True, 'import xml.etree.ElementTree as ET\n'), ((2416, 2440), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nx, ny)'}), '(shape=(nx, ny))\n', (2424, 2440), True, 'import numpy as np\n'), ((4288, 4297), 'class_data.options', 'options', ([], {}), '()\n', (4295, 4297), False, 'from class_data import options, BaseData\n'), ((3676, 3709), 'os.path.join', 'os.path.join', (['self.path', '"""TCGA-*"""'], {}), "(self.path, 'TCGA-*')\n", (3688, 3709), False, 'import os\n'), ((3727, 3745), 'glob.glob', 'glob', (['file_pattern'], {}), '(file_pattern)\n', (3731, 3745), False, 'from glob import glob\n'), ((4216, 4230), 'skimage.measure.label', 'meas.label', (['gt'], {}), '(gt)\n', (4226, 4230), True, 'import skimage.measure as meas\n'), ((1659, 1681), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1675, 1681), False, 'import os\n'), ((3810, 3834), 'os.path.join', 'os.path.join', (['f', '"""*.tif"""'], {}), "(f, '*.tif')\n", (3822, 3834), False, 'import os\n'), ((3277, 3346), 'skimage.draw.polygon', 'draw.polygon', (['vertex_col_coords', 'vertex_row_coords', 'binary_mask.shape'], {}), '(vertex_col_coords, vertex_row_coords, binary_mask.shape)\n', (3289, 3346), False, 'from skimage import draw\n')] |
import os
import logging
import numpy as np
import pandas as pd
import torch
from torch_geometric.data import Data
from .graph import edge_normalization
from .data import Dictionary
# Configure the logging system once at import time.
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# NOTE(review): logging.getLogger() with no argument returns the ROOT logger;
# the usual convention is logging.getLogger(__name__) — confirm this is intended.
logger = logging.getLogger()
def sample_edge_uniform(n_triples, sample_size):
    """Draw `sample_size` distinct edge ids uniformly from range(n_triples)."""
    edge_ids = np.arange(n_triples)
    return np.random.choice(edge_ids, sample_size, replace=False)
def negative_sampling(pos_samples, num_entity, negative_rate):
    """
    Build negative triples by corrupting either the head or the tail of each
    positive triple with a uniformly sampled entity.

    Returns (samples, labels): the positives followed by negative_rate copies
    of corrupted triples, and a float32 label vector with 1 for positives
    and 0 for negatives.
    """
    n_pos = len(pos_samples)
    n_neg = n_pos * negative_rate
    neg_samples = np.tile(pos_samples, (negative_rate, 1))
    labels = np.zeros(n_pos * (negative_rate + 1), dtype=np.float32)
    labels[:n_pos] = 1
    # RNG call order matters for reproducibility: entities first, then the coin
    replacement = np.random.choice(num_entity, size=n_neg)
    coin = np.random.uniform(size=n_neg)
    corrupt_head = coin > 0.5
    corrupt_tail = ~corrupt_head
    neg_samples[corrupt_head, 0] = replacement[corrupt_head]
    neg_samples[corrupt_tail, 2] = replacement[corrupt_tail]
    return np.concatenate((pos_samples, neg_samples)), labels
def generate_sampled_graph_and_labels(triplets, sample_size, split_size, num_entity, num_rels, negative_rate):
    """
    Get training graph and signals.

    First sample `sample_size` edges uniformly from `triplets`, relabel the
    touched entities to compact ids, and generate negatives. Only a
    `split_size` fraction of the sampled edges is kept as graph structure
    (bi-directional); the rest serve as unseen positive samples.

    Returns a torch_geometric Data object carrying edge_index/edge_type/
    edge_norm plus the (samples, labels) training signal.
    """
    edges = sample_edge_uniform(len(triplets), sample_size)
    # Select sampled edges
    edges = triplets[edges]
    src, rel, dst = edges.transpose()
    # relabel entities to 0..len(uniq_entity)-1; `edges` becomes inverse indices
    uniq_entity, edges = np.unique((src, dst), return_inverse=True)
    src, dst = np.reshape(edges, (2, -1))
    relabeled_edges = np.stack((src, rel, dst)).transpose()
    # Negative sampling
    samples, labels = negative_sampling(relabeled_edges, len(uniq_entity), negative_rate)
    # further split graph, only half of the edges will be used as graph
    # structure, while the rest half is used as unseen positive samples
    # NOTE: the `split_size` parameter (a fraction) is rebound here to a count
    split_size = int(sample_size * split_size)
    graph_split_ids = np.random.choice(np.arange(sample_size),
                                        size=split_size, replace=False)
    src = torch.tensor(src[graph_split_ids], dtype = torch.long).contiguous()
    dst = torch.tensor(dst[graph_split_ids], dtype = torch.long).contiguous()
    rel = torch.tensor(rel[graph_split_ids], dtype = torch.long).contiguous()
    # Create bi-directional graph
    src, dst = torch.cat((src, dst)), torch.cat((dst, src))
    rel = torch.cat((rel, rel))
    edge_index = torch.stack((src, dst))
    edge_type = rel
    data = Data(edge_index = edge_index)
    data.entity = torch.from_numpy(uniq_entity)
    data.edge_type = edge_type
    data.edge_norm = edge_normalization(edge_type, edge_index, len(uniq_entity), num_rels)
    data.samples = torch.from_numpy(samples)
    data.labels = torch.from_numpy(labels)
    return data
def build_test_graph(num_nodes, num_rels, triplets):
    """
    Build the bi-directional evaluation graph over the full triple set,
    with edge normalization precomputed.
    """
    heads, rels, tails = triplets.transpose()
    heads = torch.from_numpy(heads)
    rels = torch.from_numpy(rels)
    tails = torch.from_numpy(tails)
    # append inverse edges so every relation is traversable both ways
    edge_src = torch.cat((heads, tails))
    edge_dst = torch.cat((tails, heads))
    edge_type = torch.cat((rels, rels))
    edge_index = torch.stack((edge_src, edge_dst))
    data = Data(edge_index=edge_index)
    data.entity = torch.from_numpy(np.arange(num_nodes))
    data.edge_type = edge_type
    data.edge_norm = edge_normalization(edge_type, edge_index, num_nodes, num_rels)
    return data
def load_split_data(result_dir, percentage=0.7):
    """
    Load entity/relation dictionaries and the triple table from
    `<result_dir>/graph`, then split the triples into train/valid sets.

    Parameters
    ----------
    result_dir : str
        Directory containing the `graph` sub-directory with
        relation2idx.json, entity2idx.json and head2relation2tail.csv.
    percentage : float
        Fraction of triples assigned to the training split (default 0.7).

    Returns
    -------
    (ent_dict, rel_dict, train_triplets, valid_triplets) where the triplet
    arrays are the underlying numpy values of the DataFrame slices.
    """
    ent_rel_dir = os.path.join(result_dir, "graph")
    logger.info("load data from {}".format(ent_rel_dir))
    load_relation2idx = os.path.join(ent_rel_dir, "relation2idx.json")
    load_entity2idx = os.path.join(ent_rel_dir, "entity2idx.json")
    load_head2relation2tail = os.path.join(ent_rel_dir, "head2relation2tail.csv")
    ent_dict = Dictionary.load(load_entity2idx)
    rel_dict = Dictionary.load(load_relation2idx)
    all_triplets = pd.read_csv(load_head2relation2tail)
    all_len = len(all_triplets)
    train_len = int(percentage * all_len)
    # BUGFIX: label-based .loc slicing is inclusive on BOTH ends, so the old
    # .loc[:train_len] / .loc[train_len:] put row `train_len` in both splits,
    # leaking one validation triple into training. iloc gives a disjoint split.
    train_triplets = all_triplets.iloc[:train_len, :]
    valid_triplets = all_triplets.iloc[train_len:, :]
    logger.info('num_entity: {}'.format(len(ent_dict)))
    logger.info('num_relation: {}'.format(len(rel_dict)))
    logger.info('num_train_triples: {}'.format(len(train_triplets)))
    logger.info('num_valid_triples: {}'.format(len(valid_triplets)))
    return ent_dict, rel_dict, train_triplets.values, valid_triplets.values
| [
"logging.basicConfig",
"numpy.tile",
"logging.getLogger",
"numpy.reshape",
"numpy.unique",
"pandas.read_csv",
"numpy.random.choice",
"torch.stack",
"os.path.join",
"torch.from_numpy",
"torch.cat",
"numpy.zeros",
"numpy.stack",
"torch.tensor",
"numpy.concatenate",
"numpy.random.uniform"... | [((184, 291), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (203, 291), False, 'import logging\n'), ((299, 318), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (316, 318), False, 'import logging\n'), ((438, 458), 'numpy.arange', 'np.arange', (['n_triples'], {}), '(n_triples)\n', (447, 458), True, 'import numpy as np\n'), ((470, 525), 'numpy.random.choice', 'np.random.choice', (['all_edges', 'sample_size'], {'replace': '(False)'}), '(all_edges, sample_size, replace=False)\n', (486, 525), True, 'import numpy as np\n'), ((696, 736), 'numpy.tile', 'np.tile', (['pos_samples', '(negative_rate, 1)'], {}), '(pos_samples, (negative_rate, 1))\n', (703, 736), True, 'import numpy as np\n'), ((750, 813), 'numpy.zeros', 'np.zeros', (['(size_of_batch * (negative_rate + 1))'], {'dtype': 'np.float32'}), '(size_of_batch * (negative_rate + 1), dtype=np.float32)\n', (758, 813), True, 'import numpy as np\n'), ((859, 909), 'numpy.random.choice', 'np.random.choice', (['num_entity'], {'size': 'num_to_generate'}), '(num_entity, size=num_to_generate)\n', (875, 909), True, 'import numpy as np\n'), ((924, 963), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'num_to_generate'}), '(size=num_to_generate)\n', (941, 963), True, 'import numpy as np\n'), ((1628, 1670), 'numpy.unique', 'np.unique', (['(src, dst)'], {'return_inverse': '(True)'}), '((src, dst), return_inverse=True)\n', (1637, 1670), True, 'import numpy as np\n'), ((1686, 1712), 'numpy.reshape', 'np.reshape', (['edges', '(2, -1)'], {}), '(edges, (2, -1))\n', (1696, 1712), True, 'import numpy as np\n'), ((2554, 2575), 'torch.cat', 'torch.cat', (['(rel, rel)'], {}), '((rel, rel))\n', (2563, 2575), False, 'import torch\n'), ((2594, 2617), 'torch.stack', 'torch.stack', (['(src, dst)'], {}), 
'((src, dst))\n', (2605, 2617), False, 'import torch\n'), ((2650, 2677), 'torch_geometric.data.Data', 'Data', ([], {'edge_index': 'edge_index'}), '(edge_index=edge_index)\n', (2654, 2677), False, 'from torch_geometric.data import Data\n'), ((2698, 2727), 'torch.from_numpy', 'torch.from_numpy', (['uniq_entity'], {}), '(uniq_entity)\n', (2714, 2727), False, 'import torch\n'), ((2869, 2894), 'torch.from_numpy', 'torch.from_numpy', (['samples'], {}), '(samples)\n', (2885, 2894), False, 'import torch\n'), ((2913, 2937), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (2929, 2937), False, 'import torch\n'), ((3060, 3081), 'torch.from_numpy', 'torch.from_numpy', (['src'], {}), '(src)\n', (3076, 3081), False, 'import torch\n'), ((3092, 3113), 'torch.from_numpy', 'torch.from_numpy', (['rel'], {}), '(rel)\n', (3108, 3113), False, 'import torch\n'), ((3124, 3145), 'torch.from_numpy', 'torch.from_numpy', (['dst'], {}), '(dst)\n', (3140, 3145), False, 'import torch\n'), ((3217, 3238), 'torch.cat', 'torch.cat', (['(rel, rel)'], {}), '((rel, rel))\n', (3226, 3238), False, 'import torch\n'), ((3257, 3280), 'torch.stack', 'torch.stack', (['(src, dst)'], {}), '((src, dst))\n', (3268, 3280), False, 'import torch\n'), ((3313, 3340), 'torch_geometric.data.Data', 'Data', ([], {'edge_index': 'edge_index'}), '(edge_index=edge_index)\n', (3317, 3340), False, 'from torch_geometric.data import Data\n'), ((3598, 3631), 'os.path.join', 'os.path.join', (['result_dir', '"""graph"""'], {}), "(result_dir, 'graph')\n", (3610, 3631), False, 'import os\n'), ((3712, 3758), 'os.path.join', 'os.path.join', (['ent_rel_dir', '"""relation2idx.json"""'], {}), "(ent_rel_dir, 'relation2idx.json')\n", (3724, 3758), False, 'import os\n'), ((3780, 3824), 'os.path.join', 'os.path.join', (['ent_rel_dir', '"""entity2idx.json"""'], {}), "(ent_rel_dir, 'entity2idx.json')\n", (3792, 3824), False, 'import os\n'), ((3854, 3905), 'os.path.join', 'os.path.join', (['ent_rel_dir', 
'"""head2relation2tail.csv"""'], {}), "(ent_rel_dir, 'head2relation2tail.csv')\n", (3866, 3905), False, 'import os\n'), ((4027, 4063), 'pandas.read_csv', 'pd.read_csv', (['load_head2relation2tail'], {}), '(load_head2relation2tail)\n', (4038, 4063), True, 'import pandas as pd\n'), ((1104, 1146), 'numpy.concatenate', 'np.concatenate', (['(pos_samples, neg_samples)'], {}), '((pos_samples, neg_samples))\n', (1118, 1146), True, 'import numpy as np\n'), ((2119, 2141), 'numpy.arange', 'np.arange', (['sample_size'], {}), '(sample_size)\n', (2128, 2141), True, 'import numpy as np\n'), ((2499, 2520), 'torch.cat', 'torch.cat', (['(src, dst)'], {}), '((src, dst))\n', (2508, 2520), False, 'import torch\n'), ((2522, 2543), 'torch.cat', 'torch.cat', (['(dst, src)'], {}), '((dst, src))\n', (2531, 2543), False, 'import torch\n'), ((3162, 3183), 'torch.cat', 'torch.cat', (['(src, dst)'], {}), '((src, dst))\n', (3171, 3183), False, 'import torch\n'), ((3185, 3206), 'torch.cat', 'torch.cat', (['(dst, src)'], {}), '((dst, src))\n', (3194, 3206), False, 'import torch\n'), ((3378, 3398), 'numpy.arange', 'np.arange', (['num_nodes'], {}), '(num_nodes)\n', (3387, 3398), True, 'import numpy as np\n'), ((1735, 1760), 'numpy.stack', 'np.stack', (['(src, rel, dst)'], {}), '((src, rel, dst))\n', (1743, 1760), True, 'import numpy as np\n'), ((2225, 2277), 'torch.tensor', 'torch.tensor', (['src[graph_split_ids]'], {'dtype': 'torch.long'}), '(src[graph_split_ids], dtype=torch.long)\n', (2237, 2277), False, 'import torch\n'), ((2303, 2355), 'torch.tensor', 'torch.tensor', (['dst[graph_split_ids]'], {'dtype': 'torch.long'}), '(dst[graph_split_ids], dtype=torch.long)\n', (2315, 2355), False, 'import torch\n'), ((2381, 2433), 'torch.tensor', 'torch.tensor', (['rel[graph_split_ids]'], {'dtype': 'torch.long'}), '(rel[graph_split_ids], dtype=torch.long)\n', (2393, 2433), False, 'import torch\n')] |
import os, pandas as pd, numpy as np, DataBase, gams
from dreamtools.gamY import Precompiler
from DB2Gams_l2 import gams_model_py, gams_settings
def append_index_with_1dindex(index1, index2):
    """
    Cartesian product of index1 (Index or MultiIndex) with the 1-D index2,
    with index2 appended as the LAST level.

    If index1 is a sparse MultiIndex, its sparsity is preserved — only the
    existing combinations are crossed with index2.
    """
    if isinstance(index1, pd.MultiIndex):
        tuples = [left + (right,) for left in index1 for right in index2]
    else:
        tuples = [(left, right) for left in index1 for right in index2]
    return pd.MultiIndex.from_tuples(tuples, names=index1.names + index2.names)
def prepend_index_with_1dindex(index1, index2):
    """
    Cartesian product of index1 (Index or MultiIndex) with the 1-D index2,
    with index2 prepended as the FIRST level (mirror of append_index_with_1dindex).
    """
    if isinstance(index1, pd.MultiIndex):
        tuples = [(right,) + left for left in index1 for right in index2]
    else:
        tuples = [(right, left) for left in index1 for right in index2]
    return pd.MultiIndex.from_tuples(tuples, names=index2.names + index1.names)
def add_grid_to_series(vals_init, vals_end, linspace_index, name, gridtype='linear', phi=1, scalar=False):
    """
    Expand start/end values into a grid along `linspace_index`.

    vals_init and vals_end are pandas Series on a common index (or scalars
    when scalar=True). For each element i, a gridtype-spaced grid from
    vals_init[i] to vals_end[i] with len(linspace_index) points is produced;
    the result is a Series indexed by (original index) x linspace_index.

    gridtype: 'linear' (np.linspace), 'rust' (rust_space) or 'pol' (pol_space);
    phi is the curvature parameter for the latter two.

    Raises
    ------
    ValueError
        If gridtype is unknown (previously this surfaced as an
        UnboundLocalError on apply_grid).
    """
    if gridtype == 'linear':
        apply_grid = lambda x0, xN, N: np.linspace(x0, xN, num=N)
    elif gridtype == 'rust':
        apply_grid = lambda x0, xN, N: rust_space(x0, xN, N, phi)
    elif gridtype == 'pol':
        apply_grid = lambda x0, xN, N: pol_space(x0, xN, N, phi)
    else:
        raise ValueError("Unknown gridtype {!r}; expected 'linear', 'rust' or 'pol'".format(gridtype))
    n_points = len(linspace_index)
    if scalar:
        return pd.Series(apply_grid(vals_init, vals_end, n_points), index=linspace_index, name=name)
    pieces = []
    for i in range(len(vals_init)):
        grid = apply_grid(vals_init.values[i], vals_end.values[i], n_points)
        # index restricted to the i-th label, crossed with the grid index
        row_index = vals_init.index[vals_init.index.isin([vals_init.index[i]])]
        pieces.append(pd.Series(grid, index=append_index_with_1dindex(row_index, linspace_index), name=name))
    return pd.concat(pieces)
def add_linspace_to_series(vals_init,vals_end,linspace_index,name):
    """Linearly interpolate each (init, end) pair over linspace_index and stack the pieces."""
    n_points = len(linspace_index)
    pieces = []
    for i in range(len(vals_init)):
        row_index = vals_init.index[vals_init.index.isin([vals_init.index[i]])]
        grid = np.linspace(vals_init.values[i], vals_end.values[i], num=n_points)
        pieces.append(pd.Series(grid,
                                index=append_index_with_1dindex(row_index, linspace_index),
                                name=name))
    return pd.concat(pieces)
def rust_space(x0,xN,N,phi):
    """Rust-style grid from x0 to xN with N points; larger phi clusters points near xN."""
    grid = np.empty(N)
    grid[0] = x0
    for step in range(1, N):
        # Original loop index i ran 2..N; remaining = N - i + 1 = N - step.
        remaining = N - step
        grid[step] = grid[step - 1] + (xN - grid[step - 1]) / remaining ** phi
    return grid
def pol_space(x0,xN,N,phi):
    """Polynomial grid from x0 to xN with N points; phi is the polynomial power."""
    fractions = [(step / (N - 1)) ** phi for step in range(N)]
    return np.array([x0 + (xN - x0) * frac for frac in fractions])
def end_w_y(x,y):
    """Return x, appending suffix y when x does not already end with it."""
    return x if x.endswith(y) else x + y
def end_w_gdx(x):
    """Ensure *x* carries the GAMS data-exchange extension '.gdx'."""
    suffix = '.gdx'
    return end_w_y(x, suffix)
def end_w_gms(x):
    """Ensure *x* carries the GAMS source extension '.gms'."""
    suffix = '.gms'
    return end_w_y(x, suffix)
def end_w_gmy(x):
    """Ensure *x* carries the precompiled gamY extension '.gmy'."""
    suffix = '.gmy'
    return end_w_y(x, suffix)
def nl(var,loop_name,subset=None):
    """Name for the loop-version of *var*; append '_subset' when subset is given."""
    stem = var + '_' + loop_name
    if subset is None:
        return stem
    return stem + '_subset'
def sneaky_db(db0,db_star,diff=False,shock_name='shock',n_steps=10,loop_name='l1',update_variables='all',clean_up = True, gridtype='linear',phi=1,error=1e-11):
    """
    Build a shock database interpolating variables from db0 towards db_star
    over n_steps loop elements.

    Returns a tuple (shock_db, settings_dict) where the dict echoes the
    arguments used. With diff=True, vector variables keep only entries whose
    initial/terminal values differ by more than `error`.
    NOTE(review): `clean_up` is accepted and echoed back but never acted on here.
    """
    shock_db = DataBase.GPM_database(workspace=db0.workspace,alias=db0.get('alias_'),**{'name': shock_name})
    # Loop set labels: e.g. 'l1_1', ..., 'l1_<n_steps>'.
    shock_db[loop_name] = loop_name+'_'+pd.Index(range(1,n_steps+1),name=loop_name).astype(str)
    if update_variables=='all':
        update_variables = [var for var in db0.variables_flat if var in db_star.variables_flat];
    # Vector variables: per-element grid from db0 value to db_star value.
    for var in set(update_variables).intersection(set(db0.variables['variables'])):
        common_index = db_star.get(var).index.intersection(db0.get(var).index)
        symbol_star,symbol0 = db_star.get(var)[db_star[var].index.isin(common_index)], db0.get(var)[db0[var].index.isin(common_index)]
        if diff is True:
            # Keep only entries that actually change between db0 and db_star.
            common_index = symbol_star[abs(symbol_star-symbol0)>error].index
            symbol_star,symbol0 = symbol_star[symbol_star.index.isin(common_index)], symbol0[symbol0.index.isin(common_index)]
        if not symbol_star.empty:
            shock_db[nl(var,loop_name,subset=True)] = symbol_star.index
            shock_db[nl(var,loop_name)] = DataBase.gpy_symbol(add_grid_to_series(symbol0.sort_index(), symbol_star.sort_index(), shock_db.get(loop_name), nl(var,loop_name),gridtype=gridtype,phi=phi),**{'gtype': 'parameter'})
    # Scalar variables: one grid over the loop set per variable.
    for var in set(update_variables).intersection(set(db0.variables['scalar_variables'])):
        if diff is True and (abs(db0.get(var)-db_star.get(var))>error):
            # NOTE(review): this skips scalars whose values DO differ, which is
            # the opposite of the vector-variable filter above — confirm intended.
            pass
        else:
            shock_db[nl(var,loop_name,subset=True)] = shock_db.get(loop_name)
            shock_db[nl(var,loop_name)] = DataBase.gpy_symbol(add_grid_to_series(db0.get(var),db_star.get(var),shock_db.get(loop_name),nl(var,loop_name),gridtype=gridtype,phi=phi,scalar=True),**{'gtype':'parameter'})
    shock_db.update_all_sets(clean_alias=True)
    shock_db.merge_internal()
    return shock_db,{'shock_name': shock_name, 'n_steps': n_steps, 'loop_name': loop_name, 'update_variables': update_variables, 'clean_up': clean_up, 'gridtype': gridtype, 'phi': phi}
def simple_shock_db(vals_target,db0,n_steps=10,shock_name='shock',loop_name='l1',gridtype='linear',phi=1):
    """
    Convenience wrapper around sneaky_db for a single target series.

    Builds a temporary terminal database holding only *vals_target* (a pandas
    series whose .name identifies the variable) and returns the interpolating
    shock database produced by sneaky_db.
    """
    # Removed the dead local `loop = pd.Index(...)` — it was computed and never
    # used; sneaky_db builds its own loop set from n_steps/loop_name.
    db_star = DataBase.GPM_database(workspace=db0.workspace,**{'name':shock_name})
    db_star[vals_target.name] = vals_target
    shock_db = sneaky_db(db0,db_star,shock_name=shock_name,n_steps=n_steps,loop_name=loop_name,gridtype=gridtype,phi=phi)[0]
    return shock_db
class AddShocks:
    """
    Class that includes various ways to write gams-files that adds shocks to a GAMS model.

    Code fragments are accumulated in self.write_components and joined by the
    'text' property; the UEVAS_* methods implement the
    update-exogenous-variables-and-solve pattern.
    """
    def __init__(self,name,shock_db,loop_name,work_folder=None,prefix='sol_'):
        self.name = name # name of model to 'solve' in loop statement.
        self.shock_gm = gams_model_py(gsettings=gams_settings(work_folder=work_folder)) # gams_model_py class with information on shocks.
        self.shock_gm.settings.add_database(shock_db)
        self.loop_name = loop_name # name of mapping to loop over.
        self.loop_text = "" # text to write inside loop.
        self.prefix=prefix # prefix used in UEVAS part.
        self.write_components = {} # components used to write 'text'.
    def WriteResolve(self,type_='CNS'):
        """Return the gams solve statement used inside the loop body."""
        return f"solve {self.name} using {type_};\n"
    @property
    def text(self):
        """
        Return loop state with current state of attributes.
        """
        # Fragments are joined in insertion order (sets, pars, ..., loop).
        return ' '.join([self.write_components[x] for x in self.write_components])
    def write_sets(self):
        """
        Write gams code for declaring loop-sets, and loading in values form database in self.shock_gm.database.
        """
        self.write_components['sets'] = (self.shock_gm.write_sets()+
                                        self.shock_gm.write_aliased_sets()+
                                        self.shock_gm.write_sets_other()+
                                        self.shock_gm.write_sets_load(self.shock_gm.database.name))
        return self.write_components['sets']
    def write_pars(self):
        """
        Write gams code for declaring parameters and load in values.
        """
        self.write_components['pars'] = (self.shock_gm.write_parameters()+
                                        self.shock_gm.write_parameters_load(self.shock_gm.database.name))
        return self.write_components['pars']
    def write_loop_text(self):
        """
        Write the loop text using the database with loop information + text from 'loop_text'.
        """
        # The $-condition restricts the loop when the loop symbol writes a
        # subset expression different from the plain set name.
        self.write_components['loop'] = """loop( ({sets}){cond}, {loop})
        """.format( sets = ', '.join(self.shock_gm.database[self.loop_name].index.names),
                    cond = '$('+self.shock_gm.database[self.loop_name].write()+')' if self.shock_gm.database[self.loop_name].write()!=self.loop_name else '',
                    loop = self.loop_text)
        return self.write_components['loop']
    def UpdateExoVarsAndSolve(self,model,model_name=None):
        """
        (Shorthand: UEVAS, could in principle be a class.)
        Write a type of 'loop-text' that performs the following steps:
        (1) Update value of exogenous variable,
        (2) Resolve model,
        (3) Store solution in database.
        """
        self.model = model
        self.name = self.model.settings.name+'_'+self.model.settings.state if model_name is None else model_name
        self.UEVAS = {'sol': {}, 'adj': {}}
    @property
    def UEVAS_text(self):
        """Assemble the full UEVAS gams program from the registered components."""
        self.write_components = {}
        self.write_sets()
        self.write_pars()
        if len(self.UEVAS['sol'])>0:
            self.UEVAS_WritePGroup()
        self.loop_text = self.UEVAS_UpdateExoVars()+self.WriteResolve()+self.UEVAS_WriteStoreSol()
        self.write_loop_text()
        return self.text
    def UEVAS_2gmy(self,file_name):
        """Write the UEVAS program to <file_name>.gms and a precompiled .gmy version."""
        with open(end_w_gms(file_name),"w") as file:
            file.write(self.UEVAS_text)
        with open(end_w_gmy(file_name),"w") as file:
            file.write(Precompiler(end_w_gms(file_name))())
        # os.remove(end_w_gms(file_name))
        self.gmy = end_w_gmy(file_name)
        self.gms = end_w_gms(file_name)
    def UEVAS_var2sol(self,var,loop_dom,conditions=None):
        """Register variable `var` to be stored (per loop element) in a solution parameter."""
        self.UEVAS['sol'][DataBase.return_version(self.prefix+var,self.UEVAS['sol'])] = {'dom': f"[{', '.join(self.shock_gm.database.get(loop_dom).names+self.model.out_db[var].index.names)}]",
                                                                                             'cond': "" if conditions is None else f"$({conditions})",
                                                                                             'var': var}
    def UEVAS_WritePGroup(self):
        """Declare one gams parameter per registered solution variable."""
        self.write_components['UEVAS_sol'] = 'parameter\n'
        for x in self.UEVAS['sol']:
            self.write_components['UEVAS_sol'] += f"\t{x}{self.UEVAS['sol'][x]['dom']}\n" # add conditionals to param? {self.UEVAS['sol'][x]['cond']}
        self.write_components['UEVAS_sol'] += ';\n\n'
    def UEVAS_WriteStoreSol(self):
        """Gams statements copying solved levels (.l) into the solution parameters."""
        out_str = ""
        for x in self.UEVAS['sol']:
            out_str += "{solpar} = {solvar};\n".format(
                solpar = x+self.UEVAS['sol'][x]['dom']+self.UEVAS['sol'][x]['cond'],
                solvar = (self.model.out_db[self.UEVAS['sol'][x]['var']].write(l='.l')))
        out_str += '\n'
        return out_str
    def UEVAS_adjVar(self,var,par,conditions=None,overwrite=False):
        """Register exogenous variable `var` to be fixed (.fx) to parameter `par` each pass."""
        # NOTE(review): `overwrite` is accepted but currently unused.
        self.UEVAS['adj'][DataBase.return_version(var,self.UEVAS['adj'])] = {'varname': var, 'par': par, 'cond': conditions}
    def UEVAS_UpdateExoVars(self):
        """Gams statements fixing the registered exogenous variables to their parameters."""
        out_str = ""
        for x in self.UEVAS['adj']:
            out_str += "\t{var} = {par};\n".format(
                var = self.model.out_db[self.UEVAS['adj'][x]['varname']].write(conditions=self.UEVAS['adj'][x]['cond'],l='.fx'),
                par = self.shock_gm.database[self.UEVAS['adj'][x]['par']].write())
        out_str += '\n\n'
        return out_str
| [
"DataBase.return_version",
"DataBase.GPM_database",
"numpy.linspace",
"numpy.empty",
"pandas.MultiIndex.from_tuples",
"DB2Gams_l2.gams_settings"
] | [((2403, 2414), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (2411, 2414), True, 'import os, pandas as pd, numpy as np, DataBase, gams\n'), ((5078, 5148), 'DataBase.GPM_database', 'DataBase.GPM_database', ([], {'workspace': 'db0.workspace'}), "(workspace=db0.workspace, **{'name': shock_name})\n", (5099, 5148), False, 'import os, pandas as pd, numpy as np, DataBase, gams\n'), ((487, 597), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['[(a + (b,)) for a in index1 for b in index2]'], {'names': '(index1.names + index2.names)'}), '([(a + (b,)) for a in index1 for b in index2],\n names=index1.names + index2.names)\n', (512, 597), True, 'import os, pandas as pd, numpy as np, DataBase, gams\n'), ((628, 735), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['[(a, b) for a in index1 for b in index2]'], {'names': '(index1.names + index2.names)'}), '([(a, b) for a in index1 for b in index2], names=\n index1.names + index2.names)\n', (653, 735), True, 'import os, pandas as pd, numpy as np, DataBase, gams\n'), ((783, 893), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['[((b,) + a) for a in index1 for b in index2]'], {'names': '(index2.names + index1.names)'}), '([((b,) + a) for a in index1 for b in index2],\n names=index2.names + index1.names)\n', (808, 893), True, 'import os, pandas as pd, numpy as np, DataBase, gams\n'), ((924, 1031), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['[(b, a) for a in index1 for b in index2]'], {'names': '(index2.names + index1.names)'}), '([(b, a) for a in index1 for b in index2], names=\n index2.names + index1.names)\n', (949, 1031), True, 'import os, pandas as pd, numpy as np, DataBase, gams\n'), ((1456, 1482), 'numpy.linspace', 'np.linspace', (['x0', 'xN'], {'num': 'N'}), '(x0, xN, num=N)\n', (1467, 1482), True, 'import os, pandas as pd, numpy as np, DataBase, gams\n'), ((8571, 8632), 'DataBase.return_version', 'DataBase.return_version', (['(self.prefix + var)', 
"self.UEVAS['sol']"], {}), "(self.prefix + var, self.UEVAS['sol'])\n", (8594, 8632), False, 'import os, pandas as pd, numpy as np, DataBase, gams\n'), ((9566, 9613), 'DataBase.return_version', 'DataBase.return_version', (['var', "self.UEVAS['adj']"], {}), "(var, self.UEVAS['adj'])\n", (9589, 9613), False, 'import os, pandas as pd, numpy as np, DataBase, gams\n'), ((5626, 5664), 'DB2Gams_l2.gams_settings', 'gams_settings', ([], {'work_folder': 'work_folder'}), '(work_folder=work_folder)\n', (5639, 5664), False, 'from DB2Gams_l2 import gams_model_py, gams_settings\n')] |
import numpy as np
import matplotlib.pyplot as plt
def plot_line(ax, w):
    """Plot the line y = w[0]*x + w[1] on *ax* for x in [-5, 5].

    w is a length-2 weight vector (slope, intercept); the design matrix has a
    constant column of ones for the bias term.
    """
    design = np.array([[-5.0, 1.0],
                       [5.0, 1.0]])
    heights = w.dot(design.T)
    ax.plot(design[:, 0], heights)
# create prior: zero-mean Gaussian over (slope, intercept) with covariance tau
tau = 1.0*np.eye(2)
w_0 = np.zeros((2, 1))
# sample from prior
n_samples = 100
w_samp = np.random.multivariate_normal(w_0.flatten(), tau, size=n_samples)
# create plot: one line per sampled weight vector
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
for i in range(0, w_samp.shape[0]):
    plot_line(ax, w_samp[i, :])
# save fig
plt.tight_layout()
#plt.savefig(path, transpa)
plt.show()
"numpy.eye",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show"
] | [((285, 301), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (293, 301), True, 'import numpy as np\n'), ((436, 463), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (446, 463), True, 'import matplotlib.pyplot as plt\n'), ((571, 589), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (587, 589), True, 'import matplotlib.pyplot as plt\n'), ((618, 628), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (626, 628), True, 'import matplotlib.pyplot as plt\n'), ((99, 115), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (107, 115), True, 'import numpy as np\n'), ((269, 278), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (275, 278), True, 'import numpy as np\n')] |
##############################
# Generate risk distribution #
##############################
import numpy as np
import pandas as pd
from scipy.stats import norm
from pathlib import Path
import os
# Make a new directory
PATH = Path('...')  # NOTE(review): placeholder project root — must be set before running
SAVE_PATH = PATH / 'data/processed/a_risk/'
os.makedirs(SAVE_PATH, exist_ok=True)
# Parameters of the risk distribution on the log scale; population/case means
# are offset by -variance/2 and +variance/2 respectively.
variance = 0.68
sd = np.sqrt(variance)
pop_mean = -(variance/2)
case_mean = variance/2
# 10 year absolute risk
df = pd.read_excel(f'{PATH}/data/processed/devcan_absoluterisk.xlsx', header=2)
df = df.rename(columns=({'90+': 91}))
# Get the appropriate 10-year absolute risk value from devcan
absrisk = []
for row in np.arange(0, 81):
    # Row index is current age; column `row+10` holds the 10-year-ahead risk.
    absrisk.append(df.loc[row, row+10])
AR = pd.DataFrame(absrisk)
AR = AR.reset_index().rename(columns={'index': 'age', 0: '10yr_AR'})
AR = AR[45:80]  # restrict to ages 45-79
# Create reference absolute risks to compare each age against
reference_AR = [0.02, 0.025, 0.03, 0.035, 0.04, 0.045, 0.05, 0.055,
                0.06, 0.065, 0.07, 0.075, 0.08, 0.085, 0.09, 0.095, 0.1]
# Calculate RR by age, using 10 year AR threshold of that of the reference year
for value in reference_AR:
    # Relative risk needed at each age for the 10-year AR to reach `value`.
    rr = np.log(1-value) / np.log(1-AR['10yr_AR'])
    log_rr = np.log(rr)
    p_above_threshold = 1-norm.cdf(log_rr, pop_mean, sd)
    p_not_above_threshold = 1-p_above_threshold
    p_case = 1-norm.cdf(log_rr, case_mean, sd)
    p_noncase = 1-p_case
    rr_low = p_noncase / p_not_above_threshold
    rr_high = p_case / p_above_threshold
    # One output file per reference threshold, named by threshold in percent.
    a_risk = pd.DataFrame({
        'age': np.arange(45, 80),
        '10yr_AR': AR['10yr_AR'],
        'rr': rr,
        'log_rr': log_rr,
        'p_above_threshold': p_above_threshold,
        'p_case': p_case,
        'rr_low': rr_low,
        'rr_high': rr_high
    })
    a_risk.to_csv(f'{SAVE_PATH}/a_risk_{str(np.round(value*100, 2))}.csv')
| [
"numpy.sqrt",
"os.makedirs",
"pathlib.Path",
"numpy.round",
"numpy.log",
"pandas.read_excel",
"pandas.DataFrame",
"scipy.stats.norm.cdf",
"numpy.arange"
] | [((227, 238), 'pathlib.Path', 'Path', (['"""..."""'], {}), "('...')\n", (231, 238), False, 'from pathlib import Path\n'), ((283, 320), 'os.makedirs', 'os.makedirs', (['SAVE_PATH'], {'exist_ok': '(True)'}), '(SAVE_PATH, exist_ok=True)\n', (294, 320), False, 'import os\n'), ((356, 373), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (363, 373), True, 'import numpy as np\n'), ((452, 526), 'pandas.read_excel', 'pd.read_excel', (['f"""{PATH}/data/processed/devcan_absoluterisk.xlsx"""'], {'header': '(2)'}), "(f'{PATH}/data/processed/devcan_absoluterisk.xlsx', header=2)\n", (465, 526), True, 'import pandas as pd\n'), ((652, 668), 'numpy.arange', 'np.arange', (['(0)', '(81)'], {}), '(0, 81)\n', (661, 668), True, 'import numpy as np\n'), ((716, 737), 'pandas.DataFrame', 'pd.DataFrame', (['absrisk'], {}), '(absrisk)\n', (728, 737), True, 'import pandas as pd\n'), ((1199, 1209), 'numpy.log', 'np.log', (['rr'], {}), '(rr)\n', (1205, 1209), True, 'import numpy as np\n'), ((1144, 1161), 'numpy.log', 'np.log', (['(1 - value)'], {}), '(1 - value)\n', (1150, 1161), True, 'import numpy as np\n'), ((1162, 1187), 'numpy.log', 'np.log', (["(1 - AR['10yr_AR'])"], {}), "(1 - AR['10yr_AR'])\n", (1168, 1187), True, 'import numpy as np\n'), ((1236, 1266), 'scipy.stats.norm.cdf', 'norm.cdf', (['log_rr', 'pop_mean', 'sd'], {}), '(log_rr, pop_mean, sd)\n', (1244, 1266), False, 'from scipy.stats import norm\n'), ((1330, 1361), 'scipy.stats.norm.cdf', 'norm.cdf', (['log_rr', 'case_mean', 'sd'], {}), '(log_rr, case_mean, sd)\n', (1338, 1361), False, 'from scipy.stats import norm\n'), ((1519, 1536), 'numpy.arange', 'np.arange', (['(45)', '(80)'], {}), '(45, 80)\n', (1528, 1536), True, 'import numpy as np\n'), ((1795, 1819), 'numpy.round', 'np.round', (['(value * 100)', '(2)'], {}), '(value * 100, 2)\n', (1803, 1819), True, 'import numpy as np\n')] |
from scipy.optimize import minimize
import numpy as np
import argparse
import pandas as pd
import subprocess
import os
from repli1d.analyse_RFD import smooth
if __name__ == "__main__":
    # Heuristic optimisation of a replication-signal profile: each iteration
    # writes the candidate signal to CSV, runs an external scoring command,
    # and nudges the signal against the smoothed RFD mismatch.
    parser = argparse.ArgumentParser()
    parser.add_argument('--init', type=str, default="K562")
    parser.add_argument('--alpha', type=float,default=0.1)
    parser.add_argument('--root', type=str, default="./results/scipy_opti/")
    parser.add_argument('--n', type=int, default=10)
    parser.add_argument('--extension', type=int, default=5)
    parser.add_argument('--command',type=str)
    args = parser.parse_args()
    root = args.root
    os.makedirs(root,exist_ok=True)
    whole_info = pd.read_csv(args.init)
    x0 = np.array(whole_info.signal)
    init_x0 = x0.copy()
    x0[np.isnan(x0)] = 0
    # Optimise only over the non-zero entries; `where` maps back to full length.
    where = np.where(x0 != 0)
    x0 = x0[where]
    x0 /= np.sum(x0)
    command = args.command
    iter = 0  # NOTE(review): shadows the builtin `iter`
    gscore = 0
    def fun(x, alpha):
        """Score candidate x via the external command; return (score, updated x)."""
        global iter
        global gscore
        signal = init_x0
        signal[where] = x  # NOTE(review): mutates init_x0 in place on every call
        if np.sum(x < 0) > 0:
            # NOTE(review): returns a bare int, but callers unpack two values
            # (`score, x = fun(...)`) — this path would raise a TypeError.
            return 2
        filen = root + "/tmp.csv"
        d = pd.DataFrame({"chrom": whole_info.chrom,
                          "chromStart": whole_info.chromStart,
                          "chromEnd": whole_info.chromStart,
                          "signalValue": signal})
        d.to_csv(filen, index=False)
        process = subprocess.Popen(command + " --signal %s --name %s" % (filen, root + "/tmp"), shell=True,
                                   stdout=subprocess.PIPE)
        process.wait()
        scored = pd.read_csv(root + "/tmpglobal_corre.csv")
        c1 = float(scored["MRTp"][0].split(",")[0][1:])
        c1 = 0  # NOTE(review): immediately discards the MRT correlation — intended?
        c2 = float(scored["RFDp"][0].split(",")[0][1:])
        print(scored)
        if iter % 10 == 0:
            print("every10", c1, c2)
        score = 2 - c1 - c2 # + 0.01 * (np.sum(x)-1)**2
        if iter == 0:
            print("Initial value", gscore)
            gscore = score
        if score < gscore:
            # NOTE(review): %-placeholders are never formatted; print just appends the args.
            print("New minimum %.3f , old %.3f", score, gscore)
            print(c1, c2)
            d.to_csv(root + "_%i.csv" % iter, index=False)
            gscore = score
        iter += 1
        scored = pd.read_csv(root + "/tmpglobal_profiles.csv")
        def delta(s):
            # First difference of a series.
            return np.array(s)[1:] - np.array(s)[:-1]
        deltas = smooth(delta(scored["RFDs"]) - delta(scored["RFDe"]), args.extension)
        direction = deltas[where]
        direction /= np.mean(np.abs(direction))
        # Multiplicative update keeps the sign of x; negatives are clipped.
        x -= alpha * direction * x
        x[x < 0] = 0
        return score, x
    #ret = minimize(fun,x0=x0,method='Nelder-Mead',options={"maxiter":200})
    x = x0
    for i in range(args.n):
        score, x = fun(x, alpha=args.alpha)
        print(i, score)
"numpy.abs",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.where",
"pandas.read_csv",
"subprocess.Popen",
"numpy.array",
"numpy.sum",
"numpy.isnan",
"pandas.DataFrame"
] | [((201, 226), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (224, 226), False, 'import argparse\n'), ((644, 676), 'os.makedirs', 'os.makedirs', (['root'], {'exist_ok': '(True)'}), '(root, exist_ok=True)\n', (655, 676), False, 'import os\n'), ((694, 716), 'pandas.read_csv', 'pd.read_csv', (['args.init'], {}), '(args.init)\n', (705, 716), True, 'import pandas as pd\n'), ((726, 753), 'numpy.array', 'np.array', (['whole_info.signal'], {}), '(whole_info.signal)\n', (734, 753), True, 'import numpy as np\n'), ((816, 833), 'numpy.where', 'np.where', (['(x0 != 0)'], {}), '(x0 != 0)\n', (824, 833), True, 'import numpy as np\n'), ((864, 874), 'numpy.sum', 'np.sum', (['x0'], {}), '(x0)\n', (870, 874), True, 'import numpy as np\n'), ((786, 798), 'numpy.isnan', 'np.isnan', (['x0'], {}), '(x0)\n', (794, 798), True, 'import numpy as np\n'), ((1146, 1287), 'pandas.DataFrame', 'pd.DataFrame', (["{'chrom': whole_info.chrom, 'chromStart': whole_info.chromStart, 'chromEnd':\n whole_info.chromStart, 'signalValue': signal}"], {}), "({'chrom': whole_info.chrom, 'chromStart': whole_info.\n chromStart, 'chromEnd': whole_info.chromStart, 'signalValue': signal})\n", (1158, 1287), True, 'import pandas as pd\n'), ((1416, 1534), 'subprocess.Popen', 'subprocess.Popen', (["(command + ' --signal %s --name %s' % (filen, root + '/tmp'))"], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "(command + ' --signal %s --name %s' % (filen, root + '/tmp'\n ), shell=True, stdout=subprocess.PIPE)\n", (1432, 1534), False, 'import subprocess\n'), ((1607, 1649), 'pandas.read_csv', 'pd.read_csv', (["(root + '/tmpglobal_corre.csv')"], {}), "(root + '/tmpglobal_corre.csv')\n", (1618, 1649), True, 'import pandas as pd\n'), ((2257, 2302), 'pandas.read_csv', 'pd.read_csv', (["(root + '/tmpglobal_profiles.csv')"], {}), "(root + '/tmpglobal_profiles.csv')\n", (2268, 2302), True, 'import pandas as pd\n'), ((1060, 1073), 'numpy.sum', 'np.sum', (['(x < 0)'], {}), '(x < 0)\n', (1066, 1073), 
True, 'import numpy as np\n'), ((2532, 2549), 'numpy.abs', 'np.abs', (['direction'], {}), '(direction)\n', (2538, 2549), True, 'import numpy as np\n'), ((2345, 2356), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (2353, 2356), True, 'import numpy as np\n'), ((2363, 2374), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (2371, 2374), True, 'import numpy as np\n')] |
import csv
import matplotlib.pyplot as plt
import numpy as np
import custom_tools.fftplot as fftplot
import scipy.signal as sig
import scipy.fftpack as fft
pi = np.pi
def angle2(x):
    """Return the element-wise phase angle atan2(imag, real) of *x* as an ndarray.

    Works for sequences of complex or real numbers. The original per-element
    loop carried two no-op guards (they assigned 0 over a value already equal
    to 0, and the second even type-checked `real` instead of `imag`); both are
    dropped and the loop is replaced by a single vectorized call.
    """
    arr = np.asarray(x)
    return np.arctan2(arr.imag, arr.real)
# A/D Conversion, Sampling
nsamps = 2 ** 16  # FFT length (power of two)
# generate time vector
fs = 50e6  # sample rate [Hz]
ftone = 10e6  # test tone frequency [Hz]
t = np.arange(nsamps) * 1 / fs
tone2 = 0
adc_out = np.cos(2 * np.pi * ftone * t)
# adc_out = np.cos(2 * np.pi * ftone * t) + np.cos(2 * np.pi * tone2 * t)
# using customized fft module
x, y = fftplot.winfft(adc_out, fs=fs)
plt.figure()
fftplot.plot_spectrum(x, y)
plt.title(f'Output Spectrum of adc_out - {ftone / 1e6} MHz Tone')
# plt.axis([-500, 500, -100, 0])
# NCO at the tone frequency: mixing shifts the tone down to DC.
nco_freq = 10e6
nco_cosine = np.cos(2 * np.pi * nco_freq * t)
nco_sine = np.sin(2 * np.pi * nco_freq * t)
i_post_mix = adc_out * nco_cosine
q_post_mix = adc_out * nco_sine
# using customized fft module
x, y = fftplot.winfft(i_post_mix, fs=fs)
plt.figure()
fftplot.plot_spectrum(x, y)
plt.title(f'Output Spectrum of i_post_mix - {ftone / 1e6} MHz Tone')
# using customized fft module
x, y = fftplot.winfft(q_post_mix, fs=fs)
plt.figure()
fftplot.plot_spectrum(x, y)
plt.title(f'Output Spectrum of q_post_mix - {ftone / 1e6} MHz Tone')
# Raw (unwindowed) spectrum: magnitude and phase of adc_out.
plt.figure()
yf = fft.fft(adc_out)
xf = fft.fftfreq(nsamps, 1 / fs)
xf = fft.fftshift(xf)
yf = fft.fftshift(yf)
plt.plot(xf / 1e3, np.abs(yf))
plt.figure()
# Phase plot; np.round suppresses numerical noise before taking the angle.
plt.stem(xf / 1e3, angle2(np.round(yf, 1)) * 180 / pi, use_line_collection=True)
plt.show()
| [
"numpy.abs",
"scipy.fftpack.fftfreq",
"scipy.fftpack.fftshift",
"numpy.round",
"custom_tools.fftplot.winfft",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.arctan2",
"numpy.cos",
"scipy.fftpack.fft",
"custom_tools.fftplot.plot_spectrum",
"numpy.sin",
"matplotlib.pyplot.title",
"numpy.a... | [((647, 676), 'numpy.cos', 'np.cos', (['(2 * np.pi * ftone * t)'], {}), '(2 * np.pi * ftone * t)\n', (653, 676), True, 'import numpy as np\n'), ((789, 819), 'custom_tools.fftplot.winfft', 'fftplot.winfft', (['adc_out'], {'fs': 'fs'}), '(adc_out, fs=fs)\n', (803, 819), True, 'import custom_tools.fftplot as fftplot\n'), ((820, 832), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (830, 832), True, 'import matplotlib.pyplot as plt\n'), ((833, 860), 'custom_tools.fftplot.plot_spectrum', 'fftplot.plot_spectrum', (['x', 'y'], {}), '(x, y)\n', (854, 860), True, 'import custom_tools.fftplot as fftplot\n'), ((861, 932), 'matplotlib.pyplot.title', 'plt.title', (['f"""Output Spectrum of adc_out - {ftone / 1000000.0} MHz Tone"""'], {}), "(f'Output Spectrum of adc_out - {ftone / 1000000.0} MHz Tone')\n", (870, 932), True, 'import matplotlib.pyplot as plt\n'), ((990, 1022), 'numpy.cos', 'np.cos', (['(2 * np.pi * nco_freq * t)'], {}), '(2 * np.pi * nco_freq * t)\n', (996, 1022), True, 'import numpy as np\n'), ((1034, 1066), 'numpy.sin', 'np.sin', (['(2 * np.pi * nco_freq * t)'], {}), '(2 * np.pi * nco_freq * t)\n', (1040, 1066), True, 'import numpy as np\n'), ((1171, 1204), 'custom_tools.fftplot.winfft', 'fftplot.winfft', (['i_post_mix'], {'fs': 'fs'}), '(i_post_mix, fs=fs)\n', (1185, 1204), True, 'import custom_tools.fftplot as fftplot\n'), ((1205, 1217), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1215, 1217), True, 'import matplotlib.pyplot as plt\n'), ((1218, 1245), 'custom_tools.fftplot.plot_spectrum', 'fftplot.plot_spectrum', (['x', 'y'], {}), '(x, y)\n', (1239, 1245), True, 'import custom_tools.fftplot as fftplot\n'), ((1246, 1320), 'matplotlib.pyplot.title', 'plt.title', (['f"""Output Spectrum of i_post_mix - {ftone / 1000000.0} MHz Tone"""'], {}), "(f'Output Spectrum of i_post_mix - {ftone / 1000000.0} MHz Tone')\n", (1255, 1320), True, 'import matplotlib.pyplot as plt\n'), ((1352, 1385), 'custom_tools.fftplot.winfft', 
'fftplot.winfft', (['q_post_mix'], {'fs': 'fs'}), '(q_post_mix, fs=fs)\n', (1366, 1385), True, 'import custom_tools.fftplot as fftplot\n'), ((1386, 1398), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1396, 1398), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1426), 'custom_tools.fftplot.plot_spectrum', 'fftplot.plot_spectrum', (['x', 'y'], {}), '(x, y)\n', (1420, 1426), True, 'import custom_tools.fftplot as fftplot\n'), ((1427, 1501), 'matplotlib.pyplot.title', 'plt.title', (['f"""Output Spectrum of q_post_mix - {ftone / 1000000.0} MHz Tone"""'], {}), "(f'Output Spectrum of q_post_mix - {ftone / 1000000.0} MHz Tone')\n", (1436, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1497, 1509), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1507, 1509), True, 'import matplotlib.pyplot as plt\n'), ((1515, 1531), 'scipy.fftpack.fft', 'fft.fft', (['adc_out'], {}), '(adc_out)\n', (1522, 1531), True, 'import scipy.fftpack as fft\n'), ((1537, 1564), 'scipy.fftpack.fftfreq', 'fft.fftfreq', (['nsamps', '(1 / fs)'], {}), '(nsamps, 1 / fs)\n', (1548, 1564), True, 'import scipy.fftpack as fft\n'), ((1570, 1586), 'scipy.fftpack.fftshift', 'fft.fftshift', (['xf'], {}), '(xf)\n', (1582, 1586), True, 'import scipy.fftpack as fft\n'), ((1592, 1608), 'scipy.fftpack.fftshift', 'fft.fftshift', (['yf'], {}), '(yf)\n', (1604, 1608), True, 'import scipy.fftpack as fft\n'), ((1641, 1653), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1651, 1653), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1746), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1744, 1746), True, 'import matplotlib.pyplot as plt\n'), ((485, 502), 'numpy.array', 'np.array', (['fin_res'], {}), '(fin_res)\n', (493, 502), True, 'import numpy as np\n'), ((1628, 1638), 'numpy.abs', 'np.abs', (['yf'], {}), '(yf)\n', (1634, 1638), True, 'import numpy as np\n'), ((421, 443), 'numpy.arctan2', 'np.arctan2', (['imag', 'real'], {}), '(imag, real)\n', (431, 443), True, 
'import numpy as np\n'), ((599, 616), 'numpy.arange', 'np.arange', (['nsamps'], {}), '(nsamps)\n', (608, 616), True, 'import numpy as np\n'), ((1680, 1695), 'numpy.round', 'np.round', (['yf', '(1)'], {}), '(yf, 1)\n', (1688, 1695), True, 'import numpy as np\n')] |
"""
Dataset for clip model
"""
import os
import logging
import copy
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import time
import math
import random
import h5py
from tqdm import tqdm
from easydict import EasyDict as edict
import sys
sys.path.append(".")
from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package
def l2_normalize_np_array(np_array, eps=1e-5):
    """Normalize the last dimension of np_array (*, D) to (approximately) unit L2 norm."""
    norms = np.linalg.norm(np_array, axis=-1, keepdims=True)
    # eps keeps the division finite for all-zero rows.
    return np_array / (norms + eps)
def pad_sequences_1d(sequences, dtype=torch.long):
    """Zero-pad a list of lists or n-d tensors along the (variable) first dim.

    Args:
        sequences: list of lists, or list of tensors whose trailing dims agree.
        dtype: torch.long for index sequences, torch.float otherwise.

    Returns:
        (padded, mask): padded is an (n+1)-d tensor of shape
        (batch, max_len, *trailing); mask is a float (batch, max_len) tensor
        with 1 at valid positions and 0 at padding.
    """
    if isinstance(sequences[0], list):
        sequences = [torch.tensor(seq, dtype=dtype) for seq in sequences]
    trailing_dims = sequences[0].shape[1:]  # must match across all elements
    seq_lens = [seq.shape[0] for seq in sequences]
    batch_size, longest = len(sequences), max(seq_lens)
    padded = torch.zeros((batch_size, longest) + trailing_dims, dtype=dtype)
    mask = torch.zeros(batch_size, longest).float()
    for row, (seq, n) in enumerate(zip(sequences, seq_lens)):
        padded[row, :n] = seq
        mask[row, :n] = 1
    return padded, mask
def pad_image_text_alignment(sparse_alignment: list, max_img_feat: int, padding_value: int):
    """Densify a ragged image-to-text alignment into a padded int32 array.

    sparse_alignment is a list (one entry per image) of lists (one entry per
    image region, whole-image feature first) of text-token positions, e.g.::

        [
            [ [1, 2, 3], [4, 5, 6, 7], [8, 9, 10] ],   # image 1
            [ [1, 2, 3, 4], [3, 4, 5, 6, 7], [8, 9, 10] ],  # image 2
        ]

    Returns an array of shape (B, max_img_feat, max_alignment_length) where
    unused slots hold padding_value.
    """
    longest = max(len(region) for image in sparse_alignment for region in image)
    batch_size = len(sparse_alignment)
    dense = np.full((batch_size, max_img_feat, longest), padding_value, dtype=np.int32)
    for img_idx, image in enumerate(sparse_alignment):
        for region_idx, positions in enumerate(image):
            dense[img_idx, region_idx, :len(positions)] = positions
    return dense
def collate_concat_segment(batch,mask_ctx_text=False, mask_ctx_vis=False):
    """Collate video-segment samples; optionally zero out one context modality.

    mask_ctx_text / mask_ctx_vis replace the padded transcript / visual
    features (and their masks) with zeros, for modality-ablation runs.
    """
    collected = edict()
    for field in batch[0].keys():
        collected[field] = [sample[field] for sample in batch]
    text_feat, text_mask = pad_sequences_1d(collected.ctx_text_feat, dtype=torch.float)
    vis_feat, vis_mask = pad_sequences_1d(collected.ctx_vis_feat, dtype=torch.float)
    if mask_ctx_text:
        text_feat = torch.zeros_like(text_feat)
        text_mask = torch.zeros_like(text_mask)
    if mask_ctx_vis:
        vis_feat = torch.zeros_like(vis_feat)
        vis_mask = torch.zeros_like(vis_mask)
    return edict(
        seg_id=collected.seg_id,
        seg_name=collected.seg_name,
        vid_name=collected.vid_name,
        pad_ctx_vis_feat=vis_feat,
        pad_ctx_text_feat=text_feat,
        ctx_text_mask=text_mask,
        ctx_vis_mask=vis_mask
    )
def collate_for_concat_fusion(batch,
                              mask_query_text=False,
                              mask_query_img=False,
                              mask_ctx_text=False,
                              mask_ctx_vis=False
                              ):
    """Collate query/context samples for concat-style fusion.

    Each mask_* flag zeroes the corresponding padded features and mask
    tensors (modality ablation). The image-to-text alignment is densified
    against the padded query-visual length before any masking.
    """
    def _zeroed(feat, mask):
        # All-zero tensors with the same shapes as (feat, mask).
        return torch.zeros_like(feat), torch.zeros_like(mask)
    fields = edict()
    for field in batch[0].keys():
        fields[field] = [sample[field] for sample in batch]
    q_text, q_text_mask = pad_sequences_1d(fields.query_text_feat, dtype=torch.float)
    q_vis, q_vis_mask = pad_sequences_1d(fields.query_vis_feat, dtype=torch.float)
    alignment = torch.from_numpy(
        pad_image_text_alignment(fields.image_2_text_alignment, q_vis.shape[1], padding_value=-1)
    )
    c_text, c_text_mask = pad_sequences_1d(fields.ctx_text_feat, dtype=torch.float)
    c_vis, c_vis_mask = pad_sequences_1d(fields.ctx_vis_feat, dtype=torch.float)
    if mask_query_text:
        q_text, q_text_mask = _zeroed(q_text, q_text_mask)
    if mask_query_img:
        q_vis, q_vis_mask = _zeroed(q_vis, q_vis_mask)
    if mask_ctx_text:
        c_text, c_text_mask = _zeroed(c_text, c_text_mask)
    if mask_ctx_vis:
        c_vis, c_vis_mask = _zeroed(c_vis, c_vis_mask)
    return edict(
        meta=fields.meta,
        pad_query_text_feat=q_text,
        query_text_mask=q_text_mask,
        pad_query_vis_feat=q_vis,
        query_vis_mask=q_vis_mask,
        image_2_text_alignment=alignment,
        pad_ctx_text_feat=c_text,
        pad_ctx_vis_feat=c_vis,
        ctx_text_mask=c_text_mask,
        ctx_vis_mask=c_vis_mask
    )
def collate_for_concat_MarginRanking(batch):
    """Collate a batch for margin-ranking training.

    Besides the padded query features and image-to-text alignment, each
    sample carries three contexts — positive, intra-video negative and
    inter-video negative — each padded independently for both modalities.
    """
    # Transpose the list of per-sample dicts into a dict of lists.
    collected = edict()
    for field in batch[0].keys():
        collected[field] = [sample[field] for sample in batch]

    q_text, q_text_mask = pad_sequences_1d(
        collected.query_text_feat, dtype=torch.float)
    q_vis, q_vis_mask = pad_sequences_1d(
        collected.query_vis_feat, dtype=torch.float)
    # Alignment is padded to the max region count in the batch; -1 = pad.
    alignment = torch.from_numpy(
        pad_image_text_alignment(
            collected.image_2_text_alignment, q_vis.shape[1], padding_value=-1)
    )
    # Pad each of the three contexts (pos / intra-neg / inter-neg) the same way.
    pos_text, pos_text_mask = pad_sequences_1d(
        collected.pos_ctx_text_feat, dtype=torch.float)
    pos_vis, pos_vis_mask = pad_sequences_1d(
        collected.pos_ctx_vis_feat, dtype=torch.float)
    intra_text, intra_text_mask = pad_sequences_1d(
        collected.intra_neg_ctx_text_feat, dtype=torch.float)
    intra_vis, intra_vis_mask = pad_sequences_1d(
        collected.intra_neg_ctx_vis_feat, dtype=torch.float)
    inter_text, inter_text_mask = pad_sequences_1d(
        collected.inter_neg_ctx_text_feat, dtype=torch.float)
    inter_vis, inter_vis_mask = pad_sequences_1d(
        collected.inter_neg_ctx_vis_feat, dtype=torch.float)

    return edict(
        meta=collected.meta,
        pad_query_text_feat=q_text,
        query_text_mask=q_text_mask,
        pad_query_vis_feat=q_vis,
        query_vis_mask=q_vis_mask,
        image_2_text_alignment=alignment,
        pad_pos_ctx_text_feat=pos_text,
        pad_pos_ctx_vis_feat=pos_vis,
        pos_ctx_text_mask=pos_text_mask,
        pos_ctx_vis_mask=pos_vis_mask,
        pad_intra_neg_ctx_text_feat=intra_text,
        pad_intra_neg_ctx_vis_feat=intra_vis,
        intra_neg_ctx_text_mask=intra_text_mask,
        intra_neg_ctx_vis_mask=intra_vis_mask,
        pad_inter_neg_ctx_text_feat=inter_text,
        pad_inter_neg_ctx_vis_feat=inter_vis,
        inter_neg_ctx_text_mask=inter_text_mask,
        inter_neg_ctx_vis_mask=inter_vis_mask,
    )
def collate_for_adding_fusion(batch):
    """Collate a batch for the additive-fusion model.

    Query visual features are scattered onto the padded text timeline via
    the image-to-text alignment so that text and visual embeddings can be
    summed position-wise. ``query_token_type_ids`` is 0 at positions
    aligned to the whole image (region 0) and 1 elsewhere.
    """
    # Transpose the list of per-sample dicts into a dict of lists.
    collected = edict()
    for field in batch[0].keys():
        collected[field] = [sample[field] for sample in batch]

    pad_query_text_feat, query_text_mask = pad_sequences_1d(
        collected.query_text_feat, dtype=torch.float)

    # Visual features laid out on the text grid: zeros with the padded text
    # shape in the first two dims and the visual feature dim last.
    vis_dim = collected.query_vis_feat[0].shape[-1]
    pad_query_vis_feat = torch.zeros(
        pad_query_text_feat.size()[:2] + (vis_dim,),
        dtype=pad_query_text_feat.dtype
    )
    query_vis_mask = copy.deepcopy(query_text_mask)
    query_token_type_ids = torch.ones(
        pad_query_text_feat.shape[:2], dtype=torch.long
    )

    # Broadcast each region feature to every token position it aligns with.
    for bidx, (vis_feat, i2t) in enumerate(
            zip(collected.query_vis_feat, collected.image_2_text_alignment)):
        for region_idx, token_positions in enumerate(i2t):
            pad_query_vis_feat[bidx][token_positions] = vis_feat[region_idx]
            if region_idx == 0:  # region 0 stands for the whole image
                query_token_type_ids[bidx][token_positions] = 0

    pad_ctx_text_feat, ctx_text_mask = pad_sequences_1d(
        collected.ctx_text_feat, dtype=torch.float)
    pad_ctx_vis_feat, ctx_vis_mask = pad_sequences_1d(
        collected.ctx_vis_feat, dtype=torch.float)

    return edict(
        meta=collected.meta,
        pad_query_text_feat=pad_query_text_feat,
        query_text_mask=query_text_mask,
        pad_query_vis_feat=pad_query_vis_feat,
        query_vis_mask=query_vis_mask,
        query_token_type_ids=query_token_type_ids,
        image_2_text_alignment=collected.image_2_text_alignment,
        pad_ctx_text_feat=pad_ctx_text_feat,
        pad_ctx_vis_feat=pad_ctx_vis_feat,
        ctx_text_mask=ctx_text_mask,
        ctx_vis_mask=ctx_vis_mask,
    )
"""
Dummy dataset for debug:
"""
class DummyDataset(Dataset):
    """Synthetic stand-in dataset used to debug the data pipeline.

    Each item mimics the real AQVSR sample layout:
        "meta": query/video/segment bookkeeping fields,
        "query_text_feat": torch.tensor (L, 768), random query token features,
        "query_vis_feat": torch.tensor (n_region, 2048), random region features,
        "image_2_text_alignment": list of token-index groups, one per region,
        "ctx_vis_feat": torch.tensor (n_clip, 2048), random video features,
        "ctx_text_feat": torch.tensor (n_clip, 768), random subtitle features.
    """

    def __init__(self, dset_name="dummy", data_path="", query_bert_path_or_handler="",
                 sub_feat_path_or_handler="", vid_feat_path_or_handler="",
                 normalize_vfeat=True, normalize_tfeat=True):
        self.dset_name = dset_name
        # 1000 fake entries stand in for the real annotation list.
        self.data = np.arange(1000)
        self.query_bert_path_or_handler = query_bert_path_or_handler
        self.sub_feat_path_or_handler = sub_feat_path_or_handler
        self.vid_fear_path_or_handler = vid_feat_path_or_handler
        # Size ranges for the randomly generated tensors.
        self.query_min_len, self.query_max_len = 10, 20
        self.n_clip_min, self.n_clip_max = 20, 80
        self.n_region_min, self.n_region_max = 1, 3

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Every field below is fabricated; only the shapes matter.
        qid = np.random.randint(1000)  # dummy: sample one of the 1000 entries
        meta = edict(
            query_id=qid,
            text_query="This is a sample from dummy dataset.",
            vid_name='xcvFGRT_O3Q',
            answer_segment_name=['xcvFGRT_O3Q_seg1', 'xcvFGRT_O3Q_seg3'],
            answer_segment_id=[10555, 10557],
            answer_segment_info=[[80, 125], [220, 320]]
        )
        query_len = np.random.randint(self.query_min_len, self.query_max_len + 1)
        query_text_feat = torch.randn(query_len, 768)
        n_img_region = np.random.randint(self.n_region_min, self.n_region_max + 1)
        query_vis_feat = torch.randn(n_img_region, 2048)
        # Split the token indices into n_img_region contiguous chunks at
        # sorted random interior cut points.
        token_indices = np.arange(query_len)
        cut_points = np.sort(np.random.choice(
            np.arange(1, query_len - 1), n_img_region - 1, replace=False))
        img_2_text_alignment = np.split(token_indices, cut_points)
        n_clip = np.random.randint(self.n_clip_min, self.n_clip_max + 1)
        ctx_vis_feat = torch.randn(n_clip, 2048)
        ctx_text_feat = torch.randn(n_clip, 768)
        return edict(
            meta=meta,
            query_text_feat=query_text_feat,
            query_vis_feat=query_vis_feat,
            image_2_text_alignment=img_2_text_alignment,
            ctx_vis_feat=ctx_vis_feat,
            ctx_text_feat=ctx_text_feat
        )
# Default filesystem locations of the AQVSR data package: the per-split
# annotation jsonl files, the extracted-feature HDF5 package, and the
# segment-id mapping file (id.json). All three currently point at the
# same machine-specific directory.
ANNOTATION_PACKAGE_ROOT = '/home/stan/ai_assistant/data'
FEATURE_PACKAGE_ROOT = '/home/stan/ai_assistant/data'
ID_FILE_ROOT = '/home/stan/ai_assistant/data'
class AQVSR_query(Dataset):
    """
    Query-centric view of the AQVSR dataset: one item per annotated query,
    with one ground-truth answer segment sampled per access for training.

    Args:
        dset_name, str, "train", "valid" or "test"
        avg_pooling, boolean, default = False, True for avg_pooling, False for max_pooling
    Return:
        a dict: {
            "meta": {
                "query_id": int,
                "text_query": str,                    # purely text query
                "original_query": str,
                "query_image_path": str,
                "vid_name": str,                      # youtube_id (11)
                "answer_segment_name": list[str],     # name of segments: ["xtuiYd45q1W_segment1",...]
                "answer_segment_id": list[segment_id],  # unique_segment_id
                "answer_segment_info": list[[st,ed], ... [st,ed]],  # start_time, end_time of corresponding segment
                "sample_seg_id_for_training": int,    # sample one segment for training
            }
            "query_text_feat": torch.tensor, (L, D_q)         # query feature
            "query_vis_feat": torch.tensor, (n_region, 2048)  # image feature / region feature
            "image_2_text_alignment": list[list]              # image to token alignment
            "ctx_vis_feat": torch.tensor, (n_clip_in_segment, dim_video)  # video feature
            "ctx_text_feat": torch.tensor, (n_clip_in_segment, dim_sub)   # sub feature
        }
    """
    def __init__(self, dset_name="train", query_bert_path_or_handler="", sub_feat_path_or_handler="",
                 vid_feat_path_or_handler="", normalize_vfeat=True, normalize_tfeat=True,
                 avg_pooling=False, annotation_root=ANNOTATION_PACKAGE_ROOT, feature_root=FEATURE_PACKAGE_ROOT):
        assert dset_name in ['train', 'valid', 'test'], "dset_name should be in 'train' 'valid' and 'test'"
        self.dset_name = dset_name
        # One annotation file per split, one json object per line.
        if dset_name == 'train':
            self.data = load_jsonl(os.path.join(annotation_root, 'trainset.jsonl'))
        elif dset_name == 'valid':
            self.data = load_jsonl(os.path.join(annotation_root, 'validset.jsonl'))
        elif dset_name == 'test':
            self.data = load_jsonl(os.path.join(annotation_root, 'testset.jsonl'))
        self.query_bert_path_or_handler = query_bert_path_or_handler
        self.sub_feat_path_or_handler = sub_feat_path_or_handler
        # NOTE(review): "fear" looks like a typo for "feat"; attribute name
        # kept as-is for compatibility with any existing callers.
        self.vid_fear_path_or_handler = vid_feat_path_or_handler
        self.normalize_vfeat = normalize_vfeat
        self.normalize_tfeat = normalize_tfeat
        # Which clip-level pooling variant to read from the feature package.
        if avg_pooling:
            self.pooling = 'avg_pooling'
        else:
            self.pooling = 'max_pooling'
        # Eagerly load every feature table out of the single HDF5 package.
        with h5py.File(os.path.join(feature_root, 'feature.hdf5'), 'r') as f:
            self.query_text_feat = load_from_feature_package(f['query_text_feature'])
            self.query_img_feat = load_from_feature_package(f['query_grid_feature'])
            self.sub_text_feat = load_from_feature_package(f['subtitle_text_feature'])
            self.video_vis_feat = load_from_feature_package(f['frame_grid_feature'])
        # Bucket query ids by query modality ('Text Only', 'Video Only',
        # anything else counts as text+video).
        self.query_type = dict(
            text=[],
            video=[],
            text_video=[]
        )
        for item in self.data:
            q_type = item['query_type']
            if q_type == 'Text Only':
                self.query_type['text'].append(item['query_id'])
            elif q_type == 'Video Only':
                self.query_type['video'].append(item['query_id'])
            else:
                self.query_type['text_video'].append(item['query_id'])
        # For eval splits, record the query ids flagged as not overlapping
        # with the train set.
        if dset_name == 'valid' or dset_name == 'test':
            self.not_in_train = []
            for item in self.data:
                if item['not_in_train']:
                    self.not_in_train.append(item['query_id'])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        item = self.data[index]
        # Uniformly sample one ground-truth segment as the training target.
        sample_seg_idx = random.sample(range(len(item['answer_segment_id'])), 1)[0]
        meta = edict(
            query_id=item['query_id'],
            query_name=item['query_name'],
            text_query=item['text_query'],
            original_query=item['original_query'],
            query_img_path=item['query_img_path'],
            vid_name=item['vid_name'],
            answer_segment_name=item['answer_segment_name'],
            answer_segment_id=item['answer_segment_id'],
            answer_segment_info=item['answer_segment_info'],
            sample_seg_id_for_training=item['answer_segment_id'][sample_seg_idx],
            sample_seg_name_for_training=item['answer_segment_name'][sample_seg_idx]
        )
        query_text_feat = self.query_text_feat[item['vid_name']][item['query_name']]['feature'][0]
        img_2_text_alignment = self.query_text_feat[item['vid_name']][item['query_name']]['img_alignment']
        # Query image features are keyed by the image file name (path tail).
        query_vis_feat = self.query_img_feat[item['vid_name']][item['query_img_path'].split('/')[-1]]
        ctx_vis_feat = self.video_vis_feat[item['vid_name']][item['answer_segment_name'][sample_seg_idx]][self.pooling]
        ctx_text_feat = self.sub_text_feat[item['vid_name']][item['answer_segment_name'][sample_seg_idx]][self.pooling]
        # Optional row-wise L2 normalization per modality.
        if self.normalize_tfeat:
            query_text_feat = l2_normalize_np_array(query_text_feat)
            ctx_text_feat = l2_normalize_np_array(ctx_text_feat)
        if self.normalize_vfeat:
            query_vis_feat = l2_normalize_np_array(query_vis_feat)
            ctx_vis_feat = l2_normalize_np_array(ctx_vis_feat)
        return edict(
            meta=meta,
            query_text_feat=torch.from_numpy(query_text_feat),
            query_vis_feat=torch.from_numpy(query_vis_feat),
            image_2_text_alignment=img_2_text_alignment,
            ctx_vis_feat=torch.from_numpy(ctx_vis_feat),
            ctx_text_feat=torch.from_numpy(ctx_text_feat)
        )
class AQVSR_segment(Dataset):
    """Segment-centric view of AQVSR: one item per video segment of every
    video appearing in the chosen split, yielding the segment's pooled
    subtitle and frame features (used to build the retrieval corpus)."""
    def __init__(self, dset_name="train", normalize_vfeat=True, normalize_tfeat=True,
                 avg_pooling=False, annotation_root=ANNOTATION_PACKAGE_ROOT, feature_root=FEATURE_PACKAGE_ROOT):
        assert dset_name in ['train', 'valid', 'test'], "dset_name should be in 'train' 'valid' and 'test'"
        self.dset_name = dset_name
        # One annotation file per split, one json object per line.
        if dset_name == 'train':
            self.data = load_jsonl(os.path.join(annotation_root, 'trainset.jsonl'))
        elif dset_name == 'valid':
            self.data = load_jsonl(os.path.join(annotation_root, 'validset.jsonl'))
        elif dset_name == 'test':
            self.data = load_jsonl(os.path.join(annotation_root, 'testset.jsonl'))
        self.normalize_vfeat = normalize_vfeat
        self.normalize_tfeat = normalize_tfeat
        # Which clip-level pooling variant to read from the feature package.
        if avg_pooling:
            self.pooling = 'avg_pooling'
        else:
            self.pooling = 'max_pooling'
        # Build the iterable [seg_id, seg_name, vid] list, restricted to
        # videos referenced by this split's queries. The first 11 chars of
        # a query/segment name are the youtube video id.
        self.segment_list = []
        vid_set = set()
        for query in self.data:
            vid = query['query_name'][:11]
            vid_set.add(vid)
        seg2id = load_json(os.path.join(ID_FILE_ROOT, 'id.json'))['seg2id']
        for seg_name, seg_id in seg2id.items():
            vid = seg_name[:11]
            if vid in vid_set:
                self.segment_list.append([seg_id, seg_name, vid])
        # Eagerly load the two context feature tables from the HDF5 package.
        with h5py.File(os.path.join(feature_root, 'feature.hdf5'), 'r') as f:
            self.sub_text_feat = load_from_feature_package(f['subtitle_text_feature'])
            self.video_vis_feat = load_from_feature_package(f['frame_grid_feature'])

    def __len__(self):
        return len(self.segment_list)

    def __getitem__(self, index):
        seg = self.segment_list[index]
        seg_id = seg[0]
        seg_name = seg[1]
        vid = seg[2]
        ctx_vis_feat = self.video_vis_feat[vid][seg_name][self.pooling]
        ctx_text_feat = self.sub_text_feat[vid][seg_name][self.pooling]
        # Optional row-wise L2 normalization per modality.
        if self.normalize_tfeat:
            ctx_text_feat = l2_normalize_np_array(ctx_text_feat)
        if self.normalize_vfeat:
            ctx_vis_feat = l2_normalize_np_array(ctx_vis_feat)
        return edict(
            seg_id=seg_id,
            seg_name=seg_name,
            vid_name=vid,
            ctx_vis_feat=torch.from_numpy(ctx_vis_feat),
            ctx_text_feat=torch.from_numpy(ctx_text_feat)
        )
# Return format according to ranking loss
# pos, intra-neg, inter-neg
class AQVSR_Ranking(Dataset):
    """
    Ranking-loss view of AQVSR: each item pairs a query with one positive,
    one intra-video negative, and one inter-video negative segment.

    Args:
        avg_pooling, boolean, default = False, True for avg_pooling, False for max_pooling
    Return:
        a dict: {
            "meta": {
                "query_id": int,
                "text_query": str,                    # purely text query
                "original_query": str,
                "query_image_path": str,
                "vid_name": str,                      # youtube_id (11)
                "answer_segment_name": list[str],     # name of segments: ["xtuiYd45q1W_segment1",...]
                "answer_segment_id": list[segment_id],  # unique_segment_id
                "answer_segment_info": list[[st,ed], ... [st,ed]],  # start_time, end_time of corresponding segment
                # modified in v2:
                "pos_seg_id_for_training": int,       # sample one ground truth segment for training
                "pos_seg_name_for_training": str,
                "intra_neg_seg_id_for_training": int,  # sample one intra wrong segment for training
                "intra_neg_seg_name_for_training": str,
                "inter_neg_seg_id_for_training": int,  # sample one inter wrong segment for training
                "inter_neg_seg_name_for_training": str,
            }
            "query_text_feat": torch.tensor, (L, D_q)         # query feature
            "query_vis_feat": torch.tensor, (n_region, 2048)  # image feature / region feature
            "image_2_text_alignment": list[list]              # image to token alignment
            # modified in v2: # n_sample sub/video feature include the groundtruth
            "pos_text_feat": torch.tensor, (n_clip_in_segment, dim_sub)
            "intra_neg_text_feat": torch.tensor, (n_clip_in_segment, dim_sub)
            "inter_neg_text_feat": torch.tensor, (n_clip_in_segment, dim_sub)
            "pos_vis_feat": torch.tensor, (n_sample, n_clip_in_segment, dim_video)
            "intra_neg_vis_feat": torch.tensor, (n_clip_in_segment, dim_video)
            "inter_neg_vis_feat": torch.tensor, (n_clip_in_segment, dim_video)
        }
    """
    def __init__(self, dset_name='train', normalize_vfeat=True, normalize_tfeat=True,
                 avg_pooling=False, annotation_root=ANNOTATION_PACKAGE_ROOT, feature_root=FEATURE_PACKAGE_ROOT):
        assert dset_name in ['train', 'valid', 'test'], "dset_name should be in 'train' 'valid' and 'test'"
        self.dset_name = dset_name
        # One annotation file per split, one json object per line.
        if dset_name == 'train':
            self.data = load_jsonl(os.path.join(annotation_root, 'trainset.jsonl'))
        elif dset_name == 'valid':
            self.data = load_jsonl(os.path.join(annotation_root, 'validset.jsonl'))
        elif dset_name == 'test':
            self.data = load_jsonl(os.path.join(annotation_root, 'testset.jsonl'))
        # Sampling budget per item. NOTE: __getitem__ indexes the sampled
        # lists with [0], so the return dict shape assumes all three are 1;
        # adjust the return dict if you change them.
        self.n_pos = 1
        self.n_neg_intra = 1
        self.n_neg_inter = 1
        self.normalize_vfeat = normalize_vfeat
        self.normalize_tfeat = normalize_tfeat
        # Which clip-level pooling variant to read from the feature package.
        if avg_pooling:
            self.pooling = 'avg_pooling'
        else:
            self.pooling = 'max_pooling'
        # Build the iterable [seg_id, seg_name, vid] list, restricted to
        # videos referenced by this split's queries (name[:11] = video id).
        self.segment_list = []
        vid_set = set()
        for query in self.data:
            vid = query['query_name'][:11]
            vid_set.add(vid)
        seg2id = load_json(os.path.join(ID_FILE_ROOT, 'id.json'))['seg2id']
        for seg_name, seg_id in seg2id.items():
            vid = seg_name[:11]
            if vid in vid_set:
                self.segment_list.append([seg_id, seg_name, vid])
        # Eagerly load every feature table out of the single HDF5 package.
        with h5py.File(os.path.join(feature_root, 'feature.hdf5'), 'r') as f:
            self.query_text_feat = load_from_feature_package(f['query_text_feature'])
            self.query_img_feat = load_from_feature_package(f['query_grid_feature'])
            self.sub_text_feat = load_from_feature_package(f['subtitle_text_feature'])
            self.video_vis_feat = load_from_feature_package(f['frame_grid_feature'])
        # Precompute, per query, its negative segment pools: intra-video
        # (same video, not an answer) and inter-video (different video).
        for item_idx in range(len(self.data)):
            item = self.data[item_idx]
            negative_seg_id_intra = []
            negative_seg_id_inter = []
            negative_seg_name_intra = []
            negative_seg_name_inter = []
            for [seg_id, seg_name, vid] in self.segment_list:
                if seg_name in item['answer_segment_name']:
                    continue  # ground-truth segments are never negatives
                else:
                    if vid == item['vid_name']:
                        negative_seg_id_intra.append(seg_id)
                        negative_seg_name_intra.append(seg_name)
                    else:
                        negative_seg_id_inter.append(seg_id)
                        negative_seg_name_inter.append(seg_name)
            self.data[item_idx]['intra_negative_segment_name'] = negative_seg_name_intra
            self.data[item_idx]['intra_negative_segment_id'] = negative_seg_id_intra
            self.data[item_idx]['inter_negative_segment_name'] = negative_seg_name_inter
            self.data[item_idx]['inter_negative_segment_id'] = negative_seg_id_inter
        # Bucket query ids by query modality ('Text Only', 'Video Only',
        # anything else counts as text+video).
        self.query_type = dict(
            text=[],
            video=[],
            text_video=[]
        )
        for item in self.data:
            q_type = item['query_type']
            if q_type == 'Text Only':
                self.query_type['text'].append(item['query_id'])
            elif q_type == 'Video Only':
                self.query_type['video'].append(item['query_id'])
            else:
                self.query_type['text_video'].append(item['query_id'])
        # For eval splits, record the query ids flagged as not overlapping
        # with the train set.
        if dset_name == 'valid' or dset_name == 'test':
            self.not_in_train = []
            for item in self.data:
                if item['not_in_train']:
                    self.not_in_train.append(item['query_id'])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        item = self.data[index]
        # Sample positive and negative segments from the precomputed pools.
        positive_seg_id = item['answer_segment_id']
        positive_seg_name = item['answer_segment_name']
        negative_seg_name_intra = item['intra_negative_segment_name']
        negative_seg_name_inter = item['inter_negative_segment_name']
        negative_seg_id_intra = item['intra_negative_segment_id']
        negative_seg_id_inter = item['inter_negative_segment_id']
        positive_idx = random.sample(range(len(positive_seg_name)), self.n_pos)
        negative_idx_intra = random.sample(range(len(negative_seg_name_intra)), self.n_neg_intra)
        negative_idx_inter = random.sample(range(len(negative_seg_name_inter)), self.n_neg_inter)
        positive_seg_id_sampled = [positive_seg_id[idx] for idx in positive_idx]
        negative_seg_id_intra_sampled = [negative_seg_id_intra[idx] for idx in negative_idx_intra]
        negative_seg_id_inter_sampled = [negative_seg_id_inter[idx] for idx in negative_idx_inter]
        positive_seg_name_sampled = [positive_seg_name[idx] for idx in positive_idx]
        negative_seg_name_intra_sampled = [negative_seg_name_intra[idx] for idx in negative_idx_intra]
        negative_seg_name_inter_sampled = [negative_seg_name_inter[idx] for idx in negative_idx_inter]
        meta = edict(
            query_id=item['query_id'],
            query_name=item['query_name'],
            text_query=item['text_query'],
            original_query=item['original_query'],
            query_img_path=item['query_img_path'],
            vid_name=item['vid_name'],
            answer_segment_name=item['answer_segment_name'],
            answer_segment_id=item['answer_segment_id'],
            answer_segment_info=item['answer_segment_info'],
            pos_seg_id=positive_seg_id_sampled[0],  # note that this [0] need all n_pos/n_neg = 1
            pos_seg_name=positive_seg_name_sampled[0],
            intra_neg_seg_id=negative_seg_id_intra_sampled[0],
            intra_neg_seg_name=negative_seg_name_intra_sampled[0],
            inter_neg_seg_id=negative_seg_id_inter_sampled[0],
            inter_neg_seg_name=negative_seg_name_inter_sampled[0]
        )
        query_text_feat = self.query_text_feat[item['vid_name']][item['query_name']]['feature'][0]
        img_2_text_alignment = self.query_text_feat[item['vid_name']][item['query_name']]['img_alignment']
        # Query image features are keyed by the image file name (path tail).
        query_vis_feat = self.query_img_feat[item['vid_name']][item['query_img_path'].split('/')[-1]]
        # Context features in fixed order: [positive, intra-neg, inter-neg].
        ctx_vis_feat = [self.video_vis_feat[seg_name[:11]][seg_name][self.pooling] for seg_name in
                        positive_seg_name_sampled + negative_seg_name_intra_sampled + negative_seg_name_inter_sampled]
        ctx_text_feat = [self.sub_text_feat[seg_name[:11]][seg_name][self.pooling] for seg_name in
                         positive_seg_name_sampled + negative_seg_name_intra_sampled + negative_seg_name_inter_sampled]
        # NOTE(review): torch.from_numpy only happens inside these branches,
        # so with normalization disabled the ctx features stay numpy arrays
        # while the query features below are always tensors — confirm
        # downstream code tolerates this.
        if self.normalize_tfeat:
            query_text_feat = l2_normalize_np_array(query_text_feat)
            for i in range(len(ctx_text_feat)):
                ctx_text_feat[i] = torch.from_numpy(l2_normalize_np_array(ctx_text_feat[i]))
        if self.normalize_vfeat:
            query_vis_feat = l2_normalize_np_array(query_vis_feat)
            for i in range(len(ctx_vis_feat)):
                ctx_vis_feat[i] = torch.from_numpy(l2_normalize_np_array(ctx_vis_feat[i]))
        return edict(
            meta=meta,
            query_text_feat=torch.from_numpy(query_text_feat),
            query_vis_feat=torch.from_numpy(query_vis_feat),
            image_2_text_alignment=img_2_text_alignment,
            pos_ctx_vis_feat=ctx_vis_feat[0],
            intra_neg_ctx_vis_feat=ctx_vis_feat[1],
            inter_neg_ctx_vis_feat=ctx_vis_feat[2],
            pos_ctx_text_feat=ctx_text_feat[0],
            intra_neg_ctx_text_feat=ctx_text_feat[1],
            inter_neg_ctx_text_feat=ctx_text_feat[2],
        )
class AQVSR_Ranking_enum(Dataset):
    """
    Exhaustive-enumeration view for evaluation: the item list is the full
    Cartesian product of every query and every candidate segment in the
    split, so a model can score all (query, segment) pairs.

    Args:
        avg_pooling, boolean, default = False, True for avg_pooling, False for max_pooling
    Return:
        a dict: {
            "meta": {
                "query_id": int,
                "text_query": str,                    # purely text query
                "original_query": str,
                "query_image_path": str,
                "vid_name": str,                      # youtube_id (11)
                "answer_segment_name": list[str],     # name of segments: ["xtuiYd45q1W_segment1",...]
                "answer_segment_id": list[segment_id],  # unique_segment_id
                "answer_segment_info": list[[st,ed], ... [st,ed]],  # start_time, end_time of corresponding segment
                # modified in v2:
                "seg_id_for_ranking": int,
                "seg_name_for_ranking": str,
            }
            "query_text_feat": torch.tensor, (L, D_q)         # query feature
            "query_vis_feat": torch.tensor, (n_region, 2048)  # image feature / region feature
            "image_2_text_alignment": list[list]              # image to token alignment
            # modified in v2:
            "ctx_text_feat": torch.tensor, (n_clip_in_segment, dim_sub)  # sampled sub/video feature
            "ctx_vis_feat": torch.tensor, (n_sample, n_clip_in_segment, dim_video)
        }
    """
    def __init__(self, dset_name='test', normalize_vfeat=True, normalize_tfeat=True,
                 avg_pooling=False, annotation_root=ANNOTATION_PACKAGE_ROOT, feature_root=FEATURE_PACKAGE_ROOT):
        assert dset_name in ['train', 'valid', 'test'], "dset_name should be in 'train' 'valid' and 'test'"
        self.dset_name = dset_name
        # One annotation file per split, one json object per line.
        if dset_name == 'train':
            self.data = load_jsonl(os.path.join(annotation_root, 'trainset.jsonl'))
        elif dset_name == 'valid':
            self.data = load_jsonl(os.path.join(annotation_root, 'validset.jsonl'))
        elif dset_name == 'test':
            self.data = load_jsonl(os.path.join(annotation_root, 'testset.jsonl'))
        self.normalize_vfeat = normalize_vfeat
        self.normalize_tfeat = normalize_tfeat
        # Which clip-level pooling variant to read from the feature package.
        if avg_pooling:
            self.pooling = 'avg_pooling'
        else:
            self.pooling = 'max_pooling'
        # Collect the split's videos, then its query ids and candidate
        # segment ids (name[:11] = video id).
        self.pairlist = []
        vid_set = set()
        for query in self.data:
            vid = query['query_name'][:11]
            vid_set.add(vid)
        seg2id = load_json(os.path.join(ID_FILE_ROOT, 'id.json'))['seg2id']
        # collect query and seg
        self.query_ids = [self.data[i]['query_id'] for i in range(len(self.data))]
        self.seg_ids = [v for k, v in seg2id.items() if k[:11] in vid_set]
        self.n_query = len(self.query_ids)
        self.n_seg = len(self.seg_ids)
        # print(self.n_query, self.n_seg)
        # Full cross product: every query paired with every segment.
        for query in self.data:
            for seg_name, seg_id in seg2id.items():
                vid = seg_name[:11]
                if vid in vid_set:
                    self.pairlist.append(dict(
                        query_item=query,
                        seg_name=seg_name,
                        seg_id=seg_id,
                        vid=vid
                    ))
        # Eagerly load every feature table out of the single HDF5 package.
        with h5py.File(os.path.join(feature_root, 'feature.hdf5'), 'r') as f:
            self.query_text_feat = load_from_feature_package(f['query_text_feature'])
            self.query_img_feat = load_from_feature_package(f['query_grid_feature'])
            self.sub_text_feat = load_from_feature_package(f['subtitle_text_feature'])
            self.video_vis_feat = load_from_feature_package(f['frame_grid_feature'])
        # Bucket query ids by query modality ('Text Only', 'Video Only',
        # anything else counts as text+video).
        self.query_type = dict(
            text=[],
            video=[],
            text_video=[]
        )
        for item in self.data:
            q_type = item['query_type']
            if q_type == 'Text Only':
                self.query_type['text'].append(item['query_id'])
            elif q_type == 'Video Only':
                self.query_type['video'].append(item['query_id'])
            else:
                self.query_type['text_video'].append(item['query_id'])
        # For eval splits, record the query ids flagged as not overlapping
        # with the train set.
        if dset_name == 'valid' or dset_name == 'test':
            self.not_in_train = []
            for item in self.data:
                if item['not_in_train']:
                    self.not_in_train.append(item['query_id'])

    def __len__(self):
        return len(self.pairlist)

    def __getitem__(self, index):
        pair = self.pairlist[index]
        item = pair['query_item']
        seg_name = pair['seg_name']
        seg_id = pair['seg_id']
        vid = pair['vid']
        meta = edict(
            query_id=item['query_id'],
            query_name=item['query_name'],
            text_query=item['text_query'],
            original_query=item['original_query'],
            query_img_path=item['query_img_path'],
            vid_name=item['vid_name'],
            answer_segment_name=item['answer_segment_name'],
            answer_segment_id=item['answer_segment_id'],
            answer_segment_info=item['answer_segment_info'],
            seg_id_for_ranking=seg_id,
            seg_name_for_ranking=seg_name
        )
        query_text_feat = self.query_text_feat[item['vid_name']][item['query_name']]['feature'][0]
        img_2_text_alignment = self.query_text_feat[item['vid_name']][item['query_name']]['img_alignment']
        # Query image features are keyed by the image file name (path tail).
        query_vis_feat = self.query_img_feat[item['vid_name']][item['query_img_path'].split('/')[-1]]
        ctx_vis_feat = self.video_vis_feat[vid][seg_name][self.pooling]
        ctx_text_feat = self.sub_text_feat[vid][seg_name][self.pooling]
        # Optional row-wise L2 normalization per modality.
        if self.normalize_tfeat:
            query_text_feat = l2_normalize_np_array(query_text_feat)
            ctx_text_feat = l2_normalize_np_array(ctx_text_feat)
        if self.normalize_vfeat:
            query_vis_feat = l2_normalize_np_array(query_vis_feat)
            ctx_vis_feat = l2_normalize_np_array(ctx_vis_feat)
        return edict(
            meta=meta,
            query_text_feat=torch.from_numpy(query_text_feat),
            query_vis_feat=torch.from_numpy(query_vis_feat),
            image_2_text_alignment=img_2_text_alignment,
            ctx_vis_feat=torch.from_numpy(ctx_vis_feat),
            ctx_text_feat=torch.from_numpy(ctx_text_feat)
        )
if __name__ == "__main__":
pass | [
"numpy.ones",
"AQVSR.utils.basic_utils.load_from_feature_package",
"numpy.linalg.norm",
"os.path.join",
"torch.from_numpy",
"easydict.EasyDict",
"numpy.random.randint",
"torch.tensor",
"copy.deepcopy",
"torch.zeros_like",
"sys.path.append",
"torch.randn",
"numpy.arange",
"torch.ones"
] | [((271, 291), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (286, 291), False, 'import sys\n'), ((3315, 3322), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (3320, 3322), True, 'from easydict import EasyDict as edict\n'), ((3961, 4201), 'easydict.EasyDict', 'edict', ([], {'seg_id': 'batch_collect.seg_id', 'seg_name': 'batch_collect.seg_name', 'vid_name': 'batch_collect.vid_name', 'pad_ctx_vis_feat': 'pad_ctx_vis_feat', 'pad_ctx_text_feat': 'pad_ctx_text_feat', 'ctx_text_mask': 'ctx_text_mask', 'ctx_vis_mask': 'ctx_vis_mask'}), '(seg_id=batch_collect.seg_id, seg_name=batch_collect.seg_name,\n vid_name=batch_collect.vid_name, pad_ctx_vis_feat=pad_ctx_vis_feat,\n pad_ctx_text_feat=pad_ctx_text_feat, ctx_text_mask=ctx_text_mask,\n ctx_vis_mask=ctx_vis_mask)\n', (3966, 4201), True, 'from easydict import EasyDict as edict\n'), ((4486, 4493), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (4491, 4493), True, 'from easydict import EasyDict as edict\n'), ((5810, 6180), 'easydict.EasyDict', 'edict', ([], {'meta': 'batch_collect.meta', 'pad_query_text_feat': 'pad_query_text_feat', 'query_text_mask': 'query_text_mask', 'pad_query_vis_feat': 'pad_query_vis_feat', 'query_vis_mask': 'query_vis_mask', 'image_2_text_alignment': 'pad_img_text_alignment', 'pad_ctx_text_feat': 'pad_ctx_text_feat', 'pad_ctx_vis_feat': 'pad_ctx_vis_feat', 'ctx_text_mask': 'ctx_text_mask', 'ctx_vis_mask': 'ctx_vis_mask'}), '(meta=batch_collect.meta, pad_query_text_feat=pad_query_text_feat,\n query_text_mask=query_text_mask, pad_query_vis_feat=pad_query_vis_feat,\n query_vis_mask=query_vis_mask, image_2_text_alignment=\n pad_img_text_alignment, pad_ctx_text_feat=pad_ctx_text_feat,\n pad_ctx_vis_feat=pad_ctx_vis_feat, ctx_text_mask=ctx_text_mask,\n ctx_vis_mask=ctx_vis_mask)\n', (5815, 6180), True, 'from easydict import EasyDict as edict\n'), ((6399, 6406), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (6404, 6406), True, 'from easydict import EasyDict as edict\n'), ((7699, 
8550), 'easydict.EasyDict', 'edict', ([], {'meta': 'batch_collect.meta', 'pad_query_text_feat': 'pad_query_text_feat', 'query_text_mask': 'query_text_mask', 'pad_query_vis_feat': 'pad_query_vis_feat', 'query_vis_mask': 'query_vis_mask', 'image_2_text_alignment': 'pad_img_text_alignment', 'pad_pos_ctx_text_feat': 'pad_pos_ctx_text_feat', 'pad_pos_ctx_vis_feat': 'pad_pos_ctx_vis_feat', 'pos_ctx_text_mask': 'pos_ctx_text_mask', 'pos_ctx_vis_mask': 'pos_ctx_vis_mask', 'pad_intra_neg_ctx_text_feat': 'pad_intra_neg_ctx_text_feat', 'pad_intra_neg_ctx_vis_feat': 'pad_intra_neg_ctx_vis_feat', 'intra_neg_ctx_text_mask': 'intra_neg_ctx_text_mask', 'intra_neg_ctx_vis_mask': 'intra_neg_ctx_vis_mask', 'pad_inter_neg_ctx_text_feat': 'pad_inter_neg_ctx_text_feat', 'pad_inter_neg_ctx_vis_feat': 'pad_inter_neg_ctx_vis_feat', 'inter_neg_ctx_text_mask': 'inter_neg_ctx_text_mask', 'inter_neg_ctx_vis_mask': 'inter_neg_ctx_vis_mask'}), '(meta=batch_collect.meta, pad_query_text_feat=pad_query_text_feat,\n query_text_mask=query_text_mask, pad_query_vis_feat=pad_query_vis_feat,\n query_vis_mask=query_vis_mask, image_2_text_alignment=\n pad_img_text_alignment, pad_pos_ctx_text_feat=pad_pos_ctx_text_feat,\n pad_pos_ctx_vis_feat=pad_pos_ctx_vis_feat, pos_ctx_text_mask=\n pos_ctx_text_mask, pos_ctx_vis_mask=pos_ctx_vis_mask,\n pad_intra_neg_ctx_text_feat=pad_intra_neg_ctx_text_feat,\n pad_intra_neg_ctx_vis_feat=pad_intra_neg_ctx_vis_feat,\n intra_neg_ctx_text_mask=intra_neg_ctx_text_mask, intra_neg_ctx_vis_mask\n =intra_neg_ctx_vis_mask, pad_inter_neg_ctx_text_feat=\n pad_inter_neg_ctx_text_feat, pad_inter_neg_ctx_vis_feat=\n pad_inter_neg_ctx_vis_feat, inter_neg_ctx_text_mask=\n inter_neg_ctx_text_mask, inter_neg_ctx_vis_mask=inter_neg_ctx_vis_mask)\n', (7704, 8550), True, 'from easydict import EasyDict as edict\n'), ((8848, 8855), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (8853, 8855), True, 'from easydict import EasyDict as edict\n'), ((9250, 9280), 'copy.deepcopy', 'copy.deepcopy', 
(['query_text_mask'], {}), '(query_text_mask)\n', (9263, 9280), False, 'import copy\n'), ((9308, 9367), 'torch.ones', 'torch.ones', (['pad_query_text_feat.shape[:2]'], {'dtype': 'torch.long'}), '(pad_query_text_feat.shape[:2], dtype=torch.long)\n', (9318, 9367), False, 'import torch\n'), ((9941, 10373), 'easydict.EasyDict', 'edict', ([], {'meta': 'batch_collect.meta', 'pad_query_text_feat': 'pad_query_text_feat', 'query_text_mask': 'query_text_mask', 'pad_query_vis_feat': 'pad_query_vis_feat', 'query_vis_mask': 'query_vis_mask', 'query_token_type_ids': 'query_token_type_ids', 'image_2_text_alignment': 'batch_collect.image_2_text_alignment', 'pad_ctx_text_feat': 'pad_ctx_text_feat', 'pad_ctx_vis_feat': 'pad_ctx_vis_feat', 'ctx_text_mask': 'ctx_text_mask', 'ctx_vis_mask': 'ctx_vis_mask'}), '(meta=batch_collect.meta, pad_query_text_feat=pad_query_text_feat,\n query_text_mask=query_text_mask, pad_query_vis_feat=pad_query_vis_feat,\n query_vis_mask=query_vis_mask, query_token_type_ids=\n query_token_type_ids, image_2_text_alignment=batch_collect.\n image_2_text_alignment, pad_ctx_text_feat=pad_ctx_text_feat,\n pad_ctx_vis_feat=pad_ctx_vis_feat, ctx_text_mask=ctx_text_mask,\n ctx_vis_mask=ctx_vis_mask)\n', (9946, 10373), True, 'from easydict import EasyDict as edict\n'), ((2938, 3003), 'numpy.ones', 'np.ones', (['(bs, max_img_feat, max_alignment_length)'], {'dtype': 'np.int32'}), '((bs, max_img_feat, max_alignment_length), dtype=np.int32)\n', (2945, 3003), True, 'import numpy as np\n'), ((3697, 3732), 'torch.zeros_like', 'torch.zeros_like', (['pad_ctx_text_feat'], {}), '(pad_ctx_text_feat)\n', (3713, 3732), False, 'import torch\n'), ((3757, 3788), 'torch.zeros_like', 'torch.zeros_like', (['ctx_text_mask'], {}), '(ctx_text_mask)\n', (3773, 3788), False, 'import torch\n'), ((3860, 3894), 'torch.zeros_like', 'torch.zeros_like', (['pad_ctx_vis_feat'], {}), '(pad_ctx_vis_feat)\n', (3876, 3894), False, 'import torch\n'), ((3918, 3948), 'torch.zeros_like', 'torch.zeros_like', 
(['ctx_vis_mask'], {}), '(ctx_vis_mask)\n', (3934, 3948), False, 'import torch\n'), ((5274, 5311), 'torch.zeros_like', 'torch.zeros_like', (['pad_query_text_feat'], {}), '(pad_query_text_feat)\n', (5290, 5311), False, 'import torch\n'), ((5338, 5371), 'torch.zeros_like', 'torch.zeros_like', (['query_text_mask'], {}), '(query_text_mask)\n', (5354, 5371), False, 'import torch\n'), ((5424, 5460), 'torch.zeros_like', 'torch.zeros_like', (['pad_query_vis_feat'], {}), '(pad_query_vis_feat)\n', (5440, 5460), False, 'import torch\n'), ((5486, 5518), 'torch.zeros_like', 'torch.zeros_like', (['query_vis_mask'], {}), '(query_vis_mask)\n', (5502, 5518), False, 'import torch\n'), ((5569, 5604), 'torch.zeros_like', 'torch.zeros_like', (['pad_ctx_text_feat'], {}), '(pad_ctx_text_feat)\n', (5585, 5604), False, 'import torch\n'), ((5629, 5660), 'torch.zeros_like', 'torch.zeros_like', (['ctx_text_mask'], {}), '(ctx_text_mask)\n', (5645, 5660), False, 'import torch\n'), ((5709, 5743), 'torch.zeros_like', 'torch.zeros_like', (['pad_ctx_vis_feat'], {}), '(pad_ctx_vis_feat)\n', (5725, 5743), False, 'import torch\n'), ((5767, 5797), 'torch.zeros_like', 'torch.zeros_like', (['ctx_vis_mask'], {}), '(ctx_vis_mask)\n', (5783, 5797), False, 'import torch\n'), ((12137, 12152), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (12146, 12152), True, 'import numpy as np\n'), ((13718, 13741), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (13735, 13741), True, 'import numpy as np\n'), ((14070, 14259), 'easydict.EasyDict', 'edict', ([], {'query_id': 'qid', 'text_query': 'text_query', 'vid_name': 'vid_name', 'answer_segment_name': 'answer_segment_name', 'answer_segment_id': 'answer_segment_id', 'answer_segment_info': 'answer_segment_info'}), '(query_id=qid, text_query=text_query, vid_name=vid_name,\n answer_segment_name=answer_segment_name, answer_segment_id=\n answer_segment_id, answer_segment_info=answer_segment_info)\n', (14075, 14259), True, 'from easydict 
import EasyDict as edict\n'), ((14366, 14427), 'numpy.random.randint', 'np.random.randint', (['self.query_min_len', '(self.query_max_len + 1)'], {}), '(self.query_min_len, self.query_max_len + 1)\n', (14383, 14427), True, 'import numpy as np\n'), ((14452, 14479), 'torch.randn', 'torch.randn', (['query_len', '(768)'], {}), '(query_len, 768)\n', (14463, 14479), False, 'import torch\n'), ((14504, 14563), 'numpy.random.randint', 'np.random.randint', (['self.n_region_min', '(self.n_region_max + 1)'], {}), '(self.n_region_min, self.n_region_max + 1)\n', (14521, 14563), True, 'import numpy as np\n'), ((14587, 14618), 'torch.randn', 'torch.randn', (['n_img_region', '(2048)'], {}), '(n_img_region, 2048)\n', (14598, 14618), False, 'import torch\n'), ((14817, 14872), 'numpy.random.randint', 'np.random.randint', (['self.n_clip_min', '(self.n_clip_max + 1)'], {}), '(self.n_clip_min, self.n_clip_max + 1)\n', (14834, 14872), True, 'import numpy as np\n'), ((14894, 14919), 'torch.randn', 'torch.randn', (['n_clip', '(2048)'], {}), '(n_clip, 2048)\n', (14905, 14919), False, 'import torch\n'), ((14944, 14968), 'torch.randn', 'torch.randn', (['n_clip', '(768)'], {}), '(n_clip, 768)\n', (14955, 14968), False, 'import torch\n'), ((14985, 15175), 'easydict.EasyDict', 'edict', ([], {'meta': 'meta', 'query_text_feat': 'query_text_feat', 'query_vis_feat': 'query_vis_feat', 'image_2_text_alignment': 'img_2_text_alignment', 'ctx_vis_feat': 'ctx_vis_feat', 'ctx_text_feat': 'ctx_text_feat'}), '(meta=meta, query_text_feat=query_text_feat, query_vis_feat=\n query_vis_feat, image_2_text_alignment=img_2_text_alignment,\n ctx_vis_feat=ctx_vis_feat, ctx_text_feat=ctx_text_feat)\n', (14990, 15175), True, 'from easydict import EasyDict as edict\n'), ((19616, 20128), 'easydict.EasyDict', 'edict', ([], {'query_id': "item['query_id']", 'query_name': "item['query_name']", 'text_query': "item['text_query']", 'original_query': "item['original_query']", 'query_img_path': "item['query_img_path']", 'vid_name': 
"item['vid_name']", 'answer_segment_name': "item['answer_segment_name']", 'answer_segment_id': "item['answer_segment_id']", 'answer_segment_info': "item['answer_segment_info']", 'sample_seg_id_for_training': "item['answer_segment_id'][sample_seg_idx]", 'sample_seg_name_for_training': "item['answer_segment_name'][sample_seg_idx]"}), "(query_id=item['query_id'], query_name=item['query_name'], text_query=\n item['text_query'], original_query=item['original_query'],\n query_img_path=item['query_img_path'], vid_name=item['vid_name'],\n answer_segment_name=item['answer_segment_name'], answer_segment_id=item\n ['answer_segment_id'], answer_segment_info=item['answer_segment_info'],\n sample_seg_id_for_training=item['answer_segment_id'][sample_seg_idx],\n sample_seg_name_for_training=item['answer_segment_name'][sample_seg_idx])\n", (19621, 20128), True, 'from easydict import EasyDict as edict\n'), ((31665, 32348), 'easydict.EasyDict', 'edict', ([], {'query_id': "item['query_id']", 'query_name': "item['query_name']", 'text_query': "item['text_query']", 'original_query': "item['original_query']", 'query_img_path': "item['query_img_path']", 'vid_name': "item['vid_name']", 'answer_segment_name': "item['answer_segment_name']", 'answer_segment_id': "item['answer_segment_id']", 'answer_segment_info': "item['answer_segment_info']", 'pos_seg_id': 'positive_seg_id_sampled[0]', 'pos_seg_name': 'positive_seg_name_sampled[0]', 'intra_neg_seg_id': 'negative_seg_id_intra_sampled[0]', 'intra_neg_seg_name': 'negative_seg_name_intra_sampled[0]', 'inter_neg_seg_id': 'negative_seg_id_inter_sampled[0]', 'inter_neg_seg_name': 'negative_seg_name_inter_sampled[0]'}), "(query_id=item['query_id'], query_name=item['query_name'], text_query=\n item['text_query'], original_query=item['original_query'],\n query_img_path=item['query_img_path'], vid_name=item['vid_name'],\n answer_segment_name=item['answer_segment_name'], answer_segment_id=item\n ['answer_segment_id'], 
answer_segment_info=item['answer_segment_info'],\n pos_seg_id=positive_seg_id_sampled[0], pos_seg_name=\n positive_seg_name_sampled[0], intra_neg_seg_id=\n negative_seg_id_intra_sampled[0], intra_neg_seg_name=\n negative_seg_name_intra_sampled[0], inter_neg_seg_id=\n negative_seg_id_inter_sampled[0], inter_neg_seg_name=\n negative_seg_name_inter_sampled[0])\n", (31670, 32348), True, 'from easydict import EasyDict as edict\n'), ((39278, 39700), 'easydict.EasyDict', 'edict', ([], {'query_id': "item['query_id']", 'query_name': "item['query_name']", 'text_query': "item['text_query']", 'original_query': "item['original_query']", 'query_img_path': "item['query_img_path']", 'vid_name': "item['vid_name']", 'answer_segment_name': "item['answer_segment_name']", 'answer_segment_id': "item['answer_segment_id']", 'answer_segment_info': "item['answer_segment_info']", 'seg_id_for_ranking': 'seg_id', 'seg_name_for_ranking': 'seg_name'}), "(query_id=item['query_id'], query_name=item['query_name'], text_query=\n item['text_query'], original_query=item['original_query'],\n query_img_path=item['query_img_path'], vid_name=item['vid_name'],\n answer_segment_name=item['answer_segment_name'], answer_segment_id=item\n ['answer_segment_id'], answer_segment_info=item['answer_segment_info'],\n seg_id_for_ranking=seg_id, seg_name_for_ranking=seg_name)\n", (39283, 39700), True, 'from easydict import EasyDict as edict\n'), ((526, 574), 'numpy.linalg.norm', 'np.linalg.norm', (['np_array'], {'axis': '(-1)', 'keepdims': '(True)'}), '(np_array, axis=-1, keepdims=True)\n', (540, 574), True, 'import numpy as np\n'), ((1462, 1490), 'torch.tensor', 'torch.tensor', (['s'], {'dtype': 'dtype'}), '(s, dtype=dtype)\n', (1474, 1490), False, 'import torch\n'), ((14672, 14692), 'numpy.arange', 'np.arange', (['query_len'], {}), '(query_len)\n', (14681, 14692), True, 'import numpy as np\n'), ((18278, 18328), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', 
(["f['query_text_feature']"], {}), "(f['query_text_feature'])\n", (18303, 18328), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((18363, 18413), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['query_grid_feature']"], {}), "(f['query_grid_feature'])\n", (18388, 18413), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((18447, 18500), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['subtitle_text_feature']"], {}), "(f['subtitle_text_feature'])\n", (18472, 18500), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((18535, 18585), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['frame_grid_feature']"], {}), "(f['frame_grid_feature'])\n", (18560, 18585), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((23032, 23085), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['subtitle_text_feature']"], {}), "(f['subtitle_text_feature'])\n", (23057, 23085), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((23120, 23170), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['frame_grid_feature']"], {}), "(f['frame_grid_feature'])\n", (23145, 23170), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((28009, 28059), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['query_text_feature']"], {}), "(f['query_text_feature'])\n", (28034, 28059), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((28094, 28144), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['query_grid_feature']"], {}), 
"(f['query_grid_feature'])\n", (28119, 28144), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((28178, 28231), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['subtitle_text_feature']"], {}), "(f['subtitle_text_feature'])\n", (28203, 28231), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((28266, 28316), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['frame_grid_feature']"], {}), "(f['frame_grid_feature'])\n", (28291, 28316), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((37888, 37938), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['query_text_feature']"], {}), "(f['query_text_feature'])\n", (37913, 37938), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((37973, 38023), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['query_grid_feature']"], {}), "(f['query_grid_feature'])\n", (37998, 38023), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((38057, 38110), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['subtitle_text_feature']"], {}), "(f['subtitle_text_feature'])\n", (38082, 38110), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((38145, 38195), 'AQVSR.utils.basic_utils.load_from_feature_package', 'load_from_feature_package', (["f['frame_grid_feature']"], {}), "(f['frame_grid_feature'])\n", (38170, 38195), False, 'from AQVSR.utils.basic_utils import load_jsonl, load_json, load_from_feature_package\n'), ((17422, 17469), 'os.path.join', 'os.path.join', (['annotation_root', '"""trainset.jsonl"""'], {}), "(annotation_root, 'trainset.jsonl')\n", (17434, 17469), False, 
'import os\n'), ((18188, 18230), 'os.path.join', 'os.path.join', (['feature_root', '"""feature.hdf5"""'], {}), "(feature_root, 'feature.hdf5')\n", (18200, 18230), False, 'import os\n'), ((21202, 21235), 'torch.from_numpy', 'torch.from_numpy', (['query_text_feat'], {}), '(query_text_feat)\n', (21218, 21235), False, 'import torch\n'), ((21264, 21296), 'torch.from_numpy', 'torch.from_numpy', (['query_vis_feat'], {}), '(query_vis_feat)\n', (21280, 21296), False, 'import torch\n'), ((21380, 21410), 'torch.from_numpy', 'torch.from_numpy', (['ctx_vis_feat'], {}), '(ctx_vis_feat)\n', (21396, 21410), False, 'import torch\n'), ((21438, 21469), 'torch.from_numpy', 'torch.from_numpy', (['ctx_text_feat'], {}), '(ctx_text_feat)\n', (21454, 21469), False, 'import torch\n'), ((21922, 21969), 'os.path.join', 'os.path.join', (['annotation_root', '"""trainset.jsonl"""'], {}), "(annotation_root, 'trainset.jsonl')\n", (21934, 21969), False, 'import os\n'), ((22652, 22689), 'os.path.join', 'os.path.join', (['ID_FILE_ROOT', '"""id.json"""'], {}), "(ID_FILE_ROOT, 'id.json')\n", (22664, 22689), False, 'import os\n'), ((22944, 22986), 'os.path.join', 'os.path.join', (['feature_root', '"""feature.hdf5"""'], {}), "(feature_root, 'feature.hdf5')\n", (22956, 22986), False, 'import os\n'), ((23851, 23881), 'torch.from_numpy', 'torch.from_numpy', (['ctx_vis_feat'], {}), '(ctx_vis_feat)\n', (23867, 23881), False, 'import torch\n'), ((23909, 23940), 'torch.from_numpy', 'torch.from_numpy', (['ctx_text_feat'], {}), '(ctx_text_feat)\n', (23925, 23940), False, 'import torch\n'), ((26711, 26758), 'os.path.join', 'os.path.join', (['annotation_root', '"""trainset.jsonl"""'], {}), "(annotation_root, 'trainset.jsonl')\n", (26723, 26758), False, 'import os\n'), ((27627, 27664), 'os.path.join', 'os.path.join', (['ID_FILE_ROOT', '"""id.json"""'], {}), "(ID_FILE_ROOT, 'id.json')\n", (27639, 27664), False, 'import os\n'), ((27919, 27961), 'os.path.join', 'os.path.join', (['feature_root', '"""feature.hdf5"""'], 
{}), "(feature_root, 'feature.hdf5')\n", (27931, 27961), False, 'import os\n'), ((33845, 33878), 'torch.from_numpy', 'torch.from_numpy', (['query_text_feat'], {}), '(query_text_feat)\n', (33861, 33878), False, 'import torch\n'), ((33907, 33939), 'torch.from_numpy', 'torch.from_numpy', (['query_vis_feat'], {}), '(query_vis_feat)\n', (33923, 33939), False, 'import torch\n'), ((36228, 36275), 'os.path.join', 'os.path.join', (['annotation_root', '"""trainset.jsonl"""'], {}), "(annotation_root, 'trainset.jsonl')\n", (36240, 36275), False, 'import os\n'), ((36987, 37024), 'os.path.join', 'os.path.join', (['ID_FILE_ROOT', '"""id.json"""'], {}), "(ID_FILE_ROOT, 'id.json')\n", (36999, 37024), False, 'import os\n'), ((37798, 37840), 'os.path.join', 'os.path.join', (['feature_root', '"""feature.hdf5"""'], {}), "(feature_root, 'feature.hdf5')\n", (37810, 37840), False, 'import os\n'), ((40682, 40715), 'torch.from_numpy', 'torch.from_numpy', (['query_text_feat'], {}), '(query_text_feat)\n', (40698, 40715), False, 'import torch\n'), ((40744, 40776), 'torch.from_numpy', 'torch.from_numpy', (['query_vis_feat'], {}), '(query_vis_feat)\n', (40760, 40776), False, 'import torch\n'), ((40860, 40890), 'torch.from_numpy', 'torch.from_numpy', (['ctx_vis_feat'], {}), '(ctx_vis_feat)\n', (40876, 40890), False, 'import torch\n'), ((40918, 40949), 'torch.from_numpy', 'torch.from_numpy', (['ctx_text_feat'], {}), '(ctx_text_feat)\n', (40934, 40949), False, 'import torch\n'), ((14731, 14758), 'numpy.arange', 'np.arange', (['(1)', '(query_len - 1)'], {}), '(1, query_len - 1)\n', (14740, 14758), True, 'import numpy as np\n'), ((17541, 17588), 'os.path.join', 'os.path.join', (['annotation_root', '"""validset.jsonl"""'], {}), "(annotation_root, 'validset.jsonl')\n", (17553, 17588), False, 'import os\n'), ((22041, 22088), 'os.path.join', 'os.path.join', (['annotation_root', '"""validset.jsonl"""'], {}), "(annotation_root, 'validset.jsonl')\n", (22053, 22088), False, 'import os\n'), ((26830, 26877), 
'os.path.join', 'os.path.join', (['annotation_root', '"""validset.jsonl"""'], {}), "(annotation_root, 'validset.jsonl')\n", (26842, 26877), False, 'import os\n'), ((36347, 36394), 'os.path.join', 'os.path.join', (['annotation_root', '"""validset.jsonl"""'], {}), "(annotation_root, 'validset.jsonl')\n", (36359, 36394), False, 'import os\n'), ((17659, 17705), 'os.path.join', 'os.path.join', (['annotation_root', '"""testset.jsonl"""'], {}), "(annotation_root, 'testset.jsonl')\n", (17671, 17705), False, 'import os\n'), ((22159, 22205), 'os.path.join', 'os.path.join', (['annotation_root', '"""testset.jsonl"""'], {}), "(annotation_root, 'testset.jsonl')\n", (22171, 22205), False, 'import os\n'), ((26948, 26994), 'os.path.join', 'os.path.join', (['annotation_root', '"""testset.jsonl"""'], {}), "(annotation_root, 'testset.jsonl')\n", (26960, 26994), False, 'import os\n'), ((36465, 36511), 'os.path.join', 'os.path.join', (['annotation_root', '"""testset.jsonl"""'], {}), "(annotation_root, 'testset.jsonl')\n", (36477, 36511), False, 'import os\n')] |
import numpy as np
from PIL import Image
def image_to_array(filename):
    """Read an image file and return its first channel as a 2-D integer array.

    :param filename: path to an image readable by PIL
    :return: numpy array of shape (height, width) holding channel 0
        (e.g. the red channel of an RGB image)
    """
    # Context manager guarantees the underlying file handle is closed;
    # the original left it open after the lazy load.
    with Image.open(filename) as picture:
        nparray = np.asarray(picture, dtype=int)
    # Assumes a multi-channel image; keep only the first channel.
    return nparray[:, :, 0]
| [
"PIL.Image.open",
"numpy.asarray"
] | [((87, 107), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (97, 107), False, 'from PIL import Image\n'), ((122, 152), 'numpy.asarray', 'np.asarray', (['picture'], {'dtype': 'int'}), '(picture, dtype=int)\n', (132, 152), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : img.py
# @Author: <EMAIL>
# @Date : 2019-03-23
# @Desc :
import numpy as np
from PIL import Image
def RGB_to_gray(obs):
    """Crop the top 40 rows off a 256x240 observation, resize to 200x200
    and convert to a single grayscale channel.

    :param obs: numpy array holding an RGB frame
    :return: numpy array of shape (200, 200), grayscale
    """
    frame = Image.fromarray(obs)
    frame = frame.crop((0, 40, 256, 240)).resize((200, 200)).convert('L')
    return np.asarray(frame)
def get_gif(ims, name):
    """Save a sequence of frame arrays as an animated GIF named '<name>.gif'.

    :param ims: iterable of numpy arrays, one per frame
    :param name: output file name stem (converted with str())
    """
    frames = [Image.fromarray(frame) for frame in ims]
    frames[0].save(str(name) + '.gif', save_all=True, append_images=frames[1:])
| [
"PIL.Image.fromarray",
"numpy.asarray"
] | [((293, 308), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (303, 308), True, 'import numpy as np\n'), ((398, 419), 'PIL.Image.fromarray', 'Image.fromarray', (['item'], {}), '(item)\n', (413, 419), False, 'from PIL import Image\n'), ((191, 211), 'PIL.Image.fromarray', 'Image.fromarray', (['obs'], {}), '(obs)\n', (206, 211), False, 'from PIL import Image\n')] |
import numpy
import pypoman
from ..critical_region import CriticalRegion
from ..mplp_program import MPLP_Program
from ..utils.chebyshev_ball import chebyshev_ball
def get_chebyshev_information(region: CriticalRegion, deterministic_solver='glpk'):
    """Compute the Chebyshev ball of a critical region's polytope.

    :param region: A critical region
    :param deterministic_solver: The optimization solver to use
    :return: whatever chebyshev_ball returns for the region's constraint set
    """
    return chebyshev_ball(*region.get_constraints(), deterministic_solver=deterministic_solver)
def get_vertices(region: CriticalRegion, deterministic_solver='glpk'):
    """
    Generate the vertices of a Critical Region.

    :param region: A critical region
    :param deterministic_solver: The optimization Solver to use
    :return: A numpy array of the vertices of a critical region with vertices stored in rows
    """
    constraint_set = region.get_constraints()
    vertex_list = pypoman.compute_polytope_vertices(*constraint_set)
    return numpy.array(vertex_list)
def sample_region_convex_combination(region: CriticalRegion, dispersion=0, num_samples: int = 100,
                                     deterministic_solver='glpk'):
    """
    Sample points inside a polytopic critical region as random convex
    combinations of subsets of its vertices.

    Fixes over the original draft: the weights are now normalised by their
    sum (an L2-norm normalisation does not yield convex weights), the samples
    are actually collected and returned, and the debug prints are removed.

    :param region: A critical region
    :param dispersion: unused; kept for interface compatibility
    :param num_samples: number of points to generate
    :param deterministic_solver: The optimization Solver to use
    :return: a (num_samples, dim) numpy array of sampled points, or None if
        the Chebyshev ball of the region could not be computed
    """
    cheb_info = get_chebyshev_information(region, deterministic_solver)
    if cheb_info is None:
        return None
    vertices = get_vertices(region, deterministic_solver)
    num_vertices = len(vertices)
    # Combine between 2 and 4 vertices per sample, never more than exist
    num_combs = min(max(num_vertices, 2), 4)
    num_combs = min(num_combs, num_vertices)
    samples = []
    for _ in range(num_samples):
        chosen = numpy.random.choice(num_vertices, num_combs, replace=False)
        # Convex-combination weights: nonnegative and summing to 1
        weights = numpy.random.random(num_combs)
        weights = weights / weights.sum()
        samples.append(weights @ vertices[chosen])
    return numpy.array(samples)
def find_extents(A, b, d, x):
    """Distance from point ``x`` along direction ``d`` to the boundary of
    the polyhedron ``{y : A y <= b}``.

    Constraints whose normal has a non-positive projection onto ``d`` can
    never be reached moving forward, so they are ignored; if no constraint
    limits the ray the result is ``inf``.
    """
    along = A @ d
    slack = A @ x
    best = float('inf')
    for gain, reach, bound in zip(along, slack, b):
        if gain > 0:
            best = min(best, (bound - reach) / gain)
    return best
def hit_and_run(p, x_0, n_steps: int = 10):
    """Hit-and-run sampler over the polytope ``{y : p.A y <= p.b}``.

    Starting at ``x_0`` (a column vector inside the polytope), each step picks
    a random direction, measures how far the polytope extends forward and
    backward along it, and jumps to a uniformly drawn point on that chord.
    Steps that land outside (numerical safety check) are skipped.
    """
    dim = x_0.size
    for _ in range(n_steps):
        # Random direction, normalised to unit length
        raw = numpy.random.rand(dim).reshape(dim, -1)
        direction = raw / numpy.linalg.norm(raw, 2)
        # Chord extents in both directions from the current point
        forward = find_extents(p.A, p.b, direction, x_0)
        backward = find_extents(p.A, p.b, -direction, x_0)
        # Uniform draw along the chord
        step = numpy.random.uniform(-backward, forward) * direction
        candidate = x_0 + step
        if numpy.all(p.A @ candidate <= p.b):
            x_0 = candidate
    return x_0
def sample_program_theta_space(program: MPLP_Program, num_samples: int = 10):
    """Sample points from the feasible parameter (theta) space of *program*.

    NOTE(review): placeholder only — no sampling is implemented yet and the
    function currently returns ``None``.

    :param program: the multiparametric program whose theta space to sample
    :param num_samples: intended number of samples (currently unused)
    """
    pass
| [
"numpy.random.rand",
"numpy.random.choice",
"numpy.random.random",
"numpy.linalg.norm",
"numpy.random.uniform",
"numpy.all"
] | [((1502, 1561), 'numpy.random.choice', 'numpy.random.choice', (['num_vertices', 'num_combs'], {'replace': '(False)'}), '(num_vertices, num_combs, replace=False)\n', (1521, 1561), False, 'import numpy\n'), ((1616, 1646), 'numpy.random.random', 'numpy.random.random', (['num_combs'], {}), '(num_combs)\n', (1635, 1646), False, 'import numpy\n'), ((1673, 1698), 'numpy.linalg.norm', 'numpy.linalg.norm', (['weight'], {}), '(weight)\n', (1690, 1698), False, 'import numpy\n'), ((2338, 2363), 'numpy.linalg.norm', 'numpy.linalg.norm', (['vec', '(2)'], {}), '(vec, 2)\n', (2355, 2363), False, 'import numpy\n'), ((2762, 2816), 'numpy.random.uniform', 'numpy.random.uniform', (['(-extend_backward)', 'extent_forward'], {}), '(-extend_backward, extent_forward)\n', (2782, 2816), False, 'import numpy\n'), ((2889, 2925), 'numpy.all', 'numpy.all', (['(p.A @ (x_0 + pert) <= p.b)'], {}), '(p.A @ (x_0 + pert) <= p.b)\n', (2898, 2925), False, 'import numpy\n'), ((2275, 2298), 'numpy.random.rand', 'numpy.random.rand', (['size'], {}), '(size)\n', (2292, 2298), False, 'import numpy\n')] |
import os
from enum import Enum
from typing import Any, Dict, Tuple, List, Union
from pathlib import Path
from glob import glob
import numpy as np
import SimpleITK as sitk
from .augment import DataAugmentation
from .preprocess import Preprocess, Registration, RegionOfInterest
class FileType(Enum):
    """File-name suffixes of the per-patient NIfTI image files.

    ``sa``/``la`` = short/long axis, ``ed``/``es`` = end-diastole/end-systole;
    the ``_gt`` variants are the ground-truth segmentation masks.
    """
    sa_ed = 'SA_ED'
    sa_ed_gt = 'SA_ED_gt'
    sa_es = 'SA_ES'
    sa_es_gt = 'SA_ES_gt'
    la_ed = 'LA_ED'
    la_ed_gt = 'LA_ED_gt'
    la_es = 'LA_ES'
    la_es_gt = 'LA_ES_gt'
class ExtraType(Enum):
    """File-name suffix of the cached short-axis→long-axis registration transform (.tfm)."""
    reg_affine = 'SA_to_LA_registration_affine'
class Affine(Enum):
    """Keys naming the short- and long-axis affine matrices (not referenced in this chunk)."""
    sa_affine = 'sa_affine'
    la_affine = 'la_affine'
class OutputAffine(Enum):
    """Output-side keys for the affine matrices — presumably consumed by a caller; not used here."""
    sa_affine = 'SA_Affine'
    la_affine = 'LA_Affine'
class DataGenerator():
    """Loads, preprocesses, caches and serves the cardiac MRI patient data."""
    # Fixed pseudo-random ordering of the 160 training patient IDs.
    # Hard-coded so the train/validation split is reproducible across runs
    # and machines (consumed by get_cached_patient_list in __init__).
    _cached_data_shuffle = [138, 144, 139, 95, 68, 156, 41, 129, 42, 104, 79, 160,
                            11, 148, 93, 155, 112, 121, 99, 48, 117, 137, 39, 47,
                            134, 40, 145, 113, 10, 3, 24, 35, 136, 115, 19, 8, 49,
                            90, 44, 123, 25, 7, 54, 70, 150, 132, 14, 58, 51, 72,
                            143, 106, 36, 13, 116, 75, 100, 50, 111, 94, 142, 81,
                            16, 52, 63, 45, 55, 74, 102, 27, 2, 118, 130, 38, 26,
                            1, 149, 92, 56, 84, 6, 107, 76, 122, 109, 110, 15, 60,
                            147, 82, 20, 53, 71, 141, 73, 61, 67, 29, 126, 66, 18,
                            78, 22, 131, 146, 153, 62, 33, 96, 28, 46, 85, 152, 128,
                            57, 135, 34, 65, 31, 98, 125, 43, 30, 158, 127, 89, 23,
                            64, 97, 140, 12, 83, 120, 69, 159, 86, 91, 114, 133, 4,
                            32, 103, 119, 88, 17, 80, 87, 105, 101, 5, 151, 59, 108,
                            77, 37, 9, 124, 157, 154, 21]
def __init__(self, floating_precision: str = '32',
memory_cache: bool = True, disk_cache: bool = True,
test_directory: Union[str, Path, None] = None) -> None:
file_path = Path(__file__).parent.absolute()
expected_data_directory = os.path.join('..', '..', 'data')
self.data_directory = Path(os.path.join(file_path, expected_data_directory))
self.cache_directory = os.path.join('..', '..', 'data_cache')
self.cache_directory = Path(os.path.join(file_path, self.cache_directory))
self.train_directory = Path(os.path.join(self.data_directory, 'training'))
# For the purposes of model development, the 'validation' set is treated
# as the test set
# (It does not have ground truth - validated on submission only)
if test_directory is None:
self.testing_directory = Path(os.path.join(self.data_directory, 'validation'))
else:
self.testing_directory = test_directory
self.train_list = self.get_cached_patient_list(self.train_directory, self._cached_data_shuffle)
self.train_list, self.validation_list = self.split_list(self.train_list, split_fraction=150/160)
self.test_list = self.get_patient_list(self.testing_directory)
self.target_spacing = (1.25, 1.25, 10)
self.target_size = (192, 192, 17)
self.n_classes = 1 # Right ventricle only
self.floating_precision = floating_precision
# Compute the shape for the inputs and outputs
self.sa_target_shape = list(self.target_size)
self.sa_shape = self.sa_target_shape.copy()
self.sa_target_shape.append(self.n_classes)
self.la_target_shape = list(self.target_size)
self.la_shape = self.la_target_shape.copy()
self.la_shape[-1] = 1
self.la_target_shape[-1] = self.n_classes
self.affine_shape = (4, 4)
self.disk_cache = disk_cache
self.memory_cache = memory_cache
self.data_in_memory = {}
self.augmentation = DataAugmentation(seed=1235)
@staticmethod
def get_patient_list(root_directory: Union[str, Path]) -> List[Path]:
files = glob(os.path.join(root_directory, "**"))
files = [Path(i) for i in files]
return files
@staticmethod
def get_cached_patient_list(directory: Union[str, Path], cached_data) -> List[Path]:
files = [Path(os.path.join(directory, f'{cached_data[i]:03}'))
for i in range(len(cached_data))]
return files
@staticmethod
def randomise_list(item_list: List[Any], seed: Union[None, int]=None,
inplace: bool=True) -> List[Any]:
if not inplace:
item_list = item_list.copy()
random_generator = np.random.RandomState(seed)
random_generator.shuffle(item_list)
return item_list
@staticmethod
def randomise_list_cached(item_list: List[Any], cached_indexes: List[int]) -> List[Any]:
shuffled_list = []
for i in cached_indexes:
shuffled_list.append(item_list[i])
return shuffled_list
@staticmethod
def split_list(item_list: List[Any], split_fraction: float) -> Tuple[List[Any]]:
assert 0 < split_fraction < 1
split_index = int(len(item_list) * split_fraction)
split_1 = item_list[:split_index]
split_2 = item_list[split_index:]
return split_1, split_2
@staticmethod
def load_image(patient_directory: Union[str, Path], file_type: FileType) -> sitk.Image:
file_suffix = '*' + file_type.value + '.nii.gz'
file_path = os.path.join(patient_directory, file_suffix)
file_path = glob(file_path)
assert len(file_path) == 1
file_path = file_path[0]
sitk_image = sitk.ReadImage(file_path)
return sitk_image
@staticmethod
def load_transformation(patient_directory: Union[str, Path], file_type: ExtraType) -> sitk.Transform:
file_suffix = '*' + file_type.value + '.tfm'
file_path = os.path.join(patient_directory, file_suffix)
file_path = glob(file_path)
assert len(file_path) == 1
file_path = file_path[0]
sitk_transform = sitk.ReadTransform(file_path)
return sitk_transform
@staticmethod
def load_patient_data(patient_directory: Union[str, Path], has_gt: bool = True) -> Dict[str, sitk.Image]:
patient_data = {}
patient_data[FileType.sa_ed.value] = DataGenerator.load_image(patient_directory, FileType.sa_ed)
patient_data[FileType.sa_es.value] = DataGenerator.load_image(patient_directory, FileType.sa_es)
patient_data[FileType.la_ed.value] = DataGenerator.load_image(patient_directory, FileType.la_ed)
patient_data[FileType.la_es.value] = DataGenerator.load_image(patient_directory, FileType.la_es)
if has_gt:
patient_data[FileType.sa_ed_gt.value] = DataGenerator.load_image(patient_directory, FileType.sa_ed_gt)
patient_data[FileType.sa_es_gt.value] = DataGenerator.load_image(patient_directory, FileType.sa_es_gt)
patient_data[FileType.la_ed_gt.value] = DataGenerator.load_image(patient_directory, FileType.la_ed_gt)
patient_data[FileType.la_es_gt.value] = DataGenerator.load_image(patient_directory, FileType.la_es_gt)
return patient_data
@staticmethod
def load_extra_patient_data(patient_directory: Union[str, Path],
patient_data: Dict[str, sitk.Image]) -> Dict[str, sitk.Image]:
patient_data[ExtraType.reg_affine.value] = DataGenerator.load_transformation(patient_directory,
ExtraType.reg_affine)
return patient_data
    @staticmethod
    def preprocess_patient_data(patient_data: Dict[str, sitk.Image], spacing: Tuple[float],
                                size: Tuple[int], has_gt: bool = True, register: bool = True) -> Dict[str, sitk.Image]:
        """Standardise orientation, resample, crop to the heart ROI, register
        SA to LA and z-score normalise a patient's images.

        NOTE(review): mutates *patient_data* in place and also returns it.
        The in-plane spacing comes from *spacing*; the through-plane spacing
        of each image is preserved.

        :param patient_data: dict of images keyed by FileType value strings
        :param spacing: target (x, y, z) spacing; only x/y are used, z is
            taken from each image
        :param size: target (x, y, z) size for the short-axis crop; the
            long-axis crop uses z = 1
        :param has_gt: also process the ground-truth masks
        :param register: compute the SA->LA affine and store it under
            ExtraType.reg_affine
        :return: the (mutated) patient_data dict
        """
        # Standardise the orientation of the images
        # Short-axis
        direction = patient_data[FileType.sa_ed.value].GetDirection()
        # Heuristic orientation check on the first direction-cosine entry;
        # near-zero means the axes are swapped — TODO confirm threshold
        if direction[0] < 0.001:
            permute = sitk.PermuteAxesImageFilter()
            permute.SetOrder([1,0,2])
            patient_data[FileType.sa_ed.value] = permute.Execute(patient_data[FileType.sa_ed.value])
            patient_data[FileType.sa_es.value] = permute.Execute(patient_data[FileType.sa_es.value])
            if has_gt:
                patient_data[FileType.sa_ed_gt.value] = permute.Execute(patient_data[FileType.sa_ed_gt.value])
                patient_data[FileType.sa_es_gt.value] = permute.Execute(patient_data[FileType.sa_es_gt.value])
            flip_axes = [True, False, False]
            patient_data[FileType.sa_ed.value] = sitk.Flip(patient_data[FileType.sa_ed.value], flip_axes)
            patient_data[FileType.sa_es.value] = sitk.Flip(patient_data[FileType.sa_es.value], flip_axes)
            if has_gt:
                patient_data[FileType.sa_ed_gt.value] = sitk.Flip(patient_data[FileType.sa_ed_gt.value], flip_axes)
                patient_data[FileType.sa_es_gt.value] = sitk.Flip(patient_data[FileType.sa_es_gt.value], flip_axes)
        # Long-axis
        direction = patient_data[FileType.la_ed.value].GetDirection()
        # Negative z direction-cosine: flip the first axis to standardise
        if direction[8] < 0:
            flip_axes = [True, False, False]
            patient_data[FileType.la_ed.value] = sitk.Flip(patient_data[FileType.la_ed.value], flip_axes)
            patient_data[FileType.la_es.value] = sitk.Flip(patient_data[FileType.la_es.value], flip_axes)
            if has_gt:
                patient_data[FileType.la_ed_gt.value] = sitk.Flip(patient_data[FileType.la_ed_gt.value], flip_axes)
                patient_data[FileType.la_es_gt.value] = sitk.Flip(patient_data[FileType.la_es_gt.value], flip_axes)
        # Resample images to standardised spacing and size
        # Short-axis (keep each image's own slice thickness)
        sa_spacing = list(spacing)
        sa_spacing[2] = patient_data[FileType.sa_ed.value].GetSpacing()[2]
        sa_size = None
        patient_data[FileType.sa_ed.value] = Preprocess.resample_image(patient_data[FileType.sa_ed.value],
                                                                       sa_spacing, sa_size, is_label=False)
        patient_data[FileType.sa_es.value] = Preprocess.resample_image(patient_data[FileType.sa_es.value],
                                                                       sa_spacing, sa_size, is_label=False)
        if has_gt:
            patient_data[FileType.sa_ed_gt.value] = Preprocess.resample_image(patient_data[FileType.sa_ed_gt.value],
                                                                              sa_spacing, sa_size, is_label=True)
            patient_data[FileType.sa_es_gt.value] = Preprocess.resample_image(patient_data[FileType.sa_es_gt.value],
                                                                              sa_spacing, sa_size, is_label=True)
        # Long-axis
        la_spacing = list(spacing)
        la_spacing[2] = patient_data[FileType.la_ed.value].GetSpacing()[2]
        la_size = None
        patient_data[FileType.la_ed.value] = Preprocess.resample_image(patient_data[FileType.la_ed.value],
                                                                       la_spacing, la_size, is_label=False)
        patient_data[FileType.la_es.value] = Preprocess.resample_image(patient_data[FileType.la_es.value],
                                                                       la_spacing, la_size, is_label=False)
        if has_gt:
            patient_data[FileType.la_ed_gt.value] = Preprocess.resample_image(patient_data[FileType.la_ed_gt.value],
                                                                              la_spacing, la_size, is_label=True)
            patient_data[FileType.la_es_gt.value] = Preprocess.resample_image(patient_data[FileType.la_es_gt.value],
                                                                              la_spacing, la_size, is_label=True)
        # Find heart ROI
        # Short-axis
        # TODO: Find where x/y are switched
        sa_y_centre, sa_x_centre = RegionOfInterest.detect_roi_sa(patient_data[FileType.sa_ed.value],
                                                                   patient_data[FileType.sa_es.value])
        # Long-axis
        la_y_centre, la_x_centre = RegionOfInterest.detect_roi_la(patient_data[FileType.la_ed.value],
                                                                   patient_data[FileType.la_es.value])
        # Crop and/or pad to centre size
        # Short-axis (centre the stack on the middle slice)
        sa_centroid = (sa_x_centre, sa_y_centre, size[-1] // 2)
        patient_data[FileType.sa_ed.value] = Preprocess.crop(patient_data[FileType.sa_ed.value],
                                                             sa_centroid,
                                                             size)
        patient_data[FileType.sa_es.value] = Preprocess.crop(patient_data[FileType.sa_es.value],
                                                             sa_centroid,
                                                             size)
        if has_gt:
            patient_data[FileType.sa_ed_gt.value] = Preprocess.crop(patient_data[FileType.sa_ed_gt.value],
                                                                    sa_centroid,
                                                                    size)
            patient_data[FileType.sa_es_gt.value] = Preprocess.crop(patient_data[FileType.sa_es_gt.value],
                                                                    sa_centroid,
                                                                    size)
        # Long-axis (single slice, so z size is forced to 1)
        la_centroid = (la_x_centre, la_y_centre, 0)
        la_size = list(size)
        la_size[2] = 1
        patient_data[FileType.la_ed.value] = Preprocess.crop(patient_data[FileType.la_ed.value],
                                                             la_centroid,
                                                             la_size)
        patient_data[FileType.la_es.value] = Preprocess.crop(patient_data[FileType.la_es.value],
                                                             la_centroid,
                                                             la_size)
        if has_gt:
            patient_data[FileType.la_ed_gt.value] = Preprocess.crop(patient_data[FileType.la_ed_gt.value],
                                                                    la_centroid,
                                                                    la_size)
            patient_data[FileType.la_es_gt.value] = Preprocess.crop(patient_data[FileType.la_es_gt.value],
                                                                    la_centroid,
                                                                    la_size)
        # Register short-axis to long axis (only for end diastolic for faster execution time)
        if register:
            affine_transform, _ = Registration.register(patient_data[FileType.sa_ed.value],
                                                         patient_data[FileType.la_ed.value])
            patient_data[ExtraType.reg_affine.value] = affine_transform
        # Normalise intensities
        patient_data[FileType.sa_ed.value] = Preprocess.z_score_normalisation(patient_data[FileType.sa_ed.value])
        patient_data[FileType.sa_es.value] = Preprocess.z_score_normalisation(patient_data[FileType.sa_es.value])
        patient_data[FileType.la_ed.value] = Preprocess.z_score_normalisation(patient_data[FileType.la_ed.value])
        patient_data[FileType.la_es.value] = Preprocess.z_score_normalisation(patient_data[FileType.la_es.value])
        return patient_data
def get_cache_directory(self, patient_directory: Union[str, Path]) -> Path:
path = os.path.normpath(patient_directory)
split_path = path.split(os.sep)
# .. / data / training or vlaidation / patient ID
# only last two are of interest
cache_directory = Path(os.path.join(self.cache_directory,
split_path[-2],
split_path[-1]))
return cache_directory
def is_cached(self, patient_directory: Union[str, Path], has_gt: bool = True) -> bool:
patient_cache_directory = self.get_cache_directory(patient_directory)
# Check if folder exists
if os.path.isdir(patient_cache_directory):
# and every individual file exist
for expected_file_name in FileType:
if not has_gt and expected_file_name.value.endswith('_gt'):
continue
expected_file_path = os.path.join(patient_cache_directory,
expected_file_name.value + '.nii.gz')
if not os.path.exists(expected_file_path):
return False
for expected_file_name in ExtraType:
expected_file_path = os.path.join(patient_cache_directory,
expected_file_name.value + '.tfm')
if not os.path.exists(expected_file_path):
return False
return True
return False
def save_cache(self, patient_directory: Union[str, Path],
patient_data: Dict[str, sitk.Image]) -> None:
if not self.disk_cache:
return
patient_cache_directory = self.get_cache_directory(patient_directory)
os.makedirs(patient_cache_directory, exist_ok=True)
for key, data in patient_data.items():
if key in (k.value for k in FileType):
file_path = os.path.join(patient_cache_directory, key + '.nii.gz')
sitk.WriteImage(data, file_path)
elif key in (k.value for k in ExtraType):
file_path = os.path.join(patient_cache_directory, key + '.tfm')
sitk.WriteTransform(data, file_path)
def load_cache(self, patient_directory: Union[str, Path], has_gt: bool = True) -> Dict[str, sitk.Image]:
patient_cache_directory = self.get_cache_directory(patient_directory)
patient_data = self.load_patient_data(patient_cache_directory, has_gt)
patient_data = self.load_extra_patient_data(patient_cache_directory, patient_data)
return patient_data
def is_in_memory(self, patient_directory: Union[str, Path]) -> bool:
if patient_directory in self.data_in_memory:
return True
return False
def save_memory(self, patient_directory: Union[str, Path],
patient_data: Dict[str, sitk.Image]) -> None:
if self.memory_cache:
self.data_in_memory[patient_directory] = patient_data.copy()
def get_memory(self, patient_directory: Union[str, Path]) -> Dict[str, sitk.Image]:
patient_data = self.data_in_memory[patient_directory]
return patient_data.copy()
def augment_data(self, patient_data: Dict[str, sitk.Image]) -> Dict[str, sitk.Image]:
(patient_data[FileType.sa_ed.value], patient_data[FileType.sa_ed_gt.value],
sa_affine) = self.augmentation.random_augmentation(patient_data[FileType.sa_ed.value],
patient_data[FileType.sa_ed_gt.value],
use_cache=False)
(patient_data[FileType.sa_es.value], patient_data[FileType.sa_es_gt.value],
sa_affine) = self.augmentation.random_augmentation(patient_data[FileType.sa_es.value],
patient_data[FileType.sa_es_gt.value],
use_cache=True)
(patient_data[FileType.la_ed.value], patient_data[FileType.la_ed_gt.value],
la_affine) = self.augmentation.random_augmentation(patient_data[FileType.la_ed.value],
patient_data[FileType.la_ed_gt.value],
use_cache=False)
(patient_data[FileType.la_es.value], patient_data[FileType.la_es_gt.value],
la_affine) = self.augmentation.random_augmentation(patient_data[FileType.la_es.value],
patient_data[FileType.la_es_gt.value],
use_cache=True)
patient_data[Affine.sa_affine.value] = sa_affine
patient_data[Affine.la_affine.value] = la_affine
return patient_data
def to_numpy(self, patient_data: Dict[str, sitk.Image], has_affine_matrix: bool) -> Dict[str, np.ndarray]:
# Handle 'ExtraType' data first
if has_affine_matrix:
if (Affine.sa_affine.value in patient_data and
Affine.la_affine.value in patient_data):
affine_matrix = sitk.CompositeTransform([patient_data[Affine.sa_affine.value].GetInverse(),
patient_data[ExtraType.reg_affine.value],
patient_data[Affine.la_affine.value]])
else:
affine_matrix = patient_data[ExtraType.reg_affine.value]
sa_affine = Registration.get_affine_registration_matrix(patient_data[FileType.sa_ed.value],
affine_matrix)
sa_affine = sa_affine.astype(np.float32)
la_affine = Registration.get_affine_matrix(patient_data[FileType.la_ed.value])
la_affine = la_affine.astype(np.float32)
# Free from memory (and indexing)
if ExtraType.reg_affine.value in patient_data:
del patient_data[ExtraType.reg_affine.value]
if Affine.sa_affine.value in patient_data:
del patient_data[Affine.sa_affine.value]
if Affine.la_affine.value in patient_data:
del patient_data[Affine.la_affine.value]
# Handle original file data (images and segmentations)
for key, image in patient_data.items():
numpy_image = sitk.GetArrayFromImage(image)
# Swap axes so ordering is x, y, z rather than z, y, x as stored
# in sitk
numpy_image = np.swapaxes(numpy_image, 0, -1)
# Select right-ventricle labels only
if 'gt' in key:
numpy_image = numpy_image.astype(np.uint8)
if 'SA' in key:
numpy_image = np.expand_dims(numpy_image, axis=-1)
numpy_image[numpy_image != 3] = 0
numpy_image[numpy_image == 3] = 1
if self.floating_precision == '16':
numpy_image = numpy_image.astype(np.float16)
else:
numpy_image = numpy_image.astype(np.float32)
# Add 'channel' axis for 3D images
#if 'sa' in key:
# numpy_image = np.expand_dims(numpy_image, axis=-1)
patient_data[key] = numpy_image
if has_affine_matrix:
patient_data[OutputAffine.sa_affine.value] = sa_affine
patient_data[OutputAffine.la_affine.value] = la_affine
return patient_data
@staticmethod
def to_structure(patient_data: Dict[str, sitk.Image], has_affine_matrix: bool,
has_gt: bool = True):
output_data = []
if has_gt:
output_data.append(({'input_sa': patient_data[FileType.sa_ed.value],
'input_la': patient_data[FileType.la_ed.value]},
{'output_sa': patient_data[FileType.sa_ed_gt.value],
'output_la': patient_data[FileType.la_ed_gt.value]}))
output_data.append(({'input_sa': patient_data[FileType.sa_es.value],
'input_la': patient_data[FileType.la_es.value]},
{'output_sa': patient_data[FileType.sa_es_gt.value],
'output_la': patient_data[FileType.la_es_gt.value]}))
else:
output_data.append(({'input_sa': patient_data[FileType.sa_ed.value],
'input_la': patient_data[FileType.la_ed.value]},))
output_data.append(({'input_sa': patient_data[FileType.sa_es.value],
'input_la': patient_data[FileType.la_es.value]},))
if has_affine_matrix:
for data in output_data:
data[0]['input_sa_affine'] = patient_data[OutputAffine.sa_affine.value]
data[0]['input_la_affine'] = patient_data[OutputAffine.la_affine.value]
return output_data
def generator(self, patient_directory: Union[str, Path], affine_matrix: bool,
has_gt: bool = True, augment: bool = False) -> Tuple[Dict[str, np.ndarray]]:
if self.is_in_memory(patient_directory):
patient_data = self.get_memory(patient_directory)
elif self.is_cached(patient_directory, has_gt):
patient_data = self.load_cache(patient_directory, has_gt)
self.save_memory(patient_directory, patient_data)
else:
patient_data = self.load_patient_data(patient_directory, has_gt)
patient_data = self.preprocess_patient_data(patient_data,
self.target_spacing,
self.target_size,
has_gt,
affine_matrix)
self.save_cache(patient_directory, patient_data)
self.save_memory(patient_directory, patient_data)
if augment:
patient_data = self.augment_data(patient_data)
patient_data = self.to_numpy(patient_data, affine_matrix)
output_data = self.to_structure(patient_data, affine_matrix, has_gt)
return output_data
def sitk_generator(self, patient_directory: Union[str, Path], has_gt: bool = True) -> Tuple[Dict[str, np.ndarray]]:
"""
Returns pre- and post-processed data in sitk
"""
if self.is_cached(patient_directory, has_gt):
pre_patient_data = DataGenerator.load_patient_data(patient_directory, has_gt)
post_patient_data = self.load_cache(patient_directory, has_gt)
else:
pre_patient_data = self.load_patient_data(patient_directory, has_gt)
post_patient_data = self.load_patient_data(patient_directory, has_gt)
post_patient_data = self.preprocess_patient_data(post_patient_data,
self.target_spacing,
self.target_size,
has_gt,
False)
self.save_cache(patient_directory, pre_patient_data)
pre_output_data = self.to_structure(pre_patient_data, False, has_gt)
post_output_data = self.to_structure(post_patient_data, False, has_gt)
return pre_output_data, post_output_data
def train_generator(self, augment: bool = True, verbose: int = 0) -> Tuple[Dict[str, np.ndarray]]:
for patient_directory in self.train_list:
if verbose > 0:
print('Generating patient: ', patient_directory)
patient_data = self.generator(patient_directory, affine_matrix=True, augment=augment)
yield patient_data[0] # End diastolic
yield patient_data[1] # End systolic
def validation_generator(self, verbose: int = 0) -> Tuple[Dict[str, np.ndarray]]:
for patient_directory in self.validation_list:
if verbose > 0:
print('Generating patient: ', patient_directory)
patient_data = self.generator(patient_directory, affine_matrix=True)
yield patient_data[0]
yield patient_data[1]
def test_generator(self, verbose: int = 0) -> Tuple[Dict[str, np.ndarray]]:
for patient_directory in self.test_list:
if verbose > 0:
print('Generating patient: ', patient_directory)
patient_data = self.generator(patient_directory, affine_matrix=True)
yield patient_data[0]
yield patient_data[1]
def test_generator_inference(self, verbose: int = 0) -> Tuple[Dict[str, np.ndarray]]:
for patient_directory in self.test_list:
if verbose > 0:
print('Generating patient: ', patient_directory)
patient_data = self.generator(patient_directory, affine_matrix=True, has_gt=False)
pre_patient_data, post_patient_data = self.sitk_generator(patient_directory, has_gt=False)
yield patient_data[0], pre_patient_data[0], post_patient_data[0], patient_directory, 'ed'
yield patient_data[1], pre_patient_data[1], post_patient_data[1], patient_directory, 'es'
| [
"os.path.exists",
"SimpleITK.Flip",
"os.makedirs",
"pathlib.Path",
"SimpleITK.WriteTransform",
"SimpleITK.ReadTransform",
"os.path.join",
"SimpleITK.GetArrayFromImage",
"SimpleITK.WriteImage",
"os.path.normpath",
"numpy.swapaxes",
"os.path.isdir",
"SimpleITK.PermuteAxesImageFilter",
"numpy... | [((2124, 2156), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""data"""'], {}), "('..', '..', 'data')\n", (2136, 2156), False, 'import os\n'), ((2282, 2320), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""data_cache"""'], {}), "('..', '..', 'data_cache')\n", (2294, 2320), False, 'import os\n'), ((4775, 4802), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4796, 4802), True, 'import numpy as np\n'), ((5734, 5778), 'os.path.join', 'os.path.join', (['patient_directory', 'file_suffix'], {}), '(patient_directory, file_suffix)\n', (5746, 5778), False, 'import os\n'), ((5799, 5814), 'glob.glob', 'glob', (['file_path'], {}), '(file_path)\n', (5803, 5814), False, 'from glob import glob\n'), ((5913, 5938), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['file_path'], {}), '(file_path)\n', (5927, 5938), True, 'import SimpleITK as sitk\n'), ((6185, 6229), 'os.path.join', 'os.path.join', (['patient_directory', 'file_suffix'], {}), '(patient_directory, file_suffix)\n', (6197, 6229), False, 'import os\n'), ((6250, 6265), 'glob.glob', 'glob', (['file_path'], {}), '(file_path)\n', (6254, 6265), False, 'from glob import glob\n'), ((6368, 6397), 'SimpleITK.ReadTransform', 'sitk.ReadTransform', (['file_path'], {}), '(file_path)\n', (6386, 6397), True, 'import SimpleITK as sitk\n'), ((16271, 16306), 'os.path.normpath', 'os.path.normpath', (['patient_directory'], {}), '(patient_directory)\n', (16287, 16306), False, 'import os\n'), ((16900, 16938), 'os.path.isdir', 'os.path.isdir', (['patient_cache_directory'], {}), '(patient_cache_directory)\n', (16913, 16938), False, 'import os\n'), ((18050, 18101), 'os.makedirs', 'os.makedirs', (['patient_cache_directory'], {'exist_ok': '(True)'}), '(patient_cache_directory, exist_ok=True)\n', (18061, 18101), False, 'import os\n'), ((2201, 2249), 'os.path.join', 'os.path.join', (['file_path', 'expected_data_directory'], {}), '(file_path, expected_data_directory)\n', (2213, 2249), 
False, 'import os\n'), ((2357, 2402), 'os.path.join', 'os.path.join', (['file_path', 'self.cache_directory'], {}), '(file_path, self.cache_directory)\n', (2369, 2402), False, 'import os\n'), ((2449, 2494), 'os.path.join', 'os.path.join', (['self.data_directory', '"""training"""'], {}), "(self.data_directory, 'training')\n", (2461, 2494), False, 'import os\n'), ((4131, 4165), 'os.path.join', 'os.path.join', (['root_directory', '"""**"""'], {}), "(root_directory, '**')\n", (4143, 4165), False, 'import os\n'), ((4184, 4191), 'pathlib.Path', 'Path', (['i'], {}), '(i)\n', (4188, 4191), False, 'from pathlib import Path\n'), ((8467, 8496), 'SimpleITK.PermuteAxesImageFilter', 'sitk.PermuteAxesImageFilter', ([], {}), '()\n', (8494, 8496), True, 'import SimpleITK as sitk\n'), ((9089, 9145), 'SimpleITK.Flip', 'sitk.Flip', (['patient_data[FileType.sa_ed.value]', 'flip_axes'], {}), '(patient_data[FileType.sa_ed.value], flip_axes)\n', (9098, 9145), True, 'import SimpleITK as sitk\n'), ((9195, 9251), 'SimpleITK.Flip', 'sitk.Flip', (['patient_data[FileType.sa_es.value]', 'flip_axes'], {}), '(patient_data[FileType.sa_es.value], flip_axes)\n', (9204, 9251), True, 'import SimpleITK as sitk\n'), ((9729, 9785), 'SimpleITK.Flip', 'sitk.Flip', (['patient_data[FileType.la_ed.value]', 'flip_axes'], {}), '(patient_data[FileType.la_ed.value], flip_axes)\n', (9738, 9785), True, 'import SimpleITK as sitk\n'), ((9835, 9891), 'SimpleITK.Flip', 'sitk.Flip', (['patient_data[FileType.la_es.value]', 'flip_axes'], {}), '(patient_data[FileType.la_es.value], flip_axes)\n', (9844, 9891), True, 'import SimpleITK as sitk\n'), ((16476, 16542), 'os.path.join', 'os.path.join', (['self.cache_directory', 'split_path[-2]', 'split_path[-1]'], {}), '(self.cache_directory, split_path[-2], split_path[-1])\n', (16488, 16542), False, 'import os\n'), ((22928, 22957), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image'], {}), '(image)\n', (22950, 22957), True, 'import SimpleITK as sitk\n'), ((23083, 
23114), 'numpy.swapaxes', 'np.swapaxes', (['numpy_image', '(0)', '(-1)'], {}), '(numpy_image, 0, -1)\n', (23094, 23114), True, 'import numpy as np\n'), ((2753, 2800), 'os.path.join', 'os.path.join', (['self.data_directory', '"""validation"""'], {}), "(self.data_directory, 'validation')\n", (2765, 2800), False, 'import os\n'), ((4377, 4424), 'os.path.join', 'os.path.join', (['directory', 'f"""{cached_data[i]:03}"""'], {}), "(directory, f'{cached_data[i]:03}')\n", (4389, 4424), False, 'import os\n'), ((9331, 9390), 'SimpleITK.Flip', 'sitk.Flip', (['patient_data[FileType.sa_ed_gt.value]', 'flip_axes'], {}), '(patient_data[FileType.sa_ed_gt.value], flip_axes)\n', (9340, 9390), True, 'import SimpleITK as sitk\n'), ((9447, 9506), 'SimpleITK.Flip', 'sitk.Flip', (['patient_data[FileType.sa_es_gt.value]', 'flip_axes'], {}), '(patient_data[FileType.sa_es_gt.value], flip_axes)\n', (9456, 9506), True, 'import SimpleITK as sitk\n'), ((9971, 10030), 'SimpleITK.Flip', 'sitk.Flip', (['patient_data[FileType.la_ed_gt.value]', 'flip_axes'], {}), '(patient_data[FileType.la_ed_gt.value], flip_axes)\n', (9980, 10030), True, 'import SimpleITK as sitk\n'), ((10087, 10146), 'SimpleITK.Flip', 'sitk.Flip', (['patient_data[FileType.la_es_gt.value]', 'flip_axes'], {}), '(patient_data[FileType.la_es_gt.value], flip_axes)\n', (10096, 10146), True, 'import SimpleITK as sitk\n'), ((17176, 17251), 'os.path.join', 'os.path.join', (['patient_cache_directory', "(expected_file_name.value + '.nii.gz')"], {}), "(patient_cache_directory, expected_file_name.value + '.nii.gz')\n", (17188, 17251), False, 'import os\n'), ((17497, 17569), 'os.path.join', 'os.path.join', (['patient_cache_directory', "(expected_file_name.value + '.tfm')"], {}), "(patient_cache_directory, expected_file_name.value + '.tfm')\n", (17509, 17569), False, 'import os\n'), ((18237, 18291), 'os.path.join', 'os.path.join', (['patient_cache_directory', "(key + '.nii.gz')"], {}), "(patient_cache_directory, key + '.nii.gz')\n", (18249, 
18291), False, 'import os\n'), ((18308, 18340), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['data', 'file_path'], {}), '(data, file_path)\n', (18323, 18340), True, 'import SimpleITK as sitk\n'), ((2057, 2071), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2061, 2071), False, 'from pathlib import Path\n'), ((17325, 17359), 'os.path.exists', 'os.path.exists', (['expected_file_path'], {}), '(expected_file_path)\n', (17339, 17359), False, 'import os\n'), ((17643, 17677), 'os.path.exists', 'os.path.exists', (['expected_file_path'], {}), '(expected_file_path)\n', (17657, 17677), False, 'import os\n'), ((18423, 18474), 'os.path.join', 'os.path.join', (['patient_cache_directory', "(key + '.tfm')"], {}), "(patient_cache_directory, key + '.tfm')\n", (18435, 18474), False, 'import os\n'), ((18491, 18527), 'SimpleITK.WriteTransform', 'sitk.WriteTransform', (['data', 'file_path'], {}), '(data, file_path)\n', (18510, 18527), True, 'import SimpleITK as sitk\n'), ((23330, 23366), 'numpy.expand_dims', 'np.expand_dims', (['numpy_image'], {'axis': '(-1)'}), '(numpy_image, axis=-1)\n', (23344, 23366), True, 'import numpy as np\n')] |
"""Animation of a model increasing in voxel resolution."""
import fourier_feature_nets as ffn
import numpy as np
import scenepic as sp
def voxels_animation(voxels: ffn.OcTree, min_depth=4, num_frames=300,
up_dir=(0, 1, 0), forward_dir=(0, 0, -1),
fov_y_degrees=40, resolution=(400, 400),
distance=4) -> sp.Scene:
"""Creates an animation of a model increasing in voxel resolution.
Args:
voxels (ffn.OcTree): The model. Needs to start at maximum resolution.
It will be pruned down to `min_depth`.
min_depth (int, optional): Minimum voxel depth for the animation.
Defaults to 4.
num_frames (int, optional): Number of frames to animate.
Defaults to 300.
up_dir (tuple, optional): The up direction. Used to determine the
axes of rotation for the animation.
Defaults to (0, 1, 0).
forward_dir (tuple, optional): The forward direction. Used to determine
the starting position of the camera
and the axes of rotation.
Defaults to (0, 0, -1).
fov_y_degrees (int, optional): Field of view for the camera.
Defaults to 40.
resolution (tuple, optional): Width and height of the canvas.
Defaults to (400, 400).
distance (int, optional): Camera distance. Defaults to 4.
Returns:
sp.Scene: A scene containing the animation.
"""
up_dir = np.array(up_dir, np.float32)
forward_dir = np.array(forward_dir, np.float32)
resolution = ffn.Resolution(*resolution)
scene = sp.Scene()
canvas = scene.create_canvas_3d(width=resolution.width,
height=resolution.height)
canvas.shading = sp.Shading(sp.Colors.White)
meshes = {}
labels = {}
max_depth = voxels.depth
bar = ffn.ETABar("Pruning OcTree", max=voxels.depth - min_depth + 1)
while voxels.depth >= min_depth:
bar.next()
bar.info(str(voxels.depth))
depth_meshes = []
leaf_centers = voxels.leaf_centers()
leaf_depths = voxels.leaf_depths()
leaf_colors = voxels.leaf_data()
depths = np.unique(leaf_depths)
for depth in depths:
mesh = scene.create_mesh()
transform = sp.Transforms.scale(pow(2., 1-depth) * voxels.scale)
mesh.add_cube(sp.Colors.White, transform=transform)
depth_centers = leaf_centers[leaf_depths == depth]
depth_colors = leaf_colors[leaf_depths == depth]
mesh.enable_instancing(depth_centers, colors=depth_colors)
depth_meshes.append(mesh)
meshes[voxels.depth] = depth_meshes
text = "{} voxels".format(len(leaf_colors))
labels[voxels.depth] = scene.create_label(text=text,
color=sp.Colors.Black,
font_family="arial",
size_in_pixels=75,
camera_space=True)
voxels = voxels.prune()
bar.finish()
orbit_cameras = ffn.orbit(up_dir, forward_dir, num_frames,
fov_y_degrees, resolution, distance)
sp_cameras = [cam.to_scenepic() for cam in orbit_cameras]
frame_depth = np.linspace(min_depth, max_depth + 1,
num_frames, endpoint=False).astype(np.int32)
for camera, depth in zip(sp_cameras, frame_depth):
frame = canvas.create_frame()
for mesh in meshes[depth]:
frame.add_mesh(mesh)
frame.add_label(labels[depth], [-.43, -.33, -1])
frame.camera = camera
return scene
if __name__ == "__main__":
voxels = ffn.OcTree.load("antinous_octree_10.npz")
scene = voxels_animation(voxels, resolution=(800, 800))
scene.save_as_html("voxels_animation.html", "Voxels Animation")
| [
"fourier_feature_nets.Resolution",
"numpy.unique",
"scenepic.Scene",
"numpy.array",
"numpy.linspace",
"fourier_feature_nets.orbit",
"fourier_feature_nets.ETABar",
"fourier_feature_nets.OcTree.load",
"scenepic.Shading"
] | [((1743, 1771), 'numpy.array', 'np.array', (['up_dir', 'np.float32'], {}), '(up_dir, np.float32)\n', (1751, 1771), True, 'import numpy as np\n'), ((1790, 1823), 'numpy.array', 'np.array', (['forward_dir', 'np.float32'], {}), '(forward_dir, np.float32)\n', (1798, 1823), True, 'import numpy as np\n'), ((1841, 1868), 'fourier_feature_nets.Resolution', 'ffn.Resolution', (['*resolution'], {}), '(*resolution)\n', (1855, 1868), True, 'import fourier_feature_nets as ffn\n'), ((1882, 1892), 'scenepic.Scene', 'sp.Scene', ([], {}), '()\n', (1890, 1892), True, 'import scenepic as sp\n'), ((2036, 2063), 'scenepic.Shading', 'sp.Shading', (['sp.Colors.White'], {}), '(sp.Colors.White)\n', (2046, 2063), True, 'import scenepic as sp\n'), ((2136, 2198), 'fourier_feature_nets.ETABar', 'ffn.ETABar', (['"""Pruning OcTree"""'], {'max': '(voxels.depth - min_depth + 1)'}), "('Pruning OcTree', max=voxels.depth - min_depth + 1)\n", (2146, 2198), True, 'import fourier_feature_nets as ffn\n'), ((3439, 3518), 'fourier_feature_nets.orbit', 'ffn.orbit', (['up_dir', 'forward_dir', 'num_frames', 'fov_y_degrees', 'resolution', 'distance'], {}), '(up_dir, forward_dir, num_frames, fov_y_degrees, resolution, distance)\n', (3448, 3518), True, 'import fourier_feature_nets as ffn\n'), ((4052, 4093), 'fourier_feature_nets.OcTree.load', 'ffn.OcTree.load', (['"""antinous_octree_10.npz"""'], {}), "('antinous_octree_10.npz')\n", (4067, 4093), True, 'import fourier_feature_nets as ffn\n'), ((2463, 2485), 'numpy.unique', 'np.unique', (['leaf_depths'], {}), '(leaf_depths)\n', (2472, 2485), True, 'import numpy as np\n'), ((3630, 3695), 'numpy.linspace', 'np.linspace', (['min_depth', '(max_depth + 1)', 'num_frames'], {'endpoint': '(False)'}), '(min_depth, max_depth + 1, num_frames, endpoint=False)\n', (3641, 3695), True, 'import numpy as np\n')] |
import numpy as np
import scipy.sparse as sp
import warnings
#import pdb
# Matrix-vector product wrapper
# A is a numpy 2d array or matrix, or a scipy matrix or sparse matrix.
# x is a numpy vector only.
# Compute A.dot(x) if t is False,
# A.transpose().dot(x) otherwise.
def mult(A, x, t=False):
if sp.issparse(A):
m = A.shape[0]
n = A.shape[1]
if(t):
return sp.csr_matrix(x).dot(A).transpose().todense().A[:,0]
return A.dot(sp.csr_matrix(x).transpose()).todense().A[:,0]
if t:
return x.dot(A)
return A.dot(x)
def orthog(Y,X):
"""Orthogonalize a vector or matrix Y against the columns of the matrix X.
This function requires that the column dimension of Y is less than X and
that Y and X have the same number of rows.
"""
dotY = mult(X,Y,t=True)
return Y - mult(X,dotY)
# Simple utility function used to check linear dependencies during computation:
def invcheck(x):
eps2 = 2*np.finfo(np.float).eps
if(x>eps2):
x = 1/x
else:
x = 0
warnings.warn("Ill-conditioning encountered, result accuracy may be poor")
return(x)
def tsvd(A,n,tol=0.0001,maxit=50):
"""Estimate a few of the largest singular values and corresponding singular
vectors of matrix using the implicitly restarted Lanczos bidiagonalization
method of Baglama and Reichel, see:
Augmented Implicitly Restarted Lanczos Bidiagonalization Methods,
<NAME> and <NAME>, SIAM J. Sci. Comput. 2005
Keyword arguments:
tol -- An estimation tolerance. Smaller means more accurate estimates.
maxit -- Maximum number of Lanczos iterations allowed.
Given an input matrix A of dimension j * k, and an input desired number
of singular values n, the function returns a tuple X with five entries:
X[0] A j * nu matrix of estimated left singular vectors.
X[1] A vector of length nu of estimated singular values.
X[2] A k * nu matrix of estimated right singular vectors.
X[3] The number of Lanczos iterations run.
X[4] The number of matrix-vector products run.
The algorithm estimates the truncated singular value decomposition:
A.dot(X[2]) = X[0]*X[1].
"""
nu = n
m = A.shape[0]
n = A.shape[1]
if(min(m,n)<2):
raise Exception("The input matrix must be at least 2x2.")
m_b = min((nu+20, 3*nu, n)) # Working dimension size
mprod = 0
it = 0
j = 0
k = nu
smax = 1
sparse = sp.issparse(A)
V = np.zeros((n,m_b))
W = np.zeros((m,m_b))
F = np.zeros((n,1))
B = np.zeros((m_b,m_b))
V[:,0] = np.random.randn(n) # Initial vector
V[:,0] = V[:,0]/np.linalg.norm(V)
while(it < maxit):
print("Iteration:", it)
if(it>0): j=k
W[:,j] = mult(A,V[:,j])
mprod+=1
if(it>0):
W[:,j] = orthog(W[:,j],W[:,0:j]) # NB W[:,0:j] selects columns 0,1,...,j-1
s = np.linalg.norm(W[:,j])
sinv = invcheck(s)
W[:,j] = sinv*W[:,j]
# Lanczos process
while(j<m_b):
F = mult(A,W[:,j],t=True)
mprod+=1
F = F - s*V[:,j]
F = orthog(F,V[:,0:j+1])
fn = np.linalg.norm(F)
fninv= invcheck(fn)
F = fninv * F
if(j<m_b-1):
V[:,j+1] = F
B[j,j] = s
B[j,j+1] = fn
W[:,j+1] = mult(A,V[:,j+1])
mprod+=1
# One step of classical Gram-Schmidt...
W[:,j+1] = W[:,j+1] - fn*W[:,j]
# ...with full reorthogonalization
W[:,j+1] = orthog(W[:,j+1],W[:,0:(j+1)])
s = np.linalg.norm(W[:,j+1])
sinv = invcheck(s)
W[:,j+1] = sinv * W[:,j+1]
else:
B[j,j] = s
j+=1
# End of Lanczos process
S = np.linalg.svd(B)
R = fn * S[0][m_b-1,:] # Residuals
if it<1:
smax = S[1][0] # Largest Ritz value
else:
smax = max((S[1][0],smax))
conv = sum(np.abs(R[0:nu]) < tol*smax)
if(conv < nu): # Not coverged yet
k = max(conv+nu,k)
k = min(k,m_b-3)
else:
break
# Update the Ritz vectors
V[:,0:k] = V[:,0:m_b].dot(S[2].transpose()[:,0:k])
V[:,k] = F
B = np.diag(S[1])
B[0:k,k] = R[0:k]
# Update the left approximate singular vectors
W[:,0:k] = W[:,0:m_b].dot(S[0][:,0:k])
it+=1
U = W[:,0:m_b].dot(S[0][:,0:nu])
V = V[:,0:m_b].dot(S[2].transpose()[:,0:nu])
return (U,S[1][0:nu],V,it,mprod) | [
"numpy.abs",
"scipy.sparse.issparse",
"numpy.linalg.svd",
"numpy.diag",
"numpy.zeros",
"numpy.linalg.norm",
"warnings.warn",
"numpy.finfo",
"scipy.sparse.csr_matrix",
"numpy.random.randn"
] | [((312, 326), 'scipy.sparse.issparse', 'sp.issparse', (['A'], {}), '(A)\n', (323, 326), True, 'import scipy.sparse as sp\n'), ((977, 1051), 'warnings.warn', 'warnings.warn', (['"""Ill-conditioning encountered, result accuracy may be poor"""'], {}), "('Ill-conditioning encountered, result accuracy may be poor')\n", (990, 1051), False, 'import warnings\n'), ((2325, 2339), 'scipy.sparse.issparse', 'sp.issparse', (['A'], {}), '(A)\n', (2336, 2339), True, 'import scipy.sparse as sp\n'), ((2347, 2365), 'numpy.zeros', 'np.zeros', (['(n, m_b)'], {}), '((n, m_b))\n', (2355, 2365), True, 'import numpy as np\n'), ((2371, 2389), 'numpy.zeros', 'np.zeros', (['(m, m_b)'], {}), '((m, m_b))\n', (2379, 2389), True, 'import numpy as np\n'), ((2395, 2411), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (2403, 2411), True, 'import numpy as np\n'), ((2417, 2437), 'numpy.zeros', 'np.zeros', (['(m_b, m_b)'], {}), '((m_b, m_b))\n', (2425, 2437), True, 'import numpy as np\n'), ((2449, 2467), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (2464, 2467), True, 'import numpy as np\n'), ((2503, 2520), 'numpy.linalg.norm', 'np.linalg.norm', (['V'], {}), '(V)\n', (2517, 2520), True, 'import numpy as np\n'), ((2717, 2740), 'numpy.linalg.norm', 'np.linalg.norm', (['W[:, j]'], {}), '(W[:, j])\n', (2731, 2740), True, 'import numpy as np\n'), ((3403, 3419), 'numpy.linalg.svd', 'np.linalg.svd', (['B'], {}), '(B)\n', (3416, 3419), True, 'import numpy as np\n'), ((3784, 3797), 'numpy.diag', 'np.diag', (['S[1]'], {}), '(S[1])\n', (3791, 3797), True, 'import numpy as np\n'), ((915, 933), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (923, 933), True, 'import numpy as np\n'), ((2917, 2934), 'numpy.linalg.norm', 'np.linalg.norm', (['F'], {}), '(F)\n', (2931, 2934), True, 'import numpy as np\n'), ((3259, 3286), 'numpy.linalg.norm', 'np.linalg.norm', (['W[:, j + 1]'], {}), '(W[:, j + 1])\n', (3273, 3286), True, 'import numpy as np\n'), ((3560, 3575), 
'numpy.abs', 'np.abs', (['R[0:nu]'], {}), '(R[0:nu])\n', (3566, 3575), True, 'import numpy as np\n'), ((449, 465), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['x'], {}), '(x)\n', (462, 465), True, 'import scipy.sparse as sp\n'), ((381, 397), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['x'], {}), '(x)\n', (394, 397), True, 'import scipy.sparse as sp\n')] |
from controller import Robot, Motor, DistanceSensor, Camera, Emitter, GPS
import struct
import numpy as np
import cv2 as cv
timeStep = 32 # Set the time step for the simulation
max_velocity = 6.28 # Set a maximum velocity time constant
robot = Robot()
# Create an object to control the left wheel
wheel_left = robot.getDevice("wheel1 motor")
# Create an object to control the right wheel
wheel_right = robot.getDevice("wheel2 motor")
#[left wheel speed, right wheel speed]
speeds = [max_velocity, max_velocity]
#Create objects for all robot sensors
leftDist = robot.getDevice("leftDist") # Get robot's left distance sensor
leftDist.enable(timeStep) # Enable left distance sensor
frontDist = robot.getDevice("frontDist")
frontDist.enable(timeStep)
rightDist = robot.getDevice("rightDist")
rightDist.enable(timeStep)
cam = robot.getDevice("camera")
cam.enable(timeStep)
colorSensor = robot.getDevice("color")
colorSensor.enable(timeStep)
emitter = robot.getDevice("emitter") # Emitter doesn't need enable
gps = robot.getDevice("gps")
gps.enable(timeStep)
wheel_left.setPosition(float("inf"))
wheel_right.setPosition(float("inf"))
def turn_right():
#set left wheel speed
speeds[0] = 0.6 * max_velocity
#set right wheel speed
speeds[1] = -0.2 * max_velocity
def turn_left():
#set left wheel speed
speeds[0] = -0.2 * max_velocity
#set right wheel speed
speeds[1] = 0.6 * max_velocity
def spin():
#set left wheel speed
speeds[0] = 0.6 * max_velocity
#set right wheel speed
speeds[1] = -0.6 * max_velocity
def delay(ms):
initTime = robot.getTime() # Store starting time (in seconds)
while robot.step(timeStep) != -1:
# If time elapsed (converted into ms) is greater than value passed in
if (robot.getTime() - initTime) * 1000.0 > ms:
break
def getColor():
img = colorSensor.getImage() # Grab color sensor camera's image view
# Return grayness of the only pixel (0-255)
print("Color: " + str(img[0][0][0]))
return colorSensor.imageGetGray(img, colorSensor.getWidth(), 0, 0)
def checkVic(img):
# Convert img to RGBA format (for OpenCV)
img = np.frombuffer(img, np.uint8).reshape(
(cam.getHeight(), cam.getWidth(), 4))
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Grayscale image
# Inverse threshold image (0-80 -> white; 80-255 -> black)
img, thresh = cv.threshold(img, 80, 255, cv.THRESH_BINARY_INV)
# Find all shapes within thresholded image
contours, hierarchy = cv.findContours(
thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
for cnt in contours:
x, y, w, h = cv.boundingRect(cnt) # Find width and height of contour
contArea = cv.contourArea(cnt) # Area covered by the shape
ratio = w / h # Calculate width to height ratio of contour
# if the contour area and width to height ratio are within certain ranges
if contArea > 300 and contArea < 1000 and ratio > 0.65 and ratio < 0.95:
return True
return False
def report(victimType):
    """Stop the robot and report a victim/hazard of the given type to the supervisor."""
    # Struct package to be sent to supervisor to report victim/hazard
    # First four bytes store robot's x coordinate
    # Second four bytes store robot's z coordinate
    # Last byte stores type of victim
    # Victims: H, S, U, T
    # Hazards: F, P, C, O
    wheel_left.setVelocity(0) # Stop (~1.3 s) before reporting
    wheel_right.setVelocity(0)
    delay(1300)
    # Convert victimType to a single byte for struct.pack's "c" field
    victimType = bytes(victimType, "utf-8")
    posX = int(gps.getValues()[0] * 100) # Convert from m to cm (GPS reads metres)
    posZ = int(gps.getValues()[2] * 100)
    message = struct.pack("i i c", posX, posZ, victimType)
    emitter.send(message)
    robot.step(timeStep)
robot.step(timeStep)
# Main control loop: drive forward, dodge walls/holes, report victims.
while robot.step(timeStep) != -1:
    # Default: full speed ahead; the checks below may override
    speeds[0] = max_velocity
    speeds[1] = max_velocity
    # Check left and right sensor to avoid walls
    if leftDist.getValue() < 0.05:
        turn_right() # We see a wall on the left, so turn right away from the wall
    if rightDist.getValue() < 0.05: # for sensor on the right too
        turn_left()
    # for front sensor
    if frontDist.getValue() < 0.05:
        spin()
    # if on black (hole/swamp), spin away for 600 ms
    if getColor() < 80:
        spin()
        wheel_left.setVelocity(speeds[0])
        wheel_right.setVelocity(speeds[1])
        delay(600)
    # if sees victim, report it
    if checkVic(cam.getImage()):
        report('T') # Cannot determine type of victim, so always try 'T' for now
    # Send the speed values we have chosen to the robot
    wheel_left.setVelocity(speeds[0])
    wheel_right.setVelocity(speeds[1])
| [
"controller.Robot",
"cv2.threshold",
"struct.pack",
"cv2.contourArea",
"cv2.cvtColor",
"cv2.findContours",
"numpy.frombuffer",
"cv2.boundingRect"
] | [((262, 269), 'controller.Robot', 'Robot', ([], {}), '()\n', (267, 269), False, 'from controller import Robot, Motor, DistanceSensor, Camera, Emitter, GPS\n'), ((2295, 2330), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (2306, 2330), True, 'import cv2 as cv\n'), ((2431, 2479), 'cv2.threshold', 'cv.threshold', (['img', '(80)', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(img, 80, 255, cv.THRESH_BINARY_INV)\n', (2443, 2479), True, 'import cv2 as cv\n'), ((2553, 2614), 'cv2.findContours', 'cv.findContours', (['thresh', 'cv.RETR_TREE', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n', (2568, 2614), True, 'import cv2 as cv\n'), ((3685, 3729), 'struct.pack', 'struct.pack', (['"""i i c"""', 'posX', 'posZ', 'victimType'], {}), "('i i c', posX, posZ, victimType)\n", (3696, 3729), False, 'import struct\n'), ((2670, 2690), 'cv2.boundingRect', 'cv.boundingRect', (['cnt'], {}), '(cnt)\n', (2685, 2690), True, 'import cv2 as cv\n'), ((2747, 2766), 'cv2.contourArea', 'cv.contourArea', (['cnt'], {}), '(cnt)\n', (2761, 2766), True, 'import cv2 as cv\n'), ((2201, 2229), 'numpy.frombuffer', 'np.frombuffer', (['img', 'np.uint8'], {}), '(img, np.uint8)\n', (2214, 2229), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
# Load simulation results exported from MATLAB
a = sio.loadmat('time_1_4.mat')
cells = a['timedata']  # presumably (scenario, cell-type/std, time) — confirm with cells.shape below
# In[3]:
cells.shape
# In[4]:
# Time axis: 181 samples at 12 samples/day -> 15 days
t = np.linspace(0, 180/12, 181)
title_list = ['No delay', 'Half day delay (ddT=0)', 'Half day delay (ddT=0.25 d)', 'One day delay']
# In[5]:
# temp2_CD8 = [] # 3
# temp2_mac = [] # 4
# temp2_neut= [] # 5
# temp2_DC = [] # 6
# temp2_CD4 = [] # 7
# temp2_fib = [] # 8
# In[6]:
# Immune cell labels and their plot colours (index-aligned)
immune_cells = ['CD8 T', 'Mac', 'Neut', 'DC', 'CD4 T', 'Fib']
colorv = ['red' , 'seagreen', 'cyan', '#810f7c', 'orange', 'blueviolet']
# One panel per delay scenario; all immune cell types overlaid per panel.
fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(22, 4))
for i in range(4):
    for j in range( len(immune_cells) ):
        # Mean trajectory; index j+6 appears to hold the matching std
        # used for the shaded band — TODO confirm against the .mat layout
        ax[i].plot(t, cells[i,j,:], color= colorv[j], linewidth=2)
        ax[i].fill_between(t, cells[i,j,:]-cells[i,j+6,:], cells[i,j,:]+cells[i,j+6,:],
                  label=immune_cells[j], color= colorv[j], alpha=0.35)
#     ax[i].plot(t, meanR, 'green')
#     ax[i].fill_between(t, meanR-stdR, meanR+stdR, alpha=0.2, label='resistant', facecolor='green')
    # Legend only on the last panel, placed outside the axes
    if i==3:
        ax[i].legend(fontsize=16, loc=9, bbox_to_anchor=(1.2, 1.))
    title_name = title_list[i]
    ax[i].set_title(title_name, fontsize=18)
    ax[i].set_xlabel('Time (days)', fontsize=16)
    if i==0:
        ax[i].set_ylabel('# of immune cells', fontsize=16)
    ax[i].set_ylim([-50, 400])
    ax[i].tick_params(axis='both', which='major', labelsize=16)
    # Customize the major grid
    ax[i].grid(which='major', linestyle='solid', linewidth='2', color='w')
    ax[i].set_facecolor("#EEEEEE") # #E6E6E6, #D3D3D3
# fig.suptitle("Comparison of different time delay", fontsize=22)
# plt.subplots_adjust(left=0.1, top=0.8, wspace = 0.2, hspace = 0.4)
plt.tight_layout()
fig.savefig('population-dynamics.png', dpi=600, pad_inches=0.1, bbox_inches='tight') # dpi=600,
# In[ ]:
# ## plt.subplots practice
# In[76]:
# Practice grid: visit every panel in row-major order, give it a dummy
# line plot and a "<row> and <col>" title.
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 5))
for row, col in np.ndindex(axes.shape):
    panel = axes[row, col]
    panel.plot(range(0, 10))
    panel.set_title(f"{row} and {col}")
plt.subplots_adjust(left=0.1, top=0.9, wspace=0.2, hspace=0.4)
# plt.tight_layout()
# In[69]:
axes.shape
| [
"scipy.io.loadmat",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust"
] | [((141, 168), 'scipy.io.loadmat', 'sio.loadmat', (['"""time_1_4.mat"""'], {}), "('time_1_4.mat')\n", (152, 168), True, 'import scipy.io as sio\n'), ((233, 262), 'numpy.linspace', 'np.linspace', (['(0)', '(180 / 12)', '(181)'], {}), '(0, 180 / 12, 181)\n', (244, 262), True, 'import numpy as np\n'), ((679, 726), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(4)', 'figsize': '(22, 4)'}), '(nrows=1, ncols=4, figsize=(22, 4))\n', (691, 726), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1890), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1888, 1890), True, 'import matplotlib.pyplot as plt\n'), ((2057, 2104), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': '(10, 5)'}), '(nrows=2, ncols=3, figsize=(10, 5))\n', (2069, 2104), True, 'import matplotlib.pyplot as plt\n'), ((2302, 2364), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'top': '(0.9)', 'wspace': '(0.2)', 'hspace': '(0.4)'}), '(left=0.1, top=0.9, wspace=0.2, hspace=0.4)\n', (2321, 2364), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ["RegionEditor"]
import os
import sys
import cv2
import copy
import numpy as np
from functools import partial
from matplotlib.path import Path
from matplotlib.widgets import (
Button, Slider, RadioButtons, CheckButtons,
RectangleSelector, EllipseSelector, LassoSelector)
import matplotlib.pyplot as plt
import matplotlib.patches as patches
sys.path.append(".")
from pyutils.figure import Figure
from src.library.base_editor import BaseEditor
class RegionEditor(BaseEditor):
    """Interactive matplotlib editor for the region of interest (ROI) of a video.

    Left panel: the current frame with the editable ROI shown via the alpha
    channel.  Right panel: the sub-area additionally selected by ``mask_func``.
    Drawing tools and extension widgets come from ``BaseEditor`` helpers
    (``_set_interactive_tool``, ``_set_extension`` — defined in the base class).
    """

    def __init__(self, cap, image_size, resize_rate,
                 range_of_interest, mask_func):
        '''
        Args:
            cap (cv2.VideoCapture): video object
            image_size (tuple(int, int)): [width, heigth]
            resize_rate (float): resize rate
            range_of_interest (np.ndarray(bool)): size=(H, W)
            mask_func (func): function outputing mask
        '''
        self.cap = cap
        # Remember the starting frame so terminate() can restore it.
        self.pos_frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
        self.pos_frame_init = self.pos_frame
        self.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # NOTE(review): assumes image_size supports scalar multiplication
        # (i.e. is array-like); a plain Python tuple would raise here — confirm callers.
        self.image_size = tuple(np.array(image_size * resize_rate, dtype=int))
        self.image_size = self.image_size[::-1]  # (W, H) -> (H, W)
        self.roi_raw = range_of_interest
        self.roi_edit = np.array(range_of_interest)  # editable working copy
        self.mask_func = mask_func
        self.extensions = self.mask_func.extensions

    def launch(self):
        """Build the figure, widgets, and both image panels, then show the GUI."""
        self.fig = Figure(figsize=(15, 6))
        self.fig.create_grid((1, 3), wspace=0.0, width_ratios=(2, 1, 2))
        self.fig[1].create_grid(
            (3, 3), wspace=0.0, hspace=0.1, width_ratios=(3, 10, 3))
        # --- command buttons: reset / end ---
        ax_command = self.fig[1][0, 1]
        ax_command.create_grid((2, 1), hspace=0.0)
        self.button_reset = Button(ax_command[0], 'reset')
        self.button_reset.label.set_fontsize(15)
        self.button_reset.on_clicked(self.reset)
        self.button_end = Button(ax_command[1], 'end')
        self.button_end.label.set_fontsize(15)
        self.button_end.on_clicked(self.terminate)
        self.fig._fig.canvas.mpl_connect('close_event', self.terminate)
        # --- drawing tools and frame-position slider ---
        ax_action = self.fig[1][1, 1]
        ax_action.create_grid((3, 1), hspace=0.0, height_ratios=(5, 2, 2))
        self._set_interactive_tool(
            self.fig[0], ax_action[0], ax_action[1])
        self.slider_frame = Slider(
            ax_action[2], 'frame\npos', 0, self.frame_count - 1,
            valinit=self.pos_frame, valstep=1.0, valfmt='%.0f')
        self.slider_frame.on_changed(self._on_set_image)
        ax_extension = self.fig[1][2, 1]
        self._set_extension(ax_extension)
        # --- left panel: frame masked by the editable ROI ---
        self.fig[0].add_zoom_func()
        rect = plt.Rectangle(
            (0, 0), self.image_size[1], self.image_size[0],
            fc='w', ec='gray', hatch='++', alpha=0.5, zorder=-10)
        self.fig[0].add_patch(rect)
        self.im_image_left = self.fig[0].plot_matrix(
            np.zeros((*self.image_size, 4)), picker=True,
            flip_axis=True, colorbar=False)
        self.fig[0].set_aspect("equal", "box")
        self.fig[0].set_title(
            "ROI of video (left click: write, left click + shift key: erase)")
        # --- right panel: ROI intersected with mask_func output ---
        self.fig[2].add_zoom_func()
        rect = plt.Rectangle(
            (0, 0), self.image_size[1], self.image_size[0],
            fc='w', ec='gray', hatch='++', alpha=0.5, zorder=-10)
        self.im_image_right = self.fig[2].plot_matrix(
            np.zeros((*self.image_size, 4)), picker=True,
            flip_axis=True, colorbar=False)
        self.fig[2].set_aspect("equal", "box")
        self.fig[2].add_patch(rect)
        self.fig[2].get_xaxis().set_visible(False)
        self.fig[2].get_yaxis().set_visible(False)
        self.fig[2].set_title("extracted area (ROI+HSV, change HSV ranges)")
        self.reset()
        self.fig.show()

    def reset(self, event=None):
        """Restore ROI, extensions, zoom, and frame position, then redraw."""
        self.roi_edit = np.array(self.roi_raw)
        for ext in self.extensions:
            ext.reset(self, event)
        self.fig[0].zoom_reset()
        self.fig[2].zoom_reset()
        self.pos_frame = self.pos_frame_init
        self.read()
        self.update()

    def read(self):
        """Read the frame selected by the slider into ``self.image_edit`` (RGBA)."""
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, int(self.slider_frame.val))
        ret, frame = self.cap.read()
        if ret is False:
            return
        # Bug fix: the interpolation flag must be passed by keyword — the third
        # positional argument of cv2.resize is ``dst``, not ``interpolation``.
        image = cv2.resize(
            frame, self.image_size[::-1], interpolation=cv2.INTER_LINEAR)
        self.image_edit = np.concatenate([
            cv2.cvtColor(image, cv2.COLOR_BGR2RGB),
            np.full((*image.shape[:2], 1), 255, dtype=np.uint8)], axis=2)

    def update(self, **kwargs):
        """Refresh both image panels from the current ROI and mask."""
        # Left panel: alpha channel encodes ROI membership.
        self.image_edit[~self.roi_edit, 3] = 0
        self.image_edit[self.roi_edit, 3] = 255
        self.im_image_left.set_data(self.image_edit[::-1])
        # Right panel: ROI intersected with the mask_func selection.
        mask = self.mask_func(self.image_edit[..., :3])
        mask = np.logical_and(mask, self.roi_edit)
        self.image_edit[~mask, 3] = 0
        self.image_edit[mask, 3] = 255
        self.im_image_right.set_data(self.image_edit[::-1])
        self.fig._fig.canvas.draw_idle()

    def terminate(self, event=None):
        """Close the figure and restore the capture's original frame position."""
        self.fig.close()
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.pos_frame_init)

    def _on_set_image(self, event=None):
        """Slider callback: load the newly selected frame and redraw."""
        self.read()
        self.update()
| [
"cv2.resize",
"numpy.logical_and",
"matplotlib.widgets.Button",
"numpy.array",
"numpy.zeros",
"pyutils.figure.Figure",
"cv2.cvtColor",
"matplotlib.pyplot.Rectangle",
"numpy.full",
"matplotlib.widgets.Slider",
"sys.path.append"
] | [((407, 427), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (422, 427), False, 'import sys\n'), ((1340, 1367), 'numpy.array', 'np.array', (['range_of_interest'], {}), '(range_of_interest)\n', (1348, 1367), True, 'import numpy as np\n'), ((1497, 1520), 'pyutils.figure.Figure', 'Figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (1503, 1520), False, 'from pyutils.figure import Figure\n'), ((1815, 1845), 'matplotlib.widgets.Button', 'Button', (['ax_command[0]', '"""reset"""'], {}), "(ax_command[0], 'reset')\n", (1821, 1845), False, 'from matplotlib.widgets import Button, Slider, RadioButtons, CheckButtons, RectangleSelector, EllipseSelector, LassoSelector\n'), ((1970, 1998), 'matplotlib.widgets.Button', 'Button', (['ax_command[1]', '"""end"""'], {}), "(ax_command[1], 'end')\n", (1976, 1998), False, 'from matplotlib.widgets import Button, Slider, RadioButtons, CheckButtons, RectangleSelector, EllipseSelector, LassoSelector\n'), ((2400, 2516), 'matplotlib.widgets.Slider', 'Slider', (['ax_action[2]', '"""frame\npos"""', '(0)', '(self.frame_count - 1)'], {'valinit': 'self.pos_frame', 'valstep': '(1.0)', 'valfmt': '"""%.0f"""'}), "(ax_action[2], 'frame\\npos', 0, self.frame_count - 1, valinit=self.\n pos_frame, valstep=1.0, valfmt='%.0f')\n", (2406, 2516), False, 'from matplotlib.widgets import Button, Slider, RadioButtons, CheckButtons, RectangleSelector, EllipseSelector, LassoSelector\n'), ((2758, 2878), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0, 0)', 'self.image_size[1]', 'self.image_size[0]'], {'fc': '"""w"""', 'ec': '"""gray"""', 'hatch': '"""++"""', 'alpha': '(0.5)', 'zorder': '(-10)'}), "((0, 0), self.image_size[1], self.image_size[0], fc='w', ec=\n 'gray', hatch='++', alpha=0.5, zorder=-10)\n", (2771, 2878), True, 'import matplotlib.pyplot as plt\n'), ((3328, 3448), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0, 0)', 'self.image_size[1]', 'self.image_size[0]'], {'fc': '"""w"""', 'ec': '"""gray"""', 'hatch': 
'"""++"""', 'alpha': '(0.5)', 'zorder': '(-10)'}), "((0, 0), self.image_size[1], self.image_size[0], fc='w', ec=\n 'gray', hatch='++', alpha=0.5, zorder=-10)\n", (3341, 3448), True, 'import matplotlib.pyplot as plt\n'), ((3992, 4014), 'numpy.array', 'np.array', (['self.roi_raw'], {}), '(self.roi_raw)\n', (4000, 4014), True, 'import numpy as np\n'), ((4431, 4489), 'cv2.resize', 'cv2.resize', (['frame', 'self.image_size[::-1]', 'cv2.INTER_LINEAR'], {}), '(frame, self.image_size[::-1], cv2.INTER_LINEAR)\n', (4441, 4489), False, 'import cv2\n'), ((4917, 4952), 'numpy.logical_and', 'np.logical_and', (['mask', 'self.roi_edit'], {}), '(mask, self.roi_edit)\n', (4931, 4952), True, 'import numpy as np\n'), ((1180, 1225), 'numpy.array', 'np.array', (['(image_size * resize_rate)'], {'dtype': 'int'}), '(image_size * resize_rate, dtype=int)\n', (1188, 1225), True, 'import numpy as np\n'), ((3001, 3032), 'numpy.zeros', 'np.zeros', (['(*self.image_size, 4)'], {}), '((*self.image_size, 4))\n', (3009, 3032), True, 'import numpy as np\n'), ((3536, 3567), 'numpy.zeros', 'np.zeros', (['(*self.image_size, 4)'], {}), '((*self.image_size, 4))\n', (3544, 3567), True, 'import numpy as np\n'), ((4545, 4583), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (4557, 4583), False, 'import cv2\n'), ((4597, 4648), 'numpy.full', 'np.full', (['(*image.shape[:2], 1)', '(255)'], {'dtype': 'np.uint8'}), '((*image.shape[:2], 1), 255, dtype=np.uint8)\n', (4604, 4648), True, 'import numpy as np\n')] |
"""
@author: <NAME>
@contact: <EMAIL>
"""
import argparse
from ifpd import const, query
from ifpd.scripts import arguments as ap # type: ignore
from ifpd.exception import enable_rich_assert
from joblib import Parallel, delayed # type: ignore
import logging
import numpy as np # type: ignore
import os
import pandas as pd # type: ignore
from rich.logging import RichHandler # type: ignore
from rich.progress import track # type: ignore
import shutil
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
handlers=[RichHandler(markup=True, rich_tracebacks=True)],
)
def init_parser(subparsers: argparse._SubParsersAction) -> argparse.ArgumentParser:
    """Register the probe-set design subcommand and all of its CLI options.

    Returns:
        argparse.ArgumentParser: the configured sub-parser; its ``parse`` and
        ``run`` defaults point at this module's entry points.
    """
    parser = subparsers.add_parser(
        __name__.split(".")[-1],
        description="""
Design a FISH probe set in a genomic region, with the aim of having the probes
as homogeneously spaced as possible. Concisely, the script does the following:
- Identify all probe candidates
- Calculate centrality, size, and homogeneity for each candidate
- Build a set of consecutive (nProbes+1) windows of the same size
+ Shift the windows set to build additional windows sets. Each windows set
 will produce one probe set candidate.
- For each window set:
+ Find the best probe in each window.
+ Aggregate each window's best probe into a candidate probe set.
- Rank candidate probe sets based on probe homogeneity.
- Return the top N candidates (maxSets), with plots, tables, fasta and bed.
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="Design a FISH probe set in a genomic region.",
    )
    parser.add_argument(
        "database", metavar="database", type=str, help="Path to database folder."
    )
    parser.add_argument(
        "chrom",
        type=str,
        help="Database feature to query for a probe set.",
    )
    parser.add_argument(
        "outdir",
        metavar="outDir",
        type=str,
        help="Path to query output directory. Stops if it exists already.",
    )
    parser.add_argument(
        "nProbes", metavar="nProbes", type=int, help="Number of probes to design."
    )
    parser.add_argument(
        "--region",
        type=int,
        nargs=2,
        default=(0, np.inf),
        help="""Start and end locations (space-separated) of the region of interest.
        When a region is not provided (or start/end coincide),
        the whole feature is queried.""",
    )
    parser.add_argument(
        "--n-oligo",
        metavar="nOligo",
        type=int,
        default=48,
        help="Number of oligos per probe. Default: 48",
    )
    parser.add_argument(
        "--max-sets",
        metavar="maxProbes",
        type=int,
        default=-1,
        help="""Maximum number of probe set candidates to output.
        Set to -1 to retrieve all candidates. Default: -1""",
    )
    parser = ap.add_version_option(parser)
    advanced = parser.add_argument_group("advanced arguments")
    advanced.add_argument(
        "--order",
        metavar="feature",
        type=str,
        default=const.featureList,
        nargs="+",
        help="""Space-separated features, used as explained in script description.
        The available features are: 'centrality', 'size', and 'homogeneity'. At least 2
        features must be listed. Default: "size homogeneity centrality".""",
    )
    advanced.add_argument(
        "--filter-thr",
        metavar="filterThr",
        type=float,
        default=0.1,
        help="""Threshold of first feature filter, used to identify
        a range around the best value (percentage range around it). Accepts values
        from 0 to 1. Default: 0.1""",
    )
    advanced.add_argument(
        "--min-d",
        metavar="minD",
        type=int,
        default=0,
        help="*DEPRECATED* Minimum distance between consecutive oligos. Default: 1",
    )
    advanced.add_argument(
        "--exact-n-oligo",
        action="store_const",
        dest="exact_n_oligo",
        const=True,
        default=False,
        help="""Stop if not enough oligos are found,
        instead of designing the largest probe.""",
    )
    advanced.add_argument(
        "--window-shift",
        metavar="winShift",
        type=float,
        default=0.1,
        help="""Window fraction for windows shifting.""",
    )
    advanced.add_argument(
        "-t",
        "--threads",
        metavar="nthreads",
        type=int,
        help="""Number of threads for parallelization. Default: 1""",
        default=1,
    )
    advanced.add_argument(
        "-f",
        action="store_const",
        dest="forceRun",
        const=True,
        default=False,
        help="""Force overwriting of the query if already run.
        This is potentially dangerous.""",
    )
    # Wire this subcommand to the module's validation and execution entry points
    parser.set_defaults(parse=parse_arguments, run=run)
    return parser
def assert_region(args):
    """Validate ``args.region`` in place.

    A degenerate region (start == end) is normalised to ``None``, meaning
    "query the whole feature"; ``None`` itself is left untouched.  For a real
    region, assert that both bounds are non-negative and that end > start.

    Raises:
        AssertionError: if the region bounds are invalid.
    """
    if args.region is None:
        return
    if args.region[0] == args.region[1]:
        args.region = None
        return
    # Bug fix: these checks previously ran even when region was None,
    # raising TypeError on subscript instead of being a no-op.
    assert (
        args.region[0] >= 0
    ), f"start location cannot be negative [{args.region[0]}]."
    assert (
        args.region[1] >= 0
    ), f"end location cannot be negative [{args.region[1]}]."
    assert (
        args.region[1] > args.region[0]
    ), f"end location must be greater than start location [{args.region}]."
@enable_rich_assert
def parse_arguments(args: argparse.Namespace) -> argparse.Namespace:
    """Sanity-check parsed CLI arguments and normalise defaults in place."""
    assert not os.path.isfile(
        args.outdir
    ), f"output folder expected, file found: {args.outdir}"
    # Without -f the output folder must not exist; with -f it is wiped.
    if not args.forceRun:
        assert not os.path.isdir(
            args.outdir
        ), f"output folder already exists: {args.outdir}"
    elif os.path.isdir(args.outdir):
        shutil.rmtree(args.outdir)
        logging.warning("Overwriting previously run query.")
    assert_region(args)
    n_features = len(args.order)
    assert 2 <= n_features, f"at least 2 features need, only {n_features} found."
    for feature in args.order:
        assert (
            feature in const.featureList
        ), f'unrecognized feature "{feature}". Should be one of {const.featureList}.'
    assert (
        0 <= args.filter_thr <= 1
    ), f"first filter threshold must be a fraction: {args.filter_thr}"
    assert (
        0 < args.window_shift <= 1
    ), f"window shift must be a fraction: {args.window_shift}"
    assert (
        args.min_d >= 0
    ), f"negative minimum distance between consecutive oligos: {args.min_d}"
    assert args.n_oligo >= 1, f"a probe must have oligos: {args.n_oligo}"
    if args.max_sets == -1:
        args.max_sets = np.inf  # -1 means "no limit on output sets"
    assert args.max_sets >= 0, f"at least 1 probe set in output: {args.max_sets}"
    return args
def init_db(
    args,
    oligoDB,
):
    """Load the queried chromosome and restrict oligos to the requested region.

    Returns:
        tuple: (oligoDB, queried_region, selectCondition, selectedOligos) where
        queried_region is (chrom, chromStart, chromEnd), selectCondition is a
        boolean mask over the chromosome table, and selectedOligos holds the
        matching rows.
    """
    assert (
        not oligoDB.has_overlaps()
    ), "databases with overlapping oligos are not supported yet."
    # Lazily load the chromosome table on first access
    if args.chrom not in oligoDB.chromData.keys():
        oligoDB.read_chromosome(args.chrom)
    chromData = oligoDB.chromData[args.chrom]
    # Open-ended region (end == inf): extend to the end of the chromosome
    if args.region[1] == np.inf:
        args.region = (
            args.region[0],
            oligoDB.chromData[args.chrom]["chromEnd"].max(),
        )
    chromStart, chromEnd = args.region
    queried_region = (args.chrom, chromStart, chromEnd)
    # Keep only oligos fully contained in [chromStart, chromEnd]
    selectCondition = np.logical_and(
        chromData.iloc[:, 0] >= chromStart, chromData.iloc[:, 1] <= chromEnd
    )
    selectedOligos = chromData.loc[selectCondition, :]
    return oligoDB, queried_region, selectCondition, selectedOligos
def build_candidates(args, queried_region, selectedOligos, oligoDB):
    """Slide a window of ``n_oligo`` consecutive oligos over the selection,
    building one OligoProbe candidate per start position.

    Returns:
        list: one query.OligoProbe per window start position.
    """
    logging.info("Build probe candidates.")
    args.threads = ap.check_threads(args.threads)
    if args.threads != 1:
        # Parallel path: one delayed OligoProbe construction per start index
        candidateList = Parallel(n_jobs=args.threads, backend="threading", verbose=1)(
            delayed(query.OligoProbe)(
                queried_region[0],
                selectedOligos.iloc[ii : (ii + args.n_oligo), :],
                oligoDB,
            )
            for ii in range(selectedOligos.shape[0] - args.n_oligo + 1)
        )
    else:
        # Serial path with a rich progress bar
        candidateList = [
            query.OligoProbe(
                queried_region[0],
                selectedOligos.iloc[i : (i + args.n_oligo), :],
                oligoDB,
            )
            for i in track(range(selectedOligos.shape[0] - args.n_oligo + 1))
        ]
    logging.info(f"Found {len(candidateList)} probe candidates.")
    return candidateList
def build_windows(args, queried_region, oligoDB):
    """Build shifted sets of equal-size windows covering the queried region.

    Each window set can host one probe per window; shifting produces
    alternative probe-set candidates.

    Returns:
        list: one query.GenomicWindowList per shift offset.
    """
    chrom, chromStart, chromEnd = queried_region
    logging.info("Building window sets...")
    window_set = query.GenomicWindowList(None)
    # nProbes+1 windows span the region; each probe set uses nProbes of them
    window_size = chromEnd - chromStart
    window_size /= args.nProbes + 1
    window_size = int(window_size)
    # Drop the trailing window(s): one extra if the region size is not an
    # exact multiple of the window size
    skip = 1 + ((chromEnd - chromStart) % window_size != 0)
    for startPosition in range(chromStart, chromEnd, window_size)[:-skip]:
        window_set.add(chrom, startPosition, window_size)
    # Shift step: a fraction of the window, but at least one oligo length
    window_shift = max(
        int(args.window_shift * window_size),
        args.min_d + oligoDB.get_oligo_length_range()[0],
    )
    window_setList = [window_set.shift(s) for s in range(0, window_size, window_shift)]
    logging.info(f" Built {len(window_setList)} window sets.")
    return window_setList
def export_window_set(args, queried_region, window_setList, wsi):
    """Write the FASTA and BED files of probe set candidate ``wsi`` to its own folder."""
    out_dir = os.path.join(args.outdir, f"probe_set_{wsi}")
    assert not os.path.isfile(out_dir)
    assert not os.path.isdir(out_dir)
    os.mkdir(out_dir)
    fasta, bed = window_setList[wsi].export(out_dir, queried_region)
    # One output file per format, named after the probe set index
    for suffix, payload in ((".fa", fasta), (".bed", bed)):
        target = os.path.join(out_dir, f"probe_set_{wsi}{suffix}")
        with open(target, "w+") as handle:
            handle.write(payload)
def build_feature_table(args, queried_region, candidateList):
    """Compute per-candidate features, export them, and check there are enough probes.

    Returns:
        query.ProbeFeatureTable: the feature table for all candidates.

    Raises:
        AssertionError: if fewer candidates than requested probes were found.
    """
    logging.info("Describe candidates.")
    probeFeatureTable = query.ProbeFeatureTable(
        candidateList, queried_region, True, args.threads
    )
    logging.info("Write description table.")
    # Fix: pass the separator by keyword — positional arguments after the path
    # are deprecated for DataFrame.to_csv (removed in pandas 3.0).
    probeFeatureTable.data.to_csv(
        os.path.join(args.outdir, "probe_candidates.tsv"), sep="\t", index=False
    )
    assert args.nProbes <= probeFeatureTable.data.shape[0], "".join(
        [
            "not enough probes in the region of interest: ",
            f"{probeFeatureTable.data.shape[0]}/{args.nProbes}",
        ]
    )
    return probeFeatureTable
def populate_windows(args, candidateList, window_setList, probeFeatureTable):
    """Assign the best probe candidate to every window of every window set.

    Per window: restrict candidates to the window, filter on the first ranking
    feature (``args.order[0]``) within ``args.filter_thr`` of the optimum,
    rank by the second feature, and keep the top candidate (``None`` if the
    window holds no candidate).

    Returns:
        list: window_setList with each window's ``probe`` attribute populated.
    """
    for wsi in range(len(window_setList)):
        window_set = window_setList[wsi]
        for wi in range(len(window_set)):
            window = window_set[wi]
            probeFeatureTable.reset()
            # Candidates fully contained in the current window
            selectCondition = np.logical_and(
                probeFeatureTable.data.loc[:, "chromStart"] >= window.chromStart,
                probeFeatureTable.data.loc[:, "chromEnd"] <= window.chromEnd,
            )
            logging.info(
                "".join(
                    [
                        f"Found {selectCondition.sum()} probe candidates",
                        f" in window #{wi} of set #{wsi}.",
                    ]
                )
            )
            if selectCondition.sum() == 0:
                # Empty window: no probe for this slot
                window_setList[wsi][wi].probe = None
                continue
            elif selectCondition.sum() != 1:
                # Narrow down: keep candidates near the optimum of the first feature
                probeFeatureTable.keep(selectCondition, cumulative=True)
                feature_range, feature = probeFeatureTable.filter(
                    args.order[0], args.filter_thr, cumulative=True
                )
                feature_range = np.round(feature_range, 6)
                logging.info(
                    "".join(
                        [
                            f" Selected {probeFeatureTable.data.shape[0]}",
                            f" candidates in the range {feature_range} of '{feature}'.",
                        ]
                    )
                )
            logging.info(
                " Ranking probe candidates based on" + f" '{args.order[1]}'."
            )
            probeFeatureTable.rank(args.order[1])
            # Best-ranked candidate becomes this window's probe
            window_setList[wsi][wi].probe = candidateList[
                probeFeatureTable.data.index[0]
            ]
    return window_setList
@enable_rich_assert
def run(args: argparse.Namespace) -> None:
    """Entry point: design probe sets and write all outputs under ``args.outdir``.

    Pipeline: load database -> select oligos -> build and describe candidates
    -> build shifted window sets -> populate windows -> rank sets -> export.
    """
    os.mkdir(args.outdir)
    ap.add_log_file_handler(os.path.join(args.outdir, "log"))
    logging.info("Read database.")
    oligoDB = query.OligoDatabase(args.database)
    oligoDB, queried_region, selectCondition, selectedOligos = init_db(args, oligoDB)
    args = ap.check_n_oligo(args, selectCondition)
    candidateList = build_candidates(args, queried_region, selectedOligos, oligoDB)
    probeFeatureTable = build_feature_table(args, queried_region, candidateList)
    window_setList = build_windows(args, queried_region, oligoDB)
    window_setList = populate_windows(
        args, candidateList, window_setList, probeFeatureTable
    )
    logging.info("Compare probe set candidates.")
    # Per window set: spread (size/homogeneity score) and number of probes placed
    probeSetSpread = np.array(
        [ws.calc_probe_size_and_homogeneity() for ws in window_setList]
    )
    probeCount = np.array([ws.count_probes() for ws in window_setList])
    logging.info(
        "".join(
            [
                f" {sum(probeCount == args.nProbes)}/{len(window_setList)}",
                f" probe set candidates have {args.nProbes} probes.",
            ]
        )
    )
    logging.info("Rank based on #probes and homogeneity (of probes and size).")
    probeSetData = pd.DataFrame.from_dict(
        {
            "id": range(len(window_setList)),
            "homogeneity": probeSetSpread[np.argsort(probeCount)[::-1]],
            "nProbes": probeCount[np.argsort(probeCount)[::-1]],
        }
    )
    probeSetData.sort_values(
        by=["nProbes", "homogeneity"], ascending=[False, False], inplace=True
    )
    # Fix: pass the separator by keyword — positional arguments after the path
    # are deprecated for DataFrame.to_csv (removed in pandas 3.0).
    probeSetData.drop("id", axis=1).to_csv(
        os.path.join(args.outdir, "set_candidates.tsv"), sep="\t", index=False
    )
    logging.info("Export probe set candidates.")
    # Reorder the window sets to match the ranking before exporting
    window_setList = [window_setList[i] for i in probeSetData["id"].values]
    if args.threads != 1:
        Parallel(n_jobs=args.threads, verbose=1)(
            delayed(export_window_set)(args, queried_region, window_setList, wsi)
            for wsi in range(len(window_setList))
        )
    else:
        for wsi in track(range(len(window_setList))):
            export_window_set(args, queried_region, window_setList, wsi)
    logging.info("Done. :thumbs_up: :smiley:")
| [
"ifpd.scripts.arguments.add_version_option",
"ifpd.query.ProbeFeatureTable",
"numpy.argsort",
"logging.info",
"os.path.isdir",
"os.mkdir",
"rich.logging.RichHandler",
"ifpd.query.OligoProbe",
"numpy.round",
"ifpd.query.OligoDatabase",
"logging.warning",
"os.path.isfile",
"ifpd.scripts.argume... | [((2881, 2910), 'ifpd.scripts.arguments.add_version_option', 'ap.add_version_option', (['parser'], {}), '(parser)\n', (2902, 2910), True, 'from ifpd.scripts import arguments as ap\n'), ((7375, 7463), 'numpy.logical_and', 'np.logical_and', (['(chromData.iloc[:, 0] >= chromStart)', '(chromData.iloc[:, 1] <= chromEnd)'], {}), '(chromData.iloc[:, 0] >= chromStart, chromData.iloc[:, 1] <=\n chromEnd)\n', (7389, 7463), True, 'import numpy as np\n'), ((7673, 7712), 'logging.info', 'logging.info', (['"""Build probe candidates."""'], {}), "('Build probe candidates.')\n", (7685, 7712), False, 'import logging\n'), ((7732, 7762), 'ifpd.scripts.arguments.check_threads', 'ap.check_threads', (['args.threads'], {}), '(args.threads)\n', (7748, 7762), True, 'from ifpd.scripts import arguments as ap\n'), ((8627, 8666), 'logging.info', 'logging.info', (['"""Building window sets..."""'], {}), "('Building window sets...')\n", (8639, 8666), False, 'import logging\n'), ((8684, 8713), 'ifpd.query.GenomicWindowList', 'query.GenomicWindowList', (['None'], {}), '(None)\n', (8707, 8713), False, 'from ifpd import const, query\n'), ((9461, 9506), 'os.path.join', 'os.path.join', (['args.outdir', 'f"""probe_set_{wsi}"""'], {}), "(args.outdir, f'probe_set_{wsi}')\n", (9473, 9506), False, 'import os\n'), ((9604, 9629), 'os.mkdir', 'os.mkdir', (['window_set_path'], {}), '(window_set_path)\n', (9612, 9629), False, 'import os\n'), ((9716, 9768), 'os.path.join', 'os.path.join', (['window_set_path', 'f"""probe_set_{wsi}.fa"""'], {}), "(window_set_path, f'probe_set_{wsi}.fa')\n", (9728, 9768), False, 'import os\n'), ((9784, 9837), 'os.path.join', 'os.path.join', (['window_set_path', 'f"""probe_set_{wsi}.bed"""'], {}), "(window_set_path, f'probe_set_{wsi}.bed')\n", (9796, 9837), False, 'import os\n'), ((10028, 10064), 'logging.info', 'logging.info', (['"""Describe candidates."""'], {}), "('Describe candidates.')\n", (10040, 10064), False, 'import logging\n'), ((10089, 10163), 
'ifpd.query.ProbeFeatureTable', 'query.ProbeFeatureTable', (['candidateList', 'queried_region', '(True)', 'args.threads'], {}), '(candidateList, queried_region, True, args.threads)\n', (10112, 10163), False, 'from ifpd import const, query\n'), ((10183, 10223), 'logging.info', 'logging.info', (['"""Write description table."""'], {}), "('Write description table.')\n", (10195, 10223), False, 'import logging\n'), ((12526, 12547), 'os.mkdir', 'os.mkdir', (['args.outdir'], {}), '(args.outdir)\n', (12534, 12547), False, 'import os\n'), ((12615, 12645), 'logging.info', 'logging.info', (['"""Read database."""'], {}), "('Read database.')\n", (12627, 12645), False, 'import logging\n'), ((12660, 12694), 'ifpd.query.OligoDatabase', 'query.OligoDatabase', (['args.database'], {}), '(args.database)\n', (12679, 12694), False, 'from ifpd import const, query\n'), ((12792, 12831), 'ifpd.scripts.arguments.check_n_oligo', 'ap.check_n_oligo', (['args', 'selectCondition'], {}), '(args, selectCondition)\n', (12808, 12831), True, 'from ifpd.scripts import arguments as ap\n'), ((13177, 13222), 'logging.info', 'logging.info', (['"""Compare probe set candidates."""'], {}), "('Compare probe set candidates.')\n", (13189, 13222), False, 'import logging\n'), ((13635, 13710), 'logging.info', 'logging.info', (['"""Rank based on #probes and homogeneity (of probes and size)."""'], {}), "('Rank based on #probes and homogeneity (of probes and size).')\n", (13647, 13710), False, 'import logging\n'), ((14208, 14252), 'logging.info', 'logging.info', (['"""Export probe set candidates."""'], {}), "('Export probe set candidates.')\n", (14220, 14252), False, 'import logging\n'), ((14690, 14732), 'logging.info', 'logging.info', (['"""Done. :thumbs_up: :smiley:"""'], {}), "('Done. 
:thumbs_up: :smiley:')\n", (14702, 14732), False, 'import logging\n'), ((5509, 5536), 'os.path.isfile', 'os.path.isfile', (['args.outdir'], {}), '(args.outdir)\n', (5523, 5536), False, 'import os\n'), ((5638, 5664), 'os.path.isdir', 'os.path.isdir', (['args.outdir'], {}), '(args.outdir)\n', (5651, 5664), False, 'import os\n'), ((9522, 9553), 'os.path.isfile', 'os.path.isfile', (['window_set_path'], {}), '(window_set_path)\n', (9536, 9553), False, 'import os\n'), ((9569, 9599), 'os.path.isdir', 'os.path.isdir', (['window_set_path'], {}), '(window_set_path)\n', (9582, 9599), False, 'import os\n'), ((10267, 10316), 'os.path.join', 'os.path.join', (['args.outdir', '"""probe_candidates.tsv"""'], {}), "(args.outdir, 'probe_candidates.tsv')\n", (10279, 10316), False, 'import os\n'), ((12576, 12608), 'os.path.join', 'os.path.join', (['args.outdir', '"""log"""'], {}), "(args.outdir, 'log')\n", (12588, 12608), False, 'import os\n'), ((14130, 14177), 'os.path.join', 'os.path.join', (['args.outdir', '"""set_candidates.tsv"""'], {}), "(args.outdir, 'set_candidates.tsv')\n", (14142, 14177), False, 'import os\n'), ((542, 588), 'rich.logging.RichHandler', 'RichHandler', ([], {'markup': '(True)', 'rich_tracebacks': '(True)'}), '(markup=True, rich_tracebacks=True)\n', (553, 588), False, 'from rich.logging import RichHandler\n'), ((5678, 5704), 'shutil.rmtree', 'shutil.rmtree', (['args.outdir'], {}), '(args.outdir)\n', (5691, 5704), False, 'import shutil\n'), ((5717, 5769), 'logging.warning', 'logging.warning', (['"""Overwriting previously run query."""'], {}), "('Overwriting previously run query.')\n", (5732, 5769), False, 'import logging\n'), ((5799, 5825), 'os.path.isdir', 'os.path.isdir', (['args.outdir'], {}), '(args.outdir)\n', (5812, 5825), False, 'import os\n'), ((7813, 7874), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'args.threads', 'backend': '"""threading"""', 'verbose': '(1)'}), "(n_jobs=args.threads, backend='threading', verbose=1)\n", (7821, 7874), False, 'from 
joblib import Parallel, delayed\n'), ((8186, 8278), 'ifpd.query.OligoProbe', 'query.OligoProbe', (['queried_region[0]', 'selectedOligos.iloc[i:i + args.n_oligo, :]', 'oligoDB'], {}), '(queried_region[0], selectedOligos.iloc[i:i + args.n_oligo,\n :], oligoDB)\n', (8202, 8278), False, 'from ifpd import const, query\n'), ((10905, 11052), 'numpy.logical_and', 'np.logical_and', (["(probeFeatureTable.data.loc[:, 'chromStart'] >= window.chromStart)", "(probeFeatureTable.data.loc[:, 'chromEnd'] <= window.chromEnd)"], {}), "(probeFeatureTable.data.loc[:, 'chromStart'] >= window.\n chromStart, probeFeatureTable.data.loc[:, 'chromEnd'] <= window.chromEnd)\n", (10919, 11052), True, 'import numpy as np\n'), ((14364, 14404), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'args.threads', 'verbose': '(1)'}), '(n_jobs=args.threads, verbose=1)\n', (14372, 14404), False, 'from joblib import Parallel, delayed\n'), ((7888, 7913), 'joblib.delayed', 'delayed', (['query.OligoProbe'], {}), '(query.OligoProbe)\n', (7895, 7913), False, 'from joblib import Parallel, delayed\n'), ((11782, 11808), 'numpy.round', 'np.round', (['feature_range', '(6)'], {}), '(feature_range, 6)\n', (11790, 11808), True, 'import numpy as np\n'), ((12141, 12216), 'logging.info', 'logging.info', (['(\' Ranking probe candidates based on\' + f" \'{args.order[1]}\'.")'], {}), '(\' Ranking probe candidates based on\' + f" \'{args.order[1]}\'.")\n', (12153, 12216), False, 'import logging\n'), ((13852, 13874), 'numpy.argsort', 'np.argsort', (['probeCount'], {}), '(probeCount)\n', (13862, 13874), True, 'import numpy as np\n'), ((13917, 13939), 'numpy.argsort', 'np.argsort', (['probeCount'], {}), '(probeCount)\n', (13927, 13939), True, 'import numpy as np\n'), ((14418, 14444), 'joblib.delayed', 'delayed', (['export_window_set'], {}), '(export_window_set)\n', (14425, 14444), False, 'from joblib import Parallel, delayed\n')] |
import h5py # HDF5 support
import os
import glob
import numpy as n
from scipy.interpolate import interp1d
import astropy.io.fits as fits
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
def write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max):
	"""Extract the eBOSS ELG selection from a light-cone HDF5 file into a FITS table.

	Parameters
	----------
	path_to_lc : str
		input light-cone HDF5 file
	out_filename : str
		output FITS file (silently overwritten if it already exists)
	z_min, z_max : float
		redshift bounds; z_max also cuts the catalogue, z_min is header metadata only
	dec_max, ra_max : float
		half-extents of the footprint in degrees (header metadata only)
	"""
	f = h5py.File(path_to_lc, 'r')
	# galaxies inside the cone footprint, below z_max, flagged as eBOSS ELGs
	is_gal = (f['/sky_position/selection'].value)&(f['/sky_position/redshift_R'].value<z_max)&(f['/cosmo_4most/is_ELG_eBOSS'].value)#&(f['/agn_properties/agn_activity'].value==1)
	hdu_cols = fits.ColDefs([
	fits.Column(name='Vmax',format='D', array= f['/halo_properties/Vmax'].value[is_gal], unit='km/s' )
	,fits.Column(name='mvir',format='D', array= f['/halo_properties/mvir'].value[is_gal], unit='Msun' )
	,fits.Column(name='log_stellar_mass',format='D', array= n.log10(f['/moster_2013_data/stellar_mass'].value[is_gal]) , unit='log10(stellar_mass/[Msun])' )
	,fits.Column(name='RA',format='D', array= f['/sky_position/RA'].value[is_gal] , unit='RA/[deg]' )
	,fits.Column(name='DEC',format='D', array= f['/sky_position/DEC'].value[is_gal], unit='DEC/[deg]' )
	,fits.Column(name='redshift_R',format='D', array= f['/sky_position/redshift_R'].value[is_gal], unit='real space redshift' )
	,fits.Column(name='redshift_S',format='D', array= f['/sky_position/redshift_S'].value[is_gal], unit='redshift space redshift' )
	])
	f.close()
	tb_hdu = fits.BinTableHDU.from_columns( hdu_cols )
	# define the header.
	# BUGFIX: the negative bounds previously overwrote the positive ones under
	# the same *_max keys; store them under *_min keys instead.
	prihdr = fits.Header()
	prihdr['author'] = 'JC'
	prihdr['DEC_max'] = dec_max
	prihdr['DEC_min'] = - dec_max
	prihdr['RA_max'] = ra_max
	prihdr['RA_min'] = - ra_max
	prihdr['z_min'] = z_min
	prihdr['z_max'] = z_max
	prihdu = fits.PrimaryHDU(header=prihdr)
	# writes the file; remove a pre-existing copy via os.remove instead of
	# shelling out to `rm` (portable, no shell-quoting issues)
	thdulist = fits.HDUList([prihdu, tb_hdu])
	print( out_filename )
	if os.path.isfile(out_filename):
		os.remove(out_filename)
	thdulist.writeto(out_filename)
# Driver script: extract the eBOSS ELG catalogue from each light-cone shell.
# Shell L3: selection out to z = 1.08 over the widest intermediate footprint.
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L3.hdf5'
out_filename = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_eBOSS_L3.fits'
z_min = 0.
z_max = 1.08
dec_max = 8.269819492449505
ra_max = 6.7529257176359
write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max)
# Shell L6: deep, narrow cone out to z = 3.
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L6.hdf5'
out_filename = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_eBOSS_L6.fits'
z_min = 0.
z_max = 3.0
dec_max = 2.0047373031569915
ra_max = 1.9766516114702513
write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max)
# Shell L15: shallow, wide cone out to z = 0.54.
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L15.hdf5'
out_filename = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_eBOSS_L15.fits'
z_min = 0.
z_max = 0.54
dec_max = 20.257311381848154
ra_max = 14.323944878104827
write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max)
# Shell L3_z1: continuation of L3 between z = 1.08 and z = 3.
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L3_z1.hdf5'
out_filename = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_eBOSS_L3_z1.fits'
z_min = 1.08
z_max = 3.0
dec_max = 4.134909746242654
ra_max = 3.3764628588325674
write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max)
# z< 0.5423857379098544 |ra [deg]|< 14.323944878104827 |dec [deg]|< 20.257311381848154
# L3 characteristics :
# z< 1.0889947373832305 |ra [deg]|< 6.7529257176359 |dec [deg]|< 8.269819492449505
# N points: 8037075
#
# L3_z1 characteristics
# z< 3.8309961826584344 |ra [deg]|< 3.3764628588325674 |dec [deg]|< 4.134909746242654
# N points: 8511571
#
# L6 characteristics
# z< 6.697087333514605 |ra [deg]|< 1.9766516114702513 |dec [deg]|< 2.0047373031569915
# N points: 3287299
| [
"numpy.log10",
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.HDUList",
"astropy.io.fits.Column",
"astropy.cosmology.FlatLambdaCDM",
"h5py.File",
"astropy.io.fits.Header",
"astropy.io.fits.BinTableHDU.from_columns",
"os.system"
] | [((221, 293), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {'H0': '(67.77 * u.km / u.s / u.Mpc)', 'Om0': '(0.307115)', 'Ob0': '(0.048206)'}), '(H0=67.77 * u.km / u.s / u.Mpc, Om0=0.307115, Ob0=0.048206)\n', (234, 293), False, 'from astropy.cosmology import FlatLambdaCDM\n'), ((372, 398), 'h5py.File', 'h5py.File', (['path_to_lc', '"""r"""'], {}), "(path_to_lc, 'r')\n", (381, 398), False, 'import h5py\n'), ((1486, 1525), 'astropy.io.fits.BinTableHDU.from_columns', 'fits.BinTableHDU.from_columns', (['hdu_cols'], {}), '(hdu_cols)\n', (1515, 1525), True, 'import astropy.io.fits as fits\n'), ((1560, 1573), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (1571, 1573), True, 'import astropy.io.fits as fits\n'), ((1785, 1815), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'header': 'prihdr'}), '(header=prihdr)\n', (1800, 1815), True, 'import astropy.io.fits as fits\n'), ((1848, 1878), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[prihdu, tb_hdu]'], {}), '([prihdu, tb_hdu])\n', (1860, 1878), True, 'import astropy.io.fits as fits\n'), ((1906, 1937), 'os.system', 'os.system', (["('rm ' + out_filename)"], {}), "('rm ' + out_filename)\n", (1915, 1937), False, 'import os\n'), ((609, 711), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""Vmax"""', 'format': '"""D"""', 'array': "f['/halo_properties/Vmax'].value[is_gal]", 'unit': '"""km/s"""'}), "(name='Vmax', format='D', array=f['/halo_properties/Vmax'].value\n [is_gal], unit='km/s')\n", (620, 711), True, 'import astropy.io.fits as fits\n'), ((716, 818), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""mvir"""', 'format': '"""D"""', 'array': "f['/halo_properties/mvir'].value[is_gal]", 'unit': '"""Msun"""'}), "(name='mvir', format='D', array=f['/halo_properties/mvir'].value\n [is_gal], unit='Msun')\n", (727, 818), True, 'import astropy.io.fits as fits\n'), ((980, 1079), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""RA"""', 'format': '"""D"""', 'array': 
"f['/sky_position/RA'].value[is_gal]", 'unit': '"""RA/[deg]"""'}), "(name='RA', format='D', array=f['/sky_position/RA'].value[is_gal\n ], unit='RA/[deg]')\n", (991, 1079), True, 'import astropy.io.fits as fits\n'), ((1085, 1187), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""DEC"""', 'format': '"""D"""', 'array': "f['/sky_position/DEC'].value[is_gal]", 'unit': '"""DEC/[deg]"""'}), "(name='DEC', format='D', array=f['/sky_position/DEC'].value[\n is_gal], unit='DEC/[deg]')\n", (1096, 1187), True, 'import astropy.io.fits as fits\n'), ((1193, 1319), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""redshift_R"""', 'format': '"""D"""', 'array': "f['/sky_position/redshift_R'].value[is_gal]", 'unit': '"""real space redshift"""'}), "(name='redshift_R', format='D', array=f[\n '/sky_position/redshift_R'].value[is_gal], unit='real space redshift')\n", (1204, 1319), True, 'import astropy.io.fits as fits\n'), ((1324, 1454), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""redshift_S"""', 'format': '"""D"""', 'array': "f['/sky_position/redshift_S'].value[is_gal]", 'unit': '"""redshift space redshift"""'}), "(name='redshift_S', format='D', array=f[\n '/sky_position/redshift_S'].value[is_gal], unit='redshift space redshift')\n", (1335, 1454), True, 'import astropy.io.fits as fits\n'), ((878, 936), 'numpy.log10', 'n.log10', (["f['/moster_2013_data/stellar_mass'].value[is_gal]"], {}), "(f['/moster_2013_data/stellar_mass'].value[is_gal])\n", (885, 936), True, 'import numpy as n\n')] |
import numpy as np
import claude_low_level_library as low_level
import claude_top_level_library as top_level
def grid_lat(mul, xx, yy, rad):
    # Latitude (degrees) of each grid cell: arccos of the normalised radial
    # distance from the centre, scaled by ``mul``, returned as a flat array.
    dist = (xx**2 + yy**2)**0.5
    angle = np.arccos(dist/rad)
    return (mul * angle*180.0/np.pi).flatten()
def grid_lon(xx, yy):
    # Longitude (degrees) of each grid cell: 180 minus the azimuth of (xx, yy),
    # returned as a flat array.
    azimuth = np.arctan2(yy,xx)*180.0/np.pi
    return (180.0 - azimuth).flatten()
def cos_mul_sin(rad, lat, lon, i, j):
    # rad * cos(lat[i]) * sin(lon[j]), with both angles given in degrees.
    lat_rad = lat[i]*np.pi/180.0
    lon_rad = lon[j]*np.pi/180.0
    return rad * np.cos(lat_rad) * np.sin(lon_rad)
def cos_mul_cos(rad, lat, lon, i, j):
    # rad * cos(lat[i]) * cos(lon[j]), with both angles given in degrees.
    lat_rad = lat[i]*np.pi/180.0
    lon_rad = lon[j]*np.pi/180.0
    return rad * np.cos(lat_rad) * np.cos(lon_rad)
| [
"numpy.sin",
"numpy.arctan2",
"numpy.arccos",
"numpy.cos"
] | [((390, 420), 'numpy.sin', 'np.sin', (['(lon[j] * np.pi / 180.0)'], {}), '(lon[j] * np.pi / 180.0)\n', (396, 420), True, 'import numpy as np\n'), ((502, 532), 'numpy.cos', 'np.cos', (['(lon[j] * np.pi / 180.0)'], {}), '(lon[j] * np.pi / 180.0)\n', (508, 532), True, 'import numpy as np\n'), ((361, 391), 'numpy.cos', 'np.cos', (['(lat[i] * np.pi / 180.0)'], {}), '(lat[i] * np.pi / 180.0)\n', (367, 391), True, 'import numpy as np\n'), ((473, 503), 'numpy.cos', 'np.cos', (['(lat[i] * np.pi / 180.0)'], {}), '(lat[i] * np.pi / 180.0)\n', (479, 503), True, 'import numpy as np\n'), ((160, 203), 'numpy.arccos', 'np.arccos', (['((xx ** 2 + yy ** 2) ** 0.5 / rad)'], {}), '((xx ** 2 + yy ** 2) ** 0.5 / rad)\n', (169, 203), True, 'import numpy as np\n'), ((264, 282), 'numpy.arctan2', 'np.arctan2', (['yy', 'xx'], {}), '(yy, xx)\n', (274, 282), True, 'import numpy as np\n')] |
from functools import partial
from textwrap import dedent
from io import StringIO
import pytest
import pandas.testing as pdtest
import numpy
import pandas
from wqio.utils import misc
from wqio.tests import helpers
@pytest.fixture
def basic_data():
    # Small 3x4 integer frame indexed by "Date" (rows X, Y, Z).
    raw = "\n".join([
        "Date,A,B,C,D",
        "X,1,2,3,4",
        "Y,5,6,7,8",
        "Z,9,0,1,2",
    ]) + "\n"
    return pandas.read_csv(StringIO(raw), index_col=["Date"])
@pytest.fixture
def multiindex_df():
    # 3x2 frame whose row index is a (loc, units) MultiIndex.
    rows = pandas.MultiIndex.from_product(
        [["A", "B", "C"], ["mg/L"]], names=["loc", "units"]
    )
    values = [[1, 2], [3, 4], [5, 6]]
    return pandas.DataFrame(values, index=rows, columns=["a", "b"])
class mockDataset(object):
    """Pair of mockLocations standing in for a wqio dataset."""

    def __init__(self, inflow, outflow):
        self.inflow, self.outflow = mockLocation(inflow), mockLocation(outflow)
class mockLocation(object):
    """Raw data plus its precomputed summary, standing in for a wqio location."""

    def __init__(self, data):
        self.data = data
        self.stats = mockSummary(data)
class mockSummary(object):
    """Count/extrema summary of a sequence; ``nonething`` is always None."""

    def __init__(self, data):
        self.N, self.max, self.min = len(data), max(data), min(data)
        self.nonething = None
def test_add_column_level(basic_data):
    # Every column should be prefixed with the new "test" level.
    expected = pandas.MultiIndex.from_tuples([("test", c) for c in "ABCD"])
    result = misc.add_column_level(basic_data, "test", "testlevel")
    assert expected.tolist() == result.columns.tolist()

    # a frame whose columns are already a MultiIndex must be rejected
    with helpers.raises(ValueError):
        misc.add_column_level(result, "test2", "testlevel2")
@pytest.mark.parametrize("L1", [0, "loc"])
@pytest.mark.parametrize("L2", [2, "units"])
def test_swap_column_levels(multiindex_df, L1, L2):
columns = pandas.MultiIndex.from_product(
[["A", "B", "C"], ["res", "cen"], ["mg/L"]], names=["loc", "value", "units"]
)
data = numpy.arange(len(columns) * 10).reshape((10, len(columns)))
df = pandas.DataFrame(data, columns=columns).pipe(misc.swap_column_levels, L1, L2)
expected_columns = pandas.MultiIndex.from_product(
[["mg/L"], ["cen", "res"], ["A", "B", "C"]], names=["units", "value", "loc"]
)
pdtest.assert_index_equal(df.columns, expected_columns)
def test_flatten_columns(multiindex_df, basic_data):
    # MultiIndex columns collapse to "level0_level1" strings...
    flattened = misc.flatten_columns(multiindex_df.T)
    assert flattened.columns.tolist() == ["A_mg/L", "B_mg/L", "C_mg/L"]
    # ...while plain columns pass through unchanged.
    plain = misc.flatten_columns(basic_data)
    assert plain.columns.tolist() == basic_data.columns.tolist()
def test_expand_columns():
    # "A_a"-style flat column labels expand back into a two-level MultiIndex.
    values = numpy.arange(12).reshape(3, 4)
    flat = pandas.DataFrame(values, columns=("A_a", "A_b", "B_a", "B_c"))
    expected_cols = pandas.MultiIndex(
        levels=[["A", "B"], ["a", "b", "c"]],
        codes=[[0, 0, 1, 1], [0, 1, 0, 2]],
        names=["top", "bottom"],
    )
    expanded = misc.expand_columns(flat, ["top", "bottom"])
    pdtest.assert_frame_equal(expanded, pandas.DataFrame(values, columns=expected_cols))
@pytest.mark.parametrize("criteria", [None, lambda row: row[0] in ["A", "B"]])
@pytest.mark.parametrize("dropold", [True, False])
def test_redefine_index_level(multiindex_df, criteria, dropold):
expected_cols = ["a", "b"]
if dropold:
expected_value = [[1, 2], [3, 4], [5, 6]]
if criteria:
expected_index = [("A", "ug/L"), ("B", "ug/L"), ("C", "mg/L")]
else:
expected_index = [("A", "ug/L"), ("B", "ug/L"), ("C", "ug/L")]
else:
if criteria:
expected_value = [[1, 2], [1, 2], [3, 4], [3, 4], [5, 6]]
expected_index = [
("A", "mg/L"),
("A", "ug/L"),
("B", "mg/L"),
("B", "ug/L"),
("C", "mg/L"),
]
else:
expected_value = [[1, 2], [1, 2], [3, 4], [3, 4], [5, 6], [5, 6]]
expected_index = [
("A", "mg/L"),
("A", "ug/L"),
("B", "mg/L"),
("B", "ug/L"),
("C", "mg/L"),
("C", "ug/L"),
]
result = misc.redefine_index_level(
multiindex_df, "units", "ug/L", criteria=criteria, dropold=dropold
)
expected = pandas.DataFrame(
data=expected_value,
index=pandas.MultiIndex.from_tuples(expected_index, names=["loc", "units"]),
columns=expected_cols,
)
pdtest.assert_frame_equal(result, expected)
@pytest.fixture
def basic_dataset():
    # Paired inflow/outflow samples with easily checked count, min, and max.
    return mockDataset([1, 3, 4, 12.57], [2, 5, 7, 15.17])
def test_nested_getattr(basic_dataset):
    # A dotted path resolves exactly like chained attribute access.
    fetched = misc.nested_getattr(basic_dataset, "inflow.stats.max")
    assert fetched == basic_dataset.inflow.stats.max
@pytest.mark.parametrize(
    ("strformat", "expected", "attribute"),
    [
        ("%d", "4", "inflow.stats.N"),
        ("%0.2f", "15.17", "outflow.stats.max"),
        (None, "--", "inflow.stats.nonething"),
    ],
)
def test_stringify(basic_dataset, strformat, expected, attribute):
    # None-valued attributes render as the "--" placeholder; everything else
    # is rendered through the supplied %-format string.
    result = misc.stringify(basic_dataset, strformat, attribute=attribute)
    assert result == expected
def test_categorize_columns():
    # A small NSQD-style table whose string columns (parameter, units, season)
    # should be converted to pandas Categoricals, leaving numerics untouched.
    csvdata = StringIO(
        dedent(
            """\
        parameter,units,season,lower,NSQD Median,upper
        Cadmium (Cd),ug/L,autumn,0.117,0.361,0.52
        Cadmium (Cd),ug/L,spring,0.172,0.352,0.53
        Cadmium (Cd),ug/L,summer,0.304,0.411,0.476
        Cadmium (Cd),ug/L,winter,0.355,0.559,1.125
        Dissolved Chloride (Cl),mg/L,autumn,0.342,2.3,5.8
        Dissolved Chloride (Cl),mg/L,spring,2.5,2.5,2.5
        Dissolved Chloride (Cl),mg/L,summer,0.308,0.762,1.24
        Escherichia coli,MPN/100 mL,autumn,1200.0,15500.0,24000.0
        Escherichia coli,MPN/100 mL,spring,10.0,630.0,810.0
        Escherichia coli,MPN/100 mL,summer,21000.0,27000.0,35000.0
        <NAME>,MPN/100 mL,winter,20.0,200.0,800.0
    """
        )
    )
    df = pandas.read_csv(csvdata)
    df2 = misc.categorize_columns(df, "parameter", "units", "season")
    # check pandas API that the first df has object columns
    assert object in df.dtypes.values
    # confirm that all of those objects are gone
    assert object not in df2.dtypes.values
    # categorizing a numeric column is an error
    with helpers.raises(ValueError):
        misc.categorize_columns(df, "parameter", "upper")
@pytest.mark.parametrize(
    ("value", "expected"), [(3, "<5"), (17, "15 - 20"), (25, "20 - 25"), (46, ">35")]
)
@pytest.mark.parametrize("units", [None, "mm"])
def test_classifier(value, units, expected):
    # Values below, inside, and above the bins get the right label; a units
    # suffix is appended when units are given.
    edges = numpy.arange(5, 36, 5)
    label = expected if units is None else "{} {}".format(expected, units)
    assert misc.classifier(value, edges, units=units) == label
    # NaN input always classifies to NaN, whatever the units
    assert numpy.isnan(misc.classifier(numpy.nan, edges, units=units))
def test_unique_categories():
    # Categories cover below, between, and above the bin edges, in order.
    bins = [5, 10, 15]
    grouper = partial(misc.classifier, bins=bins, units="mm")
    assert misc.unique_categories(grouper, bins) == [
        "<5 mm",
        "5 - 10 mm",
        "10 - 15 mm",
        ">15 mm",
    ]
def test_pop_many():
    # The named keys are removed and the remaining mapping is returned.
    letters = dict(zip("ABCDE", range(5)))
    assert misc.pop_many(letters, "A", "B", "E") == {"C": 2, "D": 3}
def test_selector():
    # The first matching condition wins; "Z" is the fallback label.
    values = numpy.arange(10)
    picked = misc.selector("Z", (values <= 2, "A"), (values < 6, "B"), (values <= 7, "C"))
    assert all(picked == numpy.array(list("AAABBBCCZZ")))
@pytest.mark.parametrize("input_values", ["A", 4, ("this", "5")])
def test_non_filter(input_values):
assert misc.non_filter(input_values)
@pytest.mark.parametrize("input_values", ["A", 4, ("this", "5")])
def test_no_op(input_values):
assert misc.no_op(input_values) == input_values
@pytest.mark.parametrize("value", [4, lambda x: 4])
def test_assign_multilevel_column(value):
df = pandas.DataFrame(
data=1,
index=pandas.MultiIndex.from_product([list("ABCD"), [1, 2, 3, 4]]),
columns=pandas.MultiIndex.from_product([list("abc"), [1, 2, 3]]),
)
result = misc.assign_multilevel_column(df, value, "d", 1)
expected = pandas.Series(4, index=df.index, name=("d", 1))
pdtest.assert_series_equal(result[("d", 1)], expected)
@pytest.mark.parametrize("join_char", [None, "-"])
def test_symbolize_bools(join_char):
df = pandas.DataFrame(
{
"A": [True, False, False],
"B": [False, True, True],
"C": [False, True, numpy.nan],
}
)
result = misc.symbolize_bools(
df, true_symbol="◆", false_symbol="◇", other_symbol="✖", join_char=join_char
)
if not join_char:
expected = pandas.DataFrame(
{
"A": {0: "◆", 1: "◇", 2: "◇"},
"B": {0: "◇", 1: "◆", 2: "◆"},
"C": {0: "◇", 1: "◆", 2: "✖"},
}
)
pdtest.assert_frame_equal(result, expected)
else:
expected = pandas.Series(["◆-◇-◇", "◇-◆-◆", "◇-◆-✖"])
pdtest.assert_series_equal(result, expected)
| [
"pandas.read_csv",
"wqio.utils.misc.expand_columns",
"wqio.utils.misc.categorize_columns",
"pandas.testing.assert_frame_equal",
"pandas.MultiIndex.from_tuples",
"numpy.arange",
"wqio.utils.misc.redefine_index_level",
"pandas.MultiIndex.from_product",
"textwrap.dedent",
"pandas.testing.assert_index... | [((1561, 1602), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""L1"""', "[0, 'loc']"], {}), "('L1', [0, 'loc'])\n", (1584, 1602), False, 'import pytest\n'), ((1604, 1647), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""L2"""', "[2, 'units']"], {}), "('L2', [2, 'units'])\n", (1627, 1647), False, 'import pytest\n'), ((2965, 3042), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""criteria"""', "[None, lambda row: row[0] in ['A', 'B']]"], {}), "('criteria', [None, lambda row: row[0] in ['A', 'B']])\n", (2988, 3042), False, 'import pytest\n'), ((3044, 3093), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dropold"""', '[True, False]'], {}), "('dropold', [True, False])\n", (3067, 3093), False, 'import pytest\n'), ((4749, 4934), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('strformat', 'expected', 'attribute')", "[('%d', '4', 'inflow.stats.N'), ('%0.2f', '15.17', 'outflow.stats.max'), (\n None, '--', 'inflow.stats.nonething')]"], {}), "(('strformat', 'expected', 'attribute'), [('%d', '4',\n 'inflow.stats.N'), ('%0.2f', '15.17', 'outflow.stats.max'), (None, '--',\n 'inflow.stats.nonething')])\n", (4772, 4934), False, 'import pytest\n'), ((6325, 6435), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('value', 'expected')", "[(3, '<5'), (17, '15 - 20'), (25, '20 - 25'), (46, '>35')]"], {}), "(('value', 'expected'), [(3, '<5'), (17, '15 - 20'),\n (25, '20 - 25'), (46, '>35')])\n", (6348, 6435), False, 'import pytest\n'), ((6439, 6485), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""units"""', "[None, 'mm']"], {}), "('units', [None, 'mm'])\n", (6462, 6485), False, 'import pytest\n'), ((7482, 7546), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_values"""', "['A', 4, ('this', '5')]"], {}), "('input_values', ['A', 4, ('this', '5')])\n", (7505, 7546), False, 'import pytest\n'), ((7626, 7690), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""input_values"""', "['A', 4, ('this', '5')]"], {}), "('input_values', ['A', 4, ('this', '5')])\n", (7649, 7690), False, 'import pytest\n'), ((7776, 7826), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', '[4, lambda x: 4]'], {}), "('value', [4, lambda x: 4])\n", (7799, 7826), False, 'import pytest\n'), ((8256, 8305), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""join_char"""', "[None, '-']"], {}), "('join_char', [None, '-'])\n", (8279, 8305), False, 'import pytest\n'), ((465, 552), 'pandas.MultiIndex.from_product', 'pandas.MultiIndex.from_product', (["[['A', 'B', 'C'], ['mg/L']]"], {'names': "['loc', 'units']"}), "([['A', 'B', 'C'], ['mg/L']], names=['loc',\n 'units'])\n", (495, 552), False, 'import pandas\n'), ((574, 649), 'pandas.DataFrame', 'pandas.DataFrame', (['[[1, 2], [3, 4], [5, 6]]'], {'index': 'index', 'columns': "['a', 'b']"}), "([[1, 2], [3, 4], [5, 6]], index=index, columns=['a', 'b'])\n", (590, 649), False, 'import pandas\n'), ((1164, 1267), 'pandas.MultiIndex.from_tuples', 'pandas.MultiIndex.from_tuples', (["[(u'test', u'A'), (u'test', u'B'), (u'test', u'C'), (u'test', u'D')]"], {}), "([(u'test', u'A'), (u'test', u'B'), (u'test',\n u'C'), (u'test', u'D')])\n", (1193, 1267), False, 'import pandas\n'), ((1292, 1346), 'wqio.utils.misc.add_column_level', 'misc.add_column_level', (['basic_data', '"""test"""', '"""testlevel"""'], {}), "(basic_data, 'test', 'testlevel')\n", (1313, 1346), False, 'from wqio.utils import misc\n'), ((1714, 1826), 'pandas.MultiIndex.from_product', 'pandas.MultiIndex.from_product', (["[['A', 'B', 'C'], ['res', 'cen'], ['mg/L']]"], {'names': "['loc', 'value', 'units']"}), "([['A', 'B', 'C'], ['res', 'cen'], ['mg/L']],\n names=['loc', 'value', 'units'])\n", (1744, 1826), False, 'import pandas\n'), ((2019, 2131), 'pandas.MultiIndex.from_product', 'pandas.MultiIndex.from_product', (["[['mg/L'], ['cen', 'res'], ['A', 'B', 'C']]"], {'names': "['units', 'value', 'loc']"}), "([['mg/L'], ['cen', 
'res'], ['A', 'B', 'C']],\n names=['units', 'value', 'loc'])\n", (2049, 2131), False, 'import pandas\n'), ((2147, 2202), 'pandas.testing.assert_index_equal', 'pdtest.assert_index_equal', (['df.columns', 'expected_columns'], {}), '(df.columns, expected_columns)\n', (2172, 2202), True, 'import pandas.testing as pdtest\n'), ((2315, 2352), 'wqio.utils.misc.flatten_columns', 'misc.flatten_columns', (['multiindex_df.T'], {}), '(multiindex_df.T)\n', (2335, 2352), False, 'from wqio.utils import misc\n'), ((2583, 2640), 'pandas.DataFrame', 'pandas.DataFrame', (['x'], {'columns': "('A_a', 'A_b', 'B_a', 'B_c')"}), "(x, columns=('A_a', 'A_b', 'B_a', 'B_c'))\n", (2599, 2640), False, 'import pandas\n'), ((2657, 2777), 'pandas.MultiIndex', 'pandas.MultiIndex', ([], {'levels': "[['A', 'B'], ['a', 'b', 'c']]", 'codes': '[[0, 0, 1, 1], [0, 1, 0, 2]]', 'names': "['top', 'bottom']"}), "(levels=[['A', 'B'], ['a', 'b', 'c']], codes=[[0, 0, 1, 1],\n [0, 1, 0, 2]], names=['top', 'bottom'])\n", (2674, 2777), False, 'import pandas\n'), ((2820, 2857), 'pandas.DataFrame', 'pandas.DataFrame', (['x'], {'columns': 'res_cols'}), '(x, columns=res_cols)\n', (2836, 2857), False, 'import pandas\n'), ((2871, 2913), 'wqio.utils.misc.expand_columns', 'misc.expand_columns', (['df', "['top', 'bottom']"], {}), "(df, ['top', 'bottom'])\n", (2890, 2913), False, 'from wqio.utils import misc\n'), ((2918, 2961), 'pandas.testing.assert_frame_equal', 'pdtest.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (2943, 2961), True, 'import pandas.testing as pdtest\n'), ((4080, 4177), 'wqio.utils.misc.redefine_index_level', 'misc.redefine_index_level', (['multiindex_df', '"""units"""', '"""ug/L"""'], {'criteria': 'criteria', 'dropold': 'dropold'}), "(multiindex_df, 'units', 'ug/L', criteria=criteria,\n dropold=dropold)\n", (4105, 4177), False, 'from wqio.utils import misc\n'), ((4376, 4419), 'pandas.testing.assert_frame_equal', 'pdtest.assert_frame_equal', (['result', 'expected'], {}), 
'(result, expected)\n', (4401, 4419), True, 'import pandas.testing as pdtest\n'), ((4615, 4669), 'wqio.utils.misc.nested_getattr', 'misc.nested_getattr', (['basic_dataset', '"""inflow.stats.max"""'], {}), "(basic_dataset, 'inflow.stats.max')\n", (4634, 4669), False, 'from wqio.utils import misc\n'), ((5049, 5110), 'wqio.utils.misc.stringify', 'misc.stringify', (['basic_dataset', 'strformat'], {'attribute': 'attribute'}), '(basic_dataset, strformat, attribute=attribute)\n', (5063, 5110), False, 'from wqio.utils import misc\n'), ((5939, 5963), 'pandas.read_csv', 'pandas.read_csv', (['csvdata'], {}), '(csvdata)\n', (5954, 5963), False, 'import pandas\n'), ((5974, 6033), 'wqio.utils.misc.categorize_columns', 'misc.categorize_columns', (['df', '"""parameter"""', '"""units"""', '"""season"""'], {}), "(df, 'parameter', 'units', 'season')\n", (5997, 6033), False, 'from wqio.utils import misc\n'), ((6542, 6564), 'numpy.arange', 'numpy.arange', (['(5)', '(36)', '(5)'], {}), '(5, 36, 5)\n', (6554, 6564), False, 'import numpy\n'), ((6656, 6697), 'wqio.utils.misc.classifier', 'misc.classifier', (['value', 'bins'], {'units': 'units'}), '(value, bins, units=units)\n', (6671, 6697), False, 'from wqio.utils import misc\n'), ((6870, 6917), 'functools.partial', 'partial', (['misc.classifier'], {'bins': 'bins', 'units': '"""mm"""'}), "(misc.classifier, bins=bins, units='mm')\n", (6877, 6917), False, 'from functools import partial\n'), ((7013, 7053), 'wqio.utils.misc.unique_categories', 'misc.unique_categories', (['classifier', 'bins'], {}), '(classifier, bins)\n', (7035, 7053), False, 'from wqio.utils import misc\n'), ((7304, 7320), 'numpy.arange', 'numpy.arange', (['(10)'], {}), '(10)\n', (7316, 7320), False, 'import numpy\n'), ((7381, 7443), 'wqio.utils.misc.selector', 'misc.selector', (['"""Z"""', "(x <= 2, 'A')", "(x < 6, 'B')", "(x <= 7, 'C')"], {}), "('Z', (x <= 2, 'A'), (x < 6, 'B'), (x <= 7, 'C'))\n", (7394, 7443), False, 'from wqio.utils import misc\n'), ((7593, 7622), 
'wqio.utils.misc.non_filter', 'misc.non_filter', (['input_values'], {}), '(input_values)\n', (7608, 7622), False, 'from wqio.utils import misc\n'), ((8082, 8130), 'wqio.utils.misc.assign_multilevel_column', 'misc.assign_multilevel_column', (['df', 'value', '"""d"""', '(1)'], {}), "(df, value, 'd', 1)\n", (8111, 8130), False, 'from wqio.utils import misc\n'), ((8146, 8193), 'pandas.Series', 'pandas.Series', (['(4)'], {'index': 'df.index', 'name': "('d', 1)"}), "(4, index=df.index, name=('d', 1))\n", (8159, 8193), False, 'import pandas\n'), ((8198, 8250), 'pandas.testing.assert_series_equal', 'pdtest.assert_series_equal', (["result['d', 1]", 'expected'], {}), "(result['d', 1], expected)\n", (8224, 8250), True, 'import pandas.testing as pdtest\n'), ((8352, 8458), 'pandas.DataFrame', 'pandas.DataFrame', (["{'A': [True, False, False], 'B': [False, True, True], 'C': [False, True,\n numpy.nan]}"], {}), "({'A': [True, False, False], 'B': [False, True, True], 'C':\n [False, True, numpy.nan]})\n", (8368, 8458), False, 'import pandas\n'), ((8530, 8633), 'wqio.utils.misc.symbolize_bools', 'misc.symbolize_bools', (['df'], {'true_symbol': '"""◆"""', 'false_symbol': '"""◇"""', 'other_symbol': '"""✖"""', 'join_char': 'join_char'}), "(df, true_symbol='◆', false_symbol='◇', other_symbol=\n '✖', join_char=join_char)\n", (8550, 8633), False, 'from wqio.utils import misc\n'), ((1468, 1494), 'wqio.tests.helpers.raises', 'helpers.raises', (['ValueError'], {}), '(ValueError)\n', (1482, 1494), False, 'from wqio.tests import helpers\n'), ((1504, 1557), 'wqio.utils.misc.add_column_level', 'misc.add_column_level', (['newdata', '"""test2"""', '"""testlevel2"""'], {}), "(newdata, 'test2', 'testlevel2')\n", (1525, 1557), False, 'from wqio.utils import misc\n'), ((5206, 5909), 'textwrap.dedent', 'dedent', (['""" parameter,units,season,lower,NSQD Median,upper\n Cadmium (Cd),ug/L,autumn,0.117,0.361,0.52\n Cadmium (Cd),ug/L,spring,0.172,0.352,0.53\n Cadmium (Cd),ug/L,summer,0.304,0.411,0.476\n 
Cadmium (Cd),ug/L,winter,0.355,0.559,1.125\n Dissolved Chloride (Cl),mg/L,autumn,0.342,2.3,5.8\n Dissolved Chloride (Cl),mg/L,spring,2.5,2.5,2.5\n Dissolved Chloride (Cl),mg/L,summer,0.308,0.762,1.24\n Escherichia coli,MPN/100 mL,autumn,1200.0,15500.0,24000.0\n Escherichia coli,MPN/100 mL,spring,10.0,630.0,810.0\n Escherichia coli,MPN/100 mL,summer,21000.0,27000.0,35000.0\n <NAME>,MPN/100 mL,winter,20.0,200.0,800.0\n """'], {}), '(\n """ parameter,units,season,lower,NSQD Median,upper\n Cadmium (Cd),ug/L,autumn,0.117,0.361,0.52\n Cadmium (Cd),ug/L,spring,0.172,0.352,0.53\n Cadmium (Cd),ug/L,summer,0.304,0.411,0.476\n Cadmium (Cd),ug/L,winter,0.355,0.559,1.125\n Dissolved Chloride (Cl),mg/L,autumn,0.342,2.3,5.8\n Dissolved Chloride (Cl),mg/L,spring,2.5,2.5,2.5\n Dissolved Chloride (Cl),mg/L,summer,0.308,0.762,1.24\n Escherichia coli,MPN/100 mL,autumn,1200.0,15500.0,24000.0\n Escherichia coli,MPN/100 mL,spring,10.0,630.0,810.0\n Escherichia coli,MPN/100 mL,summer,21000.0,27000.0,35000.0\n <NAME>,MPN/100 mL,winter,20.0,200.0,800.0\n """\n )\n', (5212, 5909), False, 'from textwrap import dedent\n'), ((6236, 6262), 'wqio.tests.helpers.raises', 'helpers.raises', (['ValueError'], {}), '(ValueError)\n', (6250, 6262), False, 'from wqio.tests import helpers\n'), ((6272, 6321), 'wqio.utils.misc.categorize_columns', 'misc.categorize_columns', (['df', '"""parameter"""', '"""upper"""'], {}), "(df, 'parameter', 'upper')\n", (6295, 6321), False, 'from wqio.utils import misc\n'), ((6751, 6796), 'wqio.utils.misc.classifier', 'misc.classifier', (['numpy.nan', 'bins'], {'units': 'units'}), '(numpy.nan, bins, units=units)\n', (6766, 6796), False, 'from wqio.utils import misc\n'), ((7221, 7260), 'wqio.utils.misc.pop_many', 'misc.pop_many', (['some_dict', '"""A"""', '"""B"""', '"""E"""'], {}), "(some_dict, 'A', 'B', 'E')\n", (7234, 7260), False, 'from wqio.utils import misc\n'), ((7732, 7756), 'wqio.utils.misc.no_op', 'misc.no_op', (['input_values'], {}), '(input_values)\n', (7742, 7756), 
False, 'from wqio.utils import misc\n'), ((8684, 8817), 'pandas.DataFrame', 'pandas.DataFrame', (["{'A': {(0): '◆', (1): '◇', (2): '◇'}, 'B': {(0): '◇', (1): '◆', (2): '◆'},\n 'C': {(0): '◇', (1): '◆', (2): '✖'}}"], {}), "({'A': {(0): '◆', (1): '◇', (2): '◇'}, 'B': {(0): '◇', (1):\n '◆', (2): '◆'}, 'C': {(0): '◇', (1): '◆', (2): '✖'}})\n", (8700, 8817), False, 'import pandas\n'), ((8889, 8932), 'pandas.testing.assert_frame_equal', 'pdtest.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (8914, 8932), True, 'import pandas.testing as pdtest\n'), ((8962, 9004), 'pandas.Series', 'pandas.Series', (["['◆-◇-◇', '◇-◆-◆', '◇-◆-✖']"], {}), "(['◆-◇-◇', '◇-◆-◆', '◇-◆-✖'])\n", (8975, 9004), False, 'import pandas\n'), ((9013, 9057), 'pandas.testing.assert_series_equal', 'pdtest.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (9039, 9057), True, 'import pandas.testing as pdtest\n'), ((376, 391), 'textwrap.dedent', 'dedent', (['testcsv'], {}), '(testcsv)\n', (382, 391), False, 'from textwrap import dedent\n'), ((1917, 1956), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (1933, 1956), False, 'import pandas\n'), ((2543, 2559), 'numpy.arange', 'numpy.arange', (['(12)'], {}), '(12)\n', (2555, 2559), False, 'import numpy\n'), ((4264, 4333), 'pandas.MultiIndex.from_tuples', 'pandas.MultiIndex.from_tuples', (['expected_index'], {'names': "['loc', 'units']"}), "(expected_index, names=['loc', 'units'])\n", (4293, 4333), False, 'import pandas\n'), ((2419, 2451), 'wqio.utils.misc.flatten_columns', 'misc.flatten_columns', (['basic_data'], {}), '(basic_data)\n', (2439, 2451), False, 'from wqio.utils import misc\n')] |
import numpy as np
from scipy import signal
from golem import DataSet
from golem.nodes import BaseNode
from psychic.utils import get_samplerate
class Filter(BaseNode):
  def __init__(self, filt_design_func):
    '''
    Forward-backward (zero-phase) filtering node. filt_design_func takes the
    sample rate and returns the filter coefficients (b, a).
    '''
    BaseNode.__init__(self)
    self.filt_design_func = filt_design_func

  def train_(self, d):
    # Design the filter once the sample rate of the training data is known.
    fs = get_samplerate(d)
    self.log.info('Detected sample rate of %d Hz' % fs)
    self.filter = self.filt_design_func(fs)

  def apply_(self, d):
    # Filter every feature (channel) forward and backward for zero phase lag.
    b, a = self.filter
    columns = [signal.filtfilt(b, a, d.xs[:, i]).reshape(-1, 1)
      for i in range(d.nfeatures)]
    return DataSet(xs=np.hstack(columns), default=d)
class OnlineFilter(Filter):
  def __init__(self, filt_design_func):
    '''
    Stateful variant of Filter for online (chunk-by-chunk) use: the filter
    delay state is carried across successive calls to apply_().
    '''
    Filter.__init__(self, filt_design_func)
    self.zi = []  # per-channel filter state; filled lazily on first apply_

  def apply_(self, d):
    b, a = self.filter
    # Idiomatic emptiness test (was `self.zi == []`). Lazily build one
    # initial-condition vector per channel, assuming zero signal history.
    if not self.zi:
      self.zi = [signal.lfiltic(b, a, np.zeros(b.size)) for fi in
        range(d.nfeatures)]

    new_zi = []
    xs = []
    for i in range(d.nfeatures):
      # lfilter returns the filtered chunk plus the updated delay state
      xi, zii = signal.lfilter(b, a, d.xs[:, i], zi=self.zi[i])
      xs.append(xi.reshape(-1, 1))
      new_zi.append(zii)
    self.zi = new_zi

    return DataSet(xs=np.hstack(xs), default=d)
class Winsorize(BaseNode):
  '''
  Winsorizing node: clips every feature to empirical quantiles estimated on
  the training data, limiting the influence of outliers.
  '''
  def __init__(self, cutoff=[.05, .95]):
    # cutoff holds the [lower, upper] quantiles. The mutable default is kept
    # for interface compatibility; it is only read, never mutated.
    self.cutoff = np.atleast_1d(cutoff)
    assert self.cutoff.size == 2
    BaseNode.__init__(self)

  def train_(self, d):
    assert len(d.feat_shape) == 1
    # Per-feature clip limits: interpolate the cutoff quantiles over each
    # feature's sorted sample values. lims has shape (2, nfeatures).
    self.lims = np.apply_along_axis(lambda x: np.interp(self.cutoff,
      np.linspace(0, 1, d.ninstances), np.sort(x)), 0, d.xs)

  def apply_(self, d):
    # BUGFIX: index the upper limits as lims[1, :] to mirror lims[0, :];
    # the old `self.lims[1:]` only produced the same result by accident of
    # broadcasting a (1, nfeatures) array.
    return DataSet(xs=np.clip(d.xs, self.lims[0, :], self.lims[1, :]),
      default=d)
| [
"psychic.utils.get_samplerate",
"numpy.clip",
"numpy.hstack",
"scipy.signal.filtfilt",
"numpy.sort",
"scipy.signal.lfilter",
"numpy.zeros",
"numpy.linspace",
"golem.DataSet",
"golem.nodes.BaseNode.__init__",
"numpy.atleast_1d"
] | [((388, 411), 'golem.nodes.BaseNode.__init__', 'BaseNode.__init__', (['self'], {}), '(self)\n', (405, 411), False, 'from golem.nodes import BaseNode\n'), ((490, 507), 'psychic.utils.get_samplerate', 'get_samplerate', (['d'], {}), '(d)\n', (504, 507), False, 'from psychic.utils import get_samplerate\n'), ((772, 797), 'golem.DataSet', 'DataSet', ([], {'xs': 'xs', 'default': 'd'}), '(xs=xs, default=d)\n', (779, 797), False, 'from golem import DataSet\n'), ((1435, 1456), 'numpy.atleast_1d', 'np.atleast_1d', (['cutoff'], {}), '(cutoff)\n', (1448, 1456), True, 'import numpy as np\n'), ((1494, 1517), 'golem.nodes.BaseNode.__init__', 'BaseNode.__init__', (['self'], {}), '(self)\n', (1511, 1517), False, 'from golem.nodes import BaseNode\n'), ((1170, 1217), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'd.xs[:, i]'], {'zi': 'self.zi[i]'}), '(b, a, d.xs[:, i], zi=self.zi[i])\n', (1184, 1217), False, 'from scipy import signal\n'), ((1322, 1335), 'numpy.hstack', 'np.hstack', (['xs'], {}), '(xs)\n', (1331, 1335), True, 'import numpy as np\n'), ((1757, 1802), 'numpy.clip', 'np.clip', (['d.xs', 'self.lims[0, :]', 'self.lims[1:]'], {}), '(d.xs, self.lims[0, :], self.lims[1:])\n', (1764, 1802), True, 'import numpy as np\n'), ((1035, 1051), 'numpy.zeros', 'np.zeros', (['b.size'], {}), '(b.size)\n', (1043, 1051), True, 'import numpy as np\n'), ((1652, 1683), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'd.ninstances'], {}), '(0, 1, d.ninstances)\n', (1663, 1683), True, 'import numpy as np\n'), ((1685, 1695), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (1692, 1695), True, 'import numpy as np\n'), ((675, 708), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'd.xs[:, i]'], {}), '(b, a, d.xs[:, i])\n', (690, 708), False, 'from scipy import signal\n')] |
# external modules
import unittest
import tempfile
import shutil
import numpy as num
# ANUGA modules
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.file.sww import Write_sww, SWW_file
from anuga.abstract_2d_finite_volumes.generic_boundary_conditions \
import Transmissive_boundary
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
netcdf_float
from anuga.geospatial_data.geospatial_data import Geospatial_data
# local modules
from anuga.file_conversion.sdf2pts import sdf2pts
from anuga.file_conversion.sww2pts import sww2pts
from pprint import pprint
class Test_2Pts(unittest.TestCase):
    """ Test files that convert to pts format. """
    def test_hecras_cross_sections2pts(self):
        """Test conversion from HECRAS cross sections in ascii format
        to native NetCDF pts format
        """
        import time, os
        from anuga.file.netcdf import NetCDFFile
        #Write test asc file
        # Fixture: a minimal HEC-RAS .sdf export with two cross-sections.
        root = 'hecrastest'
        filename = root+'.sdf'
        fid = open(filename, 'w')
        fid.write("""
# RAS export file created on Mon 15Aug2005 11:42
# by HEC-RAS Version 3.1.1
BEGIN HEADER:
UNITS: METRIC
DTM TYPE: TIN
DTM: v:\\1\\cit\\perth_topo\\river_tin
STREAM LAYER: c:\\x_local\\hecras\\21_02_03\\up_canning_cent3d.shp
CROSS-SECTION LAYER: c:\\x_local\\hecras\\21_02_03\\up_can_xs3d.shp
MAP PROJECTION: UTM
PROJECTION ZONE: 50
DATUM: AGD66
VERTICAL DATUM:
NUMBER OF REACHES: 19
NUMBER OF CROSS-SECTIONS: 2
END HEADER:
BEGIN CROSS-SECTIONS:
CROSS-SECTION:
STREAM ID:Southern-Wungong
REACH ID:Southern-Wungong
STATION:21410
CUT LINE:
407546.08 , 6437277.542
407329.32 , 6437489.482
407283.11 , 6437541.232
SURFACE LINE:
407546.08, 6437277.54, 52.14
407538.88, 6437284.58, 51.07
407531.68, 6437291.62, 50.56
407524.48, 6437298.66, 49.58
407517.28, 6437305.70, 49.09
407510.08, 6437312.74, 48.76
END:
CROSS-SECTION:
STREAM ID:Swan River
REACH ID:Swan Mouth
STATION:840.*
CUT LINE:
381178.0855 , 6452559.0685
380485.4755 , 6453169.272
SURFACE LINE:
381178.09, 6452559.07, 4.17
381169.49, 6452566.64, 4.26
381157.78, 6452576.96, 4.34
381155.97, 6452578.56, 4.35
381143.72, 6452589.35, 4.43
381136.69, 6452595.54, 4.58
381114.74, 6452614.88, 4.41
381075.53, 6452649.43, 4.17
381071.47, 6452653.00, 3.99
381063.46, 6452660.06, 3.67
381054.41, 6452668.03, 3.67
END:
END CROSS-SECTIONS:
""")
        fid.close()
        #Convert to NetCDF pts
        sdf2pts(root+'.sdf')
        #Check contents
        #Get NetCDF
        fid = NetCDFFile(root+'.pts', netcdf_mode_r)
        # Get the variables
        #print fid.variables.keys()
        points = fid.variables['points']
        elevation = fid.variables['elevation']
        #Check values
        # The expected output is the concatenation of both SURFACE LINE
        # blocks from the fixture above, in file order.
        ref_points = [[407546.08, 6437277.54],
                      [407538.88, 6437284.58],
                      [407531.68, 6437291.62],
                      [407524.48, 6437298.66],
                      [407517.28, 6437305.70],
                      [407510.08, 6437312.74]]
        ref_points += [[381178.09, 6452559.07],
                       [381169.49, 6452566.64],
                       [381157.78, 6452576.96],
                       [381155.97, 6452578.56],
                       [381143.72, 6452589.35],
                       [381136.69, 6452595.54],
                       [381114.74, 6452614.88],
                       [381075.53, 6452649.43],
                       [381071.47, 6452653.00],
                       [381063.46, 6452660.06],
                       [381054.41, 6452668.03]]
        ref_elevation = [52.14, 51.07, 50.56, 49.58, 49.09, 48.76]
        ref_elevation += [4.17, 4.26, 4.34, 4.35, 4.43, 4.58, 4.41, 4.17, 3.99, 3.67, 3.67]
        #print points[:]
        #print ref_points
        assert num.allclose(points, ref_points)
        #print attributes[:]
        #print ref_elevation
        assert num.allclose(elevation, ref_elevation)
        #Cleanup
        fid.close()
        os.remove(root + '.sdf')
        os.remove(root + '.pts')
    def test_sww2pts_centroids_1_5(self):
        """Test that sww information can be converted correctly to pts data at specified coordinates
        - in this case, the centroids.
        """
        import time, os
        from anuga.file.netcdf import NetCDFFile
        # Used for points that lie outside mesh
        NODATA_value = 1758323
        # Setup
        from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
        # Create shallow water domain
        # 2x2 rectangular mesh with the (older) 1_5 flow algorithm; the
        # centroid expectations below are specific to this algorithm.
        domain = Domain(*rectangular(2, 2))
        domain.set_flow_algorithm('1_5')
        B = Transmissive_boundary(domain)
        domain.set_boundary( {'left': B, 'right': B, 'top': B, 'bottom': B})
        domain.set_name('datatest_1_5')
        ptsfile = domain.get_name() + '_elevation.pts'
        swwfile = domain.get_name() + '.sww'
        domain.set_datadir('.')
        domain.format = 'sww'
        domain.set_quantity('elevation', lambda x,y: -x-y)
        domain.geo_reference = Geo_reference(56,308500,6189000)
        sww = SWW_file(domain)
        sww.store_connectivity()
        sww.store_timestep()
        #self.domain.tight_slope_limiters = 1
        domain.evolve_to_end(finaltime = 0.01)
        sww.store_timestep()
        # Check contents in NetCDF
        fid = NetCDFFile(sww.filename, netcdf_mode_r)
        # Get the variables
        x = fid.variables['x'][:]
        y = fid.variables['y'][:]
        elevation = fid.variables['elevation'][:]
        time = fid.variables['time'][:]
        stage = fid.variables['stage'][:]
        volumes = fid.variables['volumes'][:]
        # Invoke interpolation for vertex points
        # At the mesh vertices sww2pts should reproduce the stored
        # elevation exactly.
        points = num.concatenate( (x[:,num.newaxis],y[:,num.newaxis]), axis=1 )
        points = num.ascontiguousarray(points)
        sww2pts(domain.get_name() + '.sww',
                quantity = 'elevation',
                data_points = points,
                NODATA_value = NODATA_value)
        ref_point_values = elevation
        point_values = Geospatial_data(ptsfile).get_attributes()
        #print 'P', point_values
        #print 'Ref', ref_point_values
        assert num.allclose(point_values, ref_point_values)
        # Invoke interpolation for centroids
        points = domain.get_centroid_coordinates()
        #print points
        sww2pts(domain.get_name() + '.sww',
                quantity = 'elevation',
                data_points = points,
                NODATA_value = NODATA_value)
        ref_point_values = [-0.5, -0.5, -1, -1, -1, -1, -1.5, -1.5] #At centroids
        point_values = Geospatial_data(ptsfile).get_attributes()
        #print 'P', point_values
        #print 'Ref', ref_point_values
        assert num.allclose(point_values, ref_point_values)
        fid.close()
        #Cleanup
        os.remove(sww.filename)
        os.remove(ptsfile)
    def test_sww2pts_centroids_de0(self):
        """Test that sww information can be converted correctly to pts data at specified coordinates
        - in this case, the centroids.
        """
        import time, os
        from anuga.file.netcdf import NetCDFFile
        # Used for points that lie outside mesh
        NODATA_value = 1758323
        # Setup
        from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
        # Create shallow water domain
        # Same scenario as above but with the default (DE0) flow algorithm;
        # the centroid values therefore differ from the 1_5 case.
        domain = Domain(*rectangular(2, 2))
        B = Transmissive_boundary(domain)
        domain.set_boundary( {'left': B, 'right': B, 'top': B, 'bottom': B})
        domain.set_name('datatest_de0')
        ptsfile = domain.get_name() + '_elevation.pts'
        swwfile = domain.get_name() + '.sww'
        domain.set_datadir('.')
        domain.format = 'sww'
        domain.set_quantity('elevation', lambda x,y: -x-y)
        domain.geo_reference = Geo_reference(56,308500,6189000)
        sww = SWW_file(domain)
        sww.store_connectivity()
        sww.store_timestep()
        #self.domain.tight_slope_limiters = 1
        domain.evolve_to_end(finaltime = 0.01)
        sww.store_timestep()
        # Check contents in NetCDF
        fid = NetCDFFile(sww.filename, netcdf_mode_r)
        # Get the variables
        x = fid.variables['x'][:]
        y = fid.variables['y'][:]
        elevation = fid.variables['elevation'][:]
        time = fid.variables['time'][:]
        stage = fid.variables['stage'][:]
        volumes = fid.variables['volumes'][:]
        # Invoke interpolation for vertex points
        points = num.concatenate( (x[:,num.newaxis],y[:,num.newaxis]), axis=1 )
        points = num.ascontiguousarray(points)
        sww2pts(domain.get_name() + '.sww',
                quantity = 'elevation',
                data_points = points,
                NODATA_value = NODATA_value)
        ref_point_values = elevation
        point_values = Geospatial_data(ptsfile).get_attributes()
        #print 'P', point_values
        #print 'Ref', ref_point_values
        assert num.allclose(point_values, ref_point_values)
        # Invoke interpolation for centroids
        points = domain.get_centroid_coordinates()
        #print points
        sww2pts(domain.get_name() + '.sww',
                quantity = 'elevation',
                data_points = points,
                NODATA_value = NODATA_value)
        #ref_point_values = [-0.5, -0.5, -1, -1, -1, -1, -1.5, -1.5] #At centroids
        ref_point_values = [-0.77777777, -0.77777777, -0.99999998, -0.99999998,
                            -0.99999998, -0.99999998, -1.22222221, -1.22222221]
        point_values = Geospatial_data(ptsfile).get_attributes()
        #print 'P', point_values
        #print 'Ref', ref_point_values
        assert num.allclose(point_values, ref_point_values)
        fid.close()
        #Cleanup
        os.remove(sww.filename)
        os.remove(ptsfile)
#-------------------------------------------------------------
if __name__ == "__main__":
    # Collect every test method named 'test_*' and run it
    # (pass verbosity=2 to TextTestRunner for per-test output).
    test_suite = unittest.makeSuite(Test_2Pts, 'test_')
    unittest.TextTestRunner().run(test_suite)
| [
"anuga.abstract_2d_finite_volumes.mesh_factory.rectangular",
"numpy.allclose",
"unittest.makeSuite",
"anuga.file.netcdf.NetCDFFile",
"anuga.coordinate_transforms.geo_reference.Geo_reference",
"numpy.ascontiguousarray",
"anuga.abstract_2d_finite_volumes.generic_boundary_conditions.Transmissive_boundary",... | [((10409, 10447), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_2Pts', '"""test_"""'], {}), "(Test_2Pts, 'test_')\n", (10427, 10447), False, 'import unittest\n'), ((10461, 10486), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (10484, 10486), False, 'import unittest\n'), ((2812, 2834), 'anuga.file_conversion.sdf2pts.sdf2pts', 'sdf2pts', (["(root + '.sdf')"], {}), "(root + '.sdf')\n", (2819, 2834), False, 'from anuga.file_conversion.sdf2pts import sdf2pts\n'), ((2892, 2932), 'anuga.file.netcdf.NetCDFFile', 'NetCDFFile', (["(root + '.pts')", 'netcdf_mode_r'], {}), "(root + '.pts', netcdf_mode_r)\n", (2902, 2932), False, 'from anuga.file.netcdf import NetCDFFile\n'), ((4146, 4178), 'numpy.allclose', 'num.allclose', (['points', 'ref_points'], {}), '(points, ref_points)\n', (4158, 4178), True, 'import numpy as num\n'), ((4253, 4291), 'numpy.allclose', 'num.allclose', (['elevation', 'ref_elevation'], {}), '(elevation, ref_elevation)\n', (4265, 4291), True, 'import numpy as num\n'), ((4340, 4364), 'os.remove', 'os.remove', (["(root + '.sdf')"], {}), "(root + '.sdf')\n", (4349, 4364), False, 'import time, os\n'), ((4373, 4397), 'os.remove', 'os.remove', (["(root + '.pts')"], {}), "(root + '.pts')\n", (4382, 4397), False, 'import time, os\n'), ((4981, 5010), 'anuga.abstract_2d_finite_volumes.generic_boundary_conditions.Transmissive_boundary', 'Transmissive_boundary', (['domain'], {}), '(domain)\n', (5002, 5010), False, 'from anuga.abstract_2d_finite_volumes.generic_boundary_conditions import Transmissive_boundary\n'), ((5384, 5418), 'anuga.coordinate_transforms.geo_reference.Geo_reference', 'Geo_reference', (['(56)', '(308500)', '(6189000)'], {}), '(56, 308500, 6189000)\n', (5397, 5418), False, 'from anuga.coordinate_transforms.geo_reference import Geo_reference\n'), ((5432, 5448), 'anuga.file.sww.SWW_file', 'SWW_file', (['domain'], {}), '(domain)\n', 
(5440, 5448), False, 'from anuga.file.sww import Write_sww, SWW_file\n'), ((5684, 5723), 'anuga.file.netcdf.NetCDFFile', 'NetCDFFile', (['sww.filename', 'netcdf_mode_r'], {}), '(sww.filename, netcdf_mode_r)\n', (5694, 5723), False, 'from anuga.file.netcdf import NetCDFFile\n'), ((6075, 6138), 'numpy.concatenate', 'num.concatenate', (['(x[:, num.newaxis], y[:, num.newaxis])'], {'axis': '(1)'}), '((x[:, num.newaxis], y[:, num.newaxis]), axis=1)\n', (6090, 6138), True, 'import numpy as num\n'), ((6155, 6184), 'numpy.ascontiguousarray', 'num.ascontiguousarray', (['points'], {}), '(points)\n', (6176, 6184), True, 'import numpy as num\n'), ((6549, 6593), 'numpy.allclose', 'num.allclose', (['point_values', 'ref_point_values'], {}), '(point_values, ref_point_values)\n', (6561, 6593), True, 'import numpy as num\n'), ((7145, 7189), 'numpy.allclose', 'num.allclose', (['point_values', 'ref_point_values'], {}), '(point_values, ref_point_values)\n', (7157, 7189), True, 'import numpy as num\n'), ((7245, 7268), 'os.remove', 'os.remove', (['sww.filename'], {}), '(sww.filename)\n', (7254, 7268), False, 'import time, os\n'), ((7277, 7295), 'os.remove', 'os.remove', (['ptsfile'], {}), '(ptsfile)\n', (7286, 7295), False, 'import time, os\n'), ((7837, 7866), 'anuga.abstract_2d_finite_volumes.generic_boundary_conditions.Transmissive_boundary', 'Transmissive_boundary', (['domain'], {}), '(domain)\n', (7858, 7866), False, 'from anuga.abstract_2d_finite_volumes.generic_boundary_conditions import Transmissive_boundary\n'), ((8240, 8274), 'anuga.coordinate_transforms.geo_reference.Geo_reference', 'Geo_reference', (['(56)', '(308500)', '(6189000)'], {}), '(56, 308500, 6189000)\n', (8253, 8274), False, 'from anuga.coordinate_transforms.geo_reference import Geo_reference\n'), ((8288, 8304), 'anuga.file.sww.SWW_file', 'SWW_file', (['domain'], {}), '(domain)\n', (8296, 8304), False, 'from anuga.file.sww import Write_sww, SWW_file\n'), ((8540, 8579), 'anuga.file.netcdf.NetCDFFile', 'NetCDFFile', 
(['sww.filename', 'netcdf_mode_r'], {}), '(sww.filename, netcdf_mode_r)\n', (8550, 8579), False, 'from anuga.file.netcdf import NetCDFFile\n'), ((8931, 8994), 'numpy.concatenate', 'num.concatenate', (['(x[:, num.newaxis], y[:, num.newaxis])'], {'axis': '(1)'}), '((x[:, num.newaxis], y[:, num.newaxis]), axis=1)\n', (8946, 8994), True, 'import numpy as num\n'), ((9011, 9040), 'numpy.ascontiguousarray', 'num.ascontiguousarray', (['points'], {}), '(points)\n', (9032, 9040), True, 'import numpy as num\n'), ((9405, 9449), 'numpy.allclose', 'num.allclose', (['point_values', 'ref_point_values'], {}), '(point_values, ref_point_values)\n', (9417, 9449), True, 'import numpy as num\n'), ((10154, 10198), 'numpy.allclose', 'num.allclose', (['point_values', 'ref_point_values'], {}), '(point_values, ref_point_values)\n', (10166, 10198), True, 'import numpy as num\n'), ((10254, 10277), 'os.remove', 'os.remove', (['sww.filename'], {}), '(sww.filename)\n', (10263, 10277), False, 'import time, os\n'), ((10286, 10304), 'os.remove', 'os.remove', (['ptsfile'], {}), '(ptsfile)\n', (10295, 10304), False, 'import time, os\n'), ((4908, 4925), 'anuga.abstract_2d_finite_volumes.mesh_factory.rectangular', 'rectangular', (['(2)', '(2)'], {}), '(2, 2)\n', (4919, 4925), False, 'from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular\n'), ((6412, 6436), 'anuga.geospatial_data.geospatial_data.Geospatial_data', 'Geospatial_data', (['ptsfile'], {}), '(ptsfile)\n', (6427, 6436), False, 'from anuga.geospatial_data.geospatial_data import Geospatial_data\n'), ((7008, 7032), 'anuga.geospatial_data.geospatial_data.Geospatial_data', 'Geospatial_data', (['ptsfile'], {}), '(ptsfile)\n', (7023, 7032), False, 'from anuga.geospatial_data.geospatial_data import Geospatial_data\n'), ((7805, 7822), 'anuga.abstract_2d_finite_volumes.mesh_factory.rectangular', 'rectangular', (['(2)', '(2)'], {}), '(2, 2)\n', (7816, 7822), False, 'from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular\n'), 
((9268, 9292), 'anuga.geospatial_data.geospatial_data.Geospatial_data', 'Geospatial_data', (['ptsfile'], {}), '(ptsfile)\n', (9283, 9292), False, 'from anuga.geospatial_data.geospatial_data import Geospatial_data\n'), ((10017, 10041), 'anuga.geospatial_data.geospatial_data.Geospatial_data', 'Geospatial_data', (['ptsfile'], {}), '(ptsfile)\n', (10032, 10041), False, 'from anuga.geospatial_data.geospatial_data import Geospatial_data\n')] |
"""
* @author 孟子喻
* @time 2020.6.2
* @file HMM.py
"""
import numpy as np
class HMM():
    """Discrete hidden Markov model (Li Hang, *Statistical Learning Methods*, ch. 10).

    Attributes:
        A:  (N, N) state transition probability matrix.
        B:  (N, M) observation probability matrix.
        Pi: (N,)   initial state distribution.
    """
    def __init__(self, A, B, Pi):
        self.A = A    # state transition probability matrix
        self.B = B    # observation probability matrix
        self.Pi = Pi  # initial state distribution

    def forward(self, sequence, t):
        """Compute the forward probabilities alpha after ``t`` recursion steps.

        :param sequence: observation sequence (integer symbol indices)
        :param t: observation time (number of recursion steps, 0-based)
        :return: alpha, vector of forward probabilities
        @reference Page 198, Algorithm 10.2
        """
        # Initialisation (10.15): alpha_1(i) = pi_i * b_i(o_1)
        alpha = self.Pi * self.B[:, sequence[0]]
        for i in range(t):
            # Recursion (10.16): alpha_{t+1}(j) = [sum_i alpha_t(i) a_ij] * b_j(o_{t+1});
            # the sum over i is exactly a vector-matrix product.
            alpha = np.dot(alpha, self.A) * self.B[:, sequence[i + 1]]
        return alpha

    def backward(self, sequence, t):
        """Compute the backward probabilities beta for observation time ``t``.

        :param sequence: observation sequence (integer symbol indices)
        :param t: observation time (0-based)
        :return: beta, vector of backward probabilities
        @reference Page 201, Algorithm 10.3
        """
        beta = np.ones(self.A.shape[0])  # initialisation: beta_T(i) = 1
        # Recursion: beta_t(i) = sum_j a_ij * b_j(o_{t+1}) * beta_{t+1}(j),
        # walking backwards from the last observation down to t+1.
        for step in range(len(sequence) - t - 1):
            obs = sequence[len(sequence) - step - 1]
            beta = np.dot(self.A, self.B[:, obs] * beta)
        return beta

    def cal_prob(self, sequence, t, state):
        """Posterior state probability gamma_t(state) = P(i_t = state | O).

        :param t: observation time (0-based)
        :param sequence: observation sequence
        :param state: state index (0-based)
        :return: posterior probability of being in ``state`` at time ``t``
        """
        alpha = self.forward(sequence, t)
        beta = self.backward(sequence, t)
        prob = alpha[state] * beta[state] / np.sum(alpha * beta)
        return prob

    def viterbi(self, sequence):
        """Return the most probable state path (1-indexed) for ``sequence``.

        :param sequence: observation sequence
        :return: path, float array of 1-indexed states
        """
        T = len(sequence)
        # Bug fix: was ``A.shape[0]``, which silently read the module-level
        # global ``A`` instead of this model's transition matrix.
        N = self.A.shape[0]
        path = np.zeros(T)
        delta = np.zeros((T, N))
        Phi = np.zeros((T, N))
        # Initialisation: delta_1(i) = pi_i * b_i(o_1)
        for i in range(N):
            delta[0][i] = self.Pi[i] * self.B[i][sequence[0]]
            Phi[0][i] = 0
        # Recursion: keep, for each state, the best-scoring predecessor.
        for t in range(1, T):
            for i in range(N):
                scores = []
                for j in range(N):
                    scores.append(delta[t - 1][j] * self.A[j][i])
                delta[t][i] = np.max(scores) * self.B[i][sequence[t]]
                Phi[t][i] = np.argmax(scores, axis=0) + 1
        # Termination and backtracking (states reported 1-indexed).
        path[T - 1] = np.argmax(delta[T - 1], axis=0) + 1
        for t in range(T - 2, -1, -1):
            path[t] = Phi[t + 1][int(path[t + 1] - 1)]
        return path
if __name__ == "__main__":
    # Demo run of the HMM on an observation sequence of ball colours.
    sequence = np.array([0, 1, 0, 0, 1, 0, 1, 1])    # {"red": 0, "white": 1}
    A = np.array([[0.5, 0.1, 0.4],  # state transition probability matrix
                  [0.3, 0.5, 0.2],
                  [0.2, 0.2, 0.6]])
    B = np.array([[0.5, 0.5],  # observation probability matrix
                  [0.4, 0.6],
                  [0.7, 0.3]])
    Pi = np.array([0.2, 0.3, 0.5])  # initial state probability vector
    t = 3
    model = HMM(A, B, Pi)
    """计算概率"""
    # Posterior probability of state 2 at time t given the observations.
    prob = model.cal_prob(sequence, t, 2)  # 2:"box_2"
    print("probability:\t", str(prob))
    """viterbi路径"""
    # Most likely hidden state path.
    path = model.viterbi(sequence)  # {"box_0": 0, "box_1": 1, "box_2": 2}
    print('viterbi path:\t', path)
| [
"numpy.ones",
"numpy.argmax",
"numpy.max",
"numpy.sum",
"numpy.array",
"numpy.zeros"
] | [((2430, 2464), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 1, 0, 1, 1]'], {}), '([0, 1, 0, 0, 1, 0, 1, 1])\n', (2438, 2464), True, 'import numpy as np\n'), ((2500, 2561), 'numpy.array', 'np.array', (['[[0.5, 0.1, 0.4], [0.3, 0.5, 0.2], [0.2, 0.2, 0.6]]'], {}), '([[0.5, 0.1, 0.4], [0.3, 0.5, 0.2], [0.2, 0.2, 0.6]])\n', (2508, 2561), True, 'import numpy as np\n'), ((2618, 2664), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]]'], {}), '([[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]])\n', (2626, 2664), True, 'import numpy as np\n'), ((2725, 2750), 'numpy.array', 'np.array', (['[0.2, 0.3, 0.5]'], {}), '([0.2, 0.3, 0.5])\n', (2733, 2750), True, 'import numpy as np\n'), ((885, 909), 'numpy.ones', 'np.ones', (['self.A.shape[0]'], {}), '(self.A.shape[0])\n', (892, 909), True, 'import numpy as np\n'), ((1655, 1666), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (1663, 1666), True, 'import numpy as np\n'), ((1683, 1699), 'numpy.zeros', 'np.zeros', (['(T, N)'], {}), '((T, N))\n', (1691, 1699), True, 'import numpy as np\n'), ((1714, 1730), 'numpy.zeros', 'np.zeros', (['(T, N)'], {}), '((T, N))\n', (1722, 1730), True, 'import numpy as np\n'), ((1052, 1072), 'numpy.sum', 'np.sum', (['beta'], {'axis': '(1)'}), '(beta, axis=1)\n', (1058, 1072), True, 'import numpy as np\n'), ((1416, 1436), 'numpy.sum', 'np.sum', (['(alpha * beta)'], {}), '(alpha * beta)\n', (1422, 1436), True, 'import numpy as np\n'), ((2166, 2197), 'numpy.argmax', 'np.argmax', (['delta[T - 1]'], {'axis': '(0)'}), '(delta[T - 1], axis=0)\n', (2175, 2197), True, 'import numpy as np\n'), ((587, 608), 'numpy.sum', 'np.sum', (['alpha'], {'axis': '(0)'}), '(alpha, axis=0)\n', (593, 608), True, 'import numpy as np\n'), ((2056, 2065), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (2062, 2065), True, 'import numpy as np\n'), ((2119, 2139), 'numpy.argmax', 'np.argmax', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (2128, 2139), True, 'import numpy as np\n')] |
'''
Compute classification metrics for the preference learning models. Plot the predictions.
Created on 21 Oct 2016
@author: simpson
'''
import logging
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import f1_score, roc_auc_score, log_loss, accuracy_score
from scipy.stats import kendalltau
def compute_ranking_metrics(nmethods, gold_ranks, predicted_ranks, metrics=None, nruns=1, r=0):
    '''Compute ranking metrics (Kendall's tau) for each method.

    :param nmethods: number of methods being compared
    :param gold_ranks: gold-standard ranking
    :param predicted_ranks: predicted ranking
    :param metrics: dict of metric arrays to fill in; created when None
    :param nruns: total number of runs (sizes the metric arrays)
    :param r: index of the current run
    :return: the metrics dict with metrics['tau'] of shape (nmethods, nruns)
    '''
    # Avoid the mutable-default-argument trap: the old ``metrics = {}``
    # default was shared (and mutated) across calls.
    if metrics is None:
        metrics = {}
    if not len(metrics):
        # Bug fix: this was np.zeros(nmethods), a 1-D array that cannot be
        # indexed with [i, r]; allocate (nmethods, nruns) to match
        # compute_metrics() below.
        metrics['tau'] = np.zeros((nmethods, nruns))
    for i in range(nmethods):
        # NOTE(review): the same rank vectors are scored for every method i —
        # presumably per-method predictions are sliced by the caller; confirm.
        metrics['tau'][i, r], _ = kendalltau(gold_ranks, predicted_ranks)
    return metrics
def compute_metrics(nmethods, gold_prefs, predictions, metrics = {}, nruns=1, r=0):
    '''Fill in classification metrics (accuracy, F1, weighted AUC, log loss)
    for each method's preference predictions against gold labels in {0, 0.5, 1}.

    :param nmethods: number of methods; ``predictions`` has one column each
    :param gold_prefs: gold preference labels, values in {0, 0.5, 1}
    :param predictions: (ndata, nmethods) predicted preference probabilities
    :param metrics: dict of (nmethods, nruns) arrays to fill; created if empty.
        NOTE(review): mutable default argument — state leaks between calls
        that rely on the default; callers should pass their own dict.
    :param nruns: total number of runs (sizes the metric arrays)
    :param r: index of the current run
    :return: the (possibly newly populated) metrics dict
    '''
    # Task C2, C4: Compute accuracy metrics ---------------------------------------------------------------------------
    logging.info('Task C2/C4, accuracy metrics')
    if not len(metrics):
        metrics['acc'] = np.zeros((nmethods, nruns))
        metrics['f1'] = np.zeros((nmethods, nruns))
        metrics['auc_roc'] = np.zeros((nmethods, nruns))
        metrics['log_loss'] = np.zeros((nmethods, nruns))
    # Not sure how to deal with preference labels where the true label is 0.5 with f1 score. For ROC curve we can
    # combine two AUCs for negative class and positive class.
    for i in range(nmethods):
        # Discretise the predicted probability into three one-hot classes:
        # a<b (p < 1/3), a~b (1/3 <= p < 2/3), a>b (p > 2/3).
        ind_array = np.concatenate( ( predictions[:, i:i+1] < 1.0/3.0,
                     (predictions[:, i:i+1] >= 1.0/3.0) & (predictions[:, i:i+1] < 2.0/3.0),
                     predictions[:, i:i+1] > 2.0/3.0 ), axis=1)
        ind_array_gold = np.concatenate( ( gold_prefs[:, np.newaxis] == 0,
                     gold_prefs[:, np.newaxis] == 0.5,
                     gold_prefs[:, np.newaxis] == 1 ), axis=1)
        # Debug output of misclassified rows.
        # NOTE(review): np.sum(mistakes, axis=1) is used as an *index* array,
        # not a boolean mask — looks like leftover debugging; confirm intent.
        mistakes = np.round(ind_array) != ind_array_gold
        print(ind_array[np.sum(mistakes, axis=1), :])
        print(ind_array_gold[np.sum(mistakes, axis=1), :])
        metrics['acc'][i,r] = accuracy_score(ind_array_gold, ind_array)
        metrics['f1'][i,r] = f1_score(ind_array_gold, ind_array, average='weighted')
        # One-vs-rest AUC for each of the three outcomes, combined as a
        # weighted sum by class frequency.
        auc_a_less_b = roc_auc_score(gold_prefs==0, 1 - predictions[:, i])
        frac_a_less_b = np.sum(gold_prefs==0) / float(len(gold_prefs))
        auc_a_more_b = roc_auc_score(gold_prefs==1, predictions[:, i])
        frac_a_more_b = np.sum(gold_prefs==1) / float(len(gold_prefs))
        auc_a_equal_b = roc_auc_score(gold_prefs==0.5, 2 * (1 - np.abs(predictions[:, i] - 0.5)))
        frac_a_equal_b = np.sum(gold_prefs==0.5) / float(len(gold_prefs))
        metrics['auc_roc'][i,r] = auc_a_less_b * frac_a_less_b + auc_a_more_b * frac_a_more_b + auc_a_equal_b * frac_a_equal_b
        # Clamp probabilities away from 0/1 so the log terms stay finite.
        predictions_safe = predictions[:, i].copy()
        predictions_safe[predictions[:, i]<1e-7] = 1e-7
        predictions_safe[predictions[:, i]>(1-1e-7)] = 1 - 1e-7
        metrics['log_loss'][i,r] = -np.mean(gold_prefs * np.log(predictions_safe) + (1 - gold_prefs) * np.log(1 - predictions_safe))
    return metrics
def plot_metrics(plotdir, metrics, nmethods, method_labels, nfolds, nanno, nanno_is_min=False, xlabels=None):
    '''Plot F1, ROC AUC, cross entropy and accuracy, saving one .eps per metric.

    For single-run metrics (shape[1] == 1) a bar chart per method is drawn;
    for multiple runs, one line per method is plotted instead.
    NOTE(review): plt.hold() was deprecated and removed in matplotlib >= 3.0 —
    these calls will fail on modern matplotlib; confirm the pinned version.

    :param plotdir: directory the .eps figures are written to
    :param metrics: dict of (nmethods, nruns) arrays from compute_metrics()
    :param nmethods: number of methods (rows of each metric array)
    :param method_labels: one display label per method
    :param nfolds: number of cross-validation folds (title text only)
    :param nanno: number of annotators (title text only)
    :param nanno_is_min: if True, titles read "at least nanno annotators"
    :param xlabels: optional x-axis label(s) for the multi-run line plots
    '''
    # Task C9/C10: Plotting metrics -----------------------------------------------------------------------------------
    logging.info('Task C9/10, plotting accuracy metrics')
    # --- F1 scores ---
    _, ax = plt.subplots()
    if nanno_is_min:
        ax.set_title('F1 Scores with %i-fold Cross Validation (data points with at least %i annotators)' % (nfolds, nanno))
    else:
        ax.set_title('F1 Scores with %i-fold Cross Validation (data points with %i annotators)' % (nfolds, nanno))
    ind = np.arange(nmethods)
    width = 0.6
    if metrics['f1'].shape[1] == 1:
        ax.bar(ind, metrics['f1'], width=width)
        ax.set_xlabel('Method')
        ax.set_ylabel('F1 Score')
        ax.set_xticks(ind + (width/2.0))
        ax.set_xticklabels(method_labels)
    else:
        plt.hold(True)
        for m in range(nmethods):
            plt.plot(metrics['f1'][m], label=method_labels[m])
        if np.any(xlabels):
            plt.xlabel(xlabels)
        plt.legend(loc='best')
    plt.savefig(plotdir + '/f1scores.eps')
    # --- ROC AUC ---
    _, ax = plt.subplots()
    ax.set_title('AUC of ROC Curve with %i-fold Cross Validation' % nfolds)
    if metrics['auc_roc'].shape[1] == 1:
        ax.bar(ind, metrics['auc_roc'], width=width)
        ax.set_xlabel('Method')
        ax.set_ylabel('AUC')
        ax.set_xticks(ind + (width/2.0))
        ax.set_xticklabels(method_labels)
    else:
        plt.hold(True)
        for m in range(nmethods):
            plt.plot(metrics['auc_roc'][m], label=method_labels[m])
        if np.any(xlabels):
            plt.xlabel(xlabels)
        plt.legend(loc='best')
    plt.savefig(plotdir + '/auc_roc.eps')
    # --- Cross entropy ---
    _, ax = plt.subplots()
    ax.set_title('Cross Entropy Error with %i-fold Cross Validation' % nfolds)
    if metrics['log_loss'].shape[1] == 1:
        plt.bar(ind, metrics['log_loss'], width=width)
        ax.set_xlabel('Method')
        ax.set_ylabel('Cross Entropy')
        ax.set_xticks(ind + (width/2.0))
        ax.set_xticklabels(method_labels)
    else:
        plt.hold(True)
        for m in range(nmethods):
            plt.plot(metrics['log_loss'][m], label=method_labels[m])
        if np.any(xlabels):
            plt.xlabel(xlabels)
        plt.legend(loc='best')
    plt.savefig(plotdir + '/cross_entropy.eps')
    # --- Accuracy ---
    _, ax = plt.subplots()
    ax.set_title('Accuracy with %i-fold Cross Validation' % nfolds)
    if metrics['acc'].shape[1] == 1:
        plt.bar(ind, metrics['acc'], width=width)
        ax.set_xlabel('Method')
        ax.set_ylabel('Accuracy')
        ax.set_xticks(ind + (width/2.0))
        ax.set_xticklabels(method_labels)
    else:
        plt.hold(True)
        for m in range(nmethods):
            plt.plot(metrics['acc'][m], label=method_labels[m])
        if np.any(xlabels):
            plt.xlabel(xlabels)
        plt.legend(loc='best')
plt.savefig(plotdir + '/accuracy.eps') | [
"numpy.log",
"sklearn.metrics.roc_auc_score",
"logging.info",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.round",
"scipy.stats.kendalltau",
"numpy.abs",
"matplotlib.pyplot.savefig",
"numpy.any",
"sklearn.metrics.accuracy_score",
"matplo... | [((840, 884), 'logging.info', 'logging.info', (['"""Task C2/C4, accuracy metrics"""'], {}), "('Task C2/C4, accuracy metrics')\n", (852, 884), False, 'import logging\n'), ((3421, 3474), 'logging.info', 'logging.info', (['"""Task C9/10, plotting accuracy metrics"""'], {}), "('Task C9/10, plotting accuracy metrics')\n", (3433, 3474), False, 'import logging\n'), ((3487, 3501), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3499, 3501), True, 'from matplotlib import pyplot as plt\n'), ((3782, 3801), 'numpy.arange', 'np.arange', (['nmethods'], {}), '(nmethods)\n', (3791, 3801), True, 'import numpy as np\n'), ((4285, 4323), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plotdir + '/f1scores.eps')"], {}), "(plotdir + '/f1scores.eps')\n", (4296, 4323), True, 'from matplotlib import pyplot as plt\n'), ((4343, 4357), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4355, 4357), True, 'from matplotlib import pyplot as plt\n'), ((4928, 4965), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plotdir + '/auc_roc.eps')"], {}), "(plotdir + '/auc_roc.eps')\n", (4939, 4965), True, 'from matplotlib import pyplot as plt\n'), ((4984, 4998), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4996, 4998), True, 'from matplotlib import pyplot as plt\n'), ((5585, 5628), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plotdir + '/cross_entropy.eps')"], {}), "(plotdir + '/cross_entropy.eps')\n", (5596, 5628), True, 'from matplotlib import pyplot as plt\n'), ((5646, 5660), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5658, 5660), True, 'from matplotlib import pyplot as plt\n'), ((6212, 6250), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plotdir + '/accuracy.eps')"], {}), "(plotdir + '/accuracy.eps')\n", (6223, 6250), True, 'from matplotlib import pyplot as plt\n'), ((470, 488), 'numpy.zeros', 'np.zeros', (['nmethods'], {}), '(nmethods)\n', (478, 488), True, 'import numpy as np\n'), ((558, 597), 
'scipy.stats.kendalltau', 'kendalltau', (['gold_ranks', 'predicted_ranks'], {}), '(gold_ranks, predicted_ranks)\n', (568, 597), False, 'from scipy.stats import kendalltau\n'), ((940, 967), 'numpy.zeros', 'np.zeros', (['(nmethods, nruns)'], {}), '((nmethods, nruns))\n', (948, 967), True, 'import numpy as np\n'), ((992, 1019), 'numpy.zeros', 'np.zeros', (['(nmethods, nruns)'], {}), '((nmethods, nruns))\n', (1000, 1019), True, 'import numpy as np\n'), ((1049, 1076), 'numpy.zeros', 'np.zeros', (['(nmethods, nruns)'], {}), '((nmethods, nruns))\n', (1057, 1076), True, 'import numpy as np\n'), ((1107, 1134), 'numpy.zeros', 'np.zeros', (['(nmethods, nruns)'], {}), '((nmethods, nruns))\n', (1115, 1134), True, 'import numpy as np\n'), ((1384, 1571), 'numpy.concatenate', 'np.concatenate', (['(predictions[:, i:i + 1] < 1.0 / 3.0, (predictions[:, i:i + 1] >= 1.0 / 3.0\n ) & (predictions[:, i:i + 1] < 2.0 / 3.0), predictions[:, i:i + 1] > \n 2.0 / 3.0)'], {'axis': '(1)'}), '((predictions[:, i:i + 1] < 1.0 / 3.0, (predictions[:, i:i + \n 1] >= 1.0 / 3.0) & (predictions[:, i:i + 1] < 2.0 / 3.0), predictions[:,\n i:i + 1] > 2.0 / 3.0), axis=1)\n', (1398, 1571), True, 'import numpy as np\n'), ((1652, 1778), 'numpy.concatenate', 'np.concatenate', (['(gold_prefs[:, np.newaxis] == 0, gold_prefs[:, np.newaxis] == 0.5, \n gold_prefs[:, np.newaxis] == 1)'], {'axis': '(1)'}), '((gold_prefs[:, np.newaxis] == 0, gold_prefs[:, np.newaxis] ==\n 0.5, gold_prefs[:, np.newaxis] == 1), axis=1)\n', (1666, 1778), True, 'import numpy as np\n'), ((2083, 2124), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['ind_array_gold', 'ind_array'], {}), '(ind_array_gold, ind_array)\n', (2097, 2124), False, 'from sklearn.metrics import f1_score, roc_auc_score, log_loss, accuracy_score\n'), ((2154, 2209), 'sklearn.metrics.f1_score', 'f1_score', (['ind_array_gold', 'ind_array'], {'average': '"""weighted"""'}), "(ind_array_gold, ind_array, average='weighted')\n", (2162, 2209), False, 'from sklearn.metrics 
import f1_score, roc_auc_score, log_loss, accuracy_score\n'), ((2243, 2296), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['(gold_prefs == 0)', '(1 - predictions[:, i])'], {}), '(gold_prefs == 0, 1 - predictions[:, i])\n', (2256, 2296), False, 'from sklearn.metrics import f1_score, roc_auc_score, log_loss, accuracy_score\n'), ((2400, 2449), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['(gold_prefs == 1)', 'predictions[:, i]'], {}), '(gold_prefs == 1, predictions[:, i])\n', (2413, 2449), False, 'from sklearn.metrics import f1_score, roc_auc_score, log_loss, accuracy_score\n'), ((4072, 4086), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (4080, 4086), True, 'from matplotlib import pyplot as plt\n'), ((4195, 4210), 'numpy.any', 'np.any', (['xlabels'], {}), '(xlabels)\n', (4201, 4210), True, 'import numpy as np\n'), ((4252, 4274), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (4262, 4274), True, 'from matplotlib import pyplot as plt\n'), ((4697, 4711), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (4705, 4711), True, 'from matplotlib import pyplot as plt\n'), ((4825, 4840), 'numpy.any', 'np.any', (['xlabels'], {}), '(xlabels)\n', (4831, 4840), True, 'import numpy as np\n'), ((4882, 4904), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (4892, 4904), True, 'from matplotlib import pyplot as plt\n'), ((5132, 5178), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', "metrics['log_loss']"], {'width': 'width'}), "(ind, metrics['log_loss'], width=width)\n", (5139, 5178), True, 'from matplotlib import pyplot as plt\n'), ((5354, 5368), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (5362, 5368), True, 'from matplotlib import pyplot as plt\n'), ((5483, 5498), 'numpy.any', 'np.any', (['xlabels'], {}), '(xlabels)\n', (5489, 5498), True, 'import numpy as np\n'), ((5540, 5562), 'matplotlib.pyplot.legend', 'plt.legend', ([], 
{'loc': '"""best"""'}), "(loc='best')\n", (5550, 5562), True, 'from matplotlib import pyplot as plt\n'), ((5774, 5815), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', "metrics['acc']"], {'width': 'width'}), "(ind, metrics['acc'], width=width)\n", (5781, 5815), True, 'from matplotlib import pyplot as plt\n'), ((5986, 6000), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (5994, 6000), True, 'from matplotlib import pyplot as plt\n'), ((6110, 6125), 'numpy.any', 'np.any', (['xlabels'], {}), '(xlabels)\n', (6116, 6125), True, 'import numpy as np\n'), ((6167, 6189), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (6177, 6189), True, 'from matplotlib import pyplot as plt\n'), ((1893, 1912), 'numpy.round', 'np.round', (['ind_array'], {}), '(ind_array)\n', (1901, 1912), True, 'import numpy as np\n'), ((2319, 2342), 'numpy.sum', 'np.sum', (['(gold_prefs == 0)'], {}), '(gold_prefs == 0)\n', (2325, 2342), True, 'import numpy as np\n'), ((2472, 2495), 'numpy.sum', 'np.sum', (['(gold_prefs == 1)'], {}), '(gold_prefs == 1)\n', (2478, 2495), True, 'import numpy as np\n'), ((2653, 2678), 'numpy.sum', 'np.sum', (['(gold_prefs == 0.5)'], {}), '(gold_prefs == 0.5)\n', (2659, 2678), True, 'import numpy as np\n'), ((4133, 4183), 'matplotlib.pyplot.plot', 'plt.plot', (["metrics['f1'][m]"], {'label': 'method_labels[m]'}), "(metrics['f1'][m], label=method_labels[m])\n", (4141, 4183), True, 'from matplotlib import pyplot as plt\n'), ((4224, 4243), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabels'], {}), '(xlabels)\n', (4234, 4243), True, 'from matplotlib import pyplot as plt\n'), ((4758, 4813), 'matplotlib.pyplot.plot', 'plt.plot', (["metrics['auc_roc'][m]"], {'label': 'method_labels[m]'}), "(metrics['auc_roc'][m], label=method_labels[m])\n", (4766, 4813), True, 'from matplotlib import pyplot as plt\n'), ((4854, 4873), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabels'], {}), '(xlabels)\n', (4864, 4873), True, 'from 
matplotlib import pyplot as plt\n'), ((5415, 5471), 'matplotlib.pyplot.plot', 'plt.plot', (["metrics['log_loss'][m]"], {'label': 'method_labels[m]'}), "(metrics['log_loss'][m], label=method_labels[m])\n", (5423, 5471), True, 'from matplotlib import pyplot as plt\n'), ((5512, 5531), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabels'], {}), '(xlabels)\n', (5522, 5531), True, 'from matplotlib import pyplot as plt\n'), ((6047, 6098), 'matplotlib.pyplot.plot', 'plt.plot', (["metrics['acc'][m]"], {'label': 'method_labels[m]'}), "(metrics['acc'][m], label=method_labels[m])\n", (6055, 6098), True, 'from matplotlib import pyplot as plt\n'), ((6139, 6158), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabels'], {}), '(xlabels)\n', (6149, 6158), True, 'from matplotlib import pyplot as plt\n'), ((1955, 1979), 'numpy.sum', 'np.sum', (['mistakes'], {'axis': '(1)'}), '(mistakes, axis=1)\n', (1961, 1979), True, 'import numpy as np\n'), ((2014, 2038), 'numpy.sum', 'np.sum', (['mistakes'], {'axis': '(1)'}), '(mistakes, axis=1)\n', (2020, 2038), True, 'import numpy as np\n'), ((2594, 2625), 'numpy.abs', 'np.abs', (['(predictions[:, i] - 0.5)'], {}), '(predictions[:, i] - 0.5)\n', (2600, 2625), True, 'import numpy as np\n'), ((3081, 3105), 'numpy.log', 'np.log', (['predictions_safe'], {}), '(predictions_safe)\n', (3087, 3105), True, 'import numpy as np\n'), ((3127, 3155), 'numpy.log', 'np.log', (['(1 - predictions_safe)'], {}), '(1 - predictions_safe)\n', (3133, 3155), True, 'import numpy as np\n')] |
# Lint as: python2, python3
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions related to preprocessing inputs."""
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
def flip_dim(tensor_list, prob=0.5, dim=1):
  """Randomly flips a dimension of the given tensors.

  The decision to randomly flip the `Tensors` is made together. In other words,
  all or none of the images passed in are flipped.

  Note that tf.random_flip_left_right and tf.random_flip_up_down aren't used so
  that we can control for the probability as well as ensure the same decision
  is applied across the images.

  Args:
    tensor_list: A list of `Tensors` with the same number of dimensions.
    prob: The probability of a left-right flip.
    dim: The dimension to flip, 0, 1, ..

  Returns:
    outputs: A list of the possibly flipped `Tensors` as well as an indicator
      `Tensor` at the end whose value is `True` if the inputs were flipped and
      `False` otherwise.

  Raises:
    ValueError: If dim is negative or greater than the dimension of a `Tensor`.
  """
  # Validate `dim` up front. Previously the check lived inside the flip
  # branch of tf.cond, so in eager execution an invalid `dim` went
  # undetected whenever the random draw selected the no-flip branch.
  for tensor in tensor_list:
    if dim < 0 or dim >= len(tensor.get_shape().as_list()):
      raise ValueError('dim must represent a valid dimension.')

  random_value = tf.random.uniform([])

  def flip():
    # Reverse every tensor along the same dimension.
    return [tf.compat.v1.reverse_v2(tensor, [dim]) for tensor in tensor_list]

  is_flipped = tf.less_equal(random_value, prob)
  outputs = tf.cond(is_flipped, flip, lambda: tensor_list)
  if not isinstance(outputs, (list, tuple)):
    outputs = [outputs]
  # Append the flip indicator so callers can apply the same decision elsewhere.
  outputs.append(is_flipped)
  return outputs
def _image_dimensions(image, rank):
  """Returns the dimensions of an image tensor.

  Args:
    image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
    rank: The expected rank of the image.

  Returns:
    A list corresponding to the dimensions of the input image. Dimensions
    that are statically known are python integers, otherwise they are integer
    scalar tensors.
  """
  static_shape = image.get_shape()
  if static_shape.is_fully_defined():
    # Everything is known at graph-construction time; return plain ints.
    return static_shape.as_list()
  # Fall back to dynamic shape tensors for the unknown dimensions only.
  static_dims = static_shape.with_rank(rank).as_list()
  dynamic_dims = tf.unstack(tf.shape(image), rank)
  return [
      static if static is not None else dynamic
      for static, dynamic in zip(static_dims, dynamic_dims)
  ]
def get_label_resize_method(label):
  """Returns the resize method to use for a label tensor based on its dtype.

  Args:
    label: Groundtruth label tensor.

  Returns:
    tf.image.ResizeMethod.BILINEAR, if label dtype is floating.
    tf.image.ResizeMethod.NEAREST_NEIGHBOR, if label dtype is integer.

  Raises:
    ValueError: If label is neither floating nor integer.
  """
  dtype = label.dtype
  # Integer labels (e.g. class ids) must not be interpolated between classes.
  if dtype.is_integer:
    return tf.image.ResizeMethod.NEAREST_NEIGHBOR
  if dtype.is_floating:
    return tf.image.ResizeMethod.BILINEAR
  raise ValueError('Label type must be either floating or integer.')
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
                        target_width, pad_value):
  """Pads the given image with the given pad_value.

  Works like tf.image.pad_to_bounding_box, except it can pad the image
  with any given arbitrary pad value and also handle images whose sizes are not
  known during graph construction.

  Args:
    image: 3-D tensor with shape [height, width, channels]
    offset_height: Number of rows of zeros to add on top.
    offset_width: Number of columns of zeros to add on the left.
    target_height: Height of output image.
    target_width: Width of output image.
    pad_value: Value to pad the image tensor with.

  Returns:
    3-D tensor of shape [target_height, target_width, channels].

  Raises:
    ValueError: If the shape of image is incompatible with the offset_* or
      target_* arguments.
  """
  with tf.compat.v1.name_scope(None, 'pad_to_bounding_box', [image]):
    image = tf.convert_to_tensor(image, name='image')
    original_dtype = image.dtype
    if original_dtype != tf.float32 and original_dtype != tf.float64:
      # If image dtype is not float, we convert it to int32 to avoid overflow.
      image = tf.cast(image, tf.int32)
    image_rank_assert = tf.Assert(
        tf.logical_or(
            tf.equal(tf.rank(image), 3),
            tf.equal(tf.rank(image), 4)),
        ['Wrong image tensor rank.'])
    # Trick: subtract pad_value now, zero-pad with tf.pad, then add pad_value
    # back at the end — this effectively pads with pad_value.
    with tf.control_dependencies([image_rank_assert]):
      image -= pad_value
    image_shape = image.get_shape()
    is_batch = True
    if image_shape.ndims == 3:
      # Unbatched input: fake a batch dimension and remove it again below.
      is_batch = False
      image = tf.expand_dims(image, 0)
    elif image_shape.ndims is None:
      # Rank unknown at graph construction; assume unbatched 3-D input.
      is_batch = False
      image = tf.expand_dims(image, 0)
      image.set_shape([None] * 4)
    elif image.get_shape().ndims != 4:
      raise ValueError('Input image must have either 3 or 4 dimensions.')
    _, height, width, _ = _image_dimensions(image, rank=4)
    target_width_assert = tf.Assert(
        tf.greater_equal(
            target_width, width),
        ['target_width must be >= width'])
    target_height_assert = tf.Assert(
        tf.greater_equal(target_height, height),
        ['target_height must be >= height'])
    with tf.control_dependencies([target_width_assert]):
      after_padding_width = target_width - offset_width - width
    with tf.control_dependencies([target_height_assert]):
      after_padding_height = target_height - offset_height - height
    offset_assert = tf.Assert(
        tf.logical_and(
            tf.greater_equal(after_padding_width, 0),
            tf.greater_equal(after_padding_height, 0)),
        ['target size not possible with the given target offsets'])
    # Per-dimension [before, after] padding amounts; batch and channel dims
    # are never padded.
    batch_params = tf.stack([0, 0])
    height_params = tf.stack([offset_height, after_padding_height])
    width_params = tf.stack([offset_width, after_padding_width])
    channel_params = tf.stack([0, 0])
    with tf.control_dependencies([offset_assert]):
      paddings = tf.stack([batch_params, height_params, width_params,
                           channel_params])
    padded = tf.pad(image, paddings)
    if not is_batch:
      padded = tf.squeeze(padded, axis=[0])
    # Undo the earlier subtraction so padded pixels equal pad_value.
    outputs = padded + pad_value
    if outputs.dtype != original_dtype:
      outputs = tf.cast(outputs, original_dtype)
    return outputs
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    The cropped (and resized) image.

  Raises:
    ValueError: if `image` doesn't have rank of 3.
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  dynamic_shape = tf.shape(image)
  static_shape = image.get_shape().as_list()
  # The static rank must be known and equal to 3; everything else may be
  # dynamic.
  if len(static_shape) != 3:
    raise ValueError('input must have rank of 3')
  num_channels = static_shape[2]

  rank_check = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_check]):
    target_shape = tf.stack([crop_height, crop_width, dynamic_shape[2]])

  size_check = tf.Assert(
      tf.logical_and(
          tf.greater_equal(dynamic_shape[0], crop_height),
          tf.greater_equal(dynamic_shape[1], crop_width)),
      ['Crop size greater than the image size.'])
  begin = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32)

  # Use tf.slice instead of crop_to_bounding_box since it accepts tensors
  # for the crop size.
  with tf.control_dependencies([size_check]):
    cropped = tf.slice(image, begin, target_shape)
  cropped = tf.reshape(cropped, target_shape)
  # Restore whatever static shape information we have.
  cropped.set_shape([crop_height, crop_width, num_channels])
  return cropped
def random_crop(image_list, crop_height, crop_width):
  """Crops the given list of images.

  The function applies the same crop to each image in the list. This can be
  effectively applied when there are multiple image inputs of the same
  dimension such as:

    image, depths, normals = random_crop([image, depths, normals], 120, 150)

  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the new height.
    crop_width: the new width.

  Returns:
    the image_list with cropped images.

  Raises:
    ValueError: if there are multiple image inputs provided with different size
      or the images are smaller than the crop dimensions.
  """
  if not image_list:
    raise ValueError('Empty image_list.')
  # Compute the rank assertions: every tensor must be rank-3 [H, W, C].
  rank_assertions = []
  for i in range(len(image_list)):
    image_rank = tf.rank(image_list[i])
    rank_assert = tf.Assert(
        tf.equal(image_rank, 3), [
            'Wrong rank for tensor %d in image_list [expected] [actual]', i, 3,
            image_rank
        ])
    rank_assertions.append(rank_assert)
  with tf.control_dependencies([rank_assertions[0]]):
    image_shape = tf.shape(image_list[0])
  image_height = image_shape[0]
  image_width = image_shape[1]
  crop_size_assert = tf.Assert(
      tf.logical_and(
          tf.greater_equal(image_height, crop_height),
          tf.greater_equal(image_width, crop_width)),
      ['Crop size greater than the image size.'])
  asserts = [rank_assertions[0], crop_size_assert]
  # Every subsequent image must share the first image's height and width so
  # the same crop window is valid for all of them.
  for i in range(1, len(image_list)):
    image = image_list[i]
    asserts.append(rank_assertions[i])
    with tf.control_dependencies([rank_assertions[i]]):
      shape = tf.shape(image)
    height = shape[0]
    width = shape[1]
    height_assert = tf.Assert(
        tf.equal(height, image_height), [
            'Wrong height for tensor %d in image_list [expected][actual]', i,
            height, image_height
        ])
    width_assert = tf.Assert(
        tf.equal(width, image_width), [
            'Wrong width for tensor %d in image_list [expected][actual]', i,
            width, image_width
        ])
    asserts.extend([height_assert, width_assert])
  # Create a random bounding box.
  #
  # Use tf.random.uniform and not numpy.random.rand as doing the former would
  # generate random numbers at graph eval time, unlike the latter which
  # generates random numbers at graph definition time.
  with tf.control_dependencies(asserts):
    max_offset_height = tf.reshape(image_height - crop_height + 1, [])
    max_offset_width = tf.reshape(image_width - crop_width + 1, [])
  offset_height = tf.random.uniform([],
                                    maxval=max_offset_height,
                                    dtype=tf.int32)
  offset_width = tf.random.uniform([], maxval=max_offset_width, dtype=tf.int32)
  # Apply the one shared crop window to every image in the list.
  return [_crop(image, offset_height, offset_width,
                crop_height, crop_width) for image in image_list]
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
  """Gets a random scale value.

  Args:
    min_scale_factor: Minimum scale value.
    max_scale_factor: Maximum scale value.
    step_size: The step size from minimum to maximum value.

  Returns:
    A tensor with random scale value selected between minimum and maximum
    value. If `min_scale_factor` and `max_scale_factor` are the same, a number
    is returned instead.

  Raises:
    ValueError: min_scale_factor has unexpected value.
  """
  if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
    raise ValueError('Unexpected value of min_scale_factor.')

  # Degenerate range: nothing random to sample.
  if min_scale_factor == max_scale_factor:
    return np.float32(min_scale_factor)

  # step_size == 0 means continuous sampling over [min, max).
  if step_size == 0:
    return tf.random.uniform([1],
                             minval=min_scale_factor,
                             maxval=max_scale_factor)

  # Otherwise pick one value from the discrete grid
  # [min, min + step, ..., max] uniformly at random.
  num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
  scale_grid = tf.lin_space(min_scale_factor, max_scale_factor,
                            num_steps)
  return tf.compat.v1.random_shuffle(scale_grid)[0]
def randomly_scale_image_and_label(image, label=None, scale=1.0):
  """Randomly scales image and label.

  Args:
    image: Image with shape [height, width, 3].
    label: Label with shape [height, width, 1].
    scale: The value to scale image and label.

  Returns:
    Scaled image and label.
  """
  # Identity scale: return the inputs untouched.
  if scale == 1.0:
    return image, label

  shape = tf.shape(image)
  scaled_hw = tf.cast(
      tf.cast([shape[0], shape[1]], tf.float32) * scale,
      tf.int32)
  # tf2 resize op with anti-aliasing (the tf1 resize_bilinear op lacked it).
  image = tf.compat.v2.image.resize(
      image, scaled_hw, method='bilinear', antialias=True)
  if label is None:
    return image, label
  # Labels use nearest-neighbor (or bilinear for float labels) per dtype.
  label = tf.compat.v1.image.resize(
      label,
      scaled_hw,
      method=get_label_resize_method(label),
      align_corners=True)
  return image, label
def resolve_shape(tensor, rank=None, scope=None):
  """Fully resolves the shape of a Tensor.

  Use as much as possible the shape components already known during graph
  creation and resolve the remaining ones during runtime.

  Args:
    tensor: Input tensor whose shape we query.
    rank: The rank of the tensor, provided that we know it.
    scope: Optional name scope.

  Returns:
    shape: The full shape of the tensor.
  """
  with tf.compat.v1.name_scope(scope, 'resolve_shape', [tensor]):
    if rank is None:
      shape = tensor.get_shape().as_list()
    else:
      shape = tensor.get_shape().with_rank(rank).as_list()

    # Replace statically-unknown dimensions with dynamic shape tensors.
    if None in shape:
      dynamic_shape = tf.shape(tensor)
      shape = [
          dim if dim is not None else dynamic_shape[i]
          for i, dim in enumerate(shape)
      ]
    return shape
def resize_to_range_helper(input_shape, min_size, max_size=None, factor=None,
                           keep_aspect_ratio=True):
  """Determines output size in specified range.

  The output size can be described by two cases:
  1. If the image can be rescaled so its minimum size is equal to min_size
     without the other side exceeding max_size, then do so.
  2. Otherwise, resize so the largest side is equal to max_size.

  An integer in `range(factor)` is added to the computed sides so that the
  final dimensions are multiples of `factor` plus one.

  Args:
    input_shape: A 2-element list with the [height, width] of the input image.
    min_size: (scalar) desired size of the smaller image side.
    max_size: (optional) (scalar) maximum allowed size of the larger image
      side.
    factor: Make output size multiple of factor plus one.
    keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input
      will be resized while keeping the original aspect ratio. If False, the
      input will be resized to [max_resize_value, max_resize_value] without
      keeping the original aspect ratio.

  Returns:
    A 1-D tensor containing the [new_height, new_width].
  """
  height, width = input_shape
  height = tf.cast(height, tf.float32)
  width = tf.cast(width, tf.float32)
  min_size = tf.cast(min_size, tf.float32)

  def _scaled_size(scale):
    # Floor both sides scaled by the same factor, as int32 [h, w].
    return tf.stack([
        tf.cast(tf.floor(height * scale), tf.int32),
        tf.cast(tf.floor(width * scale), tf.int32),
    ])

  # Candidate 1: scale so that the smaller side equals min_size.
  large_size = _scaled_size(min_size / tf.minimum(height, width))

  if max_size is None:
    output_shape = large_size
  else:
    # Candidate 2: scale so that the larger side equals max_size; use it
    # only when candidate 1 would overshoot max_size.
    max_size = tf.cast(max_size, tf.float32)
    small_size = _scaled_size(max_size / tf.maximum(height, width))
    output_shape = tf.cond(
        tf.cast(tf.reduce_max(large_size), tf.float32) > max_size,
        lambda: small_size,
        lambda: large_size)

  # Round both sides up to the next (multiple of factor) + 1.
  if factor is not None:
    output_shape += (factor - (output_shape - 1) % factor) % factor

  if not keep_aspect_ratio:
    # Square output: both sides become the larger computed side, which lets
    # downstream pre-processing skip extra padding.
    output_shape = [tf.reduce_max(output_shape), tf.reduce_max(output_shape)]

  return output_shape
def resize_to_range(image,
                    label=None,
                    min_size=None,
                    max_size=None,
                    factor=None,
                    keep_aspect_ratio=True,
                    align_corners=True,
                    label_layout_is_chw=False,
                    scope=None,
                    method=tf.image.ResizeMethod.BILINEAR):
  """Resizes image or label so their sides are within the provided range.

  The output size can be described by two cases:
  1. If the image can be rescaled so its minimum size is equal to min_size
     without the other side exceeding max_size, then do so.
  2. Otherwise, resize so the largest side is equal to max_size.

  An integer in `range(factor)` is added to the computed sides so that the
  final dimensions are multiples of `factor` plus one.

  Args:
    image: A 3D tensor of shape [height, width, channels].
    label: (optional) A 3D tensor of shape [height, width, channels] (default)
      or [channels, height, width] when label_layout_is_chw = True.
    min_size: (scalar) desired size of the smaller image side.
    max_size: (scalar) maximum allowed size of the larger image side. Note
      that the output dimension is no larger than max_size and may be slightly
      smaller than max_size when factor is not None.
    factor: Make output size multiple of factor plus one.
    keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input
      will be resized while keeping the original aspect ratio. If False, the
      input will be resized to [max_resize_value, max_resize_value] without
      keeping the original aspect ratio.
    align_corners: If True, exactly align all 4 corners of input and output.
    label_layout_is_chw: If true, the label has shape [channel, height, width].
      We support this case because for some instance segmentation dataset, the
      instance segmentation is saved as [num_instances, height, width].
    scope: Optional name scope.
    method: Image resize method. Defaults to tf.image.ResizeMethod.BILINEAR.

  Returns:
    A list [resized_image, resized_label_or_None]: a 3-D tensor of shape
    [new_height, new_width, channels], where the image has been resized
    (with the specified method) so that
    min(new_height, new_width) == ceil(min_size) or
    max(new_height, new_width) == ceil(max_size).

  Raises:
    ValueError: If the image is not a 3D tensor.
  """
  with tf.compat.v1.name_scope(scope, 'resize_to_range', [image]):
    new_tensor_list = []
    min_size = tf.cast(min_size, tf.float32)
    if max_size is not None:
      max_size = tf.cast(max_size, tf.float32)
      # Modify the max_size to be a multiple of factor plus 1 and make sure the
      # max dimension after resizing is no larger than max_size.
      if factor is not None:
        max_size = (max_size - (max_size - 1) % factor)
    [orig_height, orig_width, _] = resolve_shape(image, rank=3)
    new_size = resize_to_range_helper(input_shape=[orig_height, orig_width],
                                      min_size=min_size,
                                      max_size=max_size,
                                      factor=factor,
                                      keep_aspect_ratio=keep_aspect_ratio)
    new_tensor_list.append(tf.image.resize(
        image, new_size, method=method, align_corners=align_corners))
    if label is not None:
      if label_layout_is_chw:
        # Input label has shape [channel, height, width]: add a trailing
        # singleton dim so the per-channel maps resize like a batch, then
        # squeeze it back off.
        resized_label = tf.expand_dims(label, 3)
        resized_label = tf.image.resize(
            resized_label,
            new_size,
            method=get_label_resize_method(label),
            align_corners=align_corners)
        resized_label = tf.squeeze(resized_label, 3)
      else:
        # Input label has shape [height, width, channel].
        resized_label = tf.image.resize(
            label,
            new_size,
            method=get_label_resize_method(label),
            align_corners=align_corners)
      new_tensor_list.append(resized_label)
    else:
      # Keep the output list length fixed at 2 even without a label.
      new_tensor_list.append(None)
    return new_tensor_list
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
  """Blurs the image with separable convolution.

  Args:
    image: Tensor of shape [height, width, channels] (or batched 4-D),
      dtype float.
    kernel_size: kernel size of the filter.
    sigma: Sigma value for the Gaussian (std).
    padding: Padding mode for the convolution. 'SAME' or 'VALID'.

  Returns:
    The blurred image tensor, with the same rank as the input (a temporary
    batch dimension is added for the convolution and removed again for 3-D
    inputs).
  """
  # Force an odd kernel size centered on zero.
  radius = tf.to_int32(kernel_size / 2)
  kernel_size = radius * 2 + 1
  # 1-D Gaussian: exp(-x^2 / (2 * sigma^2)), normalized to sum to 1.
  x = tf.to_float(tf.range(-radius, radius + 1))
  blur_filter = tf.exp(
      -tf.pow(x, 2.0) / (2.0 * tf.pow(tf.to_float(sigma), 2.0)))
  blur_filter /= tf.reduce_sum(blur_filter)
  # One vertical and one horizontal filter.
  blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
  blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
  num_channels = tf.shape(image)[-1]
  # Depthwise conv applies the same 1-D filter to every channel.
  blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
  blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
  expand_batch_dim = image.shape.ndims == 3
  if expand_batch_dim:
    # Tensorflow requires batched input to convolutions, which we can fake with
    # an extra dimension.
    image = tf.expand_dims(image, axis=0)
  # Separable blur: horizontal pass followed by vertical pass.
  blurred = tf.nn.depthwise_conv2d(
      image, blur_h, strides=[1, 1, 1, 1], padding=padding)
  blurred = tf.nn.depthwise_conv2d(
      blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
  if expand_batch_dim:
    blurred = tf.squeeze(blurred, axis=0)
  return blurred
def random_gaussian_blur(image, prob=0.5):
  """Randomly blurs an image with probability `prob`.

  Args:
    image: Tensor.
    prob: probability to apply Gaussian blur.

  Returns:
    output: the (possibly) blurred image.
  """
  apply_blur = tf.less_equal(tf.random.uniform([]), prob)
  # EfficientSeg-style parameters: sigma drawn from [0.15, 1.3), with the
  # kernel radius derived from sigma.
  sigma = tf.random.uniform([]) * 1.15 + 0.15
  radius = tf.cast(sigma * 4.0 + 0.5, tf.int32)
  kernel_size = 2 * radius + 1
  blurred = gaussian_blur(image, kernel_size, sigma)
  return tf.cond(apply_blur, lambda: blurred, lambda: image)
def color_jitter(image, brightness=0, contrast=0, saturation=0, hue=0):
  """Distorts the color of the image (jittering order is random).

  Args:
    image: The input image tensor. Must be in [0, 1]!
    brightness: A float, specifying the brightness for color jitter.
    contrast: A float, specifying the contrast for color jitter.
    saturation: A float, specifying the saturation for color jitter.
    hue: A float, specifying the hue for color jitter.

  Returns:
    The distorted image tensor.
  """
  with tf.name_scope('distort_color'):
    def apply_transform(i, x):
      """Apply the i-th transformation."""
      # Each *_foo closure is a no-op when its strength parameter is 0.
      def brightness_foo():
        if brightness == 0:
          return x
        else:
          return tf.image.random_brightness(x, max_delta=brightness)
      def contrast_foo():
        if contrast == 0:
          return x
        else:
          return tf.image.random_contrast(x, lower=1-contrast, upper=1+contrast)
      def saturation_foo():
        if saturation == 0:
          return x
        else:
          return tf.image.random_saturation(
              x, lower=1-saturation, upper=1+saturation)
      def hue_foo():
        if hue == 0:
          return x
        else:
          return tf.image.random_hue(x, max_delta=hue)
      # Nested tf.cond dispatch on the tensor-valued index i:
      # 0 -> brightness, 1 -> contrast, 2 -> saturation, 3 -> hue.
      x = tf.cond(tf.less(i, 2),
                  lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
                  lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
      return x
    # Apply all four transforms in a random order.
    perm = tf.random_shuffle(tf.range(4))
    for i in range(4):
      image = apply_transform(perm[i], image)
    # Transforms may push values outside [0, 1]; clip back into range.
    image = tf.clip_by_value(image, 0., 1.)
    return image
def to_grayscale(image, keep_channels=True):
  """Converts an RGB image to grayscale.

  Args:
    image: RGB image tensor of shape [height, width, 3].
    keep_channels: If True, replicate the luminance channel back to 3
      channels so the output shape matches the input.

  Returns:
    The grayscale image tensor.
  """
  gray = tf.image.rgb_to_grayscale(image)
  if not keep_channels:
    return gray
  return tf.tile(gray, [1, 1, 3])
def random_color_jitter(image, prob=1.0):
  """Randomly applies color jittering to the given image.

  Args:
    image: Tensor.
    prob: probability to apply color jittering.

  Returns:
    output: the (possibly) color-jittered image.
  """
  # Fixed jitter strengths: brightness/contrast/saturation 0.5, hue 0.25.
  coin = tf.random.uniform([])
  apply_jitter = tf.less_equal(coin, prob)
  jittered = color_jitter(image, 0.5, 0.5, 0.5, 0.25)
  return tf.cond(apply_jitter, lambda: jittered, lambda: image)
def cutout_with_mask(image,
                     label,
                     pad_size,
                     mean_pixel,
                     ignore_label=255,
                     valid=None):
  """Apply cutout (https://arxiv.org/abs/1708.04552) to image and label.

  This operation applies a (2*pad_size x 2*pad_size) mask to a random
  location within `image`. The masked image pixels are filled with
  `mean_pixel` and the corresponding label pixels with `ignore_label`. The
  location where the mask will be applied is randomly chosen uniformly over
  the whole image.

  Args:
    image: An image Tensor of type float32.
    label: An image Tensor of type int32.
    pad_size: Specifies how big the zero mask that will be generated is that
      is applied to the image. The mask will be of size
      (2*pad_size x 2*pad_size).
    mean_pixel: What pixel value to fill in the image in the area that has
      the cutout mask applied to it.
    ignore_label: What value to fill in the label in the area that has the
      cutout mask applied to it.
    valid: (optional) A validity-mask Tensor; if provided, it is zeroed in
      the cutout region as well.

  Returns:
    A tuple (image, label, valid) when `valid` is given, otherwise
    (image, label), with the cutout region filled in each tensor.
  """
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  # Sample the center location in the image where the zero mask will be applied.
  cutout_center_height = tf.random_uniform(
      shape=[], minval=0, maxval=image_height,
      dtype=tf.int32)
  cutout_center_width = tf.random_uniform(
      shape=[], minval=0, maxval=image_width,
      dtype=tf.int32)
  # Clamp the cutout rectangle to the image bounds via the pad amounts.
  lower_pad = tf.maximum(0, cutout_center_height - pad_size)
  upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
  left_pad = tf.maximum(0, cutout_center_width - pad_size)
  right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
  cutout_shape = [image_height - (lower_pad + upper_pad),
                  image_width - (left_pad + right_pad)]
  padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
  # mask is 0 inside the cutout rectangle and 1 elsewhere.
  mask = tf.pad(
      tf.zeros(cutout_shape, dtype=image.dtype),
      padding_dims, constant_values=1)
  mask = tf.expand_dims(mask, -1)
  # Fill the cutout region of the label with ignore_label.
  label = tf.where(
      tf.equal(mask, 0),
      tf.ones_like(label, dtype=label.dtype) * ignore_label,
      label)
  im_mask = tf.tile(mask, [1, 1, 3])
  # Fill the cutout region of the image with the mean pixel value.
  image = tf.where(
      tf.equal(im_mask, 0),
      tf.ones_like(image, dtype=image.dtype) * mean_pixel,
      image)
  if valid is not None:
    # Mark the cutout region as invalid in the validity mask.
    valid = tf.where(
        tf.equal(mask, 0),
        tf.zeros_like(valid, dtype=valid.dtype),
        valid)
    return image, label, valid
  return image, label
| [
"tensorflow.tile",
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.reduce_sum",
"tensorflow.compat.v1.reverse_v2",
"tensorflow.control_dependencies",
"tensorflow.ones_like",
"tensorflow.image.random_saturation",
"tensorflow.compat.v2.image.resize",
"tensorflow.cast",
"ten... | [((1731, 1752), 'tensorflow.random.uniform', 'tf.random.uniform', (['[]'], {}), '([])\n', (1748, 1752), True, 'import tensorflow as tf\n'), ((2040, 2073), 'tensorflow.less_equal', 'tf.less_equal', (['random_value', 'prob'], {}), '(random_value, prob)\n', (2053, 2073), True, 'import tensorflow as tf\n'), ((2086, 2133), 'tensorflow.cond', 'tf.cond', (['is_flipped', 'flip', '(lambda : tensor_list)'], {}), '(is_flipped, flip, lambda : tensor_list)\n', (2093, 2133), True, 'import tensorflow as tf\n'), ((7670, 7685), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (7678, 7685), True, 'import tensorflow as tf\n'), ((8581, 8613), 'tensorflow.reshape', 'tf.reshape', (['image', 'cropped_shape'], {}), '(image, cropped_shape)\n', (8591, 8613), True, 'import tensorflow as tf\n'), ((11359, 11422), 'tensorflow.random.uniform', 'tf.random.uniform', (['[]'], {'maxval': 'max_offset_height', 'dtype': 'tf.int32'}), '([], maxval=max_offset_height, dtype=tf.int32)\n', (11376, 11422), True, 'import tensorflow as tf\n'), ((11512, 11574), 'tensorflow.random.uniform', 'tf.random.uniform', (['[]'], {'maxval': 'max_offset_width', 'dtype': 'tf.int32'}), '([], maxval=max_offset_width, dtype=tf.int32)\n', (11529, 11574), True, 'import tensorflow as tf\n'), ((12833, 12892), 'tensorflow.lin_space', 'tf.lin_space', (['min_scale_factor', 'max_scale_factor', 'num_steps'], {}), '(min_scale_factor, max_scale_factor, num_steps)\n', (12845, 12892), True, 'import tensorflow as tf\n'), ((12961, 13003), 'tensorflow.compat.v1.random_shuffle', 'tf.compat.v1.random_shuffle', (['scale_factors'], {}), '(scale_factors)\n', (12988, 13003), True, 'import tensorflow as tf\n'), ((13439, 13454), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (13447, 13454), True, 'import tensorflow as tf\n'), ((13870, 13946), 'tensorflow.compat.v2.image.resize', 'tf.compat.v2.image.resize', (['image', 'new_dim'], {'method': '"""bilinear"""', 'antialias': '(True)'}), "(image, new_dim, 
method='bilinear', antialias=True)\n", (13895, 13946), True, 'import tensorflow as tf\n'), ((16312, 16345), 'tensorflow.cast', 'tf.cast', (['input_height', 'tf.float32'], {}), '(input_height, tf.float32)\n', (16319, 16345), True, 'import tensorflow as tf\n'), ((16362, 16394), 'tensorflow.cast', 'tf.cast', (['input_width', 'tf.float32'], {}), '(input_width, tf.float32)\n', (16369, 16394), True, 'import tensorflow as tf\n'), ((16414, 16451), 'tensorflow.minimum', 'tf.minimum', (['input_height', 'input_width'], {}), '(input_height, input_width)\n', (16424, 16451), True, 'import tensorflow as tf\n'), ((16513, 16542), 'tensorflow.cast', 'tf.cast', (['min_size', 'tf.float32'], {}), '(min_size, tf.float32)\n', (16520, 16542), True, 'import tensorflow as tf\n'), ((16765, 16802), 'tensorflow.stack', 'tf.stack', (['[large_height, large_width]'], {}), '([large_height, large_width])\n', (16773, 16802), True, 'import tensorflow as tf\n'), ((22551, 22579), 'tensorflow.to_int32', 'tf.to_int32', (['(kernel_size / 2)'], {}), '(kernel_size / 2)\n', (22562, 22579), True, 'import tensorflow as tf\n'), ((22766, 22792), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['blur_filter'], {}), '(blur_filter)\n', (22779, 22792), True, 'import tensorflow as tf\n'), ((22848, 22895), 'tensorflow.reshape', 'tf.reshape', (['blur_filter', '[kernel_size, 1, 1, 1]'], {}), '(blur_filter, [kernel_size, 1, 1, 1])\n', (22858, 22895), True, 'import tensorflow as tf\n'), ((22907, 22954), 'tensorflow.reshape', 'tf.reshape', (['blur_filter', '[1, kernel_size, 1, 1]'], {}), '(blur_filter, [1, kernel_size, 1, 1])\n', (22917, 22954), True, 'import tensorflow as tf\n'), ((23003, 23043), 'tensorflow.tile', 'tf.tile', (['blur_h', '[1, 1, num_channels, 1]'], {}), '(blur_h, [1, 1, num_channels, 1])\n', (23010, 23043), True, 'import tensorflow as tf\n'), ((23055, 23095), 'tensorflow.tile', 'tf.tile', (['blur_v', '[1, 1, num_channels, 1]'], {}), '(blur_v, [1, 1, num_channels, 1])\n', (23062, 23095), True, 'import 
tensorflow as tf\n'), ((23323, 23399), 'tensorflow.nn.depthwise_conv2d', 'tf.nn.depthwise_conv2d', (['image', 'blur_h'], {'strides': '[1, 1, 1, 1]', 'padding': 'padding'}), '(image, blur_h, strides=[1, 1, 1, 1], padding=padding)\n', (23345, 23399), True, 'import tensorflow as tf\n'), ((23419, 23497), 'tensorflow.nn.depthwise_conv2d', 'tf.nn.depthwise_conv2d', (['blurred', 'blur_v'], {'strides': '[1, 1, 1, 1]', 'padding': 'padding'}), '(blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)\n', (23441, 23497), True, 'import tensorflow as tf\n'), ((23794, 23815), 'tensorflow.random.uniform', 'tf.random.uniform', (['[]'], {}), '([])\n', (23811, 23815), True, 'import tensorflow as tf\n'), ((23831, 23864), 'tensorflow.less_equal', 'tf.less_equal', (['random_value', 'prob'], {}), '(random_value, prob)\n', (23844, 23864), True, 'import tensorflow as tf\n'), ((23947, 23983), 'tensorflow.cast', 'tf.cast', (['(sigma * 4.0 + 0.5)', 'tf.int32'], {}), '(sigma * 4.0 + 0.5, tf.int32)\n', (23954, 23983), True, 'import tensorflow as tf\n'), ((24080, 24133), 'tensorflow.cond', 'tf.cond', (['is_blurred', '(lambda : blurred)', '(lambda : image)'], {}), '(is_blurred, lambda : blurred, lambda : image)\n', (24087, 24133), True, 'import tensorflow as tf\n'), ((25852, 25884), 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['image'], {}), '(image)\n', (25877, 25884), True, 'import tensorflow as tf\n'), ((26259, 26280), 'tensorflow.random.uniform', 'tf.random.uniform', (['[]'], {}), '([])\n', (26276, 26280), True, 'import tensorflow as tf\n'), ((26297, 26330), 'tensorflow.less_equal', 'tf.less_equal', (['random_value', 'prob'], {}), '(random_value, prob)\n', (26310, 26330), True, 'import tensorflow as tf\n'), ((26415, 26470), 'tensorflow.cond', 'tf.cond', (['is_jittered', '(lambda : jittered)', '(lambda : image)'], {}), '(is_jittered, lambda : jittered, lambda : image)\n', (26422, 26470), True, 'import tensorflow as tf\n'), ((27727, 27801), 'tensorflow.random_uniform', 
'tf.random_uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': 'image_height', 'dtype': 'tf.int32'}), '(shape=[], minval=0, maxval=image_height, dtype=tf.int32)\n', (27744, 27801), True, 'import tensorflow as tf\n'), ((27840, 27913), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': 'image_width', 'dtype': 'tf.int32'}), '(shape=[], minval=0, maxval=image_width, dtype=tf.int32)\n', (27857, 27913), True, 'import tensorflow as tf\n'), ((27942, 27988), 'tensorflow.maximum', 'tf.maximum', (['(0)', '(cutout_center_height - pad_size)'], {}), '(0, cutout_center_height - pad_size)\n', (27952, 27988), True, 'import tensorflow as tf\n'), ((28003, 28064), 'tensorflow.maximum', 'tf.maximum', (['(0)', '(image_height - cutout_center_height - pad_size)'], {}), '(0, image_height - cutout_center_height - pad_size)\n', (28013, 28064), True, 'import tensorflow as tf\n'), ((28078, 28123), 'tensorflow.maximum', 'tf.maximum', (['(0)', '(cutout_center_width - pad_size)'], {}), '(0, cutout_center_width - pad_size)\n', (28088, 28123), True, 'import tensorflow as tf\n'), ((28138, 28197), 'tensorflow.maximum', 'tf.maximum', (['(0)', '(image_width - cutout_center_width - pad_size)'], {}), '(0, image_width - cutout_center_width - pad_size)\n', (28148, 28197), True, 'import tensorflow as tf\n'), ((28492, 28516), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask', '(-1)'], {}), '(mask, -1)\n', (28506, 28516), True, 'import tensorflow as tf\n'), ((28649, 28673), 'tensorflow.tile', 'tf.tile', (['mask', '[1, 1, 3]'], {}), '(mask, [1, 1, 3])\n', (28656, 28673), True, 'import tensorflow as tf\n'), ((4459, 4520), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['None', '"""pad_to_bounding_box"""', '[image]'], {}), "(None, 'pad_to_bounding_box', [image])\n", (4482, 4520), True, 'import tensorflow as tf\n'), ((4534, 4575), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {'name': '"""image"""'}), "(image, 
name='image')\n", (4554, 4575), True, 'import tensorflow as tf\n'), ((6280, 6296), 'tensorflow.stack', 'tf.stack', (['[0, 0]'], {}), '([0, 0])\n', (6288, 6296), True, 'import tensorflow as tf\n'), ((6317, 6364), 'tensorflow.stack', 'tf.stack', (['[offset_height, after_padding_height]'], {}), '([offset_height, after_padding_height])\n', (6325, 6364), True, 'import tensorflow as tf\n'), ((6384, 6429), 'tensorflow.stack', 'tf.stack', (['[offset_width, after_padding_width]'], {}), '([offset_width, after_padding_width])\n', (6392, 6429), True, 'import tensorflow as tf\n'), ((6451, 6467), 'tensorflow.stack', 'tf.stack', (['[0, 0]'], {}), '([0, 0])\n', (6459, 6467), True, 'import tensorflow as tf\n'), ((6646, 6669), 'tensorflow.pad', 'tf.pad', (['image', 'paddings'], {}), '(image, paddings)\n', (6652, 6669), True, 'import tensorflow as tf\n'), ((7952, 7993), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[rank_assertion]'], {}), '([rank_assertion])\n', (7975, 7993), True, 'import tensorflow as tf\n'), ((8015, 8069), 'tensorflow.stack', 'tf.stack', (['[crop_height, crop_width, original_shape[2]]'], {}), '([crop_height, crop_width, original_shape[2]])\n', (8023, 8069), True, 'import tensorflow as tf\n'), ((8314, 8356), 'tensorflow.stack', 'tf.stack', (['[offset_height, offset_width, 0]'], {}), '([offset_height, offset_width, 0])\n', (8322, 8356), True, 'import tensorflow as tf\n'), ((8476, 8517), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[size_assertion]'], {}), '([size_assertion])\n', (8499, 8517), True, 'import tensorflow as tf\n'), ((8531, 8570), 'tensorflow.slice', 'tf.slice', (['image', 'offsets', 'cropped_shape'], {}), '(image, offsets, cropped_shape)\n', (8539, 8570), True, 'import tensorflow as tf\n'), ((9583, 9605), 'tensorflow.rank', 'tf.rank', (['image_list[i]'], {}), '(image_list[i])\n', (9590, 9605), True, 'import tensorflow as tf\n'), ((9832, 9877), 'tensorflow.control_dependencies', 'tf.control_dependencies', 
(['[rank_assertions[0]]'], {}), '([rank_assertions[0]])\n', (9855, 9877), True, 'import tensorflow as tf\n'), ((9897, 9920), 'tensorflow.shape', 'tf.shape', (['image_list[0]'], {}), '(image_list[0])\n', (9905, 9920), True, 'import tensorflow as tf\n'), ((11168, 11200), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['asserts'], {}), '(asserts)\n', (11191, 11200), True, 'import tensorflow as tf\n'), ((11226, 11272), 'tensorflow.reshape', 'tf.reshape', (['(image_height - crop_height + 1)', '[]'], {}), '(image_height - crop_height + 1, [])\n', (11236, 11272), True, 'import tensorflow as tf\n'), ((11296, 11340), 'tensorflow.reshape', 'tf.reshape', (['(image_width - crop_width + 1)', '[]'], {}), '(image_width - crop_width + 1, [])\n', (11306, 11340), True, 'import tensorflow as tf\n'), ((12397, 12425), 'numpy.float32', 'np.float32', (['min_scale_factor'], {}), '(min_scale_factor)\n', (12407, 12425), True, 'import numpy as np\n'), ((12530, 12602), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]'], {'minval': 'min_scale_factor', 'maxval': 'max_scale_factor'}), '([1], minval=min_scale_factor, maxval=max_scale_factor)\n', (12547, 12602), True, 'import tensorflow as tf\n'), ((14589, 14646), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['scope', '"""resolve_shape"""', '[tensor]'], {}), "(scope, 'resolve_shape', [tensor])\n", (14612, 14646), True, 'import tensorflow as tf\n'), ((16617, 16660), 'tensorflow.floor', 'tf.floor', (['(input_height * large_scale_factor)'], {}), '(input_height * large_scale_factor)\n', (16625, 16660), True, 'import tensorflow as tf\n'), ((16696, 16738), 'tensorflow.floor', 'tf.floor', (['(input_width * large_scale_factor)'], {}), '(input_width * large_scale_factor)\n', (16704, 16738), True, 'import tensorflow as tf\n'), ((16944, 16981), 'tensorflow.maximum', 'tf.maximum', (['input_height', 'input_width'], {}), '(input_height, input_width)\n', (16954, 16981), True, 'import tensorflow as tf\n'), ((16997, 
17026), 'tensorflow.cast', 'tf.cast', (['max_size', 'tf.float32'], {}), '(max_size, tf.float32)\n', (17004, 17026), True, 'import tensorflow as tf\n'), ((17266, 17303), 'tensorflow.stack', 'tf.stack', (['[small_height, small_width]'], {}), '([small_height, small_width])\n', (17274, 17303), True, 'import tensorflow as tf\n'), ((20309, 20367), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['scope', '"""resize_to_range"""', '[image]'], {}), "(scope, 'resize_to_range', [image])\n", (20332, 20367), True, 'import tensorflow as tf\n'), ((20409, 20438), 'tensorflow.cast', 'tf.cast', (['min_size', 'tf.float32'], {}), '(min_size, tf.float32)\n', (20416, 20438), True, 'import tensorflow as tf\n'), ((22629, 22658), 'tensorflow.range', 'tf.range', (['(-radius)', '(radius + 1)'], {}), '(-radius, radius + 1)\n', (22637, 22658), True, 'import tensorflow as tf\n'), ((22972, 22987), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (22980, 22987), True, 'import tensorflow as tf\n'), ((23281, 23310), 'tensorflow.expand_dims', 'tf.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (23295, 23310), True, 'import tensorflow as tf\n'), ((23542, 23569), 'tensorflow.squeeze', 'tf.squeeze', (['blurred'], {'axis': '(0)'}), '(blurred, axis=0)\n', (23552, 23569), True, 'import tensorflow as tf\n'), ((24666, 24696), 'tensorflow.name_scope', 'tf.name_scope', (['"""distort_color"""'], {}), "('distort_color')\n", (24679, 24696), True, 'import tensorflow as tf\n'), ((25676, 25684), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (25681, 25684), False, 'from six.moves import range\n'), ((25917, 25942), 'tensorflow.tile', 'tf.tile', (['image', '[1, 1, 3]'], {}), '(image, [1, 1, 3])\n', (25924, 25942), True, 'import tensorflow as tf\n'), ((27566, 27581), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (27574, 27581), True, 'import tensorflow as tf\n'), ((27601, 27616), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (27609, 
27616), True, 'import tensorflow as tf\n'), ((28401, 28442), 'tensorflow.zeros', 'tf.zeros', (['cutout_shape'], {'dtype': 'image.dtype'}), '(cutout_shape, dtype=image.dtype)\n', (28409, 28442), True, 'import tensorflow as tf\n'), ((28543, 28560), 'tensorflow.equal', 'tf.equal', (['mask', '(0)'], {}), '(mask, 0)\n', (28551, 28560), True, 'import tensorflow as tf\n'), ((28700, 28720), 'tensorflow.equal', 'tf.equal', (['im_mask', '(0)'], {}), '(im_mask, 0)\n', (28708, 28720), True, 'import tensorflow as tf\n'), ((2841, 2856), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (2849, 2856), True, 'import tensorflow as tf\n'), ((4772, 4796), 'tensorflow.cast', 'tf.cast', (['image', 'tf.int32'], {}), '(image, tf.int32)\n', (4779, 4796), True, 'import tensorflow as tf\n'), ((4985, 5029), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[image_rank_assert]'], {}), '([image_rank_assert])\n', (5008, 5029), True, 'import tensorflow as tf\n'), ((5180, 5204), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (5194, 5204), True, 'import tensorflow as tf\n'), ((5554, 5591), 'tensorflow.greater_equal', 'tf.greater_equal', (['target_width', 'width'], {}), '(target_width, width)\n', (5570, 5591), True, 'import tensorflow as tf\n'), ((5695, 5734), 'tensorflow.greater_equal', 'tf.greater_equal', (['target_height', 'height'], {}), '(target_height, height)\n', (5711, 5734), True, 'import tensorflow as tf\n'), ((5790, 5836), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[target_width_assert]'], {}), '([target_width_assert])\n', (5813, 5836), True, 'import tensorflow as tf\n'), ((5911, 5958), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[target_height_assert]'], {}), '([target_height_assert])\n', (5934, 5958), True, 'import tensorflow as tf\n'), ((6477, 6517), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[offset_assert]'], {}), '([offset_assert])\n', (6500, 6517), True, 
'import tensorflow as tf\n'), ((6536, 6605), 'tensorflow.stack', 'tf.stack', (['[batch_params, height_params, width_params, channel_params]'], {}), '([batch_params, height_params, width_params, channel_params])\n', (6544, 6605), True, 'import tensorflow as tf\n'), ((6706, 6734), 'tensorflow.squeeze', 'tf.squeeze', (['padded'], {'axis': '[0]'}), '(padded, axis=[0])\n', (6716, 6734), True, 'import tensorflow as tf\n'), ((6824, 6856), 'tensorflow.cast', 'tf.cast', (['outputs', 'original_dtype'], {}), '(outputs, original_dtype)\n', (6831, 6856), True, 'import tensorflow as tf\n'), ((7880, 7894), 'tensorflow.rank', 'tf.rank', (['image'], {}), '(image)\n', (7887, 7894), True, 'import tensorflow as tf\n'), ((8133, 8181), 'tensorflow.greater_equal', 'tf.greater_equal', (['original_shape[0]', 'crop_height'], {}), '(original_shape[0], crop_height)\n', (8149, 8181), True, 'import tensorflow as tf\n'), ((8193, 8240), 'tensorflow.greater_equal', 'tf.greater_equal', (['original_shape[1]', 'crop_width'], {}), '(original_shape[1], crop_width)\n', (8209, 8240), True, 'import tensorflow as tf\n'), ((9643, 9666), 'tensorflow.equal', 'tf.equal', (['image_rank', '(3)'], {}), '(image_rank, 3)\n', (9651, 9666), True, 'import tensorflow as tf\n'), ((10048, 10091), 'tensorflow.greater_equal', 'tf.greater_equal', (['image_height', 'crop_height'], {}), '(image_height, crop_height)\n', (10064, 10091), True, 'import tensorflow as tf\n'), ((10103, 10144), 'tensorflow.greater_equal', 'tf.greater_equal', (['image_width', 'crop_width'], {}), '(image_width, crop_width)\n', (10119, 10144), True, 'import tensorflow as tf\n'), ((10362, 10407), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[rank_assertions[i]]'], {}), '([rank_assertions[i]])\n', (10385, 10407), True, 'import tensorflow as tf\n'), ((10423, 10438), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (10431, 10438), True, 'import tensorflow as tf\n'), ((10522, 10552), 'tensorflow.equal', 'tf.equal', 
(['height', 'image_height'], {}), '(height, image_height)\n', (10530, 10552), True, 'import tensorflow as tf\n'), ((10716, 10744), 'tensorflow.equal', 'tf.equal', (['width', 'image_width'], {}), '(width, image_width)\n', (10724, 10744), True, 'import tensorflow as tf\n'), ((13482, 13535), 'tensorflow.cast', 'tf.cast', (['[image_shape[0], image_shape[1]]', 'tf.float32'], {}), '([image_shape[0], image_shape[1]], tf.float32)\n', (13489, 13535), True, 'import tensorflow as tf\n'), ((14830, 14846), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (14838, 14846), True, 'import tensorflow as tf\n'), ((17114, 17157), 'tensorflow.floor', 'tf.floor', (['(input_height * small_scale_factor)'], {}), '(input_height * small_scale_factor)\n', (17122, 17157), True, 'import tensorflow as tf\n'), ((17195, 17237), 'tensorflow.floor', 'tf.floor', (['(input_width * small_scale_factor)'], {}), '(input_width * small_scale_factor)\n', (17203, 17237), True, 'import tensorflow as tf\n'), ((17835, 17862), 'tensorflow.reduce_max', 'tf.reduce_max', (['output_shape'], {}), '(output_shape)\n', (17848, 17862), True, 'import tensorflow as tf\n'), ((17864, 17891), 'tensorflow.reduce_max', 'tf.reduce_max', (['output_shape'], {}), '(output_shape)\n', (17877, 17891), True, 'import tensorflow as tf\n'), ((20485, 20514), 'tensorflow.cast', 'tf.cast', (['max_size', 'tf.float32'], {}), '(max_size, tf.float32)\n', (20492, 20514), True, 'import tensorflow as tf\n'), ((21157, 21233), 'tensorflow.image.resize', 'tf.image.resize', (['image', 'new_size'], {'method': 'method', 'align_corners': 'align_corners'}), '(image, new_size, method=method, align_corners=align_corners)\n', (21172, 21233), True, 'import tensorflow as tf\n'), ((23900, 23921), 'tensorflow.random.uniform', 'tf.random.uniform', (['[]'], {}), '([])\n', (23917, 23921), True, 'import tensorflow as tf\n'), ((25650, 25661), 'tensorflow.range', 'tf.range', (['(4)'], {}), '(4)\n', (25658, 25661), True, 'import tensorflow as tf\n'), 
((25746, 25779), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['image', '(0.0)', '(1.0)'], {}), '(image, 0.0, 1.0)\n', (25762, 25779), True, 'import tensorflow as tf\n'), ((28568, 28606), 'tensorflow.ones_like', 'tf.ones_like', (['label'], {'dtype': 'label.dtype'}), '(label, dtype=label.dtype)\n', (28580, 28606), True, 'import tensorflow as tf\n'), ((28728, 28766), 'tensorflow.ones_like', 'tf.ones_like', (['image'], {'dtype': 'image.dtype'}), '(image, dtype=image.dtype)\n', (28740, 28766), True, 'import tensorflow as tf\n'), ((28849, 28866), 'tensorflow.equal', 'tf.equal', (['mask', '(0)'], {}), '(mask, 0)\n', (28857, 28866), True, 'import tensorflow as tf\n'), ((28876, 28915), 'tensorflow.zeros_like', 'tf.zeros_like', (['valid'], {'dtype': 'valid.dtype'}), '(valid, dtype=valid.dtype)\n', (28889, 28915), True, 'import tensorflow as tf\n'), ((1965, 2003), 'tensorflow.compat.v1.reverse_v2', 'tf.compat.v1.reverse_v2', (['tensor', '[dim]'], {}), '(tensor, [dim])\n', (1988, 2003), True, 'import tensorflow as tf\n'), ((2923, 2955), 'six.moves.zip', 'zip', (['static_shape', 'dynamic_shape'], {}), '(static_shape, dynamic_shape)\n', (2926, 2955), False, 'from six.moves import zip\n'), ((5278, 5302), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (5292, 5302), True, 'import tensorflow as tf\n'), ((6095, 6135), 'tensorflow.greater_equal', 'tf.greater_equal', (['after_padding_width', '(0)'], {}), '(after_padding_width, 0)\n', (6111, 6135), True, 'import tensorflow as tf\n'), ((6149, 6190), 'tensorflow.greater_equal', 'tf.greater_equal', (['after_padding_height', '(0)'], {}), '(after_padding_height, 0)\n', (6165, 6190), True, 'import tensorflow as tf\n'), ((21382, 21406), 'tensorflow.expand_dims', 'tf.expand_dims', (['label', '(3)'], {}), '(label, 3)\n', (21396, 21406), True, 'import tensorflow as tf\n'), ((21613, 21641), 'tensorflow.squeeze', 'tf.squeeze', (['resized_label', '(3)'], {}), '(resized_label, 3)\n', (21623, 21641), True, 
'import tensorflow as tf\n'), ((22691, 22705), 'tensorflow.pow', 'tf.pow', (['x', '(2.0)'], {}), '(x, 2.0)\n', (22697, 22705), True, 'import tensorflow as tf\n'), ((25435, 25448), 'tensorflow.less', 'tf.less', (['i', '(2)'], {}), '(i, 2)\n', (25442, 25448), True, 'import tensorflow as tf\n'), ((4876, 4890), 'tensorflow.rank', 'tf.rank', (['image'], {}), '(image)\n', (4883, 4890), True, 'import tensorflow as tf\n'), ((4917, 4931), 'tensorflow.rank', 'tf.rank', (['image'], {}), '(image)\n', (4924, 4931), True, 'import tensorflow as tf\n'), ((17348, 17373), 'tensorflow.reduce_max', 'tf.reduce_max', (['large_size'], {}), '(large_size)\n', (17361, 17373), True, 'import tensorflow as tf\n'), ((22722, 22740), 'tensorflow.to_float', 'tf.to_float', (['sigma'], {}), '(sigma)\n', (22733, 22740), True, 'import tensorflow as tf\n'), ((24878, 24929), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['x'], {'max_delta': 'brightness'}), '(x, max_delta=brightness)\n', (24904, 24929), True, 'import tensorflow as tf\n'), ((25032, 25099), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['x'], {'lower': '(1 - contrast)', 'upper': '(1 + contrast)'}), '(x, lower=1 - contrast, upper=1 + contrast)\n', (25056, 25099), True, 'import tensorflow as tf\n'), ((25202, 25275), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['x'], {'lower': '(1 - saturation)', 'upper': '(1 + saturation)'}), '(x, lower=1 - saturation, upper=1 + saturation)\n', (25228, 25275), True, 'import tensorflow as tf\n'), ((25379, 25416), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['x'], {'max_delta': 'hue'}), '(x, max_delta=hue)\n', (25398, 25416), True, 'import tensorflow as tf\n'), ((25484, 25497), 'tensorflow.less', 'tf.less', (['i', '(1)'], {}), '(i, 1)\n', (25491, 25497), True, 'import tensorflow as tf\n'), ((25564, 25577), 'tensorflow.less', 'tf.less', (['i', '(3)'], {}), '(i, 3)\n', (25571, 25577), True, 'import tensorflow as tf\n')] |
import os
import numpy as np
import cv2
from enhance_image import Image_Loader_Utils#, Adjust_Bright_Illumination, Illumination_Finder, Adjust_Darkness
def correct_image_illumination(path):
    """Load the image at *path* and return its illumination-corrected pixel array.

    The loader yields (height, width, corrected_image, gray_image); only the
    corrected image is of interest here.
    """
    _, _, corrected_img, _ = Image_Loader_Utils(path).convert_img_to_array()
    return corrected_img
def template_matcher(template_gray, image_to_be_checked, image_to_be_checked_gray, threshold = 0.95):
    """Check whether *template_gray* appears anywhere in the grayscale image.

    Runs normalized cross-correlation template matching and reports a hit if
    any location scores at or above *threshold*.

    Args:
        template_gray: grayscale template image.
        image_to_be_checked: unused here; kept for interface compatibility
            with existing callers.
        image_to_be_checked_gray: grayscale image to search in.
        threshold: minimum TM_CCOEFF_NORMED score that counts as a match.

    Returns:
        (True, 'Ok') if at least one location matches, else (False, 'Bad').
    """
    res = cv2.matchTemplate(image_to_be_checked_gray, template_gray, cv2.TM_CCOEFF_NORMED)
    # Original code computed min/max locations and a bounding box but never
    # used them, and looped over np.where() just to detect one hit; a single
    # any() over the score map expresses the same check directly.
    matched = bool((res >= threshold).any())
    return (True, 'Ok') if matched else (False, 'Bad')
def find_card_type(image, t_card = ''):
    """Classify *image* as an Aadhar or PAN card and count matching sub-templates.

    First matches the card against a primary template per card kind (or trusts
    the *t_card* hint), then counts how many of the remaining templates in that
    kind's folder also match.

    Args:
        image: BGR card image.
        t_card: optional hint ('aadhar' or 'pan') that forces the card kind.

    Returns:
        (ct, card): number of matching secondary templates and the card label.
        Note the return expression parses as ``ct, (card if t_card == '' else
        t_card)`` — when a hint is given, the hint string itself is returned.
    """
    aadhar_folder = 'aadhar_templates'
    pan_folder = 'pan_templates'
    img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ct, card = 0, 'Image size too small!'
    aadhar_template = cv2.imread(f'{aadhar_folder}/aadhar_template.jpg',0)
    pan_template = cv2.imread(f'{pan_folder}/pan_template.jpg',0)
    if template_matcher(aadhar_template, image, img_gray)[0] or t_card == 'aadhar':
        card = 'Aadhar'
        ct = _count_template_hits(aadhar_folder, 'aadhar_template.jpg', image, img_gray)
    elif template_matcher(pan_template, image, img_gray)[0] or t_card == 'pan':
        card = 'Pan'
        ct = _count_template_hits(pan_folder, 'pan_template.jpg', image, img_gray)
    return ct, card if t_card == '' else t_card

def _count_template_hits(folder, skip_name, image, img_gray):
    """Count templates in *folder* (excluding *skip_name*) that match *img_gray*."""
    hits = 0
    for template in os.listdir(folder):
        if template != skip_name:
            temp = cv2.imread(f'{folder}/{template}',0)
            # BUG FIX: the original truth-tested template_matcher's (bool, str)
            # return tuple, which is always truthy, so every template was
            # counted as a hit regardless of the actual match result.
            if template_matcher(temp, image, img_gray)[0]:
                hits += 1
    return hits
| [
"os.listdir",
"numpy.where",
"cv2.minMaxLoc",
"cv2.cvtColor",
"enhance_image.Image_Loader_Utils",
"cv2.matchTemplate",
"cv2.imread"
] | [((432, 517), 'cv2.matchTemplate', 'cv2.matchTemplate', (['image_to_be_checked_gray', 'template_gray', 'cv2.TM_CCOEFF_NORMED'], {}), '(image_to_be_checked_gray, template_gray, cv2.TM_CCOEFF_NORMED\n )\n', (449, 517), False, 'import cv2\n'), ((553, 571), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['res'], {}), '(res)\n', (566, 571), False, 'import cv2\n'), ((632, 658), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (640, 658), True, 'import numpy as np\n'), ((896, 935), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (908, 935), False, 'import cv2\n'), ((998, 1051), 'cv2.imread', 'cv2.imread', (['f"""{aadhar_folder}/aadhar_template.jpg"""', '(0)'], {}), "(f'{aadhar_folder}/aadhar_template.jpg', 0)\n", (1008, 1051), False, 'import cv2\n'), ((1069, 1116), 'cv2.imread', 'cv2.imread', (['f"""{pan_folder}/pan_template.jpg"""', '(0)'], {}), "(f'{pan_folder}/pan_template.jpg', 0)\n", (1079, 1116), False, 'import cv2\n'), ((1250, 1275), 'os.listdir', 'os.listdir', (['aadhar_folder'], {}), '(aadhar_folder)\n', (1260, 1275), False, 'import os\n'), ((211, 235), 'enhance_image.Image_Loader_Utils', 'Image_Loader_Utils', (['path'], {}), '(path)\n', (229, 235), False, 'from enhance_image import Image_Loader_Utils\n'), ((1579, 1601), 'os.listdir', 'os.listdir', (['pan_folder'], {}), '(pan_folder)\n', (1589, 1601), False, 'import os\n'), ((1338, 1382), 'cv2.imread', 'cv2.imread', (['f"""{aadhar_folder}/{template}"""', '(0)'], {}), "(f'{aadhar_folder}/{template}', 0)\n", (1348, 1382), False, 'import cv2\n'), ((1661, 1702), 'cv2.imread', 'cv2.imread', (['f"""{pan_folder}/{template}"""', '(0)'], {}), "(f'{pan_folder}/{template}', 0)\n", (1671, 1702), False, 'import cv2\n')] |
import numpy as np
import re
import math
import yaml
import importlib
def read_rle(path):
    """Parse a Game-of-Life RLE pattern file into a 2-D uint8 array.

    Args:
        path: path to an ``.rle`` file with optional ``#`` comment lines,
            one header line starting with ``x`` giving the pattern size,
            and run-length-encoded pattern data.

    Returns:
        np.ndarray of shape (rows, cols) with 1 for live cells, 0 for dead.
    """
    with open(path, "r") as f:
        clean_lines = []
        for line in f.readlines():
            if line[0] != '#': # lines starting with '#' are comments
                if line[0] == 'x': # header line, e.g. "x = 3, y = 5, rule = ..."
                    header = line
                else: # pattern data line; strip newlines so runs can span lines
                    clean_lines.append(line.replace("\n", ""))
        clean_lines = "".join(clean_lines)
    # parse header: the first two integers are the x (width) and y (height)
    # extents; note they are bound to shape_y / shape_x respectively so that
    # the array below is allocated as (rows, cols).
    shape_y, shape_x = re.findall('\d+', header)[:2]
    shape_y, shape_x = int(shape_y), int(shape_x)
    # generate pattern by consuming the RLE stream token by token
    pattern = np.zeros((shape_x, shape_y), dtype=np.uint8)
    i, j = 0, 0  # i = current column, j = current row
    it = 0  # iteration guard against malformed input
    # 'b' = dead run, 'o' = live run, '$' = end of row(s), '!' = end of pattern
    possible_tokens = ['b', 'o', '$', '!']
    while len(clean_lines) > 0:
        # locate the nearest token of each kind; 10**100 marks "not present"
        # so that min() picks the first token actually in the stream
        next_tokens = [clean_lines.find(token) for token in possible_tokens]
        next_tokens = [10**100 if token == -1 else token for token in next_tokens]
        first_token = min(next_tokens)
        token_name = possible_tokens[next_tokens.index(first_token)]
        # digits before the token give the run length; empty means 1
        cell_increments = clean_lines[:first_token]
        cell_increments = 1 if cell_increments == '' else cell_increments
        if token_name == 'o':
            pattern[j, i:i + int(cell_increments)] = 1
            i += int(cell_increments)
        elif token_name == 'b':
            i += int(cell_increments)
        elif token_name == '$' or token_name == '!':
            # advance one or more rows and reset to the first column
            j += int(cell_increments)
            i = 0
        clean_lines = clean_lines[first_token + 1:]
        it += 1
        # safety valve: never loop more times than there are cells
        if it > shape_x * shape_y + 1:
            break
    return pattern
def add_pattern_from_file(x, path, pos, angle=0, flip=(1, 1), centered=False):
    """Read an RLE pattern from *path* and stamp it onto grid *x* at *pos*."""
    return add_pattern(x, read_rle(path), pos, angle, flip, centered)
def add_pattern_from_macro(x, kwargs, pos, angle=0, flip=(1, 1), centered=False):
    """Instantiate a macro pattern from *kwargs* and stamp it onto grid *x*."""
    return add_pattern(x, load_macro(**kwargs), pos, angle, flip, centered)
def load_macro(name, kwargs=None, source='temple_macro', module_base='moebiusgol.macro'):
    """Instantiate the macro class *name* from module ``{module_base}.{source}``.

    Args:
        name: class name to look up in the target module.
        kwargs: optional constructor keyword arguments (defaults to {}).
        source: module name within *module_base*.
        module_base: dotted package prefix of the macro modules.

    Returns:
        An instance of the resolved class.

    Raises:
        RuntimeError: if *name* is not defined in the target module.
    """
    module = f'{module_base}.{source}'
    m = importlib.import_module(module)
    kwargs = {} if kwargs is None else kwargs
    clazz = getattr(m, name, None)
    if clazz is None:
        # Original raised a bare RuntimeError with no message, which made
        # misconfigured macro names hard to diagnose.
        raise RuntimeError(f'macro {name!r} not found in module {module!r}')
    return clazz(**kwargs)
def add_pattern(x, pattern, pos, angle=0, flip=(1, 1), centered=False):
    """Stamp *pattern* onto grid *x* at *pos*, mutating and returning *x*.

    The pattern is rotated by *angle* quarter-turns, OR-ed with the existing
    grid content in the target window, and written back with the axis
    directions given by *flip* (1 keeps an axis, -1 reverses it). When
    *centered* is true, *pos* marks the pattern's center instead of its
    top-left corner.
    """
    stamp = np.rot90(pattern, angle)
    rows, cols = stamp.shape[0], stamp.shape[1]
    if centered:
        row_span = slice(pos[0] - math.floor(rows / 2), pos[0] + math.ceil(rows / 2))
        col_span = slice(pos[1] - math.floor(cols / 2), pos[1] + math.ceil(cols / 2))
    else:
        row_span = slice(pos[0], pos[0] + rows)
        col_span = slice(pos[1], pos[1] + cols)
    merged = np.logical_or(stamp, x[row_span, col_span])
    x[row_span, col_span] = merged[::flip[0], ::flip[1]]
    return x
def read_pattern_shape(path):
    """Return the (rows, cols) shape of the RLE pattern stored at *path*."""
    return read_rle(path).shape
def basic_load(path):
    """Load and return the YAML timeline stored at *path*."""
    with open(path, 'r') as stream:
        return yaml.safe_load(stream)
| [
"math.ceil",
"importlib.import_module",
"math.floor",
"numpy.logical_or",
"yaml.safe_load",
"numpy.zeros",
"numpy.rot90",
"re.findall"
] | [((651, 695), 'numpy.zeros', 'np.zeros', (['(shape_x, shape_y)'], {'dtype': 'np.uint8'}), '((shape_x, shape_y), dtype=np.uint8)\n', (659, 695), True, 'import numpy as np\n'), ((2183, 2214), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (2206, 2214), False, 'import importlib\n'), ((2478, 2502), 'numpy.rot90', 'np.rot90', (['pattern', 'angle'], {}), '(pattern, angle)\n', (2486, 2502), True, 'import numpy as np\n'), ((2882, 2925), 'numpy.logical_or', 'np.logical_or', (['pattern', 'x[x_slice, y_slice]'], {}), '(pattern, x[x_slice, y_slice])\n', (2895, 2925), True, 'import numpy as np\n'), ((529, 555), 're.findall', 're.findall', (['"""\\\\d+"""', 'header'], {}), "('\\\\d+', header)\n", (539, 555), False, 'import re\n'), ((3155, 3172), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (3169, 3172), False, 'import yaml\n'), ((2554, 2586), 'math.floor', 'math.floor', (['(pattern.shape[0] / 2)'], {}), '(pattern.shape[0] / 2)\n', (2564, 2586), False, 'import math\n'), ((2597, 2628), 'math.ceil', 'math.ceil', (['(pattern.shape[0] / 2)'], {}), '(pattern.shape[0] / 2)\n', (2606, 2628), False, 'import math\n'), ((2663, 2695), 'math.floor', 'math.floor', (['(pattern.shape[1] / 2)'], {}), '(pattern.shape[1] / 2)\n', (2673, 2695), False, 'import math\n'), ((2706, 2737), 'math.ceil', 'math.ceil', (['(pattern.shape[1] / 2)'], {}), '(pattern.shape[1] / 2)\n', (2715, 2737), False, 'import math\n')] |
"""Module about the selector element."""
import enum
import numpy as np
from .element import ElementId, Element
@enum.unique
class Symmetry(enum.Enum):
    """Enumerate the mirror-symmetry modes a selector can operate in."""
    SYMMETRY_NONE = enum.auto()  # no mirroring
    SYMMETRY_X = enum.auto()  # mirror across the x axis
    SYMMETRY_Y = enum.auto()  # mirror across the y axis
    SYMMETRY_XY = enum.auto()  # mirror across both axes
class Selector(Element):
    """Highlight element marking the grid block currently targeted."""

    def __init__(self, params):
        """Build the selector as a unit-sized box using the configured look."""
        super().__init__(
            params=params,
            element_id=ElementId.SELECTOR,
            dimensions=[2, 2, 2],
            color=params["selector"]["color"]["build"],
            opacity=params["selector"]["opacity"],
        )
        self.coords = None
        self.coords_type = int

    def select(self, coords):
        """Move the selector to highlight the block at *coords*."""
        self.coords = coords.astype(self.coords_type)
        self.mesh.SetOrigin(coords * self.unit)

    def show(self):
        """Make the selector visible."""
        self.actor.VisibilityOn()

    def hide(self):
        """Make the selector invisible."""
        self.actor.VisibilityOff()

    def selection(self):
        """Return the coordinates of the current selection."""
        return self.coords
class AreaSelector(Selector):
    """Selector extended with rectangular area selection."""

    def __init__(self, params):
        """Initialize the selector with empty area state."""
        super().__init__(params=params)
        self.area = None
        self.area_first_coords = None
        self.area_last_coords = None

    def select_area(self, area):
        """Resize and move the selector so it spans *area*."""
        corners = np.asarray(area).astype(self.coords_type)
        lower = np.min(corners, axis=0)
        upper = np.max(corners, axis=0)
        self.area = (lower, upper)
        self.select(lower)
        # The mesh needs one extra cell per axis to enclose the last block.
        self.mesh.SetDimensions(upper - lower + 2)
        self.mesh.Modified()

    def reset_area(self):
        """Shrink the selector back to a single block and clear the endpoints."""
        self.mesh.SetDimensions([2, 2, 2])
        self.mesh.Modified()
        self.area_first_coords = None
        self.area_last_coords = None

    def get_first_coords(self):
        """Return the first corner of the selection area."""
        return self.area_first_coords

    def get_last_coords(self):
        """Return the last corner of the selection area."""
        return self.area_last_coords

    def set_first_coords(self, coords):
        """Record the first corner of the selection area."""
        self.area_first_coords = coords

    def set_last_coords(self, coords):
        """Record the last corner of the selection area."""
        self.area_last_coords = coords

    def selection_area(self):
        """Return the current (lower, upper) area selection."""
        return self.area
class SymmetrySelector(AreaSelector):
    """Selector that supports symmetry.

    Besides the primary selector, three mirror selectors are kept; which of
    them are updated/shown depends on the current ``self.symmetry`` mode.
    """
    def __init__(self, params, dimensions):
        """Initialize the selector."""
        super().__init__(params=params)
        # Mirror selectors: across the X plane, the Y plane, and both combined.
        self.selector_x = AreaSelector(params=params)
        self.selector_y = AreaSelector(params=params)
        self.selector_xy = AreaSelector(params=params)
        self.symmetry = Symmetry.SYMMETRY_NONE
        # Grid dimensions, used to compute mirrored coordinates below.
        self.dimensions = np.asarray(dimensions)
    def set_block_mode(self, mode):
        """Set the block mode on the primary and all mirror selectors."""
        super().set_block_mode(mode)
        self.selector_x.set_block_mode(mode)
        self.selector_y.set_block_mode(mode)
        self.selector_xy.set_block_mode(mode)
    def select_area(self, area):
        """Select the area."""
        super().select_area(area)
        area = np.asarray(area).astype(self.coords_type)
        # The X-symmetry branch mirrors coordinate index 1, the Y-symmetry
        # branch mirrors index 0; the "- 2" presumably offsets the 2-unit
        # extent of the selector mesh (see Selector's [2, 2, 2]) -- verify.
        if self.symmetry in (Symmetry.SYMMETRY_X, Symmetry.SYMMETRY_XY):
            new_area = area.copy()
            new_area[0][1] = self.dimensions[1] - area[0][1] - 2
            new_area[1][1] = self.dimensions[1] - area[1][1] - 2
            self.selector_x.select_area(new_area)
        if self.symmetry in (Symmetry.SYMMETRY_Y, Symmetry.SYMMETRY_XY):
            new_area = area.copy()
            new_area[0][0] = self.dimensions[0] - area[0][0] - 2
            new_area[1][0] = self.dimensions[0] - area[1][0] - 2
            self.selector_y.select_area(new_area)
        if self.symmetry is Symmetry.SYMMETRY_XY:
            # NOTE: reuses new_area from the Y branch above (SYMMETRY_XY always
            # enters both earlier branches); all four entries are rewritten
            # from `area` before use, so the reuse is safe.
            new_area[0][1] = self.dimensions[1] - area[0][1] - 2
            new_area[1][1] = self.dimensions[1] - area[1][1] - 2
            new_area[0][0] = self.dimensions[0] - area[0][0] - 2
            new_area[1][0] = self.dimensions[0] - area[1][0] - 2
            self.selector_xy.select_area(new_area)
    def reset_area(self):
        """Reset the primary selector and all mirror selectors."""
        super().reset_area()
        self.selector_x.reset_area()
        self.selector_y.reset_area()
        self.selector_xy.reset_area()
    def select(self, coords):
        """Select a block of the grid."""
        super().select(coords)
        # Same mirroring scheme as select_area, applied to a single block.
        if self.symmetry in (Symmetry.SYMMETRY_X, Symmetry.SYMMETRY_XY):
            new_coords = coords.copy()
            new_coords[1] = self.dimensions[1] - coords[1] - 2
            self.selector_x.select(new_coords)
        if self.symmetry in (Symmetry.SYMMETRY_Y, Symmetry.SYMMETRY_XY):
            new_coords = coords.copy()
            new_coords[0] = self.dimensions[0] - coords[0] - 2
            self.selector_y.select(new_coords)
        if self.symmetry is Symmetry.SYMMETRY_XY:
            new_coords = coords.copy()
            new_coords[1] = self.dimensions[1] - coords[1] - 2
            new_coords[0] = self.dimensions[0] - coords[0] - 2
            self.selector_xy.select(new_coords)
    def show(self):
        """Show the selector and any mirror selector active for the mode."""
        super().show()
        if self.symmetry in (Symmetry.SYMMETRY_X, Symmetry.SYMMETRY_XY):
            self.selector_x.actor.VisibilityOn()
        if self.symmetry in (Symmetry.SYMMETRY_Y, Symmetry.SYMMETRY_XY):
            self.selector_y.actor.VisibilityOn()
        if self.symmetry is Symmetry.SYMMETRY_XY:
            self.selector_xy.actor.VisibilityOn()
    def hide(self):
        """Hide the selector and all mirror selectors unconditionally."""
        super().hide()
        self.selector_x.actor.VisibilityOff()
        self.selector_y.actor.VisibilityOff()
        self.selector_xy.actor.VisibilityOff()
    def selection(self):
        """Return the current selection.

        Returns a list: the primary coordinates plus one entry per mirror
        selector active under the current symmetry mode.
        """
        coords = [self.coords]
        if self.symmetry in (Symmetry.SYMMETRY_X, Symmetry.SYMMETRY_XY):
            coords.append(self.selector_x.coords)
        if self.symmetry in (Symmetry.SYMMETRY_Y, Symmetry.SYMMETRY_XY):
            coords.append(self.selector_y.coords)
        if self.symmetry is Symmetry.SYMMETRY_XY:
            coords.append(self.selector_xy.coords)
        return coords
    def selection_area(self):
        """Return the current area selection.

        Returns a list: the primary area plus one entry per mirror selector
        active under the current symmetry mode.
        """
        area = [self.area]
        if self.symmetry in (Symmetry.SYMMETRY_X, Symmetry.SYMMETRY_XY):
            area.append(self.selector_x.area)
        if self.symmetry in (Symmetry.SYMMETRY_Y, Symmetry.SYMMETRY_XY):
            area.append(self.selector_y.area)
        if self.symmetry is Symmetry.SYMMETRY_XY:
            area.append(self.selector_xy.area)
        return area
    def set_symmetry(self, value):
        """Set the symmetry mode (a Symmetry enum member)."""
        self.symmetry = value
| [
"numpy.max",
"numpy.asarray",
"enum.auto",
"numpy.min"
] | [((223, 234), 'enum.auto', 'enum.auto', ([], {}), '()\n', (232, 234), False, 'import enum\n'), ((252, 263), 'enum.auto', 'enum.auto', ([], {}), '()\n', (261, 263), False, 'import enum\n'), ((281, 292), 'enum.auto', 'enum.auto', ([], {}), '()\n', (290, 292), False, 'import enum\n'), ((311, 322), 'enum.auto', 'enum.auto', ([], {}), '()\n', (320, 322), False, 'import enum\n'), ((3326, 3348), 'numpy.asarray', 'np.asarray', (['dimensions'], {}), '(dimensions)\n', (3336, 3348), True, 'import numpy as np\n'), ((1760, 1780), 'numpy.min', 'np.min', (['area'], {'axis': '(0)'}), '(area, axis=0)\n', (1766, 1780), True, 'import numpy as np\n'), ((1794, 1814), 'numpy.max', 'np.max', (['area'], {'axis': '(0)'}), '(area, axis=0)\n', (1800, 1814), True, 'import numpy as np\n'), ((1684, 1700), 'numpy.asarray', 'np.asarray', (['area'], {}), '(area)\n', (1694, 1700), True, 'import numpy as np\n'), ((3707, 3723), 'numpy.asarray', 'np.asarray', (['area'], {}), '(area)\n', (3717, 3723), True, 'import numpy as np\n')] |
from transformers import (AutoModelForTokenClassification,
AutoModelForSequenceClassification,
TrainingArguments,
AutoTokenizer,
AutoConfig,
Trainer)
from biobert_ner.utils_ner import (convert_examples_to_features, get_labels, NerTestDataset)
from biobert_ner.utils_ner import InputExample as NerExample
from biobert_re.utils_re import RETestDataset
from bilstm_crf_ner.model.config import Config as BiLSTMConfig
from bilstm_crf_ner.model.ner_model import NERModel as BiLSTMModel
from bilstm_crf_ner.model.ner_learner import NERLearner as BiLSTMLearner
import en_ner_bc5cdr_md
import numpy as np
import os
from torch import nn
from ehr import HealthRecord
from generate_data import scispacy_plus_tokenizer
from annotations import Entity
import logging
from typing import List, Tuple
# Module-level logger.
logger = logging.getLogger(__name__)
# Maximum (sub)token sequence lengths fed to each model.
BIOBERT_NER_SEQ_LEN = 128
BILSTM_NER_SEQ_LEN = 512
BIOBERT_RE_SEQ_LEN = 128
# Silence matplotlib's chatty font-manager logger.
logging.getLogger('matplotlib.font_manager').disabled = True
# Directories holding the fine-tuned model checkpoints.
BIOBERT_NER_MODEL_DIR = "biobert_ner/output_full"
BIOBERT_RE_MODEL_DIR = "biobert_re/output_full"
# =====BioBERT Model for NER======
# IOB label set used during NER fine-tuning, plus index <-> label mappings.
biobert_ner_labels = get_labels('biobert_ner/dataset_full/labels.txt')
biobert_ner_label_map = {i: label for i, label in enumerate(biobert_ner_labels)}
num_labels_ner = len(biobert_ner_labels)
biobert_ner_config = AutoConfig.from_pretrained(
    os.path.join(BIOBERT_NER_MODEL_DIR, "config.json"),
    num_labels=num_labels_ner,
    id2label=biobert_ner_label_map,
    label2id={label: i for i, label in enumerate(biobert_ner_labels)})
# Tokenizer comes from the base BioBERT checkpoint; weights from the local
# fine-tuned model directory.
biobert_ner_tokenizer = AutoTokenizer.from_pretrained(
    "dmis-lab/biobert-base-cased-v1.1")
biobert_ner_model = AutoModelForTokenClassification.from_pretrained(
    os.path.join(BIOBERT_NER_MODEL_DIR, "pytorch_model.bin"),
    config=biobert_ner_config)
# Trainer is used for prediction only (do_predict=True, throwaway output dir).
biobert_ner_training_args = TrainingArguments(output_dir="/tmp", do_predict=True)
biobert_ner_trainer = Trainer(model=biobert_ner_model, args=biobert_ner_training_args)
# Maps short NER tag names to the entity names used in annotations.
label_ent_map = {'DRUG': 'Drug', 'STR': 'Strength',
                 'DUR': 'Duration', 'ROU': 'Route',
                 'FOR': 'Form', 'ADE': 'ADE',
                 'DOS': 'Dosage', 'REA': 'Reason',
                 'FRE': 'Frequency'}
# =====BiLSTM + CRF model for NER=========
bilstm_config = BiLSTMConfig()
bilstm_model = BiLSTMModel(bilstm_config)
bilstm_learn = BiLSTMLearner(bilstm_config, bilstm_model)
bilstm_learn.load("ner_15e_bilstm_crf_elmo")
# Inject the scispacy tokenizer as scispacy_plus_tokenizer's default argument.
# NOTE(review): assumes the tokenizer is that function's only/last defaulted
# parameter -- verify against generate_data.scispacy_plus_tokenizer.
scispacy_tok = en_ner_bc5cdr_md.load().tokenizer
scispacy_plus_tokenizer.__defaults__ = (scispacy_tok,)
# =====BioBERT Model for RE======
# Binary relation labels: "1" = relation holds, "0" = no relation.
re_label_list = ["0", "1"]
re_task_name = "ehr-re"
biobert_re_config = AutoConfig.from_pretrained(
    os.path.join(BIOBERT_RE_MODEL_DIR, "config.json"),
    num_labels=len(re_label_list),
    finetuning_task=re_task_name)
biobert_re_model = AutoModelForSequenceClassification.from_pretrained(
    os.path.join(BIOBERT_RE_MODEL_DIR, "pytorch_model.bin"),
    config=biobert_re_config,)
biobert_re_training_args = TrainingArguments(output_dir="/tmp", do_predict=True)
biobert_re_trainer = Trainer(model=biobert_re_model, args=biobert_re_training_args)
def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> List[List[str]]:
    """
    Get the list of labelled predictions from model output.
    Parameters
    ----------
    predictions : np.ndarray
        An array of shape (num_examples, seq_len, num_labels).
    label_ids : np.ndarray
        An array of shape (num_examples, seq_length).
        Has -100 at positions which need to be ignored.
    Returns
    -------
    preds_list : List[List[str]]
        Labelled output, one list of tag strings per example; positions
        whose label id equals the ignore index are skipped.
    """
    preds = np.argmax(predictions, axis=2)
    batch_size, seq_len = preds.shape
    # Hoist the ignore index (CrossEntropyLoss's default, -100) out of the
    # loop instead of constructing a new loss object for every token.
    ignore_index = nn.CrossEntropyLoss().ignore_index
    preds_list = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != ignore_index:
                preds_list[i].append(biobert_ner_label_map[preds[i][j]])
    return preds_list
def get_chunk_type(tok: str) -> Tuple[str, str]:
    """
    Split an IOB label into its chunk class and entity type.
    Args:
        tok: Label in IOB format, e.g. "B-DRUG"
    Returns:
        tuple: ("B", "DRUG")
    """
    # Class is the text before the first dash, type the text after the last
    # dash; a bare "O" therefore yields ("O", "O").
    parts = tok.split('-')
    return parts[0], parts[-1]
def get_chunks(seq: List[str]) -> List[Tuple[str, int, int]]:
    """
    Given a sequence of tags, group entities and their position.
    Args:
        seq: ["O", "O", "B-DRUG", "I-DRUG", ...] sequence of labels
    Returns:
        list of (chunk_type, chunk_start, chunk_end); both indices inclusive
    Example:
        seq = ["B-DRUG", "I-DRUG", "O", "B-STR"]
        result = [("DRUG", 0, 1), ("STR", 3, 3)]
    """
    default = "O"
    chunks = []
    chunk_type, chunk_start = None, None
    for i, tok in enumerate(seq):
        if tok == default:
            # Outside any entity: close the open chunk, if there is one.
            if chunk_type is not None:
                chunks.append((chunk_type, chunk_start, i - 1))
                chunk_type, chunk_start = None, None
            continue
        # IOB tag such as "B-DRUG": class before the first dash, type after
        # the last dash.
        parts = tok.split('-')
        tok_chunk_class, tok_chunk_type = parts[0], parts[-1]
        if chunk_type is None:
            chunk_type, chunk_start = tok_chunk_type, i
        elif tok_chunk_type != chunk_type or tok_chunk_class == "B":
            # Entity type changed, or an explicit B- tag starts a new chunk.
            chunks.append((chunk_type, chunk_start, i - 1))
            chunk_type, chunk_start = tok_chunk_type, i
    if chunk_type is not None:
        # Bug fix: close the trailing chunk with an inclusive end index
        # (len(seq) - 1), consistent with mid-sequence chunks and with the
        # documented example; previously this used len(seq), which was one
        # past the last token.
        chunks.append((chunk_type, chunk_start, len(seq) - 1))
    return chunks
# noinspection PyTypeChecker
def get_biobert_ner_predictions(test_ehr: HealthRecord) -> List[Tuple[str, int, int]]:
    """
    Get predictions for a single EHR record using BioBERT.
    Parameters
    ----------
    test_ehr : HealthRecord
        The EHR record, this object should have a tokenizer set.
    Returns
    -------
    pred_entities : List[Tuple[str, int, int]]
        List of predicted Entities each with the format
        ("entity", start_idx, end_idx).
    """
    # Window the record into model-sized chunks; "- 2" leaves room for the
    # [CLS] and [SEP] special tokens added during feature conversion below.
    split_points = test_ehr.get_split_points(max_len=BIOBERT_NER_SEQ_LEN - 2)
    examples = []
    for idx in range(len(split_points) - 1):
        words = test_ehr.tokens[split_points[idx]:split_points[idx + 1]]
        # Dummy "O" labels: only the input features are needed at test time.
        examples.append(NerExample(guid=str(split_points[idx]),
                                   words=words,
                                   labels=["O"] * len(words)))
    input_features = convert_examples_to_features(
        examples,
        biobert_ner_labels,
        max_seq_length=BIOBERT_NER_SEQ_LEN,
        tokenizer=biobert_ner_tokenizer,
        cls_token_at_end=False,
        cls_token=biobert_ner_tokenizer.cls_token,
        cls_token_segment_id=0,
        sep_token=biobert_ner_tokenizer.sep_token,
        sep_token_extra=False,
        pad_on_left=bool(biobert_ner_tokenizer.padding_side == "left"),
        pad_token=biobert_ner_tokenizer.pad_token_id,
        pad_token_segment_id=biobert_ner_tokenizer.pad_token_type_id,
        # Padding positions get CrossEntropyLoss's ignore index (-100), which
        # align_predictions later skips.
        pad_token_label_id=nn.CrossEntropyLoss().ignore_index,
        verbose=0)
    test_dataset = NerTestDataset(input_features)
    predictions, label_ids, _ = biobert_ner_trainer.predict(test_dataset)
    predictions = align_predictions(predictions, label_ids)
    # Flatten the prediction list
    predictions = [p for ex in predictions for p in ex]
    input_tokens = test_ehr.get_tokens()
    prev_pred = ""
    final_predictions = []
    idx = 0
    # Re-align predictions with the token stream: WordPiece continuation
    # pieces ("##...") do not consume a prediction; they inherit the previous
    # token's label, converted to an I- tag so chunks stay contiguous.
    for token in input_tokens:
        if token.startswith("##"):
            if prev_pred == "O":
                final_predictions.append(prev_pred)
            else:
                pred_typ = prev_pred.split("-")[-1]
                final_predictions.append("I-" + pred_typ)
        else:
            prev_pred = predictions[idx]
            final_predictions.append(prev_pred)
            idx += 1
    pred_entities = []
    chunk_pred = get_chunks(final_predictions)
    # Convert token-level chunk boundaries to character offsets in the text.
    for ent in chunk_pred:
        pred_entities.append((ent[0],
                              test_ehr.get_char_idx(ent[1])[0],
                              test_ehr.get_char_idx(ent[2])[1]))
    return pred_entities
def get_bilstm_ner_predictions(test_ehr: HealthRecord) -> List[Tuple[str, int, int]]:
    """
    Get predictions for a single EHR record using BiLSTM.
    Parameters
    ----------
    test_ehr : HealthRecord
        The EHR record, this object should have a tokenizer set.
    Returns
    -------
    pred_entities : List[Tuple[str, int, int]]
        List of predicted Entities each with the format
        ("entity", start_idx, end_idx).
    """
    split_points = test_ehr.get_split_points(max_len=BILSTM_NER_SEQ_LEN)
    # Window the token stream into model-sized chunks.
    windows = [test_ehr.tokens[start:end]
               for start, end in zip(split_points[:-1], split_points[1:])]
    predictions = bilstm_learn.predict(windows)
    pred_entities = []
    # Map each window's token-level chunks back to character offsets,
    # shifting by the window's starting token index.
    for offset, labels in zip(split_points[:-1], predictions):
        for label, first_tok, last_tok in get_chunks(labels):
            start_char = test_ehr.get_char_idx(offset + first_tok)[0]
            end_char = test_ehr.get_char_idx(offset + last_tok)[1]
            pred_entities.append((label, start_char, end_char))
    return pred_entities
# noinspection PyTypeChecker
def get_ner_predictions(ehr_record: str, model_name: str = "biobert", record_id: str = "1") -> HealthRecord:
    """
    Get predictions for NER using either BioBERT or BiLSTM.
    Parameters
    --------------
    ehr_record : str
        An EHR record in text format.
    model_name : str
        The model to use for prediction ("biobert" or "bilstm",
        case-insensitive). Default is biobert.
    record_id : str
        The record id of the returned object. Default is 1.
    Returns
    -----------
    A HealthRecord object with entities set.
    Raises
    ------
    AttributeError
        If model_name is not an accepted model name.
    """
    model = model_name.lower()  # normalize once instead of per-branch
    if model == "biobert":
        test_ehr = HealthRecord(record_id=record_id,
                                text=ehr_record,
                                tokenizer=biobert_ner_tokenizer.tokenize,
                                is_training=False)
        predictions = get_biobert_ner_predictions(test_ehr)
    elif model == "bilstm":
        # Fix: propagate record_id here too, so the returned record's id
        # matches the documented contract (previously only the biobert
        # branch passed it).
        test_ehr = HealthRecord(record_id=record_id,
                                text=ehr_record,
                                tokenizer=scispacy_plus_tokenizer,
                                is_training=False)
        predictions = get_bilstm_ner_predictions(test_ehr)
    else:
        raise AttributeError("Accepted model names include 'biobert' "
                             "and 'bilstm'.")
    ent_preds = []
    for i, pred in enumerate(predictions):
        ent = Entity("T%d" % i, label_ent_map[pred[0]], [pred[1], pred[2]])
        ent_text = test_ehr.text[ent[0]:ent[1]]
        # Drop spans with no alphanumeric content (pure punctuation/space).
        if not any(letter.isalnum() for letter in ent_text):
            continue
        ent.set_text(ent_text)
        ent_preds.append(ent)
    test_ehr.entities = ent_preds
    return test_ehr
def get_re_predictions(test_ehr: HealthRecord) -> HealthRecord:
    """
    Get predictions for Relation Extraction.
    Parameters
    -----------
    test_ehr : HealthRecord
        A HealthRecord object with entities set.
    Returns
    --------
    HealthRecord
        The original object with relations set.
    """
    dataset = RETestDataset(test_ehr, biobert_ner_tokenizer,
                            BIOBERT_RE_SEQ_LEN, re_label_list)
    # No candidate entity pairs -> nothing to predict.
    if not len(dataset):
        test_ehr.relations = []
        return test_ehr
    raw_predictions = biobert_re_trainer.predict(test_dataset=dataset).predictions
    class_ids = np.argmax(raw_predictions, axis=1)
    accepted = []
    for relation, class_id in zip(dataset.relation_list, class_ids):
        if class_id == 1:
            # Positive class: keep the relation with a running annotation id.
            relation.ann_id = "R%d" % (len(accepted) + 1)
            accepted.append(relation)
    test_ehr.relations = accepted
    return test_ehr
| [
"logging.getLogger",
"biobert_ner.utils_ner.NerTestDataset",
"torch.nn.CrossEntropyLoss",
"transformers.TrainingArguments",
"os.path.join",
"bilstm_crf_ner.model.ner_model.NERModel",
"numpy.argmax",
"ehr.HealthRecord",
"annotations.Entity",
"biobert_ner.utils_ner.get_labels",
"biobert_re.utils_r... | [((929, 956), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (946, 956), False, 'import logging\n'), ((1251, 1300), 'biobert_ner.utils_ner.get_labels', 'get_labels', (['"""biobert_ner/dataset_full/labels.txt"""'], {}), "('biobert_ner/dataset_full/labels.txt')\n", (1261, 1300), False, 'from biobert_ner.utils_ner import convert_examples_to_features, get_labels, NerTestDataset\n'), ((1692, 1757), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""dmis-lab/biobert-base-cased-v1.1"""'], {}), "('dmis-lab/biobert-base-cased-v1.1')\n", (1721, 1757), False, 'from transformers import AutoModelForTokenClassification, AutoModelForSequenceClassification, TrainingArguments, AutoTokenizer, AutoConfig, Trainer\n'), ((1955, 2008), 'transformers.TrainingArguments', 'TrainingArguments', ([], {'output_dir': '"""/tmp"""', 'do_predict': '(True)'}), "(output_dir='/tmp', do_predict=True)\n", (1972, 2008), False, 'from transformers import AutoModelForTokenClassification, AutoModelForSequenceClassification, TrainingArguments, AutoTokenizer, AutoConfig, Trainer\n'), ((2032, 2096), 'transformers.Trainer', 'Trainer', ([], {'model': 'biobert_ner_model', 'args': 'biobert_ner_training_args'}), '(model=biobert_ner_model, args=biobert_ner_training_args)\n', (2039, 2096), False, 'from transformers import AutoModelForTokenClassification, AutoModelForSequenceClassification, TrainingArguments, AutoTokenizer, AutoConfig, Trainer\n'), ((2396, 2410), 'bilstm_crf_ner.model.config.Config', 'BiLSTMConfig', ([], {}), '()\n', (2408, 2410), True, 'from bilstm_crf_ner.model.config import Config as BiLSTMConfig\n'), ((2426, 2452), 'bilstm_crf_ner.model.ner_model.NERModel', 'BiLSTMModel', (['bilstm_config'], {}), '(bilstm_config)\n', (2437, 2452), True, 'from bilstm_crf_ner.model.ner_model import NERModel as BiLSTMModel\n'), ((2468, 2510), 'bilstm_crf_ner.model.ner_learner.NERLearner', 'BiLSTMLearner', (['bilstm_config', 
'bilstm_model'], {}), '(bilstm_config, bilstm_model)\n', (2481, 2510), True, 'from bilstm_crf_ner.model.ner_learner import NERLearner as BiLSTMLearner\n'), ((3112, 3165), 'transformers.TrainingArguments', 'TrainingArguments', ([], {'output_dir': '"""/tmp"""', 'do_predict': '(True)'}), "(output_dir='/tmp', do_predict=True)\n", (3129, 3165), False, 'from transformers import AutoModelForTokenClassification, AutoModelForSequenceClassification, TrainingArguments, AutoTokenizer, AutoConfig, Trainer\n'), ((3188, 3250), 'transformers.Trainer', 'Trainer', ([], {'model': 'biobert_re_model', 'args': 'biobert_re_training_args'}), '(model=biobert_re_model, args=biobert_re_training_args)\n', (3195, 3250), False, 'from transformers import AutoModelForTokenClassification, AutoModelForSequenceClassification, TrainingArguments, AutoTokenizer, AutoConfig, Trainer\n'), ((1034, 1078), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib.font_manager"""'], {}), "('matplotlib.font_manager')\n", (1051, 1078), False, 'import logging\n'), ((1477, 1527), 'os.path.join', 'os.path.join', (['BIOBERT_NER_MODEL_DIR', '"""config.json"""'], {}), "(BIOBERT_NER_MODEL_DIR, 'config.json')\n", (1489, 1527), False, 'import os\n'), ((1837, 1893), 'os.path.join', 'os.path.join', (['BIOBERT_NER_MODEL_DIR', '"""pytorch_model.bin"""'], {}), "(BIOBERT_NER_MODEL_DIR, 'pytorch_model.bin')\n", (1849, 1893), False, 'import os\n'), ((2572, 2595), 'en_ner_bc5cdr_md.load', 'en_ner_bc5cdr_md.load', ([], {}), '()\n', (2593, 2595), False, 'import en_ner_bc5cdr_md\n'), ((2800, 2849), 'os.path.join', 'os.path.join', (['BIOBERT_RE_MODEL_DIR', '"""config.json"""'], {}), "(BIOBERT_RE_MODEL_DIR, 'config.json')\n", (2812, 2849), False, 'import os\n'), ((2996, 3051), 'os.path.join', 'os.path.join', (['BIOBERT_RE_MODEL_DIR', '"""pytorch_model.bin"""'], {}), "(BIOBERT_RE_MODEL_DIR, 'pytorch_model.bin')\n", (3008, 3051), False, 'import os\n'), ((3775, 3805), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(2)'}), 
'(predictions, axis=2)\n', (3784, 3805), True, 'import numpy as np\n'), ((7338, 7368), 'biobert_ner.utils_ner.NerTestDataset', 'NerTestDataset', (['input_features'], {}), '(input_features)\n', (7352, 7368), False, 'from biobert_ner.utils_ner import convert_examples_to_features, get_labels, NerTestDataset\n'), ((11534, 11619), 'biobert_re.utils_re.RETestDataset', 'RETestDataset', (['test_ehr', 'biobert_ner_tokenizer', 'BIOBERT_RE_SEQ_LEN', 're_label_list'], {}), '(test_ehr, biobert_ner_tokenizer, BIOBERT_RE_SEQ_LEN,\n re_label_list)\n', (11547, 11619), False, 'from biobert_re.utils_re import RETestDataset\n'), ((11846, 11879), 'numpy.argmax', 'np.argmax', (['re_predictions'], {'axis': '(1)'}), '(re_predictions, axis=1)\n', (11855, 11879), True, 'import numpy as np\n'), ((10136, 10252), 'ehr.HealthRecord', 'HealthRecord', ([], {'record_id': 'record_id', 'text': 'ehr_record', 'tokenizer': 'biobert_ner_tokenizer.tokenize', 'is_training': '(False)'}), '(record_id=record_id, text=ehr_record, tokenizer=\n biobert_ner_tokenizer.tokenize, is_training=False)\n', (10148, 10252), False, 'from ehr import HealthRecord\n'), ((10878, 10939), 'annotations.Entity', 'Entity', (["('T%d' % i)", 'label_ent_map[pred[0]]', '[pred[1], pred[2]]'], {}), "('T%d' % i, label_ent_map[pred[0]], [pred[1], pred[2]])\n", (10884, 10939), False, 'from annotations import Entity\n'), ((10466, 10553), 'ehr.HealthRecord', 'HealthRecord', ([], {'text': 'ehr_record', 'tokenizer': 'scispacy_plus_tokenizer', 'is_training': '(False)'}), '(text=ehr_record, tokenizer=scispacy_plus_tokenizer,\n is_training=False)\n', (10478, 10553), False, 'from ehr import HealthRecord\n'), ((7263, 7284), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7282, 7284), False, 'from torch import nn\n'), ((3993, 4014), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4012, 4014), False, 'from torch import nn\n')] |
"""
Tools to process galaxy spectra .fits files from SDSS-II Legacy survey.
Authored by <NAME> 02/13/16
"""
# TODO: add parameter descriptions to SpecProcessor, normalize, and process_fits
from __future__ import absolute_import, print_function, division
import numpy as np
from scipy import interp
import time
import sys
from .io import FitsData
class SpecProcessor(object):
    """
    Perform basic processing of raw spectra.
    Attributes
    ----------
    loglam_grid: ndarray
        Common log10(wavelength) grid all spectra are resampled onto.
    Nsamples: integer
        Number of samples in ``loglam_grid``.
    galaxy_params: numpy record array
        Per-galaxy parameters; must contain an 'EBV' field.
    filenames: string, list, or ndarray
        One .fits filename per galaxy.
    spectra_directory: string
        Directory containing the .fits files (passed through to FitsData).
    Nspectra: integer
        Number of spectra (== len(filenames)).
    """
    def __init__(self, filenames, galaxy_params, spectra_directory=None, n_samples=5000, loglam_grid=None):
        if len(galaxy_params) != len(filenames):
            sys.exit('filenames and galaxy_params must be same length')
        self.galaxy_params = galaxy_params
        self.filenames = filenames
        self.Nspectra = len(self.filenames)
        self.spectra_directory = spectra_directory
        # Fix: test against None explicitly -- `if loglam_grid:` raises
        # "truth value of an array is ambiguous" when an ndarray is passed.
        if loglam_grid is not None:
            self.loglam_grid = loglam_grid
            self.Nsamples = len(loglam_grid)
        else:
            # Default grid: log10(lambda) starting at 3.5 with 1e-4 steps.
            self.loglam_grid = 3.5 + 0.0001 * np.arange(n_samples)
            self.Nsamples = n_samples
    @staticmethod
    def k(wavelength, r_v=3.1):
        """
        Calculate A_wavelength/A_V using CCM 1989 extincton law.
        Parameters
        ----------
        wavelength: float or ndarray
            Wavelength(s) at which to compute the reddening correction.
        r_v: float (default=3.1)
            R_V value assumed in reddening law.
        Returns
        -------
        k: float or ndarray
            Value(s) of k(lambda) at the specified wavelength(s).
        """
        # x in inverse microns. The polynomial below is valid for
        # 1.1 < x < 3.3; all wavelengths in this code lie in 1.35--2.7.
        x = 1. / (wavelength / 10000.)
        y = x - 1.82
        a = 1. + 0.17699 * y - 0.50447 * (y ** 2) - 0.02427 * (y ** 3) + 0.72085 * (y ** 4) + 0.01979 * (
            y ** 5) - 0.77530 * (y ** 6) + 0.32999 * (y ** 7)
        b = 1.41338 * y + 2.28305 * (y ** 2) + 1.07233 * (y ** 3) - 5.38434 * (y ** 4) - 0.62251 * (
            y ** 5) + 5.30260 * (y ** 6) - 2.09002 * (y ** 7)
        return a + b / r_v
    def deredden(self, log_wavelength, flux, ebv):
        """
        Correct flux at specified wavelength(s) for reddening using CCM 1989 extinction law.
        Parameters
        ----------
        log_wavelength: float or ndarray
            log10 of the wavelength(s) at which to compute the correction.
        flux: float or array-like
            Uncorrected flux(es).
        ebv: float
            Value of E(B-V).
        Returns
        -------
        flux_corr: float or ndarray
            Flux(es) corrected for reddening.
        """
        return flux * 10 ** (0.4 * self.k(10 ** log_wavelength) * ebv)
    def normalize(self, spectra, weights):
        """
        Normalize the array of spectra to the mean value of each spectrum
        between 4400 and 4450 A, and multiply inverse variances by the
        square of the normalization.
        Parameters
        ----------
        spectra: ndarray
            Shape (Nspectra, Nsamples); modified in place and returned.
        weights: ndarray
            Inverse variances, same shape; modified in place and returned.
        Returns
        -------
        spectra: ndarray
        weights: ndarray
        """
        # TODO: check that mean flux in this window is nonzero!
        window = (10 ** self.loglam_grid > 4400.) * (10 ** self.loglam_grid < 4450.)
        norm = np.mean(spectra[:, window], axis=1)
        spectra /= norm[:, None]
        weights *= norm[:, None] ** 2
        return spectra, weights
    def process_fits(self, normalize=False, mask=False, return_id=False, indices=None):
        """
        Iterate over all .fits filenames, read in and process spectra:
        shift to restframe, optionally mask bad pixels, correct for
        reddening, and resample onto the common loglam grid.
        Parameters
        ----------
        normalize: boolean (default=False)
            If True, normalize the spectra with self.normalize().
        mask: boolean (default=False)
            If True, set inverse variances to NaN where andmask > 0
            (those samples end up with weight 0).
        return_id: boolean (default=False)
            If True, also return identifying metadata for each spectrum.
        indices: integer, list, or ndarray (default=None)
            Subset of spectra to process; default is all of them.
        Returns
        -------
        spectra: ndarray
        weights: ndarray
        id_dict: dictionary
            Only returned if return_id=True.
        """
        start_time = time.time()
        counter = 0
        spectra = np.zeros((self.Nspectra, self.Nsamples))
        weights = np.zeros((self.Nspectra, self.Nsamples))
        redshifts = []
        plates = []
        mjds = []
        fibers = []
        if indices is not None:
            index_list = indices
        else:
            index_list = np.arange(self.Nspectra)
        for ind in index_list:
            data = FitsData(self.filenames[ind], spectra_directory=self.spectra_directory)
            redshifts.append(data.z)
            plates.append(data.plate)
            mjds.append(data.mjd)
            fibers.append(data.fiber)
            if mask:
                data.ivars[data.andmask > 0] = np.nan
            # Shift to restframe, apply mask, correct for reddening
            loglam = np.log10(data.wavelengths / (1. + data.z))
            ebv = self.galaxy_params['EBV'][ind]
            data.fluxes = self.deredden(loglam, data.fluxes, ebv)
            # Resample to the common grid. np.interp replaces the removed
            # scipy.interp alias (it was the identical function); NaN ivars
            # propagated through interpolation are zeroed (masked) below.
            spectra[ind, :] = np.interp(self.loglam_grid, loglam, data.fluxes, left=0., right=0.)
            weights[ind, :] = np.interp(self.loglam_grid, loglam, data.ivars, left=0., right=0.)
            weights[ind, np.isnan(weights[ind, :])] = 0.
            # Progress report
            if counter % 10 == 0:
                current_time = time.time()
                print('Time to iteration %d: %g' % (counter, current_time - start_time))
            counter += 1
        if normalize:
            spectra, weights = self.normalize(spectra, weights)
        end_time = time.time()
        print('Total time:', end_time - start_time)
        if return_id:
            id_dict = {'redshifts': redshifts, 'plates': plates, 'mjds': mjds, 'fibers': fibers}
            return spectra, weights, id_dict
        else:
            return spectra, weights
| [
"numpy.mean",
"numpy.log10",
"scipy.interp",
"numpy.zeros",
"numpy.isnan",
"sys.exit",
"time.time",
"numpy.arange"
] | [((3424, 3527), 'numpy.mean', 'np.mean', (['spectra[:, (10 ** self.loglam_grid > 4400.0) * (10 ** self.loglam_grid < \n 4450.0)]'], {'axis': '(1)'}), '(spectra[:, (10 ** self.loglam_grid > 4400.0) * (10 ** self.\n loglam_grid < 4450.0)], axis=1)\n', (3431, 3527), True, 'import numpy as np\n'), ((4282, 4293), 'time.time', 'time.time', ([], {}), '()\n', (4291, 4293), False, 'import time\n'), ((4333, 4373), 'numpy.zeros', 'np.zeros', (['(self.Nspectra, self.Nsamples)'], {}), '((self.Nspectra, self.Nsamples))\n', (4341, 4373), True, 'import numpy as np\n'), ((4392, 4432), 'numpy.zeros', 'np.zeros', (['(self.Nspectra, self.Nsamples)'], {}), '((self.Nspectra, self.Nsamples))\n', (4400, 4432), True, 'import numpy as np\n'), ((5926, 5937), 'time.time', 'time.time', ([], {}), '()\n', (5935, 5937), False, 'import time\n'), ((821, 880), 'sys.exit', 'sys.exit', (['"""filenames and galaxy_params must be same length"""'], {}), "('filenames and galaxy_params must be same length')\n", (829, 880), False, 'import sys\n'), ((4620, 4644), 'numpy.arange', 'np.arange', (['self.Nspectra'], {}), '(self.Nspectra)\n', (4629, 4644), True, 'import numpy as np\n'), ((5082, 5125), 'numpy.log10', 'np.log10', (['(data.wavelengths / (1.0 + data.z))'], {}), '(data.wavelengths / (1.0 + data.z))\n', (5090, 5125), True, 'import numpy as np\n'), ((5380, 5446), 'scipy.interp', 'interp', (['self.loglam_grid', 'loglam', 'data.fluxes'], {'left': '(0.0)', 'right': '(0.0)'}), '(self.loglam_grid, loglam, data.fluxes, left=0.0, right=0.0)\n', (5386, 5446), False, 'from scipy import interp\n'), ((5475, 5540), 'scipy.interp', 'interp', (['self.loglam_grid', 'loglam', 'data.ivars'], {'left': '(0.0)', 'right': '(0.0)'}), '(self.loglam_grid, loglam, data.ivars, left=0.0, right=0.0)\n', (5481, 5540), False, 'from scipy import interp\n'), ((5692, 5703), 'time.time', 'time.time', ([], {}), '()\n', (5701, 5703), False, 'import time\n'), ((1228, 1248), 'numpy.arange', 'np.arange', (['n_samples'], {}), 
'(n_samples)\n', (1237, 1248), True, 'import numpy as np\n'), ((5564, 5589), 'numpy.isnan', 'np.isnan', (['weights[ind, :]'], {}), '(weights[ind, :])\n', (5572, 5589), True, 'import numpy as np\n')] |
import numpy as np
from .population import Population
from spike_swarm_sim.utils import eigendecomposition, normalize
class CMA_EA_Population(Population):
def __init__(self, *args, **kwargs):
super(CMA_EA_Population, self).__init__(*args, **kwargs)
self.mu = int(.5 * self.pop_size)
self.weights = np.log(self.mu+.5) - np.log(np.arange(1, self.mu))
self.weights /= self.weights.sum()
self.mu_eff = 1/np.linalg.norm(self.weights)**2
# Step sizes
self.sigma = 1
self.cc, self.cs, self.mu_cov, self.c_cov, self.ds = None, None, None, None, None
# Strategy Parameters
self.strategy_m = []
self.strategy_C = []
self.ps = []
self.evo_path = []
self.B, self.D, self.Bt = [], [], []
self.num_evals = 0 #! MUCHO OJO CON LOAD
def _sample(self):
sample = np.random.multivariate_normal(np.zeros_like(self.strategy_m), np.eye(*self.strategy_C.shape))
return self.strategy_m + self.sigma * self.B.dot(self.D).dot(sample)
def sample(self):
return [self._sample() for _ in range(self.pop_size)]
def step(self, fitness_vector):
self.num_evals += len(fitness_vector)
selected, selected_fitness = self.selection_operator(self.population.copy(),\
fitness_vector.copy(), self.mu)
fitness_order = np.argsort(fitness_vector.copy())[::-1]
selected = [self.population[idx].copy() for idx in fitness_order[:self.mu]]
# Update mean vector
old_mean = self.strategy_m.copy()
self.strategy_m = np.sum([w_i * x_i for w_i, x_i in zip(self.weights, selected)], 0)
# if self.num_evals % (self.pop_size / (self.c1 + self.cmu)/self.strategy_m.shape[0]/10) == 0:
self.ps = (1-self.cs)*self.ps + np.sqrt(self.cs*(2-self.cs)*self.mu_eff)\
* (self.B.dot(self.D).dot(self.B.T)) * ((self.strategy_m-old_mean)/self.sigma)
hsig = np.linalg.norm(self.ps)/np.sqrt(1-(1-self.cs)**(2*self.num_evals/self.pop_size))\
< 1.4 + 2 / ((self.strategy_m.shape[0]+1)) * self.chiN
self.evo_path = (1-self.cc) * self.evo_path +\
hsig*np.sqrt(self.cc*(2-self.cc)*self.mu_eff)\
* ((self.strategy_m-old_mean)/self.sigma)
# Update Covariance matrix
y_vec = [(v - old_mean) / self.sigma for v in selected]
# self.strategy_C = (1-self.c_cov) * self.strategy_C\
# + self.c_cov *((1 / self.mu_cov) * (np.outer(self.evo_path, self.evo_path))\
# + (1/(1-self.mu_cov))*np.sum([w_i*np.outer(y_i, y_i)\
# for w_i, y_i in zip(self.weights, y_vec)], 0))
self.strategy_C = (1-self.c1-self.c_mu) * self.strategy_C\
+ self.c1 * np.outer(self.evo_path, self.evo_path)\
+ self.c_mu*np.sum([w_i*np.outer(y_i, y_i)\
for w_i, y_i in zip(self.weights, y_vec)], 0)
# Update sigma
self.sigma = self.sigma * np.exp(\
min(0, (self.cs/self.ds) * (np.linalg.norm(self.ps)/self.chiN-1)))
self.strategy_C = np.triu(self.strategy_C) + np.triu(self.strategy_C).T
self.D, self.B = np.linalg.eig(self.strategy_C)
self.D = np.diag(self.D**-.5)
# Finally sample new population
self.population = self.sample()
self.population = [np.clip(v, a_min=self.min_vector, a_max=self.max_vector) for v in self.population]
def initialize(self, interface):
    """Initialise all CMA-ES state (mean, covariance, learning rates,
    evolution paths, eigen-decomposition and population) from the
    problem ``interface``."""
    # Per-object genotype segment lengths, queried from the interface.
    self.segment_lengths = [interface.submit_query(query, primitive='LEN')
                            for query in self.objects]
    dim = interface.toGenotype(self.objects).shape[0]
    # The mean starts at a uniform random point inside the search box.
    span = self.max_vector - self.min_vector
    self.strategy_m = self.min_vector + np.random.random(size=dim) * span
    self.strategy_C = np.eye(dim)
    n = self.strategy_m.shape[0]
    # Learning rates / damping in Hansen's standard parametrisation
    # (c_mu and c_cov are fixed small constants here).
    self.cc = (4 + self.mu_eff / n) / (4 + n + 2 * self.mu_eff / n)
    self.cs = (self.mu_eff + 2) / (5 + n + self.mu_eff)
    self.c1 = 2 / (self.mu_eff + (n + 1.3) ** 2)
    self.c_mu = 1e-4
    self.ds = 2 * self.mu_eff / self.pop_size + 0.3 + self.cs
    self.c_cov = 1e-3
    self.mu_cov = self.mu
    # Approximation of E||N(0, I)|| used for step-size control.
    self.chiN = np.sqrt(n) * (1 - 1 / (4 * n) + 1 / (21 * n ** 2))
    # Dynamics: zero evolution paths, identity eigen-decomposition.
    self.evo_path = np.zeros_like(self.strategy_m)
    self.ps = np.zeros_like(self.strategy_m)
    self.B = np.eye(n)
    self.Bt = np.eye(n)
    self.D = np.eye(n)
    # Initial population, clipped into the feasible box.
    raw = [self._sample() for _ in range(self.pop_size)]
    self.population = [np.clip(v, a_min=self.min_vector, a_max=self.max_vector)
                       for v in raw]
"numpy.clip",
"numpy.eye",
"numpy.sqrt",
"numpy.linalg.eig",
"numpy.arange",
"numpy.random.random",
"numpy.log",
"numpy.diag",
"numpy.outer",
"numpy.linalg.norm",
"numpy.zeros_like",
"numpy.triu"
] | [((3401, 3431), 'numpy.linalg.eig', 'np.linalg.eig', (['self.strategy_C'], {}), '(self.strategy_C)\n', (3414, 3431), True, 'import numpy as np\n'), ((3450, 3473), 'numpy.diag', 'np.diag', (['(self.D ** -0.5)'], {}), '(self.D ** -0.5)\n', (3457, 3473), True, 'import numpy as np\n'), ((4031, 4054), 'numpy.eye', 'np.eye', (['genotype_length'], {}), '(genotype_length)\n', (4037, 4054), True, 'import numpy as np\n'), ((4672, 4702), 'numpy.zeros_like', 'np.zeros_like', (['self.strategy_m'], {}), '(self.strategy_m)\n', (4685, 4702), True, 'import numpy as np\n'), ((4722, 4752), 'numpy.zeros_like', 'np.zeros_like', (['self.strategy_m'], {}), '(self.strategy_m)\n', (4735, 4752), True, 'import numpy as np\n'), ((4771, 4803), 'numpy.eye', 'np.eye', (['self.strategy_m.shape[0]'], {}), '(self.strategy_m.shape[0])\n', (4777, 4803), True, 'import numpy as np\n'), ((4823, 4855), 'numpy.eye', 'np.eye', (['self.strategy_m.shape[0]'], {}), '(self.strategy_m.shape[0])\n', (4829, 4855), True, 'import numpy as np\n'), ((4874, 4906), 'numpy.eye', 'np.eye', (['self.strategy_m.shape[0]'], {}), '(self.strategy_m.shape[0])\n', (4880, 4906), True, 'import numpy as np\n'), ((335, 356), 'numpy.log', 'np.log', (['(self.mu + 0.5)'], {}), '(self.mu + 0.5)\n', (341, 356), True, 'import numpy as np\n'), ((943, 973), 'numpy.zeros_like', 'np.zeros_like', (['self.strategy_m'], {}), '(self.strategy_m)\n', (956, 973), True, 'import numpy as np\n'), ((975, 1005), 'numpy.eye', 'np.eye', (['*self.strategy_C.shape'], {}), '(*self.strategy_C.shape)\n', (981, 1005), True, 'import numpy as np\n'), ((3317, 3341), 'numpy.triu', 'np.triu', (['self.strategy_C'], {}), '(self.strategy_C)\n', (3324, 3341), True, 'import numpy as np\n'), ((3582, 3638), 'numpy.clip', 'np.clip', (['v'], {'a_min': 'self.min_vector', 'a_max': 'self.max_vector'}), '(v, a_min=self.min_vector, a_max=self.max_vector)\n', (3589, 3638), True, 'import numpy as np\n'), ((4567, 4577), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (4574, 4577), 
True, 'import numpy as np\n'), ((5012, 5068), 'numpy.clip', 'np.clip', (['v'], {'a_min': 'self.min_vector', 'a_max': 'self.max_vector'}), '(v, a_min=self.min_vector, a_max=self.max_vector)\n', (5019, 5068), True, 'import numpy as np\n'), ((363, 384), 'numpy.arange', 'np.arange', (['(1)', 'self.mu'], {}), '(1, self.mu)\n', (372, 384), True, 'import numpy as np\n'), ((456, 484), 'numpy.linalg.norm', 'np.linalg.norm', (['self.weights'], {}), '(self.weights)\n', (470, 484), True, 'import numpy as np\n'), ((2076, 2099), 'numpy.linalg.norm', 'np.linalg.norm', (['self.ps'], {}), '(self.ps)\n', (2090, 2099), True, 'import numpy as np\n'), ((2100, 2166), 'numpy.sqrt', 'np.sqrt', (['(1 - (1 - self.cs) ** (2 * self.num_evals / self.pop_size))'], {}), '(1 - (1 - self.cs) ** (2 * self.num_evals / self.pop_size))\n', (2107, 2166), True, 'import numpy as np\n'), ((3344, 3368), 'numpy.triu', 'np.triu', (['self.strategy_C'], {}), '(self.strategy_C)\n', (3351, 3368), True, 'import numpy as np\n'), ((3929, 3967), 'numpy.random.random', 'np.random.random', ([], {'size': 'genotype_length'}), '(size=genotype_length)\n', (3945, 3967), True, 'import numpy as np\n'), ((1904, 1950), 'numpy.sqrt', 'np.sqrt', (['(self.cs * (2 - self.cs) * self.mu_eff)'], {}), '(self.cs * (2 - self.cs) * self.mu_eff)\n', (1911, 1950), True, 'import numpy as np\n'), ((2317, 2363), 'numpy.sqrt', 'np.sqrt', (['(self.cc * (2 - self.cc) * self.mu_eff)'], {}), '(self.cc * (2 - self.cc) * self.mu_eff)\n', (2324, 2363), True, 'import numpy as np\n'), ((2955, 2993), 'numpy.outer', 'np.outer', (['self.evo_path', 'self.evo_path'], {}), '(self.evo_path, self.evo_path)\n', (2963, 2993), True, 'import numpy as np\n'), ((3044, 3062), 'numpy.outer', 'np.outer', (['y_i', 'y_i'], {}), '(y_i, y_i)\n', (3052, 3062), True, 'import numpy as np\n'), ((3248, 3271), 'numpy.linalg.norm', 'np.linalg.norm', (['self.ps'], {}), '(self.ps)\n', (3262, 3271), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from tnpy.linalg import KrylovExpm
class TestLinAlg(unittest.TestCase):
    """Tests for the linear-algebra helpers (currently placeholders)."""

    def test_eigshmv(self):
        # Placeholder: no assertions for eigshmv yet.
        pass
class TestKrylovExpm(unittest.TestCase):
    """Smoke tests for the KrylovExpm solver."""

    def test_construct_krylov_space(self):
        # Building the Krylov subspace for a random 50x50 operator must
        # complete without raising.
        operator = np.random.random((50, 50))
        start_vector = np.random.random(50)
        solver = KrylovExpm(1e-3, operator, start_vector)
        solver.construct_krylov_space()
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"unittest.main",
"tnpy.linalg.KrylovExpm",
"numpy.random.random"
] | [((468, 483), 'unittest.main', 'unittest.main', ([], {}), '()\n', (481, 483), False, 'import unittest\n'), ((292, 318), 'numpy.random.random', 'np.random.random', (['(50, 50)'], {}), '((50, 50))\n', (308, 318), True, 'import numpy as np\n'), ((332, 352), 'numpy.random.random', 'np.random.random', (['(50)'], {}), '(50)\n', (348, 352), True, 'import numpy as np\n'), ((370, 396), 'tnpy.linalg.KrylovExpm', 'KrylovExpm', (['(0.001)', 'mat', 'v0'], {}), '(0.001, mat, v0)\n', (380, 396), False, 'from tnpy.linalg import KrylovExpm\n')] |
"""
3D Point Cloud Visualization
Original Author: https://github.com/argoai/argoverse-api
Modified by <NAME>
Date October 2019
"""
import os
import numpy as np
import argparse
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from mpl_toolkits.mplot3d import Axes3D
from mayavi import mlab
from typing import Any, Iterable, List, Optional, Tuple, Union, cast
#: A stub representing mayavi_wrapper.mlab figure types
Figure = Any
#: A 3D Point
Point = np.ndarray
#: An array of 3D points
PointCloud = np.ndarray
#: Any numeric type
Number = Union[int, float]
#: RGB color created from 0.0 to 1.0 values
Color = Tuple[float, float, float]
#: Mayavi figure size in pixels as (width, height)
FigSize = Tuple[float, float]
#: An (x, y, z) position
Coordinate = Tuple[float, float, float]
def plot_points_3D_mayavi(
    points: np.ndarray,
    bird: bool,
    fig: Figure,
    per_pt_color_strengths: np.ndarray = None,
    fixed_color: Optional[Color] = (1, 0, 0),
    colormap: str = "spectral",
) -> Figure:
    """Render a point cloud into a Mayavi figure.

    Points are drawn in 'point' mode; scale factor and line width have no
    effect on that mode, so neither is exposed as a parameter.

    Args:
        points: (N, 3) array of points to visualize
        bird: if True shade by height (z); otherwise shade by x
        fig: target Mayavi figure
        per_pt_color_strengths: optional per-point scalar used for shading
        fixed_color: fixed (r, g, b) color overriding the colormap
        colormap: colormap name, e.g. 'spectral' or 'gnuplot'

    Returns:
        The updated figure, or None when there is nothing to draw.
    """
    if len(points) == 0:
        return None

    strengths = per_pt_color_strengths
    if strengths is None or len(strengths) != len(points):
        # Fall back to shading by z (bird view) or x (worm view).
        strengths = points[:, 2] if bird else points[:, 0]

    mlab.points3d(
        points[:, 0],
        points[:, 1],
        points[:, 2],
        strengths,
        mode="point",  # each return is one pixel, not a sphere/cube
        colormap=colormap,
        color=fixed_color,
        figure=fig,
    )
    return fig
def draw_coordinate_frame_at_origin(fig: Figure) -> Figure:
    """Draw the origin and the three standard basis vectors.

    e_1 is drawn in red, e_2 in green and e_3 in blue, each of length 2
    so the frame is visible at typical viewing distances.

    Args:
        fig: Mayavi figure to draw into
    Returns:
        The updated figure.
    """
    # White sphere marking the origin.
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode="sphere", scale_factor=0.2)
    basis = np.array([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]], dtype=np.float64)
    # One line segment per basis vector, colored r/g/b respectively.
    for row, rgb in enumerate(((1, 0, 0), (0, 1, 0), (0, 0, 1))):
        mlab.plot3d(
            [0, basis[row, 0]], [0, basis[row, 1]], [0, basis[row, 2]],
            color=rgb, tube_radius=None, figure=fig
        )
    return fig
def draw_lidar(
    point_cloud: np.ndarray, bird: bool = True, colormap: str = "jet", fig: Optional[Figure] = None, bgcolor: Color = (0, 0, 0), fig_size: FigSize = (200, 200), focalpoint: Coordinate = (0, 0, 0), elevation: int = 0, distance: float = 62.0
) -> Figure:
    """Render a point cloud, draw the origin frame and position the camera.

    The cloud is densified with 400 slightly offset copies so sparse
    returns remain visible at rendering resolution.

    Args:
        point_cloud: (N, 3) array of points to render
        bird: shade by height when True, by depth otherwise
        colormap: colormap name ('spectral', 'gnuplot' and 'jet' work well)
        fig: pre-existing Mayavi figure; a new one is created when None
        bgcolor: background color of a newly created figure
        fig_size: size of a newly created figure
        focalpoint: camera focal point
        elevation: camera elevation angle in degrees
        distance: camera distance from the focal point

    Returns:
        The updated (or newly created) Mayavi figure.
    """
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=fig_size)
    # Densify: the original cloud plus copies shifted by +/- 0.0001 * k.
    layers = [point_cloud]
    for k in range(1, 201):
        layers.append(point_cloud + 0.0001 * k)
        layers.append(point_cloud - 0.0001 * k)
    point_cloud = np.concatenate(layers, 0)
    # Draw the points, shaded from their own coordinates.
    fig = plot_points_3D_mayavi(
        points=point_cloud, bird=bird, fig=fig, per_pt_color_strengths=None, fixed_color=None, colormap=colormap
    )
    fig = draw_coordinate_frame_at_origin(fig)
    mlab.view(
        azimuth=180, elevation=elevation, focalpoint=focalpoint, distance=distance, figure=fig
    )
    return fig
def mkdirs(name):
    """Create directory ``name`` (including parents) if it does not exist.

    Uses ``exist_ok=True`` so there is no race between an existence check
    and the creation: the previous check-then-create pattern could raise
    FileExistsError when another process created the directory in between.
    """
    os.makedirs(name, exist_ok=True)
def _render_sequence(gth, frame, out_dir, bird, elevation, distance):
    """Render one clip (5 context, 5 ground-truth, 5 predicted frames)
    into ``out_dir`` as PNGs, then assemble each series into a GIF."""
    mkdirs(out_dir)
    for j in range(5):
        for tag, cloud in (('ctx', gth[j]), ('gth', gth[j + 5]), ('prd', frame[j])):
            draw_lidar(cloud, bird=bird, focalpoint=(0, 0, 0), elevation=elevation, distance=distance)
            mlab.savefig(os.path.join(out_dir, '%s-%02d.png' % (tag, j + 1)))
            mlab.close()
    for tag in ('ctx', 'gth', 'prd'):
        os.system('convert -delay 20 -loop 0 %s/%s-*.png %s/%s.gif' % (out_dir, tag, out_dir, tag))


if __name__ == '__main__':
    R = 5  # half-extent (metres) of the region covered by the clouds
    gths = np.load('test-argo-5m-1024point-10step.npy')
    frames = np.load('test-predicted-frames.npy')
    bird_dir = 'bird'
    worm_dir = 'worm'
    mkdirs(bird_dir)
    mkdirs(worm_dir)
    # Camera distance: twice the diagonal of the RxRxR cube.
    distance = 2 * np.sqrt(3 * R * R)
    for i in range(gths.shape[0]):
        gth = gths[i]
        frame = frames[i]
        # BUGFIX: the original also read `flow = flows[i]`, but `flows`
        # was never defined (NameError at runtime) and `flow` was never
        # used, so the line was removed.
        # bird's-eye view
        _render_sequence(gth, frame, os.path.join(bird_dir, '%04d' % (i + 1)),
                         bird=True, elevation=0, distance=distance)
        # worm's-eye view
        _render_sequence(gth, frame, os.path.join(worm_dir, '%04d' % (i + 1)),
                         bird=False, elevation=90, distance=distance)
| [
"os.path.exists",
"mayavi.mlab.points3d",
"mayavi.mlab.view",
"numpy.sqrt",
"os.makedirs",
"os.path.join",
"mayavi.mlab.figure",
"mayavi.mlab.close",
"numpy.array",
"mayavi.mlab.plot3d",
"numpy.concatenate",
"os.system",
"numpy.load"
] | [((1871, 2023), 'mayavi.mlab.points3d', 'mlab.points3d', (['points[:, 0]', 'points[:, 1]', 'points[:, 2]', 'per_pt_color_strengths'], {'mode': '"""point"""', 'colormap': 'colormap', 'color': 'fixed_color', 'figure': 'fig'}), "(points[:, 0], points[:, 1], points[:, 2],\n per_pt_color_strengths, mode='point', colormap=colormap, color=\n fixed_color, figure=fig)\n", (1884, 2023), False, 'from mayavi import mlab\n'), ((2724, 2796), 'mayavi.mlab.points3d', 'mlab.points3d', (['(0)', '(0)', '(0)'], {'color': '(1, 1, 1)', 'mode': '"""sphere"""', 'scale_factor': '(0.2)'}), "(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)\n", (2737, 2796), False, 'from mayavi import mlab\n'), ((2857, 2936), 'numpy.array', 'np.array', (['[[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]'], {'dtype': 'np.float64'}), '([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]], dtype=np.float64)\n', (2865, 2936), True, 'import numpy as np\n'), ((2958, 3071), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, axes[0, 0]]', '[0, axes[0, 1]]', '[0, axes[0, 2]]'], {'color': '(1, 0, 0)', 'tube_radius': 'None', 'figure': 'fig'}), '([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0,\n 0), tube_radius=None, figure=fig)\n', (2969, 3071), False, 'from mayavi import mlab\n'), ((3105, 3218), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, axes[1, 0]]', '[0, axes[1, 1]]', '[0, axes[1, 2]]'], {'color': '(0, 1, 0)', 'tube_radius': 'None', 'figure': 'fig'}), '([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1,\n 0), tube_radius=None, figure=fig)\n', (3116, 3218), False, 'from mayavi import mlab\n'), ((3251, 3364), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, axes[2, 0]]', '[0, axes[2, 1]]', '[0, axes[2, 2]]'], {'color': '(0, 0, 1)', 'tube_radius': 'None', 'figure': 'fig'}), '([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0,\n 1), tube_radius=None, figure=fig)\n', (3262, 3364), False, 'from mayavi import mlab\n'), ((5080, 5102), 'numpy.concatenate', 'np.concatenate', 
(['tmp', '(0)'], {}), '(tmp, 0)\n', (5094, 5102), True, 'import numpy as np\n'), ((5443, 5545), 'mayavi.mlab.view', 'mlab.view', ([], {'azimuth': '(180)', 'elevation': 'elevation', 'focalpoint': 'focalpoint', 'distance': 'distance', 'figure': 'fig'}), '(azimuth=180, elevation=elevation, focalpoint=focalpoint, distance\n =distance, figure=fig)\n', (5452, 5545), False, 'from mayavi import mlab\n'), ((5699, 5743), 'numpy.load', 'np.load', (['"""test-argo-5m-1024point-10step.npy"""'], {}), "('test-argo-5m-1024point-10step.npy')\n", (5706, 5743), True, 'import numpy as np\n'), ((5757, 5793), 'numpy.load', 'np.load', (['"""test-predicted-frames.npy"""'], {}), "('test-predicted-frames.npy')\n", (5764, 5793), True, 'import numpy as np\n'), ((4587, 4675), 'mayavi.mlab.figure', 'mlab.figure', ([], {'figure': 'None', 'bgcolor': 'bgcolor', 'fgcolor': 'None', 'engine': 'None', 'size': 'fig_size'}), '(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=\n fig_size)\n', (4598, 4675), False, 'from mayavi import mlab\n'), ((5600, 5620), 'os.path.exists', 'os.path.exists', (['name'], {}), '(name)\n', (5614, 5620), False, 'import os\n'), ((5630, 5647), 'os.makedirs', 'os.makedirs', (['name'], {}), '(name)\n', (5641, 5647), False, 'import os\n'), ((5900, 5918), 'numpy.sqrt', 'np.sqrt', (['(3 * R * R)'], {}), '(3 * R * R)\n', (5907, 5918), True, 'import numpy as np\n'), ((6236, 6276), 'os.path.join', 'os.path.join', (['bird_dir', "('%04d' % (i + 1))"], {}), "(bird_dir, '%04d' % (i + 1))\n", (6248, 6276), False, 'import os\n'), ((6935, 7026), 'os.system', 'os.system', (["('convert -delay 20 -loop 0 %s/ctx-*.png %s/ctx.gif' % (curr_bird, curr_bird))"], {}), "('convert -delay 20 -loop 0 %s/ctx-*.png %s/ctx.gif' % (curr_bird,\n curr_bird))\n", (6944, 7026), False, 'import os\n'), ((7029, 7120), 'os.system', 'os.system', (["('convert -delay 20 -loop 0 %s/gth-*.png %s/gth.gif' % (curr_bird, curr_bird))"], {}), "('convert -delay 20 -loop 0 %s/gth-*.png %s/gth.gif' % (curr_bird,\n 
curr_bird))\n", (7038, 7120), False, 'import os\n'), ((7123, 7214), 'os.system', 'os.system', (["('convert -delay 20 -loop 0 %s/prd-*.png %s/prd.gif' % (curr_bird, curr_bird))"], {}), "('convert -delay 20 -loop 0 %s/prd-*.png %s/prd.gif' % (curr_bird,\n curr_bird))\n", (7132, 7214), False, 'import os\n'), ((7256, 7296), 'os.path.join', 'os.path.join', (['worm_dir', "('%04d' % (i + 1))"], {}), "(worm_dir, '%04d' % (i + 1))\n", (7268, 7296), False, 'import os\n'), ((7962, 8053), 'os.system', 'os.system', (["('convert -delay 20 -loop 0 %s/ctx-*.png %s/ctx.gif' % (curr_worm, curr_worm))"], {}), "('convert -delay 20 -loop 0 %s/ctx-*.png %s/ctx.gif' % (curr_worm,\n curr_worm))\n", (7971, 8053), False, 'import os\n'), ((8056, 8147), 'os.system', 'os.system', (["('convert -delay 20 -loop 0 %s/gth-*.png %s/gth.gif' % (curr_worm, curr_worm))"], {}), "('convert -delay 20 -loop 0 %s/gth-*.png %s/gth.gif' % (curr_worm,\n curr_worm))\n", (8065, 8147), False, 'import os\n'), ((8150, 8241), 'os.system', 'os.system', (["('convert -delay 20 -loop 0 %s/prd-*.png %s/prd.gif' % (curr_worm, curr_worm))"], {}), "('convert -delay 20 -loop 0 %s/prd-*.png %s/prd.gif' % (curr_worm,\n curr_worm))\n", (8159, 8241), False, 'import os\n'), ((6512, 6524), 'mayavi.mlab.close', 'mlab.close', ([], {}), '()\n', (6522, 6524), False, 'from mayavi import mlab\n'), ((6713, 6725), 'mayavi.mlab.close', 'mlab.close', ([], {}), '()\n', (6723, 6725), False, 'from mayavi import mlab\n'), ((6914, 6926), 'mayavi.mlab.close', 'mlab.close', ([], {}), '()\n', (6924, 6926), False, 'from mayavi import mlab\n'), ((7534, 7546), 'mayavi.mlab.close', 'mlab.close', ([], {}), '()\n', (7544, 7546), False, 'from mayavi import mlab\n'), ((7737, 7749), 'mayavi.mlab.close', 'mlab.close', ([], {}), '()\n', (7747, 7749), False, 'from mayavi import mlab\n'), ((7940, 7952), 'mayavi.mlab.close', 'mlab.close', ([], {}), '()\n', (7950, 7952), False, 'from mayavi import mlab\n'), ((6453, 6502), 'os.path.join', 'os.path.join', 
(['curr_bird', "('ctx-%02d.png' % (j + 1))"], {}), "(curr_bird, 'ctx-%02d.png' % (j + 1))\n", (6465, 6502), False, 'import os\n'), ((6654, 6703), 'os.path.join', 'os.path.join', (['curr_bird', "('gth-%02d.png' % (j + 1))"], {}), "(curr_bird, 'gth-%02d.png' % (j + 1))\n", (6666, 6703), False, 'import os\n'), ((6855, 6904), 'os.path.join', 'os.path.join', (['curr_bird', "('prd-%02d.png' % (j + 1))"], {}), "(curr_bird, 'prd-%02d.png' % (j + 1))\n", (6867, 6904), False, 'import os\n'), ((7475, 7524), 'os.path.join', 'os.path.join', (['curr_worm', "('ctx-%02d.png' % (j + 1))"], {}), "(curr_worm, 'ctx-%02d.png' % (j + 1))\n", (7487, 7524), False, 'import os\n'), ((7678, 7727), 'os.path.join', 'os.path.join', (['curr_worm', "('gth-%02d.png' % (j + 1))"], {}), "(curr_worm, 'gth-%02d.png' % (j + 1))\n", (7690, 7727), False, 'import os\n'), ((7881, 7930), 'os.path.join', 'os.path.join', (['curr_worm', "('prd-%02d.png' % (j + 1))"], {}), "(curr_worm, 'prd-%02d.png' % (j + 1))\n", (7893, 7930), False, 'import os\n')] |
from __future__ import print_function, division
import sys
import os, os.path
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('agg')  # headless backend; must be selected before importing pyplot
import matplotlib.pyplot as plt
# Period (days) and planet-radius ranges over which the power-law
# population model is defined and normalised.
period_rng = (50, 300)
rp_rng = (0.75, 20)
# Read synthetic catalog
# First CLI argument: path to the HDF5 catalog file.
koi_file = sys.argv[1]
kois = pd.read_hdf(koi_file, 'kois')
theta_true = list(pd.read_hdf(koi_file, 'theta'))
# Load completeness contour
completeness_file = 'completeness.npz'
d = np.load(completeness_file)
# Detection completeness evaluated on the (period, rp) grid below.
comp = d['comp']
period_grid = d['period_grid']
rp_grid = d['rp_grid']
comp_inds = d['inds'] #indices of stellar table used
def population_model(theta, period, rp):
lnf0, beta, alpha = theta
v = np.exp(lnf0) * np.ones_like(period)
for x, rng, n in zip((period, rp),
(period_rng, rp_rng),
(beta, alpha)):
n1 = n + 1
v *= x**n*n1 / (rng[1]**n1-rng[0]**n1)
return v
# The ln-likelihood function given at the top of this post.
koi_periods = np.array(kois.koi_period)
koi_rps = np.array(kois.koi_prad)
# Cell areas of the completeness grid, used to normalise the rate integral.
vol = np.diff(period_grid, axis=0)[:, :-1] * np.diff(rp_grid, axis=1)[:-1, :]
def lnlike(theta):
    """Poisson-process ln-likelihood of the observed catalog under theta."""
    # Observable rate = population model times detection completeness.
    pop = population_model(theta, period_grid, rp_grid) * comp
    # Average opposite cell corners to approximate per-cell midpoint values.
    pop = 0.5 * (pop[:-1, :-1] + pop[1:, 1:])
    norm = np.sum(pop * vol)
    # Sum of log-rates at the observed planets minus the expected count.
    ll = np.sum(np.log(population_model(theta, koi_periods, koi_rps))) - norm
    return ll if np.isfinite(ll) else -np.inf
# The ln-probability is proportional to the ln-likelihood because the
# priors below are uniform.
bounds = [(-5, 5), (-5, 5), (-5, 5)]
def lnprob(theta):
    """Ln-posterior: the ln-likelihood inside the uniform prior box,
    -inf outside it."""
    for value, (lo, hi) in zip(theta, bounds):
        if not lo < value < hi:
            return -np.inf
    return lnlike(theta)
# The negative ln-likelihood is useful for optimization because
# optimizers want to *minimize* the objective.
def nll(theta):
    """Negative ln-likelihood; non-finite values become a large penalty."""
    value = lnlike(theta)
    return -value if np.isfinite(value) else 1e15
# Optimize, and then run the chain
from scipy.optimize import minimize
# Starting guess for (ln F, beta, alpha).
theta_0 = np.array([-0.3, -1.5, -0.8])
r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
import emcee
ndim, nwalkers = len(r.x), 16
# Initialise walkers in a tight Gaussian ball around the optimum.
pos = [r.x + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Burn in.
pos, _, _ = sampler.run_mcmc(pos, 1000)
sampler.reset()
# Production.
pos, _, _ = sampler.run_mcmc(pos, 4000)
import corner
# NOTE(review): truths reorders theta_true as (0, 2, 1) -- presumably the
# stored order is (lnF, alpha, beta); confirm against the catalog writer.
corner.corner(sampler.flatchain, labels=[r"$\ln F$", r"$\beta$", r"$\alpha$"],
              truths=(theta_true[0], theta_true[2], theta_true[1]))
# Save the corner plot and the flattened chains next to the input catalog.
filebase = os.path.splitext(koi_file)[0]
plt.savefig('{}_corner.png'.format(filebase))
np.save('{}_chains.npy'.format(filebase), sampler.flatchain)
| [
"numpy.ones_like",
"matplotlib.use",
"scipy.optimize.minimize",
"os.path.splitext",
"numpy.diff",
"emcee.EnsembleSampler",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.isfinite",
"corner.corner",
"numpy.load",
"numpy.random.randn",
"pandas.read_hdf"
] | [((137, 158), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (151, 158), False, 'import matplotlib\n'), ((292, 321), 'pandas.read_hdf', 'pd.read_hdf', (['koi_file', '"""kois"""'], {}), "(koi_file, 'kois')\n", (303, 321), True, 'import pandas as pd\n'), ((446, 472), 'numpy.load', 'np.load', (['completeness_file'], {}), '(completeness_file)\n', (453, 472), True, 'import numpy as np\n'), ((1043, 1068), 'numpy.array', 'np.array', (['kois.koi_period'], {}), '(kois.koi_period)\n', (1051, 1068), True, 'import numpy as np\n'), ((1079, 1102), 'numpy.array', 'np.array', (['kois.koi_prad'], {}), '(kois.koi_prad)\n', (1087, 1102), True, 'import numpy as np\n'), ((2055, 2083), 'numpy.array', 'np.array', (['[-0.3, -1.5, -0.8]'], {}), '([-0.3, -1.5, -0.8])\n', (2063, 2083), True, 'import numpy as np\n'), ((2088, 2144), 'scipy.optimize.minimize', 'minimize', (['nll', 'theta_0'], {'method': '"""L-BFGS-B"""', 'bounds': 'bounds'}), "(nll, theta_0, method='L-BFGS-B', bounds=bounds)\n", (2096, 2144), False, 'from scipy.optimize import minimize\n'), ((2268, 2313), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnprob'], {}), '(nwalkers, ndim, lnprob)\n', (2289, 2313), False, 'import emcee\n'), ((2453, 2589), 'corner.corner', 'corner.corner', (['sampler.flatchain'], {'labels': "['$\\\\ln F$', '$\\\\beta$', '$\\\\alpha$']", 'truths': '(theta_true[0], theta_true[2], theta_true[1])'}), "(sampler.flatchain, labels=['$\\\\ln F$', '$\\\\beta$',\n '$\\\\alpha$'], truths=(theta_true[0], theta_true[2], theta_true[1]))\n", (2466, 2589), False, 'import corner\n'), ((340, 370), 'pandas.read_hdf', 'pd.read_hdf', (['koi_file', '"""theta"""'], {}), "(koi_file, 'theta')\n", (351, 370), True, 'import pandas as pd\n'), ((1320, 1337), 'numpy.sum', 'np.sum', (['(pop * vol)'], {}), '(pop * vol)\n', (1326, 1337), True, 'import numpy as np\n'), ((2614, 2640), 'os.path.splitext', 'os.path.splitext', (['koi_file'], {}), '(koi_file)\n', (2630, 2640), False, 
'import os, os.path\n'), ((726, 738), 'numpy.exp', 'np.exp', (['lnf0'], {}), '(lnf0)\n', (732, 738), True, 'import numpy as np\n'), ((741, 761), 'numpy.ones_like', 'np.ones_like', (['period'], {}), '(period)\n', (753, 761), True, 'import numpy as np\n'), ((1109, 1137), 'numpy.diff', 'np.diff', (['period_grid'], {'axis': '(0)'}), '(period_grid, axis=0)\n', (1116, 1137), True, 'import numpy as np\n'), ((1148, 1172), 'numpy.diff', 'np.diff', (['rp_grid'], {'axis': '(1)'}), '(rp_grid, axis=1)\n', (1155, 1172), True, 'import numpy as np\n'), ((1433, 1448), 'numpy.isfinite', 'np.isfinite', (['ll'], {}), '(ll)\n', (1444, 1448), True, 'import numpy as np\n'), ((1945, 1960), 'numpy.isfinite', 'np.isfinite', (['ll'], {}), '(ll)\n', (1956, 1960), True, 'import numpy as np\n'), ((2210, 2231), 'numpy.random.randn', 'np.random.randn', (['ndim'], {}), '(ndim)\n', (2225, 2231), True, 'import numpy as np\n')] |
from scipy import interpolate
import random
import numpy as np
import matplotlib.pyplot as plt
import math
from types import SimpleNamespace
from gym_space_racer.geometry import intersect, intersection
class CircularMap:
    """Generate a random closed race track shaped like a noisy circle.

    Control points are placed on a circle with normally perturbed radii,
    interpolated with a periodic spline, and offset sideways to form the
    left/right track borders.  Self-intersections introduced by the
    offsetting are removed afterwards.
    """
    # Number of samples along the interpolated center line.
    PRECISION = 100

    def __init__(self, n=10, seed=None, width=0.05, debug=False):
        """
        Args:
            n: number of random control points on the circle
            seed: RNG seed; a fresh random seed is drawn when None
            width: track width (distance between the two borders)
            debug: when True, plot intermediate construction stages
        """
        self.n = n
        # BUGFIX: `seed or random.randint(...)` discarded an explicit
        # seed of 0; test against None instead.
        self.seed = seed if seed is not None else random.randint(0, 10000)
        self.width = width
        np.random.seed(self.seed)
        cp = self._get_control_points(n)
        # Start pose: a random control point, heading toward the next one.
        rand_i = np.random.randint(low=0, high=n, size=(1,))[0]
        dp = cp[(rand_i + 1) % len(cp)] - cp[rand_i]
        self.start = SimpleNamespace(x=cp[rand_i, 0], y=cp[rand_i, 1],
                                     angle=math.atan2(dp[1], dp[0]))
        self.cpoints = cp
        if debug:
            plt.plot(cp[:, 0], cp[:, 1], 'x-')
        interp = self._interpolate(cp[:, 0], cp[:, 1], n=self.PRECISION)
        if debug:
            plt.plot(interp[:, 0], interp[:, 1], 'm:')
        left = self._build_track(interp[:, 0], interp[:, 1], 0.5 * width)
        if debug:
            plt.plot(left[:, 0], left[:, 1], 'r:')
        self.left = self._remove_intersections(left)
        right = self._build_track(interp[:, 0], interp[:, 1], -0.5 * width)
        if debug:
            plt.plot(right[:, 0], right[:, 1], 'g:')
        self.right = self._remove_intersections(right)

    def plot(self):
        """Plot the start position and both track borders."""
        # BUGFIX: self.start is a SimpleNamespace, which is not
        # subscriptable; use the .x / .y attributes.
        plt.plot(self.start.x, self.start.y, 'x')  # start position
        plt.plot(self.right[:, 0], self.right[:, 1], 'g-')
        plt.plot(self.left[:, 0], self.left[:, 1], 'r-')

    @staticmethod
    def _segment_dot(p0, p1, q0, q1):
        """Dot product of the segment directions (p1 - p0) . (q1 - q0)."""
        return float(np.dot(p1 - p0, q1 - q0))

    def is_valid(self) -> bool:
        """Return False when a border makes a sharp reversal, i.e. two
        consecutive segments turn by more than 90 degrees."""
        # BUGFIX: the original called an undefined global `dot`
        # (NameError); use the segment-direction dot product helper.
        for arr in (self.right,):
            for i in range(len(arr) - 2):
                if self._segment_dot(arr[i], arr[i + 1], arr[i + 1], arr[i + 2]) < 0.0:
                    return False
        return True

    def _get_control_points(self, n):
        """Return (n+1, 2) control points on a radius-perturbed circle;
        the last point repeats the first so the loop is closed."""
        radius = np.random.normal(1.0, 0.2, size=(n, ))
        radius[-1] = radius[0]
        t = np.linspace(0, 2.0 * np.pi, n + 1)[:-1]
        x, y = radius * np.sin(t), radius * np.cos(t)
        res = np.zeros((n + 1, 2))
        res[:-1, 0] = x
        res[:-1, 1] = y
        res[-1, 0], res[-1, 1] = x[0], y[0]
        return res

    def _interpolate(self, x, y, n):
        """Fit a periodic degree-5 spline through (x, y) and resample it
        at ``n`` evenly spaced parameter values."""
        tck, u = interpolate.splprep([x, y], s=0.0, k=5, per=True)
        unew = np.linspace(0, 1.0, n)
        out = interpolate.splev(unew, tck)
        res = np.zeros((len(out[0]), 2))
        res[:, 0] = out[0]
        res[:, 1] = out[1]
        return res

    def _build_track(self, x, y, w):
        """Offset the closed polyline (x, y) sideways by ``w`` along each
        segment normal, producing one track border."""
        xs, ys = [], []
        for x0, x1, y0, y1 in zip(x[0:], x[1:], y[0:], y[1:]):
            dx = x1 - x0
            dy = y1 - y0
            d = np.hypot(dx, dy)
            # Segment midpoint shifted along the segment normal.
            sx, sy = 0.5 * (x0 + x1), 0.5 * (y0 + y1)
            px, py = sx + w * dy / d, sy - w * dx / d
            xs.append(px)
            ys.append(py)
        xs.append(xs[0])
        ys.append(ys[0])
        res = np.zeros((len(xs), 2))
        res[:, 0] = xs
        res[:, 1] = ys
        return res

    def _remove_intersections(self, ps):
        """Cut away the shorter loop at every self-intersection of the
        closed polyline ``ps`` until no crossings remain."""
        assert (ps[0, 0] == ps[-1, 0]) and (ps[0, 1] == ps[-1, 1])
        while True:
            detected = False
            for i in range(0, len(ps) - 1):
                for j in range(i + 2, len(ps) - 2):
                    if intersect(ps[i], ps[i + 1], ps[j], ps[j + 1]):
                        detected = True
                        ix, iy = intersection(ps[i], ps[i + 1], ps[j], ps[j + 1])
                        # Keep the larger of the two loops created by the
                        # crossing, splicing in the intersection point.
                        if 2 * (j - i) <= len(ps):
                            ps = np.concatenate((ps[:i + 1], np.array([[ix, iy]]), ps[j + 1:]))
                        else:
                            ps = np.concatenate((np.array([[ix, iy]]), ps[i + 1:j + 1]))
                        break
                if detected:
                    break
            if not detected:
                break
        # Re-close the polyline if the splice broke the closure.
        if ps[0, 0] != ps[-1, 0] or ps[0, 1] != ps[-1, 1]:
            ps[-1] = ps[0]
        return ps
| [
"numpy.random.normal",
"scipy.interpolate.splprep",
"matplotlib.pyplot.plot",
"gym_space_racer.geometry.intersection",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"scipy.interpolate.splev",
"numpy.random.seed",
"numpy.random.randint",
"math.atan2",
"numpy.sin",
"numpy.hypot",
"numpy.co... | [((466, 491), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (480, 491), True, 'import numpy as np\n'), ((1617, 1660), 'matplotlib.pyplot.plot', 'plt.plot', (['self.start[0]', 'self.start[1]', '"""x"""'], {}), "(self.start[0], self.start[1], 'x')\n", (1625, 1660), True, 'import matplotlib.pyplot as plt\n'), ((1687, 1737), 'matplotlib.pyplot.plot', 'plt.plot', (['self.right[:, 0]', 'self.right[:, 1]', '"""g-"""'], {}), "(self.right[:, 0], self.right[:, 1], 'g-')\n", (1695, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1746, 1794), 'matplotlib.pyplot.plot', 'plt.plot', (['self.left[:, 0]', 'self.left[:, 1]', '"""r-"""'], {}), "(self.left[:, 0], self.left[:, 1], 'r-')\n", (1754, 1794), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2116), 'numpy.random.normal', 'np.random.normal', (['(1.0)', '(0.2)'], {'size': '(n,)'}), '(1.0, 0.2, size=(n,))\n', (2095, 2116), True, 'import numpy as np\n'), ((2267, 2287), 'numpy.zeros', 'np.zeros', (['(n + 1, 2)'], {}), '((n + 1, 2))\n', (2275, 2287), True, 'import numpy as np\n'), ((2451, 2500), 'scipy.interpolate.splprep', 'interpolate.splprep', (['[x, y]'], {'s': '(0.0)', 'k': '(5)', 'per': '(True)'}), '([x, y], s=0.0, k=5, per=True)\n', (2470, 2500), False, 'from scipy import interpolate\n'), ((2515, 2537), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', 'n'], {}), '(0, 1.0, n)\n', (2526, 2537), True, 'import numpy as np\n'), ((2552, 2580), 'scipy.interpolate.splev', 'interpolate.splev', (['unew', 'tck'], {}), '(unew, tck)\n', (2569, 2580), False, 'from scipy import interpolate\n'), ((405, 429), 'random.randint', 'random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (419, 429), False, 'import random\n'), ((552, 595), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'n', 'size': '(1,)'}), '(low=0, high=n, size=(1,))\n', (569, 595), True, 'import numpy as np\n'), ((896, 930), 'matplotlib.pyplot.plot', 'plt.plot', (['cp[:, 0]', 'cp[:, 1]', 
'"""x-"""'], {}), "(cp[:, 0], cp[:, 1], 'x-')\n", (904, 930), True, 'import matplotlib.pyplot as plt\n'), ((1148, 1190), 'matplotlib.pyplot.plot', 'plt.plot', (['interp[:, 0]', 'interp[:, 1]', '"""m:"""'], {}), "(interp[:, 0], interp[:, 1], 'm:')\n", (1156, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1332), 'matplotlib.pyplot.plot', 'plt.plot', (['left[:, 0]', 'left[:, 1]', '"""r:"""'], {}), "(left[:, 0], left[:, 1], 'r:')\n", (1302, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1492, 1532), 'matplotlib.pyplot.plot', 'plt.plot', (['right[:, 0]', 'right[:, 1]', '"""g:"""'], {}), "(right[:, 0], right[:, 1], 'g:')\n", (1500, 1532), True, 'import matplotlib.pyplot as plt\n'), ((2161, 2195), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0 * np.pi)', '(n + 1)'], {}), '(0, 2.0 * np.pi, n + 1)\n', (2172, 2195), True, 'import numpy as np\n'), ((2886, 2902), 'numpy.hypot', 'np.hypot', (['dx', 'dy'], {}), '(dx, dy)\n', (2894, 2902), True, 'import numpy as np\n'), ((786, 810), 'math.atan2', 'math.atan2', (['dp[1]', 'dp[0]'], {}), '(dp[1], dp[0])\n', (796, 810), False, 'import math\n'), ((2223, 2232), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2229, 2232), True, 'import numpy as np\n'), ((2243, 2252), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2249, 2252), True, 'import numpy as np\n'), ((3470, 3515), 'gym_space_racer.geometry.intersect', 'intersect', (['ps[i]', 'ps[i + 1]', 'ps[j]', 'ps[j + 1]'], {}), '(ps[i], ps[i + 1], ps[j], ps[j + 1])\n', (3479, 3515), False, 'from gym_space_racer.geometry import intersect, intersection\n'), ((3586, 3634), 'gym_space_racer.geometry.intersection', 'intersection', (['ps[i]', 'ps[i + 1]', 'ps[j]', 'ps[j + 1]'], {}), '(ps[i], ps[i + 1], ps[j], ps[j + 1])\n', (3598, 3634), False, 'from gym_space_racer.geometry import intersect, intersection\n'), ((3737, 3757), 'numpy.array', 'np.array', (['[[ix, iy]]'], {}), '([[ix, iy]])\n', (3745, 3757), True, 'import numpy as np\n'), ((3849, 3869), 'numpy.array', 'np.array', 
(['[[ix, iy]]'], {}), '([[ix, iy]])\n', (3857, 3869), True, 'import numpy as np\n')] |
import os.path
import os
import sys
import math
import argparse
import time
import random
from collections import OrderedDict
import torch
import options.options as option
from utils import util
from data import create_dataloader, create_dataset
from models import create_model
from utils.logger import Logger, PrintLogger
from sampler import generate_code_samples
import numpy as np
def validate(val_loader, opt, model, current_step, epoch, logger):
    """Run one full validation pass: colorize every validation image,
    save the prediction to disk, and log average PSNR and LPIPS."""
    print('---------- validation -------------')
    t_start = time.time()
    psnr_total = 0.0
    lpips_total = 0.0
    n_images = 0
    for batch in val_loader:
        n_images += 1
        base_name = os.path.basename(batch['HR_path'][0])
        img_name = os.path.splitext(base_name)[0]
        out_dir = os.path.join(opt['path']['val_images'], img_name)
        util.mkdir(out_dir)
        # Latent code initializer mirrors the training configuration:
        # all-zeros code vs. Gaussian samples.
        code_init = torch.zeros if opt['train']['zero_code'] else torch.randn
        net_in = batch['network_input'][0]
        code = model.gen_code(net_in.shape[0],
                              net_in.shape[2],
                              net_in.shape[3],
                              tensor_type=code_init)
        model.feed_data(batch, code=code)
        model.test()
        visuals = model.get_current_visuals()
        # 'HR_pred' is the predicted colored image, 'HR' the ground truth,
        # both converted to uint8 RGB.
        pred_img = util.tensor2img(visuals['HR_pred'])
        ref_img = util.tensor2img(visuals['HR'])
        # Save the prediction for visual inspection.
        out_path = os.path.join(out_dir, '{:s}_{:s}_{:d}.png'.format(opt['name'], img_name, current_step))
        util.save_img(pred_img, out_path)
        psnr_total += util.psnr(pred_img, ref_img)
        lpips_total += torch.sum(model.get_loss(level=-1))
        if current_step == 0:
            print('Saving the model at the end of iter {:d}.'.format(current_step))
            model.save(current_step)
    avg_psnr = psnr_total / n_images
    avg_lpips = lpips_total / n_images
    elapsed = time.time() - t_start
    # Assemble the log record in the order the logger prints it.
    report = OrderedDict()
    report['model'] = opt['model']
    report['epoch'] = epoch
    report['iters'] = current_step
    report['time'] = elapsed
    report['psnr'] = avg_psnr
    if opt['train']['pixel_weight'] > 0:
        report[opt['train']['pixel_criterion']] = avg_lpips
    else:
        report['lpips'] = avg_lpips
    logger.print_format_results('val', report)
    print('-----------------------------------')
def main():
    """Parse the option file, build the data loaders and model, then run
    the month/day training loop with periodic logging, checkpointing and
    validation.

    One "month" iterates once over ``train_loader``; for each monthly batch
    the inner loop takes ``num_days`` optimization steps on daily slices of
    that batch.
    """
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, required=True, help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)
    util.mkdir_and_rename(opt['path']['experiments_root'])  # rename old experiments if exists
    # Create every configured path except the two handled specially above/by loading.
    util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root' and
                 not key == 'pretrain_model_G'))
    option.save(opt)
    opt = option.dict_to_nonedict(opt)  # Convert to NoneDict, which return None for missing key.
    # print to file and std_out simultaneously
    sys.stdout = PrintLogger(opt['path']['log'])
    # random seed (draw one if the option file does not pin it)
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    random.seed(seed)
    torch.manual_seed(seed)
    # LAB setup settings
    print("Color output mode: ", util.color_output_mode)
    print("AB range: ", util.AB_range)
    # create train and val dataloader
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size_per_month']))
            print('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size))
            num_months = int(opt['train']['num_months'])
            num_days = int(opt['train']['num_days'])
            total_iters = int(num_months * num_days)
            print('Total epochs needed: {:d} for iters {:,d}'.format(num_months, total_iters))
            train_loader = create_dataloader(train_set, dataset_opt)
            batch_size_per_month = dataset_opt['batch_size_per_month']
            batch_size_per_day = int(opt['datasets']['train']['batch_size_per_day'])
            use_dci = opt['train']['use_dci']
            inter_supervision = opt['train']['inter_supervision']
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [{:s}]: {:d}'.format(dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
    assert train_loader is not None
    # Create model
    model = create_model(opt)
    # create logger
    logger = Logger(opt)
    current_step = 0
    start_time = time.time()
    print('---------- Start training -------------')
    # Baseline validation before any optimization step.
    validate(val_loader, opt, model, current_step, 0, logger)
    for epoch in range(num_months):
        for i, train_data in enumerate(train_loader):
            # Sample the codes used for training of the month
            if use_dci:
                cur_month_code = generate_code_samples(model, train_data, opt)
            else:
                # Zero or Gaussian latent codes, matching the configuration.
                tensor_type = torch.zeros if opt['train']['zero_code'] else torch.randn
                cur_month_code = model.gen_code(train_data['network_input'][0].shape[0],
                                                train_data['network_input'][0].shape[2],
                                                train_data['network_input'][0].shape[3],
                                                tensor_type=tensor_type)
            # clear projection matrix to save memory
            model.clear_projection()
            for j in range(num_days):
                current_step += 1
                cur_month_batch_size = min(batch_size_per_month, train_data['network_input'][0].shape[0])
                # get the sliced data: a rolling daily window over the monthly batch,
                # wrapping around when it runs past the end
                cur_day_batch_start_idx = (j * batch_size_per_day) % cur_month_batch_size
                cur_day_batch_end_idx = cur_day_batch_start_idx + batch_size_per_day
                if cur_day_batch_end_idx > cur_month_batch_size:
                    cur_day_batch_idx = np.hstack((np.arange(cur_day_batch_start_idx, cur_month_batch_size),
                                                   np.arange(cur_day_batch_end_idx - cur_month_batch_size)))
                else:
                    cur_day_batch_idx = slice(cur_day_batch_start_idx, cur_day_batch_end_idx)
                cur_day_train_data = {key: val[cur_day_batch_idx] for key, val in train_data.items()}
                code = [gen_code[cur_day_batch_idx] for gen_code in cur_month_code]
                # 'network_input' (and optional 'rarity_masks') are lists of tensors,
                # so each element is sliced individually.
                cur_day_train_data['network_input'] = []
                for net_inp in range(len(train_data['network_input'])):
                    cur_day_train_data['network_input'].append(train_data['network_input'][net_inp][cur_day_batch_idx])
                if 'rarity_masks' in train_data.keys():
                    cur_day_train_data['rarity_masks'] = []
                    for rar_msk in range(len(train_data['rarity_masks'])):
                        cur_day_train_data['rarity_masks'].append(
                            train_data['rarity_masks'][rar_msk][cur_day_batch_idx])
                # training
                model.feed_data(cur_day_train_data, code=code)
                model.optimize_parameters(current_step, inter_supervision=inter_supervision)
                time_elapsed = time.time() - start_time
                start_time = time.time()
                # log
                if current_step % opt['logger']['print_freq'] == 0 or current_step == 1:
                    logs = model.get_current_log()
                    print_rlt = OrderedDict()
                    print_rlt['model'] = opt['model']
                    print_rlt['epoch'] = epoch
                    print_rlt['iters'] = current_step
                    print_rlt['time'] = time_elapsed
                    for k, v in logs.items():
                        print_rlt[k] = v
                    print_rlt['lr'] = model.get_current_learning_rate()
                    logger.print_format_results('train', print_rlt)
                # save models
                if current_step % opt['logger']['save_checkpoint_freq'] == 0:
                    print('Saving the model at the end of iter {:d}.'.format(current_step))
                    model.save(current_step)
                # validation
                if current_step % opt['train']['val_freq'] == 0:
                    validate(val_loader, opt, model, current_step, epoch, logger)
                # update learning rate
                model.update_learning_rate()
    print('Saving the final model.')
    model.save('latest')
    print('End of training.')
# Entry point: only start training when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"utils.util.psnr",
"utils.util.mkdir_and_rename",
"numpy.arange",
"utils.util.tensor2img",
"argparse.ArgumentParser",
"sampler.generate_code_samples",
"utils.util.save_img",
"random.randint",
"collections.OrderedDict",
"utils.util.mkdir",
"data.create_dataloader",
"utils.logger.Logger",
"tim... | [((521, 532), 'time.time', 'time.time', ([], {}), '()\n', (530, 532), False, 'import time\n'), ((2176, 2189), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2187, 2189), False, 'from collections import OrderedDict\n'), ((2660, 2685), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2683, 2685), False, 'import argparse\n'), ((2845, 2899), 'utils.util.mkdir_and_rename', 'util.mkdir_and_rename', (["opt['path']['experiments_root']"], {}), "(opt['path']['experiments_root'])\n", (2866, 2899), False, 'from utils import util\n'), ((3085, 3101), 'options.options.save', 'option.save', (['opt'], {}), '(opt)\n', (3096, 3101), True, 'import options.options as option\n'), ((3112, 3140), 'options.options.dict_to_nonedict', 'option.dict_to_nonedict', (['opt'], {}), '(opt)\n', (3135, 3140), True, 'import options.options as option\n'), ((3265, 3296), 'utils.logger.PrintLogger', 'PrintLogger', (["opt['path']['log']"], {}), "(opt['path']['log'])\n", (3276, 3296), False, 'from utils.logger import Logger, PrintLogger\n'), ((3453, 3470), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3464, 3470), False, 'import random\n'), ((3475, 3498), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3492, 3498), False, 'import torch\n'), ((4998, 5015), 'models.create_model', 'create_model', (['opt'], {}), '(opt)\n', (5010, 5015), False, 'from models import create_model\n'), ((5049, 5060), 'utils.logger.Logger', 'Logger', (['opt'], {}), '(opt)\n', (5055, 5060), False, 'from utils.logger import Logger, PrintLogger\n'), ((5100, 5111), 'time.time', 'time.time', ([], {}), '()\n', (5109, 5111), False, 'import time\n'), ((733, 782), 'os.path.join', 'os.path.join', (["opt['path']['val_images']", 'img_name'], {}), "(opt['path']['val_images'], img_name)\n", (745, 782), False, 'import os\n'), ((791, 810), 'utils.util.mkdir', 'util.mkdir', (['img_dir'], {}), '(img_dir)\n', (801, 810), False, 'from utils import util\n'), ((1411, 
1446), 'utils.util.tensor2img', 'util.tensor2img', (["visuals['HR_pred']"], {}), "(visuals['HR_pred'])\n", (1426, 1446), False, 'from utils import util\n'), ((1473, 1503), 'utils.util.tensor2img', 'util.tensor2img', (["visuals['HR']"], {}), "(visuals['HR'])\n", (1488, 1503), False, 'from utils import util\n'), ((1680, 1716), 'utils.util.save_img', 'util.save_img', (['sr_img', 'save_img_path'], {}), '(sr_img, save_img_path)\n', (1693, 1716), False, 'from utils import util\n'), ((1811, 1836), 'utils.util.psnr', 'util.psnr', (['sr_img', 'gt_img'], {}), '(sr_img, gt_img)\n', (1820, 1836), False, 'from utils import util\n'), ((2117, 2128), 'time.time', 'time.time', ([], {}), '()\n', (2126, 2128), False, 'import time\n'), ((3391, 3415), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (3405, 3415), False, 'import random\n'), ((3768, 3795), 'data.create_dataset', 'create_dataset', (['dataset_opt'], {}), '(dataset_opt)\n', (3782, 3795), False, 'from data import create_dataloader, create_dataset\n'), ((4275, 4316), 'data.create_dataloader', 'create_dataloader', (['train_set', 'dataset_opt'], {}), '(train_set, dataset_opt)\n', (4292, 4316), False, 'from data import create_dataloader, create_dataset\n'), ((670, 710), 'os.path.basename', 'os.path.basename', (["val_data['HR_path'][0]"], {}), "(val_data['HR_path'][0])\n", (686, 710), False, 'import os\n'), ((4636, 4663), 'data.create_dataset', 'create_dataset', (['dataset_opt'], {}), '(dataset_opt)\n', (4650, 4663), False, 'from data import create_dataloader, create_dataset\n'), ((4689, 4728), 'data.create_dataloader', 'create_dataloader', (['val_set', 'dataset_opt'], {}), '(val_set, dataset_opt)\n', (4706, 4728), False, 'from data import create_dataloader, create_dataset\n'), ((5436, 5481), 'sampler.generate_code_samples', 'generate_code_samples', (['model', 'train_data', 'opt'], {}), '(model, train_data, opt)\n', (5457, 5481), False, 'from sampler import generate_code_samples\n'), ((7857, 7868), 
'time.time', 'time.time', ([], {}), '()\n', (7866, 7868), False, 'import time\n'), ((7803, 7814), 'time.time', 'time.time', ([], {}), '()\n', (7812, 7814), False, 'import time\n'), ((8064, 8077), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8075, 8077), False, 'from collections import OrderedDict\n'), ((6525, 6581), 'numpy.arange', 'np.arange', (['cur_day_batch_start_idx', 'cur_month_batch_size'], {}), '(cur_day_batch_start_idx, cur_month_batch_size)\n', (6534, 6581), True, 'import numpy as np\n'), ((6634, 6689), 'numpy.arange', 'np.arange', (['(cur_day_batch_end_idx - cur_month_batch_size)'], {}), '(cur_day_batch_end_idx - cur_month_batch_size)\n', (6643, 6689), True, 'import numpy as np\n')] |
"""Small demo of numpy.block: assembling larger arrays from rectangular blocks."""
import numpy as np

# Two 2x3 building blocks: one filled with 1s, one filled with 2s.
a1 = np.full((2, 3), 1)
print(a1)
# [[1 1 1]
#  [1 1 1]]
a2 = np.ones((2, 3), int) * 2
print(a2)
# [[2 2 2]
#  [2 2 2]]
# A flat list of blocks concatenates along the last axis (like hstack).
print(np.block([a1, a2]))
# [[1 1 1 2 2 2]
#  [1 1 1 2 2 2]]
# One block per inner list stacks the rows vertically (like vstack).
print(np.block([[a1], [a2]]))
# [[1 1 1]
#  [1 1 1]
#  [2 2 2]
#  [2 2 2]]
# A 2x2 grid of blocks tiles both axes at once.
print(np.block([[a1, a2], [a2, a1]]))
# [[1 1 1 2 2 2]
#  [1 1 1 2 2 2]
#  [2 2 2 1 1 1]
#  [2 2 2 1 1 1]]
# Deeper nesting adds leading dimensions to the result.
print(np.block([[[a1]], [[a2]]]))
# [[[1 1 1]
#   [1 1 1]]
#
#  [[2 2 2]
#   [2 2 2]]]
print(np.block([[[a1]], [[a2]]]).shape)
# (2, 2, 3)
# Rows may have different block heights as long as total widths line up.
a3 = np.ones(6, int) * 3
print(a3)
# [3 3 3 3 3 3]
print(np.block([[a1, a2], [a3]]))
# [[1 1 1 2 2 2]
#  [1 1 1 2 2 2]
#  [3 3 3 3 3 3]]
# Mismatched nesting depths or incompatible shapes raise ValueError:
# print(np.block([[a1, a2], a3]))
# ValueError: List depths are mismatched. First element was at depth 2, but there is an element at depth 1 (arrays[1])
# print(np.block([[a1, a2, a3]]))
# ValueError: all the input array dimensions except for the concatenation axis must match exactly
| [
"numpy.full",
"numpy.block",
"numpy.ones"
] | [((25, 45), 'numpy.ones', 'np.ones', (['(2, 3)', 'int'], {}), '((2, 3), int)\n', (32, 45), True, 'import numpy as np\n'), ((85, 103), 'numpy.full', 'np.full', (['(2, 3)', '(2)'], {}), '((2, 3), 2)\n', (92, 103), True, 'import numpy as np\n'), ((531, 544), 'numpy.full', 'np.full', (['(6)', '(3)'], {}), '(6, 3)\n', (538, 544), True, 'import numpy as np\n'), ((144, 162), 'numpy.block', 'np.block', (['[a1, a2]'], {}), '([a1, a2])\n', (152, 162), True, 'import numpy as np\n'), ((206, 228), 'numpy.block', 'np.block', (['[[a1], [a2]]'], {}), '([[a1], [a2]])\n', (214, 228), True, 'import numpy as np\n'), ((282, 312), 'numpy.block', 'np.block', (['[[a1, a2], [a2, a1]]'], {}), '([[a1, a2], [a2, a1]])\n', (290, 312), True, 'import numpy as np\n'), ((390, 416), 'numpy.block', 'np.block', (['[[[a1]], [[a2]]]'], {}), '([[[a1]], [[a2]]])\n', (398, 416), True, 'import numpy as np\n'), ((578, 604), 'numpy.block', 'np.block', (['[[a1, a2], [a3]]'], {}), '([[a1, a2], [a3]])\n', (586, 604), True, 'import numpy as np\n'), ((479, 505), 'numpy.block', 'np.block', (['[[[a1]], [[a2]]]'], {}), '([[[a1]], [[a2]]])\n', (487, 505), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from config import Config
from itertools import permutations
from unittest import TestCase
from unittest.mock import MagicMock
import numpy as np
from baseline_players import RandomCardPlayer
from card import Card
from const import Const
from encoding import Encoding
from game_type import GameType
from parameterized import parameterized
from player import Player
from utils import flatten
class PlayerTest(TestCase):
  """Unit tests for Player: decision-state sorting, the rules for which
  cards may be played, and conversion of player codes to a relative view."""
  # pylint: disable=invalid-name,protected-access,line-too-long
  @classmethod
  def setUpClass(cls):
    # Pin a fixed encoding so the tests do not depend on the runtime config:
    # player codes 1-4, relative in-play encoding, trump codes offset by 100.
    Config.ENCODING = Encoding("better", [1, 2, 3, 4], 5, [10, 15, 20], 50, 0, 0,
                               relative_in_play_encoding=True, trump_code_offset=100)
  def verify_card_permutations(self, all_cards_by_suit, correct_order, cards_by_suit):
    """Assert that every suit permutation sorts back to ``correct_order``."""
    all_permutations = PlayerTest.get_permutations_by_suit(all_cards_by_suit) if cards_by_suit \
        else PlayerTest.get_permutations_by_value(all_cards_by_suit)
    for permutation in all_permutations:
      self.assertTrue(np.array_equal(Player._sort_decision_state(permutation, cards_by_suit), correct_order))
  @staticmethod
  def get_permutations_by_suit(cards_by_suit):
    """Flatten every suit-order permutation (suit-major layout)."""
    return [flatten(permutation) for permutation in permutations(cards_by_suit)]
  @staticmethod
  def get_permutations_by_value(cards_by_suit):
    """Flatten every suit-order permutation in value-major (transposed) layout."""
    return [flatten(zip(*permutation)) for permutation in permutations(cards_by_suit)]
  @staticmethod
  def get_correct_decision_state(cards, order, cards_by_suit):
    """Build the expected flat decision state for suits arranged in ``order``."""
    ordered_cards = [cards[i] for i in order]
    if cards_by_suit:
      return flatten(ordered_cards)
    return flatten(zip(*ordered_cards))
  @parameterized.expand([
      [True],
      [False]
  ])
  def test_sort_decision_state(self, cards_by_suit):
    """Hand-picked card layouts must sort deterministically regardless of
    the suit permutation they are presented in."""
    # All suits identical: any order is "sorted"; expect identity order.
    all_cards = [
        [1] * Const.CARDS_PER_SUIT,
        [2] * Const.CARDS_PER_SUIT,
        [3] * Const.CARDS_PER_SUIT,
        [4] * Const.CARDS_PER_SUIT
    ]
    correct_state = PlayerTest.get_correct_decision_state(all_cards, range(Const.SUIT_COUNT), cards_by_suit)
    self.verify_card_permutations(all_cards, correct_state, cards_by_suit)
    # Suits with distinct constant values: expect ascending-value order.
    all_cards = [
        [0] * Const.CARDS_PER_SUIT,
        [100] * Const.CARDS_PER_SUIT,
        [10] * Const.CARDS_PER_SUIT,
        [255] * Const.CARDS_PER_SUIT
    ]
    correct_state = PlayerTest.get_correct_decision_state(all_cards, [0, 2, 1, 3], cards_by_suit)
    self.verify_card_permutations(all_cards, correct_state, cards_by_suit)
    # Tie-break on the position of a single non-zero entry.
    all_cards = [
        [0] * Const.CARDS_PER_SUIT,
        [0] * Const.CARDS_PER_SUIT,
        [0] * Const.CARDS_PER_SUIT,
        [0] * Const.CARDS_PER_SUIT
    ]
    all_cards[0][3] = 1
    all_cards[1][2] = 1
    all_cards[2][1] = 1
    all_cards[3][0] = 1
    correct_state = PlayerTest.get_correct_decision_state(all_cards, range(Const.SUIT_COUNT), cards_by_suit)
    self.verify_card_permutations(all_cards, correct_state, cards_by_suit)
    # Tie-break on the value of the first entry.
    all_cards = [
        [0] * Const.CARDS_PER_SUIT,
        [0] * Const.CARDS_PER_SUIT,
        [0] * Const.CARDS_PER_SUIT,
        [0] * Const.CARDS_PER_SUIT
    ]
    all_cards[0][0] = 1
    all_cards[1][0] = 2
    all_cards[2][0] = 3
    all_cards[3][0] = 4
    correct_state = PlayerTest.get_correct_decision_state(all_cards, range(Const.SUIT_COUNT), cards_by_suit)
    self.verify_card_permutations(all_cards, correct_state, cards_by_suit)
    # Tie-break on the value of the last entry.
    all_cards = [
        [0] * Const.CARDS_PER_SUIT,
        [0] * Const.CARDS_PER_SUIT,
        [0] * Const.CARDS_PER_SUIT,
        [0] * Const.CARDS_PER_SUIT
    ]
    all_cards[3][-1] = 1
    all_cards[1][-1] = 2
    all_cards[0][-1] = 3
    all_cards[2][-1] = 4
    correct_state = PlayerTest.get_correct_decision_state(all_cards, [3, 1, 0, 2], cards_by_suit)
    self.verify_card_permutations(all_cards, correct_state, cards_by_suit)
    # Monotonically increasing / decreasing suit ranges.
    all_cards = [range(i*Const.CARDS_PER_SUIT, (i+1)*Const.CARDS_PER_SUIT) for i in range(Const.SUIT_COUNT)]
    correct_state = PlayerTest.get_correct_decision_state(all_cards, range(Const.SUIT_COUNT), cards_by_suit)
    self.verify_card_permutations(all_cards, correct_state, cards_by_suit)
    all_cards = [range(i*Const.CARDS_PER_SUIT, (i+1)*Const.CARDS_PER_SUIT) for i in range(Const.SUIT_COUNT-1, -1, -1)]
    correct_state = PlayerTest.get_correct_decision_state(all_cards, range(Const.SUIT_COUNT-1, -1, -1), cards_by_suit)
    self.verify_card_permutations(all_cards, correct_state, cards_by_suit)
  @parameterized.expand([
      [True],
      [False]
  ])
  def test_sort_decision_state_randomized(self, cards_by_suit):
    """Sorting an already-sorted random state must be a fixed point for
    every suit permutation (seeded for reproducibility)."""
    np.random.seed(42)
    for _ in range(int(1e2)):
      all_cards = [
          list(np.random.randint(0, 255, Const.CARDS_PER_SUIT)),
          list(np.random.randint(0, 255, Const.CARDS_PER_SUIT)),
          list(np.random.randint(0, 255, Const.CARDS_PER_SUIT)),
          list(np.random.randint(0, 255, Const.CARDS_PER_SUIT))
      ]
      first_order = Player._sort_decision_state(
          PlayerTest.get_correct_decision_state(all_cards, range(Const.SUIT_COUNT), cards_by_suit), cards_by_suit)
      self.verify_card_permutations(all_cards, first_order, cards_by_suit)
  @parameterized.expand([
      [GameType.OBENABE],
      [GameType.UNNENUFE]
  ])
  def test_get_valid_cards_to_play_non_trump(self, game_type):
    """In trump-less game types only suit-following restricts the hand."""
    testee = RandomCardPlayer("testee", 0, MagicMock())
    testee.hand = [
        Card(Card.SPADES, 5), # J
        Card(Card.HEARTS, 0), # 6
        Card(Card.HEARTS, 7), # K
        Card(Card.CLUBS, 3), # 9
    ]
    # no cards played yet
    self.assertEqual(testee.get_valid_cards_to_play([], game_type), testee.hand)
    # can't match suit
    self.assertEqual(testee.get_valid_cards_to_play([Card(Card.DIAMONDS, 1)], game_type), testee.hand)
    self.assertEqual(testee.get_valid_cards_to_play([
        Card(Card.DIAMONDS, 1), Card(Card.SPADES, 1), Card(Card.HEARTS, 1)], game_type), testee.hand)
    # can match suit
    self.assertEqual(testee.get_valid_cards_to_play([Card(Card.SPADES, 1)], game_type), [Card(Card.SPADES, 5)])
    self.assertEqual(testee.get_valid_cards_to_play([Card(Card.HEARTS, 1)], game_type),
                     [Card(Card.HEARTS, 0), Card(Card.HEARTS, 7)])
    self.assertEqual(testee.get_valid_cards_to_play([Card(Card.CLUBS, 1)], game_type), [Card(Card.CLUBS, 3)])
  def test_get_valid_cards_to_play_trump(self):
    """Trump game types: suit-following, trumping and the undertrump rule."""
    testee = RandomCardPlayer("testee", 0, MagicMock())
    testee.hand = [
        Card(Card.SPADES, 5), # J
        Card(Card.HEARTS, 5), # J
        Card(Card.HEARTS, 7), # K
        Card(Card.CLUBS, 3), # 9
    ]
    # non-trump is played without any trump in play
    game_type = GameType.TRUMP_HEARTS
    played_cards = [Card(Card.SPADES, 1)]
    for card in testee.hand + played_cards:
      card.set_game_type(game_type)
    self.assertEqual(testee.get_valid_cards_to_play(played_cards, game_type), [Card(Card.SPADES, 5), Card(Card.HEARTS, 5), Card(Card.HEARTS, 7)])
    game_type = GameType.TRUMP_CLUBS
    played_cards = [Card(Card.SPADES, 1), Card(Card.DIAMONDS, 3)]
    for card in testee.hand + played_cards:
      card.set_game_type(game_type)
    self.assertEqual(testee.get_valid_cards_to_play(played_cards, game_type), [Card(Card.SPADES, 5), Card(Card.CLUBS, 3)])
    # non-trump is played which can't be matched without any trump in play
    game_type = GameType.TRUMP_HEARTS
    played_cards = [Card(Card.DIAMONDS, 1), Card(Card.CLUBS, 8)]
    for card in testee.hand + played_cards:
      card.set_game_type(game_type)
    self.assertEqual(testee.get_valid_cards_to_play(played_cards, game_type), [Card(Card.SPADES, 5), Card(Card.HEARTS, 5), Card(Card.HEARTS, 7), Card(Card.CLUBS, 3)])
    # non-trump is played with a trump in play
    game_type = GameType.TRUMP_HEARTS
    played_cards = [Card(Card.SPADES, 1), Card(Card.HEARTS, 8)]
    for card in testee.hand + played_cards:
      card.set_game_type(game_type)
    self.assertEqual(testee.get_valid_cards_to_play(played_cards, game_type), [Card(Card.SPADES, 5), Card(Card.HEARTS, 5)]) # not allowed to play K (undertrump)
    # non-trump is played with two trumps in play
    game_type = GameType.TRUMP_HEARTS
    played_cards = [Card(Card.SPADES, 1), Card(Card.HEARTS, 0), Card(Card.HEARTS, 8)]
    for card in testee.hand + played_cards:
      card.set_game_type(game_type)
    self.assertEqual(testee.get_valid_cards_to_play(played_cards, game_type), [Card(Card.SPADES, 5), Card(Card.HEARTS, 5)]) # not allowed to play K (undertrump)
    # non-trump is played which can't be matched with a trump in play
    game_type = GameType.TRUMP_HEARTS
    played_cards = [Card(Card.DIAMONDS, 1), Card(Card.HEARTS, 8)]
    for card in testee.hand + played_cards:
      card.set_game_type(game_type)
    self.assertEqual(testee.get_valid_cards_to_play(played_cards, game_type), [Card(Card.SPADES, 5), Card(Card.HEARTS, 5), Card(Card.CLUBS, 3)]) # STILL not allowed to play K (undertrump)
    # trump is played
    game_type = GameType.TRUMP_HEARTS
    played_cards = [Card(Card.HEARTS, 1), Card(Card.HEARTS, 2)]
    for card in testee.hand + played_cards:
      card.set_game_type(game_type)
    self.assertEqual(testee.get_valid_cards_to_play(played_cards, game_type), [Card(Card.HEARTS, 5), Card(Card.HEARTS, 7)])
    game_type = GameType.TRUMP_SPADES
    played_cards = [Card(Card.SPADES, 1), Card(Card.SPADES, 2)]
    for card in testee.hand + played_cards:
      card.set_game_type(game_type)
    self.assertEqual(testee.get_valid_cards_to_play(played_cards, game_type), testee.hand)
    testee.hand = [
        Card(Card.HEARTS, 5), # J
        Card(Card.HEARTS, 7), # K
    ]
    # untertrumping is allowed if there are only trumps in the hand
    # NOTE(review): game_type is still TRUMP_SPADES here, so the hearts in
    # hand are not trumps; TRUMP_HEARTS may have been intended — confirm.
    played_cards = [Card(Card.DIAMONDS, 1), Card(Card.HEARTS, 8)]
    for card in testee.hand + played_cards:
      card.set_game_type(game_type)
    self.assertEqual(testee.get_valid_cards_to_play(played_cards, game_type), [Card(Card.HEARTS, 5), Card(Card.HEARTS, 7)])
  def test_convert_to_relative_invalid(self):
    """Out-of-range player codes are rejected; the unknown code passes through."""
    testee = RandomCardPlayer("testee", 0, MagicMock())
    # the player number must be among the valid codes and specifically, among the player codes
    self.assertRaises(AssertionError, lambda: testee.convert_to_relative(6))
    self.assertRaises(AssertionError, lambda: testee.convert_to_relative(106))
    self.assertRaises(AssertionError, lambda: testee.convert_to_relative(10))
    self.assertRaises(AssertionError, lambda: testee.convert_to_relative(50))
    # unknown cards are ok and stay unknown
    self.assertEqual(testee.convert_to_relative(0), 0)
  def test_convert_to_relative_non_trump(self):
    """Non-trump codes rotate so each player sees itself as player 1."""
    # non-trump in -> non-trump out
    testee = RandomCardPlayer("testee", 1, MagicMock())
    self.assertEqual(testee.convert_to_relative(1), 1)
    self.assertEqual(testee.convert_to_relative(2), 2)
    self.assertEqual(testee.convert_to_relative(3), 3)
    self.assertEqual(testee.convert_to_relative(4), 4)
    testee = RandomCardPlayer("testee", 2, MagicMock())
    self.assertEqual(testee.convert_to_relative(1), 4)
    self.assertEqual(testee.convert_to_relative(2), 1)
    self.assertEqual(testee.convert_to_relative(3), 2)
    self.assertEqual(testee.convert_to_relative(4), 3)
    testee = RandomCardPlayer("testee", 3, MagicMock())
    self.assertEqual(testee.convert_to_relative(1), 3)
    self.assertEqual(testee.convert_to_relative(2), 4)
    self.assertEqual(testee.convert_to_relative(3), 1)
    self.assertEqual(testee.convert_to_relative(4), 2)
    testee = RandomCardPlayer("testee", 4, MagicMock())
    self.assertEqual(testee.convert_to_relative(1), 2)
    self.assertEqual(testee.convert_to_relative(2), 3)
    self.assertEqual(testee.convert_to_relative(3), 4)
    self.assertEqual(testee.convert_to_relative(4), 1)
  def test_convert_to_relative_trump(self):
    """Trump codes (offset by 100) rotate the same way and keep the offset."""
    # trump in -> trump out
    testee = RandomCardPlayer("testee", 1, MagicMock())
    self.assertEqual(testee.convert_to_relative(101), 101)
    self.assertEqual(testee.convert_to_relative(102), 102)
    self.assertEqual(testee.convert_to_relative(103), 103)
    self.assertEqual(testee.convert_to_relative(104), 104)
    testee = RandomCardPlayer("testee", 2, MagicMock())
    self.assertEqual(testee.convert_to_relative(101), 104)
    self.assertEqual(testee.convert_to_relative(102), 101)
    self.assertEqual(testee.convert_to_relative(103), 102)
    self.assertEqual(testee.convert_to_relative(104), 103)
    testee = RandomCardPlayer("testee", 3, MagicMock())
    self.assertEqual(testee.convert_to_relative(101), 103)
    self.assertEqual(testee.convert_to_relative(102), 104)
    self.assertEqual(testee.convert_to_relative(103), 101)
    self.assertEqual(testee.convert_to_relative(104), 102)
    testee = RandomCardPlayer("testee", 4, MagicMock())
    self.assertEqual(testee.convert_to_relative(101), 102)
    self.assertEqual(testee.convert_to_relative(102), 103)
    self.assertEqual(testee.convert_to_relative(103), 104)
    self.assertEqual(testee.convert_to_relative(104), 101)
| [
"parameterized.parameterized.expand",
"unittest.mock.MagicMock",
"utils.flatten",
"encoding.Encoding",
"numpy.random.randint",
"card.Card",
"numpy.random.seed",
"player.Player._sort_decision_state",
"itertools.permutations"
] | [((1649, 1688), 'parameterized.parameterized.expand', 'parameterized.expand', (['[[True], [False]]'], {}), '([[True], [False]])\n', (1669, 1688), False, 'from parameterized import parameterized\n'), ((4418, 4457), 'parameterized.parameterized.expand', 'parameterized.expand', (['[[True], [False]]'], {}), '([[True], [False]])\n', (4438, 4457), False, 'from parameterized import parameterized\n'), ((5123, 5186), 'parameterized.parameterized.expand', 'parameterized.expand', (['[[GameType.OBENABE], [GameType.UNNENUFE]]'], {}), '([[GameType.OBENABE], [GameType.UNNENUFE]])\n', (5143, 5186), False, 'from parameterized import parameterized\n'), ((595, 713), 'encoding.Encoding', 'Encoding', (['"""better"""', '[1, 2, 3, 4]', '(5)', '[10, 15, 20]', '(50)', '(0)', '(0)'], {'relative_in_play_encoding': '(True)', 'trump_code_offset': '(100)'}), "('better', [1, 2, 3, 4], 5, [10, 15, 20], 50, 0, 0,\n relative_in_play_encoding=True, trump_code_offset=100)\n", (603, 713), False, 'from encoding import Encoding\n'), ((4540, 4558), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4554, 4558), True, 'import numpy as np\n'), ((1200, 1220), 'utils.flatten', 'flatten', (['permutation'], {}), '(permutation)\n', (1207, 1220), False, 'from utils import flatten\n'), ((1582, 1604), 'utils.flatten', 'flatten', (['ordered_cards'], {}), '(ordered_cards)\n', (1589, 1604), False, 'from utils import flatten\n'), ((5307, 5318), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5316, 5318), False, 'from unittest.mock import MagicMock\n'), ((5348, 5368), 'card.Card', 'Card', (['Card.SPADES', '(5)'], {}), '(Card.SPADES, 5)\n', (5352, 5368), False, 'from card import Card\n'), ((5382, 5402), 'card.Card', 'Card', (['Card.HEARTS', '(0)'], {}), '(Card.HEARTS, 0)\n', (5386, 5402), False, 'from card import Card\n'), ((5416, 5436), 'card.Card', 'Card', (['Card.HEARTS', '(7)'], {}), '(Card.HEARTS, 7)\n', (5420, 5436), False, 'from card import Card\n'), ((5450, 5469), 'card.Card', 
'Card', (['Card.CLUBS', '(3)'], {}), '(Card.CLUBS, 3)\n', (5454, 5469), False, 'from card import Card\n'), ((6352, 6363), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6361, 6363), False, 'from unittest.mock import MagicMock\n'), ((6393, 6413), 'card.Card', 'Card', (['Card.SPADES', '(5)'], {}), '(Card.SPADES, 5)\n', (6397, 6413), False, 'from card import Card\n'), ((6427, 6447), 'card.Card', 'Card', (['Card.HEARTS', '(5)'], {}), '(Card.HEARTS, 5)\n', (6431, 6447), False, 'from card import Card\n'), ((6461, 6481), 'card.Card', 'Card', (['Card.HEARTS', '(7)'], {}), '(Card.HEARTS, 7)\n', (6465, 6481), False, 'from card import Card\n'), ((6495, 6514), 'card.Card', 'Card', (['Card.CLUBS', '(3)'], {}), '(Card.CLUBS, 3)\n', (6499, 6514), False, 'from card import Card\n'), ((6641, 6661), 'card.Card', 'Card', (['Card.SPADES', '(1)'], {}), '(Card.SPADES, 1)\n', (6645, 6661), False, 'from card import Card\n'), ((6947, 6967), 'card.Card', 'Card', (['Card.SPADES', '(1)'], {}), '(Card.SPADES, 1)\n', (6951, 6967), False, 'from card import Card\n'), ((6969, 6991), 'card.Card', 'Card', (['Card.DIAMONDS', '(3)'], {}), '(Card.DIAMONDS, 3)\n', (6973, 6991), False, 'from card import Card\n'), ((7330, 7352), 'card.Card', 'Card', (['Card.DIAMONDS', '(1)'], {}), '(Card.DIAMONDS, 1)\n', (7334, 7352), False, 'from card import Card\n'), ((7354, 7373), 'card.Card', 'Card', (['Card.CLUBS', '(8)'], {}), '(Card.CLUBS, 8)\n', (7358, 7373), False, 'from card import Card\n'), ((7728, 7748), 'card.Card', 'Card', (['Card.SPADES', '(1)'], {}), '(Card.SPADES, 1)\n', (7732, 7748), False, 'from card import Card\n'), ((7750, 7770), 'card.Card', 'Card', (['Card.HEARTS', '(8)'], {}), '(Card.HEARTS, 8)\n', (7754, 7770), False, 'from card import Card\n'), ((8122, 8142), 'card.Card', 'Card', (['Card.SPADES', '(1)'], {}), '(Card.SPADES, 1)\n', (8126, 8142), False, 'from card import Card\n'), ((8144, 8164), 'card.Card', 'Card', (['Card.HEARTS', '(0)'], {}), '(Card.HEARTS, 0)\n', (8148, 8164), False, 
'from card import Card\n'), ((8166, 8186), 'card.Card', 'Card', (['Card.HEARTS', '(8)'], {}), '(Card.HEARTS, 8)\n', (8170, 8186), False, 'from card import Card\n'), ((8558, 8580), 'card.Card', 'Card', (['Card.DIAMONDS', '(1)'], {}), '(Card.DIAMONDS, 1)\n', (8562, 8580), False, 'from card import Card\n'), ((8582, 8602), 'card.Card', 'Card', (['Card.HEARTS', '(8)'], {}), '(Card.HEARTS, 8)\n', (8586, 8602), False, 'from card import Card\n'), ((8953, 8973), 'card.Card', 'Card', (['Card.HEARTS', '(1)'], {}), '(Card.HEARTS, 1)\n', (8957, 8973), False, 'from card import Card\n'), ((8975, 8995), 'card.Card', 'Card', (['Card.HEARTS', '(2)'], {}), '(Card.HEARTS, 2)\n', (8979, 8995), False, 'from card import Card\n'), ((9260, 9280), 'card.Card', 'Card', (['Card.SPADES', '(1)'], {}), '(Card.SPADES, 1)\n', (9264, 9280), False, 'from card import Card\n'), ((9282, 9302), 'card.Card', 'Card', (['Card.SPADES', '(2)'], {}), '(Card.SPADES, 2)\n', (9286, 9302), False, 'from card import Card\n'), ((9504, 9524), 'card.Card', 'Card', (['Card.HEARTS', '(5)'], {}), '(Card.HEARTS, 5)\n', (9508, 9524), False, 'from card import Card\n'), ((9538, 9558), 'card.Card', 'Card', (['Card.HEARTS', '(7)'], {}), '(Card.HEARTS, 7)\n', (9542, 9558), False, 'from card import Card\n'), ((9663, 9685), 'card.Card', 'Card', (['Card.DIAMONDS', '(1)'], {}), '(Card.DIAMONDS, 1)\n', (9667, 9685), False, 'from card import Card\n'), ((9687, 9707), 'card.Card', 'Card', (['Card.HEARTS', '(8)'], {}), '(Card.HEARTS, 8)\n', (9691, 9707), False, 'from card import Card\n'), ((10003, 10014), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (10012, 10014), False, 'from unittest.mock import MagicMock\n'), ((10652, 10663), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (10661, 10663), False, 'from unittest.mock import MagicMock\n'), ((10929, 10940), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (10938, 10940), False, 'from unittest.mock import MagicMock\n'), ((11206, 11217), 
'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (11215, 11217), False, 'from unittest.mock import MagicMock\n'), ((11483, 11494), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (11492, 11494), False, 'from unittest.mock import MagicMock\n'), ((11832, 11843), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (11841, 11843), False, 'from unittest.mock import MagicMock\n'), ((12125, 12136), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (12134, 12136), False, 'from unittest.mock import MagicMock\n'), ((12418, 12429), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (12427, 12429), False, 'from unittest.mock import MagicMock\n'), ((12711, 12722), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (12720, 12722), False, 'from unittest.mock import MagicMock\n'), ((1240, 1267), 'itertools.permutations', 'permutations', (['cards_by_suit'], {}), '(cards_by_suit)\n', (1252, 1267), False, 'from itertools import permutations\n'), ((1392, 1419), 'itertools.permutations', 'permutations', (['cards_by_suit'], {}), '(cards_by_suit)\n', (1404, 1419), False, 'from itertools import permutations\n'), ((5985, 6005), 'card.Card', 'Card', (['Card.SPADES', '(5)'], {}), '(Card.SPADES, 5)\n', (5989, 6005), False, 'from card import Card\n'), ((6105, 6125), 'card.Card', 'Card', (['Card.HEARTS', '(0)'], {}), '(Card.HEARTS, 0)\n', (6109, 6125), False, 'from card import Card\n'), ((6127, 6147), 'card.Card', 'Card', (['Card.HEARTS', '(7)'], {}), '(Card.HEARTS, 7)\n', (6131, 6147), False, 'from card import Card\n'), ((6238, 6257), 'card.Card', 'Card', (['Card.CLUBS', '(3)'], {}), '(Card.CLUBS, 3)\n', (6242, 6257), False, 'from card import Card\n'), ((6822, 6842), 'card.Card', 'Card', (['Card.SPADES', '(5)'], {}), '(Card.SPADES, 5)\n', (6826, 6842), False, 'from card import Card\n'), ((6844, 6864), 'card.Card', 'Card', (['Card.HEARTS', '(5)'], {}), '(Card.HEARTS, 5)\n', (6848, 6864), False, 'from card import Card\n'), ((6866, 6886), 
'card.Card', 'Card', (['Card.HEARTS', '(7)'], {}), '(Card.HEARTS, 7)\n', (6870, 6886), False, 'from card import Card\n'), ((7152, 7172), 'card.Card', 'Card', (['Card.SPADES', '(5)'], {}), '(Card.SPADES, 5)\n', (7156, 7172), False, 'from card import Card\n'), ((7174, 7193), 'card.Card', 'Card', (['Card.CLUBS', '(3)'], {}), '(Card.CLUBS, 3)\n', (7178, 7193), False, 'from card import Card\n'), ((7534, 7554), 'card.Card', 'Card', (['Card.SPADES', '(5)'], {}), '(Card.SPADES, 5)\n', (7538, 7554), False, 'from card import Card\n'), ((7556, 7576), 'card.Card', 'Card', (['Card.HEARTS', '(5)'], {}), '(Card.HEARTS, 5)\n', (7560, 7576), False, 'from card import Card\n'), ((7578, 7598), 'card.Card', 'Card', (['Card.HEARTS', '(7)'], {}), '(Card.HEARTS, 7)\n', (7582, 7598), False, 'from card import Card\n'), ((7600, 7619), 'card.Card', 'Card', (['Card.CLUBS', '(3)'], {}), '(Card.CLUBS, 3)\n', (7604, 7619), False, 'from card import Card\n'), ((7931, 7951), 'card.Card', 'Card', (['Card.SPADES', '(5)'], {}), '(Card.SPADES, 5)\n', (7935, 7951), False, 'from card import Card\n'), ((7953, 7973), 'card.Card', 'Card', (['Card.HEARTS', '(5)'], {}), '(Card.HEARTS, 5)\n', (7957, 7973), False, 'from card import Card\n'), ((8347, 8367), 'card.Card', 'Card', (['Card.SPADES', '(5)'], {}), '(Card.SPADES, 5)\n', (8351, 8367), False, 'from card import Card\n'), ((8369, 8389), 'card.Card', 'Card', (['Card.HEARTS', '(5)'], {}), '(Card.HEARTS, 5)\n', (8373, 8389), False, 'from card import Card\n'), ((8763, 8783), 'card.Card', 'Card', (['Card.SPADES', '(5)'], {}), '(Card.SPADES, 5)\n', (8767, 8783), False, 'from card import Card\n'), ((8785, 8805), 'card.Card', 'Card', (['Card.HEARTS', '(5)'], {}), '(Card.HEARTS, 5)\n', (8789, 8805), False, 'from card import Card\n'), ((8807, 8826), 'card.Card', 'Card', (['Card.CLUBS', '(3)'], {}), '(Card.CLUBS, 3)\n', (8811, 8826), False, 'from card import Card\n'), ((9156, 9176), 'card.Card', 'Card', (['Card.HEARTS', '(5)'], {}), '(Card.HEARTS, 5)\n', (9160, 9176), 
False, 'from card import Card\n'), ((9178, 9198), 'card.Card', 'Card', (['Card.HEARTS', '(7)'], {}), '(Card.HEARTS, 7)\n', (9182, 9198), False, 'from card import Card\n'), ((9868, 9888), 'card.Card', 'Card', (['Card.HEARTS', '(5)'], {}), '(Card.HEARTS, 5)\n', (9872, 9888), False, 'from card import Card\n'), ((9890, 9910), 'card.Card', 'Card', (['Card.HEARTS', '(7)'], {}), '(Card.HEARTS, 7)\n', (9894, 9910), False, 'from card import Card\n'), ((1051, 1106), 'player.Player._sort_decision_state', 'Player._sort_decision_state', (['permutation', 'cards_by_suit'], {}), '(permutation, cards_by_suit)\n', (1078, 1106), False, 'from player import Player\n'), ((4624, 4671), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', 'Const.CARDS_PER_SUIT'], {}), '(0, 255, Const.CARDS_PER_SUIT)\n', (4641, 4671), True, 'import numpy as np\n'), ((4689, 4736), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', 'Const.CARDS_PER_SUIT'], {}), '(0, 255, Const.CARDS_PER_SUIT)\n', (4706, 4736), True, 'import numpy as np\n'), ((4754, 4801), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', 'Const.CARDS_PER_SUIT'], {}), '(0, 255, Const.CARDS_PER_SUIT)\n', (4771, 4801), True, 'import numpy as np\n'), ((4819, 4866), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', 'Const.CARDS_PER_SUIT'], {}), '(0, 255, Const.CARDS_PER_SUIT)\n', (4836, 4866), True, 'import numpy as np\n'), ((5670, 5692), 'card.Card', 'Card', (['Card.DIAMONDS', '(1)'], {}), '(Card.DIAMONDS, 1)\n', (5674, 5692), False, 'from card import Card\n'), ((5780, 5802), 'card.Card', 'Card', (['Card.DIAMONDS', '(1)'], {}), '(Card.DIAMONDS, 1)\n', (5784, 5802), False, 'from card import Card\n'), ((5804, 5824), 'card.Card', 'Card', (['Card.SPADES', '(1)'], {}), '(Card.SPADES, 1)\n', (5808, 5824), False, 'from card import Card\n'), ((5826, 5846), 'card.Card', 'Card', (['Card.HEARTS', '(1)'], {}), '(Card.HEARTS, 1)\n', (5830, 5846), False, 'from card import Card\n'), ((5949, 5969), 'card.Card', 
'Card', (['Card.SPADES', '(1)'], {}), '(Card.SPADES, 1)\n', (5953, 5969), False, 'from card import Card\n'), ((6061, 6081), 'card.Card', 'Card', (['Card.HEARTS', '(1)'], {}), '(Card.HEARTS, 1)\n', (6065, 6081), False, 'from card import Card\n'), ((6203, 6222), 'card.Card', 'Card', (['Card.CLUBS', '(1)'], {}), '(Card.CLUBS, 1)\n', (6207, 6222), False, 'from card import Card\n')] |
import pandas as pd
import numpy as np
import py_entitymatching as em
from .magellan_modified_feature_generation import get_features
#Given a candidate set and the list of actual duplicates (duplicates_df),
#this function adds the 1/0 labels (column name = 'gold') to the candset dataframe
def add_labels_to_candset(duplicates_df, candset_df, ltable_df, rtable_df):
    """Attach a binary 'gold' label column to the candidate set.

    A candidate pair is labelled 1 when it also appears in duplicates_df
    (the ground-truth duplicate list) and 0 otherwise.  Magellan metadata
    (key and table properties) is registered on the result so downstream
    em.* calls work.
    """
    # Overwrite the column names in place; duplicates_df is not reused
    # elsewhere, so the mutation is acceptable.
    duplicates_df.columns = ["ltable_id", "rtable_id"]
    # Left-merge with an indicator column: rows present in *both* frames
    # are exactly the true duplicates.
    labelled = pd.merge(
        candset_df,
        duplicates_df,
        on=["ltable_id", "rtable_id"],
        how="left",
        indicator="gold",
    )
    labelled["gold"] = np.where(labelled.gold == "both", 1, 0)
    # Register the Magellan metadata expected by later em.* calls.
    em.set_key(labelled, "_id")
    em.set_property(labelled, "ltable", ltable_df)
    em.set_property(labelled, "rtable", rtable_df)
    em.set_property(labelled, "fk_ltable", "ltable_id")
    em.set_property(labelled, "fk_rtable", "rtable_id")
    return labelled
def get_features_for_type(column_type):
    """Return the list of feature specs to generate for a column type.

    Each entry is either the name of a similarity function (str) or a
    (sim_function, left_tokenizer, right_tokenizer) tuple.

    Parameters
    ----------
    column_type : str
        One of: 'str_eq_1w', 'str_bt_1w_5w', 'str_bt_5w_10w',
        'str_gt_10w', 'numeric', 'boolean', 'un_determined'.

    Returns
    -------
    list
        The feature specs for that type.

    Raises
    ------
    TypeError
        If `column_type` is not one of the known type names.
    """
    # NOTE: the original implementation compared strings with `is`, which
    # only worked by accident of CPython string interning (and raises a
    # SyntaxWarning on modern Python); a dict lookup is the correct form.
    lookup_table = {
        # Single-word strings: character-level measures plus q-gram jaccard.
        'str_eq_1w': ['lev_dist', 'lev_sim', 'jaro', 'jaro_winkler',
                      'exact_match', ('jaccard', 'qgm_3', 'qgm_3')],
        # 1-5 word strings (dlm_dc0 is the whitespace tokenizer).
        'str_bt_1w_5w': [('jaccard', 'qgm_3', 'qgm_3'),
                         ('cosine', 'dlm_dc0', 'dlm_dc0'),
                         ('jaccard', 'dlm_dc0', 'dlm_dc0'),
                         'monge_elkan', 'lev_dist', 'lev_sim',
                         'needleman_wunsch', 'smith_waterman'],
        # 5-10 word strings: token-based measures dominate.
        'str_bt_5w_10w': [('jaccard', 'qgm_3', 'qgm_3'),
                          ('cosine', 'dlm_dc0', 'dlm_dc0'),
                          'monge_elkan', 'lev_dist', 'lev_sim'],
        # Long strings: set/vector similarity only.
        'str_gt_10w': [('jaccard', 'qgm_3', 'qgm_3'),
                       ('cosine', 'dlm_dc0', 'dlm_dc0')],
        'numeric': ['exact_match', 'abs_norm', 'lev_dist', 'lev_sim'],
        'boolean': ['exact_match'],
        'un_determined': [],
    }
    try:
        return lookup_table[column_type]
    except KeyError:
        raise TypeError('Unknown type') from None
def extract_features(ltable_df, rtable_df, candset_df):
    """Generate similarity feature vectors for every candidate pair.

    Attribute types are reconciled across the two tables (when they
    disagree, both sides are promoted to the more general type), id-based
    and distance/non-normalized features are discarded, and the remaining
    feature functions are evaluated over candset_df.
    """
    tokenizers = em.get_tokenizers_for_matching()
    sim_functions = em.get_sim_funs_for_matching()
    left_attr_types = em.get_attr_types(ltable_df)
    right_attr_types = em.get_attr_types(rtable_df)
    correspondences = em.get_attr_corres(ltable_df, rtable_df)
    # Types ranked from most specific (boolean) to most general
    # (un_determined); mismatched pairs adopt the more general type.
    attribute_type_rank = {'boolean': 1, 'numeric': 2, 'str_eq_1w': 3,
                           'str_bt_1w_5w': 4, 'str_bt_5w_10w': 5,
                           'str_gt_10w': 6, 'un_determined': 7}
    for c in correspondences['corres']:
        left_type = left_attr_types[c[0]]
        right_type = right_attr_types[c[1]]
        if left_type != right_type:
            if attribute_type_rank[left_type] < attribute_type_rank[right_type]:
                left_attr_types[c[0]] = right_type
            else:
                right_attr_types[c[1]] = left_type
    feature_records = get_features(ltable_df, rtable_df, left_attr_types,
                                   right_attr_types, correspondences,
                                   tokenizers, sim_functions)
    # Features computed over the id column are rarely informative.
    feature_records = feature_records[feature_records.left_attribute != 'id']
    feature_records.reset_index(inplace=True, drop=True)
    # Exclude distance-style and non-normalized similarity functions.
    excluded_funcs = ["lev_dist", "rdf", "aff", "sw", "swn", "nmw"]
    keep = [not any(func in name for func in excluded_funcs)
            for name in feature_records["feature_name"]]
    feature_records = feature_records.loc[keep, :]
    print("\n\nExtracting the full set of features:")
    candset_features_df = em.extract_feature_vecs(candset_df,
                                                  feature_table=feature_records,
                                                  attrs_after='gold',
                                                  show_progress=True,
                                                  n_jobs=-1)
    candset_features_df.fillna(value=0, inplace=True)
    return candset_features_df
def extract_features_auto(ltable_df, rtable_df, candset_df):
    """Generate feature vectors using Magellan's automatic feature inference.

    Unlike extract_features(), the feature table here is produced entirely
    by py_entitymatching; only id-based features are filtered out.
    """
    feature_table = em.get_features_for_matching(
        ltable_df, rtable_df, validate_inferred_attr_types=False)
    # Features computed over the id column are rarely informative.
    feature_table = feature_table[feature_table.left_attribute != 'id']
    print("\n\nExtracting the full set of features:")
    features = em.extract_feature_vecs(
        candset_df, feature_table=feature_table,
        attrs_after='gold', show_progress=True)
    features.fillna(value=0, inplace=True)
    return features
#High level function which just adds labels and the complete set of features to candset
def gather_features_and_labels(ltable_df, rtable_df, labels_df, candset_df):
    """Label the candidate set and compute its feature vectors.

    High-level convenience wrapper: normalises the id columns to strings,
    adds the 1/0 'gold' labels and then extracts the full feature set.
    """
    labels_df.columns = ["ltable_id", "rtable_id"]
    # Cast every join key to str so the label merge and feature lookups
    # line up regardless of the original dtypes.
    for frame, col in ((labels_df, "ltable_id"), (labels_df, "rtable_id"),
                       (candset_df, "ltable_id"), (candset_df, "rtable_id"),
                       (ltable_df, "id"), (rtable_df, "id")):
        frame[col] = frame[col].astype(str)
    labelled_candset = add_labels_to_candset(labels_df, candset_df,
                                             ltable_df, rtable_df)
    return extract_features(ltable_df, rtable_df, labelled_candset)
#Filter out bad features (non similarity, non distance, singular valued)
def gather_similarity_features(candset_features_df, avged = False):
    """Filter a Magellan feature-vector frame down to usable similarity features.

    Drops distance-based and non-normalized feature columns, the
    bookkeeping columns ('gold', '_id', 'ltable_id', 'rtable_id') and any
    column holding a single constant value.  With avged=True the surviving
    features are collapsed to one averaged column per attribute, where the
    attribute name is the prefix before the first underscore of each
    feature column name (matched by substring, as before).

    Parameters
    ----------
    candset_features_df : pd.DataFrame
        Feature vectors as produced by extract_features(); must contain
        the four bookkeeping columns listed above.
    avged : bool, optional
        When True, return per-attribute averages instead of raw features.

    Returns
    -------
    pd.DataFrame
    """
    # Substring markers of features that are distances or not normalized to [0, 1].
    excluded_funcs = ["lev_dist", "rdf", "aff", "sw", "swn", "nmw"]
    dropped_cols = [col for col in candset_features_df.columns
                    if any(func in col for func in excluded_funcs)]
    similarity_features_df = candset_features_df.drop(dropped_cols, axis=1)
    similarity_features_df = similarity_features_df.drop(
        ['gold', '_id', 'ltable_id', 'rtable_id'], axis=1)
    # Constant columns carry no signal; remove them.
    constant_cols = [col for col, n_unique in similarity_features_df.nunique().items()
                     if n_unique == 1]
    similarity_features_df = similarity_features_df.drop(constant_cols, axis=1)
    if not avged:  # was `avged == False` — use truthiness
        return similarity_features_df
    headers = similarity_features_df.columns.values
    attributes = {h.split("_")[0] for h in headers}
    avged_df = pd.DataFrame()
    # sorted() makes the output column order deterministic; the original
    # iterated a plain set, whose order is arbitrary between runs.
    for attribute in sorted(attributes):
        matching = [h for h in headers if attribute in h]
        # Vectorized row-wise mean replaces the manual np.add accumulation.
        avged_df[attribute] = similarity_features_df[matching].mean(axis=1).to_numpy()
    return avged_df
"py_entitymatching.get_attr_corres",
"numpy.add",
"py_entitymatching.set_property",
"numpy.where",
"pandas.merge",
"py_entitymatching.extract_feature_vecs",
"py_entitymatching.get_features_for_matching",
"py_entitymatching.set_key",
"numpy.zeros",
"py_entitymatching.get_tokenizers_for_matching",
... | [((652, 753), 'pandas.merge', 'pd.merge', (['candset_df', 'duplicates_df'], {'on': "['ltable_id', 'rtable_id']", 'how': '"""left"""', 'indicator': '"""gold"""'}), "(candset_df, duplicates_df, on=['ltable_id', 'rtable_id'], how=\n 'left', indicator='gold')\n", (660, 753), True, 'import pandas as pd\n'), ((866, 909), 'numpy.where', 'np.where', (["(df_with_gold.gold == 'both')", '(1)', '(0)'], {}), "(df_with_gold.gold == 'both', 1, 0)\n", (874, 909), True, 'import numpy as np\n'), ((959, 990), 'py_entitymatching.set_key', 'em.set_key', (['df_with_gold', '"""_id"""'], {}), "(df_with_gold, '_id')\n", (969, 990), True, 'import py_entitymatching as em\n'), ((995, 1045), 'py_entitymatching.set_property', 'em.set_property', (['df_with_gold', '"""ltable"""', 'ltable_df'], {}), "(df_with_gold, 'ltable', ltable_df)\n", (1010, 1045), True, 'import py_entitymatching as em\n'), ((1049, 1099), 'py_entitymatching.set_property', 'em.set_property', (['df_with_gold', '"""rtable"""', 'rtable_df'], {}), "(df_with_gold, 'rtable', rtable_df)\n", (1064, 1099), True, 'import py_entitymatching as em\n'), ((1103, 1158), 'py_entitymatching.set_property', 'em.set_property', (['df_with_gold', '"""fk_ltable"""', '"""ltable_id"""'], {}), "(df_with_gold, 'fk_ltable', 'ltable_id')\n", (1118, 1158), True, 'import py_entitymatching as em\n'), ((1162, 1217), 'py_entitymatching.set_property', 'em.set_property', (['df_with_gold', '"""fk_rtable"""', '"""rtable_id"""'], {}), "(df_with_gold, 'fk_rtable', 'rtable_id')\n", (1177, 1217), True, 'import py_entitymatching as em\n'), ((3703, 3735), 'py_entitymatching.get_tokenizers_for_matching', 'em.get_tokenizers_for_matching', ([], {}), '()\n', (3733, 3735), True, 'import py_entitymatching as em\n'), ((3756, 3786), 'py_entitymatching.get_sim_funs_for_matching', 'em.get_sim_funs_for_matching', ([], {}), '()\n', (3784, 3786), True, 'import py_entitymatching as em\n'), ((3809, 3837), 'py_entitymatching.get_attr_types', 'em.get_attr_types', (['ltable_df'], 
{}), '(ltable_df)\n', (3826, 3837), True, 'import py_entitymatching as em\n'), ((3861, 3889), 'py_entitymatching.get_attr_types', 'em.get_attr_types', (['rtable_df'], {}), '(rtable_df)\n', (3878, 3889), True, 'import py_entitymatching as em\n'), ((3912, 3952), 'py_entitymatching.get_attr_corres', 'em.get_attr_corres', (['ltable_df', 'rtable_df'], {}), '(ltable_df, rtable_df)\n', (3930, 3952), True, 'import py_entitymatching as em\n'), ((5339, 5460), 'py_entitymatching.extract_feature_vecs', 'em.extract_feature_vecs', (['candset_df'], {'feature_table': 'feature_records', 'attrs_after': '"""gold"""', 'show_progress': '(True)', 'n_jobs': '(-1)'}), "(candset_df, feature_table=feature_records,\n attrs_after='gold', show_progress=True, n_jobs=-1)\n", (5362, 5460), True, 'import py_entitymatching as em\n'), ((5622, 5712), 'py_entitymatching.get_features_for_matching', 'em.get_features_for_matching', (['ltable_df', 'rtable_df'], {'validate_inferred_attr_types': '(False)'}), '(ltable_df, rtable_df,\n validate_inferred_attr_types=False)\n', (5650, 5712), True, 'import py_entitymatching as em\n'), ((5918, 6026), 'py_entitymatching.extract_feature_vecs', 'em.extract_feature_vecs', (['candset_df'], {'feature_table': 'feature_list', 'attrs_after': '"""gold"""', 'show_progress': '(True)'}), "(candset_df, feature_table=feature_list, attrs_after\n ='gold', show_progress=True)\n", (5941, 6026), True, 'import py_entitymatching as em\n'), ((8217, 8231), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8229, 8231), True, 'import pandas as pd\n'), ((8343, 8381), 'numpy.zeros', 'np.zeros', (['candset_features_df.shape[0]'], {}), '(candset_features_df.shape[0])\n', (8351, 8381), True, 'import numpy as np\n'), ((8510, 8556), 'numpy.add', 'np.add', (['matches', 'candset_features_df[h].values'], {}), '(matches, candset_features_df[h].values)\n', (8516, 8556), True, 'import numpy as np\n')] |
import os
from scipy.io import loadmat
import h5py
import numpy as np
from tools.getDistSqrtVar import getDistSqrtVar
from tools.getCNNFeature import getCNNFeature
from tools.get_ilsvrimdb import readAnnotation as ilsvr_readAnnotation
from tools.get_cubimdb import readAnnotation as cub_readAnnotation
from tools.get_vocimdb import readAnnotation as voc_readAnnotation
def x2P(idx_h, idx_w, layerID, convnet):
    """Map feature-map (row, col) indices to image-plane coordinates.

    Uses the receptive-field stride and centre offset recorded for the
    given layer in the convnet description.  Assumes idx_h and idx_w are
    1-D index vectors of equal length.

    Returns a 2 x N array: row 0 holds mapped heights, row 1 mapped widths.
    """
    grid = np.vstack((idx_h, idx_w))
    stride = convnet['targetStride'][layerID - 1]
    center = convnet['targetCenter'][layerID - 1]
    # Indices are 1-based: index 1 maps onto the centre position itself.
    return center + (grid - 1) * stride
def computeStability(root_path,dataset,dataset_path, truthpart_path, label_name, net, model, convnet, layerID, epochnum, partList, partRate, imdb_mean, selectPatternRatio, patchNumPerPattern):
    """Compute the location-stability score of the learned pattern channels.

    For every valid image, the peak-response location of each pattern
    channel is mapped back to image coordinates via x2P(); the deviation
    of those locations relative to the annotated part centres
    (getDistSqrtVar) is averaged over the most stable patterns and
    normalised by the image diagonal.
    """
    # Pick the annotation reader matching the dataset layout.
    if "ilsvrcanimalpart" in dataset_path:
        objset = ilsvr_readAnnotation(dataset_path, label_name)
    elif "vocpart" in dataset_path:
        objset = voc_readAnnotation(root_path, dataset, dataset_path, label_name)
    elif "cub200" in dataset_path:
        objset = cub_readAnnotation(dataset_path, label_name)
    imgNum = len(objset)
    partNum = len(partList)
    # An image is "valid" when at least one listed part has a ground-truth
    # centre annotation in the corresponding truth_part*.mat file.
    validImg = np.zeros(imgNum)
    for i in range(partNum):
        partID = partList[i]
        file_path = os.path.join(truthpart_path,label_name, "truth_part"+str(0) + str(partID)+'.mat')
        a = h5py.File(file_path,'r')
        truth_center = a['truth']['pHW_center']
        for img in range(imgNum):
            # Annotated entries dereference to ndarrays; missing ones do not.
            if type(a[truth_center[img][0]][0]) is np.ndarray:
                validImg[img] = True
    # patNum: number of pattern channels evaluated (a fraction of 512 filters).
    patNum = round(512*partRate)
    selectedPatternNum = round(patNum*selectPatternRatio)
    pos = np.zeros((2,patNum,imgNum))
    score = np.zeros((patNum, imgNum))
    isFlip = False
    for imgID in range(imgNum):
        if(validImg[imgID]==0):
            continue
        x,I = getCNNFeature(dataset_path,objset[imgID],net,isFlip,imdb_mean, epochnum, model) # get after conv_mask feature
        x = x[:,0:patNum,:,:]
        x = np.squeeze(x,axis=0)
        xh = x.shape[1]
        # Two-stage reduction over the spatial axes: first find, per channel,
        # the best row for each column, then the best column overall.
        v = np.max(x, axis=1)
        idx = np.argmax(x, axis=1)
        tmp = np.argmax(v, axis=1)
        v = np.max(v, axis=1)
        idx = idx.reshape(idx.shape[0] * idx.shape[1])
        # NOTE(review): the flat index below uses xh (spatial height) as the
        # row stride of the flattened (patNum, width) array, which is only
        # correct when the feature map is square (height == width) -- confirm.
        idx_h = idx[tmp + np.array(range(0, patNum)) * xh] # idx_h.shape=(patNum,)
        idx_w = tmp # idx_w.shape=(patNum,)
        theScore = v # v.shape=(patNum,)
        thePos = x2P(idx_h,idx_w,layerID,convnet)
        pos[:,:,imgID] = thePos
        score[:,imgID] = theScore
        ih = I.shape[0]
        iw = I.shape[1]
    # NOTE(review): ih/iw retain the size of the *last* processed image; the
    # diagonal normalisation below assumes all images share that size -- confirm.
    distSqrtVar = getDistSqrtVar(truthpart_path, pos, score, patchNumPerPattern, partList, label_name)
    # Drop NaN deviations, then average the most stable patterns and
    # normalise by the image diagonal.
    distSqrtVar = np.sort(distSqrtVar[np.isnan(distSqrtVar) == 0])
    stability = np.mean(distSqrtVar[0:min(selectedPatternNum, len(distSqrtVar))])/np.sqrt(np.power(ih,2)+np.power(iw,2))
    return stability
| [
"tools.getCNNFeature.getCNNFeature",
"numpy.power",
"tools.get_ilsvrimdb.readAnnotation",
"tools.getDistSqrtVar.getDistSqrtVar",
"numpy.argmax",
"h5py.File",
"numpy.squeeze",
"numpy.max",
"numpy.zeros",
"numpy.isnan",
"numpy.concatenate",
"tools.get_cubimdb.readAnnotation",
"tools.get_vocimd... | [((500, 538), 'numpy.concatenate', 'np.concatenate', (['(idx_h, idx_w)'], {'axis': '(0)'}), '((idx_h, idx_w), axis=0)\n', (514, 538), True, 'import numpy as np\n'), ((1301, 1317), 'numpy.zeros', 'np.zeros', (['imgNum'], {}), '(imgNum)\n', (1309, 1317), True, 'import numpy as np\n'), ((1811, 1840), 'numpy.zeros', 'np.zeros', (['(2, patNum, imgNum)'], {}), '((2, patNum, imgNum))\n', (1819, 1840), True, 'import numpy as np\n'), ((1852, 1878), 'numpy.zeros', 'np.zeros', (['(patNum, imgNum)'], {}), '((patNum, imgNum))\n', (1860, 1878), True, 'import numpy as np\n'), ((2746, 2834), 'tools.getDistSqrtVar.getDistSqrtVar', 'getDistSqrtVar', (['truthpart_path', 'pos', 'score', 'patchNumPerPattern', 'partList', 'label_name'], {}), '(truthpart_path, pos, score, patchNumPerPattern, partList,\n label_name)\n', (2760, 2834), False, 'from tools.getDistSqrtVar import getDistSqrtVar\n'), ((962, 1008), 'tools.get_ilsvrimdb.readAnnotation', 'ilsvr_readAnnotation', (['dataset_path', 'label_name'], {}), '(dataset_path, label_name)\n', (982, 1008), True, 'from tools.get_ilsvrimdb import readAnnotation as ilsvr_readAnnotation\n'), ((1494, 1519), 'h5py.File', 'h5py.File', (['file_path', '"""r"""'], {}), "(file_path, 'r')\n", (1503, 1519), False, 'import h5py\n'), ((2002, 2089), 'tools.getCNNFeature.getCNNFeature', 'getCNNFeature', (['dataset_path', 'objset[imgID]', 'net', 'isFlip', 'imdb_mean', 'epochnum', 'model'], {}), '(dataset_path, objset[imgID], net, isFlip, imdb_mean, epochnum,\n model)\n', (2015, 2089), False, 'from tools.getCNNFeature import getCNNFeature\n'), ((2156, 2177), 'numpy.squeeze', 'np.squeeze', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2166, 2177), True, 'import numpy as np\n'), ((2215, 2232), 'numpy.max', 'np.max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (2221, 2232), True, 'import numpy as np\n'), ((2248, 2268), 'numpy.argmax', 'np.argmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (2257, 2268), True, 'import numpy as np\n'), ((2284, 
2304), 'numpy.argmax', 'np.argmax', (['v'], {'axis': '(1)'}), '(v, axis=1)\n', (2293, 2304), True, 'import numpy as np\n'), ((2318, 2335), 'numpy.max', 'np.max', (['v'], {'axis': '(1)'}), '(v, axis=1)\n', (2324, 2335), True, 'import numpy as np\n'), ((1064, 1128), 'tools.get_vocimdb.readAnnotation', 'voc_readAnnotation', (['root_path', 'dataset', 'dataset_path', 'label_name'], {}), '(root_path, dataset, dataset_path, label_name)\n', (1082, 1128), True, 'from tools.get_vocimdb import readAnnotation as voc_readAnnotation\n'), ((1183, 1227), 'tools.get_cubimdb.readAnnotation', 'cub_readAnnotation', (['dataset_path', 'label_name'], {}), '(dataset_path, label_name)\n', (1201, 1227), True, 'from tools.get_cubimdb import readAnnotation as cub_readAnnotation\n'), ((2870, 2891), 'numpy.isnan', 'np.isnan', (['distSqrtVar'], {}), '(distSqrtVar)\n', (2878, 2891), True, 'import numpy as np\n'), ((2990, 3005), 'numpy.power', 'np.power', (['ih', '(2)'], {}), '(ih, 2)\n', (2998, 3005), True, 'import numpy as np\n'), ((3005, 3020), 'numpy.power', 'np.power', (['iw', '(2)'], {}), '(iw, 2)\n', (3013, 3020), True, 'import numpy as np\n')] |
from data.base_dataset import BaseDataset
import numpy as np
import torch
class DataManager:
    """Constructs torch DataLoaders over a BaseDataset.

    Splits are reproducible: numpy's RNG is seeded with a fixed value
    before the index list is shuffled.  Split fractions come from the
    config keys 'train_size' and 'valid_size'; batch size and device
    come from 'batch_size' and 'device'.
    """

    SPLIT_SEED = 707  # fixed seed so split membership is stable across runs

    def __init__(self, config):
        self.config = config

    def _pin_memory(self):
        # Pinning host memory only helps when batches are copied to a GPU.
        return self.config['device'] == 'cuda'

    def _make_loader(self, dataset, sampler=None, shuffle=False):
        # Single construction point so batch size and pinning stay consistent
        # across all loaders (removes the triplicated DataLoader calls).
        return torch.utils.data.DataLoader(
            dataset=dataset,
            batch_size=self.config['batch_size'],
            shuffle=shuffle,
            sampler=sampler,
            pin_memory=self._pin_memory(),
        )

    def get_dataloader(self):
        """Return a shuffled loader over the whole dataset (no split)."""
        dataset = BaseDataset(self.config)
        return self._make_loader(dataset, shuffle=True)

    def get_train_eval_dataloaders(self):
        """Return (train_loader, validation_loader) split by 'train_size'."""
        # Seed *before* dataset creation to preserve the original RNG call
        # order, in case BaseDataset consumes numpy randomness.
        np.random.seed(self.SPLIT_SEED)
        dataset = BaseDataset(self.config)
        dataset_size = len(dataset)
        train_size = int(self.config['train_size'] * dataset_size)
        indices = list(range(dataset_size))
        np.random.shuffle(indices)
        train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:train_size])
        valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[train_size:])
        return (self._make_loader(dataset, sampler=train_sampler),
                self._make_loader(dataset, sampler=valid_sampler))

    def get_train_eval_test_dataloaders(self):
        """Return (train, validation, test) loaders split by the config fractions.

        The test split receives whatever remains after the train and
        validation fractions are taken.
        """
        np.random.seed(self.SPLIT_SEED)
        dataset = BaseDataset(self.config)
        dataset_size = len(dataset)
        train_size = int(self.config['train_size'] * dataset_size)
        valid_size = int(self.config['valid_size'] * dataset_size)
        indices = list(range(dataset_size))
        np.random.shuffle(indices)
        splits = (indices[:train_size],
                  indices[train_size:train_size + valid_size],
                  indices[train_size + valid_size:])
        samplers = [torch.utils.data.sampler.SubsetRandomSampler(ix) for ix in splits]
        return tuple(self._make_loader(dataset, sampler=s) for s in samplers)
| [
"torch.utils.data.sampler.SubsetRandomSampler",
"data.base_dataset.BaseDataset",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"numpy.random.shuffle"
] | [((205, 229), 'data.base_dataset.BaseDataset', 'BaseDataset', (['self.config'], {}), '(self.config)\n', (216, 229), False, 'from data.base_dataset import BaseDataset\n'), ((252, 412), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': "self.config['batch_size']", 'shuffle': '(True)', 'pin_memory': "(True if self.config['device'] == 'cuda' else False)"}), "(dataset, batch_size=self.config['batch_size'],\n shuffle=True, pin_memory=True if self.config['device'] == 'cuda' else False\n )\n", (279, 412), False, 'import torch\n'), ((539, 558), 'numpy.random.seed', 'np.random.seed', (['(707)'], {}), '(707)\n', (553, 558), True, 'import numpy as np\n'), ((578, 602), 'data.base_dataset.BaseDataset', 'BaseDataset', (['self.config'], {}), '(self.config)\n', (589, 602), False, 'from data.base_dataset import BaseDataset\n'), ((871, 897), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (888, 897), True, 'import numpy as np\n'), ((1091, 1150), 'torch.utils.data.sampler.SubsetRandomSampler', 'torch.utils.data.sampler.SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (1135, 1150), False, 'import torch\n'), ((1175, 1232), 'torch.utils.data.sampler.SubsetRandomSampler', 'torch.utils.data.sampler.SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (1219, 1232), False, 'import torch\n'), ((1257, 1435), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': "self.config['batch_size']", 'sampler': 'train_sampler', 'pin_memory': "(True if self.config['device'] == 'cuda' else False)"}), "(dataset=dataset, batch_size=self.config[\n 'batch_size'], sampler=train_sampler, pin_memory=True if self.config[\n 'device'] == 'cuda' else False)\n", (1284, 1435), False, 'import torch\n'), ((1608, 1786), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': "self.config['batch_size']", 'sampler': 
'valid_sampler', 'pin_memory': "(True if self.config['device'] == 'cuda' else False)"}), "(dataset=dataset, batch_size=self.config[\n 'batch_size'], sampler=valid_sampler, pin_memory=True if self.config[\n 'device'] == 'cuda' else False)\n", (1635, 1786), False, 'import torch\n'), ((2048, 2067), 'numpy.random.seed', 'np.random.seed', (['(707)'], {}), '(707)\n', (2062, 2067), True, 'import numpy as np\n'), ((2087, 2111), 'data.base_dataset.BaseDataset', 'BaseDataset', (['self.config'], {}), '(self.config)\n', (2098, 2111), False, 'from data.base_dataset import BaseDataset\n'), ((2535, 2561), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2552, 2561), True, 'import numpy as np\n'), ((2788, 2847), 'torch.utils.data.sampler.SubsetRandomSampler', 'torch.utils.data.sampler.SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (2832, 2847), False, 'import torch\n'), ((2872, 2931), 'torch.utils.data.sampler.SubsetRandomSampler', 'torch.utils.data.sampler.SubsetRandomSampler', (['valid_indices'], {}), '(valid_indices)\n', (2916, 2931), False, 'import torch\n'), ((2955, 3013), 'torch.utils.data.sampler.SubsetRandomSampler', 'torch.utils.data.sampler.SubsetRandomSampler', (['test_indices'], {}), '(test_indices)\n', (2999, 3013), False, 'import torch\n'), ((3038, 3216), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': "self.config['batch_size']", 'sampler': 'train_sampler', 'pin_memory': "(True if self.config['device'] == 'cuda' else False)"}), "(dataset=dataset, batch_size=self.config[\n 'batch_size'], sampler=train_sampler, pin_memory=True if self.config[\n 'device'] == 'cuda' else False)\n", (3065, 3216), False, 'import torch\n'), ((3389, 3567), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': "self.config['batch_size']", 'sampler': 'valid_sampler', 'pin_memory': "(True if self.config['device'] == 'cuda' else False)"}), 
"(dataset=dataset, batch_size=self.config[\n 'batch_size'], sampler=valid_sampler, pin_memory=True if self.config[\n 'device'] == 'cuda' else False)\n", (3416, 3567), False, 'import torch\n'), ((3749, 3926), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': "self.config['batch_size']", 'sampler': 'test_sampler', 'pin_memory': "(True if self.config['device'] == 'cuda' else False)"}), "(dataset=dataset, batch_size=self.config[\n 'batch_size'], sampler=test_sampler, pin_memory=True if self.config[\n 'device'] == 'cuda' else False)\n", (3776, 3926), False, 'import torch\n')] |
from random import randrange
import keras
from keras.layers import Dense, Input
from keras.optimizers import Adam
import numpy as np
import random
from breakout_RL.agents.RLAgent import RLAgent
class NNAgent(RLAgent):
    # Maps internal action ids (0..2) to environment action codes.
    # Presumably the Atari Breakout codes (2/3 = move, 0 = no-op) --
    # TODO confirm against the environment's action space.
    ID2ACTION = {0: 2, 1: 3, 2:0}
    # Inverse mapping: environment action code -> internal action id.
    ACTION2ID = {2: 0, 3: 1, 0:2}
    """Breakout RL with function approximation"""
    def __init__(self, input_size=1 + 4, hidden_size=256, batch_size=16, epsilon=0.9):
        """Q-learning agent with a small feed-forward net as Q approximator.

        input_size:  length of the state vector fed to the network
        hidden_size: width of the single hidden layer
        batch_size:  minibatch size sampled from the replay history
        epsilon:     initial exploration rate for the eps-greedy policy
        """
        super().__init__(None, None)
        # paddle_x + ball features + flatten tiles matrix
        self.nactions = 3
        # self.input_size = 1 + 4
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.Q = self.build_model()
        self.alpha = 0.1   # learning rate; only referenced in the commented-out fit_batch below
        self.gamma = 0.98  # discount factor for the bootstrapped target in replay()
        self.epsilon = epsilon
        # Linear epsilon annealing from epsilon_start down to epsilon_end,
        # decremented once per replay() call over exploration_steps calls.
        self.epsilon_start, self.epsilon_end = 1.0, 0.1
        # self.epsilon = 0.0
        # self.epsilon_start, self.epsilon_end = 0.0, 0.0
        self.exploration_steps = 100000
        self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) \
                                  / self.exploration_steps
        self.iteration = 0
        # Replay buffer of (state, action_id, reward, next_state) tuples.
        self.history = []
    def build_model(self):
        """Build the Q-network: states -> Q-values, element-wise multiplied
        by a one-hot action mask so fitting only propagates loss through
        the Q-value of the selected action."""
        state_input = Input((self.input_size,), name='states')
        actions_input = Input((self.nactions,), name='mask')
        dense_1 = keras.layers.Dense(self.hidden_size)(state_input)
        # dense_2 = Dense(self.hidden_size)(dense_1)
        output = Dense(self.nactions)(dense_1)
        # Multiplying by the mask zeroes the outputs of non-selected actions.
        filtered_output = keras.layers.multiply([output, actions_input])
        model = keras.models.Model(inputs=[state_input, actions_input], outputs=filtered_output)
        # optimizer = keras.optimizers.RMSprop(lr=0.00025, rho=0.95, epsilon=0.01)
        # optimizer = keras.optimizers.SGD(0.0005)
        optimizer = Adam()
        model.compile(optimizer, loss='mse')
        self.Q = model
        self.Q.summary()
        return self.Q
    def act(self, state):
        """Epsilon-greedy action selection; returns an environment action code."""
        if random.random() < self.epsilon:
            action_id = randrange(self.nactions)
        else:
            # All-ones mask: evaluate Q for every action in one forward pass.
            estimated_Q_values = self.Q.predict([state.reshape(1,len(state)), np.ones((1, self.nactions,))])
            best_action = estimated_Q_values.argmax()
            action_id = best_action
            # Occasional (p ~ 0.001) debug trace of the greedy choice.
            if random.random()<0.001:
                print(estimated_Q_values, best_action, state[:5])
        return self.ID2ACTION[action_id]
    def observe(self, state, action, reward, next_state):
        """Record a transition; the buffer is capped at 17 entries
        (oldest entry dropped before appending once len exceeds 16)."""
        if len(self.history) > 16:
            self.history.pop(0)
        # print((state, self.ACTION2ID[action], reward, next_state))
        self.history.append((state, self.ACTION2ID[action], reward, next_state))
    def replay(self):
        """Sample a minibatch and take one Q-learning step; anneal epsilon."""
        if self.batch_size > len(self.history):
            return
        batch = random.sample(self.history, self.batch_size)
        states, actions, rewards, next_states = list(map(np.array, list(zip(*batch))))
        one_hot_encoded_actions = np.zeros((actions.size, self.nactions))
        one_hot_encoded_actions[np.arange(actions.size), actions] = 1
        next_Q_values = self.Q.predict([next_states, np.ones(one_hot_encoded_actions.shape)])
        # Bootstrapped target: r + gamma * max_a' Q(s', a').
        # NOTE(review): no terminal-state masking -- every transition is
        # bootstrapped, including episode-ending ones.
        Q_values = rewards + self.gamma * np.max(next_Q_values, axis=1)
        self.Q.fit([states, one_hot_encoded_actions], one_hot_encoded_actions*Q_values[:, None],
                   epochs=1, batch_size=len(states), verbose=0)
        self.iteration += 1
        if self.epsilon > self.epsilon_end:
            self.epsilon -= self.epsilon_decay_step
    def save(self, filepath):
        """Persist the Q-network weights to filepath."""
        self.Q.save_weights(filepath)
    def load(self, filepath):
        """Restore the Q-network weights from filepath."""
        self.Q.load_weights(filepath)
# def fit_batch(self):
# new_Qs = []
# states = []
# for (state, action, reward, next_state) in self.history:
# old_Q = self.Q[action_id].predict(state.reshape((1,self.input_size)))
#
# # a_prime = self.choose_action(next_state)
# # next_Q = self.Q[a_prime].predict(next_state.reshape((1,self.input_size)))
# # new_Q = old_Q + self.alpha*(reward + self.gamma * next_Q - old_Q)
#
# new_Q = old_Q + self.alpha*(reward + self.gamma * self._Qa(next_state).max() - old_Q)
# new_Qs.append(new_Q)
# states.append(state)
#
# new_Qs = np.asarray(new_Qs).reshape(len(new_Qs), 1)
# states = np.asarray(states).reshape((len(states), self.input_size))
# print(new_Qs[:3], self.iteration)
# self.Q[action_id].fit(states, new_Qs)
# self.iteration += 1
#
# self.history = {
# 0:[],
# 1:[]
# }
| [
"keras.optimizers.Adam",
"random.sample",
"numpy.ones",
"random.randrange",
"numpy.max",
"keras.layers.Input",
"numpy.zeros",
"keras.models.Model",
"keras.layers.multiply",
"keras.layers.Dense",
"random.random",
"numpy.arange"
] | [((1247, 1287), 'keras.layers.Input', 'Input', (['(self.input_size,)'], {'name': '"""states"""'}), "((self.input_size,), name='states')\n", (1252, 1287), False, 'from keras.layers import Dense, Input\n'), ((1312, 1348), 'keras.layers.Input', 'Input', (['(self.nactions,)'], {'name': '"""mask"""'}), "((self.nactions,), name='mask')\n", (1317, 1348), False, 'from keras.layers import Dense, Input\n'), ((1546, 1592), 'keras.layers.multiply', 'keras.layers.multiply', (['[output, actions_input]'], {}), '([output, actions_input])\n', (1567, 1592), False, 'import keras\n'), ((1610, 1695), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': '[state_input, actions_input]', 'outputs': 'filtered_output'}), '(inputs=[state_input, actions_input], outputs=filtered_output\n )\n', (1628, 1695), False, 'import keras\n'), ((1846, 1852), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (1850, 1852), False, 'from keras.optimizers import Adam\n'), ((2831, 2875), 'random.sample', 'random.sample', (['self.history', 'self.batch_size'], {}), '(self.history, self.batch_size)\n', (2844, 2875), False, 'import random\n'), ((2998, 3037), 'numpy.zeros', 'np.zeros', (['(actions.size, self.nactions)'], {}), '((actions.size, self.nactions))\n', (3006, 3037), True, 'import numpy as np\n'), ((1368, 1404), 'keras.layers.Dense', 'keras.layers.Dense', (['self.hidden_size'], {}), '(self.hidden_size)\n', (1386, 1404), False, 'import keras\n'), ((1489, 1509), 'keras.layers.Dense', 'Dense', (['self.nactions'], {}), '(self.nactions)\n', (1494, 1509), False, 'from keras.layers import Dense, Input\n'), ((2007, 2022), 'random.random', 'random.random', ([], {}), '()\n', (2020, 2022), False, 'import random\n'), ((2063, 2087), 'random.randrange', 'randrange', (['self.nactions'], {}), '(self.nactions)\n', (2072, 2087), False, 'from random import randrange\n'), ((2316, 2331), 'random.random', 'random.random', ([], {}), '()\n', (2329, 2331), False, 'import random\n'), ((3070, 3093), 'numpy.arange', 
'np.arange', (['actions.size'], {}), '(actions.size)\n', (3079, 3093), True, 'import numpy as np\n'), ((3162, 3200), 'numpy.ones', 'np.ones', (['one_hot_encoded_actions.shape'], {}), '(one_hot_encoded_actions.shape)\n', (3169, 3200), True, 'import numpy as np\n'), ((3246, 3275), 'numpy.max', 'np.max', (['next_Q_values'], {'axis': '(1)'}), '(next_Q_values, axis=1)\n', (3252, 3275), True, 'import numpy as np\n'), ((2180, 2207), 'numpy.ones', 'np.ones', (['(1, self.nactions)'], {}), '((1, self.nactions))\n', (2187, 2207), True, 'import numpy as np\n')] |
from . import numpy_ndarray_as
def random(size, nulls=False):
    """Build a random xnd.xnd container of 64-bit floats.

    When *nulls* is true the dtype is optional ('?float64') and every
    NaN position in the generated data is replaced by None.
    """
    import xnd
    import numpy as np
    data = numpy_ndarray_as.random(size, nulls=nulls)
    if not nulls:
        return xnd.xnd(data.tolist(), dtype='float64')
    result = xnd.xnd(data.tolist(), dtype='?float64')
    nan_positions, = np.where(np.isnan(data))
    for pos in nan_positions:
        result[pos] = None
    return result
def numpy_ndarray(xd_arr):
    """Expose a xnd.xnd container as a numpy.ndarray without copying.

    Optional (nullable) containers have no direct ndarray
    representation and raise NotImplementedError.
    """
    import numpy as np
    if xd_arr.dtype.isoptional():
        raise NotImplementedError(
            'numpy.ndarray view of xnd.xnd with optional values')
    return np.array(xd_arr, copy=False)
def pandas_series(xd_arr):
    """Expose a xnd.xnd container as a pandas.Series without copying.

    Optional (nullable) containers are not supported and raise
    NotImplementedError.
    """
    import numpy as np
    import pandas as pd
    if xd_arr.dtype.isoptional():
        raise NotImplementedError(
            'pandas.Series view of xnd.xnd with optional values')
    return pd.Series(np.array(xd_arr, copy=False), copy=False)
def pyarrow_array(xd_arr):
    """Expose a xnd.xnd container as a pyarrow.Array without copying.

    The container's buffer is wrapped directly (no validity bitmap);
    optional (nullable) containers raise NotImplementedError.
    """
    import pyarrow as pa
    if xd_arr.dtype.isoptional():
        raise NotImplementedError(
            'pyarrow.Array view of xnd.xnd with optional values')
    buf = pa.py_buffer(memoryview(xd_arr))
    arrow_type = pa.from_numpy_dtype(str(xd_arr.dtype))
    length = xd_arr.type.datasize // xd_arr.type.itemsize
    return pa.Array.from_buffers(arrow_type, length, [None, buf])
| [
"numpy.array",
"numpy.isnan"
] | [((575, 603), 'numpy.array', 'np.array', (['xd_arr'], {'copy': '(False)'}), '(xd_arr, copy=False)\n', (583, 603), True, 'import numpy as np\n'), ((890, 918), 'numpy.array', 'np.array', (['xd_arr'], {'copy': '(False)'}), '(xd_arr, copy=False)\n', (898, 918), True, 'import numpy as np\n'), ((308, 319), 'numpy.isnan', 'np.isnan', (['r'], {}), '(r)\n', (316, 319), True, 'import numpy as np\n')] |
"""
Created on Mon Feb 1 10:08:31 2016
"""
#------------------------------------------------------------------------------
#CHAPTER 6: The Finite-Element Method
#------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
# Basic parameters
nt = 1000      # number of time steps
vs = 3000      # acoustic velocity
ro0 = 2500     # density
isnap = 250    # snapshot frequency (plot every isnap-th step)
nx = 1000      # number of grid points
isx = 500      # source location (grid index)
xmax = 10000.  # domain extent; same length unit as dx/x -- presumably metres, TODO confirm
eps = 0.5      # stability limit (safety factor on the time step)
dx = xmax/(nx-1)  # calculate space increment
x = np.arange(0, nx)*dx  # initialize space coordinates
x = x.T
h = np.diff(x)  # element sizes (uniform here, but kept general)
# material parameters, constant over the whole grid
ro = x*0 + ro0
mu = x*0 + ro*vs**2
# time step from stability criterion (CFL-type bound using max wave speed)
dt = 0.5*eps*dx/np.max(np.sqrt(mu/ro))
# source time function: first derivative of a Gaussian centred at t0
pt = 20*dt
t = np.arange(1, nt+1)*dt  # initialize time axis
t0 = 3*pt
src = -1/pt**2*(t-t0)*np.exp(-1/pt**2*(t-t0)**2)
# Source vector: unit forcing at grid node isx
f = np.zeros(nx); f[isx:isx+1] = f[isx:isx+1] + 1.
# Stiffness matrix K_ij for linear (hat) basis functions.
# K is tridiagonal, so assemble only the three non-zero diagonals with
# vectorized fancy indexing instead of the original O(nx^2) double loop
# that mostly re-wrote zeros. The index ranges reproduce the original
# loop bounds exactly (i, j both restricted to 1..nx-2).
K = np.zeros((nx, nx))
i_mid = np.arange(1, nx - 1)            # main diagonal (i == j)
K[i_mid, i_mid] = mu[i_mid - 1]/h[i_mid - 1] + mu[i_mid]/h[i_mid]
i_sub = np.arange(2, nx - 1)            # sub-diagonal (i == j+1, j >= 1)
K[i_sub, i_sub - 1] = -mu[i_sub - 1]/h[i_sub - 1]
i_sup = np.arange(1, nx - 2)            # super-diagonal (j == i+1, j <= nx-2)
K[i_sup, i_sup + 1] = -mu[i_sup]/h[i_sup]
# Corner element
K[0,0] = mu[0]/h[0]
K[nx-1,nx-1] = mu[nx-1]/h[nx-2]
#%% CODE 10: Listing 6.4 Mass matrix with varying element size - Pag 147
# Mass matrix M_ij for linear elements. Like K, M is tridiagonal, so
# assemble the three non-zero diagonals directly with vectorized fancy
# indexing instead of the original O(nx^2) double loop. The index
# ranges reproduce the original loop bounds exactly (i, j in 1..nx-2).
M = np.zeros((nx, nx))
j_mid = np.arange(1, nx - 1)            # main diagonal (j == i)
M[j_mid, j_mid] = (ro[j_mid - 1]*h[j_mid - 1] + ro[j_mid]*h[j_mid])/3
j_sup = np.arange(1, nx - 2)            # super-diagonal (j == i+1, j <= nx-2)
M[j_sup, j_sup + 1] = ro[j_sup]*h[j_sup]/6
j_sub = np.arange(2, nx - 1)            # sub-diagonal (j == i-1, j >= 1)
M[j_sub, j_sub - 1] = ro[j_sub - 1]*h[j_sub - 1]/6
# Corner element
M[0,0] = ro[0]*h[0]/3
M[nx-1,nx-1] = ro[nx-1]*h[nx-2]/3
# Invert M (dense inverse; used directly in the FEM time extrapolation)
Minv = np.linalg.inv(M)
# Initialize FD matrices for comparison in the regular grid case:
# Mf holds 1/rho on its diagonal, D becomes a scaled second-difference
# (Laplacian) operator assembled column by column below.
Mf = np.zeros((nx,nx), dtype=float)
D = np.zeros((nx,nx), dtype=float)
dx = h[1]
for i in range(nx):
    Mf[i,i] = 1./ro[i]
    # NOTE(review): boundary handling is asymmetric -- column i = 0 is
    # left all-zero, and the last column gets no sub-diagonal entry.
    if i>0:
        if i<nx-1:
            D[i+1,i] =1
        D[i-1,i] =1
        D[i,i] = -2
D = ro0*vs**2*D/dx**2
# Initialize displacement fields: lower-case u* for FEM,
# upper-case U* for the finite-difference reference solution.
u = np.zeros(nx)
uold = np.zeros(nx)
unew = np.zeros(nx)
U = np.zeros(nx)
Uold = np.zeros(nx)
Unew = np.zeros(nx)
fig = plt.figure(figsize=(14,8), dpi=80)
fig.suptitle("1D Elastic wave solution", fontsize=16)
iplot = 0
iplot = 0
#%% CODE 09: Listing 6.3 Time extrapolation - Pag 147
# CODE 11: Listing 6.5 1D elastic case _ Pag 148
# Time extrapolation
for it in range(nt):
# Finite Difference Method
Unew = (dt**2)*Mf @ (D @ U + f/dx*src[it]) + 2*U - Uold
Uold, U = U, Unew
# Finite Element Method
unew = (dt**2)*Minv @ (f*src[it] - K @ u) + 2*u - uold
uold, u = u, unew
# Display both
if np.mod(it+1, isnap) == 0:
# extract window
xc = 500*dx + it*dt*vs - 150
xd = 300
iplot += 1
plt.subplot(4,1,iplot)
L1 = plt.plot(x, u, label='FEM')
L2 = plt.plot(x, U, label='FDM')
plt.legend()
plt.text(xc+1.5*xd, 0.00000002, '%d m' %(xc-500*dx))
plt.savefig('Fig_6.10.png')
plt.show()
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.exp",
"matplotlib.pyplot.subplot",
"numpy.zeros",
"numpy.linalg.inv",
"matplotlib.pyplot.figure",
"numpy.mod",
"numpy.arange",
"matplotlib.pyplot.... | [((671, 681), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (678, 681), True, 'import numpy as np\n'), ((989, 1001), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (997, 1001), True, 'import numpy as np\n'), ((1064, 1082), 'numpy.zeros', 'np.zeros', (['(nx, nx)'], {}), '((nx, nx))\n', (1072, 1082), True, 'import numpy as np\n'), ((1517, 1535), 'numpy.zeros', 'np.zeros', (['(nx, nx)'], {}), '((nx, nx))\n', (1525, 1535), True, 'import numpy as np\n'), ((1902, 1918), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (1915, 1918), True, 'import numpy as np\n'), ((1990, 2021), 'numpy.zeros', 'np.zeros', (['(nx, nx)'], {'dtype': 'float'}), '((nx, nx), dtype=float)\n', (1998, 2021), True, 'import numpy as np\n'), ((2025, 2056), 'numpy.zeros', 'np.zeros', (['(nx, nx)'], {'dtype': 'float'}), '((nx, nx), dtype=float)\n', (2033, 2056), True, 'import numpy as np\n'), ((2273, 2285), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (2281, 2285), True, 'import numpy as np\n'), ((2293, 2305), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (2301, 2305), True, 'import numpy as np\n'), ((2313, 2325), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (2321, 2325), True, 'import numpy as np\n'), ((2331, 2343), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (2339, 2343), True, 'import numpy as np\n'), ((2351, 2363), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (2359, 2363), True, 'import numpy as np\n'), ((2371, 2383), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (2379, 2383), True, 'import numpy as np\n'), ((2391, 2426), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 8)', 'dpi': '(80)'}), '(figsize=(14, 8), dpi=80)\n', (2401, 2426), True, 'import matplotlib.pyplot as plt\n'), ((3263, 3290), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Fig_6.10.png"""'], {}), "('Fig_6.10.png')\n", (3274, 3290), True, 'import matplotlib.pyplot as plt\n'), ((3299, 3309), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (3307, 3309), True, 'import matplotlib.pyplot as plt\n'), ((605, 621), 'numpy.arange', 'np.arange', (['(0)', 'nx'], {}), '(0, nx)\n', (614, 621), True, 'import numpy as np\n'), ((863, 883), 'numpy.arange', 'np.arange', (['(1)', '(nt + 1)'], {}), '(1, nt + 1)\n', (872, 883), True, 'import numpy as np\n'), ((941, 977), 'numpy.exp', 'np.exp', (['(-1 / pt ** 2 * (t - t0) ** 2)'], {}), '(-1 / pt ** 2 * (t - t0) ** 2)\n', (947, 977), True, 'import numpy as np\n'), ((808, 824), 'numpy.sqrt', 'np.sqrt', (['(mu / ro)'], {}), '(mu / ro)\n', (815, 824), True, 'import numpy as np\n'), ((2926, 2947), 'numpy.mod', 'np.mod', (['(it + 1)', 'isnap'], {}), '(it + 1, isnap)\n', (2932, 2947), True, 'import numpy as np\n'), ((3067, 3091), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', 'iplot'], {}), '(4, 1, iplot)\n', (3078, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3103, 3130), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'u'], {'label': '"""FEM"""'}), "(x, u, label='FEM')\n", (3111, 3130), True, 'import matplotlib.pyplot as plt\n'), ((3144, 3171), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'U'], {'label': '"""FDM"""'}), "(x, U, label='FDM')\n", (3152, 3171), True, 'import matplotlib.pyplot as plt\n'), ((3180, 3192), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3190, 3192), True, 'import matplotlib.pyplot as plt\n'), ((3209, 3265), 'matplotlib.pyplot.text', 'plt.text', (['(xc + 1.5 * xd)', '(2e-08)', "('%d m' % (xc - 500 * dx))"], {}), "(xc + 1.5 * xd, 2e-08, '%d m' % (xc - 500 * dx))\n", (3217, 3265), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import pickle
ANOMALY_MODEL_PATH_FROM_ROOT = 'data/anomaly_detection.pkl'
# Lazily-populated cache so the pickled model is read from disk and
# deserialized only once per process instead of on every predict() call.
_model_cache = {}

def predict(joules: float) -> bool:
    """Classify an energy reading with the pickled anomaly-detection model.

    Parameters
    ----------
    joules : float
        Energy measurement to classify.

    Returns
    -------
    The first element of the model's prediction for the single sample
    ``[[joules]]`` -- presumably a boolean/int anomaly label; the exact
    type depends on the pickled model (TODO confirm).

    Note: the model is cached after the first call; changes to the
    pickle file on disk are not picked up within a running process.
    """
    model = _model_cache.get('model')
    if model is None:
        # NOTE(security): pickle.load executes arbitrary code from the
        # file -- only load model files from a trusted source.
        with open(ANOMALY_MODEL_PATH_FROM_ROOT, 'rb') as open_file:
            model = pickle.load(open_file)
        _model_cache['model'] = model
    label = model.predict(np.array(joules).reshape(-1, 1))[0]
    return label
| [
"numpy.array",
"pickle.load"
] | [((213, 235), 'pickle.load', 'pickle.load', (['open_file'], {}), '(open_file)\n', (224, 235), False, 'import pickle\n'), ((266, 282), 'numpy.array', 'np.array', (['joules'], {}), '(joules)\n', (274, 282), True, 'import numpy as np\n')] |
"""
Created by <NAME>, Sep. 2018.
FLOW Lab
Brigham Young University
"""
import unittest
import numpy as np
from _porteagel_fortran import porteagel_analyze, x0_func, theta_c_0_func, sigmay_func, sigma_spread_func
from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func
from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func
from _porteagel_fortran import ct_to_axial_ind_func, wind_shear_func, discontinuity_point_func, smooth_max
from _porteagel_fortran import interpolation, hermite_spline, point_velocity_with_shear_func
from openmdao.api import Problem, Group
def power_func_v80(v):
    """Quintic power-curve fit for the Vestas V80 turbine (Niayifar 2016).

    Evaluates the fitted polynomial at wind speed ``v`` via Horner's
    scheme and returns the corresponding power.
    """
    coefficients = (0.17819, -6.5198, 90.623, -574.62, 1727.2, -1975.0)
    power = 0.0
    for c in coefficients:
        power = power * v + c
    return power
class test_basic_subroutines(unittest.TestCase):
    """Unit tests for the low-level wake-model subroutines exposed by the
    compiled _porteagel_fortran extension (wake-onset point, spread,
    deflection, velocity deficit, wake combination and overlap helpers).

    Expected values are empirical reference numbers -- hand calculations
    and data from the literature cited in the test names (e.g. Niayifar
    2016); per-test tolerances are chosen accordingly.
    """
    def setUp(self):
        # Shared model inputs reused by most of the tests below.
        self.tolerance = 1E-6
        self.d = 126.4        # rotor diameter
        self.yaw = np.pi/6.   # yaw angle in radians
        self.ct = 0.8
        self.alpha = 2.32
        self.beta = 0.154
        self.ti = 0.1
        self.ky = 0.25
        self.kz = 0.2
        self.wind_speed = 8.0
    def test_x0_func_hand_calc(self):
        x0 = x0_func(self.d, self.yaw, self.ct, self.alpha, self.ti, self.beta)
        self.assertAlmostEqual(x0, 353.2313474, delta=self.tolerance)
    def test_x0_func_data_yaw0(self):
        rotor_diameter = 0.15  # m
        yaw = 0.0*np.pi/180. #radians
        ct = 0.8214036062840235
        ti = 0.074
        #
        # reference (x0/D, Ct) data pairs -- presumably wind-tunnel data;
        # kept for traceability:
        # 3.77839335632898, 0.6643546778702326
        # 3.704230762943225, 0.7361200568897026
        # 3.849186706118913, 0.7866577299700839
        # 3.848583479099574, 0.8214036062840235
        x0 = x0_func(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)
        self.assertAlmostEqual(x0/rotor_diameter, 3.862413891540104, delta=1E-2)
    def test_x0_func_data_yaw10(self):
        rotor_diameter = 0.15  # m
        yaw = 10.0 * np.pi / 180.  # radians
        ct = 0.7866577299700839
        ti = 0.074
        x0 = x0_func(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)
        self.assertAlmostEqual(x0/rotor_diameter, 3.973368012202963, delta=1E-1)
    def test_x0_func_data_yaw20(self):
        rotor_diameter = 0.15  # m
        yaw = 20.0 * np.pi / 180.  # radians
        ct = 0.7361200568897026
        ti = 0.074
        x0 = x0_func(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)
        self.assertAlmostEqual(x0/rotor_diameter, 4.051040798613613, delta=1E-1)
    def test_x0_func_data_yaw30(self):
        rotor_diameter = 0.15  # m
        yaw = 30.0 * np.pi / 180.  # radians
        ct = 0.6643546778702326
        ti = 0.074
        x0 = x0_func(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)
        self.assertAlmostEqual(x0/rotor_diameter, 4.053814723717636, delta=1E-1)
    def test_discontinuity_point_func(self):
        x0 = 353.0
        xd = discontinuity_point_func(x0, self.d, self.ky, self.kz, self.yaw, self.ct)
        self.assertAlmostEqual(xd, 335.5180515, delta=self.tolerance)
    def test_sigmay_func(self):
        x = 500.0
        x0 = 353.0
        # local name xd actually holds sigma-y here (kept as-is)
        xd = sigmay_func(x, x0, self.ky, self.d, self.yaw)
        self.assertAlmostEqual(xd, 75.45193794, delta=self.tolerance)
    def test_sigmaz_func(self):
        x = 500.0
        x0 = 353.0
        # local name xd actually holds sigma-z here (kept as-is)
        xd = sigmaz_func(x, x0, self.kz, self.d)
        self.assertAlmostEqual(xd, 74.08914857, delta=self.tolerance)
    def test_theta_c_0_func(self):
        theta_c_0 = theta_c_0_func(self.yaw, self.ct)
        self.assertAlmostEqual(theta_c_0, 0.080852297, delta=self.tolerance)
    def test_wake_offset_func_near_wake(self):
        # evaluation point upstream of the near/far wake boundary x0
        x = 200.
        theta_c_0 = 0.0808
        sigmay = 36.
        sigmaz = 43.
        x0 = 353.
        delta = wake_offset_func(x, self.d, theta_c_0, x0, self.yaw, self.ky, self.kz, self.ct, sigmay, sigmaz)
        self.assertAlmostEqual(delta, 16.16, delta=self.tolerance)
    def test_wake_offset_func_far_wake(self):
        # evaluation point downstream of the near/far wake boundary x0
        x = 500.
        x0 = 353.
        theta_c_0 = 0.0808
        sigmay = 75.45193794
        sigmaz = 74.08914857
        delta = wake_offset_func(x, self.d, theta_c_0, x0, self.yaw, self.ky, self.kz, self.ct, sigmay, sigmaz)
        self.assertAlmostEqual(delta, 33.89352568, delta=self.tolerance)
    def test_deltav_func_2016(self):
        d = 126.4
        yaw = np.pi / 6.
        ky = 0.25
        kz = 0.2
        sigmay = 75.0
        sigmaz = 74.0
        ct = 0.8
        z = 100.0
        zh = 90.0
        wec_factor = 1.0
        y = 50.0
        delta = 33.89
        x = 500.
        deltay = y-delta
        deltaz = z-zh
        deltav = deltav_func(deltay, deltaz, ct, yaw, sigmay, sigmaz, d, 2016, self.ky, x, wec_factor, sigmay, sigmaz)
        self.assertAlmostEqual(deltav, 0.1293410999394427, delta=self.tolerance)
    def test_deltav_func_2014(self):
        d = 126.4
        yaw = np.pi / 6.
        ky = 0.25
        kz = 0.2
        sigmay = 75.0
        sigmaz = 74.0
        ct = 0.8
        z = 100.0
        zh = 90.0
        wec_factor = 1.0
        y = 50.0
        delta = 33.89
        x = 500.
        deltay = y-delta
        deltaz = z-zh
        deltav = deltav_func(deltay, deltaz, ct, yaw, sigmay, sigmaz, d, 2014, self.ky, x, wec_factor, sigmay, sigmaz)
        self.assertAlmostEqual(deltav, 0.03264659097, delta=self.tolerance)
    def test_near_deltav_func_2014_rotor_location(self):
        version = 2014
        x0 = 353.0
        xd = 335.0
        yaw = 0.0
        sigmay_spread = 45.
        sigmaz_spread = 45.
        sigmay_0 = 50.0
        sigmaz_0 = 50.0
        sigmay_d = 40.0
        sigmaz_d = 40.0
        wec_factor = 1.0
        deltay = 100.0
        deltaz = 5.0
        x = 0.0
        deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd, sigmay_d,
                                           sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
        self.assertAlmostEqual(deltav, 0.00048145926305030354, delta=self.tolerance)
    def test_near_deltav_func_2014_midrange_location(self):
        version = 2014
        x0 = 353.0
        xd = 335.0
        yaw = 0.0
        sigmay_spread = 45.
        sigmaz_spread = 45.
        sigmay_0 = 50.0
        sigmaz_0 = 50.0
        sigmay_d = 40.0
        sigmaz_d = 40.0
        wec_factor = 1.0
        deltay = 100.0
        deltaz = 5.0
        x = 200.0
        deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd, sigmay_d,
                                           sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
        self.assertAlmostEqual(deltav, 0.027941992346249663, delta=self.tolerance)
    def test_near_deltav_func_2014_x0_location(self):
        version = 2014
        x0 = 353.0
        xd = 335.0
        yaw = 0.0
        sigmay_spread = 45.
        sigmaz_spread = 45.
        sigmay_0 = 50.0
        sigmaz_0 = 50.0
        sigmay_d = 40.0
        sigmaz_d = 40.0
        wec_factor = 1.0
        deltay = 100.0
        deltaz = 5.0
        x = 353.
        deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd,
                                           sigmay_d,
                                           sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
        self.assertAlmostEqual(deltav, 0.0401329549842686, delta=self.tolerance)
    def test_near_deltav_func_2016_rotor_location(self):
        version = 2016
        x0 = 353.0
        xd = 335.0
        yaw = np.pi/6.
        sigmay_spread = 45.
        sigmaz_spread = 45.
        sigmay_0 = 50.0
        sigmaz_0 = 50.0
        sigmay_d = 40.0
        sigmaz_d = 40.0
        wec_factor = 1.0
        deltay = 100.0
        deltaz = 5.0
        x = 0.0
        deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd,
                                           sigmay_d,
                                           sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
        self.assertAlmostEqual(deltav, 0.05319773340098457, delta=self.tolerance)
    def test_near_deltav_func_2016_midrange_location(self):
        version = 2016
        x0 = 353.0
        xd = 335.0
        yaw = np.pi/6.
        sigmay_spread = 45.
        sigmaz_spread = 45.
        sigmay_0 = 50.0
        sigmaz_0 = 50.0
        sigmay_d = 40.0
        sigmaz_d = 40.0
        wec_factor = 1.0
        deltay = 100.0
        deltaz = 5.0
        x = 200.0
        deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd,
                                           sigmay_d,
                                           sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
        self.assertAlmostEqual(deltav, 0.0388723745762739, delta=self.tolerance)
    def test_near_deltav_func_2016_x0_location(self):
        version = 2016
        x0 = 353.0
        xd = 335.0
        yaw = np.pi/6.
        sigmay_spread = 45.
        sigmaz_spread = 45.
        sigmay_0 = 50.0
        sigmaz_0 = 50.0
        sigmay_d = 40.0
        sigmaz_d = 40.0
        wec_factor = 1.0
        deltay = 100.0
        deltaz = 5.0
        x = 353.
        deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd,
                                           sigmay_d,
                                           sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
        self.assertAlmostEqual(deltav, 0.027913475075370238, delta=self.tolerance)
    def test_wake_combination_func_Lissaman1979(self):
        Uk = 7.0
        deltav = 0.05
        wake_combination_method = 0
        deficit_sum = 2.0
        new_sum = wake_combination_func(self.wind_speed, Uk, deltav, wake_combination_method, deficit_sum)
        self.assertAlmostEqual(new_sum, 2.4, delta=self.tolerance)
    def test_wake_combination_func_Katic1986(self):
        Uk = 7.0
        deltav = 0.05
        wake_combination_method = 2
        deficit_sum = 2.0
        new_sum = wake_combination_func(self.wind_speed, Uk, deltav, wake_combination_method, deficit_sum)
        self.assertAlmostEqual(new_sum, 2.039607805437114, delta=self.tolerance)
    def test_wake_combination_func_Voutsinas1990(self):
        Uk = 7.0
        deltav = 0.05
        wake_combination_method = 3
        deficit_sum = 2.0
        new_sum = wake_combination_func(self.wind_speed, Uk, deltav, wake_combination_method, deficit_sum)
        self.assertAlmostEqual(new_sum, 2.0303940504246953, delta=self.tolerance)
    def test_wake_combination_func_Niayifar2016(self):
        Uk = 7.0
        deltav = 0.05
        wake_combination_method = 1
        deficit_sum = 2.0
        new_sum = wake_combination_func(self.wind_speed, Uk, deltav, wake_combination_method, deficit_sum)
        self.assertAlmostEqual(new_sum, 2.35, delta=self.tolerance)
    def test_wind_shear_func(self):
        z = 90.0
        zo = 2.0
        zr = 80.0
        psi = 0.15
        wind_velocity_with_shear = wind_shear_func(z, self.wind_speed, zr, zo, psi)
        self.assertAlmostEqual(wind_velocity_with_shear, 8.14607111996, delta=self.tolerance)
    def test_k_star_func(self):
        ti_ust = 0.1
        kstar = k_star_func(ti_ust)
        self.assertAlmostEqual(kstar, 0.042048, delta=self.tolerance)
    def test_ct_to_axial_ind_func_normal_ct(self):
        ct = 0.84
        axial_induction = ct_to_axial_ind_func(ct)
        self.assertAlmostEqual(axial_induction, 0.3, delta=self.tolerance)
    def test_ct_to_axial_ind_func_high_ct(self):
        ct = 0.97
        axial_induction = ct_to_axial_ind_func(ct)
        self.assertAlmostEqual(axial_induction, 0.4119957249, delta=self.tolerance)
    def test_smooth_max(self):
        x = 12.
        y = 13.
        s = 100.
        smax1 = smooth_max(s, x, y)
        self.assertAlmostEqual(smax1, 13.0, delta=self.tolerance)
    def test_overlap_area_func_rotor_all_in_wake(self):
        turbiney = 50.
        turbinez = 90.
        rotor_diameter = 100.
        wake_center_y = 0.0
        wake_center_z = 90.0
        wake_diameter = 200.
        wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z, wake_diameter)
        # full containment -> overlap equals the rotor area
        self.assertAlmostEqual(wake_overlap, np.pi*rotor_diameter**2/4, delta=self.tolerance)
    def test_overlap_area_func_rotor_all_in_wake_perfect_overlap(self):
        turbiney = 0.
        turbinez = 90.
        rotor_diameter = 100.
        wake_center_y = 0.0
        wake_center_z = 90.0
        wake_diameter = 200.
        wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z,
                                          wake_diameter)
        self.assertAlmostEqual(wake_overlap, np.pi * rotor_diameter ** 2 / 4, delta=self.tolerance)
    def test_overlap_area_func_wake_all_in_rotor(self):
        turbiney = 50.
        turbinez = 90.
        rotor_diameter = 200.
        wake_center_y = 0.0
        wake_center_z = 90.0
        wake_diameter = 100.
        wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z, wake_diameter)
        # wake fully inside the rotor -> overlap equals the wake area
        self.assertAlmostEqual(wake_overlap, np.pi*wake_diameter**2/4, delta=self.tolerance)
    def test_overlap_area_func_wake_all_in_rotor_perfect_overlap(self):
        turbiney = 0.
        turbinez = 90.
        rotor_diameter = 200.
        wake_center_y = 0.0
        wake_center_z = 90.0
        wake_diameter = 100.
        wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z, wake_diameter)
        self.assertAlmostEqual(wake_overlap, np.pi*wake_diameter**2/4, delta=self.tolerance)
    def test_overlap_area_func_no_overlap(self):
        turbiney = 0.
        turbinez = 90.
        rotor_diameter = 100.
        wake_center_y = 100.0
        wake_center_z = 90.0
        wake_diameter = 100.
        wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z, wake_diameter)
        self.assertAlmostEqual(wake_overlap, 0.0, delta=self.tolerance)
    #TODO add tests for partial overlap
class test_added_ti_func(unittest.TestCase):
    """Tests for the added-turbulence-intensity model of Niayifar (2016),
    exercising the 'max' (method 4) and 'smooth max' (method 5)
    combination rules for second- and third-row turbines."""
    def setUp(self):
        # Shared inputs; the looser 1e-2 tolerance reflects the empirical
        # reference value 0.1476 used by all four tests.
        self.tolerance = 1E-2
        self.yaw = 0.0
        self.ct = 0.8
        self.alpha = 2.32
        self.beta = 0.154
        self.ti = 0.1
        self.ky = 0.022
        self.kz = 0.022
        self.wind_speed = 8.0
        self.TI = 0.077
        self.x = 560.
        self.rotor_diameter = 80.
        self.deltay = 0.
        self.wake_height = 70.
        self.turbine_height = 70.
        self.sm_smoothing = 700.
    def test_added_ti_func_Niayifar_2016_max_2nd_turb(self):
        TI_calculation_method = 4
        TI_area_ratio_in = 0.0
        TI_dst_in = 0.0
        TI_ust = 0.077
        ti_area_ratio, ti_dst = added_ti_func(self.TI, self.ct, self.x, self.ky, self.rotor_diameter, self.rotor_diameter,
                                              self.deltay, self.wake_height, self.turbine_height, self.sm_smoothing, TI_ust,
                                              TI_calculation_method, TI_area_ratio_in, TI_dst_in)
        self.assertAlmostEqual(ti_dst, 0.1476, delta=self.tolerance)
    def test_added_ti_func_Niayifar_2016_max_3rd_turb(self):
        TI_calculation_method = 4
        TI_area_ratio_in = 0.0
        TI_dst_in = 0.0
        TI_ust = 0.1476
        ti_area_ratio, ti_dst = added_ti_func(self.TI, self.ct, self.x, self.ky, self.rotor_diameter, self.rotor_diameter,
                                              self.deltay, self.wake_height, self.turbine_height, self.sm_smoothing, TI_ust,
                                              TI_calculation_method, TI_area_ratio_in, TI_dst_in)
        self.assertAlmostEqual(ti_dst, 0.1476, delta=self.tolerance)
    def test_added_ti_func_Niayifar_2016_smoothmax_2nd_turb(self):
        TI_calculation_method = 5
        TI_area_ratio_in = 0.0
        TI_dst_in = 0.0
        TI_ust = 0.077
        ti_area_ratio, ti_dst = added_ti_func(self.TI, self.ct, self.x, self.ky, self.rotor_diameter,
                                              self.rotor_diameter,
                                              self.deltay, self.wake_height, self.turbine_height, self.sm_smoothing,
                                              TI_ust,
                                              TI_calculation_method, TI_area_ratio_in, TI_dst_in)
        self.assertAlmostEqual(ti_dst, 0.1476, delta=self.tolerance)
    def test_added_ti_func_Niayifar_2016_smoothmax_3rd_turb(self):
        TI_calculation_method = 5
        TI_area_ratio_in = .05
        TI_dst_in = 0.077
        TI_ust = 0.1476
        ti_area_ratio, ti_dst = added_ti_func(self.TI, self.ct, self.x, self.ky, self.rotor_diameter,
                                              self.rotor_diameter,
                                              self.deltay, self.wake_height, self.turbine_height, self.sm_smoothing,
                                              TI_ust,
                                              TI_calculation_method, TI_area_ratio_in, TI_dst_in)
        self.assertAlmostEqual(ti_dst, 0.1476, delta=self.tolerance)
class test_point_velocity_with_shear(unittest.TestCase):
    """Integration-style test of point_velocity_with_shear_func: evaluates
    the waked, shear-corrected velocity at a single point downstream of
    one yawed turbine and compares the normalized result to a reference
    value (0.406, within 1e-2)."""
    def setUp(self):
        self.tolerance = 1E-2
        self.turbI = -1
        self.wake_combination_method = 1
        self.wake_model_version = 2016
        self.sorted_x_idx = np.array([0])
        self.rotorDiameter = np.array([0.15])
        # sample point five diameters downstream, slightly off-axis
        self.pointX = self.rotorDiameter*5.
        self.pointY = 0.24*self.rotorDiameter
        self.pointZ = 0.125
        self.tol = 1E-12
        self.alpha = 2.32
        self.beta = 0.154
        self.expratemultiplier = 1.0
        self.wec_factor = 1.0
        self.wind_speed = 4.88
        self.z_ref = 0.125
        self.z_0 = 0.000022
        self.shear_exp = 0.1
        self.turbineXw = np.array([0])
        self.turbineYw = np.array([0])
        self.turbineZ = np.array([0.125])
        self.yaw = np.array([20. * np.pi / 180.0])
        self.wtVelocity = np.array([self.wind_speed])
        self.Ct_local = 0.7361200568897026 * np.ones_like(self.turbineXw)  # np.array([0.7374481936835376])
        self.TIturbs = 0.025 * np.ones_like(self.turbineXw)  # *np.array([0.01]) #np.array([0.001]) #TODO check point velocity tests and ti input
        self.ky_local = 0.022  # np.array([0.3837*TIturbs[0] + 0.003678])
        self.kz_local = 0.022  # np.array([0.3837*TIturbs[0] + 0.003678])
    def test_point_velocity_with_shear(self):
        point_velocity_with_shear = point_velocity_with_shear_func(self.turbI, self.wake_combination_method, self.wake_model_version,
                                                                   self.sorted_x_idx, self.pointX, self.pointY, self.pointZ, self.tol,
                                                                   self.alpha, self.beta, self.expratemultiplier, self.wec_factor,
                                                                   self.wind_speed, self.z_ref, self.z_0, self.shear_exp, self.turbineXw,
                                                                   self.turbineYw, self.turbineZ, self.rotorDiameter, self.yaw,
                                                                   self.wtVelocity, self.Ct_local, self.TIturbs, self.ky_local,
                                                                   self.kz_local)
        self.assertAlmostEqual(point_velocity_with_shear/self.wind_speed, 0.406, delta=self.tolerance)
class test_sigma_spread(unittest.TestCase):
    """Tests of sigma_spread_func over five (x, xi_d, xi_a) cases computed
    once in setUp; each test_*_caseN method checks one precomputed result
    against the corresponding entry of correct_results."""
    def setUp(self):
        self.tolerance = 1E-6
        self.d = 126.4
        self.yaw = np.pi / 6.
        self.ct = 0.8
        self.alpha = 2.32
        self.beta = 0.154
        self.ti = 0.1
        self.ky = 0.25
        self.kz = 0.2
        # Case inputs: downstream distance x, diameter multiplier xi_d,
        # and angular spread xi_a, evaluated element-wise below.
        x = np.array([500.0, 500.0, 500.0, 200.0, -10.0])
        xi_d = np.array([1.0, 2.0, 1.0, 1.0, 1.0])
        xi_a = np.array([0.0, 0.0, 45.0, 0.0, 0.0])
        sigma_0 = 38.7
        sigma_d = 34.2
        x0 = 353.0
        self.sigma_spread = np.zeros_like(x)
        for i in np.arange(0, x.size):
            self.sigma_spread[i] = sigma_spread_func(x[i], x0, self.ky, sigma_0, sigma_d, xi_a[i], xi_d[i])
        self.correct_results = np.array([75.45, 150.9, 534.2, 36.7495751, 0.0])
    def test_sigma_spread_func_case1(self):
        self.assertAlmostEqual(self.sigma_spread[0], self.correct_results[0], delta=self.tolerance)
    def test_sigma_spread_func_case2(self):
        self.assertAlmostEqual(self.sigma_spread[1], self.correct_results[1], delta=self.tolerance)
    def test_sigma_spread_func_case3(self):
        self.assertAlmostEqual(self.sigma_spread[2], self.correct_results[2], delta=self.tolerance)
    def test_sigma_spread_func_case4(self):
        self.assertAlmostEqual(self.sigma_spread[3], self.correct_results[3], delta=self.tolerance)
    def test_sigma_spread_func_case5(self):
        self.assertAlmostEqual(self.sigma_spread[4], self.correct_results[4], delta=self.tolerance)
# class test_sigma_spread_too_high_error(unittest.TestCase):
#
# def setUp(self):
# self.tolerance = 1E-6
# self.d = 126.4
# self.yaw = np.pi / 6.
# self.ct = 0.8
# self.alpha = 2.32
# self.beta = 0.154
# self.ti = 0.1
# self.ky = 0.25
# self.kz = 0.2
#
# self.x = 500.0
# self.xi_d = 1.0
# self.xi_a = 90.000
#
#
# self.sigma_0 = 38.7
# self.sigma_d = 34.2
#
# self.x0 = 353.0
#
# def test_sigma_spread_too_high(self):
# self.assertRaises(sigma_spread_func(self.x, self.x0, self.ky, self.sigma_0, self.sigma_d, self.xi_a, self.xi_d))
#
class test_hermite_spline(unittest.TestCase):
    """Evaluate hermite_spline at points where the interpolant is known to pass through zero."""

    def test_linear(self):
        """Interpolate y = x - 1 between (0, -1) and (2, 1) with unit slopes; y(1) == 0."""
        y = hermite_spline(1., 0., 2., -1., 1., 1., 1.)
        self.assertEqual(y, 0.0)

    def test_cubic(self):
        """Interpolate y = x**3 between (-1, 0) and (1, 0) with slope 2 at both ends; y(0) == 0."""
        y = hermite_spline(0., -1., 1., 0., 2., 0., 2.)
        self.assertEqual(y, 0.0)

    def test_parabolic(self):
        """Interpolate y = x**2 between (-1, 1) and (1, 1) with slopes -2 and 2; y(0) == 0."""
        y = hermite_spline(0., -1., 1., 1., -2., 1., 2.)
        self.assertEqual(y, 0.0)
class test_interpolation(unittest.TestCase):
    """Exercise the interpolation routine (linear mode only; the cubic case is disabled)."""

    # NOTE: a cubic-interpolation test previously lived here (interp_type = 0,
    # samples of y = x**3 on [-1, 1], xval = 0.125, expected yval == 0.0625)
    # but is currently disabled.

    def test_linear(self):
        # Piecewise-linear interpolation through (0, 0), (1, 1), (2, 0).
        sample_x = np.array([0., 1., 2.])
        sample_y = np.array([0., 1., 0.])
        # Midpoint of the rising segment should land exactly halfway up.
        result = interpolation(1, sample_x, sample_y, 0.5, 0.0, 0.0, False)
        self.assertEqual(result, 0.5)
class test_ctcp_curve(unittest.TestCase):
    """Check that GaussianWake warns when the supplied CT curve contains values above 1."""

    def setUp(self):
        filename = "./input_files/NREL5MWCPCT_dict.p"
        # cPickle only exists on Python 2; fall back to the stdlib pickle on Python 3.
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        # Context manager closes the file even if unpickling fails
        # (the original left the handle open).
        with open(filename, "rb") as f:
            data = pickle.load(f)

        n_points = data['wind_speed'].size
        cp_data = np.zeros(n_points)
        ct_data = np.zeros(n_points)
        wind_speed_data = np.zeros(n_points)
        cp_data[:] = data['CP']
        ct_data[:] = data['CT']
        wind_speed_data[:] = data['wind_speed']

        self.ct_data = ct_data
        self.cp_data = cp_data
        self.wind_speed_data = wind_speed_data

        self.options = {'use_ct_curve': True,
                        'ct_curve_ct': self.ct_data,
                        'ct_curve_wind_speed': self.wind_speed_data}

    def test_5mw_ct_greater_than_1_warning(self):
        # Constructing GaussianWake with this CT curve should emit a Warning.
        from gaussianwake.gaussianwake import GaussianWake
        import pytest
        pytest.warns(Warning, GaussianWake, nTurbines=6, options=self.options)
class test_wec(unittest.TestCase):
    """Behavioral tests of the wake expansion continuation (WEC) parameters:
    'model_params:wec_factor' (wake diameter multiplier) and
    'model_params:wec_spreading_angle' (wake spread rate multiplier).
    """

    def setUp(self):
        filename = "./input_files/NREL5MWCPCT_dict.p"
        # cPickle only exists on Python 2; fall back to the stdlib pickle on Python 3.
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        # Context manager closes the file even if unpickling fails
        # (the original left the handle open).
        with open(filename, "rb") as f:
            data = pickle.load(f)

        n_points = data['wind_speed'].size
        cp_data = np.zeros(n_points)
        ct_data = np.zeros(n_points)
        wind_speed_data = np.zeros(n_points)
        cp_data[:] = data['CP']
        ct_data[:] = data['CT']
        wind_speed_data[:] = data['wind_speed']

        self.ct_data = ct_data
        self.cp_data = cp_data
        self.wind_speed_data = wind_speed_data

        self.options = {'use_ct_curve': True,
                        'ct_curve_ct': self.ct_data,
                        'ct_curve_wind_speed': self.wind_speed_data}

        nTurbines = 2
        from gaussianwake.gaussianwake import GaussianWake
        prob = Problem(root=Group())
        prob.root.add('wakemodel', GaussianWake(nTurbines, options=self.options), promotes=['*'])
        prob.setup()
        prob['wind_speed'] = 8.
        self.prob = prob

    def _set_layout(self, prob, crosswind_offset):
        """Place two 50 m turbines 400 m apart streamwise at the given crosswind
        offset and reset both WEC parameters to their neutral values.
        """
        rotor_diameter = 50.
        prob['turbineXw'] = np.array([0., 400.])
        prob['turbineYw'] = np.array([0., crosswind_offset])
        # Original code assigned rotorDiameter twice; once is enough.
        prob['rotorDiameter'] = np.array([rotor_diameter, rotor_diameter])
        prob['model_params:wec_spreading_angle'] = 0.0
        prob['model_params:wec_factor'] = 1.0

    def test_no_change_in_deficit_by_wake_spread_rate_multiplier_at_center(self):
        # Increasing the spreading angle should not change the velocity at wake center.
        prob = self.prob
        self._set_layout(prob, 0.)
        prob.run_once()
        wspeed0 = prob['wtVelocity0'][1]
        prob['model_params:wec_spreading_angle'] = 2.0
        prob.run_once()
        wspeed1 = prob['wtVelocity0'][1]
        self.assertEqual(wspeed1, wspeed0)

    def test_no_change_in_deficit_by_wake_diameter_multiplier_at_center(self):
        # NOTE(review): despite the name, this test varies the spreading angle,
        # duplicating the test above; it may have been meant to vary
        # 'model_params:wec_factor' instead -- confirm intent before changing.
        prob = self.prob
        self._set_layout(prob, 0.)
        prob.run_once()
        wspeed0 = prob['wtVelocity0'][1]
        prob['model_params:wec_spreading_angle'] = 2.0
        prob.run_once()
        wspeed1 = prob['wtVelocity0'][1]
        self.assertEqual(wspeed1, wspeed0)

    def test_increase_deficit_by_wake_diameter_expansion(self):
        # Doubling the wake diameter multiplier should deepen the deficit off-center.
        prob = self.prob
        self._set_layout(prob, 100.)
        prob.run_once()
        wspeed0 = prob['wtVelocity0'][1]
        prob['model_params:wec_factor'] = 2.0
        prob.run_once()
        wspeed1 = prob['wtVelocity0'][1]
        self.assertGreater(wspeed0, wspeed1)

    def test_increase_deficit_by_wake_expansion_rate_multiplier(self):
        # Increasing the spreading angle should deepen the deficit off-center.
        prob = self.prob
        self._set_layout(prob, 100.)
        prob.run_once()
        prob['model_params:wec_factor'] = 1.0
        wspeed0 = prob['wtVelocity0'][1]
        prob['model_params:wec_spreading_angle'] = 2.0
        prob.run_once()
        wspeed1 = prob['wtVelocity0'][1]
        self.assertGreater(wspeed0, wspeed1)
class test_porteagel_analyze(unittest.TestCase):
    """Regression tests of porteagel_analyze: row-averaged normalized power for
    the Horns Rev layout is compared against stored reference values.
    """
    #TODO improve tolerance of test - why can't we match more closely?
    def setUp(self):
        from plantenergy.utilities import sunflower_points
        # Loose tolerance; see TODO above -- row averages only match to ~0.1.
        self.tolerance = 1E-1
        self.wake_combination_method = 1
        self.wake_model_version = 2016
        self.rotor_diameter = 80.
        self.hub_height = 70.
        self.ct = 0.6
        self.alpha = 2.32
        self.beta = 0.154
        self.expratemultiplier = 1.0
        self.wec_factor = 1.0
        self.wind_speed = 8.0
        self.z_ref = self.hub_height
        self.z_0 = 0.0002
        self.shear_exp = 0.15
        self.yaw = 0.0
        self.wtVelocity = np.array([self.wind_speed])
        self.TI = 0.077
        # Wake expansion rates as a linear function of ambient TI.
        self.ky = 0.3837*self.TI + 0.003678  # np.array([0.3837*TIturbs[0] + 0.003678])
        self.kz = 0.3837*self.TI + 0.003678  # np.array([0.3837*TIturbs[0] + 0.003678])
        rotorpoints = sunflower_points(100)
        self.RotorPointsY = rotorpoints[0] #np.array([0, .5, 1.0, 0., 0.0, -.5, -1.0, 0., 0.])
        self.RotorPointsZ = rotorpoints[1] #np.array([0, 0., 0., .5, 1.0, 0., 0.0, -0.5, -1.])
        # self.RotorPointsY = np.array([0])
        # self.RotorPointsZ = np.array([0])
        # NOTE(review): the sunflower sample points assigned above are
        # immediately overwritten by this fixed 9-point rotor sampling pattern.
        self.RotorPointsY = np.array([0, .5, 1.0, 0., 0.0, -.5, -1.0, 0., 0.])
        self.RotorPointsZ = np.array([0, 0., 0., .5, 1.0, 0., 0.0, -0.5, -1.])
        self.TI_calculation_method = 4
        self.calc_k_star = True
        self.print_ti = False
        self.interp_type = 1
        self.sm_smoothing = 700.
        # Layout file stores coordinates in rotor diameters; scale to meters.
        loc_data = np.loadtxt('input_files/horns_rev_locations.txt', delimiter=',')
        turbineXw = loc_data[:, 0] * self.rotor_diameter
        turbineYw = loc_data[:, 1] * self.rotor_diameter
        turbineZ = np.ones_like(turbineXw) * self.hub_height
        sorted_x_idx = np.argsort(turbineXw, kind='heapsort')
        rotorDiameter = np.ones_like(turbineXw) * self.rotor_diameter
        Ct = np.ones_like(turbineXw) * self.ct
        yaw = np.ones_like(turbineXw) * self.yaw
        TI_turbs = np.ones_like(turbineXw) * self.TI
        use_ct_curve = True
        # ct_data = np.loadtxt('input_files/predicted_ct_vestas_v80_niayifar2016.txt', delimiter=',')
        ct_data = np.loadtxt('input_files/mfg_ct_vestas_v80_niayifar2016.txt', delimiter=',')
        ct_curve_wind_speed = ct_data[:, 0]
        ct_curve_ct = ct_data[:, 1]
        CalculateFlowField=False
        # Argument order must match the Fortran wrapper signature exactly.
        wtVelocity, _ = porteagel_analyze(turbineXw, sorted_x_idx, turbineYw, turbineZ,
                                          rotorDiameter, Ct, self.wind_speed,
                                          yaw, self.ky, self.kz, self.alpha, self.beta, TI_turbs, self.RotorPointsY,
                                          self.RotorPointsZ, np.array([0]), np.array([0]), np.array([0]),
                                          self.z_ref, self.z_0, self.shear_exp, self.wake_combination_method,
                                          self.TI_calculation_method, self.calc_k_star, self.wec_factor, self.print_ti,
                                          self.wake_model_version, self.interp_type, use_ct_curve,
                                          ct_curve_wind_speed, ct_curve_ct, self.sm_smoothing,
                                          self.expratemultiplier, CalculateFlowField)
        free_stream_power = power_func_v80(self.wind_speed)
        wtPower = power_func_v80(wtVelocity)
        # Average the power of turbines 40+i, 50+i, 60+i -- presumably three
        # interior positions of row i in the Horns Rev grid (confirm against the
        # layout file) -- normalized by the free-stream power.
        self.norm_pow_ave_by_row = np.zeros(10)
        for i in np.arange(0, self.norm_pow_ave_by_row.size):
            pow_ave_row = np.average([wtPower[40 + i], wtPower[50 + i], wtPower[60 + i]])
            self.norm_pow_ave_by_row[i] = pow_ave_row / free_stream_power
    def test_wt_velocity_1_turb(self):
        # A single unwaked turbine must see the undisturbed free-stream speed.
        turbineXw = np.array([0.0])
        turbineYw = np.array([0.0])
        turbineZ = np.ones_like(turbineXw)*self.hub_height
        sorted_x_idx = np.argsort(turbineXw, kind='heapsort')
        rotorDiameter = np.ones_like(turbineXw)*self.rotor_diameter
        Ct = np.ones_like(turbineXw)*self.ct
        yaw = np.ones_like(turbineXw)*self.yaw
        TI_turbs = np.ones_like(turbineXw)*self.TI
        use_ct_curve = False
        ct_curve_wind_speed = np.array([self.wind_speed])
        ct_curve_ct = np.array([self.ct])
        CalculateFlowField=False
        wtVelocity, _ = porteagel_analyze(turbineXw, sorted_x_idx, turbineYw, turbineZ,
                                          rotorDiameter, Ct, self.wind_speed,
                                          yaw, self.ky, self.kz, self.alpha, self.beta, TI_turbs, self.RotorPointsY,
                                          self.RotorPointsZ, np.array([0]), np.array([0]), np.array([0]),
                                          self.z_ref, self.z_0, self.shear_exp, self.wake_combination_method,
                                          self.TI_calculation_method, self.calc_k_star, self.wec_factor, self.print_ti,
                                          self.wake_model_version, self.interp_type, use_ct_curve,
                                          ct_curve_wind_speed, ct_curve_ct, self.sm_smoothing,
                                          self.expratemultiplier, CalculateFlowField)
        self.assertAlmostEqual(wtVelocity, 8.0, delta=self.tolerance)
    # Reference row-averaged normalized power values (row number, value):
    # 2.009085240790877, 0.4619246861924686
    # 2.0091082123225856, 0.46359832635983256
    # 3.003385019279678, 0.5037656903765689
    # 3.997271310197718, 0.515481171548117
    # 4.996498482238084, 0.516317991631799
    # 5.990212486668307, 0.515481171548117
    # 7.0003626220362625, 0.5121338912133888
    # 7.994042169168923, 0.5087866108786608
    # 8.99869062269259, 0.5046025104602508
    # 10.003339076216259, 0.5004184100418408
    def test_wt_velocity_row_1_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[0], 1.0, delta=self.tolerance)
    def test_wt_velocity_row_2_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[1], 0.4619246861924686, delta=self.tolerance)
    def test_wt_velocity_row_3_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[2], 0.5037656903765689, delta=self.tolerance)
    def test_wt_velocity_row_4_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[3], 0.515481171548117, delta=self.tolerance)
    def test_wt_velocity_row_5_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[4], 0.516317991631799, delta=self.tolerance)
    def test_wt_velocity_row_6_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[5], 0.515481171548117, delta=self.tolerance)
    def test_wt_velocity_row_7_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[6], 0.5121338912133888, delta=self.tolerance)
    def test_wt_velocity_row_8_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[7], 0.5087866108786608, delta=self.tolerance)
    def test_wt_velocity_row_9_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[8], 0.5046025104602508, delta=self.tolerance)
    def test_wt_velocity_row_10_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[9], 0.5004184100418408, delta=self.tolerance)
if __name__ == "__main__":
unittest.main(verbosity=2) | [
"_porteagel_fortran.wake_offset_func",
"_porteagel_fortran.discontinuity_point_func",
"_porteagel_fortran.k_star_func",
"_porteagel_fortran.ct_to_axial_ind_func",
"_porteagel_fortran.added_ti_func",
"_porteagel_fortran.overlap_area_func",
"gaussianwake.gaussianwake.GaussianWake",
"numpy.argsort",
"n... | [((35943, 35969), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (35956, 35969), False, 'import unittest\n'), ((1227, 1293), '_porteagel_fortran.x0_func', 'x0_func', (['self.d', 'self.yaw', 'self.ct', 'self.alpha', 'self.ti', 'self.beta'], {}), '(self.d, self.yaw, self.ct, self.alpha, self.ti, self.beta)\n', (1234, 1293), False, 'from _porteagel_fortran import porteagel_analyze, x0_func, theta_c_0_func, sigmay_func, sigma_spread_func\n'), ((1742, 1801), '_porteagel_fortran.x0_func', 'x0_func', (['rotor_diameter', 'yaw', 'ct', 'self.alpha', 'ti', 'self.beta'], {}), '(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)\n', (1749, 1801), False, 'from _porteagel_fortran import porteagel_analyze, x0_func, theta_c_0_func, sigmay_func, sigma_spread_func\n'), ((2069, 2128), '_porteagel_fortran.x0_func', 'x0_func', (['rotor_diameter', 'yaw', 'ct', 'self.alpha', 'ti', 'self.beta'], {}), '(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)\n', (2076, 2128), False, 'from _porteagel_fortran import porteagel_analyze, x0_func, theta_c_0_func, sigmay_func, sigma_spread_func\n'), ((2396, 2455), '_porteagel_fortran.x0_func', 'x0_func', (['rotor_diameter', 'yaw', 'ct', 'self.alpha', 'ti', 'self.beta'], {}), '(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)\n', (2403, 2455), False, 'from _porteagel_fortran import porteagel_analyze, x0_func, theta_c_0_func, sigmay_func, sigma_spread_func\n'), ((2723, 2782), '_porteagel_fortran.x0_func', 'x0_func', (['rotor_diameter', 'yaw', 'ct', 'self.alpha', 'ti', 'self.beta'], {}), '(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)\n', (2730, 2782), False, 'from _porteagel_fortran import porteagel_analyze, x0_func, theta_c_0_func, sigmay_func, sigma_spread_func\n'), ((2945, 3018), '_porteagel_fortran.discontinuity_point_func', 'discontinuity_point_func', (['x0', 'self.d', 'self.ky', 'self.kz', 'self.yaw', 'self.ct'], {}), '(x0, self.d, self.ky, self.kz, self.yaw, self.ct)\n', (2969, 3018), False, 
'from _porteagel_fortran import ct_to_axial_ind_func, wind_shear_func, discontinuity_point_func, smooth_max\n'), ((3175, 3220), '_porteagel_fortran.sigmay_func', 'sigmay_func', (['x', 'x0', 'self.ky', 'self.d', 'self.yaw'], {}), '(x, x0, self.ky, self.d, self.yaw)\n', (3186, 3220), False, 'from _porteagel_fortran import porteagel_analyze, x0_func, theta_c_0_func, sigmay_func, sigma_spread_func\n'), ((3377, 3412), '_porteagel_fortran.sigmaz_func', 'sigmaz_func', (['x', 'x0', 'self.kz', 'self.d'], {}), '(x, x0, self.kz, self.d)\n', (3388, 3412), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((3541, 3574), '_porteagel_fortran.theta_c_0_func', 'theta_c_0_func', (['self.yaw', 'self.ct'], {}), '(self.yaw, self.ct)\n', (3555, 3574), False, 'from _porteagel_fortran import porteagel_analyze, x0_func, theta_c_0_func, sigmay_func, sigma_spread_func\n'), ((3822, 3922), '_porteagel_fortran.wake_offset_func', 'wake_offset_func', (['x', 'self.d', 'theta_c_0', 'x0', 'self.yaw', 'self.ky', 'self.kz', 'self.ct', 'sigmay', 'sigmaz'], {}), '(x, self.d, theta_c_0, x0, self.yaw, self.ky, self.kz, self\n .ct, sigmay, sigmaz)\n', (3838, 3922), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((4171, 4271), '_porteagel_fortran.wake_offset_func', 'wake_offset_func', (['x', 'self.d', 'theta_c_0', 'x0', 'self.yaw', 'self.ky', 'self.kz', 'self.ct', 'sigmay', 'sigmaz'], {}), '(x, self.d, theta_c_0, x0, self.yaw, self.ky, self.kz, self\n .ct, sigmay, sigmaz)\n', (4187, 4271), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((4703, 4808), '_porteagel_fortran.deltav_func', 'deltav_func', (['deltay', 'deltaz', 'ct', 'yaw', 'sigmay', 'sigmaz', 'd', '(2016)', 'self.ky', 'x', 'wec_factor', 'sigmay', 'sigmaz'], {}), '(deltay, deltaz, ct, yaw, sigmay, sigmaz, d, 2016, self.ky, x,\n wec_factor, sigmay, 
sigmaz)\n', (4714, 4808), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((5249, 5354), '_porteagel_fortran.deltav_func', 'deltav_func', (['deltay', 'deltaz', 'ct', 'yaw', 'sigmay', 'sigmaz', 'd', '(2014)', 'self.ky', 'x', 'wec_factor', 'sigmay', 'sigmaz'], {}), '(deltay, deltaz, ct, yaw, sigmay, sigmaz, d, 2014, self.ky, x,\n wec_factor, sigmay, sigmaz)\n', (5260, 5354), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((5823, 6008), '_porteagel_fortran.deltav_near_wake_lin_func', 'deltav_near_wake_lin_func', (['deltay', 'deltaz', 'self.ct', 'yaw', 'sigmay_0', 'sigmaz_0', 'x0', 'self.d', 'x', 'xd', 'sigmay_d', 'sigmaz_d', 'version', 'self.ky', 'x', 'sigmay_spread', 'sigmaz_spread', 'wec_factor'], {}), '(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0,\n x0, self.d, x, xd, sigmay_d, sigmaz_d, version, self.ky, x,\n sigmay_spread, sigmaz_spread, wec_factor)\n', (5848, 6008), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((6521, 6706), '_porteagel_fortran.deltav_near_wake_lin_func', 'deltav_near_wake_lin_func', (['deltay', 'deltaz', 'self.ct', 'yaw', 'sigmay_0', 'sigmaz_0', 'x0', 'self.d', 'x', 'xd', 'sigmay_d', 'sigmaz_d', 'version', 'self.ky', 'x', 'sigmay_spread', 'sigmaz_spread', 'wec_factor'], {}), '(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0,\n x0, self.d, x, xd, sigmay_d, sigmaz_d, version, self.ky, x,\n sigmay_spread, sigmaz_spread, wec_factor)\n', (6546, 6706), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((7219, 7404), '_porteagel_fortran.deltav_near_wake_lin_func', 'deltav_near_wake_lin_func', (['deltay', 'deltaz', 'self.ct', 'yaw', 'sigmay_0', 'sigmaz_0', 'x0', 'self.d', 'x', 'xd', 'sigmay_d', 'sigmaz_d', 'version', 'self.ky', 'x', 'sigmay_spread', 'sigmaz_spread', 'wec_factor'], {}), 
'(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0,\n x0, self.d, x, xd, sigmay_d, sigmaz_d, version, self.ky, x,\n sigmay_spread, sigmaz_spread, wec_factor)\n', (7244, 7404), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((7962, 8147), '_porteagel_fortran.deltav_near_wake_lin_func', 'deltav_near_wake_lin_func', (['deltay', 'deltaz', 'self.ct', 'yaw', 'sigmay_0', 'sigmaz_0', 'x0', 'self.d', 'x', 'xd', 'sigmay_d', 'sigmaz_d', 'version', 'self.ky', 'x', 'sigmay_spread', 'sigmaz_spread', 'wec_factor'], {}), '(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0,\n x0, self.d, x, xd, sigmay_d, sigmaz_d, version, self.ky, x,\n sigmay_spread, sigmaz_spread, wec_factor)\n', (7987, 8147), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((8713, 8898), '_porteagel_fortran.deltav_near_wake_lin_func', 'deltav_near_wake_lin_func', (['deltay', 'deltaz', 'self.ct', 'yaw', 'sigmay_0', 'sigmaz_0', 'x0', 'self.d', 'x', 'xd', 'sigmay_d', 'sigmaz_d', 'version', 'self.ky', 'x', 'sigmay_spread', 'sigmaz_spread', 'wec_factor'], {}), '(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0,\n x0, self.d, x, xd, sigmay_d, sigmaz_d, version, self.ky, x,\n sigmay_spread, sigmaz_spread, wec_factor)\n', (8738, 8898), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func\n'), ((9455, 9640), '_porteagel_fortran.deltav_near_wake_lin_func', 'deltav_near_wake_lin_func', (['deltay', 'deltaz', 'self.ct', 'yaw', 'sigmay_0', 'sigmaz_0', 'x0', 'self.d', 'x', 'xd', 'sigmay_d', 'sigmaz_d', 'version', 'self.ky', 'x', 'sigmay_spread', 'sigmaz_spread', 'wec_factor'], {}), '(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0,\n x0, self.d, x, xd, sigmay_d, sigmaz_d, version, self.ky, x,\n sigmay_spread, sigmaz_spread, wec_factor)\n', (9480, 9640), False, 'from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, 
deltav_near_wake_lin_func\n'), ((9979, 10071), '_porteagel_fortran.wake_combination_func', 'wake_combination_func', (['self.wind_speed', 'Uk', 'deltav', 'wake_combination_method', 'deficit_sum'], {}), '(self.wind_speed, Uk, deltav, wake_combination_method,\n deficit_sum)\n', (10000, 10071), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((10310, 10402), '_porteagel_fortran.wake_combination_func', 'wake_combination_func', (['self.wind_speed', 'Uk', 'deltav', 'wake_combination_method', 'deficit_sum'], {}), '(self.wind_speed, Uk, deltav, wake_combination_method,\n deficit_sum)\n', (10331, 10402), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((10659, 10751), '_porteagel_fortran.wake_combination_func', 'wake_combination_func', (['self.wind_speed', 'Uk', 'deltav', 'wake_combination_method', 'deficit_sum'], {}), '(self.wind_speed, Uk, deltav, wake_combination_method,\n deficit_sum)\n', (10680, 10751), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((11008, 11100), '_porteagel_fortran.wake_combination_func', 'wake_combination_func', (['self.wind_speed', 'Uk', 'deltav', 'wake_combination_method', 'deficit_sum'], {}), '(self.wind_speed, Uk, deltav, wake_combination_method,\n deficit_sum)\n', (11029, 11100), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((11311, 11359), '_porteagel_fortran.wind_shear_func', 'wind_shear_func', (['z', 'self.wind_speed', 'zr', 'zo', 'psi'], {}), '(z, self.wind_speed, zr, zo, psi)\n', (11326, 11359), False, 'from _porteagel_fortran import ct_to_axial_ind_func, wind_shear_func, discontinuity_point_func, smooth_max\n'), ((11527, 11546), '_porteagel_fortran.k_star_func', 'k_star_func', (['ti_ust'], {}), '(ti_ust)\n', (11538, 11546), False, 'from _porteagel_fortran import 
overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((11715, 11739), '_porteagel_fortran.ct_to_axial_ind_func', 'ct_to_axial_ind_func', (['ct'], {}), '(ct)\n', (11735, 11739), False, 'from _porteagel_fortran import ct_to_axial_ind_func, wind_shear_func, discontinuity_point_func, smooth_max\n'), ((11911, 11935), '_porteagel_fortran.ct_to_axial_ind_func', 'ct_to_axial_ind_func', (['ct'], {}), '(ct)\n', (11931, 11935), False, 'from _porteagel_fortran import ct_to_axial_ind_func, wind_shear_func, discontinuity_point_func, smooth_max\n'), ((12119, 12138), '_porteagel_fortran.smooth_max', 'smooth_max', (['s', 'x', 'y'], {}), '(s, x, y)\n', (12129, 12138), False, 'from _porteagel_fortran import ct_to_axial_ind_func, wind_shear_func, discontinuity_point_func, smooth_max\n'), ((12449, 12551), '_porteagel_fortran.overlap_area_func', 'overlap_area_func', (['turbiney', 'turbinez', 'rotor_diameter', 'wake_center_y', 'wake_center_z', 'wake_diameter'], {}), '(turbiney, turbinez, rotor_diameter, wake_center_y,\n wake_center_z, wake_diameter)\n', (12466, 12551), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((12900, 13002), '_porteagel_fortran.overlap_area_func', 'overlap_area_func', (['turbiney', 'turbinez', 'rotor_diameter', 'wake_center_y', 'wake_center_z', 'wake_diameter'], {}), '(turbiney, turbinez, rotor_diameter, wake_center_y,\n wake_center_z, wake_diameter)\n', (12917, 13002), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((13384, 13486), '_porteagel_fortran.overlap_area_func', 'overlap_area_func', (['turbiney', 'turbinez', 'rotor_diameter', 'wake_center_y', 'wake_center_z', 'wake_diameter'], {}), '(turbiney, turbinez, rotor_diameter, wake_center_y,\n wake_center_z, wake_diameter)\n', (13401, 13486), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((13835, 
13937), '_porteagel_fortran.overlap_area_func', 'overlap_area_func', (['turbiney', 'turbinez', 'rotor_diameter', 'wake_center_y', 'wake_center_z', 'wake_diameter'], {}), '(turbiney, turbinez, rotor_diameter, wake_center_y,\n wake_center_z, wake_diameter)\n', (13852, 13937), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((14265, 14367), '_porteagel_fortran.overlap_area_func', 'overlap_area_func', (['turbiney', 'turbinez', 'rotor_diameter', 'wake_center_y', 'wake_center_z', 'wake_diameter'], {}), '(turbiney, turbinez, rotor_diameter, wake_center_y,\n wake_center_z, wake_diameter)\n', (14282, 14367), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((15187, 15421), '_porteagel_fortran.added_ti_func', 'added_ti_func', (['self.TI', 'self.ct', 'self.x', 'self.ky', 'self.rotor_diameter', 'self.rotor_diameter', 'self.deltay', 'self.wake_height', 'self.turbine_height', 'self.sm_smoothing', 'TI_ust', 'TI_calculation_method', 'TI_area_ratio_in', 'TI_dst_in'], {}), '(self.TI, self.ct, self.x, self.ky, self.rotor_diameter, self.\n rotor_diameter, self.deltay, self.wake_height, self.turbine_height,\n self.sm_smoothing, TI_ust, TI_calculation_method, TI_area_ratio_in,\n TI_dst_in)\n', (15200, 15421), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((15754, 15988), '_porteagel_fortran.added_ti_func', 'added_ti_func', (['self.TI', 'self.ct', 'self.x', 'self.ky', 'self.rotor_diameter', 'self.rotor_diameter', 'self.deltay', 'self.wake_height', 'self.turbine_height', 'self.sm_smoothing', 'TI_ust', 'TI_calculation_method', 'TI_area_ratio_in', 'TI_dst_in'], {}), '(self.TI, self.ct, self.x, self.ky, self.rotor_diameter, self.\n rotor_diameter, self.deltay, self.wake_height, self.turbine_height,\n self.sm_smoothing, TI_ust, TI_calculation_method, TI_area_ratio_in,\n TI_dst_in)\n', (15767, 15988), 
False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((16326, 16560), '_porteagel_fortran.added_ti_func', 'added_ti_func', (['self.TI', 'self.ct', 'self.x', 'self.ky', 'self.rotor_diameter', 'self.rotor_diameter', 'self.deltay', 'self.wake_height', 'self.turbine_height', 'self.sm_smoothing', 'TI_ust', 'TI_calculation_method', 'TI_area_ratio_in', 'TI_dst_in'], {}), '(self.TI, self.ct, self.x, self.ky, self.rotor_diameter, self.\n rotor_diameter, self.deltay, self.wake_height, self.turbine_height,\n self.sm_smoothing, TI_ust, TI_calculation_method, TI_area_ratio_in,\n TI_dst_in)\n', (16339, 16560), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((17019, 17253), '_porteagel_fortran.added_ti_func', 'added_ti_func', (['self.TI', 'self.ct', 'self.x', 'self.ky', 'self.rotor_diameter', 'self.rotor_diameter', 'self.deltay', 'self.wake_height', 'self.turbine_height', 'self.sm_smoothing', 'TI_ust', 'TI_calculation_method', 'TI_area_ratio_in', 'TI_dst_in'], {}), '(self.TI, self.ct, self.x, self.ky, self.rotor_diameter, self.\n rotor_diameter, self.deltay, self.wake_height, self.turbine_height,\n self.sm_smoothing, TI_ust, TI_calculation_method, TI_area_ratio_in,\n TI_dst_in)\n', (17032, 17253), False, 'from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func\n'), ((17736, 17749), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (17744, 17749), True, 'import numpy as np\n'), ((17779, 17795), 'numpy.array', 'np.array', (['[0.15]'], {}), '([0.15])\n', (17787, 17795), True, 'import numpy as np\n'), ((18198, 18211), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (18206, 18211), True, 'import numpy as np\n'), ((18237, 18250), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (18245, 18250), True, 'import numpy as np\n'), ((18275, 18292), 'numpy.array', 'np.array', (['[0.125]'], {}), '([0.125])\n', 
(18283, 18292), True, 'import numpy as np\n'), ((18312, 18344), 'numpy.array', 'np.array', (['[20.0 * np.pi / 180.0]'], {}), '([20.0 * np.pi / 180.0])\n', (18320, 18344), True, 'import numpy as np\n'), ((18370, 18397), 'numpy.array', 'np.array', (['[self.wind_speed]'], {}), '([self.wind_speed])\n', (18378, 18397), True, 'import numpy as np\n'), ((18889, 19352), '_porteagel_fortran.point_velocity_with_shear_func', 'point_velocity_with_shear_func', (['self.turbI', 'self.wake_combination_method', 'self.wake_model_version', 'self.sorted_x_idx', 'self.pointX', 'self.pointY', 'self.pointZ', 'self.tol', 'self.alpha', 'self.beta', 'self.expratemultiplier', 'self.wec_factor', 'self.wind_speed', 'self.z_ref', 'self.z_0', 'self.shear_exp', 'self.turbineXw', 'self.turbineYw', 'self.turbineZ', 'self.rotorDiameter', 'self.yaw', 'self.wtVelocity', 'self.Ct_local', 'self.TIturbs', 'self.ky_local', 'self.kz_local'], {}), '(self.turbI, self.wake_combination_method,\n self.wake_model_version, self.sorted_x_idx, self.pointX, self.pointY,\n self.pointZ, self.tol, self.alpha, self.beta, self.expratemultiplier,\n self.wec_factor, self.wind_speed, self.z_ref, self.z_0, self.shear_exp,\n self.turbineXw, self.turbineYw, self.turbineZ, self.rotorDiameter, self\n .yaw, self.wtVelocity, self.Ct_local, self.TIturbs, self.ky_local, self\n .kz_local)\n', (18919, 19352), False, 'from _porteagel_fortran import interpolation, hermite_spline, point_velocity_with_shear_func\n'), ((20063, 20108), 'numpy.array', 'np.array', (['[500.0, 500.0, 500.0, 200.0, -10.0]'], {}), '([500.0, 500.0, 500.0, 200.0, -10.0])\n', (20071, 20108), True, 'import numpy as np\n'), ((20124, 20159), 'numpy.array', 'np.array', (['[1.0, 2.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 2.0, 1.0, 1.0, 1.0])\n', (20132, 20159), True, 'import numpy as np\n'), ((20175, 20211), 'numpy.array', 'np.array', (['[0.0, 0.0, 45.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 45.0, 0.0, 0.0])\n', (20183, 20211), True, 'import numpy as np\n'), ((20309, 20325), 
'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (20322, 20325), True, 'import numpy as np\n'), ((20343, 20363), 'numpy.arange', 'np.arange', (['(0)', 'x.size'], {}), '(0, x.size)\n', (20352, 20363), True, 'import numpy as np\n'), ((20505, 20553), 'numpy.array', 'np.array', (['[75.45, 150.9, 534.2, 36.7495751, 0.0]'], {}), '([75.45, 150.9, 534.2, 36.7495751, 0.0])\n', (20513, 20553), True, 'import numpy as np\n'), ((22197, 22240), '_porteagel_fortran.hermite_spline', 'hermite_spline', (['x', 'x0', 'x1', 'y0', 'dy0', 'y1', 'dy1'], {}), '(x, x0, x1, y0, dy0, y1, dy1)\n', (22211, 22240), False, 'from _porteagel_fortran import interpolation, hermite_spline, point_velocity_with_shear_func\n'), ((22462, 22505), '_porteagel_fortran.hermite_spline', 'hermite_spline', (['x', 'x0', 'x1', 'y0', 'dy0', 'y1', 'dy1'], {}), '(x, x0, x1, y0, dy0, y1, dy1)\n', (22476, 22505), False, 'from _porteagel_fortran import interpolation, hermite_spline, point_velocity_with_shear_func\n'), ((22732, 22775), '_porteagel_fortran.hermite_spline', 'hermite_spline', (['x', 'x0', 'x1', 'y0', 'dy0', 'y1', 'dy1'], {}), '(x, x0, x1, y0, dy0, y1, dy1)\n', (22746, 22775), False, 'from _porteagel_fortran import interpolation, hermite_spline, point_velocity_with_shear_func\n'), ((23484, 23509), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (23492, 23509), True, 'import numpy as np\n'), ((23519, 23544), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (23527, 23544), True, 'import numpy as np\n'), ((23653, 23708), '_porteagel_fortran.interpolation', 'interpolation', (['interp_type', 'x', 'y', 'xval', '(0.0)', '(0.0)', '(False)'], {}), '(interp_type, x, y, xval, 0.0, 0.0, False)\n', (23666, 23708), False, 'from _porteagel_fortran import interpolation, hermite_spline, point_velocity_with_shear_func\n'), ((23965, 24000), 'numpy.zeros', 'np.zeros', (["[data['wind_speed'].size]"], {}), "([data['wind_speed'].size])\n", (23973, 24000), True, 
'import numpy as np\n'), ((24019, 24054), 'numpy.zeros', 'np.zeros', (["[data['wind_speed'].size]"], {}), "([data['wind_speed'].size])\n", (24027, 24054), True, 'import numpy as np\n'), ((24081, 24116), 'numpy.zeros', 'np.zeros', (["[data['wind_speed'].size]"], {}), "([data['wind_speed'].size])\n", (24089, 24116), True, 'import numpy as np\n'), ((24640, 24710), 'pytest.warns', 'pytest.warns', (['Warning', 'GaussianWake'], {'nTurbines': '(6)', 'options': 'self.options'}), '(Warning, GaussianWake, nTurbines=6, options=self.options)\n', (24652, 24710), False, 'import pytest\n'), ((24923, 24958), 'numpy.zeros', 'np.zeros', (["[data['wind_speed'].size]"], {}), "([data['wind_speed'].size])\n", (24931, 24958), True, 'import numpy as np\n'), ((24977, 25012), 'numpy.zeros', 'np.zeros', (["[data['wind_speed'].size]"], {}), "([data['wind_speed'].size])\n", (24985, 25012), True, 'import numpy as np\n'), ((25039, 25074), 'numpy.zeros', 'np.zeros', (["[data['wind_speed'].size]"], {}), "([data['wind_speed'].size])\n", (25047, 25074), True, 'import numpy as np\n'), ((25878, 25900), 'numpy.array', 'np.array', (['[0.0, 400.0]'], {}), '([0.0, 400.0])\n', (25886, 25900), True, 'import numpy as np\n'), ((25918, 25938), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (25926, 25938), True, 'import numpy as np\n'), ((26072, 26114), 'numpy.array', 'np.array', (['[rotor_diameter, rotor_diameter]'], {}), '([rotor_diameter, rotor_diameter])\n', (26080, 26114), True, 'import numpy as np\n'), ((26147, 26189), 'numpy.array', 'np.array', (['[rotor_diameter, rotor_diameter]'], {}), '([rotor_diameter, rotor_diameter])\n', (26155, 26189), True, 'import numpy as np\n'), ((26645, 26667), 'numpy.array', 'np.array', (['[0.0, 400.0]'], {}), '([0.0, 400.0])\n', (26653, 26667), True, 'import numpy as np\n'), ((26685, 26705), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (26693, 26705), True, 'import numpy as np\n'), ((26839, 26881), 'numpy.array', 'np.array', 
(['[rotor_diameter, rotor_diameter]'], {}), '([rotor_diameter, rotor_diameter])\n', (26847, 26881), True, 'import numpy as np\n'), ((27322, 27344), 'numpy.array', 'np.array', (['[0.0, 400.0]'], {}), '([0.0, 400.0])\n', (27330, 27344), True, 'import numpy as np\n'), ((27362, 27384), 'numpy.array', 'np.array', (['[0.0, 100.0]'], {}), '([0.0, 100.0])\n', (27370, 27384), True, 'import numpy as np\n'), ((27518, 27560), 'numpy.array', 'np.array', (['[rotor_diameter, rotor_diameter]'], {}), '([rotor_diameter, rotor_diameter])\n', (27526, 27560), True, 'import numpy as np\n'), ((28001, 28023), 'numpy.array', 'np.array', (['[0.0, 400.0]'], {}), '([0.0, 400.0])\n', (28009, 28023), True, 'import numpy as np\n'), ((28041, 28063), 'numpy.array', 'np.array', (['[0.0, 100.0]'], {}), '([0.0, 100.0])\n', (28049, 28063), True, 'import numpy as np\n'), ((28197, 28239), 'numpy.array', 'np.array', (['[rotor_diameter, rotor_diameter]'], {}), '([rotor_diameter, rotor_diameter])\n', (28205, 28239), True, 'import numpy as np\n'), ((29309, 29336), 'numpy.array', 'np.array', (['[self.wind_speed]'], {}), '([self.wind_speed])\n', (29317, 29336), True, 'import numpy as np\n'), ((29558, 29579), 'plantenergy.utilities.sunflower_points', 'sunflower_points', (['(100)'], {}), '(100)\n', (29574, 29579), False, 'from plantenergy.utilities import sunflower_points\n'), ((29886, 29941), 'numpy.array', 'np.array', (['[0, 0.5, 1.0, 0.0, 0.0, -0.5, -1.0, 0.0, 0.0]'], {}), '([0, 0.5, 1.0, 0.0, 0.0, -0.5, -1.0, 0.0, 0.0])\n', (29894, 29941), True, 'import numpy as np\n'), ((29965, 30020), 'numpy.array', 'np.array', (['[0, 0.0, 0.0, 0.5, 1.0, 0.0, 0.0, -0.5, -1.0]'], {}), '([0, 0.0, 0.0, 0.5, 1.0, 0.0, 0.0, -0.5, -1.0])\n', (29973, 30020), True, 'import numpy as np\n'), ((30199, 30263), 'numpy.loadtxt', 'np.loadtxt', (['"""input_files/horns_rev_locations.txt"""'], {'delimiter': '""","""'}), "('input_files/horns_rev_locations.txt', delimiter=',')\n", (30209, 30263), True, 'import numpy as np\n'), ((30462, 
30500), 'numpy.argsort', 'np.argsort', (['turbineXw'], {'kind': '"""heapsort"""'}), "(turbineXw, kind='heapsort')\n", (30472, 30500), True, 'import numpy as np\n'), ((30869, 30944), 'numpy.loadtxt', 'np.loadtxt', (['"""input_files/mfg_ct_vestas_v80_niayifar2016.txt"""'], {'delimiter': '""","""'}), "('input_files/mfg_ct_vestas_v80_niayifar2016.txt', delimiter=',')\n", (30879, 30944), True, 'import numpy as np\n'), ((32077, 32089), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (32085, 32089), True, 'import numpy as np\n'), ((32107, 32150), 'numpy.arange', 'np.arange', (['(0)', 'self.norm_pow_ave_by_row.size'], {}), '(0, self.norm_pow_ave_by_row.size)\n', (32116, 32150), True, 'import numpy as np\n'), ((32376, 32391), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (32384, 32391), True, 'import numpy as np\n'), ((32412, 32427), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (32420, 32427), True, 'import numpy as np\n'), ((32510, 32548), 'numpy.argsort', 'np.argsort', (['turbineXw'], {'kind': '"""heapsort"""'}), "(turbineXw, kind='heapsort')\n", (32520, 32548), True, 'import numpy as np\n'), ((32820, 32847), 'numpy.array', 'np.array', (['[self.wind_speed]'], {}), '([self.wind_speed])\n', (32828, 32847), True, 'import numpy as np\n'), ((32870, 32889), 'numpy.array', 'np.array', (['[self.ct]'], {}), '([self.ct])\n', (32878, 32889), True, 'import numpy as np\n'), ((18443, 18471), 'numpy.ones_like', 'np.ones_like', (['self.turbineXw'], {}), '(self.turbineXw)\n', (18455, 18471), True, 'import numpy as np\n'), ((18537, 18565), 'numpy.ones_like', 'np.ones_like', (['self.turbineXw'], {}), '(self.turbineXw)\n', (18549, 18565), True, 'import numpy as np\n'), ((20400, 20472), '_porteagel_fortran.sigma_spread_func', 'sigma_spread_func', (['x[i]', 'x0', 'self.ky', 'sigma_0', 'sigma_d', 'xi_a[i]', 'xi_d[i]'], {}), '(x[i], x0, self.ky, sigma_0, sigma_d, xi_a[i], xi_d[i])\n', (20417, 20472), False, 'from _porteagel_fortran import porteagel_analyze, x0_func, 
theta_c_0_func, sigmay_func, sigma_spread_func\n'), ((25610, 25655), 'gaussianwake.gaussianwake.GaussianWake', 'GaussianWake', (['nTurbines'], {'options': 'self.options'}), '(nTurbines, options=self.options)\n', (25622, 25655), False, 'from gaussianwake.gaussianwake import GaussianWake\n'), ((30397, 30420), 'numpy.ones_like', 'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (30409, 30420), True, 'import numpy as np\n'), ((30525, 30548), 'numpy.ones_like', 'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (30537, 30548), True, 'import numpy as np\n'), ((30584, 30607), 'numpy.ones_like', 'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (30596, 30607), True, 'import numpy as np\n'), ((30632, 30655), 'numpy.ones_like', 'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (30644, 30655), True, 'import numpy as np\n'), ((30686, 30709), 'numpy.ones_like', 'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (30698, 30709), True, 'import numpy as np\n'), ((31395, 31408), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (31403, 31408), True, 'import numpy as np\n'), ((31410, 31423), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (31418, 31423), True, 'import numpy as np\n'), ((31425, 31438), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (31433, 31438), True, 'import numpy as np\n'), ((32178, 32241), 'numpy.average', 'np.average', (['[wtPower[40 + i], wtPower[50 + i], wtPower[60 + i]]'], {}), '([wtPower[40 + i], wtPower[50 + i], wtPower[60 + i]])\n', (32188, 32241), True, 'import numpy as np\n'), ((32447, 32470), 'numpy.ones_like', 'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (32459, 32470), True, 'import numpy as np\n'), ((32573, 32596), 'numpy.ones_like', 'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (32585, 32596), True, 'import numpy as np\n'), ((32630, 32653), 'numpy.ones_like', 'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (32642, 32653), True, 'import numpy as np\n'), ((32676, 32699), 'numpy.ones_like', 
'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (32688, 32699), True, 'import numpy as np\n'), ((32728, 32751), 'numpy.ones_like', 'np.ones_like', (['turbineXw'], {}), '(turbineXw)\n', (32740, 32751), True, 'import numpy as np\n'), ((33270, 33283), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (33278, 33283), True, 'import numpy as np\n'), ((33285, 33298), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (33293, 33298), True, 'import numpy as np\n'), ((33300, 33313), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (33308, 33313), True, 'import numpy as np\n'), ((25566, 25573), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (25571, 25573), False, 'from openmdao.api import Problem, Group\n')] |
"""Check scipy.linalg.solve_triangular (forward and backward substitution)
against precomputed reference solutions, printing True per matching element.

Runs under ulab (MicroPython) when available, otherwise under CPython's
scipy/numpy.
"""
import math
try:
    from ulab import scipy, numpy as np
except ImportError:
    import scipy
    import numpy as np


def _report(result, expected):
    """Print True/False for each element of *result* vs *expected*."""
    # Iterate the pair directly instead of a hard-coded range(4),
    # so the check adapts to the vector length.
    for got, want in zip(result, expected):
        print(math.isclose(got, want, rel_tol=1e-06, abs_tol=1e-06))


A = np.array([[3, 0, 2, 6], [2, 1, 0, 1], [1, 0, 1, 4], [1, 2, 1, 8]])
b = np.array([4, 2, 4, 2])

# forward substitution (uses the lower-triangular part of A)
result = scipy.linalg.solve_triangular(A, b, lower=True)
ref_result = np.array([1.333333333, -0.666666666, 2.666666666, -0.083333333])
_report(result, ref_result)

# backward substitution (uses the upper-triangular part of A)
result = scipy.linalg.solve_triangular(A, b, lower=False)
ref_result = np.array([-1.166666666, 1.75, 3.0, 0.25])
_report(result, ref_result)
| [
"numpy.array",
"scipy.linalg.solve_triangular",
"math.isclose"
] | [((123, 189), 'numpy.array', 'np.array', (['[[3, 0, 2, 6], [2, 1, 0, 1], [1, 0, 1, 4], [1, 2, 1, 8]]'], {}), '([[3, 0, 2, 6], [2, 1, 0, 1], [1, 0, 1, 4], [1, 2, 1, 8]])\n', (131, 189), True, 'import numpy as np\n'), ((194, 216), 'numpy.array', 'np.array', (['[4, 2, 4, 2]'], {}), '([4, 2, 4, 2])\n', (202, 216), True, 'import numpy as np\n'), ((250, 297), 'scipy.linalg.solve_triangular', 'scipy.linalg.solve_triangular', (['A', 'b'], {'lower': '(True)'}), '(A, b, lower=True)\n', (279, 297), False, 'import scipy\n'), ((311, 375), 'numpy.array', 'np.array', (['[1.333333333, -0.666666666, 2.666666666, -0.083333333]'], {}), '([1.333333333, -0.666666666, 2.666666666, -0.083333333])\n', (319, 375), True, 'import numpy as np\n'), ((511, 559), 'scipy.linalg.solve_triangular', 'scipy.linalg.solve_triangular', (['A', 'b'], {'lower': '(False)'}), '(A, b, lower=False)\n', (540, 559), False, 'import scipy\n'), ((573, 614), 'numpy.array', 'np.array', (['[-1.166666666, 1.75, 3.0, 0.25]'], {}), '([-1.166666666, 1.75, 3.0, 0.25])\n', (581, 614), True, 'import numpy as np\n'), ((409, 477), 'math.isclose', 'math.isclose', (['result[i]', 'ref_result[i]'], {'rel_tol': '(1e-06)', 'abs_tol': '(1e-06)'}), '(result[i], ref_result[i], rel_tol=1e-06, abs_tol=1e-06)\n', (421, 477), False, 'import math\n'), ((648, 716), 'math.isclose', 'math.isclose', (['result[i]', 'ref_result[i]'], {'rel_tol': '(1e-06)', 'abs_tol': '(1e-06)'}), '(result[i], ref_result[i], rel_tol=1e-06, abs_tol=1e-06)\n', (660, 716), False, 'import math\n')] |
"""
Implementation of DDPG - Deep Deterministic Policy Gradient Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 and MountainCarContinuous-v0 OpenAI gym task
"""
import numpy as np
import datetime
import gym
from gym.wrappers import Monitor
import tensorflow as tf
from tqdm import tqdm
from src.agent.ddpg_agent import DDPGAgent
from src.network.ddpg_network import CriticNetwork, ActorNetwork
from src.replaybuffer import ReplayBuffer
from src.explorationnoise import OrnsteinUhlenbeckProcess, GreedyPolicy
flags = tf.app.flags
# ================================
# UTILITY PARAMETERS
# ================================
# Gym environment name; tested tasks: 'Pendulum-v0', 'MountainCarContinuous-v0'
flags.DEFINE_string('env_name', 'Pendulum-v0', 'environment name in gym.')
flags.DEFINE_boolean('env_render', False, 'whether render environment (display).')
flags.DEFINE_boolean('env_monitor', True, 'whether use gym monitor.')
# Timestamp used to keep each run's result directories unique.
DATETIME = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
# Seed passed to the replay buffer for reproducibility.
RANDOM_SEED = 1234
# ================================
# TRAINING PARAMETERS
# ================================
flags.DEFINE_integer('mini_batch', 64, 'mini batch size for training.')
# Learning rates for the actor and critic networks
ACTOR_LEARNING_RATE = 0.0001
CRITIC_LEARNING_RATE = 0.001
# Maximum number of episodes
MAX_EPISODES = 100000
# Maximum number of steps per episode
MAX_STEPS_EPISODE = 50000
# Steps of experience collection before learning starts (warmup).
WARMUP_STEPS = 10000
# Number of episodes over which exploration noise is annealed.
EXPLORATION_EPISODES = 10000
# Discount factor
GAMMA = 0.99
# Soft target update parameter (smaller = target networks track more slowly)
TAU = 0.001
# Size of replay buffer
BUFFER_SIZE = 1000000
# Ornstein-Uhlenbeck exploration-noise parameters (continuous action spaces)
OU_THETA = 0.15
OU_MU = 0.
OU_SIGMA = 0.3
# Epsilon bounds for the greedy exploration policy (discrete action spaces)
MIN_EPSILON = 0.1
MAX_EPSILON = 1
#================
# parameters for evaluate.
#================
# evaluate every EVAL_PERIODS episodes
EVAL_PERIODS = 100
# number of episodes averaged per evaluation
EVAL_EPISODES = 10
FLAGS = flags.FLAGS
# Directory for storing gym results
MONITOR_DIR = './results/{}/{}/gym_ddpg'.format(FLAGS.env_name, DATETIME)
# Directory for storing tensorboard summary results
SUMMARY_DIR = './results/{}/{}/tf_ddpg'.format(FLAGS.env_name, DATETIME)
# ================================
# MAIN
# ================================
def main(_):
    """Build the environment, networks, replay buffer and noise process,
    then run DDPG training.

    Args:
        _: unused positional argument supplied by tf.app.run().
    """
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        env = gym.make(FLAGS.env_name)
        if FLAGS.env_monitor:
            if not FLAGS.env_render:
                # Video capture needs rendering; disable it when not rendering.
                env = Monitor(env, MONITOR_DIR, video_callable=False, force=True)
            else:
                env = Monitor(env, MONITOR_DIR, force=True)

        state_dim = env.observation_space.shape
        try:
            # Continuous (Box) action spaces expose .shape and .high.
            action_dim = env.action_space.shape[0]
            action_bound = env.action_space.high
            # Ensure action bound is symmetric around zero.
            assert np.all(env.action_space.high == -env.action_space.low)
            action_type = 'Continuous'
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.  Discrete spaces raise above (no .shape[0]);
            # a failed symmetry assert also lands here, as before.
            action_dim = env.action_space.n
            action_bound = None
            action_type = 'Discrete'
        print(action_type)

        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             ACTOR_LEARNING_RATE, TAU, action_type)
        critic = CriticNetwork(sess, state_dim, action_dim, action_bound,
                               CRITIC_LEARNING_RATE, TAU, actor.get_num_trainable_vars(), action_type)

        # Initialize replay memory
        replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)

        # Exploration noise: OU process for continuous actions,
        # annealed epsilon-greedy for discrete ones.
        if action_type == 'Continuous':
            noise = OrnsteinUhlenbeckProcess(OU_THETA, mu=OU_MU, sigma=OU_SIGMA, n_steps_annealing=EXPLORATION_EPISODES)
        else:
            noise = GreedyPolicy(action_dim, EXPLORATION_EPISODES, MIN_EPSILON, MAX_EPSILON)

        agent = DDPGAgent(sess, action_type, actor, critic, GAMMA, env, replay_buffer, noise=noise, exploration_episodes=EXPLORATION_EPISODES,\
                          max_episodes=MAX_EPISODES, max_steps_episode=MAX_STEPS_EPISODE, warmup_steps=WARMUP_STEPS,\
                          mini_batch=FLAGS.mini_batch, eval_episodes=EVAL_EPISODES, eval_periods=EVAL_PERIODS, \
                          env_render=FLAGS.env_render, summary_dir=SUMMARY_DIR)
        agent.train()
        env.close()

if __name__ == '__main__':
    tf.app.run()
| [
"src.agent.ddpg_agent.DDPGAgent",
"src.explorationnoise.GreedyPolicy",
"src.network.ddpg_network.ActorNetwork",
"src.replaybuffer.ReplayBuffer",
"datetime.datetime.now",
"src.explorationnoise.OrnsteinUhlenbeckProcess",
"gym.wrappers.Monitor",
"tensorflow.ConfigProto",
"numpy.all",
"tensorflow.GPUO... | [((2361, 2393), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (2374, 2393), True, 'import tensorflow as tf\n'), ((4406, 4418), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (4416, 4418), True, 'import tensorflow as tf\n'), ((1024, 1047), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1045, 1047), False, 'import datetime\n'), ((2485, 2509), 'gym.make', 'gym.make', (['FLAGS.env_name'], {}), '(FLAGS.env_name)\n', (2493, 2509), False, 'import gym\n'), ((3235, 3333), 'src.network.ddpg_network.ActorNetwork', 'ActorNetwork', (['sess', 'state_dim', 'action_dim', 'action_bound', 'ACTOR_LEARNING_RATE', 'TAU', 'action_type'], {}), '(sess, state_dim, action_dim, action_bound, ACTOR_LEARNING_RATE,\n TAU, action_type)\n', (3247, 3333), False, 'from src.network.ddpg_network import CriticNetwork, ActorNetwork\n'), ((3597, 3635), 'src.replaybuffer.ReplayBuffer', 'ReplayBuffer', (['BUFFER_SIZE', 'RANDOM_SEED'], {}), '(BUFFER_SIZE, RANDOM_SEED)\n', (3609, 3635), False, 'from src.replaybuffer import ReplayBuffer\n'), ((3922, 4301), 'src.agent.ddpg_agent.DDPGAgent', 'DDPGAgent', (['sess', 'action_type', 'actor', 'critic', 'GAMMA', 'env', 'replay_buffer'], {'noise': 'noise', 'exploration_episodes': 'EXPLORATION_EPISODES', 'max_episodes': 'MAX_EPISODES', 'max_steps_episode': 'MAX_STEPS_EPISODE', 'warmup_steps': 'WARMUP_STEPS', 'mini_batch': 'FLAGS.mini_batch', 'eval_episodes': 'EVAL_EPISODES', 'eval_periods': 'EVAL_PERIODS', 'env_render': 'FLAGS.env_render', 'summary_dir': 'SUMMARY_DIR'}), '(sess, action_type, actor, critic, GAMMA, env, replay_buffer,\n noise=noise, exploration_episodes=EXPLORATION_EPISODES, max_episodes=\n MAX_EPISODES, max_steps_episode=MAX_STEPS_EPISODE, warmup_steps=\n WARMUP_STEPS, mini_batch=FLAGS.mini_batch, eval_episodes=EVAL_EPISODES,\n eval_periods=EVAL_PERIODS, env_render=FLAGS.env_render, summary_dir=\n SUMMARY_DIR)\n', (3931, 4301), False, 'from 
src.agent.ddpg_agent import DDPGAgent\n'), ((2967, 3021), 'numpy.all', 'np.all', (['(env.action_space.high == -env.action_space.low)'], {}), '(env.action_space.high == -env.action_space.low)\n', (2973, 3021), True, 'import numpy as np\n'), ((3696, 3800), 'src.explorationnoise.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', (['OU_THETA'], {'mu': 'OU_MU', 'sigma': 'OU_SIGMA', 'n_steps_annealing': 'EXPLORATION_EPISODES'}), '(OU_THETA, mu=OU_MU, sigma=OU_SIGMA,\n n_steps_annealing=EXPLORATION_EPISODES)\n', (3720, 3800), False, 'from src.explorationnoise import OrnsteinUhlenbeckProcess, GreedyPolicy\n'), ((3831, 3903), 'src.explorationnoise.GreedyPolicy', 'GreedyPolicy', (['action_dim', 'EXPLORATION_EPISODES', 'MIN_EPSILON', 'MAX_EPSILON'], {}), '(action_dim, EXPLORATION_EPISODES, MIN_EPSILON, MAX_EPSILON)\n', (3843, 3903), False, 'from src.explorationnoise import OrnsteinUhlenbeckProcess, GreedyPolicy\n'), ((2421, 2460), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (2435, 2460), True, 'import tensorflow as tf\n'), ((2600, 2659), 'gym.wrappers.Monitor', 'Monitor', (['env', 'MONITOR_DIR'], {'video_callable': '(False)', 'force': '(True)'}), '(env, MONITOR_DIR, video_callable=False, force=True)\n', (2607, 2659), False, 'from gym.wrappers import Monitor\n'), ((2700, 2737), 'gym.wrappers.Monitor', 'Monitor', (['env', 'MONITOR_DIR'], {'force': '(True)'}), '(env, MONITOR_DIR, force=True)\n', (2707, 2737), False, 'from gym.wrappers import Monitor\n')] |
import numpy as np

# Reference matrix: entry (i, j) in 1-based indices is -0.3*i + 1j*(j*j + 0.4).
a_soll = np.zeros((1000, 20), dtype=np.complex64)
for ind in range(a_soll.shape[0]):
    for jnd in range(a_soll.shape[1]):
        i = ind + 1
        j = jnd + 1
        a_soll[ind, jnd] = -i * 0.3 + 1j * (j * j + 0.4)

# Reference vector: entry i in 1-based indices is -0.3*i + 1j*(i + 0.4).
b_soll = np.zeros(1200, dtype=np.complex64)
for ind in range(b_soll.shape[0]):
    i = ind + 1
    b_soll[ind] = -i * 0.3 + 1j * (i + 0.4)

# Arrays under test, produced elsewhere.
a = np.load("mtx.npy")
b = np.load("vec.npy")

# Report the maximum relative error.  Fixed: the denominator must be the
# magnitude |a_soll|, not the complex value itself — dividing by a complex
# array yields a complex "error" whose np.max is taken lexicographically
# (real part first), which is not a meaningful error metric.
print("A: ")
print(np.max(np.abs(a - a_soll) / np.abs(a_soll)))
print("B: ")
print(np.max(np.abs(b - b_soll) / np.abs(b_soll)))
| [
"numpy.abs",
"numpy.zeros",
"numpy.load"
] | [((31, 71), 'numpy.zeros', 'np.zeros', (['(1000, 20)'], {'dtype': 'np.complex64'}), '((1000, 20), dtype=np.complex64)\n', (39, 71), True, 'import numpy as np\n'), ((254, 288), 'numpy.zeros', 'np.zeros', (['(1200)'], {'dtype': 'np.complex64'}), '(1200, dtype=np.complex64)\n', (262, 288), True, 'import numpy as np\n'), ((393, 411), 'numpy.load', 'np.load', (['"""mtx.npy"""'], {}), "('mtx.npy')\n", (400, 411), True, 'import numpy as np\n'), ((417, 435), 'numpy.load', 'np.load', (['"""vec.npy"""'], {}), "('vec.npy')\n", (424, 435), True, 'import numpy as np\n'), ((463, 481), 'numpy.abs', 'np.abs', (['(a - a_soll)'], {}), '(a - a_soll)\n', (469, 481), True, 'import numpy as np\n'), ((519, 537), 'numpy.abs', 'np.abs', (['(b - b_soll)'], {}), '(b - b_soll)\n', (525, 537), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.