code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
:mod:`orion.algo.dehb.dehb -- Evolutionary Hyperband for Scalable,
Robust and Efficient Hyperparameter Optimization
============================================
Modern machine learning algorithms crucially rely on several design
decisions to achieve strong performance, making the problem of Hyper-
parameter Optimization (HPO) more important than ever. Here, we combine
the advantages of the popular bandit-based HPO method Hyperband (HB) and
the evolutionary search approach of Differential Evolution (DE) to yield
a new HPO method which we call DEHB. Comprehensive results on a very
broad range of HPO problems, as well as a wide range of tabular benchmarks
from neural architecture search, demonstrate that DEHB achieves strong
performance far more robustly than all previous HPO methods we are aware of,
especially for high-dimensional problems with discrete input dimensions.
For example, DEHB is up to 1000x faster than random search.
It is also efficient in computational time, conceptually simple and easy to
implement, positioning it well to become a new default HPO method.
<NAME>, <NAME>, <NAME>, https://arxiv.org/abs/2105.09821
"""
import logging
from collections import OrderedDict, defaultdict
from copy import deepcopy
from typing import List, Optional
import numpy as np
from orion.algo.dehb.brackets import SHBracketManager
from orion.algo.dehb.logger import remove_loguru
remove_loguru()
from dehb.optimizers import DEHB as DEHBImpl
from orion.algo.base import BaseAlgorithm
from orion.algo.space import Space
from orion.core.utils import format_trials
from orion.core.worker.trial import Trial
from sspace.convert import convert_space
from sspace.convert import transform as to_orion
logger = logging.getLogger(__name__)
class UnsupportedConfiguration(Exception):
    """Raised when a configuration value outside the supported set is given."""
# Error message raised when the search space has no fidelity dimension,
# which DEHB needs to derive its Hyperband budget schedule.
SPACE_ERROR = """
DEHB cannot be used if space does not contain a fidelity dimension.
"""
# Differential-evolution mutation strategies accepted by DEHB.
MUTATION_STRATEGIES = [
    "rand1",
    "rand2dir",
    "randtobest1",
    "currenttobest1",
    "best1",
    "best2",
    "rand2",
]
# Crossover strategies: binomial ("bin") or exponential ("exp").
CROSSOVER_STRATEGY = [
    "bin",
    "exp",
]
# Ways of repairing out-of-bounds values after mutation: resample or clip.
FIX_MODES = ["random", "clip"]
# pylint: disable=too-many-public-methods
class DEHB(DEHBImpl, BaseAlgorithm):
    """
    Parameters
    ----------
    space: `orion.algo.space.Space`
        Optimisation space with priors for each dimension.
    seed: None, int or sequence of int
        Seed for the random number generator used to sample new trials.
        Default: ``None``
    mutation_factor: float
        Mutation probability
        Default: ``0.5``
    crossover_prob: float
        Crossover probability
        Default: ``0.5``
    mutation_strategy: str
        Mutation strategy rand1, rand2dir randtobest1 currenttobest1 best1 best2 rand2
        Default: ``'rand1'``
    crossover_strategy: str
        Crossover strategy bin or exp
        Default: ``'bin'``
    boundary_fix_type: str
        Boundary fix method, clip or random
        Default: ``'random'``
    min_clip: float
        Min clip when boundary fix method is clip
        Default: ``None``
    max_clip: float
        Max clip when boundary fix method is clip
        Default: ``None``
    max_age: Optional[int]
        Default: ``np.inf``
    """
    # Orion algorithm capability flags: no type/distribution requirement,
    # but the space must be provided in flattened form.
    requires_type = None
    requires_dist = None
    requires_shape = "flattened"
    # pylint: disable=too-many-arguments
    def __init__(
        self,
        space: Space = None,
        seed: Optional[int] = None,
        mutation_factor: float = 0.5,
        crossover_prob: float = 0.5,
        mutation_strategy: str = "rand1",
        crossover_strategy: str = "bin",
        boundary_fix_type: str = "random",
        min_clip: Optional[int] = None,
        max_clip: Optional[int] = None,
        max_age: Optional[int] = np.inf,
    ):
        # Sanity check: reject unsupported strategy / fix-mode names early.
        if mutation_strategy not in MUTATION_STRATEGIES:
            raise UnsupportedConfiguration(
                f"Mutation strategy {mutation_strategy} not supported"
            )
        if crossover_strategy not in CROSSOVER_STRATEGY:
            raise UnsupportedConfiguration(
                f"Crossover strategy {crossover_strategy} not supported"
            )
        if boundary_fix_type not in FIX_MODES:
            raise UnsupportedConfiguration(
                f"Boundary fix type {boundary_fix_type} not supported"
            )
        # We need the transformed algo to initialize DEHB
        # so store the arguments for after the constructor
        self._original = space
        BaseAlgorithm.__init__(
            self,
            space,
            seed=seed,
            mutation_factor=mutation_factor,
            crossover_prob=crossover_prob,
            mutation_strategy=mutation_strategy,
            crossover_strategy=crossover_strategy,
            boundary_fix_type=boundary_fix_type,
            min_clip=min_clip,
            max_clip=max_clip,
            max_age=max_age,
        )
        # Extract fidelity information; DEHB derives its budget schedule
        # (min/max budget and eta) from the fidelity dimension.
        fidelity_index = self.fidelity_index
        if fidelity_index is None:
            raise RuntimeError(SPACE_ERROR)
        fidelity_dim = space[fidelity_index]
        # Add derived arguments for when we will init DEHB (in _initialize)
        self.init_kwargs = dict(
            mutation_factor=mutation_factor,
            crossover_prob=crossover_prob,
            strategy=f"{mutation_strategy}_{crossover_strategy}",
            min_clip=min_clip,
            max_clip=max_clip,
            boundary_fix_type=boundary_fix_type,
            max_age=max_age,
            # Derived
            min_budget=fidelity_dim.low,
            max_budget=fidelity_dim.high,
            eta=fidelity_dim.base,
            # Disable their Dask Integration
            n_workers=1,
            client=None,
            # No need for the user function
            f=None,
        )
        # rung / cs are filled in by _initialize() once the space is set.
        self.rung = None
        self.cs = None
        self.seed = seed
        # Maps Orion trial ids to the DEHB job_info dicts sampled for them
        # (duplicates may map several jobs to one trial, see suggest()).
        self.job_infos = defaultdict(list)
        # Maps trial ids to their (cost, fitness) results for replaying
        # onto duplicated jobs.
        self.job_results = dict()
        # Counts duplicated jobs per budget, shared with the bracket manager.
        self.duplicates = defaultdict(int)
    def f_objective(self, *args, **kwargs):
        """Not needed for Orion, the objective is called by the worker"""
        pass
    @property
    def space(self) -> Space:
        """Space of the optimizer"""
        return self._space
    @space.setter
    def space(self, space: Space) -> None:
        """Setter of optimizer's space.
        We need the transformed algo to initialize DEHB
        """
        self._space = space
        self._initialize()
    def _initialize(self) -> None:
        # Convert the Orion space to a ConfigSpace representation.
        self.cs = convert_space(self.space)
        dimensions = len(self.cs.get_hyperparameters())
        # Initialize the DEHB backend with the stored kwargs.
        self.seed_rng(self.seed)
        DEHBImpl.__init__(
            self,
            cs=self.cs,
            configspace=True,
            dimensions=dimensions,
            **self.init_kwargs,
        )
        self.rung = len(self.budgets)
    def _start_new_bracket(self):
        """Starts a new bracket based on Hyperband"""
        # start new bracket
        self.iteration_counter += (
            1  # iteration counter gives the bracket count or bracket ID
        )
        n_configs, budgets = self.get_next_iteration(self.iteration_counter)
        bracket = SHBracketManager(
            n_configs=n_configs,
            budgets=budgets,
            bracket_id=self.iteration_counter,
            duplicates=self.duplicates,
        )
        self.active_brackets.append(bracket)
        return bracket
    @property
    def state_dict(self) -> dict:
        """Return a state dict that can be used to reset the state of the algorithm."""
        state_dict = super(DEHB, self).state_dict
        state = dict(self.__dict__)
        # Drop unpicklable/transient members before snapshotting.
        state["client"] = None
        state["logger"] = None
        # Capture both the global numpy RNG and ConfigSpace's RNG so
        # sampling is reproducible after set_state().
        state_dict["numpy_GlobalState"] = np.random.get_state()
        state_dict["numpy_RandomState"] = self.cs.random.get_state()
        state_dict["DEHB_statedict"] = state
        return deepcopy(state_dict)
    def set_state(self, state_dict: dict) -> None:
        """Reset the state of the algorithm based on the given state_dict
        :param state_dict: Dictionary representing state of an algorithm
        """
        BaseAlgorithm.set_state(self, state_dict)
        # Restore every attribute we snapshot in state_dict().
        for k, v in state_dict["DEHB_statedict"].items():
            if hasattr(self, k):
                setattr(self, k, v)
            else:
                logger.error("DEHB does not have attribute %s", k)
        np.random.set_state(state_dict["numpy_GlobalState"])
        self.cs.random.set_state(state_dict["numpy_RandomState"])
    def seed_rng(self, seed: int) -> None:
        """Seed the state of the random number generator.
        Parameters
        ----------
        seed: int
            Integer seed for the random number generator.
        """
        np.random.seed(seed)
        # The ConfigSpace object may not exist yet when called from __init__.
        if hasattr(self, "cs"):
            self.cs.seed(np.random.randint(np.iinfo(np.int32).max))
    def init_population(self, pop_size: int) -> List[np.array]:
        """Generate our initial population of sample
        Parameters
        ----------
        pop_size: int
            Number of samples to generate
        """
        population = self.cs.sample_configuration(size=pop_size)
        # DEHB works on flat vectors, not ConfigSpace configurations.
        population = [
            self.configspace_to_vector(individual) for individual in population
        ]
        return population
    def register_job(self, job_info: dict) -> None:
        """Register to DEHB's backend"""
        # pass information of job submission to Bracket Manager
        for bracket in self.active_brackets:
            if bracket.bracket_id == job_info["bracket_id"]:
                # registering is IMPORTANT for Bracket Manager to perform SH
                bracket.register_job(job_info["budget"])
                break
    @property
    def is_done(self) -> bool:
        """Return True, if an algorithm holds that there can be no further improvement."""
        # Delegates to DEHB's budget-exhaustion check with the bracket limit.
        return self._is_run_budget_exhausted(None, self.rung, None)
    def sample_to_trial(self, sample: np.array, fidelity: int) -> Trial:
        """Convert a ConfigSpace sample into a trial"""
        config = self.vector_to_configspace(sample)
        hps = OrderedDict()
        # The fidelity dimension is not part of the ConfigSpace config;
        # inject the requested budget for it.
        for k, v in self.space.items():
            if v.type == "fidelity":
                hps[k] = fidelity
            else:
                hps[k] = config[k]
        return format_trials.dict_to_trial(to_orion(hps), self.space)
    def suggest(self, num: int) -> List[Trial]:
        """Suggest a `num`ber of new sets of parameters.
        Parameters
        ----------
        num: int, optional
            Number of trials to suggest. The algorithm may return less than the number of trials
            requested.
        Returns
        -------
        list of trials or None
            A list of trials representing values suggested by the algorithm. The algorithm may opt
            out if it cannot make a good suggestion at the moment (it may be waiting for other
            trials to complete), in which case it will return None.
        Notes
        -----
        New parameters must be compliant with the problem's domain `orion.algo.space.Space`.
        """
        trials = []
        while len(trials) < num:
            if self.is_done:
                break
            job_info = DEHBImpl._get_next_job(self)
            job_info["done"] = 0
            # We are generating trials for a bracket that is too high
            if self.rung is not None and job_info["bracket_id"] >= self.rung:
                break
            # Generate Orion trial
            new_trial = self.sample_to_trial(
                job_info["config"], fidelity=job_info["budget"]
            )
            # DEHB may sample 2 very similar trials that get the same ID because,
            # for instance, the precision is low and rounding the HP values leads
            # to 2 identical trials. In this case has_suggested(new_trial)
            # is True, and the trial is discarded.
            # It's fine to discard the trial, but we should keep track of the job_info.
            # DEHB does not know that we discarded the trial and will be waiting for the
            # result. We need to keep track of both job_infos so that when we have
            # the result of the first trial, we assign it to the second job_info as well.
            if not self.has_suggested(new_trial):
                # Store metadata
                self.job_infos[self.get_id(new_trial)].append(job_info)
                self.register_job(job_info)
                # Standard Orion
                self.register(new_trial)
                trials.append(new_trial)
                logger.debug("Suggest new trials %s", new_trial)
            else:
                logger.debug("Already suggested %s", new_trial)
                # Do we already have a result for this trial ?
                result = self.job_results.get(self.get_id(new_trial))
                # Keep track of duplicated jobs per brackets
                # Bracket is only done after we reach the budget for unique
                # jobs
                self.duplicates[str(job_info["budget"])] += 1
                # if so observe it right now and discard
                if result is not None:
                    self._dehb_observe(job_info, *result)
                else:
                    # else we need to keep track of it to observe it later
                    self.job_infos[self.get_id(new_trial)].append(job_info)
        return trials
    def observe(self, trials: List[Trial]) -> None:
        """Observe the `trials` new state of result.
        Parameters
        ----------
        trials: list of ``orion.core.worker.trial.Trial``
           Trials from a `orion.algo.space.Space`.
        """
        for trial in trials:
            # Ignore trials we never suggested or already consumed.
            if not self.has_suggested(trial):
                logger.debug("Ignore unseen trial %s", trial)
                continue
            if self.has_observed(trial):
                logger.debug("Ignore already observed trial %s", trial)
                continue
            self.register(trial)
            # Only completed trials carry an objective DEHB can use.
            if trial.status == "completed":
                self.observe_one(trial)
                logger.debug(
                    "Observe trial %s (Remaining %d)", trial, len(self.job_infos)
                )
    def observe_one(self, trial: Trial) -> None:
        """Observe a single trial"""
        # Get all the jobs sampled by DEHB, it might be more than one
        job_infos = self.job_infos.get(self.get_id(trial), [])
        if len(job_infos) == 0:
            # this should be 100% unreachable because we check
            # if the trial was suggested inside `observe`
            logger.error("Could not find trial %s", self.get_id(trial))
            return
        # Yes, it is odd; fidelity is cost and fitness is objective
        cost = trial.params[self.fidelity_index]
        fitness = trial.objective.value
        # Store the result for later, if we sample
        # a trial that is too alike for us to evaluate
        results = cost, fitness
        self.job_results[self.get_id(trial)] = results
        # Replay the result onto every job_info that maps to this trial.
        for job_info in job_infos:
            cost = job_info["budget"]
            self._dehb_observe(job_info, cost, fitness)
    def _dehb_observe(self, job_info, cost, fitness):
        # Feed one (cost, fitness) result back into DEHB's DE/bracket state.
        config = job_info["config"]
        budget = job_info["budget"]
        parent_id = job_info["parent_id"]
        bracket_id = job_info["bracket_id"]
        info = dict()
        # Notify the owning bracket that this job is finished.
        for bracket in self.active_brackets:
            if bracket.bracket_id == bracket_id:
                # bracket job complete
                # IMPORTANT to perform synchronous SH
                bracket.complete_job(budget)
        # carry out DE selection: keep the child if it is at least as fit
        # as its parent (lower fitness is better here).
        if fitness <= self.de[budget].fitness[parent_id]:
            self.de[budget].population[parent_id] = config
            self.de[budget].fitness[parent_id] = fitness
        # updating incumbents
        if self.de[budget].fitness[parent_id] < self.inc_score:
            self._update_incumbents(
                config=self.de[budget].population[parent_id],
                score=self.de[budget].fitness[parent_id],
                info=info,
            )
        # book-keeping
        self._update_trackers(
            traj=self.inc_score,
            runtime=cost,
            history=(config.tolist(), float(fitness), float(cost), float(budget), info),
        )
| [
"copy.deepcopy",
"numpy.random.seed",
"sspace.convert.convert_space",
"numpy.random.get_state",
"orion.algo.dehb.brackets.SHBracketManager",
"sspace.convert.transform",
"numpy.iinfo",
"numpy.random.set_state",
"collections.defaultdict",
"orion.algo.base.BaseAlgorithm.set_state",
"orion.algo.base... | [((1413, 1428), 'orion.algo.dehb.logger.remove_loguru', 'remove_loguru', ([], {}), '()\n', (1426, 1428), False, 'from orion.algo.dehb.logger import remove_loguru\n'), ((1737, 1764), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1754, 1764), False, 'import logging\n'), ((4587, 4884), 'orion.algo.base.BaseAlgorithm.__init__', 'BaseAlgorithm.__init__', (['self', 'space'], {'seed': 'seed', 'mutation_factor': 'mutation_factor', 'crossover_prob': 'crossover_prob', 'mutation_strategy': 'mutation_strategy', 'crossover_strategy': 'crossover_strategy', 'boundary_fix_type': 'boundary_fix_type', 'min_clip': 'min_clip', 'max_clip': 'max_clip', 'max_age': 'max_age'}), '(self, space, seed=seed, mutation_factor=\n mutation_factor, crossover_prob=crossover_prob, mutation_strategy=\n mutation_strategy, crossover_strategy=crossover_strategy,\n boundary_fix_type=boundary_fix_type, min_clip=min_clip, max_clip=\n max_clip, max_age=max_age)\n', (4609, 4884), False, 'from orion.algo.base import BaseAlgorithm\n'), ((6014, 6031), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6025, 6031), False, 'from collections import OrderedDict, defaultdict\n'), ((6092, 6108), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (6103, 6108), False, 'from collections import OrderedDict, defaultdict\n'), ((6663, 6688), 'sspace.convert.convert_space', 'convert_space', (['self.space'], {}), '(self.space)\n', (6676, 6688), False, 'from sspace.convert import convert_space\n'), ((6809, 6909), 'dehb.optimizers.DEHB.__init__', 'DEHBImpl.__init__', (['self'], {'cs': 'self.cs', 'configspace': '(True)', 'dimensions': 'dimensions'}), '(self, cs=self.cs, configspace=True, dimensions=dimensions,\n **self.init_kwargs)\n', (6826, 6909), True, 'from dehb.optimizers import DEHB as DEHBImpl\n'), ((7346, 7468), 'orion.algo.dehb.brackets.SHBracketManager', 'SHBracketManager', ([], {'n_configs': 'n_configs', 'budgets': 'budgets', 
'bracket_id': 'self.iteration_counter', 'duplicates': 'self.duplicates'}), '(n_configs=n_configs, budgets=budgets, bracket_id=self.\n iteration_counter, duplicates=self.duplicates)\n', (7362, 7468), False, 'from orion.algo.dehb.brackets import SHBracketManager\n'), ((7920, 7941), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (7939, 7941), True, 'import numpy as np\n'), ((8072, 8092), 'copy.deepcopy', 'deepcopy', (['state_dict'], {}), '(state_dict)\n', (8080, 8092), False, 'from copy import deepcopy\n'), ((8313, 8354), 'orion.algo.base.BaseAlgorithm.set_state', 'BaseAlgorithm.set_state', (['self', 'state_dict'], {}), '(self, state_dict)\n', (8336, 8354), False, 'from orion.algo.base import BaseAlgorithm\n'), ((8577, 8629), 'numpy.random.set_state', 'np.random.set_state', (["state_dict['numpy_GlobalState']"], {}), "(state_dict['numpy_GlobalState'])\n", (8596, 8629), True, 'import numpy as np\n'), ((8934, 8954), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8948, 8954), True, 'import numpy as np\n'), ((10315, 10328), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10326, 10328), False, 'from collections import OrderedDict, defaultdict\n'), ((10540, 10553), 'sspace.convert.transform', 'to_orion', (['hps'], {}), '(hps)\n', (10548, 10553), True, 'from sspace.convert import transform as to_orion\n'), ((11448, 11476), 'dehb.optimizers.DEHB._get_next_job', 'DEHBImpl._get_next_job', (['self'], {}), '(self)\n', (11470, 11476), True, 'from dehb.optimizers import DEHB as DEHBImpl\n'), ((9030, 9048), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (9038, 9048), True, 'import numpy as np\n')] |
import numpy as num
from direct.showbase import DirectObject
from panda3d.core import LVector3f
# Degree <-> radian conversion factors (approx. pi/180 and 180/pi).
TO_RAD = 0.017453293
TO_DEG = 57.295779513
class FreeCameraControl(DirectObject.DirectObject):
    """Orbital camera controller for Panda3D.

    Keeps the camera on a sphere of radius ``_r`` around ``target``,
    parameterized by spherical angles ``_phi``/``_theta``. Supports
    mouse control (spin with middle button, slide with right button,
    zoom with the wheel) or keyboard control (arrows/ZQSD-style keys).
    """
    def __init__(self, base_object, cam_node=None):
        # base_object is expected to be a ShowBase-like app (provides
        # disableMouse, camera, render, mouseWatcherNode, taskMgr).
        DirectObject.DirectObject.__init__(self)
        self.base_object = base_object
        base_object.disableMouse()
        if cam_node is None:
            self.camera = base_object.camera
        else:
            self.camera = cam_node
        self.camera.reparent_to(self.base_object.render)
        self.mwn = base_object.mouseWatcherNode
        # Point the camera orbits around.
        self.target = LVector3f(0, 0, 0)
        # Spherical coordinates of the camera relative to the target.
        self._phi = 0.
        self._theta = num.pi / 2
        self._r = 100.
        # u follows the line of sight
        self._u = LVector3f(0.0, 0.0, 0.0)
        # n is othogonal to the camera
        self._n = LVector3f(0.0, 0.0, 0.0)
        # k gives the direction of the camera in the X, Y plane
        self._k = LVector3f(0., 0., 0.)
        self.update_cam()
        # Last observed mouse position, used to compute spin deltas.
        self._mx, self._my = 0, 0
        self.can_spin_vertically = True
        self.keyboard = False
        self.spinning = False
        self.sliding = False
        # Speed factors for panning, sliding and keyboard motion.
        self.velocity = 5
        self.slide_velocity = 30
        self.keyboard_velocity = 1.5
        self.keyboard_spin = 0.03
        # Fraction of the screen border that triggers edge panning.
        self.panZoneSize = .15
        self.use_panning = False
        self._slide_x, self._slide_y = 0, 0
        self._config_mousse()
        # Per-frame task that polls input and moves the camera.
        self.base_object.taskMgr.add(self.cam_move_task, 'cam_move_task')
    def set_fov(self, fov):
        """Set the camera lens field of view."""
        self.base_object.camLens.setFov(fov)
    def use_keyboard(self):
        """Switch input handling to keyboard mode."""
        self.keyboard = True
        self._config_keyboard()
    def use_mousse(self):
        """Switch input handling to mouse mode."""
        self.keyboard = False
        self._config_mousse()
    def _config_keyboard(self):
        # Replace all mouse bindings with the keyboard ones.
        self.ignore_all()
        self.accept('space', self.space_key)
        self.accept('enter', self.enter_key)
    def _config_mousse(self):
        # Middle button spins, right button slides, wheel zooms.
        self.accept("mouse2", self.start_spin)
        self.accept("mouse2-up", self.stop_spin)
        self.accept("mouse3", self.start_sliding)
        self.accept("mouse3-up", self.stop_sliding)
        self.accept("wheel_up", lambda: self.set_r(0.9 * self._r))
        self.accept("wheel_down", lambda: self.set_r(1.1 * self._r))
    def reset_camera(self):
        """Re-center the orbit target at the origin."""
        self.target = LVector3f(0, 0, 0)
    def start_sliding(self):
        """Begin a slide gesture, remembering the anchor mouse position."""
        self.sliding = True
        if self.mwn.hasMouse():
            mpos = self.mwn.getMouse()
            self._slide_x = mpos.getX()
            self._slide_y = mpos.getY()
    def stop_sliding(self):
        """End the slide gesture."""
        self.sliding = False
        self._slide_x = 0.0
        self._slide_y = 0.0
    def start_spin(self):
        """Begin a spin gesture."""
        self.spinning = True
    def stop_spin(self):
        """End the spin gesture."""
        self.spinning = False
    def set_r(self, r):
        """Set the orbit radius (zoom) and reposition the camera."""
        self._r = r
        self.look_to_target()
    def update_cam(self):
        """Recompute orientation and position from the spherical state."""
        self.update_euler_angles()
        self.look_to_target()
    def update_euler_angles(self):
        """Convert (_phi, _theta) to Panda3D heading/pitch and apply them."""
        h = 90 + self._phi * 180. / num.pi
        p = - self._theta * 180 / num.pi
        # Roll of 180 keeps the camera upright in this convention.
        self.camera.setHpr(h, p, 180)
        self.compute_unit_vectors()
    def compute_unit_vectors(self):
        """Refresh the local frame: _u line of sight, _n/_k in-plane axes."""
        self._u = LVector3f(-num.cos(self._phi) * num.cos(self._theta),
                          -num.sin(self._phi) * num.cos(self._theta),
                          -num.sin(self._theta))
        self._n = LVector3f(-num.sin(self._phi), num.cos(self._phi), 0.0)
        self._k = LVector3f(-num.cos(self._phi), -num.sin(self._phi), 0.0)
    def set_target(self, target):
        """Set the orbit target and reposition the camera around it."""
        self.target = target
        self.look_to_target()
    def look_to_target(self):
        """Place the camera at distance _r from the target along -_u."""
        self.camera.setPos(self.target - self._u * self._r)
    def set_theta(self, theta):
        """Set the polar angle and refresh the camera orientation."""
        self._theta = theta
        self.update_euler_angles()
    def set_phi(self, phi):
        """Set the azimuthal angle and refresh the camera orientation."""
        self._phi = phi
        self.update_euler_angles()
    def spin_around_target(self, d_phi=0.0, d_theta=0.0):
        """Rotate the camera around the target by the given angle deltas."""
        self._phi += d_phi
        self._theta += d_theta
        self.update_cam()
    def move(self, dx=0.0, dy=0.0):
        """Translate camera and target together in the horizontal plane.

        dx/dy are expressed in the camera's local screen axes and are
        rotated by _phi into world X/Y.
        """
        dv = LVector3f(-dx * num.sin(self._phi) - dy * num.cos(self._phi),
                       dx * num.cos(self._phi) - dy * num.sin(self._phi),
                       0.0)
        self.camera.setPos(self.camera.getPos() + dv)
        self.target = LVector3f(self.target + dv)
    def space_key(self):
        """Keyboard binding: re-center the camera target."""
        self.reset_camera()
    def enter_key(self):
        """Keyboard binding: target the currently picked object, if any."""
        # assumes base_object exposes a picker with get_selected_object()
        # -- TODO confirm against the application setup
        selected_object = self.base_object.picker.get_selected_object()
        if selected_object is not None:
            self.set_target(selected_object.getPos())
    def cam_move_task(self, task):
        """Per-frame Panda3D task: poll input state and move the camera."""
        if not self.keyboard and self.mwn.hasMouse():
            mpos = self.mwn.getMouse()
            if self.spinning:
                # Spin by the mouse delta since the previous frame.
                if self.can_spin_vertically:
                    self.spin_around_target(d_phi=self._mx - mpos.getX(), d_theta=- self._my + mpos.getY())
                else:
                    self.spin_around_target(d_phi=self._mx - mpos.getX())
            elif self.sliding:
                # Drag the camera/target pair, anchored at the slide start.
                self.move(dx=- self.slide_velocity * (mpos.getX() - self._slide_x),
                          dy=- self.slide_velocity * (mpos.getY() - self._slide_y))
                self._slide_x = mpos.getX()
                self._slide_y = mpos.getY()
            elif self.use_panning:
                # Edge panning: move when the cursor is near a screen border.
                dy = 0.
                dx = 0.
                if mpos.getY() > (1 - self.panZoneSize):
                    dy = mpos.getY() + self.panZoneSize - 1
                elif mpos.getY() < (-1 + self.panZoneSize):
                    dy = mpos.getY() + 1 - self.panZoneSize
                if mpos.getX() > (1 - self.panZoneSize):
                    dx = mpos.getX() + self.panZoneSize - 1
                elif mpos.getX() < (-1 + self.panZoneSize):
                    dx = mpos.getX() + 1 - self.panZoneSize
                if dx != 0.0 or dy != 0.0:
                    self.move(self.velocity * dx, self.velocity * dy)
            self._mx = mpos.getX()
            self._my = mpos.getY()
        elif self.keyboard:
            is_down = self.mwn.is_button_down
            if is_down('arrow_left') or is_down('q'):
                self.move(dx=- self.keyboard_velocity)
            if is_down('arrow_right') or is_down('d'):
                self.move(dx=self.keyboard_velocity)
            if is_down("arrow_up") or is_down('z'):
                self.move(dy=self.keyboard_velocity)
            if is_down('arrow_down') or is_down('s'):
                self.move(dy=-self.keyboard_velocity)
            if is_down('a'):
                self.spin_around_target(d_phi=self.keyboard_spin)
            if is_down('e'):
                self.spin_around_target(d_phi=-self.keyboard_spin)
            if is_down('page_up'):
                self.set_r(0.95 * self._r)
            if is_down('page_down'):
                self.set_r(1.05 * self._r)
        # Returning cont keeps the task scheduled every frame.
        return task.cont
| [
"numpy.sin",
"numpy.cos",
"direct.showbase.DirectObject.DirectObject.__init__",
"panda3d.core.LVector3f"
] | [((254, 294), 'direct.showbase.DirectObject.DirectObject.__init__', 'DirectObject.DirectObject.__init__', (['self'], {}), '(self)\n', (288, 294), False, 'from direct.showbase import DirectObject\n'), ((623, 641), 'panda3d.core.LVector3f', 'LVector3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (632, 641), False, 'from panda3d.core import LVector3f\n'), ((778, 802), 'panda3d.core.LVector3f', 'LVector3f', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (787, 802), False, 'from panda3d.core import LVector3f\n'), ((860, 884), 'panda3d.core.LVector3f', 'LVector3f', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (869, 884), False, 'from panda3d.core import LVector3f\n'), ((967, 991), 'panda3d.core.LVector3f', 'LVector3f', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (976, 991), False, 'from panda3d.core import LVector3f\n'), ((2344, 2362), 'panda3d.core.LVector3f', 'LVector3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2353, 2362), False, 'from panda3d.core import LVector3f\n'), ((4342, 4369), 'panda3d.core.LVector3f', 'LVector3f', (['(self.target + dv)'], {}), '(self.target + dv)\n', (4351, 4369), False, 'from panda3d.core import LVector3f\n'), ((3440, 3458), 'numpy.cos', 'num.cos', (['self._phi'], {}), '(self._phi)\n', (3447, 3458), True, 'import numpy as num\n'), ((3246, 3266), 'numpy.cos', 'num.cos', (['self._theta'], {}), '(self._theta)\n', (3253, 3266), True, 'import numpy as num\n'), ((3318, 3338), 'numpy.cos', 'num.cos', (['self._theta'], {}), '(self._theta)\n', (3325, 3338), True, 'import numpy as num\n'), ((3369, 3389), 'numpy.sin', 'num.sin', (['self._theta'], {}), '(self._theta)\n', (3376, 3389), True, 'import numpy as num\n'), ((3420, 3438), 'numpy.sin', 'num.sin', (['self._phi'], {}), '(self._phi)\n', (3427, 3438), True, 'import numpy as num\n'), ((3494, 3512), 'numpy.cos', 'num.cos', (['self._phi'], {}), '(self._phi)\n', (3501, 3512), True, 'import numpy as num\n'), ((3515, 3533), 'numpy.sin', 'num.sin', (['self._phi'], 
{}), '(self._phi)\n', (3522, 3533), True, 'import numpy as num\n'), ((3225, 3243), 'numpy.cos', 'num.cos', (['self._phi'], {}), '(self._phi)\n', (3232, 3243), True, 'import numpy as num\n'), ((3297, 3315), 'numpy.sin', 'num.sin', (['self._phi'], {}), '(self._phi)\n', (3304, 3315), True, 'import numpy as num\n'), ((4118, 4136), 'numpy.sin', 'num.sin', (['self._phi'], {}), '(self._phi)\n', (4125, 4136), True, 'import numpy as num\n'), ((4144, 4162), 'numpy.cos', 'num.cos', (['self._phi'], {}), '(self._phi)\n', (4151, 4162), True, 'import numpy as num\n'), ((4192, 4210), 'numpy.cos', 'num.cos', (['self._phi'], {}), '(self._phi)\n', (4199, 4210), True, 'import numpy as num\n'), ((4218, 4236), 'numpy.sin', 'num.sin', (['self._phi'], {}), '(self._phi)\n', (4225, 4236), True, 'import numpy as num\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 29 13:58:29 2018
@author: kristianeschenburg
"""
import numpy as np
def fisher(samples):
    """Apply the Fisher z-transform to correlation values.

    Computes ``0.5 * ln((1 + r) / (1 - r))`` element-wise, which maps
    correlations in (-1, 1) onto the real line.
    """
    numerator = 1. + samples
    denominator = 1. - samples
    return 0.5 * np.log(numerator / denominator)
def fisher_inv(samples):
    """Invert the Fisher z-transform, mapping z-values back to correlations.

    The inverse of ``0.5 * ln((1 + r) / (1 - r))`` is the hyperbolic
    tangent, applied element-wise.
    """
    correlations = np.tanh(samples)
    return correlations
"numpy.log",
"numpy.tanh"
] | [((420, 436), 'numpy.tanh', 'np.tanh', (['samples'], {}), '(samples)\n', (427, 436), True, 'import numpy as np\n'), ((260, 301), 'numpy.log', 'np.log', (['((1.0 + samples) / (1.0 - samples))'], {}), '((1.0 + samples) / (1.0 - samples))\n', (266, 301), True, 'import numpy as np\n')] |
import pickle
import torch.nn as nn
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
# from dataloader.mano.network.utils import *
# from dataloader.mano.network.utilsSmallFunctions import *
# from dataloader.mano.network.Const import *
def minusHomoVectors(v0, v1):
    """Subtract two homogeneous vectors and reset the homogeneous entry to 1.

    Works with both column layout ``(..., N, 1)`` and flat layout
    ``(..., N)``; the returned array is a new object, inputs are untouched.
    """
    diff = v0 - v1
    if diff.shape[-1] == 1:
        # Column vectors: the homogeneous coordinate is the last row.
        diff[..., -1, 0] = 1
    else:
        # Flat vectors: the homogeneous coordinate is the last element.
        diff[..., -1] = 1
    return diff
class MANO_SMPL(nn.Module):
def __init__(self, mano_pkl_path, ncomps = 10, flat_hand_mean=False,cuda=True,device='cuda'):
super(MANO_SMPL, self).__init__()
self.userotJoints=False
# Load the MANO_RIGHT.pkl
with open(mano_pkl_path, 'rb') as f:
model = pickle.load(f, encoding='latin1')
faces_mano = np.array(model['f'], dtype=int)
# Add new faces for the wrist part and let mano model waterproof
# for MANO_RIGHT.pkl
faces_addition = np.array([[38, 122, 92], [214, 79, 78], [239, 234, 122],
[122, 118, 239], [215, 108, 79], [279, 118, 117],
[117, 119, 279], [119, 108, 215], [120, 108, 119],
[119, 215, 279], [214, 215, 79], [118, 279, 239],
[121, 214, 78], [122, 234, 92]])
self.faces = np.concatenate((faces_mano, faces_addition), axis=0)
self.flat_hand_mean = flat_hand_mean
self.is_cuda = (torch.cuda.is_available() and cuda and device=='cuda')
np_v_template = np.array(model['v_template'], dtype=np.float)
np_v_template = torch.from_numpy(np_v_template).float()
#print('np_v_template',np_v_template.shape) #np_v_template torch.Size([778, 3])
self.size = [np_v_template.shape[0], 3]
np_shapedirs = np.array(model['shapedirs'], dtype=np.float)
self.num_betas = np_shapedirs.shape[-1]
np_shapedirs = np.reshape(np_shapedirs, [-1, self.num_betas]).T
#print('np_shapedirs',np_shapedirs.shape)#np_shapedirs (10, 2334)
np_shapedirs = torch.from_numpy(np_shapedirs).float()
# Adding new joints for the fingertips. Original MANO model provide only 16 skeleton joints.
np_J_regressor = model['J_regressor'].T.toarray()
np_J_addition = np.zeros((778, 5))
np_J_addition[745][0] = 1
np_J_addition[333][1] = 1
np_J_addition[444][2] = 1
np_J_addition[555][3] = 1
np_J_addition[672][4] = 1
np_J_regressor = np.concatenate((np_J_regressor, np_J_addition), axis=1)
np_J_regressor = torch.from_numpy(np_J_regressor).float()
np_hand_component = np.array(model['hands_components'], dtype=np.float)[:ncomps]
np_hand_component = torch.from_numpy(np_hand_component).float()
#print("np_hand_component",np_hand_component.shape)
np_hand_mean = np.array(model['hands_mean'], dtype=np.float)[np.newaxis,:]
if self.flat_hand_mean:
np_hand_mean = np.zeros_like(np_hand_mean)
np_hand_mean = torch.from_numpy(np_hand_mean).float()
np_posedirs = np.array(model['posedirs'], dtype=np.float)
num_pose_basis = np_posedirs.shape[-1]
np_posedirs = np.reshape(np_posedirs, [-1, num_pose_basis]).T
np_posedirs = torch.from_numpy(np_posedirs).float()
self.parents = np.array(model['kintree_table'])[0].astype(np.int32)
#print('self.parents',self.parents)
np_weights = np.array(model['weights'], dtype=np.float)
vertex_count = np_weights.shape[0]
vertex_component = np_weights.shape[1]
np_weights = torch.from_numpy(np_weights).float().reshape(-1, vertex_count, vertex_component)
e3 = torch.eye(3).float()
np_rot_x = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=np.float)
np_rot_x = np.reshape(np.tile(np_rot_x, [1, 1]), [1, 3, 3])
self.base_rot_mat_x = Variable(torch.from_numpy(np_rot_x).float())
joint_x = torch.matmul(np_v_template[:, 0], np_J_regressor)
joint_y = torch.matmul(np_v_template[:, 1], np_J_regressor)
joint_z = torch.matmul(np_v_template[:, 2], np_J_regressor)
self.tjoints = torch.stack([joint_x, joint_y, joint_z, torch.ones_like(joint_x)], dim=1).numpy()
self.J = torch.stack([joint_x, joint_y, joint_z], dim=1).numpy()
self.bJ=torch.tensor(self.J.reshape(1,21,3),dtype=torch.float32)
if self.is_cuda:
np_v_template = np_v_template.cuda()
np_shapedirs = np_shapedirs.cuda()
np_J_regressor = np_J_regressor.cuda()
np_hand_component = np_hand_component.cuda()
np_hand_mean = np_hand_mean.cuda()
np_posedirs = np_posedirs.cuda()
e3 = e3.cuda()
np_weights = np_weights.cuda()
self.base_rot_mat_x = self.base_rot_mat_x.cuda()
'''
np_hand_component torch.Size([45, 45])
np_v_template torch.Size([778, 3])
np_shapedirs torch.Size([10, 2334])
np_J_regressor torch.Size([778, 21])
np_hand_component torch.Size([45, 45])
np_hand_mean torch.Size([1, 45])
np_posedirs torch.Size([135, 2334])
weight torch.Size([1, 778, 16])
'''
self.register_buffer('v_template', np_v_template)
self.register_buffer('shapedirs', np_shapedirs)
self.register_buffer('J_regressor', np_J_regressor)
self.register_buffer('hands_comp', np_hand_component)
self.register_buffer('hands_mean', np_hand_mean)
self.register_buffer('posedirs', np_posedirs)
self.register_buffer('e3', e3)
self.register_buffer('weight', np_weights)
def getTemplate(self,beta,zero_wrist=False):
v_shaped = torch.matmul(beta*10, self.shapedirs).view(-1, self.size[0], self.size[1]) + self.v_template
Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor)
Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor)
Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor)
J = torch.stack([Jx, Jy, Jz], dim=2)
if(zero_wrist):J-=J[:,0:1,:].clone()
return J
def forward(self, beta, theta, wrist_euler, pose_type, get_skin=False,external_transition=None):
assert pose_type in ['pca', 'euler', 'rot_matrix'], print('The type of pose input should be pca, euler or rot_matrix')
num_batch = beta.shape[0]
# print("num_batch",num_batch)
v_shaped = torch.matmul(beta, self.shapedirs).view(-1, self.size[0], self.size[1]) + self.v_template
Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor)
Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor)
Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor)
J = torch.stack([Jx, Jy, Jz], dim=2)
self.CJ=J.clone()
#print("J.shape",J.shape)
#global_rot = self.batch_rodrigues(wrist_euler).view(-1, 1, 3, 3)
# pose_type should be 'pca' or 'euler' here
if pose_type == 'pca':
euler_pose = theta.mm(self.hands_comp) + self.hands_mean
Rs = self.batch_rodrigues(euler_pose.contiguous().view(-1, 3))
#print('Rs',Rs)
global_rot = self.batch_rodrigues(wrist_euler.view(-1, 3)).view(-1, 1, 3, 3)
#print("global_rot",global_rot)
elif pose_type == 'euler':
euler_pose = theta
Rs = self.batch_rodrigues(euler_pose.contiguous().view(-1, 3)).view(-1, 15, 3, 3)
global_rot = self.batch_rodrigues(wrist_euler.view(-1, 3)).view(-1, 1, 3, 3)
else:
Rs = theta.view(num_batch, 15, 3, 3)
global_rot = wrist_euler.view(num_batch, 1, 3, 3)
Rs = Rs.view(-1, 15, 3, 3)
pose_feature = (Rs[:, :, :, :]).sub(1.0, self.e3).view(-1, 135)
v_posed = v_shaped + torch.matmul(pose_feature, self.posedirs).view(-1, self.size[0], self.size[1])
self.J_transformed, A,rotJoints = self.batch_global_rigid_transformation(torch.cat([global_rot, Rs], dim=1), J[:, :16, :], self.parents,JsAll=J.clone())
weight = self.weight.repeat(num_batch, 1, 1)
W = weight.view(num_batch, -1, 16)
T = torch.matmul(W, A.view(num_batch, 16, 16)).view(num_batch, -1, 4, 4)
ones_homo = torch.ones(num_batch, v_posed.shape[1], 1)
if self.is_cuda:
ones_homo = ones_homo.cuda()
v_posed_homo = torch.cat([v_posed, ones_homo], dim=2)
v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, -1))
verts = v_homo[:, :, :3, 0]
if self.userotJoints:
joints = rotJoints
else:
joint_x = torch.matmul(verts[:, :, 0], self.J_regressor)
joint_y = torch.matmul(verts[:, :, 1], self.J_regressor)
joint_z = torch.matmul(verts[:, :, 2], self.J_regressor)
joints = torch.stack([joint_x, joint_y, joint_z], dim=2)
if get_skin:
return verts, joints, Rs,
else:
return joints
def get_mano_vertices(self, wrist_euler, pose, shape, scale, translation, pose_type = 'pca', mmcp_center = False,external_transition=None):
"""
:param wrist_euler: mano wrist rotation params in euler representation [batch_size, 3]
:param pose: mano articulation params [batch_size, 45] or pca pose [batch_size, ncomps]
:param shape: mano shape params [batch_size, 10]
:param cam: mano scale and translation params [batch_size, 3]
:return: vertices: mano vertices Nx778x3,
joints: 3d joints in BigHand skeleton indexing Nx21x3
"""
# apply parameters on the model
if not isinstance(scale, torch.Tensor):
scale = torch.tensor(scale, dtype=torch.float)
if not isinstance(translation, torch.Tensor):
translation = torch.tensor(translation, dtype=torch.float)
if not isinstance(wrist_euler, torch.Tensor):
wrist_euler = torch.tensor(wrist_euler, dtype=torch.float)
if not isinstance(pose, torch.Tensor):
pose = torch.tensor(pose, dtype=torch.float)
if not isinstance(shape, torch.Tensor):
shape = torch.tensor(shape, dtype=torch.float)
if self.is_cuda:
translation = translation.cuda()
scale = scale.cuda()
shape = shape.cuda()
pose = pose.cuda()
wrist_euler = wrist_euler.cuda()
#
if pose_type == 'pca':
pose = pose.clamp(-2.,2.)
#shape = shape.clamp(-0.03, 0.03)
verts, joints, Rs = self.forward(shape, pose, wrist_euler, pose_type, get_skin=True,external_transition=external_transition)
scale = scale.contiguous().view(-1, 1, 1)
trans = translation.contiguous().view(-1, 1, 3)
verts = scale * verts
verts = trans + verts
joints = scale * joints
joints = trans + joints
# mmcp is 3th joint in bighand order
if mmcp_center:
mmcp = joints[:, 3, :].clone().unsqueeze(1)
verts -= mmcp
joints -= mmcp
#verts = torch.matmul(verts, self.base_rot_mat_x)
joints = joints # convert to mm
return verts, joints
def quat2mat(self, quat):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: size = [B, 4] 4 <===>(w, x, y, z)
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
norm_quat = quat
norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w * x, w * y, w * z
xy, xz, yz = x * y, x * z, y * z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat
def batch_rodrigues(self, theta):
l1norm = torch.norm(theta + 1e-8, p=2, dim=1)
angle = torch.unsqueeze(l1norm, -1)
normalized = torch.div(theta, angle)
angle = angle * 0.5
v_cos = torch.cos(angle)
v_sin = torch.sin(angle)
quat = self.quat2mat(torch.cat([v_cos, v_sin * normalized], dim=1))
return quat
    def batch_global_rigid_transformation(self, Rs, Js, parent,JsAll=None):
        """Compose per-joint rotations into global rigid transforms along the
        kinematic chain.

        Args:
            Rs: (N, 16, 3, 3) rotation matrices — global wrist rotation at
                index 0 followed by 15 articulation rotations.
            Js: (N, 16, 3) rest-pose joint locations of the kinematic chain.
            parent: parent index per joint (kinematic tree), length 16.
            JsAll: (N, 21, 3) full rest-pose joint set (incl. fingertips).

        Returns:
            (new_J, A, rotated_joints): posed chain joints (unused later),
            relative skinning transforms (N, 16, 4, 4), and the 21 posed
            joint positions.
        """
        N = Rs.shape[0]
        root_rotation = Rs[:, 0, :, :]
        Js = torch.unsqueeze(Js, -1)
        def make_A(R, t):
            # Build a 4x4 homogeneous transform from rotation R and translation t.
            R_homo = F.pad(R, [0, 0, 0, 1, 0, 0])
            ones_homo = Variable(torch.ones(N, 1, 1))
            if self.is_cuda:
                ones_homo = ones_homo.cuda()
            t_homo = torch.cat([t, ones_homo], dim=1)
            return torch.cat([R_homo, t_homo], 2)
        A0 = make_A(root_rotation, Js[:, 0])
        results = [A0]
        # Homogeneous copies of all 21 rest joints; entries are overwritten
        # below as each chain transform becomes available.
        newjs=JsAll.clone().reshape(N,21,3)
        newjsones=torch.ones([N,21,1]).to(Rs.device)
        newjs=torch.cat([newjs,newjsones],dim=2).reshape(N,21,4,1)
        orijs = newjs.clone().reshape(N,21,4)
        # transidx/transpdx appear to map chain link i to the 21-joint index it
        # moves and to its parent's index — TODO confirm against the skeleton
        # ordering used by the caller.
        transidx=[2,3,17, 5, 6, 18, 8, 9, 20, 11, 12, 19, 14, 15, 16]
        transpdx=[1,2,3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        #manopdx=[1,2,3, 4,5, 6, 7,8, 9, 10,11, 12, 13,14, 15]
        #parent: 012 045 078 01011 01314
        # These joints are direct children of the wrist: only the root
        # transform A0 applies to them.
        cpidx=[1,4,7,10,13]
        for i in range(len(cpidx)):
            a=minusHomoVectors(orijs[:, cpidx[i]],orijs[:, 0]).reshape(N,4,1)
            newjs[:,cpidx[i]]=(A0@a)
        for i in range(1, parent.shape[0]):
            # Offset of joint i from its parent in the rest pose.
            j_here = Js[:, i] - Js[:, parent[i]]
            A_here = make_A(Rs[:, i], j_here)
            # Accumulate the transform along the kinematic chain.
            res_here = torch.matmul(results[parent[i]], A_here)
            a = minusHomoVectors(orijs[:,transidx[i-1]], orijs[:,transpdx[i-1]]).reshape(N,4,1)
            newjs[:,transidx[i-1]]=(res_here@a)
            results.append(res_here)
        self.newjs=newjs
        results = torch.stack(results, dim=1)
        new_J = results[:, :, :3, 3] #did not use later
        # Subtract each transform applied to the rest joint so A maps
        # rest-pose vertices directly (standard LBS correction).
        ones_homo = Variable(torch.zeros(N, 16, 1, 1))
        if self.is_cuda:ones_homo = ones_homo.cuda()
        Js_w0 = torch.cat([Js, ones_homo], dim=2)
        init_bone = torch.matmul(results, Js_w0)
        init_bone = F.pad(init_bone, [3, 0, 0, 0, 0, 0, 0, 0])
        A = results - init_bone
        return new_J, A, newjs.clone()[:,:,:-1,0]
| [
"torch.eye",
"torch.cat",
"torch.cos",
"pickle.load",
"numpy.tile",
"torch.nn.functional.pad",
"torch.ones",
"numpy.zeros_like",
"numpy.reshape",
"torch.zeros",
"torch.matmul",
"torch.norm",
"torch.cuda.is_available",
"torch.unsqueeze",
"numpy.concatenate",
"torch.from_numpy",
"torch... | [((790, 821), 'numpy.array', 'np.array', (["model['f']"], {'dtype': 'int'}), "(model['f'], dtype=int)\n", (798, 821), True, 'import numpy as np\n'), ((950, 1203), 'numpy.array', 'np.array', (['[[38, 122, 92], [214, 79, 78], [239, 234, 122], [122, 118, 239], [215, 108,\n 79], [279, 118, 117], [117, 119, 279], [119, 108, 215], [120, 108, 119],\n [119, 215, 279], [214, 215, 79], [118, 279, 239], [121, 214, 78], [122,\n 234, 92]]'], {}), '([[38, 122, 92], [214, 79, 78], [239, 234, 122], [122, 118, 239], [\n 215, 108, 79], [279, 118, 117], [117, 119, 279], [119, 108, 215], [120,\n 108, 119], [119, 215, 279], [214, 215, 79], [118, 279, 239], [121, 214,\n 78], [122, 234, 92]])\n', (958, 1203), True, 'import numpy as np\n'), ((1308, 1360), 'numpy.concatenate', 'np.concatenate', (['(faces_mano, faces_addition)'], {'axis': '(0)'}), '((faces_mano, faces_addition), axis=0)\n', (1322, 1360), True, 'import numpy as np\n'), ((1512, 1557), 'numpy.array', 'np.array', (["model['v_template']"], {'dtype': 'np.float'}), "(model['v_template'], dtype=np.float)\n", (1520, 1557), True, 'import numpy as np\n'), ((1782, 1826), 'numpy.array', 'np.array', (["model['shapedirs']"], {'dtype': 'np.float'}), "(model['shapedirs'], dtype=np.float)\n", (1790, 1826), True, 'import numpy as np\n'), ((2268, 2286), 'numpy.zeros', 'np.zeros', (['(778, 5)'], {}), '((778, 5))\n', (2276, 2286), True, 'import numpy as np\n'), ((2482, 2537), 'numpy.concatenate', 'np.concatenate', (['(np_J_regressor, np_J_addition)'], {'axis': '(1)'}), '((np_J_regressor, np_J_addition), axis=1)\n', (2496, 2537), True, 'import numpy as np\n'), ((3083, 3126), 'numpy.array', 'np.array', (["model['posedirs']"], {'dtype': 'np.float'}), "(model['posedirs'], dtype=np.float)\n", (3091, 3126), True, 'import numpy as np\n'), ((3447, 3489), 'numpy.array', 'np.array', (["model['weights']"], {'dtype': 'np.float'}), "(model['weights'], dtype=np.float)\n", (3455, 3489), True, 'import numpy as np\n'), ((3737, 3798), 'numpy.array', 
'np.array', (['[[1, 0, 0], [0, -1, 0], [0, 0, -1]]'], {'dtype': 'np.float'}), '([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=np.float)\n', (3745, 3798), True, 'import numpy as np\n'), ((3961, 4010), 'torch.matmul', 'torch.matmul', (['np_v_template[:, 0]', 'np_J_regressor'], {}), '(np_v_template[:, 0], np_J_regressor)\n', (3973, 4010), False, 'import torch\n'), ((4029, 4078), 'torch.matmul', 'torch.matmul', (['np_v_template[:, 1]', 'np_J_regressor'], {}), '(np_v_template[:, 1], np_J_regressor)\n', (4041, 4078), False, 'import torch\n'), ((4097, 4146), 'torch.matmul', 'torch.matmul', (['np_v_template[:, 2]', 'np_J_regressor'], {}), '(np_v_template[:, 2], np_J_regressor)\n', (4109, 4146), False, 'import torch\n'), ((5844, 5893), 'torch.matmul', 'torch.matmul', (['v_shaped[:, :, 0]', 'self.J_regressor'], {}), '(v_shaped[:, :, 0], self.J_regressor)\n', (5856, 5893), False, 'import torch\n'), ((5907, 5956), 'torch.matmul', 'torch.matmul', (['v_shaped[:, :, 1]', 'self.J_regressor'], {}), '(v_shaped[:, :, 1], self.J_regressor)\n', (5919, 5956), False, 'import torch\n'), ((5970, 6019), 'torch.matmul', 'torch.matmul', (['v_shaped[:, :, 2]', 'self.J_regressor'], {}), '(v_shaped[:, :, 2], self.J_regressor)\n', (5982, 6019), False, 'import torch\n'), ((6032, 6064), 'torch.stack', 'torch.stack', (['[Jx, Jy, Jz]'], {'dim': '(2)'}), '([Jx, Jy, Jz], dim=2)\n', (6043, 6064), False, 'import torch\n'), ((6556, 6605), 'torch.matmul', 'torch.matmul', (['v_shaped[:, :, 0]', 'self.J_regressor'], {}), '(v_shaped[:, :, 0], self.J_regressor)\n', (6568, 6605), False, 'import torch\n'), ((6619, 6668), 'torch.matmul', 'torch.matmul', (['v_shaped[:, :, 1]', 'self.J_regressor'], {}), '(v_shaped[:, :, 1], self.J_regressor)\n', (6631, 6668), False, 'import torch\n'), ((6682, 6731), 'torch.matmul', 'torch.matmul', (['v_shaped[:, :, 2]', 'self.J_regressor'], {}), '(v_shaped[:, :, 2], self.J_regressor)\n', (6694, 6731), False, 'import torch\n'), ((6744, 6776), 'torch.stack', 'torch.stack', (['[Jx, Jy, 
Jz]'], {'dim': '(2)'}), '([Jx, Jy, Jz], dim=2)\n', (6755, 6776), False, 'import torch\n'), ((8252, 8294), 'torch.ones', 'torch.ones', (['num_batch', 'v_posed.shape[1]', '(1)'], {}), '(num_batch, v_posed.shape[1], 1)\n', (8262, 8294), False, 'import torch\n'), ((8384, 8422), 'torch.cat', 'torch.cat', (['[v_posed, ones_homo]'], {'dim': '(2)'}), '([v_posed, ones_homo], dim=2)\n', (8393, 8422), False, 'import torch\n'), ((12188, 12225), 'torch.norm', 'torch.norm', (['(theta + 1e-08)'], {'p': '(2)', 'dim': '(1)'}), '(theta + 1e-08, p=2, dim=1)\n', (12198, 12225), False, 'import torch\n'), ((12241, 12268), 'torch.unsqueeze', 'torch.unsqueeze', (['l1norm', '(-1)'], {}), '(l1norm, -1)\n', (12256, 12268), False, 'import torch\n'), ((12290, 12313), 'torch.div', 'torch.div', (['theta', 'angle'], {}), '(theta, angle)\n', (12299, 12313), False, 'import torch\n'), ((12358, 12374), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (12367, 12374), False, 'import torch\n'), ((12391, 12407), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (12400, 12407), False, 'import torch\n'), ((12657, 12680), 'torch.unsqueeze', 'torch.unsqueeze', (['Js', '(-1)'], {}), '(Js, -1)\n', (12672, 12680), False, 'import torch\n'), ((14154, 14181), 'torch.stack', 'torch.stack', (['results'], {'dim': '(1)'}), '(results, dim=1)\n', (14165, 14181), False, 'import torch\n'), ((14363, 14396), 'torch.cat', 'torch.cat', (['[Js, ones_homo]'], {'dim': '(2)'}), '([Js, ones_homo], dim=2)\n', (14372, 14396), False, 'import torch\n'), ((14417, 14445), 'torch.matmul', 'torch.matmul', (['results', 'Js_w0'], {}), '(results, Js_w0)\n', (14429, 14445), False, 'import torch\n'), ((14466, 14508), 'torch.nn.functional.pad', 'F.pad', (['init_bone', '[3, 0, 0, 0, 0, 0, 0, 0]'], {}), '(init_bone, [3, 0, 0, 0, 0, 0, 0, 0])\n', (14471, 14508), True, 'import torch.nn.functional as F\n'), ((734, 767), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (745, 767), False, 
'import pickle\n'), ((1432, 1457), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1455, 1457), False, 'import torch\n'), ((1898, 1944), 'numpy.reshape', 'np.reshape', (['np_shapedirs', '[-1, self.num_betas]'], {}), '(np_shapedirs, [-1, self.num_betas])\n', (1908, 1944), True, 'import numpy as np\n'), ((2633, 2684), 'numpy.array', 'np.array', (["model['hands_components']"], {'dtype': 'np.float'}), "(model['hands_components'], dtype=np.float)\n", (2641, 2684), True, 'import numpy as np\n'), ((2851, 2896), 'numpy.array', 'np.array', (["model['hands_mean']"], {'dtype': 'np.float'}), "(model['hands_mean'], dtype=np.float)\n", (2859, 2896), True, 'import numpy as np\n'), ((2970, 2997), 'numpy.zeros_like', 'np.zeros_like', (['np_hand_mean'], {}), '(np_hand_mean)\n', (2983, 2997), True, 'import numpy as np\n'), ((3196, 3241), 'numpy.reshape', 'np.reshape', (['np_posedirs', '[-1, num_pose_basis]'], {}), '(np_posedirs, [-1, num_pose_basis])\n', (3206, 3241), True, 'import numpy as np\n'), ((3829, 3854), 'numpy.tile', 'np.tile', (['np_rot_x', '[1, 1]'], {}), '(np_rot_x, [1, 1])\n', (3836, 3854), True, 'import numpy as np\n'), ((7973, 8007), 'torch.cat', 'torch.cat', (['[global_rot, Rs]'], {'dim': '(1)'}), '([global_rot, Rs], dim=1)\n', (7982, 8007), False, 'import torch\n'), ((8456, 8489), 'torch.unsqueeze', 'torch.unsqueeze', (['v_posed_homo', '(-1)'], {}), '(v_posed_homo, -1)\n', (8471, 8489), False, 'import torch\n'), ((8625, 8671), 'torch.matmul', 'torch.matmul', (['verts[:, :, 0]', 'self.J_regressor'], {}), '(verts[:, :, 0], self.J_regressor)\n', (8637, 8671), False, 'import torch\n'), ((8694, 8740), 'torch.matmul', 'torch.matmul', (['verts[:, :, 1]', 'self.J_regressor'], {}), '(verts[:, :, 1], self.J_regressor)\n', (8706, 8740), False, 'import torch\n'), ((8763, 8809), 'torch.matmul', 'torch.matmul', (['verts[:, :, 2]', 'self.J_regressor'], {}), '(verts[:, :, 2], self.J_regressor)\n', (8775, 8809), False, 'import torch\n'), ((8831, 8878), 
'torch.stack', 'torch.stack', (['[joint_x, joint_y, joint_z]'], {'dim': '(2)'}), '([joint_x, joint_y, joint_z], dim=2)\n', (8842, 8878), False, 'import torch\n'), ((9696, 9734), 'torch.tensor', 'torch.tensor', (['scale'], {'dtype': 'torch.float'}), '(scale, dtype=torch.float)\n', (9708, 9734), False, 'import torch\n'), ((9815, 9859), 'torch.tensor', 'torch.tensor', (['translation'], {'dtype': 'torch.float'}), '(translation, dtype=torch.float)\n', (9827, 9859), False, 'import torch\n'), ((9940, 9984), 'torch.tensor', 'torch.tensor', (['wrist_euler'], {'dtype': 'torch.float'}), '(wrist_euler, dtype=torch.float)\n', (9952, 9984), False, 'import torch\n'), ((10051, 10088), 'torch.tensor', 'torch.tensor', (['pose'], {'dtype': 'torch.float'}), '(pose, dtype=torch.float)\n', (10063, 10088), False, 'import torch\n'), ((10157, 10195), 'torch.tensor', 'torch.tensor', (['shape'], {'dtype': 'torch.float'}), '(shape, dtype=torch.float)\n', (10169, 10195), False, 'import torch\n'), ((12437, 12482), 'torch.cat', 'torch.cat', (['[v_cos, v_sin * normalized]'], {'dim': '(1)'}), '([v_cos, v_sin * normalized], dim=1)\n', (12446, 12482), False, 'import torch\n'), ((12729, 12757), 'torch.nn.functional.pad', 'F.pad', (['R', '[0, 0, 0, 1, 0, 0]'], {}), '(R, [0, 0, 0, 1, 0, 0])\n', (12734, 12757), True, 'import torch.nn.functional as F\n'), ((12907, 12939), 'torch.cat', 'torch.cat', (['[t, ones_homo]'], {'dim': '(1)'}), '([t, ones_homo], dim=1)\n', (12916, 12939), False, 'import torch\n'), ((12959, 12989), 'torch.cat', 'torch.cat', (['[R_homo, t_homo]', '(2)'], {}), '([R_homo, t_homo], 2)\n', (12968, 12989), False, 'import torch\n'), ((13887, 13927), 'torch.matmul', 'torch.matmul', (['results[parent[i]]', 'A_here'], {}), '(results[parent[i]], A_here)\n', (13899, 13927), False, 'import torch\n'), ((14268, 14292), 'torch.zeros', 'torch.zeros', (['N', '(16)', '(1)', '(1)'], {}), '(N, 16, 1, 1)\n', (14279, 14292), False, 'import torch\n'), ((1582, 1613), 'torch.from_numpy', 'torch.from_numpy', 
(['np_v_template'], {}), '(np_v_template)\n', (1598, 1613), False, 'import torch\n'), ((2045, 2075), 'torch.from_numpy', 'torch.from_numpy', (['np_shapedirs'], {}), '(np_shapedirs)\n', (2061, 2075), False, 'import torch\n'), ((2563, 2595), 'torch.from_numpy', 'torch.from_numpy', (['np_J_regressor'], {}), '(np_J_regressor)\n', (2579, 2595), False, 'import torch\n'), ((2722, 2757), 'torch.from_numpy', 'torch.from_numpy', (['np_hand_component'], {}), '(np_hand_component)\n', (2738, 2757), False, 'import torch\n'), ((3021, 3051), 'torch.from_numpy', 'torch.from_numpy', (['np_hand_mean'], {}), '(np_hand_mean)\n', (3037, 3051), False, 'import torch\n'), ((3266, 3295), 'torch.from_numpy', 'torch.from_numpy', (['np_posedirs'], {}), '(np_posedirs)\n', (3282, 3295), False, 'import torch\n'), ((3696, 3708), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (3705, 3708), False, 'import torch\n'), ((4269, 4316), 'torch.stack', 'torch.stack', (['[joint_x, joint_y, joint_z]'], {'dim': '(1)'}), '([joint_x, joint_y, joint_z], dim=1)\n', (4280, 4316), False, 'import torch\n'), ((11855, 12043), 'torch.stack', 'torch.stack', (['[w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, w2 -\n x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 -\n x2 - y2 + z2]'], {'dim': '(1)'}), '([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + \n 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 *\n yz, w2 - x2 - y2 + z2], dim=1)\n', (11866, 12043), False, 'import torch\n'), ((12791, 12810), 'torch.ones', 'torch.ones', (['N', '(1)', '(1)'], {}), '(N, 1, 1)\n', (12801, 12810), False, 'import torch\n'), ((13122, 13144), 'torch.ones', 'torch.ones', (['[N, 21, 1]'], {}), '([N, 21, 1])\n', (13132, 13144), False, 'import torch\n'), ((13171, 13207), 'torch.cat', 'torch.cat', (['[newjs, newjsones]'], {'dim': '(2)'}), '([newjs, newjsones], dim=2)\n', (13180, 13207), False, 'import torch\n'), ((3328, 3360), 'numpy.array', 'np.array', 
(["model['kintree_table']"], {}), "(model['kintree_table'])\n", (3336, 3360), True, 'import numpy as np\n'), ((3906, 3932), 'torch.from_numpy', 'torch.from_numpy', (['np_rot_x'], {}), '(np_rot_x)\n', (3922, 3932), False, 'import torch\n'), ((5738, 5777), 'torch.matmul', 'torch.matmul', (['(beta * 10)', 'self.shapedirs'], {}), '(beta * 10, self.shapedirs)\n', (5750, 5777), False, 'import torch\n'), ((6453, 6487), 'torch.matmul', 'torch.matmul', (['beta', 'self.shapedirs'], {}), '(beta, self.shapedirs)\n', (6465, 6487), False, 'import torch\n'), ((7812, 7853), 'torch.matmul', 'torch.matmul', (['pose_feature', 'self.posedirs'], {}), '(pose_feature, self.posedirs)\n', (7824, 7853), False, 'import torch\n'), ((3601, 3629), 'torch.from_numpy', 'torch.from_numpy', (['np_weights'], {}), '(np_weights)\n', (3617, 3629), False, 'import torch\n'), ((4210, 4234), 'torch.ones_like', 'torch.ones_like', (['joint_x'], {}), '(joint_x)\n', (4225, 4234), False, 'import torch\n')] |
# -------------------------------------------------------
# Assignment #1 Montreal Crime Analytics
# Written by <NAME> - 26250912
# For COMP 472 Section ABJX – Summer 2020
# --------------------------------------------------------
from src.Node import Node
from typing import Dict, Tuple
import numpy as np
from src.GridTopology import GridTopology
class DisconnectedGraph:
    """Graph built over a crime-rate grid: cell corners become nodes and are
    connected by weighted edges; cells at or above the crime threshold are
    treated as blocked.
    """
    def __init__(self, grid: np.ndarray, grid_topology: GridTopology):
        # node id -> Node; backing store for the whole graph
        self._node_dict: Dict[int, Node] = dict()
        self._grid_topology = grid_topology
        # small offset used when mapping a point to a cell so points landing
        # exactly on a cell boundary resolve deterministically
        self._epsilon = float(0.0000759)
        # edge costs: diagonals cost most, edges shared with a blocked cell
        # cost more than plain free edges
        self._diagonal_weight = 1.5
        self._shared_edge_weight = 1.3
        self._edge_weight = 1
        self._grid = grid
    def build_graph_from_grid(self) -> Dict[int, Node]:
        """Build the graph for every non-blocked cell of the grid.

        Uses a dictionary as the backing data structure, with node
        handles (ids) as keys.
        """
        rows, columns = self._grid_topology.bin_dimensions
        boundaries = self._grid_topology.bounding_box[0], \
                     self._grid_topology.bounding_box[1], \
                     self._grid_topology.bounding_box[2], \
                     self._grid_topology.bounding_box[3]
        for c in range(columns):
            for r in range(rows):
                # the cell is blocked (crime value at/above threshold), skip it
                if self._grid[c, r] >= self._grid_topology.threshold:
                    continue
                self.create_node_connections_for_cell(c, r, boundaries)
        return self._node_dict
    def create_node_connections_for_cell(self, x: int, y: int,
                                         grid_boundaries: Tuple[float, float, float, float]) -> None:
        """Create (or reuse) the four corner nodes of cell (x, y) and connect
        them with bi-directional weighted edges.

        Nodes that lie on the outer boundary of the grid are never connected
        along that boundary edge.
        """
        x_min, y_min, x_max, y_max = grid_boundaries
        rows, columns = self._grid_topology.bin_dimensions
        res_x, res_y = self._grid_topology.grid_resolution
        # Whenever we are in a new cell we process nodes counter-clockwise,
        # starting at the bottom-left node.
        # We need rows + 1 because we process cell vertices, not cells.
        vertex_rows = rows + 1
        # left bottom node
        current_lb_node_index = (x * vertex_rows) + y
        current_lb_node = self._node_dict.get(current_lb_node_index)
        # The node does not exist yet, so we need to create it.
        # When computing node coordinates, x and y are flipped because y is
        # the current column index and x the current row index.
        if current_lb_node is None:
            current_lb_node = Node(current_lb_node_index)
            # calculate the node's real-world coordinates
            left_bottom_corner_x = x_min + (y * res_x)
            left_bottom_corner_y = y_min + (x * res_y)
            current_lb_node.cod_x = left_bottom_corner_x
            current_lb_node.cod_y = left_bottom_corner_y
            self._node_dict[current_lb_node_index] = current_lb_node
        # left top node
        current_lt_node_index = ((x+1) * vertex_rows) + y
        current_lt_node = self._node_dict.get(current_lt_node_index)
        # The node does not exist yet, so we need to create it.
        if current_lt_node is None:
            current_lt_node = Node(current_lt_node_index)
            # calculate the node's real-world coordinates
            left_bottom_corner_x = x_min + (y * res_x)
            left_bottom_corner_y = y_min + ((x + 1) * res_y)
            current_lt_node.cod_x = left_bottom_corner_x
            current_lt_node.cod_y = left_bottom_corner_y
            self._node_dict[current_lt_node_index] = current_lt_node
        # right top node
        current_rt_node_index = ((x+1) * vertex_rows) + y + 1
        current_rt_node = self._node_dict.get(current_rt_node_index)
        # The node does not exist yet, so we need to create it.
        if current_rt_node is None:
            current_rt_node = Node(current_rt_node_index)
            # calculate the node's real-world coordinates
            left_bottom_corner_x = x_min + ((y + 1) * res_x)
            left_bottom_corner_y = y_min + ((x + 1) * res_y)
            current_rt_node.cod_x = left_bottom_corner_x
            current_rt_node.cod_y = left_bottom_corner_y
            self._node_dict[current_rt_node_index] = current_rt_node
        # right bottom node
        current_rb_node_index = (x * vertex_rows) + y + 1
        current_rb_node = self._node_dict.get(current_rb_node_index)
        # The node does not exist yet, so we need to create it.
        if current_rb_node is None:
            current_rb_node = Node(current_rb_node_index)
            # calculate the node's real-world coordinates
            left_bottom_corner_x = x_min + ((y + 1) * res_x)
            left_bottom_corner_y = y_min + (x * res_y)
            current_rb_node.cod_x = left_bottom_corner_x
            current_rb_node.cod_y = left_bottom_corner_y
            self._node_dict[current_rb_node_index] = current_rb_node
        # create bi-directional diagonal connection from left bottom to right top node
        if not current_lb_node.is_connected_to(current_rt_node_index):
            current_lb_node.connect_to_node(self._diagonal_weight, current_rt_node_index)
        if not current_rt_node.is_connected_to(current_lb_node_index):
            current_rt_node.connect_to_node(self._diagonal_weight, current_lb_node_index)
        # create bi-directional diagonal connection from right bottom to left top node
        if not current_lt_node.is_connected_to(current_rb_node_index):
            current_lt_node.connect_to_node(self._diagonal_weight, current_rb_node_index)
        if not current_rb_node.is_connected_to(current_lt_node_index):
            current_rb_node.connect_to_node(self._diagonal_weight, current_lt_node_index)
        # left -> right: at column 0 we cannot process the left edge of the
        # grid, so the bottom-left and top-left nodes stay unconnected.
        if y != 0:
            weight = self._edge_weight
            if self._grid[x, y - 1] >= self._grid_topology.threshold:
                weight = self._shared_edge_weight
            if not current_lb_node.is_connected_to(current_lt_node_index):
                current_lb_node.connect_to_node(weight, current_lt_node_index)
            if not current_lt_node.is_connected_to(current_lb_node_index):
                current_lt_node.connect_to_node(weight, current_lb_node_index)
        # bottom -> up: at row 0 we cannot process the bottom edge of the
        # grid, so the bottom-left and bottom-right nodes stay unconnected.
        if x != 0:
            weight = self._edge_weight
            if self._grid[x - 1, y] >= self._grid_topology.threshold:
                weight = self._shared_edge_weight
            if not current_lb_node.is_connected_to(current_rb_node_index):
                current_lb_node.connect_to_node(weight, current_rb_node_index)
            if not current_rb_node.is_connected_to(current_lb_node_index):
                current_rb_node.connect_to_node(weight, current_lb_node_index)
        # right -> left: at the last column we cannot process the right edge
        # of the grid, so the bottom-right and top-right nodes stay unconnected.
        if y != (columns-1):
            weight = self._edge_weight
            if self._grid[x, y + 1] >= self._grid_topology.threshold:
                weight = self._shared_edge_weight
            if not current_rb_node.is_connected_to(current_rt_node_index):
                current_rb_node.connect_to_node(weight, current_rt_node_index)
            if not current_rt_node.is_connected_to(current_rb_node_index):
                current_rt_node.connect_to_node(weight, current_rb_node_index)
        # top -> bottom: at the last row we cannot process the top edge of
        # the grid, so the top-left and top-right nodes stay unconnected.
        if x != (rows - 1):
            weight = self._edge_weight
            if self._grid[x + 1, y] >= self._grid_topology.threshold:
                weight = self._shared_edge_weight
            if not current_lt_node.is_connected_to(current_rt_node_index):
                current_lt_node.connect_to_node(weight, current_rt_node_index)
            if not current_rt_node.is_connected_to(current_lt_node_index):
                current_rt_node.connect_to_node(weight, current_lt_node_index)
    def get_node(self, node_id: int):
        """Return the node with the given id, or None if it does not exist."""
        return self._node_dict.get(node_id)
    def __get_cell_from_point(self, grid_dimensions: Tuple[int, int], point: Tuple[float, float]):
        """Given a point, return the (column, row) cell of the grid it falls in."""
        rows, columns = grid_dimensions
        delta_x, delta_y = self.__get_bbox_dimensions()
        bbox = self._grid_topology.bounding_box
        x, y = point
        # epsilon nudges points off exact cell boundaries
        cells_x = np.abs(np.floor((x - bbox[0] - self._epsilon) / delta_x * columns))
        cells_y = np.abs(np.floor((y - bbox[1] - self._epsilon) / delta_y * rows))
        return int(cells_x), int(cells_y)
    def __get_bbox_dimensions(self) -> Tuple[float, float]:
        """Return the (width, height) of the topology's bounding box."""
        bbox = self._grid_topology.bounding_box
        delta_x = np.abs(bbox[2] - bbox[0])
        delta_y = np.abs(bbox[3] - bbox[1])
        return delta_x, delta_y
| [
"src.Node.Node",
"numpy.floor",
"numpy.abs"
] | [((9891, 9916), 'numpy.abs', 'np.abs', (['(bbox[2] - bbox[0])'], {}), '(bbox[2] - bbox[0])\n', (9897, 9916), True, 'import numpy as np\n'), ((9935, 9960), 'numpy.abs', 'np.abs', (['(bbox[3] - bbox[1])'], {}), '(bbox[3] - bbox[1])\n', (9941, 9960), True, 'import numpy as np\n'), ((2948, 2975), 'src.Node.Node', 'Node', (['current_lb_node_index'], {}), '(current_lb_node_index)\n', (2952, 2975), False, 'from src.Node import Node\n'), ((3636, 3663), 'src.Node.Node', 'Node', (['current_lt_node_index'], {}), '(current_lt_node_index)\n', (3640, 3663), False, 'from src.Node import Node\n'), ((4335, 4362), 'src.Node.Node', 'Node', (['current_rt_node_index'], {}), '(current_rt_node_index)\n', (4339, 4362), False, 'from src.Node import Node\n'), ((5039, 5066), 'src.Node.Node', 'Node', (['current_rb_node_index'], {}), '(current_rb_node_index)\n', (5043, 5066), False, 'from src.Node import Node\n'), ((9578, 9637), 'numpy.floor', 'np.floor', (['((x - bbox[0] - self._epsilon) / delta_x * columns)'], {}), '((x - bbox[0] - self._epsilon) / delta_x * columns)\n', (9586, 9637), True, 'import numpy as np\n'), ((9664, 9720), 'numpy.floor', 'np.floor', (['((y - bbox[1] - self._epsilon) / delta_y * rows)'], {}), '((y - bbox[1] - self._epsilon) / delta_y * rows)\n', (9672, 9720), True, 'import numpy as np\n')] |
"""
Created July, 2019
@author: <NAME> & <NAME>
"""
import tensorflow as tf
import dataset
import time
from datetime import timedelta
import math
import random
import numpy as np
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
# Training hyper-parameters: one image per optimization step.
batch_size = 1
# 7 gesture classes to recognise.
classes = ['up','down','left','right','forward','backward','none']
num_classes = len(classes)
# 20% of the data will automatically be used for validation.
validation_size = 0.2
# Input images are 200x200 with 3 colour channels.
img_size = 200
num_channels = 3
train_path='training_data'
# load all the training and validation images and labels into memory
data = dataset.read_train_sets(train_path, img_size, classes, validation_size=validation_size)
print("Complete reading input data. Will Now print a snippet of it")
print("Number of files in Training-set:\t\t{}".format(len(data.train.labels)))
print("Number of files in Validation-set:\t{}".format(len(data.valid.labels)))
# TF1-style session and input placeholder for the image batch.
session = tf.Session()
x = tf.placeholder(tf.float32, shape=[batch_size,img_size,img_size,num_channels], name='x')
# one-hot ground-truth labels and their integer class indices
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, dimension=1)
# Network graph params: kernel size and filter count for each conv block.
filter_size_conv1 = 3
num_filters_conv1 = 32
filter_size_conv2 = 3
num_filters_conv2 = 32
filter_size_conv3 = 3
num_filters_conv3 = 64
filter_size_conv4 = 3
num_filters_conv4 = 128
filter_size_conv5 = 3
num_filters_conv5 = 256
filter_size_conv6 = 3
num_filters_conv6 = 512
filter_size_conv7 = 3
num_filters_conv7= 1024
# width of the first fully-connected layer
fc_layer_size = 2048
def create_weights(shape):
    """Return a trainable weight tensor initialised from a truncated normal."""
    initial = tf.truncated_normal(shape, stddev=0.05)
    return tf.Variable(initial)
def create_biases(size):
    """Return a trainable bias vector of the given length, initialised to 0.05."""
    initial = tf.constant(0.05, shape=[size])
    return tf.Variable(initial)
def create_convolutional_layer(input,num_input_channels,conv_filter_size,num_filters):
    """Convolution block: conv2d + bias, 2x2 max-pool, then ReLU."""
    # Trainable kernel and one bias per output filter.
    kernel = create_weights(shape=[conv_filter_size, conv_filter_size, num_input_channels, num_filters])
    bias = create_biases(num_filters)
    # Same-padded convolution with stride 1.
    conv = tf.nn.conv2d(input=input, filter=kernel, strides=[1, 1, 1, 1], padding='SAME')
    conv = conv + bias
    # Halve each spatial dimension with 2x2 max-pooling.
    pooled = tf.nn.max_pool(value=conv,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')
    # ReLU is the activation function.
    return tf.nn.relu(pooled)
def create_flatten_layer(layer):
    """Reshape a 4-D convolutional output to [batch, features]."""
    # Number of features = product of all dimensions except the batch axis.
    num_features = layer.get_shape()[1:4].num_elements()
    return tf.reshape(layer, [-1, num_features])
def create_fc_layer(input,
                    num_inputs,
                    num_outputs,
                    use_relu=True):
    """Fully connected layer computing input @ W + b, optionally followed by ReLU."""
    fc_weights = create_weights(shape=[num_inputs, num_outputs])
    fc_biases = create_biases(num_outputs)
    activations = tf.matmul(input, fc_weights) + fc_biases
    return tf.nn.relu(activations) if use_relu else activations
# Convolutional feature extractor: seven conv/pool/ReLU blocks of increasing depth.
layer_conv1 = create_convolutional_layer(input=x,
                                         num_input_channels=num_channels,
                                         conv_filter_size=filter_size_conv1,
                                         num_filters=num_filters_conv1)
layer_conv2 = create_convolutional_layer(input=layer_conv1,
                                         num_input_channels=num_filters_conv1,
                                         conv_filter_size=filter_size_conv2,
                                         num_filters=num_filters_conv2)
layer_conv3 = create_convolutional_layer(input=layer_conv2,
                                         num_input_channels=num_filters_conv2,
                                         conv_filter_size=filter_size_conv3,
                                         num_filters=num_filters_conv3)
layer_conv4 = create_convolutional_layer(input=layer_conv3,
                                         num_input_channels=num_filters_conv3,
                                         conv_filter_size=filter_size_conv4,
                                         num_filters=num_filters_conv4)
layer_conv5 = create_convolutional_layer(input=layer_conv4,
                                         num_input_channels=num_filters_conv4,
                                         conv_filter_size=filter_size_conv5,
                                         num_filters=num_filters_conv5)
layer_conv6 = create_convolutional_layer(input=layer_conv5,
                                         num_input_channels=num_filters_conv5,
                                         conv_filter_size=filter_size_conv6,
                                         num_filters=num_filters_conv6)
layer_conv7 = create_convolutional_layer(input=layer_conv6,
                                         num_input_channels=num_filters_conv6,
                                         conv_filter_size=filter_size_conv7,
                                         num_filters=num_filters_conv7)
# Classifier head: flatten, one hidden FC layer with ReLU, then the logit layer.
layer_flat = create_flatten_layer(layer_conv7)
layer_fc1 = create_fc_layer(input=layer_flat,
                            num_inputs=layer_flat.get_shape()[1:4].num_elements(),
                            num_outputs=fc_layer_size,
                            use_relu=True)
layer_fc2 = create_fc_layer(input=layer_fc1,
                            num_inputs=fc_layer_size,
                            num_outputs=num_classes,
                            use_relu=False)
# Class probabilities and predicted class index.
y_pred = tf.nn.softmax(layer_fc2, name='y_pred')
y_pred_cls = tf.argmax(y_pred, dimension=1)
session.run(tf.global_variables_initializer())
# Softmax cross-entropy loss, Adam optimizer, and accuracy metric.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2, labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session.run(tf.global_variables_initializer())
def show_progress(epoch, feed_dict_train, feed_dict_validate, val_loss):
    """Print training/validation accuracy and validation loss for one epoch."""
    train_acc = session.run(accuracy, feed_dict=feed_dict_train)
    valid_acc = session.run(accuracy, feed_dict=feed_dict_validate)
    msg = "Training Epoch {0} --- Training Accuracy: {1:>6.1%}, Validation Accuracy: {2:>6.1%}, Validation Loss: {3:.3f}"
    print(msg.format(epoch + 1, train_acc, valid_acc, val_loss))
total_iterations = 0
saver = tf.train.Saver()
def train(num_iteration):
    """Run `num_iteration` optimization steps, reporting progress once per epoch.

    Each step feeds one training batch to the Adam optimizer.  At every epoch
    boundary (num_examples / batch_size steps) the validation loss is computed,
    progress is printed via show_progress, and a checkpoint is saved.
    Relies on the module-level `session`, `optimizer`, `cost`, `data`, `saver`
    and the placeholders `x` / `y_true`.
    """
    import os  # local import: the file's import section is outside this block
    global total_iterations
    for i in range(total_iterations, total_iterations + num_iteration):
        x_batch, y_true_batch, _, cls_batch = data.train.next_batch(batch_size)
        x_valid_batch, y_valid_batch, _, valid_cls_batch = data.valid.next_batch(batch_size)
        feed_dict_tr = {x: x_batch, y_true: y_true_batch}
        feed_dict_val = {x: x_valid_batch, y_true: y_valid_batch}
        session.run(optimizer, feed_dict=feed_dict_tr)
        if i % int(data.train.num_examples / batch_size) == 0:
            val_loss = session.run(cost, feed_dict=feed_dict_val)
            epoch = int(i / int(data.train.num_examples / batch_size))
            show_progress(epoch, feed_dict_tr, feed_dict_val, val_loss)
            # BUG FIX: tf.train.Saver does not perform shell-style tilde
            # expansion, so the literal '~/...' path would be created verbatim
            # relative to the CWD; expand it to the real home directory.
            saver.save(session, os.path.expanduser('~/tf-realsense-gesture/'))
    total_iterations += num_iteration
train(num_iteration=6000)
| [
"numpy.random.seed",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.nn.conv2d",
"dataset.read_train_sets",
"tensorflow.truncated_normal",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.set_random_seed",
"tensorflow.placeholde... | [((211, 218), 'numpy.random.seed', 'seed', (['(1)'], {}), '(1)\n', (215, 218), False, 'from numpy.random import seed\n'), ((258, 276), 'tensorflow.set_random_seed', 'set_random_seed', (['(2)'], {}), '(2)\n', (273, 276), False, 'from tensorflow import set_random_seed\n'), ((637, 729), 'dataset.read_train_sets', 'dataset.read_train_sets', (['train_path', 'img_size', 'classes'], {'validation_size': 'validation_size'}), '(train_path, img_size, classes, validation_size=\n validation_size)\n', (660, 729), False, 'import dataset\n'), ((964, 976), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (974, 976), True, 'import tensorflow as tf\n'), ((981, 1075), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[batch_size, img_size, img_size, num_channels]', 'name': '"""x"""'}), "(tf.float32, shape=[batch_size, img_size, img_size,\n num_channels], name='x')\n", (995, 1075), True, 'import tensorflow as tf\n'), ((1087, 1155), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_classes]', 'name': '"""y_true"""'}), "(tf.float32, shape=[None, num_classes], name='y_true')\n", (1101, 1155), True, 'import tensorflow as tf\n'), ((1169, 1199), 'tensorflow.argmax', 'tf.argmax', (['y_true'], {'dimension': '(1)'}), '(y_true, dimension=1)\n', (1178, 1199), True, 'import tensorflow as tf\n'), ((4887, 4926), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['layer_fc2'], {'name': '"""y_pred"""'}), "(layer_fc2, name='y_pred')\n", (4900, 4926), True, 'import tensorflow as tf\n'), ((4940, 4970), 'tensorflow.argmax', 'tf.argmax', (['y_pred'], {'dimension': '(1)'}), '(y_pred, dimension=1)\n', (4949, 4970), True, 'import tensorflow as tf\n'), ((5034, 5106), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'layer_fc2', 'labels': 'y_true'}), '(logits=layer_fc2, labels=y_true)\n', (5073, 5106), True, 'import tensorflow as tf\n'), ((5113, 5142), 
'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (5127, 5142), True, 'import tensorflow as tf\n'), ((5234, 5266), 'tensorflow.equal', 'tf.equal', (['y_pred_cls', 'y_true_cls'], {}), '(y_pred_cls, y_true_cls)\n', (5242, 5266), True, 'import tensorflow as tf\n'), ((5793, 5809), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5807, 5809), True, 'import tensorflow as tf\n'), ((2101, 2180), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input', 'filter': 'weights', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=input, filter=weights, strides=[1, 1, 1, 1], padding='SAME')\n", (2113, 2180), True, 'import tensorflow as tf\n'), ((2231, 2320), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', ([], {'value': 'layer', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(value=layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='SAME')\n", (2245, 2320), True, 'import tensorflow as tf\n'), ((2451, 2468), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (2461, 2468), True, 'import tensorflow as tf\n'), ((2672, 2709), 'tensorflow.reshape', 'tf.reshape', (['layer', '[-1, num_features]'], {}), '(layer, [-1, num_features])\n', (2682, 2709), True, 'import tensorflow as tf\n'), ((4983, 5016), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5014, 5016), True, 'import tensorflow as tf\n'), ((5293, 5332), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (5300, 5332), True, 'import tensorflow as tf\n'), ((5347, 5380), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5378, 5380), True, 'import tensorflow as tf\n'), ((1626, 1665), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.05)'}), '(shape, stddev=0.05)\n', (1645, 1665), True, 'import tensorflow as tf\n'), ((1716, 1747), 
'tensorflow.constant', 'tf.constant', (['(0.05)'], {'shape': '[size]'}), '(0.05, shape=[size])\n', (1727, 1747), True, 'import tensorflow as tf\n'), ((3041, 3066), 'tensorflow.matmul', 'tf.matmul', (['input', 'weights'], {}), '(input, weights)\n', (3050, 3066), True, 'import tensorflow as tf\n'), ((3109, 3126), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (3119, 3126), True, 'import tensorflow as tf\n'), ((5155, 5199), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (5177, 5199), True, 'import tensorflow as tf\n')] |
from data_loader import DataLoader
from text_cleaner import preprocess
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import resample
from sklearn.metrics import confusion_matrix
from sklearn import svm
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from numpy import mean
from numpy import std
from sklearn.model_selection import train_test_split
# we have several dataframes.
# They may be imbalanced. We address this issue.
def merge_datasets(dataframe_array, max_nb_records=200000):
    """Merge several dataframes into a single, class-balanced dataframe.

    Every input dataframe contributes the same number of rows: the size of
    the largest dataframe, capped at `max_nb_records` to avoid memory
    problems.  Dataframes smaller than that target are upsampled with
    replacement; larger (or equal-sized) ones are randomly downsampled.

    :param dataframe_array: non-empty list of pandas DataFrames
    :param max_nb_records: per-dataframe cap on the number of rows kept
    :return: concatenation of the resampled dataframes
    """
    biggest_dataframe_size = min(
        max(x.shape[0] for x in dataframe_array),
        max_nb_records)  # I do not want too many records, to avoid memory problems
    parts = []
    for d in dataframe_array:
        if d.shape[0] < biggest_dataframe_size:
            # Upsample with replacement to reach the target size.
            parts.append(resample(d,
                                  replace=True,  # sample with replacement
                                  n_samples=biggest_dataframe_size))
        else:
            # Downsample (without replacement) to the target size.
            parts.append(d.sample(n=biggest_dataframe_size))
    # Single concat at the end avoids the quadratic cost of concatenating
    # inside the loop.
    return pd.concat(parts)
# First, we load the datasets.
# NOTE(review): class_id values 1..4 appear to encode CRIM/COM/CIV/SOC -- confirm
# against DataLoader before relying on them.
print("Loading CRIM dataset")
crim_data_loader = DataLoader(filename='../data/CRIM.csv', class_id=1)
crim_data = crim_data_loader.load_data() # 385 lines
print("Loading COM dataset")
com_data_loader = DataLoader(filename='../data/COM.csv', class_id=2)
com_data = com_data_loader.load_data() # 2263 lines
print("Loading CIV dataset")
civ_data_loader = DataLoader(filename='../data/CIV.csv', class_id=3)
civ_data = civ_data_loader.load_data() # 13085 lines
print("Loading SOC dataset")
soc_data_loader = DataLoader(filename='../data/SOC.csv', class_id=4)
soc_data = soc_data_loader.load_data() # 12260 lines
# Second, we aggregate the datasets.
# We also make sure the data is not imbalanced, by resampling least frequent classes
print("Aggregating datasets")
full_dataframe = merge_datasets([crim_data, com_data, civ_data, soc_data])
# Third, we clean the content
print("Cleaning text")
full_data_cleaned = preprocess(full_dataframe)
# Fourth, we build the classifier
# Term-Frequency Inverse Document Frequency
# then Singular-Value Decomposition
# Used to keep most relevant tokens
vec = TfidfVectorizer()
x = vec.fit_transform(full_data_cleaned["content"])
# This parameter has been decided empirically. It has the value that maximizes both the accuracy and recall of each class.
# Without overfitting, of course.
n_components = 150
svd = TruncatedSVD(n_components=n_components) # This parameter has to be adapted, to best fit our situation.
fitted_x = svd.fit_transform(x)
y = full_data_cleaned["class_id"].values
# 10-fold cross-validation repeated 3 times for stable precision/recall estimates.
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
model = svm.SVC(kernel='linear', C=1, decision_function_shape='ovo')
metrics = cross_validate(model, fitted_x, y, scoring=['precision_macro', 'recall_macro'], cv=cv, n_jobs=-1)
print('Precision: %.3f (%.3f)' % (mean(metrics["test_precision_macro"]), std(metrics["test_precision_macro"])))
print('Recall: %.3f (%.3f)' % (mean(metrics["test_recall_macro"]), std(metrics["test_recall_macro"])))
# I also want a confusion matrix, to see which classes are less fitted to the model.
# To test a confusion matrix, I split in 80% train, 20% test
X_train, X_test, y_train, y_test = train_test_split(fitted_x, y, test_size=0.2)
fitted_model = model.fit(X_train, y_train)
pred_y = model.predict(X_test)
print(confusion_matrix(y_test, pred_y))
| [
"pandas.DataFrame",
"text_cleaner.preprocess",
"sklearn.decomposition.TruncatedSVD",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.model_selection.cross_validate",
"sklearn.model_selection.train_test_split",
"numpy.std",
"data_loader.DataLoader",
"numpy.mean",
"sklearn.utils.resample... | [((1300, 1351), 'data_loader.DataLoader', 'DataLoader', ([], {'filename': '"""../data/CRIM.csv"""', 'class_id': '(1)'}), "(filename='../data/CRIM.csv', class_id=1)\n", (1310, 1351), False, 'from data_loader import DataLoader\n'), ((1453, 1503), 'data_loader.DataLoader', 'DataLoader', ([], {'filename': '"""../data/COM.csv"""', 'class_id': '(2)'}), "(filename='../data/COM.csv', class_id=2)\n", (1463, 1503), False, 'from data_loader import DataLoader\n'), ((1604, 1654), 'data_loader.DataLoader', 'DataLoader', ([], {'filename': '"""../data/CIV.csv"""', 'class_id': '(3)'}), "(filename='../data/CIV.csv', class_id=3)\n", (1614, 1654), False, 'from data_loader import DataLoader\n'), ((1756, 1806), 'data_loader.DataLoader', 'DataLoader', ([], {'filename': '"""../data/SOC.csv"""', 'class_id': '(4)'}), "(filename='../data/SOC.csv', class_id=4)\n", (1766, 1806), False, 'from data_loader import DataLoader\n'), ((2166, 2192), 'text_cleaner.preprocess', 'preprocess', (['full_dataframe'], {}), '(full_dataframe)\n', (2176, 2192), False, 'from text_cleaner import preprocess\n'), ((2354, 2371), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (2369, 2371), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2609, 2648), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (2621, 2648), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((2791, 2846), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': '(10)', 'n_repeats': '(3)', 'random_state': '(1)'}), '(n_splits=10, n_repeats=3, random_state=1)\n', (2804, 2846), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((2855, 2915), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'C': '(1)', 'decision_function_shape': '"""ovo"""'}), "(kernel='linear', C=1, decision_function_shape='ovo')\n", (2862, 2915), False, 'from 
sklearn import svm\n'), ((2927, 3028), 'sklearn.model_selection.cross_validate', 'cross_validate', (['model', 'fitted_x', 'y'], {'scoring': "['precision_macro', 'recall_macro']", 'cv': 'cv', 'n_jobs': '(-1)'}), "(model, fitted_x, y, scoring=['precision_macro',\n 'recall_macro'], cv=cv, n_jobs=-1)\n", (2941, 3028), False, 'from sklearn.model_selection import cross_validate\n'), ((3426, 3470), 'sklearn.model_selection.train_test_split', 'train_test_split', (['fitted_x', 'y'], {'test_size': '(0.2)'}), '(fitted_x, y, test_size=0.2)\n', (3442, 3470), False, 'from sklearn.model_selection import train_test_split\n'), ((919, 933), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (931, 933), True, 'import pandas as pd\n'), ((3552, 3584), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'pred_y'], {}), '(y_test, pred_y)\n', (3568, 3584), False, 'from sklearn.metrics import confusion_matrix\n'), ((3059, 3096), 'numpy.mean', 'mean', (["metrics['test_precision_macro']"], {}), "(metrics['test_precision_macro'])\n", (3063, 3096), False, 'from numpy import mean\n'), ((3098, 3134), 'numpy.std', 'std', (["metrics['test_precision_macro']"], {}), "(metrics['test_precision_macro'])\n", (3101, 3134), False, 'from numpy import std\n'), ((3168, 3202), 'numpy.mean', 'mean', (["metrics['test_recall_macro']"], {}), "(metrics['test_recall_macro'])\n", (3172, 3202), False, 'from numpy import mean\n'), ((3204, 3237), 'numpy.std', 'std', (["metrics['test_recall_macro']"], {}), "(metrics['test_recall_macro'])\n", (3207, 3237), False, 'from numpy import std\n'), ((1028, 1087), 'sklearn.utils.resample', 'resample', (['d'], {'replace': '(True)', 'n_samples': 'biggest_dataframe_size'}), '(d, replace=True, n_samples=biggest_dataframe_size)\n', (1036, 1087), False, 'from sklearn.utils import resample\n')] |
"""
@author: <NAME>
__license__= "LGPL"
"""
import numpy as np
import easyvvuq as uq
import os
#import matplotlib as mpl
#mpl.use('Agg')
#from matplotlib import ticker
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 20})
plt.rcParams['figure.figsize'] = 8,6
"""
*****************
* VVUQ ANALYSES *
*****************
"""
# Directory containing this script.
HOME = os.path.abspath(os.path.dirname(__file__))
work_dir = '~/VECMA/Campaigns'
# Reload the previously saved campaign from its state file.
my_campaign = uq.Campaign(state_file="campaign_state_FC.json", work_dir=work_dir)
print('========================================================')
print('Reloaded campaign', my_campaign.campaign_dir.split('/')[-1])
print('========================================================')
# Recover the sampler and the quantity-of-interest columns from the campaign.
my_sampler = my_campaign._active_sampler
output_columns = my_campaign._active_app_decoder.output_columns
# Collate the simulation output and fetch the full dataset.
my_campaign.collate()
data = my_campaign.get_collation_result()
# Run the quasi-Monte-Carlo post-processing analysis.
qmc_analysis = uq.analysis.QMCAnalysis(sampler=my_sampler, qoi_cols=output_columns)
my_campaign.apply_analysis(qmc_analysis)
results = my_campaign.get_last_analysis()
#print(results)
"""
***************************
* SOBOL 1st ORDER INDICES *
***************************
"""
# First-order Sobol indices and the names of the varied parameters.
sobols = results['sobols_first']
params = list(my_sampler.vary.get_keys())
time = np.arange(0, 550 + 1, 1)
######################################################################
# Point estimates and asymmetric error bars (from bootstrap confidence
# intervals) for the two quantities of interest, evaluated at t = 200.
sobol_idx_ICp = np.zeros((len(params)), dtype='float')
yerr_ICp = np.zeros((2, len(params)), dtype='float')
sobol_idx_ICe = np.zeros((len(params)), dtype='float')
yerr_ICe = np.zeros((2, len(params)), dtype='float')
for idx, param in enumerate(params):
    # IC_prev_avg_max: index and confidence bounds at t = 200
    sobol_idx = sobols['IC_prev_avg_max'][param][200]
    sobol_idx_ICp[idx] = sobol_idx
    low = results['conf_sobols_first']['IC_prev_avg_max'][param]['low'][200]
    high = results['conf_sobols_first']['IC_prev_avg_max'][param]['high'][200]
    yerr_ICp[:, idx] = [sobol_idx - low, high - sobol_idx]
    # IC_ex_max: index and confidence bounds at t = 200
    sobol_idx = sobols['IC_ex_max'][param][200]
    sobol_idx_ICe[idx] = sobol_idx
    low = results['conf_sobols_first']['IC_ex_max'][param]['low'][200]
    high = results['conf_sobols_first']['IC_ex_max'][param]['high'][200]
    yerr_ICe[:, idx] = [sobol_idx - low, high - sobol_idx]
    # Echo the values to the terminal.
    print('Param = ', param)
    print('Sobol index for IC_prev_avg_max = ', sobols['IC_prev_avg_max'][param][200])
    print('Sobol index for IC_ex_max = ', sobols['IC_ex_max'][param][200])
# Side-by-side error-bar plots of the first-order indices per parameter.
f = plt.figure('Sobol_IC_max', figsize=[12, 6])
ax_ICp_max = f.add_subplot(121, title='IC_prev_avg_max')
ax_ICp_max.set_ylim([-.1, 1.1])
ax_ICe_max = f.add_subplot(122, title='IC_ex_max')
ax_ICe_max.set_ylim([-.1, 1.1])
ax_ICp_max.errorbar(np.arange(0, len(params), 1), sobol_idx_ICp, yerr=yerr_ICp, fmt='o', elinewidth=2)
ax_ICe_max.errorbar(np.arange(0, len(params), 1), sobol_idx_ICe, yerr=yerr_ICe, fmt='o', elinewidth=2)
ax_ICp_max.set_xticks(np.arange(0, len(params), 1))
ax_ICp_max.set_xticklabels(params, rotation=45)
ax_ICe_max.set_xticks(np.arange(0, len(params), 1))
ax_ICe_max.set_xticklabels(params, rotation=45)
plt.tight_layout()
f.savefig('Sobol_IC_max_FC.png')
# fig = plt.figure()
# ax = fig.add_subplot(111, ylim=[0,1])
# idx = 0
# for param in params:
# sobol_idx = sobols['IC_prev_avg_max'][param][200]
# low = results['conf_sobols_first']['IC_prev_avg_max'][param]['low'][200]
# high = results['conf_sobols_first']['IC_prev_avg_max'][param]['high'][200]
# yerr = np.array([low, high])
# print(sobol_idx)
# # print(low)
# # print(high)
# # print(yerr)
# ax.errorbar(idx, sobol_idx, yerr=yerr.sort(), fmt='o')
# idx += 1
# plt.tight_layout()
# fig.savefig('figures/Sobol_FC_errorbar.png')
# Display the figures interactively (blocks until the windows are closed).
plt.show()
### END OF CODE ###
| [
"easyvvuq.Campaign",
"easyvvuq.analysis.QMCAnalysis",
"matplotlib.pyplot.show",
"os.path.dirname",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcParams.update",
"numpy.arange",
"matplotlib.pyplot.tight_layout"
] | [((202, 240), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (221, 240), True, 'import matplotlib.pyplot as plt\n'), ((494, 561), 'easyvvuq.Campaign', 'uq.Campaign', ([], {'state_file': '"""campaign_state_FC.json"""', 'work_dir': 'work_dir'}), "(state_file='campaign_state_FC.json', work_dir=work_dir)\n", (505, 561), True, 'import easyvvuq as uq\n'), ((1122, 1190), 'easyvvuq.analysis.QMCAnalysis', 'uq.analysis.QMCAnalysis', ([], {'sampler': 'my_sampler', 'qoi_cols': 'output_columns'}), '(sampler=my_sampler, qoi_cols=output_columns)\n', (1145, 1190), True, 'import easyvvuq as uq\n'), ((1529, 1553), 'numpy.arange', 'np.arange', (['(0)', '(550 + 1)', '(1)'], {}), '(0, 550 + 1, 1)\n', (1538, 1553), True, 'import numpy as np\n'), ((2707, 2750), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Sobol_IC_max"""'], {'figsize': '[12, 6]'}), "('Sobol_IC_max', figsize=[12, 6])\n", (2717, 2750), True, 'import matplotlib.pyplot as plt\n'), ((3338, 3356), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3354, 3356), True, 'import matplotlib.pyplot as plt\n'), ((3964, 3974), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3972, 3974), True, 'import matplotlib.pyplot as plt\n'), ((398, 423), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (413, 423), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun May 30 12:35:28 2021

@author: Lukas

Train a continuous-variable quantum neural network (Strawberry Fields) to
learn a two-dimensional target function (here: f(x, y) = x*y).
"""
import numpy as np
import tensorflow as tf
import strawberryfields as sf
from strawberryfields import ops
import basis
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
tf.random.set_seed(2021)
np.random.seed(2021)
#==============================================================
# Training data
#==============================================================
# Number of grid points per axis of the training sample
batch = 20
# Bounds of the training interval [a, b]
a = -1
b = 1
# Number of training epochs
epochs=1000
# Penalty weight for undesired properties of the solution (trace regularization)
reg = 1
# Learning rate
lr = 0.03
# Target functions that can be learned
# Noise amplitude (Gaussian)
e=0.0
# Two-dimensional target functions
def f1(x, y, e):
    """Target function z = x*y, with optional Gaussian noise of amplitude `e`.

    Generalization: `np.shape(x)` (instead of `x.shape`) lets the function
    accept plain Python scalars as well as numpy arrays; for arrays the
    behavior is unchanged.
    """
    return x * y + e * np.random.normal(size=np.shape(x))
def f2(x, y, e):
    """Target function z = sin(x*y), with optional Gaussian noise of amplitude `e`.

    Generalization: `np.shape(x)` (instead of `x.shape`) also accepts scalars;
    for arrays the behavior is unchanged.
    """
    return np.sin(x * y) + e * np.random.normal(size=np.shape(x))
def f3(x, y, e):
    """Target function z = sin(x)*sin(y), with optional Gaussian noise of amplitude `e`.

    Generalization: `np.shape(x)` (instead of `x.shape`) also accepts scalars;
    for arrays the behavior is unchanged.
    """
    return np.sin(x) * np.sin(y) + e * np.random.normal(size=np.shape(x))
def f4(x, y, e):
    """Target function z = sin(x) + sin(y), with optional Gaussian noise of amplitude `e`.

    Generalization: `np.shape(x)` (instead of `x.shape`) also accepts scalars;
    for arrays the behavior is unchanged.
    """
    return np.sin(x) + np.sin(y) + e * np.random.normal(size=np.shape(x))
# Select which of the target functions should be learned
def f(x,y,e):
    """Dispatch to the chosen target function (currently f1: z = x*y)."""
    return f1(x,y,e)
# Folder in which images are saved
ordner="multiplication/"
#==============================================================
# Build training and test data
train_data_x = np.linspace(a, b, num=batch)
train_data_y = np.linspace(a, b, num=batch)
test_data_x = np.linspace(a-0.01, b+0.01, num=batch)
test_data_y = np.linspace(a-0.01, b+0.01, num=batch)
X,Y = np.meshgrid(train_data_x,train_data_y)
tX,tY = np.meshgrid(test_data_x,test_data_y)
train_data_x=X.flatten()
train_data_y=Y.flatten()
train_Z = f(train_data_x,train_data_y,e)
train_data_x = tf.constant(train_data_x,tf.float32)
train_data_y = tf.constant(train_data_y,tf.float32)
train_Z = tf.constant(train_Z,tf.float32)
testX = tf.constant(tX.flatten(),tf.float32)
testY = tf.constant(tY.flatten(),tf.float32)
#==============================================================
# Network parameters
#==============================================================
# Size of the network
in_dim = 3
layers = 7
# Accuracy of the simulation
cutoff_dim = 11
#==============================================================
# A simulator is needed to run the program; here the TensorFlow backend is used.
# cutoff_dim sets how many Fock-space dimensions are used in the simulation:
# the larger the number, the smaller the error of each operation, but the
# longer the runtime.
eng = sf.Engine('tf', backend_options={"cutoff_dim": cutoff_dim, "batch_size": batch**2})
#==============================================================
# Initialization
#==============================================================
# Create a program with `in_dim` qumodes
qnn = sf.Program(in_dim)
# Initialize the trainable parameters randomly
weights = basis.init(in_dim, layers)
anzahl = np.prod(weights.shape) # total number of parameters
# Array of symbolic variables used inside the QNN
params = np.arange(anzahl).reshape(weights.shape)
# BUG FIX: `np.str` was deprecated in NumPy 1.20 and removed in 1.24; it was
# merely an alias for the builtin `str`, which is the drop-in replacement.
params = params.astype(str) # variables are simply numbered
par = []
for i in params:
    par.append(qnn.params(*i))
params = np.array(par)
# Symbolic parameters for the network input
x_data = qnn.params("input1")
y_data = qnn.params("input2")
#==============================================================
# Build the structure of the network
with qnn.context as q:
    # Encode the network input as a displacement in position space
    ops.Dgate(x_data) | q[0]
    ops.Dgate(y_data) | q[1]
    for l in range(layers):
        basis.layer(params[l], q)
#==============================================================
# Cost function
#==============================================================
def costfunc(weights):
    """Evaluate the regularized cost of the QNN for the given gate parameters.

    Returns the tuple (cost, loss, trace, output) where `loss` is the mean
    squared error against the training targets, `trace` is the mean state
    trace, and `cost` adds a penalty that keeps the trace close to 1.
    """
    # To use TensorFlow, a dictionary mapping the symbolic variables to the
    # TensorFlow variables has to be created
    dictio = {}
    for symb, var in zip(params.flatten(), tf.reshape(weights, -1)):
        dictio[symb.name] = var
    dictio["input1"] = train_data_x
    dictio["input2"] = train_data_y
    # use the TensorFlow simulator
    state = eng.run(qnn, args=dictio).state
    # Position-quadrature expectation (and variance)
    output = state.quad_expectation(2)[0]
    # Quantity to be minimized
    loss = tf.reduce_mean(tf.abs(output - train_Z) ** 2)
    # Make sure the trace of the output stays close to 1.
    # This penalizes circuits that use operations which cause large numerical
    # errors (i.e. that shift weight into higher Fock states).
    trace = tf.abs(tf.reduce_mean(state.trace()))
    cost = loss + reg * (tf.abs(trace - 1) ** 2)
    return cost, loss, trace, output
"""
#Das Training dieses Netzes dauert mehrere Stunden! zum Testen daher
#den Trainingsteil des Programmes auskommentieren (Gewichte werden aus Datei geladen)
#==============================================================
# Training
#==============================================================
weights = tf.Variable(weights)
history = []
start_time = time.time()
#Nutze einen Optimierer von Tensorflow. Genauer gesagt: Adam (arXiv:1412.6980v9)
opt= tf.keras.optimizers.Adam(learning_rate=lr)
# Führe das Training 1000 mal durch
for i in range(epochs):
# wenn das Programm gelaufen ist, dann resete die Engine
if eng.run_progs:
eng.reset()
with tf.GradientTape() as tape:
cost, loss, trace, output = costfunc(weights)
gradients = tape.gradient(cost, weights)
opt.apply_gradients(zip([gradients], [weights]))
history.append(loss)
#alle 10 Schritte
if i % 10 == 0:
print("Epochen: {} Gesamtkosten: {:.4f} Loss: {:.4f} Trace: {:.4f}".format(i, cost, loss, trace))
#Speichere grafisch den Trainingsfortschritt
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot_surface(X, Y, np.reshape(output,(batch,batch)), cmap="RdYlGn", lw=0.5, rstride=1, cstride=1)
ax.plot_surface(X, Y, np.reshape(train_Z,(batch,batch)), cmap="Greys", lw=0.5, rstride=1, cstride=1,alpha=0.2)
fig.set_size_inches(4.8, 5)
name=ordner+str(i)+".png"
fig.savefig(name, format='png', bbox_inches='tight')
plt.close(fig)
#Gebe die Dauer des Trainings aus
end_time = time.time()
print("Dauer: ",np.round(end_time-start_time),"Sekunden")
np.save("weights_mult",weights)
eng.reset()
# %matplotlib inline
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.sans-serif'] = ['Computer Modern Roman']
plt.style.use('default')
#Erstelle einen Plot des Trainingsverlaufes
plt.plot(history)
plt.ylabel('Kosten')
plt.xlabel('Epoche')
plt.show()
"""
# Test the algorithm on training data it has not learned
#==============================================================
# Test
#==============================================================
weights=np.load("weights_mult.npy")
"""
#Simuliere fehlerhafte Gates durch Veränderung einzelner Parameter
from random import randint
for fehler in range(1):
    print(fehler)
    for anz in range(1):
        weights=np.load("weights_mult.npy")
        for z in range(8):
            i=randint(0,6)
            j=randint(0,27)
            weights[i,j] += 0.1*np.random.normal(size=1)
        cost, loss, trace, output = costfunc(weights)
        eng.reset()
        print(loss)
"""
# Bind the loaded weights to the symbolic circuit parameters and feed the
# (slightly shifted) test grid as input.
dictio = {}
for symb, var in zip(params.flatten(), tf.reshape(weights, -1)):
    dictio[symb.name] = var
dictio["input1"] = testX
dictio["input2"] = testY
# use the TensorFlow simulator
state = eng.run(qnn, args=dictio).state
# Position-quadrature expectation of the output
output = state.quad_expectation(2)[0]
# Visualize the output for all test points
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot_surface(tX, tY, np.reshape(output,(batch,batch)), cmap="RdYlGn", lw=0.5, rstride=1, cstride=1,alpha=0.8)
#ax.plot_surface(X, Y, np.reshape(output,(batch,batch)), cmap="RdYlGn", lw=0.5, rstride=1, cstride=1,alpha=0.8)
ax.plot_surface(X, Y, np.reshape(train_Z,(batch,batch)), cmap="Greys", lw=0.5, rstride=1, cstride=1,alpha=0.4)
fig.set_size_inches(4.8, 5)
name=ordner+"Test"+".pdf"
ax.set_xlabel('x', fontsize=18)
ax.set_ylabel('y', fontsize=18)
ax.set_zlabel('z', fontsize=18)
fig.savefig(name, format='pdf', bbox_inches='tight')
| [
"tensorflow.random.set_seed",
"strawberryfields.Engine",
"numpy.load",
"numpy.random.seed",
"tensorflow.reshape",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"basis.layer",
"numpy.random.normal",
"numpy.prod",
"numpy.meshgrid",
"tensorflow.abs",
"numpy.reshape",
"numpy.linsp... | [((311, 335), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(2021)'], {}), '(2021)\n', (329, 335), True, 'import tensorflow as tf\n'), ((337, 357), 'numpy.random.seed', 'np.random.seed', (['(2021)'], {}), '(2021)\n', (351, 357), True, 'import numpy as np\n'), ((1431, 1459), 'numpy.linspace', 'np.linspace', (['a', 'b'], {'num': 'batch'}), '(a, b, num=batch)\n', (1442, 1459), True, 'import numpy as np\n'), ((1476, 1504), 'numpy.linspace', 'np.linspace', (['a', 'b'], {'num': 'batch'}), '(a, b, num=batch)\n', (1487, 1504), True, 'import numpy as np\n'), ((1520, 1562), 'numpy.linspace', 'np.linspace', (['(a - 0.01)', '(b + 0.01)'], {'num': 'batch'}), '(a - 0.01, b + 0.01, num=batch)\n', (1531, 1562), True, 'import numpy as np\n'), ((1574, 1616), 'numpy.linspace', 'np.linspace', (['(a - 0.01)', '(b + 0.01)'], {'num': 'batch'}), '(a - 0.01, b + 0.01, num=batch)\n', (1585, 1616), True, 'import numpy as np\n'), ((1622, 1661), 'numpy.meshgrid', 'np.meshgrid', (['train_data_x', 'train_data_y'], {}), '(train_data_x, train_data_y)\n', (1633, 1661), True, 'import numpy as np\n'), ((1670, 1707), 'numpy.meshgrid', 'np.meshgrid', (['test_data_x', 'test_data_y'], {}), '(test_data_x, test_data_y)\n', (1681, 1707), True, 'import numpy as np\n'), ((1821, 1858), 'tensorflow.constant', 'tf.constant', (['train_data_x', 'tf.float32'], {}), '(train_data_x, tf.float32)\n', (1832, 1858), True, 'import tensorflow as tf\n'), ((1874, 1911), 'tensorflow.constant', 'tf.constant', (['train_data_y', 'tf.float32'], {}), '(train_data_y, tf.float32)\n', (1885, 1911), True, 'import tensorflow as tf\n'), ((1922, 1954), 'tensorflow.constant', 'tf.constant', (['train_Z', 'tf.float32'], {}), '(train_Z, tf.float32)\n', (1933, 1954), True, 'import tensorflow as tf\n'), ((2694, 2784), 'strawberryfields.Engine', 'sf.Engine', (['"""tf"""'], {'backend_options': "{'cutoff_dim': cutoff_dim, 'batch_size': batch ** 2}"}), "('tf', backend_options={'cutoff_dim': cutoff_dim, 'batch_size': \n 
batch ** 2})\n", (2703, 2784), True, 'import strawberryfields as sf\n'), ((3004, 3022), 'strawberryfields.Program', 'sf.Program', (['in_dim'], {}), '(in_dim)\n', (3014, 3022), True, 'import strawberryfields as sf\n'), ((3072, 3098), 'basis.init', 'basis.init', (['in_dim', 'layers'], {}), '(in_dim, layers)\n', (3082, 3098), False, 'import basis\n'), ((3110, 3132), 'numpy.prod', 'np.prod', (['weights.shape'], {}), '(weights.shape)\n', (3117, 3132), True, 'import numpy as np\n'), ((3441, 3454), 'numpy.array', 'np.array', (['par'], {}), '(par)\n', (3449, 3454), True, 'import numpy as np\n'), ((7476, 7503), 'numpy.load', 'np.load', (['"""weights_mult.npy"""'], {}), "('weights_mult.npy')\n", (7483, 7503), True, 'import numpy as np\n'), ((8350, 8362), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8360, 8362), True, 'import matplotlib.pyplot as plt\n'), ((8036, 8059), 'tensorflow.reshape', 'tf.reshape', (['weights', '(-1)'], {}), '(weights, -1)\n', (8046, 8059), True, 'import tensorflow as tf\n'), ((8432, 8466), 'numpy.reshape', 'np.reshape', (['output', '(batch, batch)'], {}), '(output, (batch, batch))\n', (8442, 8466), True, 'import numpy as np\n'), ((8657, 8692), 'numpy.reshape', 'np.reshape', (['train_Z', '(batch, batch)'], {}), '(train_Z, (batch, batch))\n', (8667, 8692), True, 'import numpy as np\n'), ((939, 952), 'numpy.sin', 'np.sin', (['(x * y)'], {}), '(x * y)\n', (945, 952), True, 'import numpy as np\n'), ((3256, 3273), 'numpy.arange', 'np.arange', (['anzahl'], {}), '(anzahl)\n', (3265, 3273), True, 'import numpy as np\n'), ((3757, 3774), 'strawberryfields.ops.Dgate', 'ops.Dgate', (['x_data'], {}), '(x_data)\n', (3766, 3774), False, 'from strawberryfields import ops\n'), ((3787, 3804), 'strawberryfields.ops.Dgate', 'ops.Dgate', (['y_data'], {}), '(y_data)\n', (3796, 3804), False, 'from strawberryfields import ops\n'), ((3862, 3887), 'basis.layer', 'basis.layer', (['params[l]', 'q'], {}), '(params[l], q)\n', (3873, 3887), False, 'import basis\n'), 
((4318, 4341), 'tensorflow.reshape', 'tf.reshape', (['weights', '(-1)'], {}), '(weights, -1)\n', (4328, 4341), True, 'import tensorflow as tf\n'), ((878, 908), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'x.shape'}), '(size=x.shape)\n', (894, 908), True, 'import numpy as np\n'), ((955, 985), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'x.shape'}), '(size=x.shape)\n', (971, 985), True, 'import numpy as np\n'), ((1016, 1025), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1022, 1025), True, 'import numpy as np\n'), ((1026, 1035), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (1032, 1035), True, 'import numpy as np\n'), ((1040, 1070), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'x.shape'}), '(size=x.shape)\n', (1056, 1070), True, 'import numpy as np\n'), ((1101, 1110), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1107, 1110), True, 'import numpy as np\n'), ((1112, 1121), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (1118, 1121), True, 'import numpy as np\n'), ((1126, 1156), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'x.shape'}), '(size=x.shape)\n', (1142, 1156), True, 'import numpy as np\n'), ((4689, 4713), 'tensorflow.abs', 'tf.abs', (['(output - train_Z)'], {}), '(output - train_Z)\n', (4695, 4713), True, 'import tensorflow as tf\n'), ((5044, 5061), 'tensorflow.abs', 'tf.abs', (['(trace - 1)'], {}), '(trace - 1)\n', (5050, 5061), True, 'import tensorflow as tf\n')] |
import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_equal
import pyproj
from pyproj import Proj, Transformer, itransform, transform
from pyproj.exceptions import ProjError
def test_tranform_wgs84_to_custom():
custom_proj = pyproj.Proj(
"+proj=geos +lon_0=0.000000 +lat_0=0 +h=35807.414063"
" +a=6378.169000 +b=6356.583984"
)
wgs84 = pyproj.Proj("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
lat, lon = 51.04715, 3.23406
xx, yy = pyproj.transform(wgs84, custom_proj, lon, lat)
assert "{:.3f} {:.3f}".format(xx, yy) == "212.623 4604.975"
def test_transform_wgs84_to_alaska():
lat_lon_proj = pyproj.Proj(init="epsg:4326", preserve_units=False)
alaska_aea_proj = pyproj.Proj(init="epsg:2964", preserve_units=False)
test = (-179.72638, 49.752533)
xx, yy = pyproj.transform(lat_lon_proj, alaska_aea_proj, *test)
assert "{:.3f} {:.3f}".format(xx, yy) == "-1824924.495 330822.800"
def test_illegal_transformation():
# issue 202
p1 = pyproj.Proj(init="epsg:4326")
p2 = pyproj.Proj(init="epsg:3857")
xx, yy = pyproj.transform(
p1, p2, (-180, -180, 180, 180, -180), (-90, 90, 90, -90, -90)
)
assert np.all(np.isinf(xx))
assert np.all(np.isinf(yy))
with pytest.raises(ProjError):
pyproj.transform(
p1, p2, (-180, -180, 180, 180, -180), (-90, 90, 90, -90, -90), errcheck=True
)
def test_lambert_conformal_transform():
# issue 207
Midelt = pyproj.Proj(init="epsg:26191")
WGS84 = pyproj.Proj(init="epsg:4326")
E = 567623.931
N = 256422.787
h = 1341.467
Long1, Lat1, H1 = pyproj.transform(Midelt, WGS84, E, N, h, radians=False)
assert_almost_equal((Long1, Lat1, H1), (-4.6753456, 32.902199, 1341.467), decimal=5)
def test_equivalent_crs():
with pytest.warns(UserWarning):
transformer = Transformer.from_crs("epsg:4326", 4326, skip_equivalent=True)
assert transformer._transformer.projections_equivalent
assert transformer._transformer.projections_exact_same
assert transformer._transformer.skip_equivalent
def test_equivalent_crs__disabled():
with pytest.warns(UserWarning):
transformer = Transformer.from_crs("epsg:4326", 4326)
assert not transformer._transformer.skip_equivalent
assert transformer._transformer.projections_equivalent
assert transformer._transformer.projections_exact_same
def test_equivalent_crs__different():
with pytest.warns(UserWarning):
transformer = Transformer.from_crs("epsg:4326", 3857, skip_equivalent=True)
assert transformer._transformer.skip_equivalent
assert not transformer._transformer.projections_equivalent
assert not transformer._transformer.projections_exact_same
def test_equivalent_proj():
transformer = Transformer.from_proj(
"+init=epsg:4326", pyproj.Proj(4326).crs.to_proj4(), skip_equivalent=True
)
assert transformer._transformer.skip_equivalent
assert transformer._transformer.projections_equivalent
assert not transformer._transformer.projections_exact_same
def test_equivalent_proj__disabled():
transformer = Transformer.from_proj(3857, pyproj.Proj(3857).crs.to_proj4())
assert not transformer._transformer.skip_equivalent
assert not transformer._transformer.projections_equivalent
assert not transformer._transformer.projections_exact_same
def test_equivalent_proj__different():
transformer = Transformer.from_proj(3857, 4326, skip_equivalent=True)
assert transformer._transformer.skip_equivalent
assert not transformer._transformer.projections_equivalent
assert not transformer._transformer.projections_exact_same
def test_equivalent_pipeline():
transformer = Transformer.from_pipeline(
"+proj=pipeline +step +proj=longlat +ellps=WGS84 +step "
"+proj=unitconvert +xy_in=rad +xy_out=deg"
)
assert not transformer._transformer.skip_equivalent
assert not transformer._transformer.projections_equivalent
assert not transformer._transformer.projections_exact_same
def test_4d_transform():
transformer = Transformer.from_pipeline("+init=ITRF2008:ITRF2000")
assert_almost_equal(
transformer.transform(
xx=3513638.19380, yy=778956.45250, zz=5248216.46900, tt=2008.75
),
(3513638.1999428216, 778956.4532640711, 5248216.453456361, 2008.75),
)
def test_2d_with_time_transform():
transformer = Transformer.from_pipeline("+init=ITRF2008:ITRF2000")
assert_almost_equal(
transformer.transform(xx=3513638.19380, yy=778956.45250, tt=2008.75),
(3513638.1999428216, 778956.4532640711, 2008.75),
)
def test_4d_transform_crs_obs1():
transformer = Transformer.from_proj(7789, 8401)
assert_almost_equal(
transformer.transform(
xx=3496737.2679, yy=743254.4507, zz=5264462.9620, tt=2019.0
),
(3496737.757717311, 743253.9940103051, 5264462.701132784, 2019.0),
)
def test_4d_transform_orginal_crs_obs1():
assert_almost_equal(
transform(7789, 8401, x=3496737.2679, y=743254.4507, z=5264462.9620, tt=2019.0),
(3496737.757717311, 743253.9940103051, 5264462.701132784, 2019.0),
)
def test_4d_transform_crs_obs2():
transformer = Transformer.from_proj(4896, 7930)
assert_almost_equal(
transformer.transform(
xx=3496737.2679, yy=743254.4507, zz=5264462.9620, tt=2019.0
),
(3496737.7857162016, 743254.0394113371, 5264462.643659916, 2019.0),
)
def test_2d_with_time_transform_crs_obs2():
transformer = Transformer.from_proj(4896, 7930)
assert_almost_equal(
transformer.transform(xx=3496737.2679, yy=743254.4507, tt=2019.0),
(3496737.4105305015, 743254.1014318303, 2019.0),
)
def test_2d_with_time_transform_original_crs_obs2():
assert_almost_equal(
transform(4896, 7930, x=3496737.2679, y=743254.4507, tt=2019.0),
(3496737.4105305015, 743254.1014318303, 2019.0),
)
def test_4d_itransform():
transformer = Transformer.from_pipeline("+init=ITRF2008:ITRF2000")
assert_almost_equal(
list(
transformer.itransform(
[(3513638.19380, 778956.45250, 5248216.46900, 2008.75)]
)
),
[(3513638.1999428216, 778956.4532640711, 5248216.453456361, 2008.75)],
)
def test_3d_time_itransform():
transformer = Transformer.from_pipeline("+init=ITRF2008:ITRF2000")
assert_almost_equal(
list(
transformer.itransform(
[(3513638.19380, 778956.45250, 2008.75)], time_3rd=True
)
),
[(3513638.1999428216, 778956.4532640711, 2008.75)],
)
def test_4d_itransform_orginal_crs_obs1():
assert_almost_equal(
list(
itransform(7789, 8401, [(3496737.2679, 743254.4507, 5264462.9620, 2019.0)])
),
[(3496737.757717311, 743253.9940103051, 5264462.701132784, 2019.0)],
)
def test_2d_with_time_itransform_original_crs_obs2():
assert_almost_equal(
list(
itransform(4896, 7930, [(3496737.2679, 743254.4507, 2019.0)], time_3rd=True)
),
[(3496737.4105305015, 743254.1014318303, 2019.0)],
)
def test_itransform_time_3rd_invalid():
with pytest.raises(ValueError, match="'time_3rd' is only valid for 3 coordinates."):
list(
itransform(
7789,
8401,
[(3496737.2679, 743254.4507, 5264462.9620, 2019.0)],
time_3rd=True,
)
)
with pytest.raises(ValueError, match="'time_3rd' is only valid for 3 coordinates."):
list(itransform(7789, 8401, [(3496737.2679, 743254.4507)], time_3rd=True))
def test_transform_no_error():
pj = Proj(init="epsg:4555")
pjx, pjy = pj(116.366, 39.867)
transform(pj, Proj(4326), pjx, pjy, radians=True, errcheck=True)
def test_itransform_no_error():
pj = Proj(init="epsg:4555")
pjx, pjy = pj(116.366, 39.867)
list(itransform(pj, Proj(4326), [(pjx, pjy)], radians=True, errcheck=True))
def test_transform_exception():
transformer = Transformer.from_proj("+init=epsg:4326", "+init=epsg:27700")
with pytest.raises(ProjError):
transformer.transform(100000, 100000, errcheck=True)
def test_transform_no_exception():
# issue 249
transformer = Transformer.from_proj("+init=epsg:4326", "+init=epsg:27700")
transformer.transform(1.716073972, 52.658007833, errcheck=True)
transformer.itransform([(1.716073972, 52.658007833)], errcheck=True)
def test_transform_radians():
WGS84 = pyproj.Proj("+init=EPSG:4326")
ECEF = pyproj.Proj(proj="geocent", ellps="WGS84", datum="WGS84")
assert_almost_equal(
pyproj.transform(
ECEF, WGS84, -2704026.010, -4253051.810, 3895878.820, radians=True
),
(-2.137113493845668, 0.6613203738996222, -20.531156923621893),
)
assert_almost_equal(
pyproj.transform(
WGS84,
ECEF,
-2.137113493845668,
0.6613203738996222,
-20.531156923621893,
radians=True,
),
(-2704026.010, -4253051.810, 3895878.820),
)
def test_itransform_radians():
WGS84 = pyproj.Proj("+init=EPSG:4326")
ECEF = pyproj.Proj(proj="geocent", ellps="WGS84", datum="WGS84")
assert_almost_equal(
list(
pyproj.itransform(
ECEF, WGS84, [(-2704026.010, -4253051.810, 3895878.820)], radians=True
)
),
[(-2.137113493845668, 0.6613203738996222, -20.531156923621893)],
)
assert_almost_equal(
list(
pyproj.itransform(
WGS84,
ECEF,
[(-2.137113493845668, 0.6613203738996222, -20.531156923621893)],
radians=True,
)
),
[(-2704026.010, -4253051.810, 3895878.820)],
)
| [
"pyproj.Transformer.from_proj",
"pytest.warns",
"numpy.testing.assert_almost_equal",
"numpy.isinf",
"pytest.raises",
"pyproj.Proj",
"pyproj.Transformer.from_crs",
"pyproj.itransform",
"pyproj.transform",
"pyproj.Transformer.from_pipeline"
] | [((265, 371), 'pyproj.Proj', 'pyproj.Proj', (['"""+proj=geos +lon_0=0.000000 +lat_0=0 +h=35807.414063 +a=6378.169000 +b=6356.583984"""'], {}), "(\n '+proj=geos +lon_0=0.000000 +lat_0=0 +h=35807.414063 +a=6378.169000 +b=6356.583984'\n )\n", (276, 371), False, 'import pyproj\n'), ((399, 462), 'pyproj.Proj', 'pyproj.Proj', (['"""+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"""'], {}), "('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')\n", (410, 462), False, 'import pyproj\n'), ((509, 555), 'pyproj.transform', 'pyproj.transform', (['wgs84', 'custom_proj', 'lon', 'lat'], {}), '(wgs84, custom_proj, lon, lat)\n', (525, 555), False, 'import pyproj\n'), ((679, 730), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:4326"""', 'preserve_units': '(False)'}), "(init='epsg:4326', preserve_units=False)\n", (690, 730), False, 'import pyproj\n'), ((753, 804), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:2964"""', 'preserve_units': '(False)'}), "(init='epsg:2964', preserve_units=False)\n", (764, 804), False, 'import pyproj\n'), ((853, 907), 'pyproj.transform', 'pyproj.transform', (['lat_lon_proj', 'alaska_aea_proj', '*test'], {}), '(lat_lon_proj, alaska_aea_proj, *test)\n', (869, 907), False, 'import pyproj\n'), ((1041, 1070), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (1052, 1070), False, 'import pyproj\n'), ((1080, 1109), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:3857"""'}), "(init='epsg:3857')\n", (1091, 1109), False, 'import pyproj\n'), ((1123, 1202), 'pyproj.transform', 'pyproj.transform', (['p1', 'p2', '(-180, -180, 180, 180, -180)', '(-90, 90, 90, -90, -90)'], {}), '(p1, p2, (-180, -180, 180, 180, -180), (-90, 90, 90, -90, -90))\n', (1139, 1202), False, 'import pyproj\n'), ((1512, 1542), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:26191"""'}), "(init='epsg:26191')\n", (1523, 1542), False, 'import pyproj\n'), ((1555, 1584), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:4326"""'}), 
"(init='epsg:4326')\n", (1566, 1584), False, 'import pyproj\n'), ((1664, 1719), 'pyproj.transform', 'pyproj.transform', (['Midelt', 'WGS84', 'E', 'N', 'h'], {'radians': '(False)'}), '(Midelt, WGS84, E, N, h, radians=False)\n', (1680, 1719), False, 'import pyproj\n'), ((1724, 1812), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['(Long1, Lat1, H1)', '(-4.6753456, 32.902199, 1341.467)'], {'decimal': '(5)'}), '((Long1, Lat1, H1), (-4.6753456, 32.902199, 1341.467),\n decimal=5)\n', (1743, 1812), False, 'from numpy.testing import assert_almost_equal, assert_equal\n'), ((3471, 3526), 'pyproj.Transformer.from_proj', 'Transformer.from_proj', (['(3857)', '(4326)'], {'skip_equivalent': '(True)'}), '(3857, 4326, skip_equivalent=True)\n', (3492, 3526), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((3757, 3890), 'pyproj.Transformer.from_pipeline', 'Transformer.from_pipeline', (['"""+proj=pipeline +step +proj=longlat +ellps=WGS84 +step +proj=unitconvert +xy_in=rad +xy_out=deg"""'], {}), "(\n '+proj=pipeline +step +proj=longlat +ellps=WGS84 +step +proj=unitconvert +xy_in=rad +xy_out=deg'\n )\n", (3782, 3890), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((4133, 4185), 'pyproj.Transformer.from_pipeline', 'Transformer.from_pipeline', (['"""+init=ITRF2008:ITRF2000"""'], {}), "('+init=ITRF2008:ITRF2000')\n", (4158, 4185), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((4467, 4519), 'pyproj.Transformer.from_pipeline', 'Transformer.from_pipeline', (['"""+init=ITRF2008:ITRF2000"""'], {}), "('+init=ITRF2008:ITRF2000')\n", (4492, 4519), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((4741, 4774), 'pyproj.Transformer.from_proj', 'Transformer.from_proj', (['(7789)', '(8401)'], {}), '(7789, 8401)\n', (4762, 4774), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((5288, 5321), 'pyproj.Transformer.from_proj', 'Transformer.from_proj', (['(4896)', 
'(7930)'], {}), '(4896, 7930)\n', (5309, 5321), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((5607, 5640), 'pyproj.Transformer.from_proj', 'Transformer.from_proj', (['(4896)', '(7930)'], {}), '(4896, 7930)\n', (5628, 5640), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((6066, 6118), 'pyproj.Transformer.from_pipeline', 'Transformer.from_pipeline', (['"""+init=ITRF2008:ITRF2000"""'], {}), "('+init=ITRF2008:ITRF2000')\n", (6091, 6118), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((6427, 6479), 'pyproj.Transformer.from_pipeline', 'Transformer.from_pipeline', (['"""+init=ITRF2008:ITRF2000"""'], {}), "('+init=ITRF2008:ITRF2000')\n", (6452, 6479), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((7796, 7818), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:4555"""'}), "(init='epsg:4555')\n", (7800, 7818), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((7966, 7988), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:4555"""'}), "(init='epsg:4555')\n", (7970, 7988), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((8156, 8216), 'pyproj.Transformer.from_proj', 'Transformer.from_proj', (['"""+init=epsg:4326"""', '"""+init=epsg:27700"""'], {}), "('+init=epsg:4326', '+init=epsg:27700')\n", (8177, 8216), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((8384, 8444), 'pyproj.Transformer.from_proj', 'Transformer.from_proj', (['"""+init=epsg:4326"""', '"""+init=epsg:27700"""'], {}), "('+init=epsg:4326', '+init=epsg:27700')\n", (8405, 8444), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((8630, 8660), 'pyproj.Proj', 'pyproj.Proj', (['"""+init=EPSG:4326"""'], {}), "('+init=EPSG:4326')\n", (8641, 8660), False, 'import pyproj\n'), ((8672, 8729), 'pyproj.Proj', 'pyproj.Proj', ([], {'proj': '"""geocent"""', 'ellps': '"""WGS84"""', 'datum': '"""WGS84"""'}), "(proj='geocent', 
ellps='WGS84', datum='WGS84')\n", (8683, 8729), False, 'import pyproj\n'), ((9273, 9303), 'pyproj.Proj', 'pyproj.Proj', (['"""+init=EPSG:4326"""'], {}), "('+init=EPSG:4326')\n", (9284, 9303), False, 'import pyproj\n'), ((9315, 9372), 'pyproj.Proj', 'pyproj.Proj', ([], {'proj': '"""geocent"""', 'ellps': '"""WGS84"""', 'datum': '"""WGS84"""'}), "(proj='geocent', ellps='WGS84', datum='WGS84')\n", (9326, 9372), False, 'import pyproj\n'), ((1235, 1247), 'numpy.isinf', 'np.isinf', (['xx'], {}), '(xx)\n', (1243, 1247), True, 'import numpy as np\n'), ((1267, 1279), 'numpy.isinf', 'np.isinf', (['yy'], {}), '(yy)\n', (1275, 1279), True, 'import numpy as np\n'), ((1290, 1314), 'pytest.raises', 'pytest.raises', (['ProjError'], {}), '(ProjError)\n', (1303, 1314), False, 'import pytest\n'), ((1324, 1423), 'pyproj.transform', 'pyproj.transform', (['p1', 'p2', '(-180, -180, 180, 180, -180)', '(-90, 90, 90, -90, -90)'], {'errcheck': '(True)'}), '(p1, p2, (-180, -180, 180, 180, -180), (-90, 90, 90, -90, -\n 90), errcheck=True)\n', (1340, 1423), False, 'import pyproj\n'), ((1847, 1872), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (1859, 1872), False, 'import pytest\n'), ((1896, 1957), 'pyproj.Transformer.from_crs', 'Transformer.from_crs', (['"""epsg:4326"""', '(4326)'], {'skip_equivalent': '(True)'}), "('epsg:4326', 4326, skip_equivalent=True)\n", (1916, 1957), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((2176, 2201), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2188, 2201), False, 'import pytest\n'), ((2225, 2264), 'pyproj.Transformer.from_crs', 'Transformer.from_crs', (['"""epsg:4326"""', '(4326)'], {}), "('epsg:4326', 4326)\n", (2245, 2264), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((2488, 2513), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2500, 2513), False, 'import pytest\n'), ((2537, 2598), 'pyproj.Transformer.from_crs', 
'Transformer.from_crs', (['"""epsg:4326"""', '(3857)'], {'skip_equivalent': '(True)'}), "('epsg:4326', 3857, skip_equivalent=True)\n", (2557, 2598), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((5072, 5150), 'pyproj.transform', 'transform', (['(7789)', '(8401)'], {'x': '(3496737.2679)', 'y': '(743254.4507)', 'z': '(5264462.962)', 'tt': '(2019.0)'}), '(7789, 8401, x=3496737.2679, y=743254.4507, z=5264462.962, tt=2019.0)\n', (5081, 5150), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((5892, 5955), 'pyproj.transform', 'transform', (['(4896)', '(7930)'], {'x': '(3496737.2679)', 'y': '(743254.4507)', 'tt': '(2019.0)'}), '(4896, 7930, x=3496737.2679, y=743254.4507, tt=2019.0)\n', (5901, 5955), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((7296, 7374), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""\'time_3rd\' is only valid for 3 coordinates."""'}), '(ValueError, match="\'time_3rd\' is only valid for 3 coordinates.")\n', (7309, 7374), False, 'import pytest\n'), ((7591, 7669), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""\'time_3rd\' is only valid for 3 coordinates."""'}), '(ValueError, match="\'time_3rd\' is only valid for 3 coordinates.")\n', (7604, 7669), False, 'import pytest\n'), ((7872, 7882), 'pyproj.Proj', 'Proj', (['(4326)'], {}), '(4326)\n', (7876, 7882), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((8226, 8250), 'pytest.raises', 'pytest.raises', (['ProjError'], {}), '(ProjError)\n', (8239, 8250), False, 'import pytest\n'), ((8763, 8849), 'pyproj.transform', 'pyproj.transform', (['ECEF', 'WGS84', '(-2704026.01)', '(-4253051.81)', '(3895878.82)'], {'radians': '(True)'}), '(ECEF, WGS84, -2704026.01, -4253051.81, 3895878.82, radians\n =True)\n', (8779, 8849), False, 'import pyproj\n'), ((8982, 9091), 'pyproj.transform', 'pyproj.transform', (['WGS84', 'ECEF', '(-2.137113493845668)', '(0.6613203738996222)', 
'(-20.531156923621893)'], {'radians': '(True)'}), '(WGS84, ECEF, -2.137113493845668, 0.6613203738996222, -\n 20.531156923621893, radians=True)\n', (8998, 9091), False, 'import pyproj\n'), ((6814, 6888), 'pyproj.itransform', 'itransform', (['(7789)', '(8401)', '[(3496737.2679, 743254.4507, 5264462.962, 2019.0)]'], {}), '(7789, 8401, [(3496737.2679, 743254.4507, 5264462.962, 2019.0)])\n', (6824, 6888), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((7091, 7167), 'pyproj.itransform', 'itransform', (['(4896)', '(7930)', '[(3496737.2679, 743254.4507, 2019.0)]'], {'time_3rd': '(True)'}), '(4896, 7930, [(3496737.2679, 743254.4507, 2019.0)], time_3rd=True)\n', (7101, 7167), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((7402, 7495), 'pyproj.itransform', 'itransform', (['(7789)', '(8401)', '[(3496737.2679, 743254.4507, 5264462.962, 2019.0)]'], {'time_3rd': '(True)'}), '(7789, 8401, [(3496737.2679, 743254.4507, 5264462.962, 2019.0)],\n time_3rd=True)\n', (7412, 7495), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((7684, 7752), 'pyproj.itransform', 'itransform', (['(7789)', '(8401)', '[(3496737.2679, 743254.4507)]'], {'time_3rd': '(True)'}), '(7789, 8401, [(3496737.2679, 743254.4507)], time_3rd=True)\n', (7694, 7752), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((8048, 8058), 'pyproj.Proj', 'Proj', (['(4326)'], {}), '(4326)\n', (8052, 8058), False, 'from pyproj import Proj, Transformer, itransform, transform\n'), ((9424, 9514), 'pyproj.itransform', 'pyproj.itransform', (['ECEF', 'WGS84', '[(-2704026.01, -4253051.81, 3895878.82)]'], {'radians': '(True)'}), '(ECEF, WGS84, [(-2704026.01, -4253051.81, 3895878.82)],\n radians=True)\n', (9441, 9514), False, 'import pyproj\n'), ((9686, 9800), 'pyproj.itransform', 'pyproj.itransform', (['WGS84', 'ECEF', '[(-2.137113493845668, 0.6613203738996222, -20.531156923621893)]'], {'radians': '(True)'}), '(WGS84, ECEF, 
[(-2.137113493845668, 0.6613203738996222, -\n 20.531156923621893)], radians=True)\n', (9703, 9800), False, 'import pyproj\n'), ((2875, 2892), 'pyproj.Proj', 'pyproj.Proj', (['(4326)'], {}), '(4326)\n', (2886, 2892), False, 'import pyproj\n'), ((3196, 3213), 'pyproj.Proj', 'pyproj.Proj', (['(3857)'], {}), '(3857)\n', (3207, 3213), False, 'import pyproj\n')] |
import numpy as np
def parseData( path, n=25000, startWith=0):
    """Parse game results and Elo ratings from a PGN file.

    Parameters
    ----------
    path : str
        Path to the PGN file to parse.
    n : int
        Maximum number of games to parse.
    startWith : int
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    dict
        One-hot result arrays ("whiteWins", "blackWins", "draws") and the
        Elo-rating arrays ("whiteElo", "blackElo"), each of shape (n, 1).
    """
    whiteWins = np.zeros( shape=(n, 1))
    blackWins = np.zeros( shape=(n, 1))
    draws = np.zeros( shape=(n, 1))
    whiteElo = np.zeros( shape=(n, 1))
    blackElo = np.zeros( shape=(n, 1))
    # i is the index of the current game; it becomes 0 on the first
    # '[Event' header, so writes below always target a started game.
    i = -1
    # BUG FIX: the original called open() without ever closing the handle;
    # the context manager guarantees the file is released.
    with open(path) as games:
        for row in games:
            if i == n:
                break
            if row.startswith('[Event'):
                i += 1
            elif row.startswith('[Result'):
                result = row.split('"')[1]
                if result == '1/2-1/2':
                    draws[i] = 1.0
                elif result == '1-0':
                    whiteWins[i] = 1.0
                else:
                    blackWins[i] = 1.0
            elif row.startswith('[WhiteElo'):
                whiteElo[i] = int(row.split('"')[1])
            elif row.startswith('[BlackElo'):
                blackElo[i] = int(row.split('"')[1])
    return {"whiteWins": whiteWins,
            "blackWins": blackWins,
            "draws": draws,
            "whiteElo": whiteElo,
            "blackElo": blackElo}
| [
"numpy.zeros"
] | [((106, 128), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, 1)'}), '(shape=(n, 1))\n', (114, 128), True, 'import numpy as np\n'), ((146, 168), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, 1)'}), '(shape=(n, 1))\n', (154, 168), True, 'import numpy as np\n'), ((182, 204), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, 1)'}), '(shape=(n, 1))\n', (190, 204), True, 'import numpy as np\n'), ((221, 243), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, 1)'}), '(shape=(n, 1))\n', (229, 243), True, 'import numpy as np\n'), ((260, 282), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, 1)'}), '(shape=(n, 1))\n', (268, 282), True, 'import numpy as np\n')] |
import numpy as np
import functions_plotting as plot
import functions_misc as misc
def plot_2pdataset(data_in):
    """Plot the raw, trial-averaged Ca data contained in the file, all protocols.

    Parameters
    ----------
    data_in : str or dict
        Either the path to a preprocessed ``.npz`` file or an
        already-loaded data dictionary (one entry per protocol).

    Returns
    -------
    list
        The figure handles produced, one per protocol.
    """
    # BUG FIX: the original tested ``data_in is str`` -- an identity check
    # against the type object itself, which is False for any actual path
    # string, so the file-loading branch was unreachable.
    if isinstance(data_in, str):
        # file = r'J:\<NAME>\Data\DG_180816_a\2018_10_03\2\preProcessed.npz'
        contents = np.load(data_in, allow_pickle=True)
        data = contents['data'].item()
        # metadata = contents['metadata'].item()
    else:
        data = data_in
    # initialize a list to store the figure handles
    fig_list = []
    # for all the protocols
    for protocol in data:
        # analyze the DG info
        ca_data = data[protocol]['data']
        # ca_data axes (per the original notes): cells, time, reps, stimuli
        # get the number of stimuli
        stim_num = ca_data.shape[3]
        # get the number of time points
        time_num = ca_data.shape[1]
        # average across repetitions, then concatenate the stimuli along time
        trial_average = np.nanmean(ca_data, axis=2)
        cat_matrix = np.reshape(trial_average, (-1, time_num*stim_num), order='F')
        fig_list.append(plot.plot_image([misc.normalize_matrix(cat_matrix, axis=1)], ylabel='Traces', title=protocol))
        fig_list[-1].show()
    return fig_list
| [
"numpy.load",
"functions_misc.normalize_matrix",
"numpy.reshape",
"numpy.nanmean"
] | [((361, 396), 'numpy.load', 'np.load', (['data_in'], {'allow_pickle': '(True)'}), '(data_in, allow_pickle=True)\n', (368, 396), True, 'import numpy as np\n'), ((1095, 1122), 'numpy.nanmean', 'np.nanmean', (['ca_data'], {'axis': '(2)'}), '(ca_data, axis=2)\n', (1105, 1122), True, 'import numpy as np\n'), ((1144, 1207), 'numpy.reshape', 'np.reshape', (['trial_average', '(-1, time_num * stim_num)'], {'order': '"""F"""'}), "(trial_average, (-1, time_num * stim_num), order='F')\n", (1154, 1207), True, 'import numpy as np\n'), ((1248, 1289), 'functions_misc.normalize_matrix', 'misc.normalize_matrix', (['cat_matrix'], {'axis': '(1)'}), '(cat_matrix, axis=1)\n', (1269, 1289), True, 'import functions_misc as misc\n')] |
import numpy as np
from scipy.optimize import least_squares
from .fitting import rmse
def linKK(f, Z, c=0.85, max_M=50):
    """ A method for implementing the Lin-KK test for validating linearity [1]
    Parameters
    ----------
    f: np.ndarray
        measured frequencies
    Z: np.ndarray of complex numbers
        measured impedances
    c: np.float
        cutoff for mu
    max_M: int
        the maximum number of RC elements
    Returns
    -------
    M: int
        the number of RC elements used
    mu: np.float
        under- or over-fitting measure
    Z_fit: np.ndarray of complex numbers
        impedance of fit at input frequencies
    resids_real: np.ndarray
        real-part residuals of the fit at input frequencies
    resids_imag: np.ndarray
        imaginary-part residuals of the fit at input frequencies
    Notes
    -----
    The lin-KK method from Schönleber et al. [1] is a quick test for checking
    the validity of EIS data. The validity of an impedance spectrum is
    analyzed by its reproducibility by a Kramers-Kronig (KK) compliant
    equivalent circuit. In particular, the model used in the lin-KK test is
    an ohmic resistor, :math:`R_{Ohm}`, and :math:`M` RC elements.
    .. math::
        \\hat Z = R_{Ohm} + \\sum_{k=1}^{M} \\frac{R_k}{1 + j \\omega \\tau_k}
    The :math:`M` time constants, :math:`\\tau_k`, are distributed
    logarithmically,
    .. math::
        \\tau_1 = \\frac{1}{\\omega_{max}} ; \\tau_M = \\frac{1}{\\omega_{min}}
        ; \\tau_k = 10^{\\log{(\\tau_{min}) + \\frac{k-1}{M-1}\\log{{(
        \\frac{\\tau_{max}}{\\tau_{min}}}})}}
    and are not fit during the test (only :math:`R_{Ohm}` and :math:`R_{k}`
    are free parameters).
    In order to prevent under- or over-fitting, Schönleber et al. propose using
    the ratio of positive resistor mass to negative resistor mass as a metric
    for finding the optimal number of RC elements.
    .. math::
        \\mu = 1 - \\frac{\\sum_{R_k \\ge 0} |R_k|}{\\sum_{R_k < 0} |R_k|}
    The argument :code:`c` defines the cutoff value for :math:`\\mu`. The
    algorithm starts at :code:`M = 3` and iterates up to :code:`max_M` until a
    :math:`\\mu < c` is reached. The default of 0.85 is simply a heuristic
    value based off of the experience of Schönleber et al.
    If the argument :code:`c` is :code:`None`, then the automatic determination
    of RC elements is turned off and the solution is calculated for
    :code:`max_M` RC elements. This manual mode should be used with caution as
    under- and over-fitting should be avoided.
    [1] <NAME> al. A Method for Improving the Robustness of
    linear Kramers-Kronig Validity Tests. Electrochimica Acta 131, 20–27 (2014)
    `doi: 10.1016/j.electacta.2014.01.034
    <https://doi.org/10.1016/j.electacta.2014.01.034>`_.
    """
    def get_tc_distribution(f, M):
        """ Returns the distribution of time constants for the linKK method """
        # endpoints are fixed at 1/w_max and 1/w_min; interior points are
        # spaced logarithmically between them
        t_max = 1/np.min(f)
        t_min = 1/np.max(f)

        ts = np.zeros(shape=(M,))
        ts[0] = t_min
        ts[-1] = t_max
        if M > 1:
            for k in range(2, M):
                ts[k-1] = 10**(np.log10(t_min) +
                               ((k-1)/(M-1))*np.log10(t_max/t_min))
        ts *= 2*np.pi
        return ts

    if c is not None:
        # automatic mode: grow M until the under/over-fitting measure mu
        # drops below the cutoff c (or max_M is reached)
        M = 0
        mu = 1
        while mu > c and M <= max_M:
            M += 1
            ts = get_tc_distribution(f, M)
            p_values, mu = fitLinKK(f, ts, M, Z)

            if M % 10 == 0:
                print(M, mu, rmse(eval_linKK(p_values, ts, f), Z))
    else:
        # manual mode: fit exactly max_M RC elements
        M = max_M
        ts = get_tc_distribution(f, M)
        # BUG FIX: fitLinKK takes (f, ts, M, Z); the original manual-mode
        # call omitted ``ts`` and raised a TypeError.
        p_values, mu = fitLinKK(f, ts, M, Z)

    return M, mu, eval_linKK(p_values, ts, f), \
        residuals_linKK(p_values, ts, Z, f, residuals='real'), \
        residuals_linKK(p_values, ts, Z, f, residuals='imag')
def fitLinKK(f, ts, M, Z):
    """ Fits the linKK model using scipy.optimize.least_squares """
    # initial guess: R_Ohm = min(Re(Z)) followed by M identical resistors
    # that together span the real-impedance range
    Z_real = np.real(Z)
    R_step = (max(Z_real) - min(Z_real)) / M
    initial_guess = np.append(min(Z_real), np.ones(shape=(M,)) * R_step)

    result = least_squares(residuals_linKK, initial_guess, method='lm',
                           args=(ts, Z, f, 'both'),
                           ftol=1E-13, gtol=1E-10)

    p_values = result['x']
    mu = calc_mu(p_values[1:])

    return p_values, mu
def eval_linKK(Rs, ts, f):
    """ Builds a circuit of RC elements to be used in LinKK """
    from .circuit_elements import s, R, K  # noqa

    # assemble the circuit as a string -- one ohmic resistor followed by
    # one Voigt (RC) element per time constant -- then evaluate it
    elements = ['R({},{})'.format([Rs[0]], f.tolist())]
    for Rk, tk in zip(Rs[1:], ts):
        elements.append('K({},{})'.format([Rk, tk], f.tolist()))

    circuit_string = 's([' + ','.join(elements) + '])'
    return eval(circuit_string)
def residuals_linKK(Rs, ts, Z, f, residuals='real'):
    """ Calculates the residual between the data and a LinKK fit """
    diff = Z - eval_linKK(Rs, ts, f)
    scale = np.abs(Z)

    if residuals == 'real':
        return diff.real / scale
    if residuals == 'imag':
        return diff.imag / scale
    if residuals == 'both':
        # interleave real and imaginary residuals: [re0, im0, re1, im1, ...]
        interleaved = np.zeros(Z.size * 2, dtype=np.float64)
        interleaved[0::2] = diff.real / scale
        interleaved[1::2] = diff.imag / scale
        return interleaved
def calc_mu(Rs):
    """ Calculates mu for use in LinKK """
    # split the resistor "mass" into negative and non-negative parts
    neg_mass = 0.0
    pos_mass = 0.0
    for resistance in Rs:
        if resistance < 0:
            neg_mass += abs(resistance)
        else:
            pos_mass += abs(resistance)
    return 1 - neg_mass/pos_mass
| [
"numpy.abs",
"numpy.zeros",
"numpy.ones",
"scipy.optimize.least_squares",
"numpy.min",
"numpy.max",
"numpy.real",
"numpy.log10"
] | [((4036, 4147), 'scipy.optimize.least_squares', 'least_squares', (['residuals_linKK', 'initial_guess'], {'method': '"""lm"""', 'args': "(ts, Z, f, 'both')", 'ftol': '(1e-13)', 'gtol': '(1e-10)'}), "(residuals_linKK, initial_guess, method='lm', args=(ts, Z, f,\n 'both'), ftol=1e-13, gtol=1e-10)\n", (4049, 4147), False, 'from scipy.optimize import least_squares\n'), ((2899, 2919), 'numpy.zeros', 'np.zeros', ([], {'shape': '(M,)'}), '(shape=(M,))\n', (2907, 2919), True, 'import numpy as np\n'), ((2847, 2856), 'numpy.min', 'np.min', (['f'], {}), '(f)\n', (2853, 2856), True, 'import numpy as np\n'), ((2875, 2884), 'numpy.max', 'np.max', (['f'], {}), '(f)\n', (2881, 2884), True, 'import numpy as np\n'), ((3888, 3898), 'numpy.real', 'np.real', (['Z'], {}), '(Z)\n', (3895, 3898), True, 'import numpy as np\n'), ((3931, 3950), 'numpy.ones', 'np.ones', ([], {'shape': '(M,)'}), '(shape=(M,))\n', (3938, 3950), True, 'import numpy as np\n'), ((4932, 4941), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (4938, 4941), True, 'import numpy as np\n'), ((4998, 5007), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (5004, 5007), True, 'import numpy as np\n'), ((5053, 5091), 'numpy.zeros', 'np.zeros', (['(Z.size * 2)'], {'dtype': 'np.float64'}), '(Z.size * 2, dtype=np.float64)\n', (5061, 5091), True, 'import numpy as np\n'), ((5128, 5137), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (5134, 5137), True, 'import numpy as np\n'), ((5177, 5186), 'numpy.abs', 'np.abs', (['Z'], {}), '(Z)\n', (5183, 5186), True, 'import numpy as np\n'), ((3048, 3063), 'numpy.log10', 'np.log10', (['t_min'], {}), '(t_min)\n', (3056, 3063), True, 'import numpy as np\n'), ((3989, 3999), 'numpy.real', 'np.real', (['Z'], {}), '(Z)\n', (3996, 3999), True, 'import numpy as np\n'), ((4005, 4015), 'numpy.real', 'np.real', (['Z'], {}), '(Z)\n', (4012, 4015), True, 'import numpy as np\n'), ((3111, 3134), 'numpy.log10', 'np.log10', (['(t_max / t_min)'], {}), '(t_max / t_min)\n', (3119, 3134), True, 'import numpy as np\n')] 
|
import random

# --- Normal distribution sample ---
gaussian_sample = random.normalvariate(0, 1)
print(gaussian_sample)

# --- Uniform choice from a sequence ---
letters = list('ABCDH')
picked = random.choice(letters)
print(picked)

# --- Cryptographically secure tokens / secret keys ---
import secrets

below = secrets.randbelow(3)
bits = secrets.randbits(4)
print(f's: {below} and b: {bits}')

import numpy as np

# --- 3x3 array of uniform floats in [0, 1) ---
matrix = np.random.rand(3, 3)
print(matrix)

# --- 3x4 array of random integers in [0, 10) ---
matrix = np.random.randint(0, 10, (3, 4))
print(matrix)
| [
"secrets.randbits",
"secrets.randbelow",
"random.normalvariate",
"random.choice",
"numpy.random.randint",
"numpy.random.rand"
] | [((64, 90), 'random.normalvariate', 'random.normalvariate', (['(0)', '(1)'], {}), '(0, 1)\n', (84, 90), False, 'import random\n'), ((125, 143), 'random.choice', 'random.choice', (['arr'], {}), '(arr)\n', (138, 143), False, 'import random\n'), ((219, 239), 'secrets.randbelow', 'secrets.randbelow', (['(3)'], {}), '(3)\n', (236, 239), False, 'import secrets\n'), ((244, 263), 'secrets.randbits', 'secrets.randbits', (['(4)'], {}), '(4)\n', (260, 263), False, 'import secrets\n'), ((346, 366), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (360, 366), True, 'import numpy as np\n'), ((411, 443), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(3, 4)'], {}), '(0, 10, (3, 4))\n', (428, 443), True, 'import numpy as np\n')] |
import torch
import numpy as np
import os
import glob
#import skvideo
#skvideo.setFFmpegPath("C:\\ffmpeg") # you need this before the import
import skvideo.io
def crop():
current_path = os.path.dirname(__file__)
resized_path = os.path.join(current_path, 'resized_data')
dirs = glob.glob(os.path.join(current_path, args.data+'/*'))
files = [ glob.glob(dir+'/*') for dir in dirs ]
files = sum(files, []) # flatten
''' script for cropping '''
for i, file in enumerate(files):
os.system("ffmpeg -i %s -pix_fmt yuv420p -vf crop=96:96:42:24 %s.mp4" %
(file, os.path.join(resized_path, str(i))))
def preprocess(args):
"""
Apply normalisation
Transpose each video to (channel, nframe, img_size, img_size)
"""
crop()
curr_dir = os.path.dirname(__file__)
data_dir = os.path.join(curr_dir, 'resized_data')
vid_file = glob.glob(data_dir+'/*')
#print(len(vid_file))
videos = [skvideo.io.vread(vid) for vid in vid_file] # video size: (nframe, img_size, img_size, channel)
# Normalising and appling transpose
videos = [video.transpose(3, 0, 1, 2)/255.0 for video in videos ]
return videos, curr_dir
def sample(video, T):
#print(video.shape[0])
start = np.random.randint(0, video.shape[1]-(T+1))
end = start + T
return video[:, start:end, :, :]
def randomVideo(videos, batch_size, T):
x = []
for i in range(batch_size):
# Randomly Sample a video from the videos
video = videos[ np.random.randint(1, len(videos)-1)]
# Randomly sample the sequence of T frames from the video
video = torch.Tensor(sample(video, T))
x.append(video)
x = torch.stack(x)
return x
def save_video(fake_video, epoch, current_path):
outputdata = fake_video * 255
outputdata = outputdata.astype(np.uint8)
dir_path = os.path.join(current_path, 'generated_videos')
file_path = os.path.join(dir_path, 'fakeVideo_epoch-%d.mp4' % epoch)
skvideo.io.vwrite(file_path, outputdata)
| [
"torch.stack",
"os.path.dirname",
"numpy.random.randint",
"glob.glob",
"os.path.join"
] | [((200, 225), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (215, 225), False, 'import os\n'), ((246, 288), 'os.path.join', 'os.path.join', (['current_path', '"""resized_data"""'], {}), "(current_path, 'resized_data')\n", (258, 288), False, 'import os\n'), ((825, 850), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (840, 850), False, 'import os\n'), ((867, 905), 'os.path.join', 'os.path.join', (['curr_dir', '"""resized_data"""'], {}), "(curr_dir, 'resized_data')\n", (879, 905), False, 'import os\n'), ((922, 948), 'glob.glob', 'glob.glob', (["(data_dir + '/*')"], {}), "(data_dir + '/*')\n", (931, 948), False, 'import glob\n'), ((1291, 1337), 'numpy.random.randint', 'np.random.randint', (['(0)', '(video.shape[1] - (T + 1))'], {}), '(0, video.shape[1] - (T + 1))\n', (1308, 1337), True, 'import numpy as np\n'), ((1743, 1757), 'torch.stack', 'torch.stack', (['x'], {}), '(x)\n', (1754, 1757), False, 'import torch\n'), ((1921, 1967), 'os.path.join', 'os.path.join', (['current_path', '"""generated_videos"""'], {}), "(current_path, 'generated_videos')\n", (1933, 1967), False, 'import os\n'), ((1985, 2041), 'os.path.join', 'os.path.join', (['dir_path', "('fakeVideo_epoch-%d.mp4' % epoch)"], {}), "(dir_path, 'fakeVideo_epoch-%d.mp4' % epoch)\n", (1997, 2041), False, 'import os\n'), ((311, 355), 'os.path.join', 'os.path.join', (['current_path', "(args.data + '/*')"], {}), "(current_path, args.data + '/*')\n", (323, 355), False, 'import os\n'), ((370, 391), 'glob.glob', 'glob.glob', (["(dir + '/*')"], {}), "(dir + '/*')\n", (379, 391), False, 'import glob\n')] |
from collections import deque, defaultdict
import numpy as np
import pytest
from snc.environments.closed_loop_crw import ClosedLoopCRW
import snc.environments.examples as examples
from snc.environments.job_generators.discrete_review_job_generator import \
DeterministicDiscreteReviewJobGenerator
from snc.environments.job_generators.scaled_bernoulli_services_poisson_arrivals_generator import \
ScaledBernoulliServicesPoissonArrivalsGenerator
from snc.environments.state_initialiser import DeterministicCRWStateInitialiser
def build_closed_loop_env_2_demand_buffers(
demand_to_supplier_routes,
constituency_matrix,
initial_state=np.zeros((5, 1))
):
ind_surplus_buffers = [1, 3]
job_gen_seed = 42
mu = 1.5
mud = 3
mus = 1.5
alpha = 0.95
cost_per_buffer = np.array([[1], [2], [5], [3], [8]])
demand_rate = np.array([[0], [0], [alpha], [0], [alpha]])
buffer_processing_matrix = np.array([[-mu, -mu/3, 0, mus, 0, 0],
[2*mu/3, 0, -mud, 0, 0, 0],
[0, 0, -mud, 0, 0, 0],
[mu/3, mu/3, 0, 0, -mud, mus/3],
[0, 0, 0, 0, -mud, 0]])
job_generator = ScaledBernoulliServicesPoissonArrivalsGenerator(demand_rate,
buffer_processing_matrix,
job_gen_seed=job_gen_seed)
state_initialiser = DeterministicCRWStateInitialiser(initial_state)
cl_env = ClosedLoopCRW(
demand_to_supplier_routes,
ind_surplus_buffers,
cost_per_buffer,
np.ones_like(demand_rate) * np.inf,
constituency_matrix,
job_generator,
state_initialiser
)
return cl_env
def build_closed_loop_single_station_demand_model(initial_state=np.zeros((3, 1)), toa=100):
ind_surplus_buffers = [1]
demand_to_supplier_routes = {2: (2, toa)}
job_gen_seed = 42
mu = 3
mud = 3
mus = 3
alpha = 2
cost_per_buffer = np.array([[1], [2], [5]])
demand_rate = np.array([[0], [0], [alpha]])
buffer_processing_matrix = np.array([[-mu, 0, mus],
[mu, -mud, 0],
[0, -mud, 0]])
job_generator = DeterministicDiscreteReviewJobGenerator(demand_rate,
buffer_processing_matrix,
job_gen_seed,
sim_time_interval=1)
constituency_matrix = np.eye(3)
state_initialiser = DeterministicCRWStateInitialiser(initial_state)
cl_env = ClosedLoopCRW(
demand_to_supplier_routes,
ind_surplus_buffers,
cost_per_buffer,
np.ones_like(demand_rate) * np.inf,
constituency_matrix,
job_generator,
state_initialiser
)
return cl_env
def test_get_supply_and_demand_ids():
demand = (0, 1, 2, 3, 4, 5, 6, 7)
supply = (10, 11, 12, 13, 14, 15, 16, 17)
toa = (20, 21, 22, 23, 24, 25, 26, 27)
demand_to_supplier_routes = {demand[i]: (supply[i], toa[i]) for i in range(8)}
supply_id, demand_id = ClosedLoopCRW.get_supply_and_demand_ids(demand_to_supplier_routes)
assert supply_id == list(supply)
assert demand_id == list(demand)
def test_are_demand_ids_unique():
demand_id = list(range(4))
assert ClosedLoopCRW.are_demand_ids_unique(demand_id)
def test_are_demand_ids_unique_false():
demand_id = [0, 0, 1, 2]
assert not ClosedLoopCRW.are_demand_ids_unique(demand_id)
def test_get_supply_and_demand_ids_repeated_ids():
demand = (0, 1, 2, 3, 4)
supply = (10, 11, 10, 11, 12)
toa = (20, 21, 20, 21, 22)
demand_to_supplier_routes = {demand[i]: (supply[i], toa[i]) for i in range(len(demand))}
supply_id, demand_id = ClosedLoopCRW.get_supply_and_demand_ids(demand_to_supplier_routes)
assert supply_id == [10, 11, 12]
assert demand_id == list(demand)
@pytest.mark.parametrize('supply_ids,demand_ids,env_class', [
([3], [5], examples.double_reentrant_line_with_demand_only_shared_resources_model),
([7, 8], [14, 15], examples.complex_demand_driven_model),
])
def test_is_demand_to_supplier_routes_consistent_with_job_generator_envs(
supply_ids,
demand_ids,
env_class
):
env = env_class()
assert ClosedLoopCRW.is_demand_to_supplier_routes_consistent_with_job_generator(
supply_ids,
demand_ids,
env.constituency_matrix,
env.job_generator.supply_nodes,
env.job_generator.demand_nodes.values()
)
def test_is_supply_ids_consistent_with_job_generator():
demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
assert ClosedLoopCRW.is_supply_ids_consistent_with_job_generator(
env.supply_ids,
env.job_generator.supply_nodes,
env.constituency_matrix
)
def test_is_supply_ids_consistent_with_job_generator_false():
supply_ids = [2, 3] # It should be [2, 4]
demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
assert not ClosedLoopCRW.is_supply_ids_consistent_with_job_generator(
supply_ids,
env.job_generator.supply_nodes,
env.constituency_matrix
)
def test_initialise_supply_buffers():
supply_id = [0, 11]
supply_buf = ClosedLoopCRW.initialise_supply_buffers(supply_id)
assert supply_buf == {0: 0, 11: 0}
def test_get_activity_supply_resource_association_eye():
supply_nodes = [(0, 1), (1, 2)]
constituency_matrix = np.eye(4)
activity_to_resource, resource_to_activity = \
ClosedLoopCRW.get_activity_supply_resource_association(
supply_nodes,
constituency_matrix
)
assert activity_to_resource == {1: 1, 2: 2}
assert resource_to_activity == {1: [1], 2: [2]}
def test_get_activity_supply_resource_association_only_one_resource():
supply_nodes = [(0, 1), (1, 2)]
constituency_matrix = np.array([[0, 1, 1],
[1, 0, 0]])
activity_to_resource, resource_to_activity = \
ClosedLoopCRW.get_activity_supply_resource_association(
supply_nodes,
constituency_matrix
)
assert activity_to_resource == {1: 0, 2: 0}
assert resource_to_activity == {0: [1, 2]}
def test_get_activity_supply_resource_association_two_resources():
supply_nodes = [(0, 1), (1, 2), (2, 0)]
constituency_matrix = np.array([[1, 1, 0],
[0, 0, 1]])
activity_to_resource, resource_to_activity = \
ClosedLoopCRW.get_activity_supply_resource_association(
supply_nodes,
constituency_matrix
)
assert activity_to_resource == {0: 0, 1: 0, 2: 1}
assert resource_to_activity == {0: [0, 1], 1: [2]}
def test_get_activity_supply_resource_association_action_belongs_to_two_resources():
supply_nodes = [(0, 1)]
constituency_matrix = np.array([[1, 1],
[0, 1]])
with pytest.raises(AssertionError):
_, _ = ClosedLoopCRW.get_activity_supply_resource_association(
supply_nodes,
constituency_matrix
)
def test_get_supply_activity_to_buffer_association_only_one_supply_activity():
supply_nodes = [(0, 2)]
activity_to_buffer = ClosedLoopCRW.get_supply_activity_to_buffer_association(supply_nodes)
assert activity_to_buffer == {2: 0}
def test_get_supply_activity_to_buffer_association_multiple_supply_activities():
supply_nodes = [(0, 2), (1, 3)]
activity_to_buffer = ClosedLoopCRW.get_supply_activity_to_buffer_association(supply_nodes)
assert activity_to_buffer == {2: 0, 3: 1}
@pytest.mark.parametrize(
's1,s2', [(0, 0), (3, 1), (10, 20)]
)
def test_sum_supplier_outbound(s1, s2):
demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix)
routing_matrix[0, 3] = s1
routing_matrix[3, 5] = s2
sum_outbound = env.sum_supplier_outbound(routing_matrix)
assert sum_outbound == {2: s1, 4: s2}
@pytest.mark.parametrize(
's1,s2', [(0, 0), (3, 1), (10, 20)]
)
def test_sum_supplier_outbound_one_resource_multiple_routes(s1, s2):
demand_to_supplier_routes = {2: (2, 100), 4: (2, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix)
routing_matrix[0, 3] = s1
routing_matrix[3, 5] = s2
sum_outbound = env.sum_supplier_outbound(routing_matrix)
assert sum_outbound == {2: s1 + s2}
def get_truncated_val(s, a):
return s if s < a else a
@pytest.mark.parametrize(
's1,s2,a1,a2',
[
(0, 0, 0, 0), # Empty and none available.
(0, 0, 1, 1), # Empty but available.
(3, 2, 0, 0), # Some but none available.
(3, 2, 3, 2), # Exactly what's available.
(3, 2, 2, 1), # More than available.
(3, 2, 4, 3), # Less than available.
]
)
def test_truncate_routing_matrix_supplier(s1, s2, a1, a2):
demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix)
routing_matrix[0, 3] = s1
routing_matrix[3, 5] = s2
env.supply_buffers[2] = a1
env.supply_buffers[4] = a2
new_routing_matrix = env.truncate_routing_matrix_supplier(2, routing_matrix, a1)
assert new_routing_matrix[0, 3] == get_truncated_val(s1, a1)
new_routing_matrix = env.truncate_routing_matrix_supplier(4, new_routing_matrix, a2)
assert new_routing_matrix[3, 5] == get_truncated_val(s2, a2)
@pytest.mark.parametrize(
's1,s2,a',
[
(0, 0, 0), # Empty and none available.
(0, 0, 1), # Empty but available.
(3, 2, 0), # Some but none available.
(3, 2, 5), # Exactly what's available.
(3, 2, 4), # More than available.
(3, 2, 6), # Less than available.
]
)
def test_truncate_routing_matrix_supplier_one_resource_multiple_routes(s1, s2, a):
demand_to_supplier_routes = {2: (2, 100), 4: (2, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix)
routing_matrix[0, 3] = s1
routing_matrix[3, 5] = s2
env.supply_buffers[2] = a
new_routing_matrix = env.truncate_routing_matrix_supplier(2, routing_matrix, a)
assert new_routing_matrix[0, 3] + new_routing_matrix[3, 5] == get_truncated_val(s1 + s2, a)
def get_new_supply_buffers(s, a):
return a - s if s < a else 0
@pytest.mark.parametrize(
's1,s2,a1,a2',
[
(0, 0, 0, 0), # Empty and none available.
(0, 0, 1, 1), # Empty but available.
(3, 2, 0, 0), # Some but none available.
(3, 2, 3, 2), # Exactly what's available.
(3, 2, 2, 1), # More than available.
(3, 2, 4, 3), # Less than available.
]
)
def test_ensure_jobs_conservation(s1, s2, a1, a2):
demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
state_plus_arrivals = np.zeros((5, 1))
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix)
routing_matrix[0, 3] = s1
routing_matrix[3, 5] = s2
env.supply_buffers[2] = a1
env.supply_buffers[4] = a2
new_routing_matrix = env.ensure_jobs_conservation(routing_matrix, state_plus_arrivals)
assert new_routing_matrix[0, 3] == get_truncated_val(s1, a1)
assert new_routing_matrix[3, 5] == get_truncated_val(s2, a2)
assert env.supply_buffers[2] == get_new_supply_buffers(s1, a1)
assert env.supply_buffers[4] == get_new_supply_buffers(s2, a2)
def test_ensure_jobs_conservation_with_enough_jobs():
state = 3 * np.ones((3, 1))
routing_matrix = np.array([[-3, 0, 3],
[3, -3, 0],
[0, -3, 0]])
env = build_closed_loop_single_station_demand_model()
new_routing_jobs_matrix = env.ensure_jobs_conservation(routing_matrix, state)
assert np.all(new_routing_jobs_matrix == routing_matrix)
def test_ensure_jobs_conservation_with_not_enough_jobs():
state = np.array([[2], [1], [2]])
routing_matrix = np.array([[-3, 0, 3],
[3, -3, 0],
[0, -3, 0]])
env = build_closed_loop_single_station_demand_model()
env.supply_buffers[2] = 1
expected_routing_matrix = np.array([[-2, 0, 1],
[2, -1, 0],
[0, -1, 0]])
new_routing_jobs_matrix = env.ensure_jobs_conservation(routing_matrix, state)
assert np.all(new_routing_jobs_matrix == expected_routing_matrix)
def test_ensure_jobs_conservation_with_zero_jobs():
state = np.zeros((3, 1))
routing_matrix = np.array([[-3, 0, 3],
[3, -3, 0],
[0, -3, 0]])
env = build_closed_loop_single_station_demand_model()
env.supply_buffers[2] = 1
expected_routing_matrix = np.array([[0, 0, 1],
[0, 0, 0],
[0, 0, 0]])
new_routing_jobs_matrix = env.ensure_jobs_conservation(routing_matrix, state)
assert np.all(new_routing_jobs_matrix == expected_routing_matrix)
def test_get_num_items_supply_buff():
demand_to_supplier_routes = {2: (2, 100), 4: (2, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
env.supply_buffers[2] = 10
env.supply_buffers[4] = 20
assert env.get_num_items_supply_buff() == 30
def test_get_num_items_supply_buff_init():
demand_to_supplier_routes = {2: (2, 100), 4: (2, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
assert env.get_num_items_supply_buff() == 0
def test_get_num_items_in_transit_to_suppliers():
supp1 = 2
supp2 = 4
demand_to_supplier_routes = {2: (supp1, 100), 4: (supp2, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
toa1 = 10
toa2 = 11
env.in_transit_parcels[toa1].append((supp1, 1))
env.in_transit_parcels[toa2].append((supp2, 7))
assert env.get_num_items_in_transit_to_suppliers() == 8
def test_get_num_items_in_transit_to_suppliers_multiple_in_transit():
supp1 = 2
supp2 = 4
demand_to_supplier_routes = {2: (supp1, 100), 4: (supp2, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
toa1 = 10
toa2 = 11
env.in_transit_parcels[toa1].extend([(supp1, 9), (supp1, 1)])
env.in_transit_parcels[toa2].extend([(supp2, 20), (supp1, 10)])
assert env.get_num_items_in_transit_to_suppliers() == 40
def test_get_num_items_in_transit_to_suppliers_init():
demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
assert env.get_num_items_in_transit_to_suppliers() == 0
def test_assert_remains_closed_network_empty():
initial_state = np.zeros((5, 1))
demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(
demand_to_supplier_routes,
constituency_matrix,
initial_state
)
env.assert_remains_closed_network()
def test_assert_remains_closed_network_false():
initial_state = np.ones((5, 1))
demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(
demand_to_supplier_routes,
constituency_matrix,
initial_state
)
env.state[0] = 0 # Remove one item without putting it anywhere else.
with pytest.raises(AssertionError):
env.assert_remains_closed_network()
def test_get_num_items_state_without_demand():
initial_state = 5 * np.ones((5, 1)) # 25 items, 10 in demand buffers.
supp1 = 2
supp2 = 4
demand_to_supplier_routes = {2: (supp1, 100), 4: (supp2, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(
demand_to_supplier_routes,
constituency_matrix,
initial_state
)
assert np.all(env.num_initial_items == 15)
def test_assert_remains_closed_network_all_in_transit_and_suppliers():
initial_state = 5 * np.ones((5, 1)) # 25 items, 10 in demand buffers.
supp1 = 2
supp2 = 4
demand_to_supplier_routes = {2: (supp1, 100), 4: (supp2, 300)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(
demand_to_supplier_routes,
constituency_matrix,
initial_state
)
env.state = np.zeros((5, 1))
env.supply_buffers[2] = 1
env.supply_buffers[4] = 2
toa1 = 10
toa2 = 11
env.in_transit_parcels[toa1].extend([(supp1, 3), (supp1, 1)])
env.in_transit_parcels[toa2].extend([(supp2, 2), (supp1, 6)])
env.assert_remains_closed_network()
def test_get_satisfied_demand():
drained_amount = np.array([1, 2, 3, 4, 5])[:, None]
demand_id = [0, 3]
satisfied_demand = ClosedLoopCRW.get_satisfied_demand(drained_amount, demand_id)
assert satisfied_demand == {0: 1, 3: 4}
def test_fill_in_transit_to_suppliers():
initial_state = np.array([10, 4, 3])[:, None]
toa = 200
amount = 7
current_time = 42
env = build_closed_loop_single_station_demand_model(initial_state, toa)
env._t = current_time
satisfied_demand = {2: amount} # From buffer 2, which will be delivered at resource 2.
env.fill_in_transit_to_suppliers(satisfied_demand)
assert env.in_transit_parcels == {current_time + toa: [(2, amount)]}
def test_fill_in_transit_to_suppliers_multiple_parcels():
initial_state = np.array([10, 4, 3])[:, None]
toa = 200
amount1 = 7
amount2 = 14
current_time = 42
env = build_closed_loop_single_station_demand_model(initial_state, toa)
env._t = current_time
env.fill_in_transit_to_suppliers({2: amount1})
env.fill_in_transit_to_suppliers({2: amount2})
assert env.in_transit_parcels == {current_time + toa: [(2, amount1), (2, amount2)]}
def test_fill_in_transit_to_suppliers_multiple_simultaneous_parcels():
toa2 = 100
toa4 = 300
demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)}
current_time = 42
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
env._t = current_time
env.fill_in_transit_to_suppliers({2: 10, 4: 13})
assert env.in_transit_parcels == {
current_time + toa2: [(2, 10)],
current_time + toa4: [(4, 13)]
}
def test_fill_in_transit_to_suppliers_multiple_resources_multiple_sequential_parcels():
toa2 = 100
toa4 = 300
demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)}
current_time = 42
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
env._t = current_time
env.fill_in_transit_to_suppliers({2: 10})
env.fill_in_transit_to_suppliers({4: 13})
env._t = current_time + 100
env.fill_in_transit_to_suppliers({2: 14})
assert env.in_transit_parcels == {
current_time + toa2: [(2, 10)],
current_time + toa4: [(4, 13)],
current_time + toa2 + 100: [(2, 14)]
}
def test_fill_supply_buffers_empty_in_transit():
toa2 = 100
toa4 = 300
demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
env.fill_supply_buffers()
assert env.supply_buffers == {2: 0, 4: 0}
assert env.in_transit_parcels == defaultdict(list)
def test_fill_supply_buffers_some_in_transit_but_not_arrived():
amount2 = 10
amount4 = 11
toa2 = 100
toa4 = 300
demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
env.fill_in_transit_to_suppliers({2: amount2, 4: amount4})
env._t = toa2 - 1
env.fill_supply_buffers()
assert env.supply_buffers == {2: 0, 4: 0}
assert env.in_transit_parcels == {toa2: [(2, amount2)], toa4: [(4, amount4)]}
def test_fill_supply_buffers_some_in_transit_only_one_arrived():
toa2 = 100
toa4 = 300
demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
env.fill_in_transit_to_suppliers({2: 10})
env.fill_in_transit_to_suppliers({4: 11})
env._t = toa2
env.fill_supply_buffers()
assert env.supply_buffers == {2: 10, 4: 0}
assert env.in_transit_parcels == {toa4: [(4, 11)]}
def test_fill_supply_buffers_some_in_transit_two_arrived():
toa2 = 100
toa4 = 300
demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)}
constituency_matrix = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix)
env.fill_in_transit_to_suppliers({4: 11})
env._t = 200
env.fill_in_transit_to_suppliers({2: 10})
env._t = 300
env.fill_supply_buffers()
assert env.supply_buffers == {2: 10, 4: 11}
assert env.in_transit_parcels == defaultdict(list)
def test_step():
env = build_closed_loop_single_station_demand_model(
initial_state=np.array([[10], [5], [3]]),
toa=100
)
action = np.array([[0], [1], [1]])
env.step(action)
# Nothing done in buffer 0. 3 are removed from buffers 1 and 2, but 2 new arrivals at buffer 2.
assert np.all(env.state == np.array([[10], [2], [2]]))
assert env.in_transit_parcels == {101: [(2, 3)]} # Deliver 3 items to resource 2 at time 101.
assert env.supply_buffers == {2: 0}
def test_step_many_steps():
toa = 100
env = build_closed_loop_single_station_demand_model(
initial_state=np.array([[10], [5], [3]]),
toa=toa
)
alpha = env.job_generator.demand_rate[2]
action = np.array([[1], [1], [1]])
env.step(action)
assert np.all(env.state == np.array([[7], [5], [alpha]]))
assert env.in_transit_parcels == {101: [(2, 3)]}
assert env.supply_buffers == {2: 0}
action = np.zeros((3, 1))
for i in range(toa - 1):
env.step(action)
assert np.all(env.state == np.array([[7], [5], [alpha * env.t]]))
assert env.in_transit_parcels == {101: [(2, 3)]}
assert env.supply_buffers == {2: 0}
env.step(action)
assert np.all(env.state == np.array([[7], [5], [env.t * alpha]]))
assert env.in_transit_parcels == defaultdict(list)
assert env.supply_buffers == {2: 3}
| [
"numpy.ones",
"collections.defaultdict",
"snc.environments.state_initialiser.DeterministicCRWStateInitialiser",
"snc.environments.closed_loop_crw.ClosedLoopCRW.get_supply_activity_to_buffer_association",
"pytest.mark.parametrize",
"snc.environments.closed_loop_crw.ClosedLoopCRW.get_supply_and_demand_ids",... | [((4179, 4389), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""supply_ids,demand_ids,env_class"""', '[([3], [5], examples.\n double_reentrant_line_with_demand_only_shared_resources_model), ([7, 8],\n [14, 15], examples.complex_demand_driven_model)]'], {}), "('supply_ids,demand_ids,env_class', [([3], [5],\n examples.double_reentrant_line_with_demand_only_shared_resources_model),\n ([7, 8], [14, 15], examples.complex_demand_driven_model)])\n", (4202, 4389), False, 'import pytest\n'), ((8646, 8706), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s1,s2"""', '[(0, 0), (3, 1), (10, 20)]'], {}), "('s1,s2', [(0, 0), (3, 1), (10, 20)])\n", (8669, 8706), False, 'import pytest\n'), ((9435, 9495), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s1,s2"""', '[(0, 0), (3, 1), (10, 20)]'], {}), "('s1,s2', [(0, 0), (3, 1), (10, 20)])\n", (9458, 9495), False, 'import pytest\n'), ((10255, 10384), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s1,s2,a1,a2"""', '[(0, 0, 0, 0), (0, 0, 1, 1), (3, 2, 0, 0), (3, 2, 3, 2), (3, 2, 2, 1), (3, \n 2, 4, 3)]'], {}), "('s1,s2,a1,a2', [(0, 0, 0, 0), (0, 0, 1, 1), (3, 2, \n 0, 0), (3, 2, 3, 2), (3, 2, 2, 1), (3, 2, 4, 3)])\n", (10278, 10384), False, 'import pytest\n'), ((11607, 11713), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s1,s2,a"""', '[(0, 0, 0), (0, 0, 1), (3, 2, 0), (3, 2, 5), (3, 2, 4), (3, 2, 6)]'], {}), "('s1,s2,a', [(0, 0, 0), (0, 0, 1), (3, 2, 0), (3, 2,\n 5), (3, 2, 4), (3, 2, 6)])\n", (11630, 11713), False, 'import pytest\n'), ((12818, 12947), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s1,s2,a1,a2"""', '[(0, 0, 0, 0), (0, 0, 1, 1), (3, 2, 0, 0), (3, 2, 3, 2), (3, 2, 2, 1), (3, \n 2, 4, 3)]'], {}), "('s1,s2,a1,a2', [(0, 0, 0, 0), (0, 0, 1, 1), (3, 2, \n 0, 0), (3, 2, 3, 2), (3, 2, 2, 1), (3, 2, 4, 3)])\n", (12841, 12947), False, 'import pytest\n'), ((664, 680), 
'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (672, 680), True, 'import numpy as np\n'), ((817, 852), 'numpy.array', 'np.array', (['[[1], [2], [5], [3], [8]]'], {}), '([[1], [2], [5], [3], [8]])\n', (825, 852), True, 'import numpy as np\n'), ((871, 914), 'numpy.array', 'np.array', (['[[0], [0], [alpha], [0], [alpha]]'], {}), '([[0], [0], [alpha], [0], [alpha]])\n', (879, 914), True, 'import numpy as np\n'), ((946, 1112), 'numpy.array', 'np.array', (['[[-mu, -mu / 3, 0, mus, 0, 0], [2 * mu / 3, 0, -mud, 0, 0, 0], [0, 0, -mud,\n 0, 0, 0], [mu / 3, mu / 3, 0, 0, -mud, mus / 3], [0, 0, 0, 0, -mud, 0]]'], {}), '([[-mu, -mu / 3, 0, mus, 0, 0], [2 * mu / 3, 0, -mud, 0, 0, 0], [0,\n 0, -mud, 0, 0, 0], [mu / 3, mu / 3, 0, 0, -mud, mus / 3], [0, 0, 0, 0, \n -mud, 0]])\n', (954, 1112), True, 'import numpy as np\n'), ((1315, 1432), 'snc.environments.job_generators.scaled_bernoulli_services_poisson_arrivals_generator.ScaledBernoulliServicesPoissonArrivalsGenerator', 'ScaledBernoulliServicesPoissonArrivalsGenerator', (['demand_rate', 'buffer_processing_matrix'], {'job_gen_seed': 'job_gen_seed'}), '(demand_rate,\n buffer_processing_matrix, job_gen_seed=job_gen_seed)\n', (1362, 1432), False, 'from snc.environments.job_generators.scaled_bernoulli_services_poisson_arrivals_generator import ScaledBernoulliServicesPoissonArrivalsGenerator\n'), ((1589, 1636), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (1621, 1636), False, 'from snc.environments.state_initialiser import DeterministicCRWStateInitialiser\n'), ((1966, 1982), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1974, 1982), True, 'import numpy as np\n'), ((2163, 2188), 'numpy.array', 'np.array', (['[[1], [2], [5]]'], {}), '([[1], [2], [5]])\n', (2171, 2188), True, 'import numpy as np\n'), ((2207, 2236), 'numpy.array', 'np.array', (['[[0], [0], [alpha]]'], {}), '([[0], [0], [alpha]])\n', (2215, 
2236), True, 'import numpy as np\n'), ((2268, 2322), 'numpy.array', 'np.array', (['[[-mu, 0, mus], [mu, -mud, 0], [0, -mud, 0]]'], {}), '([[-mu, 0, mus], [mu, -mud, 0], [0, -mud, 0]])\n', (2276, 2322), True, 'import numpy as np\n'), ((2425, 2542), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'DeterministicDiscreteReviewJobGenerator', (['demand_rate', 'buffer_processing_matrix', 'job_gen_seed'], {'sim_time_interval': '(1)'}), '(demand_rate,\n buffer_processing_matrix, job_gen_seed, sim_time_interval=1)\n', (2464, 2542), False, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator\n'), ((2745, 2754), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2751, 2754), True, 'import numpy as np\n'), ((2779, 2826), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (2811, 2826), False, 'from snc.environments.state_initialiser import DeterministicCRWStateInitialiser\n'), ((3368, 3434), 'snc.environments.closed_loop_crw.ClosedLoopCRW.get_supply_and_demand_ids', 'ClosedLoopCRW.get_supply_and_demand_ids', (['demand_to_supplier_routes'], {}), '(demand_to_supplier_routes)\n', (3407, 3434), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((3587, 3633), 'snc.environments.closed_loop_crw.ClosedLoopCRW.are_demand_ids_unique', 'ClosedLoopCRW.are_demand_ids_unique', (['demand_id'], {}), '(demand_id)\n', (3622, 3633), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((4035, 4101), 'snc.environments.closed_loop_crw.ClosedLoopCRW.get_supply_and_demand_ids', 'ClosedLoopCRW.get_supply_and_demand_ids', (['demand_to_supplier_routes'], {}), '(demand_to_supplier_routes)\n', (4074, 4101), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((4945, 5059), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 
0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (4953, 5059), True, 'import numpy as np\n'), ((5308, 5442), 'snc.environments.closed_loop_crw.ClosedLoopCRW.is_supply_ids_consistent_with_job_generator', 'ClosedLoopCRW.is_supply_ids_consistent_with_job_generator', (['env.supply_ids', 'env.job_generator.supply_nodes', 'env.constituency_matrix'], {}), '(env.supply_ids,\n env.job_generator.supply_nodes, env.constituency_matrix)\n', (5365, 5442), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((5665, 5779), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (5673, 5779), True, 'import numpy as np\n'), ((6270, 6320), 'snc.environments.closed_loop_crw.ClosedLoopCRW.initialise_supply_buffers', 'ClosedLoopCRW.initialise_supply_buffers', (['supply_id'], {}), '(supply_id)\n', (6309, 6320), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((6481, 6490), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6487, 6490), True, 'import numpy as np\n'), ((6550, 6643), 'snc.environments.closed_loop_crw.ClosedLoopCRW.get_activity_supply_resource_association', 'ClosedLoopCRW.get_activity_supply_resource_association', (['supply_nodes', 'constituency_matrix'], {}), '(supply_nodes,\n constituency_matrix)\n', (6604, 6643), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((6909, 6941), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 0]]'], {}), '([[0, 1, 1], [1, 0, 0]])\n', (6917, 6941), True, 'import numpy as np\n'), ((7037, 7130), 'snc.environments.closed_loop_crw.ClosedLoopCRW.get_activity_supply_resource_association', 'ClosedLoopCRW.get_activity_supply_resource_association', 
(['supply_nodes', 'constituency_matrix'], {}), '(supply_nodes,\n constituency_matrix)\n', (7091, 7130), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((7395, 7427), 'numpy.array', 'np.array', (['[[1, 1, 0], [0, 0, 1]]'], {}), '([[1, 1, 0], [0, 0, 1]])\n', (7403, 7427), True, 'import numpy as np\n'), ((7523, 7616), 'snc.environments.closed_loop_crw.ClosedLoopCRW.get_activity_supply_resource_association', 'ClosedLoopCRW.get_activity_supply_resource_association', (['supply_nodes', 'constituency_matrix'], {}), '(supply_nodes,\n constituency_matrix)\n', (7577, 7616), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((7897, 7923), 'numpy.array', 'np.array', (['[[1, 1], [0, 1]]'], {}), '([[1, 1], [0, 1]])\n', (7905, 7923), True, 'import numpy as np\n'), ((8273, 8342), 'snc.environments.closed_loop_crw.ClosedLoopCRW.get_supply_activity_to_buffer_association', 'ClosedLoopCRW.get_supply_activity_to_buffer_association', (['supply_nodes'], {}), '(supply_nodes)\n', (8328, 8342), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((8527, 8596), 'snc.environments.closed_loop_crw.ClosedLoopCRW.get_supply_activity_to_buffer_association', 'ClosedLoopCRW.get_supply_activity_to_buffer_association', (['supply_nodes'], {}), '(supply_nodes)\n', (8582, 8596), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((8838, 8952), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (8846, 8952), True, 'import numpy as np\n'), ((9211, 9268), 'numpy.zeros_like', 'np.zeros_like', (['env.job_generator.buffer_processing_matrix'], {}), '(env.job_generator.buffer_processing_matrix)\n', (9224, 9268), True, 'import numpy as np\n'), ((9656, 9750), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 
0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0]\n ]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0,\n 0, 0, 1, 0]])\n', (9664, 9750), True, 'import numpy as np\n'), ((9973, 10030), 'numpy.zeros_like', 'np.zeros_like', (['env.job_generator.buffer_processing_matrix'], {}), '(env.job_generator.buffer_processing_matrix)\n', (9986, 10030), True, 'import numpy as np\n'), ((10747, 10861), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (10755, 10861), True, 'import numpy as np\n'), ((11120, 11177), 'numpy.zeros_like', 'np.zeros_like', (['env.job_generator.buffer_processing_matrix'], {}), '(env.job_generator.buffer_processing_matrix)\n', (11133, 11177), True, 'import numpy as np\n'), ((12101, 12195), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0]\n ]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0,\n 0, 0, 1, 0]])\n', (12109, 12195), True, 'import numpy as np\n'), ((12418, 12475), 'numpy.zeros_like', 'np.zeros_like', (['env.job_generator.buffer_processing_matrix'], {}), '(env.job_generator.buffer_processing_matrix)\n', (12431, 12475), True, 'import numpy as np\n'), ((13302, 13416), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (13310, 13416), True, 'import numpy as np\n'), ((13583, 13599), 'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (13591, 13599), True, 'import numpy as np\n'), ((13718, 13775), 'numpy.zeros_like', 'np.zeros_like', (['env.job_generator.buffer_processing_matrix'], {}), '(env.job_generator.buffer_processing_matrix)\n', (13731, 13775), 
True, 'import numpy as np\n'), ((14362, 14408), 'numpy.array', 'np.array', (['[[-3, 0, 3], [3, -3, 0], [0, -3, 0]]'], {}), '([[-3, 0, 3], [3, -3, 0], [0, -3, 0]])\n', (14370, 14408), True, 'import numpy as np\n'), ((14623, 14672), 'numpy.all', 'np.all', (['(new_routing_jobs_matrix == routing_matrix)'], {}), '(new_routing_jobs_matrix == routing_matrix)\n', (14629, 14672), True, 'import numpy as np\n'), ((14745, 14770), 'numpy.array', 'np.array', (['[[2], [1], [2]]'], {}), '([[2], [1], [2]])\n', (14753, 14770), True, 'import numpy as np\n'), ((14792, 14838), 'numpy.array', 'np.array', (['[[-3, 0, 3], [3, -3, 0], [0, -3, 0]]'], {}), '([[-3, 0, 3], [3, -3, 0], [0, -3, 0]])\n', (14800, 14838), True, 'import numpy as np\n'), ((15021, 15067), 'numpy.array', 'np.array', (['[[-2, 0, 1], [2, -1, 0], [0, -1, 0]]'], {}), '([[-2, 0, 1], [2, -1, 0], [0, -1, 0]])\n', (15029, 15067), True, 'import numpy as np\n'), ((15242, 15300), 'numpy.all', 'np.all', (['(new_routing_jobs_matrix == expected_routing_matrix)'], {}), '(new_routing_jobs_matrix == expected_routing_matrix)\n', (15248, 15300), True, 'import numpy as np\n'), ((15367, 15383), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (15375, 15383), True, 'import numpy as np\n'), ((15405, 15451), 'numpy.array', 'np.array', (['[[-3, 0, 3], [3, -3, 0], [0, -3, 0]]'], {}), '([[-3, 0, 3], [3, -3, 0], [0, -3, 0]])\n', (15413, 15451), True, 'import numpy as np\n'), ((15634, 15677), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 1], [0, 0, 0], [0, 0, 0]])\n', (15642, 15677), True, 'import numpy as np\n'), ((15851, 15909), 'numpy.all', 'np.all', (['(new_routing_jobs_matrix == expected_routing_matrix)'], {}), '(new_routing_jobs_matrix == expected_routing_matrix)\n', (15857, 15909), True, 'import numpy as np\n'), ((16035, 16129), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0]\n ]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 
0, 0, 1, 0, 1], [0, 0,\n 0, 0, 1, 0]])\n', (16043, 16129), True, 'import numpy as np\n'), ((16572, 16666), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0]\n ]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0,\n 0, 0, 1, 0]])\n', (16580, 16666), True, 'import numpy as np\n'), ((17089, 17203), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (17097, 17203), True, 'import numpy as np\n'), ((17826, 17940), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (17834, 17940), True, 'import numpy as np\n'), ((18543, 18657), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (18551, 18657), True, 'import numpy as np\n'), ((19025, 19041), 'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (19033, 19041), True, 'import numpy as np\n'), ((19127, 19241), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (19135, 19241), True, 'import numpy as np\n'), ((19634, 19649), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (19641, 19649), True, 'import numpy as np\n'), ((19735, 19849), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], 
[0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (19743, 19849), True, 'import numpy as np\n'), ((20535, 20649), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (20543, 20649), True, 'import numpy as np\n'), ((20943, 20978), 'numpy.all', 'np.all', (['(env.num_initial_items == 15)'], {}), '(env.num_initial_items == 15)\n', (20949, 20978), True, 'import numpy as np\n'), ((21248, 21362), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (21256, 21362), True, 'import numpy as np\n'), ((21661, 21677), 'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (21669, 21677), True, 'import numpy as np\n'), ((22076, 22137), 'snc.environments.closed_loop_crw.ClosedLoopCRW.get_satisfied_demand', 'ClosedLoopCRW.get_satisfied_demand', (['drained_amount', 'demand_id'], {}), '(drained_amount, demand_id)\n', (22110, 22137), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((23333, 23447), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (23341, 23447), True, 'import numpy as np\n'), ((24119, 24233), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (24127, 24233), True, 'import 
numpy as np\n'), ((25008, 25122), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (25016, 25122), True, 'import numpy as np\n'), ((25709, 25823), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (25717, 25823), True, 'import numpy as np\n'), ((26489, 26603), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (26497, 26603), True, 'import numpy as np\n'), ((27263, 27377), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (27271, 27377), True, 'import numpy as np\n'), ((28037, 28062), 'numpy.array', 'np.array', (['[[0], [1], [1]]'], {}), '([[0], [1], [1]])\n', (28045, 28062), True, 'import numpy as np\n'), ((28613, 28638), 'numpy.array', 'np.array', (['[[1], [1], [1]]'], {}), '([[1], [1], [1]])\n', (28621, 28638), True, 'import numpy as np\n'), ((28830, 28846), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (28838, 28846), True, 'import numpy as np\n'), ((3720, 3766), 'snc.environments.closed_loop_crw.ClosedLoopCRW.are_demand_ids_unique', 'ClosedLoopCRW.are_demand_ids_unique', (['demand_id'], {}), '(demand_id)\n', (3755, 3766), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((6032, 6163), 
'snc.environments.closed_loop_crw.ClosedLoopCRW.is_supply_ids_consistent_with_job_generator', 'ClosedLoopCRW.is_supply_ids_consistent_with_job_generator', (['supply_ids', 'env.job_generator.supply_nodes', 'env.constituency_matrix'], {}), '(supply_ids, env.\n job_generator.supply_nodes, env.constituency_matrix)\n', (6089, 6163), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((7969, 7998), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (7982, 7998), False, 'import pytest\n'), ((8015, 8108), 'snc.environments.closed_loop_crw.ClosedLoopCRW.get_activity_supply_resource_association', 'ClosedLoopCRW.get_activity_supply_resource_association', (['supply_nodes', 'constituency_matrix'], {}), '(supply_nodes,\n constituency_matrix)\n', (8069, 8108), False, 'from snc.environments.closed_loop_crw import ClosedLoopCRW\n'), ((14325, 14340), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (14332, 14340), True, 'import numpy as np\n'), ((20215, 20244), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (20228, 20244), False, 'import pytest\n'), ((20363, 20378), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (20370, 20378), True, 'import numpy as np\n'), ((21076, 21091), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (21083, 21091), True, 'import numpy as np\n'), ((21994, 22019), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (22002, 22019), True, 'import numpy as np\n'), ((22245, 22265), 'numpy.array', 'np.array', (['[10, 4, 3]'], {}), '([10, 4, 3])\n', (22253, 22265), True, 'import numpy as np\n'), ((22729, 22749), 'numpy.array', 'np.array', (['[10, 4, 3]'], {}), '([10, 4, 3])\n', (22737, 22749), True, 'import numpy as np\n'), ((25474, 25491), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (25485, 25491), False, 'from collections import deque, defaultdict\n'), ((27858, 27875), 'collections.defaultdict', 
'defaultdict', (['list'], {}), '(list)\n', (27869, 27875), False, 'from collections import deque, defaultdict\n'), ((29205, 29222), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (29216, 29222), False, 'from collections import deque, defaultdict\n'), ((1762, 1787), 'numpy.ones_like', 'np.ones_like', (['demand_rate'], {}), '(demand_rate)\n', (1774, 1787), True, 'import numpy as np\n'), ((2952, 2977), 'numpy.ones_like', 'np.ones_like', (['demand_rate'], {}), '(demand_rate)\n', (2964, 2977), True, 'import numpy as np\n'), ((27974, 28000), 'numpy.array', 'np.array', (['[[10], [5], [3]]'], {}), '([[10], [5], [3]])\n', (27982, 28000), True, 'import numpy as np\n'), ((28215, 28241), 'numpy.array', 'np.array', (['[[10], [2], [2]]'], {}), '([[10], [2], [2]])\n', (28223, 28241), True, 'import numpy as np\n'), ((28505, 28531), 'numpy.array', 'np.array', (['[[10], [5], [3]]'], {}), '([[10], [5], [3]])\n', (28513, 28531), True, 'import numpy as np\n'), ((28692, 28721), 'numpy.array', 'np.array', (['[[7], [5], [alpha]]'], {}), '([[7], [5], [alpha]])\n', (28700, 28721), True, 'import numpy as np\n'), ((29129, 29166), 'numpy.array', 'np.array', (['[[7], [5], [env.t * alpha]]'], {}), '([[7], [5], [env.t * alpha]])\n', (29137, 29166), True, 'import numpy as np\n'), ((28936, 28973), 'numpy.array', 'np.array', (['[[7], [5], [alpha * env.t]]'], {}), '([[7], [5], [alpha * env.t]])\n', (28944, 28973), True, 'import numpy as np\n')] |
import numpy as np
from scipy.special import gamma
from scipy.signal import residue
# Global Pade approximation of Miffag-Leffler function as described in https://arxiv.org/abs/1912.10996
# Valid for z < 0, 0 < alpha < 1, beta >= alpha, alpha != beta != 1
# Implemented by <NAME>
def solve_poly_coefs(alpha, beta, m=7, n=2):
    """
    Solve the 7x7 linear system for the polynomial coefficients of the
    rational (Pade) approximation of the Mittag-Leffler function.

    Only implemented for (m, n) = (7, 2), which was shown to yield the
    best results (see https://arxiv.org/abs/1912.10996).

    :param float alpha: alpha parameter of ML function (0 < alpha < 1)
    :param float beta: beta parameter of ML function (beta > alpha)
    :param int m: m value for approximation (must be 7)
    :param int n: n value for approximation (must be 2)
    :return: array of 7 coefficients, unpacked downstream as
        (p1, p2, p3, q0, q1, q2, q3)
    :raises ValueError: for invalid alpha/beta/(m, n), or beta == alpha
    """
    # Validate parameter ranges before building the system.
    check_ab(alpha, beta)
    check_mn(m, n)
    if beta > alpha:
        # Coefficient matrix of the matching conditions: the first columns
        # are simple 0/1 entries, the rest are filled diagonally with
        # ratios of gamma functions at shifted arguments.
        A = np.zeros((7, 7))
        np.fill_diagonal(A[:3, :3], [1, 1, 1])
        A[6, 2] = 1
        np.fill_diagonal(A[:4, 3:], -gamma(beta - alpha) / gamma(beta))
        np.fill_diagonal(A[1:5, 3:], gamma(beta - alpha) / gamma(beta + alpha))
        np.fill_diagonal(A[2:6, 3:], -gamma(beta - alpha) / gamma(beta + 2 * alpha))
        np.fill_diagonal(A[3:6, 3:6], gamma(beta - alpha) / gamma(beta + 3 * alpha))
        np.fill_diagonal(A[4:6, 3:5], -gamma(beta - alpha) / gamma(beta + 4 * alpha))
        A[5, 3] = gamma(beta - alpha) / gamma(beta + 5 * alpha)
        A[6, 6] = -1
        # Right-hand side of the system.
        # NOTE(review): the last entry uses gamma(beta - 2 * alpha) while the
        # neighbouring terms follow a beta + k*alpha pattern -- this may be a
        # transcription slip; confirm against the reference paper before
        # relying on high accuracy.
        y = np.array([0, 0, 0, -1,
                       gamma(beta - alpha) / gamma(beta),
                       -gamma(beta - alpha) / gamma(beta + alpha),
                       -gamma(beta - alpha) / gamma(beta - 2 * alpha)
                      ])
        x = np.linalg.solve(A, y)
    else:
        raise ValueError('Not implemented for alpha==beta')
    return x
def get_partial_frac(coefs, alpha, beta):
    """
    Partial-fraction decomposition of the rational Pade expression.

    :param coefs: the seven polynomial coefficients (p1, p2, p3, q0, q1, q2, q3)
    :param float alpha: alpha parameter of the ML function
    :param float beta: beta parameter of the ML function
    :return: (residues, poles, direct_terms) as produced by scipy.signal.residue
    :raises ValueError: when beta is not strictly greater than alpha
    """
    if not beta > alpha:
        raise ValueError('Not implemented for alpha==beta')
    p1, p2, p3, q0, q1, q2, q3 = coefs
    numerator = [1, p3, p2, p1]
    denominator = [1, q3, q2, q1, q0]
    return residue(numerator, denominator)
def ml_pade_approx(z, alpha, beta, m=7, n=2, decompose=True):
    """
    Evaluate the global Pade approximation of the ML function at z.

    :param z: evaluation point(s), must satisfy z < 0 (scalar or ndarray)
    :param float alpha: alpha parameter of ML function (0 < alpha < 1)
    :param float beta: beta parameter of ML function (beta > alpha)
    :param int m: m value for approximation (only 7 supported)
    :param int n: n value for approximation (only 2 supported)
    :param bool decompose: if True, use the partial fraction decomposition
        (exactly equal with lower computation time); if False, use the
        rational expression directly
    :return: approximate value(s) of the ML function at z
    :raises ValueError: for invalid alpha/beta/(m, n) or z >= 0
    """
    # Fix: the original also called solve_poly_coefs here and discarded the
    # result; create_approx_func performs the validation and coefficient
    # solve itself, so the duplicate work is removed.
    func = create_approx_func(alpha, beta, m, n, decompose)
    return func(z)
def create_approx_func(alpha, beta, m=7, n=2, decompose=True):
    """
    Build a reusable evaluator of the Pade ML approximation for fixed
    alpha, beta, m, n.

    :param float alpha: alpha parameter of ML function (0 < alpha < 1)
    :param float beta: beta parameter of ML function (beta > alpha)
    :param int m: m value for approximation (only 7 supported)
    :param int n: n value for approximation (only 2 supported)
    :param bool decompose: if True, evaluate via the partial fraction
        decomposition (exactly equal with lower computation time);
        if False, evaluate the rational expression directly
    :return: a callable of one argument z (z < 0, scalar or ndarray)
    :raises ValueError: via solve_poly_coefs for invalid alpha/beta/(m, n)
    """
    # solve_poly_coefs validates the parameters and raises for
    # beta <= alpha, so approx_func is always bound at the final return.
    coefs = solve_poly_coefs(alpha, beta, m, n)
    if beta > alpha:
        if decompose:
            r, p, k = get_partial_frac(coefs, alpha, beta)
            def approx_func(z):
                check_z(z)
                # Only r[0] and r[2] appear: poles presumably come in
                # complex-conjugate pairs (r[0],r[1]) and (r[2],r[3]), so
                # 2*Re(...) over one member of each pair sums the full
                # expansion -- NOTE(review): verify the pair ordering
                # guaranteed by scipy.signal.residue.
                return 2 * (np.real(r[0] / (-z - p[0])) + np.real(r[2] / (-z - p[2])))
        else:
            p1, p2, p3, q0, q1, q2, q3 = coefs
            def approx_func(z):
                check_z(z)
                # Direct rational form: cubic numerator over quartic
                # denominator in (-z), scaled by 1/gamma(beta - alpha).
                return (1 / gamma(beta - alpha)) * (p1 + p2*(-z) + p3*(-z)**2 + (-z)**3) / (q0 + q1*(-z) + q2*(-z)**2 + q3*(-z)**3 + (-z)**4)
    return approx_func
# Checks
def check_z(z):
    """Raise ValueError unless every value in z is strictly negative."""
    largest = np.max(z)
    if largest >= 0:
        raise ValueError('Approximation is only valid for z < 0')
def check_ab(alpha, beta):
    """
    Validate the ML parameters for the global Pade approximation.

    :param float alpha: must satisfy 0 < alpha < 1 (and alpha != 1)
    :param float beta: must satisfy beta >= alpha (and beta != 1)
    :raises ValueError: if any constraint is violated
    """
    if beta < alpha:
        raise ValueError('Approximation is only valid for beta >= alpha')
    # Idiom fix: was `(0 < alpha < 1) == False`.
    if not 0 < alpha < 1:
        raise ValueError('Approximation is only valid for 0 < alpha < 1')
    if alpha == 1 or beta == 1:
        raise ValueError('Approximation is not valid if alpha = 1 or beta = 1')
def check_mn(m, n):
    """Raise ValueError unless (m, n) is exactly (7, 2)."""
    if (m, n) != (7, 2):
        raise ValueError('Only implemented for (m,n) = (7,2)')
| [
"numpy.fill_diagonal",
"scipy.special.gamma",
"numpy.zeros",
"numpy.max",
"scipy.signal.residue",
"numpy.real",
"numpy.linalg.solve"
] | [((747, 763), 'numpy.zeros', 'np.zeros', (['(7, 7)'], {}), '((7, 7))\n', (755, 763), True, 'import numpy as np\n'), ((766, 804), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A[:3, :3]', '[1, 1, 1]'], {}), '(A[:3, :3], [1, 1, 1])\n', (782, 804), True, 'import numpy as np\n'), ((1465, 1486), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'y'], {}), '(A, y)\n', (1480, 1486), True, 'import numpy as np\n'), ((1896, 1941), 'scipy.signal.residue', 'residue', (['[1, p3, p2, p1]', '[1, q3, q2, q1, q0]'], {}), '([1, p3, p2, p1], [1, q3, q2, q1, q0])\n', (1903, 1941), False, 'from scipy.signal import residue\n'), ((3773, 3782), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (3779, 3782), True, 'import numpy as np\n'), ((1210, 1229), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (1215, 1229), False, 'from scipy.special import gamma\n'), ((1232, 1255), 'scipy.special.gamma', 'gamma', (['(beta + 5 * alpha)'], {}), '(beta + 5 * alpha)\n', (1237, 1255), False, 'from scipy.special import gamma\n'), ((873, 884), 'scipy.special.gamma', 'gamma', (['beta'], {}), '(beta)\n', (878, 884), False, 'from scipy.special import gamma\n'), ((917, 936), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (922, 936), False, 'from scipy.special import gamma\n'), ((939, 958), 'scipy.special.gamma', 'gamma', (['(beta + alpha)'], {}), '(beta + alpha)\n', (944, 958), False, 'from scipy.special import gamma\n'), ((1014, 1037), 'scipy.special.gamma', 'gamma', (['(beta + 2 * alpha)'], {}), '(beta + 2 * alpha)\n', (1019, 1037), False, 'from scipy.special import gamma\n'), ((1071, 1090), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (1076, 1090), False, 'from scipy.special import gamma\n'), ((1093, 1116), 'scipy.special.gamma', 'gamma', (['(beta + 3 * alpha)'], {}), '(beta + 3 * alpha)\n', (1098, 1116), False, 'from scipy.special import gamma\n'), ((1173, 1196), 'scipy.special.gamma', 'gamma', (['(beta + 4 * 
alpha)'], {}), '(beta + 4 * alpha)\n', (1178, 1196), False, 'from scipy.special import gamma\n'), ((851, 870), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (856, 870), False, 'from scipy.special import gamma\n'), ((992, 1011), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (997, 1011), False, 'from scipy.special import gamma\n'), ((1151, 1170), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (1156, 1170), False, 'from scipy.special import gamma\n'), ((1308, 1327), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (1313, 1327), False, 'from scipy.special import gamma\n'), ((1330, 1341), 'scipy.special.gamma', 'gamma', (['beta'], {}), '(beta)\n', (1335, 1341), False, 'from scipy.special import gamma\n'), ((1373, 1392), 'scipy.special.gamma', 'gamma', (['(beta + alpha)'], {}), '(beta + alpha)\n', (1378, 1392), False, 'from scipy.special import gamma\n'), ((1424, 1447), 'scipy.special.gamma', 'gamma', (['(beta - 2 * alpha)'], {}), '(beta - 2 * alpha)\n', (1429, 1447), False, 'from scipy.special import gamma\n'), ((1351, 1370), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (1356, 1370), False, 'from scipy.special import gamma\n'), ((1402, 1421), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (1407, 1421), False, 'from scipy.special import gamma\n'), ((3443, 3470), 'numpy.real', 'np.real', (['(r[0] / (-z - p[0]))'], {}), '(r[0] / (-z - p[0]))\n', (3450, 3470), True, 'import numpy as np\n'), ((3473, 3500), 'numpy.real', 'np.real', (['(r[2] / (-z - p[2]))'], {}), '(r[2] / (-z - p[2]))\n', (3480, 3500), True, 'import numpy as np\n'), ((3602, 3621), 'scipy.special.gamma', 'gamma', (['(beta - alpha)'], {}), '(beta - alpha)\n', (3607, 3621), False, 'from scipy.special import gamma\n')] |
import torch
from torch import nn
import numpy as np
from scipy.io import loadmat, savemat
from array import array
class BFM():
    """
    Numpy-side container for the Basel Face Model (front-face subset).

    Loaded from './BFM/BFM_model_front.mat'; intended for visualization
    only, not used inside the DNN model.
    """
    def __init__(self):
        data = loadmat('./BFM/BFM_model_front.mat')
        # Mean shape/texture are stored row-wise in the .mat file;
        # transpose them into column vectors.
        self.meanshape = data['meanshape'].T
        self.meantex = data['meantex'].T
        # PCA bases for identity, expression and texture.
        self.idBase = data['idBase']
        self.exBase = data['exBase']
        self.texBase = data['texBase']
        # Mesh topology (indices are 1-based in the .mat file).
        self.point_buf = data['point_buf']  # faces adjacent to each vertex (for face normals)
        self.tri = data['tri']              # vertex indices per triangle
        # 68 face landmark vertex indices, converted to 0-based.
        self.keypoints = np.squeeze(data['keypoints']).astype(np.int32) - 1
class BFM_torch(nn.Module):
    """
    Torch implementation of the BFM face model.

    Model matrices are held as registered (non-trainable) buffers so they
    follow ``.to(device)`` moves; shape, texture and rotation assembly are
    differentiable and used inside the DNN model.
    """
    def __init__(self):
        super(BFM_torch, self).__init__()
        data = loadmat('./BFM/BFM_model_front.mat')

        def _f32(arr):
            return torch.tensor(arr, dtype=torch.float32)

        def _i32(arr):
            return torch.tensor(arr, dtype=torch.int32)

        # [107127, 1] mean face shape (column vector)
        self.register_buffer("meanshape", _f32(data['meanshape'].T))
        # [107127, 80] identity basis
        self.register_buffer("idBase", _f32(data['idBase']))
        # [107127, 64] expression basis
        self.register_buffer("exBase", _f32(data['exBase']))
        # [107127, 1] mean face texture (column vector)
        self.register_buffer("meantex", _f32(data['meantex'].T))
        # [107127, 80] texture basis (original comment said 107121 --
        # presumably a typo; verify against the .mat file)
        self.register_buffer('texBase', _f32(data['texBase']))
        # [70789, 3] vertex indices per triangle, 1-based
        self.register_buffer('tri', _i32(data['tri']))
        # [35709, 8] adjacent-face indices per vertex, 1-based (max 70789)
        self.register_buffer('point_buf', _i32(data['point_buf']))
        # [68] face landmark vertex indices, converted to 0-based
        landmark_idx = np.squeeze(data['keypoints']).astype(np.int32) - 1
        self.register_buffer('keypoints', torch.tensor(landmark_idx, dtype=torch.int32))

    def get_shape(self, id_param, ex_param):
        """
        Assemble face geometry from identity and expression coefficients.

        id_param: [bs, 80]
        ex_param: [bs, 64]
        return: [bs, N, 3] vertices (N = 107127 / 3), recentred on the
        mean of the mean shape
        """
        assert id_param.shape[0] == ex_param.shape[0]
        bs = id_param.shape[0]
        offset = torch.bmm(self.idBase[None, :, :].expand(bs, -1, -1), id_param[:, :, None]) \
            + torch.bmm(self.exBase[None, :, :].expand(bs, -1, -1), ex_param[:, :, None])
        verts = (self.meanshape + offset).reshape(bs, -1, 3)
        center = torch.mean(self.meanshape[None, :, :].reshape(1, -1, 3), dim=1, keepdim=True)
        return verts - center

    def get_texture(self, tex_param):
        """
        Assemble per-vertex texture from the texture coefficients.

        tex_param: [bs, 80]
        return: [bs, 107127, 1]
        """
        bs = tex_param.shape[0]
        base = self.texBase[None, :, :].expand(bs, -1, -1)
        return self.meantex + torch.bmm(base, tex_param[:, :, None])

    def compute_rotation_matrix(self, rotate_param):
        """
        Build batched rotation matrices from Euler angles.

        rotate_param: [bs, 3] interpreted as (pitch, yaw, roll)
        return: [bs, 3, 3]
        """
        bs = rotate_param.shape[0]
        device = rotate_param.device
        pitch = rotate_param[:, 0]
        yaw = rotate_param[:, 1]
        roll = rotate_param[:, 2]

        def _batched_eye():
            return torch.eye(3, device=device)[None, :, :].expand(bs, -1, -1).clone()

        rot_x = _batched_eye()  # pitch: rotation about the x axis
        rot_x[:, 1, 1] = torch.cos(pitch)
        rot_x[:, 2, 2] = torch.cos(pitch)
        rot_x[:, 1, 2] = -torch.sin(pitch)
        rot_x[:, 2, 1] = torch.sin(pitch)

        rot_y = _batched_eye()  # yaw: rotation about the y axis
        rot_y[:, 0, 0] = torch.cos(yaw)
        rot_y[:, 2, 2] = torch.cos(yaw)
        rot_y[:, 0, 2] = torch.sin(yaw)
        rot_y[:, 2, 0] = -torch.sin(yaw)

        rot_z = _batched_eye()  # roll: rotation about the z axis
        rot_z[:, 0, 0] = torch.cos(roll)
        rot_z[:, 1, 1] = torch.cos(roll)
        rot_z[:, 0, 1] = -torch.sin(roll)
        rot_z[:, 1, 0] = torch.sin(roll)

        # R = (Rz @ Ry @ Rx)^T, matching the original convention.
        return torch.bmm(torch.bmm(rot_z, rot_y), rot_x).permute(0, 2, 1)
"torch.bmm",
"torch.eye",
"scipy.io.loadmat",
"torch.cos",
"numpy.squeeze",
"torch.sin",
"torch.tensor"
] | [((313, 332), 'scipy.io.loadmat', 'loadmat', (['model_path'], {}), '(model_path)\n', (320, 332), False, 'from scipy.io import loadmat, savemat\n'), ((1169, 1188), 'scipy.io.loadmat', 'loadmat', (['model_path'], {}), '(model_path)\n', (1176, 1188), False, 'from scipy.io import loadmat, savemat\n'), ((3561, 3577), 'torch.cos', 'torch.cos', (['pitch'], {}), '(pitch)\n', (3570, 3577), False, 'import torch\n'), ((3602, 3618), 'torch.cos', 'torch.cos', (['pitch'], {}), '(pitch)\n', (3611, 3618), False, 'import torch\n'), ((3685, 3701), 'torch.sin', 'torch.sin', (['pitch'], {}), '(pitch)\n', (3694, 3701), False, 'import torch\n'), ((3725, 3739), 'torch.cos', 'torch.cos', (['yaw'], {}), '(yaw)\n', (3734, 3739), False, 'import torch\n'), ((3762, 3776), 'torch.cos', 'torch.cos', (['yaw'], {}), '(yaw)\n', (3771, 3776), False, 'import torch\n'), ((3799, 3813), 'torch.sin', 'torch.sin', (['yaw'], {}), '(yaw)\n', (3808, 3813), False, 'import torch\n'), ((3876, 3891), 'torch.cos', 'torch.cos', (['roll'], {}), '(roll)\n', (3885, 3891), False, 'import torch\n'), ((3915, 3930), 'torch.cos', 'torch.cos', (['roll'], {}), '(roll)\n', (3924, 3930), False, 'import torch\n'), ((3994, 4009), 'torch.sin', 'torch.sin', (['roll'], {}), '(roll)\n', (4003, 4009), False, 'import torch\n'), ((1241, 1296), 'torch.tensor', 'torch.tensor', (["model['meanshape'].T"], {'dtype': 'torch.float32'}), "(model['meanshape'].T, dtype=torch.float32)\n", (1253, 1296), False, 'import torch\n'), ((1348, 1398), 'torch.tensor', 'torch.tensor', (["model['idBase']"], {'dtype': 'torch.float32'}), "(model['idBase'], dtype=torch.float32)\n", (1360, 1398), False, 'import torch\n'), ((1450, 1500), 'torch.tensor', 'torch.tensor', (["model['exBase']"], {'dtype': 'torch.float32'}), "(model['exBase'], dtype=torch.float32)\n", (1462, 1500), False, 'import torch\n'), ((1552, 1605), 'torch.tensor', 'torch.tensor', (["model['meantex'].T"], {'dtype': 'torch.float32'}), "(model['meantex'].T, dtype=torch.float32)\n", (1564, 
1605), False, 'import torch\n'), ((1658, 1709), 'torch.tensor', 'torch.tensor', (["model['texBase']"], {'dtype': 'torch.float32'}), "(model['texBase'], dtype=torch.float32)\n", (1670, 1709), False, 'import torch\n'), ((1756, 1801), 'torch.tensor', 'torch.tensor', (["model['tri']"], {'dtype': 'torch.int32'}), "(model['tri'], dtype=torch.int32)\n", (1768, 1801), False, 'import torch\n'), ((1868, 1919), 'torch.tensor', 'torch.tensor', (["model['point_buf']"], {'dtype': 'torch.int32'}), "(model['point_buf'], dtype=torch.int32)\n", (1880, 1919), False, 'import torch\n'), ((2493, 2533), 'torch.bmm', 'torch.bmm', (['ex_base', 'ex_param[:, :, None]'], {}), '(ex_base, ex_param[:, :, None])\n', (2502, 2533), False, 'import torch\n'), ((2955, 2997), 'torch.bmm', 'torch.bmm', (['tex_base', 'tex_param[:, :, None]'], {}), '(tex_base, tex_param[:, :, None])\n', (2964, 2997), False, 'import torch\n'), ((3644, 3660), 'torch.sin', 'torch.sin', (['pitch'], {}), '(pitch)\n', (3653, 3660), False, 'import torch\n'), ((3837, 3851), 'torch.sin', 'torch.sin', (['yaw'], {}), '(yaw)\n', (3846, 3851), False, 'import torch\n'), ((3955, 3970), 'torch.sin', 'torch.sin', (['roll'], {}), '(roll)\n', (3964, 3970), False, 'import torch\n'), ((2455, 2495), 'torch.bmm', 'torch.bmm', (['id_base', 'id_param[:, :, None]'], {}), '(id_base, id_param[:, :, None])\n', (2464, 2495), False, 'import torch\n'), ((827, 857), 'numpy.squeeze', 'np.squeeze', (["model['keypoints']"], {}), "(model['keypoints'])\n", (837, 857), True, 'import numpy as np\n'), ((4030, 4064), 'torch.bmm', 'torch.bmm', (['roll_matrix', 'yaw_matrix'], {}), '(roll_matrix, yaw_matrix)\n', (4039, 4064), False, 'import torch\n'), ((1979, 2009), 'numpy.squeeze', 'np.squeeze', (["model['keypoints']"], {}), "(model['keypoints'])\n", (1989, 2009), True, 'import numpy as np\n'), ((3315, 3342), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'device'}), '(3, device=device)\n', (3324, 3342), False, 'import torch\n'), ((3393, 3420), 'torch.eye', 
'torch.eye', (['(3)'], {'device': 'device'}), '(3, device=device)\n', (3402, 3420), False, 'import torch\n'), ((3473, 3500), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'device'}), '(3, device=device)\n', (3482, 3500), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 10:51:58 2021
@author: 91750
"""
import math
import numpy as np
def Join(Sensors, Model, TotalCH):
    """Attach every live sensor to its nearest cluster head (CH).

    For each sensor with remaining energy, the closest CH (from ``TotalCH``,
    whose entries are 1-based sensor ids) is selected.  If that CH is within
    communication range ``Model['RR']`` and closer than the sink, the sensor
    joins it; otherwise the sensor falls back to talking to the sink directly
    (marked by the pseudo-CH id ``n + 1``).

    Parameters
    ----------
    Sensors : dict of per-sensor lists/arrays with keys
        'xd', 'yd' (coordinates), 'E' (energy), 'distosink',
        'MCH' (assigned CH id, written), 'distoCH' (distance to CH, written).
    Model : dict with 'n' (sensor count) and 'RR' (CH radio range).
    TotalCH : list of 1-based cluster-head ids.

    Returns
    -------
    dict
        The same ``Sensors`` mapping, updated in place.
    """
    n = Model['n']
    m = len(TotalCH)
    if m <= 1:
        # Nothing to join when at most one cluster head exists
        # (matches the original behavior of skipping the whole body).
        return Sensors
    # D[j][i]: Euclidean distance from sensor i to cluster head TotalCH[j]
    D = [[0.0] * n for _ in range(m)]
    for i in range(n):
        for j in range(m):
            ch = TotalCH[j] - 1  # CH ids are 1-based indices into the lists
            dx = Sensors['xd'][i] - Sensors['xd'][ch]
            dy = Sensors['yd'][i] - Sensors['yd'][ch]
            D[j][i] = math.sqrt(dx * dx + dy * dy)
    arr = np.array(D)
    Dmin = arr.min(axis=0)
    # Din[i]: row index (into TotalCH) of the nearest CH for sensor i;
    # argmin picks the first minimum, like the original linear scan.
    Din = [int(np.argmin(arr[:, i])) for i in range(n)]
    for i in range(n):
        if Sensors['E'][i] > 0:  # only sensors with remaining energy
            if Dmin[i] <= Model['RR'] and Dmin[i] < Sensors['distosink'][i]:
                Sensors['MCH'][i] = TotalCH[Din[i]]
                # bug fix: original wrote Sensors['distoCH']['i'] (string key)
                Sensors['distoCH'][i] = Dmin[i]
            else:
                # no suitable CH: report directly to the sink
                Sensors['MCH'][i] = n + 1
                Sensors['distoCH'][i] = Sensors['distosink'][i]
    return Sensors
| [
"numpy.array",
"math.sqrt"
] | [((626, 637), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (634, 637), True, 'import numpy as np\n'), ((596, 614), 'math.sqrt', 'math.sqrt', (['(d1 + d2)'], {}), '(d1 + d2)\n', (605, 614), False, 'import math\n')] |
from __future__ import division
import numpy as np
import pdb
from scipy import integrate
__author__ = '<NAME>'
def area_weight_avg(data, lat, lat_axis):
    """Rough cosine-of-latitude weighted mean of *data* along *lat_axis*.

    Only use this for testing or plotting; prefer calc_global_mean for
    accurate global means.
    """
    lat_rad = np.radians(lat)
    cos_weights = np.cos(lat_rad)
    return np.average(data, axis=lat_axis, weights=cos_weights)
def area_weight_data(data, lat):
    """Cosine-of-latitude weight *data* elementwise (used for plotting).

    Parameters
    ----------
    data : ndarray, 1-D (per-latitude) or 2-D (latitude on axis 0)
    lat : array-like of latitudes in degrees

    Returns
    -------
    ndarray of the same shape as *data*, scaled by cos(latitude).

    Raises
    ------
    ValueError
        If *data* is not 1-D or 2-D.  (The original printed a message and
        dropped into pdb, then returned an unbound variable.)
    """
    weights = np.cos(np.radians(lat))
    if data.ndim == 1:
        return data * weights
    if data.ndim == 2:
        # broadcast the latitude weights down the rows
        return data * weights[:, None]
    raise ValueError(
        'area_weight_data expects 1-D or 2-D data, got {}-D'.format(data.ndim))
def calc_global_mean(data, lat):
    """Accurate area-weighted global mean of a zonal profile.

    Integrates rather than averaging: the mean is the integral of
    ``data * cos(lat)`` divided by the integral of ``cos(lat)``, which is
    more accurate than a discrete weighted average unless dlat is
    infinitesimally small.

    Parameters
    ----------
    data : array-like, values per latitude
    lat : array-like, latitudes in degrees (same length as *data*)

    Returns
    -------
    float, the area-weighted global mean.
    """
    lat_rad = np.deg2rad(lat)
    # area weight the latitude to account for differences in latitude
    weights = np.cos(lat_rad)
    # integrate weights and weighted data over latitude;
    # fix: `integrate.trapz` is deprecated/removed in modern SciPy,
    # `integrate.trapezoid` is the supported name (same algorithm).
    area_weight = integrate.trapezoid(weights, lat_rad)
    global_integral = integrate.trapezoid(data * weights, lat_rad)
    return global_integral / area_weight
| [
"numpy.radians",
"numpy.average",
"numpy.deg2rad",
"pdb.set_trace",
"numpy.cos",
"scipy.integrate.trapz"
] | [((315, 363), 'numpy.average', 'np.average', (['data'], {'weights': 'weights', 'axis': 'lat_axis'}), '(data, weights=weights, axis=lat_axis)\n', (325, 363), True, 'import numpy as np\n'), ((1048, 1063), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (1058, 1063), True, 'import numpy as np\n'), ((1158, 1173), 'numpy.cos', 'np.cos', (['lat_rad'], {}), '(lat_rad)\n', (1164, 1173), True, 'import numpy as np\n'), ((1239, 1272), 'scipy.integrate.trapz', 'integrate.trapz', (['weights', 'lat_rad'], {}), '(weights, lat_rad)\n', (1254, 1272), False, 'from scipy import integrate\n'), ((1294, 1334), 'scipy.integrate.trapz', 'integrate.trapz', (['(data * weights)', 'lat_rad'], {}), '(data * weights, lat_rad)\n', (1309, 1334), False, 'from scipy import integrate\n'), ((286, 301), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (296, 301), True, 'import numpy as np\n'), ((475, 490), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (485, 490), True, 'import numpy as np\n'), ((705, 720), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (718, 720), False, 'import pdb\n')] |
#!/usr/bin/env python
'''
Uses SURF to match two images.
Based on the sample code from opencv:
samples/python2/find_obj.py
Example:
matcher = Matcher()
for i in range(8):
matcher.add_baseline_image(%imagepath%)
match_key, cnt = matcher.match_image_info(%imagepath%)
is_match = matcher.match_image(%imagepath%)
'''
import numpy
import cv2
import os
import sys
import time
import math
from tradition.matcher.thread_pool import ThreadPool
def _one_match(thread_name, matcher, task_cnt, key, visual_image_path, image, kp, desc):
    """Match one query image against one baseline image (thread-pool worker).

    Runs a knn descriptor match + ratio test, estimates a homography, then
    applies geometric sanity checks (corner distance, parallelism, area
    distortion).  When the combined score clears matcher.min_score_thresh
    the result is recorded in matcher.match_info[key].  Always increments
    matcher.task_info[task_cnt] at the end so the dispatcher in _all_match
    can detect completion.
    """
    if matcher.debug:
        print("begin match thread %s" % (thread_name))
    if matcher.debug:
        # debug entries cache the full baseline image (for visualization)
        (b_kp, b_desc, b_image) = matcher.path_to_baseline_info[key]
        b_width = b_image.shape[1]
        b_height = b_image.shape[0]
    else:
        # non-debug entries only cache keypoints, descriptors and size
        (b_kp, b_desc, b_width, b_height) = matcher.path_to_baseline_info[key]
    raw_matches = matcher.matcher.knnMatch(desc, trainDescriptors=b_desc, k=2)  # two nearest neighbours for the ratio test
    if matcher.debug:
        print('raw_matches:{}'.format(len(raw_matches)))
    kp_pairs = matcher.filter_matches(kp, b_kp, raw_matches)
    if matcher.debug:
        print('kp_pairs:{}'.format(len(kp_pairs)))
    if len(kp_pairs) >= matcher.min_match_points_cnt:
        mkp1, mkp2 = zip(*kp_pairs)
        p1 = numpy.float32([kp.pt for kp in mkp1])
        p2 = numpy.float32([kp.pt for kp in mkp2])
        # RANSAC homography; status flags the inlier correspondences
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 3.0)
        if matcher.debug:
            print('kp_cnt:{}'.format(numpy.sum(status)))
        if numpy.sum(status) >= matcher.min_match_points_cnt:
            # project the query image's corners into the baseline image
            corners = numpy.float32(
                [[0, 0], [image.shape[1], 0], [image.shape[1], image.shape[0]], [0, image.shape[0]]])
            corners = numpy.int32(
                cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2))
            x = corners[:, 0]
            y = corners[:, 1]
            # how far the projected corners fall outside the baseline image,
            # relative to its size (0 = fully inside)
            corner_distance = max(
                abs(numpy.min(x)) / b_width,
                abs(numpy.min(y)) / b_height,
                abs(numpy.max(x) - b_width) / b_width,
                abs(numpy.max(y) - b_height) / b_height
            )
            if matcher.debug:
                print('corner_distance:{}'.format(corner_distance))
            if corner_distance <= 1:  # reject when the projected corners stray too far past the edges  TODO maybe some problem
                # check that the projected quadrilateral is still close to a
                # parallelogram: compare the slope angle (degrees) of each
                # pair of opposite edges; 10000 stands in for a vertical edge
                line1_delta = math.atan(
                    (corners[1][1] - corners[0][1]) / (corners[1][0] - corners[0][0]) if corners[1][0] - corners[0][
                        0] != 0 else 10000) * 180 / math.pi
                line3_delta = math.atan(
                    (corners[2][1] - corners[3][1]) / (corners[2][0] - corners[3][0]) if corners[2][0] - corners[3][
                        0] != 0 else 10000) * 180 / math.pi
                first_parallel_distance = abs(line1_delta - line3_delta)
                if matcher.debug:
                    print(line1_delta, line3_delta, first_parallel_distance)
                line2_delta = math.atan(
                    (corners[3][1] - corners[0][1]) / (corners[3][0] - corners[0][0]) if corners[3][0] - corners[0][
                        0] != 0 else 10000) * 180 / math.pi
                line4_delta = math.atan(
                    (corners[2][1] - corners[1][1]) / (corners[2][0] - corners[1][0]) if corners[2][0] - corners[1][
                        0] != 0 else 10000) * 180 / math.pi
                second_parallel_distance = abs(line2_delta - line4_delta)
                if matcher.debug:
                    print(line2_delta, line4_delta, second_parallel_distance)
                parallel_distance = max(first_parallel_distance, second_parallel_distance)
                if matcher.debug:
                    print('parallel_distance:{},{},{}'.format(parallel_distance, first_parallel_distance,
                                                              second_parallel_distance))
                # relative area distortion between the query image and its
                # projection into the baseline image
                area = image.shape[1] * image.shape[0]
                transfer_area = cv2.contourArea(corners)
                area_distance = abs(transfer_area - area) / max(1, min(area, transfer_area))
                if matcher.debug:
                    print('area_distance:{}'.format(area_distance))
                score = matcher.caculate_score(numpy.sum(status),
                                               # corner_distance,
                                               parallel_distance,
                                               area_distance)
                if matcher.visual and matcher.debug:
                    # dump a side-by-side visualization next to the query image
                    visual_path = os.path.join(os.path.dirname(visual_image_path),
                                               'visual_{}_{}_{}'.format(int(score * 100), key,
                                                                        os.path.basename(visual_image_path)))
                    matcher.match_visual(visual_path, image, b_image, kp_pairs, status, H)
                if score > matcher.min_score_thresh:
                    matcher.match_info[key] = score
                # if score >= self.max_score_thresh:
                #     break
    # signal completion to the dispatcher regardless of match outcome
    matcher.task_info[task_cnt] += 1
###############################################################################
# Image Matching For Servicing
###############################################################################
class Matcher:
    """SURF keypoint matcher used for servicing.

    Keeps a pool of baseline images (one or more per UPC, keyed as
    '<upc>_<index>') and matches query images against the whole pool,
    scoring each candidate with homography-based geometric checks.
    Per-baseline matching is fanned out to a ThreadPool.
    """

    def __init__(self, min_match_points_cnt=4, min_score_thresh=0.5, max_score_thresh=0.8, debug=False, visual=False, max_thread=20):
        # key '<upc>_<index>' -> cached SURF data for one baseline image
        self.path_to_baseline_info = {}
        # upc -> number of baseline images registered for that upc
        self.upc_to_cnt = {}
        self.detector = cv2.xfeatures2d.SURF_create(400, 5, 5)
        self.matcher = cv2.BFMatcher(cv2.NORM_L2)
        self.min_match_points_cnt = min_match_points_cnt
        self.min_score_thresh = min_score_thresh
        self.max_score_thresh = max_score_thresh
        self.debug = debug
        self.visual = visual
        # monotonically increasing id per _all_match call; task_info maps
        # that id to the number of finished worker tasks
        self.task_cnt = 0
        self.task_info = {}
        self.match_info = None
        self.max_thread = max_thread
        self.thread_pool = ThreadPool(max_thread)

    def add_baseline_image(self, image_path, upc):
        """Register the image at *image_path* as a baseline for *upc*.

        Returns True on success, False when no (or too few) SURF keypoints
        are found in the image.
        """
        image = cv2.imread(image_path)
        kp, desc = self.detector.detectAndCompute(image, None)
        if self.debug:
            print('b_image kp:{},{}'.format(len(kp), upc))
        if len(kp) == 0:
            print('error: no key point to base image:{}'.format(image_path))
            return False
        if len(kp)< 10:
            print('error: too less keypoint count to base image:{}/{}'.format(len(kp),image_path))
            return False
        if upc in self.upc_to_cnt:
            self.upc_to_cnt[upc] += 1
        else:
            self.upc_to_cnt[upc] = 1
        key = upc + '_'+ str(self.upc_to_cnt[upc])
        if self.debug:
            # debug mode keeps the full image so matches can be visualized
            self.path_to_baseline_info[key] = (kp, desc, image)
        else:
            # otherwise only width/height are needed for geometric checks
            self.path_to_baseline_info[key] = (kp, desc, image.shape[1],image.shape[0])
        return True

    def removeall_baseline_image(self,upc):
        """Drop every baseline image registered for *upc*."""
        if upc in self.upc_to_cnt:
            for i in range(self.upc_to_cnt[upc]):
                key = upc + '_' + str(i + 1)
                if key in self.path_to_baseline_info:
                    del self.path_to_baseline_info[key]
            del self.upc_to_cnt[upc]

    def get_baseline_cnt(self):
        """Return the number of registered baseline images."""
        return len(self.path_to_baseline_info)

    # def get_thread_size(self):
    #     thread_size = int(len(self.path_to_baseline_info)/100)
    #     if thread_size > self.max_thread:
    #         thread_size = self.max_thread
    #
    #     return thread_size
    #
    def _all_match(self, image_path, image=None, within_upcs=None, filter_upcs=None):
        """Match *image* (read from *image_path* when None) against every
        baseline image, dispatching one _one_match task per baseline to the
        thread pool, then wait (up to ~3s) for completion.

        Results are collected into ``self.match_info`` (key -> score).
        *within_upcs* restricts matching to those UPCs; *filter_upcs*
        excludes UPCs.
        """
        if image is None:
            image = cv2.imread(image_path)
        kp, desc = self.detector.detectAndCompute(image, None)
        if self.debug:
            print('image kp:{}'.format(len(kp)))
        self.match_info = {}
        if len(kp) < 10:
            print('warn: too less keypoint count to match image:{}/{}'.format(len(kp),image_path))
        if self.debug:
            print('baseline image:{}'.format(len(self.path_to_baseline_info)))
        # NOTE(review): this read-then-increment is not atomic — looks unsafe
        # if _all_match is ever called from multiple threads; confirm callers
        task_cnt = self.task_cnt + 1
        self.task_cnt += 1
        self.task_info[task_cnt] = 0
        need_task_cnt = 0
        # print (self.path_to_baseline_info)
        for key in self.path_to_baseline_info:
            if within_upcs is not None:
                upc = key.split('_')[0]
                if upc not in within_upcs:
                    continue
            if filter_upcs is not None:
                upc = key.split('_')[0]
                if upc in filter_upcs:
                    continue
            if self.thread_pool is not None:
                need_task_cnt += 1
                self.thread_pool.put(_one_match, (self, task_cnt, key, image_path, image, kp, desc), None)
            else:
                # no pool configured: run the match synchronously
                _one_match('main_thread',self, task_cnt, key, image_path, image, kp, desc)
        if self.thread_pool is not None:
            # poll for completion: 30 * 0.1s = ~3s budget; the while-else
            # branch runs only when the loop times out without break
            time0 = time.time()
            i = 0
            while i < 30:
                i += 1
                if self.task_info[task_cnt] == need_task_cnt:
                    time1 = time.time()
                    if self.debug:
                        print("\033[32;0m任务正常完成%s(%.2f秒):目前线程池中有%s个线程,空闲的线程有%s个!\033[0m"
                              % (self.task_info[task_cnt], time1-time0, len(self.thread_pool.generate_list), len(self.thread_pool.free_list)))
                    break
                time.sleep(0.1)
            else:
                time1 = time.time()
                if self.debug:
                    print("\033[31;0m任务没有完成%s(共%s,%.2f秒):目前线程池中有%s个线程,空闲的线程有%s个!\033[0m"
                          % (self.task_info[task_cnt], need_task_cnt, time1-time0, len(self.thread_pool.generate_list), len(self.thread_pool.free_list)))

    def filter_matches(self, kp1, kp2, matches, ratio=0.75):
        """Apply Lowe's ratio test to knn matches and pair up keypoints.

        Returns a list of (query_keypoint, train_keypoint) tuples; at most
        a few query keypoints may map to the same train keypoint.
        """
        mkp1, mkp2 = [], []
        trainIdxs = {}
        for m in matches:
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                m = m[0]
                # if m.queryIdx in queryIdxs:
                #     continue
                if m.trainIdx in trainIdxs:
                    if trainIdxs[m.trainIdx] > 2: # FIXME matching more than two query points to one train point is not yet supported
                        continue
                mkp1.append(kp1[m.queryIdx])
                mkp2.append(kp2[m.trainIdx])
                if m.trainIdx in trainIdxs:
                    trainIdxs[m.trainIdx] += 1
                else:
                    trainIdxs[m.trainIdx] = 1
        kp_pairs = list(zip(mkp1, mkp2))
        return kp_pairs

    def caculate_score(self, cnt, parallel_distance,area_distance):
        """Combine the inlier count, parallelism deviation (degrees) and
        relative area distortion into a match score, roughly in [0, 1]."""
        # piecewise-linear score from the inlier count, capped below 1
        if cnt <= 10:
            cnt_score = 0.1*(cnt-5)
        elif cnt <= 20:
            cnt_score = 0.03*(cnt-10) + 0.5
        else:
            cnt_score = 0.01*(cnt-20) + 0.8
        if cnt_score >= 1:
            cnt_score = 0.99
        parallel_score = 0.02 * (50 - parallel_distance)# goes negative once the parallel-angle difference exceeds 50 degrees
        area_score = 1 - area_distance # goes negative once the areas differ by more than a factor of two (area_distance > 1)
        if area_score < -1:
            area_score = -1
        score = cnt_score * 0.5 + min(parallel_score,area_score) * 0.5
        if self.debug:
            print('score: %.2f = %.2f*0.5+min(%.2f, %.2f)*0.5' % (score, cnt_score,parallel_score,area_score))
        return score

    # def match_image_top_n(self, image_path, n=5, min_match_points_cnt=5, max_match_points=20, visual=True):
    #     match_info = self.match_image_all_info(image_path, min_match_points_cnt=min_match_points_cnt,max_match_points=max_match_points)
    #     if len(match_info) == 0:
    #         return None,0
    #     sorted_match_info = sorted(match_info.items(), key=lambda d: numpy.sum(d[1][3]), reverse=True)
    #     top_n = sorted_match_info[:n]
    #     ret = []
    #     for match in top_n:
    #         score = self.caculate_score(numpy.sum(match[1][3]))
    #         ret.append((match[0].split('_')[0],score))
    #     if visual:
    #         for i in range(len(top_n)):
    #             match = top_n[i]
    #             visual_path = os.path.join(os.path.dirname(image_path),'visual_{}_{}_{}'.format(match[0],i,os.path.basename(image_path)) )
    #             self.match_visual(visual_path, match[1][0],match[1][1],match[1][2],match[1][3],match[1][4])
    #     return ret
    def match_image_best_one(self, image_path, within_upcs=None, filter_upcs=None):
        """Match the image at *image_path* against the baseline pool.

        Returns (upc, score) for the best match, or (None, 0) when nothing
        scored above the threshold.
        """
        self._all_match(image_path,
                        within_upcs=within_upcs,
                        filter_upcs=filter_upcs)
        if self.match_info is None or len(self.match_info) == 0:
            return None,0
        if self.debug:
            print('match_info:{}'.format(len(self.match_info)))
        sorted_match_info = sorted(self.match_info.items(), key=lambda d: d[1], reverse=True)
        best_match = sorted_match_info[0]
        ret = (best_match[0].split('_')[0], best_match[1])
        return ret

    def match_image_best_one_with_cv2array(self, visual_image_path, image, within_upcs=None, filter_upcs=None):
        """Like match_image_best_one, but takes an already-decoded cv2
        image array; *visual_image_path* is only used for visualization
        output naming."""
        self._all_match(visual_image_path,
                        image=image,
                        within_upcs=within_upcs,
                        filter_upcs=filter_upcs)
        if self.match_info is None or len(self.match_info) == 0:
            return None,0
        if self.debug:
            print('match_info:{}'.format(len(self.match_info)))
        sorted_match_info = sorted(self.match_info.items(), key=lambda d: d[1], reverse=True)
        best_match = sorted_match_info[0]
        ret = (best_match[0].split('_')[0], best_match[1])
        return ret

    def is_find_match(self, image_path, within_upcs=None, filter_upcs=None):
        """Return True when the best match scores above 0.6."""
        upc, score = self.match_image_best_one(image_path, within_upcs=within_upcs,filter_upcs=filter_upcs)
        return upc != None and score > 0.6

    def match_visual(self, visual_path, img1, img2, kp_pairs, status=None, H=None):
        """Render a side-by-side visualization of a match and write it to
        *visual_path*: inliers as green dots/links, outliers as red crosses,
        and (when *H* is given) the projected outline of img1 over img2."""
        h1, w1 = img1.shape[:2]
        h2, w2 = img2.shape[:2]
        # canvas wide enough for both images side by side
        vis = numpy.zeros((max(h1, h2), w1 + w2, 3), numpy.uint8)
        vis[:h1, :w1, :] = img1
        vis[:h2, w1:w1 + w2, :] = img2
        # vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        if H is not None:
            # project img1's corners into img2's half of the canvas
            corners = numpy.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
            corners = numpy.int32(cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
            cv2.polylines(vis, [corners], True, (255, 255, 255))
            # center = numpy.int32(numpy.sum(corners, 0) / len(corners))
            # print(center)
            # col = (255, 0, 0)
            # r = 2
            # thickness = 3
            # cv2.line(vis, (center[0] - r, center[1] - r), (center[0] + r, center[1] + r), col, thickness)
            # cv2.line(vis, (center[0] - r, center[1] + r), (center[0] + r, center[1] - r), col, thickness)
        if status is None:
            # treat every pair as an inlier when no RANSAC status is given
            status = numpy.ones(len(kp_pairs), numpy.bool_)
        p1 = numpy.int32([kpp[0].pt for kpp in kp_pairs])
        p2 = numpy.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)
        green = (0, 255, 0)
        red = (0, 0, 255)
        white = (255, 255, 255)
        kp_color = (51, 103, 236)
        for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
            if inlier:
                col = green
                cv2.circle(vis, (x1, y1), 2, col, -1)
                cv2.circle(vis, (x2, y2), 2, col, -1)
            else:
                # outliers: draw a small red X at both endpoints
                col = red
                r = 2
                thickness = 3
                cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), col, thickness)
                cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), col, thickness)
                cv2.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), col, thickness)
                cv2.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), col, thickness)
        vis0 = vis.copy()  # NOTE(review): unused snapshot, kept as in original
        for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
            if inlier:
                cv2.line(vis, (x1, y1), (x2, y2), green)
        cv2.imwrite(visual_path, vis)
###############################################################################
# Test Main
###############################################################################
def test_1():
    """Smoke test: register eight baseline images and match a ninth."""
    t_start = time.time()
    matcher = Matcher(debug=True, visual=True)
    t_init = time.time()
    for idx in range(8):
        matcher.add_baseline_image('images/%d.jpg' % (idx + 1), str(idx))
    t_loaded = time.time()
    match_key, score = matcher.match_image_best_one('images/9.jpg')
    t_done = time.time()
    print('MATCH: %.2f, %.2f, %.2f, %.2f' % (t_done - t_start, t_init - t_start, t_loaded - t_init, t_done - t_loaded))
    print(match_key, score)
def test_2(image1, image2):
    """Smoke test: match *image2* against a single baseline *image1*."""
    t_start = time.time()
    matcher = Matcher(debug=True, visual=True)
    t_init = time.time()
    matcher.add_baseline_image(image1, 'tt')
    t_loaded = time.time()
    match_key, score = matcher.match_image_best_one(image2)
    t_done = time.time()
    print('MATCH: %.2f, %.2f, %.2f, %.2f' % (t_done - t_start, t_init - t_start, t_loaded - t_init, t_done - t_loaded))
    print(match_key, score)
def test_match_all():
    """Regression check: every registered baseline image should match
    itself; print the ones scoring below 0.8."""
    t_start = time.time()
    matcher = Matcher(debug=False, visual=False)
    t_init = time.time()
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings")
    import django
    django.setup()
    from goods.models import SampleImageClass
    from django.conf import settings
    upc_to_image_path = {}
    for sample in SampleImageClass.objects.filter(deviceid=''):
        image_path = sample.source.path.replace(settings.MEDIA_ROOT, '\\\\192.168.1.60\Image')
        # image_path = image_path.replace('\\','/')
        # image_path = '\\' + image_path
        if os.path.isfile(image_path):
            matcher.add_baseline_image(image_path, sample.upc)
            upc_to_image_path[sample.upc] = image_path
    t_loaded = time.time()
    for upc, image_path in upc_to_image_path.items():
        # print(image_path)
        match_key, score = matcher.match_image_best_one(image_path, within_upcs=[upc])
        if score < 0.8:
            print(match_key, score)
    t_done = time.time()
    print('MATCH: %.2f, %.2f, %.2f, %.2f' % (t_done - t_start, t_init - t_start, t_loaded - t_init, t_done - t_loaded))
def test_match_one(test_image_path):
    """Match one image against the step2s baseline pool and print the
    best (upc, score)."""
    t_start = time.time()
    matcher = Matcher(debug=True, visual=True)
    t_init = time.time()
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings")
    import django
    django.setup()
    from goods.models import SampleImageClass
    from django.conf import settings
    from dl import common
    upc_to_image_path = {}
    for sample in SampleImageClass.objects.filter(deviceid=common.STEP2S_PREFIX):
        image_path = sample.source.path.replace(settings.MEDIA_ROOT, '\\\\192.168.1.60\Image')
        # image_path = image_path.replace('\\','/')
        # image_path = '\\' + image_path
        if os.path.isfile(image_path):
            matcher.add_baseline_image(image_path, sample.upc)
            upc_to_image_path[sample.upc] = image_path
    t_loaded = time.time()
    match_key, score = matcher.match_image_best_one(test_image_path)
    print(match_key, score)
    t_done = time.time()
    print('MATCH: %.2f, %.2f, %.2f, %.2f' % (t_done - t_start, t_init - t_start, t_loaded - t_init, t_done - t_loaded))
if __name__ == '__main__':
    """Test code: Uses the two specified"""
    # test_1()
    # sys.exit(0)
    # default image pair for the pairwise match demo below
    fn1 = 'images/1.jpg'
    fn2 = 'images/2.jpg'
    # test_2(fn1, fn2)
    # fn1 = 'images/12.jpg'
    # fn2 = 'images/13.jpg'
    # fn1 = 'images/test/old/15.jpg'
    # fn2 = 'images/test/old/14.jpg'
    # #
    # fn1 = 'images/test/1.jpg'
    # fn2 = 'images/test/2.jpg'
    #
    # fn1 = 'images/error/1.jpg'
    # fn2 = 'images/error/2.jpg'
    test_2(fn1, fn2)
    # test_match_all()
# test_match_one(fn1) | [
"django.setup",
"numpy.sum",
"tradition.matcher.thread_pool.ThreadPool",
"cv2.xfeatures2d.SURF_create",
"os.path.isfile",
"cv2.line",
"cv2.contourArea",
"cv2.imwrite",
"os.path.dirname",
"cv2.BFMatcher",
"numpy.max",
"numpy.int32",
"cv2.circle",
"os.environ.setdefault",
"os.path.basename... | [((16286, 16297), 'time.time', 'time.time', ([], {}), '()\n', (16295, 16297), False, 'import time\n'), ((16357, 16368), 'time.time', 'time.time', ([], {}), '()\n', (16366, 16368), False, 'import time\n'), ((16474, 16485), 'time.time', 'time.time', ([], {}), '()\n', (16483, 16485), False, 'import time\n'), ((16566, 16577), 'time.time', 'time.time', ([], {}), '()\n', (16575, 16577), False, 'import time\n'), ((16752, 16763), 'time.time', 'time.time', ([], {}), '()\n', (16761, 16763), False, 'import time\n'), ((16823, 16834), 'time.time', 'time.time', ([], {}), '()\n', (16832, 16834), False, 'import time\n'), ((16892, 16903), 'time.time', 'time.time', ([], {}), '()\n', (16901, 16903), False, 'import time\n'), ((16976, 16987), 'time.time', 'time.time', ([], {}), '()\n', (16985, 16987), False, 'import time\n'), ((17157, 17168), 'time.time', 'time.time', ([], {}), '()\n', (17166, 17168), False, 'import time\n'), ((17230, 17241), 'time.time', 'time.time', ([], {}), '()\n', (17239, 17241), False, 'import time\n'), ((17246, 17310), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""main.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'main.settings')\n", (17267, 17310), False, 'import os\n'), ((17333, 17347), 'django.setup', 'django.setup', ([], {}), '()\n', (17345, 17347), False, 'import django\n'), ((17445, 17489), 'goods.models.SampleImageClass.objects.filter', 'SampleImageClass.objects.filter', ([], {'deviceid': '""""""'}), "(deviceid='')\n", (17476, 17489), False, 'from goods.models import SampleImageClass\n'), ((17933, 17944), 'time.time', 'time.time', ([], {}), '()\n', (17942, 17944), False, 'import time\n'), ((18211, 18222), 'time.time', 'time.time', ([], {}), '()\n', (18220, 18222), False, 'import time\n'), ((18379, 18390), 'time.time', 'time.time', ([], {}), '()\n', (18388, 18390), False, 'import time\n'), ((18450, 18461), 'time.time', 'time.time', ([], {}), '()\n', (18459, 18461), False, 'import time\n'), 
((18466, 18530), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""main.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'main.settings')\n", (18487, 18530), False, 'import os\n'), ((18553, 18567), 'django.setup', 'django.setup', ([], {}), '()\n', (18565, 18567), False, 'import django\n'), ((18691, 18753), 'goods.models.SampleImageClass.objects.filter', 'SampleImageClass.objects.filter', ([], {'deviceid': 'common.STEP2S_PREFIX'}), '(deviceid=common.STEP2S_PREFIX)\n', (18722, 18753), False, 'from goods.models import SampleImageClass\n'), ((19197, 19208), 'time.time', 'time.time', ([], {}), '()\n', (19206, 19208), False, 'import time\n'), ((19319, 19330), 'time.time', 'time.time', ([], {}), '()\n', (19328, 19330), False, 'import time\n'), ((1286, 1323), 'numpy.float32', 'numpy.float32', (['[kp.pt for kp in mkp1]'], {}), '([kp.pt for kp in mkp1])\n', (1299, 1323), False, 'import numpy\n'), ((1337, 1374), 'numpy.float32', 'numpy.float32', (['[kp.pt for kp in mkp2]'], {}), '([kp.pt for kp in mkp2])\n', (1350, 1374), False, 'import numpy\n'), ((1395, 1438), 'cv2.findHomography', 'cv2.findHomography', (['p1', 'p2', 'cv2.RANSAC', '(3.0)'], {}), '(p1, p2, cv2.RANSAC, 3.0)\n', (1413, 1438), False, 'import cv2\n'), ((5596, 5634), 'cv2.xfeatures2d.SURF_create', 'cv2.xfeatures2d.SURF_create', (['(400)', '(5)', '(5)'], {}), '(400, 5, 5)\n', (5623, 5634), False, 'import cv2\n'), ((5658, 5684), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_L2'], {}), '(cv2.NORM_L2)\n', (5671, 5684), False, 'import cv2\n'), ((6045, 6067), 'tradition.matcher.thread_pool.ThreadPool', 'ThreadPool', (['max_thread'], {}), '(max_thread)\n', (6055, 6067), False, 'from tradition.matcher.thread_pool import ThreadPool\n'), ((6137, 6159), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (6147, 6159), False, 'import cv2\n'), ((14997, 15041), 'numpy.int32', 'numpy.int32', (['[kpp[0].pt for kpp in kp_pairs]'], {}), '([kpp[0].pt for kpp in kp_pairs])\n', 
(15008, 15041), False, 'import numpy\n'), ((16055, 16084), 'cv2.imwrite', 'cv2.imwrite', (['visual_path', 'vis'], {}), '(visual_path, vis)\n', (16066, 16084), False, 'import cv2\n'), ((17775, 17801), 'os.path.isfile', 'os.path.isfile', (['image_path'], {}), '(image_path)\n', (17789, 17801), False, 'import os\n'), ((19039, 19065), 'os.path.isfile', 'os.path.isfile', (['image_path'], {}), '(image_path)\n', (19053, 19065), False, 'import os\n'), ((1533, 1550), 'numpy.sum', 'numpy.sum', (['status'], {}), '(status)\n', (1542, 1550), False, 'import numpy\n'), ((1606, 1710), 'numpy.float32', 'numpy.float32', (['[[0, 0], [image.shape[1], 0], [image.shape[1], image.shape[0]], [0, image.\n shape[0]]]'], {}), '([[0, 0], [image.shape[1], 0], [image.shape[1], image.shape[0]\n ], [0, image.shape[0]]])\n', (1619, 1710), False, 'import numpy\n'), ((7727, 7749), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (7737, 7749), False, 'import cv2\n'), ((9020, 9031), 'time.time', 'time.time', ([], {}), '()\n', (9029, 9031), False, 'import time\n'), ((14267, 14318), 'numpy.float32', 'numpy.float32', (['[[0, 0], [w1, 0], [w1, h1], [0, h1]]'], {}), '([[0, 0], [w1, 0], [w1, h1], [0, h1]])\n', (14280, 14318), False, 'import numpy\n'), ((14446, 14498), 'cv2.polylines', 'cv2.polylines', (['vis', '[corners]', '(True)', '(255, 255, 255)'], {}), '(vis, [corners], True, (255, 255, 255))\n', (14459, 14498), False, 'import cv2\n'), ((15055, 15099), 'numpy.int32', 'numpy.int32', (['[kpp[1].pt for kpp in kp_pairs]'], {}), '([kpp[1].pt for kpp in kp_pairs])\n', (15066, 15099), False, 'import numpy\n'), ((4021, 4045), 'cv2.contourArea', 'cv2.contourArea', (['corners'], {}), '(corners)\n', (4036, 4045), False, 'import cv2\n'), ((9510, 9525), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (9520, 9525), False, 'import time\n'), ((9568, 9579), 'time.time', 'time.time', ([], {}), '()\n', (9577, 9579), False, 'import time\n'), ((15361, 15398), 'cv2.circle', 'cv2.circle', (['vis', 
'(x1, y1)', '(2)', 'col', '(-1)'], {}), '(vis, (x1, y1), 2, col, -1)\n', (15371, 15398), False, 'import cv2\n'), ((15415, 15452), 'cv2.circle', 'cv2.circle', (['vis', '(x2, y2)', '(2)', 'col', '(-1)'], {}), '(vis, (x2, y2), 2, col, -1)\n', (15425, 15452), False, 'import cv2\n'), ((15565, 15630), 'cv2.line', 'cv2.line', (['vis', '(x1 - r, y1 - r)', '(x1 + r, y1 + r)', 'col', 'thickness'], {}), '(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), col, thickness)\n', (15573, 15630), False, 'import cv2\n'), ((15647, 15712), 'cv2.line', 'cv2.line', (['vis', '(x1 - r, y1 + r)', '(x1 + r, y1 - r)', 'col', 'thickness'], {}), '(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), col, thickness)\n', (15655, 15712), False, 'import cv2\n'), ((15729, 15794), 'cv2.line', 'cv2.line', (['vis', '(x2 - r, y2 - r)', '(x2 + r, y2 + r)', 'col', 'thickness'], {}), '(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), col, thickness)\n', (15737, 15794), False, 'import cv2\n'), ((15811, 15876), 'cv2.line', 'cv2.line', (['vis', '(x2 - r, y2 + r)', '(x2 + r, y2 - r)', 'col', 'thickness'], {}), '(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), col, thickness)\n', (15819, 15876), False, 'import cv2\n'), ((16005, 16045), 'cv2.line', 'cv2.line', (['vis', '(x1, y1)', '(x2, y2)', 'green'], {}), '(vis, (x1, y1), (x2, y2), green)\n', (16013, 16045), False, 'import cv2\n'), ((1502, 1519), 'numpy.sum', 'numpy.sum', (['status'], {}), '(status)\n', (1511, 1519), False, 'import numpy\n'), ((4288, 4305), 'numpy.sum', 'numpy.sum', (['status'], {}), '(status)\n', (4297, 4305), False, 'import numpy\n'), ((9189, 9200), 'time.time', 'time.time', ([], {}), '()\n', (9198, 9200), False, 'import time\n'), ((1961, 1973), 'numpy.min', 'numpy.min', (['x'], {}), '(x)\n', (1970, 1973), False, 'import numpy\n'), ((2006, 2018), 'numpy.min', 'numpy.min', (['y'], {}), '(y)\n', (2015, 2018), False, 'import numpy\n'), ((2401, 2530), 'math.atan', 'math.atan', (['((corners[1][1] - corners[0][1]) / (corners[1][0] - corners[0][0]) if \n corners[1][0] - corners[0][0] 
!= 0 else 10000)'], {}), '((corners[1][1] - corners[0][1]) / (corners[1][0] - corners[0][0]) if\n corners[1][0] - corners[0][0] != 0 else 10000)\n', (2410, 2530), False, 'import math\n'), ((2619, 2748), 'math.atan', 'math.atan', (['((corners[2][1] - corners[3][1]) / (corners[2][0] - corners[3][0]) if \n corners[2][0] - corners[3][0] != 0 else 10000)'], {}), '((corners[2][1] - corners[3][1]) / (corners[2][0] - corners[3][0]) if\n corners[2][0] - corners[3][0] != 0 else 10000)\n', (2628, 2748), False, 'import math\n'), ((3021, 3150), 'math.atan', 'math.atan', (['((corners[3][1] - corners[0][1]) / (corners[3][0] - corners[0][0]) if \n corners[3][0] - corners[0][0] != 0 else 10000)'], {}), '((corners[3][1] - corners[0][1]) / (corners[3][0] - corners[0][0]) if\n corners[3][0] - corners[0][0] != 0 else 10000)\n', (3030, 3150), False, 'import math\n'), ((3239, 3368), 'math.atan', 'math.atan', (['((corners[2][1] - corners[1][1]) / (corners[2][0] - corners[1][0]) if \n corners[2][0] - corners[1][0] != 0 else 10000)'], {}), '((corners[2][1] - corners[1][1]) / (corners[2][0] - corners[1][0]) if\n corners[2][0] - corners[1][0] != 0 else 10000)\n', (3248, 3368), False, 'import math\n'), ((4599, 4633), 'os.path.dirname', 'os.path.dirname', (['visual_image_path'], {}), '(visual_image_path)\n', (4614, 4633), False, 'import os\n'), ((2052, 2064), 'numpy.max', 'numpy.max', (['x'], {}), '(x)\n', (2061, 2064), False, 'import numpy\n'), ((2107, 2119), 'numpy.max', 'numpy.max', (['y'], {}), '(y)\n', (2116, 2119), False, 'import numpy\n'), ((4802, 4837), 'os.path.basename', 'os.path.basename', (['visual_image_path'], {}), '(visual_image_path)\n', (4818, 4837), False, 'import os\n')] |
from __future__ import print_function
import numpy as np
from sklearn import metrics
from sklearn.metrics import roc_auc_score
import math
import six
#import bootstrapped.bootstrap as bs
#import bootstrapped.stats_functions as bs_stats
from six.moves import cPickle as pkl
#from sklearn.covariance import GraphLasso
#import nitime
#import nitime.analysis as nta
#import nitime.timeseries as ts
#import nitime.utils as tsu
# def bst_0(A, num_iterations=10000, alpha=0.05):
# """
# bootstrap estimation
# Parameters
# ----------
# num_iterations: int, iterations for bootstrap
# alpha: significant level
# Returns
# ---------
# numpy array, bootstrapped estimations
# """
# tmp_0 = np.zeros(A.shape[0:-1])
# if len(A.shape) == 3:
# for i in range(A.shape[0]):
# for j in range(A.shape[1]):
# tmp = bs.bootstrap(A[i,j,:], stat_func=bs_stats.mean,num_iterations=20000,alpha=0.05)
# if 0<tmp.lower_bound or 0>tmp.upper_bound:
# tmp_0[i,j] = 1
# else:
# for i in range(A.shape[0]):
# for j in range(A.shape[1]):
# for k in range(A.shape[2]):
# tmp = bs.bootstrap(A[i,j,k,:], stat_func=bs_stats.mean,num_iterations=20000,alpha=0.1)
# if 0<tmp.lower_bound or 0>tmp.upper_bound:
# tmp_0[i,j,k] = 1
# return tmp_0
def spl_mean(org, num_iterations):
    """Bootstrap resampling of *org*.

    Draws ``num_iterations`` resamples (with replacement, same size as
    *org*) and returns the mean of each.

    Parameters
    ------------
    org : sequence of values to resample
    num_iterations : int, number of bootstrap resamples

    Returns
    ------------
    list of resample means
    """
    sample_size = len(org)
    return [np.mean(np.random.choice(org, sample_size))
            for _ in range(num_iterations)]
def bst(A, num_iterations=10000, alpha=0.05):
    """
    Bootstrap estimation of p values without using packages.

    Parameters
    ----------
    A : numpy array whose LAST axis holds the per-subject samples
    num_iterations : int, iterations for bootstrap
    alpha : float, significance level

    Returns
    ---------
    numpy array, bootstrapped one-sided p values (shape A.shape[:-1])
    numpy array, point estimates masked to the significant entries
    """
    p_vals = np.zeros(A.shape[:-1])
    para_mean = np.mean(A, axis=-1)
    # One-sided p value per parameter: fraction of bootstrap means that
    # cross zero against the sign of the point estimate.
    # np.ndindex walks the leading axes in the same row-major order as the
    # original nested (i, j[, k]) loops, so random draws are consumed
    # identically; it also removes the duplicated 3-D/4-D loop bodies and
    # generalizes to any rank with samples on the last axis.
    for idx in np.ndindex(*A.shape[:-1]):
        samples = A[idx]
        if para_mean[idx] >= 0:
            p_vals[idx] = np.mean(spl_mean(samples <= 0, num_iterations))
        else:
            p_vals[idx] = np.mean(spl_mean(samples > 0, num_iterations))
    return p_vals, (p_vals < alpha) * para_mean
def fd_0(A1, A):
    """AUC of the estimated parameters against the known truth (simulations).

    Parameters
    ----------
    A1 : numpy array, the real (ground-truth) parameter array
    A : numpy array, per-subject estimates stacked on the last axis

    Returns
    ----------
    scalar AUC, or -1 when the AUC is undefined (truth all zero,
    truth all nonzero, or a degenerate all-zero mean estimate).
    """
    truth = abs(A1)
    # no signal at all in the ground truth -> AUC undefined
    if np.sum(truth) < 1e-6:
        return -1
    # every entry nonzero -> only one class present -> AUC undefined
    if A1.ndim in (2, 3) and np.sum(truth > 0) == A1.size:
        return -1
    # average the subject estimates over the stacking axis
    score_map = abs(np.mean(A, axis=2 if A.ndim == 3 else 3))
    peak = np.max(score_map)
    if peak == 0:
        return -1
    score_map = score_map / peak
    labels = truth > 0
    # fpr,tpr,thresholds = metrics.roc_curve(labels.reshape((-1)),score_map.reshape((-1)))
    return roc_auc_score(labels.reshape(-1), score_map.reshape(-1))
def eva(folder_name, saved_folder_name=None, real_parameters=None, num_iterations=10000, alpha=0.1):
    """
    Evaluate saved per-subject estimations.

    Loads ``results/result.pkl`` from every subject folder, stacks the
    A/B/C estimates along a new last (subject) axis, optionally prints
    AUCs against known ground truth, and optionally saves bootstrap
    statistics.

    Parameters
    -----------
    folder_name: list of str
        folder names for all subjects analysis, the same meaning as that
        in function cdn_multi_sub; each folder must contain
        results/result.pkl with keys 'A', 'B', 'C'
    saved_folder_name: str, optional
        folder used to save bootstrapped estimations (writes 'bst.pkl');
        skipped when None
    real_parameters: str, optional
        path to a pickle holding 'A_real', 'B_real', 'C_real'; when given,
        AUCs of the estimates are printed
    num_iterations, alpha:
        bootstrap parameters, forwarded to bst() (previously they were
        accepted but silently ignored: bst() ran with its own defaults)
    """
    n = len(folder_name)
    for i in range(n):
        with open(folder_name[i]+'results/result.pkl', 'rb') as f:
            # Python-2 pickles need latin1 decoding when read under Python 3.
            if six.PY2:
                save = pkl.load(f)
            else:
                save = pkl.load(f, encoding='latin1')
        A = save['A']
        B = save['B']
        C = save['C']
        # Allocate the cross-subject stacks once, sized from the first subject.
        if i == 0:
            A_all = np.zeros((A.shape[0], A.shape[1], n))
            B_all = np.zeros((B.shape[0], B.shape[1], B.shape[2], n))
            C_all = np.zeros((C.shape[0], C.shape[1], n))
        A_all[:,:,i] = A
        B_all[:,:,:,i] = B
        C_all[:,:,i] = C
    if real_parameters:
        with open(real_parameters, 'rb') as f:
            if six.PY2:
                save = pkl.load(f)
            else:
                save = pkl.load(f, encoding='latin1')
        A_real = save['A_real']
        B_real = save['B_real']
        C_real = save['C_real']
        auc_a = fd_0(A_real, A_all)
        auc_b = fd_0(B_real, B_all)
        auc_c = fd_0(C_real, C_all)
        print('AUC(A):{0}, AUC(B):{1}, AUC(C):{2}'.format(auc_a, auc_b, auc_c))
    if saved_folder_name:
        save = {}
        # Fix: forward the bootstrap parameters instead of letting bst()
        # fall back to its own defaults (num_iterations=10000, alpha=0.05).
        save['bst_A'] = bst(A_all, num_iterations, alpha)
        save['bst_B'] = bst(B_all, num_iterations, alpha)
        save['bst_C'] = bst(C_all, num_iterations, alpha)
        with open(saved_folder_name+'bst.pkl', 'wb') as f:
            pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
| [
"six.moves.cPickle.dump",
"numpy.zeros",
"numpy.max",
"numpy.mean",
"six.moves.cPickle.load"
] | [((2091, 2114), 'numpy.zeros', 'np.zeros', (['A.shape[0:-1]'], {}), '(A.shape[0:-1])\n', (2099, 2114), True, 'import numpy as np\n'), ((2131, 2150), 'numpy.mean', 'np.mean', (['A'], {'axis': '(-1)'}), '(A, axis=-1)\n', (2138, 2150), True, 'import numpy as np\n'), ((3561, 3572), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (3567, 3572), True, 'import numpy as np\n'), ((3611, 3622), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (3617, 3622), True, 'import numpy as np\n'), ((3479, 3497), 'numpy.mean', 'np.mean', (['A'], {'axis': '(2)'}), '(A, axis=2)\n', (3486, 3497), True, 'import numpy as np\n'), ((3535, 3553), 'numpy.mean', 'np.mean', (['A'], {'axis': '(3)'}), '(A, axis=3)\n', (3542, 3553), True, 'import numpy as np\n'), ((4549, 4586), 'numpy.zeros', 'np.zeros', (['(A.shape[0], A.shape[1], n)'], {}), '((A.shape[0], A.shape[1], n))\n', (4557, 4586), True, 'import numpy as np\n'), ((4607, 4656), 'numpy.zeros', 'np.zeros', (['(B.shape[0], B.shape[1], B.shape[2], n)'], {}), '((B.shape[0], B.shape[1], B.shape[2], n))\n', (4615, 4656), True, 'import numpy as np\n'), ((4677, 4714), 'numpy.zeros', 'np.zeros', (['(C.shape[0], C.shape[1], n)'], {}), '((C.shape[0], C.shape[1], n))\n', (4685, 4714), True, 'import numpy as np\n'), ((5499, 5538), 'six.moves.cPickle.dump', 'pkl.dump', (['save', 'f', 'pkl.HIGHEST_PROTOCOL'], {}), '(save, f, pkl.HIGHEST_PROTOCOL)\n', (5507, 5538), True, 'from six.moves import cPickle as pkl\n'), ((4360, 4371), 'six.moves.cPickle.load', 'pkl.load', (['f'], {}), '(f)\n', (4368, 4371), True, 'from six.moves import cPickle as pkl\n'), ((4413, 4443), 'six.moves.cPickle.load', 'pkl.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (4421, 4443), True, 'from six.moves import cPickle as pkl\n'), ((4910, 4921), 'six.moves.cPickle.load', 'pkl.load', (['f'], {}), '(f)\n', (4918, 4921), True, 'from six.moves import cPickle as pkl\n'), ((4963, 4993), 'six.moves.cPickle.load', 'pkl.load', (['f'], {'encoding': '"""latin1"""'}), 
"(f, encoding='latin1')\n", (4971, 4993), True, 'from six.moves import cPickle as pkl\n')] |
import copy
from typing import Tuple, Union
import numpy as np
from .module import Module
from .utils import bin2dec_vector, dec2bin_vector
class BinaryEncoder(Module):
    """Encode real values from a fixed interval into ``dim``-bit binary vectors.

    Values are linearly quantized onto ``2 ** dim`` levels spanning
    ``interval`` and converted to their binary representation.
    """

    @staticmethod
    def __check_init_args(
        dim: int, interval: Union[Tuple[Union[float, int], Union[float, int]]]
    ) -> Tuple[int, np.ndarray]:
        """Validate the constructor arguments.

        Returns ``dim`` and the interval normalized to a length-2 float
        ndarray.

        Raises
        ------
        TypeError
            If ``dim`` is not an int or ``interval`` cannot be made a tuple.
        ValueError
            If ``dim`` is not positive, ``interval`` is empty, or its
            bounds are reversed.
        """
        if not isinstance(dim, int):
            raise TypeError(
                f"Expected argument dim to be an int, instead it is {type(dim)}."
            )
        if dim < 1:
            raise ValueError(
                f"Expected argument dim to be a positive integer, instead it is {dim}."
            )
        if not isinstance(interval, tuple):
            try:
                interval = tuple(interval)
            except Exception:
                raise TypeError(
                    "Expected argument interval to be a tuple, instead it is "
                    f"{type(interval)}"
                )
        if len(interval) < 1:
            raise ValueError("Expected argument interval to be non-empty.")
        if len(interval) < 2:
            # A single bound means the other bound is 0.  Fix: compare the
            # scalar, not the tuple -- `interval < 0` on a tuple raises
            # TypeError on Python 3.
            interval = (interval[0], 0) if interval[0] < 0 else (0, interval[0])
        interval = interval[:2]
        if interval[0] > interval[1]:
            raise ValueError(
                "Expected argument interval to have a first item smaller than the "
                f"second one, instead it is {interval}."
            )
        # Fix: the np.float alias was removed in NumPy 1.24; builtin float
        # yields the same float64 dtype.
        interval = np.array(interval, dtype=float)
        return dim, interval

    def __init__(
        self, dim: int, interval: Union[Tuple[Union[float, int], Union[float, int]]]
    ):
        """Create an encoder producing ``dim``-bit codes over ``interval``."""
        self.__dim, self.__interval = self.__check_init_args(dim=dim, interval=interval)
        self.__max_value = int(2 ** dim) - 1
        # Width of one quantization step.  Fix: use the *normalized*
        # interval; the raw argument may have fewer than two items.
        self.__quantum = (self.__interval[1] - self.__interval[0]) / self.max_value

    @property
    def dim(self):
        # Number of bits in a code.
        return self.__dim

    @property
    def interval(self):
        # Normalized (low, high) bounds as a float ndarray.
        return self.__interval

    @property
    def quantum(self):
        # Real-valued width of one quantization level.
        return self.__quantum

    @property
    def max_value(self):
        # Largest representable quantization level (2**dim - 1).
        return self.__max_value

    def _input_to_int(self, x) -> int:
        """Clamp ``x`` into the interval and map it to its quantization level."""
        # Fix: the np.int alias was removed in NumPy; builtin int is the
        # equivalent dtype (truncation toward zero, as before).
        return np.maximum(
            0, np.minimum(self.max_value, (x - self.interval[0]) / self.quantum)
        ).astype(int)

    def apply(self, x):
        """Encode ``x`` into its binary vector representation."""
        x_int = self._input_to_int(x)
        return dec2bin_vector(x_int, self.dim)

    def __str__(self):
        return f"BinaryEncoder({self.dim} bit, {self.interval})"
class BinaryDecoder(Module):
    """Decode ``dim``-bit binary vectors back to real values in an interval.

    Inverse of :class:`BinaryEncoder`: integer codes are mapped linearly
    from ``[0, 2 ** dim - 1]`` onto ``interval``.
    """

    @staticmethod
    def __check_init_args(
        dim: int, interval: Union[Tuple[Union[float, int], Union[float, int]]]
    ) -> Tuple[int, np.ndarray]:
        """Validate the constructor arguments.

        Returns ``dim`` and the interval normalized to a length-2 float
        ndarray.

        Raises
        ------
        TypeError
            If ``dim`` is not an int or ``interval`` cannot be made a tuple.
        ValueError
            If ``dim`` is not positive, ``interval`` is empty, or its
            bounds are reversed.
        """
        if not isinstance(dim, int):
            raise TypeError(
                f"Expected argument dim to be an int, instead it is {type(dim)}."
            )
        if dim < 1:
            raise ValueError(
                f"Expected argument dim to be a positive integer, instead it is {dim}."
            )
        if not isinstance(interval, tuple):
            try:
                interval = tuple(interval)
            except Exception:
                raise TypeError(
                    "Expected argument interval to be a tuple, instead it is "
                    f"{type(interval)}"
                )
        if len(interval) < 1:
            raise ValueError("Expected argument interval to be non-empty.")
        if len(interval) < 2:
            # A single bound means the other bound is 0.  Fix: compare the
            # scalar, not the tuple -- `interval < 0` on a tuple raises
            # TypeError on Python 3.
            interval = (interval[0], 0) if interval[0] < 0 else (0, interval[0])
        interval = interval[:2]
        if interval[0] > interval[1]:
            raise ValueError(
                "Expected argument interval to have a first item smaller than the "
                f"second one, instead it is {interval}."
            )
        # Fix: the np.float alias was removed in NumPy 1.24; builtin float
        # yields the same float64 dtype.
        interval = np.array(interval, dtype=float)
        return dim, interval

    def __init__(
        self, dim: int, interval: Union[Tuple[Union[float, int], Union[float, int]]]
    ):
        """Create a decoder for ``dim``-bit codes over ``interval``."""
        self.__dim, self.__interval = self.__check_init_args(dim=dim, interval=interval)
        self.__max_value = int(2 ** dim) - 1
        # Width of one quantization step.  Fix: use the *normalized*
        # interval; the raw argument may have fewer than two items.
        self.__quantum = (self.__interval[1] - self.__interval[0]) / self.max_value

    @property
    def dim(self):
        # Number of bits in a code.
        return self.__dim

    @property
    def interval(self):
        # Normalized (low, high) bounds as a float ndarray.
        return self.__interval

    @property
    def max_value(self):
        # Largest representable quantization level (2**dim - 1).
        return self.__max_value

    @property
    def quantum(self):
        # Real-valued width of one quantization level.
        return self.__quantum

    def apply(self, x):
        """Decode binary vectors ``x`` back to real values in the interval."""
        x_int = bin2dec_vector(x)
        return (x_int * self.quantum) + self.interval[0]

    def __str__(self):
        return f"BinaryDecoder({self.dim} bit, {self.interval})"
| [
"numpy.minimum",
"numpy.array"
] | [((1443, 1477), 'numpy.array', 'np.array', (['interval'], {'dtype': 'np.float'}), '(interval, dtype=np.float)\n', (1451, 1477), True, 'import numpy as np\n'), ((3767, 3801), 'numpy.array', 'np.array', (['interval'], {'dtype': 'np.float'}), '(interval, dtype=np.float)\n', (3775, 3801), True, 'import numpy as np\n'), ((2176, 2241), 'numpy.minimum', 'np.minimum', (['self.max_value', '((x - self.interval[0]) / self.quantum)'], {}), '(self.max_value, (x - self.interval[0]) / self.quantum)\n', (2186, 2241), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.sparse as sp
import numpy as np
from time import time
import argparse
def parse_args(args=None):
    """Parse command-line arguments for the GMF data-preparation script.

    Parameters
    ----------
    args : list of str, optional
        Argument strings to parse.  Defaults to None, in which case
        argparse reads ``sys.argv[1:]`` -- preserving the original
        command-line behaviour -- while allowing programmatic/tested calls.

    Returns
    -------
    argparse.Namespace
        Parsed options: path, dataset, num_neg, train_data_path.
    """
    parser = argparse.ArgumentParser(description="Run GMF.")
    parser.add_argument(
        '--path', nargs='?', default='Data/', help='Input data path.')
    parser.add_argument(
        '--dataset', nargs='?', default='ml-1m', help='Choose a dataset.')
    parser.add_argument(
        '--num_neg',
        type=int,
        default=4,
        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument(
        '--train_data_path',
        type=str,
        default="Data/train_data.csv",
        help='train_data_path')
    return parser.parse_args(args)
def get_train_data(filename, write_file, num_negatives):
    '''
    Read a tab-separated .rating file and write CSV training samples with
    negative sampling.

    Each input line is "user\titem\trating[...]".  Every positive
    interaction (rating > 0) produces one "user,item,1" line followed by
    *num_negatives* "user,item,0" lines whose items are drawn uniformly
    from items the user has no positive interaction with.

    Parameters
    ----------
    filename : str
        Path of the .rating file (0-based user/item ids).
    write_file : str
        Path of the CSV file to create.
    num_negatives : int
        Number of negative instances to pair with a positive instance.
    '''
    # First pass: find the largest user and item ids.
    num_users, num_items = 0, 0
    with open(filename, "r") as f:
        for line in f:
            arr = line.split("\t")
            u, i = int(arr[0]), int(arr[1])
            num_users = max(num_users, u)
            num_items = max(num_items, i)
    print("users_num:", num_users, "items_num:", num_items)
    # Second pass: build a sparse matrix of the positive interactions.
    mat = sp.dok_matrix((num_users + 1, num_items + 1), dtype=np.float32)
    with open(filename, "r") as f:
        for line in f:
            arr = line.split("\t")
            user, item, rating = int(arr[0]), int(arr[1]), float(arr[2])
            if rating > 0:
                mat[user, item] = 1.0
    # Fix: use a context manager -- the original left the output file open.
    print("writing " + write_file)
    with open(write_file, 'w') as out:
        for (u, i) in mat.keys():
            # positive instance
            out.write("{0},{1},{2}\n".format(u, i, 1))
            # negative instances: rejection-sample items without a positive.
            # NOTE(review): randint(num_items) never draws the largest item
            # id, and the loop spins forever if a user rated every sampled
            # item -- confirm both are acceptable for this dataset.
            for _ in range(num_negatives):
                j = np.random.randint(num_items)
                while (u, j) in mat.keys():
                    j = np.random.randint(num_items)
                out.write("{0},{1},{2}\n".format(u, j, 0))
if __name__ == "__main__":
args = parse_args()
get_train_data(args.path + args.dataset + ".train.rating",
args.train_data_path, args.num_neg)
| [
"scipy.sparse.dok_matrix",
"numpy.random.randint",
"argparse.ArgumentParser"
] | [((727, 774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run GMF."""'}), "(description='Run GMF.')\n", (750, 774), False, 'import argparse\n'), ((1962, 2025), 'scipy.sparse.dok_matrix', 'sp.dok_matrix', (['(num_users + 1, num_items + 1)'], {'dtype': 'np.float32'}), '((num_users + 1, num_items + 1), dtype=np.float32)\n', (1975, 2025), True, 'import scipy.sparse as sp\n'), ((2733, 2761), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (2750, 2761), True, 'import numpy as np\n'), ((2822, 2850), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (2839, 2850), True, 'import numpy as np\n')] |
from __future__ import print_function, division, absolute_import
import itertools
from copy import copy
import numpy as np
import regreg.atoms.seminorms as S
import regreg.api as rr
import nose.tools as nt
def all_close(x, y, msg, solver):
"""
Check to see if x and y are close
"""
try:
v = np.linalg.norm(x-y) <= 1.0e-03 * max([1, np.linalg.norm(x), np.linalg.norm(y)])
except:
print("""
check_failed
============
msg: %s
x: %s
y: %s
""" % (msg, x, y))
return False
v = v or np.allclose(x,y)
if not v:
print("""
summary
=======
msg: %s
comparison: %0.3f
x : %s
y : %s
""" % (msg, np.linalg.norm(x-y) / max([1, np.linalg.norm(x), np.linalg.norm(y)]), x, y))
if not hasattr(solver, 'interactive') or not solver.interactive:
nt.assert_true(v)
else:
print(msg.split('\n')[0])
@np.testing.dec.slow
def test_proximal_maps(interactive=False):
    """
    Nose-style generator test: yields (check_function, *args) tuples that
    compare every atom's lagrange/bound proximal maps and run the full
    Solver battery for each atom class, in both 'lagrange' and 'bound'
    modes.

    NOTE(review): np.testing.dec was removed in NumPy >= 1.18, so this
    decorator requires an old NumPy.  The `interactive` parameter is not
    used in this body -- confirm whether it was meant to be forwarded to
    SolverFactory.
    """
    for klass in [S.l1norm, S.supnorm, S.l2norm,
                  S.positive_part, S.constrained_max]:
        factory = SolverFactory(klass, 'lagrange')
        for solver in factory:
            penalty = solver.atom
            dual = penalty.conjugate
            Z = solver.prox_center
            L = solver.L
            # lagrange_prox and bound_prox must be mutually consistent.
            yield all_close, penalty.lagrange_prox(Z, lipschitz=L), Z-dual.bound_prox(Z*L)/L, 'testing lagrange_prox and bound_prox starting from atom\n %s ' % klass, None
            # some arguments of the constructor
            nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
            nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)
            nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
            nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)
            # call these to ensure coverage at least
            repr(penalty)
            repr(dual)
            penalty.seminorm(Z, lagrange=1)
            penalty.constraint(Z, bound=1)
            dual.seminorm(Z, lagrange=1)
            dual.constraint(Z, bound=1)
            for t in solver.all_tests():
                yield t
        factory = SolverFactory(klass, 'bound')
        for solver in factory:
            for t in solver.all_tests():
                yield t
    for klass in sorted(S.nonpaired_atoms):
        factory = SolverFactory(klass, 'lagrange')
        for solver in factory:
            penalty = solver.atom
            dual = penalty.conjugate
            Z = solver.prox_center
            L = solver.L
            yield all_close, penalty.lagrange_prox(Z, lipschitz=L), Z-dual.bound_prox(Z*L)/L, 'testing lagrange_prox and bound_prox starting from atom %s\n ' % klass, None
            nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
            nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)
            nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
            nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)
            for t in solver.all_tests():
                yield t
class SolverFactory(object):
    """Iterable factory of Solver instances over a grid of settings.

    Iterating yields one Solver per combination of offset / FISTA /
    coef_stop / Lipschitz-constant / quadratic choices for the given atom
    class, built in either 'lagrange' or 'bound' form.
    """

    offset_choices = [True, False]
    FISTA_choices =[True,False]
    coef_stop_choices = [True,False]
    lagrange = 0.13
    bound = 0.14
    L_choices = [0.1,0.5,1]
    quadratic_choices = [True, False]
    shape = (20,)
    interactive = False

    def __init__(self, klass, mode):
        # Atom class to instantiate and whether it is parameterized in
        # 'lagrange' or bound form.
        self.klass = klass
        self.mode = mode

    def __iter__(self):
        option_grid = itertools.product(self.offset_choices,
                                          self.FISTA_choices,
                                          self.coef_stop_choices,
                                          self.L_choices,
                                          self.quadratic_choices)
        for use_offset, use_fista, use_coef_stop, lipschitz, add_quadratic in option_grid:
            self.FISTA = use_fista
            self.coef_stop = use_coef_stop
            self.L = lipschitz
            # Build the atom in the requested parameterization.
            if self.mode == 'lagrange':
                atom = self.klass(self.shape, lagrange=self.lagrange)
            else:
                atom = self.klass(self.shape, bound=self.bound)
            if add_quadratic:
                atom.quadratic = rr.identity_quadratic(
                    0, 0, np.random.standard_normal(atom.shape)*0.02)
            if use_offset:
                atom.offset = 0.02 * np.random.standard_normal(atom.shape)
            solver = Solver(atom, interactive=self.interactive,
                            coef_stop=use_coef_stop,
                            FISTA=use_fista,
                            L=lipschitz)
            # make sure certain lines of code are tested
            assert(atom == atom)
            atom.latexify(), atom.dual, atom.conjugate
            yield solver
class Solver(object):
    """Runs a battery of equivalent formulations of one proximal problem
    for a single atom and yields nose-style (check, expected, actual,
    message) test tuples from each ``test_*`` generator method."""

    def __iter__(self):
        """Yield solvers for every factory configuration of this atom's class.

        NOTE(review): SolverFactory requires (klass, mode); this call passes
        only the class, so iterating a Solver raises TypeError -- confirm
        whether this path is ever exercised.
        """
        factory = SolverFactory(self.atom.__class__)
        for solver in factory:
            yield solver

    def __repr__(self):
        return 'Solver(%s, L=%f, prox_center=%s)' % (repr(self.atom), self.L, repr(self.prox_center))

    def __init__(self, atom, interactive=False, coef_stop=False,
                 FISTA=True, L=1, prox_center=None):
        """Store the atom and solver settings; the reference quadratic
        ``q`` and loss are centered at ``prox_center`` (drawn at random
        when not supplied)."""
        self.atom = atom
        self.interactive = interactive
        self.coef_stop = coef_stop
        self.FISTA = FISTA
        self.L = L
        if prox_center is None:
            self.prox_center = np.random.standard_normal(atom.shape)
        else:
            self.prox_center = prox_center
        self.q = rr.identity_quadratic(L, self.prox_center, 0, 0)
        self.loss = rr.quadratic.shift(self.prox_center, coef=L)

    def test_duality_of_projections(self):
        """Check Moreau decomposition: prox of atom + prox of conjugate
        recover the prox center (only when the atom carries no quadratic)."""
        if self.atom.quadratic == rr.identity_quadratic(0,0,0,0) or self.atom.quadratic is None:
            tests = []
            d = self.atom.conjugate
            q = rr.identity_quadratic(1, self.prox_center, 0, 0)
            tests.append((self.prox_center-self.atom.proximal(q), d.proximal(q), 'testing duality of projections starting from atom\n %s ' % str(self)))
            if hasattr(self.atom, 'check_subgradient') and self.atom.offset is None:
                # check subgradient condition
                v1, v2 = self.atom.check_subgradient(self.atom, self.prox_center)
                tests.append((v1, v2, 'checking subgradient condition\n %s' % str(self)))
            if not self.interactive:
                for test in tests:
                    yield (all_close,) + test + (self,)
            else:
                for test in tests:
                    yield all_close(*((test + (self,))))

    def test_simple_problem_nonsmooth(self):
        """Solve the purely nonsmooth formulation several ways (FISTA with
        and without monotonicity, gengrad, solve method) and compare each
        against the closed-form proximal map."""
        tests = []
        atom, q = self.atom, self.q
        loss = self.loss
        p2 = copy(atom)
        p2.quadratic = atom.quadratic + q
        problem = rr.simple_problem.nonsmooth(p2)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-14, FISTA=self.FISTA, coef_stop=self.coef_stop, min_its=100)
        gg = rr.gengrad(problem, 2.) # this lipschitz constant is based on knowing our loss...
        tests.append((atom.proximal(q), gg, 'solving prox with gengrad\n %s ' % str(self)))
        tests.append((atom.proximal(q), atom.solve(q), 'solving prox with solve method\n %s ' % str(self)))
        tests.append((atom.proximal(q), solver.composite.coefs, 'solving prox with simple_problem.nonsmooth with monotonicity\n %s ' % str(self)))
        # use the solve method
        p3 = copy(atom)
        p3.quadratic = atom.quadratic + q
        soln = p3.solve(tol=1.e-14, min_its=10)
        tests.append((atom.proximal(q), soln, 'solving prox with solve method\n %s ' % str(self)))
        p4 = copy(atom)
        p4.quadratic = atom.quadratic + q
        problem = rr.simple_problem.nonsmooth(p4)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-14, monotonicity_restart=False, coef_stop=self.coef_stop,
                   FISTA=self.FISTA,
                   min_its=100)
        tests.append((atom.proximal(q), solver.composite.coefs, 'solving prox with simple_problem.nonsmooth with no monotonocity\n %s ' % str(self)))
        if not self.interactive:
            for test in tests:
                yield (all_close,) + test + (self,)
        else:
            for test in tests:
                yield all_close(*((test + (self,))))

    def test_simple_problem(self):
        """Solve smooth-loss + atom formulations (primal and dual, with and
        without monotonicity restarts) and compare to the proximal map."""
        tests = []
        atom, q, prox_center, L = self.atom, self.q, self.prox_center, self.L
        loss = self.loss
        problem = rr.simple_problem(loss, atom)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12, FISTA=self.FISTA, coef_stop=self.coef_stop, min_its=100)
        tests.append((atom.proximal(q), solver.composite.coefs, 'solving prox with simple_problem with monotonicity\n %s' % str(self)))
        # write the loss in terms of a quadratic for the smooth loss and a smooth function...
        q = rr.identity_quadratic(L, prox_center, 0, 0)
        lossq = rr.quadratic.shift(prox_center.copy(), coef=0.6*L)
        lossq.quadratic = rr.identity_quadratic(0.4*L, prox_center.copy(), 0, 0)
        problem = rr.simple_problem(lossq, atom)
        tests.append((atom.proximal(q),
                      problem.solve(coef_stop=self.coef_stop,
                                    FISTA=self.FISTA,
                                    tol=1.0e-12),
                      'solving prox with simple_problem ' +
                      'with monotonicity but loss has identity_quadratic %s\n ' % str(self)))
        problem = rr.simple_problem(loss, atom)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12, monotonicity_restart=False,
                   coef_stop=self.coef_stop, FISTA=self.FISTA, min_its=100)
        tests.append((atom.proximal(q), solver.composite.coefs, 'solving prox with simple_problem no monotonicity_restart\n %s' % str(self)))
        d = atom.conjugate
        problem = rr.simple_problem(loss, d)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12, monotonicity_restart=False,
                   coef_stop=self.coef_stop, FISTA=self.FISTA, min_its=100)
        tests.append((d.proximal(q), problem.solve(tol=1.e-12,
                                                   FISTA=self.FISTA,
                                                   coef_stop=self.coef_stop,
                                                   monotonicity_restart=False),
                      'solving dual prox with simple_problem no monotonocity\n %s ' % str(self)))
        if not self.interactive:
            for test in tests:
                yield (all_close,) + test + (self,)
        else:
            for test in tests:
                yield all_close(*((test + (self,))))

    def test_dual_problem(self):
        """Solve via the dual problem (fromprimal and explicit construction)
        and compare to the primal proximal map."""
        tests = []
        atom, q, prox_center, L = self.atom, self.q, self.prox_center, self.L
        loss = self.loss
        dproblem = rr.dual_problem.fromprimal(loss, atom)
        dcoef = dproblem.solve(coef_stop=self.coef_stop, tol=1.0e-14)
        tests.append((atom.proximal(q), dcoef, 'solving prox with dual_problem.fromprimal with monotonicity \n %s ' % str(self)))
        dproblem2 = rr.dual_problem(loss.conjugate,
                                    rr.identity(loss.shape),
                                    atom.conjugate)
        dcoef2 = dproblem2.solve(coef_stop=self.coef_stop, tol=1.e-14)
        tests.append((atom.proximal(q), dcoef2, 'solving prox with dual_problem with monotonicity %s \n' % str(self)))
        if not self.interactive:
            for test in tests:
                yield (all_close,) + test + (self,)
        else:
            for test in tests:
                yield all_close(*((test + (self,))))

    def test_separable(self):
        """Solve via separable_problem.singleton for both the atom and its
        conjugate and compare to the respective proximal maps."""
        tests = []
        atom, q, prox_center, L = self.atom, self.q, self.prox_center, self.L
        loss = self.loss
        problem = rr.separable_problem.singleton(atom, loss)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12,
                   coef_stop=self.coef_stop, FISTA=self.FISTA, min_its=100)
        tests.append((atom.proximal(q), solver.composite.coefs, 'solving atom prox with separable_atom.singleton \n%s ' % str(self)))
        d = atom.conjugate
        problem = rr.separable_problem.singleton(d, loss)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12,
                   coef_stop=self.coef_stop, FISTA=self.FISTA, min_its=100)
        tests.append((d.proximal(q), solver.composite.coefs, 'solving dual atom prox with separable_atom.singleton \n%s ' % str(self)))
        if not self.interactive:
            for test in tests:
                yield (all_close,) + test + (self,)
        else:
            for test in tests:
                yield all_close(*((test + (self,))))

    def test_container(self):
        """Solve via the container formulation (primal, quadratic-shifted
        loss, and dual) and compare to the proximal maps."""
        tests = []
        atom, q, prox_center, L = self.atom, self.q, self.prox_center, self.L
        loss = self.loss
        problem = rr.container(loss, atom)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12,
                   coef_stop=self.coef_stop, FISTA=self.FISTA, min_its=100)
        tests.append((atom.proximal(q), solver.composite.coefs, 'solving atom prox with container\n %s ' % str(self)))
        # write the loss in terms of a quadratic for the smooth loss and a smooth function...
        q = rr.identity_quadratic(L, prox_center, 0, 0)
        lossq = rr.quadratic.shift(prox_center.copy(), coef=0.6*L)
        lossq.quadratic = rr.identity_quadratic(0.4*L, prox_center.copy(), 0, 0)
        problem = rr.container(lossq, atom)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12, FISTA=self.FISTA, coef_stop=self.coef_stop)
        tests.append((atom.proximal(q),
                      problem.solve(tol=1.e-12,FISTA=self.FISTA,coef_stop=self.coef_stop),
                      'solving prox with container with monotonicity ' +
                      'but loss has identity_quadratic\n %s ' % str(self)))
        d = atom.conjugate
        problem = rr.container(d, loss)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12,
                   coef_stop=self.coef_stop, FISTA=self.FISTA, min_its=100)
        tests.append((d.proximal(q), solver.composite.coefs, 'solving dual prox with container\n %s ' % str(self)))
        if not self.interactive:
            for test in tests:
                yield (all_close,) + test + (self,)
        else:
            for test in tests:
                yield all_close(*((test + (self,))))

    def all_tests(self):
        """Chain every test_* generator of this solver into one stream."""
        for group in [self.test_duality_of_projections,
                      self.test_simple_problem,
                      self.test_separable,
                      self.test_dual_problem,
                      self.test_container,
                      self.test_simple_problem_nonsmooth
                      ]:
            for t in group():
                yield t
| [
"regreg.api.identity_quadratic",
"regreg.api.simple_problem.nonsmooth",
"regreg.api.gengrad",
"nose.tools.assert_true",
"numpy.allclose",
"regreg.api.simple_problem",
"regreg.api.quadratic.shift",
"copy.copy",
"regreg.api.FISTA",
"regreg.api.dual_problem.fromprimal",
"regreg.api.separable_proble... | [((528, 545), 'numpy.allclose', 'np.allclose', (['x', 'y'], {}), '(x, y)\n', (539, 545), True, 'import numpy as np\n'), ((799, 816), 'nose.tools.assert_true', 'nt.assert_true', (['v'], {}), '(v)\n', (813, 816), True, 'import nose.tools as nt\n'), ((3519, 3646), 'itertools.product', 'itertools.product', (['self.offset_choices', 'self.FISTA_choices', 'self.coef_stop_choices', 'self.L_choices', 'self.quadratic_choices'], {}), '(self.offset_choices, self.FISTA_choices, self.\n coef_stop_choices, self.L_choices, self.quadratic_choices)\n', (3536, 3646), False, 'import itertools\n'), ((5494, 5542), 'regreg.api.identity_quadratic', 'rr.identity_quadratic', (['L', 'self.prox_center', '(0)', '(0)'], {}), '(L, self.prox_center, 0, 0)\n', (5515, 5542), True, 'import regreg.api as rr\n'), ((5563, 5607), 'regreg.api.quadratic.shift', 'rr.quadratic.shift', (['self.prox_center'], {'coef': 'L'}), '(self.prox_center, coef=L)\n', (5581, 5607), True, 'import regreg.api as rr\n'), ((6711, 6721), 'copy.copy', 'copy', (['atom'], {}), '(atom)\n', (6715, 6721), False, 'from copy import copy\n'), ((6782, 6813), 'regreg.api.simple_problem.nonsmooth', 'rr.simple_problem.nonsmooth', (['p2'], {}), '(p2)\n', (6809, 6813), True, 'import regreg.api as rr\n'), ((6831, 6848), 'regreg.api.FISTA', 'rr.FISTA', (['problem'], {}), '(problem)\n', (6839, 6848), True, 'import regreg.api as rr\n'), ((6952, 6976), 'regreg.api.gengrad', 'rr.gengrad', (['problem', '(2.0)'], {}), '(problem, 2.0)\n', (6962, 6976), True, 'import regreg.api as rr\n'), ((7429, 7439), 'copy.copy', 'copy', (['atom'], {}), '(atom)\n', (7433, 7439), False, 'from copy import copy\n'), ((7643, 7653), 'copy.copy', 'copy', (['atom'], {}), '(atom)\n', (7647, 7653), False, 'from copy import copy\n'), ((7714, 7745), 'regreg.api.simple_problem.nonsmooth', 'rr.simple_problem.nonsmooth', (['p4'], {}), '(p4)\n', (7741, 7745), True, 'import regreg.api as rr\n'), ((7763, 7780), 'regreg.api.FISTA', 'rr.FISTA', 
(['problem'], {}), '(problem)\n', (7771, 7780), True, 'import regreg.api as rr\n'), ((8479, 8508), 'regreg.api.simple_problem', 'rr.simple_problem', (['loss', 'atom'], {}), '(loss, atom)\n', (8496, 8508), True, 'import regreg.api as rr\n'), ((8526, 8543), 'regreg.api.FISTA', 'rr.FISTA', (['problem'], {}), '(problem)\n', (8534, 8543), True, 'import regreg.api as rr\n'), ((8878, 8921), 'regreg.api.identity_quadratic', 'rr.identity_quadratic', (['L', 'prox_center', '(0)', '(0)'], {}), '(L, prox_center, 0, 0)\n', (8899, 8921), True, 'import regreg.api as rr\n'), ((9088, 9118), 'regreg.api.simple_problem', 'rr.simple_problem', (['lossq', 'atom'], {}), '(lossq, atom)\n', (9105, 9118), True, 'import regreg.api as rr\n'), ((9466, 9495), 'regreg.api.simple_problem', 'rr.simple_problem', (['loss', 'atom'], {}), '(loss, atom)\n', (9483, 9495), True, 'import regreg.api as rr\n'), ((9513, 9530), 'regreg.api.FISTA', 'rr.FISTA', (['problem'], {}), '(problem)\n', (9521, 9530), True, 'import regreg.api as rr\n'), ((9856, 9882), 'regreg.api.simple_problem', 'rr.simple_problem', (['loss', 'd'], {}), '(loss, d)\n', (9873, 9882), True, 'import regreg.api as rr\n'), ((9900, 9917), 'regreg.api.FISTA', 'rr.FISTA', (['problem'], {}), '(problem)\n', (9908, 9917), True, 'import regreg.api as rr\n'), ((10818, 10856), 'regreg.api.dual_problem.fromprimal', 'rr.dual_problem.fromprimal', (['loss', 'atom'], {}), '(loss, atom)\n', (10844, 10856), True, 'import regreg.api as rr\n'), ((11801, 11843), 'regreg.api.separable_problem.singleton', 'rr.separable_problem.singleton', (['atom', 'loss'], {}), '(atom, loss)\n', (11831, 11843), True, 'import regreg.api as rr\n'), ((11861, 11878), 'regreg.api.FISTA', 'rr.FISTA', (['problem'], {}), '(problem)\n', (11869, 11878), True, 'import regreg.api as rr\n'), ((12170, 12209), 'regreg.api.separable_problem.singleton', 'rr.separable_problem.singleton', (['d', 'loss'], {}), '(d, loss)\n', (12200, 12209), True, 'import regreg.api as rr\n'), ((12227, 12244), 
'regreg.api.FISTA', 'rr.FISTA', (['problem'], {}), '(problem)\n', (12235, 12244), True, 'import regreg.api as rr\n'), ((12878, 12902), 'regreg.api.container', 'rr.container', (['loss', 'atom'], {}), '(loss, atom)\n', (12890, 12902), True, 'import regreg.api as rr\n'), ((12920, 12937), 'regreg.api.FISTA', 'rr.FISTA', (['problem'], {}), '(problem)\n', (12928, 12937), True, 'import regreg.api as rr\n'), ((13275, 13318), 'regreg.api.identity_quadratic', 'rr.identity_quadratic', (['L', 'prox_center', '(0)', '(0)'], {}), '(L, prox_center, 0, 0)\n', (13296, 13318), True, 'import regreg.api as rr\n'), ((13485, 13510), 'regreg.api.container', 'rr.container', (['lossq', 'atom'], {}), '(lossq, atom)\n', (13497, 13510), True, 'import regreg.api as rr\n'), ((13528, 13545), 'regreg.api.FISTA', 'rr.FISTA', (['problem'], {}), '(problem)\n', (13536, 13545), True, 'import regreg.api as rr\n'), ((13952, 13973), 'regreg.api.container', 'rr.container', (['d', 'loss'], {}), '(d, loss)\n', (13964, 13973), True, 'import regreg.api as rr\n'), ((13991, 14008), 'regreg.api.FISTA', 'rr.FISTA', (['problem'], {}), '(problem)\n', (13999, 14008), True, 'import regreg.api as rr\n'), ((319, 340), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - y)'], {}), '(x - y)\n', (333, 340), True, 'import numpy as np\n'), ((1479, 1543), 'nose.tools.assert_raises', 'nt.assert_raises', (['AttributeError', 'setattr', 'penalty', '"""bound"""', '(4.0)'], {}), "(AttributeError, setattr, penalty, 'bound', 4.0)\n", (1495, 1543), True, 'import nose.tools as nt\n'), ((1555, 1619), 'nose.tools.assert_raises', 'nt.assert_raises', (['AttributeError', 'setattr', 'dual', '"""lagrange"""', '(4.0)'], {}), "(AttributeError, setattr, dual, 'lagrange', 4.0)\n", (1571, 1619), True, 'import nose.tools as nt\n'), ((1640, 1704), 'nose.tools.assert_raises', 'nt.assert_raises', (['AttributeError', 'setattr', 'penalty', '"""bound"""', '(4.0)'], {}), "(AttributeError, setattr, penalty, 'bound', 4.0)\n", (1656, 1704), True, 'import 
nose.tools as nt\n'), ((1716, 1780), 'nose.tools.assert_raises', 'nt.assert_raises', (['AttributeError', 'setattr', 'dual', '"""lagrange"""', '(4.0)'], {}), "(AttributeError, setattr, dual, 'lagrange', 4.0)\n", (1732, 1780), True, 'import nose.tools as nt\n'), ((2711, 2775), 'nose.tools.assert_raises', 'nt.assert_raises', (['AttributeError', 'setattr', 'penalty', '"""bound"""', '(4.0)'], {}), "(AttributeError, setattr, penalty, 'bound', 4.0)\n", (2727, 2775), True, 'import nose.tools as nt\n'), ((2787, 2851), 'nose.tools.assert_raises', 'nt.assert_raises', (['AttributeError', 'setattr', 'dual', '"""lagrange"""', '(4.0)'], {}), "(AttributeError, setattr, dual, 'lagrange', 4.0)\n", (2803, 2851), True, 'import nose.tools as nt\n'), ((2872, 2936), 'nose.tools.assert_raises', 'nt.assert_raises', (['AttributeError', 'setattr', 'penalty', '"""bound"""', '(4.0)'], {}), "(AttributeError, setattr, penalty, 'bound', 4.0)\n", (2888, 2936), True, 'import nose.tools as nt\n'), ((2948, 3012), 'nose.tools.assert_raises', 'nt.assert_raises', (['AttributeError', 'setattr', 'dual', '"""lagrange"""', '(4.0)'], {}), "(AttributeError, setattr, dual, 'lagrange', 4.0)\n", (2964, 3012), True, 'import nose.tools as nt\n'), ((5381, 5418), 'numpy.random.standard_normal', 'np.random.standard_normal', (['atom.shape'], {}), '(atom.shape)\n', (5406, 5418), True, 'import numpy as np\n'), ((5826, 5874), 'regreg.api.identity_quadratic', 'rr.identity_quadratic', (['(1)', 'self.prox_center', '(0)', '(0)'], {}), '(1, self.prox_center, 0, 0)\n', (5847, 5874), True, 'import regreg.api as rr\n'), ((11147, 11170), 'regreg.api.identity', 'rr.identity', (['loss.shape'], {}), '(loss.shape)\n', (11158, 11170), True, 'import regreg.api as rr\n'), ((5686, 5719), 'regreg.api.identity_quadratic', 'rr.identity_quadratic', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (5707, 5719), True, 'import regreg.api as rr\n'), ((4369, 4406), 'numpy.random.standard_normal', 'np.random.standard_normal', (['atom.shape'], 
{}), '(atom.shape)\n', (4394, 4406), True, 'import numpy as np\n'), ((360, 377), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (374, 377), True, 'import numpy as np\n'), ((379, 396), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (393, 396), True, 'import numpy as np\n'), ((645, 666), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - y)'], {}), '(x - y)\n', (659, 666), True, 'import numpy as np\n'), ((4264, 4301), 'numpy.random.standard_normal', 'np.random.standard_normal', (['atom.shape'], {}), '(atom.shape)\n', (4289, 4301), True, 'import numpy as np\n'), ((675, 692), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (689, 692), True, 'import numpy as np\n'), ((694, 711), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (708, 711), True, 'import numpy as np\n')] |
"""
MIT License
Copyright (c) 2020
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Union, List
from pathlib import Path
import itertools
import logging
import numpy as np
from .metrics import LowRankMetrics
from .components import HyperParameters
logger = logging.getLogger()
def compute_rank(metrics: LowRankMetrics) -> float:
    """Return the average fraction of near-zero combined singular values.

    For every snapshot in ``metrics.historical_metrics`` the element-wise
    sum of ``input_channel_S`` and ``output_channel_S`` is tested against
    zero with ``np.isclose``; the per-snapshot fraction of near-zero values
    is then averaged over the whole history.
    """
    fractions = []
    for snapshot in metrics.historical_metrics:
        combined = snapshot.input_channel_S + snapshot.output_channel_S
        near_zero = np.isclose(combined, 0).astype(np.float16)
        fractions.append(np.mean(near_zero))
    return np.mean(fractions)
def optimize(epoch_trainer: callable,
             hyper_parameters: HyperParameters,
             min_delta: float = 5e-3,
             scale_delta: float = 5e-3,
             epochs: Union[range, List[int]] = range(0, 5),
             power: float = 0.8,
             output_path: str = 'autohyper-output'):
    """
    Trust-region search over hyper-parameter scale factors (autoHyper).

    Repeatedly evaluates short training trials at every corner of a small
    trust region around the current hyper-parameter configuration, moves
    toward the lowest-rank corner, and shrinks the per-parameter scale
    factors until every parameter's scale has plateaued near 1.

    @arguments:
        epoch_trainer: callable
            required arguments (
                hyper_parameters: Dict[str, float],
                epochs: iter(int),
            )
            must also return LowRankMetrics, computed using metrics.py:LowRankMetrics
        hyper_parameters: HyperParameters
            starting hyper-parameter config object. See components.py
            for default
        min_delta: float
            delta between successive ranks that defines plateauing
        scale_delta:
            delta absolute tolerance
        epochs: iter[int]
            integer iterable for number of epochs per trial. (fixed)
            to range(5)
        power: float
            regularization power for cummulative product
        output_path: str
            string path of output directory for logging and results
    """
    cur_rank = -1
    tr_count, trial_count = -1, -1
    # While True, we are still searching for a good starting point
    # (see the (trust_buffer < .85).all() branch below).
    establish_start = True
    auto_lr_path = Path(output_path)
    auto_lr_path.mkdir(exist_ok=True)
    rank_history = list()
    num_hp = len(hyper_parameters)
    # defines the trust region
    # we will [scale-down, not-scale, scale-up]
    scale_powers = [-1, 0, 1]
    # Cartesian product of the three scale choices, one axis per parameter.
    trust_region = list(itertools.product(
        *[scale_powers for i in range(num_hp)]))
    # One rank value per trust-region corner; -1 marks "not yet evaluated".
    trust_buffer = np.full([len(scale_powers) for _ in range(num_hp)],
                           fill_value=-1., dtype=float)
    # NOTE(review): unused placeholder, never called.
    def reset():
        pass
    cur_train_params = {p: v.current for p, v in hyper_parameters.items()}
    while True:
        tr_count += 1
        logger.info(f'autoHyper: Trust Region #{tr_count}')
        for scale_power in trust_region:
            # Shift scale powers (-1/0/1) to valid buffer indices (0/1/2).
            index = tuple(np.array(scale_power) + 1)
            # Only run a trial for corners not already evaluated (rank >= 0
            # is carried over when the buffer is re-centred below).
            if np.less(trust_buffer[index], 0.):
                for i, param in enumerate(hyper_parameters):
                    # Scaling up from exactly 0 would stay at 0 forever, so
                    # bump to a small positive value instead.
                    if scale_power[i] == 1 and np.equal(
                            hyper_parameters[param].current, 0.):
                        current = 1e-7
                    else:
                        current = hyper_parameters[param].current * \
                            (hyper_parameters[param].scale **
                             scale_power[i])
                    cur_train_params[param] = current
                trial_count += 1
                logger.info(f'autoHyper: Trial #{trial_count}')
                logger.info('autoHyper: HP Config:')
                for k, v in cur_train_params.items():
                    logger.info(' ' * 4 + f'{k}: {v}')
                metrics = epoch_trainer(hyper_parameters=cur_train_params,
                                        epochs=epochs,)
                cur_rank = compute_rank(metrics)
                logger.info(f'autoHyper: Trial Rank #{cur_rank}')
                trust_buffer[index] = cur_rank
        # TODO only works for 2D cse
        # Will handle duplicates and take the last index
        if establish_start and (trust_buffer < .85).all() and all(
                np.greater_equal(hp.current, hp.minimum)
                for k, hp in hyper_parameters.items()):
            # Still establishing a starting point: every rank is low, so
            # move toward the corner with the highest rank.
            index = tuple(np.argwhere(
                trust_buffer == np.max(trust_buffer))[0])
            if index == (1, 1):
                index = (0, 0)
        else:
            # Normal descent: move toward the corner with the lowest rank.
            establish_start = False
            index = tuple(np.argwhere(
                trust_buffer == np.min(trust_buffer))[-1])
            rank_history.append(np.min(trust_buffer))
            if index == (1, 1):
                index = (2, 2)
        # Re-centre the buffer on the chosen corner so already-evaluated
        # neighbours keep their ranks after the move.
        for axis, i in enumerate(index):
            mid = int(trust_buffer.shape[axis] / 2)
            trust_buffer = np.roll(trust_buffer, (i - mid) * -1, axis=axis)
        # TODO THIS ONLY WORKS FOR 2D MATRIX
        # Cells wrapped around by np.roll are stale; mark them unevaluated.
        if index[0] == 0 or index[0] == trust_buffer.shape[0] - 1:
            trust_buffer[index[0], :] = -1.
        if index[1] == 0 or index[1] == trust_buffer.shape[1] - 1:
            trust_buffer[:, index[1]] = -1.
        # Commit the move: apply the chosen scale powers to the parameters.
        scale_power = list(np.array(index) - 1)
        for i, param in enumerate(hyper_parameters):
            if scale_power[i] == 1 and np.equal(
                    hyper_parameters[param].current, 0.):
                current = 1e-7
            else:
                current = hyper_parameters[param].current * (
                    hyper_parameters[param].scale ** scale_power[i])
            hyper_parameters[param].current = current
        if establish_start:
            continue
        # Regularized cumulative product of best ranks; when it drops below
        # min_delta the search has plateaued and scales are annealed to 1.
        zeta = np.cumprod(rank_history) ** power
        if np.less(zeta[-1], min_delta):
            for param in hyper_parameters:
                if np.isclose(hyper_parameters[param].scale, 1.,
                              atol=scale_delta):
                    hyper_parameters[param].stop = True
                # Anneal the scale factor towards 1.
                hyper_parameters[param].scale = \
                    (hyper_parameters[param].scale - 1.) * \
                    np.exp(-2.5) + 1
            if all([param.stop for param in
                    hyper_parameters.values()]):
                break
    logger.info(f'autoHyper Done. Final config: {hyper_parameters.final()}')
    return hyper_parameters
| [
"numpy.cumprod",
"numpy.roll",
"numpy.equal",
"pathlib.Path",
"numpy.min",
"numpy.array",
"numpy.isclose",
"numpy.exp",
"numpy.max",
"numpy.less",
"numpy.greater_equal",
"logging.getLogger"
] | [((1261, 1280), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1278, 1280), False, 'import logging\n'), ((2824, 2841), 'pathlib.Path', 'Path', (['output_path'], {}), '(output_path)\n', (2828, 2841), False, 'from pathlib import Path\n'), ((6776, 6804), 'numpy.less', 'np.less', (['zeta[-1]', 'min_delta'], {}), '(zeta[-1], min_delta)\n', (6783, 6804), True, 'import numpy as np\n'), ((4028, 4061), 'numpy.less', 'np.less', (['trust_buffer[index]', '(0.0)'], {}), '(trust_buffer[index], 0.0)\n', (4035, 4061), True, 'import numpy as np\n'), ((5909, 5957), 'numpy.roll', 'np.roll', (['trust_buffer', '((i - mid) * -1)'], {'axis': 'axis'}), '(trust_buffer, (i - mid) * -1, axis=axis)\n', (5916, 5957), True, 'import numpy as np\n'), ((6731, 6755), 'numpy.cumprod', 'np.cumprod', (['rank_history'], {}), '(rank_history)\n', (6741, 6755), True, 'import numpy as np\n'), ((5704, 5724), 'numpy.min', 'np.min', (['trust_buffer'], {}), '(trust_buffer)\n', (5710, 5724), True, 'import numpy as np\n'), ((6252, 6267), 'numpy.array', 'np.array', (['index'], {}), '(index)\n', (6260, 6267), True, 'import numpy as np\n'), ((6365, 6411), 'numpy.equal', 'np.equal', (['hyper_parameters[param].current', '(0.0)'], {}), '(hyper_parameters[param].current, 0.0)\n', (6373, 6411), True, 'import numpy as np\n'), ((6868, 6932), 'numpy.isclose', 'np.isclose', (['hyper_parameters[param].scale', '(1.0)'], {'atol': 'scale_delta'}), '(hyper_parameters[param].scale, 1.0, atol=scale_delta)\n', (6878, 6932), True, 'import numpy as np\n'), ((3986, 4007), 'numpy.array', 'np.array', (['scale_power'], {}), '(scale_power)\n', (3994, 4007), True, 'import numpy as np\n'), ((5267, 5307), 'numpy.greater_equal', 'np.greater_equal', (['hp.current', 'hp.minimum'], {}), '(hp.current, hp.minimum)\n', (5283, 5307), True, 'import numpy as np\n'), ((1369, 1432), 'numpy.isclose', 'np.isclose', (['(metric.input_channel_S + metric.output_channel_S)', '(0)'], {}), '(metric.input_channel_S + metric.output_channel_S, 
0)\n', (1379, 1432), True, 'import numpy as np\n'), ((4170, 4216), 'numpy.equal', 'np.equal', (['hyper_parameters[param].current', '(0.0)'], {}), '(hyper_parameters[param].current, 0.0)\n', (4178, 4216), True, 'import numpy as np\n'), ((7150, 7162), 'numpy.exp', 'np.exp', (['(-2.5)'], {}), '(-2.5)\n', (7156, 7162), True, 'import numpy as np\n'), ((5435, 5455), 'numpy.max', 'np.max', (['trust_buffer'], {}), '(trust_buffer)\n', (5441, 5455), True, 'import numpy as np\n'), ((5645, 5665), 'numpy.min', 'np.min', (['trust_buffer'], {}), '(trust_buffer)\n', (5651, 5665), True, 'import numpy as np\n')] |
import os
import sys
import random
import time
import gym_mdptetris.envs
import numpy as np
from gym_mdptetris.envs import board, piece
from torch.utils.tensorboard import SummaryWriter
class RandomLinearGame():
    """Tetris game driven by a uniformly random drop policy.

    Wraps the board/piece structures from ``gym_mdptetris`` and plays by
    sampling a random orientation and drop column for every piece.
    """
    def __init__(self, board_height=20, board_width=10, piece_set='pieces4.dat', seed=12345):
        """
        Class to implement a linear game with random strategy using the structures
        and methods from gym-mdptetris.

        :param board_height: Board height in rows.
        :param board_width: Board width in columns.
        :param piece_set: File name of a piece-set data file shipped in the
            ``gym_mdptetris`` package's ``data`` directory.
        :param seed: Seed for the ``random`` module.
        """
        self.board_height = board_height
        self.board_width = board_width
        # Piece definitions are bundled with the gym_mdptetris package.
        path = os.path.dirname(gym_mdptetris.envs.__file__)
        pieces_path = path + '/data/' + piece_set
        self.pieces, self.nb_pieces = piece.load_pieces(pieces_path)
        # Tallest orientation across all pieces; the board reserves this
        # much headroom above the playing field.
        self.max_piece_height = 0
        for p in self.pieces:
            for o in p.orientations:
                self.max_piece_height = max(self.max_piece_height, o.height)
        random.seed(seed)
        self.new_piece()
        self.board = board.Board(max_piece_height=self.max_piece_height,
                                 width=board_width, height=board_height)
    def new_piece(self):
        """
        Method to select the next piece (uniformly at random).
        """
        self.current_piece = random.choice(range(self.nb_pieces))
    def seed(self, seed_value: int):
        """
        Seed randomness for game.
        :param seed_value: New seed value for game
        """
        random.seed(seed_value)
    def reset(self):
        """
        Reset the game and return the new board state.
        return: Current board state
        """
        self.board.reset()
        self.new_piece()
        self.lines_cleared = 0
    def board_step(self):
        """
        Make one random action.
        :return: Returns the lines cleared by the action.
        """
        # Pick a random orientation index, then a random drop column for it.
        a = [random.randint(
            0, self.pieces[self.current_piece].nb_orientations - 1), 0]
        # NOTE(review): random.randint is inclusive on both ends, so the
        # right-most legal column (board_width - piece_width) is never
        # chosen — possible off-by-one; confirm against Board.drop_piece's
        # column convention.
        a[1] = random.randint(
            0, self.board_width - self.pieces[self.current_piece].orientations[a[0]].width - 1)
        return self.board.drop_piece(self.pieces[self.current_piece].orientations[a[0]], a[1])
    def play_game(self, render=False):
        """
        Method to play an episode of a random strategy game.

        :param render: If True, print the board after every drop.
        :return: Tuple of (lines cleared, number of pieces dropped).
        """
        cleared = 0
        timesteps = 0
        # Play until the wall reaches the top of the board.
        while self.board.wall_height < self.board_height:
            timesteps += 1
            cleared += self.board_step()
            self.new_piece()
            if render:
                print(self.board)
        return cleared, timesteps
def test_performance(seed: int=12345, nb_games: int=100, log_dir: str='runs', save_dir: str='./'):
    """
    Method to test performance of the random-drop strategy.

    Plays ``nb_games`` episodes, logs per-episode reward and duration to
    TensorBoard, and dumps both series as CSV files in ``save_dir``.

    :param seed: Seed for the environment
    :param nb_games: number of episodes to run test for
    :param log_dir: Directory to log TensorBoard results to
    :param save_dir: Directory to save episode reward results to
    """
    runid = time.strftime('%Y%m%dT%H%M%SZ')
    writer = SummaryWriter(log_dir, comment=f"Random-{runid}")
    # Bug fix: the seed argument was previously ignored (the game was
    # constructed with its default seed).
    lg = RandomLinearGame(seed=seed)
    episode_rewards = []
    episode_duration = []
    try:
        for i in range(nb_games):
            reward, timesteps = lg.play_game()
            print(f"Episode reward: {reward}, episode duration: {timesteps}")
            episode_rewards.append(reward)
            episode_duration.append(timesteps)
            lg.reset()
            writer.add_scalar(f"Random-{runid}/Episode reward", reward, i)
            writer.add_scalar(f"Random-{runid}/Episode duration", timesteps, i)
    finally:
        # Bug fix: the writer was never closed, so buffered events could be
        # lost and the event-file handle leaked.
        writer.close()
    # Convert once instead of re-building the arrays for every use.
    rewards = np.array(episode_rewards)
    durations = np.array(episode_duration)
    rewards.tofile(f"{save_dir}/Random-rewards-{runid}.csv", sep=',')
    durations.tofile(f"{save_dir}/Random-timesteps-{runid}.csv", sep=',')
    print(f"Average rewards: {rewards.mean()}")
    print(f"Average duration: {durations.mean()}")
if __name__ == "__main__":
    # "test" sub-command: benchmark many episodes, then exit.
    args = sys.argv[1:]
    if args and args[0] == "test":
        episode_count = int(args[1]) if len(args) > 1 else 10000
        test_performance(nb_games=episode_count, save_dir="./runs")
        sys.exit(0)
    # Default: time a single random game.
    game = RandomLinearGame()
    started = time.time()
    game.play_game()
    finished = time.time()
    print(f"That took {finished - started}s")
| [
"random.randint",
"gym_mdptetris.envs.piece.load_pieces",
"os.path.dirname",
"time.strftime",
"time.time",
"random.seed",
"numpy.array",
"torch.utils.tensorboard.SummaryWriter",
"gym_mdptetris.envs.board.Board",
"sys.exit"
] | [((2942, 2973), 'time.strftime', 'time.strftime', (['"""%Y%m%dT%H%M%SZ"""'], {}), "('%Y%m%dT%H%M%SZ')\n", (2955, 2973), False, 'import time\n'), ((2987, 3036), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['log_dir'], {'comment': 'f"""Random-{runid}"""'}), "(log_dir, comment=f'Random-{runid}')\n", (3000, 3036), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((4156, 4167), 'time.time', 'time.time', ([], {}), '()\n', (4165, 4167), False, 'import time\n'), ((4197, 4208), 'time.time', 'time.time', ([], {}), '()\n', (4206, 4208), False, 'import time\n'), ((553, 597), 'os.path.dirname', 'os.path.dirname', (['gym_mdptetris.envs.__file__'], {}), '(gym_mdptetris.envs.__file__)\n', (568, 597), False, 'import os\n'), ((686, 716), 'gym_mdptetris.envs.piece.load_pieces', 'piece.load_pieces', (['pieces_path'], {}), '(pieces_path)\n', (703, 716), False, 'from gym_mdptetris.envs import board, piece\n'), ((903, 920), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (914, 920), False, 'import random\n'), ((967, 1062), 'gym_mdptetris.envs.board.Board', 'board.Board', ([], {'max_piece_height': 'self.max_piece_height', 'width': 'board_width', 'height': 'board_height'}), '(max_piece_height=self.max_piece_height, width=board_width,\n height=board_height)\n', (978, 1062), False, 'from gym_mdptetris.envs import board, piece\n'), ((1407, 1430), 'random.seed', 'random.seed', (['seed_value'], {}), '(seed_value)\n', (1418, 1430), False, 'import random\n'), ((1912, 2015), 'random.randint', 'random.randint', (['(0)', '(self.board_width - self.pieces[self.current_piece].orientations[a[0]].\n width - 1)'], {}), '(0, self.board_width - self.pieces[self.current_piece].\n orientations[a[0]].width - 1)\n', (1926, 2015), False, 'import random\n'), ((1809, 1879), 'random.randint', 'random.randint', (['(0)', '(self.pieces[self.current_piece].nb_orientations - 1)'], {}), '(0, self.pieces[self.current_piece].nb_orientations - 1)\n', (1823, 1879), False, 'import 
random\n'), ((3516, 3541), 'numpy.array', 'np.array', (['episode_rewards'], {}), '(episode_rewards)\n', (3524, 3541), True, 'import numpy as np\n'), ((3604, 3630), 'numpy.array', 'np.array', (['episode_duration'], {}), '(episode_duration)\n', (3612, 3630), True, 'import numpy as np\n'), ((4104, 4115), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4112, 4115), False, 'import sys\n'), ((3729, 3754), 'numpy.array', 'np.array', (['episode_rewards'], {}), '(episode_rewards)\n', (3737, 3754), True, 'import numpy as np\n'), ((3798, 3824), 'numpy.array', 'np.array', (['episode_duration'], {}), '(episode_duration)\n', (3806, 3824), True, 'import numpy as np\n')] |
"""
Logarithm base 10.
"""
import numpy
from ..baseclass import Dist
class Log10(Dist):
    """Base-10 logarithm of a non-negative distribution."""

    def __init__(self, dist):
        """
        Constructor.

        Args:
            dist (Dist) : distribution (>=0).
        """
        assert isinstance(dist, Dist)
        assert numpy.all(dist.range() >= 0)
        Dist.__init__(self, dist=dist, _length=len(dist), _advance=True)

    def _str(self, dist):
        """String representation."""
        return f"Log10({dist})"

    def _val(self, graph):
        """Value extraction."""
        if "dist" not in graph.keys:
            return self
        return numpy.log10(graph.keys["dist"])

    def _pdf(self, xloc, graph):
        """Probability density function (chain rule through 10**x)."""
        exp_x = 10 ** xloc
        return graph(exp_x, graph.dists["dist"]) * numpy.log(10) * exp_x

    def _cdf(self, xloc, graph):
        """Cumulative distribution function."""
        return graph(10 ** xloc, graph.dists["dist"])

    def _ppf(self, q, graph):
        """Point percentile function."""
        return numpy.log10(graph(q, graph.dists["dist"]))

    def _bnd(self, xloc, graph):
        """Distribution bounds."""
        lo, hi = graph(10 ** xloc, graph.dists["dist"])
        return numpy.log10(lo), numpy.log10(hi)
def log10(dist):
    """Construct the base-10 logarithm of a distribution.

    Args:
        dist (Dist) : distribution (>=0).
    """
    return Log10(dist=dist)
| [
"numpy.log10",
"numpy.log"
] | [((636, 667), 'numpy.log10', 'numpy.log10', (["graph.keys['dist']"], {}), "(graph.keys['dist'])\n", (647, 667), False, 'import numpy\n'), ((1248, 1266), 'numpy.log10', 'numpy.log10', (['lower'], {}), '(lower)\n', (1259, 1266), False, 'import numpy\n'), ((1268, 1286), 'numpy.log10', 'numpy.log10', (['upper'], {}), '(upper)\n', (1279, 1286), False, 'import numpy\n'), ((818, 831), 'numpy.log', 'numpy.log', (['(10)'], {}), '(10)\n', (827, 831), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ALEXA Wide Gamut RGB Colourspace
================================
Defines the *ALEXA Wide Gamut RGB* colourspace:
- :attr:`ALEXA_WIDE_GAMUT_RGB_COLOURSPACE`.
See Also
--------
`RGB Colourspaces IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/rgb.ipynb>`_ # noqa
References
----------
.. [1] http://www.arri.com/?eID=registration&file_uid=8026
(Last accessed 13 April 2014)
"""
from __future__ import division, unicode_literals
import math
import numpy as np
from colour.colorimetry import ILLUMINANTS
from colour.models import RGB_Colourspace
from colour.utilities import CaseInsensitiveMapping
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['ALEXA_LOG_C_CURVE_BCL_DATA',
'ALEXA_LOG_C_CURVE_CONVERSION_DATA',
'ALEXA_WIDE_GAMUT_RGB_PRIMARIES',
'ALEXA_WIDE_GAMUT_RGB_WHITEPOINT',
'ALEXA_WIDE_GAMUT_RGB_TO_XYZ_MATRIX',
'XYZ_TO_ALEXA_WIDE_GAMUT_RGB_MATRIX',
'ALEXA_WIDE_GAMUT_RGB_TRANSFER_FUNCTION',
'ALEXA_WIDE_GAMUT_RGB_INVERSE_TRANSFER_FUNCTION',
'ALEXA_WIDE_GAMUT_RGB_COLOURSPACE']
ALEXA_LOG_C_CURVE_BCL_DATA = CaseInsensitiveMapping(
{'SUP 3.x': {
160: (0.0928, 0.8128),
200: (0.0928, 0.8341),
250: (0.0928, 0.8549),
320: (0.0928, 0.8773),
400: (0.0928, 0.8968),
500: (0.0928, 0.9158),
640: (0.0928, 0.9362),
800: (0.0928, 0.9539),
1000: (0.0928, 0.9711),
1280: (0.0928, 0.9895),
1600: (0.0928, 1.0000),
2000: (0.0928, 1.0000),
2560: (0.0928, 1.0000),
3200: (0.0928, 1.0000)},
'SUP 2.x': {
160: (0.1083, 0.8110),
200: (0.1115, 0.8320),
250: (0.1146, 0.8524),
320: (0.1181, 0.8743),
400: (0.1213, 0.8935),
500: (0.1245, 0.9121),
640: (0.1280, 0.9320),
800: (0.1311, 0.9494),
1000: (0.1343, 0.9662),
1280: (0.1378, 0.9841),
1600: (0.1409, 0.9997)}})
"""
*ALEXA Log C* curve *Ei, Black, Clipping Level* data.
ALEXA_LOG_C_CURVE_BCL_DATA : dict
('SUP 3.x', 'SUP 2.x')
"""
# @formatter:off
ALEXA_LOG_C_CURVE_CONVERSION_DATA = CaseInsensitiveMapping(
{'SUP 3.x': CaseInsensitiveMapping(
{'Normalised Sensor Signal': {
160: (0.004680, 40.0, -0.076072, 0.269036, 0.381991, 42.062665, -0.071569, 0.125266), # noqa
200: (0.004597, 50.0, -0.118740, 0.266007, 0.382478, 51.986387, -0.110339, 0.128643), # noqa
250: (0.004518, 62.5, -0.171260, 0.262978, 0.382966, 64.243053, -0.158224, 0.132021), # noqa
320: (0.004436, 80.0, -0.243808, 0.259627, 0.383508, 81.183335, -0.224409, 0.135761), # noqa
400: (0.004369, 100.0, -0.325820, 0.256598, 0.383999, 100.295280, -0.299079, 0.139142), # noqa
500: (0.004309, 125.0, -0.427461, 0.253569, 0.384493, 123.889239, -0.391261, 0.142526), # noqa
640: (0.004249, 160.0, -0.568709, 0.250219, 0.385040, 156.482680, -0.518605, 0.146271), # noqa
800: (0.004201, 200.0, -0.729169, 0.247190, 0.385537, 193.235573, -0.662201, 0.149658), # noqa
1000: (0.004160, 250.0, -0.928805, 0.244161, 0.386036, 238.584745, -0.839385, 0.153047), # noqa
1280: (0.004120, 320.0, -1.207168, 0.240810, 0.386590, 301.197380, -1.084020, 0.156799), # noqa
1600: (0.004088, 400.0, -1.524256, 0.237781, 0.387093, 371.761171, -1.359723, 0.160192)}, # noqa
'Linear Scene Exposure Factor': {
160: (0.005561, 5.555556, 0.080216, 0.269036, 0.381991, 5.842037, 0.092778, 0.125266), # noqa
200: (0.006208, 5.555556, 0.076621, 0.266007, 0.382478, 5.776265, 0.092782, 0.128643), # noqa
250: (0.006871, 5.555556, 0.072941, 0.262978, 0.382966, 5.710494, 0.092786, 0.132021), # noqa
320: (0.007622, 5.555556, 0.068768, 0.259627, 0.383508, 5.637732, 0.092791, 0.135761), # noqa
400: (0.008318, 5.555556, 0.064901, 0.256598, 0.383999, 5.571960, 0.092795, 0.139142), # noqa
500: (0.009031, 5.555556, 0.060939, 0.253569, 0.384493, 5.506188, 0.092800, 0.142526), # noqa
640: (0.009840, 5.555556, 0.056443, 0.250219, 0.385040, 5.433426, 0.092805, 0.146271), # noqa
800: (0.010591, 5.555556, 0.052272, 0.247190, 0.385537, 5.367655, 0.092809, 0.149658), # noqa
1000: (0.011361, 5.555556, 0.047996, 0.244161, 0.386036, 5.301883, 0.092814, 0.153047), # noqa
1280: (0.012235, 5.555556, 0.043137, 0.240810, 0.386590, 5.229121, 0.092819, 0.156799), # noqa
1600: (0.013047, 5.555556, 0.038625, 0.237781, 0.387093, 5.163350, 0.092824, 0.16019)}}), # noqa
'SUP 2.x': CaseInsensitiveMapping(
{'Normalised Sensor Signal': {
160: (0.003907, 36.439829, -0.053366, 0.269035, 0.391007, 45.593473, -0.069772, 0.10836), # noqa
200: (0.003907, 45.549786, -0.088959, 0.266007, 0.391007, 55.709581, -0.106114, 0.11154), # noqa
250: (0.003907, 56.937232, -0.133449, 0.262978, 0.391007, 67.887153, -0.150510, 0.11472), # noqa
320: (0.003907, 72.879657, -0.195737, 0.259627, 0.391007, 84.167616, -0.210597, 0.11824), # noqa
400: (0.003907, 91.099572, -0.266922, 0.256598, 0.391007, 101.811426, -0.276349, 0.12142), # noqa
500: (0.003907, 113.874465, -0.355903, 0.253569, 0.391007, 122.608379, -0.354421, 0.12461), # noqa
640: (0.003907, 145.759315, -0.480477, 0.250218, 0.391007, 149.703304, -0.456760, 0.12813), # noqa
800: (0.003907, 182.199144, -0.622848, 0.247189, 0.391007, 178.216873, -0.564981, 0.13131), # noqa
1000: (0.003907, 227.748930, -0.800811, 0.244161, 0.391007, 210.785040, -0.689043, 0.13449), # noqa
1280: (0.003907, 291.518630, -1.049959, 0.240810, 0.391007, 251.689459, -0.845336, 0.13801), # noqa
1600: (0.003907, 364.398287, -1.334700, 0.237781, 0.391007, 293.073575, -1.003841, 0.14119)}, # noqa
'Linear Scene Exposure Factor': {
160: (0.000000, 5.061087, 0.089004, 0.269035, 0.391007, 6.332427, 0.108361, 0.108361), # noqa
200: (0.000000, 5.061087, 0.089004, 0.266007, 0.391007, 6.189953, 0.111543, 0.111543), # noqa
250: (0.000000, 5.061087, 0.089004, 0.262978, 0.391007, 6.034414, 0.114725, 0.114725), # noqa
320: (0.000000, 5.061087, 0.089004, 0.259627, 0.391007, 5.844973, 0.118246, 0.118246), # noqa
400: (0.000000, 5.061087, 0.089004, 0.256598, 0.391007, 5.656190, 0.121428, 0.121428), # noqa
500: (0.000000, 5.061087, 0.089004, 0.253569, 0.391007, 5.449261, 0.124610, 0.124610), # noqa
640: (0.000000, 5.061087, 0.089004, 0.250218, 0.391007, 5.198031, 0.128130, 0.128130), # noqa
800: (0.000000, 5.061087, 0.089004, 0.247189, 0.391007, 4.950469, 0.131313, 0.131313), # noqa
1000: (0.000000, 5.061087, 0.089004, 0.244161, 0.391007, 4.684112, 0.134495, 0.134495), # noqa
1280: (0.000000, 5.061087, 0.089004, 0.240810, 0.391007, 4.369609, 0.138015, 0.138015), # noqa
1600: (0.000000, 5.061087, 0.089004, 0.237781, 0.391007, 4.070466, 0.141197, 0.14119)}})}) # noqa
"""
*ALEXA Log C* curve conversion data between signal and linear scene exposure
factor for *SUP 3.x* and signal and normalized sensor signal for *SUP 2.x*.
ALEXA_LOG_C_CURVE_CONVERSION_DATA : dict
('SUP 3.x', 'SUP 2.x')
"""
# @formatter:on
ALEXA_WIDE_GAMUT_RGB_PRIMARIES = np.array(
[[0.6840, 0.3130],
[0.2210, 0.8480],
[0.0861, -0.1020]])
"""
*ALEXA Wide Gamut RGB* colourspace primaries.
ALEXA_WIDE_GAMUT_RGB_PRIMARIES : ndarray, (3, 2)
"""
ALEXA_WIDE_GAMUT_RGB_WHITEPOINT = ILLUMINANTS.get(
'CIE 1931 2 Degree Standard Observer').get('D65')
"""
*ALEXA Wide Gamut RGB* colourspace whitepoint.
ALEXA_WIDE_GAMUT_RGB_WHITEPOINT : tuple
"""
ALEXA_WIDE_GAMUT_RGB_TO_XYZ_MATRIX = np.array(
[[0.638008, 0.214704, 0.097744],
[0.291954, 0.823841, -0.115795],
[0.002798, -0.067034, 1.153294]])
"""
*ALEXA Wide Gamut RGB* colourspace to *CIE XYZ* colourspace matrix.
ALEXA_WIDE_GAMUT_RGB_TO_XYZ_MATRIX : array_like, (3, 3)
"""
XYZ_TO_ALEXA_WIDE_GAMUT_RGB_MATRIX = np.linalg.inv(
ALEXA_WIDE_GAMUT_RGB_TO_XYZ_MATRIX)
"""
*CIE XYZ* colourspace to *ALEXA Wide Gamut RGB* colourspace matrix.
XYZ_TO_ALEXA_WIDE_GAMUT_RGB_MATRIX : array_like, (3, 3)
"""
def _alexa_wide_gamut_rgb_transfer_function(
        value,
        firmware='SUP 3.x',
        method='Linear Scene Exposure Factor',
        EI=800):
    """
    Defines the *ALEXA Wide Gamut value* colourspace transfer function.

    Parameters
    ----------
    value : numeric
        value.
    firmware : unicode,
        ('SUP 2.x', 'SUP 3.x')
        Alexa firmware version.
    method : unicode, optional
        ('Linear Scene Exposure Factor', 'Normalised Sensor Signal')
        Conversion method.
    EI : int, optional
        Ei.

    Returns
    -------
    numeric
        Companded value.
    """
    cut, a, b, c, d, e, f, e_cut_f = ALEXA_LOG_C_CURVE_CONVERSION_DATA.get(
        firmware).get(method).get(EI)

    # Log C is logarithmic above the cut and linear (*e * value + f*) below
    # it.  The original returned the constant *e * cut + f* for every input
    # below the cut, flattening the linear toe of the curve; the inverse
    # function's lower branch, *(value - f) / e*, confirms the linear form.
    if value > cut:
        return c * math.log10(a * value + b) + d
    return e * value + f
def _alexa_wide_gamut_rgb_inverse_transfer_function(
        value,
        firmware='SUP 3.x',
        method='Linear Scene Exposure Factor',
        EI=800):
    """
    Defines the *ALEXA Wide Gamut value* colourspace inverse transfer function.

    Parameters
    ----------
    value : numeric
        value.
    firmware : unicode,
        ('SUP 2.x', 'SUP 3.x')
        Alexa firmware version.
    method : unicode, optional
        ('Linear Scene Exposure Factor', 'Normalised Sensor Signal')
        Conversion method.
    EI : int, optional
        Ei.

    Returns
    -------
    numeric
        Companded value.
    """
    parameters = ALEXA_LOG_C_CURVE_CONVERSION_DATA.get(firmware).get(method)
    cut, a, b, c, d, e, f, e_cut_f = parameters.get(EI)

    # Invert the logarithmic segment above *e_cut_f*, the linear one below.
    if value > e_cut_f:
        return (math.pow(10, (value - d) / c) - b) / a
    return (value - f) / e
ALEXA_WIDE_GAMUT_RGB_TRANSFER_FUNCTION = (
_alexa_wide_gamut_rgb_transfer_function)
"""
Transfer function from linear to *ALEXA Wide Gamut RGB* colourspace.
ALEXA_WIDE_GAMUT_RGB_TRANSFER_FUNCTION : object
"""
ALEXA_WIDE_GAMUT_RGB_INVERSE_TRANSFER_FUNCTION = (
_alexa_wide_gamut_rgb_inverse_transfer_function)
"""
Inverse transfer function from *ALEXA Wide Gamut RGB* colourspace to linear.
ALEXA_WIDE_GAMUT_RGB_INVERSE_TRANSFER_FUNCTION : object
"""
ALEXA_WIDE_GAMUT_RGB_COLOURSPACE = RGB_Colourspace(
'ALEXA Wide Gamut RGB',
ALEXA_WIDE_GAMUT_RGB_PRIMARIES,
ALEXA_WIDE_GAMUT_RGB_WHITEPOINT,
ALEXA_WIDE_GAMUT_RGB_TO_XYZ_MATRIX,
XYZ_TO_ALEXA_WIDE_GAMUT_RGB_MATRIX,
ALEXA_WIDE_GAMUT_RGB_TRANSFER_FUNCTION,
ALEXA_WIDE_GAMUT_RGB_INVERSE_TRANSFER_FUNCTION)
"""
*ALEXA Wide Gamut RGB* colourspace.
ALEXA_WIDE_GAMUT_RGB_COLOURSPACE : RGB_Colourspace
"""
| [
"colour.utilities.CaseInsensitiveMapping",
"math.pow",
"colour.models.RGB_Colourspace",
"colour.colorimetry.ILLUMINANTS.get",
"math.log10",
"numpy.linalg.inv",
"numpy.array"
] | [((1460, 2169), 'colour.utilities.CaseInsensitiveMapping', 'CaseInsensitiveMapping', (["{'SUP 3.x': {(160): (0.0928, 0.8128), (200): (0.0928, 0.8341), (250): (\n 0.0928, 0.8549), (320): (0.0928, 0.8773), (400): (0.0928, 0.8968), (500\n ): (0.0928, 0.9158), (640): (0.0928, 0.9362), (800): (0.0928, 0.9539),\n (1000): (0.0928, 0.9711), (1280): (0.0928, 0.9895), (1600): (0.0928, \n 1.0), (2000): (0.0928, 1.0), (2560): (0.0928, 1.0), (3200): (0.0928, \n 1.0)}, 'SUP 2.x': {(160): (0.1083, 0.811), (200): (0.1115, 0.832), (250\n ): (0.1146, 0.8524), (320): (0.1181, 0.8743), (400): (0.1213, 0.8935),\n (500): (0.1245, 0.9121), (640): (0.128, 0.932), (800): (0.1311, 0.9494),\n (1000): (0.1343, 0.9662), (1280): (0.1378, 0.9841), (1600): (0.1409, \n 0.9997)}}"], {}), "({'SUP 3.x': {(160): (0.0928, 0.8128), (200): (0.0928,\n 0.8341), (250): (0.0928, 0.8549), (320): (0.0928, 0.8773), (400): (\n 0.0928, 0.8968), (500): (0.0928, 0.9158), (640): (0.0928, 0.9362), (800\n ): (0.0928, 0.9539), (1000): (0.0928, 0.9711), (1280): (0.0928, 0.9895),\n (1600): (0.0928, 1.0), (2000): (0.0928, 1.0), (2560): (0.0928, 1.0), (\n 3200): (0.0928, 1.0)}, 'SUP 2.x': {(160): (0.1083, 0.811), (200): (\n 0.1115, 0.832), (250): (0.1146, 0.8524), (320): (0.1181, 0.8743), (400):\n (0.1213, 0.8935), (500): (0.1245, 0.9121), (640): (0.128, 0.932), (800):\n (0.1311, 0.9494), (1000): (0.1343, 0.9662), (1280): (0.1378, 0.9841), (\n 1600): (0.1409, 0.9997)}})\n", (1482, 2169), False, 'from colour.utilities import CaseInsensitiveMapping\n'), ((7866, 7926), 'numpy.array', 'np.array', (['[[0.684, 0.313], [0.221, 0.848], [0.0861, -0.102]]'], {}), '([[0.684, 0.313], [0.221, 0.848], [0.0861, -0.102]])\n', (7874, 7926), True, 'import numpy as np\n'), ((8291, 8403), 'numpy.array', 'np.array', (['[[0.638008, 0.214704, 0.097744], [0.291954, 0.823841, -0.115795], [0.002798,\n -0.067034, 1.153294]]'], {}), '([[0.638008, 0.214704, 0.097744], [0.291954, 0.823841, -0.115795],\n [0.002798, -0.067034, 1.153294]])\n', (8299, 
8403), True, 'import numpy as np\n'), ((8586, 8635), 'numpy.linalg.inv', 'np.linalg.inv', (['ALEXA_WIDE_GAMUT_RGB_TO_XYZ_MATRIX'], {}), '(ALEXA_WIDE_GAMUT_RGB_TO_XYZ_MATRIX)\n', (8599, 8635), True, 'import numpy as np\n'), ((10946, 11226), 'colour.models.RGB_Colourspace', 'RGB_Colourspace', (['"""ALEXA Wide Gamut RGB"""', 'ALEXA_WIDE_GAMUT_RGB_PRIMARIES', 'ALEXA_WIDE_GAMUT_RGB_WHITEPOINT', 'ALEXA_WIDE_GAMUT_RGB_TO_XYZ_MATRIX', 'XYZ_TO_ALEXA_WIDE_GAMUT_RGB_MATRIX', 'ALEXA_WIDE_GAMUT_RGB_TRANSFER_FUNCTION', 'ALEXA_WIDE_GAMUT_RGB_INVERSE_TRANSFER_FUNCTION'], {}), "('ALEXA Wide Gamut RGB', ALEXA_WIDE_GAMUT_RGB_PRIMARIES,\n ALEXA_WIDE_GAMUT_RGB_WHITEPOINT, ALEXA_WIDE_GAMUT_RGB_TO_XYZ_MATRIX,\n XYZ_TO_ALEXA_WIDE_GAMUT_RGB_MATRIX,\n ALEXA_WIDE_GAMUT_RGB_TRANSFER_FUNCTION,\n ALEXA_WIDE_GAMUT_RGB_INVERSE_TRANSFER_FUNCTION)\n", (10961, 11226), False, 'from colour.models import RGB_Colourspace\n'), ((2536, 4706), 'colour.utilities.CaseInsensitiveMapping', 'CaseInsensitiveMapping', (["{'Normalised Sensor Signal': {(160): (0.00468, 40.0, -0.076072, 0.269036, \n 0.381991, 42.062665, -0.071569, 0.125266), (200): (0.004597, 50.0, -\n 0.11874, 0.266007, 0.382478, 51.986387, -0.110339, 0.128643), (250): (\n 0.004518, 62.5, -0.17126, 0.262978, 0.382966, 64.243053, -0.158224, \n 0.132021), (320): (0.004436, 80.0, -0.243808, 0.259627, 0.383508, \n 81.183335, -0.224409, 0.135761), (400): (0.004369, 100.0, -0.32582, \n 0.256598, 0.383999, 100.29528, -0.299079, 0.139142), (500): (0.004309, \n 125.0, -0.427461, 0.253569, 0.384493, 123.889239, -0.391261, 0.142526),\n (640): (0.004249, 160.0, -0.568709, 0.250219, 0.38504, 156.48268, -\n 0.518605, 0.146271), (800): (0.004201, 200.0, -0.729169, 0.24719, \n 0.385537, 193.235573, -0.662201, 0.149658), (1000): (0.00416, 250.0, -\n 0.928805, 0.244161, 0.386036, 238.584745, -0.839385, 0.153047), (1280):\n (0.00412, 320.0, -1.207168, 0.24081, 0.38659, 301.19738, -1.08402, \n 0.156799), (1600): (0.004088, 400.0, -1.524256, 0.237781, 0.387093, \n 
371.761171, -1.359723, 0.160192)}, 'Linear Scene Exposure Factor': {(\n 160): (0.005561, 5.555556, 0.080216, 0.269036, 0.381991, 5.842037, \n 0.092778, 0.125266), (200): (0.006208, 5.555556, 0.076621, 0.266007, \n 0.382478, 5.776265, 0.092782, 0.128643), (250): (0.006871, 5.555556, \n 0.072941, 0.262978, 0.382966, 5.710494, 0.092786, 0.132021), (320): (\n 0.007622, 5.555556, 0.068768, 0.259627, 0.383508, 5.637732, 0.092791, \n 0.135761), (400): (0.008318, 5.555556, 0.064901, 0.256598, 0.383999, \n 5.57196, 0.092795, 0.139142), (500): (0.009031, 5.555556, 0.060939, \n 0.253569, 0.384493, 5.506188, 0.0928, 0.142526), (640): (0.00984, \n 5.555556, 0.056443, 0.250219, 0.38504, 5.433426, 0.092805, 0.146271), (\n 800): (0.010591, 5.555556, 0.052272, 0.24719, 0.385537, 5.367655, \n 0.092809, 0.149658), (1000): (0.011361, 5.555556, 0.047996, 0.244161, \n 0.386036, 5.301883, 0.092814, 0.153047), (1280): (0.012235, 5.555556, \n 0.043137, 0.24081, 0.38659, 5.229121, 0.092819, 0.156799), (1600): (\n 0.013047, 5.555556, 0.038625, 0.237781, 0.387093, 5.16335, 0.092824, \n 0.16019)}}"], {}), "({'Normalised Sensor Signal': {(160): (0.00468, 40.0,\n -0.076072, 0.269036, 0.381991, 42.062665, -0.071569, 0.125266), (200):\n (0.004597, 50.0, -0.11874, 0.266007, 0.382478, 51.986387, -0.110339, \n 0.128643), (250): (0.004518, 62.5, -0.17126, 0.262978, 0.382966, \n 64.243053, -0.158224, 0.132021), (320): (0.004436, 80.0, -0.243808, \n 0.259627, 0.383508, 81.183335, -0.224409, 0.135761), (400): (0.004369, \n 100.0, -0.32582, 0.256598, 0.383999, 100.29528, -0.299079, 0.139142), (\n 500): (0.004309, 125.0, -0.427461, 0.253569, 0.384493, 123.889239, -\n 0.391261, 0.142526), (640): (0.004249, 160.0, -0.568709, 0.250219, \n 0.38504, 156.48268, -0.518605, 0.146271), (800): (0.004201, 200.0, -\n 0.729169, 0.24719, 0.385537, 193.235573, -0.662201, 0.149658), (1000):\n (0.00416, 250.0, -0.928805, 0.244161, 0.386036, 238.584745, -0.839385, \n 0.153047), (1280): (0.00412, 320.0, -1.207168, 0.24081, 
0.38659, \n 301.19738, -1.08402, 0.156799), (1600): (0.004088, 400.0, -1.524256, \n 0.237781, 0.387093, 371.761171, -1.359723, 0.160192)},\n 'Linear Scene Exposure Factor': {(160): (0.005561, 5.555556, 0.080216, \n 0.269036, 0.381991, 5.842037, 0.092778, 0.125266), (200): (0.006208, \n 5.555556, 0.076621, 0.266007, 0.382478, 5.776265, 0.092782, 0.128643),\n (250): (0.006871, 5.555556, 0.072941, 0.262978, 0.382966, 5.710494, \n 0.092786, 0.132021), (320): (0.007622, 5.555556, 0.068768, 0.259627, \n 0.383508, 5.637732, 0.092791, 0.135761), (400): (0.008318, 5.555556, \n 0.064901, 0.256598, 0.383999, 5.57196, 0.092795, 0.139142), (500): (\n 0.009031, 5.555556, 0.060939, 0.253569, 0.384493, 5.506188, 0.0928, \n 0.142526), (640): (0.00984, 5.555556, 0.056443, 0.250219, 0.38504, \n 5.433426, 0.092805, 0.146271), (800): (0.010591, 5.555556, 0.052272, \n 0.24719, 0.385537, 5.367655, 0.092809, 0.149658), (1000): (0.011361, \n 5.555556, 0.047996, 0.244161, 0.386036, 5.301883, 0.092814, 0.153047),\n (1280): (0.012235, 5.555556, 0.043137, 0.24081, 0.38659, 5.229121, \n 0.092819, 0.156799), (1600): (0.013047, 5.555556, 0.038625, 0.237781, \n 0.387093, 5.16335, 0.092824, 0.16019)}})\n", (2558, 4706), False, 'from colour.utilities import CaseInsensitiveMapping\n'), ((5035, 7199), 'colour.utilities.CaseInsensitiveMapping', 'CaseInsensitiveMapping', (["{'Normalised Sensor Signal': {(160): (0.003907, 36.439829, -0.053366, \n 0.269035, 0.391007, 45.593473, -0.069772, 0.10836), (200): (0.003907, \n 45.549786, -0.088959, 0.266007, 0.391007, 55.709581, -0.106114, 0.11154\n ), (250): (0.003907, 56.937232, -0.133449, 0.262978, 0.391007, \n 67.887153, -0.15051, 0.11472), (320): (0.003907, 72.879657, -0.195737, \n 0.259627, 0.391007, 84.167616, -0.210597, 0.11824), (400): (0.003907, \n 91.099572, -0.266922, 0.256598, 0.391007, 101.811426, -0.276349, \n 0.12142), (500): (0.003907, 113.874465, -0.355903, 0.253569, 0.391007, \n 122.608379, -0.354421, 0.12461), (640): (0.003907, 145.759315, -\n 
0.480477, 0.250218, 0.391007, 149.703304, -0.45676, 0.12813), (800): (\n 0.003907, 182.199144, -0.622848, 0.247189, 0.391007, 178.216873, -\n 0.564981, 0.13131), (1000): (0.003907, 227.74893, -0.800811, 0.244161, \n 0.391007, 210.78504, -0.689043, 0.13449), (1280): (0.003907, 291.51863,\n -1.049959, 0.24081, 0.391007, 251.689459, -0.845336, 0.13801), (1600):\n (0.003907, 364.398287, -1.3347, 0.237781, 0.391007, 293.073575, -\n 1.003841, 0.14119)}, 'Linear Scene Exposure Factor': {(160): (0.0, \n 5.061087, 0.089004, 0.269035, 0.391007, 6.332427, 0.108361, 0.108361),\n (200): (0.0, 5.061087, 0.089004, 0.266007, 0.391007, 6.189953, 0.111543,\n 0.111543), (250): (0.0, 5.061087, 0.089004, 0.262978, 0.391007, \n 6.034414, 0.114725, 0.114725), (320): (0.0, 5.061087, 0.089004, \n 0.259627, 0.391007, 5.844973, 0.118246, 0.118246), (400): (0.0, \n 5.061087, 0.089004, 0.256598, 0.391007, 5.65619, 0.121428, 0.121428), (\n 500): (0.0, 5.061087, 0.089004, 0.253569, 0.391007, 5.449261, 0.12461, \n 0.12461), (640): (0.0, 5.061087, 0.089004, 0.250218, 0.391007, 5.198031,\n 0.12813, 0.12813), (800): (0.0, 5.061087, 0.089004, 0.247189, 0.391007,\n 4.950469, 0.131313, 0.131313), (1000): (0.0, 5.061087, 0.089004, \n 0.244161, 0.391007, 4.684112, 0.134495, 0.134495), (1280): (0.0, \n 5.061087, 0.089004, 0.24081, 0.391007, 4.369609, 0.138015, 0.138015), (\n 1600): (0.0, 5.061087, 0.089004, 0.237781, 0.391007, 4.070466, 0.141197,\n 0.14119)}}"], {}), "({'Normalised Sensor Signal': {(160): (0.003907, \n 36.439829, -0.053366, 0.269035, 0.391007, 45.593473, -0.069772, 0.10836\n ), (200): (0.003907, 45.549786, -0.088959, 0.266007, 0.391007, \n 55.709581, -0.106114, 0.11154), (250): (0.003907, 56.937232, -0.133449,\n 0.262978, 0.391007, 67.887153, -0.15051, 0.11472), (320): (0.003907, \n 72.879657, -0.195737, 0.259627, 0.391007, 84.167616, -0.210597, 0.11824\n ), (400): (0.003907, 91.099572, -0.266922, 0.256598, 0.391007, \n 101.811426, -0.276349, 0.12142), (500): (0.003907, 113.874465, -\n 
0.355903, 0.253569, 0.391007, 122.608379, -0.354421, 0.12461), (640): (\n 0.003907, 145.759315, -0.480477, 0.250218, 0.391007, 149.703304, -\n 0.45676, 0.12813), (800): (0.003907, 182.199144, -0.622848, 0.247189, \n 0.391007, 178.216873, -0.564981, 0.13131), (1000): (0.003907, 227.74893,\n -0.800811, 0.244161, 0.391007, 210.78504, -0.689043, 0.13449), (1280):\n (0.003907, 291.51863, -1.049959, 0.24081, 0.391007, 251.689459, -\n 0.845336, 0.13801), (1600): (0.003907, 364.398287, -1.3347, 0.237781, \n 0.391007, 293.073575, -1.003841, 0.14119)},\n 'Linear Scene Exposure Factor': {(160): (0.0, 5.061087, 0.089004, \n 0.269035, 0.391007, 6.332427, 0.108361, 0.108361), (200): (0.0, \n 5.061087, 0.089004, 0.266007, 0.391007, 6.189953, 0.111543, 0.111543),\n (250): (0.0, 5.061087, 0.089004, 0.262978, 0.391007, 6.034414, 0.114725,\n 0.114725), (320): (0.0, 5.061087, 0.089004, 0.259627, 0.391007, \n 5.844973, 0.118246, 0.118246), (400): (0.0, 5.061087, 0.089004, \n 0.256598, 0.391007, 5.65619, 0.121428, 0.121428), (500): (0.0, 5.061087,\n 0.089004, 0.253569, 0.391007, 5.449261, 0.12461, 0.12461), (640): (0.0,\n 5.061087, 0.089004, 0.250218, 0.391007, 5.198031, 0.12813, 0.12813), (\n 800): (0.0, 5.061087, 0.089004, 0.247189, 0.391007, 4.950469, 0.131313,\n 0.131313), (1000): (0.0, 5.061087, 0.089004, 0.244161, 0.391007, \n 4.684112, 0.134495, 0.134495), (1280): (0.0, 5.061087, 0.089004, \n 0.24081, 0.391007, 4.369609, 0.138015, 0.138015), (1600): (0.0, \n 5.061087, 0.089004, 0.237781, 0.391007, 4.070466, 0.141197, 0.14119)}})\n", (5057, 7199), False, 'from colour.utilities import CaseInsensitiveMapping\n'), ((8086, 8140), 'colour.colorimetry.ILLUMINANTS.get', 'ILLUMINANTS.get', (['"""CIE 1931 2 Degree Standard Observer"""'], {}), "('CIE 1931 2 Degree Standard Observer')\n", (8101, 8140), False, 'from colour.colorimetry import ILLUMINANTS\n'), ((9523, 9548), 'math.log10', 'math.log10', (['(a * value + b)'], {}), '(a * value + b)\n', (9533, 9548), False, 'import math\n'), 
((10345, 10374), 'math.pow', 'math.pow', (['(10)', '((value - d) / c)'], {}), '(10, (value - d) / c)\n', (10353, 10374), False, 'import math\n')] |
"""Forecast plotter tests."""
from copy import deepcopy
import locale
import numpy as np
import pytest
from soam.constants import (
ANOMALY_PLOT,
FIG_SIZE,
MONTHLY_TIME_GRANULARITY,
PLOT_CONFIG,
Y_COL,
YHAT_COL,
)
from soam.plotting.forecast_plotter import ForecastPlotterTask
from tests.helpers import sample_data_df # pylint: disable=unused-import
@pytest.fixture
def set_time_locale():
    # Reset LC_TIME with a (None, None) locale tuple so date formatting in the
    # rendered plots is consistent across environments.
    # NOTE(review): (None, None) normalizes to the 'C' locale on most platforms — confirm.
    locale.setlocale(locale.LC_TIME, (None, None))
def perturb_ts(df, col, scale=1):
    """Inject reproducible uniform noise into column ``col`` of ``df`` in place.

    The noise amplitude on each side is half the column mean times ``scale``,
    drawn from a fixed-seed generator so results are deterministic.
    Returns the mutated frame for convenience.
    """
    amplitude = df[col].mean() * scale
    rng = np.random.default_rng(42)
    noise = rng.uniform(low=-amplitude / 2, high=amplitude / 2, size=len(df))
    df[col] += noise
    return df
def assert_out_paths_equal(fnames, tmp_path):
    """Assert that ``tmp_path`` contains exactly the files named in ``fnames``,
    in the same order that ``tmp_path.iterdir()`` yields them."""
    expected = [(tmp_path / name).as_posix() for name in fnames]
    actual = [entry.as_posix() for entry in tmp_path.iterdir()]
    assert len(expected) == len(actual)
    assert all(exp == act for exp, act in zip(expected, actual))
def run_standard_ForecastPlotterTask(tmp_path, time_series, prediction):
    """Build a ForecastPlotterTask with a compact monthly figure size and run
    it on the given observed series and prediction; return the task."""
    config = deepcopy(PLOT_CONFIG)
    # Shrink the anomaly-plot figure so test images stay small.
    config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
    task = ForecastPlotterTask(
        path=tmp_path,
        metric_name='test',
        time_granularity=MONTHLY_TIME_GRANULARITY,
        plot_config=config,
    )
    task.run(time_series, prediction)
    return task
@pytest.mark.mpl_image_compare
def test_ForecastPlotterTask_simple(
    tmp_path, sample_data_df, set_time_locale
):  # pylint: disable=redefined-outer-name,unused-argument
    """Forecast starts exactly where the observed series ends."""
    observed = sample_data_df.iloc[:30]
    forecast = sample_data_df.iloc[30:].rename(columns={Y_COL: YHAT_COL})
    task = run_standard_ForecastPlotterTask(tmp_path, observed, forecast)
    assert_out_paths_equal(['0_forecast_2013020100_2015080100_.png'], tmp_path)
    return task.fig
@pytest.mark.mpl_image_compare
def test_ForecastPlotterTask_overlapping(
    tmp_path, sample_data_df, set_time_locale
):  # pylint: disable=redefined-outer-name,unused-argument
    """Forecast window overlaps the observed series; noise keeps curves distinct."""
    observed = sample_data_df
    forecast = sample_data_df.iloc[30:].rename(columns={Y_COL: YHAT_COL})
    forecast = perturb_ts(forecast, YHAT_COL, scale=0.1)
    task = run_standard_ForecastPlotterTask(tmp_path, observed, forecast)
    assert_out_paths_equal(['0_forecast_2013020100_2015080100_.png'], tmp_path)
    return task.fig
| [
"numpy.random.default_rng",
"copy.deepcopy",
"locale.setlocale",
"soam.plotting.forecast_plotter.ForecastPlotterTask"
] | [((422, 468), 'locale.setlocale', 'locale.setlocale', (['locale.LC_TIME', '(None, None)'], {}), '(locale.LC_TIME, (None, None))\n', (438, 468), False, 'import locale\n'), ((1085, 1106), 'copy.deepcopy', 'deepcopy', (['PLOT_CONFIG'], {}), '(PLOT_CONFIG)\n', (1093, 1106), False, 'from copy import deepcopy\n'), ((1192, 1319), 'soam.plotting.forecast_plotter.ForecastPlotterTask', 'ForecastPlotterTask', ([], {'path': 'tmp_path', 'metric_name': '"""test"""', 'time_granularity': 'MONTHLY_TIME_GRANULARITY', 'plot_config': 'plot_config'}), "(path=tmp_path, metric_name='test', time_granularity=\n MONTHLY_TIME_GRANULARITY, plot_config=plot_config)\n", (1211, 1319), False, 'from soam.plotting.forecast_plotter import ForecastPlotterTask\n'), ((585, 610), 'numpy.random.default_rng', 'np.random.default_rng', (['(42)'], {}), '(42)\n', (606, 610), True, 'import numpy as np\n')] |
"""
neff.py
- compares predictions of PriMiDM and PRIMAT including additional dark radiation
- outputs pdf Neffcheck.pdf
- uses data from DeltaNeffPRIMAT.txt and DeltaNeffPRIMI.txt
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
# Global Matplotlib styling: thicker axes frame and always-visible
# major/minor ticks for the publication-style figure below.
plt.rcParams['axes.linewidth'] = 1.75
plt.rcParams['xtick.minor.size'] = 5
plt.rcParams['xtick.major.size'] = 7
plt.rcParams['ytick.minor.size'] = 5
plt.rcParams['ytick.major.size'] = 7
plt.rcParams['xtick.major.width'] = 1.0
plt.rcParams['ytick.major.width'] = 1.0
plt.rcParams['xtick.minor.visible'] = True
plt.rcParams['ytick.minor.visible'] = True
if __name__ == '__main__':
    # Load the two prediction grids. Column usage below implies the layout:
    # col 0 = DeltaNeff, col 2 = H, col 3 = D, col 5 = 3He, col 6 = Yp, col 7 = 7Li
    # (abundance ratios are formed by dividing by the H column).
    primatData = np.loadtxt('DeltaNeffPRIMAT.txt', skiprows=1, delimiter=',')
    primiData = np.loadtxt('DeltaNeffPRIMI.txt', skiprows=1)
    DeltaNeff = primatData[:, 0]
    # PRIMI predictions keyed by observable name.
    primi_df = {'DeltaNeff': DeltaNeff,
                'H': primiData[:, 2],
                'Yp': primiData[:, 6],
                'D/H x 10^5': primiData[:, 3]/primiData[:, 2],
                '3He/H x 10^5': primiData[:, 5]/primiData[:, 2],
                '7Li/H x 10^11': primiData[:, 7]/primiData[:, 2]}
    # PRIMAT predictions with the same keys, for element-wise comparison.
    primat_df = {'DeltaNeff': DeltaNeff,
                 'H': primatData[:, 2],
                 'Yp': primatData[:, 6],
                 'D/H x 10^5': primatData[:, 3]/primatData[:, 2],
                 '3He/H x 10^5': primatData[:, 5]/primatData[:, 2],
                 '7Li/H x 10^11': primatData[:, 7]/primatData[:, 2]}
    figsize = (8, 5)
    plt.figure(figsize=figsize)
    columns = ['H',
               'Yp',
               'D/H x 10^5',
               '3He/H x 10^5',
               '7Li/H x 10^11']
    # LaTeX legend labels, index-aligned with `columns`.
    column_labels = [r'$\mathrm{H}$',
                     r'$Y_p$',
                     r'$\mathrm{D}/\mathrm{H}$',
                     r'$^{3}\mathrm{He}/\mathrm{H}$',
                     r'$^{7}\mathrm{Li}/\mathrm{H}$']
    # First palette is immediately overridden by the second one below.
    colors = ['#01295F',
              '#419D78',
              '#FFBF00',
              '#D1495B',
              '#DCDCDD']
    #linestyles = ['-', '--', '-.', ':', (0, (1, 10))]
    colors = ['purple', '#306B37', 'darkgoldenrod', '#3F7BB6', '#BF4145']
    markers = ["^", "o", "s", "*", "d"]
    sizes = np.array([60, 60, 60, 90, 60])
    # Plot |1 - PRIMI/PRIMAT| per observable: a connecting line (unlabelled)
    # plus labelled scatter markers drawn on top.
    for idx, column in enumerate(columns):
        plt.plot(primi_df['DeltaNeff'], np.abs(1 - (primi_df[column]/primat_df[column])),
                 c=colors[idx],
                 ls='-',
                 lw=1.7,
                 label='',
                 zorder=1)
        plt.scatter(primi_df['DeltaNeff'], np.abs(1 - (primi_df[column]/primat_df[column])),
                    c=colors[idx],
                    alpha=0.9,
                    s=sizes[idx],
                    linewidths=0.4,
                    edgecolors='k',
                    marker=markers[idx],
                    label=column_labels[idx],
                    zorder=2)
    plt.yscale('log')
    plt.xlabel(r'$\Delta N_{\mathrm{eff}}$', fontsize=22)
    plt.ylabel(r'$\mathrm{Rel.}\,\,\mathrm{Diff.}$', fontsize=22)
    plt.legend(fontsize=18,
               title_fontsize=12,
               loc=4,
               fancybox=False,
               frameon=False,
               markerfirst=False)
    plt.xlim(0.0, 1.0)
    plt.ylim(3*10**(-6), 10**(-3))
    ax = plt.gca()
    ax.xaxis.set_tick_params(labelsize=20)
    ax.yaxis.set_tick_params(labelsize=20)
    # ax.tick_params(axis='x', which='major', size=4)
    # ax.tick_params(axis='y', which='major', size=4)
    # ax.tick_params(axis='y', which='minor', size=2)
    plt.savefig('Neffcheck.pdf')
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.yscale",
"numpy.abs",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.loadtxt",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((671, 731), 'numpy.loadtxt', 'np.loadtxt', (['"""DeltaNeffPRIMAT.txt"""'], {'skiprows': '(1)', 'delimiter': '""","""'}), "('DeltaNeffPRIMAT.txt', skiprows=1, delimiter=',')\n", (681, 731), True, 'import numpy as np\n'), ((745, 789), 'numpy.loadtxt', 'np.loadtxt', (['"""DeltaNeffPRIMI.txt"""'], {'skiprows': '(1)'}), "('DeltaNeffPRIMI.txt', skiprows=1)\n", (755, 789), True, 'import numpy as np\n'), ((1401, 1428), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1411, 1428), True, 'import matplotlib.pyplot as plt\n'), ((1956, 1986), 'numpy.array', 'np.array', (['[60, 60, 60, 90, 60]'], {}), '([60, 60, 60, 90, 60])\n', (1964, 1986), True, 'import numpy as np\n'), ((2422, 2439), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2432, 2439), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2495), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\Delta N_{\\\\mathrm{eff}}$"""'], {'fontsize': '(22)'}), "('$\\\\Delta N_{\\\\mathrm{eff}}$', fontsize=22)\n", (2451, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2496, 2560), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mathrm{Rel.}\\\\,\\\\,\\\\mathrm{Diff.}$"""'], {'fontsize': '(22)'}), "('$\\\\mathrm{Rel.}\\\\,\\\\,\\\\mathrm{Diff.}$', fontsize=22)\n", (2506, 2560), True, 'import matplotlib.pyplot as plt\n'), ((2559, 2663), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(18)', 'title_fontsize': '(12)', 'loc': '(4)', 'fancybox': '(False)', 'frameon': '(False)', 'markerfirst': '(False)'}), '(fontsize=18, title_fontsize=12, loc=4, fancybox=False, frameon=\n False, markerfirst=False)\n', (2569, 2663), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2690), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2680, 2690), True, 'import matplotlib.pyplot as plt\n'), ((2692, 2724), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(3 * 10 ** -6)', '(10 ** -3)'], {}), '(3 * 10 ** -6, 10 ** -3)\n', 
(2700, 2724), True, 'import matplotlib.pyplot as plt\n'), ((2730, 2739), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2737, 2739), True, 'import matplotlib.pyplot as plt\n'), ((2974, 3002), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Neffcheck.pdf"""'], {}), "('Neffcheck.pdf')\n", (2985, 3002), True, 'import matplotlib.pyplot as plt\n'), ((2062, 2110), 'numpy.abs', 'np.abs', (['(1 - primi_df[column] / primat_df[column])'], {}), '(1 - primi_df[column] / primat_df[column])\n', (2068, 2110), True, 'import numpy as np\n'), ((2216, 2264), 'numpy.abs', 'np.abs', (['(1 - primi_df[column] / primat_df[column])'], {}), '(1 - primi_df[column] / primat_df[column])\n', (2222, 2264), True, 'import numpy as np\n')] |
"""
Random height, weight generator for males and females. Uses parameters from
<NAME>. & <NAME>. (1992). Bivariate distributions for height and
weight of men and women in the United States. Risk Analysis, 12(2), 267-275.
<NAME>, January 2008.
"""
from __future__ import division
from scipy.stats import multivariate_normal
import numpy as np
def HtWtDataGenerator(nSubj, rndsd=None):
# Specify parameters of multivariate normal (MVN) distributions.
# Men:
HtMmu = 69.18
HtMsd = 2.87
lnWtMmu = 5.14
lnWtMsd = 0.17
Mrho = 0.42
Mmean = np.array([HtMmu , lnWtMmu])
Msigma = np.array([[HtMsd**2, Mrho * HtMsd * lnWtMsd],
[Mrho * HtMsd * lnWtMsd, lnWtMsd**2]])
# Women cluster 1:
HtFmu1 = 63.11
HtFsd1 = 2.76
lnWtFmu1 = 5.06
lnWtFsd1 = 0.24
Frho1 = 0.41
prop1 = 0.46
Fmean1 = np.array([HtFmu1, lnWtFmu1])
Fsigma1 = np.array([[HtFsd1**2, Frho1 * HtFsd1 * lnWtFsd1],
[Frho1 * HtFsd1 * lnWtFsd1, lnWtFsd1**2]])
# Women cluster 2:
HtFmu2 = 64.36
HtFsd2 = 2.49
lnWtFmu2 = 4.86
lnWtFsd2 = 0.14
Frho2 = 0.44
prop2 = 1 - prop1
Fmean2 = np.array([HtFmu2, lnWtFmu2])
Fsigma2 = np.array([[HtFsd2**2 , Frho2 * HtFsd2 * lnWtFsd2],
[Frho2 * HtFsd2 * lnWtFsd2 , lnWtFsd2**2]])
# Randomly generate data values from those MVN distributions.
if rndsd is not None:
np.random.seed(rndsd)
datamatrix = np.zeros((nSubj, 3))
# arbitrary coding values
maleval = 1
femaleval = 0
for i in range(0, nSubj):
# Flip coin to decide sex
sex = np.random.choice([maleval, femaleval], replace=True, p=(.5,.5), size=1)
if sex == maleval:
datum = multivariate_normal.rvs(mean=Mmean, cov=Msigma)
if sex == femaleval:
Fclust = np.random.choice([1, 2], replace=True, p=(prop1, prop2), size=1)
if Fclust == 1:
datum = multivariate_normal.rvs(mean=Fmean1, cov=Fsigma1)
if Fclust == 2:
datum = multivariate_normal.rvs(mean=Fmean2, cov=Fsigma2)
datamatrix[i] = np.concatenate([sex, np.round([datum[0], np.exp(datum[1])], 1)])
return datamatrix
| [
"numpy.random.seed",
"scipy.stats.multivariate_normal.rvs",
"numpy.zeros",
"numpy.array",
"numpy.exp",
"numpy.random.choice"
] | [((587, 613), 'numpy.array', 'np.array', (['[HtMmu, lnWtMmu]'], {}), '([HtMmu, lnWtMmu])\n', (595, 613), True, 'import numpy as np\n'), ((629, 722), 'numpy.array', 'np.array', (['[[HtMsd ** 2, Mrho * HtMsd * lnWtMsd], [Mrho * HtMsd * lnWtMsd, lnWtMsd ** 2]]'], {}), '([[HtMsd ** 2, Mrho * HtMsd * lnWtMsd], [Mrho * HtMsd * lnWtMsd, \n lnWtMsd ** 2]])\n', (637, 722), True, 'import numpy as np\n'), ((893, 921), 'numpy.array', 'np.array', (['[HtFmu1, lnWtFmu1]'], {}), '([HtFmu1, lnWtFmu1])\n', (901, 921), True, 'import numpy as np\n'), ((937, 1037), 'numpy.array', 'np.array', (['[[HtFsd1 ** 2, Frho1 * HtFsd1 * lnWtFsd1], [Frho1 * HtFsd1 * lnWtFsd1, \n lnWtFsd1 ** 2]]'], {}), '([[HtFsd1 ** 2, Frho1 * HtFsd1 * lnWtFsd1], [Frho1 * HtFsd1 *\n lnWtFsd1, lnWtFsd1 ** 2]])\n', (945, 1037), True, 'import numpy as np\n'), ((1212, 1240), 'numpy.array', 'np.array', (['[HtFmu2, lnWtFmu2]'], {}), '([HtFmu2, lnWtFmu2])\n', (1220, 1240), True, 'import numpy as np\n'), ((1256, 1356), 'numpy.array', 'np.array', (['[[HtFsd2 ** 2, Frho2 * HtFsd2 * lnWtFsd2], [Frho2 * HtFsd2 * lnWtFsd2, \n lnWtFsd2 ** 2]]'], {}), '([[HtFsd2 ** 2, Frho2 * HtFsd2 * lnWtFsd2], [Frho2 * HtFsd2 *\n lnWtFsd2, lnWtFsd2 ** 2]])\n', (1264, 1356), True, 'import numpy as np\n'), ((1513, 1533), 'numpy.zeros', 'np.zeros', (['(nSubj, 3)'], {}), '((nSubj, 3))\n', (1521, 1533), True, 'import numpy as np\n'), ((1473, 1494), 'numpy.random.seed', 'np.random.seed', (['rndsd'], {}), '(rndsd)\n', (1487, 1494), True, 'import numpy as np\n'), ((1683, 1757), 'numpy.random.choice', 'np.random.choice', (['[maleval, femaleval]'], {'replace': '(True)', 'p': '(0.5, 0.5)', 'size': '(1)'}), '([maleval, femaleval], replace=True, p=(0.5, 0.5), size=1)\n', (1699, 1757), True, 'import numpy as np\n'), ((1804, 1851), 'scipy.stats.multivariate_normal.rvs', 'multivariate_normal.rvs', ([], {'mean': 'Mmean', 'cov': 'Msigma'}), '(mean=Mmean, cov=Msigma)\n', (1827, 1851), False, 'from scipy.stats import multivariate_normal\n'), ((1905, 1969), 
'numpy.random.choice', 'np.random.choice', (['[1, 2]'], {'replace': '(True)', 'p': '(prop1, prop2)', 'size': '(1)'}), '([1, 2], replace=True, p=(prop1, prop2), size=1)\n', (1921, 1969), True, 'import numpy as np\n'), ((2024, 2073), 'scipy.stats.multivariate_normal.rvs', 'multivariate_normal.rvs', ([], {'mean': 'Fmean1', 'cov': 'Fsigma1'}), '(mean=Fmean1, cov=Fsigma1)\n', (2047, 2073), False, 'from scipy.stats import multivariate_normal\n'), ((2129, 2178), 'scipy.stats.multivariate_normal.rvs', 'multivariate_normal.rvs', ([], {'mean': 'Fmean2', 'cov': 'Fsigma2'}), '(mean=Fmean2, cov=Fsigma2)\n', (2152, 2178), False, 'from scipy.stats import multivariate_normal\n'), ((2245, 2261), 'numpy.exp', 'np.exp', (['datum[1]'], {}), '(datum[1])\n', (2251, 2261), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 21:26:36 2018
@author: acer
function made and called from previous test1 mask value
"""
import numpy as np
import cv2
from test_frames_main2 import mask
class analyse_frame:
    # Namespace-style class: frame_new1 is called without an instance (no self).
    def frame_new1(filename):
        """Locate the centroid of Hough lines in a Canny-masked frame.

        Returns an (x, y) tuple marking the midpoint of the bounding box of
        all detected line endpoints, or the string 'NULL' when the upstream
        mask yielded nothing. Draws the detected lines and the centroid onto
        the working image as a side effect.
        """
        filename1=filename
        #filename is the frame from the main file
        # mask.canny is an external helper; presumably returns an edge image
        # or the string 'NULL' on failure — TODO confirm against test_frames_main2.
        img=mask.canny(filename)
        #
        # while(True) + break is used here as a one-shot block, not a real loop.
        while(True):
            if(img=='NULL'):
                tot1=0
                break
            else:
                ret,thresh_img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
                arr=np.asarray(thresh_img)
                tot=arr.sum(-1)
                mn = arr.mean(-1)
                # tot1 = total intensity of the thresholded image; used below
                # as a "frame has content" indicator.
                tot1 = arr.sum(0).sum(0)
                break
        img1=img
        while(True):
            # NOTE(review): `(img1=='NULL')==False and tot1>0` mixes an
            # array/string comparison with a Python `and`; verify intent.
            if np.any((img1=='NULL')==False and tot1>0):
                edges = cv2.Canny(img, 75, 150)
                lines = cv2.HoughLinesP(edges, 1, np.pi/180, 40, maxLineGap=450)
                j=0
                x_center=[0,0]
                y_center=[0,0]
                m_center=[0,0]
                x1_low=[]
                x2_low=[]
                y1_max=[]
                y2_max=[]
                # Collect x endpoints of every detected line (and draw them).
                for line in lines:
                    x1, y1, x2, y2 = (line[0])
                    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
                    x1_low.append(x1)
                    x2_low.append(x2)
                x1_low.extend(x2_low)
                # NOTE(review): only the LAST line's y1/y2 are appended here
                # (these appends sit outside the loop) — confirm intended.
                y1_max.append(y1)
                y2_max.append(y2)
                y1_max.extend(y2_max)
                # Bounding-box extremes of the collected endpoints.
                x=min(x1_low)
                y=max(y1_max)
                z=min(y1_max)
                z1=max(x1_low)
                # Midpoint of the bounding box = reported centre.
                k1=int((x+z1)/2)
                k2=int((y+z)/2)
                m_center=(k1,k2)
                cv2.line(img, (k1,k2), (k1,k2), (255, 0, 0), 10)
                return m_center
            else:
                return 'NULL'
| [
"cv2.line",
"cv2.Canny",
"numpy.asarray",
"cv2.threshold",
"numpy.any",
"test_frames_main2.mask.canny",
"cv2.HoughLinesP"
] | [((364, 384), 'test_frames_main2.mask.canny', 'mask.canny', (['filename'], {}), '(filename)\n', (374, 384), False, 'from test_frames_main2 import mask\n'), ((838, 884), 'numpy.any', 'np.any', (["((img1 == 'NULL') == False and tot1 > 0)"], {}), "((img1 == 'NULL') == False and tot1 > 0)\n", (844, 884), True, 'import numpy as np\n'), ((550, 597), 'cv2.threshold', 'cv2.threshold', (['img', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img, 127, 255, cv2.THRESH_BINARY)\n', (563, 597), False, 'import cv2\n'), ((616, 638), 'numpy.asarray', 'np.asarray', (['thresh_img'], {}), '(thresh_img)\n', (626, 638), True, 'import numpy as np\n'), ((905, 928), 'cv2.Canny', 'cv2.Canny', (['img', '(75)', '(150)'], {}), '(img, 75, 150)\n', (914, 928), False, 'import cv2\n'), ((972, 1030), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['edges', '(1)', '(np.pi / 180)', '(40)'], {'maxLineGap': '(450)'}), '(edges, 1, np.pi / 180, 40, maxLineGap=450)\n', (987, 1030), False, 'import cv2\n'), ((1959, 2009), 'cv2.line', 'cv2.line', (['img', '(k1, k2)', '(k1, k2)', '(255, 0, 0)', '(10)'], {}), '(img, (k1, k2), (k1, k2), (255, 0, 0), 10)\n', (1967, 2009), False, 'import cv2\n'), ((1397, 1446), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (1405, 1446), False, 'import cv2\n')] |
"""Used for rendering frames as png files."""
import os
import numpy as np
import pyvista as pv
import nibabel as nb
# Input NIfTI volumes: anatomical scalars, geodesic distance map, and a
# column/parcel mask; OUTDIR receives one PNG per animation frame.
SCALAR = "/home/faruk/Documents/temp_flooding_brains/data/okapi/okapi_N4.nii.gz"
DIST = "/home/faruk/Documents/temp_flooding_brains/data/okapi/okapi_cerebrum_RH_v05_borders_inputrim_centroids1_geodistance.nii.gz"
MASK = "/home/faruk/Documents/temp_flooding_brains/data/okapi/okapi_cerebrum_RH_v05_inputrim_columns10000.nii.gz"
OUTDIR = "/home/faruk/Documents/temp_flooding_brains/test"
# Camera: [position, focal point, view-up vector].
CAMPOS = [[-518, 181, -177], [149, 184, 122], [0, 0, -1]]
NR_FRAMES = 24*5
CMAP = "CET_L1"
BACKGROUND = "black"
RESOLUTION = (720, 720)
CLIM = (0, 200)
# =============================================================================
# Output directory
if not os.path.exists(OUTDIR):
    os.makedirs(OUTDIR)
print(" Output directory: {}".format(OUTDIR))
# Get scalars
data = nb.load(SCALAR).get_fdata()
# Get distances
dist = nb.load(DIST).get_fdata()
# Get mask
mask = np.asarray(nb.load(MASK).dataobj)
dims = mask.shape
# Set opacity to make min and max invisible
opacity = np.ones(255)
opacity[0] = 0
opacity[-1] = 0
# Prep pyvista plotter
p = pv.Plotter(window_size=RESOLUTION, off_screen=True)
p.set_background(BACKGROUND)
p.disable_anti_aliasing()
p.camera_position = CAMPOS
# Frame thresholds: equally spaced distance cutoffs, skipping 0.
dmax = np.linspace(0, np.max(dist), NR_FRAMES+1)[1:]
# Render frames
for i, d in enumerate(dmax):
    print(" Frame {}/{}".format(i+1, NR_FRAMES))
    # Select voxels that will be rendered
    # Sliding distance window: voxels with d-5 < dist < d (and dist > 0).
    idx0 = dist > 0
    idx1 = dist > d - 5
    idx2 = dist < d
    idx3 = (idx1 * idx2) * idx0
    # Expand selection to whole mask parcels touched by the window.
    ids = np.unique(mask[idx3])
    idx4 = np.in1d(mask.reshape(np.prod(dims)), ids)
    idx4 = idx4.reshape(dims)
    temp = np.copy(data)
    # Voxels outside the selection are set to the colormap minimum
    # (rendered invisible by the opacity transfer function above).
    temp[~idx4] = CLIM[0]
    # Render
    p.add_volume(temp, show_scalar_bar=False, cmap=CMAP, clim=CLIM,
                 shade=False, opacity=opacity, blending="composite")
    out_name = "frame-{}.png".format(str(i+1).zfill(3))
    p.screenshot(os.path.join(OUTDIR, out_name))
    p.clear()
p.close()
print("Finished.")
| [
"os.makedirs",
"numpy.copy",
"nibabel.load",
"os.path.exists",
"pyvista.Plotter",
"numpy.ones",
"numpy.prod",
"numpy.max",
"os.path.join",
"numpy.unique"
] | [((1086, 1098), 'numpy.ones', 'np.ones', (['(255)'], {}), '(255)\n', (1093, 1098), True, 'import numpy as np\n'), ((1158, 1209), 'pyvista.Plotter', 'pv.Plotter', ([], {'window_size': 'RESOLUTION', 'off_screen': '(True)'}), '(window_size=RESOLUTION, off_screen=True)\n', (1168, 1209), True, 'import pyvista as pv\n'), ((765, 787), 'os.path.exists', 'os.path.exists', (['OUTDIR'], {}), '(OUTDIR)\n', (779, 787), False, 'import os\n'), ((793, 812), 'os.makedirs', 'os.makedirs', (['OUTDIR'], {}), '(OUTDIR)\n', (804, 812), False, 'import os\n'), ((1590, 1611), 'numpy.unique', 'np.unique', (['mask[idx3]'], {}), '(mask[idx3])\n', (1599, 1611), True, 'import numpy as np\n'), ((1706, 1719), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (1713, 1719), True, 'import numpy as np\n'), ((882, 897), 'nibabel.load', 'nb.load', (['SCALAR'], {}), '(SCALAR)\n', (889, 897), True, 'import nibabel as nb\n'), ((934, 947), 'nibabel.load', 'nb.load', (['DIST'], {}), '(DIST)\n', (941, 947), True, 'import nibabel as nb\n'), ((990, 1003), 'nibabel.load', 'nb.load', (['MASK'], {}), '(MASK)\n', (997, 1003), True, 'import nibabel as nb\n'), ((1315, 1327), 'numpy.max', 'np.max', (['dist'], {}), '(dist)\n', (1321, 1327), True, 'import numpy as np\n'), ((1970, 2000), 'os.path.join', 'os.path.join', (['OUTDIR', 'out_name'], {}), '(OUTDIR, out_name)\n', (1982, 2000), False, 'import os\n'), ((1644, 1657), 'numpy.prod', 'np.prod', (['dims'], {}), '(dims)\n', (1651, 1657), True, 'import numpy as np\n')] |
import numpy as np
from chainer0.function import Function
class ReLU(Function):
    """Element-wise rectified linear unit: y = max(x, 0)."""

    def forward(self, x):
        # Clamp negative entries to zero; positive values pass unchanged.
        return np.maximum(x, 0)

    def backward(self, gy):
        # The local derivative is 1 where the output is positive, 0 elsewhere,
        # so the upstream gradient flows through only at positive outputs.
        out = self.outputs[0]
        return gy * (out.data > 0)
def relu(x):
    """Apply the Rectified Linear Unit function element-wise."""
    return ReLU()(x)
| [
"numpy.maximum"
] | [((124, 140), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (134, 140), True, 'import numpy as np\n')] |
'''
be sure the python libraries are reachable
python dataset_generator.py --folder tr_dataset --num_images 100
qr capacity https://www.qrcode.com/en/about/version.html
qr code:4999 | elapsed time: 4m: 0s dll 25
qr code:19999 | elapsed time: 13m:50s dll 25
qr code:19999 | elapsed time: 14m:25s python 25
qr damaging time: 0m: 0s
qr code:19999 | elapsed time: 65m:44s dll 700
qr damaging time: 0m: 5s
qr code:1757 | elapsed time: 181m:42s python 700
'''
from PIL import Image, ImageDraw
import numpy as np
#from skimage.util import random_noise
import cv2
import qrcode
import math
import time
import random
import string
import argparse
import os
import ctypes
import copy
from ctypes import *
from qrcodes import MIN_SIZE, MAX_SIZE, LETTERS, IMAGE_SIZE_DATASET, VERSION, PADDING_SIZE, LEVEL,NUM_MASKS_PER_CORNER
# Shorthand aliases for the four qrcode error-correction levels.
level_l = qrcode.constants.ERROR_CORRECT_L
level_m = qrcode.constants.ERROR_CORRECT_M
level_q = qrcode.constants.ERROR_CORRECT_Q
level_h = qrcode.constants.ERROR_CORRECT_H
# Native Windows helper that stamps a damage mask onto a QR image
# (~10x faster than the pure-Python path per the timings in the header).
mask_lib = ctypes.WinDLL('absolute path to maskWhite.dll')
mask_fun = mask_lib.applyDamageMask
def createDamageMask():
    """Pre-render NUM_MASKS_PER_CORNER random damage masks per image corner.

    Each mask is a white IMAGE_SIZE_DATASET-square RGB image with one black
    rectangle of random half-extents (between 1/4 and 1/3 of the image side)
    centred on a corner. Returns a 2x2 nested list: masks[x][y] is the list
    of int32 (H, W, 3) arrays for corner (x, y).
    """
    masks = [[None, None], [None, None]]
    lo = IMAGE_SIZE_DATASET // 4
    hi = IMAGE_SIZE_DATASET // 3
    print("preloading masks....")
    for x in 0, 1:
        for y in 0, 1:
            masks[x][y] = []
            # Corner pixel this batch of rectangles is anchored on.
            centre = [x * (IMAGE_SIZE_DATASET - 1), y * (IMAGE_SIZE_DATASET - 1)]
            for _ in range(NUM_MASKS_PER_CORNER):
                w, h = np.random.randint(low=lo, high=hi + 1, size=2, dtype=int)
                im = Image.new('RGB', (IMAGE_SIZE_DATASET, IMAGE_SIZE_DATASET), (255, 255, 255))
                ImageDraw.Draw(im).rectangle(
                    (centre[0] - w, centre[1] - h, centre[0] + w, centre[1] + h),
                    fill=(0, 0, 0))
                width, height, channels = im.size[1], im.size[0], 3
                masks[x][y].append(
                    np.asarray(im.getdata(), dtype=np.int32).reshape((width, height, channels)))
    print("mask loaded")
    return masks
def applyDamageMaskPython(qr, masks):
    """Stamp one random corner mask per corner onto the QR image (pure Python).

    For each of the four corners a mask is chosen at random from
    ``masks[x][y]``. Pixels that are black in the mask become white (255)
    in the output; all other pixels replicate the grayscale QR value into
    the three channels.

    Parameters
    ----------
    qr : 2-D integer array (grayscale QR image).
    masks : 2x2 nested lists of (H, W, 3) arrays, as built by createDamageMask().

    Returns
    -------
    list of four (IMAGE_SIZE_DATASET, IMAGE_SIZE_DATASET, 3) uint32 arrays,
    in corner order (0,0), (0,1), (1,0), (1,1).
    """
    height, width = np.shape(qr)
    qr_damaged_list = [
        np.zeros((IMAGE_SIZE_DATASET, IMAGE_SIZE_DATASET, 3), dtype=np.uint32)
        for _ in range(4)
    ]
    for mask_x in 0, 1:
        for mask_y in 0, 1:
            mask = random.choice(masks[mask_x][mask_y])
            # A mask pixel counts as "damage" when all three channels are zero.
            black = ((mask[:height, :width, 0] == 0)
                     & (mask[:height, :width, 1] == 0)
                     & (mask[:height, :width, 2] == 0))
            # Vectorized replacement for the original per-pixel double loop:
            # damaged pixels become white, the rest broadcast the QR value
            # across the three channels.
            qr_damaged_list[mask_x * 2 + mask_y][:height, :width] = np.where(
                black[:, :, None], 255, qr[:, :, None])
    return qr_damaged_list
def applyDamageMaskDll(qr, masks):
    """Stamp one random corner mask per corner onto the QR image via the
    native maskWhite.dll helper (fast path; see timings in the module header).

    Returns a list of four damaged copies as int32 arrays: index 0..3 map to
    corners (0,0), (0,1), (1,0), (1,1). The first loop's full-size
    placeholders are replaced by qr-sized results in the second loop.
    """
    qr_damaged_list = []
    for qr_d in range(4):
        qr_damaged_list.append(np.zeros((IMAGE_SIZE_DATASET,IMAGE_SIZE_DATASET,3),dtype=np.int32))
    height, width = np.shape(qr)
    for mask_x in 0,1:
        for mask_y in 0,1:
            mask = random.choice(masks[mask_x][mask_y])
            qr_damaged = np.zeros((height,width,3),dtype=np.int32)
            # Pass raw buffer pointers; the DLL writes the damaged image
            # into qr_damaged in place.
            qr_p = c_void_p(qr.ctypes.data)
            mask_p = c_void_p(mask.ctypes.data)
            res_p = c_void_p(qr_damaged.ctypes.data)
            h_p = c_int(height)
            w_p = c_int(width)
            mask_fun(qr_p, mask_p, res_p, h_p, w_p)
            qr_damaged_list[mask_x*2+mask_y] = qr_damaged
    return qr_damaged_list
def generate_version_err_lev_qr(rand_string, level, version):
    """Render ``rand_string`` as a QR code with the given error-correction
    level and version, sized to (approximately) fill IMAGE_SIZE_DATASET.

    Raises if the payload does not fit the fixed version/level (fit=False).
    Returns a square int32 array of the rendered image.
    """
    modules = 21 + 4 * (version - 1) + PADDING_SIZE * 2
    box = IMAGE_SIZE_DATASET // modules
    code = qrcode.QRCode(
        version=version,
        error_correction=level,
        box_size=box,
        border=PADDING_SIZE,
    )
    code.add_data(rand_string)
    # fit=False throws an exception if the string cannot fit into the QR.
    code.make(fit=False)
    pixels = code.make_image(fill_color="black", back_color="white").getdata()
    side = modules * box
    return np.asarray(np.reshape(pixels, (side, side)), dtype=np.int32)
def generateQRDamaged():
    """Generate ``args.num_images`` random-payload QR codes, damage each with
    four random corner masks, and write the original, the four damaged
    variants, and the payload label into numbered sub-directories of
    ``args.folder``.

    Numbering continues after any entries already present in the folder, so
    reruns append rather than overwrite. Reads the module-global ``args``.
    """
    # Resume numbering after whatever is already in the output folder.
    start_index = len(os.listdir(args.folder))
    masks = createDamageMask()
    start = time.time()
    for idx in range(start_index, start_index + args.num_images):
        # Random alphanumeric payload of random length.
        size = random.randint(MIN_SIZE, MAX_SIZE)
        payload = "".join(random.choice(LETTERS) for _ in range(size))
        main_dirname = os.path.join(args.folder, '{}'.format(idx))
        os.mkdir(main_dirname)
        # Ground-truth label next to the images; `with` guarantees the file
        # is closed even if writing fails.
        with open(os.path.join(main_dirname, 'label.txt'), 'w') as f:
            f.write(payload)
        try:
            qr = generate_version_err_lev_qr(payload, level=LEVEL, version=VERSION)
        except Exception as e:
            print(e)
            exit()
        start_damaging_time = time.time()
        qr_damaged_list = applyDamageMaskDll(qr, masks)
        damaging_time = time.time() - start_damaging_time
        print('qr damaging time: {:2.0f}m:{:2.0f}s'.format(
            damaging_time // (60), int(damaging_time) % 60))
        # Save the four damaged variants as grayscale JPEGs.
        for qr_d in range(4):
            img_filename = os.path.join(main_dirname, '{}.jpg'.format(qr_d))
            image = np.asarray(qr_damaged_list[qr_d], np.float32)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cv2.imwrite(img_filename, image)
        # Save the undamaged original alongside.
        qr_filename = os.path.join(main_dirname, 'original.jpg')
        cv2.imwrite(qr_filename, np.asarray(qr, np.float32))
        elapsed_time = time.time() - start
        print('qr code:{} | elapsed time: {:2.0f}m:{:2.0f}s'.format(
            idx - start_index, elapsed_time // (60), int(elapsed_time) % 60))
    print('done')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create damaged qr codes')
    parser.add_argument('--folder', type=str, required=True, help='folder to put into qr codes')
    parser.add_argument('--num_images', type=int,required=True, help='number of string. qr codes will be version*4*4')
    # Parsed args are module-global: generateQRDamaged() reads `args` directly.
    args = parser.parse_args()
    try:
        generateQRDamaged()
    except Exception as e:
        # Best-effort run: report the error instead of dumping a traceback.
        print(e)
| [
"ctypes.WinDLL",
"os.mkdir",
"PIL.Image.new",
"argparse.ArgumentParser",
"random.randint",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.asarray",
"numpy.zeros",
"random.choice",
"time.time",
"numpy.shape",
"numpy.random.randint",
"numpy.reshape",
"qrcode.QRCode",
"PIL.ImageDraw.Draw",
"os.p... | [((1013, 1060), 'ctypes.WinDLL', 'ctypes.WinDLL', (['"""absolute path to maskWhite.dll"""'], {}), "('absolute path to maskWhite.dll')\n", (1026, 1060), False, 'import ctypes\n'), ((2208, 2220), 'numpy.shape', 'np.shape', (['qr'], {}), '(qr)\n', (2216, 2220), True, 'import numpy as np\n'), ((2884, 2896), 'numpy.shape', 'np.shape', (['qr'], {}), '(qr)\n', (2892, 2896), True, 'import numpy as np\n'), ((3503, 3601), 'qrcode.QRCode', 'qrcode.QRCode', ([], {'version': 'version', 'error_correction': 'level', 'box_size': 'box_size', 'border': 'PADDING_SIZE'}), '(version=version, error_correction=level, box_size=box_size,\n border=PADDING_SIZE)\n', (3516, 3601), False, 'import qrcode\n'), ((3816, 3888), 'numpy.reshape', 'np.reshape', (['qr', '(module_per_side * box_size, module_per_side * box_size)'], {}), '(qr, (module_per_side * box_size, module_per_side * box_size))\n', (3826, 3888), True, 'import numpy as np\n'), ((3893, 3923), 'numpy.asarray', 'np.asarray', (['qr'], {'dtype': 'np.int32'}), '(qr, dtype=np.int32)\n', (3903, 3923), True, 'import numpy as np\n'), ((4071, 4094), 'os.listdir', 'os.listdir', (['args.folder'], {}), '(args.folder)\n', (4081, 4094), False, 'import os\n'), ((4147, 4158), 'time.time', 'time.time', ([], {}), '()\n', (4156, 4158), False, 'import time\n'), ((5824, 5886), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create damaged qr codes"""'}), "(description='Create damaged qr codes')\n", (5847, 5886), False, 'import argparse\n'), ((4210, 4244), 'random.randint', 'random.randint', (['MIN_SIZE', 'MAX_SIZE'], {}), '(MIN_SIZE, MAX_SIZE)\n', (4224, 4244), False, 'import random\n'), ((4380, 4402), 'os.mkdir', 'os.mkdir', (['main_dirname'], {}), '(main_dirname)\n', (4388, 4402), False, 'import os\n'), ((4422, 4461), 'os.path.join', 'os.path.join', (['main_dirname', '"""label.txt"""'], {}), "(main_dirname, 'label.txt')\n", (4434, 4461), False, 'import os\n'), ((4858, 4869), 'time.time', 'time.time', ([], {}), 
'()\n', (4867, 4869), False, 'import time\n'), ((5359, 5401), 'os.path.join', 'os.path.join', (['main_dirname', '"""original.jpg"""'], {}), "(main_dirname, 'original.jpg')\n", (5371, 5401), False, 'import os\n'), ((5412, 5438), 'numpy.asarray', 'np.asarray', (['qr', 'np.float32'], {}), '(qr, np.float32)\n', (5422, 5438), True, 'import numpy as np\n'), ((5440, 5471), 'cv2.imwrite', 'cv2.imwrite', (['qr_filename', 'image'], {}), '(qr_filename, image)\n', (5451, 5471), False, 'import cv2\n'), ((2121, 2191), 'numpy.zeros', 'np.zeros', (['(IMAGE_SIZE_DATASET, IMAGE_SIZE_DATASET, 3)'], {'dtype': 'np.uint32'}), '((IMAGE_SIZE_DATASET, IMAGE_SIZE_DATASET, 3), dtype=np.uint32)\n', (2129, 2191), True, 'import numpy as np\n'), ((2272, 2308), 'random.choice', 'random.choice', (['masks[mask_x][mask_y]'], {}), '(masks[mask_x][mask_y])\n', (2285, 2308), False, 'import random\n'), ((2797, 2866), 'numpy.zeros', 'np.zeros', (['(IMAGE_SIZE_DATASET, IMAGE_SIZE_DATASET, 3)'], {'dtype': 'np.int32'}), '((IMAGE_SIZE_DATASET, IMAGE_SIZE_DATASET, 3), dtype=np.int32)\n', (2805, 2866), True, 'import numpy as np\n'), ((2948, 2984), 'random.choice', 'random.choice', (['masks[mask_x][mask_y]'], {}), '(masks[mask_x][mask_y])\n', (2961, 2984), False, 'import random\n'), ((3001, 3045), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {'dtype': 'np.int32'}), '((height, width, 3), dtype=np.int32)\n', (3009, 3045), True, 'import numpy as np\n'), ((4938, 4949), 'time.time', 'time.time', ([], {}), '()\n', (4947, 4949), False, 'import time\n'), ((5212, 5257), 'numpy.asarray', 'np.asarray', (['qr_damaged_list[qr_d]', 'np.float32'], {}), '(qr_damaged_list[qr_d], np.float32)\n', (5222, 5257), True, 'import numpy as np\n'), ((5268, 5307), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (5280, 5307), False, 'import cv2\n'), ((5311, 5343), 'cv2.imwrite', 'cv2.imwrite', (['img_filename', 'image'], {}), '(img_filename, image)\n', (5322, 5343), False, 
'import cv2\n'), ((5629, 5640), 'time.time', 'time.time', ([], {}), '()\n', (5638, 5640), False, 'import time\n'), ((1453, 1517), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'quarto', 'high': '(terzo + 1)', 'size': '(2)', 'dtype': 'int'}), '(low=quarto, high=terzo + 1, size=2, dtype=int)\n', (1470, 1517), True, 'import numpy as np\n'), ((1524, 1599), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(IMAGE_SIZE_DATASET, IMAGE_SIZE_DATASET)', '(255, 255, 255)'], {}), "('RGB', (IMAGE_SIZE_DATASET, IMAGE_SIZE_DATASET), (255, 255, 255))\n", (1533, 1599), False, 'from PIL import Image, ImageDraw\n'), ((1612, 1630), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (1626, 1630), False, 'from PIL import Image, ImageDraw\n'), ((4269, 4291), 'random.choice', 'random.choice', (['LETTERS'], {}), '(LETTERS)\n', (4282, 4291), False, 'import random\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.linalg import qr
# Draw 50000 samples from a standard 3-D normal (zero mean, identity
# covariance); transposed so rows are the x, y, z coordinates (shape 3 x 50000).
mean = [0, 0, 0]
cov = np.eye(3)
x_y_z = np.random.multivariate_normal(mean, cov, 50000).T
def get_orthogonal_matrix(dim):
    """Return a random ``dim`` x ``dim`` orthogonal matrix.

    Draws a Gaussian matrix and takes the Q factor of its QR decomposition.
    """
    gaussian = np.random.randn(dim, dim)
    orth, _ = qr(gaussian)
    return orth
def plot_3d(x_y_z):
    """Scatter-plot points in 3-D.

    :param x_y_z: numpy array of shape 3 X num_samples (first dimension
        holds the x, y, z coordinates).
    """
    axes = plt.figure().add_subplot(111, projection='3d')
    axes.scatter(x_y_z[0], x_y_z[1], x_y_z[2], s=1, marker='.', depthshade=False)
    # Fix all three axes to the same [-5, 5] window.
    for set_limit in (axes.set_xlim, axes.set_ylim, axes.set_zlim):
        set_limit(-5, 5)
    axes.set_xlabel('x')
    axes.set_ylabel('y')
    axes.set_zlabel('z')
def plot_2d(x_y):
    """Scatter-plot points in 2-D.

    :param x_y: numpy array of shape 2 X num_samples (first dimension
        holds the x, y coordinates).
    """
    axes = plt.figure().add_subplot(111)
    axes.scatter(x_y[0], x_y[1], s=1, marker='.')
    # Same fixed viewing window as the 3-D plots.
    axes.set_xlim(-5, 5)
    axes.set_ylim(-5, 5)
    axes.set_xlabel('x')
    axes.set_ylabel('y')
def q11():
    """Question 11: plot the untouched identity-covariance samples in 3-D."""
    caption = "Question 11: identity matrix"
    plot_3d(x_y_z)
    plt.title(caption)
    plt.show()
def q12():
    """Question 12: scale the samples with a diagonal matrix and plot.

    :return: the scaled 3 x N point array.
    """
    scaling = np.array([[0.1, 0, 0], [0, 0.5, 0], [0, 0, 2]])
    scaled = scaling.dot(x_y_z)
    plot_3d(scaled)
    plt.title("Question 12: scaling matrix")
    plt.show()
    return scaled
def q13(x_y_z12):
    """Question 13: rotate the scaled samples by a random orthogonal matrix.

    :param x_y_z12: result from question 12 to multiply by orthogonal matrix
    :return: the rotated 3 x N point array.
    """
    rotated = get_orthogonal_matrix(3).dot(x_y_z12)
    plot_3d(rotated)
    plt.title("Question 13: orthogonal matrix")
    plt.show()
    return rotated
def q14(x_y_z13):
    """Question 14: project the rotated samples onto the x-y plane and plot.

    :param x_y_z13: result from question 13 to plot in 2d
    """
    plot_2d(x_y_z13[0:2])
    plt.title("Question 14: 2d projection of question 13")
    plt.show()
def q15(x_y_z13):
    """Question 15: plot only the x-y of points whose z lies in (-0.4, 0.1).

    The original looped over a hard-coded 50000 indices; a boolean mask over
    the actual number of columns is equivalent, vectorized, and works for any
    sample count.

    :param x_y_z13: result from question 13, array of shape 3 x N.
    """
    in_slab = (x_y_z13[2] > -0.4) & (x_y_z13[2] < 0.1)
    x_y15 = np.array([x_y_z13[0][in_slab], x_y_z13[1][in_slab]])
    plot_2d(x_y15)
    plt.title("Question 15: points where 0.1 > z > -0.4")
    plt.show()
if __name__ == '__main__':
    # Question answers in order: each step feeds the transformed point
    # cloud from the previous question into the next one.
    q11()
    x_y_z12 = q12()
    x_y_z13 = q13(x_y_z12)
    q14(x_y_z13)
    q15(x_y_z13)
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.random.randn",
"scipy.linalg.qr",
"matplotlib.pyplot.figure",
"numpy.random.multivariate_normal",
"numpy.array",
"numpy.eye"
] | [((149, 158), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (155, 158), True, 'import numpy as np\n'), ((168, 215), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', '(50000)'], {}), '(mean, cov, 50000)\n', (197, 215), True, 'import numpy as np\n'), ((264, 289), 'numpy.random.randn', 'np.random.randn', (['dim', 'dim'], {}), '(dim, dim)\n', (279, 289), True, 'import numpy as np\n'), ((302, 307), 'scipy.linalg.qr', 'qr', (['H'], {}), '(H)\n', (304, 307), False, 'from scipy.linalg import qr\n'), ((516, 528), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (526, 528), True, 'import matplotlib.pyplot as plt\n'), ((991, 1003), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1001, 1003), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1262), 'matplotlib.pyplot.title', 'plt.title', (['"""Question 11: identity matrix"""'], {}), "('Question 11: identity matrix')\n", (1230, 1262), True, 'import matplotlib.pyplot as plt\n'), ((1268, 1278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1276, 1278), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1351), 'numpy.array', 'np.array', (['[[0.1, 0, 0], [0, 0.5, 0], [0, 0, 2]]'], {}), '([[0.1, 0, 0], [0, 0.5, 0], [0, 0, 2]])\n', (1312, 1351), True, 'import numpy as np\n'), ((1407, 1447), 'matplotlib.pyplot.title', 'plt.title', (['"""Question 12: scaling matrix"""'], {}), "('Question 12: scaling matrix')\n", (1416, 1447), True, 'import matplotlib.pyplot as plt\n'), ((1453, 1463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1461, 1463), True, 'import matplotlib.pyplot as plt\n'), ((1708, 1751), 'matplotlib.pyplot.title', 'plt.title', (['"""Question 13: orthogonal matrix"""'], {}), "('Question 13: orthogonal matrix')\n", (1717, 1751), True, 'import matplotlib.pyplot as plt\n'), ((1757, 1767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1765, 1767), True, 'import matplotlib.pyplot as plt\n'), ((1953, 2007), 
'matplotlib.pyplot.title', 'plt.title', (['"""Question 14: 2d projection of question 13"""'], {}), "('Question 14: 2d projection of question 13')\n", (1962, 2007), True, 'import matplotlib.pyplot as plt\n'), ((2013, 2023), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2021, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2381), 'numpy.array', 'np.array', (['[x_coords, y_coords]'], {}), '([x_coords, y_coords])\n', (2359, 2381), True, 'import numpy as np\n'), ((2407, 2460), 'matplotlib.pyplot.title', 'plt.title', (['"""Question 15: points where 0.1 > z > -0.4"""'], {}), "('Question 15: points where 0.1 > z > -0.4')\n", (2416, 2460), True, 'import matplotlib.pyplot as plt\n'), ((2466, 2476), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2474, 2476), True, 'import matplotlib.pyplot as plt\n')] |
import os
import torch
import numpy as np
from PIL import Image
import matplotlib
from torch.serialization import save
matplotlib.use('Agg')
from matplotlib import pyplot as plt
class GraphPlotter:
    """Accumulates per-epoch metric values and re-saves a line plot each call."""

    def __init__(self, save_dir, metrics: list, phase):
        self.save_dir = save_dir
        self.graph_name = 'result_{}.png'.format(phase)
        self.metrics = metrics
        self.epochs = []
        # One value series per metric name, filled on every __call__.
        self.value_dict = dict()
        for metric in metrics:
            self.value_dict[metric] = []

    def __call__(self, epoch, values: list):
        """Append one value per metric for `epoch` and save the updated graph."""
        assert (len(values) == len(self.value_dict)), 'metrics and values length shoud be same size.'
        self.epochs.append(epoch)
        fig, ax = plt.subplots()
        for i, metric in enumerate(self.metrics):
            self.value_dict[metric].append(values[i])
            ax.plot(self.epochs, self.value_dict[metric], label=metric)
        ax.legend(loc=0)
        # Bug fix: the original called plt.title(...) *after* fig.savefig, so
        # the title never appeared in the written image. Set it on the axes
        # before saving instead.
        ax.set_title(', '.join(str(m) for m in self.metrics))
        fig.tight_layout()  # adjust subplot layout
        fig.savefig(os.path.join(self.save_dir, self.graph_name))
        plt.close(fig)
# Normalisation statistics used to undo the input preprocessing.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
def inputs_convert(img, N):
    """Denormalise sample ``N`` of a batched tensor into an HWC numpy image.

    Only the first three channels are used when the tensor has extra ones.
    """
    channel_count = img.size()[1]
    selected = img[N, 0:3, :, :] if channel_count > 3 else img[N, :, :, :]
    arr = selected.to('cpu').detach().numpy().copy()
    arr = arr.transpose(1, 2, 0)
    return arr * std + mean
def outputs_convert(img, N):
    """Detach sample ``N`` of a batched map to a squeezed numpy array."""
    return img[N, :, :].to('cpu').detach().numpy().copy().squeeze()
def divide_im_geo(img, geo_num, N):
    """Split sample ``N`` into a denormalised RGB image and extra channel maps.

    :param img: batched tensor whose first 3 channels are treated as RGB and
        the following ``geo_num`` channels as single-channel "geo" maps.
    :return: (HWC rgb image, list of 2-D geo maps)
    """
    rgb_img = img[N, 0:3, :, :].to('cpu').detach().numpy().copy()
    rgb_img = rgb_img.transpose(1, 2, 0) * std + mean
    geo_imgs = [
        img[N, 3 + ch, :, :].to('cpu').detach().numpy().copy()
        for ch in range(geo_num)
    ]
    return rgb_img, geo_imgs
class Plotter:
    """Renders grids of input images, targets and model outputs and saves
    them as PNG files under ``<save_dir>/images``."""
    def __init__(self, opts, vis_num, save_dir):
        # vis_num is number of batch to display
        self.opts = opts
        self.vis_num = vis_num
        self.save_dir = save_dir
    @torch.no_grad()
    def __call__(self, epoch, inputs, outputs, target, phase, num=None):
        """Convert up to vis_num samples of the batch to numpy images and
        hand them to _display_images."""
        b, _, _, _ = inputs.size()
        # Never display more samples than the batch actually contains.
        if b < self.vis_num:
            self.vis_num = b
        in_im = []
        out_im = []
        targets = []
        for n in range(self.vis_num):
            in_im.append(inputs_convert(inputs, n))
            out_im.append(outputs_convert(outputs, n))
            targets.append(outputs_convert(target, n))
        self._display_images(epoch, in_im, out_im, targets, phase, num)
    def _display_images(self, epoch, images1, images2, targets, phase, num, label_font_size=8):
        """Draw a 3 x vis_num grid (row 1: inputs, row 2: ground truth,
        row 3: predictions) and save it to <save_dir>/images."""
        if not (images1 and images2):
            print("No images to display.")
            return
        plt.figure()
        i = 1
        for (im1, im2, tar) in zip(images1, images2, targets):
            # inputs_convert returns floats; scale to uint8 for PIL display.
            im1 = Image.fromarray(np.uint8(im1*255))
            plt.subplot(3, self.vis_num, i)
            plt.title('Input Image', fontsize=10)
            plt.imshow(im1)
            plt.subplot(3, self.vis_num, i+self.vis_num)
            if num is not None:
                plt.title('Ground Truth {}'.format(int(num)), fontsize=10)
            else:
                # No explicit count supplied: title with the map's sum instead.
                plt.title('Ground Truth {:.2f}'.format(tar.sum()), fontsize=10)
            plt.imshow(tar, cmap='jet')
            plt.subplot(3, self.vis_num, i+(self.vis_num*2))
            plt.title('Prediction {:.2f}'.format(im2.sum()), fontsize=10)
            plt.imshow(im2, cmap='jet')
            i += 1
        plt.tight_layout()
        output_img_name = self.opts.dataset + '_{}_{}.png'.format(phase, epoch)
        if not os.path.exists(os.path.join(self.save_dir, 'images')):
            os.mkdir(os.path.join(self.save_dir, 'images'))
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot",
"numpy.uint8",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.array",
"torch.no_grad",
"matplotlib.pyplot.subplots"
] | [((129, 150), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (143, 150), False, 'import matplotlib\n'), ((1154, 1185), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1162, 1185), True, 'import numpy as np\n'), ((1193, 1224), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1201, 1224), True, 'import numpy as np\n'), ((2204, 2219), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2217, 2219), False, 'import torch\n'), ((751, 765), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (763, 765), True, 'from matplotlib import pyplot as plt\n'), ((1097, 1120), 'matplotlib.pyplot.title', 'plt.title', (['self.metrics'], {}), '(self.metrics)\n', (1106, 1120), True, 'from matplotlib import pyplot as plt\n'), ((1130, 1141), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1139, 1141), True, 'from matplotlib import pyplot as plt\n'), ((2950, 2962), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2960, 2962), True, 'from matplotlib import pyplot as plt\n'), ((3754, 3772), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3770, 3772), True, 'from matplotlib import pyplot as plt\n'), ((4088, 4099), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4097, 4099), True, 'from matplotlib import pyplot as plt\n'), ((1040, 1084), 'os.path.join', 'os.path.join', (['self.save_dir', 'self.graph_name'], {}), '(self.save_dir, self.graph_name)\n', (1052, 1084), False, 'import os\n'), ((3113, 3144), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'self.vis_num', 'i'], {}), '(3, self.vis_num, i)\n', (3124, 3144), True, 'from matplotlib import pyplot as plt\n'), ((3158, 3195), 'matplotlib.pyplot.title', 'plt.title', (['"""Input Image"""'], {'fontsize': '(10)'}), "('Input Image', fontsize=10)\n", (3167, 3195), True, 'from matplotlib import pyplot as plt\n'), ((3210, 3225), 'matplotlib.pyplot.imshow', 
'plt.imshow', (['im1'], {}), '(im1)\n', (3220, 3225), True, 'from matplotlib import pyplot as plt\n'), ((3239, 3285), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'self.vis_num', '(i + self.vis_num)'], {}), '(3, self.vis_num, i + self.vis_num)\n', (3250, 3285), True, 'from matplotlib import pyplot as plt\n'), ((3508, 3535), 'matplotlib.pyplot.imshow', 'plt.imshow', (['tar'], {'cmap': '"""jet"""'}), "(tar, cmap='jet')\n", (3518, 3535), True, 'from matplotlib import pyplot as plt\n'), ((3549, 3599), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'self.vis_num', '(i + self.vis_num * 2)'], {}), '(3, self.vis_num, i + self.vis_num * 2)\n', (3560, 3599), True, 'from matplotlib import pyplot as plt\n'), ((3687, 3714), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im2'], {'cmap': '"""jet"""'}), "(im2, cmap='jet')\n", (3697, 3714), True, 'from matplotlib import pyplot as plt\n'), ((4023, 4077), 'os.path.join', 'os.path.join', (['self.save_dir', '"""images"""', 'output_img_name'], {}), "(self.save_dir, 'images', output_img_name)\n", (4035, 4077), False, 'import os\n'), ((3079, 3098), 'numpy.uint8', 'np.uint8', (['(im1 * 255)'], {}), '(im1 * 255)\n', (3087, 3098), True, 'import numpy as np\n'), ((3887, 3924), 'os.path.join', 'os.path.join', (['self.save_dir', '"""images"""'], {}), "(self.save_dir, 'images')\n", (3899, 3924), False, 'import os\n'), ((3949, 3986), 'os.path.join', 'os.path.join', (['self.save_dir', '"""images"""'], {}), "(self.save_dir, 'images')\n", (3961, 3986), False, 'import os\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import time
from functools import partial
import numpy as np
import tqdm
import pgl
import paddle
from pgl.utils.logger import log
from pgl.utils.data import Dataloader
from model import GraphSage
from dataset import ShardedDataset, batch_fn
def train(dataloader, model, feature, criterion, optim, log_per_step=100):
    """Run one training epoch.

    :return: (sample-weighted mean loss, sample-weighted mean accuracy)
    """
    model.train()
    step = 0
    loss_sum = 0.
    acc_sum = 0.
    sample_count = 0
    for g, sample_index, index, label in dataloader:
        step += 1
        batch_size = len(index)
        g.tensor()
        sample_index = paddle.to_tensor(sample_index)
        index = paddle.to_tensor(index)
        label = paddle.to_tensor(label)
        feat = paddle.gather(feature, sample_index)
        pred = paddle.gather(model(g, feat), index)
        loss = criterion(pred, label)
        loss.backward()
        acc = paddle.metric.accuracy(input=pred, label=label, k=1)
        optim.step()
        optim.clear_grad()
        # Weight the running sums by batch size so the epoch mean is exact.
        loss_sum += loss.numpy() * batch_size
        acc_sum += acc.numpy() * batch_size
        sample_count += batch_size
        if step % log_per_step == 0:
            log.info("Batch %s %s-Loss %s %s-Acc %s" %
                     (step, "train", loss.numpy(), "train", acc.numpy()))
    return loss_sum / sample_count, acc_sum / sample_count
@paddle.no_grad()
def eval(dataloader, model, feature, criterion):
    """Evaluate the model on a dataloader.

    :return: (mean of per-batch losses, mean of per-batch accuracies)
    """
    model.eval()
    losses = []
    accuracies = []
    for g, sample_index, index, label in dataloader:
        g.tensor()
        sample_index = paddle.to_tensor(sample_index)
        index = paddle.to_tensor(index)
        label = paddle.to_tensor(label)
        feat = paddle.gather(feature, sample_index)
        pred = paddle.gather(model(g, feat), index)
        loss = criterion(pred, label)
        acc = paddle.metric.accuracy(input=pred, label=label, k=1)
        losses.append(loss.numpy())
        accuracies.append(acc.numpy())
    return np.mean(losses), np.mean(accuracies)
def main(args):
    """Train GraphSage on the Reddit dataset, logging val/test accuracy.

    Bug fix: the validation Dataloader was previously built from ``test_ds``,
    so the "val" metrics (and the best-epoch selection at the end) were in
    fact computed on the test split. It now uses ``val_ds``.
    """
    if paddle.distributed.get_world_size() > 1:
        paddle.distributed.init_parallel_env()
    data = pgl.dataset.RedditDataset(args.normalize, args.symmetry)
    log.info("Preprocess finish")
    log.info("Train Examples: %s" % len(data.train_index))
    log.info("Val Examples: %s" % len(data.val_index))
    log.info("Test Examples: %s" % len(data.test_index))
    log.info("Num nodes %s" % data.graph.num_nodes)
    log.info("Num edges %s" % data.graph.num_edges)
    log.info("Average Degree %s" % np.mean(data.graph.indegree()))
    graph = data.graph
    train_index = data.train_index
    val_index = data.val_index
    test_index = data.test_index
    train_label = data.train_label
    val_label = data.val_label
    test_label = data.test_label
    model = GraphSage(
        input_size=data.feature.shape[-1],
        num_class=data.num_classes,
        hidden_size=args.hidden_size,
        num_layers=len(args.samples))
    model = paddle.DataParallel(model)
    criterion = paddle.nn.loss.CrossEntropyLoss()
    optim = paddle.optimizer.Adam(
        learning_rate=args.lr,
        parameters=model.parameters(),
        weight_decay=0.001)
    feature = paddle.to_tensor(data.feature)
    train_ds = ShardedDataset(train_index, train_label)
    val_ds = ShardedDataset(val_index, val_label)
    test_ds = ShardedDataset(test_index, test_label)
    collate_fn = partial(batch_fn, graph=graph, samples=args.samples)
    train_loader = Dataloader(
        train_ds,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.sample_workers,
        collate_fn=collate_fn)
    # Fixed: validate on the validation split (was test_ds by mistake).
    val_loader = Dataloader(
        val_ds,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.sample_workers,
        collate_fn=collate_fn)
    test_loader = Dataloader(
        test_ds,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.sample_workers,
        collate_fn=collate_fn)
    cal_val_acc = []
    cal_test_acc = []
    cal_val_loss = []
    for epoch in tqdm.tqdm(range(args.epoch)):
        train_loss, train_acc = train(train_loader, model, feature, criterion,
                                      optim)
        log.info("Runing epoch:%s\t train_loss:%s\t train_acc:%s", epoch,
                 train_loss, train_acc)
        val_loss, val_acc = eval(val_loader, model, feature, criterion)
        cal_val_acc.append(val_acc)
        cal_val_loss.append(val_loss)
        log.info("Runing epoch:%s\t val_loss:%s\t val_acc:%s", epoch, val_loss,
                 val_acc)
        test_loss, test_acc = eval(test_loader, model, feature, criterion)
        cal_test_acc.append(test_acc)
        log.info("Runing epoch:%s\t test_loss:%s\t test_acc:%s", epoch,
                 test_loss, test_acc)
    # Report the test accuracy at the epoch with the best validation accuracy.
    log.info("Runs %s: Model: %s Best Test Accuracy: %f" %
             (0, "graphsage", cal_test_acc[np.argmax(cal_val_acc)]))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='graphsage')
parser.add_argument(
"--normalize", action='store_true', help="normalize features")
parser.add_argument(
"--symmetry", action='store_true', help="undirect graph")
parser.add_argument("--sample_workers", type=int, default=5)
parser.add_argument("--epoch", type=int, default=10)
parser.add_argument("--hidden_size", type=int, default=128)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument('--samples', nargs='+', type=int, default=[25, 10])
args = parser.parse_args()
log.info(args)
main(args)
| [
"functools.partial",
"paddle.distributed.get_world_size",
"pgl.utils.logger.log.info",
"argparse.ArgumentParser",
"paddle.nn.loss.CrossEntropyLoss",
"numpy.argmax",
"dataset.ShardedDataset",
"paddle.no_grad",
"paddle.metric.accuracy",
"paddle.distributed.init_parallel_env",
"pgl.utils.data.Datal... | [((1982, 1998), 'paddle.no_grad', 'paddle.no_grad', ([], {}), '()\n', (1996, 1998), False, 'import paddle\n'), ((2779, 2835), 'pgl.dataset.RedditDataset', 'pgl.dataset.RedditDataset', (['args.normalize', 'args.symmetry'], {}), '(args.normalize, args.symmetry)\n', (2804, 2835), False, 'import pgl\n'), ((2840, 2869), 'pgl.utils.logger.log.info', 'log.info', (['"""Preprocess finish"""'], {}), "('Preprocess finish')\n", (2848, 2869), False, 'from pgl.utils.logger import log\n'), ((3045, 3092), 'pgl.utils.logger.log.info', 'log.info', (["('Num nodes %s' % data.graph.num_nodes)"], {}), "('Num nodes %s' % data.graph.num_nodes)\n", (3053, 3092), False, 'from pgl.utils.logger import log\n'), ((3097, 3144), 'pgl.utils.logger.log.info', 'log.info', (["('Num edges %s' % data.graph.num_edges)"], {}), "('Num edges %s' % data.graph.num_edges)\n", (3105, 3144), False, 'from pgl.utils.logger import log\n'), ((3627, 3653), 'paddle.DataParallel', 'paddle.DataParallel', (['model'], {}), '(model)\n', (3646, 3653), False, 'import paddle\n'), ((3671, 3704), 'paddle.nn.loss.CrossEntropyLoss', 'paddle.nn.loss.CrossEntropyLoss', ([], {}), '()\n', (3702, 3704), False, 'import paddle\n'), ((3854, 3884), 'paddle.to_tensor', 'paddle.to_tensor', (['data.feature'], {}), '(data.feature)\n', (3870, 3884), False, 'import paddle\n'), ((3901, 3941), 'dataset.ShardedDataset', 'ShardedDataset', (['train_index', 'train_label'], {}), '(train_index, train_label)\n', (3915, 3941), False, 'from dataset import ShardedDataset, batch_fn\n'), ((3955, 3991), 'dataset.ShardedDataset', 'ShardedDataset', (['val_index', 'val_label'], {}), '(val_index, val_label)\n', (3969, 3991), False, 'from dataset import ShardedDataset, batch_fn\n'), ((4006, 4044), 'dataset.ShardedDataset', 'ShardedDataset', (['test_index', 'test_label'], {}), '(test_index, test_label)\n', (4020, 4044), False, 'from dataset import ShardedDataset, batch_fn\n'), ((4063, 4115), 'functools.partial', 'partial', (['batch_fn'], 
{'graph': 'graph', 'samples': 'args.samples'}), '(batch_fn, graph=graph, samples=args.samples)\n', (4070, 4115), False, 'from functools import partial\n'), ((4136, 4259), 'pgl.utils.data.Dataloader', 'Dataloader', (['train_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.sample_workers', 'collate_fn': 'collate_fn'}), '(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=\n args.sample_workers, collate_fn=collate_fn)\n', (4146, 4259), False, 'from pgl.utils.data import Dataloader\n'), ((4313, 4436), 'pgl.utils.data.Dataloader', 'Dataloader', (['test_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.sample_workers', 'collate_fn': 'collate_fn'}), '(test_ds, batch_size=args.batch_size, shuffle=False, num_workers=\n args.sample_workers, collate_fn=collate_fn)\n', (4323, 4436), False, 'from pgl.utils.data import Dataloader\n'), ((4491, 4614), 'pgl.utils.data.Dataloader', 'Dataloader', (['test_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.sample_workers', 'collate_fn': 'collate_fn'}), '(test_ds, batch_size=args.batch_size, shuffle=False, num_workers=\n args.sample_workers, collate_fn=collate_fn)\n', (4501, 4614), False, 'from pgl.utils.data import Dataloader\n'), ((5648, 5696), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""graphsage"""'}), "(description='graphsage')\n", (5671, 5696), False, 'import argparse\n'), ((6302, 6316), 'pgl.utils.logger.log.info', 'log.info', (['args'], {}), '(args)\n', (6310, 6316), False, 'from pgl.utils.logger import log\n'), ((1200, 1230), 'paddle.to_tensor', 'paddle.to_tensor', (['sample_index'], {}), '(sample_index)\n', (1216, 1230), False, 'import paddle\n'), ((1247, 1270), 'paddle.to_tensor', 'paddle.to_tensor', (['index'], {}), '(index)\n', (1263, 1270), False, 'import paddle\n'), ((1287, 1310), 'paddle.to_tensor', 'paddle.to_tensor', (['label'], {}), '(label)\n', (1303, 1310), False, 'import 
paddle\n'), ((1327, 1363), 'paddle.gather', 'paddle.gather', (['feature', 'sample_index'], {}), '(feature, sample_index)\n', (1340, 1363), False, 'import paddle\n'), ((1409, 1435), 'paddle.gather', 'paddle.gather', (['pred', 'index'], {}), '(pred, index)\n', (1422, 1435), False, 'import paddle\n'), ((1512, 1564), 'paddle.metric.accuracy', 'paddle.metric.accuracy', ([], {'input': 'pred', 'label': 'label', 'k': '(1)'}), '(input=pred, label=label, k=1)\n', (1534, 1564), False, 'import paddle\n'), ((2191, 2221), 'paddle.to_tensor', 'paddle.to_tensor', (['sample_index'], {}), '(sample_index)\n', (2207, 2221), False, 'import paddle\n'), ((2238, 2261), 'paddle.to_tensor', 'paddle.to_tensor', (['index'], {}), '(index)\n', (2254, 2261), False, 'import paddle\n'), ((2278, 2301), 'paddle.to_tensor', 'paddle.to_tensor', (['label'], {}), '(label)\n', (2294, 2301), False, 'import paddle\n'), ((2318, 2354), 'paddle.gather', 'paddle.gather', (['feature', 'sample_index'], {}), '(feature, sample_index)\n', (2331, 2354), False, 'import paddle\n'), ((2400, 2426), 'paddle.gather', 'paddle.gather', (['pred', 'index'], {}), '(pred, index)\n', (2413, 2426), False, 'import paddle\n'), ((2479, 2531), 'paddle.metric.accuracy', 'paddle.metric.accuracy', ([], {'input': 'pred', 'label': 'label', 'k': '(1)'}), '(input=pred, label=label, k=1)\n', (2501, 2531), False, 'import paddle\n'), ((2618, 2635), 'numpy.mean', 'np.mean', (['loss_all'], {}), '(loss_all)\n', (2625, 2635), True, 'import numpy as np\n'), ((2637, 2653), 'numpy.mean', 'np.mean', (['acc_all'], {}), '(acc_all)\n', (2644, 2653), True, 'import numpy as np\n'), ((2679, 2714), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (2712, 2714), False, 'import paddle\n'), ((2728, 2766), 'paddle.distributed.init_parallel_env', 'paddle.distributed.init_parallel_env', ([], {}), '()\n', (2764, 2766), False, 'import paddle\n'), ((4896, 4988), 'pgl.utils.logger.log.info', 'log.info', (['"""Runing epoch:%s\t 
train_loss:%s\t train_acc:%s"""', 'epoch', 'train_loss', 'train_acc'], {}), "('Runing epoch:%s\\t train_loss:%s\\t train_acc:%s', epoch,\n train_loss, train_acc)\n", (4904, 4988), False, 'from pgl.utils.logger import log\n'), ((5156, 5241), 'pgl.utils.logger.log.info', 'log.info', (['"""Runing epoch:%s\t val_loss:%s\t val_acc:%s"""', 'epoch', 'val_loss', 'val_acc'], {}), "('Runing epoch:%s\\t val_loss:%s\\t val_acc:%s', epoch, val_loss, val_acc\n )\n", (5164, 5241), False, 'from pgl.utils.logger import log\n'), ((5375, 5463), 'pgl.utils.logger.log.info', 'log.info', (['"""Runing epoch:%s\t test_loss:%s\t test_acc:%s"""', 'epoch', 'test_loss', 'test_acc'], {}), "('Runing epoch:%s\\t test_loss:%s\\t test_acc:%s', epoch, test_loss,\n test_acc)\n", (5383, 5463), False, 'from pgl.utils.logger import log\n'), ((5580, 5602), 'numpy.argmax', 'np.argmax', (['cal_val_acc'], {}), '(cal_val_acc)\n', (5589, 5602), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from preprocess import *
from driveModel import driveModel
from keras.models import load_model
# Constants
PROCESSED_FILE_NAME = "data_processed/processed.txt"
BATCH_SIZE = 256
preprocess = False # Flip this switch on to prepare dataset and save static copies to disk.
###### HELPER FUNCTIONS #######
# Import data from new dataset reference file.
def importData():
    """Parse the processed dataset reference file.

    :return: list of whitespace-split token lists, one per line of
        PROCESSED_FILE_NAME (each line is expected to hold an image path
        and a steering value — confirm against the preprocessing step).
    """
    # Context manager guarantees the file is closed even if parsing fails;
    # the original relied on an explicit close() with no try/finally.
    with open(PROCESSED_FILE_NAME, 'r') as db_file:
        return [line.split() for line in db_file]
# Generator for training data.
def training_generator():
    """Yield (images, steering_angles) batches from XY_train indefinitely.

    The training set is reshuffled at the start of every pass. Datapoints
    whose image cannot be read are skipped.
    """
    global XY_train
    while True:
        samples = shuffle(XY_train)
        for start in range(0, len(samples), BATCH_SIZE):
            batch = samples[start:start + BATCH_SIZE]
            X_train_batch = []
            Y_train_batch = []
            for datapoint in batch:
                steering_ang = float(datapoint[1])
                # Fixed: the original bare `except: pass` also swallowed
                # KeyboardInterrupt/SystemExit; catch Exception and keep
                # only the read that can actually fail inside the try.
                try:
                    img = cv2.cvtColor(cv2.imread(datapoint[0]), cv2.COLOR_BGR2RGB)
                except Exception:
                    continue
                X_train_batch.append(img)
                Y_train_batch.append(steering_ang)
            yield (np.array(X_train_batch), np.array(Y_train_batch))
# Generator for validation data.
def valid_generator():
    """Yield (images, steering_angles) batches from XY_valid indefinitely.

    The validation set is reshuffled at the start of every pass. Datapoints
    whose image cannot be read are skipped.
    """
    global XY_valid
    while True:
        samples = shuffle(XY_valid)
        for start in range(0, len(samples), BATCH_SIZE):
            batch = samples[start:start + BATCH_SIZE]
            X_valid_batch = []
            Y_valid_batch = []
            for datapoint in batch:
                steering_ang = float(datapoint[1])
                # Fixed: the original bare `except: pass` also swallowed
                # KeyboardInterrupt/SystemExit; catch Exception and keep
                # only the read that can actually fail inside the try.
                try:
                    img = cv2.cvtColor(cv2.imread(datapoint[0]), cv2.COLOR_BGR2RGB)
                except Exception:
                    continue
                X_valid_batch.append(img)
                Y_valid_batch.append(steering_ang)
            yield (np.array(X_valid_batch), np.array(Y_valid_batch))
###### MAIN SEQUENCE ######
# Can optionally redo preprocessing with a single bool switch:
if preprocess:
    preprocessImgs()
# Import prepared dataset
train_db = importData()
# Split training and validation sets (90% train / 10% validation)
XY_train, XY_valid = train_test_split(train_db, test_size=0.1)
# Trains model one epoch at a time, saving entire model to disk at each iteration.
# Epoch 0 only initialises and saves the untrained model; every later
# iteration reloads the previous checkpoint, fits one epoch, and re-saves.
for i in range(10):
    print("epoch", i)
    if (i==0):
        initialized_model = driveModel()
        initialized_model.save(str(i+1)+'model.h5')
        continue
    initialized_model = load_model(str(i) + 'model.h5')
    # steps_per_epoch doubles the pass count (2 * len / batch) — presumably
    # intentional to see each shuffled sample twice per epoch; confirm.
    history_object = initialized_model.fit_generator(generator=training_generator(),
                        steps_per_epoch=np.ceil(2*len(XY_train)/BATCH_SIZE),
                        epochs=1,
                        verbose = 1,
                        validation_data=valid_generator(),
                        validation_steps=np.ceil(len(XY_valid)/BATCH_SIZE))
    initialized_model.save(str(i+1) + 'model.h5')
###### END OF MAIN SCRIPT #######
| [
"sklearn.model_selection.train_test_split",
"driveModel.driveModel",
"cv2.imread",
"numpy.array",
"sklearn.utils.shuffle"
] | [((2302, 2343), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_db'], {'test_size': '(0.1)'}), '(train_db, test_size=0.1)\n', (2318, 2343), False, 'from sklearn.model_selection import train_test_split\n'), ((771, 788), 'sklearn.utils.shuffle', 'shuffle', (['XY_train'], {}), '(XY_train)\n', (778, 788), False, 'from sklearn.utils import shuffle\n'), ((1473, 1490), 'sklearn.utils.shuffle', 'shuffle', (['XY_valid'], {}), '(XY_valid)\n', (1480, 1490), False, 'from sklearn.utils import shuffle\n'), ((2513, 2525), 'driveModel.driveModel', 'driveModel', ([], {}), '()\n', (2523, 2525), False, 'from driveModel import driveModel\n'), ((1312, 1335), 'numpy.array', 'np.array', (['X_train_batch'], {}), '(X_train_batch)\n', (1320, 1335), True, 'import numpy as np\n'), ((1337, 1360), 'numpy.array', 'np.array', (['Y_train_batch'], {}), '(Y_train_batch)\n', (1345, 1360), True, 'import numpy as np\n'), ((2014, 2037), 'numpy.array', 'np.array', (['X_valid_batch'], {}), '(X_valid_batch)\n', (2022, 2037), True, 'import numpy as np\n'), ((2039, 2062), 'numpy.array', 'np.array', (['Y_valid_batch'], {}), '(Y_valid_batch)\n', (2047, 2062), True, 'import numpy as np\n'), ((1101, 1125), 'cv2.imread', 'cv2.imread', (['datapoint[0]'], {}), '(datapoint[0])\n', (1111, 1125), False, 'import cv2\n'), ((1804, 1828), 'cv2.imread', 'cv2.imread', (['datapoint[0]'], {}), '(datapoint[0])\n', (1814, 1828), False, 'import cv2\n')] |
import numpy as np
import cv2
_useGaussian = True
_gaussianPixels = 21
def prepare_frame(frame):
    """Return (frame, gray): the original frame plus a grayscale copy,
    Gaussian-blurred when the module-level _useGaussian flag is set."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if _useGaussian:
        kernel = (_gaussianPixels, _gaussianPixels)
        gray = cv2.GaussianBlur(gray, kernel, 0)
    return frame, gray
def find_largest_contour(binary_threshold):
    """Return the largest external contour with more than 6 points and
    area > 2000 from a binary image, or a fallback.

    Fallback quirk (preserved from the original): when no contour passes
    both filters, the *first* detected contour is returned if it has more
    than 6 points — even if its area is below 2000 — otherwise None.
    """
    # 3-value return matches the OpenCV 3.x findContours signature used here.
    (img, contours, hierarchy) = cv2.findContours(
        binary_threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    largest_area = 0
    largest_contour = contours[0] if len(contours) > 0 else None
    for cnt in contours:
        if len(cnt) < 6:
            continue
        # Compute the area once per contour; the original called
        # cv2.contourArea up to three times on the same contour.
        area = cv2.contourArea(cnt)
        if area < 2000:
            continue
        if area > largest_area:
            largest_area = area
            largest_contour = cnt
    if largest_contour is not None:
        largest_contour = largest_contour if len(largest_contour) > 6 else None
    return largest_contour
def clean_frame_within_contour(src, contour):
    """Zero out every pixel of src outside the ellipse fitted to contour."""
    fitted = cv2.fitEllipse(contour)
    # Filled white ellipse on black acts as a keep-mask for bitwise AND.
    ellipse_mask = cv2.ellipse(np.zeros_like(src), fitted, (255, 255, 255), -1)
    return np.bitwise_and(src, ellipse_mask)
def calculate_ellipse(contour):
    """Return the ellipse fitted (least-squares) to the contour points."""
    return cv2.fitEllipse(contour)
'''' DRAWING FUNCTIONS '''
__font_face, __font_scale, = cv2.FONT_HERSHEY_SIMPLEX, 0.75
__font_color, __font_thickness = (255, 255, 255), 2
def draw_ellipse(frame, contour, is_fall=False):
    """Draw the contour's fitted ellipse on frame.

    Uses (0, 0, 255) when is_fall is True, (0, 255, 0) otherwise.
    :return: (frame, fitted ellipse)
    """
    fitted = cv2.fitEllipse(contour)
    # Note: identity check against True is kept from the original, so only
    # the literal True (not merely truthy values) selects the fall colour.
    outline = (0, 0, 255) if is_fall is True else (0, 255, 0)
    cv2.ellipse(frame, fitted, outline, 2)
    return frame, fitted
def draw_angles(vector_angles, delta_angle, src):
    """Overlay the two PCA angles and their delta as text lines on src."""
    labels = (
        ("primary PCA: " + str(int(vector_angles[0])), (10, 30)),
        ("secondary PCA: " + str(int(vector_angles[1])), (10, 60)),
        ("PCA delta: " + str(int(delta_angle)), (10, 90)),
    )
    for text, origin in labels:
        cv2.putText(src, text, origin, __font_face, __font_scale, __font_color, __font_thickness)
    return src
def draw_movement(movement, src):
    """Overlay the movement value as a text line on src."""
    label = "movement: " + str(movement)
    cv2.putText(src, label, (10, 120), __font_face, __font_scale,
                __font_color, __font_thickness)
    return src
"cv2.GaussianBlur",
"cv2.contourArea",
"numpy.zeros_like",
"cv2.putText",
"cv2.cvtColor",
"cv2.ellipse",
"cv2.fitEllipse",
"numpy.bitwise_and"
] | [((182, 221), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (194, 221), False, 'import cv2\n'), ((1089, 1107), 'numpy.zeros_like', 'np.zeros_like', (['src'], {}), '(src)\n', (1102, 1107), True, 'import numpy as np\n'), ((1122, 1145), 'cv2.fitEllipse', 'cv2.fitEllipse', (['contour'], {}), '(contour)\n', (1136, 1145), False, 'import cv2\n'), ((1157, 1204), 'cv2.ellipse', 'cv2.ellipse', (['mask', 'ellipse', '(255, 255, 255)', '(-1)'], {}), '(mask, ellipse, (255, 255, 255), -1)\n', (1168, 1204), False, 'import cv2\n'), ((1215, 1240), 'numpy.bitwise_and', 'np.bitwise_and', (['src', 'mask'], {}), '(src, mask)\n', (1229, 1240), True, 'import numpy as np\n'), ((1304, 1327), 'cv2.fitEllipse', 'cv2.fitEllipse', (['contour'], {}), '(contour)\n', (1318, 1327), False, 'import cv2\n'), ((1552, 1575), 'cv2.fitEllipse', 'cv2.fitEllipse', (['contour'], {}), '(contour)\n', (1566, 1575), False, 'import cv2\n'), ((1640, 1677), 'cv2.ellipse', 'cv2.ellipse', (['frame', 'ellipse', 'color', '(2)'], {}), '(frame, ellipse, color, 2)\n', (1651, 1677), False, 'import cv2\n'), ((1974, 2067), 'cv2.putText', 'cv2.putText', (['src', 'angle1', 'org1', '__font_face', '__font_scale', '__font_color', '__font_thickness'], {}), '(src, angle1, org1, __font_face, __font_scale, __font_color,\n __font_thickness)\n', (1985, 2067), False, 'import cv2\n'), ((2068, 2161), 'cv2.putText', 'cv2.putText', (['src', 'angle2', 'org2', '__font_face', '__font_scale', '__font_color', '__font_thickness'], {}), '(src, angle2, org2, __font_face, __font_scale, __font_color,\n __font_thickness)\n', (2079, 2161), False, 'import cv2\n'), ((2162, 2255), 'cv2.putText', 'cv2.putText', (['src', 'angle3', 'org3', '__font_face', '__font_scale', '__font_color', '__font_thickness'], {}), '(src, angle3, org3, __font_face, __font_scale, __font_color,\n __font_thickness)\n', (2173, 2255), False, 'import cv2\n'), ((2367, 2457), 'cv2.putText', 'cv2.putText', (['src', 'text', 
'org', '__font_face', '__font_scale', '__font_color', '__font_thickness'], {}), '(src, text, org, __font_face, __font_scale, __font_color,\n __font_thickness)\n', (2378, 2457), False, 'import cv2\n'), ((250, 311), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(_gaussianPixels, _gaussianPixels)', '(0)'], {}), '(gray, (_gaussianPixels, _gaussianPixels), 0)\n', (266, 311), False, 'import cv2\n'), ((680, 700), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (695, 700), False, 'import cv2\n'), ((742, 762), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (757, 762), False, 'import cv2\n'), ((806, 826), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (821, 826), False, 'import cv2\n')] |
"""
Benchmark inference speed on ImageNet
Example (run on Firefly RK3399):
python mali_imagenet_bench.py --target-host 'llvm -target=aarch64-linux-gnu' --host 192.168.0.100 --port 9090 --model mobilenet
"""
import time
import argparse
import numpy as np
import tvm
import nnvm.compiler
import nnvm.testing
from tvm.contrib import util, rpc
from tvm.contrib import graph_runtime as runtime
def run_case(model, dtype):
    """Compile one ImageNet network for a Mali GPU, deploy it and time inference.

    Parameters
    ----------
    model : str
        One of 'vgg16', 'resnet18' or 'mobilenet'.
    dtype : str
        'float32' or 'float16'.

    Reads the module-level ``args``, ``image_shape`` and ``data_shape`` set in
    the ``__main__`` section, and prints a one-line timing summary.
    """
    # Pick the nnvm.testing workload matching the requested model name.
    if model == 'vgg16':
        net, params = nnvm.testing.vgg.get_workload(
            num_layers=16, batch_size=1, image_shape=image_shape, dtype=dtype)
    elif model == 'resnet18':
        net, params = nnvm.testing.resnet.get_workload(
            num_layers=18, batch_size=1, image_shape=image_shape, dtype=dtype)
    elif model == 'mobilenet':
        net, params = nnvm.testing.mobilenet.get_workload(
            batch_size=1, image_shape=image_shape, dtype=dtype)
    else:
        raise ValueError('no benchmark prepared for {}.'.format(model))

    # float16 graphs are compiled at a lower optimization level.
    opt_level = 2 if dtype == 'float32' else 1
    with nnvm.compiler.build_config(opt_level=opt_level):
        graph, lib, params = nnvm.compiler.build(
            net, tvm.target.mali(), shape={"data": data_shape}, params=params,
            dtype=dtype, target_host=args.target_host)

    # Export the compiled operators so they can be shipped to the device.
    workdir = util.tempdir()
    packed_path = workdir.relpath('net.tar')
    lib.export_library(packed_path)

    if args.host is None:
        # Local execution on an OpenCL device.
        ctx = tvm.cl(0)
        rlib = lib
        rparams = params
    else:
        # Remote execution through the TVM RPC server running on the board.
        remote = rpc.connect(args.host, args.port)
        remote.upload(packed_path)
        ctx = remote.cl(0)
        rlib = remote.load_module('net.tar')
        rparams = {k: tvm.nd.array(v, ctx) for k, v in params.items()}

    # Instantiate the graph runtime and feed it random input data.
    module = runtime.create(graph, rlib, ctx)
    module.set_input('data', tvm.nd.array(np.random.uniform(size=data_shape).astype(dtype)))
    module.set_input(**rparams)

    # mobilenet is fast, so it needs more runs for a stable measurement.
    num_warmup, num_test = (50, 300) if model == 'mobilenet' else (10, 60)

    # Warm-up runs are discarded; only the second evaluator is reported.
    module.module.time_evaluator("run", ctx, num_warmup)()
    prof_res = module.module.time_evaluator("run", ctx, num_test)()
    print("backend: TVM-mali\tmodel: %s\tdtype: %s\tcost:%.4f" % (model, dtype, prof_res.mean))
if __name__ == '__main__':
    # Command-line interface: which model/precision to benchmark and where.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, required=True,
                        choices=['vgg16', 'resnet18', 'mobilenet', 'all'],
                        help="The model type.")
    parser.add_argument('--dtype', type=str, default='float32', choices=['float16', 'float32'])
    parser.add_argument('--host', type=str, help="The host address of your arm device.", default=None)
    parser.add_argument('--port', type=int, help="The port number of your arm device", default=None)
    parser.add_argument('--target-host', type=str, help="The compilation target of host device.", default=None)
    args = parser.parse_args()

    # Benchmark configuration, shared with run_case() as module globals.
    batch_size = 1
    num_classes = 1000
    image_shape = (3, 224, 224)
    data_shape = (batch_size,) + image_shape
    out_shape = (batch_size, num_classes)

    if args.model != 'all':
        # Benchmark a single network at the requested precision.
        run_case(args.model, args.dtype)
    else:
        # Sweep every network/precision pair, pausing between runs so the
        # device can cool down.
        for net_name in ['vgg16', 'resnet18', 'mobilenet']:
            for precision in ['float32', 'float16']:
                run_case(net_name, precision)
                time.sleep(10)
| [
"numpy.random.uniform",
"argparse.ArgumentParser",
"tvm.contrib.rpc.connect",
"tvm.target.mali",
"tvm.nd.array",
"tvm.contrib.util.tempdir",
"time.sleep",
"tvm.contrib.graph_runtime.create",
"tvm.cl"
] | [((1343, 1357), 'tvm.contrib.util.tempdir', 'util.tempdir', ([], {}), '()\n', (1355, 1357), False, 'from tvm.contrib import util, rpc\n'), ((1809, 1841), 'tvm.contrib.graph_runtime.create', 'runtime.create', (['graph', 'rlib', 'ctx'], {}), '(graph, rlib, ctx)\n', (1823, 1841), True, 'from tvm.contrib import graph_runtime as runtime\n'), ((2800, 2825), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2823, 2825), False, 'import argparse\n'), ((1479, 1512), 'tvm.contrib.rpc.connect', 'rpc.connect', (['args.host', 'args.port'], {}), '(args.host, args.port)\n', (1490, 1512), False, 'from tvm.contrib import util, rpc\n'), ((1714, 1723), 'tvm.cl', 'tvm.cl', (['(0)'], {}), '(0)\n', (1720, 1723), False, 'import tvm\n'), ((1179, 1196), 'tvm.target.mali', 'tvm.target.mali', ([], {}), '()\n', (1194, 1196), False, 'import tvm\n'), ((1641, 1661), 'tvm.nd.array', 'tvm.nd.array', (['v', 'ctx'], {}), '(v, ctx)\n', (1653, 1661), False, 'import tvm\n'), ((3829, 3843), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3839, 3843), False, 'import time\n'), ((1884, 1918), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data_shape'}), '(size=data_shape)\n', (1901, 1918), True, 'import numpy as np\n')] |
import datetime
import os
import time
import unittest
import numpy
import cf
class AuxiliaryCoordinateTest(unittest.TestCase):
    """Unit tests for cf.AuxiliaryCoordinate construction, slicing and math."""

    def setUp(self):
        """Build a 1-d latitude auxiliary coordinate (13 points) with bounds."""
        self.filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                     'test_file.nc')

        aux1 = cf.AuxiliaryCoordinate()
        aux1.standard_name = 'latitude'
        a = numpy.array([-30, -23.5, -17.8123, -11.3345, -0.7, -0.2, 0, 0.2, 0.7, 11.30003, 17.8678678, 23.5, 30])
        aux1.set_data(cf.Data(a, 'degrees_north'))
        bounds = cf.Bounds()
        b = numpy.empty(a.shape + (2,))
        # Symmetric +/-0.1 degree bounds around each coordinate value.
        b[:, 0] = a - 0.1
        b[:, 1] = a + 0.1
        bounds.set_data(cf.Data(b))
        aux1.set_bounds(bounds)
        self.aux1 = aux1

    def test_AuxiliaryCoordinate_mask_invalid(self):
        """mask_invalid works with and without bounds, in place or not."""
        a = self.aux1.copy()

        _ = a.mask_invalid()
        self.assertTrue(a.mask_invalid(inplace=True) is None)

        a.del_bounds()
        _ = a.mask_invalid()
        self.assertTrue(a.mask_invalid(inplace=True) is None)

    def test_AuxiliaryCoordinate_chunk(self):
        """chunk() runs without error on a bounded coordinate."""
        a = self.aux1.copy()
        a.chunk()

    def test_AuxiliaryCoordinate__repr__str__dump(self):
        """repr/str/dump do not raise, and the construct is auxiliary."""
        f = cf.read(self.filename)[0]
        x = f.auxiliary_coordinates('latitude').value()

        _ = repr(x)
        _ = str(x)
        _ = x.dump(display=False)
        self.assertTrue(x.isauxiliary)

    def test_AuxiliaryCoordinate_bounds(self):
        """upper_bounds/lower_bounds are accessible on a bounded coordinate."""
        f = cf.read(self.filename)[0]
        d = f.dimension_coordinates('X').value()
        x = cf.AuxiliaryCoordinate(source=d)

        _ = x.upper_bounds
        _ = x.lower_bounds

    def test_AuxiliaryCoordinate_properties(self):
        """The positive and axis attributes can be set, read and deleted."""
        f = cf.read(self.filename)[0]
        x = f.auxiliary_coordinates('latitude').value()

        x.positive = 'up'
        self.assertTrue(x.positive == 'up')
        del x.positive
        self.assertTrue(getattr(x, 'positive', None) is None)

        x.axis = 'Z'
        self.assertTrue(x.axis == 'Z')
        del x.axis
        self.assertTrue(getattr(x, 'axis', None) is None)

        # x.axis = 'T'
        # self.assertTrue(x.ndim == 2)
        # self.assertTrue(x.T)
        # self.assertTrue(x.ctype == 'T')

        d = f.dimension_coordinates('X').value()
        x = cf.AuxiliaryCoordinate(source=d)

        # x.axis = 'T'
        # self.assertTrue(x.ndim == 1)
        # self.assertTrue(x.T)

    def test_AuxiliaryCoordinate_insert_dimension(self):
        """insert_dimension adds a size-1 axis to data and bounds alike."""
        f = cf.read(self.filename)[0]
        d = f.dimension_coordinates('X').value()
        x = cf.AuxiliaryCoordinate(source=d)

        self.assertTrue(x.shape == (9,))
        self.assertTrue(x.bounds.shape == (9, 2))

        y = x.insert_dimension(0)
        self.assertTrue(y.shape == (1, 9))
        self.assertTrue(y.bounds.shape == (1, 9, 2), y.bounds.shape)

        x.insert_dimension(-1, inplace=True)
        self.assertTrue(x.shape == (9, 1))
        self.assertTrue(x.bounds.shape == (9, 1, 2), x.bounds.shape)

    def test_AuxiliaryCoordinate_transpose(self):
        """transpose permutes data axes and the leading bounds axes."""
        f = cf.read(self.filename)[0]
        x = f.auxiliary_coordinates('longitude').value()

        bounds = cf.Bounds(data=cf.Data(numpy.arange(9*10*4).reshape(9, 10, 4)))
        x.set_bounds(bounds)

        self.assertTrue(x.shape == (9, 10))
        self.assertTrue(x.bounds.shape == (9, 10, 4))

        y = x.transpose()
        self.assertTrue(y.shape == (10, 9))
        self.assertTrue(y.bounds.shape == (10, 9, 4), y.bounds.shape)

        x.transpose([1, 0], inplace=True)
        self.assertTrue(x.shape == (10, 9))
        self.assertTrue(x.bounds.shape == (10, 9, 4), x.bounds.shape)

    def test_AuxiliaryCoordinate_squeeze(self):
        """squeeze removes size-1 axes from data and bounds."""
        f = cf.read(self.filename)[0]
        x = f.auxiliary_coordinates('longitude').value()

        bounds = cf.Bounds(data=cf.Data(numpy.arange(9*10*4).reshape(9, 10, 4)))
        x.set_bounds(bounds)
        x.insert_dimension(1, inplace=True)
        x.insert_dimension(0, inplace=True)

        self.assertTrue(x.shape == (1, 9, 1, 10))
        self.assertTrue(x.bounds.shape == (1, 9, 1, 10, 4))

        y = x.squeeze()
        self.assertTrue(y.shape == (9, 10))
        self.assertTrue(y.bounds.shape == (9, 10, 4), y.bounds.shape)

        x.squeeze(2, inplace=True)
        self.assertTrue(x.shape == (1, 9, 10))
        self.assertTrue(x.bounds.shape == (1, 9, 10, 4), x.bounds.shape)

    def test_AuxiliaryCoordinate_floor(self):
        """floor applies to data and (optionally) bounds, in place or not."""
        aux = self.aux1.copy()
        a = aux.array
        b = aux.bounds.array

        self.assertTrue((aux.floor().array == numpy.floor(a)).all())
        self.assertTrue((aux.floor().bounds.array == numpy.floor(b)).all())
        # bounds=False must leave the bounds untouched.
        self.assertTrue((aux.floor(bounds=False).array == numpy.floor(a)).all())
        self.assertTrue((aux.floor(bounds=False).bounds.array == b).all())

        aux.del_bounds()
        self.assertTrue((aux.floor().array == numpy.floor(a)).all())
        self.assertTrue((aux.floor(bounds=False).array == numpy.floor(a)).all())

        self.assertTrue(aux.floor(inplace=True) is None)
        self.assertTrue((aux.array == numpy.floor(a)).all())

    def test_AuxiliaryCoordinate_ceil(self):
        """ceil applies to data and (optionally) bounds, in place or not."""
        aux = self.aux1.copy()
        a = aux.array
        b = aux.bounds.array

        self.assertTrue((aux.ceil().array == numpy.ceil(a)).all())
        self.assertTrue((aux.ceil().bounds.array == numpy.ceil(b)).all())
        self.assertTrue((aux.ceil(bounds=False).array == numpy.ceil(a)).all())
        self.assertTrue((aux.ceil(bounds=False).bounds.array == b).all())

        aux.del_bounds()
        self.assertTrue((aux.ceil().array == numpy.ceil(a)).all())
        self.assertTrue((aux.ceil(bounds=False).array == numpy.ceil(a)).all())

        self.assertTrue(aux.ceil(inplace=True) is None)
        self.assertTrue((aux.array == numpy.ceil(a)).all())

    def test_AuxiliaryCoordinate_trunc(self):
        """trunc applies to data and (optionally) bounds, in place or not."""
        aux = self.aux1.copy()
        a = aux.array
        b = aux.bounds.array

        self.assertTrue((aux.trunc().array == numpy.trunc(a)).all())
        self.assertTrue((aux.trunc().bounds.array == numpy.trunc(b)).all())
        self.assertTrue((aux.trunc(bounds=False).array == numpy.trunc(a)).all())
        self.assertTrue((aux.trunc(bounds=False).bounds.array == b).all())

        aux.del_bounds()
        self.assertTrue((aux.trunc().array == numpy.trunc(a)).all())
        self.assertTrue((aux.trunc(bounds=False).array == numpy.trunc(a)).all())

        self.assertTrue(aux.trunc(inplace=True) is None)
        self.assertTrue((aux.array == numpy.trunc(a)).all())

    def test_AuxiliaryCoordinate_rint(self):
        """rint rounds to the nearest integer on data and bounds."""
        aux = self.aux1.copy()
        a = aux.array
        b = aux.bounds.array

        x0 = aux.rint()
        x = x0.array
        self.assertTrue((x == numpy.rint(a)).all(), x)
        self.assertTrue((aux.rint().bounds.array == numpy.rint(b)).all())
        self.assertTrue((aux.rint(bounds=False).array == numpy.rint(a)).all())
        self.assertTrue((aux.rint(bounds=False).bounds.array == b).all())

        aux.del_bounds()
        self.assertTrue((aux.rint().array == numpy.rint(a)).all())
        self.assertTrue((aux.rint(bounds=False).array == numpy.rint(a)).all())

        self.assertTrue(aux.rint(inplace=True) is None)
        self.assertTrue((aux.array == numpy.rint(a)).all())

    def test_AuxiliaryCoordinate_close(self):
        """close() runs without error."""
        aux = self.aux1.copy()
        aux.close()

    def test_AuxiliaryCoordinate_sin_cos_tan(self):
        """Trigonometric methods work both as copies and in place."""
        aux = self.aux1.copy()

        _ = aux.cos()
        self.assertTrue(aux.cos(inplace=True) is None)

        _ = aux.sin()
        self.assertTrue(aux.sin(inplace=True) is None)

        _ = aux.tan()
        self.assertTrue(aux.tan(inplace=True) is None)

    def test_AuxiliaryCoordinate_log_exp(self):
        """exp/log work both as copies and in place."""
        aux = self.aux1.copy()

        _ = aux.exp()
        self.assertTrue(aux.exp(inplace=True) is None)

        _ = aux.log()
        self.assertTrue(aux.log(inplace=True) is None)

    def test_AuxiliaryCoordinate_count(self):
        """count() needs data; it raises after the data are deleted."""
        aux = self.aux1.copy()

        _ = aux.count()

        aux.del_data()
        with self.assertRaises(Exception):
            aux.count()

    def test_AuxiliaryCoordinate_cyclic(self):
        """Querying and setting the cyclic axes of the coordinate."""
        aux = self.aux1.copy()

        self.assertTrue(aux.cyclic() == set())
        # NOTE(review): cyclic(0) appears to *mark* axis 0 as cyclic and
        # return the previous (empty) set, hence the third assertion --
        # confirm against the cf cyclic() documentation.
        self.assertTrue(aux.cyclic(0) == set())
        self.assertTrue(aux.cyclic() == set([0]))

    def test_AuxiliaryCoordinate_roll(self):
        """roll shifts values along an axis, in place or not."""
        aux = self.aux1.copy()

        _ = aux.roll(0, 3)
        self.assertTrue(aux.roll(-1, 4, inplace=True) is None)

    def test_AuxiliaryCoordinate_round(self):
        """round at several precisions, on data and bounds, in place or not."""
        aux = self.aux1.copy()
        a = aux.array
        b = aux.bounds.array

        for decimals in (0, 1, 2, 3, 4, 5):
            # Fresh copy per precision: earlier iterations delete the bounds.
            aux = self.aux1.copy()

            self.assertTrue((aux.round(decimals).array == numpy.round(a, decimals)).all())
            self.assertTrue((aux.round(decimals).bounds.array == numpy.round(b, decimals)).all())
            self.assertTrue((aux.round(decimals, bounds=False).array == numpy.round(a, decimals)).all())
            self.assertTrue((aux.round(decimals, bounds=False).bounds.array == b).all())

            aux.del_bounds()
            self.assertTrue((aux.round(decimals).array == numpy.round(a, decimals)).all())
            self.assertTrue((aux.round(decimals, bounds=False).array == numpy.round(a, decimals)).all())

            self.assertTrue(aux.round(decimals, inplace=True) is None)
            self.assertTrue((aux.array == numpy.round(a, decimals)).all())

    def test_AuxiliaryCoordinate_clip(self):
        """clip limits values to a range on data and bounds, in place or not."""
        aux = self.aux1.copy()
        a = aux.array
        b = aux.bounds.array

        self.assertTrue((aux.clip(-15, 25).array == numpy.clip(a, -15, 25)).all())
        self.assertTrue((aux.clip(-15, 25).bounds.array == numpy.clip(b, -15, 25)).all())
        self.assertTrue((aux.clip(-15, 25, bounds=False).array == numpy.clip(a, -15, 25)).all())
        self.assertTrue((aux.clip(-15, 25, bounds=False).bounds.array == b).all())

        aux.del_bounds()
        self.assertTrue((aux.clip(-15, 25).array == numpy.clip(a, -15, 25)).all())
        self.assertTrue((aux.clip(-15, 25, bounds=False).array == numpy.clip(a, -15, 25)).all())

        self.assertTrue(aux.clip(-15, 25, inplace=True) is None)
#--- End: class
if __name__ == "__main__":
print('Run date:', datetime.datetime.now())
cf.environment()
print()
unittest.main(verbosity=2)
| [
"cf.environment",
"unittest.main",
"os.path.abspath",
"numpy.trunc",
"numpy.ceil",
"numpy.empty",
"numpy.floor",
"cf.read",
"numpy.clip",
"cf.Data",
"numpy.rint",
"cf.Bounds",
"numpy.array",
"numpy.arange",
"cf.AuxiliaryCoordinate",
"numpy.round",
"datetime.datetime.now"
] | [((10778, 10794), 'cf.environment', 'cf.environment', ([], {}), '()\n', (10792, 10794), False, 'import cf\n'), ((10811, 10837), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (10824, 10837), False, 'import unittest\n'), ((301, 325), 'cf.AuxiliaryCoordinate', 'cf.AuxiliaryCoordinate', ([], {}), '()\n', (323, 325), False, 'import cf\n'), ((378, 485), 'numpy.array', 'numpy.array', (['[-30, -23.5, -17.8123, -11.3345, -0.7, -0.2, 0, 0.2, 0.7, 11.30003, \n 17.8678678, 23.5, 30]'], {}), '([-30, -23.5, -17.8123, -11.3345, -0.7, -0.2, 0, 0.2, 0.7, \n 11.30003, 17.8678678, 23.5, 30])\n', (389, 485), False, 'import numpy\n'), ((549, 560), 'cf.Bounds', 'cf.Bounds', ([], {}), '()\n', (558, 560), False, 'import cf\n'), ((573, 600), 'numpy.empty', 'numpy.empty', (['(a.shape + (2,))'], {}), '(a.shape + (2,))\n', (584, 600), False, 'import numpy\n'), ((1564, 1596), 'cf.AuxiliaryCoordinate', 'cf.AuxiliaryCoordinate', ([], {'source': 'd'}), '(source=d)\n', (1586, 1596), False, 'import cf\n'), ((2328, 2360), 'cf.AuxiliaryCoordinate', 'cf.AuxiliaryCoordinate', ([], {'source': 'd'}), '(source=d)\n', (2350, 2360), False, 'import cf\n'), ((2613, 2645), 'cf.AuxiliaryCoordinate', 'cf.AuxiliaryCoordinate', ([], {'source': 'd'}), '(source=d)\n', (2635, 2645), False, 'import cf\n'), ((10749, 10772), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10770, 10772), False, 'import datetime\n'), ((503, 530), 'cf.Data', 'cf.Data', (['a', '"""degrees_north"""'], {}), "(a, 'degrees_north')\n", (510, 530), False, 'import cf\n'), ((677, 687), 'cf.Data', 'cf.Data', (['b'], {}), '(b)\n', (684, 687), False, 'import cf\n'), ((1212, 1234), 'cf.read', 'cf.read', (['self.filename'], {}), '(self.filename)\n', (1219, 1234), False, 'import cf\n'), ((1477, 1499), 'cf.read', 'cf.read', (['self.filename'], {}), '(self.filename)\n', (1484, 1499), False, 'import cf\n'), ((1733, 1755), 'cf.read', 'cf.read', (['self.filename'], {}), '(self.filename)\n', (1740, 
1755), False, 'import cf\n'), ((2526, 2548), 'cf.read', 'cf.read', (['self.filename'], {}), '(self.filename)\n', (2533, 2548), False, 'import cf\n'), ((3143, 3165), 'cf.read', 'cf.read', (['self.filename'], {}), '(self.filename)\n', (3150, 3165), False, 'import cf\n'), ((3850, 3872), 'cf.read', 'cf.read', (['self.filename'], {}), '(self.filename)\n', (3857, 3872), False, 'import cf\n'), ((205, 230), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (220, 230), False, 'import os\n'), ((4778, 4792), 'numpy.floor', 'numpy.floor', (['a'], {}), '(a)\n', (4789, 4792), False, 'import numpy\n'), ((4854, 4868), 'numpy.floor', 'numpy.floor', (['b'], {}), '(b)\n', (4865, 4868), False, 'import numpy\n'), ((4935, 4949), 'numpy.floor', 'numpy.floor', (['a'], {}), '(a)\n', (4946, 4949), False, 'import numpy\n'), ((5105, 5119), 'numpy.floor', 'numpy.floor', (['a'], {}), '(a)\n', (5116, 5119), False, 'import numpy\n'), ((5186, 5200), 'numpy.floor', 'numpy.floor', (['a'], {}), '(a)\n', (5197, 5200), False, 'import numpy\n'), ((5305, 5319), 'numpy.floor', 'numpy.floor', (['a'], {}), '(a)\n', (5316, 5319), False, 'import numpy\n'), ((5532, 5545), 'numpy.ceil', 'numpy.ceil', (['a'], {}), '(a)\n', (5542, 5545), False, 'import numpy\n'), ((5606, 5619), 'numpy.ceil', 'numpy.ceil', (['b'], {}), '(b)\n', (5616, 5619), False, 'import numpy\n'), ((5685, 5698), 'numpy.ceil', 'numpy.ceil', (['a'], {}), '(a)\n', (5695, 5698), False, 'import numpy\n'), ((5852, 5865), 'numpy.ceil', 'numpy.ceil', (['a'], {}), '(a)\n', (5862, 5865), False, 'import numpy\n'), ((5931, 5944), 'numpy.ceil', 'numpy.ceil', (['a'], {}), '(a)\n', (5941, 5944), False, 'import numpy\n'), ((6048, 6061), 'numpy.ceil', 'numpy.ceil', (['a'], {}), '(a)\n', (6058, 6061), False, 'import numpy\n'), ((6268, 6282), 'numpy.trunc', 'numpy.trunc', (['a'], {}), '(a)\n', (6279, 6282), False, 'import numpy\n'), ((6344, 6358), 'numpy.trunc', 'numpy.trunc', (['b'], {}), '(b)\n', (6355, 6358), False, 'import numpy\n'), 
((6425, 6439), 'numpy.trunc', 'numpy.trunc', (['a'], {}), '(a)\n', (6436, 6439), False, 'import numpy\n'), ((6595, 6609), 'numpy.trunc', 'numpy.trunc', (['a'], {}), '(a)\n', (6606, 6609), False, 'import numpy\n'), ((6676, 6690), 'numpy.trunc', 'numpy.trunc', (['a'], {}), '(a)\n', (6687, 6690), False, 'import numpy\n'), ((6795, 6809), 'numpy.trunc', 'numpy.trunc', (['a'], {}), '(a)\n', (6806, 6809), False, 'import numpy\n'), ((7053, 7066), 'numpy.rint', 'numpy.rint', (['a'], {}), '(a)\n', (7063, 7066), False, 'import numpy\n'), ((7130, 7143), 'numpy.rint', 'numpy.rint', (['b'], {}), '(b)\n', (7140, 7143), False, 'import numpy\n'), ((7209, 7222), 'numpy.rint', 'numpy.rint', (['a'], {}), '(a)\n', (7219, 7222), False, 'import numpy\n'), ((7376, 7389), 'numpy.rint', 'numpy.rint', (['a'], {}), '(a)\n', (7386, 7389), False, 'import numpy\n'), ((7455, 7468), 'numpy.rint', 'numpy.rint', (['a'], {}), '(a)\n', (7465, 7468), False, 'import numpy\n'), ((7572, 7585), 'numpy.rint', 'numpy.rint', (['a'], {}), '(a)\n', (7582, 7585), False, 'import numpy\n'), ((10099, 10121), 'numpy.clip', 'numpy.clip', (['a', '(-15)', '(25)'], {}), '(a, -15, 25)\n', (10109, 10121), False, 'import numpy\n'), ((10189, 10211), 'numpy.clip', 'numpy.clip', (['b', '(-15)', '(25)'], {}), '(b, -15, 25)\n', (10199, 10211), False, 'import numpy\n'), ((10286, 10308), 'numpy.clip', 'numpy.clip', (['a', '(-15)', '(25)'], {}), '(a, -15, 25)\n', (10296, 10308), False, 'import numpy\n'), ((10478, 10500), 'numpy.clip', 'numpy.clip', (['a', '(-15)', '(25)'], {}), '(a, -15, 25)\n', (10488, 10500), False, 'import numpy\n'), ((10575, 10597), 'numpy.clip', 'numpy.clip', (['a', '(-15)', '(25)'], {}), '(a, -15, 25)\n', (10585, 10597), False, 'import numpy\n'), ((3275, 3299), 'numpy.arange', 'numpy.arange', (['(9 * 10 * 4)'], {}), '(9 * 10 * 4)\n', (3287, 3299), False, 'import numpy\n'), ((3982, 4006), 'numpy.arange', 'numpy.arange', (['(9 * 10 * 4)'], {}), '(9 * 10 * 4)\n', (3994, 4006), False, 'import numpy\n'), ((9181, 
9205), 'numpy.round', 'numpy.round', (['a', 'decimals'], {}), '(a, decimals)\n', (9192, 9205), False, 'import numpy\n'), ((9279, 9303), 'numpy.round', 'numpy.round', (['b', 'decimals'], {}), '(b, decimals)\n', (9290, 9303), False, 'import numpy\n'), ((9384, 9408), 'numpy.round', 'numpy.round', (['a', 'decimals'], {}), '(a, decimals)\n', (9395, 9408), False, 'import numpy\n'), ((9606, 9630), 'numpy.round', 'numpy.round', (['a', 'decimals'], {}), '(a, decimals)\n', (9617, 9630), False, 'import numpy\n'), ((9711, 9735), 'numpy.round', 'numpy.round', (['a', 'decimals'], {}), '(a, decimals)\n', (9722, 9735), False, 'import numpy\n'), ((9870, 9894), 'numpy.round', 'numpy.round', (['a', 'decimals'], {}), '(a, decimals)\n', (9881, 9894), False, 'import numpy\n')] |
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 <NAME>, <NAME>, <NAME>,
# <NAME>. All rights reserved.
# Copyright (C) 2011-2014 <NAME>
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import casadi as c
import numpy
from numpy import random, array, linalg, matrix, zeros, ones
import unittest
from types import *
from helpers import *
from casadi import *
# scipy is an optional dependency: the sparse-matrix variants of the tests
# below are only exercised when it can be imported.
scipy_available = True
try:
    from scipy.sparse import csr_matrix
except ImportError:
    # Previously a bare ``except:``, which would also have swallowed
    # SystemExit/KeyboardInterrupt; only a failed import should disable
    # the scipy-dependent tests.
    scipy_available = False
class SXtests(casadiTestCase):
def setUp(self):
self.pool=FunctionPool()
self.pool.append(lambda x: sqrt(x[0]),sqrt,"sqrt")
self.pool.append(lambda x: sin(x[0]),sin,"sin")
self.pool.append(lambda x: cos(x[0]),cos,"cos")
self.pool.append(lambda x: tan(x[0]),tan,"tan")
self.pool.append(lambda x: fabs(x[0]),fabs,"fabs")
self.pool.append(lambda x: sign(x[0]),sign,"sign")
self.pool.append(lambda x: arctan(x[0]),arctan,"arctan")
self.pool.append(lambda x: arcsin(x[0]),arcsin,"arcsin")
self.pool.append(lambda x: arccos(x[0]),arccos,"arccos")
self.pool.append(lambda x: exp(x[0]),exp,"exp")
self.pool.append(lambda x: log(x[0]),log,"log")
self.pool.append(lambda x: x[0]**0,lambda x : x**0,"x^0",flags={'nozero'})
self.pool.append(lambda x: x[0]**1,lambda x : x**1,"^1")
self.pool.append(lambda x: x[0]**(-2),lambda x : x**(-2),"^-2",flags={'nozero'})
self.pool.append(lambda x: x[0]**(0.3),lambda x : x**(0.3),"^0.3")
self.pool.append(lambda x: floor(x[0]),floor,"floor")
self.pool.append(lambda x: ceil(x[0]),ceil,"ceil")
self.Jpool=FunctionPool()
self.Jpool.append(lambda x: sqrt(x[0]),lambda x:diag(1/(2.0*sqrt(x))),"sqrt")
self.Jpool.append(lambda x: sin(x[0]),lambda x:diag(cos(x)),"sin")
self.Jpool.append(lambda x: fabs(x[0]),lambda x:diag(sign(x)),"fabs")
self.Jpool.append(lambda x: sign(x[0]),lambda x:diag(x*0),"fabs")
self.Jpool.append(lambda x: cos(x[0]),lambda x:diag(-sin(x)),"cos")
self.Jpool.append(lambda x: tan(x[0]),lambda x:diag(1.0/cos(x)**2),"tan")
self.Jpool.append(lambda x: arctan(x[0]),lambda x:diag( 1.0/(x**2+1)),"arctan")
self.Jpool.append(lambda x: arcsin(x[0]),lambda x:diag( 1.0/sqrt(1-x**2)),"arcsin")
self.Jpool.append(lambda x: arccos(x[0]),lambda x: diag(-1.0/sqrt(1-x**2)),"arccos")
self.Jpool.append(lambda x: exp(x[0]),lambda x: diag(exp(x)),"exp")
self.Jpool.append(lambda x: log(x[0]),lambda x: diag(1.0/x),"log")
self.Jpool.append(lambda x: x[0]**0,lambda x :diag(zeros(x.shape)),"x^0")
self.Jpool.append(lambda x: x[0]**1,lambda x : diag(ones(x.shape)),"^1")
self.Jpool.append(lambda x: x[0]**(-2),lambda x : diag(-2.0/x**3),"^-2")
self.Jpool.append(lambda x: x[0]**(0.3),lambda x :diag( 0.3/x**0.7),"^0.3")
self.matrixpool=FunctionPool()
self.matrixpool.append(lambda x: norm_2(x[0]),linalg.norm,"norm_2")
self.matrixbinarypool=FunctionPool()
self.matrixbinarypool.append(lambda a: a[0]+a[1],lambda a: a[0]+a[1],"Matrix+Matrix")
self.matrixbinarypool.append(lambda a: a[0]-a[1],lambda a: a[0]-a[1],"Matrix-Matrix")
self.matrixbinarypool.append(lambda a: a[0]*a[1],lambda a: a[0]*a[1],"Matrix*Matrix")
self.matrixbinarypool.append(lambda a: fmax(a[0],a[1]),lambda a: fmax(a[0],a[1]),"fmin")
self.matrixbinarypool.append(lambda a: fmin(a[0],a[1]),lambda a: fmin(a[0],a[1]),"fmax")
#self.matrixbinarypool.append(lambda a: dot(a[0],trans(a[1])),lambda a: dot(a[0].T,a[1]),name="dot(Matrix,Matrix)")
self.matrixbinarypool.append(lambda a: mtimes(a[0],a[1].T),lambda a: np.dot(a[0],a[1].T),"dot(Matrix,Matrix.T)")
#self.pool.append(lambda x: erf(x[0]),erf,"erf") # numpy has no erf
def test_scalarSX(self):
x=SX.sym("x")
x0=0.738
self.numpyEvaluationCheckPool(self.pool,[x],x0,name="scalarSX")
def test_gradient(self):
self.message("jacobian of SX**number")
x=SX.sym("x");
x0=1;
p=3 # increase to 20 to showcase ticket #56
y=x**p;
dx=jacobian(y,x);
dxr=p;
self.evaluationCheck([dx],dxr,[x],x0,name="jacobian");
dxr=1
for i in list(range(p)):
y=jacobian(y,x)
dxr=dxr*(p-i)
self.evaluationCheck([y],dxr,[x],x0,name="recursive jacobian");
def test_gradient2(self):
self.message("jacobian of SX**SX")
x=SX.sym("x");
p=SX.sym("p");
x0=1;
p0=3 # increase to 20 to showcase ticket #56
y=x**p;
dx=jacobian(y,x);
#print dx
dxr=p0;
self.evaluationCheck([dx],dxr,[x,p],[x0,p0],name="jacobian");
dxr=1
for i in list(range(p0)):
y=jacobian(y,x)
dxr=dxr*(p0-i)
self.evaluationCheck([y],dxr,[x,p],[x0,p0],name="jacobian");
def test_SXJacobian(self):
self.message("SX(1,1) unary operation, jacobian")
x=SX.sym("x")
x0=array([[0.738]])
def fmod(f,x):
J=f.jacobian_old(0, 0)
return J
self.numpyEvaluationCheckPool(self.Jpool,[x],x0,name="SX unary operations, jacobian",fmod=fmod)
def test_SXJac(self):
self.message("SX(1,1) unary operation, jac")
x=SX.sym("x")
x0=array([[0.738]])
def fmod(f,x):
y = f.call(x)
J = Function('J', x, [jacobian(y[0],x[0])])
return J
self.numpyEvaluationCheckPool(self.Jpool,[x],x0,name="SX unary operations, jac",fmod=fmod)
def test_SXJacobians(self):
self.message("SX(3,1) unary operation, jacobian")
x=SX.sym("x",3)
x0=array([0.738,0.9,0.3])
def fmod(f,x):
J=f.jacobian_old(0, 0)
return J
self.numpyEvaluationCheckPool(self.Jpool,[x],x0,name="SX unary operations, jacobian",fmod=fmod)
def test_SXJacobians2(self):
self.message("SX(1,3) unary operation, jacobian")
x=SX.sym("x",1,3)
x0=array([0.738,0.9,0.3])
def fmod(f,x):
J=f.jacobian_old(0, 0)
return J
self.numpyEvaluationCheckPool(self.Jpool,[x],x0,name="SX unary operations, jacobian",fmod=fmod)
def test_SX(self):
self.message("SX unary operations")
x=SX.sym("x",3,2)
x0=array([[0.738,0.2],[ 0.1,0.39 ],[0.99,0.999999]])
self.numpyEvaluationCheckPool(self.pool,[x],x0,name="SX")
x=SX.sym("x",3,3)
x0=array([[0.738,0.2,0.3],[ 0.1,0.39,-6 ],[0.99,0.999999,-12]])
#self.numpyEvaluationCheck(lambda x: c.det(x[0]), lambda x: linalg.det(x),[x],x0,name="det(SX)")
self.numpyEvaluationCheck(lambda x: SX(c.det(x[0])), lambda x: linalg.det(x),[x],x0,name="det(SX)")
self.numpyEvaluationCheck(lambda x: c.inv(x[0]), lambda x: linalg.inv(x),[x],x0,name="inv(SX)")
    def test_SXSparse(self):
        """Unary pool applied to a sparse 4x3 SX matrix with 3 nonzeros."""
        self.message("SX unary operations, sparse")
        x=SX.sym("x")
        y=SX.sym("y")
        z=SX.sym("z")
        # 4x3 CCS pattern: colptr [0,2,2,3], row indices [1,2,1].
        x=SX(Sparsity(4,3,[0,2,2,3],[1,2,1]),vertcat(*[x,y,z]))
        if scipy_available:
            # Reference values as a scipy sparse matrix, densified for numpy.
            x0=DM(Sparsity(4,3,[0,2,2,3],[1,2,1]),[0.738,0.1,0.99]).sparse()
            self.numpyEvaluationCheckPool(self.pool,[x],array(x0.todense()),name="SX",setx0=x0,excludeflags={'nozero'})
        else:
            x0=DM(Sparsity(4,3,[0,2,2,3],[1,2,1]),[0.738,0.1,0.99]).full()
            self.numpyEvaluationCheckPool(self.pool,[x],x0,name="SX",setx0=x0,excludeflags={'nozero'})
def test_SXbinary(self):
self.message("SX binary operations")
x=SX.sym("x",3,2)
y=SX.sym("x",3,2)
x0=array([[0.738,0.2],[ 0.1,0.39 ],[0.99,0.999999]])
y0=array([[1.738,0.6],[ 0.7,12 ],[0,-6]])
self.numpyEvaluationCheckPool(self.matrixbinarypool,[x,y],[x0,y0],name="SX")
self.assertRaises(RuntimeError, lambda : mtimes(x,y))
def test_DMbinary(self):
self.message("SX binary operations")
x=SX.sym("x",3,2)
y=SX.sym("x",3,2)
x0=array([[0.738,0.2],[ 0.1,0.39 ],[0.99,0.999999]])
y0=array([[1.738,0.6],[ 0.7,12 ],[0,-6]])
for f,fr,label,flags in self.matrixbinarypool.zip():
self.checkarray(f(vertcat(*[x0,y0])),fr(vertcat(*[x0,y0])),label)
    def test_SXbinarySparse(self):
        """Binary pool on two sparse 4x3 SX matrices with different patterns."""
        self.message("SX binary operations")
        x=SX.sym("x")
        y=SX.sym("y")
        z=SX.sym("z")
        x2=SX.sym("x2")
        y2=SX.sym("y2")
        z2=SX.sym("z2")
        # Two 4x3 CCS patterns that do not coincide nonzero-for-nonzero.
        xx=SX(Sparsity(4,3,[0,2,2,3],[1,2,1]),vertcat(*[x,y,z]))
        yy=SX(Sparsity(4,3,[0,2,2,3],[0,2,3]),vertcat(*[x2,z2,y2]))
        if scipy_available:
            x0=DM(Sparsity(4,3,[0,2,2,3],[1,2,1]),[0.738,0.1,0.99]).sparse()
            y0=DM(Sparsity(4,3,[0,2,2,3],[0,2,3]),[1.738,0.7,-6]).sparse()
            self.numpyEvaluationCheckPool(self.matrixbinarypool,[xx,yy],[array(x0.todense()),array(y0.todense())],name="SX",setx0=[x0,y0])
        else:
            x0=DM(Sparsity(4,3,[0,2,2,3],[1,2,1]),[0.738,0.1,0.99]).full()
            y0=DM(Sparsity(4,3,[0,2,2,3],[0,2,3]),[1.738,0.7,-6]).full()
            self.numpyEvaluationCheckPool(self.matrixbinarypool,[xx,yy],[x0,y0],name="SX",setx0=[x0,y0])
        # Mismatched inner dimensions: the matrix product must raise.
        self.assertRaises(RuntimeError, lambda : mtimes(xx,yy))
    @known_bug() # Test refactoring, cf. #1436
    def test_SXslicing(self):
        """Indexing/slicing of dense and sparse SX compared to numpy.matrix."""
        self.message("SX slicing/indexing")
        x=SX.sym("x",3,2)
        x0=array([[0.738,0.2],[ 0.1,0.39 ],[0.99,0.999999]])

        # Dense matrix: scalar, row, column, block and flat-nonzero indexing.
        self.message(":dense")
        self.numpyEvaluationCheck(lambda x: SX(x[0][0,0]), lambda x: matrix(x)[0,0],[x],x0,name="x[0,0]")
        self.numpyEvaluationCheck(lambda x: SX(x[0][1,0]), lambda x: matrix(x)[1,0],[x],x0,name="x[1,0]")
        self.numpyEvaluationCheck(lambda x: SX(x[0][0,1]), lambda x: matrix(x)[0,1],[x],x0,name="x[1,0]")
        self.numpyEvaluationCheck(lambda x: SX(x[0][0,-1]), lambda x: matrix(x)[0,-1],[x],x0,name="x[0,-1]")
        self.numpyEvaluationCheck(lambda x: x[0][:,0], lambda x: matrix(x)[:,0],[x],x0,name="x[:,0]")
        self.numpyEvaluationCheck(lambda x: x[0][:,1], lambda x: matrix(x)[:,1],[x],x0,name="x[:,1]")
        self.numpyEvaluationCheck(lambda x: x[0][1,:], lambda x: matrix(x)[1,:],[x],x0,name="x[1,:]")
        self.numpyEvaluationCheck(lambda x: x[0][0,:], lambda x: matrix(x)[0,:],[x],x0,name="x[0,:]")
        self.numpyEvaluationCheck(lambda x: x[0][-1,:], lambda x: matrix(x)[-1,:],[x],x0,name="x[-1,:]")
        self.numpyEvaluationCheck(lambda x: x[0][:,-2], lambda x: matrix(x)[:,-2],[x],x0,name="x[:,-2]")
        self.numpyEvaluationCheck(lambda x: x[0][0:-2,0:-1], lambda x: matrix(x)[0:-2,0:-1],[x],x0,name="x[0:-2,0:-1]")
        self.numpyEvaluationCheck(lambda x: x[0][0:2,0:2], lambda x: matrix(x)[0:2,0:2],[x],x0,name="x[0:2,0:2]")
        self.numpyEvaluationCheck(lambda x: x[0][[0,1],0:2], lambda x: matrix(x)[[0,1],0:2],[x],x0,name="x[[0,1],0:2]")
        # .nz indexes the nonzeros in column-major order.
        self.numpyEvaluationCheck(lambda x: x[0].nz[[0,2,3]], lambda x: matrix([x[0,0],x[2,0],x[0,1]]).T,[x],x0,name="x[[0,2,3]]")

        myarray=array([0,2,3])
        mylist=list(myarray)
        #self.numpyEvaluationCheck(lambda x: x[0][mylist], lambda x: matrix([x[0,0],x[1,0],x[1,1]]).T,[x],x0,name="x[[0,2,3]]")
        self.numpyEvaluationCheck(lambda x: x[0].nz[0:2], lambda x: matrix(x.T.ravel()[0:2]).T,[x],x0,name="x[0:2] on dense matrix")
        self.numpyEvaluationCheck(lambda x: x[0].nz[1], lambda x: matrix(x.T.ravel()[1]).T,[x],x0,name="x[1]")
        self.numpyEvaluationCheck(lambda x: x[0].nz[-1], lambda x: matrix(x.ravel()[-1]).T,[x],x0,name="x[-1]")

        # Sparse matrix: the same patterns on a 4x3 matrix with 3 nonzeros.
        self.message(":sparse")
        x=SX(Sparsity(4,3,[0,2,2,3],[1,2,1]),vertcat(*[SX.sym("x"),SX.sym("y"),SX.sym("z")]))
        sx0=[0.738,0.39,0.99]
        x0=DM(Sparsity(4,3,[0,2,2,3],[1,2,1]),[0.738,0.39,0.99]).full()
        self.numpyEvaluationCheck(lambda x: SX(x[0][0,0]), lambda x: matrix(x)[0,0],[x],x0,name="x[0,0]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: SX(x[0][0,0]), lambda x: matrix(x)[0,0],[x],x0,name="x[0,0]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: SX(x[0][1,0]), lambda x: matrix(x)[1,0],[x],x0,name="x[1,0]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: SX(x[0][0,1]), lambda x: matrix(x)[0,1],[x],x0,name="x[1,0]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: SX(x[0][0,-1]), lambda x: matrix(x)[0,-1],[x],x0,name="x[0,-1]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0][:,0], lambda x: matrix(x)[:,0],[x],x0,name="x[:,0]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0][:,1], lambda x: matrix(x)[:,1],[x],x0,name="x[:,1]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0][1,:], lambda x: matrix(x)[1,:],[x],x0,name="x[1,:]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0][0,:], lambda x: matrix(x)[0,:],[x],x0,name="x[0,:]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0][-1,:], lambda x: matrix(x)[-1,:],[x],x0,name="x[-1,:]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0][:,-2], lambda x: matrix(x)[:,-2],[x],x0,name="x[:,-2]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0][0:-2,0:-1], lambda x: matrix(x)[0:-2,0:-1],[x],x0,name="x[0:-2,0:-1]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0][0:2,0:2], lambda x: matrix(x)[0:2,0:2],[x],x0,name="x[0:2,0:2]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0][[0,1],0:2], lambda x: matrix(x)[[0,1],0:2],[x],x0,name="x[[0,1],0:2]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0].nz[[2,1]], lambda x: matrix([x[1,2],x[2,0]]).T,[x],x0,name="x[[2,1]]")
        self.numpyEvaluationCheck(lambda x: x[0].nz[0:2], lambda x: matrix(sx0[0:2]).T,[x],x0,name="x[0:2] on dense matrix")
        self.numpyEvaluationCheck(lambda x: x[0].nz[1], lambda x: matrix(sx0[1]).T,[x],x0,name="x[1]",setx0=[sx0])
        self.numpyEvaluationCheck(lambda x: x[0].nz[-1], lambda x: matrix(sx0[-1]).T,[x],x0,name="x[-1]",setx0=[sx0])
def test_SX1(self):
    """Evaluate a 2-input/3-output SX Function numerically and check its Jacobian.

    Fix: the per-output assertion message read "output in correct";
    corrected to "output incorrect".
    """
    self.message("SXFunction evaluation")
    fun = lambda x, y: [x + y, x * y, x**2 + y**3]
    x = SX.sym("x")
    y = SX.sym("y")
    f = Function("f", [vertcat(*[x, y])], [vertcat(*fun(x, y))])
    L = [2, 3]
    f_in = [0] * f.n_in()
    f_in[0] = L
    f_out = f.call(f_in)
    z = f_out[0].full()
    zr = fun(*L)  # reference values computed in plain Python
    for i in range(3):
        self.assertAlmostEqual(z[i], zr[i], 10, 'SXfunction output incorrect')
    self.message("SXFunction jacobian evaluation")
    J = f.jacobian_old(0, 0)
    J_in = [0] * J.n_in()
    J_in[0] = L
    J_out = J.call(J_in)
    # Hand-computed Jacobian of [x+y, x*y, x^2+y^3] at (2, 3)
    Jr = matrix([[1, 1], [3, 2], [4, 27]])
    self.checkarray(J_out[0], Jr, "SXfunction jacobian evaluates incorrectly")
def test_SX2(self):
    """Check the printed form of SX expressions and their numeric evaluation."""
    self.message("SXFunction evalution 2")
    fun = lambda x,y: [3-sin(x*x)-y, sqrt(y)*x]
    # variables
    x = SX.sym("x")
    y = SX.sym("y")
    # Create function
    f = fun(x,y)
    # The printed representation differs depending on whether x*x was
    # folded into sq(x) by on-the-fly simplification.
    if GlobalOptions.getSimplificationOnTheFly():
        self.assertEqual(str(f),'[SX(((3-sin(sq(x)))-y)), SX((sqrt(y)*x))]','SX representation is wrong')
    else:
        self.assertEqual(str(f),'[SX(((3-sin((x*x)))-y)), SX((sqrt(y)*x))]','SX representation is wrong'+str(f))
    fcn = Function("fcn", [vertcat(*[x,y])],[vertcat(*f)])
    self.assertEqual(repr(fcn),'Function(fcn:(i0[2])->(o0[2]) SXFunction)','SX representation is wrong')
    # Pass inputs
    L=[2,3]
    fcn_in = [0]*fcn.n_in();fcn_in[0]=L
    # Evaluate numerically
    fcn_out = fcn.call(fcn_in)
    # Get the results
    res = tuple(fcn_out[0].nonzeros())
    self.assertAlmostEqual(res[0], fun(*L)[0],10,'SXfunction evaluation wrong')
    self.assertAlmostEqual(res[1], fun(*L)[1],10,'SXfunction evaluation wrong')
def test_SX_func(self):
    """Function constructors: list-of-SX inputs work, bare SX inputs must raise."""
    self.message("Function constructors")
    x0=SX.sym("x")
    x1=SX.sym("x")
    x2=SX.sym("x")
    x3=SX.sym("x")
    x4=SX.sym("x")
    x5=SX.sym("x")
    x6=SX.sym("x")
    y=SX.sym("y",2,3)
    f=Function("f", [y],[y])
    self.checkarray(f.size_in(0),(2,3),"Function constructors")
    self.checkarray(f.size_out(0),(2,3),"Function constructors")
    # Input spec must be a list of SX; a bare SX expression is rejected.
    self.assertRaises(NotImplementedError,lambda: Function("f", y,[y,y]))
    self.assertRaises(NotImplementedError,lambda: Function("f", x0,[x0,x1]))
def test_evalfail(self):
    """Calling Function.call with a raw SX argument (not a list) must raise."""
    self.message("eval fail test")
    sym = SX.sym("x", 2, 2)
    ident = Function("f", [sym], [sym])
    self.assertRaises(NotImplementedError, lambda: ident.call(sym))
def test_SXconversion(self):
    """Round-trip conversions between SX, DM and numpy arrays; det on each."""
    self.message("Conversions from and to SX")
    y=SX.sym("y")
    x=SX.sym("x",3,3)
    SX(y)
    SX(x)
    c.det(x)
    # Convert symbolic matrix to a numpy array of SX entries and take det again.
    y=array(DM(x))
    c.det(y)
def test_SXbool(self):
    """logic_and / logic_or / logic_not evaluated over all boolean input pairs."""
    self.message("bool")
    x = SX.sym("x")
    y = SX.sym("y")
    f = Function("f", [vertcat(*[x,y])],[vertcat(*[logic_and(x,y),logic_or(x,y),logic_not(x)])])
    for t1 in [0,1]:
        for t2 in [0,1]:
            # Python truth values used as the reference result
            T1 = t1!=0
            T2 = t2!=0
            f_in = [0]*f.n_in();f_in[0]=[t1,t2]
            f_out = f.call(f_in)
            self.checkarray(f_out[0],DM([T1 and T2,T1 or T2,not T1]),"bool(%d,%d): %s" % (t1,t2,str(f_out[0])))
def test_SXineq(self):
    """Comparison operators on SX evaluated over a grid of numeric pairs."""
    self.message("SX ineq")
    x = SX.sym("x")
    y = SX.sym("y")
    f = Function("f", [vertcat(*[x,y])],[vertcat(*[x<y,x<=y,x>=y,x==y,x!=y])])
    for t1 in [-10,0.1,0,1,10]:
        for t2 in [-10,0.1,0,1,10]:
            T1 = t1
            T2 = t2
            f_in = [0]*f.n_in();f_in[0]=[t1,t2]
            f_out = f.call(f_in)
            # Reference: the same comparisons done on plain Python numbers
            self.checkarray(f_out[0],DM([T1 < T2,T1 <= T2, T1 >= T2, T1 == T2, T1 != T2]),"ineq(%d,%d)" % (t1,t2))
def test_SX_func2(self):
    """SX typemap constructors accept scalar-like arguments (number, SX).

    Fix: the local variable was named ``list``, shadowing the builtin;
    renamed to ``cases``.
    """
    self.message("SXmatrix typemaps constructors")
    simplify(SX.sym("x"))
    cases = [("number", 2.3, (1, 1)),
             ("SX", SX.sym("x"), (1, 1))]
    for name, arg, shape in cases:
        self.message(":" + name)
        # Double transpose should preserve the shape
        i = c.transpose(c.transpose(arg))
        self.assertEqual(i.shape[0], shape[0], "shape mismatch")
        self.assertEqual(i.shape[1], shape[1], "shape mismatch")
        SX(arg).is_empty()
def test_SX_func3(self):
    """vertcat typemaps accept SX matrices, scalars and empty lists."""
    self.message("vector(SXmatrix) typemaps constructors")
    scalar = SX.sym("y")
    column = SX.sym("x", 3, 1)
    # Each concatenation must simply succeed without raising.
    vertcat(*[column, column])
    vertcat(*[scalar, scalar])
    vertcat(*[column, []])
def test_eval(self):
    """A Function can be called directly with symbolic SX arguments."""
    self.message("Function eval")
    lhs = SX.sym("x", 2, 2)
    rhs = SX.sym("y", 2, 2)
    product = Function("f", [lhs, rhs], [lhs * rhs])
    # Symbolic call must not raise.
    product(lhs, rhs)
def test_symbolcheck(self):
    """Non-symbolic expressions are rejected as Function inputs."""
    self.message("Check if non-symbolic inputs are caught")
    make_bad = lambda: Function("f", [SX(0)], [SX.sym("x")])
    self.assertRaises(RuntimeError, make_bad)
def test_sparseconstr(self):
    """DM.ones over lower-triangular and diagonal sparsity patterns."""
    self.message("Check sparsity constructors")
    self.checkarray(DM.ones(Sparsity.lower(3)).full(),matrix([[1,0,0],[1,1,0],[1,1,1]]),"tril")
    self.checkarray(DM.ones(Sparsity.diag(3)).full(),matrix([[1,0,0],[0,1,0],[0,0,1]]),"diag")
def test_subsassignment(self):
    """Subscripted assignment on DM mirrors the same operations on a numpy array."""
    self.message("Check subscripted assignment")
    import numpy
    numpy.random.seed(42)
    xn = numpy.random.random((3,4))
    x=DM(xn)
    y=DM(7,8)
    z = numpy.zeros((7,8))  # numpy reference kept in lockstep with y
    y[0,0]=12; z[0,0] = 12
    self.checkarray(y,z,"scalar assignment")
    z[1:4,[2,4,5,6]]=xn
    y[1:4,[2,4,5,6]]=x
    self.checkarray(y,z,"range assignment")
    kl=[2,4,5,8]
    y.nz[kl]=1.0
    s=y.sparsity()
    # Mirror the nonzero assignment into z via the sparsity's row/col indices
    for k in kl:
        z[s.row()[k],s.get_col()[k]]=1.0
    self.checkarray(y,z,"nonzero scalar assignment")
    y.nz[kl]=DM(kl)
    cnt=0
    for k in kl:
        z[s.row()[k],s.get_col()[k]]=kl[cnt]
        cnt+=1
    self.checkarray(y,z,"nonzero range assignment")
@skip(not GlobalOptions.getSimplificationOnTheFly())
def test_substitute(self):
    """substitute() replaces a symbol and simplification folds the result."""
    self.message("Basic symbolic algebra: substitute")
    x=SX.sym("x")
    y=SX.sym("y")
    z = cos(x)*y
    self.assertTrue(depends_on(z,y))
    self.assertTrue(depends_on(z,x))
    # cos(0)*y simplifies to y on the fly
    w = substitute(z,x,0)
    self.assertTrue(w.is_symbolic())
    self.assertTrue(depends_on(w,y))
    self.assertFalse(depends_on(w,x))
    self.assertTrue(is_equal(w,y))
    r=w-y
    self.assertFalse(r.is_symbolic())
    self.assertTrue(r.is_zero())
    self.assertEqual(float(r),0)
    self.assertEqual(float(r),0)
    # Vector case: substituting 0 into y+6 leaves the constant 6
    y = SX.sym("y",2)
    y = substitute(y+6,y,0)
    self.assertEqual(int(y[0]),6)
    self.assertEqual(int(y[1]),6)
def test_primitivefunctions(self):
    """sign/heaviside/ramp/rectangle/triangle against tabulated reference values."""
    self.message("Primitive functions")
    x=SX.sym("x")
    nums = [-2,-1.5,-1,-0.5,-0.25,0,0.25,0.5,1,1.5,2]
    def test(fun,comment,nums,reference):
        # Evaluate fun at every point in nums and compare with reference.
        self.message(":"+comment)
        f = Function("f", [x],[fun(x)])
        for n,r in zip(nums,reference):
            f_in = [0]*f.n_in();f_in[0]=n
            f_out = f.call(f_in)
            self.assertEqual(f_out[0][0],r)
    test(casadi.sign,"sign",nums,[-1,-1,-1,-1,-1,0,1,1,1,1,1])
    test(casadi.heaviside,"heaviside",nums,[0,0,0,0,0,0.5,1,1,1,1,1])
    test(casadi.ramp,"ramp",nums,[0,0,0,0,0,0,0.25,0.50,1,1.5,2])
    test(casadi.rectangle,"rectangle",nums,[0,0,0,0.5,1,1,1,0.5,0,0,0])
    test(casadi.triangle,"triangle",nums,[0,0,0,0.5,0.75,1,0.75,0.5,0,0,0])
def test_taylor(self):
    """Univariate Taylor expansions of sin(x) around a, orders 0..3, plus a matrix case."""
    self.message("univariate taylor expansion")
    x=SX.sym("x")
    if GlobalOptions.getSimplificationOnTheFly():
        self.assertTrue(is_equal(taylor(sin(x),x),x))
    a_=0.13
    x_=0.15
    a = SX.sym("a")
    def test(e,r):
        # Evaluate expression e at (x_, a_) and compare with reference r.
        f = Function("f", [x,a],[e])
        f_in = [0]*f.n_in();f_in[0]=x_
        f_in[1]=a_
        f_out = f.call(f_in)
        self.assertAlmostEqual(f_out[0][0],r,10)
    # Orders 0..3 of the Taylor series of sin(x) around a
    test(taylor(sin(x),x,a,0),sin(a_))
    test(taylor(sin(x),x,a,1),sin(a_)+cos(a_)*(x_-a_))
    test(taylor(sin(x),x,a,2),sin(a_)+cos(a_)*(x_-a_)-(sin(a_)*(x_-a_)**2)/2.0)
    test(taylor(sin(x),x,a,3),sin(a_)+cos(a_)*(x_-a_)-(sin(a_)*(x_-a_)**2)/2.0-(cos(a_)*(x_-a_)**3)/6.0)
    # Taylor expansion applied elementwise to a dense matrix expression
    M=blockcat([[a*sin(x),a*cos(x)],[exp(a*x),a*x**2],[cos(x),0]])
    f = Function("f", [x,a],[taylor(M,x)])
    f_in = [0]*f.n_in();f_in[0]=x_
    f_in[1]=a_
    f_out = f.call(f_in)
    self.checkarray(f_out[0],matrix([[x_*a_,a_],[1+a_*x_,0],[1,0]]),"taylor on dense matrices")
def test_null(self):
    """Functions with empty ([]) inputs/outputs produce empty results."""
    self.message("Function null")
    x = SX.sym("x")
    f = Function("f", [x],[x**2,[]])
    f_out = f.call([0])
    self.assertTrue(f_out[1].is_empty())
    f = Function("f", [x,[]],[x**2,[]])
    f_out = f.call([0,0])
    self.assertTrue(f_out[1].is_empty())
    f_out = f.call([0,0])
    # Symbolic calls with [] or 0-sized SX placeholders for the empty input
    r = f.call([x,[]])
    self.assertTrue(r[1].is_empty())
    r = f.call([x,[]])
    self.assertTrue(r[1].is_empty())
    r = f.call([x,SX(0,1)])
    self.assertTrue(r[1].is_empty())
    r = f.call([x,SX(1,0)])
    self.assertTrue(r[1].is_empty())
    #self.assertRaises(Exception,lambda : f([x,x]))
    #self.assertRaises(Exception,lambda : f([[],[]]))
def test_mtaylor(self):
    """Multivariate Taylor expansions of sin(x+y), increasing order and mixed orders."""
    self.message("multivariate taylor expansions")
    x=SX.sym("x")
    y=SX.sym("y")
    a=SX.sym("a")
    b=SX.sym("b")
    a_=0.13
    x_=0.15
    b_=0.73
    y_=0.75
    def test(e,r):
        # Evaluate e at ((x_,y_),(a_,b_)) and compare with reference r.
        f = Function("f", [vertcat(*[x,y]),vertcat(*[a,b])],[e])
        f_in = [0]*f.n_in();f_in[0]=[x_,y_]
        f_in[1]=[a_,b_]
        f_out = f.call(f_in)
        self.assertAlmostEqual(f_out[0][0],r,10)
    test(mtaylor(sin(x+y),vertcat(*[x,y]),vertcat(*[a,b]),0),sin(a_+b_))
    test(mtaylor(sin(x+y),vertcat(*[x,y]),vertcat(*[a,b]),1),sin(a_+b_)+(cos(b_+a_)*(x_-a_)+cos(b_+a_)*(y_-b_)))
    # sol is redefined before each higher-order check with the matching
    # closed-form expansion.
    def sol(x,y,a,b):
        return sin(b+a)+(cos(b+a)*(x-a)+cos(b+a)*(y-b))-(sin(b+a)*(x-a)**2+2*sin(b+a)*(y-b)*(x-a)+sin(b+a)*(y-b)**2)/2
    test(mtaylor(sin(x+y),vertcat(*[x,y]),vertcat(*[a,b]),2),sol(x_,y_,a_,b_))
    def sol(x,y,a,b):
        return sin(b+a)+(cos(b+a)*(x-a)+cos(b+a)*(y-b))-(sin(b+a)*(x-a)**2+2*sin(b+a)*(y-b)*(x-a)+sin(b+a)*(y-b)**2)/2-(cos(b+a)*(x-a)**3+3*cos(b+a)*(y-b)*(x-a)**2+3*cos(b+a)*(y-b)**2*(x-a)+cos(b+a)*(y-b)**3)/6
    test(mtaylor(sin(x+y),vertcat(*[x,y]),vertcat(*[a,b]),3),sol(x_,y_,a_,b_))
    def sol(x,y,a,b):
        return (-2*sin(b+a)*(x-a)*(y-b)-sin(b+a)*(x-a)**2)/2+cos(b+a)*(y-b)-(cos(b+a)*(x-a)**3)/6+cos(b+a)*(x-a)+sin(b+a)
    # Mixed per-variable orders via the [1,2] weighting argument
    test(mtaylor(sin(x+y),vertcat(*[x,y]),vertcat(*[a,b]),3,[1,2]),sol(x_,y_,a_,b_))
    test(mtaylor(sin(x+y),vertcat(*[x,y]),vertcat(*[0,0]),4,[1,2]),(-3*x_**2*y_-x_**3)/6+y_+x_)
def test_issue107(self):
    """Regression: += on an SX must not mutate the original symbol in place."""
    self.message("Regression test for issue 107: +=")
    x=SX.sym("x")
    y=SX.sym("y")
    z=x
    z+=y
    # x must remain a pure symbol; z is now the sum expression
    self.assertTrue(x.is_symbolic())
    self.assertFalse(z.is_symbolic())
    # Same check repeated with fresh symbols
    x=SX.sym("x")
    y=SX.sym("y")
    z=x
    z+=y
    self.assertTrue(x.is_symbolic())
    self.assertFalse(z.is_symbolic())
def test_evalchecking(self):
    """Calling a Function with incompatibly shaped arguments must raise."""
    x = SX.sym("x",1,5)
    y = SX.sym("y",1,3)
    z = SX.sym("z",5,1)
    q = SX.sym("z",1,6)
    f = Function("f", [x],[x**2])
    self.assertRaises(RuntimeError, lambda : f(y))
    self.assertRaises(RuntimeError, lambda : f(q))
    # A transposed-but-size-compatible argument is accepted
    f(z)
def test_indexinglimits(self):
    """Out-of-range list indexing on SX raises for both read and write."""
    self.message("Limits of indexing")
    y = casadi.SX.sym("y", 3)
    self.assertRaises(RuntimeError,lambda : y[[0, 5]] )
    try:
        # Assignment with an out-of-range index must also raise
        y[[0, 5]] = SX.sym("a")
        self.assertTrue(False)
    except RuntimeError:
        pass
    # In-range indexed read and write succeed
    y[[0, 2]]
    y[[0, 2]] = SX.sym("a")
def test_issue181(self):
    """Regression: None inside a Function input list must raise, not crash."""
    self.message("Regression test #181")
    x = SX.sym("x")
    #self.assertRaises(TypeError,lambda : SX([x,None])) # FIXME: this is leaking memory
    self.assertRaises(NotImplementedError,lambda: Function("f", [[x], [None]], [[2 * x]]))
@known_bug() # Not implemented
def test_is_equal(self):
    """is_equal with depth 1 should recognise structurally identical expressions."""
    self.message("equivalent")
    x = SX.sym("x")
    a = x*x
    b = x*x
    self.assertTrue(a.is_equal(b,1))
@skip(not GlobalOptions.getSimplificationOnTheFly())
def test_SXsimplifications(self):
    """On-the-fly simplification collapses each op(x) to exactly x (and op(-x) to -x)."""
    self.message("simplifications")
    x = SX.sym("x")
    # Each entry of ops is a unary callable that is algebraically the identity.
    ops = []
    def temp(x):
        y = 0.5*x
        return y+y
    ops.append(temp)
    def temp(x):
        y = x/2
        return y+y
    ops.append(temp)
    def temp(x):
        y = x*0.5
        return y+y
    ops.append(temp)
    def temp(x):
        y = x*x
        return ((-y)/y)*(-x)
    ops.append(temp)
    ops.append(lambda x: ((-(x*x))/(x*x))*(-x))
    #ops.append(lambda x: ((-x*x)/(x*x))*(-x))
    def temp(x):
        y = x*x
        return (y/(-y))*(-x)
    ops.append(temp)
    def temp(x):
        y = x*x
        return ((-y)/(-y))*(x)
    ops.append(temp)
    ops.append(lambda x: (x-x) + x)
    ops.append(lambda x: ((x*x)-(x*x)) + x)
    ops.append(lambda x: 4*(0.25*x))
    ops.append(lambda x: 4*(x*0.25))
    ops.append(lambda x: 4*(0.25*x))
    ops.append(lambda x: 4*(x*0.25))
    ops.append(lambda x: (0.25*x)*4)
    ops.append(lambda x: (x*0.25)*4)
    ops.append(lambda x: (4*x)/4)
    ops.append(lambda x: 4*(x/4))
    ops.append(lambda x: (x/4)/0.25)
    ops.append(lambda x: x*(((4/x)*x)/4))
    ops.append(lambda x: x*((x*(2/x))/2))
    ops.append(lambda x: x*(((2*x)/x)/2))
    ops.append(lambda x: x*((x/(2*x))*2))
    ops.append(lambda x: x+0)
    ops.append(lambda x: 0+x)
    ops.append(lambda x: x-0)
    ops.append(lambda x: 0-(-x))
    ops.append(lambda x: x*1)
    ops.append(lambda x: 1*x)
    ops.append(lambda x: 1*(x*1))
    ops.append(lambda x: (1*x)*1)
    ops.append(lambda x: (0.5*x)+(0.5*x))
    ops.append(lambda x: (x/2)+(x/2))
    ops.append(lambda x: (x*0.5)+(0.5*x))
    ops.append(lambda x: (SX(4)-SX(4))+x)
    y = SX.sym("x")
    ops.append(lambda x: ((x+y)-(y+x))+x)
    ops.append(lambda x: ((x*y)-(y*x))+x)
    ops.append(lambda x: ((-x)-(-x))+x)
    for op in ops:
        # Numeric value must match and the printed form must collapse to "x"
        y = op(x)
        f = Function("f", [x],[y])
        f_in = [0]*f.n_in();f_in[0]=0.3
        f_out = f.call(f_in)
        self.checkarray(f_out[0],array(DM(op(0.3))),"simplifications")
        self.assertEqual(str(y),"x")
        # Same for the negated argument: must collapse to "(-x)"
        y = op(-x)
        f = Function("f", [x],[y])
        f_in = [0]*f.n_in();f_in[0]=0.3
        f_out = f.call(f_in)
        self.checkarray(f_out[0],array(DM(op(-0.3))),"simplifications")
        self.assertEqual(str(y),"(-x)")
def test_truth(self):
    """Truth value of SX: numeric constants convert to bool, symbols/vectors raise."""
    self.message("Truth values")
    # Constants behave the same whether wrapped once or twice in SX.
    for wrap in (SX, lambda v: SX(SX(v))):
        self.assertRaises(Exception, lambda: bool(SX.sym("x")))
        self.assertRaises(Exception, lambda: bool(SX.sym("x") > 0))
        self.assertTrue(bool(wrap(1)))
        self.assertFalse(bool(wrap(0)))
        self.assertTrue(bool(wrap(0.2)))
        self.assertTrue(bool(wrap(-0.2)))
    # A non-scalar SX has no truth value either.
    self.assertRaises(Exception, lambda: bool(SX([2.0, 3])))
def test_if_else(self):
    """if_else selects the right branch for both constant and expression branches."""
    x = SX.sym("x")
    y = if_else(x,1,2)
    f = Function("f", [x],[y])
    f_in = [0]*f.n_in();f_in[0]=1
    f_out = f.call(f_in)
    self.assertTrue(f_out[0]==1,"if_else")
    f_in = [0]*f.n_in();f_in[0]=0
    f_out = f.call(f_in)
    self.assertTrue(f_out[0]==2,"if_else")
    # Branches that are expressions of x: x**2 when x>1, else x**3
    x0 = 2.1
    y = if_else(x>1,x**2,x**3)
    f = Function("f", [x],[y])
    f_in = [0]*f.n_in();f_in[0]=x0
    f_out = f.call(f_in)
    self.checkarray(f_out[0],x0**2,"if_else sens")
    x0 = -2.1
    f_in = [0]*f.n_in();f_in[0]=x0
    f_out = f.call(f_in)
    self.checkarray(f_out[0],x0**3,"if_else sens")
def test_is_regular(self):
    """is_regular is true for finite constants, false for inf, raises for symbols."""
    x = SX.sym("x")
    self.assertTrue(SX(0).is_regular())
    self.assertFalse(SX(inf).is_regular())
    with self.assertRaises(Exception):
        self.assertTrue(x.nz[0])
    self.assertTrue(SX(DM([0,1])).is_regular())
    self.assertFalse(SX(DM([0,inf])).is_regular())
    self.assertFalse(vertcat(*[x,inf]).is_regular())
    # Purely symbolic vectors cannot be classified and must raise
    with self.assertRaises(Exception):
        self.assertFalse(vertcat(*[x,x]).is_regular())
def test_symvar(self):
    """symvar returns all free symbols of an expression (order checked when simplifying)."""
    a = SX.sym("a")
    b = SX.sym("b")
    c = SX.sym("c")
    e = cos(a*b) + c
    w = symvar(e)
    self.assertEqual(len(w),3)
    if GlobalOptions.getSimplificationOnTheFly():
        self.assertTrue(is_equal(w[0],a))
        self.assertTrue(is_equal(w[1],b))
        self.assertTrue(is_equal(w[2],c))
def test_poly_coeff(self):
    """poly_coeff extracts coefficients in descending degree order."""
    x =SX.sym("x")
    a= SX.sym("a")
    c=SX.sym("c")
    # 12*x^4 + 0*x^3 + 1*x^2 + a*x + c
    p=poly_coeff(12*x**4+x**2+a*x+c,x)
    self.assertTrue(is_equal(p[0],12))
    self.assertTrue(is_equal(p[1],0))
    self.assertTrue(is_equal(p[2],1))
    self.assertTrue(is_equal(p[3],a))
    self.assertTrue(is_equal(p[4],c))
    # (x-a)(x+a) = x^2 + 0*x - a^2; only the leading coefficients are checked
    p=poly_coeff((x-a)*(x+a),x)
    self.assertTrue(is_equal(p[0],1))
    self.assertTrue(is_equal(p[1],0))
def test_poly_roots(self):
    """poly_roots for degrees 1 through 4, compared with closed-form/known roots."""
    # Linear: root of a*x + b is -b/a
    p = SX.sym("[a,b]")
    r = poly_roots(p)
    f = Function("f", [p],[r])
    f_in = [0]*f.n_in()
    f_in[0]=DM([2,7])
    a_ = f_in[0][0]
    b_ = f_in[0][1]
    f_out = f.call(f_in)
    f_out[0]
    self.checkarray(f_out[0],vertcat(*[-b_/a_]))
    # Linear polynomial padded with a trailing zero coefficient: extra root at 0
    p = SX.sym("[a,b]")
    r = poly_roots(vertcat(*[p,0]))
    f = Function("f", [p],[r])
    f_in = [0]*f.n_in();f_in[0]=DM([2,7])
    a_ = f_in[0][0]
    b_ = f_in[0][1]
    f_out = f.call(f_in)
    f_out[0]
    self.checkarray(f_out[0],vertcat(*[-b_/a_,0]))
    # Quadratic: compared with the quadratic formula
    p = SX.sym("[a,b,c]")
    r = poly_roots(p)
    f = Function("f", [p],[r])
    f_in = [0]*f.n_in();f_in[0]=DM([1.13,7,3])
    a_ = f_in[0][0]
    b_ = f_in[0][1]
    c_ = f_in[0][2]
    d = b_**2-4*a_*c_
    f_out = f.call(f_in)
    x0 = (-b_-sqrt(d))/2/a_
    x1 = (-b_+sqrt(d))/2/a_
    f_out[0]
    self.checkarray(f_out[0],vertcat(*[x0,x1]))
    # Cubic: compared with precomputed numeric roots
    p = SX.sym("[a,b,c,d]")
    r = poly_roots(p)
    f = Function("f", [p],[r])
    f_in = [0]*f.n_in();f_in[0]=DM([11,1.3,-1.7,0.1])
    f_out = f.call(f_in)
    f_out[0]
    self.checkarray(f_out[0],DM([0.298028,-0.479787,0.0635774]),digits=5)
    # Quartic: polynomial with known integer roots 5, 3, -4, -6
    p = SX.sym("[a,b,c,d,e]")
    r = poly_roots(p)
    f = Function("f", [p],[r])
    f_in = [0]*f.n_in();f_in[0]=DM([3,6,-123, -126,1080])
    f_out = f.call(f_in)
    f_out[0]
    self.checkarray(f_out[0],DM([5,3,-4,-6]),digits=5)
def test_eig_symbolic(self):
    """eig_symbolic on dense, diagonal, block-diagonal and triangular-structured matrices."""
    x = SX.sym("x",2,2)
    f = Function("f", [x],[eig_symbolic(x)])
    f_in = [0]*f.n_in();f_in[0]=DM([[2,0.1],[0.3,0.7]])
    f_out = f.call(f_in)
    self.checkarray(f_out[0],DM([0.67732,2.02268]),digits=5)
    # Diagonal matrices: eigenvalues are the diagonal entries themselves
    x = SX.sym("x",2)
    f = Function("f", [x],[eig_symbolic(c.diag(x))])
    f_in = [0]*f.n_in();f_in[0]=DM([3,7])
    f_out = f.call(f_in)
    self.checkarray(f_out[0],f_in[0])
    x = SX.sym("x",5)
    f = Function("f", [x],[eig_symbolic(c.diag(x))])
    f_in = [0]*f.n_in();f_in[0]=DM([3,7,2,1,6])
    f_out = f.call(f_in)
    self.checkarray(f_out[0],f_in[0])
    # Block-diagonal: eigenvalues are the union of the blocks' eigenvalues
    x = SX.sym("x",2,2)
    y = SX.sym("y",2)
    f = Function("f", [x,y],[eig_symbolic(diagcat(*[x,c.diag(y)]))])
    f_in = [0]*f.n_in();f_in[0]=DM([[2,0.1],[0.3,0.7]])
    f_in[1]=[3,7]
    f_out = f.call(f_in)
    self.checkarray(f_out[0],DM([0.67732,2.02268,3,7]),digits=5)
    # Sparsified 3x3 with a zero first column below the diagonal
    x = SX.sym("x",3,3)
    x[2,0] = 0
    x[1,0] = 0
    x = sparsify(x)
    e = eig_symbolic(x)
    f = Function("f", [x],[e])
    f_in = [0]*f.n_in();f_in[0]=DM(f.sparsity_in(0),list(range(1,8)))
    f_in[0].print_dense()
    f_out = f.call(f_in)
    self.checkarray(f_out[0],DM([1,-0.29150,10.29150]),digits=5)
    # Upper-triangular structure: eigenvalues are the diagonal entries
    x = SX.sym("x",3,3)
    x[2,0] = 0
    x[1,0] = 0
    x[2,1] = 0
    x = sparsify(x)
    e = eig_symbolic(x)
    f = Function("f", [x],[e])
    f_in = [0]*f.n_in();f_in[0]=DM(f.sparsity_in(0),list(range(1,7)))
    f_in[0].print_dense()
    f_out = f.call(f_in)
    self.checkarray(f_out[0],DM([1,3,6]),digits=5)
    x = SX.sym("x",Sparsity.upper(5))
    f = Function("f", [x],[eig_symbolic(x)])
    fin = DM(x.sparsity(),0)
    fin[Sparsity.diag(5)] = c.diag(list(range(5)))
    self.checkarray(f(fin), DM(list(range(5))))
def test_jacobian_empty(self):
    """Jacobian shapes when either the expression or the variable is empty."""
    x = SX.sym("x", 3)
    # d(empty)/dx: 0 rows, one column per variable entry
    rows, cols = jacobian(DM(0, 0), x).shape
    self.assertEqual(rows, 0)
    self.assertEqual(cols, 3)
    # dx/d(empty): one row per expression entry, 0 columns
    rows, cols = jacobian(x, SX.sym("x", 0, 4)).shape
    self.assertEqual(rows, 3)
    self.assertEqual(cols, 0)
def test_empty_SX(self):
    """SX([]) is a 0-by-1 matrix and concatenates with scalars without error."""
    rows, cols = SX([]).shape
    self.assertEqual(rows, 0)
    self.assertEqual(cols, 1)
    stacked = vertcat(*(SX.sym("x"), SX([])))
def test_mul_sparsity(self):
    """mac with a target sparsity equals the dense product restricted to that pattern."""
    N = 10
    x = SX.sym("x",N,N)
    y = SX.sym("y",N,N)
    x_ = self.randDM(N,N)
    y_ = self.randDM(N,N)
    # Target pattern: diagonal plus one off-diagonal entry at (1,3)
    filt = Sparsity.diag(N)+Sparsity.triplet(N,N,[1],[3])
    f = Function("f", [x,y],[mtimes(x,y)])
    f_in = [0]*f.n_in();f_in[0]=x_
    f_in[1]=y_
    g = Function("g", [x,y],[mac(x,y,SX.zeros(filt))])
    g_in = [0]*g.n_in();g_in[0]=x_
    g_in[1]=y_
    f_out = f.call(f_in)
    g_out = g.call(g_in)
    self.checkarray(IM.ones(filt),IM.ones(g.sparsity_out(0)))
    self.checkarray(f_out[0][filt],g_out[0])
@skip(platform_arch==32)
@memory_heavy()
@unittest.skipIf(sys.version_info >= (3, 0),"pickle is not compatible")
def test_large_hessian(self):
    """Hessian sparsity and values of x'Hx for a large sparse H loaded from disk.

    Fix: the pickle file was opened in text mode and never closed; a pickle
    stream is binary data, so open with "rb" inside a context manager.
    """
    import pickle
    with open("../data/apoa1-2.pkl", "rb") as pkl:
        A = pickle.load(pkl)
    H = DM(A, list(range(A.nnz())))
    H = H + H.T  # symmetrize
    H = H[:20000, :20000]
    x = SX.sym("x", H.size1())
    f = Function("f", [x], [mtimes([x.T, H, x])], {'verbose': True})
    H *= 2  # the Hessian of x'Hx (H symmetric) is 2H
    h = f.hessian_old(0, 0)
    h_out = h.call([0])
    self.assertTrue(h.sparsity_out(0) == H.sparsity())
    self.checkarray(h_out[0].nonzeros(), H.nonzeros())
def test_mxnulloutput(self):
    """Functions whose output is a 0-sized SX keep that shape under SX and MX calls."""
    a = SX(5,0)
    b = SX.sym("x",2)
    bm = MX.sym("x",2)
    f = Function("f", [b],[a])
    # Call with an MX argument
    c = f(bm)
    self.assertEqual(c.size1(),5)
    self.assertEqual(c.size2(),0)
    # Call with an SX argument
    c = f(b)
    self.assertEqual(c.size1(),5)
    self.assertEqual(c.size2(),0)
    # Fully empty (0x0) output
    a = SX(0,0)
    f = Function("f", [b],[a])
    c = f(bm)
    self.assertEqual(c.size1(),0)
    self.assertEqual(c.size2(),0)
    c = f(b)
    self.assertEqual(c.size1(),0)
    self.assertEqual(c.size2(),0)
def test_mxnull(self):
    """Matrix products involving empty or all-structural-zero SX have zero nonzeros."""
    for left, right in ((SX(5, 0), SX(0, 3)),   # empty inner dimension
                        (SX(5, 3), SX(3, 4))):  # structurally zero operands
        self.assertEqual(mtimes(left, right).nnz(), 0)
def test_mxnullop(self):
    """Elementwise arithmetic between a 0x0 SX and a non-empty SX must raise."""
    empty = SX(0, 0)
    sym = SX.sym("x", 2, 3)
    with self.assertRaises(RuntimeError):
        result = sym + empty
    with self.assertRaises(RuntimeError):
        result = sym / empty
def test_copysign(self):
    """copysign values and both partial derivatives over all sign combinations."""
    x = SX.sym("x")
    y = SX.sym("y")
    z = copysign(x,y)
    f = Function("f", [x,y],[z])
    # Value checks: |x| with the sign of y (y == 0 counts as positive)
    f_in = [0]*f.n_in();f_in[0]=2
    f_in[1]=0.5
    f_out = f.call(f_in)
    self.checkarray(f_out[0],DM([2]))
    f_in = [0]*f.n_in();f_in[0]=2
    f_in[1]=-0.5
    f_out = f.call(f_in)
    self.checkarray(f_out[0],DM([-2]))
    f_in = [0]*f.n_in();f_in[0]=-2
    f_in[1]=0.5
    f_out = f.call(f_in)
    self.checkarray(f_out[0],DM([2]))
    f_in = [0]*f.n_in();f_in[0]=-2
    f_in[1]=-0.5
    f_out = f.call(f_in)
    self.checkarray(f_out[0],DM([-2]))
    f_in = [0]*f.n_in();f_in[0]=2
    f_in[1]=0
    f_out = f.call(f_in)
    self.checkarray(f_out[0],DM([2]))
    # d/dx copysign(x,y) is +/-1 depending on whether the signs agree
    J = f.jacobian_old(0, 0)
    J_in = [0]*J.n_in();J_in[0]=2
    J_in[1]=0.5
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([1]))
    J_in = [0]*J.n_in();J_in[0]=2
    J_in[1]=-0.5
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([-1]))
    J_in = [0]*J.n_in();J_in[0]=-2
    J_in[1]=0.5
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([1]))
    J_in = [0]*J.n_in();J_in[0]=-2
    J_in[1]=-0.5
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([-1]))
    J_in = [0]*J.n_in();J_in[0]=2
    J_in[1]=0
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([1]))
    # d/dy copysign(x,y) is zero everywhere (sign of y is piecewise constant)
    J = f.jacobian_old(1, 0)
    J_in = [0]*J.n_in();J_in[0]=2
    J_in[1]=0.5
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([0]))
    J_in = [0]*J.n_in();J_in[0]=2
    J_in[1]=-0.5
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([0]))
    J_in = [0]*J.n_in();J_in[0]=-2
    J_in[1]=0.5
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([0]))
    J_in = [0]*J.n_in();J_in[0]=-2
    J_in[1]=-0.5
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([0]))
    J_in = [0]*J.n_in();J_in[0]=2
    J_in[1]=0
    J_out = J.call(J_in)
    self.checkarray(J_out[0],DM([0]))
def test_depends_on(self):
    """depends_on for scalar and vector expressions against scalar and vector variables."""
    a = SX.sym("a")
    b = SX.sym("b")
    self.assertTrue(depends_on(a**2,a))
    self.assertTrue(depends_on(a,a))
    self.assertFalse(depends_on(0,a))
    # Dependence tested against a vector of variables
    self.assertTrue(depends_on(a**2,vertcat(*[a,b])))
    self.assertTrue(depends_on(a,vertcat(*[a,b])))
    self.assertFalse(depends_on(0,vertcat(*[a,b])))
    self.assertTrue(depends_on(b**2,vertcat(*[a,b])))
    self.assertTrue(depends_on(b,vertcat(*[a,b])))
    self.assertTrue(depends_on(a**2+b**2,vertcat(*[a,b])))
    self.assertTrue(depends_on(a+b,vertcat(*[a,b])))
    # Vector expressions: dependence holds if any entry depends on the variable
    self.assertTrue(depends_on(vertcat(*[0,a]),a))
    self.assertTrue(depends_on(vertcat(*[a,0]),a))
    self.assertTrue(depends_on(vertcat(*[a**2,b**2]),vertcat(*[a,b])))
    self.assertTrue(depends_on(vertcat(*[a,0]),vertcat(*[a,b])))
    self.assertTrue(depends_on(vertcat(*[0,b]),vertcat(*[a,b])))
    self.assertTrue(depends_on(vertcat(*[b,0]),vertcat(*[a,b])))
    self.assertFalse(depends_on(vertcat(*[0,0]),vertcat(*[a,b])))
@requires("is_smooth")
def test_is_smooth(self):
x = SX.sym("a",2,2)
import warnings
with warnings.catch_warnings():
warnings.simplefilter("error",DeprecationWarning)
with self.assertRaises(Exception):
is_smooth(x)
warnings.simplefilter("ignore")
is_smooth(x)
def test_which_depends(self):
    """which_depends per-expression and per-variable, orders 1 and 2, for SX and MX."""
    for X in [SX,MX]:
        x = X.sym("x")
        y = X.sym("y")
        p = X.sym("p")
        e = vertcat(0,x,y,p,2*p**3,x*y,x*p,sin(x),cos(y),sqrt(x+y),p*p*x,x*y*p)
        # Third argument: differentiation order; fourth: True = per expression entry
        self.checkarray(which_depends(e, vertcat(x,y),2,True),[0, 0, 0, 0,0, 1, 0, 1, 1, 1, 0, 1])
        self.checkarray(which_depends(e, vertcat(x,y),1,True),[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1])
        z =X.sym("z")
        # False = one flag per variable instead of per expression entry
        e = vertcat(x*p,x+y)
        self.checkarray(which_depends(e, vertcat(x,y,p,z),2,False),[True, False, True, False])
        self.checkarray(which_depends(e, vertcat(x,y,p,z),1,False),[True, True, True, False])
        e = vertcat(x*p,x+z*y)
        self.checkarray(which_depends(e, vertcat(x,y,p),2,False),[True, False, True])
        self.checkarray(which_depends(e, vertcat(x,y,p),1,False),[True, True, True])
        e = vertcat(x*p,x+z*y)
        self.checkarray(which_depends(e, vertcat(x,y,p,z),2,False),[True, True, True, True])
        self.checkarray(which_depends(e, vertcat(x,y,p,z),1,False),[True, True, True, True])
        e = vertcat(sin(x+y)+p)
        self.checkarray(which_depends(e, vertcat(x,y,p,z),2,False),[True, True, False, False])
        self.checkarray(which_depends(e, vertcat(x,y,p,z),1,False),[True, True, True, False])
        e = vertcat(sin(x)*p**2,y**2)
        #self.checkarray(which_depends(e, vertcat(x,y,p),3,True),[True, False])
        #self.checkarray(which_depends(e, vertcat(x,y,p),3,False),[True, False, True])
        self.checkarray(which_depends(e, vertcat(x,y,p),2,True),[True, True])
        self.checkarray(which_depends(e, vertcat(x,y,p),2,False),[True, True, True])
        e = vertcat(x**2*p,y)
        #self.checkarray(which_depends(e, vertcat(x,y,p),3,True),[True, False])
        #self.checkarray(which_depends(e, vertcat(x,y,p),3,False),[True, False, False])
        self.checkarray(which_depends(e, vertcat(x,y,p),2,True),[True, False])
        self.checkarray(which_depends(e, vertcat(x,y,p),2,False),[True, False, True])
def test_if_else_zero_sens(self):
    """if_else value and jacobian match the active branch computed standalone."""
    for X in [SX]:
        x=X.sym('x')
        a = 1+3*x+sqrt(3*x)*x+7*x
        b = 1+2*x+sin(2*x)*x +x
        z = if_else(x>0,a,b)*x
        # f combines both branches; fa/fb are the individual branch references
        f = Function("f",[x],[z,jacobian(z,x)])
        fa = Function("f",[x],[a*x,jacobian(a*x,x)])
        fb = Function("f",[x],[b*x,jacobian(b*x,x)])
        for i,j in zip(f([3]),fa([3])):
            self.checkarray(i,j)
        for i,j in zip(f([-3]),fb([-3])):
            self.checkarray(i,j)
        f = Function("f",[x],[z])
        fa = Function("f",[x],[a*x])
        fb = Function("f",[x],[b*x])
        self.checkfunction(f,fa,inputs=[3])
        self.checkfunction(f,fb,inputs=[-3],evals=1)
def test_pw_const(self):
    """Piecewise-constant interpolation: values at, between and beyond breakpoints."""
    t= SX.sym("t")
    # Breakpoints [0,2,3] with values [7,1,3,5] on the four intervals
    e = pw_const(t, [0,2,3],[7,1,3,5])
    E = Function("E",[t],[e])
    self.checkarray(E(-2),7)
    self.checkarray(E(-1),7)
    self.checkarray(E(0),1)
    self.checkarray(E(1),1)
    self.checkarray(E(1.9999),1)
    self.checkarray(E(2),3)
    self.checkarray(E(2.5),3)
    self.checkarray(E(3),5)
    self.checkarray(E(10),5)
def test_pw_lin(self):
    """Piecewise-linear interpolation: interior points and linear extrapolation."""
    t= SX.sym("t")
    # Knots [0,2,3,5] with values [7,1,3,2]
    e = pw_lin(t, [0,2,3,5], [7,1,3,2])
    E = Function("E",[t],[e])
    self.checkarray(E(-2),13)
    self.checkarray(E(-1),10)
    self.checkarray(E(0),7)
    self.checkarray(E(1),4)
    self.checkarray(E(2),1)
    self.checkarray(E(2.5),2)
    self.checkarray(E(3),3)
    self.checkarray(E(4),2.5)
    self.checkarray(E(5),2)
    self.checkarray(E(7),1)
def test_numpy_error(self):
    """numpy functions applied to SX raise with a pointer to the CasADi equivalent."""
    x = SX.sym("x",3)
    with self.assertInException("Use an equivalent CasADi function"):
        np.linalg.norm(x)
def test_quadratic(self):
    """is_quadratic / is_linear classification and quadratic_coeff / linear_coeff extraction."""
    for X in [SX,MX]:
        x = X.sym("x")
        p = X.sym("p")
        y = X.sym("y")
        # Classification in x (p is treated as a parameter)
        self.assertFalse(is_quadratic(sin(x),x))
        self.assertFalse(is_quadratic(x**3,x))
        self.assertTrue(is_quadratic(x**2,x))
        self.assertTrue(is_quadratic(4*x,x))
        self.assertTrue(is_quadratic(5,x))
        self.assertFalse(is_quadratic(sin(x)*p**4,x))
        self.assertFalse(is_quadratic(x**3*p**4,x))
        self.assertTrue(is_quadratic(x**2*p**4,x))
        self.assertTrue(is_quadratic(x*p**4,x))
        self.assertTrue(is_quadratic(5*p**4,x))
        self.assertFalse(is_linear(sin(x),x))
        self.assertFalse(is_linear(x**3,x))
        self.assertFalse(is_linear(x**2,x))
        self.assertTrue(is_linear(3*x,x))
        self.assertTrue(is_linear(5,x))
        self.assertFalse(is_linear(sin(x)*p**4,x))
        self.assertFalse(is_linear(x**3*p**4,x))
        self.assertFalse(is_linear(x**2*p**4,x))
        self.assertTrue(is_linear(x*p**4,x))
        self.assertTrue(is_linear(5*p**4,x))
        # quadratic_coeff: z == 0.5*(x,y)'A(x,y) + b'(x,y) + c
        z = x**2+3*y**2 + 0.5*x*y + 7*x + 6*y+7
        [A,b,c] = quadratic_coeff(z,vertcat(x,y))
        with self.assertInException("non-quadratic"):
            [A,b,c] = quadratic_coeff(x**2+3*y**2 + 0.5*x*y + 7*x + 6*y+7+sin(x),vertcat(x,y))
        with self.assertInException("scalar"):
            [A,b,c] = quadratic_coeff(vertcat(x,y),x)
        z = x**2+3*y**2 + 0.5*x*y -p*y + 7*x + 6*y+7
        [A,b,c] = quadratic_coeff(z,vertcat(x,y))
        xy = vertcat(x,y)
        # Reconstruct z from the extracted coefficients and verify equivalence
        e = 0.5*bilin(A,xy,xy)+dot(b,xy)+c
        f = Function('f',[xy,p],[z])
        f2 = Function('f',[xy,p],[e])
        self.checkfunction(f,f2,inputs=[1.1,1.3])
        # linear_coeff: z == A*(x,y) + b, error paths for non-linear and matrix input
        with self.assertInException("non-linear"):
            [A,b] = linear_coeff(x**2+3*y**2 + 0.5*x*y + 7*x + 6*y+7,vertcat(x,y))
        with self.assertInException("vector"):
            [A,b] = linear_coeff(blockcat([[x,y],[y,x]]),x)
        z = vertcat(7*x + 6*y+7 ,5 -p*y )
        [A,b] = linear_coeff(z,xy)
        e = mtimes(A,xy)+b
        f = Function('f',[xy,p],[z])
        f2 = Function('f',[xy,p],[e])
        self.checkfunction(f,f2,inputs=[1.1,1.3])
def test_evalf(self):
    """evalf succeeds for constant SX and reports free variables otherwise."""
    free_var = SX.sym("x")
    constant = SX(5)
    self.checkarray(evalf(constant), 5)
    with self.assertInException("since variables [x] are free"):
        evalf(free_var)
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.random.seed",
"casadi.diag",
"casadi.nnz",
"numpy.ones",
"casadi.inv",
"unittest.main",
"unittest.skipIf",
"warnings.simplefilter",
"casadi.size2",
"warnings.catch_warnings",
"numpy.linalg.det",
"casadi.det",
"numpy.linalg.inv",
"numpy.matrix",
"casadi.transpose",
"casadi.size1"... | [((34605, 34676), 'unittest.skipIf', 'unittest.skipIf', (['(sys.version_info >= (3, 0))', '"""pickle is not compatible"""'], {}), "(sys.version_info >= (3, 0), 'pickle is not compatible')\n", (34620, 34676), False, 'import unittest\n'), ((45163, 45178), 'unittest.main', 'unittest.main', ([], {}), '()\n', (45176, 45178), False, 'import unittest\n'), ((5636, 5652), 'numpy.array', 'array', (['[[0.738]]'], {}), '([[0.738]])\n', (5641, 5652), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((5931, 5947), 'numpy.array', 'array', (['[[0.738]]'], {}), '([[0.738]])\n', (5936, 5947), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((6283, 6307), 'numpy.array', 'array', (['[0.738, 0.9, 0.3]'], {}), '([0.738, 0.9, 0.3])\n', (6288, 6307), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((6601, 6625), 'numpy.array', 'array', (['[0.738, 0.9, 0.3]'], {}), '([0.738, 0.9, 0.3])\n', (6606, 6625), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((6894, 6946), 'numpy.array', 'array', (['[[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]]'], {}), '([[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]])\n', (6899, 6946), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((7043, 7109), 'numpy.array', 'array', (['[[0.738, 0.2, 0.3], [0.1, 0.39, -6], [0.99, 0.999999, -12]]'], {}), '([[0.738, 0.2, 0.3], [0.1, 0.39, -6], [0.99, 0.999999, -12]])\n', (7048, 7109), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((8148, 8200), 'numpy.array', 'array', (['[[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]]'], {}), '([[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]])\n', (8153, 8200), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((8207, 8248), 'numpy.array', 'array', (['[[1.738, 0.6], [0.7, 12], [0, -6]]'], {}), '([[1.738, 0.6], [0.7, 12], [0, -6]])\n', (8212, 8248), False, 'from numpy import random, array, linalg, matrix, 
zeros, ones\n'), ((8518, 8570), 'numpy.array', 'array', (['[[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]]'], {}), '([[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]])\n', (8523, 8570), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((8577, 8618), 'numpy.array', 'array', (['[[1.738, 0.6], [0.7, 12], [0, -6]]'], {}), '([[1.738, 0.6], [0.7, 12], [0, -6]])\n', (8582, 8618), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((9855, 9907), 'numpy.array', 'array', (['[[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]]'], {}), '([[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]])\n', (9860, 9907), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((11452, 11468), 'numpy.array', 'array', (['[0, 2, 3]'], {}), '([0, 2, 3])\n', (11457, 11468), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((14860, 14893), 'numpy.matrix', 'matrix', (['[[1, 1], [3, 2], [4, 27]]'], {}), '([[1, 1], [3, 2], [4, 27]])\n', (14866, 14893), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((16826, 16834), 'casadi.det', 'c.det', (['x'], {}), '(x)\n', (16831, 16834), True, 'import casadi as c\n'), ((16858, 16866), 'casadi.det', 'c.det', (['y'], {}), '(y)\n', (16863, 16866), True, 'import casadi as c\n'), ((19114, 19135), 'numpy.random.seed', 'numpy.random.seed', (['(42)'], {}), '(42)\n', (19131, 19135), False, 'import numpy\n'), ((19145, 19172), 'numpy.random.random', 'numpy.random.random', (['(3, 4)'], {}), '((3, 4))\n', (19164, 19172), False, 'import numpy\n'), ((19209, 19228), 'numpy.zeros', 'numpy.zeros', (['(7, 8)'], {}), '((7, 8))\n', (19220, 19228), False, 'import numpy\n'), ((18872, 18913), 'numpy.matrix', 'matrix', (['[[1, 0, 0], [1, 1, 0], [1, 1, 1]]'], {}), '([[1, 0, 0], [1, 1, 0], [1, 1, 1]])\n', (18878, 18913), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((18967, 19008), 'numpy.matrix', 'matrix', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 
0], [0, 1, 0], [0, 0, 1]])\n', (18973, 19008), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((22092, 22141), 'numpy.matrix', 'matrix', (['[[x_ * a_, a_], [1 + a_ * x_, 0], [1, 0]]'], {}), '([[x_ * a_, a_], [1 + a_ * x_, 0], [1, 0]])\n', (22098, 22141), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((35295, 35304), 'casadi.size1', 'c.size1', ([], {}), '()\n', (35302, 35304), True, 'import casadi as c\n'), ((35330, 35339), 'casadi.size2', 'c.size2', ([], {}), '()\n', (35337, 35339), True, 'import casadi as c\n'), ((35381, 35390), 'casadi.size1', 'c.size1', ([], {}), '()\n', (35388, 35390), True, 'import casadi as c\n'), ((35416, 35425), 'casadi.size2', 'c.size2', ([], {}), '()\n', (35423, 35425), True, 'import casadi as c\n'), ((35519, 35528), 'casadi.size1', 'c.size1', ([], {}), '()\n', (35526, 35528), True, 'import casadi as c\n'), ((35554, 35563), 'casadi.size2', 'c.size2', ([], {}), '()\n', (35561, 35563), True, 'import casadi as c\n'), ((35605, 35614), 'casadi.size1', 'c.size1', ([], {}), '()\n', (35612, 35614), True, 'import casadi as c\n'), ((35640, 35649), 'casadi.size2', 'c.size2', ([], {}), '()\n', (35647, 35649), True, 'import casadi as c\n'), ((35758, 35765), 'casadi.nnz', 'c.nnz', ([], {}), '()\n', (35763, 35765), True, 'import casadi as c\n'), ((35849, 35856), 'casadi.nnz', 'c.nnz', ([], {}), '()\n', (35854, 35856), True, 'import casadi as c\n'), ((39052, 39077), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (39075, 39077), False, 'import warnings\n'), ((39085, 39135), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'DeprecationWarning'], {}), "('error', DeprecationWarning)\n", (39106, 39135), False, 'import warnings\n'), ((39203, 39234), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (39224, 39234), False, 'import warnings\n'), ((7280, 7293), 'numpy.linalg.det', 'linalg.det', (['x'], {}), '(x)\n', (7290, 
7293), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((7359, 7370), 'casadi.inv', 'c.inv', (['x[0]'], {}), '(x[0])\n', (7364, 7370), True, 'import casadi as c\n'), ((7384, 7397), 'numpy.linalg.inv', 'linalg.inv', (['x'], {}), '(x)\n', (7394, 7397), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((18061, 18077), 'casadi.transpose', 'c.transpose', (['arg'], {}), '(arg)\n', (18072, 18077), True, 'import casadi as c\n'), ((3316, 3330), 'numpy.zeros', 'zeros', (['x.shape'], {}), '(x.shape)\n', (3321, 3330), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((3395, 3408), 'numpy.ones', 'ones', (['x.shape'], {}), '(x.shape)\n', (3399, 3408), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((7254, 7265), 'casadi.det', 'c.det', (['x[0]'], {}), '(x[0])\n', (7259, 7265), True, 'import casadi as c\n'), ((10002, 10011), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10008, 10011), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((10106, 10115), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10112, 10115), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((10210, 10219), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10216, 10219), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((10315, 10324), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10321, 10324), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((10417, 10426), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10423, 10426), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((10517, 10526), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10523, 10526), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((10617, 10626), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10623, 10626), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), 
((10717, 10726), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10723, 10726), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((10818, 10827), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10824, 10827), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((10921, 10930), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (10927, 10930), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((11029, 11038), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (11035, 11038), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((11145, 11154), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (11151, 11154), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((11259, 11268), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (11265, 11268), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((11378, 11413), 'numpy.matrix', 'matrix', (['[x[0, 0], x[2, 0], x[0, 1]]'], {}), '([x[0, 0], x[2, 0], x[0, 1]])\n', (11384, 11413), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((12259, 12268), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (12265, 12268), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((12375, 12384), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (12381, 12384), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((12491, 12500), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (12497, 12500), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((12607, 12616), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (12613, 12616), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((12724, 12733), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (12730, 12733), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((12838, 12847), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (12844, 12847), False, 
'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((12950, 12959), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (12956, 12959), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((13062, 13071), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (13068, 13071), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((13174, 13183), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (13180, 13183), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((13287, 13296), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (13293, 13296), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((13402, 13411), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (13408, 13411), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((13522, 13531), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (13528, 13531), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((13650, 13659), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (13656, 13659), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((13776, 13785), 'numpy.matrix', 'matrix', (['x'], {}), '(x)\n', (13782, 13785), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((13905, 13931), 'numpy.matrix', 'matrix', (['[x[1, 2], x[2, 0]]'], {}), '([x[1, 2], x[2, 0]])\n', (13911, 13931), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((14021, 14037), 'numpy.matrix', 'matrix', (['sx0[0:2]'], {}), '(sx0[0:2])\n', (14027, 14037), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((14142, 14156), 'numpy.matrix', 'matrix', (['sx0[1]'], {}), '(sx0[1])\n', (14148, 14156), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), ((14256, 14271), 'numpy.matrix', 'matrix', (['sx0[-1]'], {}), '(sx0[-1])\n', (14262, 14271), False, 'from numpy import random, array, linalg, matrix, zeros, ones\n'), 
((32158, 32167), 'casadi.diag', 'c.diag', (['x'], {}), '(x)\n', (32164, 32167), True, 'import casadi as c\n'), ((32340, 32349), 'casadi.diag', 'c.diag', (['x'], {}), '(x)\n', (32346, 32349), True, 'import casadi as c\n'), ((32565, 32574), 'casadi.diag', 'c.diag', (['y'], {}), '(y)\n', (32571, 32574), True, 'import casadi as c\n')] |
import pandas as pd
import numpy as np, os, sys, librosa
import warnings
warnings.simplefilter("ignore")
MIN_GAP = 0
avoid_edges=True
edge_gap = 0.5
# Predict w/ pytorch code for audioset data
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../utils/')
import models, configs, torch
import dataset_utils, audio_utils, data_loaders, torch_utils
from torch import optim, nn
device = torch.device('cpu')
from eval_utils import *
warnings.simplefilter("ignore")
from tqdm import tqdm
audioset_annotations_df = pd.read_csv('../../data/audioset/annotations/clean_laughter_annotations.csv')
audioset_annotations2_df = pd.read_csv('../../data/audioset/annotations/clean_2nd_annotator_annotations.csv')
def get_inter_annotator_for_ID(audioset_ID, annotations_df, annotations_df2, min_gap=0.,
threshold=0.5, use_filter=False, min_length=0.0,
avoid_edges=True, edge_gap=0.5, expand_channel_dim=False):
annotations1_index = annotations_df[annotations_df['FileID'] == audioset_ID].index[0]
annotations1_line = dict(annotations_df.iloc[annotations1_index])
true_laughter_times = get_laughter_times_from_annotation_line(
annotations1_line,min_gap=min_gap,avoid_edges=avoid_edges,edge_gap=edge_gap)
true_non_laughter_times = get_non_laughter_times(
true_laughter_times,annotations1_line['window_start'],annotations1_line['window_length'],avoid_edges=avoid_edges,edge_gap=edge_gap)
annotations2_index = annotations_df2[annotations_df2['FileID'] == audioset_ID].index[0]
annotations2_line = dict(annotations_df2.iloc[annotations2_index])
predicted_laughter_times = get_laughter_times_from_annotation_line(
annotations2_line,min_gap=min_gap,avoid_edges=avoid_edges,edge_gap=edge_gap)
predicted_non_laughter_times = get_non_laughter_times(
predicted_laughter_times,annotations2_line['window_start'],annotations2_line['window_length'],avoid_edges=avoid_edges,edge_gap=edge_gap)
total_laughter_time = sum_overlap_amount(true_laughter_times,true_laughter_times)
total_non_laughter_time = sum_overlap_amount(true_non_laughter_times,true_non_laughter_times)
true_positive_time = sum_overlap_amount(true_laughter_times, predicted_laughter_times)
true_negative_time = sum_overlap_amount(true_non_laughter_times, predicted_non_laughter_times)
false_positive_time = sum_overlap_amount(true_non_laughter_times, predicted_laughter_times)
false_negative_time = sum_overlap_amount(true_laughter_times, predicted_non_laughter_times)
total_time = true_positive_time + true_negative_time + false_positive_time + false_negative_time
try:
assert(np.abs(total_laughter_time - (true_positive_time + false_negative_time)) < 0.2)
assert(np.abs(total_non_laughter_time - (true_negative_time + false_positive_time)) < 0.2)
except:
print(audioset_ID)
print(annotations1_line['window_length'])
print(np.abs(total_laughter_time - (true_positive_time + false_negative_time)))
print("\n")
h = {'FileID':audioset_ID, 'tp_time':true_positive_time, 'tn_time':true_negative_time,
'fp_time':false_positive_time, 'fn_time':false_negative_time,
'predicted_laughter': predicted_laughter_times, 'predicted_non_laughter': predicted_non_laughter_times,
'true_laughter': true_laughter_times, 'true_non_laughter': true_non_laughter_times}
return h
double_annotated_ids = list(set(audioset_annotations_df.FileID) & set(audioset_annotations2_df.FileID))
all_results = []
for audioset_ID in tqdm(double_annotated_ids):
h = get_inter_annotator_for_ID(audioset_ID, audioset_annotations_df, audioset_annotations2_df,
min_gap=0., threshold=0.5, use_filter=False, min_length=0.0,
avoid_edges=True, edge_gap=0.5)
all_results.append(h)
results_df = pd.DataFrame(all_results)
results_df.to_csv("interannotator_agreement_results.csv",index=None) | [
"sys.path.append",
"pandas.DataFrame",
"tqdm.tqdm",
"numpy.abs",
"warnings.simplefilter",
"pandas.read_csv",
"torch.device"
] | [((73, 104), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (94, 104), False, 'import warnings\n'), ((195, 217), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (210, 217), False, 'import numpy as np, os, sys, librosa\n'), ((218, 243), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (233, 243), False, 'import numpy as np, os, sys, librosa\n'), ((244, 275), 'sys.path.append', 'sys.path.append', (['"""../../utils/"""'], {}), "('../../utils/')\n", (259, 275), False, 'import numpy as np, os, sys, librosa\n'), ((404, 423), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (416, 423), False, 'import models, configs, torch\n'), ((449, 480), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (470, 480), False, 'import warnings\n'), ((531, 608), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/audioset/annotations/clean_laughter_annotations.csv"""'], {}), "('../../data/audioset/annotations/clean_laughter_annotations.csv')\n", (542, 608), True, 'import pandas as pd\n'), ((636, 723), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/audioset/annotations/clean_2nd_annotator_annotations.csv"""'], {}), "(\n '../../data/audioset/annotations/clean_2nd_annotator_annotations.csv')\n", (647, 723), True, 'import pandas as pd\n'), ((3660, 3686), 'tqdm.tqdm', 'tqdm', (['double_annotated_ids'], {}), '(double_annotated_ids)\n', (3664, 3686), False, 'from tqdm import tqdm\n'), ((3990, 4015), 'pandas.DataFrame', 'pd.DataFrame', (['all_results'], {}), '(all_results)\n', (4002, 4015), True, 'import pandas as pd\n'), ((2753, 2825), 'numpy.abs', 'np.abs', (['(total_laughter_time - (true_positive_time + false_negative_time))'], {}), '(total_laughter_time - (true_positive_time + false_negative_time))\n', (2759, 2825), True, 'import numpy as np, os, sys, librosa\n'), ((2848, 2924), 'numpy.abs', 'np.abs', (['(total_non_laughter_time - 
(true_negative_time + false_positive_time))'], {}), '(total_non_laughter_time - (true_negative_time + false_positive_time))\n', (2854, 2924), True, 'import numpy as np, os, sys, librosa\n'), ((3035, 3107), 'numpy.abs', 'np.abs', (['(total_laughter_time - (true_positive_time + false_negative_time))'], {}), '(total_laughter_time - (true_positive_time + false_negative_time))\n', (3041, 3107), True, 'import numpy as np, os, sys, librosa\n')] |
import math
from itertools import permutations, repeat
import numpy as np
# set radius in meters (e.g. here 5 km)
radius = 5000
# set bounding box (e.g. here Berlin)
start_lat = 52.341823
start_long = 13.088209
end_lat = 52.669724
end_long = 13.760610
# number of km per degree = 40075km / 360 = ~111
# (between 110.567km at the equator and 111.699km at the poles)
# 40075 km earths perimeter at equator
# 1km in degree = 1 / 111.32 = 0.0089
# 1m in degree = 0.0089 / 1000 = 0.0000089
one_meter_in_degree = 1 / 111.32 / 1000
coef = 2*radius * one_meter_in_degree
# distance between all latitudes always the same
def get_new_lat(old_lat):
return (old_lat + coef)
# pi / 180 = 0.018
# distance between longitudes depends on latitude
# This script is only usable for rather small areas (e.g. a city), as always the start_lat is used to calculate new longs!
# For larger areas a the longitude needs to be calculated according to the proper lat!
def get_new_long(old_long):
return (old_long + coef / math.cos(start_lat * (math.pi / 180)))
# get all lats:
first_row_lats = []
second_row_lats = []
current_lat1 = start_lat
current_lat2 = start_lat + radius * one_meter_in_degree
while current_lat1 < end_lat:
first_row_lats.append(current_lat1)
second_row_lats.append(current_lat2)
current_lat1 = get_new_lat(current_lat1)
current_lat2 = get_new_lat(current_lat2)
# get all longs:
first_row_longs = []
second_row_longs = []
current_long1 = start_long
current_long2 = start_long + (radius * one_meter_in_degree) / math.cos(start_lat * 0.018)
while current_long1 < end_long:
first_row_longs.append(current_long1)
second_row_longs.append(current_long2)
current_long1 = get_new_long(current_long1)
current_long2 = get_new_long(current_long2)
all_coordinates = np.array([]).reshape(0,2)
for long in first_row_longs:
coordinates = np.array(list(zip(first_row_lats, np.repeat(long, len(first_row_lats)))))
all_coordinates = np.append(all_coordinates, coordinates, axis = 0)
for long in second_row_longs:
coordinates = np.array(list(zip(second_row_lats, np.repeat(long, len(second_row_lats)))))
all_coordinates = np.append(all_coordinates, coordinates, axis = 0)
# save radius centers to csv
np.savetxt("centers.csv", all_coordinates, header= 'lat, long', delimiter=",", fmt="%10.6f") | [
"numpy.append",
"numpy.savetxt",
"numpy.array",
"math.cos"
] | [((2244, 2340), 'numpy.savetxt', 'np.savetxt', (['"""centers.csv"""', 'all_coordinates'], {'header': '"""lat, long"""', 'delimiter': '""","""', 'fmt': '"""%10.6f"""'}), "('centers.csv', all_coordinates, header='lat, long', delimiter=\n ',', fmt='%10.6f')\n", (2254, 2340), True, 'import numpy as np\n'), ((1967, 2014), 'numpy.append', 'np.append', (['all_coordinates', 'coordinates'], {'axis': '(0)'}), '(all_coordinates, coordinates, axis=0)\n', (1976, 2014), True, 'import numpy as np\n'), ((2164, 2211), 'numpy.append', 'np.append', (['all_coordinates', 'coordinates'], {'axis': '(0)'}), '(all_coordinates, coordinates, axis=0)\n', (2173, 2211), True, 'import numpy as np\n'), ((1538, 1565), 'math.cos', 'math.cos', (['(start_lat * 0.018)'], {}), '(start_lat * 0.018)\n', (1546, 1565), False, 'import math\n'), ((1798, 1810), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1806, 1810), True, 'import numpy as np\n'), ((1009, 1046), 'math.cos', 'math.cos', (['(start_lat * (math.pi / 180))'], {}), '(start_lat * (math.pi / 180))\n', (1017, 1046), False, 'import math\n')] |
from copy import deepcopy
from typing import Tuple
import GPy
import numpy as np
from emukit.model_wrappers.gpy_model_wrappers import GPyModelWrapper
class FabolasKernel(GPy.kern.Kern):
def __init__(self, input_dim, basis_func, a=1., b=1., active_dims=None):
super(FabolasKernel, self).__init__(input_dim, active_dims, "fabolas_kernel")
assert input_dim == 1
self.basis_func = basis_func
self.a = GPy.core.parameterization.Param("a", a)
self.b = GPy.core.parameterization.Param("b", b)
self.link_parameters(self.a, self.b)
def K(self, X, X2):
if X2 is None: X2 = X
X_ = self.basis_func(X)
X2_ = self.basis_func(X2)
k = np.dot(X_ * self.b, X2_.T) + self.a
return k
def update_gradients_full(self, dL_dK, X, X2):
if X2 is None: X2 = X
X_ = self.basis_func(X)
X2_ = self.basis_func(X2)
self.a.gradient = np.sum(dL_dK)
self.b.gradient = np.sum(np.dot(np.dot(X_, X2_.T), dL_dK))
def Kdiag(self, X):
return np.diag(self.K(X, X))
def linear(s):
return s
def quad(s):
return (1 - s) ** 2
def transform(s, s_min, s_max):
s_transform = (np.log2(s) - np.log2(s_min)) / (np.log2(s_max) - np.log2(s_min))
return s_transform
def retransform(s_transform, s_min, s_max):
s = np.rint(2 ** (s_transform * (np.log2(s_max) - np.log2(s_min)) + np.log2(s_min)))
return s
class FabolasModel(GPyModelWrapper):
def __init__(self, X_init: np.ndarray, Y_init: np.ndarray,
s_min: float, s_max: float, basis_func=linear, noise: float = 1e-6):
"""
Fabolas Gaussian processes model which models the validation error / cost of
hyperparameter configurations across training dataset subsets.
:param X_init: training data points
:param Y_init: training targets
:param basis_func: basis function which describes the change in performance across dataset subsets
:param noise: observation noise added to the diagonal of the kernel matrix
"""
self.noise = noise
self.s_min = s_min
self.s_max = s_max
self._X = deepcopy(X_init)
self._X[:, -1] = transform(self._X[:, -1], self.s_min, self.s_max)
self._Y = Y_init
self.basis_func = basis_func
kernel = GPy.kern.Matern52(input_dim=self._X.shape[1] - 1, active_dims=[i for i in range(self._X.shape[1] - 1)],
variance=np.var(self._Y), ARD=True)
kernel *= FabolasKernel(input_dim=1, active_dims=[self._X.shape[1] - 1], basis_func=basis_func)
kernel += GPy.kern.White(input_dim=1, active_dims=[self._X.shape[1] - 1], variance=1e-6)
gp = GPy.models.GPRegression(self._X, self._Y, kernel=kernel, noise_var=noise)
gp.kern.set_prior(GPy.priors.Uniform(0, 5))
gp.likelihood.constrain_positive()
super(FabolasModel, self).__init__(gpy_model=gp, n_restarts=3)
def predict(self, X):
"""
:param X: (n_points x n_dimensions) array containing locations at which to get predictions
:return: (mean, variance) Arrays of size n_points x 1 of the predictive distribution at each input location
"""
X_ = deepcopy(X)
X_[:, -1] = transform(X_[:, -1], self.s_min, self.s_max)
return super(FabolasModel, self).predict(X_)
def set_data(self, X, Y):
"""
Sets training data in model
:param X: New training features
:param Y: New training outputs
"""
self._X = deepcopy(X)
self._X[:, -1] = transform(self._X[:, -1], self.s_min, self.s_max)
self._Y = Y
try:
self.model.set_XY(self._X, self.Y)
except:
kernel = GPy.kern.Matern52(input_dim=self._X.shape[1] - 1,
active_dims=[i for i in range(self._X.shape[1] - 1)],
variance=np.var(self.Y), ARD=True)
kernel *= FabolasKernel(input_dim=1, active_dims=[self._X.shape[1] - 1], basis_func=self.basis_func)
kernel *= GPy.kern.OU(input_dim=1, active_dims=[self._X.shape[1] - 1])
self.model = GPy.models.GPRegression(self._X, self.Y, kernel=kernel, noise_var=self.noise)
self.model.likelihood.constrain_positive()
def get_f_minimum(self):
"""
Predicts for all observed data points the validation error on the full dataset and returns
the smallest mean prediciton
:return: Array of size 1 x 1
"""
proj_X = deepcopy(self._X)
proj_X[:, -1] = np.ones(proj_X.shape[0]) * self.s_max
mean_highest_dataset = self.model.predict(proj_X)
return np.min(mean_highest_dataset, axis=0)
@property
def X(self):
X = deepcopy(self._X)
X[:, -1] = retransform(X[:, -1], self.s_min, self.s_max)
return X
@property
def Y(self):
return self._Y
def get_prediction_gradients(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
:param X: (n_points x n_dimensions) array containing locations at which to get gradient of the predictions
:return: (mean gradient, variance gradient) n_points x n_dimensions arrays of the gradients of the predictive
distribution at each input location
"""
X_ = deepcopy(X)
X_[:, -1] = transform(X_[:, -1], self.s_min, self.s_max)
return super(FabolasModel, self).get_prediction_gradients(X_)
def predict_covariance(self, X: np.ndarray, with_noise: bool = True) -> np.ndarray:
"""
Calculates posterior covariance between points in X
:param X: Array of size n_points x n_dimensions containing input locations to compute posterior covariance at
:param with_noise: Whether to include likelihood noise in the covariance matrix
:return: Posterior covariance matrix of size n_points x n_points
"""
X_ = deepcopy(X)
X_[:, -1] = transform(X_[:, -1], self.s_min, self.s_max)
return super(FabolasModel, self).predict_covariance(X_, with_noise)
def get_covariance_between_points(self, X1: np.ndarray, X2: np.ndarray) -> np.ndarray:
"""
Calculate posterior covariance between two points
:param X1: An array of shape 1 x n_dimensions that contains a data single point. It is the first argument of the
posterior covariance function
:param X2: An array of shape n_points x n_dimensions that may contain multiple data points. This is the second
argument to the posterior covariance function.
:return: An array of shape n_points x 1 of posterior covariances between X1 and X2
"""
X_1 = deepcopy(X1)
X_1[:, -1] = transform(X_1[:, -1], self.s_min, self.s_max)
X_2 = deepcopy(X2)
X_2[:, -1] = transform(X_2[:, -1], self.s_min, self.s_max)
return super(FabolasModel, self).get_covariance_between_points(X1, X2)
| [
"copy.deepcopy",
"numpy.sum",
"GPy.models.GPRegression",
"numpy.log2",
"numpy.ones",
"GPy.kern.White",
"numpy.min",
"GPy.kern.OU",
"numpy.dot",
"numpy.var",
"GPy.core.parameterization.Param",
"GPy.priors.Uniform"
] | [((442, 481), 'GPy.core.parameterization.Param', 'GPy.core.parameterization.Param', (['"""a"""', 'a'], {}), "('a', a)\n", (473, 481), False, 'import GPy\n'), ((499, 538), 'GPy.core.parameterization.Param', 'GPy.core.parameterization.Param', (['"""b"""', 'b'], {}), "('b', b)\n", (530, 538), False, 'import GPy\n'), ((947, 960), 'numpy.sum', 'np.sum', (['dL_dK'], {}), '(dL_dK)\n', (953, 960), True, 'import numpy as np\n'), ((2192, 2208), 'copy.deepcopy', 'deepcopy', (['X_init'], {}), '(X_init)\n', (2200, 2208), False, 'from copy import deepcopy\n'), ((2660, 2739), 'GPy.kern.White', 'GPy.kern.White', ([], {'input_dim': '(1)', 'active_dims': '[self._X.shape[1] - 1]', 'variance': '(1e-06)'}), '(input_dim=1, active_dims=[self._X.shape[1] - 1], variance=1e-06)\n', (2674, 2739), False, 'import GPy\n'), ((2753, 2826), 'GPy.models.GPRegression', 'GPy.models.GPRegression', (['self._X', 'self._Y'], {'kernel': 'kernel', 'noise_var': 'noise'}), '(self._X, self._Y, kernel=kernel, noise_var=noise)\n', (2776, 2826), False, 'import GPy\n'), ((3272, 3283), 'copy.deepcopy', 'deepcopy', (['X'], {}), '(X)\n', (3280, 3283), False, 'from copy import deepcopy\n'), ((3591, 3602), 'copy.deepcopy', 'deepcopy', (['X'], {}), '(X)\n', (3599, 3602), False, 'from copy import deepcopy\n'), ((4612, 4629), 'copy.deepcopy', 'deepcopy', (['self._X'], {}), '(self._X)\n', (4620, 4629), False, 'from copy import deepcopy\n'), ((4766, 4802), 'numpy.min', 'np.min', (['mean_highest_dataset'], {'axis': '(0)'}), '(mean_highest_dataset, axis=0)\n', (4772, 4802), True, 'import numpy as np\n'), ((4847, 4864), 'copy.deepcopy', 'deepcopy', (['self._X'], {}), '(self._X)\n', (4855, 4864), False, 'from copy import deepcopy\n'), ((5414, 5425), 'copy.deepcopy', 'deepcopy', (['X'], {}), '(X)\n', (5422, 5425), False, 'from copy import deepcopy\n'), ((6027, 6038), 'copy.deepcopy', 'deepcopy', (['X'], {}), '(X)\n', (6035, 6038), False, 'from copy import deepcopy\n'), ((6815, 6827), 'copy.deepcopy', 'deepcopy', (['X1'], 
{}), '(X1)\n', (6823, 6827), False, 'from copy import deepcopy\n'), ((6909, 6921), 'copy.deepcopy', 'deepcopy', (['X2'], {}), '(X2)\n', (6917, 6921), False, 'from copy import deepcopy\n'), ((719, 745), 'numpy.dot', 'np.dot', (['(X_ * self.b)', 'X2_.T'], {}), '(X_ * self.b, X2_.T)\n', (725, 745), True, 'import numpy as np\n'), ((1212, 1222), 'numpy.log2', 'np.log2', (['s'], {}), '(s)\n', (1219, 1222), True, 'import numpy as np\n'), ((1225, 1239), 'numpy.log2', 'np.log2', (['s_min'], {}), '(s_min)\n', (1232, 1239), True, 'import numpy as np\n'), ((1244, 1258), 'numpy.log2', 'np.log2', (['s_max'], {}), '(s_max)\n', (1251, 1258), True, 'import numpy as np\n'), ((1261, 1275), 'numpy.log2', 'np.log2', (['s_min'], {}), '(s_min)\n', (1268, 1275), True, 'import numpy as np\n'), ((2853, 2877), 'GPy.priors.Uniform', 'GPy.priors.Uniform', (['(0)', '(5)'], {}), '(0, 5)\n', (2871, 2877), False, 'import GPy\n'), ((4654, 4678), 'numpy.ones', 'np.ones', (['proj_X.shape[0]'], {}), '(proj_X.shape[0])\n', (4661, 4678), True, 'import numpy as np\n'), ((1001, 1018), 'numpy.dot', 'np.dot', (['X_', 'X2_.T'], {}), '(X_, X2_.T)\n', (1007, 1018), True, 'import numpy as np\n'), ((1418, 1432), 'numpy.log2', 'np.log2', (['s_min'], {}), '(s_min)\n', (1425, 1432), True, 'import numpy as np\n'), ((2511, 2526), 'numpy.var', 'np.var', (['self._Y'], {}), '(self._Y)\n', (2517, 2526), True, 'import numpy as np\n'), ((4147, 4207), 'GPy.kern.OU', 'GPy.kern.OU', ([], {'input_dim': '(1)', 'active_dims': '[self._X.shape[1] - 1]'}), '(input_dim=1, active_dims=[self._X.shape[1] - 1])\n', (4158, 4207), False, 'import GPy\n'), ((4234, 4311), 'GPy.models.GPRegression', 'GPy.models.GPRegression', (['self._X', 'self.Y'], {'kernel': 'kernel', 'noise_var': 'self.noise'}), '(self._X, self.Y, kernel=kernel, noise_var=self.noise)\n', (4257, 4311), False, 'import GPy\n'), ((1383, 1397), 'numpy.log2', 'np.log2', (['s_max'], {}), '(s_max)\n', (1390, 1397), True, 'import numpy as np\n'), ((1400, 1414), 'numpy.log2', 
'np.log2', (['s_min'], {}), '(s_min)\n', (1407, 1414), True, 'import numpy as np\n'), ((3986, 4000), 'numpy.var', 'np.var', (['self.Y'], {}), '(self.Y)\n', (3992, 4000), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from algo.ppo.Network import Actor, Critic
import os
from pathlib import Path
import sys
base_dir = Path(__file__).resolve().parent.parent.parent
sys.path.append(str(base_dir))
from common.buffer import Replay_buffer as buffer
def get_trajectory_property():
return ["action", "a_logit"]
class PPO(object):
def __init__(self, args):
self.state_dim = args.obs_space
self.action_dim = args.action_space
self.clip_param = args.clip_param
self.max_grad_norm = args.max_grad_norm
self.update_freq = args.update_freq
self.buffer_size = args.buffer_capacity
self.batch_size = args.batch_size
self.a_lr = args.a_lr
self.c_lr = args.c_lr
self.gamma = args.gamma
self.hidden_size = args.hidden_size
self.actor = Actor(self.state_dim, self.action_dim, self.hidden_size)
self.critic = Critic(self.state_dim, 1, self.hidden_size)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=self.a_lr)
self.critic_net_optimizer = optim.Adam(self.critic.parameters(), lr=self.c_lr)
trajectory_property = get_trajectory_property()
self.memory = buffer(self.buffer_size, trajectory_property)
self.memory.init_item_buffers()
self.counter = 0
self.training_step = 0
def choose_action(self, observation, train=True):
inference_output = self.inference(observation, train)
if train:
self.add_experience(inference_output)
return inference_output
def inference(self, observation, train=True):
state = torch.tensor(observation, dtype=torch.float).unsqueeze(0)
logits = self.actor(state).detach()
action = Categorical(torch.Tensor(logits)).sample()
return {"action": action.item(),
"a_logit": logits[:, action.item()].item()}
def add_experience(self, output):
agent_id = 0
for k, v in output.items():
self.memory.insert(k, agent_id, v)
def learn(self):
data_length = len(self.memory.item_buffers["rewards"].data)
data = self.memory.get_trajectory()
transitions = {
"o_0": np.array(data['states']),
"r_0": data['rewards'],
"u_0": np.array(data['action']),
"log_prob": np.array(data['a_logit'])
}
obs = torch.tensor(transitions["o_0"], dtype=torch.float)
action = torch.tensor(transitions["u_0"], dtype=torch.long).view(-1, 1)
reward = transitions["r_0"]
old_action_log_prob = torch.tensor(transitions["log_prob"], dtype=torch.float).view(-1, 1)
# 计算reward-to-go
R = 0
Gt = []
for r in reward[::-1]: # 反过来
R = r[0] + self.gamma * R
Gt.insert(0, R)
Gt = torch.tensor(Gt, dtype=torch.float)
for i in range(self.update_freq):
for index in BatchSampler(SubsetRandomSampler(range(data_length)), self.batch_size, False):
Gt_index = Gt[index].view(-1, 1)
V = self.critic(obs[index])
delta = Gt_index - V
advantage = delta.detach()
action_prob = self.actor(obs[index]).gather(1, action[index])
ratio = (action_prob / old_action_log_prob[index])
surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1 - self.clip_param, 1 + self.clip_param) * advantage
action_loss = -torch.min(surr1, surr2).mean()
self.actor_optimizer.zero_grad()
action_loss.backward()
nn.utils.clip_grad_norm_(self.actor.parameters(), self.max_grad_norm)
self.actor_optimizer.step()
value_loss = F.mse_loss(Gt_index, V)
self.critic_net_optimizer.zero_grad()
value_loss.backward()
nn.utils.clip_grad_norm_(self.critic.parameters(), self.max_grad_norm)
self.critic_net_optimizer.step()
self.training_step += 1
self.memory.item_buffer_clear()
def save(self, save_path, episode):
base_path = os.path.join(save_path, 'trained_model')
if not os.path.exists(base_path):
os.makedirs(base_path)
model_actor_path = os.path.join(base_path, "actor_" + str(episode) + ".pth")
torch.save(self.actor.state_dict(), model_actor_path)
model_critic_path = os.path.join(base_path, "critic_" + str(episode) + ".pth")
torch.save(self.critic.state_dict(), model_critic_path)
def load(self, actor_net, critic_net):
self.actor.load_state_dict(torch.load(actor_net))
self.critic.load_state_dict(torch.load(critic_net))
| [
"algo.ppo.Network.Actor",
"os.makedirs",
"algo.ppo.Network.Critic",
"torch.load",
"torch.nn.functional.mse_loss",
"os.path.exists",
"common.buffer.Replay_buffer",
"pathlib.Path",
"torch.Tensor",
"numpy.array",
"torch.clamp",
"os.path.join",
"torch.min",
"torch.tensor"
] | [((1043, 1099), 'algo.ppo.Network.Actor', 'Actor', (['self.state_dim', 'self.action_dim', 'self.hidden_size'], {}), '(self.state_dim, self.action_dim, self.hidden_size)\n', (1048, 1099), False, 'from algo.ppo.Network import Actor, Critic\n'), ((1122, 1165), 'algo.ppo.Network.Critic', 'Critic', (['self.state_dim', '(1)', 'self.hidden_size'], {}), '(self.state_dim, 1, self.hidden_size)\n', (1128, 1165), False, 'from algo.ppo.Network import Actor, Critic\n'), ((1413, 1458), 'common.buffer.Replay_buffer', 'buffer', (['self.buffer_size', 'trajectory_property'], {}), '(self.buffer_size, trajectory_property)\n', (1419, 1458), True, 'from common.buffer import Replay_buffer as buffer\n'), ((2607, 2658), 'torch.tensor', 'torch.tensor', (["transitions['o_0']"], {'dtype': 'torch.float'}), "(transitions['o_0'], dtype=torch.float)\n", (2619, 2658), False, 'import torch\n'), ((3046, 3081), 'torch.tensor', 'torch.tensor', (['Gt'], {'dtype': 'torch.float'}), '(Gt, dtype=torch.float)\n', (3058, 3081), False, 'import torch\n'), ((4391, 4431), 'os.path.join', 'os.path.join', (['save_path', '"""trained_model"""'], {}), "(save_path, 'trained_model')\n", (4403, 4431), False, 'import os\n'), ((2425, 2449), 'numpy.array', 'np.array', (["data['states']"], {}), "(data['states'])\n", (2433, 2449), True, 'import numpy as np\n'), ((2506, 2530), 'numpy.array', 'np.array', (["data['action']"], {}), "(data['action'])\n", (2514, 2530), True, 'import numpy as np\n'), ((2556, 2581), 'numpy.array', 'np.array', (["data['a_logit']"], {}), "(data['a_logit'])\n", (2564, 2581), True, 'import numpy as np\n'), ((4447, 4472), 'os.path.exists', 'os.path.exists', (['base_path'], {}), '(base_path)\n', (4461, 4472), False, 'import os\n'), ((4486, 4508), 'os.makedirs', 'os.makedirs', (['base_path'], {}), '(base_path)\n', (4497, 4508), False, 'import os\n'), ((4887, 4908), 'torch.load', 'torch.load', (['actor_net'], {}), '(actor_net)\n', (4897, 4908), False, 'import torch\n'), ((4946, 4968), 'torch.load', 
'torch.load', (['critic_net'], {}), '(critic_net)\n', (4956, 4968), False, 'import torch\n'), ((1840, 1884), 'torch.tensor', 'torch.tensor', (['observation'], {'dtype': 'torch.float'}), '(observation, dtype=torch.float)\n', (1852, 1884), False, 'import torch\n'), ((2676, 2726), 'torch.tensor', 'torch.tensor', (["transitions['u_0']"], {'dtype': 'torch.long'}), "(transitions['u_0'], dtype=torch.long)\n", (2688, 2726), False, 'import torch\n'), ((2805, 2861), 'torch.tensor', 'torch.tensor', (["transitions['log_prob']"], {'dtype': 'torch.float'}), "(transitions['log_prob'], dtype=torch.float)\n", (2817, 2861), False, 'import torch\n'), ((3998, 4021), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['Gt_index', 'V'], {}), '(Gt_index, V)\n', (4008, 4021), True, 'import torch.nn.functional as F\n'), ((331, 345), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (335, 345), False, 'from pathlib import Path\n'), ((1971, 1991), 'torch.Tensor', 'torch.Tensor', (['logits'], {}), '(logits)\n', (1983, 1991), False, 'import torch\n'), ((3614, 3674), 'torch.clamp', 'torch.clamp', (['ratio', '(1 - self.clip_param)', '(1 + self.clip_param)'], {}), '(ratio, 1 - self.clip_param, 1 + self.clip_param)\n', (3625, 3674), False, 'import torch\n'), ((3719, 3742), 'torch.min', 'torch.min', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (3728, 3742), False, 'import torch\n')] |
import unittest
import numpy as np
from pyscfit.utils import _match, _match_hash
class UtilsTestCase(unittest.TestCase):
    """Unit tests for the ``_match`` and ``_match_hash`` helpers."""

    def setUp(self):
        # Reference array searched by every test case.
        self.x = np.array([19, 21, 11, 18, 46], dtype=np.int_)

    def test__match_single_query_value(self):
        """A scalar query returns the index of its first occurrence."""
        self.assertEqual(_match(self.x, 11), 2)

    def test__match_array_query(self):
        """An array query returns one index per query element."""
        query = np.array([11, 18, 19], dtype=np.int_)
        expected = np.array([2, 3, 0])
        np.testing.assert_array_equal(expected, _match(self.x, query))

    def test__match_hash_single_value_raises_TypeError(self):
        """``_match_hash`` rejects scalar (non-iterable) queries."""
        self.assertRaises(TypeError, _match_hash, self.x, 11)

    def test__match_hash_array_query(self):
        """A list query maps each element to its index in the reference."""
        self.assertEqual([3, 2], _match_hash(self.x, [18, 11]))

    def test__match_hash_array_query_missing_in_reference(self):
        """Elements absent from the reference map to ``None``."""
        query = np.array([11, 12, 19, 25, 22])
        self.assertEqual([2, None, 0, None, None], _match_hash(self.x, query))
| [
"pyscfit.utils._match_hash",
"pyscfit.utils._match",
"numpy.array"
] | [((161, 206), 'numpy.array', 'np.array', (['[19, 21, 11, 18, 46]'], {'dtype': 'np.int_'}), '([19, 21, 11, 18, 46], dtype=np.int_)\n', (169, 206), True, 'import numpy as np\n'), ((368, 405), 'numpy.array', 'np.array', (['[11, 18, 19]'], {'dtype': 'np.int_'}), '([11, 18, 19], dtype=np.int_)\n', (376, 405), True, 'import numpy as np\n'), ((824, 854), 'numpy.array', 'np.array', (['[11, 12, 19, 25, 22]'], {}), '([11, 12, 19, 25, 22])\n', (832, 854), True, 'import numpy as np\n'), ((294, 311), 'pyscfit.utils._match', '_match', (['self.x', 'y'], {}), '(self.x, y)\n', (300, 311), False, 'from pyscfit.utils import _match, _match_hash\n'), ((444, 463), 'numpy.array', 'np.array', (['[2, 3, 0]'], {}), '([2, 3, 0])\n', (452, 463), True, 'import numpy as np\n'), ((465, 482), 'pyscfit.utils._match', '_match', (['self.x', 'y'], {}), '(self.x, y)\n', (471, 482), False, 'from pyscfit.utils import _match, _match_hash\n'), ((722, 744), 'pyscfit.utils._match_hash', '_match_hash', (['self.x', 'y'], {}), '(self.x, y)\n', (733, 744), False, 'from pyscfit.utils import _match, _match_hash\n'), ((906, 928), 'pyscfit.utils._match_hash', '_match_hash', (['self.x', 'y'], {}), '(self.x, y)\n', (917, 928), False, 'from pyscfit.utils import _match, _match_hash\n')] |
'''Dealing with Boond Manager .csv holidays files
Check the type of holidays -- check_conge_exceptionel(attente, log_file, VALIDEE)
Check the date of holidays -- check_conge_less_5_months(attente, log_file, VALIDEE)
Prepare data for processing -- create_tab_to_insert_vacances_en_attente(attente, log_file, VALIDEE)
Preprocess csv and apply above function -- pipeline(csv_attente, log_file, VALIDEE):
'''
import pandas as pd
import numpy as np
import datetime
from datetime import date
from dateutil.relativedelta import relativedelta
### function that merge two dict from values
## dict1 = {"a":[1,2],"b":[2,3]}
## dict2 = {"a":[3,4],"b":[2,3]}
## out = {"a":[1,2,3,4],"b":[2,3,2,3]}
def merge_dict(dict1, dict2):
    """Merge two dicts of lists, concatenating the values of shared keys.

    Example::

        merge_dict({"a": [1, 2], "b": [2, 3]}, {"a": [3, 4], "b": [2, 3]})
        == {"a": [1, 2, 3, 4], "b": [2, 3, 2, 3]}

    Neither input dict is modified; all value lists in the result are
    fresh copies (the original aliased ``dict1``'s lists and extended
    them in place, mutating the caller's data; it also printed every
    shared key as a debugging leftover).
    """
    dict_merged = {}
    for key, values in dict1.items():
        # Copy so extending a shared key never mutates dict1 in place.
        dict_merged[key] = list(values)
        if key in dict2:
            dict_merged[key].extend(dict2[key])
    for key, values in dict2.items():
        if key not in dict1:
            dict_merged[key] = list(values)
    return dict_merged
### RETRAIT DES CONGES DEMANDEES NE CORRESPONDANT NI A DES CP NI A DES RTT
def check_conge_exceptionel(attente, log_file, VALIDEE):
    """Split off holiday requests that are neither CP (code 3) nor RTT (code 4).

    Parameters
    ----------
    attente : pandas.DataFrame
        Holidays extracted from BoondManager; must contain the columns
        "Code Absence" and "NOM Prénom".
    log_file :
        Unused; kept for interface compatibility with the pipeline.
    VALIDEE : bool
        True if the holidays in ``attente`` have already been validated.

    Returns
    -------
    (pandas.DataFrame, dict)
        The rows restricted to CP/RTT requests, and a dict mapping each
        employee with a non-standard pending request to a list of
        warning messages (empty when ``VALIDEE`` is True).
    """
    problemes_type_conge = {}
    if (not VALIDEE):
        # Pending requests with an exceptional absence code (exceptional
        # leave, sick leave, unpaid leave) must be handled manually.
        exceptionnelle_conge = attente[~ attente["Code Absence"].isin([3, 4])]
        message = ("\nPB CONGES: type de conges à traiter manuellement "
                   "(exceptionnelle, maladie, sans solde)\n")
        for name in exceptionnelle_conge["NOM Prénom"].unique():
            # ``unique()`` yields each name once, so a plain assignment
            # suffices (the original's append-if-present branch was dead).
            problemes_type_conge[name] = [message]
    # Only CP (3) and RTT (4) requests are processed downstream.
    attente = attente[attente["Code Absence"].isin([3, 4])]
    return (attente, problemes_type_conge)
###VERIFICATION DE LA DATE DE DEMANDE DE CONGES ( > AJD et < 120 jours)
def check_conge_less_5_months(attente, log_file, VALIDEE):
    """Filter out holiday requests outside the processing window.

    Rows are removed when the request starts on or after the first day of
    the month four months ahead, or when it already started in a previous
    month.  A message is recorded per employee, except for already-validated
    requests that started in the past (silently dropped).

    Fixes relative to the original:
    - the name column was corrupted to "<NAME>"; the rest of the module
      uses "NOM Prénom", which this now reads (the original raised KeyError);
    - the result of ``.replace(day=1)`` was discarded (dates are immutable),
      so the intended "first day of the month" horizon was never applied;
    - the invariant thresholds are now computed once instead of per row.

    Parameters
    ----------
    attente : pandas.DataFrame
        Must contain the columns "NOM Prénom" and "Début" (datetime).
    log_file :
        Unused; kept for interface compatibility with the pipeline.
    VALIDEE : bool
        True if the holidays in ``attente`` have already been validated.

    Returns
    -------
    (pandas.DataFrame, dict)
        The filtered rows and a dict mapping employee names to messages.
    """
    index_to_delete = []
    problemes_date = {}
    # First day of the month four months ahead, computed with plain
    # ``datetime`` arithmetic (equivalent to
    # ``(date.today() + relativedelta(months=+4)).replace(day=1)``).
    today = date.today()
    month0 = today.month - 1 + 4
    horizon = pd.Timestamp(date(today.year + month0 // 12, month0 % 12 + 1, 1))
    now = pd.Timestamp.now()
    for i in attente.index:
        name = attente.loc[i, "NOM Prénom"]
        debut = attente.loc[i, "Début"]
        sup_5months = debut > horizon
        # Pending request too far in the future: flag it and drop it.
        if sup_5months and not VALIDEE:
            problemes_date.setdefault(name, []).append(
                "\nPB DATE 1. : la date de début est superieur "
                "a 4 \n")
            index_to_delete.append(i)
        # Validated request too far in the future: excluded from the
        # calculations, flag it and drop it.
        if sup_5months and VALIDEE:
            problemes_date.setdefault(name, []).append(
                "\nPB DATE 2. : vacances"
                "validees dans plus de 4 mois non prises en compte dans les calculs")
            index_to_delete.append(i)
        # Request that already started in a previous month.
        if now > debut and now.month != debut.month:
            index_to_delete.append(i)
            if (not VALIDEE):
                problemes_date.setdefault(name, []).append(
                    "\nPB DATE 3. : date de demande de vacances deja passee \n")
    attente = attente.drop(index_to_delete, axis=0)
    return (attente, problemes_date)
### Create a dic of dataframe. The keys are the name of the BeNexters and the values are a df with
### number of days for each type of holidays (dim1 (CP or RTT)) and for each month (dim 2)
def create_tab_to_insert_vacances_en_attente(attente, log_file, VALIDEE):
    """Build, per employee, a 2x5 table of requested holiday days.

    Rows are the holiday type ('CP', 'RTT'); columns are the current month
    and the four following months, labelled with the calendar month number
    as a string ("1".."12").

    BUGFIX: the original labelled columns with ``(month + i) % 12``, which
    maps December to "0" while lookups use ``str(int(strftime('%m')))`` and
    therefore query "12" -- a guaranteed KeyError for any December request.
    Columns are now wrapped with ``(m - 1) % 12 + 1`` so December stays "12".

    Returns
    -------
    (dict, dict, dict)
        ``dict_out`` mapping employee name to the 2x5 DataFrame, plus the
        problem dicts from :func:`check_conge_exceptionel` and
        :func:`check_conge_less_5_months`.
    """
    actual_month = datetime.datetime.now().month
    # Calendar months are 1..12; wrap so December maps to "12", not "0".
    column_name = [str((actual_month + i - 1) % 12 + 1) for i in range(5)]
    attente, problemes_type_conge = check_conge_exceptionel(attente, log_file, VALIDEE)
    attente, problemes_date = check_conge_less_5_months(attente, log_file, VALIDEE)
    names = attente["NOM Prénom"].unique()
    dict_out = {}
    for name in names:
        dict_out[name] = pd.DataFrame(data=np.zeros((2, 5)), index=['CP', 'RTT'], columns=column_name)
    # Code Absence 3 = CP, 4 = RTT; both are filled with the same logic.
    for code, row_label in ((3, "CP"), (4, "RTT")):
        subset = attente[attente["Code Absence"] == code]
        for name in subset["NOM Prénom"].unique():
            df_name = subset[subset["NOM Prénom"] == name]
            # Total requested days per starting month ("01".."12").
            df_name = df_name.groupby(df_name['Début'].dt.strftime('%m'))['Durée'].sum().sort_values()
            for name_col in df_name.index:
                dict_out[name].loc[row_label, str(int(name_col))] = df_name[name_col]
    return (dict_out, problemes_type_conge, problemes_date)
### Pipeline extracting csv from Bound and returning the dic of nb od days by month and type of holidays
def pipeline(csv_attente, log_file, VALIDEE):
    """Read a BoondManager ';'-separated export and compute pending-holiday tables.

    Returns a 4-tuple: the per-employee month/type table, the array of all
    employee names found in the export, and the two problem dicts produced
    by the type and date checks.
    """
    frame = pd.read_csv(csv_attente, sep=";")
    unused_columns = ["Référence de la ressource", "Matricule",
                      "Type", "Date", "Nom Absence", "Fin"]
    frame = frame.drop(unused_columns, axis=1)
    # Build a "NOM Prénom" key: hyphens become spaces, edges are trimmed.
    pairs = zip(frame["Nom"].map(str), frame["Prénom"])
    frame["NOM Prénom"] = [' '.join(pair).replace("-", " ").strip() for pair in pairs]
    all_names_waiting_for_validation = frame["NOM Prénom"].unique()
    # Dates are day-first; durations use a decimal comma.
    frame["Début"] = pd.to_datetime(frame["Début"], dayfirst=True)
    frame["Durée"] = frame["Durée"].astype(str).str.replace(',', '.').astype(float)
    out, problemes_type_conge, problemes_date = create_tab_to_insert_vacances_en_attente(frame, log_file, VALIDEE)
    return (out, all_names_waiting_for_validation, problemes_type_conge, problemes_date)
"pandas.Timestamp",
"pandas.read_csv",
"numpy.zeros",
"dateutil.relativedelta.relativedelta",
"datetime.date.today",
"pandas.to_datetime",
"pandas.Timestamp.now",
"datetime.datetime.now"
] | [((6595, 6628), 'pandas.read_csv', 'pd.read_csv', (['csv_attente'], {'sep': '""";"""'}), "(csv_attente, sep=';')\n", (6606, 6628), True, 'import pandas as pd\n'), ((7078, 7122), 'pandas.to_datetime', 'pd.to_datetime', (['attente.Début'], {'dayfirst': '(True)'}), '(attente.Début, dayfirst=True)\n', (7092, 7122), True, 'import pandas as pd\n'), ((5053, 5076), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5074, 5076), False, 'import datetime\n'), ((2792, 2804), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2802, 2804), False, 'from datetime import date\n'), ((2807, 2831), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+4)'}), '(months=+4)\n', (2820, 2831), False, 'from dateutil.relativedelta import relativedelta\n'), ((2931, 2971), 'pandas.Timestamp', 'pd.Timestamp', (['date_of_today_plus_5months'], {}), '(date_of_today_plus_5months)\n', (2943, 2971), True, 'import pandas as pd\n'), ((4174, 4192), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (4190, 4192), True, 'import pandas as pd\n'), ((5505, 5521), 'numpy.zeros', 'np.zeros', (['(2, 5)'], {}), '((2, 5))\n', (5513, 5521), True, 'import numpy as np\n'), ((4242, 4260), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (4258, 4260), True, 'import pandas as pd\n')] |
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
"""Functionality for managing markers (shapes used to highlight datums in plots and text).
"""
import copy
import xml.sax.saxutils
import numpy
import toyplot.style
class Marker(object):
    """Represents the complete specification of a marker's appearance.

    Markers are value objects: all state is set at construction time,
    exposed through read-only properties, and instances are hashable.
    """
    def __init__(self, shape, mstyle, size, angle, label, lstyle):
        self._shape = shape
        self._mstyle = mstyle
        self._size = size
        self._angle = angle
        self._label = label
        self._lstyle = lstyle

    @property
    def shape(self):
        """Shape identifier, e.g. "o", "s", or "r<width>x<height>"."""
        return self._shape

    @property
    def mstyle(self):
        """CSS style applied to the marker shape."""
        return self._mstyle

    @property
    def size(self):
        """Marker size."""
        return self._size

    @property
    def angle(self):
        """Rotation angle of the marker."""
        return self._angle

    @property
    def label(self):
        """Optional text label drawn with the marker."""
        return self._label

    @property
    def lstyle(self):
        """CSS style applied to the marker label."""
        return self._lstyle

    def __add__(self, other):
        """Concatenate with a string (as HTML), or overlay another marker's fields."""
        if isinstance(other, str):
            return self.to_html() + other
        elif isinstance(other, toyplot.marker.Marker):
            # Fields set on ``other`` override ours; styles are merged.
            result = copy.deepcopy(self)
            if other._shape is not None:
                result._shape = other._shape
            result._mstyle = toyplot.style.combine(result.mstyle, other._mstyle)
            if other._size is not None:
                result._size = other._size
            if other._angle is not None:
                result._angle = other._angle
            if other._label is not None:
                result._label = other._label
            result._lstyle = toyplot.style.combine(result.lstyle, other._lstyle)
            return result
        else:
            raise ValueError("Can't add toyplot.marker.Marker and %r" % other) # pragma: no cover

    def __eq__(self, other):
        # BUGFIX: the original expression tested ``_shape`` twice and never
        # tested ``_size``, so markers differing only in size compared equal
        # even though they hash differently.
        return self._shape == other._shape and self._mstyle == other._mstyle and self._size == other._size and self._angle == other._angle and self._label == other._label and self._lstyle == other._lstyle

    def __hash__(self):
        return hash((self._shape, self._mstyle, self._size, self._angle, self._label, self._lstyle))

    def __radd__(self, other):
        return other + self.to_html()

    def __repr__(self):
        return self.to_html()

    def to_html(self):
        """Convert a marker specification to HTML markup that can be embedded in rich text."""
        # Only attributes with truthy values are emitted.
        return """<marker%s%s%s%s%s%s/>""" % (
            " shape='%s'"% xml.sax.saxutils.escape(self._shape) if self._shape else "",
            " mstyle='%s'" % toyplot.style.to_css(self._mstyle) if self._mstyle else "",
            " size='%s'"% self._size if self._size else "",
            " angle='%s'" % self._angle if self._angle else "",
            " label='%s'" % xml.sax.saxutils.escape(self._label) if self._label else "",
            " lstyle='%s'" % toyplot.style.to_css(self._lstyle) if self._lstyle else "",
        )

    def intersect(self, p):
        """Compute the intersection between this marker's border and a line segment.

        Parameters
        ----------
        p: :class:`numpy.ndarray` with shape (2), required
            Relative coordinates of a line segment originating at the center of this marker.

        Returns
        -------
        dp: :class:`numpy.ndarray` with shape (2)
            Relative coordinates of the intersection with this marker's border.
        """
        if self._size:
            # Circular shapes: scale the direction vector to the radius.
            # NOTE: modifies ``p`` in place for these shapes.
            if self._shape in ["o", "oo", "o|", "o/", "o-", "o\\", "o+", "ox", "o*"]:
                p /= numpy.linalg.norm(p)
                p *= self._size / 2
                return p
            # Square shape: scale by the dominant axis.
            if self._shape in ["s"]:
                u = numpy.max(numpy.abs(p))
                p /= u
                p *= self._size / 2
                return p
            # Rectangular shapes "r<width>x<height>".
            if self._shape and self._shape[0] == "r":
                width, height = self._shape[1:].split("x")
                width = float(width)
                height = float(height)
                ap = numpy.abs(p)
                if ap[1]:
                    if ap[0] / ap[1] > width / height:
                        p = p / ap[0] * self._size * width / 2
                    else:
                        p = p / ap[1] * self._size * height / 2
                else:
                    p = p / ap[0] * self._size * width / 2
                return p
        # Unknown shape or zero size: no offset from the center.
        return numpy.zeros((2,))
def create(shape=None, mstyle=None, size=None, angle=None, label=None, lstyle=None):
    """Factory function for creating instances of :class:`toyplot.marker.Marker`.

    All arguments default to ``None``, i.e. "unspecified".
    """
    return Marker(shape, mstyle, size, angle, label, lstyle)
def convert(value):
    """Construct an instance of :class:`toyplot.marker.Marker` from alternative representations.

    ``None`` and existing markers pass through unchanged; a string becomes a
    marker with that shape; anything else raises :class:`ValueError`.
    """
    if value is None or isinstance(value, Marker):
        return value
    if isinstance(value, str):
        return Marker(shape=value, mstyle=None, size=None, angle=None, label=None, lstyle=None)
    raise ValueError("Can't convert %r to toyplot.marker.Marker." % value) # pragma: no cover
def from_html(html):
    """Convert a parsed xml.etree.ElementTree representation of a marker to a :class:`toyplot.marker.Marker` object."""
    def _float_attr(name):
        # Numeric attributes are optional; absent ones stay ``None``.
        raw = html.get(name, None)
        return None if raw is None else float(raw)

    return Marker(
        shape=html.get("shape", None),
        mstyle=toyplot.style.parse(html.get("mstyle", "")),
        size=_float_attr("size"),
        angle=_float_attr("angle"),
        label=html.get("label", None),
        lstyle=toyplot.style.parse(html.get("lstyle", "")),
    )
| [
"numpy.linalg.norm",
"copy.deepcopy",
"numpy.zeros",
"numpy.abs"
] | [((4488, 4505), 'numpy.zeros', 'numpy.zeros', (['(2,)'], {}), '((2,))\n', (4499, 4505), False, 'import numpy\n'), ((1243, 1262), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (1256, 1262), False, 'import copy\n'), ((3661, 3681), 'numpy.linalg.norm', 'numpy.linalg.norm', (['p'], {}), '(p)\n', (3678, 3681), False, 'import numpy\n'), ((4119, 4131), 'numpy.abs', 'numpy.abs', (['p'], {}), '(p)\n', (4128, 4131), False, 'import numpy\n'), ((3810, 3822), 'numpy.abs', 'numpy.abs', (['p'], {}), '(p)\n', (3819, 3822), False, 'import numpy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author: Wesley
# @time: 2020/11/25 20:29
import numpy as np
def iou(box, boxes, isMin=False):
    """Intersection-over-union between one box and an array of boxes.

    ``box`` is ``[x1, y1, x2, y2, ...]``; ``boxes`` holds one such row per
    box.  With ``isMin`` True the intersection is divided by the smaller of
    the two areas instead of by the union.
    """
    area = (box[2] - box[0]) * (box[3] - box[1])
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    # Intersection rectangle, clamped to zero extent when boxes are disjoint.
    left = np.maximum(box[0], boxes[:, 0])
    top = np.maximum(box[1], boxes[:, 1])
    right = np.minimum(box[2], boxes[:, 2])
    bottom = np.minimum(box[3], boxes[:, 3])
    inter = np.maximum(0, right - left) * np.maximum(0, bottom - top)
    if isMin:
        denom = np.minimum(area, areas)
    else:
        denom = area + areas - inter
    return inter / denom
def nms(boxes, thresh=0.3, isMin=False):
    """Greedy non-maximum suppression.

    ``boxes`` is an (N, 5) array of ``[x1, y1, x2, y2, score]`` rows.  Boxes
    are visited by descending score; any remaining box whose IoU with the
    current best reaches ``thresh`` is discarded.  Returns the kept boxes
    stacked into a matrix (empty array for empty input).
    """
    if boxes.shape[0] == 0:
        return np.array([])
    # Highest confidence first.
    remaining = boxes[(-boxes[:, 4]).argsort()].copy()
    kept = []
    while remaining.shape[0] > 1:
        best, rest = remaining[0], remaining[1:]
        kept.append(best)
        overlap = iou(best, rest, isMin)
        remaining = rest[np.where(overlap < thresh)]
    # A single surviving box is always kept.
    if remaining.shape[0] == 1:
        kept.append(remaining[0])
    return np.stack(kept)
def expand_box(box):
    """Return square boxes sharing each input box's center.

    Each ``[x1, y1, x2, y2, ...]`` row is replaced by a square whose side
    is ``max(width, height)``, centered on the original box.  The input
    array is left untouched; an empty input yields an empty array.
    """
    if box.shape[0] == 0:
        return np.array([])
    squared = box.copy()
    width = squared[:, 2] - squared[:, 0]
    height = squared[:, 3] - squared[:, 1]
    side = np.maximum(width, height)
    center_x = squared[:, 0] + 0.5 * width
    center_y = squared[:, 1] + 0.5 * height
    squared[:, 0] = center_x - 0.5 * side
    squared[:, 1] = center_y - 0.5 * side
    squared[:, 2] = squared[:, 0] + side
    squared[:, 3] = squared[:, 1] + side
    return squared
def nms2(boxes, thresh=0.3, is_min=False, softnms=False):
    """Non-maximum suppression with an optional soft-NMS variant.

    boxes: (N, 5) array of [x1, y1, x2, y2, score] rows.
    thresh: IoU threshold above which a box is considered overlapping.
    is_min: forwarded to ``iou`` (divide by smaller area instead of union).
    softnms: if True, decay the scores of overlapping boxes instead of
        removing them outright, then drop boxes whose decayed score falls
        below 0.5.
    Returns the kept boxes stacked into a matrix.
    """
    if boxes.shape[0] == 0:
        return np.array([])
    _boxes = boxes[(-boxes[:, 4]).argsort()]  # sort by confidence, highest first
    r_boxes = []
    while _boxes.shape[0] > 1:
        a_box = _boxes[0]
        b_boxes = _boxes[1:]
        # NOTE(review): ``score`` is a *view* into ``b_boxes``; the in-place
        # ``*=`` in the soft-NMS branch therefore mutates the candidates'
        # stored scores -- this appears intentional for score decay.
        score = b_boxes[:, 4]
        r_boxes.append(a_box)
        if softnms:
            score_thresh = 0.5
            # Decay confidence of boxes whose IoU exceeds the threshold.
            t_idx = np.where(iou(a_box, b_boxes, is_min) > thresh)
            score[t_idx] *= (1 - iou(a_box, b_boxes, is_min))[t_idx]
            # Drop boxes whose (decayed) score fell below score_thresh.
            _boxes = np.delete(b_boxes, np.where(score < score_thresh), axis=0)
        else:
            # Keep only boxes whose IoU with the current best is below thresh.
            index = np.where(iou(a_box, b_boxes, is_min) < thresh)
            _boxes = b_boxes[index]
    # A single remaining box is always kept.
    if _boxes.shape[0] == 1:
        r_boxes.append(_boxes[0])
    # Assemble the kept boxes into a matrix.
    return np.stack(r_boxes)
| [
"numpy.stack",
"numpy.minimum",
"numpy.maximum",
"numpy.where",
"numpy.array"
] | [((301, 332), 'numpy.maximum', 'np.maximum', (['box[0]', 'boxes[:, 0]'], {}), '(box[0], boxes[:, 0])\n', (311, 332), True, 'import numpy as np\n'), ((343, 374), 'numpy.maximum', 'np.maximum', (['box[1]', 'boxes[:, 1]'], {}), '(box[1], boxes[:, 1])\n', (353, 374), True, 'import numpy as np\n'), ((385, 416), 'numpy.minimum', 'np.minimum', (['box[2]', 'boxes[:, 2]'], {}), '(box[2], boxes[:, 2])\n', (395, 416), True, 'import numpy as np\n'), ((427, 458), 'numpy.minimum', 'np.minimum', (['box[3]', 'boxes[:, 3]'], {}), '(box[3], boxes[:, 3])\n', (437, 458), True, 'import numpy as np\n'), ((468, 492), 'numpy.maximum', 'np.maximum', (['(0)', '(xx2 - xx1)'], {}), '(0, xx2 - xx1)\n', (478, 492), True, 'import numpy as np\n'), ((501, 525), 'numpy.maximum', 'np.maximum', (['(0)', '(yy2 - yy1)'], {}), '(0, yy2 - yy1)\n', (511, 525), True, 'import numpy as np\n'), ((1352, 1375), 'numpy.stack', 'np.stack', (['reserve_boxes'], {}), '(reserve_boxes)\n', (1360, 1375), True, 'import numpy as np\n'), ((1599, 1615), 'numpy.maximum', 'np.maximum', (['w', 'h'], {}), '(w, h)\n', (1609, 1615), True, 'import numpy as np\n'), ((2807, 2824), 'numpy.stack', 'np.stack', (['r_boxes'], {}), '(r_boxes)\n', (2815, 2824), True, 'import numpy as np\n'), ((808, 820), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (816, 820), True, 'import numpy as np\n'), ((1193, 1215), 'numpy.where', 'np.where', (['(out < thresh)'], {}), '(out < thresh)\n', (1201, 1215), True, 'import numpy as np\n'), ((1494, 1506), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1502, 1506), True, 'import numpy as np\n'), ((2000, 2012), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2008, 2012), True, 'import numpy as np\n'), ((609, 641), 'numpy.minimum', 'np.minimum', (['box_area', 'boxes_area'], {}), '(box_area, boxes_area)\n', (619, 641), True, 'import numpy as np\n'), ((2515, 2545), 'numpy.where', 'np.where', (['(score < score_thresh)'], {}), '(score < score_thresh)\n', (2523, 2545), True, 'import numpy as 
np\n')] |
# Practical Machine learning
# k-Nearest neighbor example
# Chapter 6
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors
class kNN():
    """Minimal k-nearest-neighbour classifier (Euclidean metric, majority vote)."""

    def __init__(self, k):
        # Number of neighbours consulted for each prediction.
        self.k = k

    def _euclidian_distance(self, x1, x2):
        """Euclidean distance between each row of ``x1`` and the vector ``x2``.

        ``x1`` is a 2-D ndarray (one sample per row); ``x2`` is a 1-D
        ndarray.  Returns a 1-D array of distances.
        """
        return np.sqrt(np.sum((x1 - x2) ** 2, axis=1))

    def fit(self, X, y):
        """Memorize the training features ``X`` and labels ``y``."""
        self.X_data = X
        self.y = y

    def predict(self, X):
        """Classify each row of ``X`` by majority vote of its k nearest neighbours.

        Parameters
        ----------
        X : numpy ndarray, one query sample per row.

        Returns
        -------
        numpy ndarray of predicted labels.
        """
        preds = []
        for x1 in np.copy(X):
            # Labels sorted by increasing distance to the query point.
            dist = self._euclidian_distance(self.X_data, x1)
            ranked = np.vstack((dist, self.y)).T
            ranked = ranked[ranked[:, 0].argsort(axis=0)][:, -1]
            # Majority vote among the k closest.  BUGFIX: the original called
            # ``sts.mode`` but ``sts`` was never imported (NameError at
            # runtime); np.unique + argmax reproduces scipy.stats.mode's
            # behaviour, including breaking ties toward the smallest label.
            votes, counts = np.unique(ranked[0:self.k], return_counts=True)
            preds.append(votes[np.argmax(counts)])
        return np.array(preds)
# load dataset
data = pd.read_csv('data/iris.data', header=None)

h = .02  # step size in the decision-boundary mesh

# Create color maps (light for decision regions, bold for training points)
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

# lets take only first two columns
X = data.iloc[:, :2].values
y = data.iloc[:, -1]
# convert class names to integer labels 0,1,2
y = y.apply(lambda x: 0 if x == 'Iris-setosa' else x)
y = y.apply(lambda x: 1 if x == 'Iris-versicolor' else x)
y = y.apply(lambda x: 2 if x == 'Iris-virginica' else x)
y = y.values

n_neighbors = 10

# ======================================
# my kNN
cl = kNN(n_neighbors)
cl.fit(X, y)

# ======================================
# scikit-learn
clf = neighbors.KNeighborsClassifier(n_neighbors, weights='uniform')
clf.fit(X, y)

# Plot decision boundary
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# Get predictions.
# BUGFIX: the two assignments were swapped -- ``cl`` is the hand-rolled kNN
# and ``clf`` the scikit-learn classifier, so each figure used to display
# the other implementation's decision boundary.
knn_preds = cl.predict(np.c_[xx.ravel(), yy.ravel()])
scikit_preds = clf.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
knn_preds = knn_preds.reshape(xx.shape)
scikit_preds = scikit_preds.reshape(xx.shape)

#=====================================================
# plot kNN prediction
plt.figure()
plt.pcolormesh(xx, yy, knn_preds, cmap=cmap_light,
               vmin=knn_preds.min(), vmax=knn_preds.max())
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title('Prediction from kNN')
plt.savefig('knn_example.png')

#=====================================================
# plot scikit-learn predictions
plt.figure()
plt.pcolormesh(xx, yy, scikit_preds, cmap=cmap_light,
               vmin=scikit_preds.min(), vmax=scikit_preds.max())
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title('Prediction from scikit-learn')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.copy",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.array",
"numpy.vstack",
"matplotlib.colors.ListedColormap",
"matplotlib... | [((1386, 1428), 'pandas.read_csv', 'pd.read_csv', (['"""data/iris.data"""'], {'header': 'None'}), "('data/iris.data', header=None)\n", (1397, 1428), True, 'import pandas as pd\n'), ((1496, 1545), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#FFAAAA', '#AAFFAA', '#AAAAFF']"], {}), "(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\n", (1510, 1545), False, 'from matplotlib.colors import ListedColormap\n'), ((1558, 1607), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#FF0000', '#00FF00', '#0000FF']"], {}), "(['#FF0000', '#00FF00', '#0000FF'])\n", (1572, 1607), False, 'from matplotlib.colors import ListedColormap\n'), ((2071, 2133), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', (['n_neighbors'], {'weights': '"""uniform"""'}), "(n_neighbors, weights='uniform')\n", (2101, 2133), False, 'from sklearn import neighbors\n'), ((2709, 2721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2719, 2721), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2914), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y', 'cmap': 'cmap_bold'}), '(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)\n', (2875, 2914), True, 'import matplotlib.pyplot as plt\n'), ((2973, 3005), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction from kNN"""'], {}), "('Prediction from kNN')\n", (2982, 3005), True, 'import matplotlib.pyplot as plt\n'), ((3006, 3036), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""knn_example.png"""'], {}), "('knn_example.png')\n", (3017, 3036), True, 'import matplotlib.pyplot as plt\n'), ((3125, 3137), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3135, 3137), True, 'import matplotlib.pyplot as plt\n'), ((3289, 3339), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y', 'cmap': 'cmap_bold'}), '(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)\n', (3300, 3339), True, 'import matplotlib.pyplot as plt\n'), ((3398, 3439), 'matplotlib.pyplot.title', 
'plt.title', (['"""Prediction from scikit-learn"""'], {}), "('Prediction from scikit-learn')\n", (3407, 3439), True, 'import matplotlib.pyplot as plt\n'), ((3441, 3451), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3449, 3451), True, 'import matplotlib.pyplot as plt\n'), ((2300, 2326), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (2309, 2326), True, 'import numpy as np\n'), ((2349, 2375), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (2358, 2375), True, 'import numpy as np\n'), ((502, 520), 'numpy.sum', 'np.sum', (['a2'], {'axis': '(1)'}), '(a2, axis=1)\n', (508, 520), True, 'import numpy as np\n'), ((533, 543), 'numpy.sqrt', 'np.sqrt', (['b'], {}), '(b)\n', (540, 543), True, 'import numpy as np\n'), ((951, 961), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (958, 961), True, 'import numpy as np\n'), ((1346, 1361), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (1354, 1361), True, 'import numpy as np\n'), ((1127, 1152), 'numpy.vstack', 'np.vstack', (['(dist, self.y)'], {}), '((dist, self.y))\n', (1136, 1152), True, 'import numpy as np\n')] |
"""
Helper functions to deal with data from brick-spring-car
modelling.
"""
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import os
from PIL import Image, ImageDraw, ImageFont
# Locate the bundled Open Sans font relative to this module:
# grandparent directory of this file, then one level further up.
_c_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)))
_updir = os.path.split(_c_dir)[0]
# Default font used for animation frame annotations.
FONT_FILE = os.path.join(_updir, 'Open_Sans', 'OpenSans-Regular.ttf')
def _group_columns(plot_cols):
col_groups = []
col_groups.append(([_c for _c in plot_cols if _c.endswith("position") or
_c.endswith("ext")], "Position [m]"))
col_groups.append(([_c for _c in plot_cols if _c.endswith("speed")],
"Speed [m/s]"))
col_groups.append(([_c for _c in plot_cols if _c.endswith("energy")],
"Energy [J]"))
col_groups.append(([_c for _c in plot_cols if _c.endswith("force")],
"Force [N]"))
col_groups.append(([_c for _c in plot_cols if _c.endswith("power")],
"Power [W]"))
col_groups = [_c for _c in col_groups if len(_c[0]) > 0]
return col_groups
def plot_brick_spring(df, plot_cols):
    """Plot brick-spring-car simulation data as stacked subplots.

    One subplot row is created per physical quantity found among
    ``plot_cols``; every matching dataframe column becomes a scatter trace
    on that row, with a shared time axis.
    """
    groups = _group_columns(plot_cols)
    fig = make_subplots(rows=len(groups), cols=1, shared_xaxes=True,
                        x_title="time [s]")
    for row, (columns, axis_label) in enumerate(groups, start=1):
        for column in columns:
            trace = go.Scatter(x=df.index, y=np.array(df[column]), name=column)
            fig.add_trace(trace, col=1, row=row)
        fig.update_yaxes(title_text=axis_label, row=row, col=1)
    return fig
class BrickSpringAnim():
    """Iterator class delivering images for animation
    one image pr. row in the dataframe"""
    # NOTE(review): the drawing methods read dataframe columns by position --
    # iloc[:, 0] as brick position, iloc[:, 2] as car position and
    # iloc[:, 4] as spring extension; confirm against the data producer.
    def _create_base_im(self, h, w):
        """Create the static background (ground line with hatching)."""
        # White canvas of the configured size.
        _im = Image.fromarray(np.uint8(np.ones((self.h, self.w)) * 255))
        _draw_im = ImageDraw.Draw(_im)
        # Draw "ground"
        _draw_im.line([(0, h - 10), (w - 1, h - 10)], width=2, fill=0)
        _cx = 0
        # Diagonal hatching below the ground line, every 10 px.
        while _cx < w:
            _draw_im.line([(_cx, h-1), (_cx + 10, h-10)], fill=0)
            _cx += 10
        return _im
    def __init__(self, df, font=None, h=100, w=600, cols=None):
        """Store the simulation dataframe and precompute drawing geometry.

        df: simulation data, one animation frame per row, time as index.
        font: PIL font for annotations (defaults to the bundled Open Sans).
        h, w: frame height and nominal width in pixels.
        cols: column names to annotate (defaults to all dataframe columns).
        """
        self.data = df
        if font is None:
            self.font = ImageFont.truetype(FONT_FILE, 20)
        else:
            self.font = font
        self.w = w
        self.h = h
        # Brick width/height, car width/height, and brick-to-car gap (px).
        self.bw = 30
        self.bh = 30
        self.cw = 60
        self.ch = 35
        self.car_offset = 20
        # Widen the canvas so shapes at either extreme stay visible.
        _tot_xoffset = self.bw + self.cw + self.car_offset
        self.base_image = self._create_base_im(h, w + 2 * _tot_xoffset)
        # Conversion factor from model position (assumed 0..6 m -- confirm)
        # to pixels.
        self.pos2pix = (w - _tot_xoffset * 2)/6.0
        self.cols = df.columns if cols is None else cols
    def __len__(self):
        # One frame per dataframe row.
        return self.data.shape[0]
    def _draw_brick(self, draw_im, i):
        """Draw the brick outline for frame ``i`` (position from column 0)."""
        # Draw brick
        _x_pos = int(self.data.iloc[i, 0] * self.pos2pix)
        _y_pos = self.h - self.bh - 13
        _c_rect = [(_x_pos, _y_pos),
                   (_x_pos + self.bw, _y_pos + self.bh)]
        draw_im.rectangle(_c_rect, outline=0, width=2)
    def _draw_car(self, draw_im, i):
        """Draw the car body and wheels for frame ``i`` (position from column 2)."""
        # Vertical tick marking the car's initial position.
        _x_inintial = self.bw + self.car_offset
        draw_im.line([(_x_inintial, self.h), (_x_inintial, self.h - 15)],
                     width=3, fill=0)
        _x_c = int(self.data.iloc[i, 2] * self.pos2pix) + \
            self.bw + self.car_offset
        _y_pos = self.h - self.ch - 15
        # Vertical tick at the car's current position.
        draw_im.line([(_x_c, self.h), (_x_c, self.h - 15)],
                     width=3, fill=0)
        _c_rect = [(_x_c, _y_pos),
                   (_x_c + self.cw, _y_pos + self.ch)]
        draw_im.rectangle(_c_rect, outline=0, width=2)
        # Draw wheels
        _wheel1 = [(_x_c + self.cw/4 - 6, _y_pos + self.ch - 7),
                   (_x_c + self.cw/4 + 6, _y_pos + self.ch + 5)]
        _wheel2 = [(_x_c + 3 * self.cw/4 - 6, _y_pos + self.ch - 7),
                   (_x_c + 3 * self.cw/4 + 6, _y_pos + self.ch + 5)]
        draw_im.ellipse(_wheel1, fill=100,
                        outline=0, width=1)
        draw_im.ellipse(_wheel2, fill=100,
                        outline=0, width=1)
    def _draw_spring(self, draw_im, i, elems=20):
        """Draw the spring as a zig-zag of ``elems`` segments between brick and car.

        The spring extension is read from column 4 of the dataframe.
        """
        _c_ext = self.data.iloc[i, 4]
        _x_brick_end = int(self.data.iloc[i, 0] * self.pos2pix) + \
            self.bw
        # Spring start/end x coordinates, leaving half the gap on each side.
        _x_s = _x_brick_end + int(self.car_offset/2)
        _s_ext_pix = int(_c_ext * self.pos2pix)
        _x_se = _x_s + _s_ext_pix
        _y_upper = self.h - self.ch
        _y_lower = _y_upper + 15
        # End plates of the spring.
        draw_im.line([(_x_s, _y_upper), (_x_s, _y_lower)], width=1, fill=0)
        draw_im.line([(_x_s + _s_ext_pix, _y_upper),
                      (_x_s + _s_ext_pix, _y_lower)], width=1, fill=0)
        # Short connectors to the brick and to the car.
        draw_im.line([(_x_brick_end, _y_upper + 7),
                      (_x_s, _y_upper + 7)], width=1, fill=0)
        draw_im.line([(_x_se, _y_upper + 7),
                      (_x_se + self.car_offset/2, _y_upper + 7)], width=1,
                     fill=0)
        # Alternate between the upper and lower rails to form the zig-zag.
        for _i in range(elems):
            _x_start = _x_s + int(_c_ext * (_i / elems) * self.pos2pix)
            _x_end = _x_s + int(_c_ext * ((_i + 1) / elems) * self.pos2pix)
            if _i % 2 == 0:
                _y_start, _y_end = _y_upper, _y_lower
            else:
                _y_start, _y_end = _y_lower, _y_upper
            draw_im.line([(_x_start, _y_start), (_x_end, _y_end)],
                         width=1, fill=0)
    def _draw_text(self, draw_im, i):
        """Annotate frame ``i`` with the time stamp and the tracked column values."""
        t = self.data.index[i]
        draw_im.text((0, 0), "time: %0.02f [s]" % t,
                     fill=0, font=self.font)
        cols = _group_columns(self.cols)
        # Flow the "name: value unit" labels left-to-right, wrapping to the
        # next 20 px text row when the frame width would be exceeded.
        _y_loc = 20
        _x_loc = 0
        for c in cols:
            # Unit string is the bracketed token of the axis label.
            _c_unit = c[-1].split()[-1]
            for c_name in c[0]:
                _c_val = float(self.data.loc[t, c_name])
                _c_output = "%s: %0.2f %s" % (c_name, _c_val, _c_unit)
                _c_x_size = self.font.getsize(_c_output)[0]
                draw_im.text((_x_loc, _y_loc), _c_output,
                             fill=0, font=self.font)
                _x_loc += _c_x_size + 10
                if _x_loc + _c_x_size > self.w:
                    _x_loc = 0
                    _y_loc += 20
    def _draw_frame(self, i):
        """Compose one full animation frame on a copy of the background."""
        _im = self.base_image.copy()
        _draw_im = ImageDraw.Draw(_im)
        self._draw_brick(_draw_im, i)
        self._draw_spring(_draw_im, i)
        self._draw_car(_draw_im, i)
        self._draw_text(_draw_im, i)
        return _im
    def __getitem__(self, position):
        # Sequence protocol: frame at index ``position``.
        return self._draw_frame(position)
| [
"os.path.dirname",
"numpy.ones",
"PIL.ImageFont.truetype",
"numpy.array",
"PIL.ImageDraw.Draw",
"os.path.split",
"os.path.join"
] | [((340, 397), 'os.path.join', 'os.path.join', (['_updir', '"""Open_Sans"""', '"""OpenSans-Regular.ttf"""'], {}), "(_updir, 'Open_Sans', 'OpenSans-Regular.ttf')\n", (352, 397), False, 'import os\n'), ((303, 324), 'os.path.split', 'os.path.split', (['_c_dir'], {}), '(_c_dir)\n', (316, 324), False, 'import os\n'), ((266, 291), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (281, 291), False, 'import os\n'), ((1927, 1946), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['_im'], {}), '(_im)\n', (1941, 1946), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((6378, 6397), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['_im'], {}), '(_im)\n', (6392, 6397), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2327, 2360), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['FONT_FILE', '(20)'], {}), '(FONT_FILE, 20)\n', (2345, 2360), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1510, 1530), 'numpy.array', 'np.array', (['df[_c_col]'], {}), '(df[_c_col])\n', (1518, 1530), True, 'import numpy as np\n'), ((1874, 1899), 'numpy.ones', 'np.ones', (['(self.h, self.w)'], {}), '((self.h, self.w))\n', (1881, 1899), True, 'import numpy as np\n')] |
import datetime
import sys
import yaml
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from copy import deepcopy
from agents.PPO import PPO
from envs.env_factory import EnvFactory
from automl.bohb_optim import run_bohb_parallel, run_bohb_serial
import numpy as np
NUM_EVALS = 3
class ExperimentWrapper():
    """Adapter object handed to BOHB: declares the search space and runs one evaluation."""

    def get_bohb_parameters(self):
        """Return the BOHB controller settings (budgets, eta, iteration count)."""
        return {
            'min_budget': 1,
            'max_budget': 2,
            'eta': 2,
            'iterations': 1000,
            'random_fraction': 0.3,
        }

    def get_configspace(self):
        """Build the ICM hyperparameter search space."""
        hyperparameters = [
            CSH.UniformFloatHyperparameter(name='lr', lower=1e-6, upper=1e-3, log=True, default_value=1e-4),
            CSH.UniformFloatHyperparameter(name='beta', lower=0.001, upper=1.0, log=True, default_value=0.2),
            CSH.UniformFloatHyperparameter(name='eta', lower=0.001, upper=1.0, log=True, default_value=0.5),
            CSH.UniformIntegerHyperparameter(name='feature_dim', lower=16, upper=256, log=True, default_value=64),
            CSH.UniformIntegerHyperparameter(name='hidden_size', lower=16, upper=256, log=True, default_value=128),
        ]
        cs = CS.ConfigurationSpace()
        for hp in hyperparameters:
            cs.add_hyperparameter(hp)
        return cs

    def get_specific_config(self, cso, default_config, budget):
        """Merge fixed PPO settings and the sampled ICM values into a copy of default_config."""
        config = deepcopy(default_config)
        # Fixed PPO training settings (not part of the search space).
        config['agents']['ppo'] = {
            'test_episodes': 1,
            'print_rate': 100,
            'init_episodes': 0,
            'update_episodes': 1,
            'ppo_epochs': 10,
            'gamma': 0.99,
            'lr': 1e-5,
            'vf_coef': 1,
            'ent_coef': 0.001,
            'eps_clip': 0.2,
            'rb_size': 1000000,
            'same_action_num': 1,
            'activation_fn': 'tanh',
            'hidden_size': 128,
            'hidden_layer': 2,
            'action_std': 0.1,
            'early_out_num': 50,
            'early_out_virtual_diff': 0.02,
        }
        # ICM hyperparameters come straight from the BOHB sample.
        for key in ('lr', 'beta', 'eta', 'feature_dim', 'hidden_size'):
            config['agents']['icm'][key] = cso[key]
        config['device'] = 'cuda'
        return config

    def compute(self, working_dir, bohb_id, config_id, cso, budget, *args, **kwargs):
        """Train PPO+ICM NUM_EVALS times and return the negated mean total reward as loss."""
        with open("default_config_halfcheetah.yaml", 'r') as stream:
            default_config = yaml.safe_load(stream)
        config = self.get_specific_config(cso, default_config, budget)
        print('----------------------------')
        print("START BOHB ITERATION")
        print('CONFIG: ' + str(config))
        print('CSO: ' + str(cso))
        print('BUDGET: ' + str(budget))
        print('----------------------------')
        config["agents"]["ppo"]["train_episodes"] = 3000
        info = {}
        # Build the real environment once and reuse it across evaluations.
        env_fac = EnvFactory(config)
        real_env = env_fac.generate_real_env()
        rewards_list = []
        for _ in range(NUM_EVALS):
            learner = PPO(env=real_env,
                          config=config,
                          icm=True)
            rewards, _, _ = learner.train(real_env)
            rewards_list.append(sum(rewards))
        # BOHB minimizes, so negate the mean episode reward.
        score = -np.mean(rewards_list)
        info['config'] = str(config)
        print('----------------------------')
        print('FINAL SCORE: ' + str(score))
        print("END BOHB ITERATION")
        print('----------------------------')
        return {
            "loss": score,
            "info": info
        }
if __name__ == "__main__":
    # Tag the run with the current date-hour so repeated runs do not collide.
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H")
    run_id = f'bohb_params_ppo_hc_icm_1e-3_ent_coef_1e-1_action_std_{timestamp}'
    if len(sys.argv) > 1:
        # Parallel mode: argv[1] is the worker id, argv[2] the worker count.
        for arg in sys.argv[1:]:
            print(arg)
        res = run_bohb_parallel(id=sys.argv[1],
                                bohb_workers=sys.argv[2],
                                run_id=run_id,
                                experiment_wrapper=ExperimentWrapper())
    else:
        # Single-process fallback.
        res = run_bohb_serial(run_id=run_id,
                              experiment_wrapper=ExperimentWrapper())
| [
"ConfigSpace.ConfigurationSpace",
"copy.deepcopy",
"ConfigSpace.hyperparameters.UniformIntegerHyperparameter",
"envs.env_factory.EnvFactory",
"agents.PPO.PPO",
"numpy.mean",
"ConfigSpace.hyperparameters.UniformFloatHyperparameter",
"yaml.safe_load",
"datetime.datetime.now"
] | [((4201, 4224), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4222, 4224), False, 'import datetime\n'), ((618, 641), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {}), '()\n', (639, 641), True, 'import ConfigSpace as CS\n'), ((1394, 1418), 'copy.deepcopy', 'deepcopy', (['default_config'], {}), '(default_config)\n', (1402, 1418), False, 'from copy import deepcopy\n'), ((3417, 3435), 'envs.env_factory.EnvFactory', 'EnvFactory', (['config'], {}), '(config)\n', (3427, 3435), False, 'from envs.env_factory import EnvFactory\n'), ((673, 777), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""lr"""', 'lower': '(1e-06)', 'upper': '(0.001)', 'log': '(True)', 'default_value': '(0.0001)'}), "(name='lr', lower=1e-06, upper=0.001, log=\n True, default_value=0.0001)\n", (703, 777), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((800, 901), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""beta"""', 'lower': '(0.001)', 'upper': '(1.0)', 'log': '(True)', 'default_value': '(0.2)'}), "(name='beta', lower=0.001, upper=1.0, log=\n True, default_value=0.2)\n", (830, 901), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((928, 1027), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""eta"""', 'lower': '(0.001)', 'upper': '(1.0)', 'log': '(True)', 'default_value': '(0.5)'}), "(name='eta', lower=0.001, upper=1.0, log=True,\n default_value=0.5)\n", (958, 1027), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((1055, 1160), 'ConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'CSH.UniformIntegerHyperparameter', ([], {'name': '"""feature_dim"""', 'lower': '(16)', 'upper': '(256)', 'log': '(True)', 'default_value': '(64)'}), "(name='feature_dim', lower=16, upper=256,\n log=True, default_value=64)\n", (1087, 1160), True, 'import 
ConfigSpace.hyperparameters as CSH\n'), ((1188, 1294), 'ConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'CSH.UniformIntegerHyperparameter', ([], {'name': '"""hidden_size"""', 'lower': '(16)', 'upper': '(256)', 'log': '(True)', 'default_value': '(128)'}), "(name='hidden_size', lower=16, upper=256,\n log=True, default_value=128)\n", (1220, 1294), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((2948, 2970), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (2962, 2970), False, 'import yaml\n'), ((3583, 3625), 'agents.PPO.PPO', 'PPO', ([], {'env': 'real_env', 'config': 'config', 'icm': '(True)'}), '(env=real_env, config=config, icm=True)\n', (3586, 3625), False, 'from agents.PPO import PPO\n'), ((3852, 3873), 'numpy.mean', 'np.mean', (['rewards_list'], {}), '(rewards_list)\n', (3859, 3873), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_cpca_vs_pca [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_cpca_vs_pca&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-exer-cpca).
import numpy as np
from arpym.tools import cpca_cov, pca_cov
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_cpca_vs_pca-parameters)
# +
# symmetric and positive definite covariance matrix
sigma2 = np.array([[0.25, 0.30, 0.25], [0.30, 1, 0], [0.25, 0, 6.25]])

# full rank linear constraints matrix
d = np.array([[1, 0, 1], [0, 1, 0]])
k_, n_ = np.shape(d)

# identity matrix and relative-error helper shared by Steps 2-6
eye_n = np.eye(n_)


def _rel_err(a, b):
    """Relative error ||a - b|| / ||b|| in the Frobenius norm."""
    return np.linalg.norm(a - b) / np.linalg.norm(b)


# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_cpca_vs_pca-implementation-step01): Compute the conditional principal variances/directions of sigma2

e_d, lambda2_d = cpca_cov(sigma2, d)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_cpca_vs_pca-implementation-step02): Compute the product e_d'*sigma2*e_d and check that it coincides with the diagonal matrix Diag(lambda2_d)

err_cpca_diag = _rel_err(e_d.T @ sigma2 @ e_d, np.diag(lambda2_d))

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_cpca_vs_pca-implementation-step03): Compute the products e_d'*e_d and e_d*e_d' and verify that the conditional principal directions are not orthogonal

err_cpca_orth1 = _rel_err(e_d.T @ e_d, eye_n)
err_cpca_orth2 = _rel_err(e_d @ e_d.T, eye_n)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_cpca_vs_pca-implementation-step04): Compute the principal variances/directions of sigma2

e, lambda2 = pca_cov(sigma2)

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_cpca_vs_pca-implementation-step05): Compute the product e'*sigma2*e and check that it coincides with the diagonal matrix Diag(lambda2)

err_pca_diag = _rel_err(e.T @ sigma2 @ e, np.diag(lambda2))

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_cpca_vs_pca-implementation-step06): Compute the products e'*e and e*e' and verify that the principal directions are orthogonal

err_pca_orth1 = _rel_err(e.T @ e, eye_n)
err_pca_orth2 = _rel_err(e @ e.T, eye_n)
| [
"numpy.eye",
"arpym.tools.pca_cov",
"arpym.tools.cpca_cov",
"numpy.shape",
"numpy.array",
"numpy.diag"
] | [((824, 883), 'numpy.array', 'np.array', (['[[0.25, 0.3, 0.25], [0.3, 1, 0], [0.25, 0, 6.25]]'], {}), '([[0.25, 0.3, 0.25], [0.3, 1, 0], [0.25, 0, 6.25]])\n', (832, 883), True, 'import numpy as np\n'), ((929, 961), 'numpy.array', 'np.array', (['[[1, 0, 1], [0, 1, 0]]'], {}), '([[1, 0, 1], [0, 1, 0]])\n', (937, 961), True, 'import numpy as np\n'), ((972, 983), 'numpy.shape', 'np.shape', (['d'], {}), '(d)\n', (980, 983), True, 'import numpy as np\n'), ((1171, 1190), 'arpym.tools.cpca_cov', 'cpca_cov', (['sigma2', 'd'], {}), '(sigma2, d)\n', (1179, 1190), False, 'from arpym.tools import cpca_cov, pca_cov\n'), ((2047, 2062), 'arpym.tools.pca_cov', 'pca_cov', (['sigma2'], {}), '(sigma2)\n', (2054, 2062), False, 'from arpym.tools import cpca_cov, pca_cov\n'), ((1482, 1500), 'numpy.diag', 'np.diag', (['lambda2_d'], {}), '(lambda2_d)\n', (1489, 1500), True, 'import numpy as np\n'), ((1787, 1797), 'numpy.eye', 'np.eye', (['n_'], {}), '(n_)\n', (1793, 1797), True, 'import numpy as np\n'), ((1868, 1878), 'numpy.eye', 'np.eye', (['n_'], {}), '(n_)\n', (1874, 1878), True, 'import numpy as np\n'), ((2341, 2357), 'numpy.diag', 'np.diag', (['lambda2'], {}), '(lambda2)\n', (2348, 2357), True, 'import numpy as np\n'), ((2615, 2625), 'numpy.eye', 'np.eye', (['n_'], {}), '(n_)\n', (2621, 2625), True, 'import numpy as np\n'), ((2691, 2701), 'numpy.eye', 'np.eye', (['n_'], {}), '(n_)\n', (2697, 2701), True, 'import numpy as np\n'), ((1447, 1465), 'numpy.diag', 'np.diag', (['lambda2_d'], {}), '(lambda2_d)\n', (1454, 1465), True, 'import numpy as np\n'), ((1760, 1770), 'numpy.eye', 'np.eye', (['n_'], {}), '(n_)\n', (1766, 1770), True, 'import numpy as np\n'), ((1841, 1851), 'numpy.eye', 'np.eye', (['n_'], {}), '(n_)\n', (1847, 1851), True, 'import numpy as np\n'), ((2308, 2324), 'numpy.diag', 'np.diag', (['lambda2'], {}), '(lambda2)\n', (2315, 2324), True, 'import numpy as np\n'), ((2588, 2598), 'numpy.eye', 'np.eye', (['n_'], {}), '(n_)\n', (2594, 2598), True, 'import numpy as 
np\n'), ((2664, 2674), 'numpy.eye', 'np.eye', (['n_'], {}), '(n_)\n', (2670, 2674), True, 'import numpy as np\n')] |
import sys
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
sys.path.insert(0,'../')
from neurwin import fcnn
from envs.deadlineSchedulingEnv import deadlineSchedulingEnv
from envs.recoveringBanditsEnv import recoveringBanditsEnv
from envs.sizeAwareIndexEnv import sizeAwareIndexEnv
# Figure geometry and global matplotlib styling for the three-panel plot.
WIDTH = 12
HEIGHT = 3
plt.rcParams['font.size'] = 12
plt.rcParams['legend.fontsize'] = 10
# Embed fonts as TrueType (type 42) so the exported PDF/PS remains editable.
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.family'] = 'Times New Roman'
LINEWIDTH = 1
BATCHSIZE = 5
# Discount factor and horizon used by every Q-value rollout below.
BETA = 0.99
TIMESTEPS = 300
SEED = 42
# Number of Monte-Carlo runs averaged per (state, activation-cost) point.
NORUNS = 50
linestyles=['solid', 'dotted', 'dashed', 'dashdot', (0, (3,1,1,1,1,1))]
fig, axes = plt.subplots(nrows=1,ncols=3, figsize=(WIDTH, HEIGHT), gridspec_kw={'wspace':0.13, 'hspace':0.0}, frameon=False)
#################################### DEADLINE SCHEDULING ####################################
# Panel 1: estimate D_s(lambda) = Q_active - Q_passive for the deadline-scheduling
# bandit at a few random states, sweeping the activation cost lambda.
# NOTE: the statements below consume global RNG state in a fixed order
# (random / np.random / torch seeds), so statement order matters.
print(f'D_s for deadline')
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
PROCESSINGCOST = 0.5
EPISODESTRAINED = 2000
# Sample numOfStatesToTest random (deadline D, load B) states to evaluate.
D_vals = np.arange(1,13)
B_vals = np.arange(1,10)
numOfStatesToTest = 5
statesToTest = []
for a in range(numOfStatesToTest):
    state = []
    D = np.random.choice(D_vals)
    B = np.random.choice(B_vals)
    state.append(D)
    state.append(B)
    statesToTest.append(state)
seed = np.random.randint(0, 100000, size = 1000000)
currentActivationCost = np.arange(start=0, stop=6, step=1)
# Load the NEURWIN index network trained on this environment.
savedDirectory = (f'../trainResults/neurwin/deadline_env/')
modelDir = savedDirectory+(f'seed_{50}_lr_0.001_batchSize_{5}_trainedNumEpisodes_{EPISODESTRAINED}/trained_model.pt')
agent = fcnn(stateSize=2)
agent.load_state_dict(torch.load(modelDir))
D_final = []
for state in statesToTest:
    D_states = []
    for currentAC in currentActivationCost:
        Q_passive_list = []
        Q_active_list = []
        for i in range(NORUNS):
            Q_active = 0
            Q_passive = 0
            # Rollout 1: force action=1 at t=0, then follow the index policy
            # (activate whenever the network's index exceeds the current cost).
            env = deadlineSchedulingEnv(seed=seed[i], numEpisodes=1, episodeLimit=TIMESTEPS, maxDeadline=12,
            maxLoad=9, newJobProb=0.7, processingCost=PROCESSINGCOST, train=False, batchSize=5, noiseVar=0)
            env.reset()
            env.arm[0][0] = state[0]
            env.arm[0][2] = state[0]
            env.arm[0][1] = state[1]
            nextState, reward, done, info = env.step(1)
            Q_active += BETA**(0)*(reward - currentAC)
            for x in range(1, TIMESTEPS):
                index = agent.forward(nextState)
                if index >= currentAC:
                    action = 1
                    nextState, reward, done, info = env.step(action)
                else:
                    action = 0
                    nextState, reward, done, info = env.step(action)
                Q_active += BETA**(x)*(reward - currentAC*action)
            # Rollout 2: identical environment/seed, but action=0 forced at t=0.
            env = deadlineSchedulingEnv(seed=seed[i], numEpisodes=1, episodeLimit=TIMESTEPS, maxDeadline=12,
            maxLoad=9, newJobProb=0.7, processingCost=PROCESSINGCOST, train=False, batchSize=5, noiseVar=0)
            env.reset()
            env.arm[0][0] = state[0]
            env.arm[0][2] = state[0]
            env.arm[0][1] = state[1]
            nextState, reward, done, info = env.step(0)
            Q_passive += BETA**(0)*(reward)
            for g in range(1, TIMESTEPS):
                index = agent.forward(nextState)
                if index >= currentAC:
                    action = 1
                    nextState, reward, done, info = env.step(action)
                else:
                    action = 0
                    nextState, reward, done, info = env.step(action)
                Q_passive += BETA**(g)*(reward - currentAC*action)
            Q_active_list.append(Q_active)
            Q_passive_list.append(Q_passive)
        # D_s(lambda) estimate: mean paired difference over the NORUNS runs.
        average = sum([a_i - b_i for a_i, b_i in zip(Q_active_list, Q_passive_list)]) / NORUNS
        D_states.append(average)
    D_final.append(D_states)
# One curve per sampled state on the left panel.
for q in range(np.shape(statesToTest)[0]):
    axes[0].plot(currentActivationCost, D_final[q], marker='.', label=f's = {statesToTest[q]}', linewidth=LINEWIDTH, linestyle= linestyles[q])
axes[0].set_xticks(currentActivationCost)
axes[0].legend(frameon=False, loc='lower left')
################################### RECOVERING SCHEDULING ###################################
# Panel 2: same D_s(lambda) construction for the recovering-bandits environment.
# Deterministic environment (noiseVar=0.0), so a single rollout per point suffices.
print(f'D_s for recovering')
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
EPISODESTRAINED = 50000
MAXWAIT = 20
selectedActivationFunction = 'A'
D_final = []
numOfStatesToTest = 5
# States are random waiting times in [1, MAXWAIT].
states = np.random.randint(1,MAXWAIT+1, size=numOfStatesToTest)
seed = np.random.randint(0, 1000, size = 1)[0]
# Parameters of recovery function 'A'.
THETA = [10.,0.2,0.0]
currentActivationCost = np.arange(start=0,stop=10.6, step=0.6)
savedDirectory = (f'../trainResults/neurwin/recovering_bandits_env/recovery_function_{selectedActivationFunction}/')
modelDir = savedDirectory+(f'seed_{50}_lr_0.001_batchSize_{5}_trainedNumEpisodes_{EPISODESTRAINED}/trained_model.pt')
agent = fcnn(stateSize=1)
agent.load_state_dict(torch.load(modelDir))
for state in states:
    activation_states = []
    for a in range(np.shape(currentActivationCost)[0]):
        Q_passive = 0
        Q_active = 0
        # Rollout 1: force activation at t=0, then follow the index policy.
        env = recoveringBanditsEnv(seed=seed, numEpisodes=1, episodeLimit=TIMESTEPS, train=False,
        batchSize=5, thetaVals=THETA, noiseVar=0.0, maxWait = 20)
        env.arm[0] = state
        nextState, reward, done, info = env.step(1)
        Q_active += BETA**(0)*(reward - currentActivationCost[a])
        for i in range(1,TIMESTEPS):
            index = agent.forward(nextState)
            if index >= currentActivationCost[a]:
                action = 1
                nextState, reward, done, info = env.step(action)
            else:
                action = 0
                nextState, reward, done, info = env.step(action)
            Q_active += BETA**(i)*(reward - currentActivationCost[a]*action)
        # Rollout 2: identical except action=0 is forced at t=0.
        env = recoveringBanditsEnv(seed=seed, numEpisodes=1, episodeLimit=TIMESTEPS, train=False,
        batchSize=5, thetaVals=THETA, noiseVar=0.0, maxWait = 20)
        env.arm[0] = state
        nextState, reward, done, info = env.step(0)
        Q_passive += BETA**(0)*(reward)
        for i in range(1,TIMESTEPS):
            index = agent.forward(nextState)
            if index >= currentActivationCost[a]:
                action = 1
                nextState, reward, done, info = env.step(action)
            else:
                action = 0
                nextState, reward, done, info = env.step(action)
            Q_passive += BETA**(i)*(reward - currentActivationCost[a]*action)
        activation_states.append(Q_active - Q_passive)
    D_final.append(activation_states)
# One curve per sampled waiting-time state on the middle panel.
for x in range(len(states)):
    axes[1].plot(currentActivationCost, D_final[x], marker='.', label=f's = {states[x]}.', linewidth=LINEWIDTH, linestyle= linestyles[x])
axes[1].set_xticks(np.arange(0,11))
axes[1].legend(frameon=False, loc='lower left')
################################### SIZE-AWARE SCHEDULING ###################################
# Panel 3: D_s(lambda) for the size-aware (wireless scheduling) environment.
# Rollouts stop early once the remaining load hits zero (nextState[0] > 0 guard).
print(f'D_s for wireless scheduling')
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
MAXLOAD = 1000000
EPISODESTRAINED = 20000
goodStateProb = 0.1
armClass=2
seed = np.random.randint(0, 1000, size = 1000000)
statesToTest = []
numOfStatesToTest = 5
# States are random (remaining load, channel state) pairs.
load_vals = np.random.randint(low=300000, high=800000, size=numOfStatesToTest)
for a in range(numOfStatesToTest):
    channelState = np.random.choice([1.0,0.0])
    state = [load_vals[a], channelState]
    statesToTest.append(state)
currentActivationCost = np.arange(start=0, stop=16, step=2)
savedDirectory = (f'../trainResults/neurwin/size_aware_env/case_1/class_{armClass}/')
modelDir = savedDirectory+(f'seed_{50}_lr_0.001_batchSize_{5}_trainedNumEpisodes_{EPISODESTRAINED}/trained_model.pt')
agent = fcnn(stateSize=2)
agent.load_state_dict(torch.load(modelDir))
D_final = []
for state in statesToTest:
    D_states = []
    for cost in currentActivationCost:
        Q_active_list = []
        Q_passive_list = []
        for i in range(NORUNS):
            Q_active = 0
            Q_passive = 0
            # Rollout 1: force action=1 at t=0, then follow the index policy
            # until the horizon or until the arm's load is exhausted.
            env = sizeAwareIndexEnv(numEpisodes=1, HOLDINGCOST=1, seed=seed[i], Training=False,
            r1=8400, r2=33600, q=goodStateProb, case=1, classVal=armClass, load=state[0], noiseVar = 0.0,
            maxLoad = MAXLOAD, batchSize=5, episodeLimit=TIMESTEPS, fixedSizeMDP=False)
            env.reset()
            env.arm[0][0] = state[0]
            env.arm[0][1] = state[1]
            env.channelState[0] = state[1]
            nextState, reward, done, info = env.step(1)
            Q_active += BETA**(0)*(reward - cost)
            x = 1
            while (x < TIMESTEPS and nextState[0] > 0):
                index = agent.forward(nextState)
                if index >= cost:
                    action = 1
                    nextState, reward, done, info = env.step(action)
                else:
                    action = 0
                    nextState, reward, done, info = env.step(action)
                Q_active += BETA**(x)*(reward - cost*action)
                x += 1
            # Rollout 2: identical environment/seed, but action=0 forced at t=0.
            env = sizeAwareIndexEnv(numEpisodes=1, HOLDINGCOST=1, seed=seed[i], Training=False,
            r1=8400, r2=33600, q=goodStateProb, case=1, classVal=armClass, load=state[0], noiseVar = 0.0,
            maxLoad = MAXLOAD, batchSize=5, episodeLimit=TIMESTEPS, fixedSizeMDP=False)
            env.reset()
            env.arm[0][0] = state[0]
            env.arm[0][1] = state[1]
            env.channelState[0] = state[1]
            nextState, reward, done, info = env.step(0)
            Q_passive += BETA**(0)*(reward)
            x = 1
            while (x < TIMESTEPS and nextState[0] > 0):
                index = agent.forward(nextState)
                if index >= cost:
                    action = 1
                    nextState, reward, done, info = env.step(action)
                else:
                    action = 0
                    nextState, reward, done, info = env.step(action)
                Q_passive += BETA**(x)*(reward - cost*action)
                x += 1
            Q_active_list.append(Q_active)
            Q_passive_list.append(Q_passive)
        # D_s(lambda) estimate: mean paired difference over the NORUNS runs.
        average = sum([a_i - b_i for a_i, b_i in zip(Q_active_list, Q_passive_list)]) / NORUNS
        D_states.append(average)
    D_final.append(D_states)
# One curve per sampled state on the right panel.
for q in range(np.shape(statesToTest)[0]):
    axes[2].plot(currentActivationCost, D_final[q], marker='.', label=f's = {statesToTest[q]}', linewidth=LINEWIDTH, linestyle= linestyles[q])
axes[2].set_xticks(np.arange(0, 16, 2))
axes[2].legend(frameon=False, loc='upper right')
#############################################################################################
# Label the three panels and export the figure.
# Raw strings for the LaTeX labels: '\l' is not a recognized escape, so the
# plain literals triggered a DeprecationWarning while producing the same text.
# Placeholder-free f-strings also dropped (they were plain literals).
axes[0].set_title('Deadline Scheduling', weight='bold', fontsize=10)
axes[1].set_title('Recovering Bandits', weight='bold', fontsize=10)
axes[2].set_title('Wireless Scheduling', weight='bold', fontsize=10)
axes[0].set_ylabel(r'$D_s(\lambda)$', weight='bold', fontsize=10)
axes[1].set_xlabel(r'$\lambda$', weight='bold', fontsize=10)
plt.savefig('../plotResults/d_s_results.pdf', bbox_inches='tight')
plt.show()
| [
"envs.deadlineSchedulingEnv.deadlineSchedulingEnv",
"numpy.random.seed",
"matplotlib.pyplot.show",
"envs.sizeAwareIndexEnv.sizeAwareIndexEnv",
"torch.manual_seed",
"torch.load",
"sys.path.insert",
"numpy.shape",
"envs.recoveringBanditsEnv.recoveringBanditsEnv",
"numpy.random.randint",
"random.se... | [((116, 141), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (131, 141), False, 'import sys\n'), ((716, 836), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(WIDTH, HEIGHT)', 'gridspec_kw': "{'wspace': 0.13, 'hspace': 0.0}", 'frameon': '(False)'}), "(nrows=1, ncols=3, figsize=(WIDTH, HEIGHT), gridspec_kw={\n 'wspace': 0.13, 'hspace': 0.0}, frameon=False)\n", (728, 836), True, 'import matplotlib.pyplot as plt\n'), ((952, 969), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (963, 969), False, 'import random\n'), ((970, 990), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (984, 990), True, 'import numpy as np\n'), ((991, 1014), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (1008, 1014), False, 'import torch\n'), ((1071, 1087), 'numpy.arange', 'np.arange', (['(1)', '(13)'], {}), '(1, 13)\n', (1080, 1087), True, 'import numpy as np\n'), ((1096, 1112), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (1105, 1112), True, 'import numpy as np\n'), ((1350, 1392), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100000)'], {'size': '(1000000)'}), '(0, 100000, size=1000000)\n', (1367, 1392), True, 'import numpy as np\n'), ((1419, 1453), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(6)', 'step': '(1)'}), '(start=0, stop=6, step=1)\n', (1428, 1453), True, 'import numpy as np\n'), ((1641, 1658), 'neurwin.fcnn', 'fcnn', ([], {'stateSize': '(2)'}), '(stateSize=2)\n', (1645, 1658), False, 'from neurwin import fcnn\n'), ((4388, 4405), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (4399, 4405), False, 'import random\n'), ((4406, 4426), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (4420, 4426), True, 'import numpy as np\n'), ((4427, 4450), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (4444, 4450), False, 'import torch\n'), ((4568, 4625), 
'numpy.random.randint', 'np.random.randint', (['(1)', '(MAXWAIT + 1)'], {'size': 'numOfStatesToTest'}), '(1, MAXWAIT + 1, size=numOfStatesToTest)\n', (4585, 4625), True, 'import numpy as np\n'), ((4717, 4756), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(10.6)', 'step': '(0.6)'}), '(start=0, stop=10.6, step=0.6)\n', (4726, 4756), True, 'import numpy as np\n'), ((5000, 5017), 'neurwin.fcnn', 'fcnn', ([], {'stateSize': '(1)'}), '(stateSize=1)\n', (5004, 5017), False, 'from neurwin import fcnn\n'), ((7147, 7164), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (7158, 7164), False, 'import random\n'), ((7165, 7185), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (7179, 7185), True, 'import numpy as np\n'), ((7186, 7209), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (7203, 7209), False, 'import torch\n'), ((7293, 7333), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': '(1000000)'}), '(0, 1000, size=1000000)\n', (7310, 7333), True, 'import numpy as np\n'), ((7390, 7456), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(300000)', 'high': '(800000)', 'size': 'numOfStatesToTest'}), '(low=300000, high=800000, size=numOfStatesToTest)\n', (7407, 7456), True, 'import numpy as np\n'), ((7636, 7671), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(16)', 'step': '(2)'}), '(start=0, stop=16, step=2)\n', (7645, 7671), True, 'import numpy as np\n'), ((7885, 7902), 'neurwin.fcnn', 'fcnn', ([], {'stateSize': '(2)'}), '(stateSize=2)\n', (7889, 7902), False, 'from neurwin import fcnn\n'), ((11197, 11263), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plotResults/d_s_results.pdf"""'], {'bbox_inches': '"""tight"""'}), "('../plotResults/d_s_results.pdf', bbox_inches='tight')\n", (11208, 11263), True, 'import matplotlib.pyplot as plt\n'), ((11264, 11274), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11272, 11274), True, 'import matplotlib.pyplot as 
plt\n'), ((1212, 1236), 'numpy.random.choice', 'np.random.choice', (['D_vals'], {}), '(D_vals)\n', (1228, 1236), True, 'import numpy as np\n'), ((1245, 1269), 'numpy.random.choice', 'np.random.choice', (['B_vals'], {}), '(B_vals)\n', (1261, 1269), True, 'import numpy as np\n'), ((1681, 1701), 'torch.load', 'torch.load', (['modelDir'], {}), '(modelDir)\n', (1691, 1701), False, 'import torch\n'), ((4630, 4664), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': '(1)'}), '(0, 1000, size=1)\n', (4647, 4664), True, 'import numpy as np\n'), ((5040, 5060), 'torch.load', 'torch.load', (['modelDir'], {}), '(modelDir)\n', (5050, 5060), False, 'import torch\n'), ((6948, 6964), 'numpy.arange', 'np.arange', (['(0)', '(11)'], {}), '(0, 11)\n', (6957, 6964), True, 'import numpy as np\n'), ((7511, 7539), 'numpy.random.choice', 'np.random.choice', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (7527, 7539), True, 'import numpy as np\n'), ((7925, 7945), 'torch.load', 'torch.load', (['modelDir'], {}), '(modelDir)\n', (7935, 7945), False, 'import torch\n'), ((10694, 10713), 'numpy.arange', 'np.arange', (['(0)', '(16)', '(2)'], {}), '(0, 16, 2)\n', (10703, 10713), True, 'import numpy as np\n'), ((4001, 4023), 'numpy.shape', 'np.shape', (['statesToTest'], {}), '(statesToTest)\n', (4009, 4023), True, 'import numpy as np\n'), ((5225, 5368), 'envs.recoveringBanditsEnv.recoveringBanditsEnv', 'recoveringBanditsEnv', ([], {'seed': 'seed', 'numEpisodes': '(1)', 'episodeLimit': 'TIMESTEPS', 'train': '(False)', 'batchSize': '(5)', 'thetaVals': 'THETA', 'noiseVar': '(0.0)', 'maxWait': '(20)'}), '(seed=seed, numEpisodes=1, episodeLimit=TIMESTEPS,\n train=False, batchSize=5, thetaVals=THETA, noiseVar=0.0, maxWait=20)\n', (5245, 5368), False, 'from envs.recoveringBanditsEnv import recoveringBanditsEnv\n'), ((5964, 6107), 'envs.recoveringBanditsEnv.recoveringBanditsEnv', 'recoveringBanditsEnv', ([], {'seed': 'seed', 'numEpisodes': '(1)', 'episodeLimit': 'TIMESTEPS', 'train': 
'(False)', 'batchSize': '(5)', 'thetaVals': 'THETA', 'noiseVar': '(0.0)', 'maxWait': '(20)'}), '(seed=seed, numEpisodes=1, episodeLimit=TIMESTEPS,\n train=False, batchSize=5, thetaVals=THETA, noiseVar=0.0, maxWait=20)\n', (5984, 6107), False, 'from envs.recoveringBanditsEnv import recoveringBanditsEnv\n'), ((10502, 10524), 'numpy.shape', 'np.shape', (['statesToTest'], {}), '(statesToTest)\n', (10510, 10524), True, 'import numpy as np\n'), ((1966, 2161), 'envs.deadlineSchedulingEnv.deadlineSchedulingEnv', 'deadlineSchedulingEnv', ([], {'seed': 'seed[i]', 'numEpisodes': '(1)', 'episodeLimit': 'TIMESTEPS', 'maxDeadline': '(12)', 'maxLoad': '(9)', 'newJobProb': '(0.7)', 'processingCost': 'PROCESSINGCOST', 'train': '(False)', 'batchSize': '(5)', 'noiseVar': '(0)'}), '(seed=seed[i], numEpisodes=1, episodeLimit=TIMESTEPS,\n maxDeadline=12, maxLoad=9, newJobProb=0.7, processingCost=\n PROCESSINGCOST, train=False, batchSize=5, noiseVar=0)\n', (1987, 2161), False, 'from envs.deadlineSchedulingEnv import deadlineSchedulingEnv\n'), ((2861, 3056), 'envs.deadlineSchedulingEnv.deadlineSchedulingEnv', 'deadlineSchedulingEnv', ([], {'seed': 'seed[i]', 'numEpisodes': '(1)', 'episodeLimit': 'TIMESTEPS', 'maxDeadline': '(12)', 'maxLoad': '(9)', 'newJobProb': '(0.7)', 'processingCost': 'PROCESSINGCOST', 'train': '(False)', 'batchSize': '(5)', 'noiseVar': '(0)'}), '(seed=seed[i], numEpisodes=1, episodeLimit=TIMESTEPS,\n maxDeadline=12, maxLoad=9, newJobProb=0.7, processingCost=\n PROCESSINGCOST, train=False, batchSize=5, noiseVar=0)\n', (2882, 3056), False, 'from envs.deadlineSchedulingEnv import deadlineSchedulingEnv\n'), ((5131, 5162), 'numpy.shape', 'np.shape', (['currentActivationCost'], {}), '(currentActivationCost)\n', (5139, 5162), True, 'import numpy as np\n'), ((8206, 8463), 'envs.sizeAwareIndexEnv.sizeAwareIndexEnv', 'sizeAwareIndexEnv', ([], {'numEpisodes': '(1)', 'HOLDINGCOST': '(1)', 'seed': 'seed[i]', 'Training': '(False)', 'r1': '(8400)', 'r2': '(33600)', 'q': 
'goodStateProb', 'case': '(1)', 'classVal': 'armClass', 'load': 'state[0]', 'noiseVar': '(0.0)', 'maxLoad': 'MAXLOAD', 'batchSize': '(5)', 'episodeLimit': 'TIMESTEPS', 'fixedSizeMDP': '(False)'}), '(numEpisodes=1, HOLDINGCOST=1, seed=seed[i], Training=\n False, r1=8400, r2=33600, q=goodStateProb, case=1, classVal=armClass,\n load=state[0], noiseVar=0.0, maxLoad=MAXLOAD, batchSize=5, episodeLimit\n =TIMESTEPS, fixedSizeMDP=False)\n', (8223, 8463), False, 'from envs.sizeAwareIndexEnv import sizeAwareIndexEnv\n'), ((9242, 9499), 'envs.sizeAwareIndexEnv.sizeAwareIndexEnv', 'sizeAwareIndexEnv', ([], {'numEpisodes': '(1)', 'HOLDINGCOST': '(1)', 'seed': 'seed[i]', 'Training': '(False)', 'r1': '(8400)', 'r2': '(33600)', 'q': 'goodStateProb', 'case': '(1)', 'classVal': 'armClass', 'load': 'state[0]', 'noiseVar': '(0.0)', 'maxLoad': 'MAXLOAD', 'batchSize': '(5)', 'episodeLimit': 'TIMESTEPS', 'fixedSizeMDP': '(False)'}), '(numEpisodes=1, HOLDINGCOST=1, seed=seed[i], Training=\n False, r1=8400, r2=33600, q=goodStateProb, case=1, classVal=armClass,\n load=state[0], noiseVar=0.0, maxLoad=MAXLOAD, batchSize=5, episodeLimit\n =TIMESTEPS, fixedSizeMDP=False)\n', (9259, 9499), False, 'from envs.sizeAwareIndexEnv import sizeAwareIndexEnv\n')] |
import numpy as np
from mne import pick_channels
def get_sub_list(data_dir, allow_all=False):
    """Prompt for subject IDs and return them as sorted 'sub-<id>' names.

    When *allow_all* is True and the user types ``all``, every 'sub-*'
    directory found under *data_dir* is returned instead.
    """
    print('What IDs are being preprocessed?')
    print('(Enter multiple values separated by a comma; e.g., 101,102)')
    if allow_all:
        print('To process all subjects, type all')
    answer = input('Enter IDs: ')
    if allow_all and answer == 'all':
        # Discover subjects on disk via BIDS-style directory names.
        subjects = [entry.name for entry in data_dir.glob('sub-*')]
    else:
        subjects = [f'sub-{token}' for token in answer.split(',')]
    return sorted(subjects)
def adjust_events_photosensor(raw, events, photosensor='Photosensor',
                              tmin=-.02, tmax=.05, threshold=.80,
                              min_tdiff=.0085, return_diagnostics=False):
    """Realign event onsets to the measured photosensor onset.

    For each event, the *photosensor* channel is inspected in a window
    [tmin, tmax] (seconds) around the nominal onset; the first sample
    exceeding ``threshold`` times the window's baseline-corrected peak is
    taken as the true onset.  Events delayed by more than ``min_tdiff``
    seconds are shifted in place (``events`` rows are mutated).

    Returns the events array, or when ``return_diagnostics`` is True the
    tuple ``(events, delays * sfreq, n_events_adjusted)``.
    NOTE(review): ``delays`` is already in samples here, so ``delays *
    sfreq`` looks dimensionally odd (samples * Hz) -- confirm whether
    ``delays / sfreq`` (seconds) was intended.
    """
    # TODO Add input checks
    # Extract the photosensor trace and matching sample latencies
    data, times = raw.copy().pick_channels(
        [photosensor]).get_data(return_times=True)
    data = np.squeeze(data)
    sfreq = raw.info['sfreq']
    latencies = (times * sfreq).astype(int)
    # Convert tmin, tmax and min_tdiff to samples
    lmin = int(tmin * sfreq)
    lmax = int(tmax * sfreq)
    min_ldiff = np.ceil(min_tdiff * sfreq).astype(int)
    # Loop through events
    delays = np.array([])
    n_events_adjusted = 0
    for event in events:
        # Get segment latency window and baseline window
        seg_lonset = event[0]
        seg_lstart = seg_lonset + lmin
        seg_lend = seg_lonset + lmax
        seg_window = np.logical_and(
            latencies >= seg_lstart, latencies < seg_lend)
        seg_baseline = np.logical_and(
            latencies >= seg_lstart, latencies < seg_lonset)
        # Extract data segment and substract baseline, and latencies
        # for this event
        data_segment = data[seg_window] - data[seg_baseline].mean()
        latency_segment = latencies[seg_window]
        # First sample crossing threshold * peak marks the photosensor onset
        psensor_lonset = latency_segment[np.where(
            data_segment > (data_segment.max() * threshold))[0][0]]
        # Compute the delay in samples
        this_delay = psensor_lonset - seg_lonset
        delays = np.append(delays, this_delay)
        # Correct onset if delayed by too much (either direction)
        # NOTE(review): only positive delays trigger adjustment here,
        # despite the "either direction" wording above -- confirm intent.
        if this_delay > min_ldiff:
            event[0] = psensor_lonset
            n_events_adjusted += 1
    if return_diagnostics:
        return (events, delays * sfreq, n_events_adjusted)
    else:
        return events
def inspect_epochs(inst, bad_epochs=None, events=None, event_id=None,
                   n_epochs=10, block=True, scalings=None, return_copy=True):
    """Interactively browse epochs, highlighting flagged epochs and bad channels.

    Epochs listed in ``bad_epochs`` are drawn in magenta, the rest in black;
    channels already marked bad are greyed out. Returns the (possibly copied)
    epochs object after the interactive plot closes.
    """
    # Work on a copy unless the caller wants in-place marking.
    epochs = inst.copy() if return_copy else inst
    if bad_epochs is None:
        bad_epochs = []
    n_channels = len(epochs.ch_names)
    # One colour list per epoch: magenta for flagged epochs, black otherwise.
    epoch_colors = []
    for idx in range(len(epochs)):
        shade = 'm' if idx in bad_epochs else 'k'
        epoch_colors.append([shade] * n_channels)
    # Grey out channels that are already marked bad.
    bad_chans = pick_channels(epochs.ch_names, epochs.info['bads']).tolist()
    for colors in epoch_colors:
        for ch in bad_chans:
            colors[ch] = (.8, .8, .8, 1)
    # Launch the interactive browser.
    epochs.plot(n_channels=n_channels, n_epochs=n_epochs, block=block,
                scalings=scalings, epoch_colors=epoch_colors,
                events=events, event_id=event_id, picks=epochs.ch_names)
    return epochs
| [
"numpy.ceil",
"numpy.logical_and",
"numpy.append",
"numpy.array",
"mne.pick_channels",
"numpy.squeeze"
] | [((987, 1003), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (997, 1003), True, 'import numpy as np\n'), ((1282, 1294), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1290, 1294), True, 'import numpy as np\n'), ((1531, 1592), 'numpy.logical_and', 'np.logical_and', (['(latencies >= seg_lstart)', '(latencies < seg_lend)'], {}), '(latencies >= seg_lstart, latencies < seg_lend)\n', (1545, 1592), True, 'import numpy as np\n'), ((1629, 1692), 'numpy.logical_and', 'np.logical_and', (['(latencies >= seg_lstart)', '(latencies < seg_lonset)'], {}), '(latencies >= seg_lstart, latencies < seg_lonset)\n', (1643, 1692), True, 'import numpy as np\n'), ((2142, 2171), 'numpy.append', 'np.append', (['delays', 'this_delay'], {}), '(delays, this_delay)\n', (2151, 2171), True, 'import numpy as np\n'), ((1203, 1229), 'numpy.ceil', 'np.ceil', (['(min_tdiff * sfreq)'], {}), '(min_tdiff * sfreq)\n', (1210, 1229), True, 'import numpy as np\n'), ((3249, 3300), 'mne.pick_channels', 'pick_channels', (['epochs.ch_names', "epochs.info['bads']"], {}), "(epochs.ch_names, epochs.info['bads'])\n", (3262, 3300), False, 'from mne import pick_channels\n')] |
try:
import jax
import jax.numpy as np
import jax.experimental.stax as stax
except ModuleNotFoundError:
import warnings
warnings.warn("ksc.backends.jax: Cannot find JAX! This is expected on Windows.")
import numpy as np
# Use relative import to work around a python 3.6 issue
# https://stackoverflow.com/questions/57615877/importing-difference-in-python3-6-and-python3-7
from . import common
from .common import *
_built_ins = common._built_ins
def broadcast_add(x, b):
    """Add bias vector ``b`` to every row of 2-D ``x`` via broadcasting."""
    return b[None, :] + x
def dot(x, y):
    """Dot product of ``x`` and ``y`` (delegates to ``np.dot``)."""
    product = np.dot(x, y)
    return product
def transpose(x):
    """Return the transpose of ``x`` (its ``.T`` attribute)."""
    flipped = x.T
    return flipped
def relu(x):
    """Element-wise rectified linear unit: max(x, 0)."""
    return np.maximum(0.0, x)
def sigmoid(x):
    """Logistic sigmoid, delegated to ``jax.nn.sigmoid``."""
    activated = jax.nn.sigmoid(x)
    return activated
def log_softmax(x):
    """Numerically stable log-softmax over the last axis."""
    # Subtract the per-row maximum so exp() cannot overflow.
    shifted = x - np.amax(x, axis=-1, keepdims=True)
    log_norm = np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    return shifted - log_norm
def conv_2d_no_bias(x, weights, ksizes, strides, paddings):
    """2-D convolution without bias in NCHW layout with OIHW kernels.

    ``ksizes`` is unused here; the kernel extent is implied by ``weights``.
    """
    result = jax.lax.conv_general_dilated(
        x,
        weights,
        strides,
        paddings,
        dimension_numbers=("NCHW", "OIHW", "NCHW"),  # matches pytorch / onnx
    )
    print(f"conv_2d shape: {result.shape}")
    return result
def normalize_2d(x, weights):
    """Standardise a channel-first tensor: (x - mean) / sigma per channel.

    ``weights`` is a (mean, sigma) pair of per-channel 1-D arrays; they are
    broadcast over the trailing spatial axes.
    """
    mean, sigma = weights
    centered = x - mean[:, None, None]
    return centered / sigma[:, None, None]
def batch_norm_2d(x, weights):
    """Inference-mode batch normalisation over the channel axis.

    ``weights`` is (mean, var, gamma, beta); the normalisation from
    ``normalize_2d`` is inlined here, using sqrt(var + 1e-5) as sigma.
    """
    mean, var, gamma, beta = weights
    sigma = np.sqrt(var + 1e-5)
    normalized = (x - mean[:, None, None]) / sigma[:, None, None]
    return gamma[:, None, None] * normalized + beta[:, None, None]
def to_float(x):
    """Cast array-likes to float32; anything else goes through ``float()``."""
    if not hasattr(x, "astype"):
        return float(x)
    return x.astype(np.float32)
def _pooling_factory(pool_type, padding):
    """Return a pooling function backed by the named stax layer.

    ``pool_type`` is a stax attribute name ("MaxPool"/"AvgPool") and
    ``padding`` is the padding mode ("SAME"/"VALID") baked into the closure.
    """
    def pooling(x, pool_size, strides):
        layer = getattr(stax, pool_type)
        _, apply_fn = layer(pool_size, strides, padding=padding, spec="NCHW")
        return apply_fn((), x)
    return pooling
# Pre-built pooling helpers for every (op, padding) combination used by
# max_pool / avg_pool below. The original defined only max_pool_same and
# avg_pool_valid, so max_pool with zero padding and avg_pool with non-zero
# padding raised NameError (max_pool_valid / avg_pool_same were missing).
max_pool_same = _pooling_factory("MaxPool", "SAME")
max_pool_valid = _pooling_factory("MaxPool", "VALID")
avg_pool_same = _pooling_factory("AvgPool", "SAME")
avg_pool_valid = _pooling_factory("AvgPool", "VALID")
# jax's stax pooling layers only accept "SAME"/"VALID" modes, not explicit
# per-edge padding sizes, so we approximate: zero padding -> VALID, else SAME.
def max_pool(x, pool_size, strides, paddings):
    """Max pooling; zero ``paddings`` selects VALID mode, anything else SAME."""
    if paddings != ((0, 0), (0, 0)):
        return max_pool_same(x, pool_size, strides)
    return max_pool_valid(x, pool_size, strides)
def avg_pool(x, pool_size, strides, paddings):
    """Average pooling; zero ``paddings`` selects VALID mode, otherwise SAME."""
    if paddings != ((0, 0), (0, 0)):
        return avg_pool_same(x, pool_size, strides)
    return avg_pool_valid(x, pool_size, strides)
def flatten(x):
    """Collapse all but the leading (batch) dimension into one axis."""
    batch = x.shape[0]
    return x.reshape((batch, -1))
def reducemean(x):
    """Mean over all elements of ``x``."""
    overall_mean = np.mean(x)
    return overall_mean
| [
"numpy.maximum",
"jax.lax.conv_general_dilated",
"warnings.warn",
"numpy.amax",
"numpy.mean",
"numpy.exp",
"numpy.dot",
"jax.nn.sigmoid",
"numpy.sqrt"
] | [((554, 566), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (560, 566), True, 'import numpy as np\n'), ((628, 646), 'numpy.maximum', 'np.maximum', (['x', '(0.0)'], {}), '(x, 0.0)\n', (638, 646), True, 'import numpy as np\n'), ((676, 693), 'jax.nn.sigmoid', 'jax.nn.sigmoid', (['x'], {}), '(x)\n', (690, 693), False, 'import jax\n'), ((728, 762), 'numpy.amax', 'np.amax', (['x'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (735, 762), True, 'import numpy as np\n'), ((912, 1019), 'jax.lax.conv_general_dilated', 'jax.lax.conv_general_dilated', (['x', 'weights', 'strides', 'paddings'], {'dimension_numbers': "('NCHW', 'OIHW', 'NCHW')"}), "(x, weights, strides, paddings,\n dimension_numbers=('NCHW', 'OIHW', 'NCHW'))\n", (940, 1019), False, 'import jax\n'), ((1345, 1365), 'numpy.sqrt', 'np.sqrt', (['(var + 1e-05)'], {}), '(var + 1e-05)\n', (1352, 1365), True, 'import numpy as np\n'), ((2557, 2567), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2564, 2567), True, 'import numpy as np\n'), ((141, 227), 'warnings.warn', 'warnings.warn', (['"""ksc.backends.jax: Cannot find JAX! This is expected on Windows."""'], {}), "(\n 'ksc.backends.jax: Cannot find JAX! This is expected on Windows.')\n", (154, 227), False, 'import warnings\n'), ((795, 812), 'numpy.exp', 'np.exp', (['(x - x_max)'], {}), '(x - x_max)\n', (801, 812), True, 'import numpy as np\n')] |
#
# Fast discrete cosine transform algorithms (Python)
#
# Copyright (c) 2020 Project Nayuki. (MIT License)
# https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# - The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# - The Software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising from,
# out of or in connection with the Software or the use or other dealings in the
# Software.
#
# 20toduc01's note:
# I moved all the constants inside the functions so it's horrible to read now.
# I wrote a wrapper for 2-D DCT based on provided 1-D DCT code.
# I wrote this in a hurry, so it's probably not the prettiest code and I suspect
# the way I calculate 2-D DCT is not the most efficient.
import numba
import numpy as np
@numba.njit()
def dct8x8(block):
    """Scaled 8x8 forward DCT-II of ``block``, applied separably (columns
    first, then rows) using the Arai-Agui-Nakajima fast algorithm."""
    # DCT type II, scaled. Algorithm by Arai, <NAME>, 1988.
    # See: https://web.stanford.edu/class/ee398a/handouts/lectures/07-TransformCoding.pdf#page=30
    def dct8(vector):
        # 1-D 8-point scaled DCT-II butterfly network; the vN temporaries
        # are intermediate butterfly nodes and their order matters.
        v0 = vector[0] + vector[7]
        v1 = vector[1] + vector[6]
        v2 = vector[2] + vector[5]
        v3 = vector[3] + vector[4]
        v4 = vector[3] - vector[4]
        v5 = vector[2] - vector[5]
        v6 = vector[1] - vector[6]
        v7 = vector[0] - vector[7]
        v8 = v0 + v3
        v9 = v1 + v2
        v10 = v1 - v2
        v11 = v0 - v3
        v12 = -v4 - v5
        v13 = (v5 + v6) * 0.7071067811865476
        v14 = v6 + v7
        v15 = v8 + v9
        v16 = v8 - v9
        v17 = (v10 + v11) * 0.7071067811865476
        v18 = (v12 + v14) * 0.38268343236508984
        v19 = -v12 * 0.5411961001461969 - v18
        v20 = v14 * 1.3065629648763766 - v18
        v21 = v17 + v11
        v22 = v11 - v17
        v23 = v13 + v7
        v24 = v7 - v13
        v25 = v19 + v24
        v26 = v23 + v20
        v27 = v23 - v20
        v28 = v24 - v19
        # Final scaling constants fold the DCT normalisation into the output.
        return np.array([
            0.35355339059327373 * v15,
            0.25489778955207960 * v26,
            0.27059805007309850 * v21,
            0.30067244346752264 * v28,
            0.35355339059327373 * v16,
            0.44998811156820780 * v25,
            0.65328148243818820 * v22,
            1.28145772387075270 * v27,
        ])
    # Separable 2-D transform: 1-D DCT down each column, then along each row.
    ans = np.zeros((8, 8))
    for idx in range(8):
        ans[:, idx] = dct8(block[:, idx])
    for idx in range(8):
        ans[idx, :] = dct8(ans[idx, :])
    return ans
def idct8x8(block):
    """Scaled 8x8 inverse DCT (type III), undoing dct8x8 separably."""
    # DCT type III, scaled. A straightforward inverse of the forward algorithm.
    def idct8(vector):
        # Undo the forward transform's output scaling first...
        v15 = vector[0] / 0.35355339059327373
        v26 = vector[1] / 0.25489778955207960
        v21 = vector[2] / 0.27059805007309850
        v28 = vector[3] / 0.30067244346752264
        v16 = vector[4] / 0.35355339059327373
        v25 = vector[5] / 0.44998811156820780
        v22 = vector[6] / 0.65328148243818820
        v27 = vector[7] / 1.28145772387075270
        # ...then run the butterfly network in reverse.
        v19 = (v25 - v28) / 2
        v20 = (v26 - v27) / 2
        v23 = (v26 + v27) / 2
        v24 = (v25 + v28) / 2
        v7 = (v23 + v24) / 2
        v11 = (v21 + v22) / 2
        v13 = (v23 - v24) / 2
        v17 = (v21 - v22) / 2
        v8 = (v15 + v16) / 2
        v9 = (v15 - v16) / 2
        v18 = (v19 - v20) * 0.38268343236508984 # Different from original
        v12 = -(v19 * 1.3065629648763766 - v18)
        v14 = -(v18 - v20 * 0.5411961001461969)
        v6 = v14 - v7
        v5 = v13 / 0.7071067811865476 - v6
        v4 = -v5 - v12
        v10 = v17 / 0.7071067811865476 - v11
        v0 = (v8 + v11) / 2
        v1 = (v9 + v10) / 2
        v2 = (v9 - v10) / 2
        v3 = (v8 - v11) / 2
        return np.array([
            (v0 + v7) / 2,
            (v1 + v6) / 2,
            (v2 + v5) / 2,
            (v3 + v4) / 2,
            (v3 - v4) / 2,
            (v2 - v5) / 2,
            (v1 - v6) / 2,
            (v0 - v7) / 2,
        ])
    # Separable 2-D inverse: columns first, then rows (same order as dct8x8).
    ans = np.zeros((8, 8))
    for idx in range(8):
        ans[:, idx] = idct8(block[:, idx])
    for idx in range(8):
        ans[idx, :] = idct8(ans[idx, :])
return ans | [
"numpy.array",
"numba.njit",
"numpy.zeros"
] | [((1594, 1606), 'numba.njit', 'numba.njit', ([], {}), '()\n', (1604, 1606), False, 'import numba\n'), ((2791, 2807), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (2799, 2807), True, 'import numpy as np\n'), ((4159, 4175), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (4167, 4175), True, 'import numpy as np\n'), ((2527, 2762), 'numpy.array', 'np.array', (['[0.35355339059327373 * v15, 0.2548977895520796 * v26, 0.2705980500730985 *\n v21, 0.30067244346752264 * v28, 0.35355339059327373 * v16, \n 0.4499881115682078 * v25, 0.6532814824381882 * v22, 1.2814577238707527 *\n v27]'], {}), '([0.35355339059327373 * v15, 0.2548977895520796 * v26, \n 0.2705980500730985 * v21, 0.30067244346752264 * v28, \n 0.35355339059327373 * v16, 0.4499881115682078 * v25, 0.6532814824381882 *\n v22, 1.2814577238707527 * v27])\n', (2535, 2762), True, 'import numpy as np\n'), ((3991, 4125), 'numpy.array', 'np.array', (['[(v0 + v7) / 2, (v1 + v6) / 2, (v2 + v5) / 2, (v3 + v4) / 2, (v3 - v4) / 2,\n (v2 - v5) / 2, (v1 - v6) / 2, (v0 - v7) / 2]'], {}), '([(v0 + v7) / 2, (v1 + v6) / 2, (v2 + v5) / 2, (v3 + v4) / 2, (v3 -\n v4) / 2, (v2 - v5) / 2, (v1 - v6) / 2, (v0 - v7) / 2])\n', (3999, 4125), True, 'import numpy as np\n')] |
# Simple script to calculate halo/subhalo mass functions from hdf5
#
# Below run gives mass functions of the Micro-Uchuu simulation at z=0
# python uchuu_h5_mfunc.py MicroUchuu_halolist_z0p00.h5 mfunc.pdf
import numpy as np
import matplotlib.pyplot as plt
import h5py
import sys
# Parse command-line arguments: input HDF5 halo list and output figure path.
args = sys.argv
inputfile = args[1]
outputfile = args[2]

# Read virial masses and parent IDs; the context manager guarantees the file
# is closed even if a dataset read fails (the original left it open on error).
with h5py.File(inputfile, 'r') as hf:
    mvir = np.array(hf['Mvir'])
    pid = np.array(hf['pid'])

# pid == -1 marks distinct (host) halos; anything else is a subhalo.
mvir_halo = mvir[pid == -1]
mvir_subhalo = mvir[pid != -1]

# 32 logarithmic mass bins spanning 1e8 to 1e16.
bins0 = np.logspace(8, 16, 33)
n_halo, bins = np.histogram(mvir_halo, bins=bins0)
n_subhalo, bins = np.histogram(mvir_subhalo, bins=bins0)

# Geometric-mean bin centres. The original accumulated these into an integer
# array created with np.zeros_like(n_halo) (histogram counts are ints),
# silently truncating the float centres; the vectorised form keeps floats.
mbins = np.sqrt(bins[:-1] * bins[1:])

# Plot both mass functions on log-log axes and save the figure.
plt.xscale("log")
plt.yscale("log")
plt.plot(mbins, n_halo, "o-", label="halo")
plt.plot(mbins, n_subhalo, "s-", label="subhalo")
plt.legend()
plt.savefig(outputfile)
| [
"matplotlib.pyplot.xscale",
"h5py.File",
"numpy.zeros_like",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.plot",
"numpy.logspace",
"matplotlib.pyplot.legend",
"numpy.histogram",
"numpy.array",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] | [((775, 800), 'h5py.File', 'h5py.File', (['inputfile', '"""r"""'], {}), "(inputfile, 'r')\n", (784, 800), False, 'import h5py\n'), ((809, 829), 'numpy.array', 'np.array', (["hf['Mvir']"], {}), "(hf['Mvir'])\n", (817, 829), True, 'import numpy as np\n'), ((837, 856), 'numpy.array', 'np.array', (["hf['pid']"], {}), "(hf['pid'])\n", (845, 856), True, 'import numpy as np\n'), ((935, 957), 'numpy.logspace', 'np.logspace', (['(8)', '(16)', '(33)'], {}), '(8, 16, 33)\n', (946, 957), True, 'import numpy as np\n'), ((974, 1009), 'numpy.histogram', 'np.histogram', (['mvir_halo'], {'bins': 'bins0'}), '(mvir_halo, bins=bins0)\n', (986, 1009), True, 'import numpy as np\n'), ((1031, 1069), 'numpy.histogram', 'np.histogram', (['mvir_subhalo'], {'bins': 'bins0'}), '(mvir_subhalo, bins=bins0)\n', (1043, 1069), True, 'import numpy as np\n'), ((1082, 1103), 'numpy.zeros_like', 'np.zeros_like', (['n_halo'], {}), '(n_halo)\n', (1095, 1103), True, 'import numpy as np\n'), ((1181, 1198), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1191, 1198), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1216), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1209, 1216), True, 'import matplotlib.pyplot as plt\n'), ((1217, 1260), 'matplotlib.pyplot.plot', 'plt.plot', (['mbins', 'n_halo', '"""o-"""'], {'label': '"""halo"""'}), "(mbins, n_halo, 'o-', label='halo')\n", (1225, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1311), 'matplotlib.pyplot.plot', 'plt.plot', (['mbins', 'n_subhalo', '"""s-"""'], {'label': '"""subhalo"""'}), "(mbins, n_subhalo, 's-', label='subhalo')\n", (1270, 1311), True, 'import matplotlib.pyplot as plt\n'), ((1313, 1325), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1323, 1325), True, 'import matplotlib.pyplot as plt\n'), ((1326, 1349), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outputfile'], {}), '(outputfile)\n', (1337, 1349), True, 'import matplotlib.pyplot as plt\n'), 
((1150, 1180), 'numpy.sqrt', 'np.sqrt', (['(bins[i] * bins[i + 1])'], {}), '(bins[i] * bins[i + 1])\n', (1157, 1180), True, 'import numpy as np\n')] |
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input
from keras.utils.data_utils import get_file
import keras.backend as K
import h5py
import numpy as np
import tensorflow as tf
# URL of the pre-trained VGG19 weights without the fully-connected top,
# in TensorFlow channel ordering.
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
# Per-channel ImageNet mean in BGR order; subtracted in preprocess_input().
MEAN_PIXEL = np.array([103.939, 116.779, 123.68])
# NOTE(review): get_file runs at import time, so importing this module may
# trigger a network download of the weight file on first use -- confirm this
# side effect is acceptable for consumers of the module.
WEIGHTS_PATH = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
                        WEIGHTS_PATH_NO_TOP,
                        cache_subdir='models',
                        file_hash='253f8cb515780f3b799900260a226db6')
def vgg_layers(inputs, target_layer):
    """Stack VGG19 conv/pool layers on ``inputs`` and return the activation
    of the first conv in block ``target_layer`` (1 -> block1_conv1, ...,
    5 -> block5_conv1). Layer names match the pre-trained weight file so
    load_weights() can match them by name."""
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
    if target_layer == 1:
        return x
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    if target_layer == 2:
        return x
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    if target_layer == 3:
        return x
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    if target_layer == 4:
        return x
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    # Block 5 -- only the first conv is built; deeper layers are never needed.
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    return x
def load_weights(model):
    """Copy the pre-trained VGG19 weights into ``model`` and freeze those layers.

    Layers are matched by name against the weight file; layers in ``model``
    with no matching entry keep their initial weights and stay trainable.
    """
    # Open read-only (explicit mode) and use a context manager so the file is
    # closed even if a layer lookup or set_weights() call raises -- the
    # original left the handle open on any exception.
    with h5py.File(WEIGHTS_PATH, 'r') as f:
        layer_names = [name for name in f.attrs['layer_names']]
        for layer in model.layers:
            # Attribute names in the HDF5 file are bytes, so encode for lookup.
            b_name = layer.name.encode()
            if b_name in layer_names:
                g = f[b_name]
                weights = [g[name] for name in g.attrs['weight_names']]
                layer.set_weights(weights)
                # Freeze the pre-trained layer so training cannot modify it.
                layer.trainable = False
def VGG19(input_tensor=None, input_shape=None, target_layer=1):
    """Build VGG19 up to the target layer (1 for relu1_1, 2 for relu2_1, ...)
    with pre-trained, frozen weights loaded from the cached weight file."""
    # Wrap an existing tensor if one was supplied; otherwise build a fresh input.
    if input_tensor is not None:
        inputs = Input(tensor=input_tensor, shape=input_shape)
    else:
        inputs = Input(shape=input_shape)
    model = Model(inputs, vgg_layers(inputs, target_layer), name='vgg19')
    load_weights(model)
    return model
def preprocess_input(x):
    """Convert an RGB image batch to BGR and subtract the ImageNet mean pixel.

    Accepts either a NumPy array or a TensorFlow tensor; the channel axis is
    assumed to be last.
    """
    # Convert 'RGB' -> 'BGR'. Use isinstance instead of the original
    # ``type(x) is np.ndarray`` so ndarray subclasses take the NumPy slicing
    # path (and stay ndarrays) rather than being routed through tf.reverse.
    if isinstance(x, np.ndarray):
        x = x[..., ::-1]
    else:
        x = tf.reverse(x, [-1])
    return x - MEAN_PIXEL
| [
"h5py.File",
"tensorflow.reverse",
"keras.utils.data_utils.get_file",
"numpy.array",
"keras.layers.Conv2D",
"keras.layers.Input",
"keras.layers.MaxPooling2D"
] | [((390, 426), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (398, 426), True, 'import numpy as np\n'), ((443, 603), 'keras.utils.data_utils.get_file', 'get_file', (['"""vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5"""', 'WEIGHTS_PATH_NO_TOP'], {'cache_subdir': '"""models"""', 'file_hash': '"""253f8cb515780f3b799900260a226db6"""'}), "('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP, cache_subdir='models', file_hash=\n '253f8cb515780f3b799900260a226db6')\n", (451, 603), False, 'from keras.utils.data_utils import get_file\n'), ((2406, 2429), 'h5py.File', 'h5py.File', (['WEIGHTS_PATH'], {}), '(WEIGHTS_PATH)\n', (2415, 2429), False, 'import h5py\n'), ((728, 802), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block1_conv1"""'}), "(64, (3, 3), activation='relu', padding='same', name='block1_conv1')\n", (734, 802), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((862, 936), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block1_conv2"""'}), "(64, (3, 3), activation='relu', padding='same', name='block1_conv2')\n", (868, 936), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((948, 1004), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)', 'name': '"""block1_pool"""'}), "((2, 2), strides=(2, 2), name='block1_pool')\n", (960, 1004), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((1031, 1106), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block2_conv1"""'}), "(128, (3, 3), activation='relu', padding='same', name='block2_conv1')\n", (1037, 1106), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), 
((1161, 1236), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block2_conv2"""'}), "(128, (3, 3), activation='relu', padding='same', name='block2_conv2')\n", (1167, 1236), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((1248, 1304), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)', 'name': '"""block2_pool"""'}), "((2, 2), strides=(2, 2), name='block2_pool')\n", (1260, 1304), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((1331, 1406), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block3_conv1"""'}), "(256, (3, 3), activation='relu', padding='same', name='block3_conv1')\n", (1337, 1406), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((1461, 1536), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block3_conv2"""'}), "(256, (3, 3), activation='relu', padding='same', name='block3_conv2')\n", (1467, 1536), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((1548, 1623), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block3_conv3"""'}), "(256, (3, 3), activation='relu', padding='same', name='block3_conv3')\n", (1554, 1623), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((1635, 1710), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block3_conv4"""'}), "(256, (3, 3), activation='relu', padding='same', name='block3_conv4')\n", (1641, 1710), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((1722, 1778), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 
2)'], {'strides': '(2, 2)', 'name': '"""block3_pool"""'}), "((2, 2), strides=(2, 2), name='block3_pool')\n", (1734, 1778), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((1805, 1880), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block4_conv1"""'}), "(512, (3, 3), activation='relu', padding='same', name='block4_conv1')\n", (1811, 1880), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((1935, 2010), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block4_conv2"""'}), "(512, (3, 3), activation='relu', padding='same', name='block4_conv2')\n", (1941, 2010), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((2022, 2097), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block4_conv3"""'}), "(512, (3, 3), activation='relu', padding='same', name='block4_conv3')\n", (2028, 2097), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((2109, 2184), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block4_conv4"""'}), "(512, (3, 3), activation='relu', padding='same', name='block4_conv4')\n", (2115, 2184), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((2196, 2252), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)', 'name': '"""block4_pool"""'}), "((2, 2), strides=(2, 2), name='block4_pool')\n", (2208, 2252), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((2279, 2354), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block5_conv1"""'}), "(512, (3, 3), activation='relu', 
padding='same', name='block5_conv1')\n", (2285, 2354), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((2976, 3000), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (2981, 3000), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((3028, 3073), 'keras.layers.Input', 'Input', ([], {'tensor': 'input_tensor', 'shape': 'input_shape'}), '(tensor=input_tensor, shape=input_shape)\n', (3033, 3073), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D, Input\n'), ((3322, 3341), 'tensorflow.reverse', 'tf.reverse', (['x', '[-1]'], {}), '(x, [-1])\n', (3332, 3341), True, 'import tensorflow as tf\n')] |
from ast import Not
from lib2to3.pytree import convert
from trace import CoverageResults
from pandas import options
import streamlit as st
import cv2 as cv
import numpy as np
import string
import random
from io import BytesIO
import requests
import shutil
import imutils
import streamlit.components.v1 as components
from datetime import datetime
from streamlit_cropper import st_cropper
from webcolors import hex_to_name
from PIL import Image, ImageColor
from matplotlib import pyplot as plt
from utils_helpers import (
auto_canny_thresh,
source_code,
version,
load_image,
load_image_PIL,
converted,
#download_button,
get_location_data,
download_button1 ,
convolve,
insert_data_mongodb,
average_ratings_mongodb,
source_code,
scrape_duckduckgo)
# Sidebar menu entries; app() dispatches on the selected label, so these
# strings must match the comparisons made there.
selected_boxes = (
    "Welcome",
    "Demo Adaptive Thresholding",
    "Demo Auto Canny",
    "Demo Canny Edge Detector",
    "Demo Convolutions",
    "Demo Image Gradients",
    "Demo Morphological Operations",
    "Demo Color Spaces",
    "Demo Color Thresholding",
    "Demo Smoothing and Blurring",
)
# Random 10-character basename for the downloadable result image
# (regenerated on every app start / script rerun that reimports this module).
rand = ''.join(random.choice(string.ascii_lowercase) for i in range(10))
download = f'{rand}.jpeg'
# Shared defaults and UI labels reused across the demo pages.
language = 'python'
default_image = 'images/nice.jpeg'
button = 'Download Result Image'
original = 'Original Image'
code = 'Source Code'
mime_type = 'image/jpeg'
font = cv.FONT_HERSHEY_SIMPLEX
def app():
    """Top-level router: show the sidebar selector and run the chosen demo page.

    Each branch calls the page function defined elsewhere in this module.
    """
    # Fixed user-facing typo in the prompt ("Choosse on of the following").
    selected_box = st.sidebar.selectbox("Choose one of the following", selected_boxes)
    # The options are mutually exclusive, so elif makes the dispatch explicit.
    if selected_box == "Welcome":
        welcome()
    elif selected_box == "Demo Adaptive Thresholding":
        adaptive_thresholding()
    elif selected_box == "Demo Auto Canny":
        auto_canny()
    elif selected_box == "Demo Canny Edge Detector":
        canny_edge_detector()
    elif selected_box == "Demo Convolutions":
        convolutions()
    elif selected_box == "Demo Image Gradients":
        image_gradients()
    elif selected_box == "Demo Morphological Operations":
        morphological_operations()
    elif selected_box == "Demo Color Thresholding":
        thresholding()
    elif selected_box == "Demo Color Spaces":
        color_spaces()
    elif selected_box == "Demo Smoothing and Blurring":
        smoothing_blurring()
def welcome():
    """Landing page: intro copy, feedback form, live ratings and a dashboard."""
    # --- Two-column intro: left = usage/stack, right = technique list -----
    cols = st.columns(2)
    with cols[0]:
        st.title('Basic Image Processing Operations')
        st.image('images/image_processing.jpeg',use_column_width=True)
        st.title('Usage')
        st.markdown('A simple app that shows different image processing techniques. You can choose the options from the dropdwon menu on the left.' +
                    'Technologies use to build the app:', unsafe_allow_html=True)
        st.title('Technology Stack')
        st.markdown('''
        <p align="center">
        <img src="https://img.shields.io/badge/Python-FFD43B?style=for-the-badge&logo=python&logoColor=blue" />
        <img src="https://img.shields.io/badge/MongoDB-4EA94B?style=for-the-badge&logo=mongodb&logoColor=white" />
        <img src="https://img.shields.io/badge/Streamlit-FF4B4B?style=for-the-badge&logo=Streamlit&logoColor=white" />
        <img src="https://img.shields.io/badge/OpenCV-27338e?style=for-the-badge&logo=OpenCV&logoColor=white" />
        <img src="https://img.shields.io/badge/Visual_Studio_Code-0078D4?style=for-the-badge&logo=visual%20studio%20code&logoColor=white" />
        </p>''', unsafe_allow_html=True)
    with cols[1]:
        st.title('Image Processing Techniques')
        st.markdown('''
        >Morphological Operations --- OpenCV Morphological Operations
        >
        >Smoothing and Blurring --- OpenCV Smoothing and Blurring
        >
        >Color Spaces -- OpenCV Color Spaces (cv2.cvtColor)
        >
        >Basic Thresholding --- OpenCV Thresholding (cv2.threshold)
        >
        >Adaptive Thresholding --- Adaptive Thresholding with OpenCV (cv2.adaptiveThreshold)
        >
        >Kernels --- Convolutions with OpenCV and Python
        >
        >Image Gradients --- Image Gradients with OpenCV (Sobel and Scharr)
        >
        >Edge Detection --- OpenCV Edge Detection (cv2.Canny)
        >
        >Automatic Edge Detection --- Zero-parameter, automatic Canny edge detection with Python and OpenCV''', unsafe_allow_html=True)
        st.title('Dedication')
        st.markdown('''> To my Mother (Elsa), Paula, Cris, Maty and Sofia, To whom made this possible.
        >
        > Special thanks to Adrian from pyimagesearch.com for great tutorials of image processing, deep learning, augmented realty, etc. ''')
        st.markdown('''> Long Live Rock N Roll.
        >
        > - "Well if I have to, I will die seven deaths just to lie In the arms of my eversleeping aim"''')
        st.title('Contact')
        st.markdown('''<p align="center">
        <a href="mailto:<EMAIL>" rel="nofollow">
        <img alt="Gmail" src="https://img.shields.io/badge/Gmail-D14836?style=for-the-badge&logo=gmail&logoColor=white"/>
        </a>
        <a href="https://github.com/jjaramillo34/" rel="nofollow">
        <img alt="Github" src="https://img.shields.io/badge/GitHub-%2312100E.svg?&style=for-the-badge&logo=Github&logoColor=white"/>
        </a>
        <a href="https://twitter.com/jejaramilloc" rel="nofollow">
        <img alt="Twitter" src="https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white"/>
        </a>
        <a href="https://www.linkedin.com/in/javierjaramillo1/" rel="nofollow">
        <img alt="Linkedin" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"/>
        </a>
        </p>''', unsafe_allow_html=True)
    # --- Client metadata attached to each feedback record -----------------
    location_dict = get_location_data()
    date_r = datetime.now()
    city = location_dict['city']
    ip = location_dict['ip']
    region = location_dict['region']
    country = location_dict['country']
    loc = location_dict['loc']
    # --- Sidebar feedback form; on submit the record goes to MongoDB ------
    with st.sidebar.form(key='columns_in_form',clear_on_submit=True): #set clear_on_submit=True so that the form will be reset/cleared once it's submitted
        rating=st.slider("Please rate the app", min_value=1, max_value=5, value=3,help='Drag the slider to rate the app. This is a 1-5 rating scale where 5 is the highest rating')
        feedback=st.text_input(label='Please leave your feedback here')
        submitted = st.form_submit_button('Submit')
        if submitted:
            st.write('Thanks for your feedback!')
            st.markdown('Your Rating:')
            st.markdown(rating)
            st.markdown('Your Feedback:')
            st.markdown(feedback)
            insert_data_mongodb(rating=rating, feedback=feedback, date_r=date_r, city=city, ip=ip, region=region, country=country, loc=loc)
    # --- Average rating banner: one branch per star bucket ----------------
    score_average = average_ratings_mongodb()
    if score_average == 5.0:
        st.sidebar.title('App Ratings')
        st.sidebar.markdown(f'⭐⭐⭐⭐⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
    elif score_average >=4.0 and score_average < 5.0:
        st.sidebar.title('App Ratings')
        st.sidebar.markdown(f'⭐⭐⭐⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
    elif score_average >=3.0 and score_average < 4.0:
        st.sidebar.title('App Ratings')
        st.sidebar.markdown(f'⭐⭐⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
    elif score_average >=2.0 and score_average < 3.0:
        st.sidebar.title('App Ratings')
        st.sidebar.markdown(f'⭐⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
    elif score_average < 2.0:
        st.sidebar.title('App Ratings')
        st.sidebar.markdown(f'⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
    st.sidebar.markdown(f'<p style="font-weight:bold;color:black;font-size:12px;border-radius:2%;">Ratings live atlas mongodb database feed</p>', unsafe_allow_html=True)
    # Embedded public MongoDB Charts dashboard showing the ratings feed.
    with st.expander('Show MongoDB Dashboard'):
        components.iframe('https://charts.mongodb.com/charts-project-0-koqvp/public/dashboards/62523657-6131-48ab-8c6c-3893cfb849fa', height=800)
    version()
def _static_thresholding(blurred):
    """Render the four fixed-parameter thresholding results side by side.

    ``blurred`` is the grayscale, Gaussian-blurred input frame.
    """
    with st.expander('Show Adaptive Thresholding', expanded=True):
        cols = st.columns(4)
        # Simple (manual) binary-inverse threshold at a hand-picked value of 51.
        (T, threshInv) = cv.threshold(blurred, 51, 255,
            cv.THRESH_BINARY_INV)
        cols[0].markdown("Simple Thresholding")
        cols[0].image(threshInv)
        with cols[0]:
            download_button1(threshInv, button, download, mime_type, key='1.1')
        # Otsu's method picks the threshold value automatically.
        (T, threshInv) = cv.threshold(blurred, 0, 255,
            cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
        cols[1].markdown("Otsu Thresholding")
        cols[1].image(threshInv)
        with cols[1]:
            download_button1(threshInv, button, download, mime_type, key='1.2')
        # Adaptive mean thresholding over 21x21 pixel neighbourhoods.
        thresh = cv.adaptiveThreshold(blurred, 255,
            cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 21, 10)
        cols[2].markdown("Mean Adaptive Thresholding")
        # BUGFIX: this column previously displayed `threshInv` (the Otsu
        # result from above) while its download button offered `thresh`;
        # both now use the mean-adaptive result.
        cols[2].image(thresh)
        with cols[2]:
            download_button1(thresh, button, download, mime_type, key='1.3')
        # Adaptive Gaussian-weighted thresholding over 21x21 neighbourhoods.
        thresh = cv.adaptiveThreshold(blurred, 255,
            cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 21, 4)
        cols[3].markdown("Gaussian Adaptive Thresholding")
        cols[3].image(thresh)
        with cols[3]:
            download_button1(thresh, button, download, mime_type, key='1.4')


def _interactive_threshold_types(blurred):
    """Render all five cv.threshold types at a user-selected threshold value."""
    with st.expander("Show Adaptive Thresholding Types Interactive"):
        # NOTE(review): this slider previously reused key='1', which collides
        # with the file uploader's key='1'; a distinct key avoids a
        # duplicate-widget-key error in Streamlit.
        x = st.slider('Change Threshold value', min_value=50, max_value=255, key='types-threshold')
        ret, thresh1 = cv.threshold(blurred, x, 255, cv.THRESH_BINARY)
        ret, thresh2 = cv.threshold(blurred, x, 255, cv.THRESH_BINARY_INV)
        ret, thresh3 = cv.threshold(blurred, x, 255, cv.THRESH_TRUNC)
        ret, thresh4 = cv.threshold(blurred, x, 255, cv.THRESH_TOZERO)
        ret, thresh5 = cv.threshold(blurred, x, 255, cv.THRESH_TOZERO_INV)
        titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
        images = [blurred, thresh1, thresh2, thresh3, thresh4, thresh5]
        # Two rows of three images each.
        # BUGFIX: the download keys were the literal strings '{i}.1.1' and
        # '{i}.2.2' (missing f-prefix), so every button in a row shared the
        # same key; each button now gets a unique key.
        for row in range(2):
            cols = st.columns(3)
            for col in range(3):
                i = row * 3 + col
                cols[col].markdown(i)
                cols[col].markdown(titles[i])
                cols[col].image(images[i])
                with cols[col]:
                    download_button1(images[i], button, download, mime_type,
                                     key=f'{i}.{row + 1}.{row + 1}')


def _interactive_thresholding(blurred):
    """Render the four thresholding variants, each with its own parameter slider."""
    with st.expander('Show Adaptive Thresholding Interactive', expanded=True):
        cols = st.columns(4)
        # NOTE(review): these four sliders previously used keys '1'-'4';
        # key '1' collided with the file uploader, so descriptive keys are
        # used instead.
        x = cols[0].slider('Change Threshold value', min_value=50, max_value=255, key='simple-threshold')
        (T, threshInv) = cv.threshold(blurred, x, 255,
            cv.THRESH_BINARY_INV)
        cols[0].markdown('Simple Thresholding')
        cols[0].image(threshInv, use_column_width=True, clamp=True)
        with cols[0]:
            download_button1(threshInv, button, download, mime_type, key='1.1')
        # Otsu chooses its own threshold, so this slider is informational only.
        # NOTE(review): the uploaded-image branch previously omitted
        # disabled=True here while the default branch set it; the disabled
        # form matches the 'Auto threshold value selected' help text.
        x = cols[1].slider('Change Threshold value', min_value=50, max_value=255, key='otsu-threshold',
                           disabled=True, help='Auto threshold value selected')
        (T, threshInv) = cv.threshold(blurred, 0, 255,
            cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
        cols[1].markdown("Otsu's Automatic Thresholding")
        cols[1].image(threshInv, use_column_width=True, clamp=True)
        with cols[1]:
            download_button1(threshInv, button, download, mime_type, key='1.2')
        # The adaptive block size must be odd, hence step=2 starting at 21.
        x = cols[2].slider('Change Threshold value', min_value=21, max_value=255, step=2, key='mean-block')
        thresh = cv.adaptiveThreshold(blurred, 255,
            cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, x, 10)
        cols[2].markdown('Mean Adaptive Thresholding')
        cols[2].image(thresh, use_column_width=True, clamp=True)
        with cols[2]:
            download_button1(thresh, button, download, mime_type, key='1.3')
        x = cols[3].slider('Change Threshold value', min_value=21, max_value=255, step=2, key='gauss-block')
        thresh = cv.adaptiveThreshold(blurred, 255,
            cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, x, 4)
        cols[3].markdown('Gaussian Adaptive Thresholding')
        cols[3].image(thresh, use_column_width=True, clamp=True)
        cols[3].text("Bar Chart of the image")
        with cols[3]:
            download_button1(thresh, button, download, mime_type, key='1.4')


def adaptive_thresholding():
    """Streamlit demo page for simple, Otsu and adaptive thresholding.

    Shows the selected thresholding variants applied to an uploaded image
    (or the bundled 'images/steve-jobs.jpg' sample) with per-result download
    buttons. Relies on module-level helpers: load_image, load_image_PIL,
    converted, download_button1, source_code, scrape_duckduckgo and the
    globals button / download / mime_type.
    """
    st.header("Demo Adaptive Thresholding")
    options = st.sidebar.radio('Adaptive Thresholding Options', ('Adaptive Thresholding', 'Adaptive Thesholding Interactive'))
    # The uploader is identical in both modes, so it is created once here.
    # BUGFIX: accepted type 'jpge' was a typo for 'jpeg'.
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'], key='1')
    # Use the upload when present, otherwise fall back to the bundled sample.
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image('images/steve-jobs.jpg')
    with st.expander('Show Original Image'):
        st.image(image)
    # Grayscale + slight blur suppresses high-frequency noise before thresholding.
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (7, 7), 0)
    if options == 'Adaptive Thresholding':
        _static_thresholding(blurred)
        _interactive_threshold_types(blurred)
    else:
        _interactive_thresholding(blurred)
    source_code(
        'Source Code + Adaptive Thresholding pyimagesearch.com',
        'https://pyimagesearch.com/2021/05/12/adaptive-thresholding-with-opencv-cv2-adaptivethreshold/',
        'https://gist.github.com/jjaramillo34/331a1aaeebeb4ff47d9b80a658643b60')
    with st.expander('DuckDuckGo Search Results'):
        st.subheader('More About Adaptive Thresholding')
        #scrape_duckduckgo('adaptive thresholding opencv')
        scrape_duckduckgo('adaptive thresholding opencv')
def auto_canny():
    """Streamlit demo comparing hand-tuned wide/tight Canny thresholds with
    the automatic threshold selection provided by ``auto_canny_thresh``.

    CONSISTENCY: the original uploaded-file and default-image branches were
    byte-identical except for the image source; the image selection is now
    factored out and the rendering code exists once.
    """
    st.header("Auto Canny Demo")
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])
    # Use the uploaded picture when present, else the bundled default image.
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)
    # Grayscale + slight blur reduces noise-induced edges.
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (3, 3), 0)
    # Apply Canny with a wide threshold, a tight threshold, and an
    # automatically determined threshold.
    wide = cv.Canny(blurred, 10, 200)
    tight = cv.Canny(blurred, 225, 250)
    auto = auto_canny_thresh(blurred)
    images = [wide, tight, auto]
    labels = ['Wide Edges', 'Tight Edges', 'Auto Canny']
    # Show the input and the three edge maps, each with a download button.
    with st.expander('Show Original Image'):
        st.markdown("Original")
        st.image(image)
    with st.expander('Show Auto Canny', expanded=True):
        cols = st.columns(3)
        for i, edge_map in enumerate(images):
            cols[i].markdown(labels[i])
            cols[i].image(edge_map)
            with cols[i]:
                download_button1(edge_map, button, download, mime_type, key=f'{i}.1')
    source_code(
        'Source Code + Auto Canny pyimagesearch.com',
        'https://pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/',
        'https://gist.github.com/jjaramillo34/fb83acff62ce6502c398ba7133ab066c')
    with st.expander('DuckDuckGo Search Results'):
        st.subheader('More About Auto Canny')
        scrape_duckduckgo('auto canny opencv')
def convolutions():
    """Streamlit demo applying a bank of convolution kernels with both the
    project's pure-Python ``convolve`` helper and OpenCV's ``filter2D``.

    BUGFIX: the whole demo previously lived under the no-upload ``else``
    branch, so uploading a file loaded the image and then did nothing; the
    uploaded image (when present) is now the one that gets convolved.
    """
    # BUGFIX: the header previously read "Resizing Demo" (copied from
    # another page).
    st.header("Convolutions Demo")
    # BUGFIX: accepted type 'jpge' was a typo for 'jpeg'.
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image('images/supermario.jpg')
    # construct average blurring kernels used to smooth an image
    smallBlur = np.ones((7, 7), dtype="float") * (1.0 / (7 * 7))
    largeBlur = np.ones((21, 21), dtype="float") * (1.0 / (21 * 21))
    # construct a sharpening filter
    sharpen = np.array((
        [0, -1, 0],
        [-1, 5, -1],
        [0, -1, 0]), dtype="int")
    # construct the Laplacian kernel used to detect edge-like regions of an image
    laplacian = np.array((
        [0, 1, 0],
        [1, -4, 1],
        [0, 1, 0]), dtype="int")
    # construct the Sobel x-axis kernel
    sobelX = np.array((
        [-1, 0, 1],
        [-2, 0, 2],
        [-1, 0, 1]), dtype="int")
    # construct the Sobel y-axis kernel
    sobelY = np.array((
        [-1, -2, -1],
        [0, 0, 0],
        [1, 2, 1]), dtype="int")
    # construct the kernel bank, a list of kernels we're going to apply using both our
    # custom `convole` function and OpenCV's `filter2D` function
    kernelBank = (
        ("small_blur", smallBlur),
        ("large_blur", largeBlur),
        ("sharpen", sharpen),
        ("laplacian", laplacian),
        ("sobel_x", sobelX),
        ("sobel_y", sobelY)
    )
    # convert the input image to grayscale before convolving
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    # loop over the kernels, showing original vs custom vs OpenCV output
    with st.spinner('Creating Convolutions please wait for it...'):
        for (kernelName, kernel) in kernelBank:
            # apply the kernel to the grayscale image using both our custom
            # 'convole' function and OpenCV's 'filter2D' function
            st.write("[INFO] applying {} kernel".format(kernelName))
            convoleOutput = convolve(gray, kernel)
            opencvOutput = cv.filter2D(gray, -1, kernel)
            # show the output images
            col1, col2, col3 = st.columns(3)
            with col1:
                st.markdown("original")
                st.image(gray)
            with col2:
                st.write("{} - convole".format(kernelName))
                st.image(convoleOutput)
            with col3:
                st.write("{} - opencv".format(kernelName))
                st.image(opencvOutput)
        st.success('Convolutions were created succesfully!')
    col1, col2 = st.columns(2)
    with st.expander('Source Code'):
        with col1:
            st.markdown(code)
            st.code('''
# construct average blurring kernels used to smooth an image
smallBlur = np.ones((7, 7), dtype="float") * (1.0 / (7 * 7))
largeBlur = np.ones((21, 21), dtype="float") * (1.0 / (21 * 21))
# construct a sharpening filter
sharpen = np.array((
    [0, -1, 0],
    [-1, 5, -1],
    [0, -1, 0]), dtype="int")
# construct the Laplacian kernel used to detect edge-like regions of an image
laplacian = np.array((
    [0, 1, 0],
    [1, -4, 1],
    [0, 1, 0]), dtype="int")
# construct the Sobel x-axis kernel
sobelX = np.array((
    [-1, 0, 1],
    [-2, 0, 2],
    [-1, 0, 1]), dtype="int")
# construct the Sobel y-axis kernel
sobelY = np.array((
    [-1, -2, -1],
    [0, 0, 0],
    [1, 2, 1]), dtype="int")
# construct the kernel bank, a list of kernels we're going to apply using both our
# custom `convole` function and OpenCV's `filter2D` function
kernelBank = (
    ("small_blur", smallBlur),
    ("large_blur", largeBlur),
    ("sharpen", sharpen),
    ("laplacian", laplacian),
    ("sobel_x", sobelX),
    ("sobel_y", sobelY)
)
# load the input image and convert it to grayscale
image = load_image('images/supermario.jpg')
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# loop over the kernels
with st.spinner('Creating Convolutions please wait for it...'):
    for (kernelName, kernel) in kernelBank:
        # apply the kernel to the grayscale image using both our custom 'convole'
        # function and OpenCV's 'filter2D' function
        st.write("[INFO] applying {} kernel".format(kernelName))
        convoleOutput = convolve(gray, kernel)
        opencvOutput = cv.filter2D(gray, -1, kernel)
        # show the output images
        col1, col2, col3 = st.columns(3)
        with col1:
            st.markdown("original")
            st.image(gray)
        with col2:
            st.write("{} - convole".format(kernelName))
            st.image(convoleOutput)
        with col3:
            st.write("{} - opencv".format(kernelName))
            st.image(opencvOutput)''', language=language)
        with col2:
            st.markdown('Source Code Convole Function')
            st.code('''
def convolve(image, kernel):
    # grab the spatial dimensions of the image, along with
    # the spatial dimensions of the kernel
    (iH, iW) = image.shape[:2]
    (kH, kW) = kernel.shape[:2]
    # allocate memory for the output image, taking care to
    # "pad" the borders of the input image so the spatial
    # size (i.e., width and height) are not reduced
    pad = (kW - 1) // 2
    image = cv2.copyMakeBorder(image, pad, pad, pad, pad,
        cv2.BORDER_REPLICATE)
    output = np.zeros((iH, iW), dtype="float32")
    # loop over the input image, "sliding" the kernel across
    # each (x, y)-coordinate from left-to-right and top to
    # bottom
    for y in np.arange(pad, iH + pad):
        for x in np.arange(pad, iW + pad):
            # extract the ROI of the image by extracting the
            # *center* region of the current (x, y)-coordinates
            # dimensions
            roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]
            # perform the actual convolution by taking the
            # element-wise multiplicate between the ROI and
            # the kernel, then summing the matrix
            k = (roi * kernel).sum()
            # store the convolved value in the output (x,y)-
            # coordinate of the output image
            output[y - pad, x - pad] = k
    # rescale the output image to be in the range [0, 255]
    output = rescale_intensity(output, in_range=(0, 255))
    output = (output * 255).astype("uint8")
    # return the output image
    return output''', language=language)
    with st.expander('Convolutions with OpenCV and Python'):
        # embed streamlit docs in a streamlit app
        components.iframe("https://pyimagesearch.com/2016/07/25/convolutions-with-opencv-and-python/", height=800)
    version()
def canny_edge_detector():
    """Streamlit demo of Canny edge detection.

    Two modes: fixed wide/mid/tight threshold edge maps, or interactive
    range sliders per edge map.

    BUGFIX: the uploaded-file interactive path previously loaded the
    uploaded image and then overwrote it with ``load_image(default_image)``,
    silently discarding the upload; the upload is now used in both modes.
    CONSISTENCY: the four near-identical branches are collapsed into one
    image-selection step plus one branch per mode.
    """
    st.header("Canny Edge Detector Demo")
    # BUGFIX: accepted type 'jpge' was a typo for 'jpeg'.
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'], key='1')
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    # Use the uploaded picture when present, else the bundled default image.
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)
    options = st.sidebar.radio('Canny Edge Detector Options', ('Canny Edge Detector', 'Canny Edge Detector Interactive'))
    # Grayscale + slight blur before edge detection.
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (5, 5), 0)
    # Show the original and blurred inputs side by side.
    col1, col2 = st.columns(2)
    with col1:
        with st.expander('Show Original Image'):
            st.markdown("Original")
            st.image(image)
    with col2:
        with st.expander('Show Blurred Image'):
            st.markdown("Blurred")
            st.image(blurred)
    col1, col2, col3 = st.columns(3)
    if options == 'Canny Edge Detector':
        # Fixed "wide", "mid-range" and "tight" thresholds.
        wide = cv.Canny(blurred, 10, 200)
        mid = cv.Canny(blurred, 30, 150)
        tight = cv.Canny(blurred, 240, 250)
        with col1:
            st.markdown("Wide Edge Map")
            st.image(wide)
        with col2:
            st.markdown("Mid Edge Map")
            st.image(mid)
        with col3:
            st.markdown("Tight Edge Map")
            st.image(tight)
    else:
        # Interactive: each column exposes a (low, high) threshold range.
        with col1:
            values = st.slider(
                'Select a range of values',
                10, 200, (10, 200), step=10)
            wide = cv.Canny(blurred, values[0], values[1])
            st.markdown("Wide Edge Map")
            st.image(wide)
        with col2:
            values = st.slider(
                'Select a range of values',
                30, 150, (30, 150), step=5)
            mid = cv.Canny(blurred, values[0], values[1])
            st.markdown("Mid Edge Map")
            st.image(mid)
        with col3:
            values = st.slider(
                'Select a range of values',
                200, 250, (200, 250))
            tight = cv.Canny(blurred, values[0], values[1])
            st.markdown("Tight Edge Map")
            st.image(tight)
def image_gradients():
    """Streamlit demo of image gradients.

    Two modes selected from the sidebar:
    - 'Sobel/Scharr': x/y gradients and their combination, with the kernel
      (Sobel vs Scharr) chosen from a selectbox.
    - 'Magnitude Orientation': gradient magnitude and orientation maps,
      rendered both with Streamlit and with matplotlib.

    Each mode has an uploaded-image branch and a default-image branch.
    NOTE(review): the branches are near-duplicates but differ in a few
    details flagged inline below — confirm which variant is intended before
    deduplicating.
    """
    st.header("Image Gradient Demo")
    options = st.sidebar.radio('Image Gradient Options', ('Sobel/Scharr', 'Magnitude Orientation'))
    if options == 'Sobel/Scharr':
        # NOTE(review): 'jpge' looks like a typo for 'jpeg' — uploads with a
        # .jpeg extension are currently rejected.
        img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpge'])
        # NOTE(review): realtime_update is rendered but never read in this
        # function.
        realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
        if img_file is not None:
            # load the image, convert it to grayscale, and display the original
            # grayscale image
            with st.expander('Show Sobel/Scharr Image Gradient', expanded=True):
                image = load_image_PIL(img_file)
                image = converted(image)
                gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
                # set the kernel size, depending on whether we are using the Sobel operator of the Scharr operator,
                # then compute the gradients along the x and y axis, respectively
                op = st.selectbox('Operators', ('sobel', 'scharr'))
                if op == 'scharr':
                    s = 1
                else:
                    s = 0
                st.success(f'Operator Selected: {op}')
                # ksize=-1 selects the Scharr kernel in cv.Sobel; 3 is the
                # standard 3x3 Sobel kernel.
                ksize = -1 if s > 0 else 3
                gX = cv.Sobel(gray, ddepth=cv.CV_32F, dx=1, dy=0, ksize=ksize)
                gY = cv.Sobel(gray, ddepth=cv.CV_32F, dx=0, dy=1, ksize=ksize)
                # the gradient magnitude images are now of the floating point data type, so we need to take care
                # to convert them back a to unsigned 8-bit integer representation so other OpenCV functions can
                # operate on them and visualize them
                gX = cv.convertScaleAbs(gX)
                gY = cv.convertScaleAbs(gY)
                # combine the gradient representations into a single image
                combined = cv.addWeighted(gX, 0.5, gY, 0.5, 0)
                # show our output images
                cols = st.columns(4)
                cols[0].markdown("Gray")
                cols[0].image(gray)
                cols[1].markdown("Sobel/Scharr X")
                cols[1].image(gX)
                with cols[1]:
                    download_button1(gX, button, download, mime_type, key='1.1')
                cols[2].markdown("Sobel/Scharr Y")
                cols[2].image(gY)
                with cols[2]:
                    download_button1(gY, button, download, mime_type, key='1.2')
                cols[3].markdown("Sobel/Scharr Combined")
                cols[3].image(combined)
                with cols[3]:
                    download_button1(combined, button, download, mime_type, key='1.3')
        else:
            # Same pipeline as above, applied to the bundled default image.
            # load the image, convert it to grayscale, and display the original
            # grayscale image
            with st.expander('Show Sobel/Scharr Image Gradient', expanded=True):
                image = load_image(default_image)
                gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
                # set the kernel size, depending on whether we are using the Sobel operator of the Scharr operator,
                # then compute the gradients along the x and y axis, respectively
                op = st.selectbox('Operators', ('sobel', 'scharr'))
                if op == 'scharr':
                    s = 1
                else:
                    s = 0
                st.success(f'Operator Selected: {op}')
                ksize = -1 if s > 0 else 3
                gX = cv.Sobel(gray, ddepth=cv.CV_32F, dx=1, dy=0, ksize=ksize)
                gY = cv.Sobel(gray, ddepth=cv.CV_32F, dx=0, dy=1, ksize=ksize)
                # the gradient magnitude images are now of the floating point data type, so we need to take care
                # to convert them back a to unsigned 8-bit integer representation so other OpenCV functions can
                # operate on them and visualize them
                gX = cv.convertScaleAbs(gX)
                gY = cv.convertScaleAbs(gY)
                # combine the gradient representations into a single image
                combined = cv.addWeighted(gX, 0.5, gY, 0.5, 0)
                # show our output images
                cols = st.columns(4)
                cols[0].markdown("Gray")
                cols[0].image(gray)
                cols[1].markdown("Sobel/Scharr X")
                cols[1].image(gX)
                with cols[1]:
                    download_button1(gX, button, download, mime_type, key='2.1')
                cols[2].markdown("Sobel/Scharr Y")
                cols[2].image(gY)
                with cols[2]:
                    download_button1(gY, button, download, mime_type, key='2.2')
                cols[3].markdown("Sobel/Scharr Combined")
                cols[3].image(combined)
                with cols[3]:
                    download_button1(combined, button, download, mime_type, key='2.3')
    else:
        img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpge'])
        realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
        if img_file is not None:
            # load the input image and convert it to grayscale
            image = load_image_PIL(img_file)
            image = converted(image)
            gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
            # compute gradients along the x and y axis, respectively
            gX = cv.Sobel(gray, cv.CV_64F, 1, 0)
            gY = cv.Sobel(gray, cv.CV_64F, 0, 1)
            # compute the gradient magnitude and orientation
            magnitude = np.sqrt((gX ** 2) + (gY ** 2))
            # orientation in degrees, wrapped to [0, 180)
            orientation = np.arctan2(gY, gX) * (180 / np.pi) % 180
            #arr = np.uint8(magnitude)
            # convert float maps to uint8 and colorize for display
            dist1 = cv.convertScaleAbs(magnitude)
            imC1 = cv.applyColorMap(dist1, cv.COLORMAP_JET)
            dist2 = cv.convertScaleAbs(orientation)
            imC2 = cv.applyColorMap(dist2, cv.COLORMAP_JET)
            # display all images
            with st.expander('Show Magnitude - Orientation Image Gradients - Streamlit', expanded=True):
                cols = st.columns(3)
                cols[0].markdown("Grayscale")
                cols[0].image(gray)
                cols[1].markdown("Gradient Magnitude")
                cols[1].image(imC1, channels='BGR')
                with cols[1]:
                    download_button1(imC1, button, download, mime_type, key='3.1')
                cols[2].markdown("Gradient Orientation [0, 180]")
                cols[2].image(imC2, channels='BGR')
                with cols[2]:
                    download_button1(imC2, button, download, mime_type, key='3.2')
            # initialize a figure to display the input grayscle image along with
            # the gradient magnitude and orientation representations, respectively
            (fig, axs) = plt.subplots(nrows=1, ncols=3, figsize=(8, 4))
            # plot each of the images
            axs[0].imshow(gray, cmap="gray")
            axs[1].imshow(magnitude, cmap="jet")
            axs[2].imshow(orientation, cmap="jet")
            # set the titles of each axes
            axs[0].set_title("Grayscale")
            axs[1].set_title("Gradient Magnitude")
            axs[2].set_title("Gradient Orientation [0, 180]")
            # loop over each of the axes and turn off the x and y ticks
            for i in range(0, 3):
                axs[i].get_xaxis().set_ticks([])
                axs[i].get_yaxis().set_ticks([])
            with st.expander('Show Magnitude - Orientation Image Gradients - Mapplotlib'):
                # show the plots
                plt.tight_layout()
                st.pyplot(fig)
        else:
            # Same pipeline applied to the bundled default image.
            # load the input image and convert it to grayscale
            image = load_image(default_image)
            gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
            # compute gradients along the x and y axis, respectively
            gX = cv.Sobel(gray, cv.CV_64F, 1, 0)
            gY = cv.Sobel(gray, cv.CV_64F, 0, 1)
            # compute the gradient magnitude and orientation
            magnitude = np.sqrt((gX ** 2) + (gY ** 2))
            orientation = np.arctan2(gY, gX) * (180 / np.pi) % 180
            #arr = np.uint8(magnitude)
            dist1 = cv.convertScaleAbs(magnitude)
            # NOTE(review): this branch colorizes magnitude with
            # COLORMAP_OCEAN while the uploaded-image branch uses
            # COLORMAP_JET, and displays with clamp= kwargs instead of
            # channels='BGR' — confirm whether the difference is intentional.
            imC1 = cv.applyColorMap(dist1, cv.COLORMAP_OCEAN)
            dist2 = cv.convertScaleAbs(orientation)
            imC2 = cv.applyColorMap(dist2, cv.COLORMAP_JET)
            # display all images
            with st.expander('Show Magnitude - Orientation Image Gradients - Streamlit', expanded=True):
                cols = st.columns(3)
                cols[0].markdown("Grayscale")
                cols[0].image(gray)
                cols[1].markdown("Gradient Magnitude")
                cols[1].image(imC1, clamp=False)
                with cols[1]:
                    download_button1(imC1, button, download, mime_type, key='5.1')
                cols[2].markdown("Gradient Orientation [0, 180]")
                cols[2].image(imC2, clamp=True)
                with cols[2]:
                    download_button1(imC2, button, download, mime_type, key='5.2')
            # initialize a figure to display the input grayscle image along with
            # the gradient magnitude and orientation representations, respectively
            (fig, axs) = plt.subplots(nrows=1, ncols=3, figsize=(8, 4))
            # plot each of the images
            axs[0].imshow(gray, cmap="gray")
            axs[1].imshow(magnitude, cmap="jet")
            axs[2].imshow(orientation, cmap="jet")
            # set the titles of each axes
            axs[0].set_title("Grayscale")
            axs[1].set_title("Gradient Magnitude")
            axs[2].set_title("Gradient Orientation [0, 180]")
            # loop over each of the axes and turn off the x and y ticks
            for i in range(0, 3):
                axs[i].get_xaxis().set_ticks([])
                axs[i].get_yaxis().set_ticks([])
            with st.expander('Show Magnitude - Orientation Image Gradients - Mapplotlib'):
                # show the plots
                plt.tight_layout()
                st.pyplot(fig)
    # NOTE(review): the strings below reference "Morphological Operations"
    # inside the image-gradients page — likely copied from the next function.
    source_code(
        'Source Code + Image Gradients Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/05/12/image-gradients-with-opencv-sobel-and-scharr/',
        'https://gist.github.com/jjaramillo34/4a40d2faeddda4c1275b2c40c86260a4')
    with st.expander('DuckDuckGo Search Results'):
        st.subheader('More About Morphological Operations')
        scrape_duckduckgo('morphological operations opencv')
def morphological_operations():
    """Render the Morphological Operations demo page.

    A sidebar radio switches between the blackhat/tophat ("hats") demo and
    the general operations demo (erosion, dilation, opening, closing and
    morphological gradient). Both run on an uploaded image, falling back to
    a bundled noisy-logo image when nothing is uploaded.
    """
    st.header("Morphological Operations Demo")
    options = st.sidebar.radio('Morphological Operations Options', ('Morphological Hats', 'Morphological Operations'))
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpge'])
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image('images/pyimagesearch_logo_noise.png')
    # every morphological operation below works on the grayscale version
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    if options == 'Morphological Hats':
        _morphological_hats(image, gray)
    else:
        _morphological_ops(image, gray)
    source_code(
        'Source Code + Morphological Operations Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/04/28/opencv-morphological-operations/',
        'https://gist.github.com/jjaramillo34/3c1a8489e7882a3dba1127f3046c2a78')
    with st.expander('DuckDuckGo Search Results'):
        st.subheader('More About Morphological Operations')
        scrape_duckduckgo('morphological operations opencv')

def _morphological_hats(image, gray):
    """Show blackhat and tophat results side by side."""
    # wide rectangular kernel (13x5) suits horizontal, text-like regions
    rectKernel = cv.getStructuringElement(cv.MORPH_RECT, (13, 5))
    # blackhat reveals dark regions on a light background
    blackhat = cv.morphologyEx(gray, cv.MORPH_BLACKHAT, rectKernel)
    # tophat (a.k.a. "whitehat") reveals light regions on a dark background
    tophat = cv.morphologyEx(gray, cv.MORPH_TOPHAT, rectKernel)
    st.subheader('Morphological Hats')
    with st.expander('Show Original Image'):
        st.markdown("Original")
        st.image(image)
    with st.expander('Show Morphological Hats', expanded=True):
        cols = st.columns(2)
        cols[0].markdown("Blackhat")
        cols[0].image(blackhat)
        with cols[0]:
            download_button1(blackhat, button, download, mime_type, key='1.1')
        cols[1].markdown("Tophat")
        cols[1].image(tophat)
        with cols[1]:
            download_button1(tophat, button, download, mime_type, key='1.2')

def _morphological_ops(image, gray):
    """Show erosion, dilation, opening, closing and gradient, plus interactive panels."""
    kernelSizes = [(3, 3), (5, 5), (7, 7)]
    with st.expander('Show Morphological Operations - Erosion', expanded=True):
        cols = st.columns(4)
        cols[0].markdown("Original")
        cols[0].image(image)
        # apply a series of erosions with increasing iteration counts
        for i in range(0, 3):
            eroded = cv.erode(gray.copy(), None, iterations=i + 1)
            cols[i + 1].markdown("Eroded {} times".format(i + 1))
            cols[i + 1].image(eroded)
            with cols[i + 1]:
                # keys are prefixed per section: the original reused f'{i}.{i}'
                # across expanders, which collides in Streamlit
                download_button1(eroded, button, download, mime_type, key=f'erode.{i}')
    with st.expander('Show Morphological Operations - Dilation'):
        cols = st.columns(4)
        cols[0].markdown("Original")
        cols[0].image(image)
        # apply a series of dilations with increasing iteration counts
        for i in range(0, 3):
            dilated = cv.dilate(gray.copy(), None, iterations=i + 1)
            cols[i + 1].markdown("Dilated {} times".format(i + 1))
            cols[i + 1].image(dilated)
            with cols[i + 1]:
                download_button1(dilated, button, download, mime_type, key=f'dilate.{i}')
    # opening / closing / gradient share identical layout; drive them from a table
    for title, label, op, prefix in (
            ('Show Morphological Operations - Opening', 'Opening', cv.MORPH_OPEN, 'open'),
            ('Show Morphological Operations - Closing', 'Closing', cv.MORPH_CLOSE, 'close'),
            ('Show Morphological Operations - Gradient', 'Gradient', cv.MORPH_GRADIENT, 'grad')):
        with st.expander(title):
            cols = st.columns(4)
            cols[0].markdown("Original")
            cols[0].image(image)
            for i, kernelSize in enumerate(kernelSizes):
                # rectangular kernel of the current size
                kernel = cv.getStructuringElement(cv.MORPH_RECT, kernelSize)
                result = cv.morphologyEx(gray, op, kernel)
                cols[i + 1].markdown("{}: ({}, {})".format(label, kernelSize[0], kernelSize[1]))
                cols[i + 1].image(result)
                with cols[i + 1]:
                    download_button1(result, button, download, mime_type, key=f'{prefix}.{i}')
    with st.expander('Show Interactive Morphological Operations - Erosion, Dilation', expanded=True):
        x = st.number_input('Erored-Dilated Iterations', 1, 6)
        cols = st.columns(3)
        cols[0].markdown("Original")
        cols[0].image(image)
        eroded = cv.erode(gray.copy(), None, iterations=x)
        cols[1].markdown("Eroded {} times".format(x))
        cols[1].image(eroded)
        with cols[1]:
            download_button1(eroded, button, download, mime_type, key='inter.erode')
        dilated = cv.dilate(gray.copy(), None, iterations=x)
        cols[2].markdown("Dilated {} times".format(x))
        cols[2].image(dilated)
        with cols[2]:
            download_button1(dilated, button, download, mime_type, key='inter.dilate')
    with st.expander('Show Interactive Morphological Operations - Opening, Closing & Gradient'):
        # keys added: the original created two number_inputs with identical
        # labels and no key, which raises a duplicate-widget error in Streamlit
        kX = st.number_input('Opening, Closing & Gradient Kernel Size', 1, 11, step=2, key='inter.kx')
        # kY mirrors kX (kernel kept square); the disabled input just displays it
        kY = st.number_input('Opening, Closing & Gradient Kernel Size', int(kX), 11, step=2, disabled=True, key='inter.ky')
        cols = st.columns(4)
        cols[0].markdown("Original")
        cols[0].image(image)
        # one kernel serves all three operations (the original rebuilt it identically)
        kernel = cv.getStructuringElement(cv.MORPH_RECT, (kX, kY))
        for idx, (label, op, prefix) in enumerate((
                ('Opening', cv.MORPH_OPEN, 'inter.open'),
                ('Closing', cv.MORPH_CLOSE, 'inter.close'),
                ('Gradient', cv.MORPH_GRADIENT, 'inter.grad')), start=1):
            result = cv.morphologyEx(gray, op, kernel)
            cols[idx].markdown("{}: ({}, {})".format(label, kX, kY))
            cols[idx].image(result)
            with cols[idx]:
                # fix: the original offered `eroded` for download under the Opening column
                download_button1(result, button, download, mime_type, key=prefix)
def thresholding():
    """Render the Thresholding demo page.

    A sidebar radio switches between simple (fixed-value) thresholding and
    Otsu's automatic thresholding. Both run on an uploaded image, falling
    back to the module-level default image when nothing is uploaded.
    """
    st.header("Thresholding Demo")
    options = st.sidebar.radio('Thresholding Options', ('Simple Thresholding', "Otsu's Thresholding"))
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpge'])
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)
    # grayscale + slight Gaussian blur reduces high-frequency noise before thresholding
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (7, 7), 0)
    if options == 'Simple Thresholding':
        with st.expander('Show Original Image'):
            st.markdown(original)
            st.image(image)
        with st.expander('Show Simple Thresholding', expanded=True):
            _simple_threshold_panel(image, blurred, 200, '1')
        with st.expander('Show Simple Thresholding Auto', expanded=True):
            x = st.slider('Change Threshold value', min_value = 50, max_value = 255)
            _simple_threshold_panel(image, blurred, x, '2')
    else:
        with st.expander('Show Original Image'):
            st.markdown("Image")
            st.image(image)
        with st.expander("Show Otsu's Thresholding", expanded=True):
            cols = st.columns(2)
            # Otsu's method determines the best threshold automatically
            # (the explicit 0 threshold argument is ignored)
            (T, threshInv) = cv.threshold(blurred, 0, 255,
                cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
            cols[0].markdown("Threshold")
            cols[0].image(threshInv)
            st.success("[INFO] otsu's thresholding value: {}".format(T))
            with cols[0]:
                download_button1(threshInv, button, download, mime_type, key='1.1')
            # visualize only the masked regions in the image
            masked = cv.bitwise_and(image, image, mask=threshInv)
            cols[1].markdown("Output")
            cols[1].image(masked)
            with cols[1]:
                # fix: the original offered threshInv for download under the "Output" column
                download_button1(masked, button, download, mime_type, key='1.2')
    source_code(
        'Source Code + Thresholding Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/04/28/opencv-thresholding-cv2-threshold/',
        'https://gist.github.com/jjaramillo34/d504d5a9d6f88833c3720f132e734193')
    with st.expander('DuckDuckGo Search Results'):
        st.subheader('More About Thesholding')
        scrape_duckduckgo('opencv thresholding')

def _simple_threshold_panel(image, blurred, thresh_value, key_prefix):
    """Show inverse/normal binary thresholding at ``thresh_value`` plus the masked result.

    ``key_prefix`` keeps the download-button widget keys unique per panel.
    """
    cols = st.columns(3)
    # inverse thresholding: pixels above thresh_value become black, the rest white
    (T, threshInv) = cv.threshold(blurred, thresh_value, 255, cv.THRESH_BINARY_INV)
    cols[0].markdown("Threshold Binary Inverse")
    cols[0].image(threshInv)
    with cols[0]:
        download_button1(threshInv, button, download, mime_type, key=f'{key_prefix}.1')
    # normal thresholding (rather than inverse thresholding)
    (T, thresh) = cv.threshold(blurred, thresh_value, 255, cv.THRESH_BINARY)
    cols[1].markdown("Threshold Binary")
    cols[1].image(thresh)
    with cols[1]:
        download_button1(thresh, button, download, mime_type, key=f'{key_prefix}.2')
    # visualize only the regions selected by the inverse mask
    masked = cv.bitwise_and(image, image, mask=threshInv)
    cols[2].markdown("Masked")
    cols[2].image(masked)
    with cols[2]:
        download_button1(masked, button, download, mime_type, key=f'{key_prefix}.3')
def color_spaces():
    """Render the Color Spaces demo page.

    Shows the image split into individual channels in BGR, HSV and L*a*b*
    color spaces, plus a grayscale comparison. Runs on an uploaded image,
    falling back to the module-level default image.
    """
    st.header("Color Spaces Demo")
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpge'])
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)
    # one expander per color space: (title, composite label, cvtColor code or
    # None for the image as-is, channel names, widget-key prefix)
    spaces = (
        ('Show RGB Color Spaces', "RGB Color Spaces", None, ("B", "G", "R"), 'rgb'),
        ('Show HSV Color Spaces', "HSV Color Spaces", cv.COLOR_BGR2HSV, ("H", "S", "V"), 'hsv'),
        ('Show L*a*b* Color Spaces', "L*a*b*", cv.COLOR_BGR2LAB, ("L*", "a*", "b*"), 'lab'),
    )
    for idx, (title, label, code, names, prefix) in enumerate(spaces):
        space_img = image if code is None else cv.cvtColor(image, code)
        # only the first expander starts expanded, matching the original layout
        with st.expander(title, expanded=(idx == 0)):
            cols = st.columns(4)
            with cols[0]:
                st.markdown(label)
                st.image(space_img)
            # loop over each of the individual channels and display them
            for i, (name, chan) in enumerate(zip(names, cv.split(space_img))):
                with cols[i + 1]:
                    st.markdown(name)
                    st.image(chan)
                    # keys are prefixed per color space: the original reused
                    # f'{i}.{i}' in all three expanders, colliding in Streamlit
                    download_button1(chan, button, download, mime_type, key=f'{prefix}.{i}')
    with st.expander('Show Grayscale'):
        # show the original and grayscale versions side by side
        cols = st.columns(2)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        cols[0].markdown("Original")
        cols[0].image(image)
        with cols[0]:
            download_button1(image, button, download, mime_type, key='2.1')
        cols[1].markdown("Grayscale")
        cols[1].image(gray)
        with cols[1]:
            # fix: the original offered the color image for download under "Grayscale"
            download_button1(gray, button, download, mime_type, key='2.2')
    source_code(
        'Source Code + Color Spaces Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/04/28/opencv-color-spaces-cv2-cvtcolor/',
        'https://gist.github.com/jjaramillo34/74ef1a86014fb4fd7617c03ea10c3602')
    with st.expander('DuckDuckGo Search Results'):
        t = 'Color Spaces'
        st.subheader(f'More About {t.capitalize()}')
        scrape_duckduckgo(f'opencv {t}')
def smoothing_blurring():
    """Render the Smoothing & Blurring demo page.

    A sidebar radio switches between bilateral (edge-preserving) filtering
    and the classic average/Gaussian/median blurs. Runs on an uploaded
    image, falling back to the module-level default image.
    """
    st.header("Smoothing & Blurring Demo")
    options = st.sidebar.radio('Smoothing & Blurring Options', ('Bilateral', 'Blurring'))
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpge'])
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)
    with st.expander('Show Original Image'):
        st.image(image)
    if options == 'Bilateral':
        _bilateral_demo(image)
    else:
        _blurring_demo(image)
    source_code(
        f'Source Code + Smoothing and Blurring Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/04/28/opencv-smoothing-and-blurring/',
        'https://gist.github.com/jjaramillo34/84863214120f9e6bcf49874670250ebb')
    with st.expander('DuckDuckGo Search Results'):
        t = 'blurring and smoothing'
        st.subheader(f'More About {t.capitalize()}')
        scrape_duckduckgo(f'opencv {t}')

def _bilateral_demo(image):
    """Bilateral filtering over fixed parameter triples plus an interactive panel."""
    # hard-coded (diameter, sigmaColor, sigmaSpace) triples of increasing strength
    params = [(11, 21, 7), (11, 41, 21), (11, 61, 39)]
    with st.expander('Bilateral Blurring', expanded=True):
        st.subheader('Bilateral Blurring')
        cols = st.columns(3)
        for i, (diameter, sigmaColor, sigmaSpace) in enumerate(params):
            # apply bilateral filtering using the current parameter set
            blurred = cv.bilateralFilter(image, diameter, sigmaColor, sigmaSpace)
            title = "Blurred d={}, sc={}, ss={}".format(
                diameter, sigmaColor, sigmaSpace)
            with cols[i]:
                st.markdown(title)
                st.image(blurred)
                download_button1(blurred, button, download, mime_type, key=f'{i}')
    with st.expander('Bilateral Blurring Interactive'):
        st.subheader('Bilateral Blurring Interactive')
        cols = st.columns(3)
        d = cols[0].slider('Select starting diameter', min_value = 11, max_value = 100, step=1, key='1')
        sc = cols[1].slider('Select starting sigmaColor', min_value = 21, max_value = 100, step=1, key='2')
        ss = cols[2].slider('Select starting sigmaSpace' ,min_value = 7, max_value = 100, step=1, key='3')
        blurred = cv.bilateralFilter(image, d, sc, ss)
        st.markdown("Blurred d={}, sc={}, ss={}".format(d, sc, ss))
        st.image(blurred)
        download_button1(blurred, button, download, mime_type, key='1.1')

def _blurring_demo(image):
    """Average, Gaussian and median blurs over fixed kernel sizes plus an interactive panel."""
    # kernel sizes chosen to show how blur strength scales with kernel size
    kernelSizes = [(3, 3), (9, 9), (15, 15)]
    with st.expander('Show Average Blur', expanded=True):
        cols = st.columns(3)
        for i, (kX, kY) in enumerate(kernelSizes):
            with cols[i]:
                blurred = cv.blur(image, (kX, kY))
                st.markdown("Average Blur ({}, {})".format(kX, kY))
                st.image(blurred)
                # keys are prefixed per blur type: the original reused f'{i}'
                # in all three expanders, which collides in Streamlit
                download_button1(blurred, button, download, mime_type, key=f'avg.{i}')
    with st.expander('Show Gaussian Blur'):
        cols = st.columns(3)
        for i, (kX, kY) in enumerate(kernelSizes):
            with cols[i]:
                blurred = cv.GaussianBlur(image, (kX, kY), 0)
                st.markdown("Gaussian Blur ({}, {})".format(kX, kY))
                st.image(blurred)
                download_button1(blurred, button, download, mime_type, key=f'gauss.{i}')
    with st.expander('Show Median Blur'):
        cols = st.columns(3)
        # median blur takes a single odd kernel size
        for i, k in enumerate((3, 9, 15)):
            with cols[i]:
                blurred = cv.medianBlur(image, k)
                st.markdown("Median Blur {}".format(k))
                st.image(blurred)
                download_button1(blurred, button, download, mime_type, key=f'med.{i}')
    with st.expander('Show Auto Blurring', expanded=True):
        cols = st.columns(3)
        kX = cols[0].number_input('Kernel Sizes kX', min_value=1, max_value=25, step=2, key='2.1')
        # kY mirrors kX (kernel kept square); the disabled input just displays it
        kY = cols[1].number_input('Kernel Sizes kY', min_value=1, max_value=25, step=2, key='2.2', value=kX, disabled=True)
        k = cols[2].number_input('Kernel Sizes k', min_value=1, max_value=25, step=2, key='2.3')
        blurred = cv.blur(image, (kX, kX))
        cols[0].markdown("Average Blur ({}, {})".format(kX, kY))
        cols[0].image(blurred)
        with cols[0]:
            download_button1(blurred, button, download, mime_type, key='3.1')
        blurred = cv.GaussianBlur(image, (kX, kY), 0)
        cols[1].markdown("Gaussian Blur ({}, {})".format(kX, kY))
        cols[1].image(blurred)
        with cols[1]:
            download_button1(blurred, button, download, mime_type, key='3.2')
        blurred = cv.medianBlur(image, k)
        cols[2].markdown("Median Blur {}".format(k))
        cols[2].image(blurred)
        with cols[2]:
            download_button1(blurred, button, download, mime_type, key='3.3')
"cv2.GaussianBlur",
"streamlit.text_input",
"streamlit.image",
"utils_helpers.convolve",
"streamlit.code",
"cv2.bitwise_and",
"cv2.medianBlur",
"utils_helpers.download_button1",
"numpy.arctan2",
"streamlit.expander",
"streamlit.title",
"utils_helpers.insert_data_mongodb",
"numpy.ones",
"st... | [((1448, 1515), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Choosse on of the following"""', 'selected_boxes'], {}), "('Choosse on of the following', selected_boxes)\n", (1468, 1515), True, 'import streamlit as st\n'), ((2291, 2304), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (2301, 2304), True, 'import streamlit as st\n'), ((5486, 5505), 'utils_helpers.get_location_data', 'get_location_data', ([], {}), '()\n', (5503, 5505), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((5524, 5538), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5536, 5538), False, 'from datetime import datetime\n'), ((6557, 6582), 'utils_helpers.average_ratings_mongodb', 'average_ratings_mongodb', ([], {}), '()\n', (6580, 6582), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((7828, 8003), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['f"""<p style="font-weight:bold;color:black;font-size:12px;border-radius:2%;">Ratings live atlas mongodb database feed</p>"""'], {'unsafe_allow_html': '(True)'}), '(\n f\'<p style="font-weight:bold;color:black;font-size:12px;border-radius:2%;">Ratings live atlas mongodb database feed</p>\'\n , unsafe_allow_html=True)\n', (7847, 8003), True, 'import streamlit as st\n'), ((8202, 8211), 'utils_helpers.version', 'version', ([], {}), '()\n', (8209, 8211), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((8251, 8290), 
'streamlit.header', 'st.header', (['"""Demo Adaptive Thresholding"""'], {}), "('Demo Adaptive Thresholding')\n", (8260, 8290), True, 'import streamlit as st\n'), ((8305, 8421), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Adaptive Thresholding Options"""', "('Adaptive Thresholding', 'Adaptive Thesholding Interactive')"], {}), "('Adaptive Thresholding Options', ('Adaptive Thresholding',\n 'Adaptive Thesholding Interactive'))\n", (8321, 8421), True, 'import streamlit as st\n'), ((22591, 22838), 'utils_helpers.source_code', 'source_code', (['"""Source Code + Adaptive Thresholding pyimagesearch.com"""', '"""https://pyimagesearch.com/2021/05/12/adaptive-thresholding-with-opencv-cv2-adaptivethreshold/"""', '"""https://gist.github.com/jjaramillo34/331a1aaeebeb4ff47d9b80a658643b60"""'], {}), "('Source Code + Adaptive Thresholding pyimagesearch.com',\n 'https://pyimagesearch.com/2021/05/12/adaptive-thresholding-with-opencv-cv2-adaptivethreshold/'\n , 'https://gist.github.com/jjaramillo34/331a1aaeebeb4ff47d9b80a658643b60')\n", (22602, 22838), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((23128, 23156), 'streamlit.header', 'st.header', (['"""Auto Canny Demo"""'], {}), "('Auto Canny Demo')\n", (23137, 23156), True, 'import streamlit as st\n'), ((23179, 23239), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', value=True)\n", (23198, 23239), True, 'import streamlit as st\n'), ((23255, 23323), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpeg']"}), "(label='Upload a file', type=['png', 'jpg', 'jpeg'])\n", (23271, 23323), True, 'import streamlit as st\n'), ((25538, 25787), 'utils_helpers.source_code', 'source_code', 
(['"""Source Code + Auto Canny pyimagesearch.com"""', '"""https://pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/"""', '"""https://gist.github.com/jjaramillo34/fb83acff62ce6502c398ba7133ab066c"""'], {}), "('Source Code + Auto Canny pyimagesearch.com',\n 'https://pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/'\n , 'https://gist.github.com/jjaramillo34/fb83acff62ce6502c398ba7133ab066c')\n", (25549, 25787), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((25974, 26000), 'streamlit.header', 'st.header', (['"""Resizing Demo"""'], {}), "('Resizing Demo')\n", (25983, 26000), True, 'import streamlit as st\n'), ((26016, 26084), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 'jpge'])\n", (26032, 26084), True, 'import streamlit as st\n'), ((26107, 26167), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', value=True)\n", (26126, 26167), True, 'import streamlit as st\n'), ((32938, 32975), 'streamlit.header', 'st.header', (['"""Canny Edge Detector Demo"""'], {}), "('Canny Edge Detector Demo')\n", (32947, 32975), True, 'import streamlit as st\n'), ((32991, 33068), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']", 'key': '"""1"""'}), "(label='Upload a file', type=['png', 'jpg', 'jpge'], key='1')\n", (33007, 33068), True, 'import streamlit as st\n'), ((33091, 33151), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real 
Time', value=True)\n", (33110, 33151), True, 'import streamlit as st\n'), ((39895, 39927), 'streamlit.header', 'st.header', (['"""Image Gradient Demo"""'], {}), "('Image Gradient Demo')\n", (39904, 39927), True, 'import streamlit as st\n'), ((39947, 40036), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Image Gradient Options"""', "('Sobel/Scharr', 'Magnitude Orientation')"], {}), "('Image Gradient Options', ('Sobel/Scharr',\n 'Magnitude Orientation'))\n", (39963, 40036), True, 'import streamlit as st\n'), ((50314, 50553), 'utils_helpers.source_code', 'source_code', (['"""Source Code + Image Gradients Tutorial pyimagesearch.com"""', '"""https://pyimagesearch.com/2021/05/12/image-gradients-with-opencv-sobel-and-scharr/"""', '"""https://gist.github.com/jjaramillo34/4a40d2faeddda4c1275b2c40c86260a4"""'], {}), "('Source Code + Image Gradients Tutorial pyimagesearch.com',\n 'https://pyimagesearch.com/2021/05/12/image-gradients-with-opencv-sobel-and-scharr/'\n , 'https://gist.github.com/jjaramillo34/4a40d2faeddda4c1275b2c40c86260a4')\n", (50325, 50553), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((50792, 50834), 'streamlit.header', 'st.header', (['"""Morphological Operations Demo"""'], {}), "('Morphological Operations Demo')\n", (50801, 50834), True, 'import streamlit as st\n'), ((50854, 50962), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Morphological Operations Options"""', "('Morphological Hats', 'Morphological Operations')"], {}), "('Morphological Operations Options', ('Morphological Hats',\n 'Morphological Operations'))\n", (50870, 50962), True, 'import streamlit as st\n'), ((68405, 68644), 'utils_helpers.source_code', 'source_code', (['"""Source Code + Morphological Operations Tutorial pyimagesearch.com"""', 
'"""https://pyimagesearch.com/2021/04/28/opencv-morphological-operations/"""', '"""https://gist.github.com/jjaramillo34/3c1a8489e7882a3dba1127f3046c2a78"""'], {}), "('Source Code + Morphological Operations Tutorial pyimagesearch.com'\n ,\n 'https://pyimagesearch.com/2021/04/28/opencv-morphological-operations/',\n 'https://gist.github.com/jjaramillo34/3c1a8489e7882a3dba1127f3046c2a78')\n", (68416, 68644), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((68863, 68893), 'streamlit.header', 'st.header', (['"""Thresholding Demo"""'], {}), "('Thresholding Demo')\n", (68872, 68893), True, 'import streamlit as st\n'), ((68908, 69000), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Thresholding Options"""', '(\'Simple Thresholding\', "Otsu\'s Thresholding")'], {}), '(\'Thresholding Options\', (\'Simple Thresholding\',\n "Otsu\'s Thresholding"))\n', (68924, 69000), True, 'import streamlit as st\n'), ((69019, 69079), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', value=True)\n", (69038, 69079), True, 'import streamlit as st\n'), ((78873, 79097), 'utils_helpers.source_code', 'source_code', (['"""Source Code + Thresholding Tutorial pyimagesearch.com"""', '"""https://pyimagesearch.com/2021/04/28/opencv-thresholding-cv2-threshold/"""', '"""https://gist.github.com/jjaramillo34/d504d5a9d6f88833c3720f132e734193"""'], {}), "('Source Code + Thresholding Tutorial pyimagesearch.com',\n 'https://pyimagesearch.com/2021/04/28/opencv-thresholding-cv2-threshold/',\n 'https://gist.github.com/jjaramillo34/d504d5a9d6f88833c3720f132e734193')\n", (78884, 79097), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, 
download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((79304, 79334), 'streamlit.header', 'st.header', (['"""Color Spaces Demo"""'], {}), "('Color Spaces Demo')\n", (79313, 79334), True, 'import streamlit as st\n'), ((79350, 79418), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 'jpge'])\n", (79366, 79418), True, 'import streamlit as st\n'), ((79441, 79501), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', value=True)\n", (79460, 79501), True, 'import streamlit as st\n'), ((84661, 84884), 'utils_helpers.source_code', 'source_code', (['"""Source Code + Color Spaces Tutorial pyimagesearch.com"""', '"""https://pyimagesearch.com/2021/04/28/opencv-color-spaces-cv2-cvtcolor/"""', '"""https://gist.github.com/jjaramillo34/74ef1a86014fb4fd7617c03ea10c3602"""'], {}), "('Source Code + Color Spaces Tutorial pyimagesearch.com',\n 'https://pyimagesearch.com/2021/04/28/opencv-color-spaces-cv2-cvtcolor/',\n 'https://gist.github.com/jjaramillo34/74ef1a86014fb4fd7617c03ea10c3602')\n", (84672, 84884), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((85116, 85154), 'streamlit.header', 'st.header', (['"""Smoothing & Blurring Demo"""'], {}), "('Smoothing & Blurring Demo')\n", (85125, 85154), True, 'import streamlit as st\n'), ((85174, 85249), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Smoothing & Blurring Options"""', "('Bilateral', 'Blurring')"], {}), "('Smoothing & Blurring Options', ('Bilateral', 'Blurring'))\n", (85190, 85249), True, 'import streamlit as st\n'), ((97259, 97490), 'utils_helpers.source_code', 
'source_code', (['f"""Source Code + Smoothing and Blurring Tutorial pyimagesearch.com"""', '"""https://pyimagesearch.com/2021/04/28/opencv-smoothing-and-blurring/"""', '"""https://gist.github.com/jjaramillo34/84863214120f9e6bcf49874670250ebb"""'], {}), "(f'Source Code + Smoothing and Blurring Tutorial pyimagesearch.com',\n 'https://pyimagesearch.com/2021/04/28/opencv-smoothing-and-blurring/',\n 'https://gist.github.com/jjaramillo34/84863214120f9e6bcf49874670250ebb')\n", (97270, 97490), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((1134, 1171), 'random.choice', 'random.choice', (['string.ascii_lowercase'], {}), '(string.ascii_lowercase)\n', (1147, 1171), False, 'import random\n'), ((2331, 2376), 'streamlit.title', 'st.title', (['"""Basic Image Processing Operations"""'], {}), "('Basic Image Processing Operations')\n", (2339, 2376), True, 'import streamlit as st\n'), ((2385, 2448), 'streamlit.image', 'st.image', (['"""images/image_processing.jpeg"""'], {'use_column_width': '(True)'}), "('images/image_processing.jpeg', use_column_width=True)\n", (2393, 2448), True, 'import streamlit as st\n'), ((2456, 2473), 'streamlit.title', 'st.title', (['"""Usage"""'], {}), "('Usage')\n", (2464, 2473), True, 'import streamlit as st\n'), ((2482, 2695), 'streamlit.markdown', 'st.markdown', (["('A simple app that shows different image processing techniques. You can choose the options from the dropdwon menu on the left.'\n + 'Technologies use to build the app:')"], {'unsafe_allow_html': '(True)'}), "(\n 'A simple app that shows different image processing techniques. 
You can choose the options from the dropdwon menu on the left.'\n + 'Technologies use to build the app:', unsafe_allow_html=True)\n", (2493, 2695), True, 'import streamlit as st\n'), ((2715, 2743), 'streamlit.title', 'st.title', (['"""Technology Stack"""'], {}), "('Technology Stack')\n", (2723, 2743), True, 'import streamlit as st\n'), ((2752, 3409), 'streamlit.markdown', 'st.markdown', (['"""\n<p align="center">\n <img src="https://img.shields.io/badge/Python-FFD43B?style=for-the-badge&logo=python&logoColor=blue" />\n <img src="https://img.shields.io/badge/MongoDB-4EA94B?style=for-the-badge&logo=mongodb&logoColor=white" />\n <img src="https://img.shields.io/badge/Streamlit-FF4B4B?style=for-the-badge&logo=Streamlit&logoColor=white" />\n <img src="https://img.shields.io/badge/OpenCV-27338e?style=for-the-badge&logo=OpenCV&logoColor=white" />\n <img src="https://img.shields.io/badge/Visual_Studio_Code-0078D4?style=for-the-badge&logo=visual%20studio%20code&logoColor=white" />\n</p>"""'], {'unsafe_allow_html': '(True)'}), '(\n """\n<p align="center">\n <img src="https://img.shields.io/badge/Python-FFD43B?style=for-the-badge&logo=python&logoColor=blue" />\n <img src="https://img.shields.io/badge/MongoDB-4EA94B?style=for-the-badge&logo=mongodb&logoColor=white" />\n <img src="https://img.shields.io/badge/Streamlit-FF4B4B?style=for-the-badge&logo=Streamlit&logoColor=white" />\n <img src="https://img.shields.io/badge/OpenCV-27338e?style=for-the-badge&logo=OpenCV&logoColor=white" />\n <img src="https://img.shields.io/badge/Visual_Studio_Code-0078D4?style=for-the-badge&logo=visual%20studio%20code&logoColor=white" />\n</p>"""\n , unsafe_allow_html=True)\n', (2763, 3409), True, 'import streamlit as st\n'), ((3426, 3465), 'streamlit.title', 'st.title', (['"""Image Processing Techniques"""'], {}), "('Image Processing Techniques')\n", (3434, 3465), True, 'import streamlit as st\n'), ((3474, 4133), 'streamlit.markdown', 'st.markdown', (['"""\n>Morphological Operations --- OpenCV 
Morphological Operations\n>\n>Smoothing and Blurring --- OpenCV Smoothing and Blurring\n>\n>Color Spaces -- OpenCV Color Spaces (cv2.cvtColor)\n>\n>Basic Thresholding --- OpenCV Thresholding (cv2.threshold) \n>\n>Adaptive Thresholding --- Adaptive Thresholding with OpenCV (cv2.adaptiveThreshold)\n>\n>Kernels --- Convolutions with OpenCV and Python\n>\n>Image Gradients --- Image Gradients with OpenCV (Sobel and Scharr)\n>\n>Edge Detection --- OpenCV Edge Detection (cv2.Canny)\n>\n>Automatic Edge Detection --- Zero-parameter, automatic Canny edge detection with Python and OpenCV"""'], {'unsafe_allow_html': '(True)'}), '(\n """\n>Morphological Operations --- OpenCV Morphological Operations\n>\n>Smoothing and Blurring --- OpenCV Smoothing and Blurring\n>\n>Color Spaces -- OpenCV Color Spaces (cv2.cvtColor)\n>\n>Basic Thresholding --- OpenCV Thresholding (cv2.threshold) \n>\n>Adaptive Thresholding --- Adaptive Thresholding with OpenCV (cv2.adaptiveThreshold)\n>\n>Kernels --- Convolutions with OpenCV and Python\n>\n>Image Gradients --- Image Gradients with OpenCV (Sobel and Scharr)\n>\n>Edge Detection --- OpenCV Edge Detection (cv2.Canny)\n>\n>Automatic Edge Detection --- Zero-parameter, automatic Canny edge detection with Python and OpenCV"""\n , unsafe_allow_html=True)\n', (3485, 4133), True, 'import streamlit as st\n'), ((4132, 4154), 'streamlit.title', 'st.title', (['"""Dedication"""'], {}), "('Dedication')\n", (4140, 4154), True, 'import streamlit as st\n'), ((4163, 4403), 'streamlit.markdown', 'st.markdown', (['"""> To my Mother (Elsa), Paula, Cris, Maty and Sofia, To whom made this possible.\n>\n> Special thanks to Adrian from pyimagesearch.com for great tutorials of image processing, deep learning, augmented realty, etc. """'], {}), '(\n """> To my Mother (Elsa), Paula, Cris, Maty and Sofia, To whom made this possible.\n>\n> Special thanks to Adrian from pyimagesearch.com for great tutorials of image processing, deep learning, augmented realty, etc. 
"""\n )\n', (4174, 4403), True, 'import streamlit as st\n'), ((4411, 4563), 'streamlit.markdown', 'st.markdown', (['"""> Long Live Rock N Roll.\n>\n> - "Well if I have to, I will die seven deaths just to lie In the arms of my eversleeping aim\\""""'], {}), '(\n """> Long Live Rock N Roll.\n>\n> - "Well if I have to, I will die seven deaths just to lie In the arms of my eversleeping aim\\""""\n )\n', (4422, 4563), True, 'import streamlit as st\n'), ((4561, 4580), 'streamlit.title', 'st.title', (['"""Contact"""'], {}), "('Contact')\n", (4569, 4580), True, 'import streamlit as st\n'), ((4589, 5466), 'streamlit.markdown', 'st.markdown', (['"""<p align="center">\n <a href="mailto:<EMAIL>" rel="nofollow">\n <img alt="Gmail" src="https://img.shields.io/badge/Gmail-D14836?style=for-the-badge&logo=gmail&logoColor=white"/>\n </a>\n <a href="https://github.com/jjaramillo34/" rel="nofollow">\n <img alt="Github" src="https://img.shields.io/badge/GitHub-%2312100E.svg?&style=for-the-badge&logo=Github&logoColor=white"/>\n </a>\n <a href="https://twitter.com/jejaramilloc" rel="nofollow">\n <img alt="Twitter" src="https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white"/>\n </a>\n <a href="https://www.linkedin.com/in/javierjaramillo1/" rel="nofollow">\n <img alt="Linkedin" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"/>\n </a>\n </p>"""'], {'unsafe_allow_html': '(True)'}), '(\n """<p align="center">\n <a href="mailto:<EMAIL>" rel="nofollow">\n <img alt="Gmail" src="https://img.shields.io/badge/Gmail-D14836?style=for-the-badge&logo=gmail&logoColor=white"/>\n </a>\n <a href="https://github.com/jjaramillo34/" rel="nofollow">\n <img alt="Github" src="https://img.shields.io/badge/GitHub-%2312100E.svg?&style=for-the-badge&logo=Github&logoColor=white"/>\n </a>\n <a href="https://twitter.com/jejaramilloc" rel="nofollow">\n <img alt="Twitter" 
src="https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white"/>\n </a>\n <a href="https://www.linkedin.com/in/javierjaramillo1/" rel="nofollow">\n <img alt="Linkedin" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"/>\n </a>\n </p>"""\n , unsafe_allow_html=True)\n', (4600, 5466), True, 'import streamlit as st\n'), ((5722, 5782), 'streamlit.sidebar.form', 'st.sidebar.form', ([], {'key': '"""columns_in_form"""', 'clear_on_submit': '(True)'}), "(key='columns_in_form', clear_on_submit=True)\n", (5737, 5782), True, 'import streamlit as st\n'), ((5883, 6058), 'streamlit.slider', 'st.slider', (['"""Please rate the app"""'], {'min_value': '(1)', 'max_value': '(5)', 'value': '(3)', 'help': '"""Drag the slider to rate the app. This is a 1-5 rating scale where 5 is the highest rating"""'}), "('Please rate the app', min_value=1, max_value=5, value=3, help=\n 'Drag the slider to rate the app. This is a 1-5 rating scale where 5 is the highest rating'\n )\n", (5892, 6058), True, 'import streamlit as st\n'), ((6065, 6119), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Please leave your feedback here"""'}), "(label='Please leave your feedback here')\n", (6078, 6119), True, 'import streamlit as st\n'), ((6140, 6171), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit"""'], {}), "('Submit')\n", (6161, 6171), True, 'import streamlit as st\n'), ((6620, 6651), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""App Ratings"""'], {}), "('App Ratings')\n", (6636, 6651), True, 'import streamlit as st\n'), ((8008, 8045), 'streamlit.expander', 'st.expander', (['"""Show MongoDB Dashboard"""'], {}), "('Show MongoDB Dashboard')\n", (8019, 8045), True, 'import streamlit as st\n'), ((8055, 8202), 'streamlit.components.v1.iframe', 'components.iframe', (['"""https://charts.mongodb.com/charts-project-0-koqvp/public/dashboards/62523657-6131-48ab-8c6c-3893cfb849fa"""'], {'height': 
'(800)'}), "(\n 'https://charts.mongodb.com/charts-project-0-koqvp/public/dashboards/62523657-6131-48ab-8c6c-3893cfb849fa'\n , height=800)\n", (8072, 8202), True, 'import streamlit.components.v1 as components\n'), ((8480, 8557), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']", 'key': '"""1"""'}), "(label='Upload a file', type=['png', 'jpg', 'jpge'], key='1')\n", (8496, 8557), True, 'import streamlit as st\n'), ((16410, 16487), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']", 'key': '"""1"""'}), "(label='Upload a file', type=['png', 'jpg', 'jpge'], key='1')\n", (16426, 16487), True, 'import streamlit as st\n'), ((22853, 22893), 'streamlit.expander', 'st.expander', (['"""DuckDuckGo Search Results"""'], {}), "('DuckDuckGo Search Results')\n", (22864, 22893), True, 'import streamlit as st\n'), ((22903, 22951), 'streamlit.subheader', 'st.subheader', (['"""More About Adaptive Thresholding"""'], {}), "('More About Adaptive Thresholding')\n", (22915, 22951), True, 'import streamlit as st\n'), ((23019, 23068), 'utils_helpers.scrape_duckduckgo', 'scrape_duckduckgo', (['"""adaptive thresholding opencv"""'], {}), "('adaptive thresholding opencv')\n", (23036, 23068), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((23441, 23465), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (23455, 23465), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((23482, 23498), 'utils_helpers.converted', 'converted', (['image'], {}), 
'(image)\n', (23491, 23498), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((23514, 23551), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (23525, 23551), True, 'import cv2 as cv\n'), ((23570, 23602), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(3, 3)', '(0)'], {}), '(gray, (3, 3), 0)\n', (23585, 23602), True, 'import cv2 as cv\n'), ((23736, 23762), 'cv2.Canny', 'cv.Canny', (['blurred', '(10)', '(200)'], {}), '(blurred, 10, 200)\n', (23744, 23762), True, 'import cv2 as cv\n'), ((23779, 23806), 'cv2.Canny', 'cv.Canny', (['blurred', '(225)', '(250)'], {}), '(blurred, 225, 250)\n', (23787, 23806), True, 'import cv2 as cv\n'), ((23822, 23848), 'utils_helpers.auto_canny_thresh', 'auto_canny_thresh', (['blurred'], {}), '(blurred)\n', (23839, 23848), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((24543, 24568), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (24553, 24568), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((24584, 24621), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (24595, 24621), True, 'import cv2 as cv\n'), ((24640, 24672), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(3, 3)', '(0)'], {}), '(gray, (3, 3), 0)\n', (24655, 24672), True, 'import cv2 as cv\n'), ((24816, 24842), 'cv2.Canny', 'cv.Canny', (['blurred', 
'(10)', '(200)'], {}), '(blurred, 10, 200)\n', (24824, 24842), True, 'import cv2 as cv\n'), ((24859, 24886), 'cv2.Canny', 'cv.Canny', (['blurred', '(225)', '(250)'], {}), '(blurred, 225, 250)\n', (24867, 24886), True, 'import cv2 as cv\n'), ((24902, 24928), 'utils_helpers.auto_canny_thresh', 'auto_canny_thresh', (['blurred'], {}), '(blurred)\n', (24919, 24928), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((25802, 25842), 'streamlit.expander', 'st.expander', (['"""DuckDuckGo Search Results"""'], {}), "('DuckDuckGo Search Results')\n", (25813, 25842), True, 'import streamlit as st\n'), ((25852, 25889), 'streamlit.subheader', 'st.subheader', (['"""More About Auto Canny"""'], {}), "('More About Auto Canny')\n", (25864, 25889), True, 'import streamlit as st\n'), ((25898, 25936), 'utils_helpers.scrape_duckduckgo', 'scrape_duckduckgo', (['"""auto canny opencv"""'], {}), "('auto canny opencv')\n", (25915, 25936), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((26218, 26242), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (26232, 26242), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((26259, 26275), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (26268, 26275), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, 
insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((26565, 26625), 'numpy.array', 'np.array', (['([0, -1, 0], [-1, 5, -1], [0, -1, 0])'], {'dtype': '"""int"""'}), "(([0, -1, 0], [-1, 5, -1], [0, -1, 0]), dtype='int')\n", (26573, 26625), True, 'import numpy as np\n'), ((26770, 26827), 'numpy.array', 'np.array', (['([0, 1, 0], [1, -4, 1], [0, 1, 0])'], {'dtype': '"""int"""'}), "(([0, 1, 0], [1, -4, 1], [0, 1, 0]), dtype='int')\n", (26778, 26827), True, 'import numpy as np\n'), ((26927, 26986), 'numpy.array', 'np.array', (['([-1, 0, 1], [-2, 0, 2], [-1, 0, 1])'], {'dtype': '"""int"""'}), "(([-1, 0, 1], [-2, 0, 2], [-1, 0, 1]), dtype='int')\n", (26935, 26986), True, 'import numpy as np\n'), ((27086, 27145), 'numpy.array', 'np.array', (['([-1, -2, -1], [0, 0, 0], [1, 2, 1])'], {'dtype': '"""int"""'}), "(([-1, -2, -1], [0, 0, 0], [1, 2, 1]), dtype='int')\n", (27094, 27145), True, 'import numpy as np\n'), ((27669, 27704), 'utils_helpers.load_image', 'load_image', (['"""images/supermario.jpg"""'], {}), "('images/supermario.jpg')\n", (27679, 27704), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((27720, 27757), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (27731, 27757), True, 'import cv2 as cv\n'), ((28807, 28820), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (28817, 28820), True, 'import streamlit as st\n'), ((32886, 32895), 'utils_helpers.version', 'version', ([], {}), '()\n', (32893, 32895), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((33197, 33221), 
'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (33211, 33221), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((33238, 33254), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (33247, 33254), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((33273, 33384), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Canny Edge Detector Options"""', "('Canny Edge Detector', 'Canny Edge Detector Interactive')"], {}), "('Canny Edge Detector Options', ('Canny Edge Detector',\n 'Canny Edge Detector Interactive'))\n", (33289, 33384), True, 'import streamlit as st\n'), ((36634, 36745), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Canny Edge Detector Options"""', "('Canny Edge Detector', 'Canny Edge Detector Interactive')"], {}), "('Canny Edge Detector Options', ('Canny Edge Detector',\n 'Canny Edge Detector Interactive'))\n", (36650, 36745), True, 'import streamlit as st\n'), ((40091, 40159), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 'jpge'])\n", (40107, 40159), True, 'import streamlit as st\n'), ((40186, 40246), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', value=True)\n", (40205, 40246), True, 'import streamlit as st\n'), ((44928, 44996), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 
'jpge'])\n", (44944, 44996), True, 'import streamlit as st\n'), ((45023, 45083), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', value=True)\n", (45042, 45083), True, 'import streamlit as st\n'), ((50584, 50624), 'streamlit.expander', 'st.expander', (['"""DuckDuckGo Search Results"""'], {}), "('DuckDuckGo Search Results')\n", (50595, 50624), True, 'import streamlit as st\n'), ((50634, 50685), 'streamlit.subheader', 'st.subheader', (['"""More About Morphological Operations"""'], {}), "('More About Morphological Operations')\n", (50646, 50685), True, 'import streamlit as st\n'), ((50694, 50746), 'utils_helpers.scrape_duckduckgo', 'scrape_duckduckgo', (['"""morphological operations opencv"""'], {}), "('morphological operations opencv')\n", (50711, 50746), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((51028, 51096), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 'jpge'])\n", (51044, 51096), True, 'import streamlit as st\n'), ((51123, 51183), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', value=True)\n", (51142, 51183), True, 'import streamlit as st\n'), ((54152, 54220), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 'jpge'])\n", (54168, 54220), True, 'import streamlit as st\n'), ((54247, 54307), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', 
value=True)\n", (54266, 54307), True, 'import streamlit as st\n'), ((68671, 68711), 'streamlit.expander', 'st.expander', (['"""DuckDuckGo Search Results"""'], {}), "('DuckDuckGo Search Results')\n", (68682, 68711), True, 'import streamlit as st\n'), ((68721, 68772), 'streamlit.subheader', 'st.subheader', (['"""More About Morphological Operations"""'], {}), "('More About Morphological Operations')\n", (68733, 68772), True, 'import streamlit as st\n'), ((68781, 68833), 'utils_helpers.scrape_duckduckgo', 'scrape_duckduckgo', (['"""morphological operations opencv"""'], {}), "('morphological operations opencv')\n", (68798, 68833), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((69145, 69213), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 'jpge'])\n", (69161, 69213), True, 'import streamlit as st\n'), ((75879, 75947), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 'jpge'])\n", (75895, 75947), True, 'import streamlit as st\n'), ((79129, 79169), 'streamlit.expander', 'st.expander', (['"""DuckDuckGo Search Results"""'], {}), "('DuckDuckGo Search Results')\n", (79140, 79169), True, 'import streamlit as st\n'), ((79179, 79217), 'streamlit.subheader', 'st.subheader', (['"""More About Thesholding"""'], {}), "('More About Thesholding')\n", (79191, 79217), True, 'import streamlit as st\n'), ((79226, 79266), 'utils_helpers.scrape_duckduckgo', 'scrape_duckduckgo', (['"""opencv thresholding"""'], {}), "('opencv thresholding')\n", (79243, 79266), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, 
converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((84916, 84956), 'streamlit.expander', 'st.expander', (['"""DuckDuckGo Search Results"""'], {}), "('DuckDuckGo Search Results')\n", (84927, 84956), True, 'import streamlit as st\n'), ((85046, 85078), 'utils_helpers.scrape_duckduckgo', 'scrape_duckduckgo', (['f"""opencv {t}"""'], {}), "(f'opencv {t}')\n", (85063, 85078), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((85309, 85377), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 'jpge'])\n", (85325, 85377), True, 'import streamlit as st\n'), ((85404, 85464), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', value=True)\n", (85423, 85464), True, 'import streamlit as st\n'), ((89744, 89812), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload a file"""', 'type': "['png', 'jpg', 'jpge']"}), "(label='Upload a file', type=['png', 'jpg', 'jpge'])\n", (89760, 89812), True, 'import streamlit as st\n'), ((89839, 89899), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', ([], {'label': '"""Update in Real Time"""', 'value': '(True)'}), "(label='Update in Real Time', value=True)\n", (89858, 89899), True, 'import streamlit as st\n'), ((97522, 97562), 'streamlit.expander', 'st.expander', (['"""DuckDuckGo Search Results"""'], {}), "('DuckDuckGo Search Results')\n", (97533, 97562), True, 'import streamlit as st\n'), ((97662, 97694), 'utils_helpers.scrape_duckduckgo', 'scrape_duckduckgo', (['f"""opencv {t}"""'], {}), "(f'opencv {t}')\n", (97679, 97694), False, 'from 
utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((6206, 6243), 'streamlit.write', 'st.write', (['"""Thanks for your feedback!"""'], {}), "('Thanks for your feedback!')\n", (6214, 6243), True, 'import streamlit as st\n'), ((6256, 6283), 'streamlit.markdown', 'st.markdown', (['"""Your Rating:"""'], {}), "('Your Rating:')\n", (6267, 6283), True, 'import streamlit as st\n'), ((6296, 6315), 'streamlit.markdown', 'st.markdown', (['rating'], {}), '(rating)\n', (6307, 6315), True, 'import streamlit as st\n'), ((6328, 6357), 'streamlit.markdown', 'st.markdown', (['"""Your Feedback:"""'], {}), "('Your Feedback:')\n", (6339, 6357), True, 'import streamlit as st\n'), ((6370, 6391), 'streamlit.markdown', 'st.markdown', (['feedback'], {}), '(feedback)\n', (6381, 6391), True, 'import streamlit as st\n'), ((6404, 6536), 'utils_helpers.insert_data_mongodb', 'insert_data_mongodb', ([], {'rating': 'rating', 'feedback': 'feedback', 'date_r': 'date_r', 'city': 'city', 'ip': 'ip', 'region': 'region', 'country': 'country', 'loc': 'loc'}), '(rating=rating, feedback=feedback, date_r=date_r, city=\n city, ip=ip, region=region, country=country, loc=loc)\n', (6423, 6536), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((6879, 6910), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""App Ratings"""'], {}), "('App Ratings')\n", (6895, 6910), True, 'import streamlit as st\n'), ((23995, 24029), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (24006, 24029), True, 'import streamlit as st\n'), ((24043, 24066), 'streamlit.markdown', 'st.markdown', (['"""Original"""'], {}), 
"('Original')\n", (24054, 24066), True, 'import streamlit as st\n'), ((24079, 24094), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (24087, 24094), True, 'import streamlit as st\n'), ((24117, 24162), 'streamlit.expander', 'st.expander', (['"""Show Auto Canny"""'], {'expanded': '(True)'}), "('Show Auto Canny', expanded=True)\n", (24128, 24162), True, 'import streamlit as st\n'), ((24183, 24196), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (24193, 24196), True, 'import streamlit as st\n'), ((25075, 25109), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (25086, 25109), True, 'import streamlit as st\n'), ((25123, 25146), 'streamlit.markdown', 'st.markdown', (['"""Original"""'], {}), "('Original')\n", (25134, 25146), True, 'import streamlit as st\n'), ((25159, 25174), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (25167, 25174), True, 'import streamlit as st\n'), ((25197, 25242), 'streamlit.expander', 'st.expander', (['"""Show Auto Canny"""'], {'expanded': '(True)'}), "('Show Auto Canny', expanded=True)\n", (25208, 25242), True, 'import streamlit as st\n'), ((25263, 25276), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (25273, 25276), True, 'import streamlit as st\n'), ((26384, 26414), 'numpy.ones', 'np.ones', (['(7, 7)'], {'dtype': '"""float"""'}), "((7, 7), dtype='float')\n", (26391, 26414), True, 'import numpy as np\n'), ((26453, 26485), 'numpy.ones', 'np.ones', (['(21, 21)'], {'dtype': '"""float"""'}), "((21, 21), dtype='float')\n", (26460, 26485), True, 'import numpy as np\n'), ((27804, 27861), 'streamlit.spinner', 'st.spinner', (['"""Creating Convolutions please wait for it..."""'], {}), "('Creating Convolutions please wait for it...')\n", (27814, 27861), True, 'import streamlit as st\n'), ((28733, 28785), 'streamlit.success', 'st.success', (['"""Convolutions were created succesfully!"""'], {}), "('Convolutions were created succesfully!')\n", (28743, 
28785), True, 'import streamlit as st\n'), ((28834, 28860), 'streamlit.expander', 'st.expander', (['"""Source Code"""'], {}), "('Source Code')\n", (28845, 28860), True, 'import streamlit as st\n'), ((32652, 32702), 'streamlit.expander', 'st.expander', (['"""Convolutions with OpenCV and Python"""'], {}), "('Convolutions with OpenCV and Python')\n", (32663, 32702), True, 'import streamlit as st\n'), ((32770, 32886), 'streamlit.components.v1.iframe', 'components.iframe', (['"""https://pyimagesearch.com/2016/07/25/convolutions-with-opencv-and-python/"""'], {'height': '(800)'}), "(\n 'https://pyimagesearch.com/2016/07/25/convolutions-with-opencv-and-python/'\n , height=800)\n", (32787, 32886), True, 'import streamlit.components.v1 as components\n'), ((33568, 33605), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (33579, 33605), True, 'import cv2 as cv\n'), ((33628, 33660), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (33643, 33660), True, 'import cv2 as cv\n'), ((33687, 33700), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (33697, 33700), True, 'import streamlit as st\n'), ((34225, 34251), 'cv2.Canny', 'cv.Canny', (['blurred', '(10)', '(200)'], {}), '(blurred, 10, 200)\n', (34233, 34251), True, 'import cv2 as cv\n'), ((34270, 34296), 'cv2.Canny', 'cv.Canny', (['blurred', '(30)', '(150)'], {}), '(blurred, 30, 150)\n', (34278, 34296), True, 'import cv2 as cv\n'), ((34317, 34344), 'cv2.Canny', 'cv.Canny', (['blurred', '(240)', '(250)'], {}), '(blurred, 240, 250)\n', (34325, 34344), True, 'import cv2 as cv\n'), ((34389, 34402), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (34399, 34402), True, 'import streamlit as st\n'), ((34797, 34821), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (34811, 34821), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, 
converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((34842, 34858), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (34851, 34858), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((34955, 34980), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (34965, 34980), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((35000, 35037), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (35011, 35037), True, 'import cv2 as cv\n'), ((35060, 35092), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (35075, 35092), True, 'import cv2 as cv\n'), ((35119, 35132), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (35129, 35132), True, 'import streamlit as st\n'), ((35669, 35682), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (35679, 35682), True, 'import streamlit as st\n'), ((36883, 36908), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (36893, 36908), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((36928, 36965), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (36939, 36965), True, 'import cv2 as cv\n'), ((36988, 37020), 'cv2.GaussianBlur', 
'cv.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (37003, 37020), True, 'import cv2 as cv\n'), ((37047, 37060), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (37057, 37060), True, 'import streamlit as st\n'), ((37585, 37611), 'cv2.Canny', 'cv.Canny', (['blurred', '(10)', '(200)'], {}), '(blurred, 10, 200)\n', (37593, 37611), True, 'import cv2 as cv\n'), ((37630, 37656), 'cv2.Canny', 'cv.Canny', (['blurred', '(30)', '(150)'], {}), '(blurred, 30, 150)\n', (37638, 37656), True, 'import cv2 as cv\n'), ((37677, 37704), 'cv2.Canny', 'cv.Canny', (['blurred', '(240)', '(250)'], {}), '(blurred, 240, 250)\n', (37685, 37704), True, 'import cv2 as cv\n'), ((37749, 37762), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (37759, 37762), True, 'import streamlit as st\n'), ((38216, 38241), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (38226, 38241), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((38261, 38298), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (38272, 38298), True, 'import cv2 as cv\n'), ((38321, 38353), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (38336, 38353), True, 'import cv2 as cv\n'), ((38380, 38393), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (38390, 38393), True, 'import streamlit as st\n'), ((38930, 38943), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (38940, 38943), True, 'import streamlit as st\n'), ((45200, 45224), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (45214, 45224), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, 
get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((45245, 45261), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (45254, 45261), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((45281, 45318), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (45292, 45318), True, 'import cv2 as cv\n'), ((45406, 45437), 'cv2.Sobel', 'cv.Sobel', (['gray', 'cv.CV_64F', '(1)', '(0)'], {}), '(gray, cv.CV_64F, 1, 0)\n', (45414, 45437), True, 'import cv2 as cv\n'), ((45455, 45486), 'cv2.Sobel', 'cv.Sobel', (['gray', 'cv.CV_64F', '(0)', '(1)'], {}), '(gray, cv.CV_64F, 0, 1)\n', (45463, 45486), True, 'import cv2 as cv\n'), ((45573, 45599), 'numpy.sqrt', 'np.sqrt', (['(gX ** 2 + gY ** 2)'], {}), '(gX ** 2 + gY ** 2)\n', (45580, 45599), True, 'import numpy as np\n'), ((45743, 45772), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['magnitude'], {}), '(magnitude)\n', (45761, 45772), True, 'import cv2 as cv\n'), ((45792, 45832), 'cv2.applyColorMap', 'cv.applyColorMap', (['dist1', 'cv.COLORMAP_JET'], {}), '(dist1, cv.COLORMAP_JET)\n', (45808, 45832), True, 'import cv2 as cv\n'), ((45866, 45897), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['orientation'], {}), '(orientation)\n', (45884, 45897), True, 'import cv2 as cv\n'), ((45917, 45957), 'cv2.applyColorMap', 'cv.applyColorMap', (['dist2', 'cv.COLORMAP_JET'], {}), '(dist2, cv.COLORMAP_JET)\n', (45933, 45957), True, 'import cv2 as cv\n'), ((46885, 46931), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(8, 4)'}), '(nrows=1, ncols=3, figsize=(8, 4))\n', (46897, 46931), True, 'from matplotlib import pyplot as plt\n'), ((47824, 47849), 
'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (47834, 47849), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((47869, 47906), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (47880, 47906), True, 'import cv2 as cv\n'), ((47994, 48025), 'cv2.Sobel', 'cv.Sobel', (['gray', 'cv.CV_64F', '(1)', '(0)'], {}), '(gray, cv.CV_64F, 1, 0)\n', (48002, 48025), True, 'import cv2 as cv\n'), ((48043, 48074), 'cv2.Sobel', 'cv.Sobel', (['gray', 'cv.CV_64F', '(0)', '(1)'], {}), '(gray, cv.CV_64F, 0, 1)\n', (48051, 48074), True, 'import cv2 as cv\n'), ((48161, 48187), 'numpy.sqrt', 'np.sqrt', (['(gX ** 2 + gY ** 2)'], {}), '(gX ** 2 + gY ** 2)\n', (48168, 48187), True, 'import numpy as np\n'), ((48331, 48360), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['magnitude'], {}), '(magnitude)\n', (48349, 48360), True, 'import cv2 as cv\n'), ((48380, 48422), 'cv2.applyColorMap', 'cv.applyColorMap', (['dist1', 'cv.COLORMAP_OCEAN'], {}), '(dist1, cv.COLORMAP_OCEAN)\n', (48396, 48422), True, 'import cv2 as cv\n'), ((48456, 48487), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['orientation'], {}), '(orientation)\n', (48474, 48487), True, 'import cv2 as cv\n'), ((48507, 48547), 'cv2.applyColorMap', 'cv.applyColorMap', (['dist2', 'cv.COLORMAP_JET'], {}), '(dist2, cv.COLORMAP_JET)\n', (48523, 48547), True, 'import cv2 as cv\n'), ((49468, 49514), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(8, 4)'}), '(nrows=1, ncols=3, figsize=(8, 4))\n', (49480, 49514), True, 'from matplotlib import pyplot as plt\n'), ((51303, 51327), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (51317, 51327), False, 'from utils_helpers import 
auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((51348, 51364), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (51357, 51364), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((51384, 51421), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (51395, 51421), True, 'import cv2 as cv\n'), ((51606, 51654), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(13, 5)'], {}), '(cv.MORPH_RECT, (13, 5))\n', (51630, 51654), True, 'import cv2 as cv\n'), ((51678, 51730), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_BLACKHAT', 'rectKernel'], {}), '(gray, cv.MORPH_BLACKHAT, rectKernel)\n', (51693, 51730), True, 'import cv2 as cv\n'), ((51897, 51947), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_TOPHAT', 'rectKernel'], {}), '(gray, cv.MORPH_TOPHAT, rectKernel)\n', (51912, 51947), True, 'import cv2 as cv\n'), ((51961, 51995), 'streamlit.subheader', 'st.subheader', (['"""Morphological Hats"""'], {}), "('Morphological Hats')\n", (51973, 51995), True, 'import streamlit as st\n'), ((52756, 52805), 'utils_helpers.load_image', 'load_image', (['"""images/pyimagesearch_logo_noise.png"""'], {}), "('images/pyimagesearch_logo_noise.png')\n", (52766, 52805), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((52825, 52862), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', 
(52836, 52862), True, 'import cv2 as cv\n'), ((53047, 53095), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(13, 5)'], {}), '(cv.MORPH_RECT, (13, 5))\n', (53071, 53095), True, 'import cv2 as cv\n'), ((53119, 53171), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_BLACKHAT', 'rectKernel'], {}), '(gray, cv.MORPH_BLACKHAT, rectKernel)\n', (53134, 53171), True, 'import cv2 as cv\n'), ((53338, 53388), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_TOPHAT', 'rectKernel'], {}), '(gray, cv.MORPH_TOPHAT, rectKernel)\n', (53353, 53388), True, 'import cv2 as cv\n'), ((53402, 53436), 'streamlit.subheader', 'st.subheader', (['"""Morphological Hats"""'], {}), "('Morphological Hats')\n", (53414, 53436), True, 'import streamlit as st\n'), ((54445, 54469), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (54459, 54469), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((54490, 54506), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (54499, 54506), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((54526, 54563), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (54537, 54563), True, 'import cv2 as cv\n'), ((61488, 61537), 'utils_helpers.load_image', 'load_image', (['"""images/pyimagesearch_logo_noise.png"""'], {}), "('images/pyimagesearch_logo_noise.png')\n", (61498, 61537), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, 
insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((61557, 61594), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (61568, 61594), True, 'import cv2 as cv\n'), ((69267, 69291), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (69281, 69291), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((69312, 69328), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (69321, 69328), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((69538, 69575), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (69549, 69575), True, 'import cv2 as cv\n'), ((69598, 69630), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (69613, 69630), True, 'import cv2 as cv\n'), ((72593, 72618), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (72603, 72618), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((72828, 72865), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (72839, 72865), True, 'import cv2 as cv\n'), ((72888, 72920), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (72903, 72920), True, 'import cv2 as cv\n'), ((76054, 76078), 
'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (76068, 76078), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((76099, 76115), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (76108, 76115), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((76325, 76362), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (76336, 76362), True, 'import cv2 as cv\n'), ((76385, 76417), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (76400, 76417), True, 'import cv2 as cv\n'), ((77519, 77544), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (77529, 77544), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((77753, 77790), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (77764, 77790), True, 'import cv2 as cv\n'), ((77813, 77845), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (77828, 77845), True, 'import cv2 as cv\n'), ((79549, 79600), 'streamlit.expander', 'st.expander', (['"""Show RGB Color Spaces"""'], {'expanded': '(True)'}), "('Show RGB Color Spaces', expanded=True)\n", (79560, 79600), True, 'import streamlit as st\n'), ((79671, 79684), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (79681, 79684), 
True, 'import streamlit as st\n'), ((79705, 79729), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (79719, 79729), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((79750, 79766), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (79759, 79766), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((80239, 80275), 'streamlit.expander', 'st.expander', (['"""Show HSV Color Spaces"""'], {}), "('Show HSV Color Spaces')\n", (80250, 80275), True, 'import streamlit as st\n'), ((80363, 80376), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (80373, 80376), True, 'import streamlit as st\n'), ((80395, 80431), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2HSV'], {}), '(image, cv.COLOR_BGR2HSV)\n', (80406, 80431), True, 'import cv2 as cv\n'), ((80899, 80938), 'streamlit.expander', 'st.expander', (['"""Show L*a*b* Color Spaces"""'], {}), "('Show L*a*b* Color Spaces')\n", (80910, 80938), True, 'import streamlit as st\n'), ((81029, 81042), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (81039, 81042), True, 'import streamlit as st\n'), ((81061, 81097), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2LAB'], {}), '(image, cv.COLOR_BGR2LAB)\n', (81072, 81097), True, 'import cv2 as cv\n'), ((81558, 81587), 'streamlit.expander', 'st.expander', (['"""Show Grayscale"""'], {}), "('Show Grayscale')\n", (81569, 81587), True, 'import streamlit as st\n'), ((81676, 81689), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (81686, 81689), True, 'import streamlit as st\n'), ((81709, 81746), 'cv2.cvtColor', 
'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (81720, 81746), True, 'import cv2 as cv\n'), ((82132, 82183), 'streamlit.expander', 'st.expander', (['"""Show RGB Color Spaces"""'], {'expanded': '(True)'}), "('Show RGB Color Spaces', expanded=True)\n", (82143, 82183), True, 'import streamlit as st\n'), ((82254, 82267), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (82264, 82267), True, 'import streamlit as st\n'), ((82288, 82313), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (82298, 82313), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((82786, 82822), 'streamlit.expander', 'st.expander', (['"""Show HSV Color Spaces"""'], {}), "('Show HSV Color Spaces')\n", (82797, 82822), True, 'import streamlit as st\n'), ((82910, 82923), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (82920, 82923), True, 'import streamlit as st\n'), ((82942, 82978), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2HSV'], {}), '(image, cv.COLOR_BGR2HSV)\n', (82953, 82978), True, 'import cv2 as cv\n'), ((83446, 83485), 'streamlit.expander', 'st.expander', (['"""Show L*a*b* Color Spaces"""'], {}), "('Show L*a*b* Color Spaces')\n", (83457, 83485), True, 'import streamlit as st\n'), ((83576, 83589), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (83586, 83589), True, 'import streamlit as st\n'), ((83608, 83644), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2LAB'], {}), '(image, cv.COLOR_BGR2LAB)\n', (83619, 83644), True, 'import cv2 as cv\n'), ((84105, 84134), 'streamlit.expander', 'st.expander', (['"""Show Grayscale"""'], {}), "('Show Grayscale')\n", (84116, 84134), True, 'import streamlit as st\n'), ((84223, 84236), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', 
(84233, 84236), True, 'import streamlit as st\n'), ((84256, 84293), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (84267, 84293), True, 'import cv2 as cv\n'), ((90165, 90189), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (90179, 90189), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((90210, 90226), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (90219, 90226), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((93838, 93863), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (93848, 93863), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((7137, 7168), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""App Ratings"""'], {}), "('App Ratings')\n", (7153, 7168), True, 'import streamlit as st\n'), ((8652, 8686), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (8663, 8686), True, 'import streamlit as st\n'), ((8712, 8736), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (8726, 8736), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((8761, 8777), 
'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (8770, 8777), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((8871, 8908), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (8882, 8908), True, 'import cv2 as cv\n'), ((8935, 8967), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (8950, 8967), True, 'import cv2 as cv\n'), ((8984, 8999), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (8992, 8999), True, 'import streamlit as st\n'), ((9030, 9086), 'streamlit.expander', 'st.expander', (['"""Show Adaptive Thresholding"""'], {'expanded': '(True)'}), "('Show Adaptive Thresholding', expanded=True)\n", (9041, 9086), True, 'import streamlit as st\n'), ((9111, 9124), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (9121, 9124), True, 'import streamlit as st\n'), ((9158, 9210), 'cv2.threshold', 'cv.threshold', (['blurred', '(51)', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(blurred, 51, 255, cv.THRESH_BINARY_INV)\n', (9170, 9210), True, 'import cv2 as cv\n'), ((9550, 9618), 'cv2.threshold', 'cv.threshold', (['blurred', '(0)', '(255)', '(cv.THRESH_BINARY_INV | cv.THRESH_OTSU)'], {}), '(blurred, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)\n', (9562, 9618), True, 'import cv2 as cv\n'), ((10094, 10190), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['blurred', '(255)', 'cv.ADAPTIVE_THRESH_MEAN_C', 'cv.THRESH_BINARY_INV', '(21)', '(10)'], {}), '(blurred, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.\n THRESH_BINARY_INV, 21, 10)\n', (10114, 10190), True, 'import cv2 as cv\n'), ((10636, 10735), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['blurred', '(255)', 'cv.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv.THRESH_BINARY_INV', '(21)', '(4)'], {}), '(blurred, 
255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.\n THRESH_BINARY_INV, 21, 4)\n', (10656, 10735), True, 'import cv2 as cv\n'), ((11005, 11064), 'streamlit.expander', 'st.expander', (['"""Show Adaptive Thresholding Types Interactive"""'], {}), "('Show Adaptive Thresholding Types Interactive')\n", (11016, 11064), True, 'import streamlit as st\n'), ((11086, 11159), 'streamlit.slider', 'st.slider', (['"""Change Threshold value"""'], {'min_value': '(50)', 'max_value': '(255)', 'key': '"""1"""'}), "('Change Threshold value', min_value=50, max_value=255, key='1')\n", (11095, 11159), True, 'import streamlit as st\n'), ((11215, 11262), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_BINARY'], {}), '(blurred, x, 255, cv.THRESH_BINARY)\n', (11227, 11262), True, 'import cv2 as cv\n'), ((11293, 11344), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(blurred, x, 255, cv.THRESH_BINARY_INV)\n', (11305, 11344), True, 'import cv2 as cv\n'), ((11375, 11421), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_TRUNC'], {}), '(blurred, x, 255, cv.THRESH_TRUNC)\n', (11387, 11421), True, 'import cv2 as cv\n'), ((11452, 11499), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_TOZERO'], {}), '(blurred, x, 255, cv.THRESH_TOZERO)\n', (11464, 11499), True, 'import cv2 as cv\n'), ((11530, 11581), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_TOZERO_INV'], {}), '(blurred, x, 255, cv.THRESH_TOZERO_INV)\n', (11542, 11581), True, 'import cv2 as cv\n'), ((11798, 11811), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (11808, 11811), True, 'import streamlit as st\n'), ((12153, 12166), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (12163, 12166), True, 'import streamlit as st\n'), ((12577, 12611), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (12588, 12611), True, 'import streamlit as st\n'), 
((12637, 12672), 'utils_helpers.load_image', 'load_image', (['"""images/steve-jobs.jpg"""'], {}), "('images/steve-jobs.jpg')\n", (12647, 12672), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((12766, 12803), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (12777, 12803), True, 'import cv2 as cv\n'), ((12830, 12862), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (12845, 12862), True, 'import cv2 as cv\n'), ((12879, 12894), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (12887, 12894), True, 'import streamlit as st\n'), ((12925, 12981), 'streamlit.expander', 'st.expander', (['"""Show Adaptive Thresholding"""'], {'expanded': '(True)'}), "('Show Adaptive Thresholding', expanded=True)\n", (12936, 12981), True, 'import streamlit as st\n'), ((13006, 13019), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (13016, 13019), True, 'import streamlit as st\n'), ((13053, 13105), 'cv2.threshold', 'cv.threshold', (['blurred', '(51)', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(blurred, 51, 255, cv.THRESH_BINARY_INV)\n', (13065, 13105), True, 'import cv2 as cv\n'), ((13445, 13513), 'cv2.threshold', 'cv.threshold', (['blurred', '(0)', '(255)', '(cv.THRESH_BINARY_INV | cv.THRESH_OTSU)'], {}), '(blurred, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)\n', (13457, 13513), True, 'import cv2 as cv\n'), ((13989, 14085), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['blurred', '(255)', 'cv.ADAPTIVE_THRESH_MEAN_C', 'cv.THRESH_BINARY_INV', '(21)', '(10)'], {}), '(blurred, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.\n THRESH_BINARY_INV, 21, 10)\n', (14009, 14085), True, 'import cv2 as cv\n'), ((14531, 14630), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['blurred', '(255)', 
'cv.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv.THRESH_BINARY_INV', '(21)', '(4)'], {}), '(blurred, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.\n THRESH_BINARY_INV, 21, 4)\n', (14551, 14630), True, 'import cv2 as cv\n'), ((14900, 14959), 'streamlit.expander', 'st.expander', (['"""Show Adaptive Thresholding Types Interactive"""'], {}), "('Show Adaptive Thresholding Types Interactive')\n", (14911, 14959), True, 'import streamlit as st\n'), ((14981, 15054), 'streamlit.slider', 'st.slider', (['"""Change Threshold value"""'], {'min_value': '(50)', 'max_value': '(255)', 'key': '"""1"""'}), "('Change Threshold value', min_value=50, max_value=255, key='1')\n", (14990, 15054), True, 'import streamlit as st\n'), ((15110, 15157), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_BINARY'], {}), '(blurred, x, 255, cv.THRESH_BINARY)\n', (15122, 15157), True, 'import cv2 as cv\n'), ((15188, 15239), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(blurred, x, 255, cv.THRESH_BINARY_INV)\n', (15200, 15239), True, 'import cv2 as cv\n'), ((15270, 15316), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_TRUNC'], {}), '(blurred, x, 255, cv.THRESH_TRUNC)\n', (15282, 15316), True, 'import cv2 as cv\n'), ((15347, 15394), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_TOZERO'], {}), '(blurred, x, 255, cv.THRESH_TOZERO)\n', (15359, 15394), True, 'import cv2 as cv\n'), ((15425, 15476), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_TOZERO_INV'], {}), '(blurred, x, 255, cv.THRESH_TOZERO_INV)\n', (15437, 15476), True, 'import cv2 as cv\n'), ((15693, 15706), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (15703, 15706), True, 'import streamlit as st\n'), ((16048, 16061), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (16058, 16061), True, 'import streamlit as st\n'), ((16538, 16572), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], 
{}), "('Show Original Image')\n", (16549, 16572), True, 'import streamlit as st\n'), ((16598, 16622), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (16612, 16622), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((16647, 16663), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (16656, 16663), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((16757, 16794), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (16768, 16794), True, 'import cv2 as cv\n'), ((16821, 16853), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (16836, 16853), True, 'import cv2 as cv\n'), ((16870, 16885), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (16878, 16885), True, 'import streamlit as st\n'), ((16916, 16984), 'streamlit.expander', 'st.expander', (['"""Show Adaptive Thresholding Interactive"""'], {'expanded': '(True)'}), "('Show Adaptive Thresholding Interactive', expanded=True)\n", (16927, 16984), True, 'import streamlit as st\n'), ((17012, 17025), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (17022, 17025), True, 'import streamlit as st\n'), ((17162, 17213), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(blurred, x, 255, cv.THRESH_BINARY_INV)\n', (17174, 17213), True, 'import cv2 as cv\n'), ((17734, 17802), 'cv2.threshold', 'cv.threshold', (['blurred', '(0)', '(255)', '(cv.THRESH_BINARY_INV | cv.THRESH_OTSU)'], {}), '(blurred, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)\n', 
(17746, 17802), True, 'import cv2 as cv\n'), ((18439, 18534), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['blurred', '(255)', 'cv.ADAPTIVE_THRESH_MEAN_C', 'cv.THRESH_BINARY_INV', 'x', '(10)'], {}), '(blurred, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.\n THRESH_BINARY_INV, x, 10)\n', (18459, 18534), True, 'import cv2 as cv\n'), ((19125, 19223), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['blurred', '(255)', 'cv.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv.THRESH_BINARY_INV', 'x', '(4)'], {}), '(blurred, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.\n THRESH_BINARY_INV, x, 4)\n', (19145, 19223), True, 'import cv2 as cv\n'), ((19581, 19615), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (19592, 19615), True, 'import streamlit as st\n'), ((19641, 19676), 'utils_helpers.load_image', 'load_image', (['"""images/steve-jobs.jpg"""'], {}), "('images/steve-jobs.jpg')\n", (19651, 19676), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((19770, 19807), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (19781, 19807), True, 'import cv2 as cv\n'), ((19834, 19866), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (19849, 19866), True, 'import cv2 as cv\n'), ((19883, 19898), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (19891, 19898), True, 'import streamlit as st\n'), ((19929, 19997), 'streamlit.expander', 'st.expander', (['"""Show Adaptive Thresholding Interactive"""'], {'expanded': '(True)'}), "('Show Adaptive Thresholding Interactive', expanded=True)\n", (19940, 19997), True, 'import streamlit as st\n'), ((20025, 20038), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (20035, 20038), True, 'import streamlit as st\n'), 
((20175, 20226), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(blurred, x, 255, cv.THRESH_BINARY_INV)\n', (20187, 20226), True, 'import cv2 as cv\n'), ((20762, 20830), 'cv2.threshold', 'cv.threshold', (['blurred', '(0)', '(255)', '(cv.THRESH_BINARY_INV | cv.THRESH_OTSU)'], {}), '(blurred, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)\n', (20774, 20830), True, 'import cv2 as cv\n'), ((21467, 21562), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['blurred', '(255)', 'cv.ADAPTIVE_THRESH_MEAN_C', 'cv.THRESH_BINARY_INV', 'x', '(10)'], {}), '(blurred, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.\n THRESH_BINARY_INV, x, 10)\n', (21487, 21562), True, 'import cv2 as cv\n'), ((22153, 22251), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['blurred', '(255)', 'cv.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv.THRESH_BINARY_INV', 'x', '(4)'], {}), '(blurred, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.\n THRESH_BINARY_INV, x, 4)\n', (22173, 22251), True, 'import cv2 as cv\n'), ((28171, 28193), 'utils_helpers.convolve', 'convolve', (['gray', 'kernel'], {}), '(gray, kernel)\n', (28179, 28193), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((28225, 28254), 'cv2.filter2D', 'cv.filter2D', (['gray', '(-1)', 'kernel'], {}), '(gray, -1, kernel)\n', (28236, 28254), True, 'import cv2 as cv\n'), ((28332, 28345), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (28342, 28345), True, 'import streamlit as st\n'), ((28901, 28918), 'streamlit.markdown', 'st.markdown', (['code'], {}), '(code)\n', (28912, 28918), True, 'import streamlit as st\n'), ((28935, 30965), 'streamlit.code', 'st.code', (['"""\n# construct average blurring kernels used to smooth an image\nsmallBlur = np.ones((7, 7), dtype="float") * (1.0 / (7 * 7))\nlargeBlur = np.ones((21, 21), dtype="float") 
* (1.0 / (21 * 21))\n\n# construct a sharpening filter\nsharpen = np.array((\n [0, -1, 0],\n [-1, 5, -1],\n [0, -1, 0]), dtype="int")\n\n# construct the Laplacian kernel used to detect edge-like regions of an image\nlaplacian = np.array((\n [0, 1, 0],\n [1, -4, 1],\n [0, 1, 0]), dtype="int")\n\n# construct the Sobel x-axis kernel\nsobelX = np.array((\n [-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1]), dtype="int")\n\n# construct the Sobel y-axis kernel\nsobelY = np.array((\n [-1, -2, -1],\n [0, 0, 0],\n [1, 2, 1]), dtype="int")\n\n# construct the kernel bank, a list of kernels we\'re going to apply using both our \n# custom `convole` function and OpenCV\'s `filter2D` function\nkernelBank = (\n ("small_blur", smallBlur),\n ("large_blur", largeBlur),\n ("sharpen", sharpen),\n ("laplacian", laplacian),\n ("sobel_x", sobelX),\n ("sobel_y", sobelY)\n)\n\n# load the input image and convert it to grayscale\nimage = load_image(\'images/supermario.jpg\')\ngray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n\n# loop over the kernels\nwith st.spinner(\'Creating Convolutions please wait for it...\'):\n for (kernelName, kernel) in kernelBank:\n # apply the kernel to the grayscale image using both our custom \'convole\' \n # function and OpenCV\'s \'filter2D\' function\n st.write("[INFO] applying {} kernel".format(kernelName))\n convoleOutput = convolve(gray, kernel)\n opencvOutput = cv.filter2D(gray, -1, kernel)\n\n # show the output images\n col1, col2, col3 = st.columns(3)\n with col1:\n st.markdown("original")\n st.image(gray)\n with col2:\n st.write("{} - convole".format(kernelName)) \n st.image(convoleOutput)\n with col3:\n st.write("{} - opencv".format(kernelName))\n st.image(opencvOutput)"""'], {'language': 'language'}), '(\n """\n# construct average blurring kernels used to smooth an image\nsmallBlur = np.ones((7, 7), dtype="float") * (1.0 / (7 * 7))\nlargeBlur = np.ones((21, 21), dtype="float") * (1.0 / (21 * 21))\n\n# construct a sharpening filter\nsharpen = np.array((\n [0, -1, 
0],\n [-1, 5, -1],\n [0, -1, 0]), dtype="int")\n\n# construct the Laplacian kernel used to detect edge-like regions of an image\nlaplacian = np.array((\n [0, 1, 0],\n [1, -4, 1],\n [0, 1, 0]), dtype="int")\n\n# construct the Sobel x-axis kernel\nsobelX = np.array((\n [-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1]), dtype="int")\n\n# construct the Sobel y-axis kernel\nsobelY = np.array((\n [-1, -2, -1],\n [0, 0, 0],\n [1, 2, 1]), dtype="int")\n\n# construct the kernel bank, a list of kernels we\'re going to apply using both our \n# custom `convole` function and OpenCV\'s `filter2D` function\nkernelBank = (\n ("small_blur", smallBlur),\n ("large_blur", largeBlur),\n ("sharpen", sharpen),\n ("laplacian", laplacian),\n ("sobel_x", sobelX),\n ("sobel_y", sobelY)\n)\n\n# load the input image and convert it to grayscale\nimage = load_image(\'images/supermario.jpg\')\ngray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n\n# loop over the kernels\nwith st.spinner(\'Creating Convolutions please wait for it...\'):\n for (kernelName, kernel) in kernelBank:\n # apply the kernel to the grayscale image using both our custom \'convole\' \n # function and OpenCV\'s \'filter2D\' function\n st.write("[INFO] applying {} kernel".format(kernelName))\n convoleOutput = convolve(gray, kernel)\n opencvOutput = cv.filter2D(gray, -1, kernel)\n\n # show the output images\n col1, col2, col3 = st.columns(3)\n with col1:\n st.markdown("original")\n st.image(gray)\n with col2:\n st.write("{} - convole".format(kernelName)) \n st.image(convoleOutput)\n with col3:\n st.write("{} - opencv".format(kernelName))\n st.image(opencvOutput)"""\n , language=language)\n', (28942, 30965), True, 'import streamlit as st\n'), ((31012, 31055), 'streamlit.markdown', 'st.markdown', (['"""Source Code Convole Function"""'], {}), "('Source Code Convole Function')\n", (31023, 31055), True, 'import streamlit as st\n'), ((31072, 32639), 'streamlit.code', 'st.code', (['"""\ndef convolve(image, kernel):\n # grab the spatial dimensions of 
the image, along with\n # the spatial dimensions of the kernel\n (iH, iW) = image.shape[:2]\n (kH, kW) = kernel.shape[:2]\n\n # allocate memory for the output image, taking care to\n # "pad" the borders of the input image so the spatial\n # size (i.e., width and height) are not reduced\n pad = (kW - 1) // 2\n image = cv2.copyMakeBorder(image, pad, pad, pad, pad,\n cv2.BORDER_REPLICATE)\n output = np.zeros((iH, iW), dtype="float32")\n\n # loop over the input image, "sliding" the kernel across\n # each (x, y)-coordinate from left-to-right and top to\n # bottom\n for y in np.arange(pad, iH + pad):\n for x in np.arange(pad, iW + pad):\n # extract the ROI of the image by extracting the\n # *center* region of the current (x, y)-coordinates\n # dimensions\n roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]\n\n # perform the actual convolution by taking the\n # element-wise multiplicate between the ROI and\n # the kernel, then summing the matrix\n k = (roi * kernel).sum()\n\n # store the convolved value in the output (x,y)-\n # coordinate of the output image\n output[y - pad, x - pad] = k\n\n # rescale the output image to be in the range [0, 255]\n output = rescale_intensity(output, in_range=(0, 255))\n output = (output * 255).astype("uint8")\n\n # return the output image\n return output"""'], {'language': 'language'}), '(\n """\ndef convolve(image, kernel):\n # grab the spatial dimensions of the image, along with\n # the spatial dimensions of the kernel\n (iH, iW) = image.shape[:2]\n (kH, kW) = kernel.shape[:2]\n\n # allocate memory for the output image, taking care to\n # "pad" the borders of the input image so the spatial\n # size (i.e., width and height) are not reduced\n pad = (kW - 1) // 2\n image = cv2.copyMakeBorder(image, pad, pad, pad, pad,\n cv2.BORDER_REPLICATE)\n output = np.zeros((iH, iW), dtype="float32")\n\n # loop over the input image, "sliding" the kernel across\n # each (x, y)-coordinate from left-to-right and top to\n # bottom\n for y in 
np.arange(pad, iH + pad):\n for x in np.arange(pad, iW + pad):\n # extract the ROI of the image by extracting the\n # *center* region of the current (x, y)-coordinates\n # dimensions\n roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]\n\n # perform the actual convolution by taking the\n # element-wise multiplicate between the ROI and\n # the kernel, then summing the matrix\n k = (roi * kernel).sum()\n\n # store the convolved value in the output (x,y)-\n # coordinate of the output image\n output[y - pad, x - pad] = k\n\n # rescale the output image to be in the range [0, 255]\n output = rescale_intensity(output, in_range=(0, 255))\n output = (output * 255).astype("uint8")\n\n # return the output image\n return output"""\n , language=language)\n', (31079, 32639), True, 'import streamlit as st\n'), ((34488, 34516), 'streamlit.markdown', 'st.markdown', (['"""Wide Edge Map"""'], {}), "('Wide Edge Map')\n", (34499, 34516), True, 'import streamlit as st\n'), ((34533, 34547), 'streamlit.image', 'st.image', (['wide'], {}), '(wide)\n', (34541, 34547), True, 'import streamlit as st\n'), ((34587, 34614), 'streamlit.markdown', 'st.markdown', (['"""Mid Edge Map"""'], {}), "('Mid Edge Map')\n", (34598, 34614), True, 'import streamlit as st\n'), ((34631, 34644), 'streamlit.image', 'st.image', (['mid'], {}), '(mid)\n', (34639, 34644), True, 'import streamlit as st\n'), ((34684, 34713), 'streamlit.markdown', 'st.markdown', (['"""Tight Edge Map"""'], {}), "('Tight Edge Map')\n", (34695, 34713), True, 'import streamlit as st\n'), ((34730, 34745), 'streamlit.image', 'st.image', (['tight'], {}), '(tight)\n', (34738, 34745), True, 'import streamlit as st\n'), ((35777, 35843), 'streamlit.slider', 'st.slider', (['"""Select a range of values"""', '(10)', '(200)', '(10, 200)'], {'step': '(10)'}), "('Select a range of values', 10, 200, (10, 200), step=10)\n", (35786, 35843), True, 'import streamlit as st\n'), ((35908, 35947), 'cv2.Canny', 'cv.Canny', (['blurred', 'values[0]', 'values[1]'], 
{}), '(blurred, values[0], values[1])\n', (35916, 35947), True, 'import cv2 as cv\n'), ((35964, 35992), 'streamlit.markdown', 'st.markdown', (['"""Wide Edge Map"""'], {}), "('Wide Edge Map')\n", (35975, 35992), True, 'import streamlit as st\n'), ((36009, 36023), 'streamlit.image', 'st.image', (['wide'], {}), '(wide)\n', (36017, 36023), True, 'import streamlit as st\n'), ((36072, 36137), 'streamlit.slider', 'st.slider', (['"""Select a range of values"""', '(30)', '(150)', '(30, 150)'], {'step': '(5)'}), "('Select a range of values', 30, 150, (30, 150), step=5)\n", (36081, 36137), True, 'import streamlit as st\n'), ((36201, 36240), 'cv2.Canny', 'cv.Canny', (['blurred', 'values[0]', 'values[1]'], {}), '(blurred, values[0], values[1])\n', (36209, 36240), True, 'import cv2 as cv\n'), ((36257, 36284), 'streamlit.markdown', 'st.markdown', (['"""Mid Edge Map"""'], {}), "('Mid Edge Map')\n", (36268, 36284), True, 'import streamlit as st\n'), ((36301, 36314), 'streamlit.image', 'st.image', (['mid'], {}), '(mid)\n', (36309, 36314), True, 'import streamlit as st\n'), ((36363, 36422), 'streamlit.slider', 'st.slider', (['"""Select a range of values"""', '(200)', '(250)', '(200, 250)'], {}), "('Select a range of values', 200, 250, (200, 250))\n", (36372, 36422), True, 'import streamlit as st\n'), ((36488, 36527), 'cv2.Canny', 'cv.Canny', (['blurred', 'values[0]', 'values[1]'], {}), '(blurred, values[0], values[1])\n', (36496, 36527), True, 'import cv2 as cv\n'), ((36544, 36573), 'streamlit.markdown', 'st.markdown', (['"""Tight Edge Map"""'], {}), "('Tight Edge Map')\n", (36555, 36573), True, 'import streamlit as st\n'), ((36590, 36605), 'streamlit.image', 'st.image', (['tight'], {}), '(tight)\n', (36598, 36605), True, 'import streamlit as st\n'), ((37848, 37876), 'streamlit.markdown', 'st.markdown', (['"""Wide Edge Map"""'], {}), "('Wide Edge Map')\n", (37859, 37876), True, 'import streamlit as st\n'), ((37893, 37907), 'streamlit.image', 'st.image', (['wide'], {}), '(wide)\n', 
(37901, 37907), True, 'import streamlit as st\n'), ((37947, 37974), 'streamlit.markdown', 'st.markdown', (['"""Mid Edge Map"""'], {}), "('Mid Edge Map')\n", (37958, 37974), True, 'import streamlit as st\n'), ((37991, 38004), 'streamlit.image', 'st.image', (['mid'], {}), '(mid)\n', (37999, 38004), True, 'import streamlit as st\n'), ((38044, 38073), 'streamlit.markdown', 'st.markdown', (['"""Tight Edge Map"""'], {}), "('Tight Edge Map')\n", (38055, 38073), True, 'import streamlit as st\n'), ((38090, 38105), 'streamlit.image', 'st.image', (['tight'], {}), '(tight)\n', (38098, 38105), True, 'import streamlit as st\n'), ((39038, 39104), 'streamlit.slider', 'st.slider', (['"""Select a range of values"""', '(10)', '(200)', '(10, 200)'], {'step': '(10)'}), "('Select a range of values', 10, 200, (10, 200), step=10)\n", (39047, 39104), True, 'import streamlit as st\n'), ((39169, 39208), 'cv2.Canny', 'cv.Canny', (['blurred', 'values[0]', 'values[1]'], {}), '(blurred, values[0], values[1])\n', (39177, 39208), True, 'import cv2 as cv\n'), ((39225, 39253), 'streamlit.markdown', 'st.markdown', (['"""Wide Edge Map"""'], {}), "('Wide Edge Map')\n", (39236, 39253), True, 'import streamlit as st\n'), ((39270, 39284), 'streamlit.image', 'st.image', (['wide'], {}), '(wide)\n', (39278, 39284), True, 'import streamlit as st\n'), ((39333, 39398), 'streamlit.slider', 'st.slider', (['"""Select a range of values"""', '(30)', '(150)', '(30, 150)'], {'step': '(5)'}), "('Select a range of values', 30, 150, (30, 150), step=5)\n", (39342, 39398), True, 'import streamlit as st\n'), ((39462, 39501), 'cv2.Canny', 'cv.Canny', (['blurred', 'values[0]', 'values[1]'], {}), '(blurred, values[0], values[1])\n', (39470, 39501), True, 'import cv2 as cv\n'), ((39518, 39545), 'streamlit.markdown', 'st.markdown', (['"""Mid Edge Map"""'], {}), "('Mid Edge Map')\n", (39529, 39545), True, 'import streamlit as st\n'), ((39562, 39575), 'streamlit.image', 'st.image', (['mid'], {}), '(mid)\n', (39570, 39575), True, 
'import streamlit as st\n'), ((39624, 39683), 'streamlit.slider', 'st.slider', (['"""Select a range of values"""', '(200)', '(250)', '(200, 250)'], {}), "('Select a range of values', 200, 250, (200, 250))\n", (39633, 39683), True, 'import streamlit as st\n'), ((39749, 39788), 'cv2.Canny', 'cv.Canny', (['blurred', 'values[0]', 'values[1]'], {}), '(blurred, values[0], values[1])\n', (39757, 39788), True, 'import cv2 as cv\n'), ((39805, 39834), 'streamlit.markdown', 'st.markdown', (['"""Tight Edge Map"""'], {}), "('Tight Edge Map')\n", (39816, 39834), True, 'import streamlit as st\n'), ((39851, 39866), 'streamlit.image', 'st.image', (['tight'], {}), '(tight)\n', (39859, 39866), True, 'import streamlit as st\n'), ((40416, 40478), 'streamlit.expander', 'st.expander', (['"""Show Sobel/Scharr Image Gradient"""'], {'expanded': '(True)'}), "('Show Sobel/Scharr Image Gradient', expanded=True)\n", (40427, 40478), True, 'import streamlit as st\n'), ((40504, 40528), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (40518, 40528), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((40553, 40569), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (40562, 40569), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((40593, 40630), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (40604, 40630), True, 'import cv2 as cv\n'), ((40868, 40914), 'streamlit.selectbox', 'st.selectbox', (['"""Operators"""', "('sobel', 'scharr')"], {}), "('Operators', ('sobel', 'scharr'))\n", (40880, 40914), True, 'import 
streamlit as st\n'), ((41061, 41099), 'streamlit.success', 'st.success', (['f"""Operator Selected: {op}"""'], {}), "(f'Operator Selected: {op}')\n", (41071, 41099), True, 'import streamlit as st\n'), ((41181, 41238), 'cv2.Sobel', 'cv.Sobel', (['gray'], {'ddepth': 'cv.CV_32F', 'dx': '(1)', 'dy': '(0)', 'ksize': 'ksize'}), '(gray, ddepth=cv.CV_32F, dx=1, dy=0, ksize=ksize)\n', (41189, 41238), True, 'import cv2 as cv\n'), ((41260, 41317), 'cv2.Sobel', 'cv.Sobel', (['gray'], {'ddepth': 'cv.CV_32F', 'dx': '(0)', 'dy': '(1)', 'ksize': 'ksize'}), '(gray, ddepth=cv.CV_32F, dx=0, dy=1, ksize=ksize)\n', (41268, 41317), True, 'import cv2 as cv\n'), ((41620, 41642), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['gX'], {}), '(gX)\n', (41638, 41642), True, 'import cv2 as cv\n'), ((41664, 41686), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['gY'], {}), '(gY)\n', (41682, 41686), True, 'import cv2 as cv\n'), ((41790, 41825), 'cv2.addWeighted', 'cv.addWeighted', (['gX', '(0.5)', 'gY', '(0.5)', '(0)'], {}), '(gX, 0.5, gY, 0.5, 0)\n', (41804, 41825), True, 'import cv2 as cv\n'), ((41891, 41904), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (41901, 41904), True, 'import streamlit as st\n'), ((42748, 42810), 'streamlit.expander', 'st.expander', (['"""Show Sobel/Scharr Image Gradient"""'], {'expanded': '(True)'}), "('Show Sobel/Scharr Image Gradient', expanded=True)\n", (42759, 42810), True, 'import streamlit as st\n'), ((42836, 42861), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (42846, 42861), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((42885, 42922), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (42896, 42922), True, 'import cv2 as cv\n'), ((43160, 43206), 
'streamlit.selectbox', 'st.selectbox', (['"""Operators"""', "('sobel', 'scharr')"], {}), "('Operators', ('sobel', 'scharr'))\n", (43172, 43206), True, 'import streamlit as st\n'), ((43353, 43391), 'streamlit.success', 'st.success', (['f"""Operator Selected: {op}"""'], {}), "(f'Operator Selected: {op}')\n", (43363, 43391), True, 'import streamlit as st\n'), ((43473, 43530), 'cv2.Sobel', 'cv.Sobel', (['gray'], {'ddepth': 'cv.CV_32F', 'dx': '(1)', 'dy': '(0)', 'ksize': 'ksize'}), '(gray, ddepth=cv.CV_32F, dx=1, dy=0, ksize=ksize)\n', (43481, 43530), True, 'import cv2 as cv\n'), ((43552, 43609), 'cv2.Sobel', 'cv.Sobel', (['gray'], {'ddepth': 'cv.CV_32F', 'dx': '(0)', 'dy': '(1)', 'ksize': 'ksize'}), '(gray, ddepth=cv.CV_32F, dx=0, dy=1, ksize=ksize)\n', (43560, 43609), True, 'import cv2 as cv\n'), ((43912, 43934), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['gX'], {}), '(gX)\n', (43930, 43934), True, 'import cv2 as cv\n'), ((43956, 43978), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['gY'], {}), '(gY)\n', (43974, 43978), True, 'import cv2 as cv\n'), ((44082, 44117), 'cv2.addWeighted', 'cv.addWeighted', (['gX', '(0.5)', 'gY', '(0.5)', '(0)'], {}), '(gX, 0.5, gY, 0.5, 0)\n', (44096, 44117), True, 'import cv2 as cv\n'), ((44183, 44196), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (44193, 44196), True, 'import streamlit as st\n'), ((46021, 46111), 'streamlit.expander', 'st.expander', (['"""Show Magnitude - Orientation Image Gradients - Streamlit"""'], {'expanded': '(True)'}), "('Show Magnitude - Orientation Image Gradients - Streamlit',\n expanded=True)\n", (46032, 46111), True, 'import streamlit as st\n'), ((46132, 46145), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (46142, 46145), True, 'import streamlit as st\n'), ((47537, 47609), 'streamlit.expander', 'st.expander', (['"""Show Magnitude - Orientation Image Gradients - Mapplotlib"""'], {}), "('Show Magnitude - Orientation Image Gradients - Mapplotlib')\n", (47548, 47609), True, 'import 
streamlit as st\n'), ((47660, 47678), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (47676, 47678), True, 'from matplotlib import pyplot as plt\n'), ((47695, 47709), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (47704, 47709), True, 'import streamlit as st\n'), ((48611, 48701), 'streamlit.expander', 'st.expander', (['"""Show Magnitude - Orientation Image Gradients - Streamlit"""'], {'expanded': '(True)'}), "('Show Magnitude - Orientation Image Gradients - Streamlit',\n expanded=True)\n", (48622, 48701), True, 'import streamlit as st\n'), ((48722, 48735), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (48732, 48735), True, 'import streamlit as st\n'), ((50120, 50192), 'streamlit.expander', 'st.expander', (['"""Show Magnitude - Orientation Image Gradients - Mapplotlib"""'], {}), "('Show Magnitude - Orientation Image Gradients - Mapplotlib')\n", (50131, 50192), True, 'import streamlit as st\n'), ((50243, 50261), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (50259, 50261), True, 'from matplotlib import pyplot as plt\n'), ((50278, 50292), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (50287, 50292), True, 'import streamlit as st\n'), ((52050, 52084), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (52061, 52084), True, 'import streamlit as st\n'), ((52102, 52125), 'streamlit.markdown', 'st.markdown', (['"""Original"""'], {}), "('Original')\n", (52113, 52125), True, 'import streamlit as st\n'), ((52142, 52157), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (52150, 52157), True, 'import streamlit as st\n'), ((52175, 52228), 'streamlit.expander', 'st.expander', (['"""Show Morphological Hats"""'], {'expanded': '(True)'}), "('Show Morphological Hats', expanded=True)\n", (52186, 52228), True, 'import streamlit as st\n'), ((52253, 52266), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (52263, 52266), True, 
'import streamlit as st\n'), ((53491, 53525), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (53502, 53525), True, 'import streamlit as st\n'), ((53543, 53566), 'streamlit.markdown', 'st.markdown', (['"""Original"""'], {}), "('Original')\n", (53554, 53566), True, 'import streamlit as st\n'), ((53583, 53598), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (53591, 53598), True, 'import streamlit as st\n'), ((53616, 53669), 'streamlit.expander', 'st.expander', (['"""Show Morphological Hats"""'], {'expanded': '(True)'}), "('Show Morphological Hats', expanded=True)\n", (53627, 53669), True, 'import streamlit as st\n'), ((53694, 53707), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (53704, 53707), True, 'import streamlit as st\n'), ((54581, 54650), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Erosion"""'], {'expanded': '(True)'}), "('Show Morphological Operations - Erosion', expanded=True)\n", (54592, 54650), True, 'import streamlit as st\n'), ((54675, 54688), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (54685, 54688), True, 'import streamlit as st\n'), ((55193, 55248), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Dilation"""'], {}), "('Show Morphological Operations - Dilation')\n", (55204, 55248), True, 'import streamlit as st\n'), ((55273, 55286), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (55283, 55286), True, 'import streamlit as st\n'), ((55797, 55851), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Opening"""'], {}), "('Show Morphological Operations - Opening')\n", (55808, 55851), True, 'import streamlit as st\n'), ((55959, 55972), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (55969, 55972), True, 'import streamlit as st\n'), ((56825, 56879), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Closing"""'], {}), "('Show 
Morphological Operations - Closing')\n", (56836, 56879), True, 'import streamlit as st\n'), ((56904, 56917), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (56914, 56917), True, 'import streamlit as st\n'), ((57759, 57814), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Gradient"""'], {}), "('Show Morphological Operations - Gradient')\n", (57770, 57814), True, 'import streamlit as st\n'), ((57839, 57852), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (57849, 57852), True, 'import streamlit as st\n'), ((58701, 58796), 'streamlit.expander', 'st.expander', (['"""Show Interactive Morphological Operations - Erosion, Dilation"""'], {'expanded': '(True)'}), "('Show Interactive Morphological Operations - Erosion, Dilation',\n expanded=True)\n", (58712, 58796), True, 'import streamlit as st\n'), ((58814, 58864), 'streamlit.number_input', 'st.number_input', (['"""Erored-Dilated Iterations"""', '(1)', '(6)'], {}), "('Erored-Dilated Iterations', 1, 6)\n", (58829, 58864), True, 'import streamlit as st\n'), ((58888, 58901), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (58898, 58901), True, 'import streamlit as st\n'), ((59596, 59687), 'streamlit.expander', 'st.expander', (['"""Show Interactive Morphological Operations - Opening, Closing & Gradient"""'], {}), "(\n 'Show Interactive Morphological Operations - Opening, Closing & Gradient')\n", (59607, 59687), True, 'import streamlit as st\n'), ((59705, 59778), 'streamlit.number_input', 'st.number_input', (['"""Opening, Closing & Gradient Kernel Size"""', '(1)', '(11)'], {'step': '(2)'}), "('Opening, Closing & Gradient Kernel Size', 1, 11, step=2)\n", (59720, 59778), True, 'import streamlit as st\n'), ((59958, 59971), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (59968, 59971), True, 'import streamlit as st\n'), ((60096, 60145), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(kX, kY)'], {}), '(cv.MORPH_RECT, (kX, kY))\n', 
(60120, 60145), True, 'import cv2 as cv\n'), ((60172, 60216), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_OPEN', 'kernel'], {}), '(gray, cv.MORPH_OPEN, kernel)\n', (60187, 60216), True, 'import cv2 as cv\n'), ((60531, 60580), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(kX, kY)'], {}), '(cv.MORPH_RECT, (kX, kY))\n', (60555, 60580), True, 'import cv2 as cv\n'), ((60607, 60652), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_CLOSE', 'kernel'], {}), '(gray, cv.MORPH_CLOSE, kernel)\n', (60622, 60652), True, 'import cv2 as cv\n'), ((60968, 61017), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(kX, kY)'], {}), '(cv.MORPH_RECT, (kX, kY))\n', (60992, 61017), True, 'import cv2 as cv\n'), ((61045, 61093), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_GRADIENT', 'kernel'], {}), '(gray, cv.MORPH_GRADIENT, kernel)\n', (61060, 61093), True, 'import cv2 as cv\n'), ((61612, 61681), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Erosion"""'], {'expanded': '(True)'}), "('Show Morphological Operations - Erosion', expanded=True)\n", (61623, 61681), True, 'import streamlit as st\n'), ((61706, 61719), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (61716, 61719), True, 'import streamlit as st\n'), ((62224, 62279), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Dilation"""'], {}), "('Show Morphological Operations - Dilation')\n", (62235, 62279), True, 'import streamlit as st\n'), ((62304, 62317), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (62314, 62317), True, 'import streamlit as st\n'), ((62828, 62882), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Opening"""'], {}), "('Show Morphological Operations - Opening')\n", (62839, 62882), True, 'import streamlit as st\n'), ((62990, 63003), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (63000, 63003), True, 'import 
streamlit as st\n'), ((63856, 63910), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Closing"""'], {}), "('Show Morphological Operations - Closing')\n", (63867, 63910), True, 'import streamlit as st\n'), ((63935, 63948), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (63945, 63948), True, 'import streamlit as st\n'), ((64790, 64845), 'streamlit.expander', 'st.expander', (['"""Show Morphological Operations - Gradient"""'], {}), "('Show Morphological Operations - Gradient')\n", (64801, 64845), True, 'import streamlit as st\n'), ((64870, 64883), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (64880, 64883), True, 'import streamlit as st\n'), ((65732, 65827), 'streamlit.expander', 'st.expander', (['"""Show Interactive Morphological Operations - Erosion, Dilation"""'], {'expanded': '(True)'}), "('Show Interactive Morphological Operations - Erosion, Dilation',\n expanded=True)\n", (65743, 65827), True, 'import streamlit as st\n'), ((65845, 65895), 'streamlit.number_input', 'st.number_input', (['"""Erored-Dilated Iterations"""', '(1)', '(6)'], {}), "('Erored-Dilated Iterations', 1, 6)\n", (65860, 65895), True, 'import streamlit as st\n'), ((65919, 65932), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (65929, 65932), True, 'import streamlit as st\n'), ((66627, 66718), 'streamlit.expander', 'st.expander', (['"""Show Interactive Morphological Operations - Opening, Closing & Gradient"""'], {}), "(\n 'Show Interactive Morphological Operations - Opening, Closing & Gradient')\n", (66638, 66718), True, 'import streamlit as st\n'), ((66736, 66809), 'streamlit.number_input', 'st.number_input', (['"""Opening, Closing & Gradient Kernel Size"""', '(1)', '(11)'], {'step': '(2)'}), "('Opening, Closing & Gradient Kernel Size', 1, 11, step=2)\n", (66751, 66809), True, 'import streamlit as st\n'), ((66831, 66924), 'streamlit.number_input', 'st.number_input', (['"""Opening, Closing & Gradient Kernel Size"""', 'kX', '(11)'], 
{'step': '(2)', 'disabled': '(True)'}), "('Opening, Closing & Gradient Kernel Size', kX, 11, step=2,\n disabled=True)\n", (66846, 66924), True, 'import streamlit as st\n'), ((66984, 66997), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (66994, 66997), True, 'import streamlit as st\n'), ((67122, 67171), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(kX, kY)'], {}), '(cv.MORPH_RECT, (kX, kY))\n', (67146, 67171), True, 'import cv2 as cv\n'), ((67198, 67242), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_OPEN', 'kernel'], {}), '(gray, cv.MORPH_OPEN, kernel)\n', (67213, 67242), True, 'import cv2 as cv\n'), ((67557, 67606), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(kX, kY)'], {}), '(cv.MORPH_RECT, (kX, kY))\n', (67581, 67606), True, 'import cv2 as cv\n'), ((67633, 67678), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_CLOSE', 'kernel'], {}), '(gray, cv.MORPH_CLOSE, kernel)\n', (67648, 67678), True, 'import cv2 as cv\n'), ((67994, 68043), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(kX, kY)'], {}), '(cv.MORPH_RECT, (kX, kY))\n', (68018, 68043), True, 'import cv2 as cv\n'), ((68071, 68119), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_GRADIENT', 'kernel'], {}), '(gray, cv.MORPH_GRADIENT, kernel)\n', (68086, 68119), True, 'import cv2 as cv\n'), ((69346, 69380), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (69357, 69380), True, 'import streamlit as st\n'), ((69398, 69419), 'streamlit.markdown', 'st.markdown', (['original'], {}), '(original)\n', (69409, 69419), True, 'import streamlit as st\n'), ((69436, 69451), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (69444, 69451), True, 'import streamlit as st\n'), ((69661, 69715), 'streamlit.expander', 'st.expander', (['"""Show Simple Thresholding"""'], {'expanded': '(True)'}), "('Show Simple Thresholding', 
expanded=True)\n", (69672, 69715), True, 'import streamlit as st\n'), ((69740, 69753), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (69750, 69753), True, 'import streamlit as st\n'), ((70084, 70137), 'cv2.threshold', 'cv.threshold', (['blurred', '(200)', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(blurred, 200, 255, cv.THRESH_BINARY_INV)\n', (70096, 70137), True, 'import cv2 as cv\n'), ((70468, 70517), 'cv2.threshold', 'cv.threshold', (['blurred', '(200)', '(255)', 'cv.THRESH_BINARY'], {}), '(blurred, 200, 255, cv.THRESH_BINARY)\n', (70480, 70517), True, 'import cv2 as cv\n'), ((70815, 70859), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'threshInv'}), '(image, image, mask=threshInv)\n', (70829, 70859), True, 'import cv2 as cv\n'), ((71074, 71133), 'streamlit.expander', 'st.expander', (['"""Show Simple Thresholding Auto"""'], {'expanded': '(True)'}), "('Show Simple Thresholding Auto', expanded=True)\n", (71085, 71133), True, 'import streamlit as st\n'), ((71155, 71219), 'streamlit.slider', 'st.slider', (['"""Change Threshold value"""'], {'min_value': '(50)', 'max_value': '(255)'}), "('Change Threshold value', min_value=50, max_value=255)\n", (71164, 71219), True, 'import streamlit as st\n'), ((71247, 71260), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (71257, 71260), True, 'import streamlit as st\n'), ((71591, 71642), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(blurred, x, 255, cv.THRESH_BINARY_INV)\n', (71603, 71642), True, 'import cv2 as cv\n'), ((71973, 72020), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_BINARY'], {}), '(blurred, x, 255, cv.THRESH_BINARY)\n', (71985, 72020), True, 'import cv2 as cv\n'), ((72318, 72362), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'threshInv'}), '(image, image, mask=threshInv)\n', (72332, 72362), True, 'import cv2 as cv\n'), ((72636, 72670), 'streamlit.expander', 'st.expander', 
(['"""Show Original Image"""'], {}), "('Show Original Image')\n", (72647, 72670), True, 'import streamlit as st\n'), ((72688, 72709), 'streamlit.markdown', 'st.markdown', (['original'], {}), '(original)\n', (72699, 72709), True, 'import streamlit as st\n'), ((72726, 72741), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (72734, 72741), True, 'import streamlit as st\n'), ((72951, 73005), 'streamlit.expander', 'st.expander', (['"""Show Simple Thresholding"""'], {'expanded': '(True)'}), "('Show Simple Thresholding', expanded=True)\n", (72962, 73005), True, 'import streamlit as st\n'), ((73030, 73043), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (73040, 73043), True, 'import streamlit as st\n'), ((73374, 73427), 'cv2.threshold', 'cv.threshold', (['blurred', '(200)', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(blurred, 200, 255, cv.THRESH_BINARY_INV)\n', (73386, 73427), True, 'import cv2 as cv\n'), ((73758, 73807), 'cv2.threshold', 'cv.threshold', (['blurred', '(200)', '(255)', 'cv.THRESH_BINARY'], {}), '(blurred, 200, 255, cv.THRESH_BINARY)\n', (73770, 73807), True, 'import cv2 as cv\n'), ((74105, 74149), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'threshInv'}), '(image, image, mask=threshInv)\n', (74119, 74149), True, 'import cv2 as cv\n'), ((74364, 74423), 'streamlit.expander', 'st.expander', (['"""Show Simple Thresholding Auto"""'], {'expanded': '(True)'}), "('Show Simple Thresholding Auto', expanded=True)\n", (74375, 74423), True, 'import streamlit as st\n'), ((74445, 74509), 'streamlit.slider', 'st.slider', (['"""Change Threshold value"""'], {'min_value': '(50)', 'max_value': '(255)'}), "('Change Threshold value', min_value=50, max_value=255)\n", (74454, 74509), True, 'import streamlit as st\n'), ((74537, 74550), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (74547, 74550), True, 'import streamlit as st\n'), ((74881, 74932), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 
'cv.THRESH_BINARY_INV'], {}), '(blurred, x, 255, cv.THRESH_BINARY_INV)\n', (74893, 74932), True, 'import cv2 as cv\n'), ((75263, 75310), 'cv2.threshold', 'cv.threshold', (['blurred', 'x', '(255)', 'cv.THRESH_BINARY'], {}), '(blurred, x, 255, cv.THRESH_BINARY)\n', (75275, 75310), True, 'import cv2 as cv\n'), ((75608, 75652), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'threshInv'}), '(image, image, mask=threshInv)\n', (75622, 75652), True, 'import cv2 as cv\n'), ((76134, 76168), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (76145, 76168), True, 'import streamlit as st\n'), ((76186, 76206), 'streamlit.markdown', 'st.markdown', (['"""Image"""'], {}), "('Image')\n", (76197, 76206), True, 'import streamlit as st\n'), ((76223, 76238), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (76231, 76238), True, 'import streamlit as st\n'), ((76448, 76502), 'streamlit.expander', 'st.expander', (['"""Show Otsu\'s Thresholding"""'], {'expanded': '(True)'}), '("Show Otsu\'s Thresholding", expanded=True)\n', (76459, 76502), True, 'import streamlit as st\n'), ((76527, 76540), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (76537, 76540), True, 'import streamlit as st\n'), ((76702, 76770), 'cv2.threshold', 'cv.threshold', (['blurred', '(0)', '(255)', '(cv.THRESH_BINARY_INV | cv.THRESH_OTSU)'], {}), '(blurred, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)\n', (76714, 76770), True, 'import cv2 as cv\n'), ((77180, 77224), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'threshInv'}), '(image, image, mask=threshInv)\n', (77194, 77224), True, 'import cv2 as cv\n'), ((77562, 77596), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (77573, 77596), True, 'import streamlit as st\n'), ((77614, 77634), 'streamlit.markdown', 'st.markdown', (['"""Image"""'], {}), "('Image')\n", (77625, 77634), True, 'import streamlit as 
st\n'), ((77651, 77666), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (77659, 77666), True, 'import streamlit as st\n'), ((77876, 77930), 'streamlit.expander', 'st.expander', (['"""Show Otsu\'s Thresholding"""'], {'expanded': '(True)'}), '("Show Otsu\'s Thresholding", expanded=True)\n', (77887, 77930), True, 'import streamlit as st\n'), ((77955, 77968), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (77965, 77968), True, 'import streamlit as st\n'), ((78130, 78198), 'cv2.threshold', 'cv.threshold', (['blurred', '(0)', '(255)', '(cv.THRESH_BINARY_INV | cv.THRESH_OTSU)'], {}), '(blurred, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)\n', (78142, 78198), True, 'import cv2 as cv\n'), ((78608, 78652), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'threshInv'}), '(image, image, mask=threshInv)\n', (78622, 78652), True, 'import cv2 as cv\n'), ((79809, 79840), 'streamlit.markdown', 'st.markdown', (['"""RGB Color Spaces"""'], {}), "('RGB Color Spaces')\n", (79820, 79840), True, 'import streamlit as st\n'), ((79857, 79872), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (79865, 79872), True, 'import streamlit as st\n'), ((80474, 80505), 'streamlit.markdown', 'st.markdown', (['"""HSV Color Spaces"""'], {}), "('HSV Color Spaces')\n", (80485, 80505), True, 'import streamlit as st\n'), ((80522, 80535), 'streamlit.image', 'st.image', (['hsv'], {}), '(hsv)\n', (80530, 80535), True, 'import streamlit as st\n'), ((81140, 81161), 'streamlit.markdown', 'st.markdown', (['"""L*a*b*"""'], {}), "('L*a*b*')\n", (81151, 81161), True, 'import streamlit as st\n'), ((81178, 81191), 'streamlit.image', 'st.image', (['lab'], {}), '(lab)\n', (81186, 81191), True, 'import streamlit as st\n'), ((81863, 81927), 'utils_helpers.download_button1', 'download_button1', (['image', 'button', 'download', 'mime_type'], {'key': 'f"""2.1"""'}), "(image, button, download, mime_type, key=f'2.1')\n", (81879, 81927), False, 'from utils_helpers import 
auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((82044, 82108), 'utils_helpers.download_button1', 'download_button1', (['image', 'button', 'download', 'mime_type'], {'key': 'f"""2.2"""'}), "(image, button, download, mime_type, key=f'2.2')\n", (82060, 82108), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((82356, 82387), 'streamlit.markdown', 'st.markdown', (['"""RGB Color Spaces"""'], {}), "('RGB Color Spaces')\n", (82367, 82387), True, 'import streamlit as st\n'), ((82404, 82419), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (82412, 82419), True, 'import streamlit as st\n'), ((83021, 83052), 'streamlit.markdown', 'st.markdown', (['"""HSV Color Spaces"""'], {}), "('HSV Color Spaces')\n", (83032, 83052), True, 'import streamlit as st\n'), ((83069, 83082), 'streamlit.image', 'st.image', (['hsv'], {}), '(hsv)\n', (83077, 83082), True, 'import streamlit as st\n'), ((83687, 83708), 'streamlit.markdown', 'st.markdown', (['"""L*a*b*"""'], {}), "('L*a*b*')\n", (83698, 83708), True, 'import streamlit as st\n'), ((83725, 83738), 'streamlit.image', 'st.image', (['lab'], {}), '(lab)\n', (83733, 83738), True, 'import streamlit as st\n'), ((84410, 84474), 'utils_helpers.download_button1', 'download_button1', (['image', 'button', 'download', 'mime_type'], {'key': 'f"""2.1"""'}), "(image, button, download, mime_type, key=f'2.1')\n", (84426, 84474), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((84591, 84655), 
'utils_helpers.download_button1', 'download_button1', (['image', 'button', 'download', 'mime_type'], {'key': 'f"""2.2"""'}), "(image, button, download, mime_type, key=f'2.2')\n", (84607, 84655), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((85524, 85558), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (85535, 85558), True, 'import streamlit as st\n'), ((85584, 85608), 'utils_helpers.load_image_PIL', 'load_image_PIL', (['img_file'], {}), '(img_file)\n', (85598, 85608), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((85633, 85649), 'utils_helpers.converted', 'converted', (['image'], {}), '(image)\n', (85642, 85649), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((85666, 85681), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (85674, 85681), True, 'import streamlit as st\n'), ((85815, 85863), 'streamlit.expander', 'st.expander', (['"""Bilateral Blurring"""'], {'expanded': '(True)'}), "('Bilateral Blurring', expanded=True)\n", (85826, 85863), True, 'import streamlit as st\n'), ((85881, 85915), 'streamlit.subheader', 'st.subheader', (['"""Bilateral Blurring"""'], {}), "('Bilateral Blurring')\n", (85893, 85915), True, 'import streamlit as st\n'), ((85939, 85952), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (85949, 85952), True, 'import streamlit as st\n'), ((86737, 86782), 'streamlit.expander', 
'st.expander', (['"""Bilateral Blurring Interactive"""'], {}), "('Bilateral Blurring Interactive')\n", (86748, 86782), True, 'import streamlit as st\n'), ((86800, 86846), 'streamlit.subheader', 'st.subheader', (['"""Bilateral Blurring Interactive"""'], {}), "('Bilateral Blurring Interactive')\n", (86812, 86846), True, 'import streamlit as st\n'), ((86870, 86883), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (86880, 86883), True, 'import streamlit as st\n'), ((87275, 87311), 'cv2.bilateralFilter', 'cv.bilateralFilter', (['image', 'd', 'sc', 'ss'], {}), '(image, d, sc, ss)\n', (87293, 87311), True, 'import cv2 as cv\n'), ((87487, 87505), 'streamlit.markdown', 'st.markdown', (['title'], {}), '(title)\n', (87498, 87505), True, 'import streamlit as st\n'), ((87522, 87539), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (87530, 87539), True, 'import streamlit as st\n'), ((87556, 87621), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(blurred, button, download, mime_type, key='1.1')\n", (87572, 87621), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((87657, 87691), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (87668, 87691), True, 'import streamlit as st\n'), ((87717, 87742), 'utils_helpers.load_image', 'load_image', (['default_image'], {}), '(default_image)\n', (87727, 87742), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((87759, 87774), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (87767, 87774), 
True, 'import streamlit as st\n'), ((87908, 87956), 'streamlit.expander', 'st.expander', (['"""Bilateral Blurring"""'], {'expanded': '(True)'}), "('Bilateral Blurring', expanded=True)\n", (87919, 87956), True, 'import streamlit as st\n'), ((87974, 88008), 'streamlit.subheader', 'st.subheader', (['"""Bilateral Blurring"""'], {}), "('Bilateral Blurring')\n", (87986, 88008), True, 'import streamlit as st\n'), ((88032, 88045), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (88042, 88045), True, 'import streamlit as st\n'), ((88830, 88875), 'streamlit.expander', 'st.expander', (['"""Bilateral Blurring Interactive"""'], {}), "('Bilateral Blurring Interactive')\n", (88841, 88875), True, 'import streamlit as st\n'), ((88893, 88939), 'streamlit.subheader', 'st.subheader', (['"""Bilateral Blurring Interactive"""'], {}), "('Bilateral Blurring Interactive')\n", (88905, 88939), True, 'import streamlit as st\n'), ((88963, 88976), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (88973, 88976), True, 'import streamlit as st\n'), ((89368, 89404), 'cv2.bilateralFilter', 'cv.bilateralFilter', (['image', 'd', 'sc', 'ss'], {}), '(image, d, sc, ss)\n', (89386, 89404), True, 'import cv2 as cv\n'), ((89580, 89598), 'streamlit.markdown', 'st.markdown', (['title'], {}), '(title)\n', (89591, 89598), True, 'import streamlit as st\n'), ((89615, 89632), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (89623, 89632), True, 'import streamlit as st\n'), ((89649, 89714), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(blurred, button, download, mime_type, key='1.1')\n", (89665, 89714), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((90244, 90278), 'streamlit.expander', 'st.expander', 
(['"""Show Original Image"""'], {}), "('Show Original Image')\n", (90255, 90278), True, 'import streamlit as st\n'), ((90296, 90311), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (90304, 90311), True, 'import streamlit as st\n'), ((90382, 90429), 'streamlit.expander', 'st.expander', (['"""Show Average Blur"""'], {'expanded': '(True)'}), "('Show Average Blur', expanded=True)\n", (90393, 90429), True, 'import streamlit as st\n'), ((90454, 90467), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (90464, 90467), True, 'import streamlit as st\n'), ((90997, 91030), 'streamlit.expander', 'st.expander', (['"""Show Gaussian Blur"""'], {}), "('Show Gaussian Blur')\n", (91008, 91030), True, 'import streamlit as st\n'), ((91055, 91068), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (91065, 91068), True, 'import streamlit as st\n'), ((91590, 91621), 'streamlit.expander', 'st.expander', (['"""Show Median Blur"""'], {}), "('Show Median Blur')\n", (91601, 91621), True, 'import streamlit as st\n'), ((91646, 91659), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (91656, 91659), True, 'import streamlit as st\n'), ((92137, 92185), 'streamlit.expander', 'st.expander', (['"""Show Auto Blurring"""'], {'expanded': '(True)'}), "('Show Auto Blurring', expanded=True)\n", (92148, 92185), True, 'import streamlit as st\n'), ((92210, 92223), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (92220, 92223), True, 'import streamlit as st\n'), ((92683, 92707), 'cv2.blur', 'cv.blur', (['image', '(kX, kX)'], {}), '(image, (kX, kX))\n', (92690, 92707), True, 'import cv2 as cv\n'), ((93017, 93052), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['image', '(kX, kY)', '(0)'], {}), '(image, (kX, kY), 0)\n', (93032, 93052), True, 'import cv2 as cv\n'), ((93361, 93384), 'cv2.medianBlur', 'cv.medianBlur', (['image', 'k'], {}), '(image, k)\n', (93374, 93384), True, 'import cv2 as cv\n'), ((93881, 93915), 'streamlit.expander', 'st.expander', (['"""Show 
Original Image"""'], {}), "('Show Original Image')\n", (93892, 93915), True, 'import streamlit as st\n'), ((93933, 93948), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (93941, 93948), True, 'import streamlit as st\n'), ((94019, 94066), 'streamlit.expander', 'st.expander', (['"""Show Average Blur"""'], {'expanded': '(True)'}), "('Show Average Blur', expanded=True)\n", (94030, 94066), True, 'import streamlit as st\n'), ((94091, 94104), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (94101, 94104), True, 'import streamlit as st\n'), ((94634, 94667), 'streamlit.expander', 'st.expander', (['"""Show Gaussian Blur"""'], {}), "('Show Gaussian Blur')\n", (94645, 94667), True, 'import streamlit as st\n'), ((94692, 94705), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (94702, 94705), True, 'import streamlit as st\n'), ((95227, 95258), 'streamlit.expander', 'st.expander', (['"""Show Median Blur"""'], {}), "('Show Median Blur')\n", (95238, 95258), True, 'import streamlit as st\n'), ((95283, 95296), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (95293, 95296), True, 'import streamlit as st\n'), ((95774, 95822), 'streamlit.expander', 'st.expander', (['"""Show Auto Blurring"""'], {'expanded': '(True)'}), "('Show Auto Blurring', expanded=True)\n", (95785, 95822), True, 'import streamlit as st\n'), ((95847, 95860), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (95857, 95860), True, 'import streamlit as st\n'), ((96320, 96344), 'cv2.blur', 'cv.blur', (['image', '(kX, kX)'], {}), '(image, (kX, kX))\n', (96327, 96344), True, 'import cv2 as cv\n'), ((96654, 96689), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['image', '(kX, kY)', '(0)'], {}), '(image, (kX, kY), 0)\n', (96669, 96689), True, 'import cv2 as cv\n'), ((96998, 97021), 'cv2.medianBlur', 'cv.medianBlur', (['image', 'k'], {}), '(image, k)\n', (97011, 97021), True, 'import cv2 as cv\n'), ((7394, 7425), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""App 
Ratings"""'], {}), "('App Ratings')\n", (7410, 7425), True, 'import streamlit as st\n'), ((9378, 9445), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(threshInv, button, download, mime_type, key='1.1')\n", (9394, 9445), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((9784, 9851), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(threshInv, button, download, mime_type, key='1.2')\n", (9800, 9851), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((10360, 10424), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.3"""'}), "(thresh, button, download, mime_type, key='1.3')\n", (10376, 10424), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((10906, 10970), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.4"""'}), "(thresh, button, download, mime_type, key='1.4')\n", (10922, 10970), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((13273, 13340), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 
'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(threshInv, button, download, mime_type, key='1.1')\n", (13289, 13340), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((13679, 13746), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(threshInv, button, download, mime_type, key='1.2')\n", (13695, 13746), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((14255, 14319), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.3"""'}), "(thresh, button, download, mime_type, key='1.3')\n", (14271, 14319), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((14801, 14865), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.4"""'}), "(thresh, button, download, mime_type, key='1.4')\n", (14817, 14865), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((17417, 17484), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(threshInv, button, download, mime_type, key='1.1')\n", (17433, 17484), False, 'from utils_helpers import 
auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((18012, 18079), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(threshInv, button, download, mime_type, key='1.2')\n", (18028, 18079), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((18737, 18801), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.3"""'}), "(thresh, button, download, mime_type, key='1.3')\n", (18753, 18801), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((19485, 19549), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.4"""'}), "(thresh, button, download, mime_type, key='1.4')\n", (19501, 19549), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((20430, 20497), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(threshInv, button, download, mime_type, key='1.1')\n", (20446, 20497), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, 
average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((21040, 21107), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(threshInv, button, download, mime_type, key='1.2')\n", (21056, 21107), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((21765, 21829), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.3"""'}), "(thresh, button, download, mime_type, key='1.3')\n", (21781, 21829), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((22513, 22577), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.4"""'}), "(thresh, button, download, mime_type, key='1.4')\n", (22529, 22577), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((24378, 24444), 'utils_helpers.download_button1', 'download_button1', (['image', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.1"""'}), "(image, button, download, mime_type, key=f'{i}.1')\n", (24394, 24444), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((25458, 25524), 'utils_helpers.download_button1', 'download_button1', (['image', 'button', 'download', 
'mime_type'], {'key': 'f"""{i}.1"""'}), "(image, button, download, mime_type, key=f'{i}.1')\n", (25474, 25524), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((28393, 28416), 'streamlit.markdown', 'st.markdown', (['"""original"""'], {}), "('original')\n", (28404, 28416), True, 'import streamlit as st\n'), ((28437, 28451), 'streamlit.image', 'st.image', (['gray'], {}), '(gray)\n', (28445, 28451), True, 'import streamlit as st\n'), ((28564, 28587), 'streamlit.image', 'st.image', (['convoleOutput'], {}), '(convoleOutput)\n', (28572, 28587), True, 'import streamlit as st\n'), ((28698, 28720), 'streamlit.image', 'st.image', (['opencvOutput'], {}), '(opencvOutput)\n', (28706, 28720), True, 'import streamlit as st\n'), ((33745, 33779), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (33756, 33779), True, 'import streamlit as st\n'), ((33860, 33883), 'streamlit.markdown', 'st.markdown', (['"""Original"""'], {}), "('Original')\n", (33871, 33883), True, 'import streamlit as st\n'), ((33904, 33919), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (33912, 33919), True, 'import streamlit as st\n'), ((33964, 33997), 'streamlit.expander', 'st.expander', (['"""Show Blurred Image"""'], {}), "('Show Blurred Image')\n", (33975, 33997), True, 'import streamlit as st\n'), ((34019, 34041), 'streamlit.markdown', 'st.markdown', (['"""Blurred"""'], {}), "('Blurred')\n", (34030, 34041), True, 'import streamlit as st\n'), ((34062, 34079), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (34070, 34079), True, 'import streamlit as st\n'), ((35177, 35211), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (35188, 35211), True, 'import streamlit as st\n'), ((35292, 
35315), 'streamlit.markdown', 'st.markdown', (['"""Original"""'], {}), "('Original')\n", (35303, 35315), True, 'import streamlit as st\n'), ((35336, 35351), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (35344, 35351), True, 'import streamlit as st\n'), ((35396, 35429), 'streamlit.expander', 'st.expander', (['"""Show Blurred Image"""'], {}), "('Show Blurred Image')\n", (35407, 35429), True, 'import streamlit as st\n'), ((35451, 35473), 'streamlit.markdown', 'st.markdown', (['"""Blurred"""'], {}), "('Blurred')\n", (35462, 35473), True, 'import streamlit as st\n'), ((35494, 35511), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (35502, 35511), True, 'import streamlit as st\n'), ((37105, 37139), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (37116, 37139), True, 'import streamlit as st\n'), ((37220, 37243), 'streamlit.markdown', 'st.markdown', (['"""Original"""'], {}), "('Original')\n", (37231, 37243), True, 'import streamlit as st\n'), ((37264, 37279), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (37272, 37279), True, 'import streamlit as st\n'), ((37324, 37357), 'streamlit.expander', 'st.expander', (['"""Show Blurred Image"""'], {}), "('Show Blurred Image')\n", (37335, 37357), True, 'import streamlit as st\n'), ((37379, 37401), 'streamlit.markdown', 'st.markdown', (['"""Blurred"""'], {}), "('Blurred')\n", (37390, 37401), True, 'import streamlit as st\n'), ((37422, 37439), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (37430, 37439), True, 'import streamlit as st\n'), ((38438, 38472), 'streamlit.expander', 'st.expander', (['"""Show Original Image"""'], {}), "('Show Original Image')\n", (38449, 38472), True, 'import streamlit as st\n'), ((38553, 38576), 'streamlit.markdown', 'st.markdown', (['"""Original"""'], {}), "('Original')\n", (38564, 38576), True, 'import streamlit as st\n'), ((38597, 38612), 'streamlit.image', 'st.image', (['image'], 
{}), '(image)\n', (38605, 38612), True, 'import streamlit as st\n'), ((38657, 38690), 'streamlit.expander', 'st.expander', (['"""Show Blurred Image"""'], {}), "('Show Blurred Image')\n", (38668, 38690), True, 'import streamlit as st\n'), ((38712, 38734), 'streamlit.markdown', 'st.markdown', (['"""Blurred"""'], {}), "('Blurred')\n", (38723, 38734), True, 'import streamlit as st\n'), ((38755, 38772), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (38763, 38772), True, 'import streamlit as st\n'), ((42118, 42178), 'utils_helpers.download_button1', 'download_button1', (['gX', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(gX, button, download, mime_type, key='1.1')\n", (42134, 42178), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((42314, 42374), 'utils_helpers.download_button1', 'download_button1', (['gY', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(gY, button, download, mime_type, key='1.2')\n", (42330, 42374), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((42523, 42589), 'utils_helpers.download_button1', 'download_button1', (['combined', 'button', 'download', 'mime_type'], {'key': '"""1.3"""'}), "(combined, button, download, mime_type, key='1.3')\n", (42539, 42589), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((44410, 44470), 'utils_helpers.download_button1', 'download_button1', (['gX', 'button', 'download', 'mime_type'], {'key': 
'"""2.1"""'}), "(gX, button, download, mime_type, key='2.1')\n", (44426, 44470), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((44606, 44666), 'utils_helpers.download_button1', 'download_button1', (['gY', 'button', 'download', 'mime_type'], {'key': '"""2.2"""'}), "(gY, button, download, mime_type, key='2.2')\n", (44622, 44666), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((44815, 44881), 'utils_helpers.download_button1', 'download_button1', (['combined', 'button', 'download', 'mime_type'], {'key': '"""2.3"""'}), "(combined, button, download, mime_type, key='2.3')\n", (44831, 44881), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((45630, 45648), 'numpy.arctan2', 'np.arctan2', (['gY', 'gX'], {}), '(gY, gX)\n', (45640, 45648), True, 'import numpy as np\n'), ((46385, 46447), 'utils_helpers.download_button1', 'download_button1', (['imC1', 'button', 'download', 'mime_type'], {'key': '"""3.1"""'}), "(imC1, button, download, mime_type, key='3.1')\n", (46401, 46447), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((46616, 46678), 'utils_helpers.download_button1', 'download_button1', (['imC2', 'button', 'download', 'mime_type'], {'key': '"""3.2"""'}), "(imC2, button, download, mime_type, 
key='3.2')\n", (46632, 46678), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((48218, 48236), 'numpy.arctan2', 'np.arctan2', (['gY', 'gX'], {}), '(gY, gX)\n', (48228, 48236), True, 'import numpy as np\n'), ((48972, 49034), 'utils_helpers.download_button1', 'download_button1', (['imC1', 'button', 'download', 'mime_type'], {'key': '"""5.1"""'}), "(imC1, button, download, mime_type, key='5.1')\n", (48988, 49034), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((49199, 49261), 'utils_helpers.download_button1', 'download_button1', (['imC2', 'button', 'download', 'mime_type'], {'key': '"""5.2"""'}), "(imC2, button, download, mime_type, key='5.2')\n", (49215, 49261), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((52402, 52468), 'utils_helpers.download_button1', 'download_button1', (['blackhat', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(blackhat, button, download, mime_type, key='1.1')\n", (52418, 52468), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((52600, 52664), 'utils_helpers.download_button1', 'download_button1', (['tophat', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(tophat, button, download, mime_type, key='1.2')\n", (52616, 52664), False, 'from 
utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((53843, 53909), 'utils_helpers.download_button1', 'download_button1', (['blackhat', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(blackhat, button, download, mime_type, key='1.1')\n", (53859, 53909), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((54041, 54105), 'utils_helpers.download_button1', 'download_button1', (['tophat', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(tophat, button, download, mime_type, key='1.2')\n", (54057, 54105), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((56382, 56433), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', 'kernelSize'], {}), '(cv.MORPH_RECT, kernelSize)\n', (56406, 56433), True, 'import cv2 as cv\n'), ((56464, 56508), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_OPEN', 'kernel'], {}), '(gray, cv.MORPH_OPEN, kernel)\n', (56479, 56508), True, 'import cv2 as cv\n'), ((57295, 57346), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', 'kernelSize'], {}), '(cv.MORPH_RECT, kernelSize)\n', (57319, 57346), True, 'import cv2 as cv\n'), ((57377, 57422), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_CLOSE', 'kernel'], {}), '(gray, cv.MORPH_CLOSE, kernel)\n', (57392, 57422), True, 'import cv2 as cv\n'), ((58230, 58281), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', 'kernelSize'], {}), 
'(cv.MORPH_RECT, kernelSize)\n', (58254, 58281), True, 'import cv2 as cv\n'), ((58313, 58361), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_GRADIENT', 'kernel'], {}), '(gray, cv.MORPH_GRADIENT, kernel)\n', (58328, 58361), True, 'import cv2 as cv\n'), ((59201, 59265), 'utils_helpers.download_button1', 'download_button1', (['eroded', 'button', 'download', 'mime_type'], {'key': '"""4.1"""'}), "(eroded, button, download, mime_type, key='4.1')\n", (59217, 59265), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((59496, 59561), 'utils_helpers.download_button1', 'download_button1', (['dilated', 'button', 'download', 'mime_type'], {'key': '"""4.2"""'}), "(dilated, button, download, mime_type, key='4.2')\n", (59512, 59561), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((60424, 60488), 'utils_helpers.download_button1', 'download_button1', (['eroded', 'button', 'download', 'mime_type'], {'key': '"""5.1"""'}), "(eroded, button, download, mime_type, key='5.1')\n", (60440, 60488), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((60860, 60925), 'utils_helpers.download_button1', 'download_button1', (['closing', 'button', 'download', 'mime_type'], {'key': '"""5.2"""'}), "(closing, button, download, mime_type, key='5.2')\n", (60876, 60925), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, 
convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((61303, 61369), 'utils_helpers.download_button1', 'download_button1', (['gradient', 'button', 'download', 'mime_type'], {'key': '"""5.3"""'}), "(gradient, button, download, mime_type, key='5.3')\n", (61319, 61369), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((63413, 63464), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', 'kernelSize'], {}), '(cv.MORPH_RECT, kernelSize)\n', (63437, 63464), True, 'import cv2 as cv\n'), ((63495, 63539), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_OPEN', 'kernel'], {}), '(gray, cv.MORPH_OPEN, kernel)\n', (63510, 63539), True, 'import cv2 as cv\n'), ((64326, 64377), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', 'kernelSize'], {}), '(cv.MORPH_RECT, kernelSize)\n', (64350, 64377), True, 'import cv2 as cv\n'), ((64408, 64453), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_CLOSE', 'kernel'], {}), '(gray, cv.MORPH_CLOSE, kernel)\n', (64423, 64453), True, 'import cv2 as cv\n'), ((65261, 65312), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', 'kernelSize'], {}), '(cv.MORPH_RECT, kernelSize)\n', (65285, 65312), True, 'import cv2 as cv\n'), ((65344, 65392), 'cv2.morphologyEx', 'cv.morphologyEx', (['gray', 'cv.MORPH_GRADIENT', 'kernel'], {}), '(gray, cv.MORPH_GRADIENT, kernel)\n', (65359, 65392), True, 'import cv2 as cv\n'), ((66232, 66296), 'utils_helpers.download_button1', 'download_button1', (['eroded', 'button', 'download', 'mime_type'], {'key': '"""6.1"""'}), "(eroded, button, download, mime_type, key='6.1')\n", (66248, 66296), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, 
get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((66527, 66592), 'utils_helpers.download_button1', 'download_button1', (['dilated', 'button', 'download', 'mime_type'], {'key': '"""6.2"""'}), "(dilated, button, download, mime_type, key='6.2')\n", (66543, 66592), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((67450, 67514), 'utils_helpers.download_button1', 'download_button1', (['eroded', 'button', 'download', 'mime_type'], {'key': '"""7.1"""'}), "(eroded, button, download, mime_type, key='7.1')\n", (67466, 67514), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((67886, 67951), 'utils_helpers.download_button1', 'download_button1', (['closing', 'button', 'download', 'mime_type'], {'key': '"""7.2"""'}), "(closing, button, download, mime_type, key='7.2')\n", (67902, 67951), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((68329, 68395), 'utils_helpers.download_button1', 'download_button1', (['gradient', 'button', 'download', 'mime_type'], {'key': '"""7.3"""'}), "(gradient, button, download, mime_type, key='7.3')\n", (68345, 68395), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((70290, 70357), 
'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(threshInv, button, download, mime_type, key='1.1')\n", (70306, 70357), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((70659, 70723), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(thresh, button, download, mime_type, key='1.2')\n", (70675, 70723), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((70991, 71055), 'utils_helpers.download_button1', 'download_button1', (['masked', 'button', 'download', 'mime_type'], {'key': '"""1.3"""'}), "(masked, button, download, mime_type, key='1.3')\n", (71007, 71055), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((71795, 71862), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""2.1"""'}), "(threshInv, button, download, mime_type, key='2.1')\n", (71811, 71862), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((72162, 72226), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""2.2"""'}), "(thresh, button, download, mime_type, 
key='2.2')\n", (72178, 72226), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((72494, 72558), 'utils_helpers.download_button1', 'download_button1', (['masked', 'button', 'download', 'mime_type'], {'key': '"""2.3"""'}), "(masked, button, download, mime_type, key='2.3')\n", (72510, 72558), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((73580, 73647), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(threshInv, button, download, mime_type, key='1.1')\n", (73596, 73647), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((73949, 74013), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(thresh, button, download, mime_type, key='1.2')\n", (73965, 74013), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((74281, 74345), 'utils_helpers.download_button1', 'download_button1', (['masked', 'button', 'download', 'mime_type'], {'key': '"""1.3"""'}), "(masked, button, download, mime_type, key='1.3')\n", (74297, 74345), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, 
download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((75085, 75152), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""2.1"""'}), "(threshInv, button, download, mime_type, key='2.1')\n", (75101, 75152), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((75452, 75516), 'utils_helpers.download_button1', 'download_button1', (['thresh', 'button', 'download', 'mime_type'], {'key': '"""2.2"""'}), "(thresh, button, download, mime_type, key='2.2')\n", (75468, 75516), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((75784, 75848), 'utils_helpers.download_button1', 'download_button1', (['masked', 'button', 'download', 'mime_type'], {'key': '"""2.3"""'}), "(masked, button, download, mime_type, key='2.3')\n", (75800, 75848), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((77005, 77072), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.1"""'}), "(threshInv, button, download, mime_type, key='1.1')\n", (77021, 77072), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((77356, 77423), 'utils_helpers.download_button1', 
'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""1.2"""'}), "(threshInv, button, download, mime_type, key='1.2')\n", (77372, 77423), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((78433, 78500), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""2.1"""'}), "(threshInv, button, download, mime_type, key='2.1')\n", (78449, 78500), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((78784, 78851), 'utils_helpers.download_button1', 'download_button1', (['threshInv', 'button', 'download', 'mime_type'], {'key': '"""2.2"""'}), "(threshInv, button, download, mime_type, key='2.2')\n", (78800, 78851), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((80013, 80028), 'cv2.split', 'cv.split', (['image'], {}), '(image)\n', (80021, 80028), True, 'import cv2 as cv\n'), ((80084, 80101), 'streamlit.markdown', 'st.markdown', (['name'], {}), '(name)\n', (80095, 80101), True, 'import streamlit as st\n'), ((80122, 80136), 'streamlit.image', 'st.image', (['chan'], {}), '(chan)\n', (80130, 80136), True, 'import streamlit as st\n'), ((80157, 80224), 'utils_helpers.download_button1', 'download_button1', (['chan', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(chan, button, download, mime_type, key=f'{i}.{i}')\n", (80173, 80224), False, 'from utils_helpers import auto_canny_thresh, source_code, 
version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((80675, 80688), 'cv2.split', 'cv.split', (['hsv'], {}), '(hsv)\n', (80683, 80688), True, 'import cv2 as cv\n'), ((80744, 80761), 'streamlit.markdown', 'st.markdown', (['name'], {}), '(name)\n', (80755, 80761), True, 'import streamlit as st\n'), ((80782, 80796), 'streamlit.image', 'st.image', (['chan'], {}), '(chan)\n', (80790, 80796), True, 'import streamlit as st\n'), ((80817, 80884), 'utils_helpers.download_button1', 'download_button1', (['chan', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(chan, button, download, mime_type, key=f'{i}.{i}')\n", (80833, 80884), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((81334, 81347), 'cv2.split', 'cv.split', (['lab'], {}), '(lab)\n', (81342, 81347), True, 'import cv2 as cv\n'), ((81403, 81420), 'streamlit.markdown', 'st.markdown', (['name'], {}), '(name)\n', (81414, 81420), True, 'import streamlit as st\n'), ((81441, 81455), 'streamlit.image', 'st.image', (['chan'], {}), '(chan)\n', (81449, 81455), True, 'import streamlit as st\n'), ((81476, 81543), 'utils_helpers.download_button1', 'download_button1', (['chan', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(chan, button, download, mime_type, key=f'{i}.{i}')\n", (81492, 81543), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((82560, 82575), 'cv2.split', 'cv.split', (['image'], {}), '(image)\n', (82568, 82575), True, 'import cv2 as cv\n'), ((82631, 82648), 
'streamlit.markdown', 'st.markdown', (['name'], {}), '(name)\n', (82642, 82648), True, 'import streamlit as st\n'), ((82669, 82683), 'streamlit.image', 'st.image', (['chan'], {}), '(chan)\n', (82677, 82683), True, 'import streamlit as st\n'), ((82704, 82771), 'utils_helpers.download_button1', 'download_button1', (['chan', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(chan, button, download, mime_type, key=f'{i}.{i}')\n", (82720, 82771), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((83222, 83235), 'cv2.split', 'cv.split', (['hsv'], {}), '(hsv)\n', (83230, 83235), True, 'import cv2 as cv\n'), ((83291, 83308), 'streamlit.markdown', 'st.markdown', (['name'], {}), '(name)\n', (83302, 83308), True, 'import streamlit as st\n'), ((83329, 83343), 'streamlit.image', 'st.image', (['chan'], {}), '(chan)\n', (83337, 83343), True, 'import streamlit as st\n'), ((83364, 83431), 'utils_helpers.download_button1', 'download_button1', (['chan', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(chan, button, download, mime_type, key=f'{i}.{i}')\n", (83380, 83431), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((83881, 83894), 'cv2.split', 'cv.split', (['lab'], {}), '(lab)\n', (83889, 83894), True, 'import cv2 as cv\n'), ((83950, 83967), 'streamlit.markdown', 'st.markdown', (['name'], {}), '(name)\n', (83961, 83967), True, 'import streamlit as st\n'), ((83988, 84002), 'streamlit.image', 'st.image', (['chan'], {}), '(chan)\n', (83996, 84002), True, 'import streamlit as st\n'), ((84023, 84090), 'utils_helpers.download_button1', 'download_button1', (['chan', 
'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(chan, button, download, mime_type, key=f'{i}.{i}')\n", (84039, 84090), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((86231, 86290), 'cv2.bilateralFilter', 'cv.bilateralFilter', (['image', 'diameter', 'sigmaColor', 'sigmaSpace'], {}), '(image, diameter, sigmaColor, sigmaSpace)\n', (86249, 86290), True, 'import cv2 as cv\n'), ((88324, 88383), 'cv2.bilateralFilter', 'cv.bilateralFilter', (['image', 'diameter', 'sigmaColor', 'sigmaSpace'], {}), '(image, diameter, sigmaColor, sigmaSpace)\n', (88342, 88383), True, 'import cv2 as cv\n'), ((92870, 92935), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': '"""3.1"""'}), "(blurred, button, download, mime_type, key='3.1')\n", (92886, 92935), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((93216, 93281), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': '"""3.2"""'}), "(blurred, button, download, mime_type, key='3.2')\n", (93232, 93281), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((93535, 93600), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': '"""3.3"""'}), "(blurred, button, download, mime_type, key='3.3')\n", (93551, 93600), False, 'from utils_helpers import auto_canny_thresh, source_code, 
version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((96507, 96572), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': '"""4.1"""'}), "(blurred, button, download, mime_type, key='4.1')\n", (96523, 96572), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((96853, 96918), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': '"""4.2"""'}), "(blurred, button, download, mime_type, key='4.2')\n", (96869, 96918), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((97172, 97237), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': '"""4.3"""'}), "(blurred, button, download, mime_type, key='4.3')\n", (97188, 97237), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((7626, 7657), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""App Ratings"""'], {}), "('App Ratings')\n", (7642, 7657), True, 'import streamlit as st\n'), ((12041, 12112), 'utils_helpers.download_button1', 'download_button1', (['images[i]', 'button', 'download', 'mime_type'], {'key': '"""{i}.1.1"""'}), "(images[i], button, download, mime_type, key='{i}.1.1')\n", (12057, 12112), False, 'from utils_helpers import auto_canny_thresh, 
source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((12405, 12476), 'utils_helpers.download_button1', 'download_button1', (['images[i]', 'button', 'download', 'mime_type'], {'key': '"""{i}.2.2"""'}), "(images[i], button, download, mime_type, key='{i}.2.2')\n", (12421, 12476), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((15936, 16007), 'utils_helpers.download_button1', 'download_button1', (['images[i]', 'button', 'download', 'mime_type'], {'key': '"""{i}.1.1"""'}), "(images[i], button, download, mime_type, key='{i}.1.1')\n", (15952, 16007), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((16300, 16371), 'utils_helpers.download_button1', 'download_button1', (['images[i]', 'button', 'download', 'mime_type'], {'key': '"""{i}.2.2"""'}), "(images[i], button, download, mime_type, key='{i}.2.2')\n", (16316, 16371), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((55105, 55174), 'utils_helpers.download_button1', 'download_button1', (['eroded', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(eroded, button, download, mime_type, key=f'{i}.{i}')\n", (55121, 55174), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, 
insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((55708, 55778), 'utils_helpers.download_button1', 'download_button1', (['dilated', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(dilated, button, download, mime_type, key=f'{i}.{i}')\n", (55724, 55778), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((56736, 56806), 'utils_helpers.download_button1', 'download_button1', (['opening', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(opening, button, download, mime_type, key=f'{i}.{i}')\n", (56752, 56806), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((57650, 57720), 'utils_helpers.download_button1', 'download_button1', (['closing', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(closing, button, download, mime_type, key=f'{i}.{i}')\n", (57666, 57720), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((58591, 58662), 'utils_helpers.download_button1', 'download_button1', (['gradient', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(gradient, button, download, mime_type, key=f'{i}.{i}')\n", (58607, 58662), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((62136, 62205), 
'utils_helpers.download_button1', 'download_button1', (['eroded', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(eroded, button, download, mime_type, key=f'{i}.{i}')\n", (62152, 62205), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((62739, 62809), 'utils_helpers.download_button1', 'download_button1', (['dilated', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(dilated, button, download, mime_type, key=f'{i}.{i}')\n", (62755, 62809), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((63767, 63837), 'utils_helpers.download_button1', 'download_button1', (['opening', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(opening, button, download, mime_type, key=f'{i}.{i}')\n", (63783, 63837), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((64681, 64751), 'utils_helpers.download_button1', 'download_button1', (['closing', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), "(closing, button, download, mime_type, key=f'{i}.{i}')\n", (64697, 64751), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((65622, 65693), 'utils_helpers.download_button1', 'download_button1', (['gradient', 'button', 'download', 'mime_type'], {'key': 'f"""{i}.{i}"""'}), 
"(gradient, button, download, mime_type, key=f'{i}.{i}')\n", (65638, 65693), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((86543, 86561), 'streamlit.markdown', 'st.markdown', (['title'], {}), '(title)\n', (86554, 86561), True, 'import streamlit as st\n'), ((86586, 86603), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (86594, 86603), True, 'import streamlit as st\n'), ((86628, 86694), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': 'f"""{i}"""'}), "(blurred, button, download, mime_type, key=f'{i}')\n", (86644, 86694), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((88636, 88654), 'streamlit.markdown', 'st.markdown', (['title'], {}), '(title)\n', (88647, 88654), True, 'import streamlit as st\n'), ((88679, 88696), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (88687, 88696), True, 'import streamlit as st\n'), ((88721, 88787), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': 'f"""{i}"""'}), "(blurred, button, download, mime_type, key=f'{i}')\n", (88737, 88787), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((90729, 90753), 'cv2.blur', 'cv.blur', (['image', '(kX, kY)'], {}), '(image, (kX, kY))\n', (90736, 90753), True, 'import cv2 as cv\n'), ((90854, 90871), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', 
(90862, 90871), True, 'import streamlit as st\n'), ((90896, 90962), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': 'f"""{i}"""'}), "(blurred, button, download, mime_type, key=f'{i}')\n", (90912, 90962), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((91310, 91345), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['image', '(kX, kY)', '(0)'], {}), '(image, (kX, kY), 0)\n', (91325, 91345), True, 'import cv2 as cv\n'), ((91447, 91464), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (91455, 91464), True, 'import streamlit as st\n'), ((91489, 91555), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': 'f"""{i}"""'}), "(blurred, button, download, mime_type, key=f'{i}')\n", (91505, 91555), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((91898, 91921), 'cv2.medianBlur', 'cv.medianBlur', (['image', 'k'], {}), '(image, k)\n', (91911, 91921), True, 'import cv2 as cv\n'), ((92010, 92027), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (92018, 92027), True, 'import streamlit as st\n'), ((92052, 92118), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': 'f"""{i}"""'}), "(blurred, button, download, mime_type, key=f'{i}')\n", (92068, 92118), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), 
((94366, 94390), 'cv2.blur', 'cv.blur', (['image', '(kX, kY)'], {}), '(image, (kX, kY))\n', (94373, 94390), True, 'import cv2 as cv\n'), ((94491, 94508), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (94499, 94508), True, 'import streamlit as st\n'), ((94533, 94599), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': 'f"""{i}"""'}), "(blurred, button, download, mime_type, key=f'{i}')\n", (94549, 94599), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((94947, 94982), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['image', '(kX, kY)', '(0)'], {}), '(image, (kX, kY), 0)\n', (94962, 94982), True, 'import cv2 as cv\n'), ((95084, 95101), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (95092, 95101), True, 'import streamlit as st\n'), ((95126, 95192), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': 'f"""{i}"""'}), "(blurred, button, download, mime_type, key=f'{i}')\n", (95142, 95192), False, 'from utils_helpers import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n'), ((95535, 95558), 'cv2.medianBlur', 'cv.medianBlur', (['image', 'k'], {}), '(image, k)\n', (95548, 95558), True, 'import cv2 as cv\n'), ((95647, 95664), 'streamlit.image', 'st.image', (['blurred'], {}), '(blurred)\n', (95655, 95664), True, 'import streamlit as st\n'), ((95689, 95755), 'utils_helpers.download_button1', 'download_button1', (['blurred', 'button', 'download', 'mime_type'], {'key': 'f"""{i}"""'}), "(blurred, button, download, mime_type, key=f'{i}')\n", (95705, 95755), False, 'from utils_helpers 
import auto_canny_thresh, source_code, version, load_image, load_image_PIL, converted, get_location_data, download_button1, convolve, insert_data_mongodb, average_ratings_mongodb, source_code, scrape_duckduckgo\n')] |
"""Plot transmissivity, pressure, and an example weighting function."""
import numpy as np
import matplotlib.pyplot as plt
import misc

# Atmospheric constants: scale height (m), mixing ratio, gravity (m/s^2),
# and an absorption coefficient for the example gas.
H = 8000
q = 0.01
g = 9.81
k = 7

# Log-spaced pressure levels from 1000 hPa down to 5 hPa.
pressure = np.exp(np.linspace(np.log(1000), np.log(5), 300))
# Pressure height in km from the hydrostatic relation.
Z = -H * np.log(pressure / 1000) / 1000

# Optical depth grows linearly with pressure; transmissivity decays with it.
tau = k * q / g * pressure
T = np.exp(-tau)

plt.plot(T, Z, label='Transmissivity')
plt.plot(pressure / 1000, Z, label='Pressure (atm)')

# The weighting function is the vertical derivative of transmissivity,
# normalised to peak at 1 and plotted at layer mid-points.
dTdZ = np.diff(T) / np.diff(Z)
plt.plot(dTdZ / np.max(dTdZ), misc.stats.lin_av(Z), label='Weighting fn')
plt.axhline(15.7, lw=0.5, c='k')

plt.ylabel('Pressure height (km)')
plt.xlim(0, 1.05)
plt.ylim(0, 40)
plt.legend()

fig = plt.gcf()
fig.set_size_inches(4, 4)
fig.savefig('output/weighting_fn_co2.pdf', bbox_inches='tight')
| [
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.xlim",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"misc.stats.lin_av",
"numpy.max",
"numpy.diff",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf"
] | [((264, 276), 'numpy.exp', 'np.exp', (['(-tau)'], {}), '(-tau)\n', (270, 276), True, 'import numpy as np\n'), ((278, 316), 'matplotlib.pyplot.plot', 'plt.plot', (['T', 'Z'], {'label': '"""Transmissivity"""'}), "(T, Z, label='Transmissivity')\n", (286, 316), True, 'import matplotlib.pyplot as plt\n'), ((317, 369), 'matplotlib.pyplot.plot', 'plt.plot', (['(pressure / 1000)', 'Z'], {'label': '"""Pressure (atm)"""'}), "(pressure / 1000, Z, label='Pressure (atm)')\n", (325, 369), True, 'import matplotlib.pyplot as plt\n'), ((469, 501), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(15.7)'], {'lw': '(0.5)', 'c': '"""k"""'}), "(15.7, lw=0.5, c='k')\n", (480, 501), True, 'import matplotlib.pyplot as plt\n'), ((564, 598), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure height (km)"""'], {}), "('Pressure height (km)')\n", (574, 598), True, 'import matplotlib.pyplot as plt\n'), ((599, 616), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.05)'], {}), '(0, 1.05)\n', (607, 616), True, 'import matplotlib.pyplot as plt\n'), ((617, 632), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(40)'], {}), '(0, 40)\n', (625, 632), True, 'import matplotlib.pyplot as plt\n'), ((633, 645), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (643, 645), True, 'import matplotlib.pyplot as plt\n'), ((653, 662), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (660, 662), True, 'import matplotlib.pyplot as plt\n'), ((375, 385), 'numpy.diff', 'np.diff', (['T'], {}), '(T)\n', (382, 385), True, 'import numpy as np\n'), ((386, 396), 'numpy.diff', 'np.diff', (['Z'], {}), '(Z)\n', (393, 396), True, 'import numpy as np\n'), ((425, 445), 'misc.stats.lin_av', 'misc.stats.lin_av', (['Z'], {}), '(Z)\n', (442, 445), False, 'import misc\n'), ((137, 149), 'numpy.log', 'np.log', (['(1000)'], {}), '(1000)\n', (143, 149), True, 'import numpy as np\n'), ((151, 160), 'numpy.log', 'np.log', (['(5)'], {}), '(5)\n', (157, 160), True, 'import numpy as np\n'), ((210, 233), 'numpy.log', 
'np.log', (['(pressure / 1000)'], {}), '(pressure / 1000)\n', (216, 233), True, 'import numpy as np\n'), ((411, 423), 'numpy.max', 'np.max', (['dTdZ'], {}), '(dTdZ)\n', (417, 423), True, 'import numpy as np\n')] |
import numpy as np
import taichi as ti
from tests import test_utils
@test_utils.test(arch=ti.vulkan)
def test_ndarray_int():
    """Compile a one-dispatch graph that fills an int32 ndarray with ones."""
    length = 4

    @ti.kernel
    def test(pos: ti.types.ndarray(field_dim=1)):
        # Write 1 into every slot of the 1-D ndarray argument.
        for idx in range(length):
            pos[idx] = 1

    # Declare the graph's single ndarray argument, build and compile the graph.
    arg_pos = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'pos', ti.i32)
    builder = ti.graph.GraphBuilder()
    builder.dispatch(test, arg_pos)
    graph = builder.compile()

    buf = ti.ndarray(ti.i32, shape=(length,))
    graph.run({'pos': buf})
    assert (buf.to_numpy() == np.ones(4)).all()
| [
"taichi.ndarray",
"numpy.ones",
"taichi.graph.Arg",
"tests.test_utils.test",
"taichi.graph.GraphBuilder",
"taichi.types.ndarray"
] | [((72, 103), 'tests.test_utils.test', 'test_utils.test', ([], {'arch': 'ti.vulkan'}), '(arch=ti.vulkan)\n', (87, 103), False, 'from tests import test_utils\n'), ((269, 322), 'taichi.graph.Arg', 'ti.graph.Arg', (['ti.graph.ArgKind.NDARRAY', '"""pos"""', 'ti.i32'], {}), "(ti.graph.ArgKind.NDARRAY, 'pos', ti.i32)\n", (281, 322), True, 'import taichi as ti\n'), ((336, 359), 'taichi.graph.GraphBuilder', 'ti.graph.GraphBuilder', ([], {}), '()\n', (357, 359), True, 'import taichi as ti\n'), ((429, 459), 'taichi.ndarray', 'ti.ndarray', (['ti.i32'], {'shape': '(n,)'}), '(ti.i32, shape=(n,))\n', (439, 459), True, 'import taichi as ti\n'), ((172, 201), 'taichi.types.ndarray', 'ti.types.ndarray', ([], {'field_dim': '(1)'}), '(field_dim=1)\n', (188, 201), True, 'import taichi as ti\n'), ((511, 521), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (518, 521), True, 'import numpy as np\n')] |
import lasagne, theano, numpy as np, logging
from theano import tensor as T
class Identity(lasagne.init.Initializer):
    """Lasagne initializer producing an identity matrix of the given shape."""

    def sample(self, shape):
        # np.eye zero-pads off the diagonal for non-square shapes.
        identity = np.eye(*shape)
        return lasagne.utils.floatX(identity)
class RDNN_Dummy:
    """Stand-in for RDNN that emits random predictions.

    Useful for exercising the surrounding training/evaluation pipeline
    without compiling a real network.
    """

    def __init__(self, nf, kwargs):
        # The dummy ignores the feature count and all hyperparameters.
        pass

    def train(self, dsetdat):
        """Pretend to train: sleep briefly, then return a fixed cost."""
        import time
        time.sleep(5)
        return 19

    def predict(self, dsetdat):
        """Return zero cost plus uniform-random per-token scores per sentence."""
        total_cost = 0
        predictions = []
        for _Xb, Xmask, _yb, _ymask in dsetdat:
            total_cost += 0
            lengths = Xmask.sum(axis=-1)
            predictions.append([np.random.rand(slen) for slen in lengths])
        return total_cost, predictions

    def get_param_values(self):
        """The dummy has no parameters to report."""
        return []

    def set_param_values(self, values):
        """No-op: there is nothing to restore."""
        pass

    def randprob(self, sent_len):
        """Draw a random probability vector of length ``sent_len`` (sums to 1)."""
        draws = np.random.rand(sent_len)
        return draws / np.sum(draws, axis=0)
def extract_rnn_params(kwargs):
    """Select only the RDNN hyperparameters from a wider kwargs dict.

    Returns a new dict keyed by ``RDNN.param_names``; raises ``KeyError``
    if any expected parameter is missing from ``kwargs``.
    """
    # Dict comprehension instead of dict() over a generator (same behavior).
    return {pname: kwargs[pname] for pname in RDNN.param_names}
class RDNN:
    """Bidirectional deep recurrent network (RNN/LSTM/GRU) built with Lasagne.

    Architecture: optional embedding -> input dropout -> a stack of
    forward/backward recurrent layers whose outputs are concatenated ->
    dense log-softmax output over two classes, evaluated per time step
    under a sequence mask.
    """

    # Hyperparameters expected in the kwargs dict passed to __init__.
    param_names = ['activation', 'n_hidden', 'fbmerge', 'drates', 'opt', 'lr', 'norm',
                   'gclip', 'truncate', 'in2out', 'emb', 'fbias', 'gnoise', 'eps']

    def __init__(self, nf, kwargs):
        """Build the Theano computation graph and compile train/predict functions.

        nf: number of input features per time step (must be nonzero).
        kwargs: hyperparameter dict containing at least RDNN.param_names.
        """
        assert nf
        self.kwargs = extract_rnn_params(kwargs)
        for pname in RDNN.param_names:
            setattr(self, pname, kwargs[pname])
        self.lr = theano.shared(np.array(self.lr, dtype='float32'), allow_downcast=True)
        self.gclip = False if self.gclip == 0 else self.gclip  # mysteriously, we need this line
        # One activation spec per layer; "<layer>-<type>" strings carry the cell type.
        self.activation = [self.activation] * len(self.n_hidden)
        self.deep_ltypes = [act_str.split('-')[1] for act_str in self.activation]

        self.opt = getattr(lasagne.updates, self.opt)
        ldepth = len(self.n_hidden)

        # Gate factories for LSTM cells; the forget gate gets a configurable bias.
        default_gate = lambda: lasagne.layers.Gate(W_in=lasagne.init.GlorotUniform(),
                                                    W_hid=lasagne.init.GlorotUniform())
        forget_gate = lambda: lasagne.layers.Gate(W_in=lasagne.init.GlorotUniform(),
                                                   W_hid=lasagne.init.GlorotUniform(),
                                                   b=lasagne.init.Constant(self.fbias))

        l_in = lasagne.layers.InputLayer(shape=(None, None, nf))
        logging.debug('l_in: {}'.format(lasagne.layers.get_output_shape(l_in)))
        N_BATCH_VAR, MAX_SEQ_LEN_VAR, _ = l_in.input_var.shape  # symbolic ref to input_var shape
        l_mask = lasagne.layers.InputLayer(shape=(N_BATCH_VAR, MAX_SEQ_LEN_VAR))
        logging.debug('l_mask: {}'.format(lasagne.layers.get_output_shape(l_mask)))

        curlayer = l_in
        if self.emb:
            # Project raw features to an embedding via a time-distributed dense layer.
            l_reshape = lasagne.layers.ReshapeLayer(l_in, (-1, nf))
            logging.debug('l_reshape: {}'.format(lasagne.layers.get_output_shape(l_reshape)))
            l_emb = lasagne.layers.DenseLayer(l_reshape, num_units=self.emb, nonlinearity=None, b=None)
            logging.debug('l_emb: {}'.format(lasagne.layers.get_output_shape(l_emb)))
            l_emb = lasagne.layers.ReshapeLayer(l_emb, (N_BATCH_VAR, MAX_SEQ_LEN_VAR, self.emb))
            logging.debug('l_emb: {}'.format(lasagne.layers.get_output_shape(l_emb)))
            curlayer = l_emb

        if self.drates[0] > 0:
            l_in_drop = lasagne.layers.DropoutLayer(curlayer, p=self.drates[0])
            logging.debug('l_drop: {}'.format(lasagne.layers.get_output_shape(l_in_drop)))
            self.layers = [l_in_drop]
        else:
            # BUGFIX: was [l_in], which silently bypassed the embedding layer
            # whenever input dropout was disabled.
            self.layers = [curlayer]

        for level, ltype, n_hidden in zip(range(1, ldepth + 1), self.deep_ltypes, self.n_hidden):
            prev_layer = self.layers[level - 1]
            if ltype in ['relu', 'lrelu', 'relu6', 'elu']:
                LayerType = lasagne.layers.RecurrentLayer
                if ltype == 'relu': nonlin = lasagne.nonlinearities.rectify
                elif ltype == 'lrelu': nonlin = lasagne.nonlinearities.leaky_rectify
                elif ltype == 'relu6': nonlin = lambda x: T.min(lasagne.nonlinearities.rectify(x), 6)
                elif ltype == 'elu': nonlin = lambda x: T.switch(x >= 0, x, T.exp(x) - 1)

                l_forward = LayerType(prev_layer, n_hidden, mask_input=l_mask, grad_clipping=self.gclip, gradient_steps=self.truncate,
                        W_hid_to_hid=lasagne.init.GlorotUniform(gain='relu'), W_in_to_hid=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=nonlin)
                l_backward = LayerType(prev_layer, n_hidden, mask_input=l_mask, grad_clipping=self.gclip, gradient_steps=self.truncate,
                        W_hid_to_hid=lasagne.init.GlorotUniform(gain='relu'), W_in_to_hid=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=nonlin, backwards=True)
            elif ltype == 'lstm':
                LayerType = lasagne.layers.LSTMLayer
                l_forward = LayerType(prev_layer, n_hidden, ingate=default_gate(),
                        forgetgate=forget_gate(), outgate=default_gate(), mask_input=l_mask, grad_clipping=self.gclip, gradient_steps=self.truncate)
                l_backward = LayerType(prev_layer, n_hidden, ingate=default_gate(),
                        forgetgate=forget_gate(), outgate=default_gate(), mask_input=l_mask, grad_clipping=self.gclip, gradient_steps=self.truncate, backwards=True)
            elif ltype == 'gru':
                LayerType = lasagne.layers.GRULayer
                l_forward = LayerType(prev_layer, n_hidden, mask_input=l_mask, grad_clipping=self.gclip, gradient_steps=self.truncate)
                l_backward = LayerType(prev_layer, n_hidden, mask_input=l_mask, grad_clipping=self.gclip, gradient_steps=self.truncate, backwards=True)
            logging.debug('l_forward: {}'.format(lasagne.layers.get_output_shape(l_forward)))
            logging.debug('l_backward: {}'.format(lasagne.layers.get_output_shape(l_backward)))

            # NOTE(review): self.fbmerge is accepted but forward/backward outputs
            # are always concatenated here; a 'sum' merge mode is not implemented.
            l_fbmerge = lasagne.layers.ConcatLayer([l_forward, l_backward], axis=2)
            logging.debug('l_fbmerge: {}'.format(lasagne.layers.get_output_shape(l_fbmerge)))
            if self.drates[level] > 0:
                l_fbmerge = lasagne.layers.DropoutLayer(l_fbmerge, p=self.drates[level])
            self.layers.append(l_fbmerge)

        # Optionally feed the (possibly embedded) input straight to the output layer.
        # NOTE(review): with in2out the feature count exceeds n_hidden[-1]*2, so the
        # reshape below looks inconsistent for that setting — confirm before using in2out.
        l_fbmerge = lasagne.layers.ConcatLayer([l_fbmerge, curlayer], axis=2) if self.in2out else l_fbmerge

        def log_softmax(x):
            # BUGFIX: the original referenced an undefined `log_softmax` name
            # (NameError at graph-build time). Numerically stable log-softmax
            # over the class axis of the (batch*seq, 2) dense output.
            xdev = x - x.max(axis=1, keepdims=True)
            return xdev - T.log(T.sum(T.exp(xdev), axis=1, keepdims=True))

        l_reshape = lasagne.layers.ReshapeLayer(l_fbmerge, (-1, self.n_hidden[-1] * 2))
        logging.debug('l_reshape: {}'.format(lasagne.layers.get_output_shape(l_reshape)))
        l_out = lasagne.layers.DenseLayer(l_reshape, num_units=2, nonlinearity=log_softmax)
        # BUGFIX: was ReshapeLayer(l_rec_out, ...) with `l_rec_out` undefined.
        l_out = lasagne.layers.ReshapeLayer(l_out, (N_BATCH_VAR, MAX_SEQ_LEN_VAR, 2))
        logging.debug('l_out: {}'.format(lasagne.layers.get_output_shape(l_out)))

        self.output_layer = l_out
        target_output = T.matrix('target_output')
        out_mask = T.matrix('mask')

        def cost(output):  # expects log softmax output
            # Mask out padding positions, then average the cross-entropy over
            # the number of real (unmasked) tokens.
            p = output[out_mask.nonzero()]
            t = target_output[out_mask.nonzero()]
            return T.sum(lasagne.objectives.binary_crossentropy(p, t)) / T.sum(out_mask)

        cost_train = cost(lasagne.layers.get_output(l_out, deterministic=False))
        cost_eval = cost(lasagne.layers.get_output(l_out, deterministic=True))

        all_params = lasagne.layers.get_all_params(l_out, trainable=True)
        logging.debug(all_params)
        f_hid2hid = l_forward.get_params()[-1]
        b_hid2hid = l_backward.get_params()[-1]

        all_grads = T.grad(cost_train, all_params)
        all_grads, total_norm = lasagne.updates.total_norm_constraint(all_grads, self.norm, return_norm=True)
        # If the gradient norm is NaN/Inf, replace gradients with a small
        # fraction of the parameters themselves to keep training alive.
        all_grads = [T.switch(T.or_(T.isnan(total_norm), T.isinf(total_norm)), p * 0.01, g)
                     for g, p in zip(all_grads, all_params)]
        updates = self.opt(all_grads, all_params, self.lr)

        logging.info("Compiling functions...")
        self.train_model = theano.function(inputs=[l_in.input_var, target_output, l_mask.input_var, out_mask], outputs=cost_train, updates=updates, allow_input_downcast=True)
        self.predict_model = theano.function(
                inputs=[l_in.input_var, target_output, l_mask.input_var, out_mask],
                outputs=[cost_eval, lasagne.layers.get_output(l_out, deterministic=True)])
        # Aux: training step that also returns intermediate outputs and grad norm.
        self.train_model_debug = theano.function(
                inputs=[l_in.input_var, target_output, l_mask.input_var, out_mask],
                outputs=[cost_train] + lasagne.layers.get_output([l_out, l_fbmerge], deterministic=True) + [total_norm],
                updates=updates)
        self.compute_cost = theano.function([l_in.input_var, target_output, l_mask.input_var, out_mask], cost_eval)
        self.compute_cost_train = theano.function([l_in.input_var, target_output, l_mask.input_var, out_mask], cost_train)
        logging.info("Compiling done.")

    def train(self, dsetdat):
        """Run one update per batch and return the mean training cost."""
        tcost = np.mean([self.train_model(Xdset, ydset, Xdsetmsk, ydsetmsk)
                         for Xdset, Xdsetmsk, ydset, ydsetmsk in dsetdat])
        return tcost

    def predict(self, dsetdat):
        """Return (mean eval cost, per-sentence prediction arrays truncated to length)."""
        bcosts, rnn_last_predictions = [], []
        for Xdset, Xdsetmsk, ydset, ydsetmsk in dsetdat:
            bcost, pred = self.predict_model(Xdset, ydset, Xdsetmsk, ydsetmsk)
            bcosts.append(bcost)
            # Trim each sentence's predictions to its real (unpadded) length.
            sentLens, mlen = Xdsetmsk.sum(axis=-1), Xdset.shape[1]
            rnn_last_predictions.append([pred[i, 0:slen] for i, slen in enumerate(sentLens)])
        return np.mean(bcosts), rnn_last_predictions

    def get_param_values(self):
        """Snapshot all network parameter values (list of numpy arrays)."""
        return lasagne.layers.get_all_param_values(self.output_layer)

    def set_param_values(self, values):
        """Restore parameter values previously returned by get_param_values."""
        lasagne.layers.set_all_param_values(self.output_layer, values)
| [
"lasagne.layers.ConcatLayer",
"numpy.sum",
"numpy.mean",
"lasagne.layers.get_output",
"lasagne.layers.get_output_shape",
"lasagne.layers.InputLayer",
"lasagne.init.Constant",
"lasagne.layers.set_all_param_values",
"lasagne.updates.total_norm_constraint",
"lasagne.layers.get_all_param_values",
"l... | [((329, 342), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (339, 342), False, 'import time\n'), ((887, 911), 'numpy.random.rand', 'np.random.rand', (['sent_len'], {}), '(sent_len)\n', (901, 911), True, 'import lasagne, theano, numpy as np, logging\n'), ((2311, 2360), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', ([], {'shape': '(None, None, nf)'}), '(shape=(None, None, nf))\n', (2336, 2360), False, 'import lasagne, theano, numpy as np, logging\n'), ((2555, 2618), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', ([], {'shape': '(N_BATCH_VAR, MAX_SEQ_LEN_VAR)'}), '(shape=(N_BATCH_VAR, MAX_SEQ_LEN_VAR))\n', (2580, 2618), False, 'import lasagne, theano, numpy as np, logging\n'), ((6969, 7036), 'lasagne.layers.ReshapeLayer', 'lasagne.layers.ReshapeLayer', (['l_fbmerge', '(-1, self.n_hidden[-1] * 2)'], {}), '(l_fbmerge, (-1, self.n_hidden[-1] * 2))\n', (6996, 7036), False, 'import lasagne, theano, numpy as np, logging\n'), ((7141, 7216), 'lasagne.layers.DenseLayer', 'lasagne.layers.DenseLayer', (['l_reshape'], {'num_units': '(2)', 'nonlinearity': 'log_softmax'}), '(l_reshape, num_units=2, nonlinearity=log_softmax)\n', (7166, 7216), False, 'import lasagne, theano, numpy as np, logging\n'), ((7233, 7306), 'lasagne.layers.ReshapeLayer', 'lasagne.layers.ReshapeLayer', (['l_rec_out', '(N_BATCH_VAR, MAX_SEQ_LEN_VAR, 2)'], {}), '(l_rec_out, (N_BATCH_VAR, MAX_SEQ_LEN_VAR, 2))\n', (7260, 7306), False, 'import lasagne, theano, numpy as np, logging\n'), ((7934, 7959), 'theano.tensor.matrix', 'T.matrix', (['"""target_output"""'], {}), "('target_output')\n", (7942, 7959), True, 'from theano import tensor as T\n'), ((7979, 7995), 'theano.tensor.matrix', 'T.matrix', (['"""mask"""'], {}), "('mask')\n", (7987, 7995), True, 'from theano import tensor as T\n'), ((8845, 8897), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_out'], {'trainable': '(True)'}), '(l_out, trainable=True)\n', (8874, 8897), False, 'import lasagne, theano, 
numpy as np, logging\n'), ((8906, 8931), 'logging.debug', 'logging.debug', (['all_params'], {}), '(all_params)\n', (8919, 8931), False, 'import lasagne, theano, numpy as np, logging\n'), ((9049, 9079), 'theano.tensor.grad', 'T.grad', (['cost_train', 'all_params'], {}), '(cost_train, all_params)\n', (9055, 9079), True, 'from theano import tensor as T\n'), ((9113, 9190), 'lasagne.updates.total_norm_constraint', 'lasagne.updates.total_norm_constraint', (['all_grads', 'self.norm'], {'return_norm': '(True)'}), '(all_grads, self.norm, return_norm=True)\n', (9150, 9190), False, 'import lasagne, theano, numpy as np, logging\n'), ((10046, 10084), 'logging.info', 'logging.info', (['"""Compiling functions..."""'], {}), "('Compiling functions...')\n", (10058, 10084), False, 'import lasagne, theano, numpy as np, logging\n'), ((10112, 10263), 'theano.function', 'theano.function', ([], {'inputs': '[l_in.input_var, target_output, l_mask.input_var, out_mask]', 'outputs': 'cost_train', 'updates': 'updates', 'allow_input_downcast': '(True)'}), '(inputs=[l_in.input_var, target_output, l_mask.input_var,\n out_mask], outputs=cost_train, updates=updates, allow_input_downcast=True)\n', (10127, 10263), False, 'import lasagne, theano, numpy as np, logging\n'), ((10808, 10899), 'theano.function', 'theano.function', (['[l_in.input_var, target_output, l_mask.input_var, out_mask]', 'cost_eval'], {}), '([l_in.input_var, target_output, l_mask.input_var, out_mask],\n cost_eval)\n', (10823, 10899), False, 'import lasagne, theano, numpy as np, logging\n'), ((10930, 11022), 'theano.function', 'theano.function', (['[l_in.input_var, target_output, l_mask.input_var, out_mask]', 'cost_train'], {}), '([l_in.input_var, target_output, l_mask.input_var, out_mask],\n cost_train)\n', (10945, 11022), False, 'import lasagne, theano, numpy as np, logging\n'), ((11090, 11121), 'logging.info', 'logging.info', (['"""Compiling done."""'], {}), "('Compiling done.')\n", (11102, 11121), False, 'import lasagne, theano, 
numpy as np, logging\n'), ((12078, 12132), 'lasagne.layers.get_all_param_values', 'lasagne.layers.get_all_param_values', (['self.output_layer'], {}), '(self.output_layer)\n', (12113, 12132), False, 'import lasagne, theano, numpy as np, logging\n'), ((12182, 12244), 'lasagne.layers.set_all_param_values', 'lasagne.layers.set_all_param_values', (['self.output_layer', 'values'], {}), '(self.output_layer, values)\n', (12217, 12244), False, 'import lasagne, theano, numpy as np, logging\n'), ((185, 199), 'numpy.eye', 'np.eye', (['*shape'], {}), '(*shape)\n', (191, 199), True, 'import lasagne, theano, numpy as np, logging\n'), ((943, 967), 'numpy.sum', 'np.sum', (['randvals'], {'axis': '(0)'}), '(randvals, axis=0)\n', (949, 967), True, 'import lasagne, theano, numpy as np, logging\n'), ((1547, 1581), 'numpy.array', 'np.array', (['self.lr'], {'dtype': '"""float32"""'}), "(self.lr, dtype='float32')\n", (1555, 1581), True, 'import lasagne, theano, numpy as np, logging\n'), ((2773, 2816), 'lasagne.layers.ReshapeLayer', 'lasagne.layers.ReshapeLayer', (['l_in', '(-1, nf)'], {}), '(l_in, (-1, nf))\n', (2800, 2816), False, 'import lasagne, theano, numpy as np, logging\n'), ((2931, 3018), 'lasagne.layers.DenseLayer', 'lasagne.layers.DenseLayer', (['l_reshape'], {'num_units': 'self.emb', 'nonlinearity': 'None', 'b': 'None'}), '(l_reshape, num_units=self.emb, nonlinearity=None,\n b=None)\n', (2956, 3018), False, 'import lasagne, theano, numpy as np, logging\n'), ((3121, 3197), 'lasagne.layers.ReshapeLayer', 'lasagne.layers.ReshapeLayer', (['l_emb', '(N_BATCH_VAR, MAX_SEQ_LEN_VAR, self.emb)'], {}), '(l_emb, (N_BATCH_VAR, MAX_SEQ_LEN_VAR, self.emb))\n', (3148, 3197), False, 'import lasagne, theano, numpy as np, logging\n'), ((3369, 3424), 'lasagne.layers.DropoutLayer', 'lasagne.layers.DropoutLayer', (['curlayer'], {'p': 'self.drates[0]'}), '(curlayer, p=self.drates[0])\n', (3396, 3424), False, 'import lasagne, theano, numpy as np, logging\n'), ((6505, 6564), 
'lasagne.layers.ConcatLayer', 'lasagne.layers.ConcatLayer', (['[l_forward, l_backward]'], {'axis': '(2)'}), '([l_forward, l_backward], axis=2)\n', (6531, 6564), False, 'import lasagne, theano, numpy as np, logging\n'), ((6860, 6917), 'lasagne.layers.ConcatLayer', 'lasagne.layers.ConcatLayer', (['[l_fbmerge, curlayer]'], {'axis': '(2)'}), '([l_fbmerge, curlayer], axis=2)\n', (6886, 6917), False, 'import lasagne, theano, numpy as np, logging\n'), ((8688, 8741), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_out'], {'deterministic': '(False)'}), '(l_out, deterministic=False)\n', (8713, 8741), False, 'import lasagne, theano, numpy as np, logging\n'), ((8768, 8820), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_out'], {'deterministic': '(True)'}), '(l_out, deterministic=True)\n', (8793, 8820), False, 'import lasagne, theano, numpy as np, logging\n'), ((11992, 12007), 'numpy.mean', 'np.mean', (['bcosts'], {}), '(bcosts)\n', (11999, 12007), True, 'import lasagne, theano, numpy as np, logging\n'), ((2401, 2438), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_in'], {}), '(l_in)\n', (2432, 2438), False, 'import lasagne, theano, numpy as np, logging\n'), ((2661, 2700), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_mask'], {}), '(l_mask)\n', (2692, 2700), False, 'import lasagne, theano, numpy as np, logging\n'), ((6727, 6787), 'lasagne.layers.DropoutLayer', 'lasagne.layers.DropoutLayer', (['l_fbmerge'], {'p': 'self.drates[level]'}), '(l_fbmerge, p=self.drates[level])\n', (6754, 6787), False, 'import lasagne, theano, numpy as np, logging\n'), ((7080, 7122), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_reshape'], {}), '(l_reshape)\n', (7111, 7122), False, 'import lasagne, theano, numpy as np, logging\n'), ((7348, 7386), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_out'], {}), '(l_out)\n', (7379, 7386), False, 'import lasagne, 
theano, numpy as np, logging\n'), ((8347, 8362), 'theano.tensor.sum', 'T.sum', (['out_mask'], {}), '(out_mask)\n', (8352, 8362), True, 'from theano import tensor as T\n'), ((627, 647), 'numpy.random.rand', 'np.random.rand', (['slen'], {}), '(slen)\n', (641, 647), True, 'import lasagne, theano, numpy as np, logging\n'), ((2015, 2043), 'lasagne.init.GlorotUniform', 'lasagne.init.GlorotUniform', ([], {}), '()\n', (2041, 2043), False, 'import lasagne, theano, numpy as np, logging\n'), ((2064, 2092), 'lasagne.init.GlorotUniform', 'lasagne.init.GlorotUniform', ([], {}), '()\n', (2090, 2092), False, 'import lasagne, theano, numpy as np, logging\n'), ((2159, 2187), 'lasagne.init.GlorotUniform', 'lasagne.init.GlorotUniform', ([], {}), '()\n', (2185, 2187), False, 'import lasagne, theano, numpy as np, logging\n'), ((2208, 2236), 'lasagne.init.GlorotUniform', 'lasagne.init.GlorotUniform', ([], {}), '()\n', (2234, 2236), False, 'import lasagne, theano, numpy as np, logging\n'), ((2252, 2285), 'lasagne.init.Constant', 'lasagne.init.Constant', (['self.fbias'], {}), '(self.fbias)\n', (2273, 2285), False, 'import lasagne, theano, numpy as np, logging\n'), ((2866, 2908), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_reshape'], {}), '(l_reshape)\n', (2897, 2908), False, 'import lasagne, theano, numpy as np, logging\n'), ((3060, 3098), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_emb'], {}), '(l_emb)\n', (3091, 3098), False, 'import lasagne, theano, numpy as np, logging\n'), ((3243, 3281), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_emb'], {}), '(l_emb)\n', (3274, 3281), False, 'import lasagne, theano, numpy as np, logging\n'), ((3471, 3513), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_in_drop'], {}), '(l_in_drop)\n', (3502, 3513), False, 'import lasagne, theano, numpy as np, logging\n'), ((6053, 6095), 'lasagne.layers.get_output_shape', 
'lasagne.layers.get_output_shape', (['l_forward'], {}), '(l_forward)\n', (6084, 6095), False, 'import lasagne, theano, numpy as np, logging\n'), ((6148, 6191), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_backward'], {}), '(l_backward)\n', (6179, 6191), False, 'import lasagne, theano, numpy as np, logging\n'), ((6614, 6656), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_fbmerge'], {}), '(l_fbmerge)\n', (6645, 6656), False, 'import lasagne, theano, numpy as np, logging\n'), ((8300, 8344), 'lasagne.objectives.binary_crossentropy', 'lasagne.objectives.binary_crossentropy', (['p', 't'], {}), '(p, t)\n', (8338, 8344), False, 'import lasagne, theano, numpy as np, logging\n'), ((9227, 9246), 'theano.tensor.isnan', 'T.isnan', (['total_norm'], {}), '(total_norm)\n', (9234, 9246), True, 'from theano import tensor as T\n'), ((9248, 9267), 'theano.tensor.isinf', 'T.isinf', (['total_norm'], {}), '(total_norm)\n', (9255, 9267), True, 'from theano import tensor as T\n'), ((10426, 10478), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_out'], {'deterministic': '(True)'}), '(l_out, deterministic=True)\n', (10451, 10478), False, 'import lasagne, theano, numpy as np, logging\n'), ((4510, 4549), 'lasagne.init.GlorotUniform', 'lasagne.init.GlorotUniform', ([], {'gain': '"""relu"""'}), "(gain='relu')\n", (4536, 4549), False, 'import lasagne, theano, numpy as np, logging\n'), ((4563, 4602), 'lasagne.init.GlorotUniform', 'lasagne.init.GlorotUniform', ([], {'gain': '"""relu"""'}), "(gain='relu')\n", (4589, 4602), False, 'import lasagne, theano, numpy as np, logging\n'), ((4939, 4978), 'lasagne.init.GlorotUniform', 'lasagne.init.GlorotUniform', ([], {'gain': '"""relu"""'}), "(gain='relu')\n", (4965, 4978), False, 'import lasagne, theano, numpy as np, logging\n'), ((4992, 5031), 'lasagne.init.GlorotUniform', 'lasagne.init.GlorotUniform', ([], {'gain': '"""relu"""'}), "(gain='relu')\n", (5018, 5031), False, 'import 
lasagne, theano, numpy as np, logging\n'), ((10667, 10732), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['[l_out, l_fbmerge]'], {'deterministic': '(True)'}), '([l_out, l_fbmerge], deterministic=True)\n', (10692, 10732), False, 'import lasagne, theano, numpy as np, logging\n'), ((4084, 4117), 'lasagne.nonlinearities.rectify', 'lasagne.nonlinearities.rectify', (['x'], {}), '(x)\n', (4114, 4117), False, 'import lasagne, theano, numpy as np, logging\n'), ((4198, 4206), 'theano.tensor.exp', 'T.exp', (['x'], {}), '(x)\n', (4203, 4206), True, 'from theano import tensor as T\n')] |
from __future__ import absolute_import
import numpy as np
from holoviews.element import Violin
from holoviews.operation.stats import univariate_kde
from .testplot import TestMPLPlot, mpl_renderer
class TestMPLViolinPlot(TestMPLPlot):

    def test_violin_simple(self):
        """A single-distribution Violin is drawn at position 0 with an empty label."""
        samples = np.random.rand(100)
        element = Violin(samples)
        mpl_plot = mpl_renderer.get_plot(element)
        plot_data, plot_opts, _axis_opts = mpl_plot.get_data(element, {}, {})
        self.assertEqual(plot_data[0][0], samples)
        self.assertEqual(plot_opts['positions'], [0])
        self.assertEqual(plot_opts['labels'], [''])

    def test_violin_multi(self):
        """A Violin grouped on a key dimension gets one position and label per group."""
        element = Violin(
            (np.random.randint(0, 2, 100), np.random.rand(100)), kdims=['A']
        ).sort()
        lo, hi = element.range(1)
        mpl_plot = mpl_renderer.get_plot(element)
        plot_data, plot_opts, _axis_opts = mpl_plot.get_data(element, {}, {})
        self.assertEqual(plot_data[0][0], element.select(A=0).dimension_values(1))
        self.assertEqual(plot_data[0][1], element.select(A=1).dimension_values(1))
        self.assertEqual(plot_opts['positions'], [0, 1])
        self.assertEqual(plot_opts['labels'], ['0', '1'])
| [
"numpy.random.rand",
"holoviews.element.Violin",
"numpy.random.randint"
] | [((291, 310), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (305, 310), True, 'import numpy as np\n'), ((328, 342), 'holoviews.element.Violin', 'Violin', (['values'], {}), '(values)\n', (334, 342), False, 'from holoviews.element import Violin\n'), ((653, 681), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (670, 681), True, 'import numpy as np\n'), ((683, 702), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (697, 702), True, 'import numpy as np\n')] |
## 1. Introduction ##
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Fit OLS coefficients via the normal equations: b = (X'X)^{-1} X'y.
data = pd.read_csv('AmesHousing.txt', delimiter="\t")
# First 1460 rows form the training split, the remainder the test split.
train = data[0:1460]
test = data[1460:]
features = [
    'Wood Deck SF',
    'Fireplaces',
    'Full Bath',
    '1st Flr SF',
    'Garage Area',
    'Gr Liv Area',
    'Overall Qual',
]
X = train[features]
y = train['SalePrice']
# Hoist the transpose (used twice), then apply the closed-form estimator.
Xt = np.transpose(X)
first_term = np.linalg.inv(np.dot(Xt, X))
second_term = np.dot(Xt, y)
ols_estimation = np.dot(first_term, second_term)
print(ols_estimation) | [
"pandas.read_csv",
"numpy.dot",
"numpy.transpose"
] | [((102, 148), 'pandas.read_csv', 'pd.read_csv', (['"""AmesHousing.txt"""'], {'delimiter': '"""\t"""'}), "('AmesHousing.txt', delimiter='\\t')\n", (113, 148), True, 'import pandas as pd\n'), ((540, 571), 'numpy.dot', 'np.dot', (['first_term', 'second_term'], {}), '(first_term, second_term)\n', (546, 571), True, 'import numpy as np\n'), ((490, 505), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (502, 505), True, 'import numpy as np\n'), ((412, 427), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (424, 427), True, 'import numpy as np\n')] |
import glob
import logging
import os
import sys
import traceback
from functools import partial, reduce
from multiprocessing import Pool, cpu_count
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.decomposition import KernelPCA
from tqdm import tqdm
from pimkl.analysis import significant_pathways
from pimkl.inducers import read_inducer
from pimkl.models import PIMKL
from pimkl.run import fold_generator, run_model
from pimkl.utils.preprocessing import Standardizer
# plotting setup: apply a colorblind-safe palette globally and pick the two
# colors used below to distinguish significant pathways from the rest.
sns.set_palette(sns.color_palette('colorblind'))
sns.set_style('white')
sns.set_context('talk')
# color for pathways flagged by significant_pathways(...)
significant_color = sns.color_palette('colorblind')[2]
# color for all remaining pathways
other_color = sns.color_palette('colorblind')[0]
def read_preprocessed(
    data_names, network_name, gene_sets_name, preprocess_dir
):
    """Load preprocessed data matrices and pathway inducers from disk.

    Expects files written by the preprocessing step, one
    ``data_{data_name}_{network_name}_{gene_sets_name}.csv`` per data name
    and one ``inducer_{data_name}_{network_name}_{gene_set}.csv.gz`` per
    gene set.

    Returns:
        Tuple ``(data, inducers, inducers_extended_names)``: ``data`` and
        ``inducers`` are dicts keyed by data name, and
        ``inducers_extended_names`` is the flattened list of
        ``"{data_name}-{gene_set}"`` strings.
    """
    inducers_filenames = {}
    inducers_names = {}
    inducers = {}
    data = {}
    for data_name in data_names:
        # data
        data[data_name] = pd.read_csv(
            os.path.join(
                preprocess_dir, '{}_{}_{}_{}.csv'.format(
                    'data', data_name, network_name, gene_sets_name
                )
            ),
            index_col=0
        )
        # inducers
        inducers_filenames[data_name] = glob.glob(
            os.path.join(
                preprocess_dir,
                '{}_{}_{}_*.csv.gz'.format(
                    'inducer', data_name, network_name
                    # * matches gene set (inducer) name which can contain "_"
                )
            )
        )
        # NOTE(review): [4:] drops the first four '_'-separated basename
        # tokens; with plain names ('inducer', data, network) that is one
        # token too many, so this presumably relies on data_name or
        # network_name containing an underscore — confirm against the
        # actual preprocessing filenames.
        inducers_names[data_name] = [
            '_'.join(os.path.basename(filename).split('.')[0].split('_')[4:])
            for filename in inducers_filenames[data_name]
        ]
        # extracted gene-set names must be unique per data name
        assert len(inducers_names[data_name]
                   ) == len(set(inducers_names[data_name]))
        # one inducer per file, sized to the number of features in the data
        inducers[data_name] = [
            read_inducer(filename, size=data[data_name].shape[1])
            for filename in inducers_filenames[data_name]
        ]
    inducers_extended_names = [
        '{}-{}'.format(data_name, name)
        for data_name, inducer_names in inducers_names.items()
        for name in inducer_names
    ]
    return data, inducers, inducers_extended_names
def analyse(
    data_names, network_name, gene_sets_name,
    preprocess_dir, output_dir,
    class_label_file,
    model_name='EasyMKL', lam=.2, k=5,
    number_of_folds=2, max_per_class=20,
    seed=0, max_processes=cpu_count()
):
    """Run PIMKL classification over several train/test folds and analyse results.

    For each fold the selected MKL model is trained and evaluated; per-fold
    AUCs, learned kernel (pathway) weights and trace factors are collected,
    written to ``output_dir`` as csv files and visualised as pdf box plots.

    Args:
        data_names: names of the preprocessed data matrices to combine.
        network_name: name of the interaction network used for induction.
        gene_sets_name: name of the gene-set collection used for induction.
        preprocess_dir: directory with preprocessed data and inducer files.
        output_dir: directory where csv and pdf outputs are written.
        class_label_file: csv readable with ``pd.read_csv(class_label_file,
            index_col=0, header=None, squeeze=True)``.
        model_name: one of 'EasyMKL', 'UMKLKNN', 'AverageMKL'.
        lam: EasyMKL regularization parameter.
        k: number of neighbours for UMKLKNN.
        number_of_folds: number of train/test splits.
        max_per_class: maximum number of samples per class per fold.
        seed: numpy random seed for reproducibility.
        max_processes: upper bound on worker processes (capped at
            ``number_of_folds``).

    Returns:
        The common filename core shared by the files written to disk.
    """
    # reproducible results
    np.random.seed(seed)
    # model parameters
    regularization_factor = False
    kernel_normalization = True
    estimator_parameters = {
        'trace_normalization': kernel_normalization,
        'regularization_factor': regularization_factor,
        'lam': lam
    }
    if model_name == 'UMKLKNN':
        mkl_parameters = {'trace_normalization': kernel_normalization, 'k': k}
    elif model_name == 'AverageMKL':
        mkl_parameters = {'trace_normalization': kernel_normalization}
    else:  # EasyMKL
        mkl_parameters = estimator_parameters
    # prepare: read data and inducers
    data, inducers, inducers_extended_names = read_preprocessed(
        data_names, network_name, gene_sets_name, preprocess_dir
    )
    # prepare: classification labels
    # NOTE: squeeze=True was removed in pandas 2.0; migrate to
    # pd.read_csv(...).squeeze("columns") when upgrading pandas.
    class_labels = pd.read_csv(
        class_label_file, index_col=0, header=None, squeeze=True
    )
    class_labels = class_labels[~pd.isna(class_labels)]
    # match samples in data and labels: intersect sample indices across all
    # data matrices, then with the labelled samples
    measurement_data_samples = sorted(
        list(
            reduce(
                lambda a, b: a & b,
                (set(data[data_name].index) for data_name in data_names)
            )
        )
    )
    samples = sorted(
        list(set(measurement_data_samples) & set(class_labels.index))
    )
    labels = (
        class_labels[samples].values if model_name == 'EasyMKL' else
        None  # TODO keep series, check model later
    )
    for data_name in data_names:
        data[data_name] = data[data_name].loc[samples].values
    # no more pandas labels
    # learn support vector and kernel weights for different data splits
    all_trace_factors = {}
    all_aucs = {}
    all_weights = {}
    # parallel
    processes = max_processes if max_processes < number_of_folds else number_of_folds  # noqa
    if (processes == 1) or (
        logging.root.level <= logging.DEBUG
    ):  # serial, allows easier debugging
        for fold_parameters in tqdm(
            fold_generator(number_of_folds, data, labels, max_per_class)
        ):
            try:
                aucs, weights, trace_factors = run_model(
                    inducers=inducers,
                    induction_name="induce_linear_kernel",
                    mkl_name=model_name,
                    estimator_name="EasyMKL",
                    # BUGFIX: this used to pass estimator_parameters, which
                    # silently discarded the UMKLKNN/AverageMKL settings in
                    # serial mode; now consistent with the parallel branch.
                    mkl_parameters=mkl_parameters,
                    estimator_parameters=estimator_parameters,
                    induction_parameters={},
                    inducers_extended_names=inducers_extended_names,
                    fold_parameters=fold_parameters
                )
                all_trace_factors[fold_parameters['fold']] = trace_factors
                all_aucs[fold_parameters['fold']] = aucs
                if isinstance(weights, list):
                    for label, weights_per_label in weights:
                        all_weights[(fold_parameters['fold'], label)
                                    ] = weights_per_label
                else:
                    all_weights[fold_parameters['fold']] = weights
            except TypeError:  # run returned None, unpacking failed
                logging.debug(
                    'fold {} not appended'.format(fold_parameters['fold'])
                )
                traceback.print_exc()
    else:  # parallel
        run_fold = partial(
            run_model, inducers, 'induce_linear_kernel', model_name, 'EasyMKL',
            mkl_parameters, estimator_parameters, {}, inducers_extended_names
        )
        logging.debug('fold runs start')
        with Pool(processes=processes) as pool:
            runner = pool.imap(
                run_fold,
                fold_generator(number_of_folds, data, labels, max_per_class)
            )
            logging.debug('lazy iterator created')
            # materialize inside the pool context so all workers finish
            results = list(runner)
        logging.debug('fold runs done')
        # failed folds come back as None and are skipped
        results = filter(lambda x: x is not None, results)  # a generator
        for i, (aucs, weights, trace_factors) in enumerate(results):
            all_trace_factors[i] = trace_factors
            all_aucs[i] = aucs
            if isinstance(weights, list):
                for label, weights_per_label in weights:
                    all_weights[(i, label)] = weights_per_label
            else:
                all_weights[i] = weights
    # preparing output: rows = folds, columns = metrics/pathways
    aucs_df = pd.DataFrame(all_aucs).T
    weights_df = pd.DataFrame(all_weights).T
    trace_factors_df = pd.DataFrame(all_trace_factors).T
    output_filename_part = '{}_{}_{}_cv={}_mc={}_{}'.format(
        '-'.join(data_names), network_name, gene_sets_name, number_of_folds,
        max_per_class, model_name
    )
    print(aucs_df)
    # write to file
    aucs_df.to_csv('{}/auc_{}.csv'.format(output_dir, output_filename_part))
    weights_df.to_csv(
        '{}/weights_{}.csv'.format(output_dir, output_filename_part)
    )
    trace_factors_df.to_csv(
        '{}/tracefactors_{}.csv'.format(output_dir, output_filename_part)
    )
    # visualize weights, ordered by descending median weight across folds
    inducers_ordering = weights_df.median().sort_values(ascending=False).index
    plt.close()
    colors = [
        significant_color if is_significant else other_color for is_significant
        in significant_pathways(weights_df[inducers_ordering])
    ]
    sns.boxplot(data=weights_df[inducers_ordering], palette=colors)
    plt.xlabel('Pathway')
    plt.ylabel('Weight')
    _ = plt.xticks(rotation=90, fontsize=8)
    # dashed line at the uniform weight 1/#kernels as a reference
    plt.axhline(y=1. / weights_df.shape[1], lw=1., ls='--', c='black')
    plt.savefig(
        '{}/weights_{}.pdf'.format(output_dir, output_filename_part),
        bbox_inches='tight'
    )
    plt.close()
    # visualize auc
    aucs_df_box = aucs_df.melt(value_name='AUC')
    sns.boxplot(data=aucs_df_box, y='AUC', x='variable')
    plt.savefig(
        '{}/aucs_{}.pdf'.format(output_dir, output_filename_part),
        bbox_inches='tight'
    )
    plt.close()
    print("Files *_{}.* written to disk".format(output_filename_part))
    return output_filename_part
def kpca(
    data_names,
    network_name,
    gene_sets_name,
    preprocess_dir,
    output_dir,
    class_label_file,
    weights_csv_file,
    fold,
):
    """Project samples with KernelPCA on a kernel built from learned weights.

    Reads the pathway weights csv produced by ``analyse`` (rows = folds,
    columns = pathways), builds the weighted-average kernel over all matched
    samples, fits a precomputed-kernel KernelPCA and saves a pairplot of the
    first three components, colored by class label, to ``output_dir``.

    Args:
        data_names: names of the preprocessed data matrices to combine.
        network_name: name of the interaction network used for induction.
        gene_sets_name: name of the gene-set collection used for induction.
        preprocess_dir: directory with preprocessed data and inducer files.
        output_dir: directory where the pdf output is written.
        class_label_file: csv with sample index and a single label column.
        weights_csv_file: weights csv written by ``analyse``.
        fold: fold index whose weights to use; -1 selects the median
            weights across folds.
    """
    # model parameters
    kernel_normalization = True
    # prepare: read data and inducers
    data, inducers, inducers_extended_names = read_preprocessed(
        data_names, network_name, gene_sets_name, preprocess_dir
    )
    # prepare: classification labels
    # if class_label_file:
    class_labels = pd.read_csv(
        class_label_file, index_col=0, header=None, squeeze=True
    )
    class_labels = class_labels[~pd.isna(class_labels)]
    # match samples in data and labels: intersect sample indices across all
    # data matrices, then with the labelled samples
    measurement_data_samples = sorted(
        list(
            reduce(
                lambda a, b: a & b,
                (set(data[data_name].index) for data_name in data_names)
            )
        )
    )
    samples = sorted(
        list(set(measurement_data_samples) & set(class_labels.index))
    )
    labels = (class_labels[samples].values)
    for data_name in data_names:
        data[data_name] = data[data_name].loc[samples].values
    # read learned weight to compute final kernel on all data
    weights = pd.read_csv(weights_csv_file, index_col=0)
    kpca_basename = os.path.basename(weights_csv_file).split('.')[0]
    if fold == -1:
        # aggregate across folds with the per-pathway median weight
        weights = weights.median()
        fold_name = 'median'
    else:
        # use the weights learned in one specific fold
        weights = weights.loc[fold, :]
        fold_name = 'fold{}'.format(fold)
    # kernel pca: weighted average of the induced kernels with fixed weights
    model_trained_weights = PIMKL(
        inducers=inducers,
        induction='induce_linear_kernel',
        mkl='WeightedAverageMKL',
        mkl_parameters={
            'trace_normalization': kernel_normalization,
            'kernels_weights': weights
        }
    )
    model_trained_weights.fit(data)
    optimal_kernel = model_trained_weights.predict(data)
    kernel_pca = KernelPCA(kernel='precomputed'
                           ).fit(Standardizer().apply(optimal_kernel))
    transformed_data = kernel_pca.transform(optimal_kernel)
    # # not really meaningful for KernelPCA
    # explained_variance = np.var(transformed_data, axis=0)
    # explained_variance_ratio = explained_variance / explained_variance.sum()
    # plt.plot(np.cumsum(explained_variance_ratio))
    # plt.xlabel('KernelPCA components')
    # plt.ylabel('Explained Variance Ratio')
    # plt.ylim((explained_variance_ratio[0], 1.0))
    # plt.savefig(
    #     '{}/kernel_pca_explained_variance_{}_{}.pdf'.format(
    #         output_dir, fold_name, kpca_basename
    #     ),
    #     bbox_inches='tight'
    # )
    # keep the first three components and label them KernelPC1..3
    components = 3
    kpca_columns = list(
        map(lambda index: 'KernelPC{}'.format(index + 1), range(components))
    )
    kernel_pca_signature = pd.DataFrame(
        transformed_data[:, :components], index=samples, columns=kpca_columns
    )
    kernel_pca_signature['class'] = labels
    plt.clf()
    sns.pairplot(
        kernel_pca_signature,
        kind='scatter',
        hue='class',
        vars=kpca_columns,
        plot_kws=dict(s=10, edgecolor='darkgrey', linewidth=1)
    )
    plt.legend()
    plt.savefig(
        '{}/kernel_pca_signature_{}_{}_{}.pdf'.format(
            output_dir, components, fold_name, kpca_basename
        ),
        bbox_inches='tight'
    )
# Root click command group; the subcommands are attached below via
# main.add_command(...). Comment instead of docstring on purpose: click
# surfaces docstrings as --help text.
@click.group()
def main():
    pass
@click.command(short_help='train and test many folds')
@click.option('-nd', '--data_name', 'data_names', required=True, multiple=True)
@click.argument('network_name', required=True)
@click.argument('gene_sets_name', required=True)
@click.argument(
    'preprocess_dir',
    required=True,
    type=click.Path(exists=True, file_okay=False)
)
@click.argument(
    'output_dir',
    required=True,
    type=click.Path(exists=True, file_okay=False, writable=True)
)
@click.argument(
    'class_label_file',
    required=True,
    type=click.Path(exists=True, file_okay=True)
)
@click.option(
    '--model_name',
    default='EasyMKL',
    type=click.Choice(['EasyMKL', 'UMKLKNN', 'AverageMKL'])
)
@click.argument('lam', default=0.2)
@click.argument('k', default=5)
@click.argument('number_of_folds', default=10)
@click.argument('max_per_class', default=20)
@click.argument('seed', default=0)
@click.argument('max_processes', default=1)
def run_performance_analysis(
    data_names, network_name, gene_sets_name, preprocess_dir, output_dir,
    class_label_file, model_name, lam, k, number_of_folds, max_per_class, seed,
    max_processes
):
    """
    Run classifications using pathway induced multiple kernel learning on
    preprocessed data and inducers on a number of train/test splits and analyse
    the resulting classification performance and learned pathway weights.
    The `class_label_file` should be readable with `pd.read_csv(
    class_label_file, index_col=0, header=None, squeeze=True)`
    """
    # analyse() writes all csv/pdf outputs to disk; its return value (the
    # shared output filename core) was previously bound to an unused local,
    # so it is simply discarded here.
    analyse(
        data_names, network_name, gene_sets_name, preprocess_dir, output_dir,
        class_label_file, model_name, lam, k, number_of_folds, max_per_class,
        seed, max_processes
    )
    return 0
@click.command(short_help='KernelPCA with given pathway weights')
@click.option('-nd', '--data_name', 'data_names', required=True, multiple=True)
@click.argument('network_name', required=True)
@click.argument('gene_sets_name', required=True)
@click.argument(
    'preprocess_dir',
    required=True,
    type=click.Path(exists=True, file_okay=False)
)
@click.argument(
    'output_dir',
    required=True,
    type=click.Path(exists=True, file_okay=False, writable=True)
)
@click.argument(
    'class_label_file',
    required=True,
    type=click.Path(exists=True, file_okay=True)
)
@click.argument(
    'weights_csv_file',
    required=True,
    type=click.Path(exists=True, file_okay=True)
)
@click.argument('fold', default=-1)
def run_kpca(
    data_names,
    network_name,
    gene_sets_name,
    preprocess_dir,
    output_dir,
    class_label_file,
    weights_csv_file,
    fold,
):
    """
    Following pathway weight computation during performance analysis, perform
    KernelPCA on a final kernel defined by weights of either a given fold or by
    default the median pathway weight.
    """
    # Thin CLI wrapper: all work happens in kpca(); fold == -1 (the default)
    # selects the median weights across folds.
    kpca(
        data_names,
        network_name,
        gene_sets_name,
        preprocess_dir,
        output_dir,
        class_label_file,
        weights_csv_file,
        fold,
    )
    return 0
# Attach the two subcommands to the CLI group.
main.add_command(run_performance_analysis)
main.add_command(run_kpca)
# Script entry point; the process exit code comes from the click group.
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| [
"numpy.random.seed",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"click.option",
"pimkl.analysis.significant_pathways",
"pimkl.inducers.read_inducer",
"click.Path",
"multiprocessing.cpu_count",
"pandas.DataFrame",
"pimkl.run.fold_generator",
"traceback.print_exc",
"matplotlib.pyplot.close",
... | [((606, 628), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (619, 628), True, 'import seaborn as sns\n'), ((629, 652), 'seaborn.set_context', 'sns.set_context', (['"""talk"""'], {}), "('talk')\n", (644, 652), True, 'import seaborn as sns\n'), ((11830, 11843), 'click.group', 'click.group', ([], {}), '()\n', (11841, 11843), False, 'import click\n'), ((11868, 11921), 'click.command', 'click.command', ([], {'short_help': '"""train and test many folds"""'}), "(short_help='train and test many folds')\n", (11881, 11921), False, 'import click\n'), ((11923, 12001), 'click.option', 'click.option', (['"""-nd"""', '"""--data_name"""', '"""data_names"""'], {'required': '(True)', 'multiple': '(True)'}), "('-nd', '--data_name', 'data_names', required=True, multiple=True)\n", (11935, 12001), False, 'import click\n'), ((12003, 12048), 'click.argument', 'click.argument', (['"""network_name"""'], {'required': '(True)'}), "('network_name', required=True)\n", (12017, 12048), False, 'import click\n'), ((12050, 12097), 'click.argument', 'click.argument', (['"""gene_sets_name"""'], {'required': '(True)'}), "('gene_sets_name', required=True)\n", (12064, 12097), False, 'import click\n'), ((12561, 12595), 'click.argument', 'click.argument', (['"""lam"""'], {'default': '(0.2)'}), "('lam', default=0.2)\n", (12575, 12595), False, 'import click\n'), ((12597, 12627), 'click.argument', 'click.argument', (['"""k"""'], {'default': '(5)'}), "('k', default=5)\n", (12611, 12627), False, 'import click\n'), ((12629, 12674), 'click.argument', 'click.argument', (['"""number_of_folds"""'], {'default': '(10)'}), "('number_of_folds', default=10)\n", (12643, 12674), False, 'import click\n'), ((12676, 12719), 'click.argument', 'click.argument', (['"""max_per_class"""'], {'default': '(20)'}), "('max_per_class', default=20)\n", (12690, 12719), False, 'import click\n'), ((12721, 12754), 'click.argument', 'click.argument', (['"""seed"""'], {'default': '(0)'}), "('seed', 
default=0)\n", (12735, 12754), False, 'import click\n'), ((12756, 12798), 'click.argument', 'click.argument', (['"""max_processes"""'], {'default': '(1)'}), "('max_processes', default=1)\n", (12770, 12798), False, 'import click\n'), ((13624, 13688), 'click.command', 'click.command', ([], {'short_help': '"""KernelPCA with given pathway weights"""'}), "(short_help='KernelPCA with given pathway weights')\n", (13637, 13688), False, 'import click\n'), ((13690, 13768), 'click.option', 'click.option', (['"""-nd"""', '"""--data_name"""', '"""data_names"""'], {'required': '(True)', 'multiple': '(True)'}), "('-nd', '--data_name', 'data_names', required=True, multiple=True)\n", (13702, 13768), False, 'import click\n'), ((13770, 13815), 'click.argument', 'click.argument', (['"""network_name"""'], {'required': '(True)'}), "('network_name', required=True)\n", (13784, 13815), False, 'import click\n'), ((13817, 13864), 'click.argument', 'click.argument', (['"""gene_sets_name"""'], {'required': '(True)'}), "('gene_sets_name', required=True)\n", (13831, 13864), False, 'import click\n'), ((14319, 14353), 'click.argument', 'click.argument', (['"""fold"""'], {'default': '(-1)'}), "('fold', default=-1)\n", (14333, 14353), False, 'import click\n'), ((573, 604), 'seaborn.color_palette', 'sns.color_palette', (['"""colorblind"""'], {}), "('colorblind')\n", (590, 604), True, 'import seaborn as sns\n'), ((673, 704), 'seaborn.color_palette', 'sns.color_palette', (['"""colorblind"""'], {}), "('colorblind')\n", (690, 704), True, 'import seaborn as sns\n'), ((722, 753), 'seaborn.color_palette', 'sns.color_palette', (['"""colorblind"""'], {}), "('colorblind')\n", (739, 753), True, 'import seaborn as sns\n'), ((2505, 2516), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2514, 2516), False, 'from multiprocessing import Pool, cpu_count\n'), ((2551, 2571), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2565, 2571), True, 'import numpy as np\n'), ((3344, 3413), 
'pandas.read_csv', 'pd.read_csv', (['class_label_file'], {'index_col': '(0)', 'header': 'None', 'squeeze': '(True)'}), '(class_label_file, index_col=0, header=None, squeeze=True)\n', (3355, 3413), True, 'import pandas as pd\n'), ((7626, 7637), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7635, 7637), True, 'import matplotlib.pyplot as plt\n'), ((7807, 7870), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'weights_df[inducers_ordering]', 'palette': 'colors'}), '(data=weights_df[inducers_ordering], palette=colors)\n', (7818, 7870), True, 'import seaborn as sns\n'), ((7875, 7896), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pathway"""'], {}), "('Pathway')\n", (7885, 7896), True, 'import matplotlib.pyplot as plt\n'), ((7901, 7921), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Weight"""'], {}), "('Weight')\n", (7911, 7921), True, 'import matplotlib.pyplot as plt\n'), ((7930, 7965), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)', 'fontsize': '(8)'}), '(rotation=90, fontsize=8)\n', (7940, 7965), True, 'import matplotlib.pyplot as plt\n'), ((7970, 8038), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(1.0 / weights_df.shape[1])', 'lw': '(1.0)', 'ls': '"""--"""', 'c': '"""black"""'}), "(y=1.0 / weights_df.shape[1], lw=1.0, ls='--', c='black')\n", (7981, 8038), True, 'import matplotlib.pyplot as plt\n'), ((8163, 8174), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8172, 8174), True, 'import matplotlib.pyplot as plt\n'), ((8249, 8301), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'aucs_df_box', 'y': '"""AUC"""', 'x': '"""variable"""'}), "(data=aucs_df_box, y='AUC', x='variable')\n", (8260, 8301), True, 'import seaborn as sns\n'), ((8425, 8436), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8434, 8436), True, 'import matplotlib.pyplot as plt\n'), ((9014, 9083), 'pandas.read_csv', 'pd.read_csv', (['class_label_file'], {'index_col': '(0)', 'header': 'None', 'squeeze': '(True)'}), '(class_label_file, 
index_col=0, header=None, squeeze=True)\n', (9025, 9083), True, 'import pandas as pd\n'), ((9721, 9763), 'pandas.read_csv', 'pd.read_csv', (['weights_csv_file'], {'index_col': '(0)'}), '(weights_csv_file, index_col=0)\n', (9732, 9763), True, 'import pandas as pd\n'), ((10054, 10237), 'pimkl.models.PIMKL', 'PIMKL', ([], {'inducers': 'inducers', 'induction': '"""induce_linear_kernel"""', 'mkl': '"""WeightedAverageMKL"""', 'mkl_parameters': "{'trace_normalization': kernel_normalization, 'kernels_weights': weights}"}), "(inducers=inducers, induction='induce_linear_kernel', mkl=\n 'WeightedAverageMKL', mkl_parameters={'trace_normalization':\n kernel_normalization, 'kernels_weights': weights})\n", (10059, 10237), False, 'from pimkl.models import PIMKL\n'), ((11287, 11375), 'pandas.DataFrame', 'pd.DataFrame', (['transformed_data[:, :components]'], {'index': 'samples', 'columns': 'kpca_columns'}), '(transformed_data[:, :components], index=samples, columns=\n kpca_columns)\n', (11299, 11375), True, 'import pandas as pd\n'), ((11433, 11442), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11440, 11442), True, 'import matplotlib.pyplot as plt\n'), ((11636, 11648), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11646, 11648), True, 'import matplotlib.pyplot as plt\n'), ((5860, 6006), 'functools.partial', 'partial', (['run_model', 'inducers', '"""induce_linear_kernel"""', 'model_name', '"""EasyMKL"""', 'mkl_parameters', 'estimator_parameters', '{}', 'inducers_extended_names'], {}), "(run_model, inducers, 'induce_linear_kernel', model_name, 'EasyMKL',\n mkl_parameters, estimator_parameters, {}, inducers_extended_names)\n", (5867, 6006), False, 'from functools import partial, reduce\n'), ((6045, 6077), 'logging.debug', 'logging.debug', (['"""fold runs start"""'], {}), "('fold runs start')\n", (6058, 6077), False, 'import logging\n'), ((6889, 6911), 'pandas.DataFrame', 'pd.DataFrame', (['all_aucs'], {}), '(all_aucs)\n', (6901, 6911), True, 'import pandas as 
pd\n'), ((6931, 6956), 'pandas.DataFrame', 'pd.DataFrame', (['all_weights'], {}), '(all_weights)\n', (6943, 6956), True, 'import pandas as pd\n'), ((6982, 7013), 'pandas.DataFrame', 'pd.DataFrame', (['all_trace_factors'], {}), '(all_trace_factors)\n', (6994, 7013), True, 'import pandas as pd\n'), ((12165, 12205), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(False)'}), '(exists=True, file_okay=False)\n', (12175, 12205), False, 'import click\n'), ((12271, 12326), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(False)', 'writable': '(True)'}), '(exists=True, file_okay=False, writable=True)\n', (12281, 12326), False, 'import click\n'), ((12398, 12437), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(True)'}), '(exists=True, file_okay=True)\n', (12408, 12437), False, 'import click\n'), ((12507, 12557), 'click.Choice', 'click.Choice', (["['EasyMKL', 'UMKLKNN', 'AverageMKL']"], {}), "(['EasyMKL', 'UMKLKNN', 'AverageMKL'])\n", (12519, 12557), False, 'import click\n'), ((13932, 13972), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(False)'}), '(exists=True, file_okay=False)\n', (13942, 13972), False, 'import click\n'), ((14038, 14093), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(False)', 'writable': '(True)'}), '(exists=True, file_okay=False, writable=True)\n', (14048, 14093), False, 'import click\n'), ((14165, 14204), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(True)'}), '(exists=True, file_okay=True)\n', (14175, 14204), False, 'import click\n'), ((14276, 14315), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(True)'}), '(exists=True, file_okay=True)\n', (14286, 14315), False, 'import click\n'), ((1916, 1969), 'pimkl.inducers.read_inducer', 'read_inducer', (['filename'], {'size': 'data[data_name].shape[1]'}), '(filename, size=data[data_name].shape[1])\n', (1928, 1969), False, 'from pimkl.inducers import 
read_inducer\n'), ((3461, 3482), 'pandas.isna', 'pd.isna', (['class_labels'], {}), '(class_labels)\n', (3468, 3482), True, 'import pandas as pd\n'), ((4515, 4575), 'pimkl.run.fold_generator', 'fold_generator', (['number_of_folds', 'data', 'labels', 'max_per_class'], {}), '(number_of_folds, data, labels, max_per_class)\n', (4529, 4575), False, 'from pimkl.run import fold_generator, run_model\n'), ((6091, 6116), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'processes'}), '(processes=processes)\n', (6095, 6116), False, 'from multiprocessing import Pool, cpu_count\n'), ((6287, 6325), 'logging.debug', 'logging.debug', (['"""lazy iterator created"""'], {}), "('lazy iterator created')\n", (6300, 6325), False, 'import logging\n'), ((6373, 6404), 'logging.debug', 'logging.debug', (['"""fold runs done"""'], {}), "('fold runs done')\n", (6386, 6404), False, 'import logging\n'), ((7745, 7796), 'pimkl.analysis.significant_pathways', 'significant_pathways', (['weights_df[inducers_ordering]'], {}), '(weights_df[inducers_ordering])\n', (7765, 7796), False, 'from pimkl.analysis import significant_pathways\n'), ((9131, 9152), 'pandas.isna', 'pd.isna', (['class_labels'], {}), '(class_labels)\n', (9138, 9152), True, 'import pandas as pd\n'), ((10412, 10443), 'sklearn.decomposition.KernelPCA', 'KernelPCA', ([], {'kernel': '"""precomputed"""'}), "(kernel='precomputed')\n", (10421, 10443), False, 'from sklearn.decomposition import KernelPCA\n'), ((4651, 4970), 'pimkl.run.run_model', 'run_model', ([], {'inducers': 'inducers', 'induction_name': '"""induce_linear_kernel"""', 'mkl_name': 'model_name', 'estimator_name': '"""EasyMKL"""', 'mkl_parameters': 'estimator_parameters', 'estimator_parameters': 'estimator_parameters', 'induction_parameters': '{}', 'inducers_extended_names': 'inducers_extended_names', 'fold_parameters': 'fold_parameters'}), "(inducers=inducers, induction_name='induce_linear_kernel',\n mkl_name=model_name, estimator_name='EasyMKL', mkl_parameters=\n 
estimator_parameters, estimator_parameters=estimator_parameters,\n induction_parameters={}, inducers_extended_names=\n inducers_extended_names, fold_parameters=fold_parameters)\n", (4660, 4970), False, 'from pimkl.run import fold_generator, run_model\n'), ((6200, 6260), 'pimkl.run.fold_generator', 'fold_generator', (['number_of_folds', 'data', 'labels', 'max_per_class'], {}), '(number_of_folds, data, labels, max_per_class)\n', (6214, 6260), False, 'from pimkl.run import fold_generator, run_model\n'), ((9784, 9818), 'os.path.basename', 'os.path.basename', (['weights_csv_file'], {}), '(weights_csv_file)\n', (9800, 9818), False, 'import os\n'), ((10476, 10490), 'pimkl.utils.preprocessing.Standardizer', 'Standardizer', ([], {}), '()\n', (10488, 10490), False, 'from pimkl.utils.preprocessing import Standardizer\n'), ((5797, 5818), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5816, 5818), False, 'import traceback\n'), ((1642, 1668), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1658, 1668), False, 'import os\n')] |
#!/usr/bin/env python
# coding: utf-8
import dash
from dash.exceptions import PreventUpdate
from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_core_components as dcc
from dash_canvas import DashCanvas
import os
from dash_canvas.utils import (array_to_data_url, parse_jsonstring, parse_jsonstring_rectangle)
from skimage import io, color, img_as_ubyte
import io
import cv2
import numpy as np
import base64
from base64 import decodestring
from zipfile import ZipFile
from urllib.parse import quote as urlquote
from flask import Flask, send_from_directory
import plotly.express as px
import json
import glob
import dash_daq as daq
import shutil
# Mutable module-level state: the most recently uploaded (and possibly
# rotated) image, shared across the Dash callbacks below.
globalImage = ""
# Staging directory for cropped ROIs and the zip archives served for download.
static_image_route = './static/'
# exist_ok avoids the check-then-create race of the original exists()/makedirs() pair.
os.makedirs(static_image_route, exist_ok=True)

server = Flask(__name__)
app = dash.Dash(server=server)

@server.route("/download/<path:path>")
def download(path):
    """Serve a file from the static directory as a browser download attachment."""
    return send_from_directory(static_image_route, path, as_attachment=True)

# Callbacks reference components that are only created dynamically by other
# callbacks, so disable Dash's unknown-component validation.  The original
# set this twice (dict interface and attribute); one assignment suffices.
app.config.suppress_callback_exceptions = True

canvas_width = 500
columns = ['type', 'width', 'height', 'scaleX', 'strokeWidth', 'path']
# Top-level page layout: a fixed right-hand "Options" side panel plus the
# central upload/preview area.  Many children (sideNav, GridValues, the two
# output Divs, the Img placeholders) start empty and are filled by callbacks.
app.layout = html.Div([
    # --- right-hand options panel (fixed position) ---
    html.Div([html.H1('Options',style={'font-family':'Times New Roman',
         'font-size': '25px'}),
    html.Div([
        # Master mode switch: manual annotation vs automatic thresholding.
        daq.ToggleSwitch(
            id='selectionMode',
            label='Manual Automatic',
            labelPosition='bottom'
        )
    ],style={'margin':'5px'}),
    # Placeholders populated by the mode-specific callbacks below.
    html.Div(id='sideNav'),
    html.Div(id='sideNavOptions'),
    html.Div(id='GridValues'),
    html.Button('Save ROI', id='button', style={'display':'block'
                                ,'position':'absolute',
                                'font-size': '16px',
                                'padding': '8px 12px',
                                'border-radius': '4px',
                                'text-align': 'center',
                                'align':'center',
                                'color':'black',
                                'margin':'25px',
                                'font-family':'Times New Roman',
                                'textAlign':'center'}),
    # Download-link placeholders for the automatic and grid pipelines.
    html.Div(id='AutoDownload'),
    html.Div(id='GridDownload'),
    html.Div([
        # Rotation knob (degrees, -3..3); hidden until manual mode is active.
        daq.Knob(
            id='my-knob',
            size=80,
            min=-3,
            max=3,
            value=0,
            className='dark-theme-control'
        ),
        html.Div(id='knob-output')
    ],id='knob', style={'display':'None'}),
    ],style={'right':5, 'position':'fixed','top':5, 'width':'10%','height':'85%', 'background-color':'#3b6466'}),
    # --- central content: headings, upload widget and image previews ---
    html.Div([
    html.Hr(),
    html.H1('NDSU Precision Ag Group'),
    html.H1('Small Grain Plots Segmentation Tool'),
    html.H4('A faster Dataset Creation Tool '),
    html.Hr(),
    html.H2('Upload Image below'),
    html.Div([
        dcc.Upload(
            id='upload-image',
            children=html.Div([
                'Drag and Drop or ',
                html.A('Select color image')
            ]),
            style={
                'width': '100%',
                'height': '300px',
                'lineHeight': '300px',
                'borderWidth': '1px',
                'borderStyle': 'dashed',
                'borderRadius': '5px',
                'textAlign': 'center',
                'margin': '10px',
                'margin-bottom':'200px'
            },
            # Single-file upload only.
            multiple=False
        ),
        html.Br(),
        html.Div([
            html.Div(id='output-image-upload', style={'display': 'relative',
                                        'position':'center',
                                        'align':'center',
                                        'margin-left': 'auto',
                                        'margin-right': 'auto',
                                        'padding-left': '40px',
                                        'padding-right': '40px',
                                        'padding-topg': '25px',
                                        'box-shadow': '0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19)'}),
            html.Div(id='output-image-uploadCorrection'),
            html.Div(id='output-image-uploadGrid')
            ],style={'height':'100', 'width':'100'}),
        # Image placeholders: 'generatl' shows the raw upload, 'operation'
        # shows the processed result of the automatic pipeline.
        html.Img(id='Output-generatl_image', style={'float':'left','margin-left':'None', 'left':25,'position': 'absolute', 'left': '50%', 'margin-bottom':'20px'}),# 'transform': 'translate(-50%, 10%)'}),#,'height':'510px', 'width':'920px'}),
        html.Img(id='Output-operation_image', style={'float':'left','margin-left':'None', 'left':25,'position': 'relative', 'left': '50%','height':'510px', 'width':'920px', 'transform': 'translate(-50%, 10%)','margin-bottom':'20px'}),
    ], style={'textAlign': 'center','display': 'block', 'marginLeft': 'auto', 'marginRight': 'auto', 'width': '32.5%','backgroundImage': 'url(https://www.pexels.com/photo/scenic-view-of-agricultural-field-against-sky-during-sunset-325944/)'}, className="five columns"),
    ], className="five columns", style={'height':'100', 'width':'100'}),
], style={'textAlign': 'center','background-color': 'rgb(87, 95, 110)','height':'75%','color': 'white'})
@app.callback([Output('output-image-upload', 'children'),
               Output('upload-image','style'),
               Output('button','style'),
               Output('Output-generatl_image','src'),
               Output('Output-operation_image','style'),
               Output('knob','style')],
              [Input('upload-image', 'contents'), Input('selectionMode','value')])
def update_output_div(list_of_contents, opt):
    """Route an uploaded image into manual or automatic mode.

    Manual mode (opt falsy): decode the base64 upload, cache it in the
    module-level ``globalImage``, and render an annotatable plotly Graph
    plus the mode toggles.  Automatic mode (opt truthy): hide the upload
    widget and let automatic_segOutput drive the preview instead.

    NOTE(review): when ``list_of_contents`` is None in either branch the
    function implicitly returns None for a six-output callback — Dash will
    raise; presumably the initial no-upload call is expected to fail quietly.
    """
    global globalImage
    if opt == False or opt == None:
        if list_of_contents is not None:
            # Upload arrives as a data URL; strip the "data:...;base64," prefix.
            MainImage = list_of_contents.encode("utf8").split(b";base64,")[1]
            IMG = io.BytesIO()
            IMG.write(base64.b64decode(MainImage))
            IMG.seek(0)
            i = np.asarray(bytearray(IMG.read()), dtype=np.uint8)
            i = cv2.imdecode(i, cv2.IMREAD_COLOR)
            i = cv2.cvtColor(i, cv2.COLOR_RGB2BGR)
            # Cache for later callbacks (rotation, grid drawing).
            globalImage = i
            figu = px.imshow(i, width=920, height=510)
            figu.update_layout(dragmode="drawrect")
            figu.update_layout(coloraxis_showscale=False)
            figu.update_xaxes(showticklabels=False)
            figu.update_yaxes(showticklabels=False)
            return html.Div([
                daq.ToggleSwitch(
                    id='Semi-Manual',
                    label='Manual and Semi-Manual crop',
                    labelPosition='bottom'
                ),
                # Graph with shape-drawing tools; rectangles become ROIs.
                dcc.Graph(id='multiAnnotation',figure=figu,
                    config={
                        "modeBarButtonsToAdd": [
                            "drawline",
                            "drawopenpath",
                            "drawclosedpath",
                            "drawcircle",
                            "drawrect",
                            "eraseshape",
                        ]
                    }, style={'text-align': 'center', 'position': 'absolute', 'left': '50%', 'transform': 'translate(-50%, 10%)','height':'510px', 'width':'920px'}),
                daq.ToggleSwitch(
                    id='angleCorrection',
                    label='Correct Angle',
                    labelPosition='bottom'
                ),
            ], style={'display':'relative','left':0,'margin-right':'50px'}) , {'display':'None'}, {'display':'block'
                            ,'position':'relative',
                            'font-size': '16px',
                            'padding': '8px 12px',
                            'border-radius': '4px',
                            'text-align': 'center',
                            'align':'center',
                            'color':'black',
                            'font-family':'Times New Roman',
                            'textAlign':'center'}, None, {'display':'None'}, {'display':'block','filter':' drop-shadow(-10px 10px 4px #4a4a49)','color':'white'}
    else:
        if list_of_contents is not None:
            # Automatic mode: no annotation UI; show the operation image slot.
            return None, {'display':'None'}, {'display':'block',
                                'position':'relative',
                                'font-size': '16px',
                                'padding': '8px 12px',
                                'border-radius': '4px',
                                'text-align': 'center',
                                'align':'center',
                                'color':'black',
                                'font-family':'Times New Roman',
                                'textAlign':'center'}, None , {'float':'left','margin-left':'None', 'left':25,'position': 'relative', 'left': '50%','height':'510px', 'width':'920px','transform': 'translate(-50%, 10%)'},{'display':'None'}
# Accumulated drawn-shape JSON strings (one entry per distinct shape set)
# and a module-level selection counter read by the callbacks.
dataArray=[]
selection = 0
def generateRIOs(roiArray, i):
    """Crop every drawn rectangle out of image *i* and zip the crops.

    ``roiArray`` holds JSON-encoded lists of plotly shape dicts (with
    ``x0``/``y0``/``x1``/``y1`` pixel corners).  Each rectangle is cropped
    once, written to ``static_image_route`` as ``<n>_cropped.png``, and all
    PNGs in that directory are bundled into ``cropped.zip``.

    Bug fixed: the original iterated ``string.items()`` and re-cropped the
    same rectangle once per key of the shape dict, writing duplicate files.
    """
    count = 0
    for coordinates in roiArray:
        for shape in json.loads(coordinates):
            # Plotly shape corners are floats; truncate to pixel indices.
            x, y, x1, y1 = int(shape['x0']), int(shape['y0']), int(shape['x1']), int(shape['y1'])
            ROI = cv2.cvtColor(i[y:y1, x:x1], cv2.COLOR_BGR2RGB)
            count += 1
            cv2.imwrite(static_image_route + str(count) + '_cropped.png', cv2.cvtColor(ROI, cv2.COLOR_RGB2BGR))
    # Bundle everything currently staged (includes crops from earlier calls).
    imgs = glob.glob(static_image_route + '/' + '*.png')
    with ZipFile(static_image_route + 'cropped.zip', 'w') as zipObj2:
        for image_files in imgs:
            zipObj2.write(image_files)
@app.callback(
    Output('sideNav', 'children'), #Output("annotations-data", "children"),
    [Input("multiAnnotation", "relayoutData"),
    Input('upload-image', 'contents'),
    Input('button', 'n_clicks')],
    prevent_initial_call=True,
)
def on_new_annotation(relayout_data, contents, generateSeg):#, cropMode, angle, angleCor):
    """Track drawn rectangles and, on 'Save ROI', crop and offer a download.

    New shape sets from the annotation graph are appended to the module-level
    ``dataArray``.  When the Save ROI button has been clicked, generateRIOs()
    writes the crops and this callback returns a Download link; otherwise it
    returns a counter button showing how many selections are stored.

    Bug fixed: ``selection`` was only assigned on some paths but read on the
    fall-through return, raising UnboundLocalError when no new shape was
    appended; it is now initialised at entry.
    """
    if relayout_data is not None:
        selection = len(dataArray)  # fix: always defined before the final return
        if "shapes" in relayout_data:
            # Decode the uploaded image so crops come from the full-res original.
            data = contents.encode("utf8").split(b";base64,")[1]
            img = io.BytesIO()
            imgNDVI = io.BytesIO()
            img.write(base64.b64decode(data))
            img.seek(0)
            i = np.asarray(bytearray(img.read()), dtype=np.uint8)
            i = cv2.imdecode(i, cv2.IMREAD_COLOR)
            # Deduplicate: only store shape sets we have not seen yet.
            if json.dumps(relayout_data["shapes"], indent=2) not in dataArray:
                dataArray.append(json.dumps(relayout_data["shapes"], indent=2))
                selection= len(dataArray)
            if generateSeg is not None:
                try:
                    generateRIOs(dataArray,i)
                except Exception: pass  # best-effort: a bad crop must not kill the UI
                selection = len(dataArray)
                return html.A(html.Button('Download',style={'display':'block'
                                ,'position':'relative',
                                'top': '55%','left': '10%',
                                'font-size': '16px',
                                'padding': '8px 12px',
                                'border-radius': '4px',
                                'text-align': 'center',
                                'align':'center',
                                'color':'black',
                                'font-family':'Times New Roman',
                                'textAlign':'center'}), href=os.path.join(static_image_route,'cropped.zip'), style={'position':'relative',
                                    'top': '55%','left': '0%',})
        return html.Button('No of selection '+str(selection),style={'display':'block'
                                ,'position':'relative',
                                'top': '45%','left': '10%',
                                'font-size': '16px',
                                'padding': '8px 12px',
                                'border-radius': '4px',
                                'text-align': 'center',
                                'align':'center',
                                'color':'black',
                                'font-family':'Times New Roman',
                                'textAlign':'center'})
    else:
        return dash.no_update
def angleCalibration(image, angle):
    """Rotate *image* about its center by *angle* degrees (positive = CCW).

    Uses an affine warp with linear interpolation; the output keeps the
    input's width and height, so rotated corners are clipped.
    """
    width_height = image.shape[1::-1]
    pivot = tuple(np.array(width_height) / 2)
    rotation = cv2.getRotationMatrix2D(pivot, angle, 1.0)
    return cv2.warpAffine(image, rotation, width_height, flags=cv2.INTER_LINEAR)
@app.callback(
    [Output('output-image-uploadCorrection', 'children'),Output('output-image-upload', 'style')], #Output("annotations-data", "children"),
    [Input('upload-image', 'contents'),
    Input('Semi-Manual','value'),
    Input('my-knob', 'value'),
    Input('angleCorrection', 'value')],
    prevent_initial_call=True,
)
def ImageCaliberation(im,cropMode, angle, angleCor):
    """Re-render the annotation graph with the knob's rotation applied.

    Only active when semi-manual crop mode AND angle correction are both on;
    the rotated image replaces the cached ``globalImage`` so later grid
    drawing uses the corrected orientation.

    NOTE(review): every path where any guard fails returns None implicitly
    for a two-output callback — Dash will raise on those updates.
    """
    global globalImage
    if im is not None:
        if cropMode == True:
            if angleCor is True:
                # Decode the original upload, then rotate by the knob value.
                data = im.encode("utf8").split(b";base64,")[1]
                img = io.BytesIO()
                imgNDVI = io.BytesIO()
                img.write(base64.b64decode(data))
                img.seek(0)
                i = np.asarray(bytearray(img.read()), dtype=np.uint8)
                i = cv2.imdecode(i, cv2.IMREAD_COLOR)
                i = cv2.cvtColor(i, cv2.COLOR_RGB2BGR)
                i = angleCalibration(i, angle)
                globalImage = i
                figu = px.imshow(i, width=920, height=510)
                figu.update_layout(dragmode="drawrect")
                # NOTE(review): trailing comma makes this line a 1-tuple
                # expression; harmless but presumably unintentional.
                figu.update_layout(margin=dict(l=0, r=0, t=0, b=0)),
                figu.update_xaxes(showticklabels=False)
                figu.update_yaxes(showticklabels=False)
                # Reuse id 'multiAnnotation' so shape callbacks keep working.
                return dcc.Graph(id='multiAnnotation',figure=figu,
                    config={
                        "modeBarButtonsToAdd": [
                            "drawline",
                            "drawopenpath",
                            "drawclosedpath",
                            "drawcircle",
                            "drawrect",
                            "eraseshape",
                        ]
                    }, style={'text-align': 'center',
                            'position': 'absolute',
                            'left': '50%',
                            'transform': 'translate(-50%, 10%)',
                            'height':'510px', 'width':'920px'}), {'display':'None'}
@app.callback(
    Output('sideNavOptions', 'children'), #Output("annotations-data", "children"),
    [Input('selectionMode','value'), Input('upload-image', 'contents')],
    prevent_initial_call=True,
)
def automatic_seg(opt_, image):
    """Build the automatic-mode side panel: RGB thresholds, mask toggles, area filter.

    Returns the control list only when automatic mode is on and an image is
    uploaded; otherwise falls through to an implicit None (no panel).
    The six numeric inputs are lower/upper bounds per RGB channel consumed
    by automatic_segOutput.
    """
    if opt_ == True:
        if image is not None:
            return [html.Div([
                dcc.Input(
                    id='RLower_value',
                    type='number',
                    placeholder="RL",
                    min=0, max=255, step=1,
                ),
                dcc.Input(
                    id='Rupper_value',
                    type='number',
                    placeholder="RU",
                    min=0, max=255, step=1,
                ),
                dcc.Input(
                    id='GLower_value',
                    type='number',
                    placeholder="GL",
                    min=0, max=255, step=1,
                ),
                dcc.Input(
                    id='Gupper_value',
                    type='number',
                    placeholder="GU",
                    min=0, max=255, step=1,
                ),
                dcc.Input(
                    id='BLower_value',
                    type='number',
                    placeholder="BL",
                    min=0, max=255, step=1,
                ),
                dcc.Input(
                    id='Bupper_value',
                    type='number',
                    placeholder="BU",
                    min=0, max=255, step=1,
                ),
                ]
                + [html.Div(id="out-all-types")]
            ),
            # Morphology toggles applied in automatic_segOutput.
            daq.ToggleSwitch(
                id='V_mask',
                label='Apply vertical Mask',
                labelPosition='bottom'
            ) ,
            daq.ToggleSwitch(
                id='H_mask',
                label='Apply horizontal Mask',
                labelPosition='bottom'
            ) ,
            daq.ToggleSwitch(
                id='Combined_mask',
                label='Combine V&H Masks',
                labelPosition='bottom'
            ) ,
            daq.ToggleSwitch(
                id='cont',
                label='Find Contours',
                labelPosition='bottom'
            ) ,
            # Contour area bounds (pixels) used to filter detected plots.
            dcc.Input(
                id='MinArea',
                type='number',
                placeholder="Min Area",
            ),
            dcc.Input(
                id='MaxArea',
                type='number',
                placeholder="Max Area",
            ),
            html.H5('Filter by Area')]
@app.callback(
    [Output('Output-operation_image', 'src'),Output('Output-generatl_image','style'),Output('AutoDownload','children')], #Output("annotations-data", "children"),
    [Input('selectionMode','value'),
    Input('upload-image', 'contents'),
    Input('RLower_value','value'),
    Input('Rupper_value','value'),
    Input('GLower_value','value'),
    Input('Gupper_value','value'),
    Input('BLower_value','value'),
    Input('Bupper_value','value'),
    Input('V_mask','value'),
    Input('H_mask','value'),
    Input('Combined_mask','value'),
    Input('cont','value'),
    Input('MinArea','value'),
    Input('MaxArea','value'),
    Input('button', 'n_clicks')
    ],
    prevent_initial_call=True,
)
def automatic_segOutput(opt_, image, RL, RU, GL, GU, BL, BU, V_mask, H_mask, C_mask, contour, MinA, MaxA, genRIO):
    """Automatic pipeline: RGB threshold -> Otsu -> directional morphology -> contours.

    Each stage updates ``FinalImage``, which is rendered as the preview, so
    the user sees the effect of each toggle in order.  When 'Save ROI' was
    clicked, area-filtered contour crops are zipped and a Download link is
    returned as the third output.

    NOTE(review): several stages read variables defined only by an earlier
    stage (``close`` needs the RGB thresholds set, ``vertical`` needs V_mask,
    ``horizontal_mask`` needs H_mask) — enabling a later toggle without its
    prerequisite raises UnboundLocalError.  Left as-is; fixing would need a
    defined stage contract.
    """
    if opt_ == True:
        if image is not None:
            # Decode the base64 upload into a BGR->RGB ndarray.
            MainImage = image.encode("utf8").split(b";base64,")[1]
            IMG = io.BytesIO()
            IMG.write(base64.b64decode(MainImage))
            IMG.seek(0)
            i = np.asarray(bytearray(IMG.read()), dtype=np.uint8)
            i = cv2.imdecode(i, cv2.IMREAD_COLOR)
            i = cv2.cvtColor(i, cv2.COLOR_RGB2BGR)
            FinalImage = i#np.zeros_like(i, np.uint8)
            mask = cv2.inRange(i, (0, 0, 0), (255, 255, 255))
            if RL is not None and RU is not None and GL is not None and GU is not None and BL is not None and BU is not None:
                # Keep only pixels inside the user-chosen RGB box ("green" = vegetation).
                mask = cv2.inRange(i, (RL, GL, BL), (RU, GU, BU))
                imask = mask>0
                green = np.zeros_like(i, np.uint8)
                green[imask] = i[imask]
                if green is not None:
                    FinalImage = green
                # Otsu binarisation then closing to consolidate plant regions.
                grey = cv2.cvtColor(green, cv2.COLOR_RGB2GRAY)
                blur = cv2.GaussianBlur(grey,(5,5),0)
                ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
                kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))
                close = cv2.morphologyEx(th3, cv2.MORPH_CLOSE, kernel, iterations=8)
            else:
                FinalImage = i
            if V_mask == True:
                # Tall 1x110 kernel keeps only vertically elongated structure (plot columns).
                vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,110))
                vertical_mask = cv2.morphologyEx(close, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
                vertical = cv2.dilate(vertical_mask, vertical_kernel, iterations=7)
                if vertical is not None:
                    FinalImage = vertical
            if H_mask == True:
                # Wide 5x1 kernel keeps horizontally elongated structure (plot rows).
                horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,1))
                horizontal_mask = cv2.morphologyEx(close, cv2.MORPH_OPEN, horizontal_kernel, iterations=1)
                horizontal_mask = cv2.dilate(horizontal_mask, horizontal_kernel, iterations=10)
                if horizontal_mask is not None:
                    FinalImage = horizontal_mask
            if C_mask == True:
                # Intersect: zero the horizontal mask wherever the vertical mask is zero.
                hmm = vertical==0
                horizontal_mask[hmm] = vertical[hmm]
                if horizontal_mask is not None:
                    FinalImage = horizontal_mask
            if contour == True and MinA is not None and MaxA is not None:
                if not os.path.exists(os.path.join(static_image_route,'data')):
                    os.makedirs(os.path.join(static_image_route,'data'))
                boundingBox = horizontal_mask
                cnts = cv2.findContours(boundingBox, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                # OpenCV 3 returns (img, contours, hierarchy); OpenCV 4 returns (contours, hierarchy).
                cnts = cnts[0] if len(cnts) == 2 else cnts[1]
                image_number = 0
                for c in cnts:
                    area = cv2.contourArea(c)
                    if area > MinA and area < MaxA:
                        # Save the crop and draw its box on the preview image.
                        x,y,w,h = cv2.boundingRect(c)
                        ROI = i[y:y+h, x:x+w]
                        cv2.imwrite(static_image_route+'/data/ROI_{}.png'.format(image_number), ROI)
                        cv2.rectangle(i, (x, y), (x + w, y + h), (36,255,12), 20)
                        image_number += 1
                if boundingBox is not None:
                    FinalImage = i
            if genRIO is not None:
                # Save ROI clicked: zip the crops and return a download link.
                shutil.make_archive(static_image_route+'Automatic_dataset', 'zip', os.path.join(static_image_route,'data'))
                return array_to_data_url(img_as_ubyte(FinalImage)),{'display':'None'}, html.A(html.Button('Download',style={'display':'block'
                                ,'position':'relative',
                                'font-size': '16px',
                                'padding': '8px 12px',
                                'border-radius': '4px',
                                'text-align': 'center',
                                'align':'center',
                                'color':'black',
                                'font-family':'Times New Roman',
                                'textAlign':'center'}), href=os.path.join(static_image_route,'Automatic_dataset.zip'), style={'position':'relative',
                                    })
            return array_to_data_url(img_as_ubyte(FinalImage)),{'display':'None'}, None
    else:
        # NOTE(review): single None for a three-output callback — Dash will raise.
        return None
@app.callback(
    [Output('GridValues','children')],
    [Input('Semi-Manual','value')],
    prevent_initial_call=True)
def inputMenu(condition):
    """Render the grid-count inputs once semi-manual crop mode is switched on.

    Returns a one-element list (single-output callback declared as a list)
    containing the four numeric grid fields and the 'Draw Grids' button,
    or None when the toggle is off.
    """
    if condition is not True:
        return None

    def _grid_field(field_id, hint, upper):
        # Shared factory for the integer spinner fields.
        return dcc.Input(
            id=field_id,
            type='number',
            placeholder=hint,
            min=0, max=upper, step=1,
        )

    controls = [
        _grid_field('verticalGrid', "Vertical Grids", 100),
        _grid_field('horizontalGrid', "Horizontal Grids", 100),
        _grid_field('subverticalGrid', "Sub-vertical Grids", 10),
        _grid_field('subhorizontalGrid', "Sub-horizontal Grids", 10),
        html.Button('Draw Grids', id='GridSubmit'),
    ]
    return [html.Div(controls, style={'margin':'5px'})]
# Shape-JSON accumulator for the grid pipeline (parallel to dataArray above).
darray= []
def gridD(roiArray,i,VG, HG, SVG, SHG):
    """Draw a VG x HG red grid on the first selected rectangle and crop its cells.

    Mutates *i* in place (``ROIint`` is a view into the image).  Cells are
    recovered by thresholding the pure-red grid lines, inverting, and taking
    external contours; each cell is saved under dataCROP/ and zipped.
    Returns (annotated ROI view, download link component).

    NOTE(review): the ``return`` sits inside the innermost loop, so only the
    first key of the first shape of the first entry is ever processed — the
    ``for key, value`` loop never iterates more than once, and SVG is unused.
    """
    if SHG is None:
        SHG = 1
    count=0
    for coordinates in roiArray:
        for string in json.loads(coordinates):
            for key, value in string.items():
                x, y, x1, y1 = int(string['x0']),int(string['y0']),int(string['x1']),int(string['y1'])
                # View, not copy: drawing below writes into the caller's image.
                ROIint = i[y:y1, x:x1]
                # Vertical grid lines, VG divisions across the width.
                for x in range(0, int(ROIint.shape[1]), int(ROIint.shape[1]/VG)):
                    cv2.line(ROIint,(x,0),(x,int(ROIint.shape[0])),(0,0,255),5)
                xDiv = int(int(ROIint.shape[0]/HG)/SHG)
                # Horizontal lines, plus sub-horizontal lines on the right half.
                for x in range(0, int(ROIint.shape[0]), int(ROIint.shape[0]/HG)):
                    cv2.line(ROIint,(0,x),(int(ROIint.shape[1]),x),(0,0,255),5)
                    try:
                        for y in range(0, x, xDiv):
                            cv2.line(ROIint,(int(ROIint.shape[1]/2),y),(int(ROIint.shape[1]),y),(0,0,255),5)
                    except Exception:
                        pass  # xDiv can be 0 -> range() raises; skip sub-lines
                count+=1
                saveROI = True
                if saveROI == True:
                    print(saveROI)
                    if not os.path.exists(os.path.join(static_image_route,'dataCROP')):
                        os.makedirs(os.path.join(static_image_route,'dataCROP'))
                    path_ = os.path.join(static_image_route,'dataCROP')
                    # Isolate the pure-red (BGR 0,0,254..255) grid lines just drawn.
                    mask = cv2.inRange(ROIint, (0, 0, 254), (0, 0,255))
                    imask = mask>0
                    blue = np.zeros_like(ROIint, np.uint8)
                    blue[imask] = ROIint[imask]
                    greyblue = cv2.cvtColor(blue, cv2.COLOR_RGB2GRAY)
                    blurblue = cv2.GaussianBlur(greyblue,(5,5),0)
                    ret4,th4 = cv2.threshold(blurblue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
                    # Invert so grid cells (not lines) become the foreground blobs.
                    th4 = 255 - th4
                    cnts = cv2.findContours(th4, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                    # OpenCV 3 vs 4 return-shape compatibility.
                    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
                    image_number = 0
                    for c in cnts:
                        area = cv2.contourArea(c)
                        x,y,w,h = cv2.boundingRect(c)
                        ROI = ROIint[y:y+h, x:x+w]
                        cv2.imwrite(os.path.join(path_,'ROI_{}.png'.format(image_number)), ROI)
                        image_number += 1
                    shutil.make_archive(static_image_route+'Grid_dataset', 'zip', os.path.join(static_image_route,'dataCROP'))
                    return ROIint, html.A(html.Button('Download',style={'display':'block'
                                ,'position':'relative',
                                'font-size': '16px',
                                'padding': '8px 12px',
                                'border-radius': '4px',
                                'text-align': 'center',
                                'align':'center',
                                'color':'black',
                                'font-family':'Times New Roman',
                                'textAlign':'center'}), href=os.path.join(static_image_route,'Grid_dataset.zip'), style={'position':'relative',
                                    })
@app.callback(
    [Output('output-image-uploadGrid','children'),Output('output-image-uploadCorrection', 'style'),Output('GridDownload','children')], #Output("annotations-data", "children"),
    [Input("multiAnnotation", "relayoutData"),
    Input('Semi-Manual','value'),
    Input('angleCorrection','value'),
    Input('verticalGrid','value'),
    Input('horizontalGrid','value'),
    Input('subverticalGrid','value'),
    Input('subhorizontalGrid','value'),
    ]
    ,prevent_initial_call=True,
)
def drawGrid(sel, opt1, opt2, VG, HG, SVG, SHG):
    """Overlay the requested grid on the selected rectangle and show the result.

    Requires semi-manual mode AND angle correction on, a drawn selection, and
    vertical/horizontal grid counts.  Delegates drawing/cropping to gridD()
    against the cached ``globalImage``.

    NOTE(review): any failed guard falls through to an implicit None for a
    three-output callback, and ``sel["shapes"]`` raises KeyError on relayout
    events that carry no shapes (e.g. zoom/pan).
    """
    if globalImage is not None:
        if opt1 is True:
            if opt2 is True:
                if sel is not None:
                    # Deduplicate shape sets, same scheme as on_new_annotation.
                    if json.dumps(sel["shapes"], indent=2) not in darray:
                        darray.append(json.dumps(sel["shapes"], indent=2))
                    if HG is not None and VG is not None:
                        igrph, saveButton = gridD(darray,globalImage, VG, HG, SVG, SHG)
                        figu = px.imshow(igrph, width=920, height=510)
                        figu.update_layout(dragmode="drawrect")
                        figu.update_layout(coloraxis_showscale=False)
                        figu.update_xaxes(showticklabels=False)
                        figu.update_yaxes(showticklabels=False)
                        return dcc.Graph(id='multiAnnotation2',figure=figu,
                            config={
                                "modeBarButtonsToAdd": [
                                    "drawline",
                                    "drawopenpath",
                                    "drawclosedpath",
                                    "drawcircle",
                                    "drawrect",
                                    "eraseshape",
                                ]
                            }, style={'text-align': 'center',
                                    'position': 'absolute',
                                    'left': '50%',
                                    'transform': 'translate(-50%, 10%)',
                                    'height':'510px', 'width':'920px'}), {'display':'None'}, saveButton #, html.Button('SAVE ROI', id='save_gridSeg')
############################################################################################
# Entry point: serve the Dash app on Flask's development server.
# debug=False avoids the reloader spawning a second process.
if __name__ == '__main__':
    app.run_server(debug=False)
| [
"cv2.GaussianBlur",
"dash_html_components.H2",
"cv2.imdecode",
"base64.b64decode",
"json.dumps",
"dash_core_components.Input",
"cv2.warpAffine",
"glob.glob",
"cv2.rectangle",
"cv2.inRange",
"os.path.join",
"cv2.getRotationMatrix2D",
"cv2.contourArea",
"numpy.zeros_like",
"dash.Dash",
"... | [((864, 879), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'from flask import Flask, send_from_directory\n'), ((887, 911), 'dash.Dash', 'dash.Dash', ([], {'server': 'server'}), '(server=server)\n', (896, 911), False, 'import dash\n'), ((779, 813), 'os.path.exists', 'os.path.exists', (['static_image_route'], {}), '(static_image_route)\n', (793, 813), False, 'import os\n'), ((820, 851), 'os.makedirs', 'os.makedirs', (['static_image_route'], {}), '(static_image_route)\n', (831, 851), False, 'import os\n'), ((987, 1052), 'flask.send_from_directory', 'send_from_directory', (['static_image_route', 'path'], {'as_attachment': '(True)'}), '(static_image_route, path, as_attachment=True)\n', (1006, 1052), False, 'from flask import Flask, send_from_directory\n'), ((11184, 11229), 'glob.glob', 'glob.glob', (["(static_image_route + '/' + '*.png')"], {}), "(static_image_route + '/' + '*.png')\n", (11193, 11229), False, 'import glob\n'), ((11392, 11421), 'dash.dependencies.Output', 'Output', (['"""sideNav"""', '"""children"""'], {}), "('sideNav', 'children')\n", (11398, 11421), False, 'from dash.dependencies import Input, Output, State\n'), ((14225, 14268), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'angle', '(1.0)'], {}), '(center, angle, 1.0)\n', (14248, 14268), False, 'import cv2\n'), ((14283, 14357), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rot_mat', 'image.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR'}), '(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n', (14297, 14357), False, 'import cv2\n'), ((16529, 16565), 'dash.dependencies.Output', 'Output', (['"""sideNavOptions"""', '"""children"""'], {}), "('sideNavOptions', 'children')\n", (16535, 16565), False, 'from dash.dependencies import Input, Output, State\n'), ((5812, 5853), 'dash.dependencies.Output', 'Output', (['"""output-image-upload"""', '"""children"""'], {}), "('output-image-upload', 'children')\n", (5818, 5853), False, 'from 
dash.dependencies import Input, Output, State\n'), ((5872, 5903), 'dash.dependencies.Output', 'Output', (['"""upload-image"""', '"""style"""'], {}), "('upload-image', 'style')\n", (5878, 5903), False, 'from dash.dependencies import Input, Output, State\n'), ((5921, 5946), 'dash.dependencies.Output', 'Output', (['"""button"""', '"""style"""'], {}), "('button', 'style')\n", (5927, 5946), False, 'from dash.dependencies import Input, Output, State\n'), ((5964, 6002), 'dash.dependencies.Output', 'Output', (['"""Output-generatl_image"""', '"""src"""'], {}), "('Output-generatl_image', 'src')\n", (5970, 6002), False, 'from dash.dependencies import Input, Output, State\n'), ((6020, 6061), 'dash.dependencies.Output', 'Output', (['"""Output-operation_image"""', '"""style"""'], {}), "('Output-operation_image', 'style')\n", (6026, 6061), False, 'from dash.dependencies import Input, Output, State\n'), ((6079, 6102), 'dash.dependencies.Output', 'Output', (['"""knob"""', '"""style"""'], {}), "('knob', 'style')\n", (6085, 6102), False, 'from dash.dependencies import Input, Output, State\n'), ((6120, 6153), 'dash.dependencies.Input', 'Input', (['"""upload-image"""', '"""contents"""'], {}), "('upload-image', 'contents')\n", (6125, 6153), False, 'from dash.dependencies import Input, Output, State\n'), ((6155, 6186), 'dash.dependencies.Input', 'Input', (['"""selectionMode"""', '"""value"""'], {}), "('selectionMode', 'value')\n", (6160, 6186), False, 'from dash.dependencies import Input, Output, State\n'), ((10810, 10833), 'json.loads', 'json.loads', (['coordinates'], {}), '(coordinates)\n', (10820, 10833), False, 'import json\n'), ((11001, 11047), 'cv2.cvtColor', 'cv2.cvtColor', (['i[y:y1, x:x1]', 'cv2.COLOR_BGR2RGB'], {}), '(i[y:y1, x:x1], cv2.COLOR_BGR2RGB)\n', (11013, 11047), False, 'import cv2\n'), ((11236, 11284), 'zipfile.ZipFile', 'ZipFile', (["(static_image_route + 'cropped.zip')", '"""w"""'], {}), "(static_image_route + 'cropped.zip', 'w')\n", (11243, 11284), False, 'from 
zipfile import ZipFile\n'), ((11471, 11511), 'dash.dependencies.Input', 'Input', (['"""multiAnnotation"""', '"""relayoutData"""'], {}), "('multiAnnotation', 'relayoutData')\n", (11476, 11511), False, 'from dash.dependencies import Input, Output, State\n'), ((11519, 11552), 'dash.dependencies.Input', 'Input', (['"""upload-image"""', '"""contents"""'], {}), "('upload-image', 'contents')\n", (11524, 11552), False, 'from dash.dependencies import Input, Output, State\n'), ((11561, 11588), 'dash.dependencies.Input', 'Input', (['"""button"""', '"""n_clicks"""'], {}), "('button', 'n_clicks')\n", (11566, 11588), False, 'from dash.dependencies import Input, Output, State\n'), ((14402, 14453), 'dash.dependencies.Output', 'Output', (['"""output-image-uploadCorrection"""', '"""children"""'], {}), "('output-image-uploadCorrection', 'children')\n", (14408, 14453), False, 'from dash.dependencies import Input, Output, State\n'), ((14454, 14492), 'dash.dependencies.Output', 'Output', (['"""output-image-upload"""', '"""style"""'], {}), "('output-image-upload', 'style')\n", (14460, 14492), False, 'from dash.dependencies import Input, Output, State\n'), ((14543, 14576), 'dash.dependencies.Input', 'Input', (['"""upload-image"""', '"""contents"""'], {}), "('upload-image', 'contents')\n", (14548, 14576), False, 'from dash.dependencies import Input, Output, State\n'), ((14583, 14612), 'dash.dependencies.Input', 'Input', (['"""Semi-Manual"""', '"""value"""'], {}), "('Semi-Manual', 'value')\n", (14588, 14612), False, 'from dash.dependencies import Input, Output, State\n'), ((14619, 14644), 'dash.dependencies.Input', 'Input', (['"""my-knob"""', '"""value"""'], {}), "('my-knob', 'value')\n", (14624, 14644), False, 'from dash.dependencies import Input, Output, State\n'), ((14652, 14685), 'dash.dependencies.Input', 'Input', (['"""angleCorrection"""', '"""value"""'], {}), "('angleCorrection', 'value')\n", (14657, 14685), False, 'from dash.dependencies import Input, Output, State\n'), ((16615, 
16646), 'dash.dependencies.Input', 'Input', (['"""selectionMode"""', '"""value"""'], {}), "('selectionMode', 'value')\n", (16620, 16646), False, 'from dash.dependencies import Input, Output, State\n'), ((16647, 16680), 'dash.dependencies.Input', 'Input', (['"""upload-image"""', '"""contents"""'], {}), "('upload-image', 'contents')\n", (16652, 16680), False, 'from dash.dependencies import Input, Output, State\n'), ((19901, 19940), 'dash.dependencies.Output', 'Output', (['"""Output-operation_image"""', '"""src"""'], {}), "('Output-operation_image', 'src')\n", (19907, 19940), False, 'from dash.dependencies import Input, Output, State\n'), ((19941, 19981), 'dash.dependencies.Output', 'Output', (['"""Output-generatl_image"""', '"""style"""'], {}), "('Output-generatl_image', 'style')\n", (19947, 19981), False, 'from dash.dependencies import Input, Output, State\n'), ((19981, 20015), 'dash.dependencies.Output', 'Output', (['"""AutoDownload"""', '"""children"""'], {}), "('AutoDownload', 'children')\n", (19987, 20015), False, 'from dash.dependencies import Input, Output, State\n'), ((20065, 20096), 'dash.dependencies.Input', 'Input', (['"""selectionMode"""', '"""value"""'], {}), "('selectionMode', 'value')\n", (20070, 20096), False, 'from dash.dependencies import Input, Output, State\n'), ((20102, 20135), 'dash.dependencies.Input', 'Input', (['"""upload-image"""', '"""contents"""'], {}), "('upload-image', 'contents')\n", (20107, 20135), False, 'from dash.dependencies import Input, Output, State\n'), ((20142, 20172), 'dash.dependencies.Input', 'Input', (['"""RLower_value"""', '"""value"""'], {}), "('RLower_value', 'value')\n", (20147, 20172), False, 'from dash.dependencies import Input, Output, State\n'), ((20178, 20208), 'dash.dependencies.Input', 'Input', (['"""Rupper_value"""', '"""value"""'], {}), "('Rupper_value', 'value')\n", (20183, 20208), False, 'from dash.dependencies import Input, Output, State\n'), ((20214, 20244), 'dash.dependencies.Input', 'Input', 
(['"""GLower_value"""', '"""value"""'], {}), "('GLower_value', 'value')\n", (20219, 20244), False, 'from dash.dependencies import Input, Output, State\n'), ((20250, 20280), 'dash.dependencies.Input', 'Input', (['"""Gupper_value"""', '"""value"""'], {}), "('Gupper_value', 'value')\n", (20255, 20280), False, 'from dash.dependencies import Input, Output, State\n'), ((20286, 20316), 'dash.dependencies.Input', 'Input', (['"""BLower_value"""', '"""value"""'], {}), "('BLower_value', 'value')\n", (20291, 20316), False, 'from dash.dependencies import Input, Output, State\n'), ((20322, 20352), 'dash.dependencies.Input', 'Input', (['"""Bupper_value"""', '"""value"""'], {}), "('Bupper_value', 'value')\n", (20327, 20352), False, 'from dash.dependencies import Input, Output, State\n'), ((20358, 20382), 'dash.dependencies.Input', 'Input', (['"""V_mask"""', '"""value"""'], {}), "('V_mask', 'value')\n", (20363, 20382), False, 'from dash.dependencies import Input, Output, State\n'), ((20388, 20412), 'dash.dependencies.Input', 'Input', (['"""H_mask"""', '"""value"""'], {}), "('H_mask', 'value')\n", (20393, 20412), False, 'from dash.dependencies import Input, Output, State\n'), ((20418, 20449), 'dash.dependencies.Input', 'Input', (['"""Combined_mask"""', '"""value"""'], {}), "('Combined_mask', 'value')\n", (20423, 20449), False, 'from dash.dependencies import Input, Output, State\n'), ((20455, 20477), 'dash.dependencies.Input', 'Input', (['"""cont"""', '"""value"""'], {}), "('cont', 'value')\n", (20460, 20477), False, 'from dash.dependencies import Input, Output, State\n'), ((20483, 20508), 'dash.dependencies.Input', 'Input', (['"""MinArea"""', '"""value"""'], {}), "('MinArea', 'value')\n", (20488, 20508), False, 'from dash.dependencies import Input, Output, State\n'), ((20514, 20539), 'dash.dependencies.Input', 'Input', (['"""MaxArea"""', '"""value"""'], {}), "('MaxArea', 'value')\n", (20519, 20539), False, 'from dash.dependencies import Input, Output, State\n'), ((20545, 20572), 
'dash.dependencies.Input', 'Input', (['"""button"""', '"""n_clicks"""'], {}), "('button', 'n_clicks')\n", (20550, 20572), False, 'from dash.dependencies import Input, Output, State\n'), ((25377, 25409), 'dash.dependencies.Output', 'Output', (['"""GridValues"""', '"""children"""'], {}), "('GridValues', 'children')\n", (25383, 25409), False, 'from dash.dependencies import Input, Output, State\n'), ((25417, 25446), 'dash.dependencies.Input', 'Input', (['"""Semi-Manual"""', '"""value"""'], {}), "('Semi-Manual', 'value')\n", (25422, 25446), False, 'from dash.dependencies import Input, Output, State\n'), ((26676, 26699), 'json.loads', 'json.loads', (['coordinates'], {}), '(coordinates)\n', (26686, 26699), False, 'import json\n'), ((27657, 27701), 'os.path.join', 'os.path.join', (['static_image_route', '"""dataCROP"""'], {}), "(static_image_route, 'dataCROP')\n", (27669, 27701), False, 'import os\n'), ((27717, 27762), 'cv2.inRange', 'cv2.inRange', (['ROIint', '(0, 0, 254)', '(0, 0, 255)'], {}), '(ROIint, (0, 0, 254), (0, 0, 255))\n', (27728, 27762), False, 'import cv2\n'), ((27802, 27833), 'numpy.zeros_like', 'np.zeros_like', (['ROIint', 'np.uint8'], {}), '(ROIint, np.uint8)\n', (27815, 27833), True, 'import numpy as np\n'), ((27891, 27929), 'cv2.cvtColor', 'cv2.cvtColor', (['blue', 'cv2.COLOR_RGB2GRAY'], {}), '(blue, cv2.COLOR_RGB2GRAY)\n', (27903, 27929), False, 'import cv2\n'), ((27950, 27987), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['greyblue', '(5, 5)', '(0)'], {}), '(greyblue, (5, 5), 0)\n', (27966, 27987), False, 'import cv2\n'), ((28005, 28073), 'cv2.threshold', 'cv2.threshold', (['blurblue', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blurblue, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (28018, 28073), False, 'import cv2\n'), ((28110, 28175), 'cv2.findContours', 'cv2.findContours', (['th4', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(th4, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (28126, 28175), False, 'import 
cv2\n'), ((29461, 29506), 'dash.dependencies.Output', 'Output', (['"""output-image-uploadGrid"""', '"""children"""'], {}), "('output-image-uploadGrid', 'children')\n", (29467, 29506), False, 'from dash.dependencies import Input, Output, State\n'), ((29506, 29554), 'dash.dependencies.Output', 'Output', (['"""output-image-uploadCorrection"""', '"""style"""'], {}), "('output-image-uploadCorrection', 'style')\n", (29512, 29554), False, 'from dash.dependencies import Input, Output, State\n'), ((29555, 29589), 'dash.dependencies.Output', 'Output', (['"""GridDownload"""', '"""children"""'], {}), "('GridDownload', 'children')\n", (29561, 29589), False, 'from dash.dependencies import Input, Output, State\n'), ((29639, 29679), 'dash.dependencies.Input', 'Input', (['"""multiAnnotation"""', '"""relayoutData"""'], {}), "('multiAnnotation', 'relayoutData')\n", (29644, 29679), False, 'from dash.dependencies import Input, Output, State\n'), ((29686, 29715), 'dash.dependencies.Input', 'Input', (['"""Semi-Manual"""', '"""value"""'], {}), "('Semi-Manual', 'value')\n", (29691, 29715), False, 'from dash.dependencies import Input, Output, State\n'), ((29721, 29754), 'dash.dependencies.Input', 'Input', (['"""angleCorrection"""', '"""value"""'], {}), "('angleCorrection', 'value')\n", (29726, 29754), False, 'from dash.dependencies import Input, Output, State\n'), ((29760, 29790), 'dash.dependencies.Input', 'Input', (['"""verticalGrid"""', '"""value"""'], {}), "('verticalGrid', 'value')\n", (29765, 29790), False, 'from dash.dependencies import Input, Output, State\n'), ((29796, 29828), 'dash.dependencies.Input', 'Input', (['"""horizontalGrid"""', '"""value"""'], {}), "('horizontalGrid', 'value')\n", (29801, 29828), False, 'from dash.dependencies import Input, Output, State\n'), ((29834, 29867), 'dash.dependencies.Input', 'Input', (['"""subverticalGrid"""', '"""value"""'], {}), "('subverticalGrid', 'value')\n", (29839, 29867), False, 'from dash.dependencies import Input, Output, State\n'), 
((29873, 29908), 'dash.dependencies.Input', 'Input', (['"""subhorizontalGrid"""', '"""value"""'], {}), "('subhorizontalGrid', 'value')\n", (29878, 29908), False, 'from dash.dependencies import Input, Output, State\n'), ((6438, 6450), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6448, 6450), False, 'import io\n'), ((6612, 6645), 'cv2.imdecode', 'cv2.imdecode', (['i', 'cv2.IMREAD_COLOR'], {}), '(i, cv2.IMREAD_COLOR)\n', (6624, 6645), False, 'import cv2\n'), ((6663, 6697), 'cv2.cvtColor', 'cv2.cvtColor', (['i', 'cv2.COLOR_RGB2BGR'], {}), '(i, cv2.COLOR_RGB2BGR)\n', (6675, 6697), False, 'import cv2\n'), ((6747, 6782), 'plotly.express.imshow', 'px.imshow', (['i'], {'width': '(920)', 'height': '(510)'}), '(i, width=920, height=510)\n', (6756, 6782), True, 'import plotly.express as px\n'), ((11133, 11169), 'cv2.cvtColor', 'cv2.cvtColor', (['ROI', 'cv2.COLOR_RGB2BGR'], {}), '(ROI, cv2.COLOR_RGB2BGR)\n', (11145, 11169), False, 'import cv2\n'), ((11886, 11898), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (11896, 11898), False, 'import io\n'), ((11922, 11934), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (11932, 11934), False, 'import io\n'), ((12091, 12124), 'cv2.imdecode', 'cv2.imdecode', (['i', 'cv2.IMREAD_COLOR'], {}), '(i, cv2.IMREAD_COLOR)\n', (12103, 12124), False, 'import cv2\n'), ((14176, 14204), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (14184, 14204), True, 'import numpy as np\n'), ((20877, 20889), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (20887, 20889), False, 'import io\n'), ((21051, 21084), 'cv2.imdecode', 'cv2.imdecode', (['i', 'cv2.IMREAD_COLOR'], {}), '(i, cv2.IMREAD_COLOR)\n', (21063, 21084), False, 'import cv2\n'), ((21102, 21136), 'cv2.cvtColor', 'cv2.cvtColor', (['i', 'cv2.COLOR_RGB2BGR'], {}), '(i, cv2.COLOR_RGB2BGR)\n', (21114, 21136), False, 'import cv2\n'), ((21212, 21254), 'cv2.inRange', 'cv2.inRange', (['i', '(0, 0, 0)', '(255, 255, 255)'], {}), '(i, (0, 0, 0), (255, 255, 255))\n', (21223, 21254), 
False, 'import cv2\n'), ((28311, 28329), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (28326, 28329), False, 'import cv2\n'), ((28353, 28372), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (28369, 28372), False, 'import cv2\n'), ((28600, 28644), 'os.path.join', 'os.path.join', (['static_image_route', '"""dataCROP"""'], {}), "(static_image_route, 'dataCROP')\n", (28612, 28644), False, 'import os\n'), ((28680, 28953), 'dash_html_components.Button', 'html.Button', (['"""Download"""'], {'style': "{'display': 'block', 'position': 'relative', 'font-size': '16px', 'padding':\n '8px 12px', 'border-radius': '4px', 'text-align': 'center', 'align':\n 'center', 'color': 'black', 'font-family': 'Times New Roman',\n 'textAlign': 'center'}"}), "('Download', style={'display': 'block', 'position': 'relative',\n 'font-size': '16px', 'padding': '8px 12px', 'border-radius': '4px',\n 'text-align': 'center', 'align': 'center', 'color': 'black',\n 'font-family': 'Times New Roman', 'textAlign': 'center'})\n", (28691, 28953), True, 'import dash_html_components as html\n'), ((1294, 1379), 'dash_html_components.H1', 'html.H1', (['"""Options"""'], {'style': "{'font-family': 'Times New Roman', 'font-size': '25px'}"}), "('Options', style={'font-family': 'Times New Roman', 'font-size':\n '25px'})\n", (1301, 1379), True, 'import dash_html_components as html\n'), ((1686, 1708), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""sideNav"""'}), "(id='sideNav')\n", (1694, 1708), True, 'import dash_html_components as html\n'), ((1719, 1748), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""sideNavOptions"""'}), "(id='sideNavOptions')\n", (1727, 1748), True, 'import dash_html_components as html\n'), ((1759, 1784), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""GridValues"""'}), "(id='GridValues')\n", (1767, 1784), True, 'import dash_html_components as html\n'), ((1797, 2101), 'dash_html_components.Button', 'html.Button', (['"""Save ROI"""'], {'id': 
'"""button"""', 'style': "{'display': 'block', 'position': 'absolute', 'font-size': '16px', 'padding':\n '8px 12px', 'border-radius': '4px', 'text-align': 'center', 'align':\n 'center', 'color': 'black', 'margin': '25px', 'font-family':\n 'Times New Roman', 'textAlign': 'center'}"}), "('Save ROI', id='button', style={'display': 'block', 'position':\n 'absolute', 'font-size': '16px', 'padding': '8px 12px', 'border-radius':\n '4px', 'text-align': 'center', 'align': 'center', 'color': 'black',\n 'margin': '25px', 'font-family': 'Times New Roman', 'textAlign': 'center'})\n", (1808, 2101), True, 'import dash_html_components as html\n'), ((2463, 2490), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""AutoDownload"""'}), "(id='AutoDownload')\n", (2471, 2490), True, 'import dash_html_components as html\n'), ((2501, 2528), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""GridDownload"""'}), "(id='GridDownload')\n", (2509, 2528), True, 'import dash_html_components as html\n'), ((3202, 3211), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (3209, 3211), True, 'import dash_html_components as html\n'), ((3218, 3252), 'dash_html_components.H1', 'html.H1', (['"""NDSU Precision Ag Group"""'], {}), "('NDSU Precision Ag Group')\n", (3225, 3252), True, 'import dash_html_components as html\n'), ((3259, 3305), 'dash_html_components.H1', 'html.H1', (['"""Small Grain Plots Segmentation Tool"""'], {}), "('Small Grain Plots Segmentation Tool')\n", (3266, 3305), True, 'import dash_html_components as html\n'), ((3312, 3354), 'dash_html_components.H4', 'html.H4', (['"""A faster Dataset Creation Tool """'], {}), "('A faster Dataset Creation Tool ')\n", (3319, 3354), True, 'import dash_html_components as html\n'), ((3361, 3370), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (3368, 3370), True, 'import dash_html_components as html\n'), ((3377, 3406), 'dash_html_components.H2', 'html.H2', (['"""Upload Image below"""'], {}), "('Upload Image below')\n", (3384, 3406), 
True, 'import dash_html_components as html\n'), ((6474, 6501), 'base64.b64decode', 'base64.b64decode', (['MainImage'], {}), '(MainImage)\n', (6490, 6501), False, 'import base64\n'), ((11958, 11980), 'base64.b64decode', 'base64.b64decode', (['data'], {}), '(data)\n', (11974, 11980), False, 'import base64\n'), ((12141, 12186), 'json.dumps', 'json.dumps', (["relayout_data['shapes']"], {'indent': '(2)'}), "(relayout_data['shapes'], indent=2)\n", (12151, 12186), False, 'import json\n'), ((14988, 15000), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (14998, 15000), False, 'import io\n'), ((15028, 15040), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (15038, 15040), False, 'import io\n'), ((15213, 15246), 'cv2.imdecode', 'cv2.imdecode', (['i', 'cv2.IMREAD_COLOR'], {}), '(i, cv2.IMREAD_COLOR)\n', (15225, 15246), False, 'import cv2\n'), ((15268, 15302), 'cv2.cvtColor', 'cv2.cvtColor', (['i', 'cv2.COLOR_RGB2BGR'], {}), '(i, cv2.COLOR_RGB2BGR)\n', (15280, 15302), False, 'import cv2\n'), ((15409, 15444), 'plotly.express.imshow', 'px.imshow', (['i'], {'width': '(920)', 'height': '(510)'}), '(i, width=920, height=510)\n', (15418, 15444), True, 'import plotly.express as px\n'), ((18649, 18736), 'dash_daq.ToggleSwitch', 'daq.ToggleSwitch', ([], {'id': '"""V_mask"""', 'label': '"""Apply vertical Mask"""', 'labelPosition': '"""bottom"""'}), "(id='V_mask', label='Apply vertical Mask', labelPosition=\n 'bottom')\n", (18665, 18736), True, 'import dash_daq as daq\n'), ((18829, 18918), 'dash_daq.ToggleSwitch', 'daq.ToggleSwitch', ([], {'id': '"""H_mask"""', 'label': '"""Apply horizontal Mask"""', 'labelPosition': '"""bottom"""'}), "(id='H_mask', label='Apply horizontal Mask', labelPosition=\n 'bottom')\n", (18845, 18918), True, 'import dash_daq as daq\n'), ((19011, 19102), 'dash_daq.ToggleSwitch', 'daq.ToggleSwitch', ([], {'id': '"""Combined_mask"""', 'label': '"""Combine V&H Masks"""', 'labelPosition': '"""bottom"""'}), "(id='Combined_mask', label='Combine V&H Masks',\n 
labelPosition='bottom')\n", (19027, 19102), True, 'import dash_daq as daq\n'), ((19196, 19270), 'dash_daq.ToggleSwitch', 'daq.ToggleSwitch', ([], {'id': '"""cont"""', 'label': '"""Find Contours"""', 'labelPosition': '"""bottom"""'}), "(id='cont', label='Find Contours', labelPosition='bottom')\n", (19212, 19270), True, 'import dash_daq as daq\n'), ((19384, 19446), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""MinArea"""', 'type': '"""number"""', 'placeholder': '"""Min Area"""'}), "(id='MinArea', type='number', placeholder='Min Area')\n", (19393, 19446), True, 'import dash_core_components as dcc\n'), ((19642, 19704), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""MaxArea"""', 'type': '"""number"""', 'placeholder': '"""Max Area"""'}), "(id='MaxArea', type='number', placeholder='Max Area')\n", (19651, 19704), True, 'import dash_core_components as dcc\n'), ((19850, 19875), 'dash_html_components.H5', 'html.H5', (['"""Filter by Area"""'], {}), "('Filter by Area')\n", (19857, 19875), True, 'import dash_html_components as html\n'), ((20913, 20940), 'base64.b64decode', 'base64.b64decode', (['MainImage'], {}), '(MainImage)\n', (20929, 20940), False, 'import base64\n'), ((21407, 21449), 'cv2.inRange', 'cv2.inRange', (['i', '(RL, GL, BL)', '(RU, GU, BU)'], {}), '(i, (RL, GL, BL), (RU, GU, BU))\n', (21418, 21449), False, 'import cv2\n'), ((21507, 21533), 'numpy.zeros_like', 'np.zeros_like', (['i', 'np.uint8'], {}), '(i, np.uint8)\n', (21520, 21533), True, 'import numpy as np\n'), ((22200, 22251), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(1, 110)'], {}), '(cv2.MORPH_RECT, (1, 110))\n', (22225, 22251), False, 'import cv2\n'), ((22284, 22354), 'cv2.morphologyEx', 'cv2.morphologyEx', (['close', 'cv2.MORPH_OPEN', 'vertical_kernel'], {'iterations': '(2)'}), '(close, cv2.MORPH_OPEN, vertical_kernel, iterations=2)\n', (22300, 22354), False, 'import cv2\n'), ((22383, 22439), 'cv2.dilate', 'cv2.dilate', (['vertical_mask', 
'vertical_kernel'], {'iterations': '(7)'}), '(vertical_mask, vertical_kernel, iterations=7)\n', (22393, 22439), False, 'import cv2\n'), ((22596, 22645), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 1)'], {}), '(cv2.MORPH_RECT, (5, 1))\n', (22621, 22645), False, 'import cv2\n'), ((22680, 22752), 'cv2.morphologyEx', 'cv2.morphologyEx', (['close', 'cv2.MORPH_OPEN', 'horizontal_kernel'], {'iterations': '(1)'}), '(close, cv2.MORPH_OPEN, horizontal_kernel, iterations=1)\n', (22696, 22752), False, 'import cv2\n'), ((22788, 22849), 'cv2.dilate', 'cv2.dilate', (['horizontal_mask', 'horizontal_kernel'], {'iterations': '(10)'}), '(horizontal_mask, horizontal_kernel, iterations=10)\n', (22798, 22849), False, 'import cv2\n'), ((23478, 23551), 'cv2.findContours', 'cv2.findContours', (['boundingBox', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(boundingBox, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (23494, 23551), False, 'import cv2\n'), ((27524, 27568), 'os.path.join', 'os.path.join', (['static_image_route', '"""dataCROP"""'], {}), "(static_image_route, 'dataCROP')\n", (27536, 27568), False, 'import os\n'), ((27595, 27639), 'os.path.join', 'os.path.join', (['static_image_route', '"""dataCROP"""'], {}), "(static_image_route, 'dataCROP')\n", (27607, 27639), False, 'import os\n'), ((29310, 29362), 'os.path.join', 'os.path.join', (['static_image_route', '"""Grid_dataset.zip"""'], {}), "(static_image_route, 'Grid_dataset.zip')\n", (29322, 29362), False, 'import os\n'), ((12239, 12284), 'json.dumps', 'json.dumps', (["relayout_data['shapes']"], {'indent': '(2)'}), "(relayout_data['shapes'], indent=2)\n", (12249, 12284), False, 'import json\n'), ((12554, 12856), 'dash_html_components.Button', 'html.Button', (['"""Download"""'], {'style': "{'display': 'block', 'position': 'relative', 'top': '55%', 'left': '10%',\n 'font-size': '16px', 'padding': '8px 12px', 'border-radius': '4px',\n 'text-align': 'center', 'align': 'center', 'color': 
'black',\n 'font-family': 'Times New Roman', 'textAlign': 'center'}"}), "('Download', style={'display': 'block', 'position': 'relative',\n 'top': '55%', 'left': '10%', 'font-size': '16px', 'padding': '8px 12px',\n 'border-radius': '4px', 'text-align': 'center', 'align': 'center',\n 'color': 'black', 'font-family': 'Times New Roman', 'textAlign': 'center'})\n", (12565, 12856), True, 'import dash_html_components as html\n'), ((15068, 15090), 'base64.b64decode', 'base64.b64decode', (['data'], {}), '(data)\n', (15084, 15090), False, 'import base64\n'), ((15710, 16035), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""multiAnnotation"""', 'figure': 'figu', 'config': "{'modeBarButtonsToAdd': ['drawline', 'drawopenpath', 'drawclosedpath',\n 'drawcircle', 'drawrect', 'eraseshape']}", 'style': "{'text-align': 'center', 'position': 'absolute', 'left': '50%', 'transform':\n 'translate(-50%, 10%)', 'height': '510px', 'width': '920px'}"}), "(id='multiAnnotation', figure=figu, config={'modeBarButtonsToAdd':\n ['drawline', 'drawopenpath', 'drawclosedpath', 'drawcircle', 'drawrect',\n 'eraseshape']}, style={'text-align': 'center', 'position': 'absolute',\n 'left': '50%', 'transform': 'translate(-50%, 10%)', 'height': '510px',\n 'width': '920px'})\n", (15719, 16035), True, 'import dash_core_components as dcc\n'), ((21682, 21721), 'cv2.cvtColor', 'cv2.cvtColor', (['green', 'cv2.COLOR_RGB2GRAY'], {}), '(green, cv2.COLOR_RGB2GRAY)\n', (21694, 21721), False, 'import cv2\n'), ((21750, 21783), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['grey', '(5, 5)', '(0)'], {}), '(grey, (5, 5), 0)\n', (21766, 21783), False, 'import cv2\n'), ((21813, 21877), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (21826, 21877), False, 'import cv2\n'), ((21903, 21952), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(2, 2)'], {}), '(cv2.MORPH_RECT, (2, 
2))\n', (21928, 21952), False, 'import cv2\n'), ((21981, 22041), 'cv2.morphologyEx', 'cv2.morphologyEx', (['th3', 'cv2.MORPH_CLOSE', 'kernel'], {'iterations': '(8)'}), '(th3, cv2.MORPH_CLOSE, kernel, iterations=8)\n', (21997, 22041), False, 'import cv2\n'), ((23709, 23727), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (23724, 23727), False, 'import cv2\n'), ((25262, 25286), 'skimage.img_as_ubyte', 'img_as_ubyte', (['FinalImage'], {}), '(FinalImage)\n', (25274, 25286), False, 'from skimage import io, color, img_as_ubyte\n'), ((25577, 25678), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""verticalGrid"""', 'type': '"""number"""', 'placeholder': '"""Vertical Grids"""', 'min': '(0)', 'max': '(100)', 'step': '(1)'}), "(id='verticalGrid', type='number', placeholder='Vertical Grids',\n min=0, max=100, step=1)\n", (25586, 25678), True, 'import dash_core_components as dcc\n'), ((25782, 25888), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""horizontalGrid"""', 'type': '"""number"""', 'placeholder': '"""Horizontal Grids"""', 'min': '(0)', 'max': '(100)', 'step': '(1)'}), "(id='horizontalGrid', type='number', placeholder=\n 'Horizontal Grids', min=0, max=100, step=1)\n", (25791, 25888), True, 'import dash_core_components as dcc\n'), ((25991, 26099), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""subverticalGrid"""', 'type': '"""number"""', 'placeholder': '"""Sub-vertical Grids"""', 'min': '(0)', 'max': '(10)', 'step': '(1)'}), "(id='subverticalGrid', type='number', placeholder=\n 'Sub-vertical Grids', min=0, max=10, step=1)\n", (26000, 26099), True, 'import dash_core_components as dcc\n'), ((26202, 26314), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""subhorizontalGrid"""', 'type': '"""number"""', 'placeholder': '"""Sub-horizontal Grids"""', 'min': '(0)', 'max': '(10)', 'step': '(1)'}), "(id='subhorizontalGrid', type='number', placeholder=\n 'Sub-horizontal Grids', min=0, max=10, step=1)\n", (26211, 26314), True, 
'import dash_core_components as dcc\n'), ((26417, 26459), 'dash_html_components.Button', 'html.Button', (['"""Draw Grids"""'], {'id': '"""GridSubmit"""'}), "('Draw Grids', id='GridSubmit')\n", (26428, 26459), True, 'import dash_html_components as html\n'), ((1458, 1550), 'dash_daq.ToggleSwitch', 'daq.ToggleSwitch', ([], {'id': '"""selectionMode"""', 'label': '"""Manual Automatic"""', 'labelPosition': '"""bottom"""'}), "(id='selectionMode', label='Manual Automatic',\n labelPosition='bottom')\n", (1474, 1550), True, 'import dash_daq as daq\n'), ((2583, 2675), 'dash_daq.Knob', 'daq.Knob', ([], {'id': '"""my-knob"""', 'size': '(80)', 'min': '(-3)', 'max': '(3)', 'value': '(0)', 'className': '"""dark-theme-control"""'}), "(id='my-knob', size=80, min=-3, max=3, value=0, className=\n 'dark-theme-control')\n", (2591, 2675), True, 'import dash_daq as daq\n'), ((2962, 2988), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""knob-output"""'}), "(id='knob-output')\n", (2970, 2988), True, 'import dash_html_components as html\n'), ((3966, 3975), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (3973, 3975), True, 'import dash_html_components as html\n'), ((4861, 5029), 'dash_html_components.Img', 'html.Img', ([], {'id': '"""Output-generatl_image"""', 'style': "{'float': 'left', 'margin-left': 'None', 'left': 25, 'position': 'absolute',\n 'left': '50%', 'margin-bottom': '20px'}"}), "(id='Output-generatl_image', style={'float': 'left', 'margin-left':\n 'None', 'left': 25, 'position': 'absolute', 'left': '50%',\n 'margin-bottom': '20px'})\n", (4869, 5029), True, 'import dash_html_components as html\n'), ((5100, 5347), 'dash_html_components.Img', 'html.Img', ([], {'id': '"""Output-operation_image"""', 'style': "{'float': 'left', 'margin-left': 'None', 'left': 25, 'position': 'relative',\n 'left': '50%', 'height': '510px', 'width': '920px', 'transform':\n 'translate(-50%, 10%)', 'margin-bottom': '20px'}"}), "(id='Output-operation_image', style={'float': 'left', 
'margin-left':\n 'None', 'left': 25, 'position': 'relative', 'left': '50%', 'height':\n '510px', 'width': '920px', 'transform': 'translate(-50%, 10%)',\n 'margin-bottom': '20px'})\n", (5108, 5347), True, 'import dash_html_components as html\n'), ((7057, 7156), 'dash_daq.ToggleSwitch', 'daq.ToggleSwitch', ([], {'id': '"""Semi-Manual"""', 'label': '"""Manual and Semi-Manual crop"""', 'labelPosition': '"""bottom"""'}), "(id='Semi-Manual', label='Manual and Semi-Manual crop',\n labelPosition='bottom')\n", (7073, 7156), True, 'import dash_daq as daq\n'), ((7293, 7618), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""multiAnnotation"""', 'figure': 'figu', 'config': "{'modeBarButtonsToAdd': ['drawline', 'drawopenpath', 'drawclosedpath',\n 'drawcircle', 'drawrect', 'eraseshape']}", 'style': "{'text-align': 'center', 'position': 'absolute', 'left': '50%', 'transform':\n 'translate(-50%, 10%)', 'height': '510px', 'width': '920px'}"}), "(id='multiAnnotation', figure=figu, config={'modeBarButtonsToAdd':\n ['drawline', 'drawopenpath', 'drawclosedpath', 'drawcircle', 'drawrect',\n 'eraseshape']}, style={'text-align': 'center', 'position': 'absolute',\n 'left': '50%', 'transform': 'translate(-50%, 10%)', 'height': '510px',\n 'width': '920px'})\n", (7302, 7618), True, 'import dash_core_components as dcc\n'), ((7958, 8048), 'dash_daq.ToggleSwitch', 'daq.ToggleSwitch', ([], {'id': '"""angleCorrection"""', 'label': '"""Correct Angle"""', 'labelPosition': '"""bottom"""'}), "(id='angleCorrection', label='Correct Angle', labelPosition\n ='bottom')\n", (7974, 8048), True, 'import dash_daq as daq\n'), ((13213, 13260), 'os.path.join', 'os.path.join', (['static_image_route', '"""cropped.zip"""'], {}), "(static_image_route, 'cropped.zip')\n", (13225, 13260), False, 'import os\n'), ((23291, 23331), 'os.path.join', 'os.path.join', (['static_image_route', '"""data"""'], {}), "(static_image_route, 'data')\n", (23303, 23331), False, 'import os\n'), ((23366, 23406), 'os.path.join', 
'os.path.join', (['static_image_route', '"""data"""'], {}), "(static_image_route, 'data')\n", (23378, 23406), False, 'import os\n'), ((23816, 23835), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (23832, 23835), False, 'import cv2\n'), ((24010, 24069), 'cv2.rectangle', 'cv2.rectangle', (['i', '(x, y)', '(x + w, y + h)', '(36, 255, 12)', '(20)'], {}), '(i, (x, y), (x + w, y + h), (36, 255, 12), 20)\n', (24023, 24069), False, 'import cv2\n'), ((24320, 24360), 'os.path.join', 'os.path.join', (['static_image_route', '"""data"""'], {}), "(static_image_route, 'data')\n", (24332, 24360), False, 'import os\n'), ((30154, 30189), 'json.dumps', 'json.dumps', (["sel['shapes']"], {'indent': '(2)'}), "(sel['shapes'], indent=2)\n", (30164, 30189), False, 'import json\n'), ((16872, 16962), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""RLower_value"""', 'type': '"""number"""', 'placeholder': '"""RL"""', 'min': '(0)', 'max': '(255)', 'step': '(1)'}), "(id='RLower_value', type='number', placeholder='RL', min=0, max=\n 255, step=1)\n", (16881, 16962), True, 'import dash_core_components as dcc\n'), ((17152, 17242), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""Rupper_value"""', 'type': '"""number"""', 'placeholder': '"""RU"""', 'min': '(0)', 'max': '(255)', 'step': '(1)'}), "(id='Rupper_value', type='number', placeholder='RU', min=0, max=\n 255, step=1)\n", (17161, 17242), True, 'import dash_core_components as dcc\n'), ((17432, 17522), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""GLower_value"""', 'type': '"""number"""', 'placeholder': '"""GL"""', 'min': '(0)', 'max': '(255)', 'step': '(1)'}), "(id='GLower_value', type='number', placeholder='GL', min=0, max=\n 255, step=1)\n", (17441, 17522), True, 'import dash_core_components as dcc\n'), ((17712, 17802), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""Gupper_value"""', 'type': '"""number"""', 'placeholder': '"""GU"""', 'min': '(0)', 'max': '(255)', 'step': '(1)'}), 
"(id='Gupper_value', type='number', placeholder='GU', min=0, max=\n 255, step=1)\n", (17721, 17802), True, 'import dash_core_components as dcc\n'), ((17992, 18082), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""BLower_value"""', 'type': '"""number"""', 'placeholder': '"""BL"""', 'min': '(0)', 'max': '(255)', 'step': '(1)'}), "(id='BLower_value', type='number', placeholder='BL', min=0, max=\n 255, step=1)\n", (18001, 18082), True, 'import dash_core_components as dcc\n'), ((18272, 18362), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""Bupper_value"""', 'type': '"""number"""', 'placeholder': '"""BU"""', 'min': '(0)', 'max': '(255)', 'step': '(1)'}), "(id='Bupper_value', type='number', placeholder='BU', min=0, max=\n 255, step=1)\n", (18281, 18362), True, 'import dash_core_components as dcc\n'), ((18580, 18608), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""out-all-types"""'}), "(id='out-all-types')\n", (18588, 18608), True, 'import dash_html_components as html\n'), ((24407, 24431), 'skimage.img_as_ubyte', 'img_as_ubyte', (['FinalImage'], {}), '(FinalImage)\n', (24419, 24431), False, 'from skimage import io, color, img_as_ubyte\n'), ((24460, 24733), 'dash_html_components.Button', 'html.Button', (['"""Download"""'], {'style': "{'display': 'block', 'position': 'relative', 'font-size': '16px', 'padding':\n '8px 12px', 'border-radius': '4px', 'text-align': 'center', 'align':\n 'center', 'color': 'black', 'font-family': 'Times New Roman',\n 'textAlign': 'center'}"}), "('Download', style={'display': 'block', 'position': 'relative',\n 'font-size': '16px', 'padding': '8px 12px', 'border-radius': '4px',\n 'text-align': 'center', 'align': 'center', 'color': 'black',\n 'font-family': 'Times New Roman', 'textAlign': 'center'})\n", (24471, 24733), True, 'import dash_html_components as html\n'), ((30244, 30279), 'json.dumps', 'json.dumps', (["sel['shapes']"], {'indent': '(2)'}), "(sel['shapes'], indent=2)\n", (30254, 30279), False, 'import 
json\n'), ((30473, 30512), 'plotly.express.imshow', 'px.imshow', (['igrph'], {'width': '(920)', 'height': '(510)'}), '(igrph, width=920, height=510)\n', (30482, 30512), True, 'import plotly.express as px\n'), ((4004, 4328), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output-image-upload"""', 'style': "{'display': 'relative', 'position': 'center', 'align': 'center',\n 'margin-left': 'auto', 'margin-right': 'auto', 'padding-left': '40px',\n 'padding-right': '40px', 'padding-topg': '25px', 'box-shadow':\n '0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19)'}"}), "(id='output-image-upload', style={'display': 'relative', 'position':\n 'center', 'align': 'center', 'margin-left': 'auto', 'margin-right':\n 'auto', 'padding-left': '40px', 'padding-right': '40px', 'padding-topg':\n '25px', 'box-shadow':\n '0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19)'})\n", (4012, 4328), True, 'import dash_html_components as html\n'), ((4711, 4755), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output-image-uploadCorrection"""'}), "(id='output-image-uploadCorrection')\n", (4719, 4755), True, 'import dash_html_components as html\n'), ((4762, 4800), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output-image-uploadGrid"""'}), "(id='output-image-uploadGrid')\n", (4770, 4800), True, 'import dash_html_components as html\n'), ((25090, 25147), 'os.path.join', 'os.path.join', (['static_image_route', '"""Automatic_dataset.zip"""'], {}), "(static_image_route, 'Automatic_dataset.zip')\n", (25102, 25147), False, 'import os\n'), ((30831, 31157), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""multiAnnotation2"""', 'figure': 'figu', 'config': "{'modeBarButtonsToAdd': ['drawline', 'drawopenpath', 'drawclosedpath',\n 'drawcircle', 'drawrect', 'eraseshape']}", 'style': "{'text-align': 'center', 'position': 'absolute', 'left': '50%', 'transform':\n 'translate(-50%, 10%)', 'height': '510px', 'width': '920px'}"}), 
"(id='multiAnnotation2', figure=figu, config={'modeBarButtonsToAdd':\n ['drawline', 'drawopenpath', 'drawclosedpath', 'drawcircle', 'drawrect',\n 'eraseshape']}, style={'text-align': 'center', 'position': 'absolute',\n 'left': '50%', 'transform': 'translate(-50%, 10%)', 'height': '510px',\n 'width': '920px'})\n", (30840, 31157), True, 'import dash_core_components as dcc\n'), ((3547, 3575), 'dash_html_components.A', 'html.A', (['"""Select color image"""'], {}), "('Select color image')\n", (3553, 3575), True, 'import dash_html_components as html\n')] |
from keras.models import Model
from keras.layers import Input, Embedding, GRU, Bidirectional, Dense, \
RepeatVector, Masking, concatenate, Reshape, TimeDistributed
from keras.optimizers import SGD, Adagrad, Adam
def seq2seq_simple(input_dic_len=100,
                   input_len=50,
                   vector_len=200,
                   hidden_dim=100,
                   output_dim=100,
                   output_len=10):
    '''
    Build and compile a simple bidirectional-GRU sequence-to-sequence model.

    :param input_dic_len: size of the input vocabulary (dictionary length)
    :param input_len: length of the (padded) input token sequence
    :param vector_len: dimensionality of the word-embedding vectors
    :param hidden_dim: number of units of the dense layer closing the encoder
    :param output_dim: size of the output vocabulary (dictionary length)
    :param output_len: length of the output token sequence
    :return: a compiled Keras Model mapping (batch, input_len) token ids to
             (batch, output_len, output_dim) per-step class scores
    '''
    data_input = Input(shape=[input_len])
    # Word-embedding lookup; input_dim is vocab size + 1 so that index 0
    # stays reserved for padding.
    # NOTE(review): mask_zero=0 is falsy, so no masking is applied — confirm
    # this is intentional.
    word_vec = Embedding(input_dim=input_dic_len + 1,
                         input_length=input_len,
                         output_dim=vector_len,
                         mask_zero=0,
                         name='Embedding')(data_input)
    # Encoding stage: bidirectional GRU returning only the two final hidden
    # states (one per direction) alongside the sequence summary.
    rnn_encoding, state_h1, state_h2 = Bidirectional(GRU(units=32,
                                                         activation='tanh',
                                                         recurrent_activation='hard_sigmoid',
                                                         return_sequences=False,
                                                         return_state=True),
                                                     name='Bidirectional_encoding')(word_vec)
    data_encoding = Dense(units=hidden_dim,
                          activation='relu',
                          name='encoding')(rnn_encoding)
    # Decoding stage: replicate the encoded vector once per output time step.
    data_RepeatVector = RepeatVector(n=output_len)(data_encoding)
    # Use the encoder's final states to initialize the decoder so that
    # information is carried across the encode/decode boundary.
    rnn_decoding = Bidirectional(GRU(units=32,
                                      return_sequences=True,
                                      activation="relu"),
                                  name='Bidirectional_decoding')(data_RepeatVector, initial_state=[state_h1, state_h2])
    data_decoding = TimeDistributed(Dense(units=output_dim, activation="relu"),
                                    name='TimeDistributed')(rnn_decoding)
    # NOTE(review): `lr` is the legacy Keras argument name (newer releases use
    # `learning_rate`) — kept as-is for compatibility with the pinned version.
    optimizer = Adam(lr=0.01)
    model = Model(inputs=data_input, outputs=data_decoding)
    model.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])
    return model
if __name__ == '__main__':
    from keras.preprocessing.text import Tokenizer
    from keras.preprocessing.sequence import pad_sequences
    from keras.utils import to_categorical
    import numpy as np
    import random
    # Toy task: learn to map random lowercase letter sequences (length 1..5,
    # drawn from 'abcdefg') to their uppercase counterparts.
    input_len=5
    ask_transform = [[random.choice('abcdefg') for j in range(random.randint(1,input_len))]
                     for i in range(5000)]
    answer_transform = [[j.upper() for j in i] for i in ask_transform]
    # Tokenize and zero-pad the input side.
    tokenizer_ask = Tokenizer()
    tokenizer_ask.fit_on_texts(texts=ask_transform)
    ask_seq = tokenizer_ask.texts_to_sequences(texts=ask_transform)
    ask_new = pad_sequences(ask_seq, maxlen=input_len, padding='post', value=0, dtype='int')
    # Tokenize, zero-pad and one-hot encode the target side.
    output_len = 5
    tokenizer_answer = Tokenizer()
    tokenizer_answer.fit_on_texts(texts=answer_transform)
    answer_seq = tokenizer_answer.texts_to_sequences(texts=answer_transform)
    answer_new = pad_sequences(answer_seq, maxlen=output_len, padding='post', value=0, dtype='int')
    answer_categorical = to_categorical(answer_new)
    # Build the model and fit it on the toy data.
    model_seq2seq = seq2seq_simple(input_dic_len=len(tokenizer_ask.word_index),
                                   input_len=input_len,
                                   vector_len=20,
                                   hidden_dim=20,
                                   output_dim=answer_categorical.shape[2],
                                   output_len=output_len)
    model_seq2seq.fit(x=ask_new, y=answer_categorical, batch_size=50, epochs=10, validation_split=0.2,verbose=2)
    # Reverse lookup tables: predicted index -> answer token.
    answer_key = list(tokenizer_answer.word_index.keys())
    answer_values = list(tokenizer_answer.word_index.values())
    def chatbot(text=None):
        # Encode the query, run the model, then decode per-step argmax
        # predictions back to tokens (skipping indices not in the vocab).
        text=tokenizer_ask.texts_to_sequences(texts=[text])
        text_new = pad_sequences(text, maxlen=input_len, padding='post', value=0, dtype='float32')
        result = model_seq2seq.predict(text_new)[0]
        result = [np.argmax(i) for i in result]
        result = [answer_key[answer_values.index(i)] for i in result if i in answer_values]
        return result
    # Show model answers for a few training samples.
    for i in ask_transform[0:20]:
        print('ask:', i,'answer:', chatbot(text=i))
chatbot(['a','d','g','e','c']) | [
"random.randint",
"numpy.argmax",
"keras.preprocessing.sequence.pad_sequences",
"keras.layers.GRU",
"keras.optimizers.Adam",
"random.choice",
"keras.models.Model",
"keras.preprocessing.text.Tokenizer",
"keras.layers.Dense",
"keras.layers.Embedding",
"keras.layers.Input",
"keras.layers.RepeatVe... | [((807, 831), 'keras.layers.Input', 'Input', ([], {'shape': '[input_len]'}), '(shape=[input_len])\n', (812, 831), False, 'from keras.layers import Input, Embedding, GRU, Bidirectional, Dense, RepeatVector, Masking, concatenate, Reshape, TimeDistributed\n'), ((2339, 2352), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (2343, 2352), False, 'from keras.optimizers import SGD, Adagrad, Adam\n'), ((2365, 2412), 'keras.models.Model', 'Model', ([], {'inputs': 'data_input', 'outputs': 'data_decoding'}), '(inputs=data_input, outputs=data_decoding)\n', (2370, 2412), False, 'from keras.models import Model\n'), ((2970, 2981), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (2979, 2981), False, 'from keras.preprocessing.text import Tokenizer\n'), ((3116, 3194), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['ask_seq'], {'maxlen': 'input_len', 'padding': '"""post"""', 'value': '(0)', 'dtype': '"""int"""'}), "(ask_seq, maxlen=input_len, padding='post', value=0, dtype='int')\n", (3129, 3194), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3238, 3249), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (3247, 3249), False, 'from keras.preprocessing.text import Tokenizer\n'), ((3402, 3489), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['answer_seq'], {'maxlen': 'output_len', 'padding': '"""post"""', 'value': '(0)', 'dtype': '"""int"""'}), "(answer_seq, maxlen=output_len, padding='post', value=0, dtype\n ='int')\n", (3415, 3489), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3510, 3536), 'keras.utils.to_categorical', 'to_categorical', (['answer_new'], {}), '(answer_new)\n', (3524, 3536), False, 'from keras.utils import to_categorical\n'), ((859, 980), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': '(input_dic_len + 1)', 'input_length': 'input_len', 'output_dim': 'vector_len', 'mask_zero': '(0)', 
'name': '"""Embedding"""'}), "(input_dim=input_dic_len + 1, input_length=input_len, output_dim=\n vector_len, mask_zero=0, name='Embedding')\n", (868, 980), False, 'from keras.layers import Input, Embedding, GRU, Bidirectional, Dense, RepeatVector, Masking, concatenate, Reshape, TimeDistributed\n'), ((1614, 1673), 'keras.layers.Dense', 'Dense', ([], {'units': 'hidden_dim', 'activation': '"""relu"""', 'name': '"""encoding"""'}), "(units=hidden_dim, activation='relu', name='encoding')\n", (1619, 1673), False, 'from keras.layers import Input, Embedding, GRU, Bidirectional, Dense, RepeatVector, Masking, concatenate, Reshape, TimeDistributed\n'), ((1801, 1827), 'keras.layers.RepeatVector', 'RepeatVector', ([], {'n': 'output_len'}), '(n=output_len)\n', (1813, 1827), False, 'from keras.layers import Input, Embedding, GRU, Bidirectional, Dense, RepeatVector, Masking, concatenate, Reshape, TimeDistributed\n'), ((4251, 4330), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['text'], {'maxlen': 'input_len', 'padding': '"""post"""', 'value': '(0)', 'dtype': '"""float32"""'}), "(text, maxlen=input_len, padding='post', value=0, dtype='float32')\n", (4264, 4330), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1158, 1274), 'keras.layers.GRU', 'GRU', ([], {'units': '(32)', 'activation': '"""tanh"""', 'recurrent_activation': '"""hard_sigmoid"""', 'return_sequences': '(False)', 'return_state': '(True)'}), "(units=32, activation='tanh', recurrent_activation='hard_sigmoid',\n return_sequences=False, return_state=True)\n", (1161, 1274), False, 'from keras.layers import Input, Embedding, GRU, Bidirectional, Dense, RepeatVector, Masking, concatenate, Reshape, TimeDistributed\n'), ((1919, 1974), 'keras.layers.GRU', 'GRU', ([], {'units': '(32)', 'return_sequences': '(True)', 'activation': '"""relu"""'}), "(units=32, return_sequences=True, activation='relu')\n", (1922, 1974), False, 'from keras.layers import Input, Embedding, GRU, Bidirectional, Dense, 
RepeatVector, Masking, concatenate, Reshape, TimeDistributed\n'), ((2205, 2247), 'keras.layers.Dense', 'Dense', ([], {'units': 'output_dim', 'activation': '"""relu"""'}), "(units=output_dim, activation='relu')\n", (2210, 2247), False, 'from keras.layers import Input, Embedding, GRU, Bidirectional, Dense, RepeatVector, Masking, concatenate, Reshape, TimeDistributed\n'), ((2765, 2789), 'random.choice', 'random.choice', (['"""abcdefg"""'], {}), "('abcdefg')\n", (2778, 2789), False, 'import random\n'), ((4401, 4413), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (4410, 4413), True, 'import numpy as np\n'), ((2805, 2833), 'random.randint', 'random.randint', (['(1)', 'input_len'], {}), '(1, input_len)\n', (2819, 2833), False, 'import random\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 22 08:40:31 2018
@author: alxgr
"""
import numpy as np
import matplotlib.pyplot as plt
from pyro.dynamic import system
from pyro.analysis import phaseanalysis
from pyro.analysis import simulation
from pyro.analysis import graphical
from pyro.analysis import costfunction
###############################################################################
# Mother Controller class
###############################################################################
class StaticController():
    """
    Mother class for memoryless (static) controllers.
    ---------------------------------------
    r : reference signal vector  k x 1
    y : sensor signal vector     p x 1
    u : control inputs vector    m x 1
    t : time                     1 x 1
    ---------------------------------------
    u = c( y , r , t )
    """

    ###########################################################################
    # The two following functions needs to be implemented by child classes
    ###########################################################################

    ############################
    def __init__(self, k=1, m=1, p=1):
        """
        k : dimension of the reference signal r
        m : dimension of the control input u
        p : dimension of the sensor signal y
        """

        # Dimensions
        self.k = k
        self.m = m
        self.p = p

        # Label
        self.name = 'Static Controller'

        # Reference signal info: one label/unit string per dimension
        self.ref_label = []
        self.ref_units = []
        for i in range(k):
            self.ref_label.append('Ref. '+str(i))
            self.ref_units.append('')
        self.r_ub = np.zeros(self.k) + 10  # upper bounds
        self.r_lb = np.zeros(self.k) - 10  # lower bounds

        # default constant reference
        self.rbar = np.zeros(self.k)

    #############################
    def c( self , y , r , t = 0 ):
        """
        Feedback static computation u = c( y, r, t)

        INPUTS
        y : sensor signal vector     p x 1
        r : reference signal vector  k x 1
        t : time                     1 x 1

        OUTPUTS
        u : control inputs vector    m x 1

        Abstract method: child classes must override this.
        """

        u = np.zeros(self.m)  # placeholder control input vector (never returned)

        raise NotImplementedError

        return u

    #########################################################################
    # Default methods that can be overloaded in child classes
    #########################################################################

    #############################
    def t2r( self , t ):
        """
        Reference signal function r = t2r(t)

        INPUTS
        t : time           1 x 1

        OUTPUTS
        r : controller reference vector  k x 1

        Default is a constant signal equal to self.rbar; overload this
        method for a more complex, time-varying reference signal.
        """

        # Default is a constant signal
        r = self.rbar

        return r

    #########################################################################
    # No need to overwrite the following functions for child classes
    #########################################################################

    #############################
    def cbar( self , y , t = 0 ):
        """
        Feedback static computation u = c( y, r = rbar, t) for the
        default constant reference.

        INPUTS
        y : sensor signal vector     p x 1
        t : time                     1 x 1

        OUTPUTS
        u : control inputs vector    m x 1
        """

        u = self.c( y , self.rbar , t )

        return u

    #############################
    def __add__(self, sys):
        """
        closed_loop_system = controller + dynamic_system
        """

        cl_sys = ClosedLoopSystem( sys , self )

        return cl_sys

    #############################
    def plot_control_law(self, i=0, j=1, k=0, t=0, n = 10, sys = None):
        """
        Plot the control law u[k] = cbar(x, t) over a 2D slice of states.

        k = control input index to plot
        i = state to use as the x-axis
        j = state to use as the y-axis
        n = grid resolution
        sys can be passed for state label unit and range
        """

        # Extract sys info (labels and plotting range) if a system is given
        if sys is not None:
            xname = sys.state_label[i] + ' ' + sys.state_units[i]
            yname = sys.state_label[j] + ' ' + sys.state_units[j]
            xmax = sys.x_ub[i]
            xmin = sys.x_lb[i]
            ymax = sys.x_ub[j]
            ymin = sys.x_lb[j]
            xbar = sys.xbar
        else:
            xname = 'state x[%i]'%i
            yname = 'state x[%i]'%j
            xmax = 10
            xmin = -10
            ymax = 10
            ymin = -10
            # NOTE(review): uses self.p (sensor dimension) as the state
            # dimension — verify this matches the intended usage.
            xbar = np.zeros( self.p )

        # Computing: evaluate the control law on an n x n grid
        x = np.linspace( xmin , xmax , n )
        y = np.linspace( ymin , ymax , n )
        X, Y = np.meshgrid( x, y)
        U = np.zeros((n,n)) # control action table

        for l in range(n):
            for m in range(n):
                # Actual states
                x = np.copy( xbar ) # default value for all states
                x[ i ] = X[l, m]
                x[ j ] = Y[l, m]
                # Control action
                u = self.cbar( x , t )
                U[l, m] = u[k] # extract control input element k

        # Plotting
        fig = plt.figure(figsize=(3, 2),dpi=300, frameon=True)
        # Fixed: title must show the plotted control-input index k (not the
        # state index i).
        fig.canvas.manager.set_window_title('Control law for u[%i]'%k)
        ax = fig.add_subplot(1,1,1)
        ax.tick_params('both',labelsize = 5 )
        plt.ylabel(yname, fontsize = 5 )
        plt.xlabel(xname, fontsize = 5 )
        im1 = plt.pcolormesh( X , Y , U, shading='gouraud' )
        cbar = plt.colorbar(im1)
        cbar.ax.tick_params(labelsize=5)
        plt.axis([xmin,xmax,ymin,ymax])
        plt.grid(True)
        plt.tight_layout()
        plt.show()
###############################################################################
# Mother "Static controller + dynamic system" class
###############################################################################
class ClosedLoopSystem( system.ContinuousDynamicSystem ):
    """
    Dynamic system connected with a static controller.
    ---------------------------------------------
    NOTE:
    Ignore any feedthrough in the plant to avoid creating algebraic loops.
    This is only valid if the output function h is not a function of u:
    equations assume y = h(x,u,t) --> y = h(x,t)
    """

    ############################
    def __init__(self, ContinuousDynamicSystem , StaticController ):
        """
        ContinuousDynamicSystem : the plant to control
        StaticController        : the memoryless feedback controller

        Raises NameError if the controller/plant signal dimensions mismatch.
        """

        self.plant = ContinuousDynamicSystem
        self.controller = StaticController

        ######################################################################
        # Check dimensions match
        if not (self.plant.m == self.controller.m ):
            raise NameError('Dimension mismatch between controller and' +
            ' dynamic system for the input signal u')
        elif not (self.plant.p == self.controller.p ):
            raise NameError('Dimension mismatch between controller and' +
            ' dynamic system for the output signal y')
        ######################################################################

        # Dimensions of global closed-loop dynamic system
        # (the global input is the controller reference, hence m = k)
        self.n = self.plant.n
        self.m = self.controller.k
        self.p = self.plant.p

        # Labels
        self.name = ('Closed-Loop ' + self.plant.name +
                     ' with ' + self.controller.name )
        self.state_label = self.plant.state_label
        self.input_label = self.controller.ref_label
        self.output_label = self.plant.output_label

        # Units
        self.state_units = self.plant.state_units
        self.input_units = self.controller.ref_units
        self.output_units = self.plant.output_units

        # Define the domain
        self.x_ub = self.plant.x_ub
        self.x_lb = self.plant.x_lb
        self.u_ub = self.controller.r_ub
        self.u_lb = self.controller.r_lb

        # Plot params (inherited from the plant)
        self.domain = self.plant.domain
        self.linestyle = self.plant.linestyle
        self.linestyle_plus = self.plant.linestyle_plus
        self.linescolor = self.plant.linescolor
        self.linescolor_plus = self.plant.linescolor_plus
        self.lines_plus = self.plant.lines_plus
        self.is_3d = self.plant.is_3d

        # Default State and inputs
        self.xbar = self.plant.xbar
        self.ubar = self.controller.rbar

        ################################
        # Variables
        ################################

        # Initial value for simulations
        self.x0 = self.plant.x0

        # Result of last simulation
        self.traj = None

        # Cost function for evaluation
        # default is a quadratic cost function with diag Q and R matrices
        self.cost_function = costfunction.QuadraticCostFunction.from_sys(self)

    ###########################################################################
    def f( self , x , u , t ):
        """
        Continuous time forward dynamics evaluation dx = f(x,u,t)

        INPUTS
        x : state vector             n x 1
        u : control inputs vector    m x 1
        t : time                     1 x 1

        OUTPUTS
        dx : state derivative vector n x 1
        """

        r = u # input of closed-loop global sys is ref of the controller

        # Compute output signal
        y = self.plant.h( x, self.plant.ubar, t)

        # Compute control inputs
        u = self.controller.c( y, r, t)

        # Compute state derivatives
        dx = self.plant.f( x, u, t)

        return dx

    ###########################################################################
    def h( self , x , u , t ):
        """
        Output function y = h(x,u,t)

        INPUTS
        x : state vector             n x 1
        u : control inputs vector    m x 1
        t : time                     1 x 1

        OUTPUTS
        y : output derivative vector p x 1
        """

        # Using u = ubar to avoid algebraic loops
        y = self.plant.h( x , self.plant.ubar , t )

        return y

    ###########################################################################
    def t2u( self , t ):
        """
        Reference signal function u = t2u(t)

        INPUTS
        t : time           1 x 1

        OUTPUTS
        u : control inputs vector  m x 1

        Default is a constant signal equal to self.ubar; overload this
        method for a more complex, time-varying reference signal.
        """

        # Input of closed-loop global sys is ref of the controller
        u = self.controller.t2r(t)

        return u

    ###########################################################################
    def plot_phase_plane_closed_loop(self , x_axis = 0 , y_axis = 1 ):
        """
        Plot Phase Plane vector field of the system
        ------------------------------------------------
        blue arrows for the open-loop behavior
        red arrows for the closed-loop behavior
        """

        pp = phaseanalysis.PhasePlot( self , x_axis , y_axis )

        pp.compute_grid()
        pp.plot_init()

        # Closed-loop Behavior
        pp.color = 'r'
        pp.compute_vector_field()
        pp.plot_vector_field()

        # Open-Loop Behavior
        pp.f = self.plant.f
        pp.ubar = self.plant.ubar
        pp.color = 'b'
        pp.compute_vector_field()
        pp.plot_vector_field()

        pp.plot_finish()

        return pp

    #############################
    def compute_trajectory(
        self, tf=10, n=10001, solver='ode'):
        """
        Simulation of time evolution of the system
        ------------------------------------------------
        tf : final time
        n  : time steps
        """

        sim = simulation.CLosedLoopSimulator(self, tf, n, solver)
        self.traj = sim.compute()

        return self.traj

    #############################################
    # Make graph function use the internal sys
    #############################################

    ###########################################################################
    def get_plotter(self):
        return self.plant.get_plotter()

    ###########################################################################
    def get_animator(self):
        return self.plant.get_animator()

    ###########################################################################
    def show(self, q , x_axis = 0 , y_axis = 1 ):
        """ Plot figure of configuration q """
        # Fixed: forward the caller's x_axis/y_axis instead of the
        # previously hard-coded literals 0 and 1.
        system.ContinuousDynamicSystem.show( self.plant , q ,
                                             x_axis = x_axis , y_axis = y_axis )

    ###########################################################################
    def show3(self, q ):
        """ Plot figure of configuration q """
        system.ContinuousDynamicSystem.show3( self.plant, q )

    ###########################################################################
    def plot_phase_plane_trajectory_closed_loop(self, x_axis=0, y_axis=1):
        """
        Plot Phase Plane vector field of the system and the trajectory
        ------------------------------------------------
        blue arrows for the open-loop behavior
        red arrows for the closed-loop behavior
        """
        plotter = self.get_plotter()
        plotter.phase_plane_trajectory_closed_loop( self.traj, x_axis, y_axis)

    ###########################################################################
    def plot_end_effector_trajectory(self, traj = None ):
        self.plant.plot_end_effector_trajectory( self.traj )
###############################################################################
class DynamicController( StaticController ):
    """
    Base class for controllers with internal states and dynamics (memory),
    e.g. the integral action of a PID.
    ----------------------------------------
    z : controller internal states   l x 1
    r : reference signal vector      k x 1
    y : sensor signal vector         p x 1
    u : control inputs vector        m x 1
    t : time                         1 x 1
    -----------------------------------------
    Control law:        u       = c( z, y, r, t)
    Internal dynamics:  dz / dt = b( z, y, r, t)
    """

    #############################
    def __init__(self, k, l, m, p):
        """
        k : reference signal dimension
        l : number of internal controller states
        m : control input dimension
        p : sensor signal dimension
        """

        # Signal dimensions
        self.k = k
        self.l = l
        self.m = m
        self.p = p

        self.name = "Dynamic Controller"

        ############################
        # Reference signal metadata: one label/unit per dimension
        self.ref_label = ['Ref. ' + str(i) for i in range(k)]
        self.ref_units = [''] * k
        self.r_ub = np.zeros(self.k) + 10  # upper bounds
        self.r_lb = np.zeros(self.k) - 10  # lower bounds

        # default constant reference
        self.rbar = np.zeros(self.k)

        ###########################
        # Internal state metadata: one label/unit per internal state
        self.internal_state_label = ['Internal state ' + str(i)
                                     for i in range(l)]
        self.internal_state_units = [''] * l
        self.z_ub = np.zeros(self.l) + 10  # upper bounds
        self.z_lb = np.zeros(self.l) - 10  # lower bounds

        # default constant internal states
        self.zbar = np.zeros(self.l)

        # initial internal controller states
        self.z0 = np.zeros(self.l)

    #############################
    def c(self, z, y, r, t):
        """
        CONTROL LAW u = c( z, y, r, t)

        INPUTS
        z : internal states          l x 1
        y : sensor signal vector     p x 1
        r : reference signal vector  k x 1
        t : time                     1 x 1

        OUTPUTS
        u : control inputs vector    m x 1

        Placeholder implementation: returns a zero control input.
        """
        return np.zeros(self.m)

    #############################
    def b(self, z, y, r, t):
        """
        INTERNAL CONTROLLER DYNAMIC dz/dt = b( z, y, r, t)

        INPUTS
        z : internal states          l x 1
        y : sensor signal vector     p x 1
        r : reference signal vector  k x 1
        t : time                     1 x 1

        OUTPUTS
        dz/dt : time derivative of internal states  l x 1

        Placeholder implementation: internal states do not evolve.
        """
        return np.zeros(self.l)

    #############################
    def cbar( self , y , t = 0 ):
        """
        Control input computed with the default internal states (zbar) and
        default reference (rbar): u = c( zbar, y, rbar, t ).

        INPUTS
        y : sensor signal vector     p x 1
        t : time                     1 x 1

        OUTPUTS
        u : control inputs vector    m x 1
        """
        return self.c(self.zbar, y, self.rbar, t)

    #############################
    def __add__(self, sys):
        """closed_loop_system = controller + dynamic_system"""
        return DynamicClosedLoopSystem(sys, self)
##############################################################################
class DynamicClosedLoopSystem( ClosedLoopSystem ):
    """
    Closed-loop system built from a plant and a *dynamic* controller.
    --------------------------------------------
    The global closed-loop state concatenates the physical plant states
    with the virtual internal controller states:

    x_global = [ x_plant ; z_controller ]
    """

    #######################################
    def __init__(self, plant, controller):
        """
        plant      : ContinuousDynamicSystem instance
        controller : DynamicController instance

        Raises ValueError if the plant/controller signal dimensions mismatch.
        """

        # Check dimensions
        if plant.p != controller.p:
            raise ValueError("Controller inputs do not match system outputs")
        if plant.m != controller.m:
            raise ValueError("Controller outputs do not match system inputs")

        ########################
        # Remove cost function (the plant cost function is not defined on
        # the augmented state and would be invalid here)
        ########################
        plant.cost_function = None

        ClosedLoopSystem.__init__( self, plant, controller)

        # Add extra states that represent the controller memory
        self.n = self.plant.n + self.controller.l

        self.state_label = ( self.plant.state_label +
                             self.controller.internal_state_label )
        self.state_units = ( self.plant.state_units +
                             self.controller.internal_state_units )

        self.x_ub = np.concatenate([ self.plant.x_ub,
                                     self.controller.z_ub
                                     ], axis=0)
        self.x_lb = np.concatenate([ self.plant.x_lb,
                                     self.controller.z_lb
                                     ], axis=0)
        self.xbar = np.concatenate([ self.plant.xbar,
                                     self.controller.zbar
                                     ], axis=0)

        ################################
        # Variables
        ################################

        # Initial value for simulations (plant states + controller states)
        self.x0 = np.concatenate([ self.plant.x0,
                                   self.controller.z0
                                   ], axis=0)

        # Result of last simulation
        self.traj = None

        # Cost function for evaluation (none by default; the quadratic
        # default would need to account for the augmented state)
        self.cost_function = None

    ######################################
    def f(self, x, u, t):
        """
        Continuous time forward dynamics evaluation dx = f(x,u,t)

        INPUTS
        x : global state vector [plant ; controller]   n x 1
        u : control inputs vector (controller ref)     m x 1
        t : time                                       1 x 1

        OUTPUTS
        dx : state derivative vector                   n x 1
        """
        x, z = self._split_states( x )

        # Input to global system interpreted as reference signal
        r = u

        # Eval current system output. Assume there is no feedforward term,
        # as it would cause an algebraic loop
        y = self.plant.h( x, self.plant.ubar, t)

        # input u to dynamic system evaluated by controller
        u = self.controller.c( z, y, r, t)

        # Time derivative of plant and controller states
        dx = self.plant.f( x, u, t)
        dz = self.controller.b( z, y, r, t)

        dx = np.concatenate([ dx, dz], axis=0)
        assert dx.shape == (self.n,)

        return dx

    ######################################
    def fzbar(self, x_plant , u, t = 0):
        """
        Continuous time forward dynamics evaluation dx = f(x,u,t)
        with z = zbar (default internal states) and r = u

        INPUTS
        x_plant : plant state vector    plant.n x 1
        u       : reference vector      m x 1
        t       : time                  1 x 1

        OUTPUTS
        dx : plant state derivative vector  plant.n x 1
        """

        # Input to global system interpreted as reference signal
        r = u

        # Eval current system output. Assume there is no feedforward term,
        # as it would cause an algebraic loop
        y = self.plant.h( x_plant, self.plant.ubar, t)

        # input u to dynamic system evaluated by controller
        u = self.controller.c( self.controller.zbar, y, r, t)

        # Time derivative of states
        dx = self.plant.f( x_plant, u, t)

        return dx

    ##########################################
    def h(self, x, u, t):
        """
        Output function y = h(x,u,t)

        INPUTS
        x : global state vector      n x 1
        u : control inputs vector    m x 1
        t : time                     1 x 1

        OUTPUTS
        y : output derivative vector p x 1
        """

        x, z = self._split_states( x )

        y = self.plant.h( x, u, t)

        return y

    #######################################
    def _split_states(self, x):
        """
        Separate the global state vector into (plant states, controller
        states).
        """
        x_sys, x_ctl = x[:self.plant.n], x[self.plant.n:]
        assert x_ctl.shape == (self.controller.l,)
        return (x_sys, x_ctl)

    #############################
    def xut2q( self, x , u , t ):
        """ Compute configuration variables ( q vector ) """

        x , z = self._split_states( x )

        # Use the plant function
        q = self.plant.xut2q( x, u, t)

        return q

    #############################
    def compute_trajectory(
        self, tf=10, n=10001, solver='ode'):
        """
        Simulation of time evolution of the system
        ------------------------------------------------
        tf : final time
        n  : time steps
        """

        sim = simulation.DynamicCLosedLoopSimulator( self, tf, n, solver)
        self.traj = sim.compute()

        return self.traj

    #############################
    def plot_trajectory_with_internal_states(self, plot='x', **kwargs):
        """
        Plot time evolution of a simulation of this system
        ------------------------------------------------
        note: will call compute_trajectory if no simulation data is present
        """

        # Check if trajectory is already computed
        # (fixed: identity comparison `is None` instead of `== None`)
        if self.traj is None:
            self.compute_trajectory()

        plotter = graphical.TrajectoryPlotter( self )
        plotter.plot( self.traj, plot, **kwargs)

    #############################
    def plot_internal_controller_states(self, plot='z', **kwargs):
        """
        Plot time evolution of a simulation of this system
        ------------------------------------------------
        note: will call compute_trajectory if no simulation data is present
        """

        # Check if trajectory is already computed
        # (fixed: identity comparison `is None` instead of `== None`)
        if self.traj is None:
            self.compute_trajectory()

        plotter = graphical.TrajectoryPlotter( self )
        plotter.plot( self.traj, plot, **kwargs)

    ###########################################################################
    def plot_phase_plane_closed_loop( self , x_axis = 0 , y_axis = 1 ):
        """
        Plot Phase Plane vector field of the system
        ------------------------------------------------
        blue arrows for the open-loop behavior
        red arrows for the closed-loop behavior
        """

        pp = phaseanalysis.PhasePlot( self.plant , x_axis , y_axis )

        pp.compute_grid()
        pp.plot_init()

        # Closed-loop Behavior
        pp.color = 'b'
        pp.compute_vector_field()
        pp.plot_vector_field()

        # Open-Loop Behavior
        pp.f = self.fzbar # assume default internal states
        pp.ubar = self.ubar
        pp.color = 'r'
        pp.compute_vector_field()
        pp.plot_vector_field()

        pp.plot_finish()

        return pp
'''
#################################################################
################## Main ########
#################################################################
'''
if __name__ == "__main__":
    """ MAIN TEST """
    # No self-test defined for this module.
    pass
| [
"matplotlib.pyplot.figure",
"pyro.dynamic.system.ContinuousDynamicSystem.show3",
"matplotlib.pyplot.tight_layout",
"pyro.analysis.costfunction.QuadraticCostFunction.from_sys",
"numpy.meshgrid",
"pyro.analysis.simulation.CLosedLoopSimulator",
"numpy.copy",
"matplotlib.pyplot.colorbar",
"numpy.linspac... | [((1799, 1815), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (1807, 1815), True, 'import numpy as np\n'), ((2251, 2267), 'numpy.zeros', 'np.zeros', (['self.m'], {}), '(self.m)\n', (2259, 2267), True, 'import numpy as np\n'), ((5031, 5057), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'n'], {}), '(xmin, xmax, n)\n', (5042, 5057), True, 'import numpy as np\n'), ((5076, 5102), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'n'], {}), '(ymin, ymax, n)\n', (5087, 5102), True, 'import numpy as np\n'), ((5133, 5150), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5144, 5150), True, 'import numpy as np\n'), ((5173, 5189), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (5181, 5189), True, 'import numpy as np\n'), ((5694, 5743), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 2)', 'dpi': '(300)', 'frameon': '(True)'}), '(figsize=(3, 2), dpi=300, frameon=True)\n', (5704, 5743), True, 'import matplotlib.pyplot as plt\n'), ((5923, 5952), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yname'], {'fontsize': '(5)'}), '(yname, fontsize=5)\n', (5933, 5952), True, 'import matplotlib.pyplot as plt\n'), ((5964, 5993), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xname'], {'fontsize': '(5)'}), '(xname, fontsize=5)\n', (5974, 5993), True, 'import matplotlib.pyplot as plt\n'), ((6020, 6062), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['X', 'Y', 'U'], {'shading': '"""gouraud"""'}), "(X, Y, U, shading='gouraud')\n", (6034, 6062), True, 'import matplotlib.pyplot as plt\n'), ((6091, 6108), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im1'], {}), '(im1)\n', (6103, 6108), True, 'import matplotlib.pyplot as plt\n'), ((6176, 6210), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin, xmax, ymin, ymax]'], {}), '([xmin, xmax, ymin, ymax])\n', (6184, 6210), True, 'import matplotlib.pyplot as plt\n'), ((6217, 6231), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6225, 6231), True, 
'import matplotlib.pyplot as plt\n'), ((6240, 6258), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6256, 6258), True, 'import matplotlib.pyplot as plt\n'), ((6268, 6278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6276, 6278), True, 'import matplotlib.pyplot as plt\n'), ((9459, 9508), 'pyro.analysis.costfunction.QuadraticCostFunction.from_sys', 'costfunction.QuadraticCostFunction.from_sys', (['self'], {}), '(self)\n', (9502, 9508), False, 'from pyro.analysis import costfunction\n'), ((9980, 9996), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (9988, 9996), True, 'import numpy as np\n'), ((12033, 12078), 'pyro.analysis.phaseanalysis.PhasePlot', 'phaseanalysis.PhasePlot', (['self', 'x_axis', 'y_axis'], {}), '(self, x_axis, y_axis)\n', (12056, 12078), False, 'from pyro.analysis import phaseanalysis\n'), ((12840, 12891), 'pyro.analysis.simulation.CLosedLoopSimulator', 'simulation.CLosedLoopSimulator', (['self', 'tf', 'n', 'solver'], {}), '(self, tf, n, solver)\n', (12870, 12891), False, 'from pyro.analysis import simulation\n'), ((13626, 13696), 'pyro.dynamic.system.ContinuousDynamicSystem.show', 'system.ContinuousDynamicSystem.show', (['self.plant', 'q'], {'x_axis': '(0)', 'y_axis': '(1)'}), '(self.plant, q, x_axis=0, y_axis=1)\n', (13661, 13696), False, 'from pyro.dynamic import system\n'), ((13935, 13986), 'pyro.dynamic.system.ContinuousDynamicSystem.show3', 'system.ContinuousDynamicSystem.show3', (['self.plant', 'q'], {}), '(self.plant, q)\n', (13971, 13986), False, 'from pyro.dynamic import system\n'), ((16142, 16158), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (16150, 16158), True, 'import numpy as np\n'), ((16659, 16675), 'numpy.zeros', 'np.zeros', (['self.l'], {}), '(self.l)\n', (16667, 16675), True, 'import numpy as np\n'), ((16750, 16766), 'numpy.zeros', 'np.zeros', (['self.l'], {}), '(self.l)\n', (16758, 16766), True, 'import numpy as np\n'), ((17245, 17261), 'numpy.zeros', 'np.zeros', 
(['self.m'], {}), '(self.m)\n', (17253, 17261), True, 'import numpy as np\n'), ((17787, 17803), 'numpy.zeros', 'np.zeros', (['self.l'], {}), '(self.l)\n', (17795, 17803), True, 'import numpy as np\n'), ((19860, 19923), 'numpy.concatenate', 'np.concatenate', (['[self.plant.x_ub, self.controller.z_ub]'], {'axis': '(0)'}), '([self.plant.x_ub, self.controller.z_ub], axis=0)\n', (19874, 19923), True, 'import numpy as np\n'), ((20019, 20082), 'numpy.concatenate', 'np.concatenate', (['[self.plant.x_lb, self.controller.z_lb]'], {'axis': '(0)'}), '([self.plant.x_lb, self.controller.z_lb], axis=0)\n', (20033, 20082), True, 'import numpy as np\n'), ((20178, 20241), 'numpy.concatenate', 'np.concatenate', (['[self.plant.xbar, self.controller.zbar]'], {'axis': '(0)'}), '([self.plant.xbar, self.controller.zbar], axis=0)\n', (20192, 20241), True, 'import numpy as np\n'), ((20497, 20556), 'numpy.concatenate', 'np.concatenate', (['[self.plant.x0, self.controller.z0]'], {'axis': '(0)'}), '([self.plant.x0, self.controller.z0], axis=0)\n', (20511, 20556), True, 'import numpy as np\n'), ((21866, 21898), 'numpy.concatenate', 'np.concatenate', (['[dx, dz]'], {'axis': '(0)'}), '([dx, dz], axis=0)\n', (21880, 21898), True, 'import numpy as np\n'), ((24361, 24419), 'pyro.analysis.simulation.DynamicCLosedLoopSimulator', 'simulation.DynamicCLosedLoopSimulator', (['self', 'tf', 'n', 'solver'], {}), '(self, tf, n, solver)\n', (24398, 24419), False, 'from pyro.analysis import simulation\n'), ((24981, 25014), 'pyro.analysis.graphical.TrajectoryPlotter', 'graphical.TrajectoryPlotter', (['self'], {}), '(self)\n', (25008, 25014), False, 'from pyro.analysis import graphical\n'), ((25554, 25587), 'pyro.analysis.graphical.TrajectoryPlotter', 'graphical.TrajectoryPlotter', (['self'], {}), '(self)\n', (25581, 25587), False, 'from pyro.analysis import graphical\n'), ((26060, 26111), 'pyro.analysis.phaseanalysis.PhasePlot', 'phaseanalysis.PhasePlot', (['self.plant', 'x_axis', 'y_axis'], {}), '(self.plant, 
x_axis, y_axis)\n', (26083, 26111), False, 'from pyro.analysis import phaseanalysis\n'), ((1639, 1655), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (1647, 1655), True, 'import numpy as np\n'), ((1696, 1712), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (1704, 1712), True, 'import numpy as np\n'), ((4958, 4974), 'numpy.zeros', 'np.zeros', (['self.p'], {}), '(self.p)\n', (4966, 4974), True, 'import numpy as np\n'), ((15982, 15998), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (15990, 15998), True, 'import numpy as np\n'), ((16039, 16055), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (16047, 16055), True, 'import numpy as np\n'), ((16499, 16515), 'numpy.zeros', 'np.zeros', (['self.l'], {}), '(self.l)\n', (16507, 16515), True, 'import numpy as np\n'), ((16556, 16572), 'numpy.zeros', 'np.zeros', (['self.l'], {}), '(self.l)\n', (16564, 16572), True, 'import numpy as np\n'), ((5349, 5362), 'numpy.copy', 'np.copy', (['xbar'], {}), '(xbar)\n', (5356, 5362), True, 'import numpy as np\n')] |
"""
Created on 19. 3. 2019
This module contains functions that are useful for estimating likelihood that given vector is in a class.
This module can be used for automatically collecting its functions in the following way:
FUNCTIONS=[o for o in getmembers(functions) if isfunction(o[1])]
:author: <NAME>
:contact: <EMAIL>
"""
from scipy.spatial import cKDTree
import numpy as np
def fNearest(samples, samplesVals):
    """
    Nearest-neighbour interpolation.

    Builds a k-d tree over ``samples`` once and returns a callable that, for
    any array of query points, returns the value of the single closest sample.

    :param samples: Coords for interpolation.
    :type samples: np.array
    :param samplesVals: Values on class coords.
    :type samplesVals: np.array
    :return: Callable mapping query points to their nearest sample's value.
    """
    tree = cKDTree(samples)

    def interpolate(points):
        # Index of the single nearest neighbour for each query point.
        _, nearestIdx = tree.query(points, 1)
        return samplesVals[nearestIdx]

    return interpolate
def fNearest2x2FromClassAndOuter(samples, samplesVals):
    """
    Finds the two nearest samples from the class and the two nearest from the
    outer samples, and performs a weighted average of their values, using the
    inverse distance as the weight. If the distance to some data sample is
    zero, that sample's value is returned directly. Beware that if there are
    multiple samples at zero distance, the zero (outer) value takes priority.
    Outer-class samples have values that are equal to or smaller than 0 (by
    default) and actual-class samples must have values greater than zero.

    :param samples: Coords for interpolation.
    :type samples: np.array
    :param samplesVals: Values on class coords.
    :type samplesVals: np.array
    :return: Callable that maps an array of query points to interpolated values.
    """
    # Split samples by sign of their value: >0 is "class", <=0 is "outer".
    cInd=np.where(samplesVals>0)
    classData=samples[cInd]
    classVals=samplesVals[cInd]
    haveClassData=classData.shape[0]>0
    oInd=np.where(samplesVals<=0)
    outerData=samples[oInd]
    outerVals=samplesVals[oInd]
    haveOuterData=outerData.shape[0]>0
    #nearest 2x2 (from each class) interpolate
    # Build one kd-tree per non-empty partition; when a partition has a single
    # sample, only one neighbour can be queried.
    if haveClassData:
        fnClass=cKDTree(classData)
        fnClassMaxNeigh=1 if classData.shape[0]<2 else 2
    if haveOuterData:
        fnOuter=cKDTree(outerData)
        fnOuterMaxNeigh=1 if outerData.shape[0]<2 else 2
    def res(p):
        #check the nearest
        if haveClassData:
            dC, iC=fnClass.query(p,fnClassMaxNeigh)
            if fnClassMaxNeigh==1:
                #we need col vectors
                dC=dC[:, np.newaxis]
                iC=iC[:, np.newaxis]
        if haveOuterData:
            dO, oC=fnOuter.query(p,fnOuterMaxNeigh)
            if fnOuterMaxNeigh==1:
                #we need col vectors
                dO=dO[:, np.newaxis]
                oC=oC[:, np.newaxis]
        # Stack neighbour values/distances from whichever partitions exist.
        if haveClassData and haveOuterData:
            values=np.hstack((classVals[iC],outerVals[oC]))
            del iC
            del oC
            distances=np.hstack((dC,dO))
        elif haveClassData:
            values=classVals[iC]
            del iC
            distances=dC
        else:
            #only outer remains
            values=outerVals[oC]
            del oC
            distances=dO
        with np.errstate(divide='ignore',invalid='ignore'):
            #we want to detect zero distance values
            #this values will show as inf in 1/distances and nans in avg
            distances=1./distances
            avg=np.average(values, axis=1, weights=distances)
            #find problems, if exists
            problems=np.where(np.isnan(avg))
            if problems[0].shape[0]>0:
                # For each NaN row, take the value of the FIRST zero-distance
                # (inf-weight) neighbour found by argmax.
                problemsCols=(problems[0],np.array(np.argmax(np.isinf(distances[problems]),axis=1)))    #we are interested in the first only
                #change the nans with values of the problematic points
                avg[problems]=values[problemsCols]
        return avg
    return res
def fNearest2x2FromEachClass2AtAll(samples, samplesVals):
    """
    Finds the two nearest samples from each class (outer and actual class),
    plus the two nearest overall, and performs a weighted average of their
    values, using the inverse distance as the weight. If the distance to some
    data sample is zero, that sample's value is returned directly.
    Outer-class samples have values that are equal to or smaller than 0 (by
    default) and actual-class samples must have values greater than zero.

    :param samples: Coords for interpolation.
    :type samples: np.array
    :param samplesVals: Values on class coords.
    :type samplesVals: np.array
    :return: Callable that maps an array of query points to interpolated values.
    """
    # Split samples by sign of their value: >0 is "class", <=0 is "outer".
    cInd=np.where(samplesVals>0)
    classData=samples[cInd]
    classVals=samplesVals[cInd]
    haveClassData=classData.shape[0]>0
    oInd=np.where(samplesVals<=0)
    outerData=samples[oInd]
    outerVals=samplesVals[oInd]
    haveOuterData=outerData.shape[0]>0
    # Tree over ALL samples, plus one per non-empty partition; a partition
    # with a single sample can only be queried for one neighbour.
    fnAll=cKDTree(samples)
    fnAllMaxNeigh=1 if samplesVals.shape[0]<2 else 2
    if haveClassData:
        fnClass=cKDTree(classData)
        fnClassMaxNeigh=1 if classData.shape[0]<2 else 2
    if haveOuterData:
        fnOuter=cKDTree(outerData)
        fnOuterMaxNeigh=1 if outerData.shape[0]<2 else 2
    def res(p):
        #check the nearest
        DA, IA = fnAll.query(p,fnAllMaxNeigh)
        if fnAllMaxNeigh==1:
            #we need col vectors
            DA=DA[:, np.newaxis]
            IA=IA[:, np.newaxis]
        if haveClassData:
            dC, iC=fnClass.query(p,fnClassMaxNeigh)
            if fnClassMaxNeigh==1:
                #we need col vectors
                dC=dC[:, np.newaxis]
                iC=iC[:, np.newaxis]
        if haveOuterData:
            dO, oC=fnOuter.query(p,fnOuterMaxNeigh)
            if fnOuterMaxNeigh==1:
                #we need col vectors
                dO=dO[:, np.newaxis]
                oC=oC[:, np.newaxis]
        #compile data we have
        # Note: the global neighbours (IA) may duplicate per-partition
        # neighbours; duplicated samples then weigh in twice.
        if haveClassData and haveOuterData:
            values=np.hstack((samplesVals[IA],classVals[iC],outerVals[oC]))
            del iC
            del oC
            distances=np.hstack((DA,dC,dO))
        elif haveClassData:
            values=np.hstack((samplesVals[IA],classVals[iC]))
            del iC
            distances=np.hstack((DA,dC))
        else:
            #we have just outer not class
            values=np.hstack((samplesVals[IA],outerVals[oC]))
            del oC
            distances=np.hstack((DA,dO))
        del IA
        with np.errstate(divide='ignore',invalid='ignore'):
            #we want to detect zero distance values
            #this values will show as inf in 1/distances and nans in avg
            distances=1./distances
            avg=np.average(values, axis=1, weights=distances)
            #find problems, if exists
            problems=np.where(np.isnan(avg))
            if problems[0].shape[0]>0:
                # For each NaN row, take the value of the FIRST zero-distance
                # (inf-weight) neighbour found by argmax.
                problemsCols=(problems[0],np.array(np.argmax(np.isinf(distances[problems]),axis=1)))    #we are interested in the first only
                #change the nans with values of the problematic points
                avg[problems]=values[problemsCols]
        return avg
    return res
| [
"numpy.average",
"numpy.isinf",
"numpy.errstate",
"numpy.hstack",
"numpy.isnan",
"numpy.where",
"scipy.spatial.cKDTree"
] | [((639, 655), 'scipy.spatial.cKDTree', 'cKDTree', (['samples'], {}), '(samples)\n', (646, 655), False, 'from scipy.spatial import cKDTree\n'), ((1479, 1504), 'numpy.where', 'np.where', (['(samplesVals > 0)'], {}), '(samplesVals > 0)\n', (1487, 1504), True, 'import numpy as np\n'), ((1616, 1642), 'numpy.where', 'np.where', (['(samplesVals <= 0)'], {}), '(samplesVals <= 0)\n', (1624, 1642), True, 'import numpy as np\n'), ((4300, 4325), 'numpy.where', 'np.where', (['(samplesVals > 0)'], {}), '(samplesVals > 0)\n', (4308, 4325), True, 'import numpy as np\n'), ((4446, 4472), 'numpy.where', 'np.where', (['(samplesVals <= 0)'], {}), '(samplesVals <= 0)\n', (4454, 4472), True, 'import numpy as np\n'), ((4588, 4604), 'scipy.spatial.cKDTree', 'cKDTree', (['samples'], {}), '(samples)\n', (4595, 4604), False, 'from scipy.spatial import cKDTree\n'), ((1827, 1845), 'scipy.spatial.cKDTree', 'cKDTree', (['classData'], {}), '(classData)\n', (1834, 1845), False, 'from scipy.spatial import cKDTree\n'), ((1946, 1964), 'scipy.spatial.cKDTree', 'cKDTree', (['outerData'], {}), '(outerData)\n', (1953, 1964), False, 'from scipy.spatial import cKDTree\n'), ((4701, 4719), 'scipy.spatial.cKDTree', 'cKDTree', (['classData'], {}), '(classData)\n', (4708, 4719), False, 'from scipy.spatial import cKDTree\n'), ((4820, 4838), 'scipy.spatial.cKDTree', 'cKDTree', (['outerData'], {}), '(outerData)\n', (4827, 4838), False, 'from scipy.spatial import cKDTree\n'), ((2582, 2623), 'numpy.hstack', 'np.hstack', (['(classVals[iC], outerVals[oC])'], {}), '((classVals[iC], outerVals[oC]))\n', (2591, 2623), True, 'import numpy as np\n'), ((2683, 2702), 'numpy.hstack', 'np.hstack', (['(dC, dO)'], {}), '((dC, dO))\n', (2692, 2702), True, 'import numpy as np\n'), ((2945, 2991), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (2956, 2991), True, 'import numpy as np\n'), ((3181, 3226), 'numpy.average', 'np.average', 
(['values'], {'axis': '(1)', 'weights': 'distances'}), '(values, axis=1, weights=distances)\n', (3191, 3226), True, 'import numpy as np\n'), ((5699, 5757), 'numpy.hstack', 'np.hstack', (['(samplesVals[IA], classVals[iC], outerVals[oC])'], {}), '((samplesVals[IA], classVals[iC], outerVals[oC]))\n', (5708, 5757), True, 'import numpy as np\n'), ((5816, 5839), 'numpy.hstack', 'np.hstack', (['(DA, dC, dO)'], {}), '((DA, dC, dO))\n', (5825, 5839), True, 'import numpy as np\n'), ((6221, 6267), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (6232, 6267), True, 'import numpy as np\n'), ((6444, 6489), 'numpy.average', 'np.average', (['values'], {'axis': '(1)', 'weights': 'distances'}), '(values, axis=1, weights=distances)\n', (6454, 6489), True, 'import numpy as np\n'), ((3308, 3321), 'numpy.isnan', 'np.isnan', (['avg'], {}), '(avg)\n', (3316, 3321), True, 'import numpy as np\n'), ((5885, 5928), 'numpy.hstack', 'np.hstack', (['(samplesVals[IA], classVals[iC])'], {}), '((samplesVals[IA], classVals[iC]))\n', (5894, 5928), True, 'import numpy as np\n'), ((5969, 5988), 'numpy.hstack', 'np.hstack', (['(DA, dC)'], {}), '((DA, dC))\n', (5978, 5988), True, 'import numpy as np\n'), ((6063, 6106), 'numpy.hstack', 'np.hstack', (['(samplesVals[IA], outerVals[oC])'], {}), '((samplesVals[IA], outerVals[oC]))\n', (6072, 6106), True, 'import numpy as np\n'), ((6147, 6166), 'numpy.hstack', 'np.hstack', (['(DA, dO)'], {}), '((DA, dO))\n', (6156, 6166), True, 'import numpy as np\n'), ((6571, 6584), 'numpy.isnan', 'np.isnan', (['avg'], {}), '(avg)\n', (6579, 6584), True, 'import numpy as np\n'), ((3423, 3452), 'numpy.isinf', 'np.isinf', (['distances[problems]'], {}), '(distances[problems])\n', (3431, 3452), True, 'import numpy as np\n'), ((6686, 6715), 'numpy.isinf', 'np.isinf', (['distances[problems]'], {}), '(distances[problems])\n', (6694, 6715), True, 'import numpy as np\n')] |
import pandas as pd
import quandl
import math
import numpy as np
# NOTE(review): `cross_validation` was removed from scikit-learn in 0.20
# (replaced by `model_selection`); this import only works on old versions.
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression

# Download the full daily price history of Google from Quandl.
data_frame = quandl.get('WIKI/GOOGL')
# limit the columns that we display, and work with from 12 to 6
data_frame = data_frame[['Adj. Open','Adj. High','Adj. Low','Adj. Close','Adj. Volume',]]
# Derived features: intraday high/close spread and open/close change (percent).
data_frame['HL_PCT'] = (data_frame['Adj. High'] - data_frame['Adj. Close']) / data_frame['Adj. Close'] * 100.0
data_frame['PCT_change'] = (data_frame['Adj. Close'] - data_frame['Adj. Open']) / data_frame['Adj. Open'] * 100.0
# again limiting number of columns to just a meaningful features, hopefully they are labels.
data_frame = data_frame[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
print(data_frame)

forecast_col = 'Adj. Close'
# Treat missing data as a strong outlier instead of dropping whole rows.
data_frame.fillna(-99999, inplace=True)

# Forecast horizon: 10% of the dataset length into the future.
forecast_out = int(math.ceil(0.1*len(data_frame)))

# The label is the forecast column shifted `forecast_out` rows into the
# future; trailing rows then have no label and are dropped.
data_frame['label'] = data_frame[forecast_col].shift(-forecast_out)
data_frame.dropna(inplace=True)
print(data_frame.head())

# BUG FIX: the original sliced X with `X = X[:-forecast_out+1]` AFTER Y was
# built from the same (already dropna'd) frame, which left len(X) != len(Y)
# and made the feature/label arrays unusable together.  After dropna() every
# remaining row has a label, so X and Y are already aligned; no slicing is
# needed (the redundant second dropna()/y assignment is dropped too).
X = preprocessing.scale(np.array(data_frame.drop(['label'], 1)))
Y = np.array(data_frame['label'])
print(len(X), len(Y))
| [
"quandl.get",
"numpy.array",
"sklearn.preprocessing.scale"
] | [((188, 212), 'quandl.get', 'quandl.get', (['"""WIKI/GOOGL"""'], {}), "('WIKI/GOOGL')\n", (198, 212), False, 'import quandl\n'), ((1080, 1109), 'numpy.array', 'np.array', (["data_frame['label']"], {}), "(data_frame['label'])\n", (1088, 1109), True, 'import numpy as np\n'), ((1115, 1137), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (1134, 1137), False, 'from sklearn import preprocessing, cross_validation, svm\n'), ((1198, 1227), 'numpy.array', 'np.array', (["data_frame['label']"], {}), "(data_frame['label'])\n", (1206, 1227), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import os
import numpy as np
import yt
from pygrackle import \
FluidContainer, \
chemistry_data, \
evolve_constant_density
from pygrackle.utilities.physical_constants import \
mass_hydrogen_cgs, \
sec_per_Myr, \
cm_per_mpc
import sys
from multiprocessing import Pool
from contextlib import closing
import itertools
tiny_number = 1e-20
class NoStdStreams(object):
    """Context manager that temporarily redirects stdout/stderr.

    By default both streams are silenced by pointing them at ``os.devnull``;
    alternative file-like targets may be supplied instead.
    """

    def __init__(self, stdout=None, stderr=None):
        # Fall back to a devnull sink when no replacement stream is given.
        self.devnull = open(os.devnull, 'w')
        self._stdout = stdout or self.devnull or sys.stdout
        self._stderr = stderr or self.devnull or sys.stderr

    def __enter__(self):
        # Remember and flush the live streams, then swap in the replacements.
        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr
        self.old_stdout.flush()
        self.old_stderr.flush()
        sys.stdout = self._stdout
        sys.stderr = self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush the replacements, restore the originals, release devnull.
        self._stdout.flush()
        self._stderr.flush()
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
        self.devnull.close()
def cooling_cell(density = 12.2,
                 initial_temperature = 2.0E4,
                 final_time = 30.0,
                 metal_fraction = 4.0E-4,
                 make_plot = False,
                 save_output = False, primordial_chemistry = 2,
                 outname = None, save_H2_fraction = False,
                 return_result = False,
                 verbose = False, H2_converge = None,
                 *args, **kwargs):
    """
    Evolve a single gas cell at constant density with Grackle chemistry
    and radiative cooling.

    :param density: gas density in code units (code density unit is
        mass_hydrogen_cgs, so this is a hydrogen number-density scale).
    :param initial_temperature: starting temperature [K].
    :param final_time: evolution time in code time units (Myr here).
    :param metal_fraction: metallicity in units of solar metal mass fraction.
    :param make_plot: save a temperature/mu vs. time figure to
        ``cooling_cell.png``.
    :param save_output: save the evolution arrays as a yt dataset
        (``<outname>.h5``).
    :param primordial_chemistry: Grackle primordial chemistry level (0-3).
    :param outname: basename for outputs; defaults to a name built from the
        k27/LW factors.
    :param save_H2_fraction: append the final state to ``all_runs_d_<density>.dat``.
    :param return_result: return the evolution data dict instead of None.
    :param verbose: forwarded to the constant-density evolver.
    :param H2_converge: optional H2-fraction convergence tolerance for early
        termination of the evolution.
    :param kwargs: may contain ``LW_factor`` and ``k27_factor`` rate
        multipliers (both default to 1.0).
    """
    current_redshift = 0.
    # Set solver parameters
    my_chemistry = chemistry_data()
    my_chemistry.use_grackle = 1
    my_chemistry.with_radiative_cooling = 1
    my_chemistry.primordial_chemistry = primordial_chemistry
    my_chemistry.metal_cooling = 1
    my_chemistry.UVbackground = 1
    my_chemistry.self_shielding_method = 3
    # H2-specific physics only applies when molecular chemistry is tracked.
    if primordial_chemistry > 1:
        my_chemistry.H2_self_shielding = 2
        my_chemistry.h2_on_dust = 1
        my_chemistry.three_body_rate = 4
    # NOTE(review): hard-coded local path to the Grackle data files.
    grackle_dir = "/home/aemerick/code/grackle-emerick/"
    my_chemistry.grackle_data_file = os.sep.join( #['/home/aemerick/code/grackle-emerick/input/CloudyData_UVB=HM2012.h5'])
        [grackle_dir, "input","CloudyData_UVB=HM2012_shielded.h5"])
    # set the factors
    my_chemistry.LW_factor = kwargs.get("LW_factor", 1.0)
    my_chemistry.k27_factor = kwargs.get("k27_factor", 1.0)
    #if 'LW_factor' in kwargs.keys():
    #    my_chemistry.LW_factor = kwargs['LW_factor']
    #else:
    #    my_chemistry.LW_factor = 1.0
    #if 'k27_factor' in kwargs.keys():
    #    my_chemistry.k27_factor = kwargs['k27_factor']
    #else:
    #    my_chemistry.k27_factor = 1.0
    # Set units
    my_chemistry.comoving_coordinates = 0 # proper units
    my_chemistry.a_units = 1.0
    my_chemistry.a_value = 1. / (1. + current_redshift) / \
        my_chemistry.a_units
    my_chemistry.density_units = mass_hydrogen_cgs # rho = 1.0 is 1.67e-24 g
    my_chemistry.length_units = cm_per_mpc         # 1 Mpc in cm
    my_chemistry.time_units = sec_per_Myr          # 1 Myr in s
    my_chemistry.velocity_units = my_chemistry.a_units * \
        (my_chemistry.length_units / my_chemistry.a_value) / \
        my_chemistry.time_units
    rval = my_chemistry.initialize()  # NOTE(review): return value unchecked
    # Single-cell fluid container with the species set by the chemistry level.
    fc = FluidContainer(my_chemistry, 1)
    fc["density"][:] = density
    if my_chemistry.primordial_chemistry > 0:
        # Neutral atomic start: 76% H, 24% He, trace everything else.
        fc["HI"][:] = 0.76 * fc["density"]
        fc["HII"][:] = tiny_number * fc["density"]
        fc["HeI"][:] = (1.0 - 0.76) * fc["density"]
        fc["HeII"][:] = tiny_number * fc["density"]
        fc["HeIII"][:] = tiny_number * fc["density"]
    if my_chemistry.primordial_chemistry > 1:
        fc["H2I"][:] = tiny_number * fc["density"]
        fc["H2II"][:] = tiny_number * fc["density"]
        fc["HM"][:] = tiny_number * fc["density"]
        fc["de"][:] = tiny_number * fc["density"]
        fc['H2_self_shielding_length'][:] = 1.8E-6
    if my_chemistry.primordial_chemistry > 2:
        fc["DI"][:] = 2.0 * 3.4e-5 * fc["density"]
        fc["DII"][:] = tiny_number * fc["density"]
        fc["HDI"][:] = tiny_number * fc["density"]
    if my_chemistry.metal_cooling == 1:
        fc["metal"][:] = metal_fraction * fc["density"] * \
          my_chemistry.SolarMetalFractionByMass
    fc["x-velocity"][:] = 0.0
    fc["y-velocity"][:] = 0.0
    fc["z-velocity"][:] = 0.0
    # Set internal energy from the requested temperature, then correct it
    # once for the actual mean molecular weight.
    fc["energy"][:] = initial_temperature / \
        fc.chemistry_data.temperature_units
    fc.calculate_temperature()
    fc["energy"][:] *= initial_temperature / fc["temperature"]
    # timestepping safety factor
    safety_factor = 0.001
    # let gas cool at constant density
    #if verbose:
    print("Beginning Run")
    data = evolve_constant_density(
        fc, final_time=final_time, H2_converge = H2_converge,
        safety_factor=safety_factor, verbose = verbose)
    #else:
    #    print "Beginning Run"
    #    with NoStdStreams():
    #        data = evolve_constant_density(
    #            fc, final_time=final_time, H2_converge = 1.0E-6,
    #            safety_factor=safety_factor)
    #    print "Ending Run"
    if make_plot:
        # Temperature (log-log) with mean molecular weight on a twin axis.
        p1, = plt.loglog(data["time"].to("Myr"), data["temperature"],
                        color="black", label="T")
        plt.xlabel("Time [Myr]")
        plt.ylabel("T [K]")
        data["mu"] = data["temperature"] / \
            (data["energy"] * (my_chemistry.Gamma - 1.) *
             fc.chemistry_data.temperature_units)
        plt.twinx()
        p2, = plt.semilogx(data["time"].to("Myr"), data["mu"],
                          color="red", label="$\\mu$")
        plt.ylabel("$\\mu$")
        plt.legend([p1,p2],["T","$\\mu$"], fancybox=True,
                   loc="center left")
        plt.savefig("cooling_cell.png")
    # save data arrays as a yt dataset
    if outname is None:
        outname = 'cooling_cell_%.2f_%.2f'%(my_chemistry.k27_factor,
                                            my_chemistry.LW_factor)
    if save_output:
        yt.save_as_dataset({}, outname + '.h5', data)
    # H2 species only exist for primordial_chemistry > 1.
    if my_chemistry.primordial_chemistry > 1:
        H2_fraction = (data['H2I'] + data['H2II']) / data['density']
    else:
        H2_fraction = np.zeros(np.size(data['density']))
    if save_H2_fraction:
        # Append one summary row (final state) per run to a shared file.
        #np.savetxt(outname + ".dat", [data['time'], H2_fraction])
        f = open("all_runs_d_%.2f.dat"%(density),"a")
        #        f.write("# k27 LW f_H2 T time\n")
        f.write("%8.8E %8.8E %8.8E %8.8E %8.8E \n"%(my_chemistry.k27_factor,
                                              my_chemistry.LW_factor,
                                      H2_fraction[-1], data['temperature'][-1],
                                      data['time'][-1] ))
        f.close()
    if return_result:
        return data
    else:
        return
def _parallel_loop(i, k27, LW):
    """Run one cooling-cell model and package its end state.

    Returns a one-entry dict keyed by the zero-padded run index ``i``,
    holding the rate factors plus the final H2 fraction and temperature.
    """
    primordial_chemistry = 1
    data = cooling_cell(k27_factor=k27, LW_factor=LW, save_output=False,
                        save_H2_fraction=False,
                        primordial_chemistry=primordial_chemistry,
                        return_result=True)
    # H2 species are only tracked for primordial_chemistry > 1.
    if primordial_chemistry > 1:
        H2_fraction = (data['H2I'] + data['H2II']) / data['density']
    else:
        H2_fraction = np.zeros(np.size(data['density']))
    key = "%00005i" % (i)
    summary = {'k27': k27,
               'LW': LW,
               'H2_fraction': H2_fraction[-1],
               'T': data['temperature'][-1]}
    return {key: summary}
def _parallel_loop_star(args):
    # Adapter for Pool.map_async: unpack an (i, k27, LW) tuple into the
    # positional arguments of _parallel_loop.
    return _parallel_loop(*args)
def cooling_cell_grid(k27_factors = None, LW_factors = None,
                      fmin = 0.1, fmax = 10000.0, npoints = 100,
                      nproc = 1, outname = None):
    """
    Sweep a 2-D grid of (k27, LW) rate factors, running one cooling-cell
    model per grid point.

    :param k27_factors: k27 multipliers; defaults to ``npoints`` values
        log-spaced between ``fmin`` and ``fmax``.
    :param LW_factors: LW multipliers; defaults to the same grid as k27.
    :param fmin: lower bound of the default log-spaced grid.
    :param fmax: upper bound of the default log-spaced grid.
    :param npoints: number of points per axis of the default grid.
    :param nproc: number of worker processes; 1 runs everything serially.
    :param outname: results file (parallel path only); one row per model
        with k27, LW, final H2 fraction, and final temperature.
    """
    if outname is None:
        outname = "all_parallel_runs.dat"
    if k27_factors is None:
        k27_factors = np.logspace(np.log10(fmin),
                                  np.log10(fmax), npoints)
    if LW_factors is None:
        LW_factors = 1.0 * k27_factors
    if nproc == 1:
        # Serial path: each model appends its own summary row itself
        # (save_H2_fraction=True inside cooling_cell).
        call_cell = lambda x, y : cooling_cell(k27_factor = x,
                                               LW_factor = y, save_H2_fraction = True)
        for i,k27 in enumerate(k27_factors):
            print((i)*np.size(LW_factors))
            temp_cell = lambda y : call_cell(k27,y)
            list(map(temp_cell, LW_factors)) # this may not work anymore - AE python 2 to 3
    else:
        # Parallel path: flatten the grid and process it in batches of
        # `nproc` flat indices (grouper idiom via zip_longest).
        LW_mesh, k27_mesh = np.meshgrid(LW_factors, k27_factors)
        k27_mesh = k27_mesh.flatten()
        LW_mesh  = LW_mesh.flatten()
        for sub_list in itertools.zip_longest(*(iter( np.arange(np.size(k27_mesh))),) * nproc):
            sub_list = list(sub_list)
            # The last batch is padded with None by zip_longest; drop the padding.
            sub_list = [s for s in sub_list if s is not None]
            reduced_nproc = np.min( [len(sub_list), nproc])
            print("running for ", sub_list)
            imin,imax = sub_list[0], (sub_list[-1] + 1)
            pool = Pool(reduced_nproc)
            results = pool.map_async(_parallel_loop_star,
                                     zip(sub_list,
                                         k27_mesh[imin:imax], LW_mesh[imin:imax]))
            pool.close()
            pool.join()
            # Append one summary row per finished model to the results file.
            for r in results.get():
                str_i = list(r.keys())[0]
                f = open(outname,"a")
                f.write("%8.8E %8.8E %8.8E %8.8E\n"%( r[str_i]['k27'],
                        r[str_i]['LW'], r[str_i]['H2_fraction'],
                        r[str_i]['T']))
                f.close()
            del(results)
    return
if __name__ == "__main__":
    # test this
    #cooling_cell(k27_factor = 0.99, LW_factor = 0.99,
    #             save_output = False, save_H2_fraction=True)
    import time
    # Benchmark sweep: npoints x npoints grid on nproc workers; the results
    # file name is taken from the first command-line argument.
    npoints = 16
    nproc = 4
    start = time.time()
    cooling_cell_grid(npoints = npoints, nproc = nproc, outname = str(sys.argv[1]))
    end = time.time()
    dt = end - start
    # Crude per-processor "efficiency" figure: wall time divided by workers.
    eff = dt / (1.0*nproc)
    print("This run of %i models on %i processors took %.3E s - Eff = %.1E"%(npoints*npoints, nproc, dt, eff))
| [
"numpy.size",
"pygrackle.chemistry_data",
"numpy.meshgrid",
"matplotlib.pyplot.twinx",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"pygrackle.FluidContainer",
"time.time",
"yt.save_as_dataset",
"os.sep.join",
"multiprocessing.Pool",
"pygrackle.evolve_constant_density",
"numpy.log... | [((1584, 1600), 'pygrackle.chemistry_data', 'chemistry_data', ([], {}), '()\n', (1598, 1600), False, 'from pygrackle import FluidContainer, chemistry_data, evolve_constant_density\n'), ((2100, 2172), 'os.sep.join', 'os.sep.join', (["[grackle_dir, 'input', 'CloudyData_UVB=HM2012_shielded.h5']"], {}), "([grackle_dir, 'input', 'CloudyData_UVB=HM2012_shielded.h5'])\n", (2111, 2172), False, 'import os\n'), ((3286, 3317), 'pygrackle.FluidContainer', 'FluidContainer', (['my_chemistry', '(1)'], {}), '(my_chemistry, 1)\n', (3300, 3317), False, 'from pygrackle import FluidContainer, chemistry_data, evolve_constant_density\n'), ((4725, 4850), 'pygrackle.evolve_constant_density', 'evolve_constant_density', (['fc'], {'final_time': 'final_time', 'H2_converge': 'H2_converge', 'safety_factor': 'safety_factor', 'verbose': 'verbose'}), '(fc, final_time=final_time, H2_converge=H2_converge,\n safety_factor=safety_factor, verbose=verbose)\n', (4748, 4850), False, 'from pygrackle import FluidContainer, chemistry_data, evolve_constant_density\n'), ((9925, 9936), 'time.time', 'time.time', ([], {}), '()\n', (9934, 9936), False, 'import time\n'), ((10031, 10042), 'time.time', 'time.time', ([], {}), '()\n', (10040, 10042), False, 'import time\n'), ((5286, 5310), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [Myr]"""'], {}), "('Time [Myr]')\n", (5296, 5310), True, 'import matplotlib.pyplot as plt\n'), ((5319, 5338), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""T [K]"""'], {}), "('T [K]')\n", (5329, 5338), True, 'import matplotlib.pyplot as plt\n'), ((5501, 5512), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (5510, 5512), True, 'import matplotlib.pyplot as plt\n'), ((5643, 5663), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (5653, 5663), True, 'import matplotlib.pyplot as plt\n'), ((5672, 5743), 'matplotlib.pyplot.legend', 'plt.legend', (['[p1, p2]', "['T', '$\\\\mu$']"], {'fancybox': '(True)', 'loc': '"""center 
left"""'}), "([p1, p2], ['T', '$\\\\mu$'], fancybox=True, loc='center left')\n", (5682, 5743), True, 'import matplotlib.pyplot as plt\n'), ((5771, 5802), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cooling_cell.png"""'], {}), "('cooling_cell.png')\n", (5782, 5802), True, 'import matplotlib.pyplot as plt\n'), ((6034, 6079), 'yt.save_as_dataset', 'yt.save_as_dataset', (['{}', "(outname + '.h5')", 'data'], {}), "({}, outname + '.h5', data)\n", (6052, 6079), False, 'import yt\n'), ((8514, 8550), 'numpy.meshgrid', 'np.meshgrid', (['LW_factors', 'k27_factors'], {}), '(LW_factors, k27_factors)\n', (8525, 8550), True, 'import numpy as np\n'), ((6237, 6261), 'numpy.size', 'np.size', (["data['density']"], {}), "(data['density'])\n", (6244, 6261), True, 'import numpy as np\n'), ((7269, 7293), 'numpy.size', 'np.size', (["data['density']"], {}), "(data['density'])\n", (7276, 7293), True, 'import numpy as np\n'), ((7928, 7942), 'numpy.log10', 'np.log10', (['fmin'], {}), '(fmin)\n', (7936, 7942), True, 'import numpy as np\n'), ((7978, 7992), 'numpy.log10', 'np.log10', (['fmax'], {}), '(fmax)\n', (7986, 7992), True, 'import numpy as np\n'), ((9006, 9025), 'multiprocessing.Pool', 'Pool', (['reduced_nproc'], {}), '(reduced_nproc)\n', (9010, 9025), False, 'from multiprocessing import Pool\n'), ((8309, 8328), 'numpy.size', 'np.size', (['LW_factors'], {}), '(LW_factors)\n', (8316, 8328), True, 'import numpy as np\n'), ((8692, 8709), 'numpy.size', 'np.size', (['k27_mesh'], {}), '(k27_mesh)\n', (8699, 8709), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
import scipy.ndimage as ndimage
import numpy as np
from matplotlib import pyplot as plt
from scipy.io import wavfile
def GreenSqr(image, center, width):
    """Paint, in place, a filled square of side 2*(2*width + 1) centred on
    ``center``.

    Grayscale (2-D) images get a white square; colour (3-D) images get a
    green square (red and blue channels zeroed).  Invalid input is reported
    on stdout and None is returned.
    """
    # --- input validation -------------------------------------------------
    if not isinstance(image, np.ndarray):
        print("GreenSqr: Not a tensor. Was: Image=", image.__class__)
        return None
    if not isinstance(center, tuple) or len(center) != 2:
        print("GreenSqr: Center point should contain two values (x, y). Was: center=", center)
        return None
    if not isinstance(width, int):
        print("GreenSqr: Width should be a number. Was: width=", width.__class__)
        return None

    ndim = len(image.shape)
    half = 2 * width + 1
    if ndim == 2:
        # Grayscale: saturate the square to white.
        row, col = center
        image[row - half: row + half, col - half: col + half] = 255
    elif ndim == 3:
        # Colour: zero red and blue (channels 0 and 2), leaving green.
        r0, r1 = int(center[0] - half), int(center[0] + half)
        c0, c1 = int(center[1] - half), int(center[1] + half)
        image[r0: r1, c0: c1, 0:3:2] = 0
    else:
        print("GreenSqr: Unsupported shape. Was:", image.shape)
        return None
def QuadGreenSqr(image, rows):
    """Paint whole image columns pure green, in place.

    ``rows`` is (despite its name) a list of column indices; every pixel in
    each listed column is set to (0, 255, 0) — assumes a 3-channel image.
    Prints a diagnostic and returns None on invalid input.
    """
    if not isinstance(image, np.ndarray):
        print("QuadGreenSqr: Not a tensor. Was: Image=", image.__class__)
        return None
    if not isinstance(rows, list):
        print("QuadGreenSqr: Rows should be a list. Was: rows=", rows.__class__)
        return None
    pure_green = (0, 255, 0)
    # Fancy indexing selects every listed column at once; the colour tuple
    # broadcasts over the channel axis.
    image[:, rows] = pure_green
def ColorShift(image):
    """Double, in place, every value lying in the mid-tone range [75, 125]."""
    if not isinstance(image, np.ndarray):
        print("ColorShift: Not a tensor. Was: Image=", image.__class__)
        return None
    # Boolean mask of mid-tone values; only those get scaled.
    midtones = (image >= 75) & (image <= 125)
    image[midtones] *= 2
def myColorReplacementLoop(img, read, write, amount, th):
    """Pixel-by-pixel variant of myColorReplacement.

    Scans ``amount`` rows starting at ``read``; wherever the red or blue
    channel is below ``th``, the green channel of the corresponding row
    (translated so the band starts at ``write``) is saturated to 255.
    Works on a copy; the input array is untouched.  Returns None for
    non-array input.
    """
    if img.__class__ != np.ndarray:
        return None
    out = img.copy()
    offset = write - read  # row translation between read band and write band
    for row in range(read, read + amount):
        for col in range(out.shape[1]):
            dark_red = out[row, col, 0] < th
            dark_blue = out[row, col, 2] < th
            if dark_red or dark_blue:
                out[row + offset, col, 1] = 255
    return out
def myColorReplacement(img, read, write, amount, th):
    """Vectorised variant of myColorReplacementLoop (same contract).

    Wherever the red or blue channel in the ``amount``-row band starting at
    ``read`` falls below ``th``, saturate the green channel in the matching
    band starting at ``write``.  Works on a copy; returns None for
    non-array input.
    """
    if img.__class__ != np.ndarray:
        return None
    out = img.copy()
    band = out[read: read + amount]
    # Mask of pixels whose red OR blue channel is below the threshold.
    mask = (band[:, :, 0] < th) | (band[:, :, 2] < th)
    out[write: write + amount, :, 1][mask] = 255
    return out
def TwoLinerLoop(img):
    """Draw two horizontal (150, 255, 0) stripes at 1/3 and 2/3 of the image
    height, pixel by pixel, on a copy of the input.

    Returns None for non-arrays, non-3-D images, or images fewer than 30
    rows tall.
    """
    if img.__class__ != np.ndarray:
        return None
    canvas = img.copy()
    shape = canvas.shape
    centre_step = np.int64(np.round(shape[0] / 3.))
    half_w = np.int64(np.round(shape[0] / 30.))
    if len(shape) != 3 or shape[0] < 30:
        return None
    # One stripe centred on each multiple of a third of the height.
    for centre in (centre_step, 2 * centre_step):
        for m in range(centre - half_w, centre + half_w + 1):
            for n in range(shape[1]):
                canvas[m, n, 0] = 150
                canvas[m, n, 1] = 255
                canvas[m, n, 2] = 0
    return canvas
def TwoLiner(img):
    """Vectorised TwoLinerLoop: paint two (150, 255, 0) stripes at 1/3 and
    2/3 of the image height on a copy of the input.

    Returns None for non-arrays, non-3-D images, or images fewer than 30
    rows tall.
    """
    if img.__class__ != np.ndarray:
        return None
    out = img.copy()
    height = out.shape[0]
    centre = np.int64(np.round(height / 3.))
    half_w = np.int64(np.round(height / 30.))
    if len(out.shape) != 3 or height < 30:
        return None
    # Collect the row indices of both stripes and colour them in one shot.
    stripe_rows = list(range(centre - half_w, centre + half_w + 1))
    stripe_rows += list(range(2 * centre - half_w, 2 * centre + half_w + 1))
    out[stripe_rows, :] = 150, 255, 0
    return out
def NoisySin(time, nType='g'):
    """Sample a sine wave over ``time`` and corrupt it with random noise.

    ``nType='u'`` adds uniform noise in [-0.25, 0.25); any other value adds
    Gaussian noise (mean 0, sigma 1/8).  Returns (noisy, clean, noise).
    """
    clean = np.sin(time)
    count = len(clean)
    if nType == 'u':
        # Uniform distribution parameters: [-0.25, 0.25)
        noise = np.random.uniform(-0.25, 0.25, count)
    else:
        # Gaussian distribution parameters: mean 0, sigma 1/8
        noise = np.random.normal(0, 1 / 8.0, count)
    return clean + noise, clean, noise
def myMAF(signal, order=1):
    """Moving-average filter with a window of 2*order + 1 samples.

    Boundaries are handled by scipy.ndimage.convolve's default 'reflect'
    mode.  Returns None (after printing a message) for non-array or
    non-1-D input.
    """
    if not isinstance(signal, np.ndarray):
        print("myMAF: Not a tensor. Was: signal=", signal.__class__)
        return None
    if len(signal.shape) != 1:
        print("myMAF: Unsupported shape:", signal.shape)
        return None
    # Clamp and round the order so the window size is a sane odd integer.
    order = int(np.round(max(order, 1)))
    window = 2 * order + 1
    kernel = np.ones(window) / float(window)
    return ndimage.convolve(signal, kernel)
def myMedFilt(signal, order=1):
    """Median filter with an odd, centred window of 2*order + 1 samples.

    The signal is edge-padded by ``order`` samples on each side so the
    output has the same length as the input.  Returns None (after printing
    a message) for non-array or non-1-D input.

    BUG FIX: the original window slice ``padded[i - order: i + order]``
    excluded the right-hand neighbour, yielding an asymmetric, even-sized
    window of 2*order samples (so e.g. order=1 took the median of only two
    points, i.e. their mean).  The slice now includes ``i + order``,
    producing the conventional centred odd window.
    """
    if not isinstance(signal, np.ndarray):
        print("myMedFilt: Not a tensor. Was: signal=", signal.__class__)
        return None
    if len(signal.shape) != 1:
        print("myMedFilt: Unsupported shape:", signal.shape)
        return None
    if order < 1:
        order = 1
    order = int(np.round(order))
    padded = np.pad(signal, (order, order), 'edge')  # Extend Padding
    result = np.zeros(len(signal))
    for i in range(order, len(signal) + order):
        # Centred window: offsets -order .. +order inclusive.
        result[i - order] = np.median(padded[i - order: i + order + 1])
    return result
def mix(stereo_audio_path, mono_audio_path):
    # Mix a mono WAV into a stereo WAV: both signals are truncated to the
    # shorter length, rescaled to a shared positive/negative range, summed,
    # clipped to int16, and written to 'Mixed.wav'.
    # Stereo has two columns where column 0 is the left channel and column 1 is the right channel
    # The rows in stereo audio represent the signal
    sampling_rate1, stereo_data = wavfile.read(stereo_audio_path)
    sampling_rate2, mono_data = wavfile.read(mono_audio_path)
    if len(stereo_data.shape) != 2:
        print('mix: Stereo file had unexpected shape. Was:', stereo_data.shape)
        return None
    if len(mono_data.shape) != 1:
        print('mix: Mono file had unexpected shape. Was:', mono_data.shape)
        return None
    # Adjust to the same length
    minLen = np.min([stereo_data.shape[0], len(mono_data)])
    data1 = np.float64(stereo_data[: minLen, :])
    data2 = np.float64(mono_data[:minLen])
    # We use these for normalizing the signals
    # NOTE(review): data1[0] / data1[1] index the first two ROWS (samples),
    # not the two channels — data1[:, 0] / data1[:, 1] was probably meant;
    # confirm intended behaviour before relying on this normalization.
    maxOfMaxs = np.max([np.max(data1[0]), np.max(data1[1]), np.max(data2)])
    if maxOfMaxs < 0:
        maxOfMaxs *= -1
    minOfMins = np.min([np.min(data1[0]), np.min(data1[1]), np.min(data2)])
    if minOfMins > 0:
        minOfMins *= -1
    # Normalize the signals: positive parts scaled to maxOfMaxs, negative
    # parts to minOfMins, per channel.
    data2[data2 >= 0] *= maxOfMaxs / np.max(data2)
    data1[:, 0][data1[:, 0] >= 0] *= maxOfMaxs / np.max(data1[:, 0])
    data1[:, 1][data1[:, 1] >= 0] *= maxOfMaxs / np.max(data1[:, 1])
    data2[data2 < 0] *= minOfMins / np.min(data2)
    data1[:, 0][data1[:, 0] < 0] *= minOfMins / np.min(data1[:, 0])
    data1[:, 1][data1[:, 1] < 0] *= minOfMins / np.min(data1[:, 1])
    # Add the mono sound into the stereo one
    result = np.zeros((minLen, 2), dtype=np.float64)
    result[:, 0] += data1[:, 0]
    result[:, 0] += data2
    # NOTE(review): the right output channel also adds data1[:, 0] (left),
    # so the normalized data1[:, 1] above is never used — this looks like a
    # copy-paste slip; data1[:, 1] was likely intended here.
    result[:, 1] += data1[:, 0]
    result[:, 1] += data2
    # Make sure we do not exceed in16
    result = np.round(result)
    result[result > 32767] = 32767
    result[result < -32768] = -32768
    result = np.int16(result)
    wavfile.write('Mixed.wav', sampling_rate1, result)
    return sampling_rate1, stereo_data[: minLen, :], mono_data[:minLen], result
def plot(title, img, location):
    # Draw one BGR image into a subplot grid cell; `location` is a
    # matplotlib subplot specifier (e.g. 221).
    plt.subplot(location)
    plt.tight_layout(pad=2.0)
    # Images here are BGR (OpenCV convention); reversing the channel axis
    # yields RGB as expected by pyplot.
    plt.imshow(np.uint8(img[:, :, ::-1]))
    plt.title(title)
    # Hide all axis decoration so only the image and title remain.
    plt.axis('off')
    plt.xticks([])
    plt.yticks([])
| [
"matplotlib.pyplot.title",
"numpy.ones",
"scipy.io.wavfile.read",
"numpy.sin",
"numpy.float64",
"matplotlib.pyplot.tight_layout",
"numpy.round",
"numpy.pad",
"matplotlib.pyplot.yticks",
"scipy.io.wavfile.write",
"numpy.max",
"matplotlib.pyplot.xticks",
"numpy.uint8",
"numpy.median",
"num... | [((3331, 3343), 'numpy.sin', 'np.sin', (['time'], {}), '(time)\n', (3337, 3343), True, 'import numpy as np\n'), ((4256, 4291), 'scipy.ndimage.convolve', 'ndimage.convolve', (['signal', 'mafFilter'], {}), '(signal, mafFilter)\n', (4272, 4291), True, 'import scipy.ndimage as ndimage\n'), ((4675, 4713), 'numpy.pad', 'np.pad', (['signal', '(order, order)', '"""edge"""'], {}), "(signal, (order, order), 'edge')\n", (4681, 4713), True, 'import numpy as np\n'), ((5146, 5177), 'scipy.io.wavfile.read', 'wavfile.read', (['stereo_audio_path'], {}), '(stereo_audio_path)\n', (5158, 5177), False, 'from scipy.io import wavfile\n'), ((5211, 5240), 'scipy.io.wavfile.read', 'wavfile.read', (['mono_audio_path'], {}), '(mono_audio_path)\n', (5223, 5240), False, 'from scipy.io import wavfile\n'), ((5626, 5661), 'numpy.float64', 'np.float64', (['stereo_data[:minLen, :]'], {}), '(stereo_data[:minLen, :])\n', (5636, 5661), True, 'import numpy as np\n'), ((5676, 5706), 'numpy.float64', 'np.float64', (['mono_data[:minLen]'], {}), '(mono_data[:minLen])\n', (5686, 5706), True, 'import numpy as np\n'), ((6483, 6522), 'numpy.zeros', 'np.zeros', (['(minLen, 2)'], {'dtype': 'np.float64'}), '((minLen, 2), dtype=np.float64)\n', (6491, 6522), True, 'import numpy as np\n'), ((6698, 6714), 'numpy.round', 'np.round', (['result'], {}), '(result)\n', (6706, 6714), True, 'import numpy as np\n'), ((6803, 6819), 'numpy.int16', 'np.int16', (['result'], {}), '(result)\n', (6811, 6819), True, 'import numpy as np\n'), ((6827, 6877), 'scipy.io.wavfile.write', 'wavfile.write', (['"""Mixed.wav"""', 'sampling_rate1', 'result'], {}), "('Mixed.wav', sampling_rate1, result)\n", (6840, 6877), False, 'from scipy.io import wavfile\n'), ((7003, 7024), 'matplotlib.pyplot.subplot', 'plt.subplot', (['location'], {}), '(location)\n', (7014, 7024), True, 'from matplotlib import pyplot as plt\n'), ((7030, 7055), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(2.0)'}), '(pad=2.0)\n', (7046, 7055), 
True, 'from matplotlib import pyplot as plt\n'), ((7104, 7120), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7113, 7120), True, 'from matplotlib import pyplot as plt\n'), ((7126, 7141), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7134, 7141), True, 'from matplotlib import pyplot as plt\n'), ((7147, 7161), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (7157, 7161), True, 'from matplotlib import pyplot as plt\n'), ((7167, 7181), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (7177, 7181), True, 'from matplotlib import pyplot as plt\n'), ((1575, 1616), 'numpy.logical_and', 'np.logical_and', (['(image >= 75)', '(image <= 125)'], {}), '(image >= 75, image <= 125)\n', (1589, 1616), True, 'import numpy as np\n'), ((2171, 2264), 'numpy.logical_or', 'np.logical_or', (['(myImg[read:read + amount, :, 0] < th)', '(myImg[read:read + amount, :, 2] < th)'], {}), '(myImg[read:read + amount, :, 0] < th, myImg[read:read +\n amount, :, 2] < th)\n', (2184, 2264), True, 'import numpy as np\n'), ((2436, 2456), 'numpy.round', 'np.round', (['(D[0] / 3.0)'], {}), '(D[0] / 3.0)\n', (2444, 2456), True, 'import numpy as np\n'), ((2474, 2495), 'numpy.round', 'np.round', (['(D[0] / 30.0)'], {}), '(D[0] / 30.0)\n', (2482, 2495), True, 'import numpy as np\n'), ((3044, 3064), 'numpy.round', 'np.round', (['(D[0] / 3.0)'], {}), '(D[0] / 3.0)\n', (3052, 3064), True, 'import numpy as np\n'), ((3082, 3103), 'numpy.round', 'np.round', (['(D[0] / 30.0)'], {}), '(D[0] / 30.0)\n', (3090, 3103), True, 'import numpy as np\n'), ((4112, 4127), 'numpy.round', 'np.round', (['order'], {}), '(order)\n', (4120, 4127), True, 'import numpy as np\n'), ((4203, 4225), 'numpy.ones', 'np.ones', (['(2 * order + 1)'], {}), '(2 * order + 1)\n', (4210, 4225), True, 'import numpy as np\n'), ((4644, 4659), 'numpy.round', 'np.round', (['order'], {}), '(order)\n', (4652, 4659), True, 'import numpy as np\n'), ((4848, 4886), 
'numpy.median', 'np.median', (['padded[i - order:i + order]'], {}), '(padded[i - order:i + order])\n', (4857, 4886), True, 'import numpy as np\n'), ((6078, 6091), 'numpy.max', 'np.max', (['data2'], {}), '(data2)\n', (6084, 6091), True, 'import numpy as np\n'), ((6142, 6161), 'numpy.max', 'np.max', (['data1[:, 0]'], {}), '(data1[:, 0])\n', (6148, 6161), True, 'import numpy as np\n'), ((6212, 6231), 'numpy.max', 'np.max', (['data1[:, 1]'], {}), '(data1[:, 1])\n', (6218, 6231), True, 'import numpy as np\n'), ((6269, 6282), 'numpy.min', 'np.min', (['data2'], {}), '(data2)\n', (6275, 6282), True, 'import numpy as np\n'), ((6332, 6351), 'numpy.min', 'np.min', (['data1[:, 0]'], {}), '(data1[:, 0])\n', (6338, 6351), True, 'import numpy as np\n'), ((6401, 6420), 'numpy.min', 'np.min', (['data1[:, 1]'], {}), '(data1[:, 1])\n', (6407, 6420), True, 'import numpy as np\n'), ((7072, 7097), 'numpy.uint8', 'np.uint8', (['img[:, :, ::-1]'], {}), '(img[:, :, ::-1])\n', (7080, 7097), True, 'import numpy as np\n'), ((5782, 5798), 'numpy.max', 'np.max', (['data1[0]'], {}), '(data1[0])\n', (5788, 5798), True, 'import numpy as np\n'), ((5800, 5816), 'numpy.max', 'np.max', (['data1[1]'], {}), '(data1[1])\n', (5806, 5816), True, 'import numpy as np\n'), ((5818, 5831), 'numpy.max', 'np.max', (['data2'], {}), '(data2)\n', (5824, 5831), True, 'import numpy as np\n'), ((5909, 5925), 'numpy.min', 'np.min', (['data1[0]'], {}), '(data1[0])\n', (5915, 5925), True, 'import numpy as np\n'), ((5927, 5943), 'numpy.min', 'np.min', (['data1[1]'], {}), '(data1[1])\n', (5933, 5943), True, 'import numpy as np\n'), ((5945, 5958), 'numpy.min', 'np.min', (['data2'], {}), '(data2)\n', (5951, 5958), True, 'import numpy as np\n')] |
"""
???+ note "High-level functions to produce an interactive annotation interface."
Experimental recipes whose function signatures might change significantly in the future. Use with caution.
"""
from bokeh.layouts import row, column
from bokeh.models import Button, Slider
from .subroutine import (
standard_annotator,
standard_finder,
standard_snorkel,
standard_softlabel,
)
from hover.utils.bokeh_helper import servable
from wasabi import msg as logger
import numpy as np
@servable(title="Snorkel Crosscheck")
def snorkel_crosscheck(dataset, lf_list, **kwargs):
    """
    ???+ note "Display the dataset for annotation, cross-checking with labeling functions."
        Use the dev set to check labeling functions; use the labeling functions to hint at potential annotation.

        | Param      | Type                  | Description                            |
        | :--------- | :-------------------- | :------------------------------------- |
        | `dataset`  | `SupervisableDataset` | the dataset to link to                 |
        | `lf_list`  | `list`                | a list of callables decorated by `@hover.utils.snorkel_helper.labeling_function` |
        | `**kwargs` |                       | kwargs to forward to each Bokeh figure |

        Expected visual layout:

        | SupervisableDataset | BokehSnorkelExplorer       | BokehDataAnnotator |
        | :------------------ | :------------------------- | :----------------- |
        | manage data subsets | inspect labeling functions | make annotations   |
    """
    # assemble the two linked plots
    explorer = standard_snorkel(dataset, **kwargs)
    annotator = standard_annotator(dataset, **kwargs)

    # overlay every labeling function on the explorer;
    # clicking a legend entry toggles that function's glyphs
    for _lf in lf_list:
        explorer.plot_lf(_lf)
    explorer.figure.legend.click_policy = "hide"

    # keep axes and point selections synchronized between the plots
    explorer.link_xy_range(annotator)
    explorer.link_selection("raw", annotator, "raw")
    explorer.link_selection("labeled", annotator, "dev")

    sidebar = dataset.view()
    return row(sidebar, explorer.view(), annotator.view())
@servable(title="Active Learning")
def active_learning(dataset, vectorizer, vecnet_callback, **kwargs):
    """
    ???+ note "Display the dataset for annotation, putting a classification model in the loop."
        Currently works most smoothly with `VectorNet`.
        | Param | Type | Description |
        | :-------- | :------- | :----------------------------------- |
        | `dataset` | `SupervisableDataset` | the dataset to link to |
        | `vectorizer` | `callable` | the feature -> vector function |
        | `vecnet_callback` | `callable` | the (dataset, vectorizer) -> `VecNet` function|
        | `**kwargs` | | kwargs to forward to each Bokeh figure |
        Expected visual layout:
        | SupervisableDataset | BokehSoftLabelExplorer | BokehDataAnnotator | BokehDataFinder |
        | :------------------ | :------------------------ | :----------------- | :------------------ |
        | manage data subsets | inspect model predictions | make annotations | search -> highlight |
    """
    # building-block subroutines
    softlabel = standard_softlabel(dataset, **kwargs)
    annotator = standard_annotator(dataset, **kwargs)
    finder = standard_finder(dataset, **kwargs)
    # link coordinates, omitting the softlabel
    finder.link_xy_range(annotator)
    # link selections, noting that softlabel does not take "test"
    for _key in ["raw", "train", "dev"]:
        softlabel.link_selection(_key, annotator, _key)
        softlabel.link_selection(_key, finder, _key)
    finder.link_selection("test", annotator, "test")
    # patch coordinates for representational similarity analysis
    softlabel.value_patch("x", "x_traj", title="Manifold trajectory step")
    softlabel.value_patch("y", "y_traj")
    # recipe-specific widget: a "train" button plus an epoch-count slider
    def setup_model_retrainer():
        model_retrainer = Button(label="Train model", button_type="primary")
        epochs_slider = Slider(start=1, end=20, value=1, step=1, title="# epochs")
        def retrain_model():
            """
            Callback function: (re)train the model, then push fresh
            predictions and manifold trajectories into the dataset plots.
            """
            # disable the button so training runs cannot overlap
            model_retrainer.disabled = True
            logger.info("Start training... button will be disabled temporarily.")
            dataset.setup_label_coding()
            model = vecnet_callback(dataset, vectorizer)
            # label smoothing is applied to the train split only
            train_loader = dataset.loader("train", vectorizer, smoothing_coeff=0.2)
            dev_loader = dataset.loader("dev", vectorizer)
            _ = model.train(train_loader, dev_loader, epochs=epochs_slider.value)
            model.save()
            logger.good("-- 1/2: retrained model")
            # combine inputs and compute outputs of all non-test subsets
            use_subsets = ("raw", "train", "dev")
            inps = []
            for _key in use_subsets:
                inps.extend(dataset.dfs[_key]["text"].tolist())
            probs = model.predict_proba(inps)
            labels = [dataset.label_decoder[_val] for _val in probs.argmax(axis=-1)]
            scores = probs.max(axis=-1).tolist()
            traj_arr, seq_arr, disparity_arr = model.manifold_trajectory(
                inps,
                points_per_step=5,
            )
            # write predictions back subset by subset; `offset` walks the
            # concatenated `inps` list in the same order it was built above
            offset = 0
            for _key in use_subsets:
                _length = dataset.dfs[_key].shape[0]
                # skip subset if empty
                if _length > 0:
                    _slice = slice(offset, offset + _length)
                    dataset.dfs[_key]["pred_label"] = labels[_slice]
                    dataset.dfs[_key]["pred_score"] = scores[_slice]
                    # for each dimension: all steps, selected slice
                    _x_traj = traj_arr[:, _slice, 0]
                    _y_traj = traj_arr[:, _slice, 1]
                    # for each dimension: selected slice, all steps
                    _x_traj = list(np.swapaxes(_x_traj, 0, 1))
                    _y_traj = list(np.swapaxes(_y_traj, 0, 1))
                    dataset.dfs[_key]["x_traj"] = _x_traj
                    dataset.dfs[_key]["y_traj"] = _y_traj
                    offset += _length
            # refresh the softlabel plot's patch slider and data sources;
            # NOTE(review): these are private explorer APIs — confirm on upgrade
            softlabel._dynamic_callbacks["adjust_patch_slider"]()
            softlabel._update_sources()
            model_retrainer.disabled = False
            logger.good("-- 2/2: updated predictions. Training button is re-enabled.")
        model_retrainer.on_click(retrain_model)
        return model_retrainer, epochs_slider
    model_retrainer, epochs_slider = setup_model_retrainer()
    sidebar = column(model_retrainer, epochs_slider, dataset.view())
    layout = row(sidebar, *[_plot.view() for _plot in [softlabel, annotator, finder]])
    return layout
| [
"wasabi.msg.info",
"bokeh.models.Slider",
"bokeh.models.Button",
"wasabi.msg.good",
"numpy.swapaxes",
"hover.utils.bokeh_helper.servable"
] | [((498, 534), 'hover.utils.bokeh_helper.servable', 'servable', ([], {'title': '"""Snorkel Crosscheck"""'}), "(title='Snorkel Crosscheck')\n", (506, 534), False, 'from hover.utils.bokeh_helper import servable\n'), ((2057, 2090), 'hover.utils.bokeh_helper.servable', 'servable', ([], {'title': '"""Active Learning"""'}), "(title='Active Learning')\n", (2065, 2090), False, 'from hover.utils.bokeh_helper import servable\n'), ((3937, 3987), 'bokeh.models.Button', 'Button', ([], {'label': '"""Train model"""', 'button_type': '"""primary"""'}), "(label='Train model', button_type='primary')\n", (3943, 3987), False, 'from bokeh.models import Button, Slider\n'), ((4012, 4070), 'bokeh.models.Slider', 'Slider', ([], {'start': '(1)', 'end': '(20)', 'value': '(1)', 'step': '(1)', 'title': '"""# epochs"""'}), "(start=1, end=20, value=1, step=1, title='# epochs')\n", (4018, 4070), False, 'from bokeh.models import Button, Slider\n'), ((4220, 4289), 'wasabi.msg.info', 'logger.info', (['"""Start training... button will be disabled temporarily."""'], {}), "('Start training... button will be disabled temporarily.')\n", (4231, 4289), True, 'from wasabi import msg as logger\n'), ((4652, 4690), 'wasabi.msg.good', 'logger.good', (['"""-- 1/2: retrained model"""'], {}), "('-- 1/2: retrained model')\n", (4663, 4690), True, 'from wasabi import msg as logger\n'), ((6335, 6409), 'wasabi.msg.good', 'logger.good', (['"""-- 2/2: updated predictions. Training button is re-enabled."""'], {}), "('-- 2/2: updated predictions. Training button is re-enabled.')\n", (6346, 6409), True, 'from wasabi import msg as logger\n'), ((5925, 5951), 'numpy.swapaxes', 'np.swapaxes', (['_x_traj', '(0)', '(1)'], {}), '(_x_traj, 0, 1)\n', (5936, 5951), True, 'import numpy as np\n'), ((5988, 6014), 'numpy.swapaxes', 'np.swapaxes', (['_y_traj', '(0)', '(1)'], {}), '(_y_traj, 0, 1)\n', (5999, 6014), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np

# Load two (x, y) series from comma-separated text files.
fig = plt.figure()
x, y = np.loadtxt('test.txt', delimiter=',', unpack=True)
x2, y2 = np.loadtxt('test2.txt', delimiter=',', unpack=True)

# Single axes occupying the whole 1x1 grid.
ax1 = plt.subplot2grid((1, 1), (0, 0))
ax1.grid(True)
ax1.text(x2[3], y2[3], 'example')
ax1.annotate(
    'Good!',
    (x[5], y[5]),
    xytext=(0.2, 0.9),
    textcoords='axes fraction',
    arrowprops=dict(facecolor='grey'),
)

# Draw the same data with several chart types on the one axes.
plt.bar(x, y, label='load from file', color='g')
plt.plot(x2, y2, label='load from file2', color='r')
plt.hist(y2, x2, label='hist', histtype='bar', rwidth=0.8)
plt.scatter(x, y, label='skitscat', color='k', s=25, marker="o")

plt.xlabel('x')
plt.ylabel('y')
plt.title(' Graph\n Check file !')
# NOTE(review): the style sheet is applied after the axes already exist —
# presumably intended before plotting; confirm the desired look.
style.use('dark_background')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.style.use",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"mat... | [((86, 98), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (96, 98), True, 'import matplotlib.pyplot as plt\n'), ((106, 156), 'numpy.loadtxt', 'np.loadtxt', (['"""test.txt"""'], {'delimiter': '""","""', 'unpack': '(True)'}), "('test.txt', delimiter=',', unpack=True)\n", (116, 156), True, 'import numpy as np\n'), ((162, 213), 'numpy.loadtxt', 'np.loadtxt', (['"""test2.txt"""'], {'delimiter': '""","""', 'unpack': '(True)'}), "('test2.txt', delimiter=',', unpack=True)\n", (172, 213), True, 'import numpy as np\n'), ((220, 252), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 1)', '(0, 0)'], {}), '((1, 1), (0, 0))\n', (236, 252), True, 'import matplotlib.pyplot as plt\n'), ((506, 554), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {'label': '"""load from file"""', 'color': '"""g"""'}), "(x, y, label='load from file', color='g')\n", (513, 554), True, 'import matplotlib.pyplot as plt\n'), ((552, 604), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2'], {'label': '"""load from file2"""', 'color': '"""r"""'}), "(x2, y2, label='load from file2', color='r')\n", (560, 604), True, 'import matplotlib.pyplot as plt\n'), ((602, 660), 'matplotlib.pyplot.hist', 'plt.hist', (['y2', 'x2'], {'label': '"""hist"""', 'histtype': '"""bar"""', 'rwidth': '(0.8)'}), "(y2, x2, label='hist', histtype='bar', rwidth=0.8)\n", (610, 660), True, 'import matplotlib.pyplot as plt\n'), ((657, 721), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'label': '"""skitscat"""', 'color': '"""k"""', 's': '(25)', 'marker': '"""o"""'}), "(x, y, label='skitscat', color='k', s=25, marker='o')\n", (668, 721), True, 'import matplotlib.pyplot as plt\n'), ((718, 733), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (728, 733), True, 'import matplotlib.pyplot as plt\n'), ((734, 749), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (744, 749), True, 'import matplotlib.pyplot as plt\n'), ((750, 787), 
'matplotlib.pyplot.title', 'plt.title', (['""" Graph\n Check file !"""'], {}), '(""" Graph\n Check file !""")\n', (759, 787), True, 'import matplotlib.pyplot as plt\n'), ((785, 813), 'matplotlib.style.use', 'style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (794, 813), False, 'from matplotlib import style\n'), ((814, 826), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (824, 826), True, 'import matplotlib.pyplot as plt\n'), ((828, 838), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (836, 838), True, 'import matplotlib.pyplot as plt\n')] |
import pygame
from numpy import interp
pygame.init()
# window dimensions (Finnish: leveys = width, korkeus = height)
leveys = 600
korkeus = 600
# NOTE(review): set_mode receives (korkeus, leveys); both are 600 so this is
# harmless here, but the order looks swapped relative to (width, height)
display = pygame.display.set_mode((korkeus, leveys))
pygame.display.set_caption("ZzzZzz")
clock = pygame.time.Clock()
class Viiva:
    """A horizontal two-colour line segment that oscillates around x=300."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.suunta = 0  # direction flag: 0 or 1, flipped at the turn points
        self.color = pygame.Color("black")
        self.color2 = pygame.Color("red")

    def piirto(self):
        """Draw both halves of the line plus a dot at each outer endpoint."""
        y_px = int(self.y)
        keskipiste = [300, y_px]
        self.pos = [int(self.x), y_px]
        self.pos2 = [int(300 + self.vali), y_px]
        pygame.draw.line(display, self.color, self.pos, keskipiste, 2)
        pygame.draw.line(display, self.color2, keskipiste, self.pos2, 2)
        pygame.draw.circle(display, self.color, self.pos, 3, 0)
        pygame.draw.circle(display, self.color2, self.pos2, 3, 0)

    def varivaihto(self):
        """Exchange the colours of the two halves (black <-> red)."""
        musta = pygame.Color("black")
        if self.color == musta:
            self.color = pygame.Color("red")
            self.color2 = musta
        else:
            self.color = musta
            self.color2 = pygame.Color("red")

    def heilu(self):
        """Advance the oscillation by one frame."""
        self.vali = 300 - self.x
        # interp maps vali 0 -> speed 5 px/frame down to vali 100 -> 1 px/frame
        self.vauhti = interp(self.vali, [0, 100], [5, 1])
        if self.suunta == 0 and self.vali > 99:
            self.suunta = 1
        elif self.suunta == 1 and self.vali < 1:
            self.suunta = 0
            self.varivaihto()
        self.x += self.vauhti if self.suunta else -self.vauhti
def inputt():
    """Drain the pygame event queue; quit pygame and exit on window close."""
    for tapahtuma in pygame.event.get():
        if tapahtuma.type == pygame.QUIT:
            pygame.quit()
            exit()
def main():
    """Build the stack of oscillating lines and run the 30 FPS draw loop."""
    maara = 41
    askel = int(korkeus / maara)  # vertical spacing between lines
    x = 300
    y = askel
    viivat = []
    for _ in range(1, maara):
        viivat.append(Viiva(x, y))
        y += askel
        x += 20
    while True:
        inputt()
        display.fill(pygame.Color("white"))
        for viiva in viivat:
            viiva.heilu()
            viiva.piirto()
        pygame.display.update()
        clock.tick(30)


if __name__ == "__main__":
    main()
| [
"pygame.quit",
"pygame.draw.circle",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.Color",
"pygame.init",
"pygame.display.update",
"numpy.interp",
"pygame.display.set_caption",
"pygame.time.Clock"
] | [((43, 56), 'pygame.init', 'pygame.init', ([], {}), '()\n', (54, 56), False, 'import pygame\n'), ((99, 141), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(korkeus, leveys)'], {}), '((korkeus, leveys))\n', (122, 141), False, 'import pygame\n'), ((143, 179), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""ZzzZzz"""'], {}), "('ZzzZzz')\n", (169, 179), False, 'import pygame\n'), ((189, 208), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (206, 208), False, 'import pygame\n'), ((1575, 1593), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1591, 1593), False, 'import pygame\n'), ((345, 366), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (357, 366), False, 'import pygame\n'), ((390, 409), 'pygame.Color', 'pygame.Color', (['"""red"""'], {}), "('red')\n", (402, 409), False, 'import pygame\n'), ((708, 763), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 'self.color', 'self.pos', '(3)', '(0)'], {}), '(display, self.color, self.pos, 3, 0)\n', (726, 763), False, 'import pygame\n'), ((773, 830), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 'self.color2', 'self.pos2', '(3)', '(0)'], {}), '(display, self.color2, self.pos2, 3, 0)\n', (791, 830), False, 'import pygame\n'), ((1194, 1229), 'numpy.interp', 'interp', (['self.vali', '[0, 100]', '[5, 1]'], {}), '(self.vali, [0, 100], [5, 1])\n', (1200, 1229), False, 'from numpy import interp\n'), ((2065, 2088), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2086, 2088), False, 'import pygame\n'), ((886, 907), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (898, 907), False, 'import pygame\n'), ((936, 955), 'pygame.Color', 'pygame.Color', (['"""red"""'], {}), "('red')\n", (948, 955), False, 'import pygame\n'), ((983, 1004), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (995, 1004), False, 'import pygame\n'), ((1046, 1067), 'pygame.Color', 'pygame.Color', 
(['"""black"""'], {}), "('black')\n", (1058, 1067), False, 'import pygame\n'), ((1095, 1114), 'pygame.Color', 'pygame.Color', (['"""red"""'], {}), "('red')\n", (1107, 1114), False, 'import pygame\n'), ((1648, 1661), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1659, 1661), False, 'import pygame\n'), ((1948, 1969), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (1960, 1969), False, 'import pygame\n')] |
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
# PyCharm automatically adds cytometer to the python path, but this doesn't happen if the script is run
# with "python scriptname.py"
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
import pickle
import glob
import numpy as np
# limit number of GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# limit GPU memory used
os.environ['KERAS_BACKEND'] = 'tensorflow'
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = .9
set_session(tf.Session(config=config))
# Note: you need to use my branch of keras with the new functionality, that allows element-wise weights of the loss
# function
import keras
import keras.backend as K
import cytometer.data
import cytometer.models
import cytometer.utils
import matplotlib.pyplot as plt
import cv2
import pysto.imgproc as pystoim
from skimage.future.graph import rag_mean_color
from skimage.measure import regionprops
import networkx as nx
# specify data format as (n, row, col, channel)
K.set_image_data_format('channels_last')
DEBUG = True
'''Load data
'''
# data paths
root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
training_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training')
training_non_overlap_data_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training_non_overlap')
training_augmented_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training_augmented')
saved_models_dir = os.path.join(root_data_dir, 'saved_models')
dice_saved_model_basename = 'klf14_b6ntac_exp_0013_cnn_dice_coeff_with_weights'  # Dice coefficient regression model
dice_model_name = dice_saved_model_basename + '*.h5'
# load model weights for each fold (one .h5 file per cross-validation fold)
dice_model_files = glob.glob(os.path.join(saved_models_dir, dice_model_name))
n_folds = len(dice_model_files)
# load k-fold sets that were used to train the models
saved_model_kfold_filename = os.path.join(saved_models_dir, 'klf14_b6ntac_exp_0015_cnn_dmap_info.pickle')
with open(saved_model_kfold_filename, 'rb') as f:
    aux = pickle.load(f)
im_file_list = aux['file_list']
idx_test_all = aux['idx_test_all']
# correct home directory if we are in a different system than what was used to train the models
im_file_list = cytometer.data.change_home_directory(im_file_list, '/users/rittscher/rcasero', home, check_isfile=True)
'''Load model and visualise results
'''
# list of model files to inspect
dice_model_files = glob.glob(os.path.join(saved_models_dir, dice_model_name))
# inspect only the first fold for now
fold_i = 0
dice_model_file = dice_model_files[fold_i]
# split the data into training and testing datasets
im_test_file_list, _ = cytometer.data.split_list(im_file_list, idx_test_all[fold_i])
# load datasets
# NOTE(review): the 'predseg_kfold_00' etc. keys below hard-code fold 00;
# this only matches while fold_i == 0 — confirm if iterating over folds
test_datasets, _, _ = cytometer.data.load_datasets(im_test_file_list, prefix_from='im',
                                                   prefix_to=['im', 'seg', 'lab',
                                                              'predseg_kfold_' + str(fold_i).zfill(2),
                                                              'predlab_kfold_' + str(fold_i).zfill(2),
                                                              'preddice_kfold_' + str(fold_i).zfill(2)],
                                                   nblocks=2)
im_test = test_datasets['im']
seg_test = test_datasets['seg']
lab_test = test_datasets['lab']
predseg_test = test_datasets['predseg_kfold_00']
predlab_test = test_datasets['predlab_kfold_00']
preddice_test = test_datasets['preddice_kfold_00']
del test_datasets
# load model
dice_model = keras.models.load_model(dice_model_file)
# set input layer to size of test images
dice_model = cytometer.models.change_input_size(dice_model, batch_shape=(None,) + im_test.shape[1:])
# visualise results
i = 0
# run image through network
preddice_test_pred = dice_model.predict(im_test[i, :, :, :].reshape((1,) + im_test.shape[1:]))
"""Split segmentation into multiple masked segmentations where the network can only see one cell at a time
"""
# compute Region Adjacency Graph (RAG) for labels
rag = rag_mean_color(image=predlab_test[i, :, :, 0], labels=predlab_test[i, :, :, 0])
# per-label properties; 'rc' keeps centroids in (row, col) order
labels_prop = regionprops(predlab_test[i, :, :, 0], coordinates='rc')
centroids_rc = {}
for lp in labels_prop:
    centroids_rc[lp['label']] = lp['centroid']
# flip (row, col) centroids into (x, y) for plotting with networkx
centroids_xy = centroids_rc.copy()
for n in centroids_rc.keys():
    centroids_xy[n] = centroids_rc[n][::-1]
# plot results
plt.clf()
plt.subplot(321)
plt.imshow(im_test[i, :, :, :])
plt.title('histology, i = ' + str(i))
plt.subplot(323)
aux = cv2.dilate(predseg_test[i, :, :, 0], kernel=np.ones(shape=(3, 3)))  # dilate for better visualisation
plt.imshow(aux)
plt.title('predicted contours')
plt.subplot(324)
plt.imshow(predlab_test[i, :, :, 0])
plt.title('predicted labels')
plt.subplot(325)
plt.imshow(predlab_test[i, :, :, 0])
nx.draw(rag, pos=centroids_xy, node_size=30)
plt.title('cell adjacency graph')
labels = predlab_test[i, :, :, 0]
receptive_field = (162, 162)
# colour labels
colours, coloured_labels = cytometer.utils.colour_labels_with_receptive_field(labels, receptive_field)
plt.clf()
plt.subplot(221)
plt.imshow(labels)
plt.title('labels')
plt.subplot(222)
plt.imshow(labels)
nx.draw(rag, pos=centroids_xy, node_size=30)
plt.title('label adjacency graph')
plt.subplot(223)
plt.imshow(coloured_labels, cmap='tab10')
# NOTE(review): label id 38 is hand-picked for this example image — it will
# KeyError on images without that label
c = centroids_rc[38]
plt.plot(c[1], c[0], 'ok')
# half-extent of the receptive field in each dimension (handles odd sizes)
if receptive_field[0] % 2:
    receptive_field_half = ((receptive_field[0] - 1) / 2,)
else:
    receptive_field_half = (receptive_field[0] / 2,)
if receptive_field[1] % 2:
    receptive_field_half += ((receptive_field[1] - 1) / 2,)
else:
    receptive_field_half += (receptive_field[1] / 2,)
# clip the receptive-field box around the centroid to the image bounds
rmin = int(max(0.0, np.round(c[0] - receptive_field_half[0])))
rmax = int(min(labels.shape[0] - 1.0, np.round(c[0] + receptive_field_half[0])))
cmin = int(max(0.0, np.round(c[1] - receptive_field_half[1])))
cmax = int(min(labels.shape[1] - 1.0, np.round(c[1] + receptive_field_half[1])))
plt.plot([cmin, cmax, cmax, cmin, cmin], [rmin, rmin, rmax, rmax, rmin], 'k')
plt.title('coloured labels')
| [
"keras.models.load_model",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.title",
"pathlib.Path.home",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.imshow",
"tensorflow.Session",
"numpy.ones",
"keras.backend.set_image_data_format",
"tensorflow.ConfigProto",
"pickle.lo... | [((707, 723), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (721, 723), True, 'import tensorflow as tf\n'), ((1289, 1329), 'keras.backend.set_image_data_format', 'K.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (1312, 1329), True, 'import keras.backend as K\n'), ((1392, 1439), 'os.path.join', 'os.path.join', (['home', '"""Data/cytometer_data/klf14"""'], {}), "(home, 'Data/cytometer_data/klf14')\n", (1404, 1439), False, 'import os\n'), ((1455, 1507), 'os.path.join', 'os.path.join', (['root_data_dir', '"""klf14_b6ntac_training"""'], {}), "(root_data_dir, 'klf14_b6ntac_training')\n", (1467, 1507), False, 'import os\n'), ((1540, 1604), 'os.path.join', 'os.path.join', (['root_data_dir', '"""klf14_b6ntac_training_non_overlap"""'], {}), "(root_data_dir, 'klf14_b6ntac_training_non_overlap')\n", (1552, 1604), False, 'import os\n'), ((1630, 1692), 'os.path.join', 'os.path.join', (['root_data_dir', '"""klf14_b6ntac_training_augmented"""'], {}), "(root_data_dir, 'klf14_b6ntac_training_augmented')\n", (1642, 1692), False, 'import os\n'), ((1712, 1755), 'os.path.join', 'os.path.join', (['root_data_dir', '"""saved_models"""'], {}), "(root_data_dir, 'saved_models')\n", (1724, 1755), False, 'import os\n'), ((2158, 2234), 'os.path.join', 'os.path.join', (['saved_models_dir', '"""klf14_b6ntac_exp_0015_cnn_dmap_info.pickle"""'], {}), "(saved_models_dir, 'klf14_b6ntac_exp_0015_cnn_dmap_info.pickle')\n", (2170, 2234), False, 'import os\n'), ((3787, 3827), 'keras.models.load_model', 'keras.models.load_model', (['dice_model_file'], {}), '(dice_model_file)\n', (3810, 3827), False, 'import keras\n'), ((4291, 4370), 'skimage.future.graph.rag_mean_color', 'rag_mean_color', ([], {'image': 'predlab_test[i, :, :, 0]', 'labels': 'predlab_test[i, :, :, 0]'}), '(image=predlab_test[i, :, :, 0], labels=predlab_test[i, :, :, 0])\n', (4305, 4370), False, 'from skimage.future.graph import rag_mean_color\n'), ((4385, 4440), 
'skimage.measure.regionprops', 'regionprops', (['predlab_test[i, :, :, 0]'], {'coordinates': '"""rc"""'}), "(predlab_test[i, :, :, 0], coordinates='rc')\n", (4396, 4440), False, 'from skimage.measure import regionprops\n'), ((4654, 4663), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4661, 4663), True, 'import matplotlib.pyplot as plt\n'), ((4664, 4680), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(321)'], {}), '(321)\n', (4675, 4680), True, 'import matplotlib.pyplot as plt\n'), ((4681, 4712), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_test[i, :, :, :]'], {}), '(im_test[i, :, :, :])\n', (4691, 4712), True, 'import matplotlib.pyplot as plt\n'), ((4751, 4767), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(323)'], {}), '(323)\n', (4762, 4767), True, 'import matplotlib.pyplot as plt\n'), ((4876, 4891), 'matplotlib.pyplot.imshow', 'plt.imshow', (['aux'], {}), '(aux)\n', (4886, 4891), True, 'import matplotlib.pyplot as plt\n'), ((4892, 4923), 'matplotlib.pyplot.title', 'plt.title', (['"""predicted contours"""'], {}), "('predicted contours')\n", (4901, 4923), True, 'import matplotlib.pyplot as plt\n'), ((4924, 4940), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(324)'], {}), '(324)\n', (4935, 4940), True, 'import matplotlib.pyplot as plt\n'), ((4941, 4977), 'matplotlib.pyplot.imshow', 'plt.imshow', (['predlab_test[i, :, :, 0]'], {}), '(predlab_test[i, :, :, 0])\n', (4951, 4977), True, 'import matplotlib.pyplot as plt\n'), ((4978, 5007), 'matplotlib.pyplot.title', 'plt.title', (['"""predicted labels"""'], {}), "('predicted labels')\n", (4987, 5007), True, 'import matplotlib.pyplot as plt\n'), ((5008, 5024), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(325)'], {}), '(325)\n', (5019, 5024), True, 'import matplotlib.pyplot as plt\n'), ((5025, 5061), 'matplotlib.pyplot.imshow', 'plt.imshow', (['predlab_test[i, :, :, 0]'], {}), '(predlab_test[i, :, :, 0])\n', (5035, 5061), True, 'import matplotlib.pyplot as plt\n'), ((5062, 5106), 'networkx.draw', 
'nx.draw', (['rag'], {'pos': 'centroids_xy', 'node_size': '(30)'}), '(rag, pos=centroids_xy, node_size=30)\n', (5069, 5106), True, 'import networkx as nx\n'), ((5107, 5140), 'matplotlib.pyplot.title', 'plt.title', (['"""cell adjacency graph"""'], {}), "('cell adjacency graph')\n", (5116, 5140), True, 'import matplotlib.pyplot as plt\n'), ((5326, 5335), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5333, 5335), True, 'import matplotlib.pyplot as plt\n'), ((5336, 5352), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (5347, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5353, 5371), 'matplotlib.pyplot.imshow', 'plt.imshow', (['labels'], {}), '(labels)\n', (5363, 5371), True, 'import matplotlib.pyplot as plt\n'), ((5372, 5391), 'matplotlib.pyplot.title', 'plt.title', (['"""labels"""'], {}), "('labels')\n", (5381, 5391), True, 'import matplotlib.pyplot as plt\n'), ((5392, 5408), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (5403, 5408), True, 'import matplotlib.pyplot as plt\n'), ((5409, 5427), 'matplotlib.pyplot.imshow', 'plt.imshow', (['labels'], {}), '(labels)\n', (5419, 5427), True, 'import matplotlib.pyplot as plt\n'), ((5428, 5472), 'networkx.draw', 'nx.draw', (['rag'], {'pos': 'centroids_xy', 'node_size': '(30)'}), '(rag, pos=centroids_xy, node_size=30)\n', (5435, 5472), True, 'import networkx as nx\n'), ((5473, 5507), 'matplotlib.pyplot.title', 'plt.title', (['"""label adjacency graph"""'], {}), "('label adjacency graph')\n", (5482, 5507), True, 'import matplotlib.pyplot as plt\n'), ((5508, 5524), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (5519, 5524), True, 'import matplotlib.pyplot as plt\n'), ((5525, 5566), 'matplotlib.pyplot.imshow', 'plt.imshow', (['coloured_labels'], {'cmap': '"""tab10"""'}), "(coloured_labels, cmap='tab10')\n", (5535, 5566), True, 'import matplotlib.pyplot as plt\n'), ((5588, 5614), 'matplotlib.pyplot.plot', 'plt.plot', (['c[1]', 'c[0]', 
'"""ok"""'], {}), "(c[1], c[0], 'ok')\n", (5596, 5614), True, 'import matplotlib.pyplot as plt\n'), ((6195, 6272), 'matplotlib.pyplot.plot', 'plt.plot', (['[cmin, cmax, cmax, cmin, cmin]', '[rmin, rmin, rmax, rmax, rmin]', '"""k"""'], {}), "([cmin, cmax, cmax, cmin, cmin], [rmin, rmin, rmax, rmax, rmin], 'k')\n", (6203, 6272), True, 'import matplotlib.pyplot as plt\n'), ((6273, 6301), 'matplotlib.pyplot.title', 'plt.title', (['"""coloured labels"""'], {}), "('coloured labels')\n", (6282, 6301), True, 'import matplotlib.pyplot as plt\n'), ((209, 220), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (218, 220), False, 'from pathlib import Path\n'), ((792, 817), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (802, 817), True, 'import tensorflow as tf\n'), ((1993, 2040), 'os.path.join', 'os.path.join', (['saved_models_dir', 'dice_model_name'], {}), '(saved_models_dir, dice_model_name)\n', (2005, 2040), False, 'import os\n'), ((2295, 2309), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2306, 2309), False, 'import pickle\n'), ((2697, 2744), 'os.path.join', 'os.path.join', (['saved_models_dir', 'dice_model_name'], {}), '(saved_models_dir, dice_model_name)\n', (2709, 2744), False, 'import os\n'), ((395, 435), 'os.path.join', 'os.path.join', (['home', '"""Software/cytometer"""'], {}), "(home, 'Software/cytometer')\n", (407, 435), False, 'import os\n'), ((4818, 4839), 'numpy.ones', 'np.ones', ([], {'shape': '(3, 3)'}), '(shape=(3, 3))\n', (4825, 4839), True, 'import numpy as np\n'), ((5927, 5967), 'numpy.round', 'np.round', (['(c[0] - receptive_field_half[0])'], {}), '(c[0] - receptive_field_half[0])\n', (5935, 5967), True, 'import numpy as np\n'), ((6008, 6048), 'numpy.round', 'np.round', (['(c[0] + receptive_field_half[0])'], {}), '(c[0] + receptive_field_half[0])\n', (6016, 6048), True, 'import numpy as np\n'), ((6071, 6111), 'numpy.round', 'np.round', (['(c[1] - receptive_field_half[1])'], {}), '(c[1] - 
receptive_field_half[1])\n', (6079, 6111), True, 'import numpy as np\n'), ((6152, 6192), 'numpy.round', 'np.round', (['(c[1] + receptive_field_half[1])'], {}), '(c[1] + receptive_field_half[1])\n', (6160, 6192), True, 'import numpy as np\n')] |
import numpy as np
def conv_forward_naive(x, weight, b, parameters):
    """
    Naive forward pass of a 2-D convolution in NHWC layout.

    Parameters
    ----------
    x : ndarray, shape (m, n_h, n_w, n_C_prev)
        Input batch.
    weight : ndarray, shape (f, f, n_C_prev, n_C)
        Square convolution filters.
    b : ndarray, shape (n_C,)
        Per-output-channel bias.
    parameters : dict
        Must contain integers 'pad' and 'stride'.

    Returns
    -------
    tuple
        (Z, caches) where Z has shape (m, n_H, n_W, n_C) and
        caches = (x, weight, b, pad, stride) for the backward pass.
    """
    pad = parameters['pad']
    stride = parameters['stride']
    (m, n_h, n_w, n_C_prev) = x.shape
    (f, f, n_C_prev, n_C) = weight.shape
    # standard output-size formula: floor((n + 2*pad - f) / stride) + 1
    n_H = int(1 + (n_h + 2 * pad - f) / stride)
    n_W = int(1 + (n_w + 2 * pad - f) / stride)
    # zero-pad only the two spatial axes
    x_prev_pad = np.pad(x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0)
    Z = np.zeros((m, n_H, n_W, n_C))
    caches = (x, weight, b, pad, stride)
    for i in range(m):
        for h in range(n_H):
            # hoist loop-invariant window bounds out of the inner loops
            vert_start = h * stride
            vert_end = vert_start + f
            for w in range(n_W):
                horiz_start = w * stride
                horiz_end = horiz_start + f
                x_slice = x_prev_pad[i, vert_start:vert_end, horiz_start:horiz_end, :]
                # contract the (f, f, n_C_prev) window against all filters at
                # once instead of looping over output channels
                Z[i, h, w, :] = np.tensordot(x_slice, weight, axes=([0, 1, 2], [0, 1, 2]))
    return Z + b[None, None, None, :], caches
def conv_back_naive(dout,cache):
    """
    Naive backward pass for the NHWC convolution of ``conv_forward_naive``.

    Parameters
    ----------
    dout : ndarray, shape (m, n_H, n_W, n_C)
        Upstream gradient w.r.t. the convolution output.
    cache : tuple
        (x, w_filter, b, pad, stride) as stored by the forward pass.

    Returns
    -------
    tuple
        (dx, dw, db): gradients w.r.t. the input, filters and bias.
    """
    x,w_filter,b,pad,stride = cache
    (m, n_h, n_w, n_C_prev) = x.shape
    (f,f, n_C_prev, n_C) = w_filter.shape
    # recompute the output spatial size used by the forward pass
    n_H = int(1 + (n_h + 2 * pad - f) / stride)
    n_W = int(1 + (n_w + 2 * pad - f) / stride)
    a_prev_pad = np.pad(x, ((0,0),(pad,pad),(pad,pad),(0,0)), 'constant', constant_values=0)
    # NOTE: gradients are accumulated in float32 regardless of x's dtype
    dw = np.zeros(w_filter.shape,dtype=np.float32)
    dx = np.zeros(x.shape,dtype=np.float32)
    # dw: for each filter tap (h, w, p, c), gather the strided slice of every
    # input position that multiplied this tap, weighted by the upstream grads
    for h in range(f):
        for w in range(f):
            for p in range(n_C_prev):
                for c in range(n_C):
                    # go through all the individual positions that this filter affected and multiply by their dout
                    a_slice = a_prev_pad[:,h:h + n_H * stride:stride,w:w + n_W * stride:stride,p]
                    dw[h,w,p,c] = np.sum(a_slice * dout[:,:,:,c])
    # TODO: put back in dout to get correct gradient
    # dx: scatter each upstream gradient over the filter-sized window it came
    # from, accumulating overlaps; work in padded coordinates, crop at the end
    dx_pad = np.pad(dx, ((0,0),(pad,pad),(pad,pad),(0,0)), 'constant', constant_values=0)
    for i in range(m):
        for h_output in range(n_H):
            for w_output in range(n_W):
                for g in range(n_C):
                    vert_start = h_output*stride
                    vert_end = vert_start + f
                    horiz_start = w_output * stride
                    horiz_end = horiz_start + f
                    dx_pad[i,vert_start:vert_end,horiz_start:horiz_end,:] += w_filter[:,:,:,g] * dout[i,h_output,w_output,g]
    # drop the padded border to recover the gradient w.r.t. the original input
    dx = dx_pad[:,pad:pad+n_h,pad:pad+n_w,:]
    # bias gradient: sum dout over batch and both spatial axes
    db = np.sum(dout,axis=(0,1,2))
    return dx,dw,db
def relu(x):
    """Element-wise rectified linear unit: clamp negative values to zero."""
    return np.clip(x, 0, None)
def relu_back(x, dout):
    """Backward pass of ReLU: pass the gradient only where the input was positive."""
    return np.where(x > 0, dout, 0)
def max_pooling(prev_layer, filter_size=2):
    """Max pooling with stride equal to the window size (non-overlapping).

    Returns the pooled tensor and a cache (pooled, input, filter_size)
    for the backward pass.
    """
    m, in_h, in_w, n_ch = prev_layer.shape
    # Stride == filter size, so windows never overlap.
    out_h = int((in_h - filter_size) / filter_size + 1)
    out_w = int((in_w - filter_size) / filter_size + 1)
    pooled = np.zeros((m, out_h, out_w, n_ch))
    for i, r, c, ch in np.ndindex(m, out_h, out_w, n_ch):
        top = r * filter_size
        left = c * filter_size
        window = prev_layer[i, top:top + filter_size, left:left + filter_size, ch]
        pooled[i, r, c, ch] = np.max(window)
    return pooled, (pooled, prev_layer, filter_size)
def max_pooling_back(dout, caches):
    """Backward pass for non-overlapping max pooling.

    Routes each upstream gradient value to the input position(s) that
    achieved the window maximum in the forward pass.
    """
    pooled, inputs, filter_size = caches
    m, out_h, out_w, n_ch = pooled.shape
    _, in_h, in_w, _ = inputs.shape
    grad = np.zeros((m, in_h, in_w, n_ch))
    for i, r, c, ch in np.ndindex(m, out_h, out_w, n_ch):
        top = r * filter_size
        left = c * filter_size
        window = inputs[i, top:top + filter_size, left:left + filter_size, ch]
        # Every entry equal to the recorded maximum receives the gradient.
        winners = window == pooled[i, r, c, ch]
        grad[i, top:top + filter_size, left:left + filter_size, ch] = winners * dout[i, r, c, ch]
    return grad
def fully_connected(prev_layer, w, b):
    """Affine layer: y = x @ w + b.  Returns (output, cache dict)."""
    out = prev_layer.dot(w) + b
    return out, {'input': prev_layer, 'weights': w, 'bias': b}
def fully_connected_backward(dout, caches):
    """Gradients of the affine layer y = x @ w + b.

    Returns (da, dw, db): gradients w.r.t. the layer input, the weights
    and the bias.
    """
    layer_in = caches['input']
    w = caches['weights']
    bias = caches['bias']  # fetched for cache parity; not needed by the math
    da = dout.dot(w.T)  # equivalent to (w @ dout.T).T
    dw = layer_in.T.dot(dout)
    db = np.sum(dout, axis=0)
    return da, dw, db
def softmax_cost(y, y_hat):
    """Per-sample cross-entropy: -sum_k y_k * log(y_hat_k), shape (m,)."""
    log_likelihood = np.log(y_hat)
    return -(y * log_likelihood).sum(axis=1)
def softmax(z):
    """Row-wise softmax of a 2-D score matrix.

    The per-row maximum is subtracted before exponentiating; this leaves
    the result mathematically unchanged but prevents overflow (exp of a
    large score would otherwise produce inf and then NaN rows).
    """
    shifted = z - np.max(z, axis=1, keepdims=True)
    exp_z = np.exp(shifted)
    return exp_z / np.sum(exp_z, axis=1, keepdims=True)
def softmax_back(softmax, Y):
    """Gradient of cross-entropy w.r.t. the softmax logits, averaged over
    the batch: (p - y) / batch_size."""
    batch = softmax.shape[0]
    return (softmax - Y) / batch
def batchnorm_forward(x,gamma,beta,running_mu,running_sigma,run='train'):
    """Spatial batch normalization over an NHWC tensor.

    In training mode, normalizes each channel with batch statistics and
    updates the running averages; otherwise normalizes with the supplied
    running statistics.

    Args:
        x: input of shape (m, h, w, c).
        gamma, beta: per-channel scale and shift, shape (c,).
        running_mu, running_sigma: running mean/variance, shape (c,).
        run: 'train' uses (and updates) batch statistics; anything else
            uses the running averages as-is.

    Returns:
        (xhat, running_mu, running_sigma, cache) where cache holds the
        intermediates consumed by batchnorm_backward.

    NOTE(review): the scaled/shifted output `y` is computed and stored in
    the cache, but the function returns the *normalized* `xhat` instead of
    `y` — confirm that callers apply gamma/beta downstream.
    """
    # mean of x along each dimension
    # Gamma is size of (C,)
    # Beta is size of (C.)
    m,h,w,c = x.shape
    nt = (m*h*w)  # number of values averaged per channel statistic
    velocity = 0.9  # momentum for the running averages
    if run == 'train':
        mu = (1./nt) * np.sum(x,axis=(0,1,2),keepdims = True)
        sigma = (1./nt) * np.sum((x - mu) ** 2,axis=(0,1,2),keepdims=True)
        # 1e-8 guards against division by zero for constant channels.
        xhat = (x - mu)/(np.sqrt(sigma+1e-8))
        y = gamma.reshape(1,1,1,c) * xhat + beta.reshape(1,1,1,c)
        # Update moving averages
        running_mu = velocity * running_mu + (1-velocity ) * np.squeeze(mu)
        running_sigma = velocity * running_sigma + (1-velocity ) * np.squeeze(sigma)
        cache = (x,mu,sigma,xhat,y,gamma,beta)
    else:
        # Inference: normalize with the stored running statistics instead.
        mu = running_mu.reshape(1,1,1,c)
        sigma = running_sigma.reshape(1,1,1,c)
        xhat = (x - mu)/np.sqrt(sigma + 1e-8)
        y = gamma.reshape(1,1,1,c) * xhat + beta.reshape(1,1,1,c)
        cache = (x,mu,sigma,xhat,y,gamma,beta)
    return xhat,running_mu,running_sigma, cache
def batchnorm_backward(dout, cache):
    """Backward pass of spatial batch normalization.

    `cache` is the tuple saved by batchnorm_forward:
    (x, mu, sigma, xhat, y, gamma, beta).

    Returns (dx, dgamma, dbeta) — gradients for the input and the two
    learnable per-channel parameters.
    """
    x, mu, sigma, xhat, _, gamma, _ = cache
    m, h, w, c = x.shape
    gamma = gamma.reshape(1, 1, 1, c)
    n_total = m * h * w  # number of values averaged per channel
    # Straightforward sums for the affine parameters.
    dbeta = np.sum(dout, axis=(0, 1, 2))
    dgamma = np.sum(dout * xhat, axis=(0, 1, 2))
    dxhat = dout * gamma
    inv_std = 1.0 / np.sqrt(sigma + 1e-8)
    centered = x - mu
    # Chain rule through the variance and the mean (Ioffe & Szegedy, 2015).
    dsigma = np.sum(dxhat * centered, axis=(0, 1, 2)).reshape(1, 1, 1, c) * -0.5 * (sigma + 1e-8) ** -1.5
    dmu = np.sum(dxhat * -inv_std, axis=(0, 1, 2)).reshape(1, 1, 1, c) + dsigma * np.sum(-2 * centered, axis=(0, 1, 2)).reshape(1, 1, 1, c) / n_total
    dx = dxhat * inv_std + dsigma * (2.0 * centered) / n_total + dmu * (1.0 / n_total)
    return dx, dgamma, dbeta
| [
"numpy.pad",
"numpy.maximum",
"numpy.sum",
"numpy.log",
"numpy.multiply",
"numpy.zeros",
"numpy.max",
"numpy.array",
"numpy.exp",
"numpy.squeeze",
"numpy.sqrt"
] | [((309, 395), 'numpy.pad', 'np.pad', (['x', '((0, 0), (pad, pad), (pad, pad), (0, 0))', '"""constant"""'], {'constant_values': '(0)'}), "(x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant',\n constant_values=0)\n", (315, 395), True, 'import numpy as np\n'), ((391, 419), 'numpy.zeros', 'np.zeros', (['(m, n_H, n_W, n_C)'], {}), '((m, n_H, n_W, n_C))\n', (399, 419), True, 'import numpy as np\n'), ((1219, 1305), 'numpy.pad', 'np.pad', (['x', '((0, 0), (pad, pad), (pad, pad), (0, 0))', '"""constant"""'], {'constant_values': '(0)'}), "(x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant',\n constant_values=0)\n", (1225, 1305), True, 'import numpy as np\n'), ((1302, 1344), 'numpy.zeros', 'np.zeros', (['w_filter.shape'], {'dtype': 'np.float32'}), '(w_filter.shape, dtype=np.float32)\n', (1310, 1344), True, 'import numpy as np\n'), ((1350, 1385), 'numpy.zeros', 'np.zeros', (['x.shape'], {'dtype': 'np.float32'}), '(x.shape, dtype=np.float32)\n', (1358, 1385), True, 'import numpy as np\n'), ((1867, 1954), 'numpy.pad', 'np.pad', (['dx', '((0, 0), (pad, pad), (pad, pad), (0, 0))', '"""constant"""'], {'constant_values': '(0)'}), "(dx, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant',\n constant_values=0)\n", (1873, 1954), True, 'import numpy as np\n'), ((2523, 2551), 'numpy.sum', 'np.sum', (['dout'], {'axis': '(0, 1, 2)'}), '(dout, axis=(0, 1, 2))\n', (2529, 2551), True, 'import numpy as np\n'), ((2592, 2608), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (2602, 2608), True, 'import numpy as np\n'), ((2642, 2667), 'numpy.array', 'np.array', (['dout'], {'copy': '(True)'}), '(dout, copy=True)\n', (2650, 2667), True, 'import numpy as np\n'), ((3017, 3050), 'numpy.zeros', 'np.zeros', (['(m, n_H, n_W, channels)'], {}), '((m, n_H, n_W, channels))\n', (3025, 3050), True, 'import numpy as np\n'), ((3846, 3889), 'numpy.zeros', 'np.zeros', (['(m, n_prev_H, n_prev_W, channels)'], {}), '((m, n_prev_H, n_prev_W, channels))\n', (3854, 3889), True, 'import numpy 
as np\n'), ((4867, 4887), 'numpy.sum', 'np.sum', (['dout'], {'axis': '(0)'}), '(dout, axis=0)\n', (4873, 4887), True, 'import numpy as np\n'), ((6303, 6331), 'numpy.sum', 'np.sum', (['dout'], {'axis': '(0, 1, 2)'}), '(dout, axis=(0, 1, 2))\n', (6309, 6331), True, 'import numpy as np\n'), ((6342, 6377), 'numpy.sum', 'np.sum', (['(dout * xhat)'], {'axis': '(0, 1, 2)'}), '(dout * xhat, axis=(0, 1, 2))\n', (6348, 6377), True, 'import numpy as np\n'), ((5008, 5017), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (5014, 5017), True, 'import numpy as np\n'), ((5025, 5034), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (5031, 5034), True, 'import numpy as np\n'), ((5381, 5421), 'numpy.sum', 'np.sum', (['x'], {'axis': '(0, 1, 2)', 'keepdims': '(True)'}), '(x, axis=(0, 1, 2), keepdims=True)\n', (5387, 5421), True, 'import numpy as np\n'), ((5441, 5493), 'numpy.sum', 'np.sum', (['((x - mu) ** 2)'], {'axis': '(0, 1, 2)', 'keepdims': '(True)'}), '((x - mu) ** 2, axis=(0, 1, 2), keepdims=True)\n', (5447, 5493), True, 'import numpy as np\n'), ((5510, 5532), 'numpy.sqrt', 'np.sqrt', (['(sigma + 1e-08)'], {}), '(sigma + 1e-08)\n', (5517, 5532), True, 'import numpy as np\n'), ((5913, 5935), 'numpy.sqrt', 'np.sqrt', (['(sigma + 1e-08)'], {}), '(sigma + 1e-08)\n', (5920, 5935), True, 'import numpy as np\n'), ((4960, 4973), 'numpy.log', 'np.log', (['y_hat'], {}), '(y_hat)\n', (4966, 4973), True, 'import numpy as np\n'), ((5675, 5689), 'numpy.squeeze', 'np.squeeze', (['mu'], {}), '(mu)\n', (5685, 5689), True, 'import numpy as np\n'), ((5751, 5768), 'numpy.squeeze', 'np.squeeze', (['sigma'], {}), '(sigma)\n', (5761, 5768), True, 'import numpy as np\n'), ((1757, 1791), 'numpy.sum', 'np.sum', (['(a_slice * dout[:, :, :, c])'], {}), '(a_slice * dout[:, :, :, c])\n', (1763, 1791), True, 'import numpy as np\n'), ((3517, 3535), 'numpy.max', 'np.max', (['prev_slice'], {}), '(prev_slice)\n', (3523, 3535), True, 'import numpy as np\n'), ((6423, 6463), 'numpy.sum', 'np.sum', (['(dxhat * (x - mu))'], 
{'axis': '(0, 1, 2)'}), '(dxhat * (x - mu), axis=(0, 1, 2))\n', (6429, 6463), True, 'import numpy as np\n'), ((6670, 6692), 'numpy.sqrt', 'np.sqrt', (['(sigma + 1e-08)'], {}), '(sigma + 1e-08)\n', (6677, 6692), True, 'import numpy as np\n'), ((890, 930), 'numpy.multiply', 'np.multiply', (['x_slice', 'weight[:, :, :, c]'], {}), '(x_slice, weight[:, :, :, c])\n', (901, 930), True, 'import numpy as np\n'), ((6598, 6635), 'numpy.sum', 'np.sum', (['(-2 * (x - mu))'], {'axis': '(0, 1, 2)'}), '(-2 * (x - mu), axis=(0, 1, 2))\n', (6604, 6635), True, 'import numpy as np\n'), ((6534, 6556), 'numpy.sqrt', 'np.sqrt', (['(sigma + 1e-08)'], {}), '(sigma + 1e-08)\n', (6541, 6556), True, 'import numpy as np\n')] |
import numpy as np
from xengine.colors import *
from xengine.types import UNDEFINED
class Point(list):
    """A 3-D point with an RGBA colour.

    Behaves as the list [x, y, z, RGBA] (its base class) while also
    keeping float32 numpy buffers for the vertex position and colour.
    """

    def __init__(self, x = 0, y = 0, z = 0, RGBA = WHITE):
        super().__init__([x, y, z, RGBA])
        # float32 buffers, ready to be handed to the rendering layer.
        self.vertices = np.array([x, y, z], dtype=np.float32)
        self.color = np.array([RGBA[0], RGBA[1], RGBA[2], RGBA[3]], dtype=np.float32)
        self.zoom = UNDEFINED

    def adapt_to_window(self, window = UNDEFINED, width = UNDEFINED, height = UNDEFINED):
        """Rescale the vertex buffer to a window's aspect ratio.

        Pass either a window object (its width/height take precedence) or
        explicit width/height values.  On first use the zoom defaults to
        1 / (width / 2).
        """
        if window is not UNDEFINED:
            width = window.width
            height = window.height
        half_w = width / 2
        half_h = height / 2
        aspect = half_h / half_w
        if self.zoom is UNDEFINED:
            self.zoom = 1 / half_w
        # Read the current coordinates before overwriting the buffer.
        scaled = [self.x * aspect * self.zoom, self.y * self.zoom, self.z * self.zoom]
        self.vertices = np.array(scaled, dtype=np.float32)

    def set_zoom(self, ratio):
        """Multiply the current vertex buffer by `ratio` and remember it."""
        self.zoom = ratio
        scaled = [self.x * ratio, self.y * ratio, self.z * ratio]
        self.vertices = np.array(scaled, dtype=np.float32)

    @property
    def x(self):
        return self.vertices[0]

    @property
    def y(self):
        return self.vertices[1]

    @property
    def z(self):
        return self.vertices[2]

    @property
    def R(self):
        return self.color[0]

    @property
    def G(self):
        return self.color[1]

    @property
    def B(self):
        return self.color[2]

    @property
    def A(self):
        return self.color[3]
| [
"numpy.array"
] | [((235, 272), 'numpy.array', 'np.array', (['[x, y, z]'], {'dtype': 'np.float32'}), '([x, y, z], dtype=np.float32)\n', (243, 272), True, 'import numpy as np\n'), ((295, 359), 'numpy.array', 'np.array', (['[RGBA[0], RGBA[1], RGBA[2], RGBA[3]]'], {'dtype': 'np.float32'}), '([RGBA[0], RGBA[1], RGBA[2], RGBA[3]], dtype=np.float32)\n', (303, 359), True, 'import numpy as np\n'), ((797, 898), 'numpy.array', 'np.array', (['[self.x * ratio * self.zoom, self.y * self.zoom, self.z * self.zoom]'], {'dtype': 'np.float32'}), '([self.x * ratio * self.zoom, self.y * self.zoom, self.z * self.\n zoom], dtype=np.float32)\n', (805, 898), True, 'import numpy as np\n'), ((977, 1053), 'numpy.array', 'np.array', (['[self.x * ratio, self.y * ratio, self.z * ratio]'], {'dtype': 'np.float32'}), '([self.x * ratio, self.y * ratio, self.z * ratio], dtype=np.float32)\n', (985, 1053), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
import torch
import os
import random
import albumentations as A
import torchvision
# Set random seed for reproducibility
manualSeed = 999
# manualSeed = random.randint(1, 10000) # use if you want new results
print("Random Seed: ", manualSeed)
# Seed both Python's RNG and PyTorch's generator so runs are repeatable.
random.seed(manualSeed)
torch.manual_seed(manualSeed)
class Dataset(torch.utils.data.Dataset):
    """Interferogram dataset laid out as <root>/<label>/<image>.

    Labels are directory names; "0" is the normal (undeformed) class and
    everything else counts as deformation.  When training on real data
    (setting='train', sim=False) samples are drawn with class-balancing
    oversampling instead of by index.
    """

    def __init__(self, dataset='../Data/Dataset', setting='train', sim=True, original=False):
        self.path = dataset
        self.classes = os.listdir(self.path)
        self.interferograms = []
        self.interferograms_normal = []
        self.interferograms_deformation = []
        self.sim = sim
        self.original = original
        self.oversampling = True
        for data_class in self.classes:
            for image in os.listdir(self.path + '/' + data_class):
                # Skip notebook checkpoint artifacts that may live in the folders.
                if 'ipynb' in image:
                    continue
                image_dict = {'path': self.path + '/' + data_class + '/' + image,
                              'label': data_class}
                self.interferograms.append(image_dict)
                if int(data_class) == 0:
                    self.interferograms_normal.append(image_dict)
                else:
                    self.interferograms_deformation.append(image_dict)
        self.num_examples = len(self.interferograms)
        self.set = setting

    def __len__(self):
        return self.num_examples

    def __getitem__(self, index):
        if self.set == 'train' and not self.sim and self.oversampling:
            # Class-balancing oversampling: ignore `index`, pick a class at
            # (roughly even) random, then a random sample within it.
            if random.randint(0, 10) % 2 != 0:
                image_data = random.choice(self.interferograms_normal)
            else:
                image_data = random.choice(self.interferograms_deformation)
        else:
            image_data = self.interferograms[index]
        image_file = image_data['path']
        image_label = image_data['label']
        image = cv.imread(image_file)
        if image is None:
            # Surface the unreadable path before downstream processing fails.
            print(image_file)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        original = image[:224, :224, :]
        # Replicate the grayscale channel three times to keep a 3-channel input.
        zero = np.zeros_like(image)
        zero[:, :, 0] = gray
        zero[:, :, 1] = gray
        zero[:, :, 2] = gray
        image = zero[:224, :224, :]
        image = torch.from_numpy(image).float().permute(2, 0, 1)
        original = torch.from_numpy(original).float().permute(2, 0, 1)
        image = torchvision.transforms.Normalize((108.6684, 108.6684, 108.6684),
                                                 (109.1284, 109.1284, 109.1284))(image)
        if image.shape[1] < 224 or image.shape[2] < 224:
            print(image_file)
        if self.original:
            return (image, image, original), int(image_label), image_file
        return (image, original), int(image_label)
class Unlabeled(torch.utils.data.Dataset):
    """Flat directory of unlabeled interferograms; every sample gets label 0."""

    def __init__(self, dataset='../Data/Dataset', setting='train', original=False):
        self.path = dataset
        self.images = os.listdir(self.path)
        self.original = original
        self.interferograms = [{'path': self.path + '/' + image} for image in self.images]
        self.num_examples = len(self.interferograms)
        self.set = setting

    def __len__(self):
        return self.num_examples

    def __getitem__(self, index):
        image_file = self.interferograms[index]['path']
        image = cv.imread(image_file)
        if image is None:
            # Surface the unreadable path before downstream processing fails.
            print(image_file)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        original = image[:224, :224, :]
        # Replicate the grayscale channel three times to keep a 3-channel input.
        zero = np.zeros_like(image)
        zero[:, :, 0] = gray
        zero[:, :, 1] = gray
        zero[:, :, 2] = gray
        image = zero[:224, :224, :]
        image = torch.from_numpy(image).float().permute(2, 0, 1)
        original = torch.from_numpy(original).float().permute(2, 0, 1)
        image = torchvision.transforms.Normalize((108.6684, 108.6684, 108.6684),
                                                 (109.1284, 109.1284, 109.1284))(image)
        if image.shape[1] < 224 or image.shape[2] < 224:
            print(image_file)
        if self.original:
            return (image, image, original), 0, image_file
        return (image, original), 0
"numpy.zeros_like",
"random.randint",
"cv2.cvtColor",
"torch.manual_seed",
"cv2.imread",
"random.seed",
"torchvision.transforms.Normalize",
"os.listdir",
"torch.from_numpy"
] | [((279, 302), 'random.seed', 'random.seed', (['manualSeed'], {}), '(manualSeed)\n', (290, 302), False, 'import random\n'), ((303, 332), 'torch.manual_seed', 'torch.manual_seed', (['manualSeed'], {}), '(manualSeed)\n', (320, 332), False, 'import torch\n'), ((521, 542), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (531, 542), False, 'import os\n'), ((2207, 2228), 'cv2.imread', 'cv.imread', (['image_file'], {}), '(image_file)\n', (2216, 2228), True, 'import cv2 as cv\n'), ((2244, 2264), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (2257, 2264), True, 'import numpy as np\n'), ((2336, 2373), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (2347, 2373), True, 'import cv2 as cv\n'), ((3265, 3286), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (3275, 3286), False, 'import os\n'), ((3778, 3799), 'cv2.imread', 'cv.imread', (['image_file'], {}), '(image_file)\n', (3787, 3799), True, 'import cv2 as cv\n'), ((3815, 3835), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (3828, 3835), True, 'import numpy as np\n'), ((3907, 3944), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (3918, 3944), True, 'import cv2 as cv\n'), ((811, 851), 'os.listdir', 'os.listdir', (["(self.path + '/' + data_class)"], {}), "(self.path + '/' + data_class)\n", (821, 851), False, 'import os\n'), ((1617, 1638), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (1631, 1638), False, 'import random\n'), ((2741, 2841), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['(108.6684, 108.6684, 108.6684)', '(109.1284, 109.1284, 109.1284)'], {}), '((108.6684, 108.6684, 108.6684), (109.1284,\n 109.1284, 109.1284))\n', (2773, 2841), False, 'import torchvision\n'), ((4312, 4412), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['(108.6684, 108.6684, 108.6684)', 
'(109.1284, 109.1284, 109.1284)'], {}), '((108.6684, 108.6684, 108.6684), (109.1284,\n 109.1284, 109.1284))\n', (4344, 4412), False, 'import torchvision\n'), ((2604, 2627), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2620, 2627), False, 'import torch\n'), ((2672, 2698), 'torch.from_numpy', 'torch.from_numpy', (['original'], {}), '(original)\n', (2688, 2698), False, 'import torch\n'), ((4175, 4198), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (4191, 4198), False, 'import torch\n'), ((4243, 4269), 'torch.from_numpy', 'torch.from_numpy', (['original'], {}), '(original)\n', (4259, 4269), False, 'import torch\n')] |
import mxnet as mx
import logging
import numpy as np
import argparse
from ShuffleNet import get_shufflenet
# logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(level=logging.DEBUG)
# Data paths.
# NOTE(review): `mnist` is never defined in this file — presumably the dict
# returned by mx.test_utils.get_mnist(); confirm before running.
# The single-channel MNIST arrays are tripled along axis 1 to fake RGB input.
train_data = np.concatenate((mnist['train_data'], mnist['train_data'], mnist['train_data']),
                            axis=1)
val_data = np.concatenate((mnist['test_data'], mnist['test_data'], mnist['test_data']),
                          axis=1)
# NOTE(review): `batch_size` is used here but only assigned two lines below —
# as written these calls raise NameError; the assignment must be hoisted.
train_iter = mx.io.NDArrayIter(train_data, mnist['train_label'], batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(val_data, mnist['test_label'], batch_size)
batch_size = 128
# Build the ShuffleNet symbol and train it on two GPUs with plain SGD.
shufflenet = get_shufflenet()
shufflenet_mod = mx.mod.Module(symbol=shufflenet,
                              context=[mx.gpu(0), mx.gpu(1)],
                              data_names=['data'],
                              label_names=['softmax_label'])
shufflenet_mod.fit(train_iter,
                   eval_data=val_iter,
                   optimizer='sgd',
                   optimizer_params={'learning_rate':0.01},
                   eval_metric='acc',
                   #batch_end_callback = mx.callback.Speedometer(batch_size, 20),
                   num_epoch=10)
# parse args
# NOTE(review): this cifar10 section depends on `fit`, `data`, `train_fname`
# and `val_fname`, none of which are defined or imported in this file —
# it appears to be pasted from the mxnet image-classification examples.
parser = argparse.ArgumentParser(description="train cifar10",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
fit.add_fit_args(parser)
data.add_data_args(parser)
data.add_data_aug_args(parser)
data.set_data_aug_level(parser, 2)
parser.set_defaults(
    # network
    network = 'resnet',
    num_layers = 110,
    # data
    data_train = train_fname,
    data_val = val_fname,
    num_classes = 10,
    num_examples = 50000,
    image_shape = '3,28,28',
    pad_size = 4,
    # train
    batch_size = 128,
    num_epochs = 300,
    lr = .05,
    lr_step_epochs = '200,250',
)
args = parser.parse_args()
# load network
from importlib import import_module
net = import_module('symbols.'+args.network)
sym = net.get_symbol(**vars(args))
# train
fit.fit(args, sym, data.get_rec_iter)
| [
"argparse.ArgumentParser",
"numpy.concatenate",
"logging.basicConfig",
"importlib.import_module",
"mxnet.io.NDArrayIter",
"mxnet.gpu",
"ShuffleNet.get_shufflenet"
] | [((153, 193), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (172, 193), False, 'import logging\n'), ((214, 306), 'numpy.concatenate', 'np.concatenate', (["(mnist['train_data'], mnist['train_data'], mnist['train_data'])"], {'axis': '(1)'}), "((mnist['train_data'], mnist['train_data'], mnist[\n 'train_data']), axis=1)\n", (228, 306), True, 'import numpy as np\n'), ((339, 427), 'numpy.concatenate', 'np.concatenate', (["(mnist['test_data'], mnist['test_data'], mnist['test_data'])"], {'axis': '(1)'}), "((mnist['test_data'], mnist['test_data'], mnist['test_data']),\n axis=1)\n", (353, 427), True, 'import numpy as np\n'), ((463, 540), 'mxnet.io.NDArrayIter', 'mx.io.NDArrayIter', (['train_data', "mnist['train_label']", 'batch_size'], {'shuffle': '(True)'}), "(train_data, mnist['train_label'], batch_size, shuffle=True)\n", (480, 540), True, 'import mxnet as mx\n'), ((552, 612), 'mxnet.io.NDArrayIter', 'mx.io.NDArrayIter', (['val_data', "mnist['test_label']", 'batch_size'], {}), "(val_data, mnist['test_label'], batch_size)\n", (569, 612), True, 'import mxnet as mx\n'), ((644, 660), 'ShuffleNet.get_shufflenet', 'get_shufflenet', ([], {}), '()\n', (658, 660), False, 'from ShuffleNet import get_shufflenet\n'), ((1192, 1305), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train cifar10"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='train cifar10', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (1215, 1305), False, 'import argparse\n'), ((1941, 1981), 'importlib.import_module', 'import_module', (["('symbols.' + args.network)"], {}), "('symbols.' + args.network)\n", (1954, 1981), False, 'from importlib import import_module\n'), ((746, 755), 'mxnet.gpu', 'mx.gpu', (['(0)'], {}), '(0)\n', (752, 755), True, 'import mxnet as mx\n'), ((757, 766), 'mxnet.gpu', 'mx.gpu', (['(1)'], {}), '(1)\n', (763, 766), True, 'import mxnet as mx\n')] |
import logging
import os
import torch
from torch.utils.model_zoo import tqdm
import random
import numpy as np
from dataset import *
from torch.utils.data import DataLoader
import torch.nn.functional as F
import eval_metrics as em
from evaluate_tDCF_asvspoof19 import compute_eer_and_tdcf
from utils import setup_seed
import argparse
## Adapted from https://github.com/pytorch/audio/tree/master/torchaudio
## https://github.com/nii-yamagishilab/project-NN-Pytorch-scripts/blob/newfunctions/
def init():
    """Parse the command line, pin the GPU and RNG seed, and return args."""
    arg_parser = argparse.ArgumentParser("load model scores")
    arg_parser.add_argument('--seed', type=int, help="random number seed", default=1000)
    arg_parser.add_argument("-d", "--path_to_database", type=str, help="dataset path",
                            default='/data/neil/DS_10283_3336/')
    arg_parser.add_argument("-f", "--path_to_features", type=str, help="features path",
                            default='/data2/neil/ASVspoof2019LA/')
    arg_parser.add_argument('-m', '--model_dir', type=str, help="directory for pretrained model", required=True,
                            default='/data3/neil/chan/adv1010')
    arg_parser.add_argument("-t", "--task", type=str, help="which dataset you would like to test on",
                            required=True, default='ASVspoof2019LA',
                            choices=["ASVspoof2019LA", "ASVspoof2015", "VCC2020", "ASVspoof2019LASim", "ASVspoof2021LA"])
    arg_parser.add_argument('-l', '--loss', type=str, default="ocsoftmax",
                            choices=["softmax", "amsoftmax", "ocsoftmax", "isolate", "scl", "angulariso"],
                            help="loss for scoring")
    arg_parser.add_argument('--weight_loss', type=float, default=0.5, help="weight for other loss")
    arg_parser.add_argument("--feat", type=str, help="which feature to use", default='LFCC',
                            choices=["CQCC", "LFCC", "Raw"])
    arg_parser.add_argument("--feat_len", type=int, help="features length", default=500)
    arg_parser.add_argument('--batch_size', type=int, default=64, help="Mini batch size for training")
    arg_parser.add_argument("--gpu", type=str, help="GPU index", default="0")
    args = arg_parser.parse_args()

    # Pin the visible GPU before any CUDA call, then fix every RNG seed.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    setup_seed(args.seed)

    args.cuda = torch.cuda.is_available()
    args.device = torch.device("cuda" if args.cuda else "cpu")
    return args
def test_model_on_ASVspoof2019LA(feat_model_path, loss_model_path, part, add_loss):
    """Score the ASVspoof2019 LA `part` split with a saved model.

    Writes `checkpoint_cm_score_ASVspoof2019LA.txt` next to the model
    (one level up for epoch checkpoints) and returns (EER, min t-DCF)
    computed from that file.  Relies on the module-level `args` namespace
    produced by init() for paths, feature type and batch size.
    """
    dirname = os.path.dirname
    basename = os.path.splitext(os.path.basename(feat_model_path))[0]
    # Epoch checkpoints live in a "checkpoint" subfolder; in that case the
    # score file goes to the experiment directory one level up.
    if "checkpoint" in dirname(feat_model_path):
        dir_path = dirname(dirname(feat_model_path))
    else:
        dir_path = dirname(feat_model_path)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torch.load(feat_model_path)
    loss_model = torch.load(loss_model_path) if add_loss is not None else None
    test_set = ASVspoof2019LA(args.path_to_database, args.path_to_features, part,
                              args.feat, feat_len=args.feat_len)
    testDataLoader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=0)
    model.eval()
    score_loader, idx_loader = [], []
    with open(os.path.join(dir_path, 'checkpoint_cm_score_ASVspoof2019LA.txt'), 'w') as cm_score_file:
        for i, (feat, audio_fn, tags, labels, _) in enumerate(tqdm(testDataLoader)):
            # Raw waveforms go in as-is; spectral features have their last
            # two axes swapped before the model.
            if args.feat == "Raw":
                feat = feat.to(args.device)
            else:
                feat = feat.transpose(2, 3).to(args.device)
            # print(feat.shape)
            tags = tags.to(device)
            labels = labels.to(device)
            feats, feat_outputs = model(feat)
            # Derive one bona-fide score per utterance from the chosen loss head.
            if add_loss == "softmax":
                score = F.softmax(feat_outputs)[:, 0]
            elif add_loss == "ocsoftmax":
                ang_isoloss, score = loss_model(feats, labels)
            elif add_loss == "isolate":
                _, score = loss_model(feats, labels)
            elif add_loss == "scl":
                score_softmax = F.softmax(feat_outputs)[:, 0]
                _, score_scl = loss_model(feats, labels)
                score = score_softmax + args.weight_loss * score_scl
            elif add_loss == "amsoftmax":
                outputs, moutputs = loss_model(feats, labels)
                score = F.softmax(outputs, dim=1)[:, 0]
            elif add_loss == "angulariso":
                angularisoloss, score = loss_model(feats, labels)
            else:
                raise ValueError("what is the loss?")
            # One line per utterance: name, attack tag, key, score.
            for j in range(labels.size(0)):
                cm_score_file.write(
                    '%s A%02d %s %s\n' % (audio_fn[j], tags[j].data,
                                          "spoof" if labels[j].data.cpu().numpy() else "bonafide",
                                          score[j].item()))
    # score_loader.append(score.detach().cpu())
    # idx_loader.append(labels.detach().cpu())

    # scores = torch.cat(score_loader, 0).data.cpu().numpy()
    # labels = torch.cat(idx_loader, 0).data.cpu().numpy()
    # eer = em.compute_eer(scores[labels == 0], scores[labels == 1])[0]
    eer, min_tDCF = compute_eer_and_tdcf(os.path.join(dir_path, 'checkpoint_cm_score_ASVspoof2019LA.txt'),
                                         "/data/neil/DS_10283_3336/")
    return eer, min_tDCF
def test_on_VCC(feat_model_path, loss_model_path, part, add_loss):
    """Score the VCC2020 set with a saved model and return the EER.

    Writes `checkpoint_cm_score_VCC.txt` next to the model (one level up
    for epoch checkpoints).  Relies on the module-level `args` namespace.

    NOTE(review): the `part` parameter is accepted but never used — the
    VCC2020 dataset path and feature type are hard-coded below.
    """
    dirname = os.path.dirname
    basename = os.path.splitext(os.path.basename(feat_model_path))[0]
    if "checkpoint" in dirname(feat_model_path):
        dir_path = dirname(dirname(feat_model_path))
    else:
        dir_path = dirname(feat_model_path)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torch.load(feat_model_path)
    loss_model = torch.load(loss_model_path) if add_loss is not None else None
    test_set_VCC = VCC2020("/data2/neil/VCC2020/", "LFCC", feat_len=args.feat_len)
    testDataLoader = DataLoader(test_set_VCC, batch_size=args.batch_size, shuffle=False, num_workers=0)
    model.eval()
    score_loader, idx_loader = [], []
    with open(os.path.join(dir_path, 'checkpoint_cm_score_VCC.txt'), 'w') as cm_score_file:
        for i, (feat, _, tags, labels, _) in enumerate(tqdm(testDataLoader)):
            if args.feat == "Raw":
                feat = feat.to(args.device)
            else:
                feat = feat.transpose(2, 3).to(args.device)
            tags = tags.to(device)
            labels = labels.to(device)
            feats, feat_outputs = model(feat)
            # Derive one bona-fide score per utterance from the chosen loss head.
            if add_loss == "softmax":
                score = F.softmax(feat_outputs)[:, 0]
            elif add_loss == "ocsoftmax":
                ang_isoloss, score = loss_model(feats, labels)
            elif add_loss == "isolate":
                _, score = loss_model(feats, labels)
            elif add_loss == "scl":
                score_softmax = F.softmax(feat_outputs)[:, 0]
                _, score_scl = loss_model(feats, labels)
                score = score_softmax + args.weight_loss * score_scl
            elif add_loss == "amsoftmax":
                outputs, moutputs = loss_model(feats, labels)
                score = F.softmax(outputs, dim=1)[:, 0]
            elif add_loss == "angulariso":
                angularisoloss, score = loss_model(feats, labels)
            else:
                raise ValueError("what is the loss?")
            for j in range(labels.size(0)):
                cm_score_file.write(
                    'A%02d %s %s\n' % (tags[j].data,
                                       "spoof" if labels[j].data.cpu().numpy() else "bonafide",
                                       score[j].item()))
            score_loader.append(score.detach().cpu())
            idx_loader.append(labels.detach().cpu())
    # EER over the whole set: label 0 is bona fide, label 1 is spoof.
    scores = torch.cat(score_loader, 0).data.cpu().numpy()
    labels = torch.cat(idx_loader, 0).data.cpu().numpy()
    eer = em.compute_eer(scores[labels == 0], scores[labels == 1])[0]
    return eer
def test_on_ASVspoof2015(feat_model_path, loss_model_path, part, add_loss):
    """Score the ASVspoof2015 eval set with a saved model and return the EER.

    Writes `checkpoint_cm_score_ASVspoof2015.txt` next to the model (one
    level up for epoch checkpoints).  Relies on the module-level `args`.

    NOTE(review): the `part` parameter is accepted but ignored — the
    dataset below is hard-coded to part="eval".
    """
    dirname = os.path.dirname
    basename = os.path.splitext(os.path.basename(feat_model_path))[0]
    if "checkpoint" in dirname(feat_model_path):
        dir_path = dirname(dirname(feat_model_path))
    else:
        dir_path = dirname(feat_model_path)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torch.load(feat_model_path)
    loss_model = torch.load(loss_model_path) if add_loss is not None else None
    test_set_2015 = ASVspoof2015("/data2/neil/ASVspoof2015/", part="eval", feature="LFCC", feat_len=args.feat_len)
    testDataLoader = DataLoader(test_set_2015, batch_size=args.batch_size, shuffle=False, num_workers=0)
    model.eval()
    score_loader, idx_loader = [], []
    with open(os.path.join(dir_path, 'checkpoint_cm_score_ASVspoof2015.txt'), 'w') as cm_score_file:
        for i, (feat, audio_fn, tags, labels, _) in enumerate(tqdm(testDataLoader)):
            if args.feat == "Raw":
                feat = feat.to(args.device)
            else:
                feat = feat.transpose(2, 3).to(args.device)
            tags = tags.to(device)
            labels = labels.to(device)
            feats, feat_outputs = model(feat)
            # Derive one bona-fide score per utterance from the chosen loss head.
            if add_loss == "softmax":
                score = F.softmax(feat_outputs)[:, 0]
            elif add_loss == "ocsoftmax":
                ang_isoloss, score = loss_model(feats, labels)
            elif add_loss == "isolate":
                _, score = loss_model(feats, labels)
            elif add_loss == "scl":
                score_softmax = F.softmax(feat_outputs)[:, 0]
                _, score_scl = loss_model(feats, labels)
                score = score_softmax + args.weight_loss * score_scl
            elif add_loss == "amsoftmax":
                outputs, moutputs = loss_model(feats, labels)
                score = F.softmax(outputs, dim=1)[:, 0]
            elif add_loss == "angulariso":
                angularisoloss, score = loss_model(feats, labels)
            else:
                raise ValueError("what is the loss?")
            for j in range(labels.size(0)):
                cm_score_file.write(
                    '%s A%02d %s %s\n' % (audio_fn[j], tags[j].data,
                                          "spoof" if labels[j].data.cpu().numpy() else "bonafide",
                                          score[j].item()))
            score_loader.append(score.detach().cpu())
            idx_loader.append(labels.detach().cpu())
    # EER over the whole set: label 0 is bona fide, label 1 is spoof.
    scores = torch.cat(score_loader, 0).data.cpu().numpy()
    labels = torch.cat(idx_loader, 0).data.cpu().numpy()
    eer = em.compute_eer(scores[labels == 0], scores[labels == 1])[0]
    return eer
def test_individual_attacks(cm_score_file):
    """Compute the CM EER separately for each spoofing attack (A00..A54).

    The score file has whitespace-separated columns:
    utterance name, attack id ("A%02d"), key ("bonafide"/"spoof"), score.

    Returns a list of 55 EERs, each computed from the bona fide scores
    versus that single attack's spoof scores.
    """
    # Load CM scores
    cm_data = np.genfromtxt(cm_score_file, dtype=str)
    cm_sources = cm_data[:, 1]
    cm_keys = cm_data[:, 2]
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    cm_scores = cm_data[:, 3].astype(float)
    # The bona fide scores are shared by every per-attack comparison.
    bona_cm = cm_scores[cm_keys == 'bonafide']
    eer_cm_lst = []
    for attack_idx in range(0, 55):
        # Spoof scores restricted to this attack only.
        spoof_cm = cm_scores[cm_sources == 'A%02d' % attack_idx]
        eer_cm_lst.append(em.compute_eer(bona_cm, spoof_cm)[0])
    return eer_cm_lst
def test_on_ASVspoof2019LASim(feat_model_path, loss_model_path, part, add_loss):
    """Score the device-simulated ASVspoof2019 LA eval set.

    Writes `checkpoint_cm_score_ASVspoof2019LASim.txt` next to the model
    (one level up for epoch checkpoints) and returns (EER, min t-DCF).
    Only a shuffled subset of the data is scored (see the early `break`
    below).  Relies on the module-level `args` namespace.
    """
    dirname = os.path.dirname
    # basename = os.path.splitext(os.path.basename(feat_model_path))[0]
    if "checkpoint" in dirname(feat_model_path):
        dir_path = dirname(dirname(feat_model_path))
    else:
        dir_path = dirname(feat_model_path)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torch.load(feat_model_path)
    loss_model = torch.load(loss_model_path) if add_loss is not None else None
    test_set = ASVspoof2019LASim(path_to_features="/data2/neil/ASVspoof2019LA/",
                                 path_to_deviced="/dataNVME/neil/ASVspoof2019LADevice",
                                 part="eval",
                                 feature=args.feat, feat_len=args.feat_len)
    testDataLoader = DataLoader(test_set, batch_size=args.batch_size, shuffle=True, num_workers=0)
    model.eval()
    # score_loader, idx_loader = [], []
    with open(os.path.join(dir_path, 'checkpoint_cm_score_ASVspoof2019LASim.txt'), 'w') as cm_score_file:
        for i, (feat, audio_fn, tags, labels, _) in enumerate(tqdm(testDataLoader)):
            # Stop after ~1/(num_devices+1) of the shuffled batches so the
            # subset is roughly one device-condition's worth of data.
            if i > int(len(test_set) / args.batch_size / (len(test_set.devices) + 1)): break
            if args.feat == "Raw":
                feat = feat.to(args.device)
            else:
                feat = feat.transpose(2, 3).to(args.device)
            # print(feat.shape)
            tags = tags.to(device)
            labels = labels.to(device)
            feats, feat_outputs = model(feat)
            # Derive one bona-fide score per utterance from the chosen loss head.
            if add_loss == "softmax":
                score = F.softmax(feat_outputs)[:, 0]
            elif add_loss == "ocsoftmax":
                ang_isoloss, score = loss_model(feats, labels)
            elif add_loss == "isolate":
                _, score = loss_model(feats, labels)
            elif add_loss == "scl":
                score_softmax = F.softmax(feat_outputs)[:, 0]
                _, score_scl = loss_model(feats, labels)
                score = score_softmax + args.weight_loss * score_scl
            elif add_loss == "amsoftmax":
                outputs, moutputs = loss_model(feats, labels)
                score = F.softmax(outputs, dim=1)[:, 0]
            elif add_loss == "angulariso":
                angularisoloss, score = loss_model(feats, labels)
            else:
                raise ValueError("what is the loss?")
            for j in range(labels.size(0)):
                cm_score_file.write(
                    '%s A%02d %s %s\n' % (audio_fn[j], tags[j].data,
                                          "spoof" if labels[j].data.cpu().numpy() else "bonafide",
                                          score[j].item()))
    # score_loader.append(score.detach().cpu())
    # idx_loader.append(labels.detach().cpu())
    #
    # scores = torch.cat(score_loader, 0).data.cpu().numpy()
    # labels = torch.cat(idx_loader, 0).data.cpu().numpy()
    # eer = em.compute_eer(scores[labels == 0], scores[labels == 1])[0]
    eer, min_tDCF = compute_eer_and_tdcf(os.path.join(dir_path, 'checkpoint_cm_score_ASVspoof2019LASim.txt'),
                                         "/data/neil/DS_10283_3336/")
    return eer, min_tDCF
def test_on_ASVspoof2021LA(feat_model_path, loss_model_path, part, add_loss):
    """Score the ASVspoof 2021 LA evaluation partition.

    Loads the saved feature model (and loss model when ``add_loss`` is set),
    runs every utterance through it, and writes one ``<utt_id> <score>`` line
    per utterance to ``score.txt`` next to the checkpoint directory.
    """
    parent_dir = os.path.dirname(feat_model_path)
    # Checkpoints live one level below the experiment directory.
    dir_path = os.path.dirname(parent_dir) if "checkpoint" in parent_dir else parent_dir
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torch.load(feat_model_path)
    loss_model = None if add_loss is None else torch.load(loss_model_path)
    # Generate scores for the full LA 2021 challenge evaluation set.
    test_set = ASVspoof2021LAeval(feature=args.feat, feat_len=args.feat_len)
    eval_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=0)
    model.eval()
    txt_file_name = os.path.join(dir_path, 'score.txt')
    with open(txt_file_name, 'w') as cm_score_file:
        for batch in tqdm(eval_loader):
            feat, audio_fn = batch
            # Raw waveform features are consumed as-is; spectral features are
            # transposed to (batch, channel, freq, time) first.
            feat = feat.to(args.device) if args.feat == "Raw" else feat.transpose(2, 3).to(args.device)
            # The eval set has no ground truth; dummy labels feed the loss heads.
            labels = torch.zeros((feat.shape[0])).to(device)
            feats, feat_outputs = model(feat)
            if add_loss == "softmax":
                score = F.softmax(feat_outputs)[:, 0]
            elif add_loss == "ocsoftmax":
                _, score = loss_model(feats, labels)
            elif add_loss == "isolate":
                _, score = loss_model(feats, labels)
            elif add_loss == "scl":
                softmax_score = F.softmax(feat_outputs)[:, 0]
                _, contrastive_score = loss_model(feats, labels)
                score = softmax_score + args.weight_loss * contrastive_score
            elif add_loss == "amsoftmax":
                outputs, _ = loss_model(feats, labels)
                score = F.softmax(outputs, dim=1)[:, 0]
            elif add_loss == "angulariso":
                _, score = loss_model(feats, labels)
            else:
                raise ValueError("what is the loss?")
            for j in range(labels.size(0)):
                cm_score_file.write('%s %s\n' % (audio_fn[j], score[j].item()))
if __name__ == "__main__":
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    device = torch.device("cuda")
    args = init()
    model_path = os.path.join(args.model_dir, "anti-spoofing_feat_model.pt")
    loss_model_path = os.path.join(args.model_dir, "anti-spoofing_loss_model.pt")
    # Task name -> (scoring routine, whether the returned metric is echoed).
    task_runners = {
        "ASVspoof2019LA": (test_model_on_ASVspoof2019LA, False),
        "ASVspoof2015": (test_on_ASVspoof2015, True),
        "VCC2020": (test_on_VCC, True),
        "ASVspoof2019LASim": (test_on_ASVspoof2019LASim, False),
        "ASVspoof2021LA": (test_on_ASVspoof2021LA, False),
    }
    if args.task not in task_runners:
        raise ValueError("Evaluation task unknown!")
    runner, echo = task_runners[args.task]
    eer = runner(model_path, loss_model_path, "eval", args.loss)
    if echo:
        print(eer)
| [
"utils.setup_seed",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"os.path.basename",
"torch.load",
"eval_metrics.compute_eer",
"numpy.genfromtxt",
"torch.utils.model_zoo.tqdm",
"torch.nn.functional.softmax",
"torch.cat",
"torch.cuda.is_available",
"torch.device",
"torch.zeros",
... | [((518, 562), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""load model scores"""'], {}), "('load model scores')\n", (541, 562), False, 'import argparse\n'), ((2198, 2219), 'utils.setup_seed', 'setup_seed', (['args.seed'], {}), '(args.seed)\n', (2208, 2219), False, 'from utils import setup_seed\n'), ((2253, 2278), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2276, 2278), False, 'import torch\n'), ((2297, 2341), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (2309, 2341), False, 'import torch\n'), ((2786, 2813), 'torch.load', 'torch.load', (['feat_model_path'], {}), '(feat_model_path)\n', (2796, 2813), False, 'import torch\n'), ((3059, 3137), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_set, batch_size=args.batch_size, shuffle=False, num_workers=0)\n', (3069, 3137), False, 'from torch.utils.data import DataLoader\n'), ((5780, 5807), 'torch.load', 'torch.load', (['feat_model_path'], {}), '(feat_model_path)\n', (5790, 5807), False, 'import torch\n'), ((5991, 6077), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set_VCC'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_set_VCC, batch_size=args.batch_size, shuffle=False,\n num_workers=0)\n', (6001, 6077), False, 'from torch.utils.data import DataLoader\n'), ((8459, 8486), 'torch.load', 'torch.load', (['feat_model_path'], {}), '(feat_model_path)\n', (8469, 8486), False, 'import torch\n'), ((8702, 8789), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set_2015'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_set_2015, batch_size=args.batch_size, shuffle=False,\n num_workers=0)\n', (8712, 8789), False, 'from torch.utils.data import DataLoader\n'), ((10863, 10902), 'numpy.genfromtxt', 'np.genfromtxt', (['cm_score_file'], 
{'dtype': 'str'}), '(cm_score_file, dtype=str)\n', (10876, 10902), True, 'import numpy as np\n'), ((11976, 12003), 'torch.load', 'torch.load', (['feat_model_path'], {}), '(feat_model_path)\n', (11986, 12003), False, 'import torch\n'), ((12440, 12517), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(test_set, batch_size=args.batch_size, shuffle=True, num_workers=0)\n', (12450, 12517), False, 'from torch.utils.data import DataLoader\n'), ((15208, 15235), 'torch.load', 'torch.load', (['feat_model_path'], {}), '(feat_model_path)\n', (15218, 15235), False, 'import torch\n'), ((15476, 15554), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_set, batch_size=args.batch_size, shuffle=False, num_workers=0)\n', (15486, 15554), False, 'from torch.utils.data import DataLoader\n'), ((15593, 15628), 'os.path.join', 'os.path.join', (['dir_path', '"""score.txt"""'], {}), "(dir_path, 'score.txt')\n", (15605, 15628), False, 'import os\n'), ((17146, 17166), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (17158, 17166), False, 'import torch\n'), ((17204, 17263), 'os.path.join', 'os.path.join', (['args.model_dir', '"""anti-spoofing_feat_model.pt"""'], {}), "(args.model_dir, 'anti-spoofing_feat_model.pt')\n", (17216, 17263), False, 'import os\n'), ((17286, 17345), 'os.path.join', 'os.path.join', (['args.model_dir', '"""anti-spoofing_loss_model.pt"""'], {}), "(args.model_dir, 'anti-spoofing_loss_model.pt')\n", (17298, 17345), False, 'import os\n'), ((2831, 2858), 'torch.load', 'torch.load', (['loss_model_path'], {}), '(loss_model_path)\n', (2841, 2858), False, 'import torch\n'), ((5205, 5269), 'os.path.join', 'os.path.join', (['dir_path', '"""checkpoint_cm_score_ASVspoof2019LA.txt"""'], {}), "(dir_path, 'checkpoint_cm_score_ASVspoof2019LA.txt')\n", (5217, 5269), False, 'import 
os\n'), ((5825, 5852), 'torch.load', 'torch.load', (['loss_model_path'], {}), '(loss_model_path)\n', (5835, 5852), False, 'import torch\n'), ((7964, 8020), 'eval_metrics.compute_eer', 'em.compute_eer', (['scores[labels == 0]', 'scores[labels == 1]'], {}), '(scores[labels == 0], scores[labels == 1])\n', (7978, 8020), True, 'import eval_metrics as em\n'), ((8504, 8531), 'torch.load', 'torch.load', (['loss_model_path'], {}), '(loss_model_path)\n', (8514, 8531), False, 'import torch\n'), ((10707, 10763), 'eval_metrics.compute_eer', 'em.compute_eer', (['scores[labels == 0]', 'scores[labels == 1]'], {}), '(scores[labels == 0], scores[labels == 1])\n', (10721, 10763), True, 'import eval_metrics as em\n'), ((12021, 12048), 'torch.load', 'torch.load', (['loss_model_path'], {}), '(loss_model_path)\n', (12031, 12048), False, 'import torch\n'), ((14688, 14755), 'os.path.join', 'os.path.join', (['dir_path', '"""checkpoint_cm_score_ASVspoof2019LASim.txt"""'], {}), "(dir_path, 'checkpoint_cm_score_ASVspoof2019LASim.txt')\n", (14700, 14755), False, 'import os\n'), ((15253, 15280), 'torch.load', 'torch.load', (['loss_model_path'], {}), '(loss_model_path)\n', (15263, 15280), False, 'import torch\n'), ((2506, 2539), 'os.path.basename', 'os.path.basename', (['feat_model_path'], {}), '(feat_model_path)\n', (2522, 2539), False, 'import os\n'), ((2736, 2761), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2759, 2761), False, 'import torch\n'), ((3208, 3272), 'os.path.join', 'os.path.join', (['dir_path', '"""checkpoint_cm_score_ASVspoof2019LA.txt"""'], {}), "(dir_path, 'checkpoint_cm_score_ASVspoof2019LA.txt')\n", (3220, 3272), False, 'import os\n'), ((3359, 3379), 'torch.utils.model_zoo.tqdm', 'tqdm', (['testDataLoader'], {}), '(testDataLoader)\n', (3363, 3379), False, 'from torch.utils.model_zoo import tqdm\n'), ((5500, 5533), 'os.path.basename', 'os.path.basename', (['feat_model_path'], {}), '(feat_model_path)\n', (5516, 5533), False, 'import os\n'), ((5730, 
5755), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5753, 5755), False, 'import torch\n'), ((6144, 6197), 'os.path.join', 'os.path.join', (['dir_path', '"""checkpoint_cm_score_VCC.txt"""'], {}), "(dir_path, 'checkpoint_cm_score_VCC.txt')\n", (6156, 6197), False, 'import os\n'), ((6277, 6297), 'torch.utils.model_zoo.tqdm', 'tqdm', (['testDataLoader'], {}), '(testDataLoader)\n', (6281, 6297), False, 'from torch.utils.model_zoo import tqdm\n'), ((8179, 8212), 'os.path.basename', 'os.path.basename', (['feat_model_path'], {}), '(feat_model_path)\n', (8195, 8212), False, 'import os\n'), ((8409, 8434), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8432, 8434), False, 'import torch\n'), ((8856, 8918), 'os.path.join', 'os.path.join', (['dir_path', '"""checkpoint_cm_score_ASVspoof2015.txt"""'], {}), "(dir_path, 'checkpoint_cm_score_ASVspoof2015.txt')\n", (8868, 8918), False, 'import os\n'), ((9005, 9025), 'torch.utils.model_zoo.tqdm', 'tqdm', (['testDataLoader'], {}), '(testDataLoader)\n', (9009, 9025), False, 'from torch.utils.model_zoo import tqdm\n'), ((11456, 11489), 'eval_metrics.compute_eer', 'em.compute_eer', (['bona_cm', 'spoof_cm'], {}), '(bona_cm, spoof_cm)\n', (11470, 11489), True, 'import eval_metrics as em\n'), ((11926, 11951), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11949, 11951), False, 'import torch\n'), ((12590, 12657), 'os.path.join', 'os.path.join', (['dir_path', '"""checkpoint_cm_score_ASVspoof2019LASim.txt"""'], {}), "(dir_path, 'checkpoint_cm_score_ASVspoof2019LASim.txt')\n", (12602, 12657), False, 'import os\n'), ((12744, 12764), 'torch.utils.model_zoo.tqdm', 'tqdm', (['testDataLoader'], {}), '(testDataLoader)\n', (12748, 12764), False, 'from torch.utils.model_zoo import tqdm\n'), ((15158, 15183), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15181, 15183), False, 'import torch\n'), ((15721, 15741), 'torch.utils.model_zoo.tqdm', 'tqdm', 
(['testDataLoader'], {}), '(testDataLoader)\n', (15725, 15741), False, 'from torch.utils.model_zoo import tqdm\n'), ((15963, 15989), 'torch.zeros', 'torch.zeros', (['feat.shape[0]'], {}), '(feat.shape[0])\n', (15974, 15989), False, 'import torch\n'), ((3755, 3778), 'torch.nn.functional.softmax', 'F.softmax', (['feat_outputs'], {}), '(feat_outputs)\n', (3764, 3778), True, 'import torch.nn.functional as F\n'), ((6642, 6665), 'torch.nn.functional.softmax', 'F.softmax', (['feat_outputs'], {}), '(feat_outputs)\n', (6651, 6665), True, 'import torch.nn.functional as F\n'), ((9369, 9392), 'torch.nn.functional.softmax', 'F.softmax', (['feat_outputs'], {}), '(feat_outputs)\n', (9378, 9392), True, 'import torch.nn.functional as F\n'), ((13233, 13256), 'torch.nn.functional.softmax', 'F.softmax', (['feat_outputs'], {}), '(feat_outputs)\n', (13242, 13256), True, 'import torch.nn.functional as F\n'), ((16141, 16164), 'torch.nn.functional.softmax', 'F.softmax', (['feat_outputs'], {}), '(feat_outputs)\n', (16150, 16164), True, 'import torch.nn.functional as F\n'), ((7851, 7877), 'torch.cat', 'torch.cat', (['score_loader', '(0)'], {}), '(score_loader, 0)\n', (7860, 7877), False, 'import torch\n'), ((7910, 7934), 'torch.cat', 'torch.cat', (['idx_loader', '(0)'], {}), '(idx_loader, 0)\n', (7919, 7934), False, 'import torch\n'), ((10594, 10620), 'torch.cat', 'torch.cat', (['score_loader', '(0)'], {}), '(score_loader, 0)\n', (10603, 10620), False, 'import torch\n'), ((10653, 10677), 'torch.cat', 'torch.cat', (['idx_loader', '(0)'], {}), '(idx_loader, 0)\n', (10662, 10677), False, 'import torch\n'), ((4051, 4074), 'torch.nn.functional.softmax', 'F.softmax', (['feat_outputs'], {}), '(feat_outputs)\n', (4060, 4074), True, 'import torch.nn.functional as F\n'), ((6938, 6961), 'torch.nn.functional.softmax', 'F.softmax', (['feat_outputs'], {}), '(feat_outputs)\n', (6947, 6961), True, 'import torch.nn.functional as F\n'), ((9665, 9688), 'torch.nn.functional.softmax', 'F.softmax', 
(['feat_outputs'], {}), '(feat_outputs)\n', (9674, 9688), True, 'import torch.nn.functional as F\n'), ((13529, 13552), 'torch.nn.functional.softmax', 'F.softmax', (['feat_outputs'], {}), '(feat_outputs)\n', (13538, 13552), True, 'import torch.nn.functional as F\n'), ((16437, 16460), 'torch.nn.functional.softmax', 'F.softmax', (['feat_outputs'], {}), '(feat_outputs)\n', (16446, 16460), True, 'import torch.nn.functional as F\n'), ((4335, 4360), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (4344, 4360), True, 'import torch.nn.functional as F\n'), ((7222, 7247), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (7231, 7247), True, 'import torch.nn.functional as F\n'), ((9949, 9974), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (9958, 9974), True, 'import torch.nn.functional as F\n'), ((13813, 13838), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (13822, 13838), True, 'import torch.nn.functional as F\n'), ((16721, 16746), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (16730, 16746), True, 'import torch.nn.functional as F\n')] |
import os
import numpy as np
import torch.utils.data as torch_data
import lib.utils.calibration as calibration
import lib.utils.kitti_utils as kitti_utils
from PIL import Image
import argoverse
#from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader
import lib.datasets.ground_segmentation as gs
from pyntcloud import PyntCloud
import random
import copy
class KittiDataset(torch_data.Dataset):
    """Argoverse lidar sweeps exposed through a KITTI-style dataset interface.

    Indexes every ``.bin`` sweep under ``<root_dir>/sample/argoverse/lidar``
    and rotates loaded point clouds from Argoverse axes into KITTI axes.
    """

    def __init__(self, root_dir, split='train'):
        self.split = split
        self.imageset_dir = os.path.join(root_dir, "sample/argoverse/lidar")
        lidarfile_list = os.listdir(self.imageset_dir)
        # Frame index = file name without extension.
        self.image_idx_list = [x.split('.')[0] for x in lidarfile_list]
        self.num_sample = len(self.image_idx_list)
        self.lidar_dir = self.imageset_dir
        # Fixed rotation taking Argoverse lidar axes into the KITTI camera frame.
        self.argo_to_kitti = np.array([[6.927964e-03, -9.999722e-01, -2.757829e-03],
                                       [-1.162982e-03, 2.749836e-03, -9.999955e-01],
                                       [9.999753e-01, 6.931141e-03, -1.143899e-03]])
        self.ground_removal = True

    def get_lidar(self, idx):
        """Load sweep ``idx``, optionally drop ground points, rotate to KITTI axes.

        Returns an (N, 3) array of xyz coordinates.
        """
        lidar_file = os.path.join(self.lidar_dir, "%06d.bin" % idx)
        assert os.path.exists(lidar_file)
        # NOTE(review): np.fromfile reads float64 by default — confirm this
        # matches the on-disk .bin layout.
        pts_lidar = np.fromfile(lidar_file).reshape(-1, 3)
        if self.ground_removal:
            pts_lidar = gs.ground_segmentation(pts_lidar)
        pts_lidar = np.dot(self.argo_to_kitti, pts_lidar.T).T
        return pts_lidar
| [
"numpy.fromfile",
"os.path.exists",
"lib.datasets.ground_segmentation.ground_segmentation",
"numpy.array",
"numpy.dot",
"os.path.join",
"os.listdir"
] | [((569, 617), 'os.path.join', 'os.path.join', (['root_dir', '"""sample/argoverse/lidar"""'], {}), "(root_dir, 'sample/argoverse/lidar')\n", (581, 617), False, 'import os\n'), ((643, 672), 'os.listdir', 'os.listdir', (['self.imageset_dir'], {}), '(self.imageset_dir)\n', (653, 672), False, 'import os\n'), ((901, 1038), 'numpy.array', 'np.array', (['[[0.006927964, -0.9999722, -0.002757829], [-0.001162982, 0.002749836, -\n 0.9999955], [0.9999753, 0.006931141, -0.001143899]]'], {}), '([[0.006927964, -0.9999722, -0.002757829], [-0.001162982, \n 0.002749836, -0.9999955], [0.9999753, 0.006931141, -0.001143899]])\n', (909, 1038), True, 'import numpy as np\n'), ((1222, 1268), 'os.path.join', 'os.path.join', (['self.lidar_dir', "('%06d.bin' % idx)"], {}), "(self.lidar_dir, '%06d.bin' % idx)\n", (1234, 1268), False, 'import os\n'), ((1281, 1307), 'os.path.exists', 'os.path.exists', (['lidar_file'], {}), '(lidar_file)\n', (1295, 1307), False, 'import os\n'), ((1599, 1632), 'lib.datasets.ground_segmentation.ground_segmentation', 'gs.ground_segmentation', (['pts_lidar'], {}), '(pts_lidar)\n', (1621, 1632), True, 'import lib.datasets.ground_segmentation as gs\n'), ((1662, 1701), 'numpy.dot', 'np.dot', (['self.argo_to_kitti', 'pts_lidar.T'], {}), '(self.argo_to_kitti, pts_lidar.T)\n', (1668, 1701), True, 'import numpy as np\n'), ((1347, 1370), 'numpy.fromfile', 'np.fromfile', (['lidar_file'], {}), '(lidar_file)\n', (1358, 1370), True, 'import numpy as np\n')] |
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import style
from matplotlib import cm

import numpy as np

# Load the simulated wave field: one row per time sample, one column per
# spatial position.
data = np.loadtxt("onda.dat")

fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(111, projection='3d')

# Integer sample grids matching the data array; divided by 100 below to
# convert sample indices into physical units.
x, y = np.mgrid[0:data.shape[0], 0:data.shape[1]]
print(np.shape(x), np.shape(y), np.shape(data))

ax1.plot_surface(x/100, y/100, data, cmap=cm.rainbow)
plt.xlabel("Tiempo (segundos)")
plt.ylabel("Posición (metros)")
# Bug fix: savefig must run BEFORE show() — once show() returns the figure
# is torn down and savefig would write a blank image. The stray legend()
# call (no labeled artists on a plot_surface) was dead and has been removed.
plt.savefig("plot.png")
plt.show()
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((154, 176), 'numpy.loadtxt', 'np.loadtxt', (['"""onda.dat"""'], {}), "('onda.dat')\n", (164, 176), True, 'import numpy as np\n'), ((182, 209), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (192, 209), True, 'import matplotlib.pyplot as plt\n'), ((402, 433), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tiempo (segundos)"""'], {}), "('Tiempo (segundos)')\n", (412, 433), True, 'import matplotlib.pyplot as plt\n'), ((434, 465), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Posición (metros)"""'], {}), "('Posición (metros)')\n", (444, 465), True, 'import matplotlib.pyplot as plt\n'), ((469, 479), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (477, 479), True, 'import matplotlib.pyplot as plt\n'), ((480, 492), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (490, 492), True, 'import matplotlib.pyplot as plt\n'), ((493, 516), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot.png"""'], {}), "('plot.png')\n", (504, 516), True, 'import matplotlib.pyplot as plt\n'), ((306, 317), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (314, 317), True, 'import numpy as np\n'), ((319, 330), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (327, 330), True, 'import numpy as np\n'), ((332, 346), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (340, 346), True, 'import numpy as np\n')] |
import os
import os.path as osp
import numpy as np
from PIL import Image
import torch
# import torchvision
from torch.utils import data
import glob
class DAVIS_MO_Test(data.Dataset):
    """DAVIS multi-object dataset reader for evaluation.

    Per video, yields all frames as a float tensor, all masks one-hot
    encoded over ``K`` label channels, the number of annotated objects,
    and an ``info`` dict with the video name, frame count and 480p size.
    """

    def __init__(self, root, imset='2017/train.txt', resolution='480p', single_object=False, max_obj_num=11):
        self.root = root
        self.mask_dir = os.path.join(root, 'Annotations', resolution)
        self.mask480_dir = os.path.join(root, 'Annotations', '480p')
        self.image_dir = os.path.join(root, 'JPEGImages', resolution)
        _imset_dir = os.path.join(root, 'ImageSets')
        _imset_f = os.path.join(_imset_dir, imset)

        self.videos = []
        self.num_frames = {}
        self.num_objects = {}
        self.shape = {}
        self.size_480p = {}
        # The image-set file lists one video name per line.
        with open(_imset_f, "r") as lines:
            for line in lines:
                _video = line.rstrip('\n')
                self.videos.append(_video)
                self.num_frames[_video] = len(glob.glob(os.path.join(self.image_dir, _video, '*.jpg')))
                # The first-frame annotation fixes the object count and mask shape.
                _mask = np.array(Image.open(os.path.join(self.mask_dir, _video, '00000.png')).convert("P"))
                self.num_objects[_video] = np.max(_mask)
                self.shape[_video] = np.shape(_mask)
                _mask480 = np.array(Image.open(os.path.join(self.mask480_dir, _video, '00000.png')).convert("P"))
                self.size_480p[_video] = np.shape(_mask480)
        # Number of one-hot channels (background label 0 plus object labels).
        self.K = max_obj_num
        self.single_object = single_object

    def __len__(self):
        return len(self.videos)

    def To_onehot(self, mask):
        """Expand an (H, W) label mask into a (K, H, W) uint8 one-hot array."""
        M = np.zeros((self.K, mask.shape[0], mask.shape[1]), dtype=np.uint8)
        for k in range(self.K):
            M[k] = (mask == k).astype(np.uint8)
        return M

    def All_to_onehot(self, masks):
        """Expand (T, H, W) label masks into a (K, T, H, W) uint8 one-hot array."""
        Ms = np.zeros((self.K, masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np.uint8)
        for n in range(masks.shape[0]):
            Ms[:, n] = self.To_onehot(masks[n])
        return Ms

    def __getitem__(self, index):
        video = self.videos[index]
        info = {}
        info['name'] = video
        info['num_frames'] = self.num_frames[video]
        info['size_480p'] = self.size_480p[video]

        N_frames = np.empty((self.num_frames[video],) + self.shape[video] + (3,), dtype=np.float32)
        N_masks = np.empty((self.num_frames[video],) + self.shape[video], dtype=np.uint8)
        for f in range(self.num_frames[video]):
            img_file = os.path.join(self.image_dir, video, '{:05d}.jpg'.format(f))
            N_frames[f] = np.array(Image.open(img_file).convert('RGB'))/255.
            try:
                mask_file = os.path.join(self.mask_dir, video, '{:05d}.png'.format(f))
                N_masks[f] = np.array(Image.open(mask_file).convert('P'), dtype=np.uint8)
            except Exception:
                # Annotation missing/unreadable for this frame: mark every pixel
                # with the "ignore" label. (Was a bare except, which would also
                # swallow KeyboardInterrupt/SystemExit.)
                N_masks[f] = 255

        Fs = torch.from_numpy(np.transpose(N_frames.copy(), (3, 0, 1, 2)).copy()).float()
        if self.single_object:
            # Collapse all object labels into a single foreground class.
            N_masks = (N_masks > 0.5).astype(np.uint8) * (N_masks < 255).astype(np.uint8)
            Ms = torch.from_numpy(self.All_to_onehot(N_masks).copy()).float()
            num_objects = torch.LongTensor([int(1)])
            return Fs, Ms, num_objects, info
        else:
            Ms = torch.from_numpy(self.All_to_onehot(N_masks).copy()).float()
            num_objects = torch.LongTensor([int(self.num_objects[video])])
            return Fs, Ms, num_objects, info
class YTVOS_val(data.Dataset):
    """YouTube-VOS validation-set reader.

    Scans ``<root>/<imset>/JPEGImages`` for videos and, from the sparse
    annotation masks, records per video: its frame-id strings, the highest
    object label observed, and the first frame index at which each object
    id appears (``start_frame``).
    """

    def __init__(self, root, imset='valid'):
        self.root = root
        self.mask_dir = os.path.join(root, imset, 'Annotations')
        self.image_dir = os.path.join(root, imset, 'JPEGImages')
        self.videos = []
        self.num_frames = {}
        self.frame_ids = {}    # video -> list of frame-id strings (file stems)
        self.num_objects = {}  # video -> highest object label seen in any mask
        self.shape = {}        # video -> (H, W) of the last annotated mask seen
        self.start_frame = {}  # video -> {object id -> first frame index it appears}
        max_obj_num = 0
        for vid in sorted(os.listdir(self.image_dir)):
            if vid == '.' or vid == '..':
                continue
            self.videos.append(vid)
            self.num_frames[vid] = len(glob.glob(os.path.join(self.image_dir, vid, '*.jpg')))
            self.frame_ids[vid] = []
            self.start_frame[vid] = {}
            cur_obj_num = 0
            for t, name in enumerate(sorted(os.listdir(os.path.join(self.image_dir, vid)))):
                frame_id = name.split('.')[0]
                self.frame_ids[vid].append(frame_id)
                mask_file = os.path.join(self.mask_dir, vid, frame_id + '.png')
                # Annotations are sparse: only some frames carry a mask file.
                if os.path.exists(mask_file):
                    mask = np.array(Image.open(mask_file).convert('P'), dtype=np.uint8)
                    self.shape[vid] = np.shape(mask)
                    max_obj = np.max(mask)
                    # Remember the first frame where each object id shows up.
                    for k in range(1, max_obj + 1):
                        if (k in mask) and (k not in self.start_frame[vid].keys()):
                            self.start_frame[vid][k] = t
                    if max_obj > cur_obj_num:
                        cur_obj_num = max_obj
            self.num_objects[vid] = cur_obj_num
            max_obj_num = max(max_obj_num, self.num_objects[vid])
        print(max_obj_num)  # debug: dataset-wide maximum object count
        # Number of one-hot channels is fixed at 6, not derived from max_obj_num.
        self.K = 6
        self.single_object = False

    def __len__(self):
        return len(self.videos)

    def To_onehot(self, mask):
        # Expand an (H, W) label mask into a (K, H, W) uint8 one-hot array.
        M = np.zeros((self.K, mask.shape[0], mask.shape[1]), dtype=np.uint8)
        for k in range(self.K):
            M[k] = (mask == k).astype(np.uint8)
        return M

    def All_to_onehot(self, masks):
        # Expand (T, H, W) label masks into a (K, T, H, W) uint8 one-hot array.
        Ms = np.zeros((self.K, masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np.uint8)
        for n in range(masks.shape[0]):
            Ms[:, n] = self.To_onehot(masks[n])
        return Ms

    def __getitem__(self, index):
        # Returns (frames, one-hot masks, object count, info) for one video.
        video = self.videos[index]
        info = {}
        info['name'] = video
        info['frame_ids'] = self.frame_ids[video]
        info['num_frames'] = self.num_frames[video]
        info['shape'] = self.shape[video]
        info['start_frame'] = self.start_frame[video]
        N_frames = np.empty((self.num_frames[video],) + self.shape[video] + (3,), dtype=np.float32)
        N_masks = np.empty((self.num_frames[video],) + self.shape[video], dtype=np.uint8)
        for t in range(self.num_frames[video]):
            f = int(self.frame_ids[video][t])
            img_file = os.path.join(self.image_dir, video, '{:05d}.jpg'.format(f))
            N_frames[t] = np.array(Image.open(img_file).convert('RGB')) / 255.
            mask_file = os.path.join(self.mask_dir, video, '{:05d}.png'.format(f))
            if os.path.exists(mask_file):
                N_masks[t] = np.array(Image.open(mask_file).convert('P'), dtype=np.uint8)
            else:
                # Unannotated frame: 255 marks "ignore" pixels.
                N_masks[t] = 255
        #if(len(info['start_frame'].keys()) == 0):
        #    print(video)
        #    print(self.frame_ids[video])
        #    assert False
        #print(info['start_frame'])
        Fs = torch.from_numpy(np.transpose(N_frames.copy(), (3, 0, 1, 2)).copy()).float()
        if self.single_object:
            # Collapse all object labels into a single foreground class.
            N_masks = (N_masks > 0.5).astype(np.uint8) * (N_masks < 255).astype(np.uint8)
            Ms = torch.from_numpy(self.All_to_onehot(N_masks).copy()).float()
            num_objects = torch.LongTensor([int(1)])
            return Fs, Ms, num_objects, info
        else:
            Ms = torch.from_numpy(self.All_to_onehot(N_masks).copy()).float()
            num_objects = torch.LongTensor([int(self.num_objects[video])])
            return Fs, Ms, num_objects, info
| [
"numpy.empty",
"numpy.zeros",
"os.path.exists",
"PIL.Image.open",
"numpy.shape",
"numpy.max",
"os.path.join",
"os.listdir"
] | [((383, 428), 'os.path.join', 'os.path.join', (['root', '"""Annotations"""', 'resolution'], {}), "(root, 'Annotations', resolution)\n", (395, 428), False, 'import os\n'), ((456, 497), 'os.path.join', 'os.path.join', (['root', '"""Annotations"""', '"""480p"""'], {}), "(root, 'Annotations', '480p')\n", (468, 497), False, 'import os\n'), ((523, 567), 'os.path.join', 'os.path.join', (['root', '"""JPEGImages"""', 'resolution'], {}), "(root, 'JPEGImages', resolution)\n", (535, 567), False, 'import os\n'), ((589, 620), 'os.path.join', 'os.path.join', (['root', '"""ImageSets"""'], {}), "(root, 'ImageSets')\n", (601, 620), False, 'import os\n'), ((640, 671), 'os.path.join', 'os.path.join', (['_imset_dir', 'imset'], {}), '(_imset_dir, imset)\n', (652, 671), False, 'import os\n'), ((1757, 1821), 'numpy.zeros', 'np.zeros', (['(self.K, mask.shape[0], mask.shape[1])'], {'dtype': 'np.uint8'}), '((self.K, mask.shape[0], mask.shape[1]), dtype=np.uint8)\n', (1765, 1821), True, 'import numpy as np\n'), ((1973, 2060), 'numpy.zeros', 'np.zeros', (['(self.K, masks.shape[0], masks.shape[1], masks.shape[2])'], {'dtype': 'np.uint8'}), '((self.K, masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np\n .uint8)\n', (1981, 2060), True, 'import numpy as np\n'), ((2400, 2485), 'numpy.empty', 'np.empty', (['((self.num_frames[video],) + self.shape[video] + (3,))'], {'dtype': 'np.float32'}), '((self.num_frames[video],) + self.shape[video] + (3,), dtype=np.float32\n )\n', (2408, 2485), True, 'import numpy as np\n'), ((2495, 2566), 'numpy.empty', 'np.empty', (['((self.num_frames[video],) + self.shape[video])'], {'dtype': 'np.uint8'}), '((self.num_frames[video],) + self.shape[video], dtype=np.uint8)\n', (2503, 2566), True, 'import numpy as np\n'), ((3786, 3826), 'os.path.join', 'os.path.join', (['root', 'imset', '"""Annotations"""'], {}), "(root, imset, 'Annotations')\n", (3798, 3826), False, 'import os\n'), ((3852, 3891), 'os.path.join', 'os.path.join', (['root', 'imset', '"""JPEGImages"""'], 
{}), "(root, imset, 'JPEGImages')\n", (3864, 3891), False, 'import os\n'), ((5525, 5589), 'numpy.zeros', 'np.zeros', (['(self.K, mask.shape[0], mask.shape[1])'], {'dtype': 'np.uint8'}), '((self.K, mask.shape[0], mask.shape[1]), dtype=np.uint8)\n', (5533, 5589), True, 'import numpy as np\n'), ((5737, 5824), 'numpy.zeros', 'np.zeros', (['(self.K, masks.shape[0], masks.shape[1], masks.shape[2])'], {'dtype': 'np.uint8'}), '((self.K, masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np\n .uint8)\n', (5745, 5824), True, 'import numpy as np\n'), ((6261, 6346), 'numpy.empty', 'np.empty', (['((self.num_frames[video],) + self.shape[video] + (3,))'], {'dtype': 'np.float32'}), '((self.num_frames[video],) + self.shape[video] + (3,), dtype=np.float32\n )\n', (6269, 6346), True, 'import numpy as np\n'), ((6360, 6431), 'numpy.empty', 'np.empty', (['((self.num_frames[video],) + self.shape[video])'], {'dtype': 'np.uint8'}), '((self.num_frames[video],) + self.shape[video], dtype=np.uint8)\n', (6368, 6431), True, 'import numpy as np\n'), ((4110, 4136), 'os.listdir', 'os.listdir', (['self.image_dir'], {}), '(self.image_dir)\n', (4120, 4136), False, 'import os\n'), ((6786, 6811), 'os.path.exists', 'os.path.exists', (['mask_file'], {}), '(mask_file)\n', (6800, 6811), False, 'import os\n'), ((858, 880), 'os.path.join', 'os.path.join', (['_imset_f'], {}), '(_imset_f)\n', (870, 880), False, 'import os\n'), ((1269, 1282), 'numpy.max', 'np.max', (['_mask'], {}), '(_mask)\n', (1275, 1282), True, 'import numpy as np\n'), ((1320, 1335), 'numpy.shape', 'np.shape', (['_mask'], {}), '(_mask)\n', (1328, 1335), True, 'import numpy as np\n'), ((1491, 1509), 'numpy.shape', 'np.shape', (['_mask480'], {}), '(_mask480)\n', (1499, 1509), True, 'import numpy as np\n'), ((4661, 4712), 'os.path.join', 'os.path.join', (['self.mask_dir', 'vid', "(frame_id + '.png')"], {}), "(self.mask_dir, vid, frame_id + '.png')\n", (4673, 4712), False, 'import os\n'), ((4732, 4757), 'os.path.exists', 'os.path.exists', 
(['mask_file'], {}), '(mask_file)\n', (4746, 4757), False, 'import os\n'), ((4291, 4333), 'os.path.join', 'os.path.join', (['self.image_dir', 'vid', '"""*.jpg"""'], {}), "(self.image_dir, vid, '*.jpg')\n", (4303, 4333), False, 'import os\n'), ((4885, 4899), 'numpy.shape', 'np.shape', (['mask'], {}), '(mask)\n', (4893, 4899), True, 'import numpy as np\n'), ((4930, 4942), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (4936, 4942), True, 'import numpy as np\n'), ((1070, 1115), 'os.path.join', 'os.path.join', (['self.image_dir', '_video', '"""*.jpg"""'], {}), "(self.image_dir, _video, '*.jpg')\n", (1082, 1115), False, 'import os\n'), ((4495, 4528), 'os.path.join', 'os.path.join', (['self.image_dir', 'vid'], {}), '(self.image_dir, vid)\n', (4507, 4528), False, 'import os\n'), ((2731, 2751), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (2741, 2751), False, 'from PIL import Image\n'), ((2917, 2938), 'PIL.Image.open', 'Image.open', (['mask_file'], {}), '(mask_file)\n', (2927, 2938), False, 'from PIL import Image\n'), ((6644, 6664), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (6654, 6664), False, 'from PIL import Image\n'), ((6851, 6872), 'PIL.Image.open', 'Image.open', (['mask_file'], {}), '(mask_file)\n', (6861, 6872), False, 'from PIL import Image\n'), ((1162, 1210), 'os.path.join', 'os.path.join', (['self.mask_dir', '_video', '"""00000.png"""'], {}), "(self.mask_dir, _video, '00000.png')\n", (1174, 1210), False, 'import os\n'), ((1383, 1434), 'os.path.join', 'os.path.join', (['self.mask480_dir', '_video', '"""00000.png"""'], {}), "(self.mask480_dir, _video, '00000.png')\n", (1395, 1434), False, 'import os\n'), ((4795, 4816), 'PIL.Image.open', 'Image.open', (['mask_file'], {}), '(mask_file)\n', (4805, 4816), False, 'from PIL import Image\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
*Copyright (c) 2015, <NAME>*
All rights reserved.
See the LICENSE file for license information.
odeintw
=======
`odeintw` provides a wrapper of `scipy.integrate.odeint` that allows it to
handle complex and matrix differential equations. That is, it can solve
equations of the form
dZ/dt = F(Z, t, param1, param2, ...)
where `t` is real and `Z` is a real or complex array.
Since `odeintw` is just a wrapper of `scipy.integrate.odeint`, it requires
`scipy` to be installed.
"""
'''Example1:
=============
To solve the equations
dz1/dt = -z1 * (K - z2)
dz2/dt = L - M*z2
where `K`, `L` and `M` are (possibly complex) constants, we first define the
right-hand-side of the differential equations:
'''
import numpy as np
from odeintw import odeintw
def zfunc(z, t, K, L, M):
z1, z2 = z
return [-z1 * (K - z2), L - M*z2]
# The Jacobian is
def zjac(z, t, K, L, M):
z1, z2 = z
jac = np.array([[z2 - K, z1], [0, -M]])
return jac
# The following calls `odeintw` with appropriate arguments
# Initial conditions.
z0 = np.array([1+2j, 3+4j])
# Desired time samples for the solution.
t = np.linspace(0, 5, 101)
# Parameters.
K = 2
L = 4 - 2j
M = 2.5
# Call odeintw
z, infodict = odeintw(zfunc, z0, t, args=(K, L, M), Dfun=zjac,
full_output=True)
# The components of the solution can be plotted with `matplotlib` as follows
import matplotlib.pyplot as plt
color1 = (0.5, 0.4, 0.3)
color2 = (0.2, 0.2, 1.0)
plt.plot(t, z[:, 0].real, color=color1, label='z1.real', linewidth=1.5)
plt.plot(t, z[:, 0].imag, '--', color=color1, label='z1.imag', linewidth=2)
plt.plot(t, z[:, 1].real, color=color2, label='z2.real', linewidth=1.5)
plt.plot(t, z[:, 1].imag, '--', color=color2, label='z2.imag', linewidth=2)
plt.xlabel('t')
plt.grid(True)
plt.legend(loc='best')
plt.show()
# Plot: 
'''Example2:
============
We'll solve the matrix differential equation
dA/dt = C * A
where `A` and `C` are real 2x2 matrices.
The differential equation is defined with the function
'''
def asys(a, t, c):
return c.dot(a)
# Both `a` and `c` are assumed to be `n x n` matrices. The function
# `asys` will work for any `n`, but we'll specialize to `2 x 2` in our
# implementation of the Jacobian:
def ajac(a, t, c):
# asys returns [[F[0,0](a,t), F[0,1](a,t),
# F[1,0](a,t), F[1,1](a,t)]]
# This function computes jac[m, n, i, j]
# jac[m, n, i, j] holds dF[m,n]/da[i,j]
jac = np.zeros((2,2,2,2))
jac[0, 0, 0, 0] = c[0, 0]
jac[0, 0, 1, 0] = c[0, 1]
jac[0, 1, 0, 1] = c[0, 0]
jac[0, 1, 1, 1] = c[0, 1]
jac[1, 0, 0, 0] = c[1, 0]
jac[1, 0, 1, 0] = c[1, 1]
jac[1, 1, 0, 1] = c[1, 0]
jac[1, 1, 1, 1] = c[1, 1]
# (As with `odeint`, giving an explicit Jacobian is optional.)
# Now create the arguments and call `odeintw`:
# The matrix of coefficients `c`. This is passed as an
# extra argument to `asys` and `ajac`.
c = np.array([[-0.5, -1.25],
[ 0.5, -0.25]])
# Desired time samples for the solution.
t = np.linspace(0, 10, 201)
# a0 is the initial condition.
a0 = np.array([[0.0, 1.0],[2.0, 3.0]])
# Call `odeintw`.
sol = odeintw(asys, a0, t, Dfun=ajac, args=(c,))
# The solution can be plotted with `matplotlib`:
import matplotlib.pyplot as plt
plt.figure(1)
plt.clf()
color1 = (0.5, 0.4, 0.3)
color2 = (0.2, 0.2, 1.0)
plt.plot(t, sol[:, 0, 0], color=color1, label='a[0,0]')
plt.plot(t, sol[:, 0, 1], color=color2, label='a[0,1]')
plt.plot(t, sol[:, 1, 0], '--', color=color1, linewidth=1.5, label='a[1,0]')
plt.plot(t, sol[:, 1, 1], '--', color=color2, linewidth=1.5, label='a[1,1]')
plt.legend(loc='best')
plt.grid(True)
plt.show()
# Plot:  | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"numpy.zeros",
"odeintw.odeintw",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((1106, 1136), 'numpy.array', 'np.array', (['[1 + 2.0j, 3 + 4.0j]'], {}), '([1 + 2.0j, 3 + 4.0j])\n', (1114, 1136), True, 'import numpy as np\n'), ((1175, 1197), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(101)'], {}), '(0, 5, 101)\n', (1186, 1197), True, 'import numpy as np\n'), ((1268, 1334), 'odeintw.odeintw', 'odeintw', (['zfunc', 'z0', 't'], {'args': '(K, L, M)', 'Dfun': 'zjac', 'full_output': '(True)'}), '(zfunc, z0, t, args=(K, L, M), Dfun=zjac, full_output=True)\n', (1275, 1334), False, 'from odeintw import odeintw\n'), ((1523, 1594), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'z[:, 0].real'], {'color': 'color1', 'label': '"""z1.real"""', 'linewidth': '(1.5)'}), "(t, z[:, 0].real, color=color1, label='z1.real', linewidth=1.5)\n", (1531, 1594), True, 'import matplotlib.pyplot as plt\n'), ((1595, 1670), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'z[:, 0].imag', '"""--"""'], {'color': 'color1', 'label': '"""z1.imag"""', 'linewidth': '(2)'}), "(t, z[:, 0].imag, '--', color=color1, label='z1.imag', linewidth=2)\n", (1603, 1670), True, 'import matplotlib.pyplot as plt\n'), ((1671, 1742), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'z[:, 1].real'], {'color': 'color2', 'label': '"""z2.real"""', 'linewidth': '(1.5)'}), "(t, z[:, 1].real, color=color2, label='z2.real', linewidth=1.5)\n", (1679, 1742), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1818), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'z[:, 1].imag', '"""--"""'], {'color': 'color2', 'label': '"""z2.imag"""', 'linewidth': '(2)'}), "(t, z[:, 1].imag, '--', color=color2, label='z2.imag', linewidth=2)\n", (1751, 1818), True, 'import matplotlib.pyplot as plt\n'), ((1819, 1834), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (1829, 1834), True, 'import matplotlib.pyplot as plt\n'), ((1835, 1849), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1843, 1849), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1872), 'matplotlib.pyplot.legend', 
'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1860, 1872), True, 'import matplotlib.pyplot as plt\n'), ((1873, 1883), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1881, 1883), True, 'import matplotlib.pyplot as plt\n'), ((3078, 3117), 'numpy.array', 'np.array', (['[[-0.5, -1.25], [0.5, -0.25]]'], {}), '([[-0.5, -1.25], [0.5, -0.25]])\n', (3086, 3117), True, 'import numpy as np\n'), ((3183, 3206), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(201)'], {}), '(0, 10, 201)\n', (3194, 3206), True, 'import numpy as np\n'), ((3244, 3278), 'numpy.array', 'np.array', (['[[0.0, 1.0], [2.0, 3.0]]'], {}), '([[0.0, 1.0], [2.0, 3.0]])\n', (3252, 3278), True, 'import numpy as np\n'), ((3303, 3345), 'odeintw.odeintw', 'odeintw', (['asys', 'a0', 't'], {'Dfun': 'ajac', 'args': '(c,)'}), '(asys, a0, t, Dfun=ajac, args=(c,))\n', (3310, 3345), False, 'from odeintw import odeintw\n'), ((3430, 3443), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3440, 3443), True, 'import matplotlib.pyplot as plt\n'), ((3444, 3453), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3451, 3453), True, 'import matplotlib.pyplot as plt\n'), ((3504, 3559), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sol[:, 0, 0]'], {'color': 'color1', 'label': '"""a[0,0]"""'}), "(t, sol[:, 0, 0], color=color1, label='a[0,0]')\n", (3512, 3559), True, 'import matplotlib.pyplot as plt\n'), ((3560, 3615), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sol[:, 0, 1]'], {'color': 'color2', 'label': '"""a[0,1]"""'}), "(t, sol[:, 0, 1], color=color2, label='a[0,1]')\n", (3568, 3615), True, 'import matplotlib.pyplot as plt\n'), ((3616, 3692), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sol[:, 1, 0]', '"""--"""'], {'color': 'color1', 'linewidth': '(1.5)', 'label': '"""a[1,0]"""'}), "(t, sol[:, 1, 0], '--', color=color1, linewidth=1.5, label='a[1,0]')\n", (3624, 3692), True, 'import matplotlib.pyplot as plt\n'), ((3693, 3769), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 
'sol[:, 1, 1]', '"""--"""'], {'color': 'color2', 'linewidth': '(1.5)', 'label': '"""a[1,1]"""'}), "(t, sol[:, 1, 1], '--', color=color2, linewidth=1.5, label='a[1,1]')\n", (3701, 3769), True, 'import matplotlib.pyplot as plt\n'), ((3770, 3792), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3780, 3792), True, 'import matplotlib.pyplot as plt\n'), ((3793, 3807), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3801, 3807), True, 'import matplotlib.pyplot as plt\n'), ((3808, 3818), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3816, 3818), True, 'import matplotlib.pyplot as plt\n'), ((969, 1002), 'numpy.array', 'np.array', (['[[z2 - K, z1], [0, -M]]'], {}), '([[z2 - K, z1], [0, -M]])\n', (977, 1002), True, 'import numpy as np\n'), ((2606, 2628), 'numpy.zeros', 'np.zeros', (['(2, 2, 2, 2)'], {}), '((2, 2, 2, 2))\n', (2614, 2628), True, 'import numpy as np\n')] |
import time
import os
import sys
import numpy as np
from getting_data import load_sample, feature_key_list, get_categorical_encoded_data, decode_paper
from ranker_helper import get_scores, start_record_paper_count, end_record_paper_count, processing_log
from s2search_score_pdp import save_pdp_to_npz
from anchor import anchor_tabular
import pytz
import datetime
import logging
import psutil
utc_tz = pytz.timezone('America/Montreal')
def get_class(score):
if score <= -17:
return '(,-17]'
elif score <= -10:
return '(-17, -10]'
elif score <= -5:
return '(-10, -5]'
elif score <= 0:
return '(-5, <0]'
elif score <= 3:
return '(0, 3]'
elif score <= 5:
return '(3, 5]'
elif score <= 6:
return '(5, 6]'
elif score <= 7:
return '(6, 7]'
elif score <= 8:
return '(7, 8]'
elif score <= 9:
return '(8, 9]'
else:
return '(9,)'
def remove_duplicate(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def get_time_str():
return datetime.datetime.now(tz=utc_tz).strftime("%m/%d/%Y, %H:%M:%S")
def metrics_to_str(metrics):
return ', '.join([f'{feature_name}: {len(metrics[feature_name])}' for feature_name in metrics.keys()])
def compute_and_save(output_exp_dir, output_data_sample_name, query, rg, data_exp_name, data_sample_name, logger, explainer_configs):
metrics_npz_file = os.path.join(
output_exp_dir, 'scores', f'{output_data_sample_name}_anchor_metrics_{rg[0]}_{rg[1]}.npz')
st = time.time()
logger.info(f'\n[{get_time_str()}] start anchor metrics')
paper_data = load_sample(data_exp_name, data_sample_name, not_df=True)
task_name = f'get prediction of paper data for {output_exp_dir} {output_data_sample_name} {rg}'
start_record_paper_count(task_name)
y_pred_file = os.path.join(
output_exp_dir, 'scores', f'{data_sample_name}_y_pred.npz')
if not os.path.exists(y_pred_file):
y_pred = get_scores(query, paper_data)
save_pdp_to_npz('.', y_pred_file, y_pred)
else:
y_pred = np.load(y_pred_file)['arr_0']
end_record_paper_count(task_name)
# make class_name
class_name = ['(,-17]', '(-17, -10]', '(-10, -5]', '(-5, <0]',
'(0, 3]', '(3, 5]', '(5, 6]', '(6, 7]', '(7, 8]', '(8, 9]', '(9,)']
task_name = f'get categorical paper data for {output_exp_dir} {output_data_sample_name} {rg}'
start_record_paper_count(task_name)
categorical_name, paper_data = get_categorical_encoded_data(
data_exp_name, data_sample_name, query, paper_data)
end_record_paper_count(task_name)
explainer = anchor_tabular.AnchorTabularExplainer(
class_name,
feature_key_list,
paper_data,
categorical_name)
def pred_fn(x):
predictions = get_scores(query, [decode_paper(
categorical_name, p) for p in x], ptf=False)
encoded_pred = [class_name.index(get_class(pp)) for pp in predictions]
return np.array(encoded_pred)
data_len = len(paper_data)
numerator, denominator = rg
process_len = int(data_len / denominator) + 1
curr_numerator = 1
start = 0
end = process_len
while curr_numerator != numerator:
start += process_len
end = end + process_len if end + process_len < data_len else data_len
curr_numerator += 1
metrics = dict(
title=[],
abstract=[],
venue=[],
authors=[],
year=[],
n_citations=[],
)
previous_work_idx = start
if os.path.exists(metrics_npz_file):
previous_data = np.load(metrics_npz_file)
metrics = dict(
title=list(previous_data['title']),
abstract=list(previous_data['abstract']),
venue=list(previous_data['venue']),
authors=list(previous_data['authors']),
year=list(previous_data['year']),
n_citations=list(previous_data['n_citations']),
)
previous_work_idx = previous_data['idx'][0] + 1
task_name = f'get anchor metrics for {output_exp_dir} {output_data_sample_name} {rg} from index: {previous_work_idx} to {end - 1}'
start_record_paper_count(task_name)
sst = time.time()
th = explainer_configs.get('threshold') if explainer_configs.get(
'threshold') != None else 0.9999
tau = explainer_configs.get(
'tau') if explainer_configs.get('tau') != None else 0.2
logger.info(
f'[{get_time_str()}] start computing anchor from index: {previous_work_idx} to {end - 1} with config: {th} {tau}')
count = 0
for i in range(previous_work_idx, end):
exp = explainer.explain_instance(
paper_data[i], pred_fn, threshold=th, tau=tau)
previous_single_precision = 0
for j in range(len(exp.names())):
name = exp.names()[j]
current_single_precision = exp.precision(
j) - previous_single_precision
previous_single_precision = exp.precision(j)
for feature_name in metrics.keys():
if name.startswith(f'{feature_name}'):
metrics[feature_name].append(current_single_precision)
count += 1
if count % 10 == 0:
ett = round(time.time() - sst, 6)
avg_time = round(ett / count, 4)
logger.info(f'[{get_time_str()}] ({i} / {end - 1}) {metrics_to_str(metrics)} \
within ({ett} total / {avg_time} average)')
logger.info(
f'estimate time left: {datetime.timedelta(seconds=((end-previous_work_idx-count) * avg_time))}')
save_pdp_to_npz('.', metrics_npz_file,
title=metrics['title'],
abstract=metrics['abstract'],
venue=metrics['venue'],
authors=metrics['authors'],
year=metrics['year'],
n_citations=metrics['n_citations'],
idx=[i]
)
end_record_paper_count(task_name)
logger.info(
f'[{get_time_str()}] end anchor metrics witin {round(time.time() - st, 6)}')
def get_anchor_metrics(exp_dir_path, query, task_config, explainer_configs, data_info):
current_sample_name = data_info['current_sample_name']
sample_src_name = data_info['sample_src_name']
sample_src_exp_name = data_info['sample_src_exp_name']
rg = task_config['range']
log_dir = os.path.join(exp_dir_path, 'log')
if not os.path.exists(log_dir):
os.mkdir(log_dir)
log_file_path = os.path.join(
log_dir, f'{current_sample_name}_anchor_{rg}.log')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# remain one log file handler
if logger.hasHandlers():
for h in logger.handlers:
logger.removeHandler(h)
logger.addHandler(logging.FileHandler(
filename=log_file_path, encoding='utf-8'))
rg = task_config['range']
if sys.platform != "darwin":
p = psutil.Process()
worker = task_config['cpu']
logger.info(f"\nChild #{worker}: {p}, affinity {p.cpu_affinity()}")
p.cpu_affinity(worker)
logger.info(
f"Child #{worker}: Set my affinity to {worker}, affinity now {p.cpu_affinity()}")
compute_and_save(
exp_dir_path, current_sample_name, query, rg,
sample_src_exp_name, sample_src_name, logger, explainer_configs)
| [
"os.mkdir",
"numpy.load",
"getting_data.decode_paper",
"getting_data.load_sample",
"os.path.join",
"ranker_helper.end_record_paper_count",
"logging.FileHandler",
"os.path.exists",
"datetime.timedelta",
"datetime.datetime.now",
"s2search_score_pdp.save_pdp_to_npz",
"ranker_helper.start_record_p... | [((402, 435), 'pytz.timezone', 'pytz.timezone', (['"""America/Montreal"""'], {}), "('America/Montreal')\n", (415, 435), False, 'import pytz\n'), ((1480, 1587), 'os.path.join', 'os.path.join', (['output_exp_dir', '"""scores"""', 'f"""{output_data_sample_name}_anchor_metrics_{rg[0]}_{rg[1]}.npz"""'], {}), "(output_exp_dir, 'scores',\n f'{output_data_sample_name}_anchor_metrics_{rg[0]}_{rg[1]}.npz')\n", (1492, 1587), False, 'import os\n'), ((1603, 1614), 'time.time', 'time.time', ([], {}), '()\n', (1612, 1614), False, 'import time\n'), ((1694, 1751), 'getting_data.load_sample', 'load_sample', (['data_exp_name', 'data_sample_name'], {'not_df': '(True)'}), '(data_exp_name, data_sample_name, not_df=True)\n', (1705, 1751), False, 'from getting_data import load_sample, feature_key_list, get_categorical_encoded_data, decode_paper\n'), ((1857, 1892), 'ranker_helper.start_record_paper_count', 'start_record_paper_count', (['task_name'], {}), '(task_name)\n', (1881, 1892), False, 'from ranker_helper import get_scores, start_record_paper_count, end_record_paper_count, processing_log\n'), ((1911, 1983), 'os.path.join', 'os.path.join', (['output_exp_dir', '"""scores"""', 'f"""{data_sample_name}_y_pred.npz"""'], {}), "(output_exp_dir, 'scores', f'{data_sample_name}_y_pred.npz')\n", (1923, 1983), False, 'import os\n'), ((2191, 2224), 'ranker_helper.end_record_paper_count', 'end_record_paper_count', (['task_name'], {}), '(task_name)\n', (2213, 2224), False, 'from ranker_helper import get_scores, start_record_paper_count, end_record_paper_count, processing_log\n'), ((2504, 2539), 'ranker_helper.start_record_paper_count', 'start_record_paper_count', (['task_name'], {}), '(task_name)\n', (2528, 2539), False, 'from ranker_helper import get_scores, start_record_paper_count, end_record_paper_count, processing_log\n'), ((2575, 2660), 'getting_data.get_categorical_encoded_data', 'get_categorical_encoded_data', (['data_exp_name', 'data_sample_name', 'query', 
'paper_data'], {}), '(data_exp_name, data_sample_name, query, paper_data\n )\n', (2603, 2660), False, 'from getting_data import load_sample, feature_key_list, get_categorical_encoded_data, decode_paper\n'), ((2669, 2702), 'ranker_helper.end_record_paper_count', 'end_record_paper_count', (['task_name'], {}), '(task_name)\n', (2691, 2702), False, 'from ranker_helper import get_scores, start_record_paper_count, end_record_paper_count, processing_log\n'), ((2720, 2821), 'anchor.anchor_tabular.AnchorTabularExplainer', 'anchor_tabular.AnchorTabularExplainer', (['class_name', 'feature_key_list', 'paper_data', 'categorical_name'], {}), '(class_name, feature_key_list,\n paper_data, categorical_name)\n', (2757, 2821), False, 'from anchor import anchor_tabular\n'), ((3631, 3663), 'os.path.exists', 'os.path.exists', (['metrics_npz_file'], {}), '(metrics_npz_file)\n', (3645, 3663), False, 'import os\n'), ((4253, 4288), 'ranker_helper.start_record_paper_count', 'start_record_paper_count', (['task_name'], {}), '(task_name)\n', (4277, 4288), False, 'from ranker_helper import get_scores, start_record_paper_count, end_record_paper_count, processing_log\n'), ((4300, 4311), 'time.time', 'time.time', ([], {}), '()\n', (4309, 4311), False, 'import time\n'), ((6137, 6170), 'ranker_helper.end_record_paper_count', 'end_record_paper_count', (['task_name'], {}), '(task_name)\n', (6159, 6170), False, 'from ranker_helper import get_scores, start_record_paper_count, end_record_paper_count, processing_log\n'), ((6577, 6610), 'os.path.join', 'os.path.join', (['exp_dir_path', '"""log"""'], {}), "(exp_dir_path, 'log')\n", (6589, 6610), False, 'import os\n'), ((6693, 6756), 'os.path.join', 'os.path.join', (['log_dir', 'f"""{current_sample_name}_anchor_{rg}.log"""'], {}), "(log_dir, f'{current_sample_name}_anchor_{rg}.log')\n", (6705, 6756), False, 'import os\n'), ((6780, 6807), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6797, 6807), False, 'import logging\n'), 
((2004, 2031), 'os.path.exists', 'os.path.exists', (['y_pred_file'], {}), '(y_pred_file)\n', (2018, 2031), False, 'import os\n'), ((2050, 2079), 'ranker_helper.get_scores', 'get_scores', (['query', 'paper_data'], {}), '(query, paper_data)\n', (2060, 2079), False, 'from ranker_helper import get_scores, start_record_paper_count, end_record_paper_count, processing_log\n'), ((2088, 2129), 's2search_score_pdp.save_pdp_to_npz', 'save_pdp_to_npz', (['"""."""', 'y_pred_file', 'y_pred'], {}), "('.', y_pred_file, y_pred)\n", (2103, 2129), False, 'from s2search_score_pdp import save_pdp_to_npz\n'), ((3078, 3100), 'numpy.array', 'np.array', (['encoded_pred'], {}), '(encoded_pred)\n', (3086, 3100), True, 'import numpy as np\n'), ((3689, 3714), 'numpy.load', 'np.load', (['metrics_npz_file'], {}), '(metrics_npz_file)\n', (3696, 3714), True, 'import numpy as np\n'), ((6622, 6645), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (6636, 6645), False, 'import os\n'), ((6655, 6672), 'os.mkdir', 'os.mkdir', (['log_dir'], {}), '(log_dir)\n', (6663, 6672), False, 'import os\n'), ((6998, 7059), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': 'log_file_path', 'encoding': '"""utf-8"""'}), "(filename=log_file_path, encoding='utf-8')\n", (7017, 7059), False, 'import logging\n'), ((7147, 7163), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (7161, 7163), False, 'import psutil\n'), ((1119, 1151), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'utc_tz'}), '(tz=utc_tz)\n', (1140, 1151), False, 'import datetime\n'), ((2157, 2177), 'numpy.load', 'np.load', (['y_pred_file'], {}), '(y_pred_file)\n', (2164, 2177), True, 'import numpy as np\n'), ((5695, 5915), 's2search_score_pdp.save_pdp_to_npz', 'save_pdp_to_npz', (['"""."""', 'metrics_npz_file'], {'title': "metrics['title']", 'abstract': "metrics['abstract']", 'venue': "metrics['venue']", 'authors': "metrics['authors']", 'year': "metrics['year']", 'n_citations': "metrics['n_citations']", 
'idx': '[i]'}), "('.', metrics_npz_file, title=metrics['title'], abstract=\n metrics['abstract'], venue=metrics['venue'], authors=metrics['authors'],\n year=metrics['year'], n_citations=metrics['n_citations'], idx=[i])\n", (5710, 5915), False, 'from s2search_score_pdp import save_pdp_to_npz\n'), ((2913, 2946), 'getting_data.decode_paper', 'decode_paper', (['categorical_name', 'p'], {}), '(categorical_name, p)\n', (2925, 2946), False, 'from getting_data import load_sample, feature_key_list, get_categorical_encoded_data, decode_paper\n'), ((5343, 5354), 'time.time', 'time.time', ([], {}), '()\n', (5352, 5354), False, 'import time\n'), ((5609, 5681), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '((end - previous_work_idx - count) * avg_time)'}), '(seconds=(end - previous_work_idx - count) * avg_time)\n', (5627, 5681), False, 'import datetime\n'), ((6249, 6260), 'time.time', 'time.time', ([], {}), '()\n', (6258, 6260), False, 'import time\n')] |
from dataclasses import dataclass
import numpy as np
from loguru import logger
def centroid_to_bvol(centers, bvol_dim=(10, 10, 10), flipxy=False):
"""Centroid to bounding volume
Parameters
----------
centers : np.ndarray, (nx3)
3d coordinates of the point to use as the centroid of the bounding box
bvol_dim : tuple, optional
Dimensions of the bounding volume centered at the points given by centers, by default (10, 10, 10)
flipxy : bool, optional
Flip x and y coordinates, by default False
Returns
-------
np.ndarray, (nx6)
(z_start, x_start, y_start, z_fin, x_fin, y_fin)
"""
d, w, h = bvol_dim
if flipxy:
bvols = np.array(
[
(cz - d, cx - w, cy - h, cz + d, cx + w, cy + h)
for cz, cx, cy, _ in centers
]
)
else:
bvols = np.array(
[
(cz - d, cx - w, cy - h, cz + d, cx + w, cy + h)
for cz, cy, cx, _ in centers
]
)
return bvols
def centroid_to_detnet_bvol(centers, bvol_dim=(10, 10, 10), flipxy=False):
"""Centroid to bounding volume for patches
Parameters
----------
centers : np.ndarray, (nx3)
3d coordinates of the point to use as the centroid of the bounding box
bvol_dim : tuple, optional
Dimensions of the bounding volume centered at the points given by centers, by default (10, 10, 10)
flipxy : bool, optional
Flip x and y coordinates, by default False
Returns
-------
np.ndarray, (nx6)
(x_start, y_start, x_fin, y_fin, z_start, z_fin)
"""
d, w, h = bvol_dim
if flipxy:
bvols = np.array(
[
(cx - w, cy - h, cx + w, cy + h, cz - d, cz + d)
for cz, cx, cy, _ in centers
]
)
else:
bvols = np.array(
[
(cx - w, cy - h, cx + w, cy + h, cz - d, cz + d)
for cz, cy, cx, _ in centers
]
)
return bvols
def centroid_to_boxes(centers, bvol_dim=(10, 10, 10), flipxy=False):
"""Centroid to bounding volume
Parameters
----------
centers : np.ndarray, (nx3)
3d coordinates of the point to use as the centroid of the bounding box
bvol_dim : tuple, optional
Dimensions of the bounding volume centered at the points given by centers, by default (10, 10, 10)
flipxy : bool, optional
Flip x and y coordinates, by default False
Returns
-------
np.ndarray, (nx6)
(z_start, x_start, y_start, z_fin, x_fin, y_fin)
"""
d, w, h = bvol_dim
if flipxy:
bvols = np.array(
[
(0, cz, cx, cy, cz - d, cx - w, cy - h, cz + d, cx + w, cy + h)
for cz, cx, cy, _ in centers
]
)
else:
bvols = np.array(
[
(0, cz, cx, cy, cz - d, cx - w, cy - h, cz + d, cx + w, cy + h)
for cz, cy, cx, _ in centers
]
)
return bvols
def grid_of_points(padded_vol, padding, grid_dim=(4, 16, 16)):
"""Grid of points
Parameters
----------
padded_vol : np.ndarray
Input image
padding : Tuple
Three-element tuple giving padding to add to both size per dimension
grid_dim : tuple, optional
Grid dimensions, by default (4, 16, 16)
Returns
-------
np.ndarray
Grid of points in x,y,z as a numpy array
"""
z_dim, x_dim, y_dim = grid_dim
spacez = np.linspace(0, padded_vol.shape[0] - (2 * padding[0]), z_dim)
spacex = np.linspace(0, padded_vol.shape[1] - (2 * padding[1]), x_dim)
spacey = np.linspace(0, padded_vol.shape[2] - (2 * padding[2]), y_dim)
zv, xv, yv = np.meshgrid(spacez, spacex, spacey)
zv = zv + padding[0]
xv = xv + padding[1]
yv = yv + padding[2]
gridarr = np.stack((zv, xv, yv)).astype(np.uint32)
gridarr[:, 1, 1, 1]
zv_f = zv.reshape((z_dim * x_dim * y_dim))
xv_f = xv.reshape((z_dim * x_dim * y_dim))
yv_f = yv.reshape((z_dim * x_dim * y_dim))
class_code = [0] * len(zv_f)
trans_pts = np.stack((zv_f, xv_f, yv_f, class_code)).T.astype(np.uint32)
return trans_pts
def generate_random_points_in_volume(vol, num_pts, border=(32, 32, 32)):
"""Generate a set of random points within a given image volume
Parameters
----------
vol : np.ndarray
Input image
num_pts : Int
Number of points to generate
border : tuple, optional
Don't generate points within this border, by default (32, 32, 32)
Returns
-------
np.ndarray
Array of points
"""
pts = np.random.random((num_pts, 4))
pts[:, 0] = pts[:, 0] * (vol.shape[0] - (2 * border[0])) + border[0]
pts[:, 1] = pts[:, 1] * (vol.shape[1] - (2 * border[1])) + border[1]
pts[:, 2] = pts[:, 2] * (vol.shape[2] - (2 * border[2])) + border[2]
pts = np.abs(pts)
return pts
def offset_points(pts, offset, scale=32, random_offset=False):
"""Offset points
Parameters
----------
pts : Input array of z,x,y points
[description]
offset : Tuple
(Z,X,Y) giving offset in each axis
scale : int, optional
Value to scale image dimensions by, by default 32
random_offset : bool, optional
Whether to add a random offset, by default False
Returns
-------
np.ndarray
Array of offset points
"""
trans_pts = pts.copy()
trans_pts[:, 0] = pts[:, 0] + offset[0]
trans_pts[:, 1] = pts[:, 1] + offset[1]
trans_pts[:, 2] = pts[:, 2] + offset[2]
if random_offset:
offset_rand = np.random.random(trans_pts.shape) * scale
offset_rand[:, 3] = np.zeros((len(trans_pts)))
trans_pts = trans_pts + offset_rand
return trans_pts
def sample_volumes(sel_entities, precropped_vol):
sampled_vols = []
for i in range(len(sel_entities)):
ent = sel_entities.iloc[i]
bb = np.array(
[
ent["bb_s_z"],
ent["bb_f_z"],
ent["bb_s_x"],
ent["bb_f_x"],
ent["bb_s_y"],
ent["bb_f_y"],
]
).astype(np.uint32)
sampled_vols.append(sample_bvol(precropped_vol, bb))
return sampled_vols
def viz_bvols(input_array, bvols, flip_coords=False):
bvol_mask = np.zeros_like(input_array)
print(f"Making {len(bvols)} bvols")
for bvol in bvols:
# print(bvol)
bvol = bvol.astype(np.int32)
z_s = np.max((0, bvol[0]))
z_f = np.min((bvol[3], input_array.shape[0]))
x_s = np.max((0, bvol[1]))
x_f = np.min((bvol[4], input_array.shape[1]))
y_s = np.max((0, bvol[2]))
y_f = np.min((bvol[5], input_array.shape[2]))
# print(f"Sampling {z_s}, {z_f}, {x_s}, {x_f}, {y_s}, {y_f}")
if flip_coords:
bvol_mask[z_s:z_f, y_s:y_f, x_s:x_f] = 1.0
else:
bvol_mask[z_s:z_f, x_s:x_f, y_s:y_f] = 1.0
return bvol_mask
def sample_region_at_pt(img_volume, pt, dim):
z, x, y = pt
d, w, h = dim
z_st = np.max((0, z - d))
z_end = np.min((z + d, img_volume.shape[0]))
x_st = np.max((0, x - w))
x_end = np.min((x + w, img_volume.shape[1]))
y_st = np.max((0, y - h))
y_end = np.min((y + h, img_volume.shape[2]))
return img_volume[z_st:z_end, x_st:x_end, y_st:y_end]
def sample_bvol(img_volume, bvol):
z_st, z_end, x_st, x_end, y_st, y_end = bvol
return img_volume[z_st:z_end, x_st:x_end, y_st:y_end]
def get_vol_in_cent_box(img_volume, z_st, z_end, x, y, w, h):
return img_volume[z_st:z_end, x - w : x + w, y - h : y + h]
def sample_roi(img_vol, tabledata, i=0, vol_size=(32, 32, 32)):
# Sampling ROI from an entity table
print(f"Sampling from vol of shape {img_vol.shape}")
pad_slice, pad_y, pad_x = np.array(vol_size) // 2
z, x, y = tabledata["z"][i], tabledata["x"][i], tabledata["y"][i]
logger.info(f"Sampling location {z} {x} {y}")
# make a bv
bb_zb = np.clip(int(z) - pad_slice, 0, img_vol.shape[0])
bb_zt = np.clip(int(z) + pad_slice, 0, img_vol.shape[0])
bb_xl = np.clip(int(x) - pad_slice, 0, img_vol.shape[1])
bb_xr = np.clip(int(x) + pad_slice, 0, img_vol.shape[1])
bb_yl = np.clip(int(y) - pad_slice, 0, img_vol.shape[2])
bb_yr = np.clip(int(y) + pad_slice, 0, img_vol.shape[2])
vol1 = get_vol_in_bbox(img_vol, bb_zb, bb_zt, bb_xl, bb_xr, bb_yl, bb_yr)
print(f"Sampled vol of shape {vol1.shape}")
if vol1.shape[0] == 0 or vol1.shape[1] == 0 or vol1.shape[2] == 0:
vol1 = np.zeros(vol_size)
return vol1
def get_vol_in_bbox(image_volume, slice_start, slice_end, xst, xend, yst, yend):
return image_volume[slice_start:slice_end, xst:xend, yst:yend]
def get_centered_vol_in_bbox(image_volume, slice_start, slice_end, x, y, w, h):
return image_volume[slice_start:slice_end, x - w : x + w, y - h : y + h]
def crop_vol_in_bbox(image_volume, slice_start, slice_end, x, y, w, h):
return image_volume[slice_start:slice_end, x : x + w, y : y + h]
def get_centered_img_in_bbox(image_volume, sliceno, x, y, w, h):
w = w // 2
h = h // 2
return image_volume[int(sliceno), x - w : x + w, y - h : y + h]
def get_img_in_bbox(image_volume, sliceno, x, y, w, h):
return image_volume[int(sliceno), x - w : x + w, y - h : y + h]
@dataclass
class MarkedPatches:
"""Set of N patches, with associated per-patch 3d points
There is also a per-patch location which is the location the patch was sampled from in the original volume.
"""
vols: np.ndarray # (N, Z, X, Y) image data within patch
vols_pts: np.ndarray # (N, Z, X, Y) cropped point geometry within patch
vols_locs: np.ndarray # (N, Z, X, Y, C) centroid location of patch and class code
vols_bbs: np.ndarray # (N, Z_start, Z_fin, X_start, X_fin, Y_start, Y_fin)bounding box for patch
# todo: list of patch sizes
# todo: pad
def sample_marked_patches(
img_volume, locs, pts, patch_size=(32, 32, 32), debug_verbose=False
):
"""Samples a large image volume into a MarkedPatches object.
Uses bounding volumes, and crops the image volume and associated geometry
into a list of cropped volumes and cropped geometry.
Parameters
----------
img_volume : {np.ndarray}
image volume
locs : {np.array of N x 4}
N point locations, with a label in the final column
pts : {np.array of P x k}
point cloud of size P (the first 3 columns are used as the z,x,y coords)
patch_size : {tuple, int x 3)
-- Size of patch to sample (default: {(32,32,32)}), optional
debug_verbose : bool, optional
[description], by default False
Returns
-------
MarkedPatches
volumes with associated geometry
"""
vols = []
img_titles = []
vols_pts = []
vols_locs = []
vols_bbs = []
print(
f"Generating {len(locs)} patch volumes from image of shape {img_volume.shape}"
)
for j in range(len(locs)):
if locs[j].shape[0] == 4:
sliceno, x, y, c = locs[j]
else:
sliceno, x, y = locs[j]
d, w, h = patch_size
w = w // 2
h = h // 2
sliceno = int(sliceno)
x = int(np.ceil(x))
y = int(np.ceil(y))
slice_start = np.max([0, sliceno - int(patch_size[0] / 2.0)])
slice_end = np.min([sliceno + int(patch_size[0] / 2.0), img_volume.shape[0]])
out_of_bounds = np.unique(
np.hstack(
(
np.where(pts[:, 1] <= x - w)[0],
np.where(pts[:, 1] >= x + w)[0],
np.where(pts[:, 2] <= y - h)[0],
np.where(pts[:, 2] >= y + h)[0],
np.where(pts[:, 0] <= slice_start)[0],
np.where(pts[:, 0] >= slice_end)[0],
)
)
)
pts_c = pts.copy()
sel_pts = np.delete(pts_c, out_of_bounds, axis=0)
if debug_verbose:
print("Shape of original pt data {}".format(pts.shape))
print("Number of out of bounds pts: {}".format(out_of_bounds.shape))
img = get_centered_vol_in_bbox(img_volume, slice_start, slice_end, y, x, h, w)
sel_pts[:, 0] = sel_pts[:, 0] - slice_start
sel_pts[:, 1] = sel_pts[:, 1] - (x - w)
sel_pts[:, 2] = sel_pts[:, 2] - (y - h)
if img.shape == patch_size:
vols.append(img)
else:
incomplete_img = np.zeros(patch_size)
incomplete_img[0 : img.shape[0], 0 : img.shape[1], 0 : img.shape[2]] = img
vols.append(incomplete_img)
vols_pts.append(sel_pts)
vols_bbs.append([slice_start, slice_end, x - w, x + w, y - h, y + h])
vols_locs.append(locs[j])
vols = np.array(vols)
vols_pts = np.array(vols_pts)
vols_bbs = np.array(vols_bbs)
vols_locs = np.array(vols_locs)
marked_patches = MarkedPatches(vols, vols_pts, vols_locs, vols_bbs)
print(f"Generated {len(locs)} MarkedPatches of shape {vols.shape}")
return marked_patches
def crop_vol_and_pts(
    img_data,
    pts,
    location=(60, 700, 700),
    patch_size=(40, 300, 300),
    debug_verbose=False,
    offset=False,
):
    """Crop a sub-volume of ``img_data`` anchored at ``location`` and keep
    only the points that fall strictly inside the cropped region.

    Parameters
    ----------
    img_data : ndarray
        Source volume indexed as (z, x, y).
    pts : ndarray
        Point cloud; columns 0/1/2 are read as z/x/y.
    location : tuple, optional
        Lower corner (z, x, y) of the crop region.
    patch_size : tuple, optional
        Extent (d, w, h) of the crop region.
    debug_verbose : bool, optional
        Print crop geometry when True.
    offset : bool, optional
        When True, translate surviving points so ``location`` becomes the
        origin.

    Returns
    -------
    tuple
        (cropped volume, filtered point array)
    """
    patch_size = np.array(patch_size).astype(np.uint32)
    location = np.array(location).astype(np.uint32)
    slice_start = np.max([0, location[0]])
    slice_end = np.min([location[0] + patch_size[0], img_data.shape[0]])
    # Keep points strictly inside the crop box on every axis; this is the
    # complement of deleting the union of all per-axis out-of-bounds sets.
    inside = (
        (pts[:, 0] > location[0])
        & (pts[:, 0] < location[0] + patch_size[0])
        & (pts[:, 1] > location[1])
        & (pts[:, 1] < location[1] + patch_size[1])
        & (pts[:, 2] > location[2])
        & (pts[:, 2] < location[2] + patch_size[2])
    )
    cropped_pts = np.array(pts[inside])
    if offset:
        # Re-express surviving points relative to the crop origin.
        for axis in range(3):
            cropped_pts[:, axis] = cropped_pts[:, axis] - location[axis]
    if debug_verbose:
        print(
            "\n z x y w h: {}".format(
                (location[0], location[1], location[2], patch_size[1], patch_size[2])
            )
        )
        print("Slice start, slice end {} {}".format(slice_start, slice_end))
        print("Cropped points array shape: {}".format(cropped_pts.shape))
    img = crop_vol_in_bbox(
        img_data,
        slice_start,
        slice_end,
        location[2],
        location[1],
        patch_size[2],
        patch_size[1],
    )
    return img, cropped_pts
def crop_pts_bb(
    pts, bounding_box, location=(0, 0, 0), debug_verbose=False, offset=False
):
    """Return the rows of ``pts`` lying strictly inside ``bounding_box``.

    ``bounding_box`` is (z_start, z_end, x_start, x_end, y_start, y_end);
    point columns 0/1/2 are z/x/y.  When ``offset`` is True the surviving
    points are shifted so the box's lower corner becomes the origin (the
    ``location`` argument is overwritten in that case).
    """
    z_st, z_end, x_st, x_end, y_st, y_end = bounding_box
    print(z_st, z_end, x_st, x_end, y_st, y_end)
    # Strict inequalities: points sitting exactly on a face are discarded.
    inside = (
        (pts[:, 0] > z_st)
        & (pts[:, 0] < z_end)
        & (pts[:, 1] > x_st)
        & (pts[:, 1] < x_end)
        & (pts[:, 2] > y_st)
        & (pts[:, 2] < y_end)
    )
    cropped_pts = np.array(pts[inside])
    if offset:
        location = (z_st, x_st, y_st)
        for axis in range(3):
            cropped_pts[:, axis] = cropped_pts[:, axis] - location[axis]
        print(f"Offset location {location}")
    if debug_verbose:
        print("Cropped points array shape: {}".format(cropped_pts.shape))
    return cropped_pts
def crop_vol_and_pts_bb(
    img_volume, pts, bounding_box, debug_verbose=False, offset=False
):
    """Crop ``img_volume`` to ``bounding_box`` and filter ``pts`` down to
    the points strictly inside it.

    NOTE(review): the bounding box is unpacked here as
    (z_st, z_end, y_st, y_end, x_st, x_end) — the y/x pair order differs
    from ``crop_pts_bb``; confirm against callers before changing.
    """
    # TODO: clip bbox to img_volume
    z_st, z_end, y_st, y_end, x_st, x_end = bounding_box
    location = (z_st, x_st, y_st)
    inside = (
        (pts[:, 0] > z_st)
        & (pts[:, 0] < z_end)
        & (pts[:, 1] > x_st)
        & (pts[:, 1] < x_end)
        & (pts[:, 2] > y_st)
        & (pts[:, 2] < y_end)
    )
    cropped_pts = np.array(pts[inside])
    if offset:
        # Shift surviving points so the box corner sits at the origin.
        for axis, shift in enumerate(location):
            cropped_pts[:, axis] = cropped_pts[:, axis] - shift
    img = sample_bvol(img_volume, bounding_box)
    return img, cropped_pts
# old (appears superseded by crop_vol_and_pts — confirm before removal)
def crop_vol_and_pts_centered(
    img_volume,
    pts,
    location=(60, 700, 700),
    patch_size=(40, 300, 300),
    debug_verbose=False,
    offset=False,
):
    """Crop a sub-volume of ``img_volume`` anchored at ``location`` and
    keep only the points strictly inside the cropped region.

    Behaves like :func:`crop_vol_and_pts`; retained under its old name.
    Returns a (cropped volume, filtered point array) tuple.
    """
    patch_size = np.array(patch_size).astype(np.uint32)
    location = np.array(location).astype(np.uint32)
    slice_start = np.max([0, location[0]])
    slice_end = np.min([location[0] + patch_size[0], img_volume.shape[0]])
    # Keep points strictly inside the crop box on every axis (the
    # complement of deleting the union of per-axis out-of-bounds sets).
    inside = (
        (pts[:, 0] > location[0])
        & (pts[:, 0] < location[0] + patch_size[0])
        & (pts[:, 1] > location[1])
        & (pts[:, 1] < location[1] + patch_size[1])
        & (pts[:, 2] > location[2])
        & (pts[:, 2] < location[2] + patch_size[2])
    )
    cropped_pts = np.array(pts[inside])
    if offset:
        # Re-express surviving points relative to the crop origin.
        for axis in range(3):
            cropped_pts[:, axis] = cropped_pts[:, axis] - location[axis]
    if debug_verbose:
        print(
            "\n z x y w h: {}".format(
                (location[0], location[1], location[2], patch_size[1], patch_size[2])
            )
        )
        print("Slice start, slice end {} {}".format(slice_start, slice_end))
        print("Cropped points array shape: {}".format(cropped_pts.shape))
    img = crop_vol_in_bbox(
        img_volume,
        slice_start,
        slice_end,
        location[2],
        location[1],
        patch_size[2],
        patch_size[1],
    )
    return img, cropped_pts
def sample_patch_slices(img_vol, entities_df):
    """Sample a 64^3 patch at each entity location and return the middle
    z-slice of every patch together with the per-patch point arrays.

    ``entities_df`` must provide ``slice``, ``x`` and ``y`` columns.
    """
    entity_locs = np.array(entities_df[["slice", "x", "y"]])
    marked = sample_marked_patches(
        img_vol, entity_locs, entity_locs, patch_size=(64, 64, 64)
    )
    patch_vols = marked.vols
    patch_locs = marked.vols_locs  # fetched for parity with original (unused)
    patch_pts = marked.vols_pts
    # Middle index is re-derived from the first patch inside the
    # comprehension so an empty patch list never indexes vols[0].
    mid_slices = np.array(
        [v[patch_vols[0].shape[0] // 2, :, :] for v in patch_vols]
    )
    print(f"Generated slice {mid_slices.shape}")
    return mid_slices, patch_pts
def gather_single_class(img_vol, entities_locs, class_code, patch_size=(64, 64, 64)):
    """Sample marked patches only for entities of a single class.

    ``entities_locs`` is expected to be a dataframe with at least the
    columns ``class_code``, ``slice``, ``x`` and ``y``.  Returns the
    (volumes, locations, points) triplet from the sampled patches.
    """
    class_mask = entities_locs["class_code"].isin([class_code])
    single_class_locs = np.array(
        entities_locs.loc[class_mask][["slice", "x", "y"]]
    )
    marked = sample_marked_patches(
        img_vol,
        single_class_locs,
        single_class_locs,
        patch_size=patch_size,
    )
    return marked.vols, marked.vols_locs, marked.vols_pts
def sample_patch2d(img_volume, pts, patch_size=(40, 40)):
    """Extract one 2-D patch per point from ``img_volume``.

    Each row of ``pts`` is unpacked as (slice number, y, x).  Returns the
    list of patches and a matching list of ``"x_y_slice"`` title strings.
    """
    patches = []
    titles = []
    print(f"Sampling {len(pts)} pts from image volume of shape {img_volume.shape}")
    w, h = patch_size
    for sliceno, y, x in pts:
        patch = get_centered_img_in_bbox(
            img_volume, sliceno, int(np.ceil(x)), int(np.ceil(y)), w, h
        )
        patches.append(patch)
        titles.append(f"{int(x)}_{int(y)}_{sliceno}")
    return patches, titles
def entitybvol_to_cropbvol(bvol):
    """Convert an entity bounding volume to crop ordering.

    from z1 z2 x1 x2 y1 y2
    to   z1 x1 y1 z2 x2 y2

    BUGFIX: the previous implementation used the inverse permutation
    (0, 3, 1, 4, 2, 5), which maps crop ordering back to entity ordering
    instead of the documented direction.  The entity layout
    (z1, z2, x1, x2, y1, y2) matches how entity bounding boxes are built
    elsewhere in this module (bb_s_z, bb_f_z, bb_s_x, ...), and the crop
    layout matches the sibling converters, so the docstring — not the old
    index mapping — is taken as the contract.
    """
    b = np.zeros_like(bvol)
    # Correct forward mapping: (z1, x1, y1, z2, x2, y2).
    for dst, src in enumerate((0, 2, 4, 1, 3, 5)):
        b[dst] = bvol[src]
    return b
def detnetbvol_to_cropbvol(bvol):
    """Convert a detection-network bounding volume to crop ordering.

    from x1 y1 x2 y2 z1 z2
    to   z1 x1 y1 z2 x2 y2
    """
    out = np.zeros_like(bvol)
    # Source index for each destination slot in crop ordering.
    for dst, src in enumerate((4, 0, 1, 5, 2, 3)):
        out[dst] = bvol[src]
    return out
def cropbvol_to_detnet_bvol(bvol):
    """Convert a crop-ordered bounding volume to detection-network ordering.

    from z1 x1 y1 z2 x2 y2
    to   x1 y1 x2 y2 z1 z2
    """
    out = np.zeros_like(bvol)
    # Source index for each destination slot in detnet ordering.
    for dst, src in enumerate((1, 2, 4, 5, 0, 3)):
        out[dst] = bvol[src]
    return out
| [
"numpy.stack",
"numpy.meshgrid",
"numpy.abs",
"numpy.zeros_like",
"numpy.ceil",
"numpy.zeros",
"loguru.logger.info",
"numpy.max",
"numpy.random.random",
"numpy.min",
"numpy.array",
"numpy.linspace",
"numpy.where",
"numpy.delete"
] | [((3719, 3778), 'numpy.linspace', 'np.linspace', (['(0)', '(padded_vol.shape[0] - 2 * padding[0])', 'z_dim'], {}), '(0, padded_vol.shape[0] - 2 * padding[0], z_dim)\n', (3730, 3778), True, 'import numpy as np\n'), ((3795, 3854), 'numpy.linspace', 'np.linspace', (['(0)', '(padded_vol.shape[1] - 2 * padding[1])', 'x_dim'], {}), '(0, padded_vol.shape[1] - 2 * padding[1], x_dim)\n', (3806, 3854), True, 'import numpy as np\n'), ((3871, 3930), 'numpy.linspace', 'np.linspace', (['(0)', '(padded_vol.shape[2] - 2 * padding[2])', 'y_dim'], {}), '(0, padded_vol.shape[2] - 2 * padding[2], y_dim)\n', (3882, 3930), True, 'import numpy as np\n'), ((3953, 3988), 'numpy.meshgrid', 'np.meshgrid', (['spacez', 'spacex', 'spacey'], {}), '(spacez, spacex, spacey)\n', (3964, 3988), True, 'import numpy as np\n'), ((4917, 4947), 'numpy.random.random', 'np.random.random', (['(num_pts, 4)'], {}), '((num_pts, 4))\n', (4933, 4947), True, 'import numpy as np\n'), ((5181, 5192), 'numpy.abs', 'np.abs', (['pts'], {}), '(pts)\n', (5187, 5192), True, 'import numpy as np\n'), ((6704, 6730), 'numpy.zeros_like', 'np.zeros_like', (['input_array'], {}), '(input_array)\n', (6717, 6730), True, 'import numpy as np\n'), ((7483, 7501), 'numpy.max', 'np.max', (['(0, z - d)'], {}), '((0, z - d))\n', (7489, 7501), True, 'import numpy as np\n'), ((7515, 7551), 'numpy.min', 'np.min', (['(z + d, img_volume.shape[0])'], {}), '((z + d, img_volume.shape[0]))\n', (7521, 7551), True, 'import numpy as np\n'), ((7564, 7582), 'numpy.max', 'np.max', (['(0, x - w)'], {}), '((0, x - w))\n', (7570, 7582), True, 'import numpy as np\n'), ((7596, 7632), 'numpy.min', 'np.min', (['(x + w, img_volume.shape[1])'], {}), '((x + w, img_volume.shape[1]))\n', (7602, 7632), True, 'import numpy as np\n'), ((7645, 7663), 'numpy.max', 'np.max', (['(0, y - h)'], {}), '((0, y - h))\n', (7651, 7663), True, 'import numpy as np\n'), ((7677, 7713), 'numpy.min', 'np.min', (['(y + h, img_volume.shape[2])'], {}), '((y + h, 
img_volume.shape[2]))\n', (7683, 7713), True, 'import numpy as np\n'), ((8357, 8402), 'loguru.logger.info', 'logger.info', (['f"""Sampling location {z} {x} {y}"""'], {}), "(f'Sampling location {z} {x} {y}')\n", (8368, 8402), False, 'from loguru import logger\n'), ((13388, 13402), 'numpy.array', 'np.array', (['vols'], {}), '(vols)\n', (13396, 13402), True, 'import numpy as np\n'), ((13419, 13437), 'numpy.array', 'np.array', (['vols_pts'], {}), '(vols_pts)\n', (13427, 13437), True, 'import numpy as np\n'), ((13454, 13472), 'numpy.array', 'np.array', (['vols_bbs'], {}), '(vols_bbs)\n', (13462, 13472), True, 'import numpy as np\n'), ((13490, 13509), 'numpy.array', 'np.array', (['vols_locs'], {}), '(vols_locs)\n', (13498, 13509), True, 'import numpy as np\n'), ((14110, 14134), 'numpy.max', 'np.max', (['[0, location[0]]'], {}), '([0, location[0]])\n', (14116, 14134), True, 'import numpy as np\n'), ((14152, 14208), 'numpy.min', 'np.min', (['[location[0] + patch_size[0], img_data.shape[0]]'], {}), '([location[0] + patch_size[0], img_data.shape[0]])\n', (14158, 14208), True, 'import numpy as np\n'), ((17875, 17899), 'numpy.max', 'np.max', (['[0, location[0]]'], {}), '([0, location[0]])\n', (17881, 17899), True, 'import numpy as np\n'), ((17917, 17975), 'numpy.min', 'np.min', (['[location[0] + patch_size[0], img_volume.shape[0]]'], {}), '([location[0] + patch_size[0], img_volume.shape[0]])\n', (17923, 17975), True, 'import numpy as np\n'), ((19319, 19361), 'numpy.array', 'np.array', (["entities_df[['slice', 'x', 'y']]"], {}), "(entities_df[['slice', 'x', 'y']])\n", (19327, 19361), True, 'import numpy as np\n'), ((19574, 19638), 'numpy.array', 'np.array', (['[v[vol_list[0].shape[0] // 2, :, :] for v in vol_list]'], {}), '([v[vol_list[0].shape[0] // 2, :, :] for v in vol_list])\n', (19582, 19638), True, 'import numpy as np\n'), ((19964, 20020), 'numpy.array', 'np.array', (["entities_locs_singleclass[['slice', 'x', 'y']]"], {}), "(entities_locs_singleclass[['slice', 'x', 
'y']])\n", (19972, 20020), True, 'import numpy as np\n'), ((20997, 21016), 'numpy.zeros_like', 'np.zeros_like', (['bvol'], {}), '(bvol)\n', (21010, 21016), True, 'import numpy as np\n'), ((21275, 21294), 'numpy.zeros_like', 'np.zeros_like', (['bvol'], {}), '(bvol)\n', (21288, 21294), True, 'import numpy as np\n'), ((21550, 21569), 'numpy.zeros_like', 'np.zeros_like', (['bvol'], {}), '(bvol)\n', (21563, 21569), True, 'import numpy as np\n'), ((737, 830), 'numpy.array', 'np.array', (['[(cz - d, cx - w, cy - h, cz + d, cx + w, cy + h) for cz, cx, cy, _ in centers]'], {}), '([(cz - d, cx - w, cy - h, cz + d, cx + w, cy + h) for cz, cx, cy,\n _ in centers])\n', (745, 830), True, 'import numpy as np\n'), ((928, 1021), 'numpy.array', 'np.array', (['[(cz - d, cx - w, cy - h, cz + d, cx + w, cy + h) for cz, cy, cx, _ in centers]'], {}), '([(cz - d, cx - w, cy - h, cz + d, cx + w, cy + h) for cz, cy, cx,\n _ in centers])\n', (936, 1021), True, 'import numpy as np\n'), ((1782, 1875), 'numpy.array', 'np.array', (['[(cx - w, cy - h, cx + w, cy + h, cz - d, cz + d) for cz, cx, cy, _ in centers]'], {}), '([(cx - w, cy - h, cx + w, cy + h, cz - d, cz + d) for cz, cx, cy,\n _ in centers])\n', (1790, 1875), True, 'import numpy as np\n'), ((1973, 2066), 'numpy.array', 'np.array', (['[(cx - w, cy - h, cx + w, cy + h, cz - d, cz + d) for cz, cy, cx, _ in centers]'], {}), '([(cx - w, cy - h, cx + w, cy + h, cz - d, cz + d) for cz, cy, cx,\n _ in centers])\n', (1981, 2066), True, 'import numpy as np\n'), ((2807, 2915), 'numpy.array', 'np.array', (['[(0, cz, cx, cy, cz - d, cx - w, cy - h, cz + d, cx + w, cy + h) for cz, cx,\n cy, _ in centers]'], {}), '([(0, cz, cx, cy, cz - d, cx - w, cy - h, cz + d, cx + w, cy + h) for\n cz, cx, cy, _ in centers])\n', (2815, 2915), True, 'import numpy as np\n'), ((3013, 3121), 'numpy.array', 'np.array', (['[(0, cz, cx, cy, cz - d, cx - w, cy - h, cz + d, cx + w, cy + h) for cz, cy,\n cx, _ in centers]'], {}), '([(0, cz, cx, cy, cz - d, cx - w, cy - h, 
cz + d, cx + w, cy + h) for\n cz, cy, cx, _ in centers])\n', (3021, 3121), True, 'import numpy as np\n'), ((6872, 6892), 'numpy.max', 'np.max', (['(0, bvol[0])'], {}), '((0, bvol[0]))\n', (6878, 6892), True, 'import numpy as np\n'), ((6908, 6947), 'numpy.min', 'np.min', (['(bvol[3], input_array.shape[0])'], {}), '((bvol[3], input_array.shape[0]))\n', (6914, 6947), True, 'import numpy as np\n'), ((6963, 6983), 'numpy.max', 'np.max', (['(0, bvol[1])'], {}), '((0, bvol[1]))\n', (6969, 6983), True, 'import numpy as np\n'), ((6999, 7038), 'numpy.min', 'np.min', (['(bvol[4], input_array.shape[1])'], {}), '((bvol[4], input_array.shape[1]))\n', (7005, 7038), True, 'import numpy as np\n'), ((7054, 7074), 'numpy.max', 'np.max', (['(0, bvol[2])'], {}), '((0, bvol[2]))\n', (7060, 7074), True, 'import numpy as np\n'), ((7090, 7129), 'numpy.min', 'np.min', (['(bvol[5], input_array.shape[2])'], {}), '((bvol[5], input_array.shape[2]))\n', (7096, 7129), True, 'import numpy as np\n'), ((8255, 8273), 'numpy.array', 'np.array', (['vol_size'], {}), '(vol_size)\n', (8263, 8273), True, 'import numpy as np\n'), ((9012, 9030), 'numpy.zeros', 'np.zeros', (['vol_size'], {}), '(vol_size)\n', (9020, 9030), True, 'import numpy as np\n'), ((12497, 12536), 'numpy.delete', 'np.delete', (['pts_c', 'out_of_bounds'], {'axis': '(0)'}), '(pts_c, out_of_bounds, axis=0)\n', (12506, 12536), True, 'import numpy as np\n'), ((14664, 14703), 'numpy.delete', 'np.delete', (['pts', 'out_of_bounds_w'], {'axis': '(0)'}), '(pts, out_of_bounds_w, axis=0)\n', (14673, 14703), True, 'import numpy as np\n'), ((16057, 16096), 'numpy.delete', 'np.delete', (['pts', 'out_of_bounds_w'], {'axis': '(0)'}), '(pts, out_of_bounds_w, axis=0)\n', (16066, 16096), True, 'import numpy as np\n'), ((17114, 17153), 'numpy.delete', 'np.delete', (['pts', 'out_of_bounds_w'], {'axis': '(0)'}), '(pts, out_of_bounds_w, axis=0)\n', (17123, 17153), True, 'import numpy as np\n'), ((18431, 18470), 'numpy.delete', 'np.delete', (['pts', 
'out_of_bounds_w'], {'axis': '(0)'}), '(pts, out_of_bounds_w, axis=0)\n', (18440, 18470), True, 'import numpy as np\n'), ((4089, 4111), 'numpy.stack', 'np.stack', (['(zv, xv, yv)'], {}), '((zv, xv, yv))\n', (4097, 4111), True, 'import numpy as np\n'), ((5939, 5972), 'numpy.random.random', 'np.random.random', (['trans_pts.shape'], {}), '(trans_pts.shape)\n', (5955, 5972), True, 'import numpy as np\n'), ((11787, 11797), 'numpy.ceil', 'np.ceil', (['x'], {}), '(x)\n', (11794, 11797), True, 'import numpy as np\n'), ((11816, 11826), 'numpy.ceil', 'np.ceil', (['y'], {}), '(y)\n', (11823, 11826), True, 'import numpy as np\n'), ((13076, 13096), 'numpy.zeros', 'np.zeros', (['patch_size'], {}), '(patch_size)\n', (13084, 13096), True, 'import numpy as np\n'), ((13868, 13888), 'numpy.array', 'np.array', (['patch_size'], {}), '(patch_size)\n', (13876, 13888), True, 'import numpy as np\n'), ((13923, 13941), 'numpy.array', 'np.array', (['location'], {}), '(location)\n', (13931, 13941), True, 'import numpy as np\n'), ((17637, 17657), 'numpy.array', 'np.array', (['patch_size'], {}), '(patch_size)\n', (17645, 17657), True, 'import numpy as np\n'), ((17692, 17710), 'numpy.array', 'np.array', (['location'], {}), '(location)\n', (17700, 17710), True, 'import numpy as np\n'), ((4356, 4396), 'numpy.stack', 'np.stack', (['(zv_f, xv_f, yv_f, class_code)'], {}), '((zv_f, xv_f, yv_f, class_code))\n', (4364, 4396), True, 'import numpy as np\n'), ((6280, 6385), 'numpy.array', 'np.array', (["[ent['bb_s_z'], ent['bb_f_z'], ent['bb_s_x'], ent['bb_f_x'], ent['bb_s_y'],\n ent['bb_f_y']]"], {}), "([ent['bb_s_z'], ent['bb_f_z'], ent['bb_s_x'], ent['bb_f_x'], ent[\n 'bb_s_y'], ent['bb_f_y']])\n", (6288, 6385), True, 'import numpy as np\n'), ((14269, 14319), 'numpy.where', 'np.where', (['(pts[:, 2] >= location[2] + patch_size[2])'], {}), '(pts[:, 2] >= location[2] + patch_size[2])\n', (14277, 14319), True, 'import numpy as np\n'), ((14337, 14371), 'numpy.where', 'np.where', (['(pts[:, 2] <= 
location[2])'], {}), '(pts[:, 2] <= location[2])\n', (14345, 14371), True, 'import numpy as np\n'), ((14389, 14439), 'numpy.where', 'np.where', (['(pts[:, 1] >= location[1] + patch_size[1])'], {}), '(pts[:, 1] >= location[1] + patch_size[1])\n', (14397, 14439), True, 'import numpy as np\n'), ((14457, 14491), 'numpy.where', 'np.where', (['(pts[:, 1] <= location[1])'], {}), '(pts[:, 1] <= location[1])\n', (14465, 14491), True, 'import numpy as np\n'), ((14509, 14543), 'numpy.where', 'np.where', (['(pts[:, 0] <= location[0])'], {}), '(pts[:, 0] <= location[0])\n', (14517, 14543), True, 'import numpy as np\n'), ((14561, 14611), 'numpy.where', 'np.where', (['(pts[:, 0] >= location[0] + patch_size[0])'], {}), '(pts[:, 0] >= location[0] + patch_size[0])\n', (14569, 14611), True, 'import numpy as np\n'), ((15749, 15776), 'numpy.where', 'np.where', (['(pts[:, 0] <= z_st)'], {}), '(pts[:, 0] <= z_st)\n', (15757, 15776), True, 'import numpy as np\n'), ((15794, 15822), 'numpy.where', 'np.where', (['(pts[:, 0] >= z_end)'], {}), '(pts[:, 0] >= z_end)\n', (15802, 15822), True, 'import numpy as np\n'), ((15840, 15867), 'numpy.where', 'np.where', (['(pts[:, 1] <= x_st)'], {}), '(pts[:, 1] <= x_st)\n', (15848, 15867), True, 'import numpy as np\n'), ((15885, 15913), 'numpy.where', 'np.where', (['(pts[:, 1] >= x_end)'], {}), '(pts[:, 1] >= x_end)\n', (15893, 15913), True, 'import numpy as np\n'), ((15931, 15958), 'numpy.where', 'np.where', (['(pts[:, 2] <= y_st)'], {}), '(pts[:, 2] <= y_st)\n', (15939, 15958), True, 'import numpy as np\n'), ((15976, 16004), 'numpy.where', 'np.where', (['(pts[:, 2] >= y_end)'], {}), '(pts[:, 2] >= y_end)\n', (15984, 16004), True, 'import numpy as np\n'), ((16806, 16833), 'numpy.where', 'np.where', (['(pts[:, 0] <= z_st)'], {}), '(pts[:, 0] <= z_st)\n', (16814, 16833), True, 'import numpy as np\n'), ((16851, 16879), 'numpy.where', 'np.where', (['(pts[:, 0] >= z_end)'], {}), '(pts[:, 0] >= z_end)\n', (16859, 16879), True, 'import numpy as np\n'), 
((16897, 16924), 'numpy.where', 'np.where', (['(pts[:, 1] <= x_st)'], {}), '(pts[:, 1] <= x_st)\n', (16905, 16924), True, 'import numpy as np\n'), ((16942, 16970), 'numpy.where', 'np.where', (['(pts[:, 1] >= x_end)'], {}), '(pts[:, 1] >= x_end)\n', (16950, 16970), True, 'import numpy as np\n'), ((16988, 17015), 'numpy.where', 'np.where', (['(pts[:, 2] <= y_st)'], {}), '(pts[:, 2] <= y_st)\n', (16996, 17015), True, 'import numpy as np\n'), ((17033, 17061), 'numpy.where', 'np.where', (['(pts[:, 2] >= y_end)'], {}), '(pts[:, 2] >= y_end)\n', (17041, 17061), True, 'import numpy as np\n'), ((18036, 18086), 'numpy.where', 'np.where', (['(pts[:, 2] >= location[2] + patch_size[2])'], {}), '(pts[:, 2] >= location[2] + patch_size[2])\n', (18044, 18086), True, 'import numpy as np\n'), ((18104, 18138), 'numpy.where', 'np.where', (['(pts[:, 2] <= location[2])'], {}), '(pts[:, 2] <= location[2])\n', (18112, 18138), True, 'import numpy as np\n'), ((18156, 18206), 'numpy.where', 'np.where', (['(pts[:, 1] >= location[1] + patch_size[1])'], {}), '(pts[:, 1] >= location[1] + patch_size[1])\n', (18164, 18206), True, 'import numpy as np\n'), ((18224, 18258), 'numpy.where', 'np.where', (['(pts[:, 1] <= location[1])'], {}), '(pts[:, 1] <= location[1])\n', (18232, 18258), True, 'import numpy as np\n'), ((18276, 18310), 'numpy.where', 'np.where', (['(pts[:, 0] <= location[0])'], {}), '(pts[:, 0] <= location[0])\n', (18284, 18310), True, 'import numpy as np\n'), ((18328, 18378), 'numpy.where', 'np.where', (['(pts[:, 0] >= location[0] + patch_size[0])'], {}), '(pts[:, 0] >= location[0] + patch_size[0])\n', (18336, 18378), True, 'import numpy as np\n'), ((20673, 20683), 'numpy.ceil', 'np.ceil', (['x'], {}), '(x)\n', (20680, 20683), True, 'import numpy as np\n'), ((20690, 20700), 'numpy.ceil', 'np.ceil', (['y'], {}), '(y)\n', (20697, 20700), True, 'import numpy as np\n'), ((12090, 12118), 'numpy.where', 'np.where', (['(pts[:, 1] <= x - w)'], {}), '(pts[:, 1] <= x - w)\n', (12098, 12118), True, 
'import numpy as np\n'), ((12144, 12172), 'numpy.where', 'np.where', (['(pts[:, 1] >= x + w)'], {}), '(pts[:, 1] >= x + w)\n', (12152, 12172), True, 'import numpy as np\n'), ((12198, 12226), 'numpy.where', 'np.where', (['(pts[:, 2] <= y - h)'], {}), '(pts[:, 2] <= y - h)\n', (12206, 12226), True, 'import numpy as np\n'), ((12252, 12280), 'numpy.where', 'np.where', (['(pts[:, 2] >= y + h)'], {}), '(pts[:, 2] >= y + h)\n', (12260, 12280), True, 'import numpy as np\n'), ((12306, 12340), 'numpy.where', 'np.where', (['(pts[:, 0] <= slice_start)'], {}), '(pts[:, 0] <= slice_start)\n', (12314, 12340), True, 'import numpy as np\n'), ((12366, 12398), 'numpy.where', 'np.where', (['(pts[:, 0] >= slice_end)'], {}), '(pts[:, 0] >= slice_end)\n', (12374, 12398), True, 'import numpy as np\n')] |
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from datetime import datetime
from dlimage.mnist import MNISTLoader
def vectorize(j, num_classes=10):
    """Return a one-hot vector with a 1.0 at index ``j``.

    Parameters
    ----------
    j : int
        Index of the class to mark hot.
    num_classes : int, optional
        Length of the returned vector.  Defaults to 10 (one slot per
        MNIST digit), preserving the original behavior.

    Returns
    -------
    numpy.ndarray
        Float vector of zeros except ``e[j] == 1.0``.
    """
    e = np.zeros(num_classes)
    e[j] = 1.0
    return e
# One loader instance serves both splits (both reads use the same data
# directory, so the second MNISTLoader of the original was redundant).
mndata = MNISTLoader('dlimage/mnist/data')


def _to_arrays(images, labels):
    """Stack raw MNIST images row-wise and one-hot-encode the labels.

    Replaces the copy-pasted fill loops that were duplicated for the
    training and testing splits.  Returns (x, y) float64 arrays with one
    flattened image per row of x and ``vectorize(label)`` per row of y.
    """
    x = np.asarray(images, dtype=np.float64)
    y = np.asarray([vectorize(label) for label in labels])
    return x, y


x_train, y_train = _to_arrays(*mndata.load_training())
print("Loading training data finished.")

x_test, y_test = _to_arrays(*mndata.load_testing())
print("Loading testing data finished.")

# Two-layer sigmoid MLP: 784 inputs (28x28 pixels) -> 80 hidden -> 10 outputs.
model = Sequential()
model.add(Dense(80, activation='sigmoid', input_dim=784))
model.add(Dense(10, activation='sigmoid'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mse', optimizer=sgd, metrics=['accuracy'])

print("Start training: " + str(datetime.now()))
model.fit(x_train, y_train, epochs=20, batch_size=20, verbose=1)
print("End training: " + str(datetime.now()))

print("Start evaluating: " + str(datetime.now()))
score = model.evaluate(x_test, y_test, batch_size=20)
print(score)
print("End evaluating: " + str(datetime.now()))
| [
"keras.optimizers.SGD",
"numpy.zeros",
"keras.layers.Dense",
"dlimage.mnist.MNISTLoader",
"keras.models.Sequential",
"datetime.datetime.now"
] | [((301, 334), 'dlimage.mnist.MNISTLoader', 'MNISTLoader', (['"""dlimage/mnist/data"""'], {}), "('dlimage/mnist/data')\n", (312, 334), False, 'from dlimage.mnist import MNISTLoader\n'), ((641, 674), 'dlimage.mnist.MNISTLoader', 'MNISTLoader', (['"""dlimage/mnist/data"""'], {}), "('dlimage/mnist/data')\n", (652, 674), False, 'from dlimage.mnist import MNISTLoader\n'), ((975, 987), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (985, 987), False, 'from keras.models import Sequential\n'), ((1096, 1150), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (1099, 1150), False, 'from keras.optimizers import SGD\n'), ((250, 262), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (258, 262), True, 'import numpy as np\n'), ((998, 1044), 'keras.layers.Dense', 'Dense', (['(80)'], {'activation': '"""sigmoid"""', 'input_dim': '(784)'}), "(80, activation='sigmoid', input_dim=784)\n", (1003, 1044), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((1056, 1087), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""sigmoid"""'}), "(10, activation='sigmoid')\n", (1061, 1087), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((1245, 1259), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1257, 1259), False, 'from datetime import datetime\n'), ((1356, 1370), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1368, 1370), False, 'from datetime import datetime\n'), ((1407, 1421), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1419, 1421), False, 'from datetime import datetime\n'), ((1522, 1536), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1534, 1536), False, 'from datetime import datetime\n')] |
import numpy as np
import pytest
import mbuild as mb
from mbuild.tests.base_test import BaseTest
class TestLattice(BaseTest):
    """
    Unit Tests for Lattice class functionality.
    """

    @pytest.mark.parametrize(
        "spacing",
        [
            ([1, 1, 1]),
            ([0.1, 0.1, 0.1]),
            (["1", "1", "1"]),
            (["1", 0.1, "0.1"]),
        ],
    )
    def test_spacing_success(self, spacing):
        """Valid spacing inputs round-trip as float64 arrays of shape (3,)."""
        spacing = np.asarray(spacing, dtype=np.float64)
        spacing = np.reshape(spacing, (3,))
        test_lattice = mb.Lattice(lattice_spacing=spacing)
        np.testing.assert_allclose(
            spacing,
            test_lattice.lattice_spacing,
            rtol=1e-7,
            atol=0,
            equal_nan=True,
        )

    @pytest.mark.parametrize(
        "dim, spacing", [(3, [1, 1, 1]), (3, [1, 1, 0]), (3, [1, 0, 0])]
    )
    def test_dimension_set(self, dim, spacing):
        """Dimension is reported as 3 regardless of zero spacings."""
        test_lattice = mb.Lattice(lattice_spacing=spacing)
        assert test_lattice.dimension == dim

    @pytest.mark.parametrize(
        "spacing",
        [
            ([1]),
            (1),
            ([1, 1]),
            ([-1, 1, 1]),
            ([1, 1, 1, 1]),
            ([1, "a"]),
            (None),
            ([]),
            ([None, None, None]),
        ],
    )
    def test_spacing_incorrect(self, spacing):
        """Wrong-length, negative, or non-numeric spacings raise ValueError."""
        with pytest.raises(ValueError):
            mb.Lattice(lattice_spacing=spacing)

    @pytest.mark.parametrize(
        "spacing",
        [
            ([0.1, 0.1, 0.1]),
            ([1, 2, 3]),
            (["1", "2", "3"]),
            ([1, 2, "3"]),
            ([1, 0, 0]),
            ([1, 1, 0]),
        ],
    )
    def test_spacing_correct(self, spacing):
        """Numeric (or numeric-string) spacings of length 3 are accepted."""
        mb.Lattice(lattice_spacing=spacing)

    @pytest.mark.parametrize(
        "vectors",
        [
            ([[1, 2], [0, 1, 0], [0, 0, 1]]),
            ([[1, 0, 0], [0, 1, 0], [0, 1, 0]]),
            (np.identity(4, dtype=np.float64)),
            ([[1, 2, 3], [3, 2, 1], [2, 1, 3]]),
        ],
    )
    def test_incorrect_lattice_vectors(self, vectors):
        """Malformed or degenerate lattice vectors raise ValueError."""
        with pytest.raises(ValueError):
            mb.Lattice(lattice_spacing=[1, 1, 1], lattice_vectors=vectors)

    @pytest.mark.parametrize(
        "vectors",
        [
            ([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
            ([[1, 0, 0], [-0.5, 0.85, 0], [0, 0, 1]]),
        ],
    )
    def test_correct_lattice_vectors(self, vectors):
        """Well-formed 3x3 lattice vectors are accepted."""
        mb.Lattice(lattice_spacing=[1, 1, 1], lattice_vectors=vectors)

    def test_overdefined_inputs(self):
        """Passing both lattice_vectors and angles raises ValueError."""
        # Renamed from the misspelled ``test_overdefinied_inputs``.
        space = [1, 1, 1]
        vectors = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        angles = [90, 90, 90]
        with pytest.raises(ValueError):
            mb.Lattice(
                lattice_spacing=space, lattice_vectors=vectors, angles=angles
            )

    @pytest.mark.parametrize("the_type", [(list()), (tuple()), (str()), ([])])
    def test_lattice_points_input_type(self, the_type):
        """Non-dict lattice_points raise TypeError."""
        with pytest.raises(TypeError):
            mb.Lattice(lattice_spacing=[1, 1, 1], lattice_points=the_type)

    @pytest.mark.parametrize(
        "incorrect",
        [
            ({"A": [[0.2, 0.3, 0.2, 0.1]]}),
            ({"A": [[None]]}),
            ({"A": [[0.2, 0.3, None]]}),
            ({"A": [[0.2, 0.3, -0.5]]}),
            ({"A": [[0.2, 0.3, 1]]}),
            ({"A": [[0.2, 0.3, 0.1], [0.2, 0.3, 0.1]]}),
        ],
    )
    def test_lattice_points_invalid_values(self, incorrect):
        """Malformed fractional coordinates raise ValueError.

        BUGFIX: this method was previously also named
        ``test_lattice_points_input_type``, which shadowed the TypeError
        test of the same name so that test never ran.
        """
        with pytest.raises(ValueError):
            mb.Lattice(lattice_spacing=[1, 1, 1], lattice_points=incorrect)

    @pytest.mark.parametrize(
        "angles",
        [
            ([150, 150, 150]),
            ([90, 90, -90]),
            ([90, 90, 180]),
            ([90, 90, 0]),
            ([90, 90, 90, 90]),
            ([97, 3, 120]),
        ],
    )
    def test_improper_angles(self, angles):
        """Geometrically impossible angle sets raise ValueError."""
        with pytest.raises(ValueError):
            mb.Lattice(lattice_spacing=[1, 1, 1], angles=angles)

    @pytest.mark.parametrize(
        "vectors, angles",
        [
            ([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [90, 90, 90]),
            (
                [
                    [1.0, 0.0, 0.0],
                    [-0.45399049973954675, 0.8910065241883679, 0.0],
                    [
                        -0.034899496702500955,
                        -0.037369475398893195,
                        0.9986919181801381,
                    ],
                ],
                [91, 92, 117],
            ),
        ],
    )
    def test_proper_angles(self, vectors, angles):
        """Angles derived from lattice vectors match the expected values."""
        testlattice = mb.Lattice(
            lattice_spacing=[1, 1, 1], lattice_vectors=vectors
        )
        np.testing.assert_allclose(
            testlattice.angles,
            np.asarray(angles, dtype=np.float64),
            rtol=1e-05,
            atol=1e-08,
            equal_nan=False,
        )

    @pytest.mark.parametrize(
        "x, y, z",
        [
            (None, 1, 0),
            (1, None, 1),
            (1, 1, None),
            (-1, 1, 1),
            (1, -1, 1),
            (1, 1, -1),
            (1, 1, np.nan),  # np.nan: the np.NaN alias was removed in NumPy 2.0
        ],
    )
    def test_incorrect_populate_inputs(self, x, y, z):
        """Non-positive or missing replication counts raise ValueError."""
        with pytest.raises(ValueError):
            test_lattice = mb.Lattice(lattice_spacing=[1, 1, 1])
            test_lattice.populate(
                compound_dict={"id": mb.Compound()}, x=x, y=y, z=z
            )

    @pytest.mark.parametrize("my_type", [([]), (()), (np.array), (np.ndarray)])
    def test_populate_basis_type_incorrect(self, my_type):
        """A compound_dict that is not a dict raises TypeError."""
        test_lattice = mb.Lattice(lattice_spacing=[1, 1, 1])
        with pytest.raises(TypeError):
            test_lattice.populate(compound_dict=my_type)

    @pytest.mark.parametrize(
        "not_compound",
        [
            (1),
            (mb.Box(lengths=[1, 1, 1], angles=[90.0, 90.0, 90.0])),
            ("aLattice"),
        ],
    )
    def test_populate_not_compound(self, not_compound):
        """Dictionary values that are not Compounds raise TypeError."""
        test_lattice = mb.Lattice(lattice_spacing=[1, 1, 1])
        particle_dict = {"id": not_compound}
        with pytest.raises(TypeError):
            test_lattice.populate(compound_dict=particle_dict)

    def test_proper_populate(self):
        """A 2x2x2 cubic lattice yields all 8 expected corner positions."""
        values_to_check = [
            [0, 0, 0],
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1],
            [1, 1, 0],
            [0, 1, 1],
            [1, 0, 1],
            [1, 1, 1],
        ]
        test_lattice = mb.Lattice(
            lattice_spacing=[1, 1, 1], angles=[90, 90, 90]
        )
        new_compound = test_lattice.populate(x=2, y=2, z=2)
        values_to_check = np.asarray(values_to_check, dtype=np.float64)
        is_true = []
        for pos1 in np.split(values_to_check, 8, axis=0):
            for pos2 in np.split(new_compound.xyz, 8, axis=0):
                if np.allclose(pos1, pos2):
                    is_true.append(True)
        assert len(is_true) == len(values_to_check)

    def test_box(self):
        """Populated rectangular lattice reports scaled box lengths/angles."""
        lattice = mb.Lattice(
            lattice_spacing=[1, 1, 1],
            angles=[90, 90, 90],
            lattice_points={"A": [[0, 0, 0]]},
        )
        compound_test = lattice.populate(
            compound_dict={"A": mb.Compound()}, x=2, y=5, z=9
        )
        replication = [2, 5, 9]
        np.testing.assert_allclose(
            compound_test.box.lengths,
            np.asarray(
                [x * y for x, y in zip(replication, lattice.lattice_spacing)]
            ),
        )
        np.testing.assert_allclose(
            compound_test.box.angles, np.asarray([90.0, 90.0, 90.0])
        )

    def test_box_non_rectangular(self):
        """Non-rectangular lattice keeps its 120-degree angle after populate."""
        lattice = mb.Lattice(
            lattice_spacing=[0.5, 0.5, 1],
            angles=[90, 90, 120],
            lattice_points={"A": [[0, 0, 0]]},
        )
        compound_test = lattice.populate(
            compound_dict={"A": mb.Compound()}, x=2, y=2, z=1
        )
        replication = [2, 2, 1]
        np.testing.assert_allclose(
            compound_test.box.lengths,
            np.asarray(
                [x * y for x, y in zip(replication, lattice.lattice_spacing)]
            ),
        )
        np.testing.assert_allclose(
            compound_test.box.angles, np.asarray([90.0, 90.0, 120.0])
        )

    def test_get_box(self):
        """populate() attaches an mb.Box with the expected lengths/angles."""
        lattice = mb.Lattice(
            lattice_spacing=[1, 1, 1],
            angles=[90, 90, 90],
            lattice_points={"A": [[0, 0, 0]]},
        )
        replication = [5, 4, 3]
        expected_lengths = [
            x * y for x, y in zip(replication, lattice.lattice_spacing)
        ]
        mylat = lattice.populate(x=5, y=4, z=3)
        assert isinstance(mylat.box, mb.Box)
        np.testing.assert_allclose([90, 90, 90], mylat.box.angles)
        np.testing.assert_allclose(expected_lengths, mylat.box.lengths)

    def test_get_box_non_rectangular(self):
        """Box lengths/angles are correct for a 120-degree lattice."""
        lattice = mb.Lattice(
            lattice_spacing=[0.5, 0.5, 1],
            angles=[90, 90, 120],
            lattice_points={"A": [[0, 0, 0]]},
        )
        replication = [2, 2, 1]
        expected_lengths = [
            x * y for x, y in zip(replication, lattice.lattice_spacing)
        ]
        mylat = lattice.populate(x=2, y=2, z=1)
        assert isinstance(mylat.box, mb.Box)
        np.testing.assert_allclose([90, 90, 120], mylat.box.angles)
        np.testing.assert_allclose(expected_lengths, mylat.box.lengths)
| [
"mbuild.Lattice",
"mbuild.Compound",
"numpy.asarray",
"mbuild.Box",
"numpy.allclose",
"numpy.identity",
"numpy.split",
"pytest.raises",
"numpy.reshape",
"pytest.mark.parametrize",
"numpy.testing.assert_allclose"
] | [((199, 303), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spacing"""', "[[1, 1, 1], [0.1, 0.1, 0.1], ['1', '1', '1'], ['1', 0.1, '0.1']]"], {}), "('spacing', [[1, 1, 1], [0.1, 0.1, 0.1], ['1', '1',\n '1'], ['1', 0.1, '0.1']])\n", (222, 303), False, 'import pytest\n'), ((780, 873), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim, spacing"""', '[(3, [1, 1, 1]), (3, [1, 1, 0]), (3, [1, 0, 0])]'], {}), "('dim, spacing', [(3, [1, 1, 1]), (3, [1, 1, 0]), (3,\n [1, 0, 0])])\n", (803, 873), False, 'import pytest\n'), ((1042, 1165), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spacing"""', "[[1], 1, [1, 1], [-1, 1, 1], [1, 1, 1, 1], [1, 'a'], None, [], [None, None,\n None]]"], {}), "('spacing', [[1], 1, [1, 1], [-1, 1, 1], [1, 1, 1, 1\n ], [1, 'a'], None, [], [None, None, None]])\n", (1065, 1165), False, 'import pytest\n'), ((1462, 1582), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spacing"""', "[[0.1, 0.1, 0.1], [1, 2, 3], ['1', '2', '3'], [1, 2, '3'], [1, 0, 0], [1, 1, 0]\n ]"], {}), "('spacing', [[0.1, 0.1, 0.1], [1, 2, 3], ['1', '2',\n '3'], [1, 2, '3'], [1, 0, 0], [1, 1, 0]])\n", (1485, 1582), False, 'import pytest\n'), ((2231, 2347), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""vectors"""', '[[[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 0], [-0.5, 0.85, 0], [0, 0, 1]]]'], {}), "('vectors', [[[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1,\n 0, 0], [-0.5, 0.85, 0], [0, 0, 1]]])\n", (2254, 2347), False, 'import pytest\n'), ((3091, 3306), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""incorrect"""', "[{'A': [[0.2, 0.3, 0.2, 0.1]]}, {'A': [[None]]}, {'A': [[0.2, 0.3, None]]},\n {'A': [[0.2, 0.3, -0.5]]}, {'A': [[0.2, 0.3, 1]]}, {'A': [[0.2, 0.3, \n 0.1], [0.2, 0.3, 0.1]]}]"], {}), "('incorrect', [{'A': [[0.2, 0.3, 0.2, 0.1]]}, {'A':\n [[None]]}, {'A': [[0.2, 0.3, None]]}, {'A': [[0.2, 0.3, -0.5]]}, {'A':\n [[0.2, 0.3, 1]]}, {'A': [[0.2, 0.3, 0.1], [0.2, 0.3, 0.1]]}])\n", (3114, 3306), 
False, 'import pytest\n'), ((3596, 3727), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""angles"""', '[[150, 150, 150], [90, 90, -90], [90, 90, 180], [90, 90, 0], [90, 90, 90, \n 90], [97, 3, 120]]'], {}), "('angles', [[150, 150, 150], [90, 90, -90], [90, 90,\n 180], [90, 90, 0], [90, 90, 90, 90], [97, 3, 120]])\n", (3619, 3727), False, 'import pytest\n'), ((3997, 4259), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""vectors, angles"""', '[([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [90, 90, 90]), ([[1.0, 0.0, 0.0], [-\n 0.45399049973954675, 0.8910065241883679, 0.0], [-0.034899496702500955, \n -0.037369475398893195, 0.9986919181801381]], [91, 92, 117])]'], {}), "('vectors, angles', [([[1, 0, 0], [0, 1, 0], [0, 0, \n 1]], [90, 90, 90]), ([[1.0, 0.0, 0.0], [-0.45399049973954675, \n 0.8910065241883679, 0.0], [-0.034899496702500955, -0.037369475398893195,\n 0.9986919181801381]], [91, 92, 117])])\n", (4020, 4259), False, 'import pytest\n'), ((4894, 5029), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x, y, z"""', '[(None, 1, 0), (1, None, 1), (1, 1, None), (-1, 1, 1), (1, -1, 1), (1, 1, -\n 1), (1, 1, np.NaN)]'], {}), "('x, y, z', [(None, 1, 0), (1, None, 1), (1, 1, None\n ), (-1, 1, 1), (1, -1, 1), (1, 1, -1), (1, 1, np.NaN)])\n", (4917, 5029), False, 'import pytest\n'), ((5425, 5491), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""my_type"""', '[[], (), np.array, np.ndarray]'], {}), "('my_type', [[], (), np.array, np.ndarray])\n", (5448, 5491), False, 'import pytest\n'), ((453, 490), 'numpy.asarray', 'np.asarray', (['spacing'], {'dtype': 'np.float64'}), '(spacing, dtype=np.float64)\n', (463, 490), True, 'import numpy as np\n'), ((509, 534), 'numpy.reshape', 'np.reshape', (['spacing', '(3,)'], {}), '(spacing, (3,))\n', (519, 534), True, 'import numpy as np\n'), ((558, 593), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': 'spacing'}), '(lattice_spacing=spacing)\n', (568, 593), True, 'import mbuild as mb\n'), ((602, 
708), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spacing', 'test_lattice.lattice_spacing'], {'rtol': '(1e-07)', 'atol': '(0)', 'equal_nan': '(True)'}), '(spacing, test_lattice.lattice_spacing, rtol=\n 1e-07, atol=0, equal_nan=True)\n', (628, 708), True, 'import numpy as np\n'), ((955, 990), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': 'spacing'}), '(lattice_spacing=spacing)\n', (965, 990), True, 'import mbuild as mb\n'), ((1750, 1785), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': 'spacing'}), '(lattice_spacing=spacing)\n', (1760, 1785), True, 'import mbuild as mb\n'), ((2467, 2529), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]', 'lattice_vectors': 'vectors'}), '(lattice_spacing=[1, 1, 1], lattice_vectors=vectors)\n', (2477, 2529), True, 'import mbuild as mb\n'), ((4598, 4660), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]', 'lattice_vectors': 'vectors'}), '(lattice_spacing=[1, 1, 1], lattice_vectors=vectors)\n', (4608, 4660), True, 'import mbuild as mb\n'), ((5582, 5619), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]'}), '(lattice_spacing=[1, 1, 1])\n', (5592, 5619), True, 'import mbuild as mb\n'), ((5988, 6025), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]'}), '(lattice_spacing=[1, 1, 1])\n', (5998, 6025), True, 'import mbuild as mb\n'), ((6455, 6513), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]', 'angles': '[90, 90, 90]'}), '(lattice_spacing=[1, 1, 1], angles=[90, 90, 90])\n', (6465, 6513), True, 'import mbuild as mb\n'), ((6624, 6669), 'numpy.asarray', 'np.asarray', (['values_to_check'], {'dtype': 'np.float64'}), '(values_to_check, dtype=np.float64)\n', (6634, 6669), True, 'import numpy as np\n'), ((6712, 6748), 'numpy.split', 'np.split', (['values_to_check', '(8)'], {'axis': '(0)'}), '(values_to_check, 8, axis=0)\n', (6720, 6748), True, 'import numpy as np\n'), ((6994, 7092), 'mbuild.Lattice', 
'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]', 'angles': '[90, 90, 90]', 'lattice_points': "{'A': [[0, 0, 0]]}"}), "(lattice_spacing=[1, 1, 1], angles=[90, 90, 90], lattice_points={\n 'A': [[0, 0, 0]]})\n", (7004, 7092), True, 'import mbuild as mb\n'), ((7659, 7761), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[0.5, 0.5, 1]', 'angles': '[90, 90, 120]', 'lattice_points': "{'A': [[0, 0, 0]]}"}), "(lattice_spacing=[0.5, 0.5, 1], angles=[90, 90, 120],\n lattice_points={'A': [[0, 0, 0]]})\n", (7669, 7761), True, 'import mbuild as mb\n'), ((8316, 8414), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]', 'angles': '[90, 90, 90]', 'lattice_points': "{'A': [[0, 0, 0]]}"}), "(lattice_spacing=[1, 1, 1], angles=[90, 90, 90], lattice_points={\n 'A': [[0, 0, 0]]})\n", (8326, 8414), True, 'import mbuild as mb\n'), ((8704, 8762), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[90, 90, 90]', 'mylat.box.angles'], {}), '([90, 90, 90], mylat.box.angles)\n', (8730, 8762), True, 'import numpy as np\n'), ((8771, 8834), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected_lengths', 'mylat.box.lengths'], {}), '(expected_lengths, mylat.box.lengths)\n', (8797, 8834), True, 'import numpy as np\n'), ((8898, 9000), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[0.5, 0.5, 1]', 'angles': '[90, 90, 120]', 'lattice_points': "{'A': [[0, 0, 0]]}"}), "(lattice_spacing=[0.5, 0.5, 1], angles=[90, 90, 120],\n lattice_points={'A': [[0, 0, 0]]})\n", (8908, 9000), True, 'import mbuild as mb\n'), ((9291, 9350), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[90, 90, 120]', 'mylat.box.angles'], {}), '([90, 90, 120], mylat.box.angles)\n', (9317, 9350), True, 'import numpy as np\n'), ((9359, 9422), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected_lengths', 'mylat.box.lengths'], {}), '(expected_lengths, mylat.box.lengths)\n', (9385, 9422), True, 'import numpy as np\n'), ((1381, 
1406), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1394, 1406), False, 'import pytest\n'), ((1420, 1455), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': 'spacing'}), '(lattice_spacing=spacing)\n', (1430, 1455), True, 'import mbuild as mb\n'), ((2123, 2148), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2136, 2148), False, 'import pytest\n'), ((2162, 2224), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]', 'lattice_vectors': 'vectors'}), '(lattice_spacing=[1, 1, 1], lattice_vectors=vectors)\n', (2172, 2224), True, 'import mbuild as mb\n'), ((1954, 1986), 'numpy.identity', 'np.identity', (['(4)'], {'dtype': 'np.float64'}), '(4, dtype=np.float64)\n', (1965, 1986), True, 'import numpy as np\n'), ((2692, 2717), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2705, 2717), False, 'import pytest\n'), ((2731, 2804), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': 'space', 'lattice_vectors': 'vectors', 'angles': 'angles'}), '(lattice_spacing=space, lattice_vectors=vectors, angles=angles)\n', (2741, 2804), True, 'import mbuild as mb\n'), ((2984, 3008), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2997, 3008), False, 'import pytest\n'), ((3022, 3084), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]', 'lattice_points': 'the_type'}), '(lattice_spacing=[1, 1, 1], lattice_points=the_type)\n', (3032, 3084), True, 'import mbuild as mb\n'), ((3487, 3512), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3500, 3512), False, 'import pytest\n'), ((3526, 3589), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]', 'lattice_points': 'incorrect'}), '(lattice_spacing=[1, 1, 1], lattice_points=incorrect)\n', (3536, 3589), True, 'import mbuild as mb\n'), ((3899, 3924), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3912, 3924), False, 'import 
pytest\n'), ((3938, 3990), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]', 'angles': 'angles'}), '(lattice_spacing=[1, 1, 1], angles=angles)\n', (3948, 3990), True, 'import mbuild as mb\n'), ((4763, 4799), 'numpy.asarray', 'np.asarray', (['angles'], {'dtype': 'np.float64'}), '(angles, dtype=np.float64)\n', (4773, 4799), True, 'import numpy as np\n'), ((5211, 5236), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5224, 5236), False, 'import pytest\n'), ((5265, 5302), 'mbuild.Lattice', 'mb.Lattice', ([], {'lattice_spacing': '[1, 1, 1]'}), '(lattice_spacing=[1, 1, 1])\n', (5275, 5302), True, 'import mbuild as mb\n'), ((5633, 5657), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5646, 5657), False, 'import pytest\n'), ((6084, 6108), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6097, 6108), False, 'import pytest\n'), ((5811, 5863), 'mbuild.Box', 'mb.Box', ([], {'lengths': '[1, 1, 1]', 'angles': '[90.0, 90.0, 90.0]'}), '(lengths=[1, 1, 1], angles=[90.0, 90.0, 90.0])\n', (5817, 5863), True, 'import mbuild as mb\n'), ((6774, 6811), 'numpy.split', 'np.split', (['new_compound.xyz', '(8)'], {'axis': '(0)'}), '(new_compound.xyz, 8, axis=0)\n', (6782, 6811), True, 'import numpy as np\n'), ((7559, 7589), 'numpy.asarray', 'np.asarray', (['[90.0, 90.0, 90.0]'], {}), '([90.0, 90.0, 90.0])\n', (7569, 7589), True, 'import numpy as np\n'), ((8227, 8258), 'numpy.asarray', 'np.asarray', (['[90.0, 90.0, 120.0]'], {}), '([90.0, 90.0, 120.0])\n', (8237, 8258), True, 'import numpy as np\n'), ((6832, 6855), 'numpy.allclose', 'np.allclose', (['pos1', 'pos2'], {}), '(pos1, pos2)\n', (6843, 6855), True, 'import numpy as np\n'), ((7210, 7223), 'mbuild.Compound', 'mb.Compound', ([], {}), '()\n', (7221, 7223), True, 'import mbuild as mb\n'), ((7879, 7892), 'mbuild.Compound', 'mb.Compound', ([], {}), '()\n', (7890, 7892), True, 'import mbuild as mb\n'), ((5375, 5388), 'mbuild.Compound', 
'mb.Compound', ([], {}), '()\n', (5386, 5388), True, 'import mbuild as mb\n')] |
""" Training methods for the Naive Bayes model on the Web of Science dataset.
"""
import os
from pathlib import Path
from joblib import dump
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import utils
from constants.transformers import TransformerModel
from streams.stream_data import WOSStream
from utils.metrics import get_metrics
import warnings
# Silence noisy third-party warnings (sklearn / transformer libraries) for the run.
warnings.filterwarnings("ignore")

# Directory where trained models are persisted: <repo root>/assets/models.
# makedirs(..., exist_ok=True) is the race-free equivalent of the previous
# isdir-then-makedirs check.
PATH = os.path.join(Path(__file__).parents[1], "assets/models")
os.makedirs(PATH, exist_ok=True)
def train_nb_wos_holdout(
    epochs=1,
    batch_size=utils.BATCH_SIZE,
    transform=True,
    transformer_model=TransformerModel.BERT,
    print_every=10,
    device="cpu",
):
    """Fit a Gaussian Naive Bayes classifier on the Web of Science stream.

    Args:
        epochs (int): number of passes over the stream
        batch_size (int): samples consumed per partial fit
        transform (bool): whether to run the transformer on the raw text
        transformer_model (TransformerModel): embedding model to use
        print_every (int): how often (in batches) to log statistics
        device (string): device used by the transformer (cpu or gpu)

    Returns:
        tuple: (train accuracies, test metric dicts), one entry per log step
    """
    # Stream of transformer-embedded WOS documents.
    stream = WOSStream(
        transformer_model=transformer_model, transform=transform, device=device
    )
    stream.prepare_for_use()

    # Incremental Naive Bayes model and the directory it is saved into.
    model = GaussianNB()
    model_name = "naive-bayes-wos-{}-ver-{}-holdout".format(
        transformer_model.name, stream.version
    )
    model_path = os.path.join(PATH, model_name)
    os.makedirs(model_path, exist_ok=True)

    label_space = np.arange(stream.n_classes)

    print("Starting training...")
    train_accuracies, test_metrics_list = [], []
    for epoch in range(epochs):
        acc_sum = 0.0  # running train accuracy since the last log step
        batch_idx = 0
        # Consume only full batches; a trailing partial batch ends the epoch.
        while stream.has_more_samples() and stream.n_remaining_samples() >= batch_size:
            x_, y = stream.next_sample(batch_size)
            # Drop the sequence lengths (unused by NB) and max-pool the
            # token embeddings along axis 1.
            x = np.amax(x_[0].numpy(), axis=1)
            model.partial_fit(x, y, classes=label_space)
            acc_sum += accuracy_score(y, model.predict(x))

            if batch_idx % print_every == print_every - 1:
                # Holdout evaluation with the same max-pooling as training.
                x_test_, y_test = stream.get_test_set()
                y_pred = model.predict(np.amax(x_test_[0].numpy(), axis=1))
                test_metrics = get_metrics(y_pred, y_test, no_labels=stream.n_classes)
                accuracy = acc_sum / print_every
                print(
                    "[{}/{} epochs, {}/{} batches] train accuracy: {:.4f}, "
                    "test (accuracy: {:.4f}, precision: {:.4f}, "
                    "recall: {:.4f}, f1: {:.4f})".format(
                        epoch + 1,
                        epochs,
                        batch_idx + 1,
                        stream.n_samples // batch_size + 1,
                        accuracy,
                        test_metrics["accuracy"],
                        test_metrics["precision"],
                        test_metrics["recall"],
                        test_metrics["macro_f1"],
                    )
                )
                train_accuracies.append(accuracy)
                test_metrics_list.append(test_metrics)
                acc_sum = 0
            batch_idx += 1
        stream.restart()

    # Persist the fitted model next to its name-encoded configuration.
    print("Finished training. Saving model..")
    dump(model, os.path.join(model_path, "model.joblib"))
    print("Done!")
    return train_accuracies, test_metrics_list
if __name__ == "__main__":
    # Smoke-test run: one epoch with transform disabled, logging every batch.
    _ = train_nb_wos_holdout(epochs=1, transform=False, print_every=1, device="cpu")
| [
"sklearn.naive_bayes.GaussianNB",
"os.makedirs",
"warnings.filterwarnings",
"os.path.isdir",
"numpy.amax",
"streams.stream_data.WOSStream",
"pathlib.Path",
"numpy.arange",
"utils.metrics.get_metrics",
"os.path.join"
] | [((408, 441), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (431, 441), False, 'import warnings\n'), ((515, 534), 'os.path.isdir', 'os.path.isdir', (['PATH'], {}), '(PATH)\n', (528, 534), False, 'import os\n'), ((540, 557), 'os.makedirs', 'os.makedirs', (['PATH'], {}), '(PATH)\n', (551, 557), False, 'import os\n'), ((1213, 1300), 'streams.stream_data.WOSStream', 'WOSStream', ([], {'transformer_model': 'transformer_model', 'transform': 'transform', 'device': 'device'}), '(transformer_model=transformer_model, transform=transform, device=\n device)\n', (1222, 1300), False, 'from streams.stream_data import WOSStream\n'), ((1371, 1383), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1381, 1383), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((1515, 1545), 'os.path.join', 'os.path.join', (['PATH', 'model_name'], {}), '(PATH, model_name)\n', (1527, 1545), False, 'import os\n'), ((1550, 1588), 'os.makedirs', 'os.makedirs', (['model_path'], {'exist_ok': '(True)'}), '(model_path, exist_ok=True)\n', (1561, 1588), False, 'import os\n'), ((1606, 1633), 'numpy.arange', 'np.arange', (['stream.n_classes'], {}), '(stream.n_classes)\n', (1615, 1633), True, 'import numpy as np\n'), ((3980, 4020), 'os.path.join', 'os.path.join', (['model_path', '"""model.joblib"""'], {}), "(model_path, 'model.joblib')\n", (3992, 4020), False, 'import os\n'), ((464, 478), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (468, 478), False, 'from pathlib import Path\n'), ((2292, 2310), 'numpy.amax', 'np.amax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (2299, 2310), True, 'import numpy as np\n'), ((2772, 2795), 'numpy.amax', 'np.amax', (['x_test'], {'axis': '(1)'}), '(x_test, axis=1)\n', (2779, 2795), True, 'import numpy as np\n'), ((2874, 2929), 'utils.metrics.get_metrics', 'get_metrics', (['y_pred', 'y_test'], {'no_labels': 'stream.n_classes'}), '(y_pred, y_test, no_labels=stream.n_classes)\n', (2885, 2929), 
False, 'from utils.metrics import get_metrics\n')] |
import math
import numpy as np
import scipy.misc
import tensorflow as tf
class Container(object):
    """Bare-bones namespace: exposes each dictionary entry as an attribute."""
    def __init__(self, dictionary):
        vars(self).update(dictionary)
def _edge_filter():
"""Returns a 3x3 edge-detection functionally filter similar to Sobel"""
# See https://en.wikipedia.org/w/index.php?title=Talk:Sobel_operator&oldid=737772121#Scharr_not_the_ultimate_solution
a = .5*(1-math.sqrt(.5))
b = math.sqrt(.5)
# Horizontal filter as a 4-D tensor suitable for tf.nn.conv2d()
h = np.zeros([3,3,3,3])
for d in range(3):
# I.e. each RGB channel is processed independently
h[0,:,d,d] = [ a, b, a]
h[2,:,d,d] = [-a, -b, -a]
# Vertical filter
v = np.transpose(h, axes=[1, 0, 2, 3])
return h, v
def total_variation_loss(images, name='total_variation_loss'):
    """Loss that penalizes high-frequency features in the image.

    Similar to the classic 'total variation loss', but computed with a
    Scharr-like high-pass filter in each spatial direction."""
    filter_h, filter_v = _edge_filter()
    # Edge responses in each direction, combined as a squared (L2) magnitude.
    hor_edges = tf.nn.conv2d(images, filter_h, [1, 1, 1, 1], padding='VALID', name='horizontal_edges')
    ver_edges = tf.nn.conv2d(images, filter_v, [1, 1, 1, 1], padding='VALID', name='vertical_edges')
    l2_edges = tf.add(hor_edges*hor_edges, ver_edges*ver_edges, name='L2_edges')
    return tf.reduce_mean(l2_edges, name=name)
def distort_image(image):
    """Perform random distortions to the given 4D image and return result"""
    # Switch to 3D as that's what these operations require
    # NOTE(review): tf.unpack/tf.pack are the pre-TF-1.0 names of
    # tf.unstack/tf.stack -- this code targets an old TensorFlow release.
    slices = tf.unpack(image)
    output = []
    # Perform pixel-wise distortions
    for image in slices:
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_saturation(image, .2, 2.)
        # Additive Gaussian pixel noise.
        image += tf.truncated_normal(image.get_shape(), stddev=.05)
        image = tf.image.random_contrast(image, .85, 1.15)
        image = tf.image.random_brightness(image, .3)
        output.append(image)
    # Go back to 4D
    image = tf.pack(output)
    return image
def downscale(images, K):
    """Differentiable image downscaling by a factor of K"""
    # KxK box (mean) filter applied to each RGB channel independently:
    # only the diagonal in/out channel entries are non-zero.
    arr = np.zeros([K, K, 3, 3])
    for channel in range(3):
        arr[:, :, channel, channel] = 1.0/(K*K)
    box_filter = tf.constant(arr, dtype=tf.float32)
    # Stride K in both spatial dimensions performs the actual downsampling.
    return tf.nn.conv2d(images, box_filter,
                        strides=[1, K, K, 1],
                        padding='SAME')
def upscale(images, K):
    """Differentiable image upscaling by a factor of K"""
    # Target size: each spatial dimension of the input batch scaled by K.
    target_size = [K * int(dim) for dim in images.get_shape()[1:3]]
    return tf.image.resize_nearest_neighbor(images, target_size)
def save_image(image, filename, verbose=True):
    """Saves a (height,width,3) numpy array into a file.

    Args:
        image: array whose values are clipped/scaled to [0, 1] (cmin/cmax).
        filename: destination path.
        verbose: when True, print a confirmation line after saving.
    """
    # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2; this
    # code targets an older SciPy release.
    scipy.misc.toimage(image, cmin=0., cmax=1.).save(filename)
    # Fix: the `verbose` flag was previously ignored -- the message printed
    # unconditionally. Default behavior (verbose=True) is unchanged.
    if verbose:
        print("    Saved %s" % (filename,))
| [
"tensorflow.unpack",
"math.sqrt",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.image.random_contrast",
"tensorflow.image.random_saturation",
"numpy.transpose",
"numpy.zeros",
"tensorflow.add",
"tensorflow.reduce_mean",
"tensorflow.constant",
"tensorflow.image.random_flip_left_right",
... | [((465, 479), 'math.sqrt', 'math.sqrt', (['(0.5)'], {}), '(0.5)\n', (474, 479), False, 'import math\n'), ((556, 578), 'numpy.zeros', 'np.zeros', (['[3, 3, 3, 3]'], {}), '([3, 3, 3, 3])\n', (564, 578), True, 'import numpy as np\n'), ((758, 792), 'numpy.transpose', 'np.transpose', (['h'], {'axes': '[1, 0, 2, 3]'}), '(h, axes=[1, 0, 2, 3])\n', (770, 792), True, 'import numpy as np\n'), ((1125, 1211), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['images', 'filter_h', 'strides'], {'padding': '"""VALID"""', 'name': '"""horizontal_edges"""'}), "(images, filter_h, strides, padding='VALID', name=\n 'horizontal_edges')\n", (1137, 1211), True, 'import tensorflow as tf\n'), ((1223, 1302), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['images', 'filter_v', 'strides'], {'padding': '"""VALID"""', 'name': '"""vertical_edges"""'}), "(images, filter_v, strides, padding='VALID', name='vertical_edges')\n", (1235, 1302), True, 'import tensorflow as tf\n'), ((1320, 1389), 'tensorflow.add', 'tf.add', (['(hor_edges * hor_edges)', '(ver_edges * ver_edges)'], {'name': '"""L2_edges"""'}), "(hor_edges * hor_edges, ver_edges * ver_edges, name='L2_edges')\n", (1326, 1389), True, 'import tensorflow as tf\n'), ((1414, 1449), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['l2_edges'], {'name': 'name'}), '(l2_edges, name=name)\n', (1428, 1449), True, 'import tensorflow as tf\n'), ((1660, 1676), 'tensorflow.unpack', 'tf.unpack', (['image'], {}), '(image)\n', (1669, 1676), True, 'import tensorflow as tf\n'), ((2127, 2142), 'tensorflow.pack', 'tf.pack', (['output'], {}), '(output)\n', (2134, 2142), True, 'import tensorflow as tf\n'), ((2262, 2284), 'numpy.zeros', 'np.zeros', (['[K, K, 3, 3]'], {}), '([K, K, 3, 3])\n', (2270, 2284), True, 'import numpy as np\n'), ((2394, 2428), 'tensorflow.constant', 'tf.constant', (['arr'], {'dtype': 'tf.float32'}), '(arr, dtype=tf.float32)\n', (2405, 2428), True, 'import tensorflow as tf\n'), ((2451, 2526), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['images', 
'dowscale_weight'], {'strides': '[1, K, K, 1]', 'padding': '"""SAME"""'}), "(images, dowscale_weight, strides=[1, K, K, 1], padding='SAME')\n", (2463, 2526), True, 'import tensorflow as tf\n'), ((2788, 2834), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['images', 'size'], {}), '(images, size)\n', (2820, 2834), True, 'import tensorflow as tf\n'), ((1773, 1811), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (1804, 1811), True, 'import tensorflow as tf\n'), ((1829, 1872), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['image', '(0.2)', '(2.0)'], {}), '(image, 0.2, 2.0)\n', (1855, 1872), True, 'import tensorflow as tf\n'), ((1956, 1999), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image', '(0.85)', '(1.15)'], {}), '(image, 0.85, 1.15)\n', (1980, 1999), True, 'import tensorflow as tf\n'), ((2016, 2054), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image', '(0.3)'], {}), '(image, 0.3)\n', (2042, 2054), True, 'import tensorflow as tf\n'), ((442, 456), 'math.sqrt', 'math.sqrt', (['(0.5)'], {}), '(0.5)\n', (451, 456), False, 'import math\n')] |
import sdf
import matplotlib.pyplot as plt
import numpy as np
# Global plotting style: seaborn-paper theme with a larger base font size.
plt.style.use('seaborn-paper')
plt.rcParams['font.size'] = 24
def cm2inch(value):
    """Convert a length in centimetres to inches (for matplotlib figsize)."""
    return value/2.54
# Number of SDF dumps read per run.
Num = 26
# Per-dump accumulators: reported total particle energy (TeS*), summed
# particle kinetic energy (part*) and photon energy (pho*) for two runs.
# NOTE(review): TeS3 is never filled below -- dead variable?
TeS1 = np.ones(Num)
TeS2 = np.ones(Num)
TeS3 = np.ones(Num)
part1 = np.ones(Num)
part2 = np.ones(Num)
pho1 = np.ones(Num)
pho2 = np.ones(Num)
# Time axis, filled inside the read loops below.
time = np.ones(Num)
# Root directory of the EPOCH 2D output.
# NOTE(review): `file` shadows the Python builtin of the same name.
file = '/Users/yaowp/code/merge/epoch2d/'
# Electron rest mass [kg] and speed of light [m/s].
me = 9.1e-31
c = 3e8
# print(e0)
# Run 1 ("w/o merge"): read the Data0 dumps and build up the energy budget.
folder = 'Data0'
for i in range(Num):
    ii = i
    # Dump index mapped to time; presumably in units of 1/omega_pe
    # (matches the x-axis label of the figure below) -- TODO confirm.
    time[i] = ii/10
    # Main dump: total particle energy reported by the code.
    fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
    datafile = sdf.read(fname)
    TeS1[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data
    # Particle-subset dump (prefix '6'): per-macroparticle gamma and weight.
    fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
    datafile = sdf.read(fname)
    Gam1 = datafile.Particles_Gamma_subset_part_ele.data
    Wgt1 = datafile.Particles_Weight_subset_part_ele.data
    Gam2 = datafile.Particles_Gamma_subset_part_ion.data
    Wgt2 = datafile.Particles_Weight_subset_part_ion.data
    Gam3 = datafile.Particles_Gamma_subset_part_ele0.data
    Wgt3 = datafile.Particles_Weight_subset_part_ele0.data
    Gam4 = datafile.Particles_Gamma_subset_part_ion0.data
    Wgt4 = datafile.Particles_Weight_subset_part_ion0.data
    # QED species and photons are only read from the second dump onwards
    # (presumably absent at t=0); zero contributions otherwise.
    Gam5 = 0
    Wgt5 = 0
    Gam6 = 0
    Wgt6 = 0
    Px7 = 0
    Py7 = 0
    Pz7 = 0
    Wgt7 = 0
    if i>=1:
        Gam5 = datafile.Particles_Gamma_subset_part_eleq.data
        Wgt5 = datafile.Particles_Weight_subset_part_eleq.data
        Gam6 = datafile.Particles_Gamma_subset_part_ionq.data
        Wgt6 = datafile.Particles_Weight_subset_part_ionq.data
        Px7 = datafile.Particles_Px_subset_part_pho.data
        Py7 = datafile.Particles_Py_subset_part_pho.data
        Pz7 = datafile.Particles_Pz_subset_part_pho.data
        Wgt7 = datafile.Particles_Weight_subset_part_pho.data
    # Kinetic energy summed over all sampled species: (gamma-1)*me*c^2 per
    # macroparticle, weight-summed.  The trailing factor 10 is presumably a
    # sampling/volume normalisation -- TODO confirm.
    # NOTE(review): the ion species also use the electron mass `me` here --
    # confirm this is intended (normalised units?).
    part1[i] = np.sum((Gam1-1)*me*c*c*Wgt1)*10 \
             + np.sum((Gam2-1)*me*c*c*Wgt2)*10 \
             + np.sum((Gam3-1)*me*c*c*Wgt3)*10 \
             + np.sum((Gam4-1)*me*c*c*Wgt4)*10 \
             + np.sum((Gam5-1)*me*c*c*Wgt5)*10 \
             + np.sum((Gam6-1)*me*c*c*Wgt6)*10
    # Photon energy: |p|*c, weight-summed, with the same factor of 10.
    pho1[i] = np.sum(np.sqrt(Px7**2+Py7**2+Pz7**2)*c*Wgt7)*10
# Leftover variant kept for reference (reads field + particle energy).
# folder = 'Data1'
# for i in range(Num):
# 	time[i] = i*10
# 	fname = file+folder+'/'+str(i).zfill(4)+'.sdf'
# 	datafile = sdf.read(fname)
# 	TeS1[i] = datafile.Total_Field_Energy_in_Simulation__J_.data+datafile.Total_Particle_Energy_in_Simulation__J_.data
# Run 2 ("w merge"): identical bookkeeping to the Data0 loop above, but for
# the particle-merging run in folder 'Data' (fills TeS2/part2/pho2).
folder = 'Data'
for i in range(Num):
    ii = i
    time[i] = ii/10
    fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
    datafile = sdf.read(fname)
    TeS2[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data
    # Particle-subset dump (prefix '6'): per-macroparticle gamma and weight.
    fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
    datafile = sdf.read(fname)
    Gam1 = datafile.Particles_Gamma_subset_part_ele.data
    Wgt1 = datafile.Particles_Weight_subset_part_ele.data
    Gam2 = datafile.Particles_Gamma_subset_part_ion.data
    Wgt2 = datafile.Particles_Weight_subset_part_ion.data
    Gam3 = datafile.Particles_Gamma_subset_part_ele0.data
    Wgt3 = datafile.Particles_Weight_subset_part_ele0.data
    Gam4 = datafile.Particles_Gamma_subset_part_ion0.data
    Wgt4 = datafile.Particles_Weight_subset_part_ion0.data
    # QED species and photons only read from the second dump onwards.
    Gam5 = 0
    Wgt5 = 0
    Gam6 = 0
    Wgt6 = 0
    Px7 = 0
    Py7 = 0
    Pz7 = 0
    Wgt7 = 0
    if i>=1:
        Gam5 = datafile.Particles_Gamma_subset_part_eleq.data
        Wgt5 = datafile.Particles_Weight_subset_part_eleq.data
        Gam6 = datafile.Particles_Gamma_subset_part_ionq.data
        Wgt6 = datafile.Particles_Weight_subset_part_ionq.data
        Px7 = datafile.Particles_Px_subset_part_pho.data
        Py7 = datafile.Particles_Py_subset_part_pho.data
        Pz7 = datafile.Particles_Pz_subset_part_pho.data
        Wgt7 = datafile.Particles_Weight_subset_part_pho.data
    # Same energy sums as the Data0 loop; see the notes there about the
    # factor 10 and the electron mass used for ions.
    part2[i] = np.sum((Gam1-1)*me*c*c*Wgt1)*10 \
             + np.sum((Gam2-1)*me*c*c*Wgt2)*10 \
             + np.sum((Gam3-1)*me*c*c*Wgt3)*10 \
             + np.sum((Gam4-1)*me*c*c*Wgt4)*10 \
             + np.sum((Gam5-1)*me*c*c*Wgt5)*10 \
             + np.sum((Gam6-1)*me*c*c*Wgt6)*10
    pho2[i] = np.sum(np.sqrt(Px7**2+Py7**2+Pz7**2)*c*Wgt7)*10
# print('TeS1 = ',TeS1)
# Figure: energy bookkeeping of both runs. Solid lines are the reference run
# ("w/o merge"); open circles are the particle-merging run ("w merge").
plt.figure(figsize=(cm2inch(8.5), cm2inch(6)))
ax = plt.subplot()
ax.plot(time, TeS1,'k-', lw=1, label='w/o merge')
ax.plot(time, part1,'b-', lw=1, label='w/o merge')
ax.plot(time, pho1,'r-', lw=1, label='w/o merge')
ax.plot(time, TeS2,'ko', lw=1, markersize=3, markeredgewidth=1, markeredgecolor='k', markerfacecolor='None',label='w merge')
ax.plot(time, part2,'bo', lw=1, markersize=3, markeredgewidth=1, markeredgecolor='b', markerfacecolor='None',label='w merge')
ax.plot(time, pho2,'ro', lw=1, markersize=3, markeredgewidth=1, markeredgecolor='r', markerfacecolor='None',label='w merge')
plt.xlim(0,2.5)
plt.ylim(0,2.5e3)
# Fix: raw string -- '\o' is an invalid escape sequence in a normal string
# literal (SyntaxWarning in Python >= 3.12); the rendered label is unchanged.
plt.xlabel(r'time($\omega_{pe}^{-1}$)')
plt.ylabel('Energy[$J$]')
plt.legend(loc='best', numpoints=1, fancybox=True)
# print(TeS1[0])
# plt.grid(b=True,which='major',axis='both')
# plt.show()
# plt.title('energy conservation',fontsize=32,fontstyle='normal')
plt.savefig('EneCons.pdf',bbox_inches='tight') # n means normalized
plt.close()
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"numpy.sum",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"sdf.read",
"numpy.ones",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"... | [((63, 93), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (76, 93), True, 'import matplotlib.pyplot as plt\n'), ((186, 198), 'numpy.ones', 'np.ones', (['Num'], {}), '(Num)\n', (193, 198), True, 'import numpy as np\n'), ((206, 218), 'numpy.ones', 'np.ones', (['Num'], {}), '(Num)\n', (213, 218), True, 'import numpy as np\n'), ((226, 238), 'numpy.ones', 'np.ones', (['Num'], {}), '(Num)\n', (233, 238), True, 'import numpy as np\n'), ((247, 259), 'numpy.ones', 'np.ones', (['Num'], {}), '(Num)\n', (254, 259), True, 'import numpy as np\n'), ((268, 280), 'numpy.ones', 'np.ones', (['Num'], {}), '(Num)\n', (275, 280), True, 'import numpy as np\n'), ((288, 300), 'numpy.ones', 'np.ones', (['Num'], {}), '(Num)\n', (295, 300), True, 'import numpy as np\n'), ((308, 320), 'numpy.ones', 'np.ones', (['Num'], {}), '(Num)\n', (315, 320), True, 'import numpy as np\n'), ((330, 342), 'numpy.ones', 'np.ones', (['Num'], {}), '(Num)\n', (337, 342), True, 'import numpy as np\n'), ((3888, 3901), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (3899, 3901), True, 'import matplotlib.pyplot as plt\n'), ((4436, 4452), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(2.5)'], {}), '(0, 2.5)\n', (4444, 4452), True, 'import matplotlib.pyplot as plt\n'), ((4452, 4471), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(2500.0)'], {}), '(0, 2500.0)\n', (4460, 4471), True, 'import matplotlib.pyplot as plt\n'), ((4471, 4510), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time($\\\\omega_{pe}^{-1}$)"""'], {}), "('time($\\\\omega_{pe}^{-1}$)')\n", (4481, 4510), True, 'import matplotlib.pyplot as plt\n'), ((4510, 4535), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Energy[$J$]"""'], {}), "('Energy[$J$]')\n", (4520, 4535), True, 'import matplotlib.pyplot as plt\n'), ((4536, 4586), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'numpoints': '(1)', 'fancybox': '(True)'}), "(loc='best', numpoints=1, 
fancybox=True)\n", (4546, 4586), True, 'import matplotlib.pyplot as plt\n'), ((4728, 4775), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""EneCons.pdf"""'], {'bbox_inches': '"""tight"""'}), "('EneCons.pdf', bbox_inches='tight')\n", (4739, 4775), True, 'import matplotlib.pyplot as plt\n'), ((4797, 4808), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4806, 4808), True, 'import matplotlib.pyplot as plt\n'), ((545, 560), 'sdf.read', 'sdf.read', (['fname'], {}), '(fname)\n', (553, 560), False, 'import sdf\n'), ((689, 704), 'sdf.read', 'sdf.read', (['fname'], {}), '(fname)\n', (697, 704), False, 'import sdf\n'), ((2371, 2386), 'sdf.read', 'sdf.read', (['fname'], {}), '(fname)\n', (2379, 2386), False, 'import sdf\n'), ((2517, 2532), 'sdf.read', 'sdf.read', (['fname'], {}), '(fname)\n', (2525, 2532), False, 'import sdf\n'), ((1893, 1931), 'numpy.sum', 'np.sum', (['((Gam6 - 1) * me * c * c * Wgt6)'], {}), '((Gam6 - 1) * me * c * c * Wgt6)\n', (1899, 1931), True, 'import numpy as np\n'), ((3718, 3756), 'numpy.sum', 'np.sum', (['((Gam6 - 1) * me * c * c * Wgt6)'], {}), '((Gam6 - 1) * me * c * c * Wgt6)\n', (3724, 3756), True, 'import numpy as np\n'), ((1853, 1891), 'numpy.sum', 'np.sum', (['((Gam5 - 1) * me * c * c * Wgt5)'], {}), '((Gam5 - 1) * me * c * c * Wgt5)\n', (1859, 1891), True, 'import numpy as np\n'), ((3678, 3716), 'numpy.sum', 'np.sum', (['((Gam5 - 1) * me * c * c * Wgt5)'], {}), '((Gam5 - 1) * me * c * c * Wgt5)\n', (3684, 3716), True, 'import numpy as np\n'), ((1813, 1851), 'numpy.sum', 'np.sum', (['((Gam4 - 1) * me * c * c * Wgt4)'], {}), '((Gam4 - 1) * me * c * c * Wgt4)\n', (1819, 1851), True, 'import numpy as np\n'), ((1944, 1983), 'numpy.sqrt', 'np.sqrt', (['(Px7 ** 2 + Py7 ** 2 + Pz7 ** 2)'], {}), '(Px7 ** 2 + Py7 ** 2 + Pz7 ** 2)\n', (1951, 1983), True, 'import numpy as np\n'), ((3638, 3676), 'numpy.sum', 'np.sum', (['((Gam4 - 1) * me * c * c * Wgt4)'], {}), '((Gam4 - 1) * me * c * c * Wgt4)\n', (3644, 3676), True, 'import numpy as 
np\n'), ((3769, 3808), 'numpy.sqrt', 'np.sqrt', (['(Px7 ** 2 + Py7 ** 2 + Pz7 ** 2)'], {}), '(Px7 ** 2 + Py7 ** 2 + Pz7 ** 2)\n', (3776, 3808), True, 'import numpy as np\n'), ((1773, 1811), 'numpy.sum', 'np.sum', (['((Gam3 - 1) * me * c * c * Wgt3)'], {}), '((Gam3 - 1) * me * c * c * Wgt3)\n', (1779, 1811), True, 'import numpy as np\n'), ((3598, 3636), 'numpy.sum', 'np.sum', (['((Gam3 - 1) * me * c * c * Wgt3)'], {}), '((Gam3 - 1) * me * c * c * Wgt3)\n', (3604, 3636), True, 'import numpy as np\n'), ((1690, 1728), 'numpy.sum', 'np.sum', (['((Gam1 - 1) * me * c * c * Wgt1)'], {}), '((Gam1 - 1) * me * c * c * Wgt1)\n', (1696, 1728), True, 'import numpy as np\n'), ((1733, 1771), 'numpy.sum', 'np.sum', (['((Gam2 - 1) * me * c * c * Wgt2)'], {}), '((Gam2 - 1) * me * c * c * Wgt2)\n', (1739, 1771), True, 'import numpy as np\n'), ((3515, 3553), 'numpy.sum', 'np.sum', (['((Gam1 - 1) * me * c * c * Wgt1)'], {}), '((Gam1 - 1) * me * c * c * Wgt1)\n', (3521, 3553), True, 'import numpy as np\n'), ((3558, 3596), 'numpy.sum', 'np.sum', (['((Gam2 - 1) * me * c * c * Wgt2)'], {}), '((Gam2 - 1) * me * c * c * Wgt2)\n', (3564, 3596), True, 'import numpy as np\n')] |
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
import torchvision
import time
import numpy as np
import progressbar
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
# Pre-processing: resize each board-square crop to 64x64, convert to a
# tensor, and map channel values from [0, 1] to [-1, 1] via (x - 0.5) / 0.5.
transform = transforms.Compose([
    transforms.Resize(64),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
"""
Sampling data for debugging
"""
# Training
# NOTE(review): train_sampler is built but the DataLoader below keeps its
# `sampler=` argument commented out, so the whole dataset is used.
n_training_samples = 1000
train_sampler = SubsetRandomSampler(np.arange(n_training_samples, dtype=np.int64))
"""
Loading the data
"""
# Train Data
train_set = torchvision.datasets.ImageFolder(root="./data/augmented/train", transform=transform)
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=4,
                                           num_workers=2,
                                           shuffle=True,
                                           # sampler=train_sampler,
                                           drop_last=True,
                                           )
# Validation Data
val_set = torchvision.datasets.ImageFolder(root="./data/augmented/validation", transform=transform)
val_loader = torch.utils.data.DataLoader(val_set,
                                         batch_size=4,
                                         num_workers=2,
                                         shuffle=True,
                                         drop_last=True,
                                         )
'''
Defining classes
bb = Black Bishop
bk = Black King
bn = Black Knight
bp = Black Pawn
bq = Black Queen
br = Black Rook
'''
# 13 classes: 6 black pieces, the empty square, 6 white pieces (w* mirrors b*).
classes = ("bb", "bk", "bn", "bp", "bq", "br", "empty", "wb", "wk", "wn", "wp", "wq", "wr")
class ChessNet(nn.Module):
    """CNN classifier mapping 64x64 RGB board-square crops to 13 piece classes."""

    def __init__(self):
        super().__init__()
        # Three conv stages; each is followed by a 2x2 max-pool in forward().
        self.conv1 = nn.Conv2d(3, 8, kernel_size=5)
        self.conv2 = nn.Conv2d(8, 20, kernel_size=5)
        self.conv3 = nn.Conv2d(20, 50, kernel_size=5)
        self.dropout1 = nn.Dropout()
        self.dropout2 = nn.Dropout()
        # Classifier head: flattened 4x4x50 feature maps -> 13 class logits.
        self.fc1 = nn.Linear(4 * 4 * 50, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 13)

    def forward(self, x):
        """Return unnormalised class logits for a batch of 3x64x64 images."""
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(self.dropout1(F.relu(self.conv2(x))), 2)
        x = F.max_pool2d(self.dropout2(F.relu(self.conv3(x))), 2)
        x = x.view(-1, 4 * 4 * 50)  # flatten spatial maps to one vector
        return self.fc3(F.relu(self.fc2(F.relu(self.fc1(x)))))
def train(model, optimizer, criterion):
    """Run one training epoch over the global train_loader with a progress bar."""
    model.train()
    running_loss = 0.0
    with progressbar.ProgressBar(max_value=len(train_loader)) as bar:
        for batch_idx, (data, target) in enumerate(train_loader):
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            # Standard step: clear grads, forward, loss, backward, update.
            optimizer.zero_grad()
            loss = criterion(model(data), target)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            bar.update(batch_idx)
            # Report and reset the running mean loss every 2000 batches.
            if batch_idx % 2000 == 1999:
                print(" => Loss:", running_loss / 2000)
                running_loss = 0.0
def validate(model, epoch=0):
    """Evaluate on the global val_loader; print overall and per-class accuracy.

    Returns the overall accuracy as a float in [0, 1].
    """
    model.eval()
    correct = 0
    total = 0
    # Per-class tallies, indexed like the global `classes` tuple.
    class_correct = list(0. for i in range(len(classes)))
    class_total = list(0. for i in range(len(classes)))
    with torch.no_grad():
        for data, target in val_loader:
            if torch.cuda.is_available():
                data = data.cuda()
                target = target.cuda()
            out = model(data)
            # Predicted class = argmax over the 13 logits.
            _, prediction = torch.max(out.data, 1)
            total += target.size(0)
            if torch.cuda.is_available():
                correct += prediction.eq(target).sum().cpu().item()
            else:
                correct += prediction.eq(target).sum().item()
            # Element-wise hit mask for the per-class tallies.
            # NOTE(review): squeeze() would make c 0-dimensional for a batch
            # of 1 and break c[i]; drop_last=True above presumably avoids it.
            c = (prediction == target).squeeze()
            for i in range(target.size(0)):
                label = target[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1
    print("\nValidation")
    print("###################################")
    print("Epoch", epoch)
    print("Accuracy: %.2f%%" % (100 * correct / total))
    print("###################################\n")
    for i in range(len(classes)):
        try:
            print('Accuracy of %5s : %2d%% [%2d/%2d]' %
                  (classes[i], 100 * class_correct[i] / class_total[i], class_correct[i], class_total[i]))
        except ZeroDivisionError:
            # Class never appeared in the validation set.
            print('No Accuracy for %s' % classes[i])
    return correct / total # Returning accuracy
def save_model(model, epoch):
    """Persist the model's weights as an epoch-tagged checkpoint.

    Fix: the original path ``"./model/chess-net.pt".format(epoch)`` contained
    no ``{}`` placeholder, so ``epoch`` was silently ignored and every
    checkpoint overwrote the same file.  The epoch now lands in the filename.
    """
    torch.save(model.state_dict(), "./model/chess-net-{}.pt".format(epoch))
    print("\n------- Checkpoint saved -------\n")
def main():
    """Train ChessNet for 50 epochs, checkpointing whenever validation accuracy improves."""
    model = ChessNet()
    # Load Pretrained Model
    # model.load_state_dict(torch.load("/content/drive/My Drive/ChessNetData/model/chess-net.pt"))
    # Activate cuda support if available
    if torch.cuda.is_available():
        print("Activating cuda support!")
        model = model.cuda()
    # Defining the loss function
    criterion = nn.CrossEntropyLoss()
    # Defining the optimizer
    optimizer = optim.Adam(model.parameters())
    # optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    # optimizer = optim.ASGD(model.parameters())
    # Start training
    epochs = 50
    best_acc = 0
    start = time.time()
    print("Starting training for %s epochs on %s" % (epochs, time.ctime()))
    for epoch in range(epochs):
        train(model, optimizer, criterion)
        acc = validate(model, epoch)
        # Checkpoint only on improvement, keeping the best model on disk.
        if acc > best_acc:
            best_acc = acc
            save_model(model, epoch)
    end = time.time()
    print("Training of the neuroal network done.")
    print("Time spent:", end - start, "s")
    print("Best-Accuracy: %.2f%%" % (100 * best_acc))


if __name__ == "__main__":
    main()
| [
"torch.nn.Dropout",
"torch.utils.data.DataLoader",
"torch.nn.Conv2d",
"torch.nn.CrossEntropyLoss",
"time.ctime",
"time.time",
"torchvision.datasets.ImageFolder",
"torchvision.transforms.ToTensor",
"torch.cuda.is_available",
"numpy.arange",
"torch.nn.functional.max_pool2d",
"torch.nn.Linear",
... | [((616, 705), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': '"""./data/augmented/train"""', 'transform': 'transform'}), "(root='./data/augmented/train', transform=\n transform)\n", (648, 705), False, 'import torchvision\n'), ((716, 818), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'batch_size': '(4)', 'num_workers': '(2)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(train_set, batch_size=4, num_workers=2, shuffle\n =True, drop_last=True)\n', (743, 818), False, 'import torch\n'), ((1128, 1221), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': '"""./data/augmented/validation"""', 'transform': 'transform'}), "(root='./data/augmented/validation',\n transform=transform)\n", (1160, 1221), False, 'import torchvision\n'), ((1231, 1331), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_set'], {'batch_size': '(4)', 'num_workers': '(2)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(val_set, batch_size=4, num_workers=2, shuffle=\n True, drop_last=True)\n', (1258, 1331), False, 'import torch\n'), ((518, 563), 'numpy.arange', 'np.arange', (['n_training_samples'], {'dtype': 'np.int64'}), '(n_training_samples, dtype=np.int64)\n', (527, 563), True, 'import numpy as np\n'), ((5419, 5444), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5442, 5444), False, 'import torch\n'), ((5567, 5588), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5586, 5588), True, 'import torch.nn as nn\n'), ((5854, 5865), 'time.time', 'time.time', ([], {}), '()\n', (5863, 5865), False, 'import time\n'), ((6155, 6166), 'time.time', 'time.time', ([], {}), '()\n', (6164, 6166), False, 'import time\n'), ((295, 316), 'torchvision.transforms.Resize', 'transforms.Resize', (['(64)'], {}), '(64)\n', (312, 316), False, 'from torchvision import transforms\n'), ((322, 343), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', 
([], {}), '()\n', (341, 343), False, 'from torchvision import transforms\n'), ((349, 403), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (369, 403), False, 'from torchvision import transforms\n'), ((1926, 1956), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(8)'], {'kernel_size': '(5)'}), '(3, 8, kernel_size=5)\n', (1935, 1956), True, 'import torch.nn as nn\n'), ((1978, 2009), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8)', '(20)'], {'kernel_size': '(5)'}), '(8, 20, kernel_size=5)\n', (1987, 2009), True, 'import torch.nn as nn\n'), ((2031, 2063), 'torch.nn.Conv2d', 'nn.Conv2d', (['(20)', '(50)'], {'kernel_size': '(5)'}), '(20, 50, kernel_size=5)\n', (2040, 2063), True, 'import torch.nn as nn\n'), ((2089, 2101), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (2099, 2101), True, 'import torch.nn as nn\n'), ((2126, 2138), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (2136, 2138), True, 'import torch.nn as nn\n'), ((2216, 2241), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 50)', '(64)'], {}), '(4 * 4 * 50, 64)\n', (2225, 2241), True, 'import torch.nn as nn\n'), ((2261, 2278), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (2270, 2278), True, 'import torch.nn as nn\n'), ((2298, 2315), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(13)'], {}), '(32, 13)\n', (2307, 2315), True, 'import torch.nn as nn\n'), ((2389, 2407), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (2401, 2407), True, 'import torch.nn.functional as F\n'), ((2484, 2502), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (2496, 2502), True, 'import torch.nn.functional as F\n'), ((2579, 2597), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (2591, 2597), True, 'import torch.nn.functional as F\n'), ((3795, 3810), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3808, 3810), False, 
'import torch\n'), ((3018, 3043), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3041, 3043), False, 'import torch\n'), ((3867, 3892), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3890, 3892), False, 'import torch\n'), ((4027, 4049), 'torch.max', 'torch.max', (['out.data', '(1)'], {}), '(out.data, 1)\n', (4036, 4049), False, 'import torch\n'), ((4101, 4126), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4124, 4126), False, 'import torch\n'), ((5927, 5939), 'time.ctime', 'time.ctime', ([], {}), '()\n', (5937, 5939), False, 'import time\n')] |
from pytesseract import *
import cv2
import os
import re
import numpy as np
import difflib
from difflib import SequenceMatcher
def img_similarity(img1, img2):
    """Return the fraction of pixels that match within an absolute tolerance.

    Both images are flattened and compared element-wise; values within an
    absolute tolerance of 50 count as equal, making the measure robust to
    small lighting/compression differences between consecutive frames.

    Args:
        img1, img2: numpy arrays of identical shape.

    Returns:
        float in [0.0, 1.0]: share of near-equal pixels.
    """
    data1 = img1.flatten()
    data2 = img2.flatten()
    # mean of the boolean match mask == (matching pixels) / (total pixels)
    return np.isclose(data1, data2, atol=50).mean()
# Frames extracted from the video, processed in filename order.
file_list = os.listdir("src/extract/")
file_list.sort()
# Thumbnail/header images used as the top banner of each output page.
thumb_list = os.listdir("src/thumbs/")
bound_upper_complete = False
bound_lower_complete = False
# Vertical pixel bounds of the subtitle band inside each frame (preset).
height_upper = 920
height_lower = 1000
pre_word = ""
diff = difflib.Differ()  # NOTE(review): created but never used below
sample_img = cv2.imread("src/extract/" + file_list[50])
# 3x3 sharpening kernel (only referenced from commented-out code below).
kernel_sharpen = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
add_cnt = 0
page_cnt = 0
thumb_cnt = 0
str_diff = 0
img_diff = 0
ok = 13  # 13 is the Enter key code returned by cv2.waitKey
print(sample_img.shape)
# selection
# Interactive calibration: either accept the preset subtitle band, or pick
# new upper/lower pixel bounds by previewing slices until Enter (13) is hit.
print("load preset? [{}:{}] [y/n]".format(str(height_upper), str(height_lower)))
yn = input()
if yn == 'y':
    bound_upper_complete = True
    bound_lower_complete = True
while not bound_upper_complete:
    print("input_upper")
    height_upper = int(input())
    # Preview everything below the candidate upper bound.
    sliced = sample_img[height_upper:, :]
    print(sliced.shape)
    cv2.namedWindow("height_upper",cv2.WINDOW_NORMAL)
    cv2.imshow("height_upper", sliced)
    cv2.resizeWindow("height_upper", 600,600)
    print("is it ok?[enter/other]")
    ret = cv2.waitKey(0)
    if ret == 13:
        bound_upper_complete = True
    cv2.destroyAllWindows()
while not bound_lower_complete:
    print("input_lower")
    height_lower = int(input())
    # Preview the full band between the chosen bounds.
    sliced = sample_img[height_upper:height_lower, :]
    cv2.namedWindow("height_lower",cv2.WINDOW_NORMAL)
    cv2.imshow("height_lower", sliced)
    cv2.resizeWindow("height_lower", 600,600)
    print("is it ok?[enter/other]")
    ret = cv2.waitKey(0)
    if ret == 13:
        bound_lower_complete = True
    cv2.destroyAllWindows()
# Seed the output page with the current thumbnail header, and initialise the
# "previous frame" state from the very first extracted frame.
result_img = cv2.imread("src/thumbs/" + thumb_list[thumb_cnt])[:height_upper - 5, :]
pre_img = cv2.imread("src/extract/" + file_list[0])[height_upper:height_lower, :]
cur_img = cv2.imread("src/extract/" + file_list[0])[height_upper:height_lower, :]
gray = cv2.cvtColor(cur_img, cv2.COLOR_BGR2GRAY)
inverted = cv2.bitwise_not(gray)
bilateral_filter = cv2.bilateralFilter(inverted, 9, 16, 16)
# Binary version of the first frame; the main loop compares each new frame
# against pre_bin to detect unchanged subtitles.
r, pre_bin = cv2.threshold(bilateral_filter, 200, 255, cv2.THRESH_BINARY)
# Main pass: OCR each frame's subtitle band, skip frames whose text/image
# duplicate the previous one, and stack the unique subtitle strips onto
# thumbnail-headed pages of 25 strips each.
for file_name in file_list:
    original_img = cv2.imread("src/extract/" + file_name)
    cur_img = original_img[height_upper:height_lower, :]
    # Pre-process for OCR: grayscale, invert, denoise; cur_bin is a binary
    # copy kept only for image-similarity comparison between frames.
    gray = cv2.cvtColor(cur_img, cv2.COLOR_BGR2GRAY)
    inverted = cv2.bitwise_not(gray)
    bilateral_filter = cv2.bilateralFilter(inverted, 9, 16, 16)
    r, cur_bin = cv2.threshold(inverted, 127, 255, cv2.THRESH_BINARY)
    dst = bilateral_filter
    new_img = dst
    # OCR the strip, strip digits/stray-jamo/punctuation/Latin noise, and
    # keep the longest remaining line as the candidate subtitle text.
    text = image_to_string(dst, lang="Hangul", config="--psm 4 --oem 1")
    word_list = re.sub("\d+|[ ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㅏㅕㅓㅕㅗㅛㅜㅠㅡㅣ\{\}\[\]\/?.,;:|\)「*ㆍ:”…*~`!^\-_+<>@\#$%&\\\=\(\'\"\f]|[A-Za-z]", "", text).split('\n')
    cur_word = max(word_list, key=len)
    # Filter trash recognition
    if len(cur_word) < 2:
        continue
    if cur_word != pre_word:
        str_diff= SequenceMatcher(None, cur_word, pre_word).ratio()
        img_diff = img_similarity(pre_bin, cur_bin)
        # High image similarity -> same subtitle still on screen: skip.
        if img_diff > 0.9:
            continue
        elif img_diff > 0.5:
            # Moderately similar image but nearly identical text: skip.
            if str_diff > 0.9:
                continue
            if str_diff > 0.2:
                # Ambiguous case: show both frames blended and let the
                # operator decide (Enter = 13 accepts the new subtitle).
                print("Check something [{}], [{}]".format(pre_word, cur_word) )
                cv2.imshow("dst", dst)
                cv2.imshow("cur_bin", cur_bin)
                add_img = cv2.addWeighted(pre_img, 0.5, cur_img, 0.5, 0)
                cv2.imshow("Okay to enter", add_img)
                ok = cv2.waitKey(0)
                cv2.destroyAllWindows()
                if ok != 13:
                    continue
        print("str diff : {:.03f}, img diff : {:.03f}, pre : [{}], cur : [{}]".format(str_diff, img_diff, pre_word, cur_word))
    # NOTE(review): this second OCR of the band directly above the subtitle
    # runs even when cur_word equals pre_word; presumably it detects two-line
    # subtitles — confirm the intended flow for exact duplicates.
    cur_img2 = original_img[2 * height_upper - height_lower:height_upper, :]
    gray2 = cv2.cvtColor(cur_img2, cv2.COLOR_BGR2GRAY)
    inverted2 = cv2.bitwise_not(gray2)
    bilateral_filter2 = cv2.bilateralFilter(inverted2, 9, 16, 16)
    r, cur_bin2 = cv2.threshold(bilateral_filter2, 127, 255, cv2.THRESH_BINARY)
    dst2 = bilateral_filter2
    text = image_to_string(dst2, lang="Hangul", config="--psm 4 --oem 1")
    # NOTE(review): this character class differs slightly from the one above
    # (ㅏㅑㅓㅕ here vs ㅏㅕㅓㅕ above) — confirm which jamo set is intended.
    word_list = re.sub("\d+|[ ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㅏㅑㅓㅕㅗㅛㅜㅠㅡㅣ\{\}\[\]\/?.,;:|\)「*ㆍ:”…*~`!^\-_+<>@\#$%&\\\=\(\'\"\f]|[A-Za-z]", "",
                        text).split('\n')
    cur_word2 = max(word_list, key=len)
    """ MULTILINE CHECK """
    if len(cur_word2) > len(cur_word):
        print("Check multiline")
        cv2.imshow("cur_img", cur_img)
        cv2.imshow("cur_img2", cur_img2)
        ok = cv2.waitKey(0)
        cv2.destroyAllWindows()
        if ok == 13:
            # Operator confirmed a two-line subtitle: keep the upper band.
            result_img = np.vstack((result_img, cur_img2))
        else:
            result_img = np.vstack((result_img, cur_img))
    else:
        result_img = np.vstack((result_img, cur_img))
    add_cnt += 1
    # Every 25 strips: flush the page to disk and start a new one headed by
    # the next thumbnail.
    if add_cnt % 25 == 0:
        cv2.imwrite("result-{}.jpg".format(str(page_cnt)), result_img)
        page_cnt += 1
        thumb_cnt += 1
        result_img = cv2.imread("src/thumbs/" + thumb_list[thumb_cnt])[:height_upper - 5, :]
    pre_word = cur_word
    pre_img = cur_img
    pre_bin = cur_bin
# Flush the final, partially-filled page.
if add_cnt % 25 != 0:
    cv2.imwrite("result-{}.jpg".format(str(page_cnt)), result_img)
| [
"cv2.bitwise_not",
"difflib.Differ",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.threshold",
"cv2.destroyAllWindows",
"difflib.SequenceMatcher",
"cv2.bilateralFilter",
"cv2.imread",
"numpy.isclose",
"cv2.addWeighted",
"numpy.array",
"numpy.vstack",
"cv2.resizeWindow",
"cv2.imshow",
"re.sub",
... | [((583, 609), 'os.listdir', 'os.listdir', (['"""src/extract/"""'], {}), "('src/extract/')\n", (593, 609), False, 'import os\n'), ((640, 665), 'os.listdir', 'os.listdir', (['"""src/thumbs/"""'], {}), "('src/thumbs/')\n", (650, 665), False, 'import os\n'), ((785, 801), 'difflib.Differ', 'difflib.Differ', ([], {}), '()\n', (799, 801), False, 'import difflib\n'), ((815, 857), 'cv2.imread', 'cv2.imread', (["('src/extract/' + file_list[50])"], {}), "('src/extract/' + file_list[50])\n", (825, 857), False, 'import cv2\n'), ((875, 926), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n', (883, 926), True, 'import numpy as np\n'), ((2489, 2530), 'cv2.cvtColor', 'cv2.cvtColor', (['cur_img', 'cv2.COLOR_BGR2GRAY'], {}), '(cur_img, cv2.COLOR_BGR2GRAY)\n', (2501, 2530), False, 'import cv2\n'), ((2542, 2563), 'cv2.bitwise_not', 'cv2.bitwise_not', (['gray'], {}), '(gray)\n', (2557, 2563), False, 'import cv2\n'), ((2583, 2623), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['inverted', '(9)', '(16)', '(16)'], {}), '(inverted, 9, 16, 16)\n', (2602, 2623), False, 'import cv2\n'), ((2637, 2697), 'cv2.threshold', 'cv2.threshold', (['bilateral_filter', '(200)', '(255)', 'cv2.THRESH_BINARY'], {}), '(bilateral_filter, 200, 255, cv2.THRESH_BINARY)\n', (2650, 2697), False, 'import cv2\n'), ((1369, 1419), 'cv2.namedWindow', 'cv2.namedWindow', (['"""height_upper"""', 'cv2.WINDOW_NORMAL'], {}), "('height_upper', cv2.WINDOW_NORMAL)\n", (1384, 1419), False, 'import cv2\n'), ((1423, 1457), 'cv2.imshow', 'cv2.imshow', (['"""height_upper"""', 'sliced'], {}), "('height_upper', sliced)\n", (1433, 1457), False, 'import cv2\n'), ((1463, 1505), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""height_upper"""', '(600)', '(600)'], {}), "('height_upper', 600, 600)\n", (1479, 1505), False, 'import cv2\n'), ((1551, 1565), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1562, 1565), False, 'import cv2\n'), ((1701, 1724), 
'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1722, 1724), False, 'import cv2\n'), ((1873, 1923), 'cv2.namedWindow', 'cv2.namedWindow', (['"""height_lower"""', 'cv2.WINDOW_NORMAL'], {}), "('height_lower', cv2.WINDOW_NORMAL)\n", (1888, 1923), False, 'import cv2\n'), ((1927, 1961), 'cv2.imshow', 'cv2.imshow', (['"""height_lower"""', 'sliced'], {}), "('height_lower', sliced)\n", (1937, 1961), False, 'import cv2\n'), ((1967, 2009), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""height_lower"""', '(600)', '(600)'], {}), "('height_lower', 600, 600)\n", (1983, 2009), False, 'import cv2\n'), ((2055, 2069), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2066, 2069), False, 'import cv2\n'), ((2128, 2151), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2149, 2151), False, 'import cv2\n'), ((2166, 2215), 'cv2.imread', 'cv2.imread', (["('src/thumbs/' + thumb_list[thumb_cnt])"], {}), "('src/thumbs/' + thumb_list[thumb_cnt])\n", (2176, 2215), False, 'import cv2\n'), ((2328, 2369), 'cv2.imread', 'cv2.imread', (["('src/extract/' + file_list[0])"], {}), "('src/extract/' + file_list[0])\n", (2338, 2369), False, 'import cv2\n'), ((2410, 2451), 'cv2.imread', 'cv2.imread', (["('src/extract/' + file_list[0])"], {}), "('src/extract/' + file_list[0])\n", (2420, 2451), False, 'import cv2\n'), ((2745, 2783), 'cv2.imread', 'cv2.imread', (["('src/extract/' + file_name)"], {}), "('src/extract/' + file_name)\n", (2755, 2783), False, 'import cv2\n'), ((2900, 2941), 'cv2.cvtColor', 'cv2.cvtColor', (['cur_img', 'cv2.COLOR_BGR2GRAY'], {}), '(cur_img, cv2.COLOR_BGR2GRAY)\n', (2912, 2941), False, 'import cv2\n'), ((2957, 2978), 'cv2.bitwise_not', 'cv2.bitwise_not', (['gray'], {}), '(gray)\n', (2972, 2978), False, 'import cv2\n'), ((3002, 3042), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['inverted', '(9)', '(16)', '(16)'], {}), '(inverted, 9, 16, 16)\n', (3021, 3042), False, 'import cv2\n'), ((3060, 3112), 'cv2.threshold', 'cv2.threshold', 
(['inverted', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(inverted, 127, 255, cv2.THRESH_BINARY)\n', (3073, 3112), False, 'import cv2\n'), ((6314, 6356), 'cv2.cvtColor', 'cv2.cvtColor', (['cur_img2', 'cv2.COLOR_BGR2GRAY'], {}), '(cur_img2, cv2.COLOR_BGR2GRAY)\n', (6326, 6356), False, 'import cv2\n'), ((6377, 6399), 'cv2.bitwise_not', 'cv2.bitwise_not', (['gray2'], {}), '(gray2)\n', (6392, 6399), False, 'import cv2\n'), ((6428, 6469), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['inverted2', '(9)', '(16)', '(16)'], {}), '(inverted2, 9, 16, 16)\n', (6447, 6469), False, 'import cv2\n'), ((6492, 6553), 'cv2.threshold', 'cv2.threshold', (['bilateral_filter2', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(bilateral_filter2, 127, 255, cv2.THRESH_BINARY)\n', (6505, 6553), False, 'import cv2\n'), ((397, 430), 'numpy.isclose', 'np.isclose', (['data1', 'data2'], {'atol': '(50)'}), '(data1, data2, atol=50)\n', (407, 430), True, 'import numpy as np\n'), ((3561, 3696), 're.sub', 're.sub', (['"""\\\\d+|[ ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㅏㅕㅓㅕㅗㅛㅜㅠㅡㅣ\\\\{\\\\}\\\\[\\\\]\\\\/?.,;:|\\\\)「*ㆍ:”…*~`!^\\\\-_+<>@\\\\#$%&\\\\\\\\=\\\\(\'"\x0c]|[A-Za-z]"""', '""""""', 'text'], {}), '(\n \'\\\\d+|[ ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㅏㅕㅓㅕㅗㅛㅜㅠㅡㅣ\\\\{\\\\}\\\\[\\\\]\\\\/?.,;:|\\\\)「*ㆍ:”…*~`!^\\\\-_+<>@\\\\#$%&\\\\\\\\=\\\\(\\\'"\\x0c]|[A-Za-z]\'\n , \'\', text)\n', (3567, 3696), False, 'import re\n'), ((7006, 7036), 'cv2.imshow', 'cv2.imshow', (['"""cur_img"""', 'cur_img'], {}), "('cur_img', cur_img)\n", (7016, 7036), False, 'import cv2\n'), ((7049, 7081), 'cv2.imshow', 'cv2.imshow', (['"""cur_img2"""', 'cur_img2'], {}), "('cur_img2', cur_img2)\n", (7059, 7081), False, 'import cv2\n'), ((7099, 7113), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (7110, 7113), False, 'import cv2\n'), ((7126, 7149), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7147, 7149), False, 'import cv2\n'), ((7357, 7389), 'numpy.vstack', 'np.vstack', (['(result_img, cur_img)'], {}), '((result_img, cur_img))\n', (7366, 7389), 
True, 'import numpy as np\n'), ((3847, 3888), 'difflib.SequenceMatcher', 'SequenceMatcher', (['None', 'cur_word', 'pre_word'], {}), '(None, cur_word, pre_word)\n', (3862, 3888), False, 'from difflib import SequenceMatcher\n'), ((6685, 6820), 're.sub', 're.sub', (['"""\\\\d+|[ ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㅏㅑㅓㅕㅗㅛㅜㅠㅡㅣ\\\\{\\\\}\\\\[\\\\]\\\\/?.,;:|\\\\)「*ㆍ:”…*~`!^\\\\-_+<>@\\\\#$%&\\\\\\\\=\\\\(\'"\x0c]|[A-Za-z]"""', '""""""', 'text'], {}), '(\n \'\\\\d+|[ ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㅏㅑㅓㅕㅗㅛㅜㅠㅡㅣ\\\\{\\\\}\\\\[\\\\]\\\\/?.,;:|\\\\)「*ㆍ:”…*~`!^\\\\-_+<>@\\\\#$%&\\\\\\\\=\\\\(\\\'"\\x0c]|[A-Za-z]\'\n , \'\', text)\n', (6691, 6820), False, 'import re\n'), ((7204, 7237), 'numpy.vstack', 'np.vstack', (['(result_img, cur_img2)'], {}), '((result_img, cur_img2))\n', (7213, 7237), True, 'import numpy as np\n'), ((7285, 7317), 'numpy.vstack', 'np.vstack', (['(result_img, cur_img)'], {}), '((result_img, cur_img))\n', (7294, 7317), True, 'import numpy as np\n'), ((7595, 7644), 'cv2.imread', 'cv2.imread', (["('src/thumbs/' + thumb_list[thumb_cnt])"], {}), "('src/thumbs/' + thumb_list[thumb_cnt])\n", (7605, 7644), False, 'import cv2\n'), ((4478, 4500), 'cv2.imshow', 'cv2.imshow', (['"""dst"""', 'dst'], {}), "('dst', dst)\n", (4488, 4500), False, 'import cv2\n'), ((4517, 4547), 'cv2.imshow', 'cv2.imshow', (['"""cur_bin"""', 'cur_bin'], {}), "('cur_bin', cur_bin)\n", (4527, 4547), False, 'import cv2\n'), ((4574, 4620), 'cv2.addWeighted', 'cv2.addWeighted', (['pre_img', '(0.5)', 'cur_img', '(0.5)', '(0)'], {}), '(pre_img, 0.5, cur_img, 0.5, 0)\n', (4589, 4620), False, 'import cv2\n'), ((4637, 4673), 'cv2.imshow', 'cv2.imshow', (['"""Okay to enter"""', 'add_img'], {}), "('Okay to enter', add_img)\n", (4647, 4673), False, 'import cv2\n'), ((4695, 4709), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4706, 4709), False, 'import cv2\n'), ((4726, 4749), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4747, 4749), False, 'import cv2\n')] |
from math import pi
import numpy as np
from aleph.consts import *
from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent
from reamber.osu.OsuBpm import OsuBpm, MIN_BPM
from reamber.osu.OsuMap import OsuMap
COS_POWER = 2
def f950(m: OsuMap):
    """Append the measure-line effect for the 375444-383124 ms section to *m*.

    Two mirrored families of lines trace +/-cos(x*pi/2)**COS_POWER envelopes;
    the section is rendered in 9 consecutive chunks whose SVs/BPMs are
    appended to the map in place.
    """
    FIRST = 375444
    LAST = 383124
    # 150 events: one cosine-arch family and its vertical mirror, each
    # spawned from 75 evenly spaced start offsets that all end at LAST.
    events = [
        *[SvOsuMeasureLineEvent(
            firstOffset=o, lastOffset=LAST,
            startX=0, endX=1,
            startY=-1, endY=1,
            funcs=[
                lambda x: np.cos(x * pi / 2) ** COS_POWER
            ]) for o in np.linspace(FIRST, LAST, 75)],
        * [SvOsuMeasureLineEvent(
            firstOffset=o, lastOffset=LAST,
            startX=0, endX=1,
            startY=-1, endY=1,
            funcs=[
                lambda x: -np.cos(x * pi / 2) ** COS_POWER
            ]) for o in np.linspace(FIRST, LAST, 75)]
    ]
    # Render chunk by chunk over consecutive pairs of 10 linspace points;
    # SCALE and PADDING come from aleph.consts.
    for e, (first, last) in enumerate(zip(np.linspace(FIRST, LAST, 10)[:-1], np.linspace(FIRST, LAST, 10)[1:])):
        svs, bpms = svOsuMeasureLineMD(events=events,
                                      scalingFactor=SCALE,
                                      firstOffset=first,
                                      lastOffset=last,
                                      paddingSize=PADDING * e,
                                      endBpm=MIN_BPM)
        m.svs.extend(svs)
        # Drop each chunk's trailing BPM so consecutive chunks chain cleanly.
        m.bpms.extend(bpms[:-1])
    m.bpms.append(OsuBpm(LAST, 250))
| [
"reamber.osu.OsuBpm.OsuBpm",
"numpy.cos",
"numpy.linspace",
"reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.svOsuMeasureLineMD"
] | [((996, 1131), 'reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.svOsuMeasureLineMD', 'svOsuMeasureLineMD', ([], {'events': 'events', 'scalingFactor': 'SCALE', 'firstOffset': 'first', 'lastOffset': 'last', 'paddingSize': '(PADDING * e)', 'endBpm': 'MIN_BPM'}), '(events=events, scalingFactor=SCALE, firstOffset=first,\n lastOffset=last, paddingSize=PADDING * e, endBpm=MIN_BPM)\n', (1014, 1131), False, 'from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent\n'), ((1401, 1418), 'reamber.osu.OsuBpm.OsuBpm', 'OsuBpm', (['LAST', '(250)'], {}), '(LAST, 250)\n', (1407, 1418), False, 'from reamber.osu.OsuBpm import OsuBpm, MIN_BPM\n'), ((905, 933), 'numpy.linspace', 'np.linspace', (['FIRST', 'LAST', '(10)'], {}), '(FIRST, LAST, 10)\n', (916, 933), True, 'import numpy as np\n'), ((940, 968), 'numpy.linspace', 'np.linspace', (['FIRST', 'LAST', '(10)'], {}), '(FIRST, LAST, 10)\n', (951, 968), True, 'import numpy as np\n'), ((574, 602), 'numpy.linspace', 'np.linspace', (['FIRST', 'LAST', '(75)'], {}), '(FIRST, LAST, 75)\n', (585, 602), True, 'import numpy as np\n'), ((826, 854), 'numpy.linspace', 'np.linspace', (['FIRST', 'LAST', '(75)'], {}), '(FIRST, LAST, 75)\n', (837, 854), True, 'import numpy as np\n'), ((522, 540), 'numpy.cos', 'np.cos', (['(x * pi / 2)'], {}), '(x * pi / 2)\n', (528, 540), True, 'import numpy as np\n'), ((774, 792), 'numpy.cos', 'np.cos', (['(x * pi / 2)'], {}), '(x * pi / 2)\n', (780, 792), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# step 1: read the dataset
# Column layout (presumably the NASA C-MAPSS FD001 turbofan files — confirm):
# engine id, cycle counter, 3 operational settings, then 21 sensor channels.
columns = ['unitid', 'time', 'set_1','set_2','set_3']
columns.extend(['sensor_' + str(i) for i in range(1,22)])
df = pd.read_csv('./data/train_FD001.txt', delim_whitespace=True,names=columns)
print(df.head())

#step 2: EDA
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Per-engine standard deviation: a column with std == 0 for every engine is
# constant and carries no predictive information.
df_std = df.groupby('unitid').std()
print(df_std==0)
# removing unusefull data
df=df.drop(['set_3', 'sensor_1', 'sensor_5', 'sensor_10', 'sensor_16', 'sensor_18', 'sensor_19'], axis=1)
# correlation
from scipy.stats import pearsonr
def calculate_pvalues(df):
    """Return a DataFrame of Pearson-correlation p-values for numeric column pairs.

    Rows with NaNs are dropped and non-numeric columns are ignored.  Cell
    [r][c] holds the pearsonr p-value between columns r and c, rounded to 4
    decimals; the matrix is symmetric with 0.0 on the diagonal.

    Fix: the original filled cells via chained indexing
    (``pvalues[r][c] = ...``), which is deprecated and does not propagate
    under pandas copy-on-write; values are now assembled column-wise.
    """
    numeric = df.dropna()._get_numeric_data()
    cols = numeric.columns
    data = {
        r: [round(pearsonr(numeric[r], numeric[c])[1], 4) for c in cols]
        for r in cols
    }
    return pd.DataFrame(data, index=cols, columns=cols)
print('correlation engine 1')
print(calculate_pvalues(df[(df.unitid ==1)]))
# NOTE(review): the label says "engine 3" but the filter selects unitid == 5
# ("engine 10" below does use unitid == 10) — confirm which is intended.
print('correlation engine 3')
print(calculate_pvalues(df[(df.unitid ==5)]))
print('correlation engine 10')
print(calculate_pvalues(df[(df.unitid ==10)]))
# showing correlation
import matplotlib.pyplot as plt
import seaborn as sns
def show_feature(df):
    """Visualise early engines: a pairplot of selected features, then a grid
    of per-sensor time series (one subplot per sensor column)."""
    # showing the first 5 engines and the first five variables
    sns.pairplot(df[(df.unitid <=5) ], hue="unitid", vars=["set_1", "set_2",'sensor_2','sensor_3','sensor_4'])
    plt.show()
    #timeseries
    df1=df[(df.unitid <5) ]
    i=0
    for column in df1:
        if ('sensor' in column):
            i=i+1
            # 4x4 grid fits the 15 sensor columns left after the drop above.
            ax= plt.subplot(4,4,i)
            # NOTE(review): sns.tsplot was deprecated in seaborn 0.8 and
            # removed in 0.9 — newer seaborn requires sns.lineplot instead.
            ax = sns.tsplot(time="time", value=column, condition='unitid',
                   unit='unitid',legend=False,
                   data=df1, ax=ax)
    plt.show()
show_feature(df)
# selected features
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestRegressor
def select_feature(df):
    """Run recursive feature elimination and print the names of the kept features.

    Treats every column except the last as input and the last column as the
    target, then wraps RFE around a 50-tree random forest regressor.
    """
    print("extract var")
    # separate into input and output variables
    array = df.values
    X = array[:,0:-1]
    y = array[:,-1]
    # perform feature selection
    # NOTE(review): the positional 4 is n_features_to_select; recent
    # scikit-learn requires it as a keyword argument — confirm version.
    rfe = RFE(RandomForestRegressor(n_estimators=50, random_state=1), 4)
    fit = rfe.fit(X, y)
    # report selected features
    print('Selected Features:')
    names = df.columns.values[0:-1]
    for i in range(len(fit.support_)):
        if fit.support_[i]:
            print(names[i])
select_feature(df)
# fit the model
import math
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
def prepare_dataset(dataframe, columns):
    """Extract the given feature columns as float32 and min-max scale them to [0, 1]."""
    raw = dataframe[columns].values.astype('float32')
    # Fit a fresh scaler on this data only, then rescale every feature.
    scaler = MinMaxScaler(feature_range=(0, 1))
    return scaler.fit_transform(raw)
def build_model(input_dim):
    """Assemble and compile a small fully-connected network with a sigmoid output."""
    net = Sequential()
    # Stack: input_dim -> 16 -> 32 -> 1; the sigmoid keeps outputs in [0, 1].
    for layer in (Dense(16, input_dim=input_dim, activation='relu'),
                  Dense(32, activation='relu'),
                  Dense(1, activation='sigmoid')):
        net.add(layer)
    # Compile model
    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
def create_train_dataset(dataset):
    """Pair each sample with a linearly decaying normalised-RUL target.

    The i-th of N samples gets the target (N - i) / N, i.e. remaining life
    falls from 1.0 towards 0 as the run progresses.
    """
    total = len(dataset)
    features = [sample for sample in dataset]
    targets = [(total - idx) / total for idx in range(total)]
    return np.array(features), np.array(targets)
def train_model(model, dataset):
    """Fit *model* on decaying-RUL targets and print the training RMSE.

    Targets come from create_train_dataset (normalised remaining life);
    training runs 150 epochs silently, then the RMSE of the model's own
    training-set predictions is reported.
    """
    # create the dataset
    trainX, trainY = create_train_dataset(dataset)
    # Fit the model
    model.fit(trainX, trainY, epochs=150, batch_size=10, verbose=0)
    # make predictions
    trainPredict = model.predict(trainX)
    # calculate root mean squared error
    trainScore = math.sqrt(mean_squared_error(trainY, trainPredict[:,0]))
    print('Train Score: %.2f RMSE' % (trainScore))
# prepare model
#columns_feature=['set_1','set_2','sensor_4','sensor_7','sensor_11','sensor_12']
columns_feature=['sensor_4','sensor_7']
# fix random seed for reproducibility
np.random.seed(7)
i=1
# build the model
model=build_model(len(columns_feature))
# train the model on engine `i` only, features min-max scaled
dataset= prepare_dataset(df[(df.unitid ==i)],columns_feature)
train_model(model, dataset)
# test
df_test = pd.read_csv('./data/test_FD001.txt', delim_whitespace=True,names=columns)
expected = pd.read_csv('./data/RUL_FD001.txt', delim_whitespace=True,names=['RUL'])
n=len(dataset)
# NOTE(review): prepare_dataset fits a fresh MinMaxScaler on the test data
# itself rather than reusing the training scaler — confirm this is intended.
dataset_test = prepare_dataset(df_test[(df_test.unitid ==i)],columns_feature)
testPredict = model.predict(dataset_test)
# The model outputs normalised remaining life; multiply by the training-run
# length n to convert back to cycles.
testPredict = np.multiply(testPredict,n)
print("RUL of Engine %s : predicted:%s expected:%s"%(1, testPredict[-1], expected['RUL'][i-1]))
| [
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"numpy.random.seed",
"numpy.multiply",
"matplotlib.pyplot.show",
"pandas.read_csv",
"seaborn.tsplot",
"sklearn.preprocessing.MinMaxScaler",
"scipy.stats.pearsonr",
"sklearn.ensemble.RandomForestRegressor",
"keras.layers.Dense",
"numpy.array",
... | [((216, 291), 'pandas.read_csv', 'pd.read_csv', (['"""./data/train_FD001.txt"""'], {'delim_whitespace': '(True)', 'names': 'columns'}), "('./data/train_FD001.txt', delim_whitespace=True, names=columns)\n", (227, 291), True, 'import pandas as pd\n'), ((324, 366), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (337, 366), True, 'import pandas as pd\n'), ((367, 406), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (380, 406), True, 'import pandas as pd\n'), ((4210, 4227), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (4224, 4227), True, 'import numpy as np\n'), ((4419, 4493), 'pandas.read_csv', 'pd.read_csv', (['"""./data/test_FD001.txt"""'], {'delim_whitespace': '(True)', 'names': 'columns'}), "('./data/test_FD001.txt', delim_whitespace=True, names=columns)\n", (4430, 4493), True, 'import pandas as pd\n'), ((4504, 4577), 'pandas.read_csv', 'pd.read_csv', (['"""./data/RUL_FD001.txt"""'], {'delim_whitespace': '(True)', 'names': "['RUL']"}), "('./data/RUL_FD001.txt', delim_whitespace=True, names=['RUL'])\n", (4515, 4577), True, 'import pandas as pd\n'), ((4727, 4754), 'numpy.multiply', 'np.multiply', (['testPredict', 'n'], {}), '(testPredict, n)\n', (4738, 4754), True, 'import numpy as np\n'), ((722, 754), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (734, 754), True, 'import pandas as pd\n'), ((1351, 1462), 'seaborn.pairplot', 'sns.pairplot', (['df[df.unitid <= 5]'], {'hue': '"""unitid"""', 'vars': "['set_1', 'set_2', 'sensor_2', 'sensor_3', 'sensor_4']"}), "(df[df.unitid <= 5], hue='unitid', vars=['set_1', 'set_2',\n 'sensor_2', 'sensor_3', 'sensor_4'])\n", (1363, 1462), True, 'import seaborn as sns\n'), ((1464, 1474), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1472, 1474), True, 'import matplotlib.pyplot as plt\n'), ((1814, 1824), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1822, 1824), True, 'import matplotlib.pyplot as plt\n'), ((2845, 2879), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (2857, 2879), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3003, 3015), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3013, 3015), False, 'from keras.models import Sequential\n'), ((2165, 2219), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(50)', 'random_state': '(1)'}), '(n_estimators=50, random_state=1)\n', (2186, 2219), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((3030, 3079), 'keras.layers.Dense', 'Dense', (['(16)'], {'input_dim': 'input_dim', 'activation': '"""relu"""'}), "(16, input_dim=input_dim, activation='relu')\n", (3035, 3079), False, 'from keras.layers import Dense\n'), ((3095, 3123), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (3100, 3123), False, 'from keras.layers import Dense\n'), ((3139, 3169), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3144, 3169), False, 'from keras.layers import Dense\n'), ((3559, 3574), 'numpy.array', 'np.array', (['dataX'], {}), '(dataX)\n', (3567, 3574), True, 'import numpy as np\n'), ((3576, 3591), 'numpy.array', 'np.array', (['dataY'], {}), '(dataY)\n', (3584, 3591), True, 'import numpy as np\n'), ((3933, 3979), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['trainY', 'trainPredict[:, 0]'], {}), '(trainY, trainPredict[:, 0])\n', (3951, 3979), False, 'from sklearn.metrics import mean_squared_error\n'), ((1618, 1638), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(4)', 'i'], {}), '(4, 4, i)\n', (1629, 1638), True, 'import matplotlib.pyplot as plt\n'), ((1654, 1761), 'seaborn.tsplot', 'sns.tsplot', ([], {'time': '"""time"""', 'value': 'column', 
'condition': '"""unitid"""', 'unit': '"""unitid"""', 'legend': '(False)', 'data': 'df1', 'ax': 'ax'}), "(time='time', value=column, condition='unitid', unit='unitid',\n legend=False, data=df1, ax=ax)\n", (1664, 1761), True, 'import seaborn as sns\n'), ((902, 924), 'scipy.stats.pearsonr', 'pearsonr', (['df[r]', 'df[c]'], {}), '(df[r], df[c])\n', (910, 924), False, 'from scipy.stats import pearsonr\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.