code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from typing import List, Optional, Tuple, Type
import numpy as np
import torch as T
from gym import Env
from pearll.agents import BaseAgent
from pearll.buffers import BaseBuffer, ReplayBuffer
from pearll.callbacks.base_callback import BaseCallback
from pearll.common.enumerations import FrequencyType
from pearll.common.type_aliases import Log, Observation, Trajectories
from pearll.explorers.base_explorer import BaseExplorer
from pearll.models import ModelEnv
from pearll.models.actor_critics import ActorCritic
from pearll.settings import (
BufferSettings,
ExplorerSettings,
LoggerSettings,
MiscellaneousSettings,
OptimizerSettings,
Settings,
)
from pearll.signal_processing import return_estimators
from pearll.updaters.critics import BaseCriticUpdater, DiscreteQRegression
from pearll.updaters.environment import BaseDeepUpdater, DeepRegression
class DynaQ(BaseAgent):
"""
Dyna-Q model based RL algorithm.
:param env: the gym-like environment to be used
:param agent_model: the agent model to be used
:param env_model: the environment model to be used
:param td_gamma: trajectory discount factor
:param agent_updater_class: the updater class for the agent critic
:param agent_optimizer_settings: the settings for the agent updater
:param obs_updater_class: the updater class for the observation function in the environment model
:param obs_optimizer_settings: the settings for the observation updater
:param reward_updater_class: the updater class for the reward function in the environment model
:param reward_optimizer_settings: the settings for the reward updater
:param done_updater_class: the updater class for the done function in the environment model
:param done_optimizer_settings: the settings for the done updater
:param buffer_class: the buffer class for storing and sampling trajectories
:param buffer_settings: settings for the buffer
:param action_explorer_class: the explorer class for random search at beginning of training and
adding noise to actions
:param explorer settings: settings for the action explorer
:param callbacks: an optional list of callbacks (e.g. if you want to save the model)
:param callback_settings: settings for callbacks
:param logger_settings: settings for the logger
:param misc_settings: settings for miscellaneous parameters
"""
def __init__(
self,
env: Env,
agent_model: ActorCritic,
env_model: ModelEnv,
td_gamma: float = 0.99,
agent_updater_class: Type[BaseCriticUpdater] = DiscreteQRegression,
agent_optimizer_settings: OptimizerSettings = OptimizerSettings(),
obs_updater_class: BaseDeepUpdater = DeepRegression,
obs_optimizer_settings: OptimizerSettings = OptimizerSettings(),
reward_updater_class: Type[BaseDeepUpdater] = DeepRegression,
reward_optimizer_settings: OptimizerSettings = OptimizerSettings(),
done_updater_class: Optional[Type[BaseDeepUpdater]] = None,
done_optimizer_settings: OptimizerSettings = OptimizerSettings(
loss_class=T.nn.BCELoss()
),
buffer_class: Type[BaseBuffer] = ReplayBuffer,
buffer_settings: BufferSettings = BufferSettings(),
action_explorer_class: Type[BaseExplorer] = BaseExplorer,
explorer_settings: ExplorerSettings = ExplorerSettings(start_steps=100),
callbacks: Optional[List[Type[BaseCallback]]] = None,
callback_settings: Optional[List[Settings]] = None,
logger_settings: LoggerSettings = LoggerSettings(),
misc_settings: MiscellaneousSettings = MiscellaneousSettings(),
) -> None:
super().__init__(
env=env,
model=agent_model,
action_explorer_class=action_explorer_class,
explorer_settings=explorer_settings,
buffer_class=buffer_class,
buffer_settings=buffer_settings,
logger_settings=logger_settings,
callbacks=callbacks,
callback_settings=callback_settings,
misc_settings=misc_settings,
)
self.env_model = env_model
self.td_gamma = td_gamma
self.learning_rate = agent_optimizer_settings.learning_rate
self.policy_updater = agent_updater_class(
loss_class=agent_optimizer_settings.loss_class,
optimizer_class=agent_optimizer_settings.optimizer_class,
max_grad=agent_optimizer_settings.max_grad,
)
self.obs_updater = obs_updater_class(
loss_class=obs_optimizer_settings.loss_class,
optimizer_class=obs_optimizer_settings.optimizer_class,
max_grad=obs_optimizer_settings.max_grad,
)
self.reward_updater = reward_updater_class(
loss_class=reward_optimizer_settings.loss_class,
optimizer_class=reward_optimizer_settings.optimizer_class,
max_grad=reward_optimizer_settings.max_grad,
)
if self.env_model.done_fn is not None:
self.done_updater = done_updater_class(
loss_class=done_optimizer_settings.loss_class,
optimizer_class=done_optimizer_settings.optimizer_class,
max_grad=done_optimizer_settings.max_grad,
)
self.model_step = 0
self.model_episode = 0
def step_model_env(
self, observation: Observation, num_steps: int = 1
) -> np.ndarray:
"""
Step the agent in the model environment
:param observation: the starting observation to step from
:param num_steps: how many steps to take
:return: the final observation after all steps have been done
"""
self.model.eval()
for _ in range(num_steps):
action = self.action_explorer(self.model, observation, self.step)
next_observation, reward, done, _ = self.env_model.step(observation, action)
self.buffer.add_trajectory(
observation, action, reward, next_observation, done
)
self.logger.debug(
Trajectories(observation, action, reward, next_observation, done)
)
observation = next_observation
if done:
observation = self.env_model.reset()
self.model_episode += 1
self.model_step += 1
return observation
def _fit_model_env(self, batch_size: int, epochs: int = 1) -> None:
"""
Fit the model environment
:param batch_size: the batch size to use
:param epochs: how many epochs to fit for
"""
obs_loss = np.zeros(epochs)
reward_loss = np.zeros(epochs)
done_loss = np.zeros(epochs)
for i in range(epochs):
trajectories = self.buffer.sample(batch_size)
obs_update_log = self.obs_updater(
model=self.env_model.observation_fn,
observations=trajectories.observations,
actions=trajectories.actions,
targets=trajectories.next_observations,
learning_rate=self.learning_rate,
)
obs_loss[i] = obs_update_log.loss
reward_update_log = self.reward_updater(
model=self.env_model.reward_fn,
observations=trajectories.observations,
actions=trajectories.actions,
targets=trajectories.rewards,
learning_rate=self.learning_rate,
)
reward_loss[i] = reward_update_log.loss
if self.env_model.done_fn is not None:
done_update_log = self.done_updater(
model=self.env_model.done_fn,
observations=trajectories.observations,
actions=trajectories.actions,
targets=trajectories.dones,
learning_rate=self.learning_rate,
)
done_loss[i] = done_update_log.loss
self.logger.debug(f"obs_loss: {obs_loss.mean()}")
self.logger.debug(f"reward_loss: {reward_loss.mean()}")
if self.env_model.done_fn is not None:
self.logger.debug(f"done_loss: {done_loss.mean()}")
def _fit(
self, batch_size: int, actor_epochs: int = 1, critic_epochs: int = 1
) -> Log:
critic_losses = np.zeros(shape=(critic_epochs))
for i in range(critic_epochs):
trajectories = self.buffer.sample(batch_size=batch_size, flatten_env=False)
with T.no_grad():
next_q_values = self.model.forward_target_critics(
trajectories.next_observations
)
next_q_values = T.unsqueeze(next_q_values.max(dim=-1)[0], dim=-1)
target_q_values = return_estimators.TD_zero(
trajectories.rewards,
next_q_values,
trajectories.dones,
self.td_gamma,
)
updater_log = self.policy_updater(
self.model,
trajectories.observations,
target_q_values,
trajectories.actions,
learning_rate=self.learning_rate,
)
critic_losses[i] = updater_log.loss
self.model.assign_targets()
return Log(critic_loss=np.mean(critic_losses))
def fit(
self,
env_steps: int,
plan_steps: int,
env_batch_size: int,
plan_batch_size: int,
actor_epochs: int = 1,
critic_epochs: int = 1,
env_epochs: int = 1,
env_train_frequency: Tuple[str, int] = ("step", 1),
plan_train_frequency: Tuple[str, int] = ("step", 1),
no_model_steps: int = 0,
) -> None:
"""
Train the agent in the environment
1. Collect samples in the real environment.
2. Train the model environment on samples collected.
3. Collect samples in the model environment.
4. Train the agent on samples collected in both environments.
:param env_steps: total number of real environment steps to train over
:param plan_steps: number of model environment steps to run each planning phase
:param env_batch_size: minibatch size for the model environment to make a single gradient descent step on
:param plan_batch_size: minibatch size for the agent to make a single gradient descent step on
:param actor_epochs: how many times to update the actor network in each training step
:param critic_epochs: how many times to update the critic network in each training step
:param env_epochs: how many times to update the model environment in each training step
:param env_train_frequency: the number of steps or episodes to run in the real environment before running a model environment training step.
To run every n episodes, use `("episode", n)`.
To run every n steps, use `("step", n)`.
:param plan_train_frequency: the number of steps or episodes to run in the model environment before running an agent training step.
To run every n episodes, use `("episode", n)`.
To run every n steps, use `("step", n)`.
:param no_model_steps: number of steps to run without collecting trajectories from the model environment.
"""
env_train_frequency = (
FrequencyType(env_train_frequency[0].lower()),
env_train_frequency[1],
)
plan_train_frequency = (
FrequencyType(plan_train_frequency[0].lower()),
plan_train_frequency[1],
)
# We can pre-calculate how many training steps to run if train frequency is in steps rather than episodes
if env_train_frequency[0] == FrequencyType.STEP:
env_steps = env_steps // env_train_frequency[1]
if plan_train_frequency[0] == FrequencyType.STEP:
plan_steps = plan_steps // plan_train_frequency[1]
observation = self.env.reset()
for _ in range(env_steps):
self.logger.debug("REAL ENVIRONMENT")
# Step for number of steps specified
if env_train_frequency[0] == FrequencyType.STEP:
observation = self.step_env(
observation=observation, num_steps=env_train_frequency[1]
)
# Step for number of episodes specified
elif env_train_frequency[0] == FrequencyType.EPISODE:
start_episode = self.episode
end_episode = start_episode + env_train_frequency[1]
while self.episode != end_episode:
observation = self.step_env(observation=observation)
if self.step >= env_steps:
break
if self.done:
break
# Update the environment model
self._fit_model_env(batch_size=env_batch_size, epochs=env_epochs)
if self.step < no_model_steps:
# Update the agent model
self.model.train()
train_log = self._fit(
batch_size=plan_batch_size,
actor_epochs=actor_epochs,
critic_epochs=critic_epochs,
)
self.model.update_global()
self.logger.add_train_log(train_log)
else:
self.logger.debug("MODEL ENVIRONMENT")
# Plan for number of steps specified
model_obs = self.env_model.reset()
for _ in range(plan_steps):
# Step for number of steps specified
if plan_train_frequency[0] == FrequencyType.STEP:
model_obs = self.step_model_env(
observation=model_obs, num_steps=plan_train_frequency[1]
)
# Step for number of episodes specified
elif plan_train_frequency[0] == FrequencyType.EPISODE:
start_episode = self.model_episode
end_episode = start_episode + plan_train_frequency[1]
while self.model_episode != end_episode:
observation = self.step_model_env(observation=observation)
if self.model_step >= plan_steps:
break
# Update the agent model
self.model.train()
train_log = self._fit(
batch_size=plan_batch_size,
actor_epochs=actor_epochs,
critic_epochs=critic_epochs,
)
self.model.update_global()
self.logger.add_train_log(train_log)
self.buffer.reset()
| [
"pearll.settings.BufferSettings",
"torch.nn.BCELoss",
"pearll.settings.MiscellaneousSettings",
"pearll.settings.OptimizerSettings",
"pearll.common.type_aliases.Trajectories",
"numpy.zeros",
"numpy.mean",
"pearll.settings.ExplorerSettings",
"pearll.settings.LoggerSettings",
"torch.no_grad",
"pear... | [((2684, 2703), 'pearll.settings.OptimizerSettings', 'OptimizerSettings', ([], {}), '()\n', (2701, 2703), False, 'from pearll.settings import BufferSettings, ExplorerSettings, LoggerSettings, MiscellaneousSettings, OptimizerSettings, Settings\n'), ((2818, 2837), 'pearll.settings.OptimizerSettings', 'OptimizerSettings', ([], {}), '()\n', (2835, 2837), False, 'from pearll.settings import BufferSettings, ExplorerSettings, LoggerSettings, MiscellaneousSettings, OptimizerSettings, Settings\n'), ((2964, 2983), 'pearll.settings.OptimizerSettings', 'OptimizerSettings', ([], {}), '()\n', (2981, 2983), False, 'from pearll.settings import BufferSettings, ExplorerSettings, LoggerSettings, MiscellaneousSettings, OptimizerSettings, Settings\n'), ((3271, 3287), 'pearll.settings.BufferSettings', 'BufferSettings', ([], {}), '()\n', (3285, 3287), False, 'from pearll.settings import BufferSettings, ExplorerSettings, LoggerSettings, MiscellaneousSettings, OptimizerSettings, Settings\n'), ((3401, 3434), 'pearll.settings.ExplorerSettings', 'ExplorerSettings', ([], {'start_steps': '(100)'}), '(start_steps=100)\n', (3417, 3434), False, 'from pearll.settings import BufferSettings, ExplorerSettings, LoggerSettings, MiscellaneousSettings, OptimizerSettings, Settings\n'), ((3600, 3616), 'pearll.settings.LoggerSettings', 'LoggerSettings', ([], {}), '()\n', (3614, 3616), False, 'from pearll.settings import BufferSettings, ExplorerSettings, LoggerSettings, MiscellaneousSettings, OptimizerSettings, Settings\n'), ((3665, 3688), 'pearll.settings.MiscellaneousSettings', 'MiscellaneousSettings', ([], {}), '()\n', (3686, 3688), False, 'from pearll.settings import BufferSettings, ExplorerSettings, LoggerSettings, MiscellaneousSettings, OptimizerSettings, Settings\n'), ((6698, 6714), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (6706, 6714), True, 'import numpy as np\n'), ((6737, 6753), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (6745, 6753), True, 'import numpy 
as np\n'), ((6774, 6790), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (6782, 6790), True, 'import numpy as np\n'), ((8414, 8443), 'numpy.zeros', 'np.zeros', ([], {'shape': 'critic_epochs'}), '(shape=critic_epochs)\n', (8422, 8443), True, 'import numpy as np\n'), ((3148, 3162), 'torch.nn.BCELoss', 'T.nn.BCELoss', ([], {}), '()\n', (3160, 3162), True, 'import torch as T\n'), ((6149, 6214), 'pearll.common.type_aliases.Trajectories', 'Trajectories', (['observation', 'action', 'reward', 'next_observation', 'done'], {}), '(observation, action, reward, next_observation, done)\n', (6161, 6214), False, 'from pearll.common.type_aliases import Log, Observation, Trajectories\n'), ((8591, 8602), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (8600, 8602), True, 'import torch as T\n'), ((8856, 8958), 'pearll.signal_processing.return_estimators.TD_zero', 'return_estimators.TD_zero', (['trajectories.rewards', 'next_q_values', 'trajectories.dones', 'self.td_gamma'], {}), '(trajectories.rewards, next_q_values, trajectories\n .dones, self.td_gamma)\n', (8881, 8958), False, 'from pearll.signal_processing import return_estimators\n'), ((9424, 9446), 'numpy.mean', 'np.mean', (['critic_losses'], {}), '(critic_losses)\n', (9431, 9446), True, 'import numpy as np\n')] |
from numba import jit, guvectorize, float64, int32, float64, b1
#@guvectorize([(float64[:], float64[:], float64[:], float64[:],float64[:], b1[:])], '(),(),(m),(m),(k),()', nopython = True, cache = True, target='parallel')
@jit(nopython=True)#
def point_in_polygon(xArr,yArr, xpts, ypts, bbox, ans):
"""Calculate if coordinates x, y is inside of the polygon described by
the points array. bbox must be xmin, xmax, ymin, ymax
"""
for j in range(len(xArr)):
x = xArr[j]
y = yArr[j]
inside = ans[j]
if (x > bbox[0]) and (x < bbox[1]) and (y > bbox[2]) and (y < bbox[3]):
"see if inside of polygon"
n = len(xpts)
for i in range(n-1):
px0 = xpts[i]
py0 = ypts[i]
px1 = xpts[i+1]
py1 = ypts[i+1]
if y > min(py0, py1):
if y<= max(py0, py1):
if x <= max(px0, px1):
if py0 != py1:
xinters = (y-py0)*(px1-px0)/(py1-py0)+px0
if px0==px1 or x<= xinters:
inside = not inside
ans[j] = inside
if __name__=='__main__':
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
# build the test points
### SHOULD BE True
num_points = 200000
r = np.random.rand(num_points)#*.5
theta = np.random.rand(num_points)*2*np.pi
xt = r*np.cos(theta)
yt = r*np.sin(theta)
#plt.plot(xt,yt, '.')
### SHOULD BE false
#r = .5*np.random.rand(num_points) + .5
#theta = np.random.rand(num_points)*2*np.pi
#xt = np.append(xt,r*np.cos(theta))
#yt = np.append(yt,r*np.sin(theta))
theta = np.linspace(0, 2*np.pi, 1000)
#print(np.sin(theta[0]),np.sin(theta[-1]))
#print(np.cos(theta[0]),np.cos(theta[-1]))
r= .4 + .6*np.cos(theta)
polyX = r*np.cos(theta)
polyX = np.append(polyX, polyX[0])
#polyX = np.append(polyX,np.array([0,0]))
polyY = r*np.sin(theta)
polyY = np.append(polyY, polyY[0])
#polyY = np.append(polyY,np.array([0,-.5]))
#polyX = np.append(polyX,np.append(.5*np.cos(theta), polyX[0]))
#polyY = np.append(polyY,np.append(.5*np.sin(theta), polyY[0]))
ans = np.zeros(len(xt), dtype='bool')
bbox = np.array([polyX.min(), polyX.max(), polyY.min(), polyY.max()])
point_in_polygon(xt, yt, polyX, polyY,bbox , ans)
plt.plot(xt[np.invert(ans)],yt[np.invert(ans)], '.')
plt.plot(xt[ans],yt[ans], '.')
plt.gca().add_patch(Polygon(np.stack((polyX,polyY), axis=-1), True, fc='w', ec = 'k'))
print(np.stack((polyX,polyY), axis=-1).shape)
plt.show()
| [
"numpy.stack",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.invert",
"matplotlib.pyplot.gca",
"numpy.append",
"numpy.sin",
"numba.jit",
"numpy.linspace",
"numpy.cos",
"numpy.random.rand"
] | [((225, 243), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (228, 243), False, 'from numba import jit, guvectorize, float64, int32, float64, b1\n'), ((1504, 1530), 'numpy.random.rand', 'np.random.rand', (['num_points'], {}), '(num_points)\n', (1518, 1530), True, 'import numpy as np\n'), ((1867, 1898), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1000)'], {}), '(0, 2 * np.pi, 1000)\n', (1878, 1898), True, 'import numpy as np\n'), ((2060, 2086), 'numpy.append', 'np.append', (['polyX', 'polyX[0]'], {}), '(polyX, polyX[0])\n', (2069, 2086), True, 'import numpy as np\n'), ((2173, 2199), 'numpy.append', 'np.append', (['polyY', 'polyY[0]'], {}), '(polyY, polyY[0])\n', (2182, 2199), True, 'import numpy as np\n'), ((2622, 2653), 'matplotlib.pyplot.plot', 'plt.plot', (['xt[ans]', 'yt[ans]', '"""."""'], {}), "(xt[ans], yt[ans], '.')\n", (2630, 2653), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2808), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2806, 2808), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1606), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1599, 1606), True, 'import numpy as np\n'), ((1618, 1631), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1624, 1631), True, 'import numpy as np\n'), ((2034, 2047), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2040, 2047), True, 'import numpy as np\n'), ((2147, 2160), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2153, 2160), True, 'import numpy as np\n'), ((1547, 1573), 'numpy.random.rand', 'np.random.rand', (['num_points'], {}), '(num_points)\n', (1561, 1573), True, 'import numpy as np\n'), ((2006, 2019), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2012, 2019), True, 'import numpy as np\n'), ((2577, 2591), 'numpy.invert', 'np.invert', (['ans'], {}), '(ans)\n', (2586, 2591), True, 'import numpy as np\n'), ((2596, 2610), 'numpy.invert', 'np.invert', (['ans'], {}), '(ans)\n', (2605, 2610), True, 'import numpy as 
np\n'), ((2657, 2666), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2664, 2666), True, 'import matplotlib.pyplot as plt\n'), ((2685, 2718), 'numpy.stack', 'np.stack', (['(polyX, polyY)'], {'axis': '(-1)'}), '((polyX, polyY), axis=-1)\n', (2693, 2718), True, 'import numpy as np\n'), ((2754, 2787), 'numpy.stack', 'np.stack', (['(polyX, polyY)'], {'axis': '(-1)'}), '((polyX, polyY), axis=-1)\n', (2762, 2787), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import logging
import threading
import time
import numpy as np
import random
class PrototypeServer(threading.Thread):
def __init__(self):
super(PrototypeServer, self).__init__()
# setup client specific logger
self._logger = logging.getLogger("AppAware.Server." + self.__class__.__name__)
self._logger.debug("PrototypeServer.__init__()")
self._running = False
self._default_config = {'request_mode': 'constant',
'request_interval': 0.1,
'random_wait_time': 0.0}
self._config = None
self.setName("%s" % (self.__class__.__name__))
self._next_request = time.time()
def prepare(self):
self._logger.debug("PrototypeServer.prepare()")
pass
def run(self):
self._logger.debug("PrototypeServer.run()")
self._running = True
while self._running:
self._logger.info("PrototypeServer running")
time.sleep(1)
def stop(self):
self._logger.debug("PrototypeServer.stop()")
self._running = False
def clean_up(self):
self._logger.debug("PrototypeServer.clean_up()")
pass
def is_running(self):
return self._running
def set_config(self, config={}):
self._logger.debug("PrototypeServer.set_config()")
self._config = self._default_config.copy()
self._config.update(config)
self._apply_config()
# overwritten by individual client
def _apply_config(self):
self._logger.debug("PrototypeServer._apply_config()")
pass
def _random_wait_start(self):
"""
If configured, randomly wait up to one minute. Is called before the first request.
:return:
"""
# wait random amount of time to break synchron client behavior
wait_until = time.time() + random.randint(0, self._config['random_wait_time'])
while time.time() < wait_until and self._running:
self._logger.debug("Waiting %.1fs to start first request" % (wait_until - time.time()))
time.sleep(1)
def _do_request(self):
"""
Returns true of the client should do a new request.
:return:
"""
if time.time() < self._next_request:
return False
else:
return True
def _request_finished(self):
"""
Call when client request is finished.
:return:
"""
self._next_request = self._next_request_ts()
self._logger.debug("next call at %s" % (time.strftime("%H:%M:%S", time.localtime(self._next_request))))
def _next_request_ts(self):
"""
Returns the timestamp when to schedule the next request.
:return:
"""
self._logger.debug("PrototypeServer._next_request_ts()")
# set if not already contained in config
if self._config.get('inter_request_not_pause', False):
self._config['inter_request_not_pause'] = True
else:
self._config['inter_request_not_pause'] = False
if self._config['inter_request_not_pause']:
self._logger.debug("Using old request timestamp as starting point")
t_ref = self._next_request
self._logger.debug("Using time.time() as starting point")
else:
t_ref = time.time()
# Constant time between requests
if self._config['request_mode'] == 'constant':
req_ts = t_ref + self._config['request_interval']
# Exponential distributed time between the requests
elif self._config['request_mode'] == 'exponential':
req_ts = t_ref + np.random.exponential(self._config['request_interval'])
else:
raise Exception("Unknown request mode %s" % self._config['request_mode'])
self._logger.debug("next_request_ts(): mode: %s, interval: %.1f, rel_ts: %.3fs, inter_request_not_pause %s" %
(self._config['request_mode'], self._config['request_interval'], req_ts - time.time(), self._config['inter_request_not_pause']))
return req_ts | [
"random.randint",
"numpy.random.exponential",
"logging.getLogger",
"time.sleep",
"time.time",
"time.localtime"
] | [((305, 368), 'logging.getLogger', 'logging.getLogger', (["('AppAware.Server.' + self.__class__.__name__)"], {}), "('AppAware.Server.' + self.__class__.__name__)\n", (322, 368), False, 'import logging\n'), ((745, 756), 'time.time', 'time.time', ([], {}), '()\n', (754, 756), False, 'import time\n'), ((1049, 1062), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1059, 1062), False, 'import time\n'), ((1936, 1947), 'time.time', 'time.time', ([], {}), '()\n', (1945, 1947), False, 'import time\n'), ((1950, 2001), 'random.randint', 'random.randint', (['(0)', "self._config['random_wait_time']"], {}), "(0, self._config['random_wait_time'])\n", (1964, 2001), False, 'import random\n'), ((2172, 2185), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2182, 2185), False, 'import time\n'), ((2328, 2339), 'time.time', 'time.time', ([], {}), '()\n', (2337, 2339), False, 'import time\n'), ((3441, 3452), 'time.time', 'time.time', ([], {}), '()\n', (3450, 3452), False, 'import time\n'), ((2016, 2027), 'time.time', 'time.time', ([], {}), '()\n', (2025, 2027), False, 'import time\n'), ((2676, 2710), 'time.localtime', 'time.localtime', (['self._next_request'], {}), '(self._next_request)\n', (2690, 2710), False, 'import time\n'), ((3761, 3816), 'numpy.random.exponential', 'np.random.exponential', (["self._config['request_interval']"], {}), "(self._config['request_interval'])\n", (3782, 3816), True, 'import numpy as np\n'), ((2146, 2157), 'time.time', 'time.time', ([], {}), '()\n', (2155, 2157), False, 'import time\n'), ((4137, 4148), 'time.time', 'time.time', ([], {}), '()\n', (4146, 4148), False, 'import time\n')] |
""" Unit tests for analysis functions. """
from datetime import datetime
from unittest import TestCase
from unittest.mock import Mock, create_autospec
import numpy as np
import pytz
from acnportal import acnsim
from acnportal.algorithms import BaseAlgorithm
class TestAnalysis(TestCase):
def setUp(self):
start = datetime(2018, 12, 31, tzinfo=pytz.timezone("America/Los_Angeles"))
network = acnsim.ChargingNetwork()
evse1 = acnsim.EVSE("PS-001", max_rate=32)
network.register_evse(evse1, 240, 0)
scheduler = create_autospec(BaseAlgorithm)
scheduler.max_recompute = None
self.events = acnsim.EventQueue(events=[acnsim.Event(1)])
self.simulator = acnsim.Simulator(
network, scheduler, self.events, start, period=240
)
self.simulator._iteration = 10
self.expected_datetime_array = [
np.datetime64("2018-12-31T00:00:00.000000"),
np.datetime64("2018-12-31T04:00:00.000000"),
np.datetime64("2018-12-31T08:00:00.000000"),
np.datetime64("2018-12-31T12:00:00.000000"),
np.datetime64("2018-12-31T16:00:00.000000"),
np.datetime64("2018-12-31T20:00:00.000000"),
np.datetime64("2019-01-01T00:00:00.000000"),
np.datetime64("2019-01-01T04:00:00.000000"),
np.datetime64("2019-01-01T08:00:00.000000"),
np.datetime64("2019-01-01T12:00:00.000000"),
]
def test_datetimes_array_warning(self):
with self.assertWarns(UserWarning):
datetime_array = acnsim.datetimes_array(self.simulator)
np.testing.assert_equal(datetime_array, self.expected_datetime_array)
def test_datetimes_array(self):
self.events.empty = Mock(self.events.empty)
self.events.empty = lambda: True
datetime_array = acnsim.datetimes_array(self.simulator)
# Check that simulator start is unchanged.
self.assertIsNotNone(self.simulator.start.tzinfo)
np.testing.assert_equal(datetime_array, self.expected_datetime_array)
| [
"acnportal.acnsim.EVSE",
"unittest.mock.create_autospec",
"acnportal.acnsim.Event",
"acnportal.acnsim.datetimes_array",
"numpy.datetime64",
"unittest.mock.Mock",
"pytz.timezone",
"acnportal.acnsim.Simulator",
"acnportal.acnsim.ChargingNetwork",
"numpy.testing.assert_equal"
] | [((415, 439), 'acnportal.acnsim.ChargingNetwork', 'acnsim.ChargingNetwork', ([], {}), '()\n', (437, 439), False, 'from acnportal import acnsim\n'), ((456, 490), 'acnportal.acnsim.EVSE', 'acnsim.EVSE', (['"""PS-001"""'], {'max_rate': '(32)'}), "('PS-001', max_rate=32)\n", (467, 490), False, 'from acnportal import acnsim\n'), ((556, 586), 'unittest.mock.create_autospec', 'create_autospec', (['BaseAlgorithm'], {}), '(BaseAlgorithm)\n', (571, 586), False, 'from unittest.mock import Mock, create_autospec\n'), ((717, 785), 'acnportal.acnsim.Simulator', 'acnsim.Simulator', (['network', 'scheduler', 'self.events', 'start'], {'period': '(240)'}), '(network, scheduler, self.events, start, period=240)\n', (733, 785), False, 'from acnportal import acnsim\n'), ((1772, 1795), 'unittest.mock.Mock', 'Mock', (['self.events.empty'], {}), '(self.events.empty)\n', (1776, 1795), False, 'from unittest.mock import Mock, create_autospec\n'), ((1862, 1900), 'acnportal.acnsim.datetimes_array', 'acnsim.datetimes_array', (['self.simulator'], {}), '(self.simulator)\n', (1884, 1900), False, 'from acnportal import acnsim\n'), ((2018, 2087), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['datetime_array', 'self.expected_datetime_array'], {}), '(datetime_array, self.expected_datetime_array)\n', (2041, 2087), True, 'import numpy as np\n'), ((900, 943), 'numpy.datetime64', 'np.datetime64', (['"""2018-12-31T00:00:00.000000"""'], {}), "('2018-12-31T00:00:00.000000')\n", (913, 943), True, 'import numpy as np\n'), ((957, 1000), 'numpy.datetime64', 'np.datetime64', (['"""2018-12-31T04:00:00.000000"""'], {}), "('2018-12-31T04:00:00.000000')\n", (970, 1000), True, 'import numpy as np\n'), ((1014, 1057), 'numpy.datetime64', 'np.datetime64', (['"""2018-12-31T08:00:00.000000"""'], {}), "('2018-12-31T08:00:00.000000')\n", (1027, 1057), True, 'import numpy as np\n'), ((1071, 1114), 'numpy.datetime64', 'np.datetime64', (['"""2018-12-31T12:00:00.000000"""'], {}), 
"('2018-12-31T12:00:00.000000')\n", (1084, 1114), True, 'import numpy as np\n'), ((1128, 1171), 'numpy.datetime64', 'np.datetime64', (['"""2018-12-31T16:00:00.000000"""'], {}), "('2018-12-31T16:00:00.000000')\n", (1141, 1171), True, 'import numpy as np\n'), ((1185, 1228), 'numpy.datetime64', 'np.datetime64', (['"""2018-12-31T20:00:00.000000"""'], {}), "('2018-12-31T20:00:00.000000')\n", (1198, 1228), True, 'import numpy as np\n'), ((1242, 1285), 'numpy.datetime64', 'np.datetime64', (['"""2019-01-01T00:00:00.000000"""'], {}), "('2019-01-01T00:00:00.000000')\n", (1255, 1285), True, 'import numpy as np\n'), ((1299, 1342), 'numpy.datetime64', 'np.datetime64', (['"""2019-01-01T04:00:00.000000"""'], {}), "('2019-01-01T04:00:00.000000')\n", (1312, 1342), True, 'import numpy as np\n'), ((1356, 1399), 'numpy.datetime64', 'np.datetime64', (['"""2019-01-01T08:00:00.000000"""'], {}), "('2019-01-01T08:00:00.000000')\n", (1369, 1399), True, 'import numpy as np\n'), ((1413, 1456), 'numpy.datetime64', 'np.datetime64', (['"""2019-01-01T12:00:00.000000"""'], {}), "('2019-01-01T12:00:00.000000')\n", (1426, 1456), True, 'import numpy as np\n'), ((1586, 1624), 'acnportal.acnsim.datetimes_array', 'acnsim.datetimes_array', (['self.simulator'], {}), '(self.simulator)\n', (1608, 1624), False, 'from acnportal import acnsim\n'), ((1637, 1706), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['datetime_array', 'self.expected_datetime_array'], {}), '(datetime_array, self.expected_datetime_array)\n', (1660, 1706), True, 'import numpy as np\n'), ((359, 395), 'pytz.timezone', 'pytz.timezone', (['"""America/Los_Angeles"""'], {}), "('America/Los_Angeles')\n", (372, 395), False, 'import pytz\n'), ((674, 689), 'acnportal.acnsim.Event', 'acnsim.Event', (['(1)'], {}), '(1)\n', (686, 689), False, 'from acnportal import acnsim\n')] |
from DNN import Dnn
import numpy as np
import csv
import random
from keras.optimizers import sgd,adagrad,rmsprop,adadelta,adam
import matplotlib.pyplot as plt

# Experiment hyperparameters: compare Keras optimizers on the same MLP.
batch_size=64
input_dim=784
output_dim=10
learning_rate=0.001
train_size=0.8  # fraction of rows used for training; the rest is validation
num_epoch=100

# Load MNIST-style CSV rows: first column is the label, the rest are pixels.
# Use a context manager so the file handle is closed after reading.
with open("./train.csv",'r',encoding='utf-8',newline='') as f:
    reader=csv.reader(f,delimiter=',')
    raw_data=[[int(e) for e in r] for r in list(reader)[1:]]  # skip header row
random.shuffle(raw_data)
data_set=np.array(raw_data)

# BUG FIX: len(data_set)*train_size is a float, and floats are not valid
# slice indices — the original lines raised TypeError. Compute the split
# point once as an int.
split=int(len(data_set)*train_size)
train_x=data_set[:split,1:]
train_y=np.eye(10)[data_set[:split,0]]  # one-hot encode the labels
valid_x=data_set[split:,1:]
valid_y=np.eye(10)[data_set[split:,0]]

plt.xlabel("epoch")
plt.ylabel("loss")


def _compare_optimizer(optimizer, label):
    """Train a fresh Dnn with *optimizer*, plot its val-loss curve, return it."""
    dnn=Dnn(input_dim,output_dim,optimizer)
    dnn.model.summary()
    hist=dnn.train_on_batch(
        np.array(train_x).reshape(-1,1,input_dim),train_y,
        batch_size,num_epoch,
        np.array(valid_x).reshape(-1,1,input_dim),valid_y)
    plt.plot(range(len(hist.history['val_loss'])),hist.history['val_loss'],label=label)
    return dnn


# One curve per optimizer; `dnn` keeps the last trained model (ADAM),
# matching the original script so the commented-out test block still works.
dnn=_compare_optimizer(sgd(lr=learning_rate),"SGD")                                 # stochastic gradient descent
dnn=_compare_optimizer(sgd(lr=learning_rate,momentum=0.9),"MOMENTUM")              # momentum
dnn=_compare_optimizer(sgd(lr=learning_rate,momentum=0.9,nesterov=True),"NAG")    # Nesterov accelerated gradient
dnn=_compare_optimizer(adagrad(lr=learning_rate),"ADAGRAD")
dnn=_compare_optimizer(rmsprop(lr=learning_rate),"RMSPROP")
dnn=_compare_optimizer(adam(lr=learning_rate),"ADAM")
plt.legend(bbox_to_anchor=(1,1),loc=6,borderaxespad=0.)
plt.show()
# test
'''
f=open("./test.csv",'r',encoding='utf-8',newline='')
reader=csv.reader(f,delimiter=',')
raw_data=[[int(e) for e in r] for r in list(reader)[1:]]
data_set=np.array(raw_data)
test_x=data_set[:,:].reshape(-1,1,input_dim)
for i in range(len(test_x)):
    print(np.argmax(dnn.predict(test_x[i:i+1])))
'''
| [
"keras.optimizers.adam",
"keras.optimizers.rmsprop",
"csv.reader",
"matplotlib.pyplot.show",
"numpy.eye",
"random.shuffle",
"matplotlib.pyplot.legend",
"keras.optimizers.sgd",
"keras.optimizers.adagrad",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((330, 358), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (340, 358), False, 'import csv\n'), ((419, 443), 'random.shuffle', 'random.shuffle', (['raw_data'], {}), '(raw_data)\n', (433, 443), False, 'import random\n'), ((454, 472), 'numpy.array', 'np.array', (['raw_data'], {}), '(raw_data)\n', (462, 472), True, 'import numpy as np\n'), ((688, 707), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (698, 707), True, 'import matplotlib.pyplot as plt\n'), ((709, 727), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (719, 727), True, 'import matplotlib.pyplot as plt\n'), ((2745, 2804), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 1)', 'loc': '(6)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1, 1), loc=6, borderaxespad=0.0)\n', (2755, 2804), True, 'import matplotlib.pyplot as plt\n'), ((2802, 2812), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2810, 2812), True, 'import matplotlib.pyplot as plt\n'), ((530, 540), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (536, 540), True, 'import numpy as np\n'), ((637, 647), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (643, 647), True, 'import numpy as np\n'), ((791, 812), 'keras.optimizers.sgd', 'sgd', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (794, 812), False, 'from keras.optimizers import sgd, adagrad, rmsprop, adadelta, adam\n'), ((1111, 1146), 'keras.optimizers.sgd', 'sgd', ([], {'lr': 'learning_rate', 'momentum': '(0.9)'}), '(lr=learning_rate, momentum=0.9)\n', (1114, 1146), False, 'from keras.optimizers import sgd, adagrad, rmsprop, adadelta, adam\n'), ((1469, 1519), 'keras.optimizers.sgd', 'sgd', ([], {'lr': 'learning_rate', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=learning_rate, momentum=0.9, nesterov=True)\n', (1472, 1519), False, 'from keras.optimizers import sgd, adagrad, rmsprop, adadelta, adam\n'), ((1815, 1840), 'keras.optimizers.adagrad', 
'adagrad', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (1822, 1840), False, 'from keras.optimizers import sgd, adagrad, rmsprop, adadelta, adam\n'), ((2141, 2166), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (2148, 2166), False, 'from keras.optimizers import sgd, adagrad, rmsprop, adadelta, adam\n'), ((2464, 2486), 'keras.optimizers.adam', 'adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (2468, 2486), False, 'from keras.optimizers import sgd, adagrad, rmsprop, adadelta, adam\n'), ((860, 877), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (868, 877), True, 'import numpy as np\n'), ((931, 948), 'numpy.array', 'np.array', (['valid_x'], {}), '(valid_x)\n', (939, 948), True, 'import numpy as np\n'), ((1193, 1210), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (1201, 1210), True, 'import numpy as np\n'), ((1264, 1281), 'numpy.array', 'np.array', (['valid_x'], {}), '(valid_x)\n', (1272, 1281), True, 'import numpy as np\n'), ((1565, 1582), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (1573, 1582), True, 'import numpy as np\n'), ((1636, 1653), 'numpy.array', 'np.array', (['valid_x'], {}), '(valid_x)\n', (1644, 1653), True, 'import numpy as np\n'), ((1888, 1905), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (1896, 1905), True, 'import numpy as np\n'), ((1959, 1976), 'numpy.array', 'np.array', (['valid_x'], {}), '(valid_x)\n', (1967, 1976), True, 'import numpy as np\n'), ((2214, 2231), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (2222, 2231), True, 'import numpy as np\n'), ((2285, 2302), 'numpy.array', 'np.array', (['valid_x'], {}), '(valid_x)\n', (2293, 2302), True, 'import numpy as np\n'), ((2534, 2551), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (2542, 2551), True, 'import numpy as np\n'), ((2605, 2622), 'numpy.array', 'np.array', (['valid_x'], {}), '(valid_x)\n', (2613, 2622), True, 'import numpy as 
np\n')] |
# Built-in libraries
import copy
import datetime
from typing import Dict, List
# Third-party libraries
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from sklearn.metrics import f1_score
from tqdm import tqdm
# Local files
from utils import save
from config import LABEL_DICT
from transformers import BertTokenizer, RobertaTokenizer, get_cosine_schedule_with_warmup
class Trainer():
    '''
    The trainer for training models.

    It can be used for both single-task and multi-task training:
    every method whose name ends with ``_m`` is the multi-task variant.
    '''
    def __init__(
        self,
        model: nn.Module,
        epochs: int,
        dataloaders: Dict[str, DataLoader],
        criterion: nn.Module,
        loss_weights: List[float],
        clip: bool,
        optimizer: torch.optim.Optimizer,
        scheduler: torch.optim.lr_scheduler,
        device: str,
        print_iter: int,
        patience: int,
        task_name: str,
        model_name: str,
        final: bool,
        seed: int
    ):
        '''
        :param model: network to train
        :param epochs: number of training epochs
        :param dataloaders: must contain the keys 'train' and 'test'
        :param criterion: loss function applied to the logits
        :param loss_weights: per-task weights for the multi-task loss
        :param clip: whether to clip gradient norms at 10
        :param optimizer: optimizer stepped once per batch
        :param scheduler: LR scheduler stepped once per batch, or None
        :param device: device string tensors are moved to
        :param print_iter: print interval (stored but unused in this class)
        :param patience: early-stopping patience (stored but unused in this class)
        :param task_name: 'all' selects multi-task file naming in save_model
        :param model_name: used in the saved-model file name
        :param final: if True, a fourth ("final") task head is expected
        :param seed: random seed, recorded in the saved-model file name
        '''
        self.model = model
        self.epochs = epochs
        self.dataloaders = dataloaders
        self.criterion = criterion
        self.loss_weights = loss_weights
        self.clip = clip
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device
        self.print_iter = print_iter
        self.patience = patience
        self.task_name = task_name
        self.model_name = model_name
        self.final = final
        self.seed = seed
        # Timestamp used to tag the saved result files.
        self.datetimestr = datetime.datetime.now().strftime('%Y-%b-%d_%H:%M:%S')
        # Evaluation results
        self.train_losses = []
        self.test_losses = []
        self.train_f1 = []
        self.test_f1 = []
        self.best_train_f1 = 0.0
        self.best_test_f1 = 0.0
        # Evaluation results for multi-task (one slot per task A/B/C,
        # plus a fourth slot for the "final" head when self.final is set).
        self.best_train_f1_m = np.array([0, 0, 0], dtype=np.float64)
        self.best_test_f1_m = np.array([0, 0, 0], dtype=np.float64)
        if self.final:
            self.best_train_f1_m = np.array([0, 0, 0, 0], dtype=np.float64)
            self.best_test_f1_m = np.array([0, 0, 0, 0], dtype=np.float64)
    def train(self):
        '''Run the single-task train/test loop and save the collected metrics.'''
        for epoch in range(self.epochs):
            print(f'Epoch {epoch}')
            print('=' * 20)
            self.train_one_epoch()
            self.test()
            print(f'Best test f1: {self.best_test_f1:.4f}')
            print('=' * 20)
        print('Saving results ...')
        save(
            (self.train_losses, self.test_losses, self.train_f1, self.test_f1, self.best_train_f1, self.best_test_f1),
            f'./save/results/single_{self.task_name}_{self.datetimestr}_{self.best_test_f1:.4f}.pt'
        )
    def train_one_epoch(self):
        '''Train the single-task model for one epoch and record loss/macro-F1.'''
        self.model.train()
        dataloader = self.dataloaders['train']
        y_pred_all = None
        labels_all = None
        loss = 0
        iters_per_epoch = 0
        for inputs, lens, mask, labels in tqdm(dataloader, desc='Training'):
            iters_per_epoch += 1
            # Accumulate labels on CPU for the epoch-level F1 computation.
            if labels_all is None:
                labels_all = labels.numpy()
            else:
                labels_all = np.concatenate((labels_all, labels.numpy()))
            inputs = inputs.to(device=self.device)
            lens = lens.to(device=self.device)
            mask = mask.to(device=self.device)
            labels = labels.to(device=self.device)
            self.optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                # Forward
                logits = self.model(inputs, lens, mask, labels)
                _loss = self.criterion(logits, labels)
                loss += _loss.item()
                y_pred = logits.argmax(dim=1).cpu().numpy()
                if y_pred_all is None:
                    y_pred_all = y_pred
                else:
                    y_pred_all = np.concatenate((y_pred_all, y_pred))
                # Backward
                _loss.backward()
                if self.clip:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10)
                self.optimizer.step()
                # Scheduler is stepped per batch, not per epoch.
                if self.scheduler is not None:
                    self.scheduler.step()
        loss /= iters_per_epoch
        f1 = f1_score(labels_all, y_pred_all, average='macro')
        print(f'loss = {loss:.4f}')
        print(f'Macro-F1 = {f1:.4f}')
        self.train_losses.append(loss)
        self.train_f1.append(f1)
        if f1 > self.best_train_f1:
            self.best_train_f1 = f1
    def test(self):
        '''Evaluate the single-task model; checkpoint when test F1 improves.'''
        self.model.eval()
        dataloader = self.dataloaders['test']
        y_pred_all = None
        labels_all = None
        loss = 0
        iters_per_epoch = 0
        for inputs, lens, mask, labels in tqdm(dataloader, desc='Testing'):
            iters_per_epoch += 1
            if labels_all is None:
                labels_all = labels.numpy()
            else:
                labels_all = np.concatenate((labels_all, labels.numpy()))
            inputs = inputs.to(device=self.device)
            lens = lens.to(device=self.device)
            mask = mask.to(device=self.device)
            labels = labels.to(device=self.device)
            # Gradients disabled for evaluation.
            with torch.set_grad_enabled(False):
                logits = self.model(inputs, lens, mask, labels)
                _loss = self.criterion(logits, labels)
                y_pred = logits.argmax(dim=1).cpu().numpy()
                loss += _loss.item()
                if y_pred_all is None:
                    y_pred_all = y_pred
                else:
                    y_pred_all = np.concatenate((y_pred_all, y_pred))
        loss /= iters_per_epoch
        f1 = f1_score(labels_all, y_pred_all, average='macro')
        print(f'loss = {loss:.4f}')
        print(f'Macro-F1 = {f1:.4f}')
        self.test_losses.append(loss)
        self.test_f1.append(f1)
        if f1 > self.best_test_f1:
            self.best_test_f1 = f1
            self.save_model()
    def train_m(self):
        '''Run the multi-task train/test loop and save the collected metrics.'''
        for epoch in range(self.epochs):
            print(f'Epoch {epoch}')
            print('=' * 20)
            self.train_one_epoch_m()
            self.test_m()
            print(f'Best test results A: {self.best_test_f1_m[0]:.4f}')
            print(f'Best test results B: {self.best_test_f1_m[1]:.4f}')
            print(f'Best test results C: {self.best_test_f1_m[2]:.4f}')
            if self.final:
                print(f'Best test results Final: {self.best_test_f1_m[3]:.4f}')
            print('=' * 20)
        print('Saving results ...')
        if self.final:
            save(
                (self.train_losses, self.test_losses, self.train_f1, self.test_f1, self.best_train_f1_m, self.best_test_f1_m),
                f'./save/results/mtl_final_{self.datetimestr}_{self.best_test_f1_m[0]:.4f}_{self.best_test_f1_m[3]:.4f}.pt'
            )
        else:
            save(
                (self.train_losses, self.test_losses, self.train_f1, self.test_f1, self.best_train_f1_m, self.best_test_f1_m),
                f'./save/results/mtl_{self.datetimestr}_{self.best_test_f1_m[0]:.4f}.pt'
            )
    def train_one_epoch_m(self):
        '''Train the multi-task model for one epoch (tasks A, B, C).'''
        self.model.train()
        dataloader = self.dataloaders['train']
        y_pred_all_A = None
        y_pred_all_B = None
        y_pred_all_C = None
        labels_all_A = None
        labels_all_B = None
        labels_all_C = None
        loss = 0
        iters_per_epoch = 0
        for inputs, lens, mask, label_A, label_B, label_C in tqdm(dataloader, desc='Training M'):
            iters_per_epoch += 1
            inputs = inputs.to(device=self.device)
            lens = lens.to(device=self.device)
            mask = mask.to(device=self.device)
            label_A = label_A.to(device=self.device)
            label_B = label_B.to(device=self.device)
            label_C = label_C.to(device=self.device)
            self.optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                # Forward
                # logits_A, logits_B, logits_C = self.model(inputs, mask)
                all_logits = self.model(inputs, lens, mask)
                y_pred_A = all_logits[0].argmax(dim=1).cpu().numpy()
                # Predictions for B/C only consider the first 2 resp. 3 logits
                # — presumably this excludes a NULL class; confirm against
                # the model's head layout.
                y_pred_B = all_logits[1][:, 0:2].argmax(dim=1)
                y_pred_C = all_logits[2][:, 0:3].argmax(dim=1)
                # F1 for B/C is computed only on samples whose gold label is
                # not NULL.
                Non_null_index_B = label_B != LABEL_DICT['b']['NULL']
                Non_null_label_B = label_B[Non_null_index_B]
                Non_null_pred_B = y_pred_B[Non_null_index_B]
                Non_null_index_C = label_C != LABEL_DICT['c']['NULL']
                Non_null_label_C = label_C[Non_null_index_C]
                Non_null_pred_C = y_pred_C[Non_null_index_C]
                labels_all_A = label_A.cpu().numpy() if labels_all_A is None else np.concatenate((labels_all_A, label_A.cpu().numpy()))
                labels_all_B = Non_null_label_B.cpu().numpy() if labels_all_B is None else np.concatenate((labels_all_B, Non_null_label_B.cpu().numpy()))
                labels_all_C = Non_null_label_C.cpu().numpy() if labels_all_C is None else np.concatenate((labels_all_C, Non_null_label_C.cpu().numpy()))
                y_pred_all_A = y_pred_A if y_pred_all_A is None else np.concatenate((y_pred_all_A, y_pred_A))
                y_pred_all_B = Non_null_pred_B.cpu().numpy() if y_pred_all_B is None else np.concatenate((y_pred_all_B, Non_null_pred_B.cpu().numpy()))
                y_pred_all_C = Non_null_pred_C.cpu().numpy() if y_pred_all_C is None else np.concatenate((y_pred_all_C, Non_null_pred_C.cpu().numpy()))
                # f1[0] += self.calc_f1(label_A, y_pred_A)
                # f1[1] += self.calc_f1(Non_null_label_B, Non_null_pred_B)
                # f1[2] += self.calc_f1(Non_null_label_C, Non_null_pred_C)
                # Weighted sum of the per-task losses.
                _loss = self.loss_weights[0] * self.criterion(all_logits[0], label_A)
                _loss += self.loss_weights[1] * self.criterion(all_logits[1], label_B)
                _loss += self.loss_weights[2] * self.criterion(all_logits[2], label_C)
                if self.final:
                    y_pred_final = all_logits[3].argmax(dim=1)
                    _loss += self.loss_weights[3] * self.criterion(all_logits[3], label_A)
                    # NOTE(review): `f1` is never defined in this method, so
                    # this line raises NameError whenever self.final is True.
                    # It appears to be a leftover of the commented-out f1
                    # bookkeeping above — needs fixing.
                    f1[3] += self.calc_f1(label_A, y_pred_final)
                loss += _loss.item()
                # Backward
                _loss.backward()
                if self.clip:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10)
                self.optimizer.step()
                if self.scheduler is not None:
                    self.scheduler.step()
        loss /= iters_per_epoch
        f1_A = f1_score(labels_all_A, y_pred_all_A, average='macro')
        f1_B = f1_score(labels_all_B, y_pred_all_B, average='macro')
        f1_C = f1_score(labels_all_C, y_pred_all_C, average='macro')
        print(f'loss = {loss:.4f}')
        print(f'A: {f1_A:.4f}')
        print(f'B: {f1_B:.4f}')
        print(f'C: {f1_C:.4f}')
        if self.final:
            # NOTE(review): `f1` is undefined here as well (see above).
            print(f'Final: {f1[3][0]:.4f}, {f1[3][1]:.4f}, {f1[3][2]:.4f}')
        self.train_losses.append(loss)
        self.train_f1.append([f1_A, f1_B, f1_C])
        if f1_A > self.best_train_f1_m[0]:
            self.best_train_f1_m[0] = f1_A
        if f1_B > self.best_train_f1_m[1]:
            self.best_train_f1_m[1] = f1_B
        if f1_C > self.best_train_f1_m[2]:
            self.best_train_f1_m[2] = f1_C
        # for i in range(len(f1)):
        #     for j in range(len(f1[0])):
        #         if f1[i][j] > self.best_train_f1_m[i][j]:
        #             self.best_train_f1_m[i][j] = f1[i][j]
        #             if not self.final and i == 0 and j == 0:
        #                 self.save_model()
        #             if self.final and i == 3 and j == 0:
        #                 self.save_model()
    def test_m(self):
        '''Evaluate the multi-task model; checkpoint when task-A F1 improves.'''
        self.model.eval()
        dataloader = self.dataloaders['test']
        if self.final:
            # NOTE(review): `f1` is never defined in this method, so this
            # line raises NameError whenever self.final is True — leftover of
            # the commented-out f1 bookkeeping below; needs fixing.
            f1 = np.concatenate((f1, [[0, 0, 0]]))
        loss = 0
        iters_per_epoch = 0
        y_pred_all_A = None
        y_pred_all_B = None
        y_pred_all_C = None
        labels_all_A = None
        labels_all_B = None
        labels_all_C = None
        for inputs, lens, mask, label_A, label_B, label_C in tqdm(dataloader, desc='Test M'):
            iters_per_epoch += 1
            # Unlike training, evaluation scores B/C on all samples
            # (including NULL-labelled ones).
            labels_all_A = label_A.numpy() if labels_all_A is None else np.concatenate((labels_all_A, label_A.numpy()))
            labels_all_B = label_B.numpy() if labels_all_B is None else np.concatenate((labels_all_B, label_B.numpy()))
            labels_all_C = label_C.numpy() if labels_all_C is None else np.concatenate((labels_all_C, label_C.numpy()))
            inputs = inputs.to(device=self.device)
            lens = lens.to(device=self.device)
            mask = mask.to(device=self.device)
            label_A = label_A.to(device=self.device)
            label_B = label_B.to(device=self.device)
            label_C = label_C.to(device=self.device)
            with torch.set_grad_enabled(False):
                all_logits = self.model(inputs, lens, mask)
                y_pred_A = all_logits[0].argmax(dim=1).cpu().numpy()
                y_pred_B = all_logits[1].argmax(dim=1).cpu().numpy()
                y_pred_C = all_logits[2].argmax(dim=1).cpu().numpy()
                # f1[0] += self.calc_f1(label_A, y_pred_A)
                # f1[1] += self.calc_f1(label_B, y_pred_B)
                # f1[2] += self.calc_f1(label_C, y_pred_C)
                y_pred_all_A = y_pred_A if y_pred_all_A is None else np.concatenate((y_pred_all_A, y_pred_A))
                y_pred_all_B = y_pred_B if y_pred_all_B is None else np.concatenate((y_pred_all_B, y_pred_B))
                y_pred_all_C = y_pred_C if y_pred_all_C is None else np.concatenate((y_pred_all_C, y_pred_C))
                _loss = self.loss_weights[0] * self.criterion(all_logits[0], label_A)
                _loss += self.loss_weights[1] * self.criterion(all_logits[1], label_B)
                _loss += self.loss_weights[2] * self.criterion(all_logits[2], label_C)
                if self.final:
                    y_pred_final = all_logits[3].argmax(dim=1)
                    _loss += self.loss_weights[3] * self.criterion(all_logits[3], label_A)
                    # NOTE(review): `f1` is undefined here as well (NameError).
                    f1[3] += self.calc_f1(label_A, y_pred_final)
                loss += _loss.item()
        loss /= iters_per_epoch
        f1_A = f1_score(labels_all_A, y_pred_all_A, average='macro')
        f1_B = f1_score(labels_all_B, y_pred_all_B, average='macro')
        f1_C = f1_score(labels_all_C, y_pred_all_C, average='macro')
        print(f'loss = {loss:.4f}')
        print(f'A: {f1_A:.4f}')
        print(f'B: {f1_B:.4f}')
        print(f'C: {f1_C:.4f}')
        if self.final:
            print(f'Final: {f1[3][0]:.4f}, {f1[3][1]:.4f}, {f1[3][2]:.4f}')
        self.test_losses.append(loss)
        self.test_f1.append([f1_A, f1_B, f1_C])
        # Task A drives checkpointing: the model is saved only when its
        # test F1 improves.
        if f1_A > self.best_test_f1_m[0]:
            self.best_test_f1_m[0] = f1_A
            self.save_model()
        if f1_B > self.best_test_f1_m[1]:
            self.best_test_f1_m[1] = f1_B
        if f1_C > self.best_test_f1_m[2]:
            self.best_test_f1_m[2] = f1_C
        # for i in range(len(f1)):
        #     for j in range(len(f1[0])):
        #         if f1[i][j] > self.best_test_f1_m[i][j]:
        #             self.best_test_f1_m[i][j] = f1[i][j]
        #             if i == 0 and j == 0:
        #                 self.save_model()
    def calc_f1(self, labels, y_pred):
        '''Return macro/micro/weighted F1 for one batch as a float64 array.'''
        return np.array([
            f1_score(labels.cpu(), y_pred.cpu(), average='macro'),
            f1_score(labels.cpu(), y_pred.cpu(), average='micro'),
            f1_score(labels.cpu(), y_pred.cpu(), average='weighted')
        ], np.float64)
    def printing(self, loss, f1):
        '''Print loss and macro-F1 (f1 is a triple as built by calc_f1).'''
        print(f'loss = {loss:.4f}')
        print(f'Macro-F1 = {f1[0]:.4f}')
        # print(f'Micro-F1 = {f1[1]:.4f}')
        # print(f'Weighted-F1 = {f1[2]:.4f}')
    def save_model(self):
        '''Save a deep copy of the model state dict under ./save/models/.'''
        print('Saving model...')
        # task_name == 'all' means multi-task: the file name records the
        # best task-A test F1; otherwise the single-task best test F1.
        if self.task_name == 'all':
            filename = f'./save/models/{self.task_name}_{self.model_name}_{self.best_test_f1_m[0]}_seed{self.seed}.pt'
        else:
            filename = f'./save/models/{self.task_name}_{self.model_name}_{self.best_test_f1}_seed{self.seed}.pt'
        save(copy.deepcopy(self.model.state_dict()), filename)
| [
"tqdm.tqdm",
"sklearn.metrics.f1_score",
"numpy.array",
"torch.set_grad_enabled",
"datetime.datetime.now",
"numpy.concatenate",
"utils.save"
] | [((1925, 1962), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0], dtype=np.float64)\n', (1933, 1962), True, 'import numpy as np\n'), ((1993, 2030), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0], dtype=np.float64)\n', (2001, 2030), True, 'import numpy as np\n'), ((2524, 2737), 'utils.save', 'save', (['(self.train_losses, self.test_losses, self.train_f1, self.test_f1, self.\n best_train_f1, self.best_test_f1)', 'f"""./save/results/single_{self.task_name}_{self.datetimestr}_{self.best_test_f1:.4f}.pt"""'], {}), "((self.train_losses, self.test_losses, self.train_f1, self.test_f1,\n self.best_train_f1, self.best_test_f1),\n f'./save/results/single_{self.task_name}_{self.datetimestr}_{self.best_test_f1:.4f}.pt'\n )\n", (2528, 2737), False, 'from utils import save\n'), ((3004, 3037), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""Training"""'}), "(dataloader, desc='Training')\n", (3008, 3037), False, 'from tqdm import tqdm\n'), ((4296, 4345), 'sklearn.metrics.f1_score', 'f1_score', (['labels_all', 'y_pred_all'], {'average': '"""macro"""'}), "(labels_all, y_pred_all, average='macro')\n", (4304, 4345), False, 'from sklearn.metrics import f1_score\n'), ((4798, 4830), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""Testing"""'}), "(dataloader, desc='Testing')\n", (4802, 4830), False, 'from tqdm import tqdm\n'), ((5717, 5766), 'sklearn.metrics.f1_score', 'f1_score', (['labels_all', 'y_pred_all'], {'average': '"""macro"""'}), "(labels_all, y_pred_all, average='macro')\n", (5725, 5766), False, 'from sklearn.metrics import f1_score\n'), ((7545, 7580), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""Training M"""'}), "(dataloader, desc='Training M')\n", (7549, 7580), False, 'from tqdm import tqdm\n'), ((10724, 10777), 'sklearn.metrics.f1_score', 'f1_score', (['labels_all_A', 'y_pred_all_A'], {'average': '"""macro"""'}), "(labels_all_A, y_pred_all_A, average='macro')\n", (10732, 10777), False, 'from 
sklearn.metrics import f1_score\n'), ((10793, 10846), 'sklearn.metrics.f1_score', 'f1_score', (['labels_all_B', 'y_pred_all_B'], {'average': '"""macro"""'}), "(labels_all_B, y_pred_all_B, average='macro')\n", (10801, 10846), False, 'from sklearn.metrics import f1_score\n'), ((10862, 10915), 'sklearn.metrics.f1_score', 'f1_score', (['labels_all_C', 'y_pred_all_C'], {'average': '"""macro"""'}), "(labels_all_C, y_pred_all_C, average='macro')\n", (10870, 10915), False, 'from sklearn.metrics import f1_score\n'), ((12349, 12380), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""Test M"""'}), "(dataloader, desc='Test M')\n", (12353, 12380), False, 'from tqdm import tqdm\n'), ((14504, 14557), 'sklearn.metrics.f1_score', 'f1_score', (['labels_all_A', 'y_pred_all_A'], {'average': '"""macro"""'}), "(labels_all_A, y_pred_all_A, average='macro')\n", (14512, 14557), False, 'from sklearn.metrics import f1_score\n'), ((14573, 14626), 'sklearn.metrics.f1_score', 'f1_score', (['labels_all_B', 'y_pred_all_B'], {'average': '"""macro"""'}), "(labels_all_B, y_pred_all_B, average='macro')\n", (14581, 14626), False, 'from sklearn.metrics import f1_score\n'), ((14642, 14695), 'sklearn.metrics.f1_score', 'f1_score', (['labels_all_C', 'y_pred_all_C'], {'average': '"""macro"""'}), "(labels_all_C, y_pred_all_C, average='macro')\n", (14650, 14695), False, 'from sklearn.metrics import f1_score\n'), ((2089, 2129), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0], dtype=np.float64)\n', (2097, 2129), True, 'import numpy as np\n'), ((2164, 2204), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0], dtype=np.float64)\n', (2172, 2204), True, 'import numpy as np\n'), ((6628, 6865), 'utils.save', 'save', (['(self.train_losses, self.test_losses, self.train_f1, self.test_f1, self.\n best_train_f1_m, self.best_test_f1_m)', 
'f"""./save/results/mtl_final_{self.datetimestr}_{self.best_test_f1_m[0]:.4f}_{self.best_test_f1_m[3]:.4f}.pt"""'], {}), "((self.train_losses, self.test_losses, self.train_f1, self.test_f1,\n self.best_train_f1_m, self.best_test_f1_m),\n f'./save/results/mtl_final_{self.datetimestr}_{self.best_test_f1_m[0]:.4f}_{self.best_test_f1_m[3]:.4f}.pt'\n )\n", (6632, 6865), False, 'from utils import save\n'), ((6925, 7122), 'utils.save', 'save', (['(self.train_losses, self.test_losses, self.train_f1, self.test_f1, self.\n best_train_f1_m, self.best_test_f1_m)', 'f"""./save/results/mtl_{self.datetimestr}_{self.best_test_f1_m[0]:.4f}.pt"""'], {}), "((self.train_losses, self.test_losses, self.train_f1, self.test_f1,\n self.best_train_f1_m, self.best_test_f1_m),\n f'./save/results/mtl_{self.datetimestr}_{self.best_test_f1_m[0]:.4f}.pt')\n", (6929, 7122), False, 'from utils import save\n'), ((12039, 12072), 'numpy.concatenate', 'np.concatenate', (['(f1, [[0, 0, 0]])'], {}), '((f1, [[0, 0, 0]]))\n', (12053, 12072), True, 'import numpy as np\n'), ((1586, 1609), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1607, 1609), False, 'import datetime\n'), ((3499, 3527), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (3521, 3527), False, 'import torch\n'), ((5252, 5281), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (5274, 5281), False, 'import torch\n'), ((7978, 8006), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (8000, 8006), False, 'import torch\n'), ((13099, 13128), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (13121, 13128), False, 'import torch\n'), ((3906, 3942), 'numpy.concatenate', 'np.concatenate', (['(y_pred_all, y_pred)'], {}), '((y_pred_all, y_pred))\n', (3920, 3942), True, 'import numpy as np\n'), ((5634, 5670), 'numpy.concatenate', 'np.concatenate', (['(y_pred_all, y_pred)'], {}), '((y_pred_all, 
y_pred))\n', (5648, 5670), True, 'import numpy as np\n'), ((9264, 9304), 'numpy.concatenate', 'np.concatenate', (['(y_pred_all_A, y_pred_A)'], {}), '((y_pred_all_A, y_pred_A))\n', (9278, 9304), True, 'import numpy as np\n'), ((13645, 13685), 'numpy.concatenate', 'np.concatenate', (['(y_pred_all_A, y_pred_A)'], {}), '((y_pred_all_A, y_pred_A))\n', (13659, 13685), True, 'import numpy as np\n'), ((13755, 13795), 'numpy.concatenate', 'np.concatenate', (['(y_pred_all_B, y_pred_B)'], {}), '((y_pred_all_B, y_pred_B))\n', (13769, 13795), True, 'import numpy as np\n'), ((13865, 13905), 'numpy.concatenate', 'np.concatenate', (['(y_pred_all_C, y_pred_C)'], {}), '((y_pred_all_C, y_pred_C))\n', (13879, 13905), True, 'import numpy as np\n')] |
"""
Display one 3-D volume layer using the add_volume API
"""
import numpy as np
import napari
translate = (10,) * 3
with napari.gui_qt():
data = np.random.randint(0,255, (10,10,10), dtype='uint8')
viewer = napari.Viewer()
viewer.add_image(data, name='raw', opacity=.5)
viewer.add_image(data, translate=translate, name=f'translated_{translate}', colormap='blue', opacity=.5) | [
"napari.Viewer",
"napari.gui_qt",
"numpy.random.randint"
] | [((124, 139), 'napari.gui_qt', 'napari.gui_qt', ([], {}), '()\n', (137, 139), False, 'import napari\n'), ((152, 206), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(10, 10, 10)'], {'dtype': '"""uint8"""'}), "(0, 255, (10, 10, 10), dtype='uint8')\n", (169, 206), True, 'import numpy as np\n'), ((217, 232), 'napari.Viewer', 'napari.Viewer', ([], {}), '()\n', (230, 232), False, 'import napari\n')] |
import torch
from PIL import Image
import torch.utils.data as data
import quaternion
import os
import numpy as np
import cv2
import glob
from torchvision import transforms
import os.path as osp
SCENES_PATH="/home/t/data/7Scenes/"
class ScenesDataset(data.Dataset):
    """Dataset over 7-Scenes frames: yields (color image, translation, quaternion).

    Frame paths are collected from the per-scene TrainSplit.txt /
    TestSplit.txt files; each *.pose.txt label has sibling *.color.png and
    *.depth.png files.

    NOTE(review): ``transform_rgb`` is accepted but never stored or applied,
    ``transform_depth`` is stored but never used, and the ``seq_list``
    parameter (a mutable default) is ignored — it is always rebuilt from the
    split file. The depth paths are collected but never returned either.
    """
    def __init__(self, data_dir =SCENES_PATH, train=True, scenes ='all',seq_list=[1,2,3,4], transform_rgb=None, transform_depth = None):
        super(ScenesDataset, self).__init__()
        self.data_dir = data_dir
        # 'all' means: use every scene folder found under data_dir.
        if scenes=='all':
            scenes=[d for d in os.listdir(data_dir)]
        self.imgs_path = []
        self.depth_path = []
        self.labels_path = []
        for scene in scenes:
            sceneDir = os.path.join(data_dir+scene)
            seq_list=[]
            # The split file lists lines like "sequence3"; '#' lines are comments.
            if train:
                split_file = osp.join(sceneDir, 'TrainSplit.txt')
            else:
                split_file = osp.join(sceneDir, 'TestSplit.txt')
            with open(split_file, 'r') as f:
                seq_list = [int(l.split('sequence')[-1]) for l in f if not l.startswith('#')]
            for i in seq_list:
                # Sequence folders are zero-padded to two digits: seq-01 ... seq-NN.
                if i<10:
                    seq_idx = "seq-0{}".format(i)
                else:
                    seq_idx = "seq-{}".format(i)
                sequenceDir = os.path.join(data_dir+scene,seq_idx)
                poselabelsNames = glob.glob(sequenceDir+"/*.pose.txt")
                poselabelsNames.sort()
                # Each pose label has matching depth/color files next to it.
                for label in poselabelsNames:
                    self.labels_path.append( label )
                    self.depth_path.append( label.replace("pose.txt","depth.png") )
                    self.imgs_path.append( label.replace("pose.txt","color.png") )
        self.transform_depth = transform_depth
    def __getitem__(self, index):
        """Return (color image, translation, quaternion) as float32 tensors."""
        img_color=cv2.imread(self.imgs_path[index])
        # The pose file holds a 4x4 homogeneous camera pose matrix.
        pose = np.loadtxt(self.labels_path[index])
        q = quaternion.from_rotation_matrix(pose[:3,:3] )
        t = pose[:3,3]
        q_arr = quaternion.as_float_array(q)#[np.newaxis,:]
        result = (torch.tensor(img_color).to(torch.float32),torch.tensor(t).to(torch.float32), torch.tensor(q_arr).to(torch.float32) )
        return result
    def __len__(self):
        # One sample per pose label collected in __init__.
        return len(self.labels_path)
if __name__ == '__main__':
    # Visual smoke test: iterate the test split and display each frame.
    dataset= ScenesDataset(train=False)
    data_loader = data.DataLoader(dataset, batch_size=1, num_workers=8,shuffle=True)
    # BUG FIX: the loop previously bound the name "data", shadowing the
    # torch.utils.data module alias imported at the top of the file; the
    # enumerate index was also unused.
    for batch in data_loader:
        x,t,q=batch
        frame=x[0].numpy()
        cv2.imshow('test',frame.astype(np.uint8))
        print('t:'+str(t))
        print('q:'+str(q))
        # Press 'q' in the OpenCV window to stop early.
        if cv2.waitKey(0) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
| [
"torch.utils.data.DataLoader",
"cv2.waitKey",
"quaternion.from_rotation_matrix",
"cv2.imread",
"numpy.loadtxt",
"quaternion.as_float_array",
"glob.glob",
"cv2.destroyAllWindows",
"os.path.join",
"os.listdir",
"torch.tensor"
] | [((2341, 2408), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dataset'], {'batch_size': '(1)', 'num_workers': '(8)', 'shuffle': '(True)'}), '(dataset, batch_size=1, num_workers=8, shuffle=True)\n', (2356, 2408), True, 'import torch.utils.data as data\n'), ((2669, 2692), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2690, 2692), False, 'import cv2\n'), ((1804, 1837), 'cv2.imread', 'cv2.imread', (['self.imgs_path[index]'], {}), '(self.imgs_path[index])\n', (1814, 1837), False, 'import cv2\n'), ((1853, 1888), 'numpy.loadtxt', 'np.loadtxt', (['self.labels_path[index]'], {}), '(self.labels_path[index])\n', (1863, 1888), True, 'import numpy as np\n'), ((1902, 1947), 'quaternion.from_rotation_matrix', 'quaternion.from_rotation_matrix', (['pose[:3, :3]'], {}), '(pose[:3, :3])\n', (1933, 1947), False, 'import quaternion\n'), ((1987, 2015), 'quaternion.as_float_array', 'quaternion.as_float_array', (['q'], {}), '(q)\n', (2012, 2015), False, 'import quaternion\n'), ((706, 736), 'os.path.join', 'os.path.join', (['(data_dir + scene)'], {}), '(data_dir + scene)\n', (718, 736), False, 'import os\n'), ((810, 846), 'os.path.join', 'osp.join', (['sceneDir', '"""TrainSplit.txt"""'], {}), "(sceneDir, 'TrainSplit.txt')\n", (818, 846), True, 'import os.path as osp\n'), ((894, 929), 'os.path.join', 'osp.join', (['sceneDir', '"""TestSplit.txt"""'], {}), "(sceneDir, 'TestSplit.txt')\n", (902, 929), True, 'import os.path as osp\n'), ((1290, 1329), 'os.path.join', 'os.path.join', (['(data_dir + scene)', 'seq_idx'], {}), '(data_dir + scene, seq_idx)\n', (1302, 1329), False, 'import os\n'), ((1361, 1399), 'glob.glob', 'glob.glob', (["(sequenceDir + '/*.pose.txt')"], {}), "(sequenceDir + '/*.pose.txt')\n", (1370, 1399), False, 'import glob\n'), ((2612, 2626), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2623, 2626), False, 'import cv2\n'), ((543, 563), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (553, 563), False, 'import os\n'), 
((2049, 2072), 'torch.tensor', 'torch.tensor', (['img_color'], {}), '(img_color)\n', (2061, 2072), False, 'import torch\n'), ((2091, 2106), 'torch.tensor', 'torch.tensor', (['t'], {}), '(t)\n', (2103, 2106), False, 'import torch\n'), ((2126, 2145), 'torch.tensor', 'torch.tensor', (['q_arr'], {}), '(q_arr)\n', (2138, 2145), False, 'import torch\n')] |
import pytest
import numpy as np
from astropy.modeling import models
from ..filters import Window1D, Optimal1D, filter_for_deadtime
from stingray.events import EventList
class TestFilters(object):
    """Unit tests for the Window1D and Optimal1D filter models."""

    @classmethod
    def setup_class(self):
        """Build a Lorentzian-plus-constant model evaluated on a fixed grid."""
        self.amplitude_0 = 5.
        self.x_0_0 = 5.
        self.fwhm_0 = 1.
        self.amplitude_1 = -5
        self.lorentz = models.Lorentz1D(
            amplitude=self.amplitude_0, x_0=self.x_0_0, fwhm=self.fwhm_0)
        self.const = models.Const1D(amplitude=self.amplitude_1)
        self.model = self.lorentz + self.const
        self.x = np.linspace(0, 10, 100)
        self.y = self.model(self.x)

    def test_window(self):
        """A top-hat window keeps only samples within fwhm/2 of the peak."""
        windowed_y = self.y * Window1D(self.model)(self.x)
        half_width = self.fwhm_0 / 2
        mask = [1. if np.abs(x_i - self.x_0_0) <= half_width else 0.
                for x_i in self.x]
        assert np.all(windowed_y == self.y * mask)

    def test_optimal(self):
        """The optimal filter weights each sample by the Lorentzian fraction."""
        weighted_y = self.y * Optimal1D(self.model)(self.x)
        weight = (self.lorentz / self.model)(self.x)
        assert np.all(weighted_y == self.y * weight)
assert np.all(filtered_y == y_o)
def test_filter_for_deadtime_nonpar():
    """Test dead time filter, non-paralyzable case."""
    arrival_times = np.array([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])
    # With a 0.11 s dead time only five of the ten events survive.
    surviving, info = filter_for_deadtime(arrival_times, 0.11, return_all=True)
    wanted = np.array([1, 2, 2.2, 3, 3.2])
    assert np.all(surviving == wanted), \
        "Wrong: {} vs {}".format(surviving, wanted)
    # Survivors must coincide with the flagged subset of the unfiltered list.
    assert np.all(surviving == info.uf_events[info.is_event])
def test_filter_for_deadtime_evlist():
    """Test dead time filter, non-paralyzable case."""
    arrival_times = np.array([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])
    ev = EventList(arrival_times)
    ev.pi = np.array([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])
    ev.energy = np.array([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])
    ev.mjdref = 10
    filtered = filter_for_deadtime(ev, 0.11)
    wanted = np.array([1, 2, 2.2, 3, 3.2])
    assert np.all(filtered.time == wanted), \
        "Wrong: {} vs {}".format(filtered, wanted)
    # The surviving events all carry pi == 1 and energy == 1, so the
    # auxiliary columns must have been filtered consistently with time.
    assert np.all(filtered.pi == 1)
    assert np.all(filtered.energy == 1)
def test_filter_for_deadtime_lt0():
    """Test dead time filter, non-paralyzable case."""
    arrivals = np.array([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])
    # A negative dead time is invalid and must raise.
    with pytest.raises(ValueError) as excinfo:
        filter_for_deadtime(arrivals, -0.11)
    assert "Dead time is less than 0. Please check." in str(excinfo.value)
def test_filter_for_deadtime_0():
    """Test dead time filter, non-paralyzable case."""
    arrivals = np.array([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])
    kept, info = filter_for_deadtime(arrivals, 0, return_all=True)
    # A zero dead time must keep every event untouched.
    assert np.all(arrivals == kept)
    assert np.all(kept == info.uf_events[info.is_event])
def test_filter_for_deadtime_nonpar_sigma():
    """Test dead time filter, non-paralyzable case."""
    arrivals = np.array([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])
    # A tiny jitter on the dead time must not change the outcome here.
    kept = filter_for_deadtime(arrivals, 0.11, dt_sigma=0.001)
    wanted = np.array([1, 2, 2.2, 3, 3.2])
    assert np.all(kept == wanted), "Wrong: {} vs {}".format(kept, wanted)
def test_filter_for_deadtime_nonpar_bkg():
    """Test dead time filter, non-paralyzable case, with background."""
    src = np.array([1.1, 2, 2.2, 3, 3.2])
    bkg = np.array([1, 3.1])
    kept, info = filter_for_deadtime(src, 0.11, bkg_ev_list=bkg,
                                    return_all=True)
    want_src = np.array([2, 2.2, 3, 3.2])
    want_bkg = np.array([1])
    assert np.all(kept == want_src), \
        "Wrong: {} vs {}".format(kept, want_src)
    assert np.all(info.bkg == want_bkg), \
        "Wrong: {} vs {}".format(info.bkg, want_bkg)
    assert np.all(kept == info.uf_events[info.is_event])
def test_filter_for_deadtime_par():
    """Test dead time filter, paralyzable case."""
    arrivals = np.array([1, 1.1, 2, 2.2, 3, 3.1, 3.2])
    kept = filter_for_deadtime(arrivals, 0.11, paralyzable=True)
    assert np.all(kept == np.array([1, 2, 2.2, 3]))
def test_filter_for_deadtime_par_bkg():
    """Test dead time filter, paralyzable case, with background."""
    src = np.array([1.1, 2, 2.2, 3, 3.2])
    bkg = np.array([1, 3.1])
    kept, info = filter_for_deadtime(src, 0.11, bkg_ev_list=bkg,
                                    paralyzable=True, return_all=True)
    want_src = np.array([2, 2.2, 3])
    want_bkg = np.array([1])
    assert np.all(kept == want_src), \
        "Wrong: {} vs {}".format(kept, want_src)
    assert np.all(info.bkg == want_bkg), \
        "Wrong: {} vs {}".format(info.bkg, want_bkg)
    assert np.all(kept == info.uf_events[info.is_event])
| [
"numpy.abs",
"stingray.events.EventList",
"pytest.raises",
"numpy.array",
"numpy.linspace",
"astropy.modeling.models.Lorentz1D",
"numpy.all",
"astropy.modeling.models.Const1D"
] | [((1368, 1425), 'numpy.array', 'np.array', (['[1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2]'], {}), '([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])\n', (1376, 1425), True, 'import numpy as np\n'), ((1516, 1545), 'numpy.array', 'np.array', (['[1, 2, 2.2, 3, 3.2]'], {}), '([1, 2, 2.2, 3, 3.2])\n', (1524, 1545), True, 'import numpy as np\n'), ((1557, 1588), 'numpy.all', 'np.all', (['(filt_events == expected)'], {}), '(filt_events == expected)\n', (1563, 1588), True, 'import numpy as np\n'), ((1659, 1711), 'numpy.all', 'np.all', (['(filt_events == info.uf_events[info.is_event])'], {}), '(filt_events == info.uf_events[info.is_event])\n', (1665, 1711), True, 'import numpy as np\n'), ((1821, 1878), 'numpy.array', 'np.array', (['[1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2]'], {}), '([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])\n', (1829, 1878), True, 'import numpy as np\n'), ((1892, 1909), 'stingray.events.EventList', 'EventList', (['events'], {}), '(events)\n', (1901, 1909), False, 'from stingray.events import EventList\n'), ((1924, 1964), 'numpy.array', 'np.array', (['[1, 2, 2, 2, 2, 1, 1, 1, 2, 1]'], {}), '([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])\n', (1932, 1964), True, 'import numpy as np\n'), ((1983, 2023), 'numpy.array', 'np.array', (['[1, 2, 2, 2, 2, 1, 1, 1, 2, 1]'], {}), '([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])\n', (1991, 2023), True, 'import numpy as np\n'), ((2115, 2144), 'numpy.array', 'np.array', (['[1, 2, 2.2, 3, 3.2]'], {}), '([1, 2, 2.2, 3, 3.2])\n', (2123, 2144), True, 'import numpy as np\n'), ((2156, 2192), 'numpy.all', 'np.all', (['(filt_events.time == expected)'], {}), '(filt_events.time == expected)\n', (2162, 2192), True, 'import numpy as np\n'), ((2264, 2291), 'numpy.all', 'np.all', (['(filt_events.pi == 1)'], {}), '(filt_events.pi == 1)\n', (2270, 2291), True, 'import numpy as np\n'), ((2303, 2334), 'numpy.all', 'np.all', (['(filt_events.energy == 1)'], {}), '(filt_events.energy == 1)\n', (2309, 2334), True, 'import numpy as np\n'), ((2441, 2498), 
'numpy.array', 'np.array', (['[1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2]'], {}), '([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])\n', (2449, 2498), True, 'import numpy as np\n'), ((2772, 2829), 'numpy.array', 'np.array', (['[1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2]'], {}), '([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])\n', (2780, 2829), True, 'import numpy as np\n'), ((2913, 2942), 'numpy.all', 'np.all', (['(events == filt_events)'], {}), '(events == filt_events)\n', (2919, 2942), True, 'import numpy as np\n'), ((2954, 3006), 'numpy.all', 'np.all', (['(filt_events == info.uf_events[info.is_event])'], {}), '(filt_events == info.uf_events[info.is_event])\n', (2960, 3006), True, 'import numpy as np\n'), ((3122, 3179), 'numpy.array', 'np.array', (['[1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2]'], {}), '([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])\n', (3130, 3179), True, 'import numpy as np\n'), ((3263, 3292), 'numpy.array', 'np.array', (['[1, 2, 2.2, 3, 3.2]'], {}), '([1, 2, 2.2, 3, 3.2])\n', (3271, 3292), True, 'import numpy as np\n'), ((3304, 3335), 'numpy.all', 'np.all', (['(filt_events == expected)'], {}), '(filt_events == expected)\n', (3310, 3335), True, 'import numpy as np\n'), ((3525, 3556), 'numpy.array', 'np.array', (['[1.1, 2, 2.2, 3, 3.2]'], {}), '([1.1, 2, 2.2, 3, 3.2])\n', (3533, 3556), True, 'import numpy as np\n'), ((3574, 3592), 'numpy.array', 'np.array', (['[1, 3.1]'], {}), '([1, 3.1])\n', (3582, 3592), True, 'import numpy as np\n'), ((3748, 3774), 'numpy.array', 'np.array', (['[2, 2.2, 3, 3.2]'], {}), '([2, 2.2, 3, 3.2])\n', (3756, 3774), True, 'import numpy as np\n'), ((3793, 3806), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3801, 3806), True, 'import numpy as np\n'), ((3818, 3852), 'numpy.all', 'np.all', (['(filt_events == expected_ev)'], {}), '(filt_events == expected_ev)\n', (3824, 3852), True, 'import numpy as np\n'), ((3926, 3957), 'numpy.all', 'np.all', (['(info.bkg == expected_bk)'], {}), '(info.bkg == 
expected_bk)\n', (3932, 3957), True, 'import numpy as np\n'), ((4028, 4080), 'numpy.all', 'np.all', (['(filt_events == info.uf_events[info.is_event])'], {}), '(filt_events == info.uf_events[info.is_event])\n', (4034, 4080), True, 'import numpy as np\n'), ((4183, 4222), 'numpy.array', 'np.array', (['[1, 1.1, 2, 2.2, 3, 3.1, 3.2]'], {}), '([1, 1.1, 2, 2.2, 3, 3.1, 3.2])\n', (4191, 4222), True, 'import numpy as np\n'), ((4454, 4485), 'numpy.array', 'np.array', (['[1.1, 2, 2.2, 3, 3.2]'], {}), '([1.1, 2, 2.2, 3, 3.2])\n', (4462, 4485), True, 'import numpy as np\n'), ((4503, 4521), 'numpy.array', 'np.array', (['[1, 3.1]'], {}), '([1, 3.1])\n', (4511, 4521), True, 'import numpy as np\n'), ((4695, 4716), 'numpy.array', 'np.array', (['[2, 2.2, 3]'], {}), '([2, 2.2, 3])\n', (4703, 4716), True, 'import numpy as np\n'), ((4735, 4748), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4743, 4748), True, 'import numpy as np\n'), ((4760, 4794), 'numpy.all', 'np.all', (['(filt_events == expected_ev)'], {}), '(filt_events == expected_ev)\n', (4766, 4794), True, 'import numpy as np\n'), ((4868, 4899), 'numpy.all', 'np.all', (['(info.bkg == expected_bk)'], {}), '(info.bkg == expected_bk)\n', (4874, 4899), True, 'import numpy as np\n'), ((4970, 5022), 'numpy.all', 'np.all', (['(filt_events == info.uf_events[info.is_event])'], {}), '(filt_events == info.uf_events[info.is_event])\n', (4976, 5022), True, 'import numpy as np\n'), ((264, 287), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (275, 287), True, 'import numpy as np\n'), ((421, 499), 'astropy.modeling.models.Lorentz1D', 'models.Lorentz1D', ([], {'amplitude': 'self.amplitude_0', 'x_0': 'self.x_0_0', 'fwhm': 'self.fwhm_0'}), '(amplitude=self.amplitude_0, x_0=self.x_0_0, fwhm=self.fwhm_0)\n', (437, 499), False, 'from astropy.modeling import models\n'), ((561, 603), 'astropy.modeling.models.Const1D', 'models.Const1D', ([], {'amplitude': 'self.amplitude_1'}), '(amplitude=self.amplitude_1)\n', 
(575, 603), False, 'from astropy.modeling import models\n'), ((976, 1001), 'numpy.all', 'np.all', (['(filtered_y == y_w)'], {}), '(filtered_y == y_w)\n', (982, 1001), True, 'import numpy as np\n'), ((1233, 1258), 'numpy.all', 'np.all', (['(filtered_y == y_o)'], {}), '(filtered_y == y_o)\n', (1239, 1258), True, 'import numpy as np\n'), ((2508, 2533), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2521, 2533), False, 'import pytest\n'), ((4305, 4329), 'numpy.array', 'np.array', (['[1, 2, 2.2, 3]'], {}), '([1, 2, 2.2, 3])\n', (4313, 4329), True, 'import numpy as np\n'), ((838, 862), 'numpy.abs', 'np.abs', (['(x_i - self.x_0_0)'], {}), '(x_i - self.x_0_0)\n', (844, 862), True, 'import numpy as np\n')] |
import numpy as np
from ga.solutions import DoubleArraySolution, BitArraySolution, Solution
class Crossing:
    """Interface for crossover operators combining two parent solutions."""

    def cross(self, momma: Solution, poppa: Solution):
        """Produce a child solution from the two parents. No-op here."""
        pass
class GaussCrossing(Crossing):
    """Crossover sampling each child gene from a Gaussian whose mean is the
    midpoint of the parent genes and whose std-dev is half their distance."""

    def cross(self, momma: DoubleArraySolution, poppa: DoubleArraySolution):
        lo = np.minimum(momma.arr, poppa.arr)
        hi = np.maximum(momma.arr, poppa.arr)
        span = hi - lo
        child = np.random.normal(lo + span / 2, span / 2, len(momma.arr))
        return momma.new_like_me().set_arr(child)
class BLXalphaCrossing(Crossing):
    """BLX-alpha crossover: each gene is drawn uniformly from the parent
    interval widened by a fraction `alpha` on both sides."""

    def __init__(self, alpha):
        self.alpha = alpha

    def cross(self, momma: DoubleArraySolution, poppa: DoubleArraySolution):
        genes = [blxAlpha(momma.arr[i], poppa.arr[i], self.alpha)
                 for i in range(len(momma.arr))]
        return momma.new_like_me().set_arr(genes)
class UniformCrossing(Crossing):
    """Uniform crossover: each bit is taken from a randomly chosen parent."""

    def cross(self, momma: BitArraySolution, poppa: BitArraySolution):
        coin = np.random.choice([True, False], len(momma.arr))
        a = momma.arr
        b = poppa.arr
        # child = (A AND B) OR (COIN AND (A XOR B))
        child = np.logical_or(np.logical_and(a, b),
                              np.logical_and(coin, np.logical_xor(a, b)))
        return momma.new_like_me().set_arr(child)
class OnePointCrossing(Crossing):
    """One-point crossover: prefix genes from momma, suffix from poppa."""

    def cross(self, momma: BitArraySolution, poppa: BitArraySolution):
        # Cut point is never 0, so the child always carries genes of both.
        cut = np.random.choice(list(range(1, len(momma.arr))))
        child = [momma.arr[i] for i in range(cut)]
        child += [poppa.arr[i] for i in range(cut, len(momma.arr))]
        return momma.new_like_me().set_arr(np.array(child))
def blxAlpha(a, b, alpha):
    """BLX-alpha blend of two parent genes.

    Draws uniformly from [min - d, max + d] where d = alpha * (max - min);
    returns a length-1 numpy array (the raw `np.random.uniform` output).
    """
    lo = min(a, b)
    hi = max(a, b)
    margin = alpha * (hi - lo)
    return lo - margin + np.random.uniform(0.0, 1.0, 1) * (hi + 2 * margin - lo)
| [
"numpy.random.uniform",
"numpy.minimum",
"numpy.maximum",
"numpy.logical_and",
"numpy.logical_xor",
"numpy.array"
] | [((309, 341), 'numpy.minimum', 'np.minimum', (['momma.arr', 'poppa.arr'], {}), '(momma.arr, poppa.arr)\n', (319, 341), True, 'import numpy as np\n'), ((360, 392), 'numpy.maximum', 'np.maximum', (['momma.arr', 'poppa.arr'], {}), '(momma.arr, poppa.arr)\n', (370, 392), True, 'import numpy as np\n'), ((1234, 1254), 'numpy.logical_and', 'np.logical_and', (['a', 'b'], {}), '(a, b)\n', (1248, 1254), True, 'import numpy as np\n'), ((1731, 1744), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (1739, 1744), True, 'import numpy as np\n'), ((1893, 1923), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(1)'], {}), '(0.0, 1.0, 1)\n', (1910, 1923), True, 'import numpy as np\n'), ((1274, 1294), 'numpy.logical_xor', 'np.logical_xor', (['a', 'b'], {}), '(a, b)\n', (1288, 1294), True, 'import numpy as np\n')] |
import os
import numpy as np
import tensorflow as tf
ALL_VARS = None
def clip(gradient, min_clip_value=-1e+0, max_clip_value=1e+0):
    """Clamp a gradient tensor into [min_clip_value, max_clip_value].

    Examples:
        >>> loss = ...
        >>> optimizer = tf.train.AdamOptimizer()
        >>> gvs = optimizer.compute_gradients(loss)
        >>> gvs = [(clip(grad), var) for grad, var in gvs]
        >>> train_op = optimizer.apply_gradients(gvs)

    Args:
        gradient: Tensor.
        min_clip_value: Negative float.
        max_clip_value: Positive float.

    Returns:
        Tensor.
    """
    # The bounds must straddle zero so unclipped gradients pass through.
    assert min_clip_value < 0 < max_clip_value
    clipped = tf.clip_by_value(gradient, min_clip_value, max_clip_value)
    return clipped
def get_ckpt_name(scope):
    """Return the checkpoint filename used for a variable scope.

    Args:
        scope: String, or `None` meaning "all scopes".
    """
    return 'all_scopes.ckpt' if scope is None else '{}_scope.ckpt'.format(scope)
def save_variables(session, scope, save_dir):
    """Persist the trained variables within `scope` from `session` to disk.

    Args:
        session: An instance of `tf.Session`.
        scope: String or `ALL_VARS`.
        save_dir: String. Created first if it does not exist yet.
    """
    ensure_directory(save_dir)
    variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope=scope)
    ckpt_path = os.path.join(save_dir, get_ckpt_name(scope))
    tf.train.Saver(variables).save(session, ckpt_path)
def restore_variables(session, scope, save_dir):
    """Restore pre-trained variables within `scope` from disk into `session`.

    Counterpart of `save_variables`.

    Args:
        session: An instance of `tf.Session`.
        scope: String or `ALL_VARS`.
        save_dir: String. This directory shall exist.
    """
    variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope=scope)
    ckpt_path = os.path.join(save_dir, get_ckpt_name(scope))
    tf.train.Saver(variables).restore(session, ckpt_path)
def create_frugal_session(gpu_allocation=0.1):
    """Create a session limited to `gpu_allocation` of the GPU memory.

    Args:
        gpu_allocation: Float in range (0, 1].

    Returns:
        An instance of `tf.Session`.
    """
    config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_allocation))
    return tf.Session(config=config)
def smear(values, window_size):
    """Smooth a sequence with a trailing moving average.

    Auxiliary function for plotting: bushy curves, e.g. loss-values,
    become readable after smearing.

    Args:
        values: List of real numbers.
        window_size: Positive integer; number of trailing values averaged.

    Returns:
        List of real numbers, one per input value.
    """
    # For each index i, average the (up to) `window_size` values ending at
    # i; max() clamps the window start at the beginning of the sequence,
    # replacing the original manual `if start_id < 0` fix-up.
    return [np.mean(values[max(0, i + 1 - window_size):i + 1])
            for i in range(len(values))]
def ensure_directory(directory):
    """Create `directory` (including parents) unless it already exists.

    Args:
        directory: String path; may or may not exist yet.
    """
    # exist_ok makes this a no-op when the directory is already present.
    os.makedirs(directory, exist_ok=True)
| [
"os.makedirs",
"tensorflow.train.Saver",
"tensorflow.clip_by_value",
"tensorflow.get_collection",
"tensorflow.Session",
"tensorflow.ConfigProto",
"numpy.mean",
"tensorflow.GPUOptions"
] | [((556, 614), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['gradient', 'min_clip_value', 'max_clip_value'], {}), '(gradient, min_clip_value, max_clip_value)\n', (572, 614), True, 'import tensorflow as tf\n'), ((1182, 1243), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'scope'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)\n', (1199, 1243), True, 'import tensorflow as tf\n'), ((1292, 1323), 'tensorflow.train.Saver', 'tf.train.Saver', (['pretrained_vars'], {}), '(pretrained_vars)\n', (1306, 1323), True, 'import tensorflow as tf\n'), ((1752, 1813), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'scope'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)\n', (1769, 1813), True, 'import tensorflow as tf\n'), ((1862, 1893), 'tensorflow.train.Saver', 'tf.train.Saver', (['pretrained_vars'], {}), '(pretrained_vars)\n', (1876, 1893), True, 'import tensorflow as tf\n'), ((2235, 2296), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'gpu_allocation'}), '(per_process_gpu_memory_fraction=gpu_allocation)\n', (2248, 2296), True, 'import tensorflow as tf\n'), ((2315, 2354), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (2329, 2354), True, 'import tensorflow as tf\n'), ((2364, 2389), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2374, 2389), True, 'import tensorflow as tf\n'), ((3139, 3176), 'os.makedirs', 'os.makedirs', (['directory'], {'exist_ok': '(True)'}), '(directory, exist_ok=True)\n', (3150, 3176), False, 'import os\n'), ((2954, 2979), 'numpy.mean', 'np.mean', (['values_in_window'], {}), '(values_in_window)\n', (2961, 2979), True, 'import numpy as np\n')] |
"""
Inference for GP regression.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import scipy.optimize as spop
import warnings
from collections import namedtuple
from itertools import izip
from ...utils import linalg as la
__all__ = ['exact', 'laplace', 'fitc']
# create a named tuple which functions as storage for the posterior sufficient
# statistics.
# Posterior sufficient statistics shared by all inference methods; only
# `fitc` fills in the last field `C`, so it defaults to None.
Statistics = namedtuple('Statistics', 'L, a, w, lZ, dlZ, C')
Statistics.__new__.__defaults__ = (None, )
def exact(like, kern, mean, X, Y):
    """Exact GP-regression inference for a Gaussian likelihood.

    Returns a `Statistics` tuple holding the Cholesky factor `L` of the
    noisy kernel matrix, the weight vector `a`, per-datum weights `w`
    (all ones for exact inference), the log marginal likelihood `lZ`,
    and its gradient `dlZ` ordered as (likelihood, kernel, mean) params.
    """
    K = kern.get_kernel(X)
    # add the likelihood's noise variance to the diagonal of K.
    K = la.add_diagonal(K, like.get_variance())
    # residuals of the data about the prior mean.
    r = Y - mean.get_mean(X)
    # compute the cholesky but ignore any warnings that may be thrown when
    # more "noise" is added to the diagonal.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        L = la.cholesky(K)
    # the rest of the posterior parameterization
    a = la.solve_cholesky(L, r)
    w = np.ones_like(a)
    # we'll need this to compute the log-likelihood derivatives
    Q = la.cholesky_inverse(L) - np.outer(a, a)
    # the log-likelihood: data-fit, normalization, and log-determinant terms.
    lZ = -0.5 * np.inner(a, r)
    lZ -= 0.5 * np.log(2 * np.pi) * len(X)
    lZ -= np.sum(np.log(L.diagonal()))
    dlZ = np.r_[
        # derivative wrt the likelihood's noise term.
        -0.5*np.trace(Q),
        # derivative wrt each kernel hyperparameter.
        [-0.5*np.sum(Q*dK) for dK in kern.get_grad(X)],
        # derivative wrt the mean.
        [np.dot(dmu, a) for dmu in mean.get_grad(X)]]
    return Statistics(L, a, w, lZ, dlZ)
def laplace(like, kern, mean, X, Y):
    """Laplace approximation to the GP posterior for a non-Gaussian
    likelihood: Newton iteration to the posterior mode, then a Gaussian
    approximation around it.

    NOTE: this module targets Python 2 (`xrange` here, `izip` in `fitc`).
    Returns a `Statistics` tuple (see module header).
    """
    MAXIT = 60      # maximum Newton iterations for the mode search.
    MINTOL = 1e-6   # convergence tolerance on the objective.
    # grab the kernel, mean, and initialize the weights
    K = kern.get_kernel(X)
    L = None
    m = mean.get_mean(X)
    a = np.zeros(K.shape[1])
    def psi(a):
        # define the linesearch objective; also returns the latent
        # residual r = K a and the first three log-prob derivatives.
        r = np.dot(K, a)
        lp, d1, d2, d3 = like.get_logprob(Y, r+m)
        psi = 0.5 * np.inner(r, a) - np.sum(lp)
        return psi, r, d1, d2, d3
    psi1, r, dy1, dy2, dy3 = psi(a)
    psi0 = np.inf
    for _ in xrange(MAXIT):
        # attempt to breakout early
        if np.abs(psi1 - psi0) < MINTOL:
            break
        psi0 = psi1
        # find the step direction
        w = np.sqrt(-dy2)
        L = la.cholesky(la.add_diagonal(np.outer(w, w)*K, 1))
        b = w**2 * r + dy1
        # find the step size via a 1-d Brent linesearch on psi.
        d = b - a - w*la.solve_cholesky(L, w*np.dot(K, b))
        s = spop.brent(lambda s: psi(a+s*d)[0], tol=1e-4, maxiter=12)
        # update the parameters
        a += s*d
        psi1, r, dy1, dy2, dy3 = psi(a)
    # update the posterior parameters
    w = np.sqrt(-dy2)
    L = la.cholesky(la.add_diagonal(np.outer(w, w)*K, 1))
    # compute the marginal log-likelihood
    lZ = -psi1 - np.sum(np.log(np.diag(L)))
    # compute parameters needed for the hyperparameter gradients
    R = w * la.solve_cholesky(L, np.diag(w))
    C = la.solve_triangular(L, w*K)
    g = 0.5 * (np.diag(K) - np.sum(C**2, axis=0))
    f = r+m
    df = g * dy3
    # define the implicit part of the gradients (the mode itself moves
    # when a hyperparameter changes).
    implicit = lambda b: np.dot(df, b - np.dot(K, np.dot(R, b)))
    # allocate space for the gradients
    dlZ = np.zeros(like.params.size + kern.params.size + mean.params.size)
    # the likelihood derivatives
    i = 0
    for dl0, dl1, dl2 in like.get_laplace_grad(Y, f):
        dlZ[i] = np.dot(g, dl2) + np.sum(dl0)
        dlZ[i] += implicit(np.dot(K, dl1))
        i += 1
    # covariance derivatives
    for dK in kern.get_grad(X):
        dlZ[i] = 0.5 * (np.dot(a, np.dot(dK, a)) - np.sum(R*dK))
        dlZ[i] += implicit(np.dot(dK, dy1))
        i += 1
    # mean derivatives
    for dm in mean.get_grad(X):
        dlZ[i] = np.dot(dm, a) + implicit(dm)
        i += 1
    return Statistics(L, a, w, lZ, dlZ)
def fitc(like, kern, mean, X, Y, U):
    """Sparse GP inference with inducing inputs `U` (FITC-style
    approximation). Returns a `Statistics` tuple; unlike `exact` and
    `laplace` it also fills in the final field `C`.
    """
    sn2 = like.get_variance()
    # small jitter added to the inducing-point kernel, tied to the noise.
    su2 = sn2 / 1e6
    # get the kernel matrices
    Kux = kern.get_kernel(U, X)
    kxx = kern.get_dkernel(X) + sn2
    Kuu = la.add_diagonal(kern.get_kernel(U), su2)
    Luu = la.cholesky(Kuu)
    V = la.solve_triangular(Luu, Kux)
    r = (Y - mean.get_mean(X))
    # per-datum scaling; rescale V and r in place by it.
    ell = np.sqrt(kxx - np.sum(V**2, axis=0))
    V /= ell
    r /= ell
    L = la.cholesky(la.add_diagonal(np.dot(V, V.T), 1))
    b = la.solve_triangular(L, np.dot(V, r))
    a = (r - np.dot(V.T, la.solve_triangular(L, b, True))) / ell
    # the log-likelihood
    lZ = -np.sum(np.log(L.diagonal())) - np.sum(np.log(ell))
    lZ -= 0.5 * (np.inner(r, r) - np.inner(b, b))
    lZ -= 0.5 * len(X)*np.log(2*np.pi)
    # components needed for the gradient
    B = la.solve_triangular(Luu, V*ell, True)
    W = la.solve_triangular(L, V/ell)
    w = np.dot(B, a)
    v = 2 * su2 * np.sum(B**2, axis=0)
    # allocate space for the derivatives
    dlZ = np.zeros(like.params.size + kern.params.size + mean.params.size)
    # derivative wrt sn2
    dlZ[0] = 0.5 * (
        - (np.sum(ell**-2) - np.sum(W**2) - np.inner(a, a))
        - (np.sum(w**2) + np.sum(np.dot(B, W.T)**2)) / 1e6
        + (np.inner(a, v*a) + np.inner(np.sum(W**2, axis=0), v)) / 2 / sn2)
    # iterator over gradients of the kernels (izip: Python 2 lazy zip).
    dK = izip(kern.get_grad(U),
              kern.get_grad(U, X),
              kern.get_dgrad(X))
    # we need to keep track of how many gradients we've already computed.
    # note also that at the end of the next loop this variable will have
    # changed to track the current number of gradients.
    i = like.params.size
    for i, (dKuu, dKux, dkxx) in enumerate(dK, i):
        # NOTE: `v` is reused here as a per-datum correction term.
        M = 2*dKux - np.dot(dKuu, B)
        v = dkxx - np.sum(M*B, axis=0)
        dlZ[i] = 0.5 * (- np.sum(dkxx/ell**2)
                        - np.inner(w, dKuu.dot(w) - 2*dKux.dot(a))
                        + np.inner(a, v*a)
                        + np.inner(np.sum(W**2, axis=0), v)
                        + np.sum(M.dot(W.T) * B.dot(W.T)))
    for i, dmu in enumerate(mean.get_grad(X), i+1):
        dlZ[i] = np.dot(dmu, a)
    # recompute (a, w) in the inducing-point parameterization; C is the
    # extra factor consumed by downstream prediction code.
    C = np.dot(Luu, L)
    a = la.solve_cholesky(C, np.dot(Kux, r/ell))
    w = np.ones_like(a)
    return Statistics(Luu, a, w, lZ, dlZ, C)
| [
"numpy.trace",
"numpy.outer",
"numpy.ones_like",
"warnings.simplefilter",
"numpy.log",
"numpy.sum",
"numpy.abs",
"numpy.zeros",
"warnings.catch_warnings",
"collections.namedtuple",
"numpy.inner",
"numpy.dot",
"numpy.diag",
"numpy.sqrt"
] | [((459, 506), 'collections.namedtuple', 'namedtuple', (['"""Statistics"""', '"""L, a, w, lZ, dlZ, C"""'], {}), "('Statistics', 'L, a, w, lZ, dlZ, C')\n", (469, 506), False, 'from collections import namedtuple\n'), ((1005, 1020), 'numpy.ones_like', 'np.ones_like', (['a'], {}), '(a)\n', (1017, 1020), True, 'import numpy as np\n'), ((1814, 1834), 'numpy.zeros', 'np.zeros', (['K.shape[1]'], {}), '(K.shape[1])\n', (1822, 1834), True, 'import numpy as np\n'), ((2696, 2709), 'numpy.sqrt', 'np.sqrt', (['(-dy2)'], {}), '(-dy2)\n', (2703, 2709), True, 'import numpy as np\n'), ((3245, 3309), 'numpy.zeros', 'np.zeros', (['(like.params.size + kern.params.size + mean.params.size)'], {}), '(like.params.size + kern.params.size + mean.params.size)\n', (3253, 3309), True, 'import numpy as np\n'), ((4742, 4754), 'numpy.dot', 'np.dot', (['B', 'a'], {}), '(B, a)\n', (4748, 4754), True, 'import numpy as np\n'), ((4846, 4910), 'numpy.zeros', 'np.zeros', (['(like.params.size + kern.params.size + mean.params.size)'], {}), '(like.params.size + kern.params.size + mean.params.size)\n', (4854, 4910), True, 'import numpy as np\n'), ((6025, 6039), 'numpy.dot', 'np.dot', (['Luu', 'L'], {}), '(Luu, L)\n', (6031, 6039), True, 'import numpy as np\n'), ((6097, 6112), 'numpy.ones_like', 'np.ones_like', (['a'], {}), '(a)\n', (6109, 6112), True, 'import numpy as np\n'), ((821, 846), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (844, 846), False, 'import warnings\n'), ((856, 887), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (877, 887), False, 'import warnings\n'), ((1119, 1133), 'numpy.outer', 'np.outer', (['a', 'a'], {}), '(a, a)\n', (1127, 1133), True, 'import numpy as np\n'), ((1176, 1190), 'numpy.inner', 'np.inner', (['a', 'r'], {}), '(a, r)\n', (1184, 1190), True, 'import numpy as np\n'), ((1906, 1918), 'numpy.dot', 'np.dot', (['K', 'a'], {}), '(K, a)\n', (1912, 1918), True, 'import numpy as np\n'), ((2297, 2310), 
'numpy.sqrt', 'np.sqrt', (['(-dy2)'], {}), '(-dy2)\n', (2304, 2310), True, 'import numpy as np\n'), ((4353, 4365), 'numpy.dot', 'np.dot', (['V', 'r'], {}), '(V, r)\n', (4359, 4365), True, 'import numpy as np\n'), ((4592, 4609), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4598, 4609), True, 'import numpy as np\n'), ((4773, 4795), 'numpy.sum', 'np.sum', (['(B ** 2)'], {'axis': '(0)'}), '(B ** 2, axis=0)\n', (4779, 4795), True, 'import numpy as np\n'), ((6001, 6015), 'numpy.dot', 'np.dot', (['dmu', 'a'], {}), '(dmu, a)\n', (6007, 6015), True, 'import numpy as np\n'), ((6069, 6089), 'numpy.dot', 'np.dot', (['Kux', '(r / ell)'], {}), '(Kux, r / ell)\n', (6075, 6089), True, 'import numpy as np\n'), ((1207, 1224), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1213, 1224), True, 'import numpy as np\n'), ((2006, 2016), 'numpy.sum', 'np.sum', (['lp'], {}), '(lp)\n', (2012, 2016), True, 'import numpy as np\n'), ((2182, 2201), 'numpy.abs', 'np.abs', (['(psi1 - psi0)'], {}), '(psi1 - psi0)\n', (2188, 2201), True, 'import numpy as np\n'), ((2954, 2964), 'numpy.diag', 'np.diag', (['w'], {}), '(w)\n', (2961, 2964), True, 'import numpy as np\n'), ((3017, 3027), 'numpy.diag', 'np.diag', (['K'], {}), '(K)\n', (3024, 3027), True, 'import numpy as np\n'), ((3030, 3052), 'numpy.sum', 'np.sum', (['(C ** 2)'], {'axis': '(0)'}), '(C ** 2, axis=0)\n', (3036, 3052), True, 'import numpy as np\n'), ((3425, 3439), 'numpy.dot', 'np.dot', (['g', 'dl2'], {}), '(g, dl2)\n', (3431, 3439), True, 'import numpy as np\n'), ((3442, 3453), 'numpy.sum', 'np.sum', (['dl0'], {}), '(dl0)\n', (3448, 3453), True, 'import numpy as np\n'), ((3481, 3495), 'numpy.dot', 'np.dot', (['K', 'dl1'], {}), '(K, dl1)\n', (3487, 3495), True, 'import numpy as np\n'), ((3666, 3681), 'numpy.dot', 'np.dot', (['dK', 'dy1'], {}), '(dK, dy1)\n', (3672, 3681), True, 'import numpy as np\n'), ((3771, 3784), 'numpy.dot', 'np.dot', (['dm', 'a'], {}), '(dm, a)\n', (3777, 3784), True, 'import numpy as 
np\n'), ((4217, 4239), 'numpy.sum', 'np.sum', (['(V ** 2)'], {'axis': '(0)'}), '(V ** 2, axis=0)\n', (4223, 4239), True, 'import numpy as np\n'), ((4302, 4316), 'numpy.dot', 'np.dot', (['V', 'V.T'], {}), '(V, V.T)\n', (4308, 4316), True, 'import numpy as np\n'), ((4506, 4517), 'numpy.log', 'np.log', (['ell'], {}), '(ell)\n', (4512, 4517), True, 'import numpy as np\n'), ((4536, 4550), 'numpy.inner', 'np.inner', (['r', 'r'], {}), '(r, r)\n', (4544, 4550), True, 'import numpy as np\n'), ((4553, 4567), 'numpy.inner', 'np.inner', (['b', 'b'], {}), '(b, b)\n', (4561, 4567), True, 'import numpy as np\n'), ((5601, 5616), 'numpy.dot', 'np.dot', (['dKuu', 'B'], {}), '(dKuu, B)\n', (5607, 5616), True, 'import numpy as np\n'), ((5636, 5657), 'numpy.sum', 'np.sum', (['(M * B)'], {'axis': '(0)'}), '(M * B, axis=0)\n', (5642, 5657), True, 'import numpy as np\n'), ((1358, 1369), 'numpy.trace', 'np.trace', (['Q'], {}), '(Q)\n', (1366, 1369), True, 'import numpy as np\n'), ((1526, 1540), 'numpy.dot', 'np.dot', (['dmu', 'a'], {}), '(dmu, a)\n', (1532, 1540), True, 'import numpy as np\n'), ((1989, 2003), 'numpy.inner', 'np.inner', (['r', 'a'], {}), '(r, a)\n', (1997, 2003), True, 'import numpy as np\n'), ((2746, 2760), 'numpy.outer', 'np.outer', (['w', 'w'], {}), '(w, w)\n', (2754, 2760), True, 'import numpy as np\n'), ((2842, 2852), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (2849, 2852), True, 'import numpy as np\n'), ((3625, 3639), 'numpy.sum', 'np.sum', (['(R * dK)'], {}), '(R * dK)\n', (3631, 3639), True, 'import numpy as np\n'), ((1439, 1453), 'numpy.sum', 'np.sum', (['(Q * dK)'], {}), '(Q * dK)\n', (1445, 1453), True, 'import numpy as np\n'), ((2351, 2365), 'numpy.outer', 'np.outer', (['w', 'w'], {}), '(w, w)\n', (2359, 2365), True, 'import numpy as np\n'), ((3180, 3192), 'numpy.dot', 'np.dot', (['R', 'b'], {}), '(R, b)\n', (3186, 3192), True, 'import numpy as np\n'), ((3608, 3621), 'numpy.dot', 'np.dot', (['dK', 'a'], {}), '(dK, a)\n', (3614, 3621), True, 'import numpy as 
np\n'), ((2475, 2487), 'numpy.dot', 'np.dot', (['K', 'b'], {}), '(K, b)\n', (2481, 2487), True, 'import numpy as np\n'), ((5002, 5016), 'numpy.inner', 'np.inner', (['a', 'a'], {}), '(a, a)\n', (5010, 5016), True, 'import numpy as np\n'), ((5029, 5043), 'numpy.sum', 'np.sum', (['(w ** 2)'], {}), '(w ** 2)\n', (5035, 5043), True, 'import numpy as np\n'), ((5088, 5106), 'numpy.inner', 'np.inner', (['a', '(v * a)'], {}), '(a, v * a)\n', (5096, 5106), True, 'import numpy as np\n'), ((5795, 5813), 'numpy.inner', 'np.inner', (['a', '(v * a)'], {}), '(a, v * a)\n', (5803, 5813), True, 'import numpy as np\n'), ((5847, 5869), 'numpy.sum', 'np.sum', (['(W ** 2)'], {'axis': '(0)'}), '(W ** 2, axis=0)\n', (5853, 5869), True, 'import numpy as np\n'), ((4969, 4986), 'numpy.sum', 'np.sum', (['(ell ** -2)'], {}), '(ell ** -2)\n', (4975, 4986), True, 'import numpy as np\n'), ((4987, 5001), 'numpy.sum', 'np.sum', (['(W ** 2)'], {}), '(W ** 2)\n', (4993, 5001), True, 'import numpy as np\n'), ((5116, 5138), 'numpy.sum', 'np.sum', (['(W ** 2)'], {'axis': '(0)'}), '(W ** 2, axis=0)\n', (5122, 5138), True, 'import numpy as np\n'), ((5051, 5065), 'numpy.dot', 'np.dot', (['B', 'W.T'], {}), '(B, W.T)\n', (5057, 5065), True, 'import numpy as np\n'), ((5682, 5705), 'numpy.sum', 'np.sum', (['(dkxx / ell ** 2)'], {}), '(dkxx / ell ** 2)\n', (5688, 5705), True, 'import numpy as np\n')] |
# Implementar dicho métodos para obtener la segmentación de una imagen
# Implementar dicho métodos para obtener la segmentación de una imagen
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from PIL import Image, ImageOps
def imageToArray(filename: str, rgb: bool = False):
    """Load an image file as a numpy array.

    Args:
        filename: path of the image to read.
        rgb: keep the original color channels when True; otherwise the
            image is converted to grayscale first.
    """
    image = Image.open(filename)
    converted = image if rgb else ImageOps.grayscale(image)
    return np.array(converted)
A = imageToArray("imagen4.jpg")  # grayscale image (imageToArray defaults to rgb=False)
plt.subplot(2, 4, 1)
plt.title('Imagen Original')
plt.imshow(A, cmap='gray')
m, n = A.shape
# Histogram of the original image.
plt.subplot(2,2,2);
plt.title('Histograma Original')
plt.hist(A.ravel(), bins = 256, range=(0, 256))
# Basic global thresholding: repeatedly split the image at T and move T
# to the midpoint of the two block means.
T = 165; iter = 16;
for k in range (1,iter):
    # Masks for the two blocks.
    I1 = (A>T) * 1;
    # Binary matrix where I1(i,j)=1 if A(i,j)>T.
    I2 = (A<=T) * 1;
    # Binary matrix where I2(i,j)=1 if A(i,j)<=T.
    B1 = A*I1 # Block 1: B1(i,j)=A(i,j) where A(i,j)>T;
    # B1(i,j)=0 where A(i,j)<=T.
    B2 = A*I2;
    m1 = sum(sum(B1))/sum(sum(I1)); # mean intensity of block 1
    m2 = sum(sum(B2))/sum(sum(I2)); # mean intensity of block 2
    T = 0.5*(m1+m2);
C = np.zeros((3,5));  # NOTE(review): dead assignment — immediately overwritten below
C = (A>T) * 1;
plt.subplot(2,2,3);
plt.imshow(C, cmap='gray')
plt.title('Umbral Basico T=' + str(T));
# --- Otsu's method with two thresholds (three-class segmentation) ---
# Step 0: histogram of image A over the 256 grey levels.
q, _ = np.histogram(A, bins=256, range=(0, 255))
# Step 1: normalized histogram -> probability of each grey level.
h = q / (m * n)
# Step 2: global mean intensity.
# BUG FIX: this was mc[255] = sum_{i<255} i*h[i], which silently dropped
# the last histogram bin; the global mean runs over all 256 bins.
levels = np.arange(256)
mg = np.sum(levels * h)
print("mg"+str(mg))
# Step 3: between-class variance sigma_b^2(k1, k2) for every threshold
# pair k1 < k2 splitting the levels into [0,k1), [k1,k2), [k2,256).
sigma2b = np.zeros((256, 256))
for k1 in range(0, 256):
    for k2 in range(0, 256):
        if k1 < k2:
            # Class probabilities of the three blocks.
            P1 = np.sum(h[0:k1])
            P2 = np.sum(h[k1:k2])
            P3 = np.sum(h[k2:256])
            resultado = 0
            # Each non-empty class contributes P_i * (m_i - mg)^2.
            if P1 != 0:
                m1 = (1 / P1) * np.sum(levels[0:k1] * h[0:k1])
                # BUG FIX: this term previously used m2 (stale, or even
                # unbound on the first pass) instead of the m1 computed
                # on the line above.
                resultado += P1 * np.power(m1 - mg, 2)
            if P2 != 0:
                m2 = (1 / P2) * np.sum(levels[k1:k2] * h[k1:k2])
                resultado += P2 * np.power(m2 - mg, 2)
            if P3 != 0:
                m3 = (1 / P3) * np.sum(levels[k2:256] * h[k2:256])
                resultado += P3 * np.power(m3 - mg, 2)
            # Store the between-class variance for this threshold pair.
            sigma2b[k1, k2] = resultado
# Step 4: the thresholds (T1, T2) maximize sigma_b^2.
# (Dead stores for the unused cumulative vectors p/mc and for Z removed.)
F = sigma2b.max(0)
X = sigma2b.argmax(0)
T2 = F.argmax(0)
T1 = X[T2]
T1 = T1 - 1
T2 = T2 - 1
print(str(T1) + ' ' + str(T2))
# Step 5: segment into three classes (0, 0.5, 1) using the two thresholds.
C = np.zeros((m, n))
for i in range(0, m):
    for j in range(0, n):
        if T1 < A[i, j] < T2:
            C[i, j] = 0.5
        elif A[i, j] <= T1:
            C[i, j] = 0
        else:
            C[i, j] = 1
plt.subplot(2, 2, 4)
plt.imshow(C, cmap='gray')
plt.title('Umbral Compuesto Otsu: T1=' + str(T1) + ' T2=' + str(T2))
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.imshow",
"numpy.power",
"numpy.zeros",
"PIL.ImageOps.grayscale",
"PIL.Image.open",
"numpy.histogram",
"numpy.array",
"numpy.arange"
] | [((494, 514), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(4)', '(1)'], {}), '(2, 4, 1)\n', (505, 514), True, 'import matplotlib.pyplot as plt\n'), ((515, 543), 'matplotlib.pyplot.title', 'plt.title', (['"""Imagen Original"""'], {}), "('Imagen Original')\n", (524, 543), True, 'import matplotlib.pyplot as plt\n'), ((544, 570), 'matplotlib.pyplot.imshow', 'plt.imshow', (['A'], {'cmap': '"""gray"""'}), "(A, cmap='gray')\n", (554, 570), True, 'import matplotlib.pyplot as plt\n'), ((623, 643), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (634, 643), True, 'import matplotlib.pyplot as plt\n'), ((643, 675), 'matplotlib.pyplot.title', 'plt.title', (['"""Histograma Original"""'], {}), "('Histograma Original')\n", (652, 675), True, 'import matplotlib.pyplot as plt\n'), ((1303, 1319), 'numpy.zeros', 'np.zeros', (['(3, 5)'], {}), '((3, 5))\n', (1311, 1319), True, 'import numpy as np\n'), ((1336, 1356), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (1347, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1382), 'matplotlib.pyplot.imshow', 'plt.imshow', (['C'], {'cmap': '"""gray"""'}), "(C, cmap='gray')\n", (1366, 1382), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1545), 'numpy.histogram', 'np.histogram', (['A'], {'bins': '(256)', 'range': '(0, 255)'}), '(A, bins=256, range=(0, 255))\n', (1516, 1545), True, 'import numpy as np\n'), ((1665, 1683), 'numpy.zeros', 'np.zeros', (['(256, 1)'], {}), '((256, 1))\n', (1673, 1683), True, 'import numpy as np\n'), ((1790, 1808), 'numpy.zeros', 'np.zeros', (['(256, 1)'], {}), '((256, 1))\n', (1798, 1808), True, 'import numpy as np\n'), ((2056, 2076), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {}), '((256, 256))\n', (2064, 2076), True, 'import numpy as np\n'), ((3223, 3239), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (3231, 3239), True, 'import numpy as np\n'), ((3392, 3412), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (3403, 3412), True, 'import matplotlib.pyplot as plt\n'), ((3412, 3438), 'matplotlib.pyplot.imshow', 'plt.imshow', (['C'], {'cmap': '"""gray"""'}), "(C, cmap='gray')\n", (3422, 3438), True, 'import matplotlib.pyplot as plt\n'), ((3511, 3521), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3519, 3521), True, 'import matplotlib.pyplot as plt\n'), ((334, 354), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (344, 354), False, 'from PIL import Image, ImageOps\n'), ((425, 438), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (433, 438), True, 'import numpy as np\n'), ((1716, 1730), 'numpy.sum', 'np.sum', (['h[0:k]'], {}), '(h[0:k])\n', (1722, 1730), True, 'import numpy as np\n'), ((1840, 1866), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'k'}), '(start=0, stop=k)\n', (1849, 1866), True, 'import numpy as np\n'), ((1903, 1924), 'numpy.sum', 'np.sum', (['(h[0:k] * temp)'], {}), '(h[0:k] * temp)\n', (1909, 1924), True, 'import numpy as np\n'), ((385, 408), 'PIL.ImageOps.grayscale', 'ImageOps.grayscale', (['img'], {}), '(img)\n', (403, 408), False, 'from PIL import Image, ImageOps\n'), ((2191, 2206), 'numpy.sum', 'np.sum', (['h[0:k1]'], {}), '(h[0:k1])\n', (2197, 2206), True, 'import numpy as np\n'), ((2226, 2242), 'numpy.sum', 'np.sum', (['h[k1:k2]'], {}), '(h[k1:k2])\n', (2232, 2242), True, 'import numpy as np\n'), ((2255, 2272), 'numpy.sum', 'np.sum', (['h[k2:256]'], {}), '(h[k2:256])\n', (2261, 2272), True, 'import numpy as np\n'), ((2372, 2399), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'k1'}), '(start=0, stop=k1)\n', (2381, 2399), True, 'import numpy as np\n'), ((2559, 2587), 'numpy.arange', 'np.arange', ([], {'start': 'k1', 'stop': 'k2'}), '(start=k1, stop=k2)\n', (2568, 2587), True, 'import numpy as np\n'), ((2748, 2777), 'numpy.arange', 'np.arange', ([], {'start': 'k2', 'stop': '(256)'}), '(start=k2, stop=256)\n', (2757, 2777), True, 'import 
numpy as np\n'), ((2452, 2474), 'numpy.sum', 'np.sum', (['(temp * h[0:k1])'], {}), '(temp * h[0:k1])\n', (2458, 2474), True, 'import numpy as np\n'), ((2499, 2519), 'numpy.power', 'np.power', (['(m2 - mg)', '(2)'], {}), '(m2 - mg, 2)\n', (2507, 2519), True, 'import numpy as np\n'), ((2640, 2663), 'numpy.sum', 'np.sum', (['(temp * h[k1:k2])'], {}), '(temp * h[k1:k2])\n', (2646, 2663), True, 'import numpy as np\n'), ((2688, 2708), 'numpy.power', 'np.power', (['(m2 - mg)', '(2)'], {}), '(m2 - mg, 2)\n', (2696, 2708), True, 'import numpy as np\n'), ((2830, 2854), 'numpy.sum', 'np.sum', (['(temp * h[k2:256])'], {}), '(temp * h[k2:256])\n', (2836, 2854), True, 'import numpy as np\n'), ((2879, 2899), 'numpy.power', 'np.power', (['(m3 - mg)', '(2)'], {}), '(m3 - mg, 2)\n', (2887, 2899), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
"""
-------------------------------------------------
File Name: lstm_classification
Description: lstm 文本分类
Author: Miller
date: 2017/9/12 0012
-------------------------------------------------
"""
__author__ = 'Miller'
import numpy as np
from keras.layers import Dense, Dropout
from keras.layers import LSTM, Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from classification.datasets import datasets
from classification.setting import *
if __name__ == '__main__':
    # Load train/test corpora and pool them so the tokenizer sees every
    # word that appears anywhere in the data set.
    train_datas, train_labels, test_datas, test_labels = datasets.load()
    texts = train_datas + test_datas
    targets = train_labels + test_labels

    # Build the vocabulary and turn each document into a sequence of word ids.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    word_index = tokenizer.word_index
    print('found %d unique tokens' % len(word_index))

    # Pad/truncate every sequence to a fixed length; one-hot encode labels.
    data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    labels = to_categorical(np.asarray(targets))
    print('shape of data tensor:', data.shape)
    print('shape of label tensor:', labels.shape)

    # Split sequentially into train / validation / test partitions.
    print('split data set')
    n_train = int(len(data) * (1 - VALIDATION_SPLIT - TEST_SPLIT))
    n_train_val = int(len(data) * (1 - TEST_SPLIT))
    x_train, y_train = data[:n_train], labels[:n_train]
    x_val, y_val = data[n_train:n_train_val], labels[n_train:n_train_val]
    x_test, y_test = data[n_train_val:], labels[n_train_val:]
    print('train data: ' + str(len(x_train)))
    print('val data: ' + str(len(x_val)))
    print('test data: ' + str(len(x_test)))

    # Model: embedding -> LSTM -> dropout -> softmax classifier.
    model = Sequential()
    model.add(Embedding(len(word_index) + 1, 200, input_length=MAX_SEQUENCE_LENGTH))
    model.add(LSTM(200, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dropout(0.2))
    model.add(Dense(labels.shape[1], activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    print(model.metrics_names)
    # Train, persist the weights, then report test-set performance.
    model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=2, batch_size=128)
    model.save('../data/model/lstm/lstm.h5')
    print(model.evaluate(x_test, y_test))
| [
"keras.preprocessing.sequence.pad_sequences",
"keras.layers.LSTM",
"numpy.asarray",
"keras.layers.Dropout",
"keras.preprocessing.text.Tokenizer",
"keras.layers.Dense",
"keras.models.Sequential",
"classification.datasets.datasets.load"
] | [((722, 737), 'classification.datasets.datasets.load', 'datasets.load', ([], {}), '()\n', (735, 737), False, 'from classification.datasets import datasets\n'), ((839, 850), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (848, 850), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1048, 1100), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'MAX_SEQUENCE_LENGTH'}), '(sequences, maxlen=MAX_SEQUENCE_LENGTH)\n', (1061, 1100), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1773, 1785), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1783, 1785), False, 'from keras.models import Sequential\n'), ((1129, 1151), 'numpy.asarray', 'np.asarray', (['all_labels'], {}), '(all_labels)\n', (1139, 1151), True, 'import numpy as np\n'), ((1885, 1930), 'keras.layers.LSTM', 'LSTM', (['(200)'], {'dropout': '(0.2)', 'recurrent_dropout': '(0.2)'}), '(200, dropout=0.2, recurrent_dropout=0.2)\n', (1889, 1930), False, 'from keras.layers import LSTM, Embedding\n'), ((1946, 1958), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1953, 1958), False, 'from keras.layers import Dense, Dropout\n'), ((1974, 2018), 'keras.layers.Dense', 'Dense', (['labels.shape[1]'], {'activation': '"""softmax"""'}), "(labels.shape[1], activation='softmax')\n", (1979, 2018), False, 'from keras.layers import Dense, Dropout\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 16 18:54:12 2017
@author: shenda
class order: ['A', 'N', 'O', '~']
"""
from CDL import CDL
import dill
import numpy as np
class Encase(object):
    """Ensemble wrapper that averages base classifiers' class probabilities.

    Each base classifier contributes with equal weight; `predict` returns
    the label ('N', 'A', 'O', '~') with the highest combined probability
    for each row of the probability matrix.
    """

    def __init__(self, clf_list):
        self.clf_list = clf_list
        self.n_clf = len(self.clf_list)
        self.prob_list = [[] for _ in range(self.n_clf)]
        self.final_prob = None
        self.pred_list = []
        self.labels = ['N', 'A', 'O', '~']
        # Uniform weights: every base classifier counts equally.
        self.weight = [1/self.n_clf for _ in range(self.n_clf)]

    def fit(self, train_data, train_label):
        """Fit every base classifier on the same training set."""
        for clf in self.clf_list:
            clf.fit(train_data, train_label)

    def predict_prob(self, test_data):
        """Return the weighted sum of the base classifiers' probabilities."""
        for idx, clf in enumerate(self.clf_list):
            self.prob_list[idx] = self.weight[idx] * clf.predict_prob(test_data)
        self.final_prob = np.sum(np.array(self.prob_list), axis=0)
        return self.final_prob

    def predict(self, test_data):
        """Return the arg-max label for every row of the combined probabilities."""
        self.final_prob = self.predict_prob(test_data)
        self.pred_list = []
        n_row, _ = self.final_prob.shape
        for row in range(n_row):
            probs = self.final_prob[row, :]
            best = list(probs).index(max(probs))
            self.pred_list.append(self.labels[best])
        return self.pred_list
if __name__ == "__main__":
pass | [
"numpy.array"
] | [((886, 910), 'numpy.array', 'np.array', (['self.prob_list'], {}), '(self.prob_list)\n', (894, 910), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
__author__ = '<NAME>'
__contact__ = '<EMAIL>'
__copyright__ = '(c) <NAME> 2017'
__license__ = 'MIT'
__date__ = 'Thu Feb 2 14:02:12 2017'
__status__ = "initial release"
__url__ = "___"
"""
Name: views.py
Compatibility: Python 3.5
Description: This program handles all the views and twitter analysis for the Django app
Requires: oauth2, json, random, nltk, dateutil, numpy, plotly, rq, django-rq
AUTHOR: <NAME>
ORGANIZATION: Dartmouth College
Contact: <EMAIL>
Copyright: (c) <NAME> 2017
"""
#------------------------------------------------------------------------------
# Imports
# -all library/module imports go here-
#python libraries for various tool tasks
import json
import random
import datetime
from dateutil import parser
import time
import logging
import sys
#authorization library for making GET req to Twitter
import oauth2
#NLTK libraries to classify data
import nltk
from nltk.corpus import pros_cons
#django libraries
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.urls import reverse
from django.conf import settings
#data analysis and plotting libraries
import numpy as np
import plotly.offline as pltly
import plotly.graph_objs as go
#Redis libraries for background workers
import django_rq
from rq import Connection
from rq.job import Job
from redis import Redis
connection = django_rq.get_connection()
'''
Function used to analyze the JSON response data from Twitter
Params:
classifier - NTLK Naive Bayes' classifier
jsonResponse - the JSON response from Twitter REST APIs call
data - the data structure to add to
counter - current count of Tweets read
Returns:
counter - current count of Tweets read
'''
def analyzeJSON(classifier, jsonResponse, data, counter):
    """Score every tweet in a Twitter search response and accumulate results.

    Each word of a tweet is classified 'pro'/'con' with the Naive Bayes
    classifier; adjectives/adverbs are weighted 100 and nouns/verbs 20,
    added for 'pro' and subtracted otherwise, producing a per-tweet
    sentiment total that is folded into `data`.

    Params:
        classifier - NLTK Naive Bayes classifier trained on {'word': w} features
        jsonResponse - parsed JSON body of a Twitter search API response
        data - accumulator dict (keys: items, timezones, timezones_sentiment,
               dates, sentiments, retweets)
        counter - running count of tweets processed so far
    Returns:
        counter - updated count of tweets processed
    """
    #if twitter didn't return statuses, return the count unchanged
    if (not('statuses' in jsonResponse)):
        return counter
    #for each tweet in the Twitter response
    for tweet in jsonResponse['statuses']:
        #running sentiment total for this tweet
        total = 0
        #tokenize the tweet text
        tokens = nltk.word_tokenize(tweet['full_text'])
        #part-of-speech tag the tokens
        pos = nltk.pos_tag(tokens)
        #words that survive filtering, with their POS tags
        words_to_analyze = []
        #drop @-mentions so user handles don't skew the sentiment
        for (word, tag) in pos:
            if not("@" in word):
                words_to_analyze.append((word, tag))
        #score each remaining word
        for (word, tag) in words_to_analyze:
            #classify the word as 'pro' or 'con'
            classification = classifier.classify({ 'word': word })
            #probability the classification is correct, used as the score
            score = classifier.prob_classify({ 'word': word }).prob(classification)
            #positive ('pro') words add to the total...
            if (classification == "pro"):
                #adjectives (JJ*) and adverbs (RB*) are weighted 100
                if ("JJ" in tag or "RB" in tag):
                    total += (score*100)
                #nouns (NN*) and verbs (VB*) are weighted 20
                elif ("NN" in tag or "VB" in tag):
                    total += (score*20)
            #...negative ('con') words subtract, with the same weights
            else:
                #adjectives (JJ*) and adverbs (RB*) are weighted 100
                if ("JJ" in tag or "RB" in tag):
                    total -= (score*100)
                #nouns (NN*) and verbs (VB*) are weighted 20
                elif ("NN" in tag or "VB" in tag):
                    total -= (score*20)
        #the author's self-declared timezone (may be None)
        timeZone = tweet['user']['time_zone']
        #row for the tweet table in the UI
        data["items"].append({ 'name': tweet['user']['name'], 'text': tweet['full_text'], 'avg': total, 'count': counter, 'tz': timeZone})
        #increment or init the per-timezone tweet count
        if (not(timeZone in data['timezones'])):
            data["timezones"][timeZone] = 1
        else:
            data["timezones"][timeZone] += 1
        #accumulate per-timezone sentiment totals
        if (not(timeZone in data['timezones_sentiment'])):
            data["timezones_sentiment"][timeZone] = total
        else:
            data["timezones_sentiment"][timeZone] += total
        #parse the tweet's creation timestamp
        datetimetweet = parser.parse(tweet['created_at'])
        data["dates"].append(datetimetweet)
        #record this tweet's sentiment total
        data["sentiments"].append(total)
        #repeat the total once per retweet to weight the histogram by reach
        for i in range(tweet['retweet_count']):
            data["retweets"].append(total)
        #one more tweet processed
        counter += 1
    #return current count
    return counter
'''
Function used to create the classifier using NLTK Naive Bayes' classifier
Params: NONE
Returns:
classifier - classifier object created by NLTK library
'''
def createClassifier():
    """Train a Naive Bayes word classifier from the NLTK pros/cons corpus.

    Returns:
        classifier - trained nltk.NaiveBayesClassifier mapping
                     {'word': w} features to 'pro' / 'con' labels
    """
    # Point NLTK at the bundled corpus directory.
    nltk.data.path.append('./static/twitter/nltk_dir')
    # Label every word from the pros file 'pro' and the cons file 'con'.
    labeled_pros_cons = [(word, 'pro') for word in pros_cons.words('IntegratedPros.txt')]
    labeled_pros_cons += [(word, 'con') for word in pros_cons.words('IntegratedCons.txt')]
    # Shuffle so training order mixes the two classes.
    random.shuffle(labeled_pros_cons)
    # NLTK training format: ({feature_dict}, label) pairs.
    features = [({'word': word}, label) for (word, label) in labeled_pros_cons]
    return nltk.NaiveBayesClassifier.train(features)
'''
Function used to retrieve the JSON response data from Twitter
Params: NONE
Returns:
data - all of the parsed JSON data returned by Twitter API call
'''
def getTwitterData(cap):
    """Search Twitter for tweets mentioning @EPA and sentiment-score them.

    Pages through the search API until `cap` tweets have been analyzed or
    no further results are available.

    Params:
        cap - maximum number of tweets to analyze
    Returns:
        data - dict of analysis results; contains key 'failed' if the very
               first request was rate-limited, or 'error' if a later page
               was (partial data is still returned in that case)
    """
    #train the word-level sentiment classifier
    classifier = createClassifier();
    #Twitter consumer and token keys
    #NOTE(review): credentials are hard-coded in source - should move to settings/env
    consumerKey = 'eBa29YaUmi43aCmN9dDjKaTIN'
    consumerToken = '<KEY>'
    tokenKey = '<KEY>';
    tokenSecret = '<KEY>'
    #create consumer/token/client with auth keys
    consumer = oauth2.Consumer(key=consumerKey, secret=consumerToken)
    token = oauth2.Token(key=tokenKey, secret=tokenSecret)
    client = oauth2.Client(consumer, token)
    #search: recent tweets mentioning @EPA, retweets/replies excluded, 100 per page
    searchUrl = 'https://api.twitter.com/1.1/search/tweets.json?q=%40EPA%20-filter%3Aretweets&result_type=recent&count=100&tweet_mode=extended&exclude_replies=true'
    resp, content = client.request(searchUrl, method="GET", body=b"", headers=None)
    #decode and parse result
    stringResponse = content.decode("utf-8")
    jsonResponse = json.loads(stringResponse)
    #start counter at 1
    counter = 1
    #create data dictionary with each necessary part
    data = {}
    data["sentiments"] = []
    data["items"] = []
    data["dates"] = []
    data["retweets"] = []
    data["timezones"] = {}
    data["timezones_sentiment"] = {}
    data["cap"] = cap
    #missing 'search_metadata' means the request failed (rate limit) - bail out
    if (not('search_metadata' in jsonResponse)):
        print("--------------START ERROR--------------")
        print("Maximum requests reached - try again in 15 minutes")
        print(jsonResponse)
        print("--------------END ERROR--------------")
        sys.stdout.flush()
        return {"failed": "Twitter rate limites reached - wait 15 minutes and try again"}
    #otherwise...
    else:
        #analyze first response
        counter = analyzeJSON(classifier, jsonResponse, data, counter)
        #page through 'next_results' until the cap is reached or results run out
        while ('search_metadata' in jsonResponse and 'next_results' in jsonResponse['search_metadata'] and counter <= cap):
            #get next response with new search URL
            searchUrl = 'https://api.twitter.com/1.1/search/tweets.json' + str(jsonResponse['search_metadata']['next_results']) + '&tweet_mode=extended&exclude_replies=true'
            resp, content = client.request(searchUrl, method="GET", body=b"", headers=None)
            stringResponse = content.decode("utf-8")
            jsonResponse = json.loads(stringResponse)
            #analyze next batch
            counter = analyzeJSON(classifier, jsonResponse, data, counter)
            #rate-limited mid-pagination: keep the partial data, flag the error
            if (not('search_metadata' in jsonResponse)):
                print("--------------START ERROR--------------")
                print("Twitter rate limites reached - wait 15 minutes and try again")
                print(jsonResponse)
                print("--------------END ERROR--------------")
                sys.stdout.flush()
                #still have some results - return those
                data["error"] = "Twitter rate limites were reached - there may be incomplete data"
                return data
    return data;
'''
Function used to create base histogram
Params:
data - analyzed data from Twitter search
Returns:
graph - HTML for graph created by plotly
'''
def plotBaseData(data):
    """Build the overlaid sentiment histogram (raw tweets vs retweet-weighted).

    Params:
        data - analyzed data from the Twitter search
    Returns:
        graph - HTML <div> for the plotly histogram, mode bar and link stripped
    """
    #two overlaid histograms: raw tweet sentiments and retweet-weighted totals
    data = [go.Histogram(
        x=data["sentiments"],
        opacity=0.75,
        marker=dict(
            color="#DB5461"
        ),
        name='Tweets'
    ),
    go.Histogram(
        x=data["retweets"],
        opacity=0.75,
        marker=dict(
            color="#8AA29E"
        ),
        name='Retweets'
    )]
    #set colors/fonts/titles for chart
    layout = go.Layout(barmode='overlay', title='Sentiment Analysis of Tweets Containing @EPA',
        titlefont=dict(
            family='Raleway',
            size=18,
            color='#031634'
        ),
        xaxis=dict(
            title='Sentiment',
            titlefont=dict(
                family='Raleway',
                size=18,
                color='#031634'
            ),
            tickfont=dict(
                family='Raleway',
                size=12,
                color='#031634'
            )
        ),
        yaxis=dict(
            title='Count',
            titlefont=dict(
                family='Raleway',
                size=18,
                color='#031634'
            ),
            tickfont=dict(
                family='Raleway',
                size=12,
                color='#031634'
            )
        ))
    #create the figure and render it as an embeddable HTML div
    figure = go.Figure(data = data, layout = layout)
    graph = pltly.plot(figure, output_type='div')
    #strip the mode bar and the plotly "edit chart" link from the markup
    graph = graph.replace('displayModeBar:"hover"', 'displayModeBar:false')
    graph = graph.replace('"showLink": true', '"showLink": false')
    return graph;
'''
Function used to create time zone pie chart
Params:
data - analyzed data from Twitter search
Returns:
graph - HTML for graph created by plotly
'''
def plotTzPie(data):
    """Build the pie chart of tweet counts per timezone.

    Params:
        data - analyzed data from the Twitter search
    Returns:
        graph - HTML <div> for the plotly pie chart, mode bar and link stripped
    """
    #remove None-keyed timezones (NOTE: mutates the caller's dict in place)
    if None in data["timezones"]: del data["timezones"][None]
    #set data for chart
    data = [go.Pie(
        labels=data["timezones"].keys(),
        values=data["timezones"].values(),
        textinfo='none'
    )]
    #set colors/fonts/titles for chart
    layout = go.Layout(title='Timezones of Tweets Containing @EPA',
        titlefont=dict(
            family='Raleway',
            size=18,
            color='#031634'
        ),
        font=dict(
            family='Raleway',
            size=18,
            color='#031634'
        ))
    #create the figure and render it as an embeddable HTML div
    figure = go.Figure(data = data, layout = layout)
    graph = pltly.plot(figure, output_type='div')
    #strip the mode bar and the plotly "edit chart" link from the markup
    graph = graph.replace('displayModeBar:"hover"', 'displayModeBar:false')
    graph = graph.replace('"showLink": true', '"showLink": false')
    return graph;
'''
Function used to create time zone bar graph
Params:
data - analyzed data from Twitter search
Returns:
graph - HTML for graph created by plotly
'''
def plotTzBar(data):
    """Build the bar chart of summed sentiment per timezone.

    Params:
        data - analyzed data from the Twitter search
    Returns:
        graph - HTML <div> for the plotly bar chart, mode bar and link stripped
    """
    #remove None-keyed timezones (NOTE: mutates the caller's dict in place)
    if None in data["timezones_sentiment"]: del data["timezones_sentiment"][None]
    #get the data for bar chart
    data = [go.Bar(
        x=data["timezones_sentiment"].keys(),
        y=data["timezones_sentiment"].values()
    )]
    #set colors/fonts/titles for chart
    layout = go.Layout(title='Sentiment of Tweets Containing @EPA by Timezone',
        titlefont=dict(
            family='Raleway',
            size=18,
            color='#031634'
        ),
        xaxis=dict(
            title='Timezone',
            titlefont=dict(
                family='Raleway',
                size=18,
                color='#031634'
            ),
            tickfont=dict(
                family='Raleway',
                size=12,
                color='#031634'
            )
        ),
        yaxis=dict(
            title='Overall Sentiment',
            titlefont=dict(
                family='Raleway',
                size=18,
                color='#031634'
            ),
            tickfont=dict(
                family='Raleway',
                size=12,
                color='#031634'
            )
        ),
        height=600,
        #extra bottom margin so long timezone names fit under the axis
        margin=go.Margin(
            b=200
        ))
    #create the figure and render it as an embeddable HTML div
    figure = go.Figure(data = data, layout = layout)
    graph = pltly.plot(figure, output_type='div')
    #strip the mode bar and the plotly "edit chart" link from the markup
    graph = graph.replace('displayModeBar:"hover"', 'displayModeBar:false')
    graph = graph.replace('"showLink": true', '"showLink": false')
    return graph;
'''
Function used to load charts and data into web template
Params:
request - GET request
Returns:
HttpResponse - response to be served
'''
def load_charts(request):
    """Wait for the background analysis job, then serve the charts page.

    Params:
        request - Django GET request (job id read from the session)
    Returns:
        HttpResponse - rendered charts page, or a plain-text error message
    """
    try:
        #fetch the job stored in the session and wait for it to settle
        #NOTE(review): this polls in the request thread (0.1s sleeps) - it
        #blocks a worker for the whole job duration; confirm acceptable
        job = Job.fetch(request.session['job-id'], connection=connection)
        while (job.status != "finished" and job.status != "failed"):
            time.sleep(0.1)
        #if failed, tell user something went wrong
        if (job.status == "failed"):
            return HttpResponse("Something went wrong - most likely Redis is full. Wait 30 seconds and try returning to the homepage.", content_type='text/plain')
        #get the job data
        data = job.result
        #'failed' means Twitter responded with an error - relay it to the user
        if ("failed" in data):
            return HttpResponse(data["failed"], content_type='text/plain')
        #'error' means partial data - render the page with the warning included
        if ("error" in data):
            pageData = {'error': data["error"], 'cap': data["cap"], 'graph': plotBaseData(data), 'tzGraph':plotTzPie(data), 'tsGraph':plotTzBar(data), 'items': data["items"], 'median': np.median(data["sentiments"]), 'mean': np.mean(data["sentiments"])}
        #otherwise send normal page data
        else:
            pageData = {'cap': data["cap"], 'graph': plotBaseData(data), 'tzGraph':plotTzPie(data), 'tsGraph':plotTzBar(data), 'items': data["items"], 'median': np.median(data["sentiments"]), 'mean': np.mean(data["sentiments"])}
        #serve the charts page with data dictionary
        return HttpResponse(render(request, 'charts.html', pageData, content_type='application/html'))
    except Exception as e:
        #log the failure and send the user back to the homepage
        print("--------------START ERROR--------------")
        print(e)
        print("--------------END ERROR--------------")
        sys.stdout.flush()
        return redirect('/')
'''
Function used to check status of the current job
Params:
request - GET request
Returns:
job.status - status of the worker job
'''
def check_status(request):
    """Report the status of the session's background job as plain text.

    Params:
        request - Django GET request (job id read from the session)
    Returns:
        HttpResponse with the job status, or a redirect home on failure
    """
    try:
        # Look up the queued job by the id stored in the session.
        job_id = request.session['job-id']
        current_job = Job.fetch(job_id, connection=connection)
        return HttpResponse(current_job.status, content_type='text/plain')
    except Exception as e:
        # Log the failure and send the user back to the homepage.
        print("--------------START ERROR--------------")
        print(e)
        print("--------------END ERROR--------------")
        sys.stdout.flush()
        return redirect('/')
'''
Function used to serve the methods page
Params:
request - GET request
Returns:
HttpResponse - response to be served
'''
def methods(request):
    """Serve the static methods page.

    Params:
        request - Django GET request
    Returns:
        HttpResponse with the rendered methods template
    """
    rendered = render(request, 'methods.html', content_type='application/html')
    return HttpResponse(rendered)
'''
Function used to serve the index and start the worker
Params:
request - GET request
Returns:
HttpResponse - response to be served
'''
def index(request):
    """Serve the homepage and kick off the background Twitter analysis job.

    Params:
        request - Django GET request; optional ?cap=N limits tweets analyzed
    Returns:
        HttpResponse - rendered index page, or a plain-text error message
    """
    try:
        try:
            #get tweet cap from the query string
            cap = int(request.GET.get("cap"))
        except Exception as e:
            #missing or non-numeric cap: fall back to the default
            cap = 500
        #queue the worker to fetch and analyze Twitter data (10 min timeout,
        #result kept for 30s)
        job = django_rq.enqueue(getTwitterData, cap, timeout=600, result_ttl=30)
        #store job id in the session so later views can poll it
        request.session['job-id'] = job.id
        return HttpResponse(render(request, 'index.html', content_type='application/html'))
    except Exception as e:
        #log the failure and report it to the user
        print("--------------START ERROR--------------")
        print(e)
        print("--------------END ERROR--------------")
        sys.stdout.flush()
        return HttpResponse("Too many requests - try again in a few seconds.", content_type='text/plain')
| [
"random.shuffle",
"numpy.mean",
"sys.stdout.flush",
"plotly.graph_objs.Margin",
"nltk.word_tokenize",
"json.loads",
"django.http.HttpResponse",
"nltk.corpus.pros_cons.words",
"plotly.offline.plot",
"oauth2.Consumer",
"oauth2.Client",
"django.shortcuts.render",
"plotly.graph_objs.Figure",
"... | [((1522, 1548), 'django_rq.get_connection', 'django_rq.get_connection', ([], {}), '()\n', (1546, 1548), False, 'import django_rq\n'), ((4615, 4665), 'nltk.data.path.append', 'nltk.data.path.append', (['"""./static/twitter/nltk_dir"""'], {}), "('./static/twitter/nltk_dir')\n", (4636, 4665), False, 'import nltk\n'), ((4745, 4782), 'nltk.corpus.pros_cons.words', 'pros_cons.words', (['"""IntegratedPros.txt"""'], {}), "('IntegratedPros.txt')\n", (4760, 4782), False, 'from nltk.corpus import pros_cons\n'), ((4852, 4889), 'nltk.corpus.pros_cons.words', 'pros_cons.words', (['"""IntegratedCons.txt"""'], {}), "('IntegratedCons.txt')\n", (4867, 4889), False, 'from nltk.corpus import pros_cons\n'), ((4971, 5004), 'random.shuffle', 'random.shuffle', (['labeled_pros_cons'], {}), '(labeled_pros_cons)\n', (4985, 5004), False, 'import random\n'), ((5177, 5218), 'nltk.NaiveBayesClassifier.train', 'nltk.NaiveBayesClassifier.train', (['features'], {}), '(features)\n', (5208, 5218), False, 'import nltk\n'), ((5664, 5718), 'oauth2.Consumer', 'oauth2.Consumer', ([], {'key': 'consumerKey', 'secret': 'consumerToken'}), '(key=consumerKey, secret=consumerToken)\n', (5679, 5718), False, 'import oauth2\n'), ((5728, 5774), 'oauth2.Token', 'oauth2.Token', ([], {'key': 'tokenKey', 'secret': 'tokenSecret'}), '(key=tokenKey, secret=tokenSecret)\n', (5740, 5774), False, 'import oauth2\n'), ((5785, 5815), 'oauth2.Client', 'oauth2.Client', (['consumer', 'token'], {}), '(consumer, token)\n', (5798, 5815), False, 'import oauth2\n'), ((6179, 6205), 'json.loads', 'json.loads', (['stringResponse'], {}), '(stringResponse)\n', (6189, 6205), False, 'import json\n'), ((9297, 9332), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (9306, 9332), True, 'import plotly.graph_objs as go\n'), ((9346, 9383), 'plotly.offline.plot', 'pltly.plot', (['figure'], {'output_type': '"""div"""'}), "(figure, output_type='div')\n", (9356, 9383), True, 
'import plotly.offline as pltly\n'), ((10301, 10336), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (10310, 10336), True, 'import plotly.graph_objs as go\n'), ((10350, 10387), 'plotly.offline.plot', 'pltly.plot', (['figure'], {'output_type': '"""div"""'}), "(figure, output_type='div')\n", (10360, 10387), True, 'import plotly.offline as pltly\n'), ((11817, 11852), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (11826, 11852), True, 'import plotly.graph_objs as go\n'), ((11866, 11903), 'plotly.offline.plot', 'pltly.plot', (['figure'], {'output_type': '"""div"""'}), "(figure, output_type='div')\n", (11876, 11903), True, 'import plotly.offline as pltly\n'), ((2164, 2202), 'nltk.word_tokenize', 'nltk.word_tokenize', (["tweet['full_text']"], {}), "(tweet['full_text'])\n", (2182, 2202), False, 'import nltk\n'), ((2254, 2274), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {}), '(tokens)\n', (2266, 2274), False, 'import nltk\n'), ((4071, 4104), 'dateutil.parser.parse', 'parser.parse', (["tweet['created_at']"], {}), "(tweet['created_at'])\n", (4083, 4104), False, 'from dateutil import parser\n'), ((6751, 6769), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6767, 6769), False, 'import sys\n'), ((12337, 12396), 'rq.job.Job.fetch', 'Job.fetch', (["request.session['job-id']"], {'connection': 'connection'}), "(request.session['job-id'], connection=connection)\n", (12346, 12396), False, 'from rq.job import Job\n'), ((14016, 14075), 'rq.job.Job.fetch', 'Job.fetch', (["request.session['job-id']"], {'connection': 'connection'}), "(request.session['job-id'], connection=connection)\n", (14025, 14075), False, 'from rq.job import Job\n'), ((14085, 14136), 'django.http.HttpResponse', 'HttpResponse', (['job.status'], {'content_type': '"""text/plain"""'}), "(job.status, content_type='text/plain')\n", (14097, 14136), False, 'from django.http 
import HttpResponse\n'), ((14489, 14553), 'django.shortcuts.render', 'render', (['request', '"""methods.html"""'], {'content_type': '"""application/html"""'}), "(request, 'methods.html', content_type='application/html')\n", (14495, 14553), False, 'from django.shortcuts import render, redirect\n'), ((14914, 14980), 'django_rq.enqueue', 'django_rq.enqueue', (['getTwitterData', 'cap'], {'timeout': '(600)', 'result_ttl': '(30)'}), '(getTwitterData, cap, timeout=600, result_ttl=30)\n', (14931, 14980), False, 'import django_rq\n'), ((7475, 7501), 'json.loads', 'json.loads', (['stringResponse'], {}), '(stringResponse)\n', (7485, 7501), False, 'import json\n'), ((7886, 7904), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7902, 7904), False, 'import sys\n'), ((11748, 11764), 'plotly.graph_objs.Margin', 'go.Margin', ([], {'b': '(200)'}), '(b=200)\n', (11757, 11764), True, 'import plotly.graph_objs as go\n'), ((12464, 12479), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (12474, 12479), False, 'import time\n'), ((12567, 12720), 'django.http.HttpResponse', 'HttpResponse', (['"""Something went wrong - most likely Redis is full. Wait 30 seconds and try returning to the homepage."""'], {'content_type': '"""text/plain"""'}), "(\n 'Something went wrong - most likely Redis is full. 
Wait 30 seconds and try returning to the homepage.'\n , content_type='text/plain')\n", (12579, 12720), False, 'from django.http import HttpResponse\n'), ((12854, 12909), 'django.http.HttpResponse', 'HttpResponse', (["data['failed']"], {'content_type': '"""text/plain"""'}), "(data['failed'], content_type='text/plain')\n", (12866, 12909), False, 'from django.http import HttpResponse\n'), ((13535, 13608), 'django.shortcuts.render', 'render', (['request', '"""charts.html"""', 'pageData'], {'content_type': '"""application/html"""'}), "(request, 'charts.html', pageData, content_type='application/html')\n", (13541, 13608), False, 'from django.shortcuts import render, redirect\n'), ((13748, 13766), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13764, 13766), False, 'import sys\n'), ((13776, 13789), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (13784, 13789), False, 'from django.shortcuts import render, redirect\n'), ((14275, 14293), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (14291, 14293), False, 'import sys\n'), ((14303, 14316), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (14311, 14316), False, 'from django.shortcuts import render, redirect\n'), ((15070, 15132), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {'content_type': '"""application/html"""'}), "(request, 'index.html', content_type='application/html')\n", (15076, 15132), False, 'from django.shortcuts import render, redirect\n'), ((15272, 15290), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15288, 15290), False, 'import sys\n'), ((15300, 15394), 'django.http.HttpResponse', 'HttpResponse', (['"""Too many requests - try again in a few seconds."""'], {'content_type': '"""text/plain"""'}), "('Too many requests - try again in a few seconds.',\n content_type='text/plain')\n", (15312, 15394), False, 'from django.http import HttpResponse\n'), ((13134, 13163), 'numpy.median', 'np.median', 
(["data['sentiments']"], {}), "(data['sentiments'])\n", (13143, 13163), True, 'import numpy as np\n'), ((13173, 13200), 'numpy.mean', 'np.mean', (["data['sentiments']"], {}), "(data['sentiments'])\n", (13180, 13200), True, 'import numpy as np\n'), ((13397, 13426), 'numpy.median', 'np.median', (["data['sentiments']"], {}), "(data['sentiments'])\n", (13406, 13426), True, 'import numpy as np\n'), ((13436, 13463), 'numpy.mean', 'np.mean', (["data['sentiments']"], {}), "(data['sentiments'])\n", (13443, 13463), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from udacidrone.messaging import MsgID
from enum import Enum
from udacidrone.connection import MavlinkConnection
import numpy as np
from plane_drone import Udaciplane
from plane_control import LongitudinalAutoPilot
from plane_control import LateralAutoPilot
from plane_control import euler2RM
import time
import sys
class Scenario(Enum):
    """Control exercises selectable at runtime.

    The simulator communicates the desired scenario through the drone's
    ``status`` field; ``FixedWingProject.state_callback`` maps that integer
    to one of these members via ``Scenario(self.status)``.
    """
    SANDBOX = 0
    TRIM = 1
    ALTITUDE = 2
    AIRSPEED = 3
    CLIMB = 4
    LONGITUDINAL = 5
    ROLL = 6
    TURN = 7
    YAW = 8
    LINE = 9
    ORBIT = 10
    LATERAL = 11
    FIXEDWING = 12
    FLYINGCAR = 13
class FixedWingProject(Udaciplane):
    """Fixed-wing control-project drone.

    Wires the longitudinal/lateral autopilots from ``plane_control`` into
    the udacidrone callback framework and flies the exercises defined in
    :class:`Scenario`.  The active scenario is selected at runtime from the
    simulator's ``status`` field (see :meth:`state_callback`).
    """

    def __init__(self, connection, tlog_directory="logs", tlog_name="tlog.txt"):
        super().__init__(connection, tlog_directory=tlog_directory, tlog_name=tlog_name)
        self.longitudinal_autopilot = LongitudinalAutoPilot()
        self.lateral_autopilot = LateralAutoPilot()

        # Longitudinal-challenge gates, each defined as
        # [along_track_distance (meters), altitude (meters)]
        self.longitudinal_gates = [np.array([200.0, 200.0]),
                                   np.array([1100.0, 300.0]),
                                   np.array([1400.0, 280.0]),
                                   np.array([2200.0, 200.0])]

        # Commanded values consumed by the autopilot loops
        self.airspeed_cmd = 41.0
        self.altitude_cmd = 450.0
        self.throttle_cmd = 0.0
        self.elevator_cmd = 0.0
        self.pitch_cmd = 0.0
        self.aileron_cmd = 0.0
        self.rudder_cmd = 0.0
        self.roll_cmd = 0.0
        self.sideslip_cmd = 0.0
        self.yaw_cmd = 0.0
        self.roll_ff = 0.0
        self.course_cmd = 0.0

        # Lateral guidance targets (NED positions)
        self.line_origin = np.array([0.0, 0.0, 0.0])
        self.orbit_center = np.array([0.0, 0.0, 0.0])
        self.orbit_cw = True
        self.waypoints = [np.array([0.0, 500.0, -400.0]),
                          np.array([2600.0, 500.0, -500.0]),
                          np.array([2600.0, -2500.0, -400.0]),
                          np.array([100.0, 500.0, -450.0]),
                          np.array([100.0, -2000.0, -450.0])]
        # Sliding (previous, current, next) waypoint window
        self.waypoint_tuple = (np.array([0.0, 0.0, 0.0]),
                               np.array([0.0, 0.0, 0.0]),
                               np.array([0.0, 0.0, 0.0]))

        self.scenario = Scenario.SANDBOX
        self.time_cmd = 0.0
        self.cmd_freq = 100.0
        # Timestamps of the previous callback invocations, used for dt
        self.last_airspeed_time = None
        self.last_position_time = None
        self.last_attitude_time = None

        # register all your callbacks here
        self.register_callback(MsgID.LOCAL_POSITION,
                               self.local_position_callback)
        self.register_callback(MsgID.LOCAL_VELOCITY, self.airspeed_callback)
        self.register_callback(MsgID.STATE, self.state_callback)
        self.register_callback(MsgID.ATTITUDE, self.attitude_callback)
        #self.register_callback(MsgID.RAW_GYROSCOPE, self.gyro_callback)
        #self.register_callback(MsgID.AIRSPEED, self.airspeed_callback)

        self._scenario_started = False

    def state_callback(self):
        """Start/stop the scenario selected via the simulator ``status`` field."""
        if self.scenario == Scenario.SANDBOX:
            if self.status != 0:
                self.scenario = Scenario(self.status)
                self.init_scenario()
        if self.scenario != Scenario.SANDBOX:
            if not self._scenario_started:
                self.take_control()
                self.arm()
                self._scenario_started = True
                print('Start Scenario...')
            elif self.guided != True:
                # The pilot took over (guided mode lost); end the scenario.
                self.stop()

    def airspeed_callback(self):
        """Estimate airspeed/sideslip and run the airspeed-driven loops."""
        # Assuming no wind, for now...
        self.airspeed = np.linalg.norm(self.local_velocity)
        if self.airspeed != 0.0:
            rot_mat = euler2RM(self.attitude[0], self.attitude[1],
                               self.attitude[2])
            # Lateral (body-y) velocity component -> sideslip angle
            rot_t = rot_mat.transpose()
            side_velocity = rot_t[1, 0] * self.local_velocity[0] + \
                rot_t[1, 1] * self.local_velocity[1] + \
                rot_t[1, 2] * self.local_velocity[2]
            self.sideslip = np.arcsin(side_velocity / self.airspeed)
        else:
            self.sideslip = 0.0

        dt = 0.0
        if self.last_airspeed_time is not None:
            dt = self.local_velocity_time - self.last_airspeed_time
            # Ignore out-of-order/duplicate velocity messages
            if dt <= 0.0:
                return
        self.last_airspeed_time = self.local_velocity_time

        if not self._scenario_started:
            return

        if self.scenario == Scenario.AIRSPEED:
            self.throttle_cmd = self.longitudinal_autopilot.airspeed_loop(
                self.airspeed, self.airspeed_cmd, dt)
            self.cmd_longitude_mode(self.elevator_cmd, self.throttle_cmd,
                                    0, 0, self.last_airspeed_time)

        if self.scenario == Scenario.CLIMB:
            self.pitch_cmd = self.longitudinal_autopilot.airspeed_pitch_loop(
                self.airspeed, self.airspeed_cmd, dt)

        if ((self.scenario == Scenario.LONGITUDINAL) |
                (self.scenario == Scenario.FIXEDWING)):
            altitude = -self.local_position[2]
            [self.pitch_cmd, self.throttle_cmd] = \
                self.longitudinal_autopilot.longitudinal_loop(
                    self.airspeed, altitude, self.airspeed_cmd,
                    self.altitude_cmd, dt)

        if ((self.scenario == Scenario.ROLL) |
                (self.scenario == Scenario.TURN) |
                (self.scenario == Scenario.YAW) |
                (self.scenario == Scenario.LINE) |
                (self.scenario == Scenario.ORBIT) |
                (self.scenario == Scenario.LATERAL) |
                (self.scenario == Scenario.FIXEDWING)):
            self.rudder_cmd = self.lateral_autopilot.sideslip_hold_loop(
                self.sideslip, dt)

        if self.scenario == Scenario.FLYINGCAR:
            # TODO: Insert Flying Car Scenario code here
            pass

    def attitude_callback(self):
        """Run the inner attitude loops and send actuator commands."""
        dt = 0.0
        if self.last_attitude_time is not None:
            dt = self.attitude_time - self.last_attitude_time
        self.last_attitude_time = self.attitude_time

        if not self._scenario_started:
            return

        if ((self.scenario == Scenario.ALTITUDE) |
                (self.scenario == Scenario.AIRSPEED) |
                (self.scenario == Scenario.CLIMB) |
                (self.scenario == Scenario.LONGITUDINAL)):
            self.elevator_cmd = self.longitudinal_autopilot.pitch_loop(
                self.attitude[1], self.gyro_raw[1], self.pitch_cmd)
            self.cmd_longitude_mode(self.elevator_cmd, self.throttle_cmd)
            # Debug aid: flag unusually large pitch rates
            if np.abs(self.gyro_raw[1]) >= 1:
                print(self.gyro_raw)

        if ((self.scenario == Scenario.YAW) |
                (self.scenario == Scenario.LINE) |
                (self.scenario == Scenario.ORBIT) |
                (self.scenario == Scenario.LATERAL) |
                (self.scenario == Scenario.FIXEDWING)):
            self.roll_cmd = self.lateral_autopilot.yaw_hold_loop(
                self.yaw_cmd, self.attitude[2], dt, self.roll_ff)

        if ((self.scenario == Scenario.ROLL) |
                (self.scenario == Scenario.TURN) |
                (self.scenario == Scenario.YAW) |
                (self.scenario == Scenario.LINE) |
                (self.scenario == Scenario.ORBIT) |
                (self.scenario == Scenario.LATERAL)):
            self.aileron_cmd = self.lateral_autopilot.roll_attitude_hold_loop(
                self.roll_cmd, self.attitude[0], self.gyro_raw[0])
            self.cmd_lateral_mode(self.aileron_cmd, self.rudder_cmd,
                                  self.altitude_cmd, self.airspeed_cmd)

        if self.scenario == Scenario.FIXEDWING:
            # Full fixed-wing challenge drives all four control surfaces
            self.aileron_cmd = self.lateral_autopilot.roll_attitude_hold_loop(
                self.roll_cmd, self.attitude[0], self.gyro_raw[0])
            self.elevator_cmd = self.longitudinal_autopilot.pitch_loop(
                self.attitude[1], self.gyro_raw[1], self.pitch_cmd)
            self.cmd_controls(self.aileron_cmd, self.elevator_cmd, self.rudder_cmd, self.throttle_cmd)

        if self.scenario == Scenario.FLYINGCAR:
            # TODO: Insert Flying Car Scenario code here
            pass

    def local_position_callback(self):
        """Run the outer guidance loops (altitude, gates, line/orbit/waypoints)."""
        dt = 0.0
        if self.last_position_time is not None:
            dt = self.local_position_time - self.last_position_time
        self.last_position_time = self.local_position_time
        if dt <= 0.0:
            return
        if not self._scenario_started:
            return

        if self.scenario == Scenario.ALTITUDE:
            altitude = -self.local_position[2]
            self.pitch_cmd = self.longitudinal_autopilot.altitude_loop(
                altitude, self.altitude_cmd, dt)

        if self.scenario == Scenario.LONGITUDINAL:
            along_track = np.linalg.norm(self.local_position[0:2])
            if along_track > self.gate_target[0]:
                if len(self.longitudinal_gates) == 0:
                    # Last gate passed: end the scenario
                    self.stop()
                else:
                    self.gate_target = self.longitudinal_gates.pop(0)
                    print('Gate Target: ', self.gate_target)
                self.altitude_cmd = self.gate_target[1]

        if self.scenario == Scenario.LINE:
            self.yaw_cmd = self.lateral_autopilot.straight_line_guidance(
                self.line_origin, self.line_course, self.local_position)
            self.roll_ff = 0.0

        if self.scenario == Scenario.ORBIT:
            self.yaw_cmd = self.lateral_autopilot.orbit_guidance(
                self.orbit_center, self.orbit_radius, self.local_position,
                self.attitude[2], self.orbit_cw)
            self.roll_ff = self.lateral_autopilot.coordinated_turn_ff(
                self.airspeed_cmd, self.orbit_radius, self.orbit_cw)

        if self.scenario == Scenario.LATERAL:
            (self.roll_ff, self.yaw_cmd) = self.lateral_autopilot.path_manager(
                self.local_position, self.attitude[2], self.airspeed_cmd)

        if self.scenario == Scenario.FIXEDWING:
            (self.roll_ff, self.yaw_cmd, switch) = self.lateral_autopilot.waypoint_follower(
                self.waypoint_tuple, self.local_position[0:2], self.attitude[2], self.airspeed_cmd)
            if switch:
                # Advance the waypoint window; repeat the final waypoint
                # once the list is exhausted.
                if len(self.waypoints) == 0:
                    next_waypoint = self.waypoint_tuple[2]
                else:
                    next_waypoint = self.waypoints.pop(0)
                print('Adding waypoint: ', next_waypoint)
                self.waypoint_tuple = (self.waypoint_tuple[1], self.waypoint_tuple[2], next_waypoint)
                self.altitude_cmd = -self.waypoint_tuple[0][2]

        if self.scenario == Scenario.FLYINGCAR:
            # TODO: Insert Flying Car Scenario code here
            pass

    def init_scenario(self):
        """Set the commanded targets for the newly selected scenario."""
        if self.scenario == Scenario.SANDBOX:
            pass
        elif self.scenario == Scenario.ALTITUDE:
            print('Starting Altitude Hold Scenario')
            self.throttle_cmd = 0.66
            self.altitude_cmd = 450.0
        elif self.scenario == Scenario.AIRSPEED:
            print('Starting Airspeed Hold Scenario')
            self.elevator_cmd = 0.0
            self.airspeed_cmd = 41.0
        elif self.scenario == Scenario.CLIMB:
            print('Starting Climb Scenario')
            self.airspeed_cmd = 41.0
            self.throttle_cmd = 1.0
        elif self.scenario == Scenario.LONGITUDINAL:
            print('Starting Longitudinal Challenge')
            self.airspeed_cmd = 41.0
            self.gate_target = self.longitudinal_gates.pop(0)
            self.altitude_cmd = self.gate_target[1]
        elif self.scenario == Scenario.ROLL:
            print('Starting Stabilize Roll Scenario')
            self.airspeed_cmd = 41.0
            self.altitude_cmd = 450.0
            self.roll_cmd = 0.0
            self.rudder_cmd = 0.0
        elif self.scenario == Scenario.TURN:
            print('Starting Coordinated Turn Scenario')
            # BUG FIX: was `self.airpseed_cmd` (typo) which silently created
            # a dead attribute and left the real airspeed command unchanged.
            self.airspeed_cmd = 41.0
            self.altitude_cmd = 450.0
            self.roll_cmd = 45.0 * np.pi / 180.0
            self.sideslip_cmd = 0.0
        elif self.scenario == Scenario.YAW:
            print('Starting Yaw Hold Scenario')
            self.airspeed_cmd = 41.0
            self.altitude_cmd = 450.0
            self.yaw_cmd = 0.0
            self.sideslip_cmd = 0.0
            self.roll_ff = 0.0
        elif self.scenario == Scenario.LINE:
            print('Starting Line Following Scenario')
            self.airspeed_cmd = 41.0
            self.altitude_cmd = 450.0
            self.line_course = 0.0
            self.line_origin = np.array([0.0, 20.0, 450.0])
        elif self.scenario == Scenario.ORBIT:
            print('Starting Orbit Following Scenario')
            self.airspeed_cmd = 41.0
            self.altitude_cmd = 450.0
            self.orbit_radius = 500.0
            self.orbit_center = np.array([0.0, 500.0, -450.0])
            self.orbit_cw = True
        elif self.scenario == Scenario.LATERAL:
            print('Starting Lateral Challenge')
            self.airspeed_cmd = 41.0
            self.altitude_cmd = 450.0
        elif self.scenario == Scenario.FIXEDWING:
            print('Starting Fixed Wing Challenge')
            self.airspeed_cmd = 41.0
            # Seed the (previous, current, next) waypoint window
            prev_waypoint = self.waypoints.pop(0)
            curr_waypoint = self.waypoints.pop(0)
            next_waypoint = self.waypoints.pop(0)
            self.waypoint_tuple = (prev_waypoint, curr_waypoint, next_waypoint)
            self.altitude_cmd = -self.waypoint_tuple[0][2]
        elif self.scenario == Scenario.FLYINGCAR:
            # TODO: Insert Flying Car Scenario code here
            pass
        else:
            print('Invalid Scenario')
        return

    def run_scenario(self, scenario):
        """Manually start a scenario (used when launched from the CLI)."""
        self.scenario = scenario
        self.init_scenario()
        self.start()
if __name__ == "__main__":
    conn = MavlinkConnection('tcp:127.0.0.1:5760', threaded=False, PX4=False)
    drone = FixedWingProject(conn, tlog_directory="logs", tlog_name="tlog.txt")
    time.sleep(2)

    # Optional scenario argument, e.g. "-5": the leading character is
    # skipped and the remainder parsed as a number.
    scenario = 0
    if len(sys.argv) > 1:
        try:
            scenario = float(sys.argv[1][1:])
        except ValueError:
            # BUG FIX: was a bare `except:` (swallowing KeyboardInterrupt
            # etc.), and a failed parse left `scenario` undefined, causing
            # a NameError below; now we fall back to SANDBOX.
            print('Scenario argument must be a number')
    drone.run_scenario(Scenario(scenario))
| [
"plane_control.LateralAutoPilot",
"numpy.abs",
"plane_control.LongitudinalAutoPilot",
"numpy.arcsin",
"time.sleep",
"numpy.array",
"udacidrone.connection.MavlinkConnection",
"numpy.linalg.norm",
"plane_control.euler2RM"
] | [((14502, 14568), 'udacidrone.connection.MavlinkConnection', 'MavlinkConnection', (['"""tcp:127.0.0.1:5760"""'], {'threaded': '(False)', 'PX4': '(False)'}), "('tcp:127.0.0.1:5760', threaded=False, PX4=False)\n", (14519, 14568), False, 'from udacidrone.connection import MavlinkConnection\n'), ((14713, 14726), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14723, 14726), False, 'import time\n'), ((854, 877), 'plane_control.LongitudinalAutoPilot', 'LongitudinalAutoPilot', ([], {}), '()\n', (875, 877), False, 'from plane_control import LongitudinalAutoPilot\n'), ((911, 929), 'plane_control.LateralAutoPilot', 'LateralAutoPilot', ([], {}), '()\n', (927, 929), False, 'from plane_control import LateralAutoPilot\n'), ((1666, 1691), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1674, 1691), True, 'import numpy as np\n'), ((1720, 1745), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1728, 1745), True, 'import numpy as np\n'), ((3716, 3751), 'numpy.linalg.norm', 'np.linalg.norm', (['self.local_velocity'], {}), '(self.local_velocity)\n', (3730, 3751), True, 'import numpy as np\n'), ((1036, 1060), 'numpy.array', 'np.array', (['[200.0, 200.0]'], {}), '([200.0, 200.0])\n', (1044, 1060), True, 'import numpy as np\n'), ((1097, 1122), 'numpy.array', 'np.array', (['[1100.0, 300.0]'], {}), '([1100.0, 300.0])\n', (1105, 1122), True, 'import numpy as np\n'), ((1159, 1184), 'numpy.array', 'np.array', (['[1400.0, 280.0]'], {}), '([1400.0, 280.0])\n', (1167, 1184), True, 'import numpy as np\n'), ((1221, 1246), 'numpy.array', 'np.array', (['[2200.0, 200.0]'], {}), '([2200.0, 200.0])\n', (1229, 1246), True, 'import numpy as np\n'), ((1810, 1840), 'numpy.array', 'np.array', (['[0.0, 500.0, -400.0]'], {}), '([0.0, 500.0, -400.0])\n', (1818, 1840), True, 'import numpy as np\n'), ((1868, 1901), 'numpy.array', 'np.array', (['[2600.0, 500.0, -500.0]'], {}), '([2600.0, 500.0, -500.0])\n', (1876, 1901), True, 'import numpy as 
np\n'), ((1929, 1964), 'numpy.array', 'np.array', (['[2600.0, -2500.0, -400.0]'], {}), '([2600.0, -2500.0, -400.0])\n', (1937, 1964), True, 'import numpy as np\n'), ((1992, 2024), 'numpy.array', 'np.array', (['[100.0, 500.0, -450.0]'], {}), '([100.0, 500.0, -450.0])\n', (2000, 2024), True, 'import numpy as np\n'), ((2052, 2086), 'numpy.array', 'np.array', (['[100.0, -2000.0, -450.0]'], {}), '([100.0, -2000.0, -450.0])\n', (2060, 2086), True, 'import numpy as np\n'), ((2128, 2153), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2136, 2153), True, 'import numpy as np\n'), ((2186, 2211), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2194, 2211), True, 'import numpy as np\n'), ((2244, 2269), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2252, 2269), True, 'import numpy as np\n'), ((3808, 3870), 'plane_control.euler2RM', 'euler2RM', (['self.attitude[0]', 'self.attitude[1]', 'self.attitude[2]'], {}), '(self.attitude[0], self.attitude[1], self.attitude[2])\n', (3816, 3870), False, 'from plane_control import euler2RM\n'), ((4133, 4173), 'numpy.arcsin', 'np.arcsin', (['(side_velocity / self.airspeed)'], {}), '(side_velocity / self.airspeed)\n', (4142, 4173), True, 'import numpy as np\n'), ((9208, 9248), 'numpy.linalg.norm', 'np.linalg.norm', (['self.local_position[0:2]'], {}), '(self.local_position[0:2])\n', (9222, 9248), True, 'import numpy as np\n'), ((6839, 6863), 'numpy.abs', 'np.abs', (['self.gyro_raw[1]'], {}), '(self.gyro_raw[1])\n', (6845, 6863), True, 'import numpy as np\n'), ((13169, 13197), 'numpy.array', 'np.array', (['[0.0, 20.0, 450.0]'], {}), '([0.0, 20.0, 450.0])\n', (13177, 13197), True, 'import numpy as np\n'), ((13445, 13475), 'numpy.array', 'np.array', (['[0.0, 500.0, -450.0]'], {}), '([0.0, 500.0, -450.0])\n', (13453, 13475), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import FunctionTransformer, MinMaxScaler
from sklearn.svm import LinearSVR
from tpot.operators.preprocessors import ZeroCount
# NOTE: Make sure that the class is labeled 'class' in the data file
tpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)

# Drop the 'class' column to obtain the feature matrix.
class_index = tpot_data.dtype.names.index('class')
flat_view = tpot_data.view(np.float64).reshape(tpot_data.size, -1)
features = np.delete(flat_view, class_index, axis=1)

(training_features, testing_features,
 training_classes, testing_classes) = train_test_split(features, tpot_data['class'], random_state=42)

# TPOT-exported pipeline: zero-count features, a stacked ElasticNet output
# alongside the untouched features, min-max scaling, then a linear SVR.
stacked_estimator = VotingClassifier([("est", ElasticNet(alpha=0.96, l1_ratio=0.81))])
feature_union = make_union(stacked_estimator, FunctionTransformer(lambda X: X))
exported_pipeline = make_pipeline(
    ZeroCount(),
    feature_union,
    MinMaxScaler(),
    LinearSVR(C=25.0, dual=True),
)

exported_pipeline.fit(training_features, training_classes)
results = exported_pipeline.predict(testing_features)
| [
"sklearn.preprocessing.FunctionTransformer",
"sklearn.linear_model.ElasticNet",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MinMaxScaler",
"numpy.recfromcsv",
"tpot.operators.preprocessors.ZeroCount",
"sklearn.svm.LinearSVR"
] | [((453, 540), 'numpy.recfromcsv', 'np.recfromcsv', (['"""PATH/TO/DATA/FILE"""'], {'delimiter': '"""COLUMN_SEPARATOR"""', 'dtype': 'np.float64'}), "('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.\n float64)\n", (466, 540), True, 'import numpy as np\n'), ((738, 801), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', "tpot_data['class']"], {'random_state': '(42)'}), "(features, tpot_data['class'], random_state=42)\n", (754, 801), False, 'from sklearn.model_selection import train_test_split\n'), ((842, 853), 'tpot.operators.preprocessors.ZeroCount', 'ZeroCount', ([], {}), '()\n', (851, 853), False, 'from tpot.operators.preprocessors import ZeroCount\n'), ((977, 991), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (989, 991), False, 'from sklearn.preprocessing import FunctionTransformer, MinMaxScaler\n'), ((997, 1025), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {'C': '(25.0)', 'dual': '(True)'}), '(C=25.0, dual=True)\n', (1006, 1025), False, 'from sklearn.svm import LinearSVR\n'), ((938, 970), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['(lambda X: X)'], {}), '(lambda X: X)\n', (957, 970), False, 'from sklearn.preprocessing import FunctionTransformer, MinMaxScaler\n'), ((896, 933), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'alpha': '(0.96)', 'l1_ratio': '(0.81)'}), '(alpha=0.96, l1_ratio=0.81)\n', (906, 933), False, 'from sklearn.linear_model import ElasticNet\n')] |
#gen_func
from __future__ import division
from __future__ import absolute_import
import numpy as np
from libensemble.message_numbers import STOP_TAG, PERSIS_STOP
from libensemble.gen_funcs.support import sendrecv_mgr_worker_msg
def persistent_updater_after_likelihood(H, persis_info, gen_specs, libE_info):
    """Persistent generator: sample uniform batches, weighted by likelihood.

    Each iteration draws ``num_subbatches`` sub-batches of ``subbatch_size``
    points uniformly in ``[lb, ub]``, sends them to the manager, and uses the
    returned likelihoods to build importance weights for the next batch.
    Loops until the manager sends STOP_TAG or PERSIS_STOP.

    Returns the last output array ``O``, ``persis_info`` and the final tag.
    """
    ub = gen_specs['ub']
    lb = gen_specs['lb']
    n = len(lb)
    comm = libE_info['comm']
    subbatch_size = gen_specs['subbatch_size']
    num_subbatches = gen_specs['num_subbatches']

    # Receive information from the manager (or a STOP_TAG)
    batch = -1
    tag = None
    w = None  # importance weights from the previous batch (none yet)
    while tag not in [STOP_TAG, PERSIS_STOP]:
        batch += 1
        O = np.zeros(subbatch_size*num_subbatches, dtype=gen_specs['out'])
        # BUG FIX: replaced the fragile "'w' in vars()" existence check with
        # an explicit None sentinel.
        if w is not None:
            O['weight'] = w
        for j in range(num_subbatches):
            for i in range(subbatch_size):
                row = subbatch_size*j + i
                O['x'][row] = persis_info['rand_stream'].uniform(lb, ub, (1, n))
                O['subbatch'][row] = j
                O['batch'][row] = batch
                O['prior'][row] = np.random.randn()
                O['prop'][row] = np.random.randn()

        # Send data and get next assignment
        tag, Work, calc_in = sendrecv_mgr_worker_msg(comm, O)
        if calc_in is not None:
            w = O['prior'] + calc_in['like'] - O['prop']

    return O, persis_info, tag
| [
"numpy.zeros",
"libensemble.gen_funcs.support.sendrecv_mgr_worker_msg",
"numpy.random.randn"
] | [((686, 750), 'numpy.zeros', 'np.zeros', (['(subbatch_size * num_subbatches)'], {'dtype': "gen_specs['out']"}), "(subbatch_size * num_subbatches, dtype=gen_specs['out'])\n", (694, 750), True, 'import numpy as np\n'), ((1262, 1294), 'libensemble.gen_funcs.support.sendrecv_mgr_worker_msg', 'sendrecv_mgr_worker_msg', (['comm', 'O'], {}), '(comm, O)\n', (1285, 1294), False, 'from libensemble.gen_funcs.support import sendrecv_mgr_worker_msg\n'), ((1119, 1136), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1134, 1136), True, 'import numpy as np\n'), ((1170, 1187), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1185, 1187), True, 'import numpy as np\n')] |
'''
This code runs pre-trained MGN.
If you use this code please cite:
"Multi-Garment Net: Learning to Dress 3D People from Images", ICCV 2019
Code author: Bharat
'''
import tensorflow as tf
import numpy as np
import pickle as pkl # Python 3 change
from network.base_network import PoseShapeOffsetModel
from config_ver1 import config, NUM, IMG_SIZE, FACE
def pca2offsets(pca_layers, scatter_layers, pca_coeffs, naked_verts, vertexlabel, return_all=False):
    """Decode per-garment PCA coefficients into vertex positions and select,
    per vertex, the entry of the garment it is labelled with (label 0 picks
    the undressed body, which is prepended before the gather).
    """
    # Decode each garment's PCA coefficients and scatter onto the full mesh.
    garment_verts = [scatter(pca(coeff))
                     for pca, scatter, coeff in zip(pca_layers, scatter_layers, pca_coeffs)]

    stacked = tf.stack(garment_verts, axis=-1)
    # Prepend the naked body along the last axis so label 0 selects it.
    stacked = tf.concat(
        [tf.keras.backend.expand_dims(naked_verts, -1), stacked], axis=-1)
    per_label = tf.transpose(stacked, perm=[0, 1, 3, 2])

    selected = tf.batch_gather(per_label, tf.cast(vertexlabel, tf.int32))
    selected = tf.squeeze(tf.transpose(selected, perm=[0, 1, 3, 2]))

    if return_all:
        return selected, per_label
    return selected
def split_garments(pca, mesh, vertex_label, gar):
    '''
    Since garments are layered we do net get high frequency parts for invisible garment vertices.
    Hence we generate the base garment from pca predictions and add the hf term whenever available.

    :param pca: PCA coefficients for garment index `gar`
    :param mesh: full-body predicted Mesh to copy vertices from
    :param vertex_label: per-vertex mask/labels selecting this garment's
        visible vertices -- presumably boolean (caller passes
        `vertex_label[0] == gar`); TODO confirm
    :param gar: index into config.garmentKeys
    :return: Mesh restricted to this garment's template vertices

    NOTE(review): relies on the module-level globals `pca_verts`, `TEMPLATE`
    and `Mesh`, which are only defined inside the `__main__` block below --
    this function fails if imported and called from another module.
    '''
    vertex_label = vertex_label.reshape(-1,)
    # Low-frequency base garment reconstructed from the PCA space
    base = pca_verts[config.garmentKeys[gar]
                     ].inverse_transform(pca).reshape(-1, 3)
    # Template vertex indices belonging to this garment
    ind = np.where(TEMPLATE[config.garmentKeys[gar]][1])[0]
    gar_mesh = Mesh(mesh.v, mesh.f)
    # Start from the PCA base, then overwrite visible vertices with the
    # high-frequency prediction from the full mesh.
    gar_mesh.v[ind] = base
    gar_mesh.v[vertex_label] = mesh.v[vertex_label]
    gar_mesh.keep_vertices(ind)
    return gar_mesh
def get_results(m, inp, with_pose=False):
    """Run the MGN model on one input dict and assemble output meshes.

    :param m: PoseShapeOffsetModel (callable with [images, vertexlabel, J_2d])
    :param inp: dict with keys 'image_i', 'J_2d_i' (i in range(NUM)) and
        'vertexlabel'
    :param with_pose: unused here -- kept for interface compatibility
    :return: dict with 'garment_meshes' (per-garment Meshes), 'body' (naked
        body Mesh) and 'pca_mesh' (PCA-only reconstruction Mesh)
    """
    images = [inp['image_{}'.format(i)].astype('float32') for i in range(NUM)]
    J_2d = [inp['J_2d_{}'.format(i)].astype('float32') for i in range(NUM)]
    vertex_label = inp['vertexlabel'].astype('int64')
    out = m([images, vertex_label, J_2d])
    # Faces of the high-resolution SMPL template
    with open('assets/hresMapping.pkl', 'rb') as f:
        _, faces = pkl.load(f)
    pca_layers = [l.PCA_ for l in m.garmentModels]
    scatter_layers = m.scatters
    # Reorder to (garment, batch, coeff) for pca2offsets
    pca_coeffs = np.transpose(out['pca_verts'], [1, 0, 2])
    naked_verts = out['vertices_naked']
    # PCA-only reconstruction (no high-frequency offsets)
    temp = pca2offsets(pca_layers, scatter_layers, pca_coeffs,
                       naked_verts.numpy().astype('float32'), vertex_label)
    pred_mesh = Mesh(out['vertices_tposed'][0].numpy(), faces)
    pred_naked = Mesh(naked_verts[0].numpy(), faces)
    pred_pca = Mesh(temp[0].numpy(), faces)
    gar_meshes = []
    # np.where(inp['garments'][0])[0]:
    for gar in np.unique(inp['vertexlabel'][0]):
        # Label 0 is the bare body, not a garment
        if gar == 0:
            continue
        gar_meshes.append(split_garments(
            out['pca_verts'][0][gar-1], pred_mesh, vertex_label[0] == gar, gar - 1))
    return {'garment_meshes': gar_meshes, 'body': pred_naked, 'pca_mesh': pred_pca}
def load_model(model_dir):
    """Build the MGN network and restore the most recent checkpoint
    found in `model_dir` (prints a warning when none exists)."""
    model = PoseShapeOffsetModel(config, latent_code_garms_sz=int(
        config.latent_code_garms_sz / 2))

    # Create the models and optimizers.
    checkpoint = tf.train.Checkpoint(
        network=model,
        optimizer=model.optimizer,
        step=tf.Variable(0),
    )

    latest = tf.train.latest_checkpoint(model_dir)
    if latest:
        print('Using latest checkpoint at ' + latest)
    else:
        print('No pre-trained model found')

    # Restore variables on creation if a checkpoint exists.
    checkpoint.restore(latest)
    return model
def fine_tune(m, inp, out, display=False):
    """Fine-tune the network on one example in two stages.

    Stage 1 optimizes pose/translation only; stage 2 additionally optimizes
    shape (betas), PCA components and the offset latent code, with different
    loss weights.  Refactored: the two previously duplicated 25-line
    training loops now share the `_run_stage` / `_matching_var_names`
    helpers.  Interface and behavior are unchanged; the model is returned.

    :param m: PoseShapeOffsetModel
    :param inp: input dict (images, joints, vertex labels)
    :param out: target dict passed to `m.train`
    :param display: unused -- kept for interface compatibility
    """
    # Need to do a forward pass to get trainable variables
    images = [inp['image_{}'.format(i)].astype('float32') for i in range(NUM)]
    vertex_label = inp['vertexlabel'].astype('int64')
    J_2d = [inp['J_2d_{}'.format(i)].astype('float32') for i in range(NUM)]
    _ = m([images, vertex_label, J_2d])

    epochs = 50

    # Stage 1: optimize pose first
    var_substrings = ['pose_trans']
    losses_2d = {'rendered': 5 * 10. ** 3, 'laplacian': 5 * 10 ** 5.}
    for i in range(NUM):
        losses_2d['J_2d_{}'.format(i)] = 10 ** 3.
    _run_stage(m, inp, out, losses_2d,
               _matching_var_names(m, var_substrings), epochs)

    # Stage 2: also optimize shape, PCA components and latent offsets
    var_substrings.extend(
        ['pca_comp', 'betas', 'latent_code_offset_ShapeMerged', 'byPass'])
    losses_2d['laplacian'] = 5 * 10 ** 5.
    losses_2d['rendered'] = 5 * 10. ** 5
    for i in range(NUM):
        losses_2d['J_2d_{}'.format(i)] = 10.
    _run_stage(m, inp, out, losses_2d,
               _matching_var_names(m, var_substrings), epochs)

    return m


def _matching_var_names(m, substrings):
    """Names of m's trainable variables whose name contains any substring."""
    names = []
    for sub in substrings:
        for var in m.trainable_variables:
            if sub in var.name:
                names.append(var.name)
    return names


def _run_stage(m, inp, out, losses_2d, vars2opt, epochs):
    """Run `epochs` training steps, logging losses (J_2d terms averaged)."""
    for ep in range(epochs):
        lo = m.train(inp, out, loss_dict=losses_2d, vars2opt=vars2opt)
        J_2d = 0
        stri = ''
        for k in losses_2d:
            if 'J_2d' in k:
                J_2d += abs(lo[k])
                continue
            stri = stri + k + ' :{}, '.format(lo[k])
        stri = stri + 'J_2d' + ' :{}'.format(J_2d / NUM)
        print('Ep: {}, {}'.format(ep, stri))
if __name__ == "__main__":
    import os
    from os.path import exists, join, split
    from psbody.mesh import Mesh, MeshViewer, MeshViewers

    os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3'
    conf = tf.ConfigProto()
    conf.gpu_options.allow_growth = True
    tf.enable_eager_execution(config=conf)

    with open('assets/hresMapping.pkl', 'rb') as f:
        _, faces = pkl.load(f)
    with open('assets/allTemplate_withBoundaries_symm.pkl', 'rb') as f:
        TEMPLATE = pkl.load(f)

    pca_verts = {}
    for garment in config.garmentKeys:
        with open(os.path.join('assets/garment_basis_35_temp20', garment + '_param_{}_corrected.pkl'.format(config.PCA_)), 'rb') as f:
            pca_verts[garment] = pkl.load(f)

    model_dir = 'saved_model/'
    # Load model
    m = load_model(model_dir)

    # Load test data.  BUG FIX: the pickle was opened in text mode
    # (`open('assets/test_data.pkl')`), which fails under Python 3;
    # pickles must be read in binary mode.  All file handles are now
    # also closed via context managers.
    with open('assets/test_data.pkl', 'rb') as f:
        dat = pkl.load(f)

    # Get results before optimization
    pred = get_results(m, dat)
    mv = MeshViewers((1, 2), keepalive=True)
    mv[0][0].set_static_meshes(pred['garment_meshes'] + [pred['body']])
    mv[0][1].set_static_meshes([pred['body']])

    # Optimize the network
    m = fine_tune(m, dat, dat, display=False)
    pred = get_results(m, dat, )
    mv1 = MeshViewers((1, 2), keepalive=True)
    mv1[0][0].set_static_meshes(pred['garment_meshes'])
    mv1[0][1].set_static_meshes([pred['body']])
    print('Done')
| [
"psbody.mesh.Mesh",
"tensorflow.train.Checkpoint",
"numpy.transpose",
"tensorflow.transpose",
"tensorflow.stack",
"tensorflow.ConfigProto",
"tensorflow.cast",
"tensorflow.train.latest_checkpoint",
"numpy.where",
"pickle.load",
"tensorflow.Variable",
"tensorflow.enable_eager_execution",
"tens... | [((645, 669), 'tensorflow.stack', 'tf.stack', (['disps'], {'axis': '(-1)'}), '(disps, axis=-1)\n', (653, 669), True, 'import tensorflow as tf\n'), ((779, 816), 'tensorflow.transpose', 'tf.transpose', (['temp'], {'perm': '[0, 1, 3, 2]'}), '(temp, perm=[0, 1, 3, 2])\n', (791, 816), True, 'import tensorflow as tf\n'), ((1574, 1594), 'psbody.mesh.Mesh', 'Mesh', (['mesh.v', 'mesh.f'], {}), '(mesh.v, mesh.f)\n', (1578, 1594), False, 'from psbody.mesh import Mesh, MeshViewer, MeshViewers\n'), ((2228, 2269), 'numpy.transpose', 'np.transpose', (["out['pca_verts']", '[1, 0, 2]'], {}), "(out['pca_verts'], [1, 0, 2])\n", (2240, 2269), True, 'import numpy as np\n'), ((2696, 2728), 'numpy.unique', 'np.unique', (["inp['vertexlabel'][0]"], {}), "(inp['vertexlabel'][0])\n", (2705, 2728), True, 'import numpy as np\n'), ((3312, 3349), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_dir'], {}), '(model_dir)\n', (3338, 3349), True, 'import tensorflow as tf\n'), ((3505, 3541), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), '(**model_objects)\n', (3524, 3541), True, 'import tensorflow as tf\n'), ((5939, 5955), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5953, 5955), True, 'import tensorflow as tf\n'), ((6003, 6041), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {'config': 'conf'}), '(config=conf)\n', (6028, 6041), True, 'import tensorflow as tf\n'), ((6706, 6741), 'psbody.mesh.MeshViewers', 'MeshViewers', (['(1, 2)'], {'keepalive': '(True)'}), '((1, 2), keepalive=True)\n', (6717, 6741), False, 'from psbody.mesh import Mesh, MeshViewer, MeshViewers\n'), ((6987, 7022), 'psbody.mesh.MeshViewers', 'MeshViewers', (['(1, 2)'], {'keepalive': '(True)'}), '((1, 2), keepalive=True)\n', (6998, 7022), False, 'from psbody.mesh import Mesh, MeshViewer, MeshViewers\n'), ((852, 882), 'tensorflow.cast', 'tf.cast', (['vertexlabel', 'tf.int32'], {}), '(vertexlabel, tf.int32)\n', (859, 882), True, 'import 
tensorflow as tf\n'), ((907, 944), 'tensorflow.transpose', 'tf.transpose', (['temp'], {'perm': '[0, 1, 3, 2]'}), '(temp, perm=[0, 1, 3, 2])\n', (919, 944), True, 'import tensorflow as tf\n'), ((1508, 1554), 'numpy.where', 'np.where', (['TEMPLATE[config.garmentKeys[gar]][1]'], {}), '(TEMPLATE[config.garmentKeys[gar]][1])\n', (1516, 1554), True, 'import numpy as np\n'), ((2111, 2122), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (2119, 2122), True, 'import pickle as pkl\n'), ((3270, 3284), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {}), '(0)\n', (3281, 3284), True, 'import tensorflow as tf\n'), ((6117, 6128), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (6125, 6128), True, 'import pickle as pkl\n'), ((703, 748), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['naked_verts', '(-1)'], {}), '(naked_verts, -1)\n', (731, 748), True, 'import tensorflow as tf\n'), ((6454, 6465), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (6462, 6465), True, 'import pickle as pkl\n')] |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from climateeconomics.core.core_witness.climateeco_discipline import ClimateEcoDiscipline
from climateeconomics.core.core_witness.macroeconomics_model import MacroEconomics
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
import pandas as pd
import numpy as np
from copy import deepcopy
class InvestDiscipline(ClimateEcoDiscipline):
    """Coupling discipline matching the macroeconomy energy investment with the
    energy-mix energy investment.

    The normalised mismatch between the two investment trajectories is exposed
    either as an objective to minimise (``formulation='objective'``) or as an
    inequality constraint (``formulation='constraint'``).
    """
    # ontology information
    _ontology_data = {
        'label': 'WITNESS Investissement Model',
        'type': 'Research',
        'source': 'SoSTrades Project',
        'validated': '',
        'validated_by': 'SoSTrades Project',
        'last_modification_date': '',
        'category': '',
        'definition': '',
        'icon': '',
        'version': '',
    }
    _maturity = 'Research'
    years = np.arange(2020, 2101)
    DESC_IN = {
        'energy_investment_macro': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_witness'},
        'energy_investment': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_energy_mix'},
        'invest_norm': {'type': 'float', 'default': 10.0},
        # fix: the key was misspelled 'possile_values', so the allowed values
        # were never enforced by the framework
        'formulation': {'type': 'string', 'default': 'objective', 'possible_values': ['objective', 'constraint']},
        'max_difference': {'type': 'float', 'default': 1.0e-1},
    }
    DESC_OUT = {
        'invest_objective': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_witness'},
        'diff_norm': {'type': 'array'}
    }

    def run(self):
        """Compute the normalised investment mismatch and store the outputs.

        Stores:
            invest_objective: 1-row dataframe with the objective/constraint value.
            diff_norm: the raw normalised L2 difference between the two
                investment trajectories.
        """
        # Get inputs
        inputs = self.get_sosdisc_inputs()
        difference = np.linalg.norm(inputs['energy_investment_macro']['energy_investment'].values -
                                    inputs['energy_investment']['energy_investment'].values) / inputs['invest_norm']
        if inputs['formulation'] == 'objective':
            invest_objective = difference
        elif inputs['formulation'] == 'constraint':
            # constraint formulation: positive when the mismatch is acceptable
            invest_objective = inputs['max_difference'] - difference
        else:
            # fix: an unknown formulation previously caused an
            # UnboundLocalError on invest_objective below
            raise ValueError(
                f"Unknown formulation '{inputs['formulation']}', expected 'objective' or 'constraint'")
        # Store output data
        dict_values = {'invest_objective': pd.DataFrame(
            {'norm': [invest_objective]}),
            'diff_norm': difference}
        self.store_sos_outputs_values(dict_values)

    def compute_sos_jacobian(self):
        """
        Compute jacobian for each coupling variable
        gradiant of coupling variable to compute
        """
        inputs = self.get_sosdisc_inputs()
        # d(||a - b|| / norm)/da = (a - b) / ||a - b|| / norm
        #                        = (a - b) / diff_norm / norm**2
        # fix: the denominator previously used 'invest_objective', which only
        # equals diff_norm in the 'objective' formulation; in the 'constraint'
        # formulation both the denominator and the sign were wrong.
        # NOTE(review): diff_norm == 0 (perfectly matched investments) would
        # still divide by zero here, as in the original code — confirm upstream.
        difference = self.get_sosdisc_outputs('diff_norm')
        sign = 1.0 if inputs['formulation'] == 'objective' else -1.0
        dinvestment = sign * (inputs['energy_investment_macro']['energy_investment'].values -
                              inputs['energy_investment']['energy_investment'].values) / difference / inputs['invest_norm']**2
        self.set_partial_derivative_for_other_types(
            ('invest_objective', 'norm'), ('energy_investment_macro', 'energy_investment'), dinvestment)  # Invest from T$ to G$
        self.set_partial_derivative_for_other_types(
            ('invest_objective', 'norm'), ('energy_investment', 'energy_investment'), -dinvestment)  # Invest from T$ to G$

    def get_chart_filter_list(self):
        """Return the single chart filter exposed by this discipline."""
        # For the outputs, making a graph for tco vs year for each range and for specific
        # value of ToT with a shift of five year between then
        chart_filters = []
        chart_list = ['Difference of investments']
        # First filter to deal with the view : program or actor
        chart_filters.append(ChartFilter(
            'Charts', chart_list, chart_list, 'charts'))
        return chart_filters

    def get_post_processing_list(self, chart_filters=None):
        """Build the post-processing charts selected by ``chart_filters``.

        When no filter is given, all charts are produced.
        """
        instanciated_charts = []
        # fix: chart_list was previously undefined when chart_filters is None,
        # raising a NameError below; default to showing every chart
        chart_list = ['Difference of investments']
        # Overload default value with chart filter
        if chart_filters is not None:
            for chart_filter in chart_filters:
                if chart_filter.filter_key == 'charts':
                    chart_list = chart_filter.selected_values
        if 'Difference of investments' in chart_list:
            energy_investment_macro = self.get_sosdisc_inputs(
                'energy_investment_macro')
            energy_investment = self.get_sosdisc_inputs('energy_investment')
            years = list(energy_investment_macro['years'].values)
            year_start = years[0]
            year_end = years[len(years) - 1]
            chart_name = 'Energy investments between macroeconomy output and energy input'
            new_chart = TwoAxesInstanciatedChart(
                'years', 'Investments', chart_name=chart_name)
            energy_investment_series = InstanciatedSeries(
                years, list(energy_investment['energy_investment'].values), 'energy investment (energy)', 'lines')
            new_chart.series.append(energy_investment_series)
            energy_investment_macro_series = InstanciatedSeries(
                years, list(energy_investment_macro['energy_investment'].values), 'energy_investment (macroeconomy)', 'lines')
            new_chart.series.append(energy_investment_macro_series)
            instanciated_charts.append(new_chart)
            norm = self.get_sosdisc_outputs('diff_norm')
            chart_name = 'Differences between energy investments'
            new_chart = TwoAxesInstanciatedChart(
                'years', 'Differences of investments', chart_name=chart_name)
            energy_investment_series = InstanciatedSeries(
                years, list(energy_investment_macro['energy_investment'].values - energy_investment['energy_investment'].values), '', 'lines')
            new_chart.series.append(energy_investment_series)
            instanciated_charts.append(new_chart)
        return instanciated_charts
| [
"pandas.DataFrame",
"sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter",
"numpy.linalg.norm",
"numpy.arange",
"sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart"
] | [((1555, 1576), 'numpy.arange', 'np.arange', (['(2020)', '(2101)'], {}), '(2020, 2101)\n', (1564, 1576), True, 'import numpy as np\n'), ((2340, 2480), 'numpy.linalg.norm', 'np.linalg.norm', (["(inputs['energy_investment_macro']['energy_investment'].values - inputs[\n 'energy_investment']['energy_investment'].values)"], {}), "(inputs['energy_investment_macro']['energy_investment'].\n values - inputs['energy_investment']['energy_investment'].values)\n", (2354, 2480), True, 'import numpy as np\n'), ((2830, 2872), 'pandas.DataFrame', 'pd.DataFrame', (["{'norm': [invest_objective]}"], {}), "({'norm': [invest_objective]})\n", (2842, 2872), True, 'import pandas as pd\n'), ((4252, 4307), 'sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter', 'ChartFilter', (['"""Charts"""', 'chart_list', 'chart_list', '"""charts"""'], {}), "('Charts', chart_list, chart_list, 'charts')\n", (4263, 4307), False, 'from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter\n'), ((5391, 5462), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart', 'TwoAxesInstanciatedChart', (['"""years"""', '"""Investments"""'], {'chart_name': 'chart_name'}), "('years', 'Investments', chart_name=chart_name)\n", (5415, 5462), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((6196, 6287), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart', 'TwoAxesInstanciatedChart', (['"""years"""', '"""Differences of investments"""'], {'chart_name': 'chart_name'}), "('years', 'Differences of investments', chart_name=\n chart_name)\n", (6220, 6287), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n')] |
from __future__ import division, print_function, absolute_import
from . import util
import numpy as np
class PatternFilterer(object):
    """Base class for objects that split 'patterns' into two groups: the
    patterns that pass the filter and the patterns that get filtered out.

    Subclasses implement __call__ returning a (passing, filtered) tuple.
    """

    def __call__(self, patterns):
        raise NotImplementedError()

    def chain(self, pattern_filterer):
        """Compose this filterer with another; patterns must pass both."""
        def chained(patterns):
            kept_first, dropped_first = self(patterns)
            kept_second, dropped_second = pattern_filterer(kept_first)
            dropped_total = list(dropped_first) + list(dropped_second)
            # sanity check to make sure no patterns got lost
            assert len(dropped_total) + len(kept_second) == len(patterns)
            return (kept_second, dropped_total)
        return FuncPatternFilterer(function=chained)
class FuncPatternFilterer(PatternFilterer):
    """PatternFilterer whose partitioning logic is supplied as a callable.

    The callable receives the list of patterns and must return a
    (passing_patterns, filtered_patterns) tuple.
    """
    def __init__(self, function):
        # function: callable mapping patterns -> (passing, filtered)
        self.function = function
    def __call__(self, patterns):
        return self.function(patterns)
class ConditionPatternFilterer(PatternFilterer):
    """PatternFilterer driven by a per-pattern boolean predicate.

    Subclasses implement _condition; patterns for which it returns True are
    kept, all others are filtered out.
    """

    def _condition(self, pattern):
        raise NotImplementedError()

    def __call__(self, patterns):
        kept, dropped = [], []
        for candidate in patterns:
            # evaluate the predicate exactly once per pattern
            bucket = kept if self._condition(candidate) else dropped
            bucket.append(candidate)
        return (kept, dropped)
class MinSeqletSupportFilterer(ConditionPatternFilterer):
    """Filter out patterns backed by fewer than min_seqlet_support seqlets."""

    def __init__(self, min_seqlet_support):
        self.min_seqlet_support = min_seqlet_support

    def _condition(self, pattern):
        seqlet_count = len(pattern.seqlets)
        return seqlet_count >= self.min_seqlet_support
class MinICinWindow(ConditionPatternFilterer):
    """Filter out patterns whose best window of ``window_size`` positions has
    total information content below ``min_ic_in_window``.

    (The previous class comment was copy-pasted from MinSeqletSupportFilterer.)
    """
    def __init__(self, window_size, min_ic_in_window, background,
                       sequence_track_name,
                       ppm_pseudocount):
        self.window_size = window_size
        self.min_ic_in_window = min_ic_in_window
        self.background = background
        self.sequence_track_name = sequence_track_name
        # fix: the ppm_pseudocount argument was previously ignored and the
        # pseudocount was hard-coded to 0.001; callers that passed 0.001 are
        # unaffected, callers passing other values now get what they asked for
        self.ppm_pseudocount = ppm_pseudocount

    def _condition(self, pattern):
        ppm = pattern[self.sequence_track_name].fwd
        #compute per-position ic for the pattern
        per_position_ic = util.compute_per_position_ic(
            ppm=ppm, background=self.background,
            pseudocount=self.ppm_pseudocount)
        if (len(per_position_ic) < self.window_size):
            # motif shorter than the window: fall back to the total IC
            print("WARNING: motif length is < window_size")
            return np.sum(per_position_ic) >= self.min_ic_in_window
        else:
            #do the sliding window sum rearrangement
            windowed_ic = np.sum(util.rolling_window(
                    a=per_position_ic, window=self.window_size),
                    axis=-1)
            # pass if any window reaches the IC threshold
            return np.max(windowed_ic) >= self.min_ic_in_window
| [
"numpy.max",
"numpy.sum"
] | [((2859, 2882), 'numpy.sum', 'np.sum', (['per_position_ic'], {}), '(per_position_ic)\n', (2865, 2882), True, 'import numpy as np\n'), ((3161, 3180), 'numpy.max', 'np.max', (['windowed_ic'], {}), '(windowed_ic)\n', (3167, 3180), True, 'import numpy as np\n')] |
from pyoptsolver import OptProblem, OptSolver, OptConfig
import numpy as np
class TestFunction2(OptProblem):
    """The same problem but different style.

    Rosenbrock objective with two constraints:
        min  100*(x1 - x0^2)^2 + (1 - x0)^2
        s.t. 1 <= x0*x1,  0 <= x0 + x1^2,  x0 <= 0.5
    """
    def __init__(self):
        # 2 variables, 2 constraints, 4 nonzero Jacobian entries
        OptProblem.__init__(self, 2, 2, 4)
        self.set_lb([1., 0.])           # constraint lower bounds
        self.set_ub([1e20, 1e20])        # constraint upper bounds
        self.set_xlb([-1e20, -1e20])     # variable lower bounds
        self.set_xub([0.5, 1e20])        # variable upper bounds

    def __cost__(self, x):
        """The eval_f function required by ipopt.

        :param x: a guess/solution of the problem
        :return f: float, objective function
        """
        return 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2

    def __gradient__(self, x, g):
        """Evaluation of the gradient of objective function.

        :param x: guess/solution to the problem
        :param g: the gradient of objective function w.r.t x to be written into
        """
        v1 = 200 * (x[1] - x[0] ** 2)
        # d/dx0 = -400*x0*(x1 - x0^2) - 2*(1 - x0) = -2*x0*v1 + 2*x0 - 2
        # fix: the constant term was previously -1, an incorrect derivative
        # of (1 - x0)^2
        g[:] = [-2 * x[0] * v1 + 2 * x[0] - 2, v1]
        return True

    def __constraint__(self, x, f):
        """Evaluate constraint function.

        :param x: guess/solution to the problem
        :param f: constraints ready to be written upon
        """
        f[:] = np.array([x[0] * x[1], x[0] + x[1] * x[1]])
        return 1

    def __jacobian__(self, x, g, row, col, rec):
        """Evaluate the Jacobian of the constraints.

        :param x: guess/solution to the problem
        :param g: the vector being written on for Jacobian entries
        :param row, col: sparse coordinate buffers, filled when rec is true
        """
        if rec:
            row[:] = [0, 0, 1, 1]
            col[:] = [0, 1, 0, 1]
        g[:] = [x[1], x[0], 1, 2 * x[1]]
        return 1
if __name__ == "__main__":
    # Demonstrate the OptProblem-subclass style with the KNITRO backend.
    print('\n\n Test another style\n\n')
    problem = TestFunction2()
    # numeric keys are raw KNITRO option codes
    knitro_options = {'1014': 20, '1023': 1e-4, '1027': 1e-4, '1016': 2, 'history': True}
    solver_config = OptConfig(backend='knitro', **knitro_options)
    print('solve with random guess')
    solver = OptSolver(problem, solver_config)
    result = solver.solve_rand()
    print(result.flag, result.obj, result.sol)
    print('solve with provided guess')
    result = solver.solve_guess([0.3, 0.4])
    print(result.flag, result.obj, result.sol, result.history)
    print('solve with auto guess')
    result = solver.solve_guess(None)
    print(result.flag, result.obj, result.sol, result.history)
| [
"pyoptsolver.OptSolver",
"numpy.array",
"pyoptsolver.OptProblem.__init__",
"pyoptsolver.OptConfig"
] | [((1808, 1846), 'pyoptsolver.OptConfig', 'OptConfig', ([], {'backend': '"""knitro"""'}), "(backend='knitro', **options)\n", (1817, 1846), False, 'from pyoptsolver import OptProblem, OptSolver, OptConfig\n'), ((1897, 1920), 'pyoptsolver.OptSolver', 'OptSolver', (['prob', 'config'], {}), '(prob, config)\n', (1906, 1920), False, 'from pyoptsolver import OptProblem, OptSolver, OptConfig\n'), ((190, 224), 'pyoptsolver.OptProblem.__init__', 'OptProblem.__init__', (['self', '(2)', '(2)', '(4)'], {}), '(self, 2, 2, 4)\n', (209, 224), False, 'from pyoptsolver import OptProblem, OptSolver, OptConfig\n'), ((1174, 1217), 'numpy.array', 'np.array', (['[x[0] * x[1], x[0] + x[1] * x[1]]'], {}), '([x[0] * x[1], x[0] + x[1] * x[1]])\n', (1182, 1217), True, 'import numpy as np\n')] |
import argparse
import os
import sys
from collections import defaultdict
import gym
import matplotlib.pyplot as plt
import numpy as np
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import active_reward_learning
from active_reward_learning.util.helpers import get_dict_default
from active_reward_learning.util.plotting import plot_result_percentiles, set_plot_style
from active_reward_learning.util.results import FileExperimentResults
# Per-acquisition-function plot styling. Keys are acquisition-function labels
# as produced by get_af_label(); unknown labels fall back to the defaults
# handled in main() / the defaultdicts below.
AF_COLORS = {
    "random": "#4daf4a",
    "variance": "#984ea3",
    "true reward": "#377eb8",
    "idrl": "#e41a1c",
    "idrl_m1000_p100_rollout_cand": "#e41a1c",
    "idrl_m1000_p100": "#1a81e4",
}
AF_MARKERS = {
    "random": "s",
    "variance": "^",
    "true reward": "x",
    "idrl": "o",
    "idrl_m1000_p100_rollout_cand": "o",
    "idrl_m1000_p100": "v",
}
AF_LINESTYLE = {
    "random": "-",
    "variance": "-",
    "true reward": "-",
    "idrl": "-",
    "idrl_m1000_p100_rollout_cand": "-",
    "idrl_m1000_p100": "--",
}
# AF_COLORS = defaultdict(lambda: "blue", AF_COLORS)
# Colors assigned on the fly (in main) to acquisition functions not listed in
# AF_COLORS; NEW_COLOR_COUNT indexes cyclically into NEW_COLORS.
NEW_COLOR_COUNT = 0
NEW_COLORS = [
    "#e41a1c",
    "#377eb8",
    "#4daf4a",
    "#984ea3",
    "#ff7f00",
    "#ffff33",
    "#a65628",
    "#f781bf",
    "#999999",
]
AF_ALPHA = {
    "idrl": 1.0,
    "idrl_m1000_p100": 0.6,
    "idrl_m1000_p100_rollout_cand": 1.0,
}
AF_ALPHA = defaultdict(lambda: 0.6, AF_ALPHA)  # 0.6 before
AF_ZORDER = {
    "idrl": 2,
    "idrl_m1000_p100": 1,
    "idrl_m1000_p100_rollout_cand": 2,
}
AF_ZORDER = defaultdict(lambda: 1, AF_ZORDER)
# Per-environment reference returns used to normalise scores in main() when
# --average_envs is set (score = 100 * (r - random) / (expert - random)).
# returns are averaged over 1000 episodes
RANDOM_POLICY_RETURNS = {
    "HalfCheetah-Long-v3": -287.3389870083097,
    "Hopper-Long-v3": -46.1818188934309,
    "Walker2d-Long-v3": -6.772750336937592,
    "Swimmer-Long-v3": 0.5256574172271935,
    "Ant-Long-v3": -349.3281947598893,
    "Reacher-v2": -43.23157686737403,
    "InvertedPendulum-Penalty-Long-v2": -1447.4255937526677,
    "InvertedDoublePendulum-Penalty-Long-v2": -8020.161758098083,
}
EXPERT_POLICY_RETURNS = {
    "HalfCheetah-Long-v3": 14477.754,
    "Hopper-Long-v3": 3714.0933,
    "Walker2d-Long-v3": 5563.308,
    "Swimmer-Long-v3": 95.490906,
    "Ant-Long-v3": 7604.946,
    "Reacher-v2": -3.4786558,
    "InvertedPendulum-Penalty-Long-v2": 999.92804,
    "InvertedDoublePendulum-Penalty-Long-v2": 11316.3,
}
def read_tb_events(dirname):
    """Read a TensorBoard event file.

    Args:
        dirname (str): Path to Tensorboard log

    Returns:
        dict: A dictionary of containing scalar run data with keys like
            'train/loss', 'train/mae', 'val/loss', etc.; each value is a dict
            with 'steps' and 'values' numpy arrays of equal length.
    """
    accumulator = EventAccumulator(dirname).Reload()
    scalar_tags = accumulator.Tags()["scalars"]
    series = {}
    for tag in scalar_tags:
        scalar_events = accumulator.Scalars(tag)
        series[tag] = {
            "steps": np.array([event.step for event in scalar_events]),
            "values": np.array([event.value for event in scalar_events]),
        }
    return series
def add_results(tb_logs_dict, env_id, af, result):
    """Append ``result`` under ``tb_logs_dict[env_id][af]``, creating the
    nested dict/list entries on first use. Mutates and returns the dict."""
    env_entry = tb_logs_dict.setdefault(env_id, {})
    env_entry.setdefault(af, []).append(result)
    return tb_logs_dict
def get_af_label(experiment):
    """Build a descriptive label for an experiment's acquisition function from
    its config (model/policy update counts, exploration settings, flags)."""
    config = experiment.config
    af = config["acquisition_function"]
    schedule_update_every = get_dict_default(config, "schedule_update_every", None)
    n_model_updates = get_dict_default(config, "n_model_updates", None)
    n_policy_updates = get_dict_default(config, "n_policy_updates", None)
    exploration_model_path = get_dict_default(config, "exploration_model_path", None)
    exploration_sigma = get_dict_default(config, "exploration_sigma", None)
    exploration_eps = get_dict_default(config, "exploration_eps", None)
    rollout_cand = get_dict_default(
        config, "rollout_candidate_policies_for_exploration", False
    )
    reinit = get_dict_default(config, "reinitialize_candidate_policies", False)
    if schedule_update_every is not None:
        # derive the number of model updates from the update schedule
        total_timesteps = get_dict_default(config, "total_timesteps", 0)
        n_model_updates = int(total_timesteps // schedule_update_every)
    suffixes = []
    if n_model_updates is not None:
        suffixes.append(f"_m{n_model_updates}")
    if n_policy_updates is not None:
        suffixes.append(f"_p{n_policy_updates}")
    if exploration_model_path is not None:
        suffixes.append("_expl_")
    if exploration_sigma is not None:
        suffixes.append(f"_sigma_{exploration_sigma}")
    if exploration_eps is not None:
        suffixes.append(f"_eps_{exploration_eps}")
    if rollout_cand:
        suffixes.append("_rollout_cand_")
    if reinit:
        suffixes.append("_reinit")
    return (af + "".join(suffixes)).strip("_")
def parse_args():
    """Parse the command-line arguments of the plotting script."""
    parser = argparse.ArgumentParser()
    # (name, kwargs) specs, kept in the original order so --help is unchanged
    arg_specs = [
        ("tb_folder", dict(type=str)),
        ("--plot_folder", dict(type=str, default="plots/drlhp")),
        ("--csv_baseline", dict(type=str, default=None)),
        ("--baseline_label", dict(type=str, default="Baseline")),
        ("--mean", dict(action="store_true")),
        ("--stderr", dict(action="store_true")),
        ("--average_envs", dict(action="store_true")),
        ("--results_folder", dict(type=str, default=None)),
        ("--aspect", dict(type=float, default=1.15)),
        ("--n_markers", dict(type=int, default=10)),
        ("--marker_size", dict(type=int, default=11)),
        ("--no_true_reward_plot", dict(action="store_true")),
        ("--no_legend", dict(action="store_true")),
        ("--no_title", dict(action="store_true")),
        ("--paper_style", dict(action="store_true")),
        ("--pdf", dict(action="store_true")),
    ]
    for name, kwargs in arg_specs:
        parser.add_argument(name, **kwargs)
    return parser.parse_args()
def main():
    """Collect TensorBoard scalar logs, group them by environment and
    acquisition function, and plot (optionally normalised/averaged) learning
    curves per environment into --plot_folder."""
    args = parse_args()
    os.makedirs(args.plot_folder, exist_ok=True)
    # tb_logs_dict[env_id][af_label] -> list of tensorboard log dirs
    tb_logs_dict = dict()
    # Phase 1a: if a results folder is given, use the experiment metadata it
    # contains to find the matching tensorboard logs and label them.
    if args.results_folder is not None:
        for x in os.walk(args.results_folder):
            subdir = x[0]
            try:
                experiment = FileExperimentResults(subdir)
            except:
                print("Could not open", subdir)
                continue
            if experiment.info is not None:
                tb_log_path = os.path.basename(experiment.info["tb_log"])
                tb_log_path = os.path.join(args.tb_folder, tb_log_path)
                if os.path.exists(tb_log_path):
                    af_label = get_af_label(experiment)
                    env_id = experiment.config["env_id"]
                    tb_logs_dict = add_results(
                        tb_logs_dict, env_id, af_label, tb_log_path
                    )
                else:
                    print("tb_log does not exist", tb_log_path)
            else:
                print("Could not read info of", subdir)
    # Phase 1b: scan the tb folder itself; directory names encode env/af as
    # "tb_log_<env>_<af>..." or "sac_<env>..." (true-reward baselines).
    for x in os.walk(args.tb_folder):
        subdir = x[0]
        print(subdir)
        dir = os.path.basename(subdir)
        if dir.startswith("tb_log"):
            if args.results_folder is not None:
                continue
            else:
                dir = dir[3:]
        elif dir.startswith("sac") and not args.no_true_reward_plot:
            if args.results_folder is not None:
                env_id = dir.split("_")[1]
                if not env_id in tb_logs_dict:
                    # only load true reward runs that are in results folder as well (if given)
                    continue
        else:
            continue
        dir = dir.split("_")
        env_id = dir[1]
        if dir[0] == "sac":
            af = "true reward"
        else:
            af = dir[2]
        tb_logs_dict = add_results(tb_logs_dict, env_id, af, subdir)
    # Debug dump of everything that was discovered.
    for env_id in tb_logs_dict.keys():
        print(env_id)
        for af in tb_logs_dict[env_id].keys():
            print(" " * 4, af)
            print(" " * 8, tb_logs_dict[env_id][af])
    # Phase 2: read the scalar series out of each log.
    # results[env_id][af] -> (list of step arrays, list of return arrays)
    results = dict()
    for env_id in tb_logs_dict.keys():
        print(env_id)
        results[env_id] = dict()
        env = gym.make(env_id)
        # episode length; used to convert per-step rewards to returns below
        L = env.spec.max_episode_steps
        for af in tb_logs_dict[env_id].keys():
            print(" " * 4, af)
            steps_list, rew_list = [], []
            for path in set(tb_logs_dict[env_id][af]):
                print(" " * 8, path)
                data = read_tb_events(path)
                if af == "true reward":
                    if "rollout/ep_rew_mean" in data and not args.no_true_reward_plot:
                        steps = data["rollout/ep_rew_mean"]["steps"]
                        rew = data["rollout/ep_rew_mean"]["values"]
                    else:
                        continue
                else:
                    if "rollout/ep_avg_true_ret_mean" in data:
                        steps = data["rollout/ep_avg_true_ret_mean"]["steps"]
                        rew = data["rollout/ep_avg_true_ret_mean"]["values"]
                    elif "rollout/ep_avg_true_rew_mean" in data:
                        ## old runs logged average per step reward (new ones record return)
                        steps = data["rollout/ep_avg_true_rew_mean"]["steps"]
                        rew = data["rollout/ep_avg_true_rew_mean"]["values"] * L
                    else:
                        continue
                # DL: this fixes an issue where multiple tensorboard logs are written
                # to the same log
                # this should be fixed in the drlhp code now, but it is
                # still a problem in some of the results
                # (a step going backwards marks the start of a new run; each
                # monotone segment is treated as a separate curve)
                if np.max(steps) > 1e2:
                    idx = []
                    for i in range(1, len(steps)):
                        if steps[i] >= steps[i - 1]:
                            idx.append(i)
                        else:
                            steps_list.append(steps[idx])
                            rew_list.append(rew[idx])
                            idx = [i]
                    if len(idx) > 1:
                        steps_list.append(steps[idx])
                        rew_list.append(rew[idx])
            if len(steps_list) > 0 and len(rew_list) > 0:
                results[env_id][af] = (steps_list, rew_list)
    # Phase 3 (optional): normalise returns per environment to a 0-100 score
    # and merge all environments into a single pseudo-environment "All-Envs".
    # Only afs present in every environment are kept.
    if args.average_envs:
        envs = results.keys()
        afs = [set(results[env_id].keys()) for env_id in results.keys()]
        all_afs = afs[0].union(*afs[1:])
        afs = afs[0].intersection(*afs[1:])
        min_returns = dict()
        new_results = {"All-Envs": dict()}
        for af in afs:
            steps_list_all, rew_list_all = [], []
            for env_id in envs:
                random_return = RANDOM_POLICY_RETURNS[env_id]
                expert_return = EXPERT_POLICY_RETURNS[env_id]
                steps_list, rew_list = results[env_id][af]
                new_rew_list = [
                    100 * (rewards - random_return) / (expert_return - random_return)
                    for rewards in rew_list
                ]
                steps_list_all += steps_list
                rew_list_all += new_rew_list
            new_results["All-Envs"][af] = (steps_list_all, rew_list_all)
        results = new_results
    if args.paper_style:
        set_plot_style()
    # Phase 4: one figure per environment.
    for env_id in results.keys():
        height, width = plt.figaspect(args.aspect)
        fig = plt.figure(figsize=(width, height))
        legend_handles = []
        legend_labels = []
        for af in results[env_id].keys():
            steps_list, rew_list = results[env_id][af]
            if af not in AF_COLORS:
                af_short = af.split("_")[0]
                afs_short = [af_.split("_")[0] for af_ in results[env_id].keys()]
                if afs_short.count(af_short) == 1 and af_short in AF_COLORS:
                    # af only used with one set of parameters
                    # (not the case during hyperparameter tuning)
                    af = af_short
                else:
                    # assign a fresh color to an unknown af label
                    global NEW_COLOR_COUNT
                    AF_COLORS[af] = NEW_COLORS[NEW_COLOR_COUNT % len(NEW_COLORS)]
                    NEW_COLOR_COUNT += 1
            color = get_dict_default(AF_COLORS, af, "b")
            linestyle = get_dict_default(AF_LINESTYLE, af, "-")
            if args.n_markers > 0:
                marker = get_dict_default(AF_MARKERS, af, None)
                markers_every = len(steps_list[0]) // args.n_markers
            else:
                marker = None
                markers_every = 1
            if args.mean:
                # truncate all runs to the shortest and average across them
                l = min(min(map(len, steps_list)), min(map(len, rew_list)))
                steps_list = [s[:l] for s in steps_list]
                rew_list = [r[:l] for r in rew_list]
                steps = steps_list[0]  # np.mean(steps_list, 0)
                rew = np.mean(rew_list, 0)
                all_steps_same = np.all(
                    [
                        np.all(steps_list[i] == steps_list[0])
                        for i in range(1, len(steps_list))
                    ]
                )
                label = f"{af}_mean_over_{len(steps_list)}"
                if args.paper_style:
                    label = label.replace("_", "\_")
                if not all_steps_same:
                    print("Warning: not all steps are the same")
                p1 = plt.plot(
                    steps,
                    rew,
                    color=color,
                    linestyle=linestyle,
                    marker=marker,
                    markevery=markers_every,
                    markersize=args.marker_size,
                    alpha=AF_ALPHA[af],
                    zorder=AF_ZORDER[af],
                )[0]
                if args.stderr:
                    # standard error band around the mean curve
                    stderr = np.std(rew_list, 0) / np.sqrt(len(rew_list))
                    p2 = plt.fill_between(
                        steps,
                        rew - stderr,
                        rew + stderr,
                        color=color,
                        alpha=0.2,
                        zorder=AF_ZORDER[af],
                    )
                    legend_handles.append((p1, p2))
                else:
                    legend_handles.append(p1)
            else:
                # plot each run individually
                for i, (steps, rew) in enumerate(zip(steps_list, rew_list)):
                    label = f"{af}_{i}"
                    if args.paper_style:
                        label = label.replace("_", "\_")
                    p1 = plt.plot(
                        steps,
                        rew,
                        color=color,
                        linestyle=linestyle,
                        marker=marker,
                        markevery=markers_every,
                        markersize=args.marker_size,
                        alpha=AF_ALPHA[af],
                        zorder=AF_ZORDER[af],
                    )[0]
                    legend_handles.append(p1)
            legend_labels.append(label)
        if args.csv_baseline is not None:
            baseline_data = np.genfromtxt(args.csv_baseline, delimiter=",")
            baseline = np.max(baseline_data[:, 1])
            if args.average_envs:
                # NOTE(review): best_return is not defined anywhere in this
                # module — combining --csv_baseline with --average_envs raises
                # a NameError here. The intended normalisation needs fixing.
                baseline /= best_return
            plt.axhline(baseline, color="black", linestyle="--")
        # plt.xlim(0, 100000)
        plt.xlabel("timestep")
        if args.average_envs:
            plt.ylabel("average score")
        else:
            plt.ylabel("return")
        if not args.no_title:
            plt.title(env_id)
        if not args.no_legend:
            plt.legend(
                legend_handles,
                legend_labels,
                loc="center left",
                bbox_to_anchor=(1, 0.5),
            )
        if args.pdf:
            plt.savefig(
                os.path.join(args.plot_folder, f"{env_id}.pdf"), bbox_inches="tight"
            )
        else:
            plt.savefig(
                os.path.join(args.plot_folder, f"{env_id}.png"), bbox_inches="tight"
            )
# Script entry point: build the learning-curve plots per the CLI arguments.
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.figaspect",
"argparse.ArgumentParser",
"os.walk",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"numpy.mean",
"active_reward_learning.util.helpers.get_dict_default",
"tensorboard.backend.event_processing.event_accumulator.EventAccumulator",
"m... | [((1356, 1391), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.6)', 'AF_ALPHA'], {}), '(lambda : 0.6, AF_ALPHA)\n', (1367, 1391), False, 'from collections import defaultdict\n'), ((1514, 1548), 'collections.defaultdict', 'defaultdict', (['(lambda : 1)', 'AF_ZORDER'], {}), '(lambda : 1, AF_ZORDER)\n', (1525, 1548), False, 'from collections import defaultdict\n'), ((3506, 3572), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['experiment.config', '"""schedule_update_every"""', 'None'], {}), "(experiment.config, 'schedule_update_every', None)\n", (3522, 3572), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((3609, 3669), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['experiment.config', '"""n_model_updates"""', 'None'], {}), "(experiment.config, 'n_model_updates', None)\n", (3625, 3669), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((3693, 3754), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['experiment.config', '"""n_policy_updates"""', 'None'], {}), "(experiment.config, 'n_policy_updates', None)\n", (3709, 3754), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((3784, 3851), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['experiment.config', '"""exploration_model_path"""', 'None'], {}), "(experiment.config, 'exploration_model_path', None)\n", (3800, 3851), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((3890, 3952), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['experiment.config', '"""exploration_sigma"""', 'None'], {}), "(experiment.config, 'exploration_sigma', None)\n", (3906, 3952), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((3975, 4035), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', 
(['experiment.config', '"""exploration_eps"""', 'None'], {}), "(experiment.config, 'exploration_eps', None)\n", (3991, 4035), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((4055, 4147), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['experiment.config', '"""rollout_candidate_policies_for_exploration"""', '(False)'], {}), "(experiment.config,\n 'rollout_candidate_policies_for_exploration', False)\n", (4071, 4147), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((4171, 4248), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['experiment.config', '"""reinitialize_candidate_policies"""', '(False)'], {}), "(experiment.config, 'reinitialize_candidate_policies', False)\n", (4187, 4248), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((5053, 5078), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5076, 5078), False, 'import argparse\n'), ((6147, 6191), 'os.makedirs', 'os.makedirs', (['args.plot_folder'], {'exist_ok': '(True)'}), '(args.plot_folder, exist_ok=True)\n', (6158, 6191), False, 'import os\n'), ((7165, 7188), 'os.walk', 'os.walk', (['args.tb_folder'], {}), '(args.tb_folder)\n', (7172, 7188), False, 'import os\n'), ((3001, 3033), 'numpy.array', 'np.array', (["out_dict[tag]['steps']"], {}), "(out_dict[tag]['steps'])\n", (3009, 3033), True, 'import numpy as np\n'), ((3068, 3101), 'numpy.array', 'np.array', (["out_dict[tag]['values']"], {}), "(out_dict[tag]['values'])\n", (3076, 3101), True, 'import numpy as np\n'), ((4350, 4407), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['experiment.config', '"""total_timesteps"""', '(0)'], {}), "(experiment.config, 'total_timesteps', 0)\n", (4366, 4407), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((6277, 6305), 'os.walk', 'os.walk', (['args.results_folder'], {}), 
'(args.results_folder)\n', (6284, 6305), False, 'import os\n'), ((7249, 7273), 'os.path.basename', 'os.path.basename', (['subdir'], {}), '(subdir)\n', (7265, 7273), False, 'import os\n'), ((8345, 8361), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (8353, 8361), False, 'import gym\n'), ((11488, 11504), 'active_reward_learning.util.plotting.set_plot_style', 'set_plot_style', ([], {}), '()\n', (11502, 11504), False, 'from active_reward_learning.util.plotting import plot_result_percentiles, set_plot_style\n'), ((11564, 11590), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['args.aspect'], {}), '(args.aspect)\n', (11577, 11590), True, 'import matplotlib.pyplot as plt\n'), ((11605, 11640), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (11615, 11640), True, 'import matplotlib.pyplot as plt\n'), ((15560, 15582), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""timestep"""'], {}), "('timestep')\n", (15570, 15582), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2655), 'tensorboard.backend.event_processing.event_accumulator.EventAccumulator', 'EventAccumulator', (['dirname'], {}), '(dirname)\n', (2646, 2655), False, 'from tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n'), ((12404, 12440), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['AF_COLORS', 'af', '"""b"""'], {}), "(AF_COLORS, af, 'b')\n", (12420, 12440), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((12465, 12504), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['AF_LINESTYLE', 'af', '"""-"""'], {}), "(AF_LINESTYLE, af, '-')\n", (12481, 12504), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((15283, 15330), 'numpy.genfromtxt', 'np.genfromtxt', (['args.csv_baseline'], {'delimiter': '""","""'}), "(args.csv_baseline, delimiter=',')\n", (15296, 15330), True, 'import numpy as np\n'), 
((15354, 15381), 'numpy.max', 'np.max', (['baseline_data[:, 1]'], {}), '(baseline_data[:, 1])\n', (15360, 15381), True, 'import numpy as np\n'), ((15468, 15520), 'matplotlib.pyplot.axhline', 'plt.axhline', (['baseline'], {'color': '"""black"""', 'linestyle': '"""--"""'}), "(baseline, color='black', linestyle='--')\n", (15479, 15520), True, 'import matplotlib.pyplot as plt\n'), ((15625, 15652), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""average score"""'], {}), "('average score')\n", (15635, 15652), True, 'import matplotlib.pyplot as plt\n'), ((15679, 15699), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""return"""'], {}), "('return')\n", (15689, 15699), True, 'import matplotlib.pyplot as plt\n'), ((15743, 15760), 'matplotlib.pyplot.title', 'plt.title', (['env_id'], {}), '(env_id)\n', (15752, 15760), True, 'import matplotlib.pyplot as plt\n'), ((15804, 15894), 'matplotlib.pyplot.legend', 'plt.legend', (['legend_handles', 'legend_labels'], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)'}), "(legend_handles, legend_labels, loc='center left', bbox_to_anchor\n =(1, 0.5))\n", (15814, 15894), True, 'import matplotlib.pyplot as plt\n'), ((6379, 6408), 'active_reward_learning.util.results.FileExperimentResults', 'FileExperimentResults', (['subdir'], {}), '(subdir)\n', (6400, 6408), False, 'from active_reward_learning.util.results import FileExperimentResults\n'), ((6576, 6619), 'os.path.basename', 'os.path.basename', (["experiment.info['tb_log']"], {}), "(experiment.info['tb_log'])\n", (6592, 6619), False, 'import os\n'), ((6650, 6691), 'os.path.join', 'os.path.join', (['args.tb_folder', 'tb_log_path'], {}), '(args.tb_folder, tb_log_path)\n', (6662, 6691), False, 'import os\n'), ((6711, 6738), 'os.path.exists', 'os.path.exists', (['tb_log_path'], {}), '(tb_log_path)\n', (6725, 6738), False, 'import os\n'), ((12566, 12604), 'active_reward_learning.util.helpers.get_dict_default', 'get_dict_default', (['AF_MARKERS', 'af', 'None'], {}), '(AF_MARKERS, af, 
None)\n', (12582, 12604), False, 'from active_reward_learning.util.helpers import get_dict_default\n'), ((13055, 13075), 'numpy.mean', 'np.mean', (['rew_list', '(0)'], {}), '(rew_list, 0)\n', (13062, 13075), True, 'import numpy as np\n'), ((16032, 16079), 'os.path.join', 'os.path.join', (['args.plot_folder', 'f"""{env_id}.pdf"""'], {}), "(args.plot_folder, f'{env_id}.pdf')\n", (16044, 16079), False, 'import os\n'), ((16170, 16217), 'os.path.join', 'os.path.join', (['args.plot_folder', 'f"""{env_id}.png"""'], {}), "(args.plot_folder, f'{env_id}.png')\n", (16182, 16217), False, 'import os\n'), ((9866, 9879), 'numpy.max', 'np.max', (['steps'], {}), '(steps)\n', (9872, 9879), True, 'import numpy as np\n'), ((13576, 13750), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'rew'], {'color': 'color', 'linestyle': 'linestyle', 'marker': 'marker', 'markevery': 'markers_every', 'markersize': 'args.marker_size', 'alpha': 'AF_ALPHA[af]', 'zorder': 'AF_ZORDER[af]'}), '(steps, rew, color=color, linestyle=linestyle, marker=marker,\n markevery=markers_every, markersize=args.marker_size, alpha=AF_ALPHA[af\n ], zorder=AF_ZORDER[af])\n', (13584, 13750), True, 'import matplotlib.pyplot as plt\n'), ((14075, 14176), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['steps', '(rew - stderr)', '(rew + stderr)'], {'color': 'color', 'alpha': '(0.2)', 'zorder': 'AF_ZORDER[af]'}), '(steps, rew - stderr, rew + stderr, color=color, alpha=0.2,\n zorder=AF_ZORDER[af])\n', (14091, 14176), True, 'import matplotlib.pyplot as plt\n'), ((13163, 13201), 'numpy.all', 'np.all', (['(steps_list[i] == steps_list[0])'], {}), '(steps_list[i] == steps_list[0])\n', (13169, 13201), True, 'import numpy as np\n'), ((14005, 14024), 'numpy.std', 'np.std', (['rew_list', '(0)'], {}), '(rew_list, 0)\n', (14011, 14024), True, 'import numpy as np\n'), ((14718, 14892), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'rew'], {'color': 'color', 'linestyle': 'linestyle', 'marker': 'marker', 'markevery': 
'markers_every', 'markersize': 'args.marker_size', 'alpha': 'AF_ALPHA[af]', 'zorder': 'AF_ZORDER[af]'}), '(steps, rew, color=color, linestyle=linestyle, marker=marker,\n markevery=markers_every, markersize=args.marker_size, alpha=AF_ALPHA[af\n ], zorder=AF_ZORDER[af])\n', (14726, 14892), True, 'import matplotlib.pyplot as plt\n')] |
from typing import Any, Iterable as IterableType, Dict, List, Tuple, Union
from .base import BaseStorageBackend
from FPSim2.FPSim2lib import py_popcount
from ..chem import (
get_mol_suplier,
get_fp_length,
rdmol_to_efp,
FP_FUNC_DEFAULTS,
)
import tables as tb
import numpy as np
import rdkit
import math
import os
BATCH_WRITE_SIZE = 10000
def create_schema(fp_length: int) -> Any:
class Particle(tb.IsDescription):
pass
columns = {}
pos = 1
columns["fp_id"] = tb.Int64Col(pos=pos)
for i in range(1, math.ceil(fp_length / 64) + 1):
pos += 1
columns["f" + str(i)] = tb.UInt64Col(pos=pos)
columns["popcnt"] = tb.Int64Col(pos=pos + 1)
Particle.columns = columns
return Particle
def create_db_file(
mols_source: Union[str, IterableType],
filename: str,
fp_type: str,
fp_params: dict = {},
mol_id_prop: str = "mol_id",
gen_ids: bool = False,
sort_by_popcnt: bool = True,
) -> None:
"""Creates FPSim2 FPs db file from .smi, .sdf files or from an iterable.
Parameters
----------
mols_source : str
.smi/.sdf filename or iterable.
filename: float
Fingerprint database filename.
fp_type : str
Fingerprint type used to create the fingerprints.
fp_params : dict
Parameters used to create the fingerprints.
mol_id_prop : str
Name of the .sdf property to read the molecule id.
gen_ids : bool
Autogenerate FP ids.
sort_by_popcnt: bool
Whether if the FPs should be sorted or not.
Returns
-------
None
"""
# if params dict is empty use defaults
if not fp_params:
fp_params = FP_FUNC_DEFAULTS[fp_type]
supplier = get_mol_suplier(mols_source)
fp_length = get_fp_length(fp_type, fp_params)
# set compression
filters = tb.Filters(complib="blosc", complevel=5)
# set the output file and fps table
with tb.open_file(filename, mode="w") as fp_file:
particle = create_schema(fp_length)
fps_table = fp_file.create_table(
fp_file.root, "fps", particle, "Table storing fps", filters=filters
)
# set config table; used fp function, parameters and rdkit version
param_table = fp_file.create_vlarray(
fp_file.root, "config", atom=tb.ObjectAtom()
)
param_table.append(fp_type)
param_table.append(fp_params)
param_table.append(rdkit.__version__)
fps = []
for mol_id, rdmol in supplier(mols_source, gen_ids, mol_id_prop=mol_id_prop):
efp = rdmol_to_efp(rdmol, fp_type, fp_params)
popcnt = py_popcount(np.array(efp, dtype=np.uint64))
efp.insert(0, mol_id)
efp.append(popcnt)
fps.append(tuple(efp))
if len(fps) == BATCH_WRITE_SIZE:
fps_table.append(fps)
fps = []
# append last batch < 10k
fps_table.append(fps)
# create index so table can be sorted
fps_table.cols.popcnt.create_index(kind="full")
if sort_by_popcnt:
sort_db_file(filename)
def calc_popcnt_bins_pytables(fps: Any, fp_length: int) -> list:
popcnt_bins = []
for i in range(0, fp_length + 1):
idx_gen = (row.nrow for row in fps.where("popcnt == {}".format(str(i))))
try:
first_id = next(idx_gen)
except StopIteration:
continue
j = first_id
for j in idx_gen:
pass
cnt_idxs = (first_id, j + 1)
popcnt_bins.append((i, cnt_idxs))
return popcnt_bins
def sort_db_file(filename: str) -> None:
"""Sorts the FPs db file."""
# rename not sorted filename
tmp_filename = filename + "_tmp"
os.rename(filename, tmp_filename)
filters = tb.Filters(complib="blosc", complevel=5)
# copy sorted fps and config to a new file
with tb.open_file(tmp_filename, mode="r") as fp_file:
with tb.open_file(filename, mode="w") as sorted_fp_file:
fp_type = fp_file.root.config[0]
fp_params = fp_file.root.config[1]
fp_length = get_fp_length(fp_type, fp_params)
# create a sorted copy of the fps table
dst_fps = fp_file.root.fps.copy(
sorted_fp_file.root,
"fps",
filters=filters,
copyuserattrs=True,
overwrite=True,
stats={
"groups": 0,
"leaves": 0,
"links": 0,
"bytes": 0,
"hardlinks": 0,
},
start=None,
stop=None,
step=None,
chunkshape="keep",
sortby="popcnt",
check_CSI=True,
propindexes=True,
)
# set config table; used fp function, parameters and rdkit version
param_table = sorted_fp_file.create_vlarray(
sorted_fp_file.root, "config", atom=tb.ObjectAtom()
)
param_table.append(fp_type)
param_table.append(fp_params)
param_table.append(rdkit.__version__)
# update count ranges
popcnt_bins = calc_popcnt_bins_pytables(dst_fps, fp_length)
param_table.append(popcnt_bins)
# remove not sorted file
os.remove(tmp_filename)
class PyTablesStorageBackend(BaseStorageBackend):
def __init__(self, fp_filename: str, in_memory_fps: bool = True, fps_sort: bool = False) -> None:
super(PyTablesStorageBackend, self).__init__(fp_filename)
self.name = "pytables"
self.fp_type, self.fp_params, self.rdkit_ver = self.read_parameters()
if in_memory_fps:
self.load_fps(in_memory_fps, fps_sort)
self.load_popcnt_bins(fps_sort)
def read_parameters(self) -> Tuple[str, Dict[str, Dict[str, dict]], str]:
"""Reads fingerprint parameters"""
with tb.open_file(self.fp_filename, mode="r") as fp_file:
fp_type = fp_file.root.config[0]
fp_params = fp_file.root.config[1]
rdkit_ver = fp_file.root.config[2]
return fp_type, fp_params, rdkit_ver
def get_fps_chunk(self, chunk_range: Tuple[int, int]) -> np.asarray:
with tb.open_file(self.fp_filename, mode="r") as fp_file:
fps = fp_file.root.fps[slice(*chunk_range)]
return fps
def load_popcnt_bins(self, fps_sort) -> None:
if fps_sort:
popcnt_bins = self.calc_popcnt_bins(self.fps)
else:
with tb.open_file(self.fp_filename, mode="r") as fp_file:
popcnt_bins = fp_file.root.config[3]
self.popcnt_bins = popcnt_bins
def load_fps(self, in_memory_fps, fps_sort) -> None:
"""Loads FP db file into memory.
Parameters
----------
in_memory_fps : bool
Whether if the FPs should be loaded into memory or not.
fps_sort: bool
Whether if the FPs should be sorted or not.
Returns
-------
fps: numpy array
Numpy array with the fingerprints.
"""
with tb.open_file(self.fp_filename, mode="r") as fp_file:
fps = fp_file.root.fps[:]
# files should be sorted but if the file is updated without sorting it
# can be also in memory sorted
if fps_sort:
fps.sort(order="popcnt")
num_fields = len(fps[0])
fps = fps.view("<u8")
fps = fps.reshape(int(fps.size / num_fields), num_fields)
self.fps = fps
def delete_fps(self, ids_list: List[int]) -> None:
"""Delete FPs given a list of ids.
Parameters
----------
ids_list : list
ids to delete.
Returns
-------
None
"""
with tb.open_file(self.fp_filename, mode="a") as fp_file:
fps_table = fp_file.root.fps
for fp_id in ids_list:
to_delete = [
row.nrow
for row in fps_table.where("fp_id == {}".format(str(fp_id)))
]
fps_table.remove_row(to_delete[0])
def append_fps(self, mols_source: Union[str, IterableType], mol_id_prop: str = "mol_id") -> None:
"""Appends FPs to the file.
Parameters
----------
mols_source : str or iterable
.smi or .sdf filename or iterable.
Returns
-------
None
"""
supplier = get_mol_suplier(mols_source)
fp_type, fp_params, _ = self.read_parameters()
with tb.open_file(self.fp_filename, mode="a") as fp_file:
fps_table = fp_file.root.fps
new_mols = []
for mol_id, rdmol in supplier(mols_source, False, mol_id_prop=mol_id_prop):
if not rdmol:
continue
efp = rdmol_to_efp(rdmol, fp_type, fp_params)
popcnt = py_popcount(np.array(efp, dtype=np.uint64))
efp.insert(0, mol_id)
efp.append(popcnt)
new_mols.append(tuple(efp))
if len(new_mols) == BATCH_WRITE_SIZE:
# append last batch < 10k
fps_table.append(new_mols)
new_mols = []
fps_table.append(new_mols)
| [
"os.remove",
"math.ceil",
"tables.ObjectAtom",
"os.rename",
"tables.UInt64Col",
"tables.Int64Col",
"tables.Filters",
"numpy.array",
"tables.open_file"
] | [((503, 523), 'tables.Int64Col', 'tb.Int64Col', ([], {'pos': 'pos'}), '(pos=pos)\n', (514, 523), True, 'import tables as tb\n'), ((673, 697), 'tables.Int64Col', 'tb.Int64Col', ([], {'pos': '(pos + 1)'}), '(pos=pos + 1)\n', (684, 697), True, 'import tables as tb\n'), ((1854, 1894), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(5)'}), "(complib='blosc', complevel=5)\n", (1864, 1894), True, 'import tables as tb\n'), ((3756, 3789), 'os.rename', 'os.rename', (['filename', 'tmp_filename'], {}), '(filename, tmp_filename)\n', (3765, 3789), False, 'import os\n'), ((3804, 3844), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(5)'}), "(complib='blosc', complevel=5)\n", (3814, 3844), True, 'import tables as tb\n'), ((5400, 5423), 'os.remove', 'os.remove', (['tmp_filename'], {}), '(tmp_filename)\n', (5409, 5423), False, 'import os\n'), ((627, 648), 'tables.UInt64Col', 'tb.UInt64Col', ([], {'pos': 'pos'}), '(pos=pos)\n', (639, 648), True, 'import tables as tb\n'), ((1945, 1977), 'tables.open_file', 'tb.open_file', (['filename'], {'mode': '"""w"""'}), "(filename, mode='w')\n", (1957, 1977), True, 'import tables as tb\n'), ((3902, 3938), 'tables.open_file', 'tb.open_file', (['tmp_filename'], {'mode': '"""r"""'}), "(tmp_filename, mode='r')\n", (3914, 3938), True, 'import tables as tb\n'), ((546, 571), 'math.ceil', 'math.ceil', (['(fp_length / 64)'], {}), '(fp_length / 64)\n', (555, 571), False, 'import math\n'), ((3964, 3996), 'tables.open_file', 'tb.open_file', (['filename'], {'mode': '"""w"""'}), "(filename, mode='w')\n", (3976, 3996), True, 'import tables as tb\n'), ((6005, 6045), 'tables.open_file', 'tb.open_file', (['self.fp_filename'], {'mode': '"""r"""'}), "(self.fp_filename, mode='r')\n", (6017, 6045), True, 'import tables as tb\n'), ((6329, 6369), 'tables.open_file', 'tb.open_file', (['self.fp_filename'], {'mode': '"""r"""'}), "(self.fp_filename, mode='r')\n", (6341, 6369), True, 'import tables as tb\n'), 
((7208, 7248), 'tables.open_file', 'tb.open_file', (['self.fp_filename'], {'mode': '"""r"""'}), "(self.fp_filename, mode='r')\n", (7220, 7248), True, 'import tables as tb\n'), ((7903, 7943), 'tables.open_file', 'tb.open_file', (['self.fp_filename'], {'mode': '"""a"""'}), "(self.fp_filename, mode='a')\n", (7915, 7943), True, 'import tables as tb\n'), ((8678, 8718), 'tables.open_file', 'tb.open_file', (['self.fp_filename'], {'mode': '"""a"""'}), "(self.fp_filename, mode='a')\n", (8690, 8718), True, 'import tables as tb\n'), ((2329, 2344), 'tables.ObjectAtom', 'tb.ObjectAtom', ([], {}), '()\n', (2342, 2344), True, 'import tables as tb\n'), ((2670, 2700), 'numpy.array', 'np.array', (['efp'], {'dtype': 'np.uint64'}), '(efp, dtype=np.uint64)\n', (2678, 2700), True, 'import numpy as np\n'), ((6618, 6658), 'tables.open_file', 'tb.open_file', (['self.fp_filename'], {'mode': '"""r"""'}), "(self.fp_filename, mode='r')\n", (6630, 6658), True, 'import tables as tb\n'), ((5053, 5068), 'tables.ObjectAtom', 'tb.ObjectAtom', ([], {}), '()\n', (5066, 5068), True, 'import tables as tb\n'), ((9044, 9074), 'numpy.array', 'np.array', (['efp'], {'dtype': 'np.uint64'}), '(efp, dtype=np.uint64)\n', (9052, 9074), True, 'import numpy as np\n')] |
# encoding: utf-8
##################################################
# This script shows uses the pandas library to create statistically describe data sets
# It also shows basic plotting features
# Find extra documentation about data frame here:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
##################################################
#
##################################################
# Author: <NAME>
# Copyright: Copyright 2019, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: <NAME>
# Email: <EMAIL>
# Status: development
##################################################
# We need to import pandas library as well as the plot libraries matplotlib and seaborn
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# We read the file for population data
a_df = pd.read_csv('../data/world/pop_total_v2.csv', skiprows=4, header=0)
# Range is the combination of min-max values
print('####################')
print('This is an example of the range values for the year 1960:')
pop_2010 = a_df['2010']
pop_2010 = pop_2010[~np.isnan(pop_2010)] # Note: working with series and seaborn usually demands dropping 'nan'
pop_comparison = a_df[['Country Code', '1960', '1990', '2010']]
pop_country = a_df[a_df['Country Name'] == 'Spain']
pop_country = pop_country.iloc[0, 4:53]
# Histogram and normal distribution
sns.distplot(pop_2010)
plt.show()
# Kernel density
sns.distplot(pop_2010, hist=False, rug=True)
plt.show()
sns.kdeplot(pop_2010, shade=True)
plt.show()
# Scatter plot two years all countries
pop_comparison.plot.scatter(x='1960', y='1990', c='DarkBlue')
max_range = range(0, 1000000000, 500000000) # Sets values for an straight line
plt.plot(max_range, max_range)
plt.show()
# Scatter plot one country all years
pop_country.plot()
plt.show()
# Try to compare multiple countries
# plot correlation
# ====================================
sns.set_style('ticks')
sns.regplot(pop_comparison['1960'], pop_comparison['1990'], ci=None)
sns.despine()
sns.set_style('ticks')
sns.regplot(pop_comparison['1990'], pop_comparison['2010'], ci=None)
sns.despine()
# rate of growth
| [
"seaborn.set_style",
"seaborn.kdeplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"seaborn.despine",
"numpy.isnan",
"seaborn.regplot",
"seaborn.distplot"
] | [((979, 1046), 'pandas.read_csv', 'pd.read_csv', (['"""../data/world/pop_total_v2.csv"""'], {'skiprows': '(4)', 'header': '(0)'}), "('../data/world/pop_total_v2.csv', skiprows=4, header=0)\n", (990, 1046), True, 'import pandas as pd\n'), ((1522, 1544), 'seaborn.distplot', 'sns.distplot', (['pop_2010'], {}), '(pop_2010)\n', (1534, 1544), True, 'import seaborn as sns\n'), ((1545, 1555), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1553, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1575, 1619), 'seaborn.distplot', 'sns.distplot', (['pop_2010'], {'hist': '(False)', 'rug': '(True)'}), '(pop_2010, hist=False, rug=True)\n', (1587, 1619), True, 'import seaborn as sns\n'), ((1620, 1630), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1628, 1630), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1665), 'seaborn.kdeplot', 'sns.kdeplot', (['pop_2010'], {'shade': '(True)'}), '(pop_2010, shade=True)\n', (1643, 1665), True, 'import seaborn as sns\n'), ((1666, 1676), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1674, 1676), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1893), 'matplotlib.pyplot.plot', 'plt.plot', (['max_range', 'max_range'], {}), '(max_range, max_range)\n', (1871, 1893), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1904), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1902, 1904), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1973), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1971, 1973), True, 'import matplotlib.pyplot as plt\n'), ((2072, 2094), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (2085, 2094), True, 'import seaborn as sns\n'), ((2095, 2163), 'seaborn.regplot', 'sns.regplot', (["pop_comparison['1960']", "pop_comparison['1990']"], {'ci': 'None'}), "(pop_comparison['1960'], pop_comparison['1990'], ci=None)\n", (2106, 2163), True, 'import seaborn as sns\n'), ((2164, 2177), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (2175, 2177), 
True, 'import seaborn as sns\n'), ((2180, 2202), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (2193, 2202), True, 'import seaborn as sns\n'), ((2203, 2271), 'seaborn.regplot', 'sns.regplot', (["pop_comparison['1990']", "pop_comparison['2010']"], {'ci': 'None'}), "(pop_comparison['1990'], pop_comparison['2010'], ci=None)\n", (2214, 2271), True, 'import seaborn as sns\n'), ((2272, 2285), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (2283, 2285), True, 'import seaborn as sns\n'), ((1235, 1253), 'numpy.isnan', 'np.isnan', (['pop_2010'], {}), '(pop_2010)\n', (1243, 1253), True, 'import numpy as np\n')] |
import argparse
import subprocess
import os
import shutil
import glob
import pprint
import math
import time
import pandas as pd
import numpy as np
import hyperopt
from rl_baselines.registry import registered_rl
from environments.registry import registered_env
from state_representation.registry import registered_srl
from srl_zoo.utils import printGreen
ITERATION_SCALE = 10000
MIN_ITERATION = 30000
class HyperParameterOptimizer(object):
def __init__(self, opt_param, train, seed=0):
"""
the base class for hyper parameter optimizer
:param opt_param: (dict) the parameters to optimize
:param train: (function (dict, int, int): float) the function that take:
- params: (dict) the hyper parameters to train with
- num_iters (int) the number of iterations to train (can be None)
- train_id: (int) the current iteration number in the hyperparameter search (can be None)
- returns: (float) the score of the training to minimize
:param seed: (int) the initial seed for the random number generator
"""
self.opt_param = opt_param
self.train = train
self.seed = seed
self.history = []
def run(self):
"""
run the hyper parameter search
"""
raise NotImplementedError
class Hyperband(HyperParameterOptimizer):
def __init__(self, opt_param, train, seed=0, max_iter=100, eta=3.0):
"""
A Hyperband implementation, it is similar to a targeted random search
Hyperband: https://arxiv.org/abs/1603.06560
:param opt_param: (dict) the parameters to optimize
:param train: (function (dict, int, int): float) the function that take:
- params: (dict) the hyper parameters to train with
- num_iters (int) the number of iterations to train (can be None)
- train_id: (int) the current iteration number in the hyperparameter search (can be None)
- returns: (float) the score of the training to minimize
:param seed: (int) the initial seed for the random number generator
:param max_iter: (int) the maximum budget for hyperband's search
:param eta: (float) the reduction factor of the search
"""
super(Hyperband, self).__init__(opt_param, train, seed=seed)
self.max_iter = max_iter
self.eta = eta
self.max_steps = int(math.floor(math.log(self.max_iter) / math.log(self.eta)))
self.budget = (self.max_steps + 1) * self.max_iter
self.rng = np.random.RandomState(seed)
self.param_sampler = self._generate_sampler()
def _generate_sampler(self):
# will generate a hyperparameter sampler for Hyperband
def _sample():
params = {}
for name, (param_type, val) in self.opt_param.items():
if param_type == int:
params[name] = self.rng.randint(val[0], val[1])
elif param_type == float:
params[name] = self.rng.uniform(val[0], val[1])
elif isinstance(param_type, tuple) and param_type[0] == list:
params[name] = val[self.rng.randint(len(val))]
else:
raise AssertionError("Error: unknown type {}".format(param_type))
return params
return _sample
def run(self):
for step in reversed(range(self.max_steps + 1)):
max_n_param_sampled = int(math.ceil(self.budget / self.max_iter * self.eta**step / (step + 1)))
max_iters = self.max_iter * self.eta**(-step)
all_parameters = np.array([self.param_sampler() for _ in range(max_n_param_sampled)])
for i in range(step + 1):
printGreen("\npop_itt:{}/{}, itt:{}/{}, pop_size:{}".format(self.max_steps - step, self.max_steps + 1,
i, step+1, len(all_parameters)))
n_param_sampled = int(math.floor(max_n_param_sampled * self.eta**(-i)))
num_iters = max_iters * self.eta**i
losses = [self.train(params, num_iters, train_id) for train_id, params in enumerate(all_parameters)]
self.history.extend(zip([(params, num_iters) for params in all_parameters], losses))
all_parameters = all_parameters[np.argsort(losses)[:int(math.floor(n_param_sampled / self.eta))]]
return self.history[int(np.argmin([val[1] for val in self.history]))]
class Hyperopt(HyperParameterOptimizer):
def __init__(self, opt_param, train, seed=0, num_eval=100):
"""
A Hyperopt implementation, it is similar to a bayesian search
Hyperopt: https://www.lri.fr/~kegl/research/PDFs/BeBaBeKe11.pdf
:param opt_param: (dict) the parameters to optimize
:param train: (function (dict, int, int): float) the function that take:
- params: (dict) the hyper parameters to train with
- num_iters (int) the number of iterations to train (can be None)
- train_id: (int) the current iteration number in the hyperparameter search (can be None)
- returns: (float) the score of the training to minimize
:param seed: (int) the initial seed for the random number generator
:param num_eval: (int) the number of evaluation to do
"""
super(Hyperopt, self).__init__(opt_param, train, seed=seed)
self.num_eval = num_eval
self.search_space = {}
for name, (param_type, val) in self.opt_param.items():
if param_type == int:
self.search_space[name] = hyperopt.hp.choice(name, np.arange(int(val[0]), int(val[1]), dtype=int))
elif param_type == float:
self.search_space[name] = hyperopt.hp.uniform(name, val[0], val[1])
elif isinstance(param_type, tuple) and param_type[0] == list:
self.search_space[name] = hyperopt.hp.choice(name, val)
else:
raise AssertionError("Error: unknown type {}".format(param_type))
def run(self):
trials = hyperopt.Trials()
hyperopt.fmin(fn=lambda kwargs: {'loss': self.train(kwargs), 'status': hyperopt.STATUS_OK},
space=self.search_space,
algo=hyperopt.tpe.suggest,
max_evals=self.num_eval,
trials=trials,
verbose=10)
# from the trials, get the values for every parameter
# set the number of iter to None as they are not changed in Hyperopt
# and zip the loss
self.history.extend(zip([(
{name: val[0] for name, val in params["misc"]["vals"].items()}, None)
for params in trials.trials], trials.losses()))
return self.history[int(np.argmin([val[1] for val in self.history]))]
def makeRlTrainingFunction(args, train_args):
"""
makes a training function for the hyperparam optimizers
:param args: (ArgumentParser) the optimizer arguments
:param train_args: (ArgumentParser) the remaining arguments
:return: (function (dict, int, int): float) the function that take:
- params: (dict) the hyper parameters to train with
- num_iters (int) the number of iterations to train (can be None)
- train_id: (int) the current iteration number in the hyperparameter search (can be None)
- returns: (float) the score of the training to minimize
"""
if args.verbose:
# None here means stdout of terminal for subprocess.call
stdout = None
else:
stdout = open(os.devnull, 'w')
def _train(params, num_iters=None, train_id=None):
# generate a print string
print_str = "\nID_num={}, "
format_args = []
if train_id is None:
if not hasattr(_train, "current_id"):
_train.current_id = 0
train_id = _train.current_id
_train.current_id += 1
format_args.append(train_id)
if num_iters is not None:
print_str += "Num-timesteps={}, "
format_args.append(int(max(MIN_ITERATION, num_iters * ITERATION_SCALE)))
print_str += "Param:"
printGreen(print_str.format(*format_args))
pprint.pprint(params)
# cleanup old files
if os.path.exists(args.log_dir):
shutil.rmtree(args.log_dir)
# add the training args that where parsed for the hyperparam optimizers
if num_iters is not None:
loop_args = ['--num-timesteps', str(int(max(MIN_ITERATION, num_iters * ITERATION_SCALE)))]
else:
loop_args = ['--num-timesteps', str(int(args.num_timesteps))]
# redefine the hyperparam args for rl_baselines.train
if len(params) > 0:
loop_args.append("--hyperparam")
for param_name, param_val in params.items():
loop_args.append("{}:{}".format(param_name, param_val))
# call the training
ok = subprocess.call(['python', '-m', 'rl_baselines.train'] + train_args + loop_args, stdout=stdout)
if ok != 0:
# throw the error down to the terminal
raise ChildProcessError("An error occured, error code: {}".format(ok))
# load the logging of the training, and extract the reward
folders = glob.glob("{}/{}/{}/{}/*".format(args.log_dir, args.env, args.srl_model, args.algo))
assert len(folders) != 0, "Error: Could not find generated directory, halting {} search.".format(args.optimizer)
rewards = []
for monitor_path in glob.glob(folders[0] + "/*.monitor.csv"):
rewards.append(np.mean(pd.read_csv(monitor_path, skiprows=1)["r"][-10:]))
if np.isnan(rewards).any():
rewards = -np.inf
print("reward: ", np.mean(rewards))
# negative reward, as we are minimizing with hyperparameter search
return -np.mean(rewards)
return _train
def main():
parser = argparse.ArgumentParser(description="Hyperparameter search for implemented RL models")
parser.add_argument('--optimizer', default='hyperband', choices=['hyperband', 'hyperopt'], type=str,
help='The hyperparameter optimizer to choose from')
parser.add_argument('--algo', default='ppo2', choices=list(registered_rl.keys()), help='OpenAI baseline to use',
type=str)
parser.add_argument('--env', type=str, help='environment ID', default='KukaButtonGymEnv-v0',
choices=list(registered_env.keys()))
parser.add_argument('--seed', type=int, default=0, help='random seed (default: 0)')
parser.add_argument('--srl-model', type=str, default='raw_pixels', choices=list(registered_srl.keys()),
help='SRL model to use')
parser.add_argument('--num-timesteps', type=int, default=1e6, help='number of timesteps the baseline should run')
parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Display baseline STDOUT')
parser.add_argument('--max-eval', type=int, default=100, help='Number of evalutation to try for hyperopt')
args, train_args = parser.parse_known_args()
args.log_dir = "logs/_{}_search/".format(args.optimizer)
train_args.extend(['--srl-model', args.srl_model, '--seed', str(args.seed), '--algo', args.algo, '--env', args.env,
'--log-dir', args.log_dir, '--no-vis'])
# verify the algorithm has defined it, and that it returnes an expected value
try:
opt_param = registered_rl[args.algo][0].getOptParam()
assert opt_param is not None
except AttributeError or AssertionError:
raise AssertionError("Error: {} algo does not support hyperparameter search.".format(args.algo))
if args.optimizer == "hyperband":
opt = Hyperband(opt_param, makeRlTrainingFunction(args, train_args), seed=args.seed,
max_iter=args.num_timesteps // ITERATION_SCALE)
elif args.optimizer == "hyperopt":
opt = Hyperopt(opt_param, makeRlTrainingFunction(args, train_args), seed=args.seed, num_eval=args.max_eval)
else:
raise ValueError("Error: optimizer {} was defined but not implemented, Halting.".format(args.optimizer))
t_start = time.time()
opt.run()
all_params, loss = zip(*opt.history)
idx = np.argmin(loss)
opt_params, nb_iter = all_params[idx]
reward = loss[idx]
print('\ntime to run : {}s'.format(int(time.time() - t_start)))
print('Total nb. evaluations : {}'.format(len(all_params)))
if nb_iter is not None:
print('Best nb. of iterations : {}'.format(int(nb_iter)))
print('Best params : ')
pprint.pprint(opt_params)
print('Best reward : {:.3f}'.format(-reward))
param_dict, timesteps = zip(*all_params)
output = pd.DataFrame(list(param_dict))
# make sure we returned a timestep value to log, otherwise ignore
if not any([el is None for el in timesteps]):
output["timesteps"] = np.array(np.maximum(MIN_ITERATION, np.array(timesteps) * ITERATION_SCALE).astype(int))
output["reward"] = -np.array(loss)
output.to_csv("logs/{}_{}_{}_{}_seed{}_numtimestep{}.csv"
.format(args.optimizer, args.algo, args.env, args.srl_model, args.seed, args.num_timesteps))
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"hyperopt.hp.choice",
"numpy.argmin",
"numpy.isnan",
"numpy.argsort",
"numpy.mean",
"pprint.pprint",
"environments.registry.registered_env.keys",
"glob.glob",
"shutil.rmtree",
"os.path.exists",
"numpy.random.RandomState",
"hyperopt.Trials",
"... | [((10049, 10140), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hyperparameter search for implemented RL models"""'}), "(description=\n 'Hyperparameter search for implemented RL models')\n", (10072, 10140), False, 'import argparse\n'), ((12344, 12355), 'time.time', 'time.time', ([], {}), '()\n', (12353, 12355), False, 'import time\n'), ((12421, 12436), 'numpy.argmin', 'np.argmin', (['loss'], {}), '(loss)\n', (12430, 12436), True, 'import numpy as np\n'), ((12760, 12785), 'pprint.pprint', 'pprint.pprint', (['opt_params'], {}), '(opt_params)\n', (12773, 12785), False, 'import pprint\n'), ((2567, 2594), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2588, 2594), True, 'import numpy as np\n'), ((6157, 6174), 'hyperopt.Trials', 'hyperopt.Trials', ([], {}), '()\n', (6172, 6174), False, 'import hyperopt\n'), ((8321, 8342), 'pprint.pprint', 'pprint.pprint', (['params'], {}), '(params)\n', (8334, 8342), False, 'import pprint\n'), ((8383, 8411), 'os.path.exists', 'os.path.exists', (['args.log_dir'], {}), '(args.log_dir)\n', (8397, 8411), False, 'import os\n'), ((9066, 9165), 'subprocess.call', 'subprocess.call', (["(['python', '-m', 'rl_baselines.train'] + train_args + loop_args)"], {'stdout': 'stdout'}), "(['python', '-m', 'rl_baselines.train'] + train_args +\n loop_args, stdout=stdout)\n", (9081, 9165), False, 'import subprocess\n'), ((9657, 9697), 'glob.glob', 'glob.glob', (["(folders[0] + '/*.monitor.csv')"], {}), "(folders[0] + '/*.monitor.csv')\n", (9666, 9697), False, 'import glob\n'), ((13187, 13201), 'numpy.array', 'np.array', (['loss'], {}), '(loss)\n', (13195, 13201), True, 'import numpy as np\n'), ((8425, 8452), 'shutil.rmtree', 'shutil.rmtree', (['args.log_dir'], {}), '(args.log_dir)\n', (8438, 8452), False, 'import shutil\n'), ((9877, 9893), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (9884, 9893), True, 'import numpy as np\n'), ((9987, 10003), 'numpy.mean', 'np.mean', 
(['rewards'], {}), '(rewards)\n', (9994, 10003), True, 'import numpy as np\n'), ((3494, 3564), 'math.ceil', 'math.ceil', (['(self.budget / self.max_iter * self.eta ** step / (step + 1))'], {}), '(self.budget / self.max_iter * self.eta ** step / (step + 1))\n', (3503, 3564), False, 'import math\n'), ((4493, 4536), 'numpy.argmin', 'np.argmin', (['[val[1] for val in self.history]'], {}), '([val[1] for val in self.history])\n', (4502, 4536), True, 'import numpy as np\n'), ((6865, 6908), 'numpy.argmin', 'np.argmin', (['[val[1] for val in self.history]'], {}), '([val[1] for val in self.history])\n', (6874, 6908), True, 'import numpy as np\n'), ((9796, 9813), 'numpy.isnan', 'np.isnan', (['rewards'], {}), '(rewards)\n', (9804, 9813), True, 'import numpy as np\n'), ((10380, 10400), 'rl_baselines.registry.registered_rl.keys', 'registered_rl.keys', ([], {}), '()\n', (10398, 10400), False, 'from rl_baselines.registry import registered_rl\n'), ((10602, 10623), 'environments.registry.registered_env.keys', 'registered_env.keys', ([], {}), '()\n', (10621, 10623), False, 'from environments.registry import registered_env\n'), ((10798, 10819), 'state_representation.registry.registered_srl.keys', 'registered_srl.keys', ([], {}), '()\n', (10817, 10819), False, 'from state_representation.registry import registered_srl\n'), ((2441, 2464), 'math.log', 'math.log', (['self.max_iter'], {}), '(self.max_iter)\n', (2449, 2464), False, 'import math\n'), ((2467, 2485), 'math.log', 'math.log', (['self.eta'], {}), '(self.eta)\n', (2475, 2485), False, 'import math\n'), ((4025, 4073), 'math.floor', 'math.floor', (['(max_n_param_sampled * self.eta ** -i)'], {}), '(max_n_param_sampled * self.eta ** -i)\n', (4035, 4073), False, 'import math\n'), ((5832, 5873), 'hyperopt.hp.uniform', 'hyperopt.hp.uniform', (['name', 'val[0]', 'val[1]'], {}), '(name, val[0], val[1])\n', (5851, 5873), False, 'import hyperopt\n'), ((12545, 12556), 'time.time', 'time.time', ([], {}), '()\n', (12554, 12556), False, 'import 
time\n'), ((4394, 4412), 'numpy.argsort', 'np.argsort', (['losses'], {}), '(losses)\n', (4404, 4412), True, 'import numpy as np\n'), ((5990, 6019), 'hyperopt.hp.choice', 'hyperopt.hp.choice', (['name', 'val'], {}), '(name, val)\n', (6008, 6019), False, 'import hyperopt\n'), ((9734, 9771), 'pandas.read_csv', 'pd.read_csv', (['monitor_path'], {'skiprows': '(1)'}), '(monitor_path, skiprows=1)\n', (9745, 9771), True, 'import pandas as pd\n'), ((13111, 13130), 'numpy.array', 'np.array', (['timesteps'], {}), '(timesteps)\n', (13119, 13130), True, 'import numpy as np\n'), ((4418, 4456), 'math.floor', 'math.floor', (['(n_param_sampled / self.eta)'], {}), '(n_param_sampled / self.eta)\n', (4428, 4456), False, 'import math\n')] |
# Demo for "Python for Data Analysis" 5.2.1 / 5.2.2: reindexing and
# dropping entries from Series / DataFrame objects.
import pandas as pd
import numpy as np

df = pd.Series(range(3), index=['a', 'b', 'c'])
print(f'================df:\n{df}')

# --- 5.2.1 reindex on a Series ---
# * an index label that did not exist before introduces a missing value
# * a label that is left out drops the corresponding value
df2 = df.reindex(index=['a', 'c', 'd', 'e'])
print(f"=============df2:\n{df2}")

# for ordered data, reindex can forward-fill the gaps
obj3 = pd.Series(data=['blue', 'purple', 'yellow'], index=[0, 2, 4])
print(f"=============obj3:\n{obj3}")
obj4 = obj3.reindex(range(6), method='ffill')
print(f"=============obj4:\n{obj4}")

# --- reindex on a DataFrame ---
rg = range(9)
arg = np.arange(9)
print(f"rg:{rg} type:{type(rg)}")
print(f"arg:{arg} type:{type(arg)}")
rarg = np.reshape(arg, (3, 3))
print(f"rarg:\n{rarg}")
frame = pd.DataFrame(rarg, index=['a', 'c', 'd'], columns=[
    'Beijing', 'Shanghai', 'Tianjin'])
print(f"=====================\nframe:{frame}")

# row reindex
frame1 = frame.reindex(['a', 'b', 'c', 'd'])
print(f"=====================\nframe.reindex:{frame1}")
# column reindex
frame2 = frame.reindex(columns=['Beijing', 'Shanghai', 'Hebei'])
print(f"=====================\nframe.reindex:\n{frame2}")

# many users prefer label-based selection via .loc instead of reindex
frame3 = frame.copy()
loc1 = frame3.loc[['a', 'c', 'd'], ['Beijing', 'Shanghai']]
print(f"=====================\nframe.loc:\n{loc1}")

# --- dropping entries from a Series with drop() ---
obj = pd.Series(np.arange(5), index=['a', 'b', 'c', 'd', 'e'])
print(f"=====================\nobj:\n{obj}")
obj_1 = obj.drop('c')
print(f"=====================\nobj_1:\n{obj_1}")
obj_2 = obj.drop(['c', 'd'])
print(f"=====================\nobj_2:\n{obj_2}")

# index values can be dropped along either axis of a DataFrame
cdata = np.arange(16).reshape(4, 4)
states = ['Ohio', 'Colorado', 'Utah', 'New york']
data = pd.DataFrame(cdata, index=states, columns=['one', 'tow', 'three', 'four'])
print(f"=====================\ndata:\n{data}")

# drop rows (axis='index' is the same as axis=0, the default)
data_1 = data.drop(['Colorado', 'Ohio'])
print(f"=====================\ndata:\n{data_1}")
# drop columns: axis='columns' is the same as axis=1
data_2 = data.drop(['tow', 'four'], axis='columns')
print(f"=====================\ndata:\n{data_2}")

# inplace=True mutates the original object instead of returning a copy
data3 = data.copy()
data3.drop(['Colorado', 'Ohio'], inplace=True)
print(f"=====================\ndata:\n{data3}")
data4 = data.copy()
print(f"=====================\ndata4:\n{data4}")
data4.drop(['tow', 'four'], axis=1, inplace=True)  # columns removed left to right
# print(f"=====================\ndata:\n{data4}")

ser = pd.Series(range(5))
print(f"====ser:\n{ser}")
ser1 = ser.reindex(list('abcde'))
print(f"====ser:\n{ser1}")

obj = pd.Series(range(4))
print(f"obj:{obj}")
print(f"obj.index:{obj.index}")
print(f"obj:{obj.reindex(list('abcde'))}")
obj1 = pd.Series(range(4), index=['d', 'b', 'a', 'c'])
print(f"obj.index:{obj1.index}")
| [
"pandas.DataFrame",
"numpy.arange",
"pandas.Series",
"numpy.reshape"
] | [((336, 397), 'pandas.Series', 'pd.Series', ([], {'data': "['blue', 'purple', 'yellow']", 'index': '[0, 2, 4]'}), "(data=['blue', 'purple', 'yellow'], index=[0, 2, 4])\n", (345, 397), True, 'import pandas as pd\n'), ((572, 584), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (581, 584), True, 'import numpy as np\n'), ((683, 706), 'numpy.reshape', 'np.reshape', (['arg', '(3, 3)'], {}), '(arg, (3, 3))\n', (693, 706), True, 'import numpy as np\n'), ((748, 837), 'pandas.DataFrame', 'pd.DataFrame', (['rarg'], {'index': "['a', 'c', 'd']", 'columns': "['Beijing', 'Shanghai', 'Tianjin']"}), "(rarg, index=['a', 'c', 'd'], columns=['Beijing', 'Shanghai',\n 'Tianjin'])\n", (760, 837), True, 'import pandas as pd\n'), ((1755, 1829), 'pandas.DataFrame', 'pd.DataFrame', (['cdata'], {'index': 'states', 'columns': "['one', 'tow', 'three', 'four']"}), "(cdata, index=states, columns=['one', 'tow', 'three', 'four'])\n", (1767, 1829), True, 'import pandas as pd\n'), ((1383, 1395), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (1392, 1395), True, 'import numpy as np\n'), ((1673, 1686), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (1682, 1686), True, 'import numpy as np\n')] |
"""
Hello poppet
"""
import csv
import matplotlib.pyplot as plt
import numpy as np
import os
# Run-selection parameters: which simulation output folder to analyse.
Na = 8          # number of UWB anchors in the run
N_helpers = 8   # number of helper drones in the run
RunNr = 3       # run index within the parameter combination
plot_protagonist = True   # include the protagonist-drone error curves
plot_helpers = True       # include the helper-flight error curve
# Folder naming convention: anchors_<Na>_helpers_<N_helpers>_run_<RunNr>
big_folder_name = "anchors_" + str(Na) + "_helpers_" + str(N_helpers) + "_run_" + str(RunNr)
# input_file = os.path.join("C:\\Users\\<NAME>\\Documents\\GitHub\\uwb-simulator\\publication", big_folder_name)
# NOTE(review): hard-coded absolute Windows path with a placeholder user name;
# must be adapted per machine.
input_file = os.path.join("C:\\Users\\<NAME>\\Documents\\GitHub\\uwb-simulator\\publication")
def TrajectoryPlotter(DataPath, ax1, ax2, ax3, ax4, fig):
    """Plot EKF vs. OptiTrack trajectories from one position-log CSV.

    Columns are assumed to be: t, ekf_x, ekf_y, ekf_z, ot_x, ot_y, ot_z
    (the header row is skipped).  `fig` is accepted for interface
    compatibility but is not used.

    FIX: the original opened the file twice (once to count lines, once to
    read) and never closed either handle; now read in a single pass with a
    context manager.
    """
    with open(DataPath, newline="") as fh:
        reader = csv.reader(fh)
        next(reader)  # skip the header row
        Data_Array = np.array([[float(v) for v in row] for row in reader])

    # 2D top view (x vs y)
    ax1.plot(Data_Array[:, 1], Data_Array[:, 2], label="ekf")
    ax1.plot(Data_Array[:, 4], Data_Array[:, 5], label="ot")
    ax1.set_title("2D top view")
    ax1.legend()

    # x position vs time
    ax2.plot(Data_Array[:, 0], Data_Array[:, 1], label="ekf")
    ax2.plot(Data_Array[:, 0], Data_Array[:, 4], label="ot")
    ax2.set_title("x-pos vs time")
    ax2.legend()

    # y position vs time
    ax3.plot(Data_Array[:, 0], Data_Array[:, 2], label="ekf")
    ax3.plot(Data_Array[:, 0], Data_Array[:, 5], label="ot")
    ax3.set_title("y-pos vs time")
    ax3.legend()

    # z position vs time
    ax4.plot(Data_Array[:, 0], Data_Array[:, 3], label="ekf")
    ax4.plot(Data_Array[:, 0], Data_Array[:, 6], label="ot")
    ax4.set_title("z-pos vs time")
    ax4.legend()
def ErrorPlotter():
    # TODO: unimplemented stub — the error plotting is currently done
    # inline in the module-level loop below.
    pass
def _sum_ekf_errors(csv_path):
    """Sum the finite 'ekf_tot' values in one runs-data CSV.

    Returns (error_sum, n_values).  Rows whose 'ekf_tot' field contains
    'inf' are skipped, matching the original filtering.  The file is
    closed via a context manager (the original leaked the handle).
    """
    error_sum = 0.0
    n_values = 0
    with open(csv_path, "r", newline="") as fh:
        reader = csv.DictReader(fh, skipinitialspace=True)
        for line in reader:
            if "inf" not in line["ekf_tot"]:
                error_sum += float(line["ekf_tot"])
                n_values += 1
    return error_sum, n_values


N_anchorsArray = np.array([])  # Array containing anchor numbers (currently unused)
# ErrorArray layout: row 0 = helper-flight result, row i+1 = protagonist
# result with i helpers; columns indexed by (Na - 2); depth = (Na, mean
# error, N_helpers).
# FIX: was np.ndarray((10,7,3)) — uninitialized memory that got printed
# and plotted for every unfilled slot; zeros are safe defaults.
ErrorArray = np.zeros((10, 7, 3))
print(ErrorArray.ndim)
for folder_index, folder in enumerate(os.listdir(input_file)):
    # NOTE(review): Na / N_helpers are parsed from fixed character positions
    # of "anchors_X_helpers_Y_run_Z" folder names — only valid for
    # single-digit values; confirm against the actual folder layout.
    if "_run_0" in folder and plot_helpers and "helpers_8" in folder:
        Na = int(folder[8])
        N_helpers = int(folder[-7])
        HelpersErrorSum = 0
        HelpersElementsSum = 0
        for trajectory in os.listdir(os.path.join(input_file, folder)):
            err, cnt = _sum_ekf_errors(
                os.path.join(input_file, folder, trajectory, "runs_data.csv"))
            HelpersErrorSum += err
            HelpersElementsSum += cnt
        # BUGFIX: this branch previously tested and stored the
        # *Protagonist* sums (undefined on the first pass -> NameError,
        # and semantically wrong afterwards); use the helper sums.
        if HelpersElementsSum == 0:
            ErrorArray[0, Na - 2, :] = [Na, np.inf, N_helpers]
        else:
            ErrorArray[0, Na - 2, :] = [Na, HelpersErrorSum / HelpersElementsSum, N_helpers]
    if "_run_0" in folder and plot_protagonist and int(folder[8]) <= 4:
        Na = int(folder[8])
        N_helpers = int(folder[-7])
        print(N_helpers)
        ProtagonistErrorSum = 0
        ProtagonistElementsSum = 0
        for trajectory in os.listdir(os.path.join(input_file, folder)):
            # Average all protagonist errors; fall back to the second log
            # file when the first is missing.
            try:
                err, cnt = _sum_ekf_errors(
                    os.path.join(input_file, folder, trajectory, "DroneUser_runs_data0.csv"))
            except FileNotFoundError:
                err, cnt = _sum_ekf_errors(
                    os.path.join(input_file, folder, trajectory, "DroneUser_runs_data1.csv"))
            ProtagonistErrorSum += err
            ProtagonistElementsSum += cnt
        if ProtagonistElementsSum == 0:
            ErrorArray[N_helpers + 1, Na - 2, :] = [Na, np.inf, N_helpers]
        else:
            ErrorArray[N_helpers + 1, Na - 2, :] = [Na, ProtagonistErrorSum / ProtagonistElementsSum, N_helpers]
print(ErrorArray)
plt.scatter(ErrorArray[0, 0:3, 0], ErrorArray[0, 0:3, 1], label="HelperFlight")
for i in range(len(ErrorArray[1:, :, 0])):
    plt.scatter(ErrorArray[i + 1, 0:3, 0], ErrorArray[i + 1, 0:3, 1], label="Nh: " + str(i))
plt.xlabel("N_anchors")
plt.ylabel("Error")
plt.legend()
plt.show()
"""
print("HI")
for log in os.listdir(os.path.join(input_file, folder)):
if "DroneUser_DronePosLog_SimType_0" in log:
DataPath = os.path.join(input_file, folder, log)
TrajectoryPlotter(DataPath, ax1, ax2, ax3, ax4, fig)
plt.suptitle("Solo flight path: " + str(folder.split("_")[1]))
plt.show()
if plot_helped:
# Plot protagonist with help
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
for log in os.listdir(os.path.join(input_file, folder)):
if "DroneUser_DronePosLog_SimType_1" in log:
DataPath = os.path.join(input_file, folder, log)
TrajectoryPlotter(DataPath, ax1, ax2, ax3, ax4, fig)
plt.suptitle("Helped flight path: " + str(folder.split("_")[1]))
plt.show()
if plot_helpers:
# Plot helper drone trajectories
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
for i in range(N_helpers):
DataPath = os.path.join(input_file, folder, "DronePosLog" + str(i) + ".csv")
TrajectoryPlotter(DataPath, ax1, ax2, ax3, ax4, fig)
plt.suptitle("Helper flight paths: " + str(folder.split("_")[1]))
plt.show()
"""
| [
"matplotlib.pyplot.show",
"numpy.ndarray",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"os.listdir"
] | [((393, 478), 'os.path.join', 'os.path.join', (['"""C:\\\\Users\\\\<NAME>\\\\Documents\\\\GitHub\\\\uwb-simulator\\\\publication"""'], {}), "('C:\\\\Users\\\\<NAME>\\\\Documents\\\\GitHub\\\\uwb-simulator\\\\publication'\n )\n", (405, 478), False, 'import os\n'), ((1658, 1670), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1666, 1670), True, 'import numpy as np\n'), ((1718, 1740), 'numpy.ndarray', 'np.ndarray', (['(10, 7, 3)'], {}), '((10, 7, 3))\n', (1728, 1740), True, 'import numpy as np\n'), ((4310, 4389), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ErrorArray[0, 0:3, 0]', 'ErrorArray[0, 0:3, 1]'], {'label': '"""HelperFlight"""'}), "(ErrorArray[0, 0:3, 0], ErrorArray[0, 0:3, 1], label='HelperFlight')\n", (4321, 4389), True, 'import matplotlib.pyplot as plt\n'), ((4511, 4534), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""N_anchors"""'], {}), "('N_anchors')\n", (4521, 4534), True, 'import matplotlib.pyplot as plt\n'), ((4535, 4554), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (4545, 4554), True, 'import matplotlib.pyplot as plt\n'), ((4555, 4567), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4565, 4567), True, 'import matplotlib.pyplot as plt\n'), ((4569, 4579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4577, 4579), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1876), 'os.listdir', 'os.listdir', (['input_file'], {}), '(input_file)\n', (1864, 1876), False, 'import os\n'), ((2112, 2144), 'os.path.join', 'os.path.join', (['input_file', 'folder'], {}), '(input_file, folder)\n', (2124, 2144), False, 'import os\n'), ((2362, 2374), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2370, 2374), True, 'import numpy as np\n'), ((3154, 3186), 'os.path.join', 'os.path.join', (['input_file', 'folder'], {}), '(input_file, folder)\n', (3166, 3186), False, 'import os\n'), ((3739, 3751), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3747, 3751), True, 'import numpy as np\n'), ((2210, 
2271), 'os.path.join', 'os.path.join', (['input_file', 'folder', 'trajectory', '"""runs_data.csv"""'], {}), "(input_file, folder, trajectory, 'runs_data.csv')\n", (2222, 2271), False, 'import os\n'), ((3323, 3395), 'os.path.join', 'os.path.join', (['input_file', 'folder', 'trajectory', '"""DroneUser_runs_data0.csv"""'], {}), "(input_file, folder, trajectory, 'DroneUser_runs_data0.csv')\n", (3335, 3395), False, 'import os\n'), ((3571, 3643), 'os.path.join', 'os.path.join', (['input_file', 'folder', 'trajectory', '"""DroneUser_runs_data1.csv"""'], {}), "(input_file, folder, trajectory, 'DroneUser_runs_data1.csv')\n", (3583, 3643), False, 'import os\n')] |
import numpy as np
import pytest
import torch
from probflow.distributions import Normal
tod = torch.distributions
def is_close(a, b, tol=1e-3):
    """Return whether a and b agree to within the absolute tolerance tol."""
    difference = np.abs(a - b)
    return difference < tol
def test_Normal():
    """Exercise the probflow Normal distribution wrapper."""

    def normal_pdf(x, mu, sigma):
        # analytic Gaussian density used as the reference value
        coeff = 1.0 / np.sqrt(2 * np.pi * sigma * sigma)
        return coeff * np.exp(-np.power(x - mu, 2) / (2 * sigma * sigma))

    # default construction is the standard normal
    dist = Normal()
    assert dist.loc == 0
    assert dist.scale == 1

    # calling the object yields the torch backend distribution
    assert isinstance(dist(), tod.normal.Normal)

    # prob / log_prob agree with the analytic density, mean is zero
    assert is_close(dist.prob(0).numpy(), normal_pdf(0, 0, 1))
    assert is_close(dist.prob(1).numpy(), normal_pdf(1, 0, 1))
    assert is_close(dist.log_prob(0).numpy(), np.log(normal_pdf(0, 0, 1)))
    assert is_close(dist.log_prob(1).numpy(), np.log(normal_pdf(1, 0, 1)))
    assert dist.mean().numpy() == 0.0

    # sampling: a scalar draw, then a batch of ten
    draw = dist.sample()
    assert isinstance(draw, torch.Tensor)
    assert draw.ndim == 0
    draws = dist.sample(10)
    assert isinstance(draws, torch.Tensor)
    assert draws.ndim == 1
    assert draws.shape[0] == 10

    # parameters can be overridden ...
    dist = Normal(loc=3, scale=2)
    assert dist.loc == 3
    assert dist.scale == 2

    # ... but only with Tensor-like values
    for bad_loc, bad_scale in (("lalala", "lalala"), (0, "lalala"), ("lalala", 1)):
        with pytest.raises(TypeError):
            dist = Normal(loc=bad_loc, scale=bad_scale)
| [
"numpy.abs",
"numpy.power",
"probflow.distributions.Normal",
"pytest.raises",
"numpy.sqrt"
] | [((278, 286), 'probflow.distributions.Normal', 'Normal', ([], {}), '()\n', (284, 286), False, 'from probflow.distributions import Normal\n'), ((1208, 1230), 'probflow.distributions.Normal', 'Normal', ([], {'loc': '(3)', 'scale': '(2)'}), '(loc=3, scale=2)\n', (1214, 1230), False, 'from probflow.distributions import Normal\n'), ((159, 172), 'numpy.abs', 'np.abs', (['(a - b)'], {}), '(a - b)\n', (165, 172), True, 'import numpy as np\n'), ((1330, 1354), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1343, 1354), False, 'import pytest\n'), ((1371, 1407), 'probflow.distributions.Normal', 'Normal', ([], {'loc': '"""lalala"""', 'scale': '"""lalala"""'}), "(loc='lalala', scale='lalala')\n", (1377, 1407), False, 'from probflow.distributions import Normal\n'), ((1417, 1441), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1430, 1441), False, 'import pytest\n'), ((1458, 1487), 'probflow.distributions.Normal', 'Normal', ([], {'loc': '(0)', 'scale': '"""lalala"""'}), "(loc=0, scale='lalala')\n", (1464, 1487), False, 'from probflow.distributions import Normal\n'), ((1497, 1521), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1510, 1521), False, 'import pytest\n'), ((1538, 1567), 'probflow.distributions.Normal', 'Normal', ([], {'loc': '"""lalala"""', 'scale': '(1)'}), "(loc='lalala', scale=1)\n", (1544, 1567), False, 'from probflow.distributions import Normal\n'), ((525, 551), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * s * s)'], {}), '(2 * np.pi * s * s)\n', (532, 551), True, 'import numpy as np\n'), ((570, 588), 'numpy.power', 'np.power', (['(x - m)', '(2)'], {}), '(x - m, 2)\n', (578, 588), True, 'import numpy as np\n')] |
## Have Adam double check the conversion from bolometric to apparent magnitude
## ellc.lc is in arbitrary flux units... am I using this correctly?
import math
import scipy.special as ss
import scipy.stats
from scipy.interpolate import interp1d
import multiprocessing
import logging
import numpy as np
import os
import time
import pandas as pd
import csv
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
#OpSim (in OpSimRun1) - http://ops2.lsst.org/docs/current/architecture.html
import sqlite3
import astropy.stats as astroStats
from astropy import units, constants
from astropy.coordinates import SkyCoord, ICRS
#3rd party codes
import ellc
import gatspy
from gatspy import datasets, periodic
from gatspy.periodic import LombScargleMultiband, LombScargle, LombScargleFast, LombScargleMultibandFast
#only using a small portion of vespa, to get the A_V value, but NOTE vespa also can access TRILEGAL galaxy model...
import vespa
#extinction will allow me to convert A_V to any wavelength. Not sure which reference is best. I will use ccm89, for now.
#import extinction
#could use this instead, seems to be linked more closely to astropy : https://dust-extinction.readthedocs.io/en/latest/index.html
#pip install git+https://github.com/karllark/dust_extinction.git
from dust_extinction.parameter_averages import F99
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_']
class EclipsingBinary(object):
    """An eclipsing binary as observed by LSST.

    The caller sets the physical parameters (m1, m2, r1, r2, L1, L2,
    period, eccentricity, omega, inclination, t_zero, dist and the
    Galactocentric position xGx/yGx/zGx, plus the SED helper and the
    OpSim cursors), then calls initialize() to derive the quantities
    needed by ellc, and observe(filt) to generate a mock light curve
    per filter.
    """

    def __init__(self, *args, **kwargs):
        """Set every attribute to its default value."""
        # Mbol of the Sun, as "recommended" for Flower's bolometric correction in
        # http://iopscience.iop.org/article/10.1088/0004-6256/140/5/1158/pdf
        self.MbolSun = 4.73

        # --- these are defined by the user ---
        self.m1 = None  # *units.solMass
        self.m2 = None  # *units.solMass
        self.r1 = None  # *units.solRad
        self.r2 = None  # *units.solRad
        self.L1 = None  # *units.solLum
        self.L2 = None  # *units.solLum
        self.period = None  # *units.day
        self.eccentricity = None
        self.omega = None
        self.inclination = None
        self.t_zero = None
        self.dist = None  # *units.kpc
        self.xGx = None  # *units.parsec
        self.yGx = None  # *units.parsec
        self.zGx = None  # *units.parsec
        self.lineNum = 0
        self.verbose = False
        self.RV = 3.1

        # effective filter wavelengths in nm,
        # from https://www.lsst.org/scientists/keynumbers
        self.wavelength = {
            'u_': (324. + 395.)/2.,
            'g_': (405. + 552.)/2.,
            'r_': (552. + 691.)/2.,
            'i_': (691. + 818.)/2.,
            'z_': (818. + 921.)/2.,
            'y_': (922. + 997.)/2.
        }

        # --- these will be calculated after calling self.initialize() ---
        self.RL1 = None
        self.RL2 = None
        self.T1 = None
        self.T2 = None
        self.T12 = None
        self.L1 = None
        self.L2 = None
        self.g1 = None
        self.g2 = None
        self.a = None
        self.q = None
        self.f_c = None
        self.f_s = None
        self.R_1 = None
        self.R_2 = None
        self.sbratio = None
        self.RA = None
        self.Dec = None
        self.Mbol = None
        self.AV = None
        self.appMagMean = dict()
        self.L1f = dict()
        self.L2f = dict()
        self.appMagMeanAll = None
        self.absMagMean = dict()
        self.Ared = dict()
        self.BC = dict()

        # --- for light curves ---
        self.SED = None
        self.filters = filters
        self.M_H = 0
        self.ld_1 = 'claret'
        self.ld_2 = 'claret'
        self.grid_1 = 'default'
        self.grid_2 = 'default'
        self.shape_1 = 'sphere'
        self.shape_2 = 'sphere'
        self.sigma_sys = 0.005  # systematic photometric error
        self.obsDates = dict()
        self.appMag = dict()
        self.appMagObs = dict()
        self.appMagObsErr = dict()
        self.deltaMag = dict()
        self.maxDeltaMag = 0.
        self.doOpSim = True
        self.fieldCursor = None
        self.summaryCursor = None
        self.observable = True
        self.appmag_failed = 0
        self.incl_failed = 0
        self.period_failed = 0
        self.radius_failed = 0
        self.OpSimID = None
        self.years = 10.
        self.totaltime = 365. * self.years
        self.cadence = 3.
        self.Nfilters = 6.
        self.nobs = 0

        # per-filter parameters of the LSST single-visit photometric error
        # model (Ivezic et al. 2008, https://arxiv.org/pdf/0805.2366.pdf)
        self.sigmaDict = {
            'u_': {
                'gamma': 0.037,
                'seeing': 0.77,
                'm_sky': 22.9,
                'C_m': 22.92,
                'k_m': 0.451},
            'g_': {
                'gamma': 0.038,
                'seeing': 0.73,
                'm_sky': 22.3,
                'C_m': 24.29,
                'k_m': 0.163},
            'r_': {
                'gamma': 0.039,
                'seeing': 0.70,
                'm_sky': 21.2,
                'C_m': 24.33,
                'k_m': 0.087},
            'i_': {
                'gamma': 0.039,
                'seeing': 0.67,
                'm_sky': 20.5,
                'C_m': 24.20,
                'k_m': 0.065},
            'z_': {
                'gamma': 0.040,
                'seeing': 0.65,
                'm_sky': 19.6,
                'C_m': 24.07,
                'k_m': 0.043},
            'y_': {
                # from Ivezic et al 2008 - Table 2 (p26)
                'gamma': 0.0039,
                'seeing': 0.65,  # not given in Ivezic; the z value is reused
                'm_sky': 18.61,
                'C_m': 23.73,
                'k_m': 0.170}
        }

        # set within the "driver" code, for gatspy
        self.LSS = dict()
        self.LSSmodel = dict()
        self.LSM = -999.
        self.LSMmodel = None

        self.seed = None

    def getFlowerBCV(self, Teff):
        """V-band bolometric correction from Flower (1996), with the
        corrected coefficients of Torres (2010):
        http://iopscience.iop.org/article/10.1088/0004-6256/140/5/1158/pdf
        """
        lt = np.log10(Teff)
        # polynomial coefficients for the three log(Teff) regimes
        a = b = c = d = e = f = 0.
        if lt < 3.7:
            a = -0.190537291496456*10.**5.
            b = 0.155144866764412*10.**5.
            c = -0.421278819301717*10.**4.
            d = 0.381476328422343*10.**3.
        elif lt < 3.9:
            a = -0.370510203809015*10.**5.
            b = 0.385672629965804*10.**5.
            c = -0.150651486316025*10.**5.
            d = 0.261724637119416*10.**4.
            e = -0.170623810323864*10.**3.
        else:
            a = -0.118115450538963*10.**6.
            b = 0.137145973583929*10.**6.
            c = -0.636233812100225*10.**5.
            d = 0.147412923562646*10.**5.
            e = -0.170587278406872*10.**4.
            f = 0.788731721804990*10.**2.
        return a + b*lt + c*lt**2. + d*lt**3. + e*lt**4 + f*lt**5.

    # --- approximate functions for deriving stellar parameters ---

    def getRad(self, m):
        """Mass-radius relation R = m**eta in solar units (not needed with
        Katie's model, but kept here in case needed later)."""
        eta = 0.57 if m > 1 else 0.8
        return m**eta

    def getTeff(self, L, R):
        """Effective temperature [K] from luminosity and radius (solar units)."""
        logTeff = 3.762 + 0.25*np.log10(L) - 0.5*np.log10(R)
        return 10.**logTeff

    def getlogg(self, m, L, T):
        """log10 surface gravity from mass, luminosity and Teff (solar units, K)."""
        return np.log10(m) + 4.*np.log10(T) - np.log10(L) - 10.6071

    def getLum(self, m):
        """Piecewise mass-luminosity relation L = cons * m**coeff (solar units).

        BUGFIX: the original used independent `if` statements, so the final
        `else` clobbered every branch except 2 < m < 20 (e.g. m = 1 returned
        3200 instead of 1); rewritten as a proper elif chain.
        """
        if m < 0.43:
            cons, coeff = 0.23, 2.3
        elif m < 2.0:
            cons, coeff = 1.0, 4.0
        elif m < 20.0:
            cons, coeff = 1.5, 3.5
        else:
            cons, coeff = 3200.0, 1.0
        return cons*(m**coeff)

    def getafromP(self, m1, m2, P):
        """Semimajor axis (astropy Quantity, AU) from Kepler's third law.
        m1, m2 and P must be astropy Quantities."""
        return (((P**2.) * constants.G * (m1 + m2) / (4*np.pi**2.))**(1./3.)).decompose().to(units.AU)

    def Eggleton_RL(self, q, a):
        """Roche-lobe radius of Eggleton (1983), assuming synchronous
        rotation; pass the separation at pericenter for eccentric orbits."""
        return a*0.49*q**(2./3.)/(0.6*q**(2./3.) + np.log(1. + q**(1./3.)))

    def setLightCurve(self, filt, t_vis=30., X=1.):
        """Compute the ellc light curve for one filter and the noisy
        observed magnitudes / magnitude range derived from it.

        t_vis: visit exposure time [s]; X: airmass.
        """
        def getSig2Rand(filt, magnitude):
            """Squared random photometric error for a magnitude in this band
            (Ivezic et al. 2008; the y-band values may be wonky - need to
            check the seeing against others)."""
            m_5 = self.sigmaDict[filt]['C_m'] + (0.50*(self.sigmaDict[filt]['m_sky'] - 21.)) + (2.50*np.log10(0.7/self.sigmaDict[filt]['seeing'])) + (1.25*np.log10(t_vis/30.)) - (self.sigmaDict[filt]['k_m']*(X-1.))
            return (0.04 - self.sigmaDict[filt]['gamma'])*(10**(0.4*(magnitude - m_5))) + self.sigmaDict[filt]['gamma']*((10**(0.4*(magnitude - m_5)))**2)*(magnitude**2)

        # fallbacks in case the light curve cannot be evaluated below
        self.appMagObs[filt] = [None]
        self.deltaMag[filt] = [None]

        # in case the user did not initialize
        if self.T1 is None:
            self.initialize()

        # limb darkening: no coefficients are available for y, so fall back to z
        filtellc = 'z_' if filt == 'y_' else filt
        ldy_filt = ellc.ldy.LimbGravityDarkeningCoeffs(filtellc)
        a1_1, a2_1, a3_1, a4_1, y = ldy_filt(self.T1, self.g1, self.M_H)
        a1_2, a2_2, a3_2, a4_2, y = ldy_filt(self.T2, self.g2, self.M_H)
        ldc_1 = [a1_1, a2_1, a3_1, a4_1]
        ldc_2 = [a1_2, a2_2, a3_2, a4_2]

        # light curve: ellc returns flux in arbitrary units; we normalize it
        # below and work with the magnitude offset about the mean.
        # BUGFIX: removed leftover debug code that overwrote period,
        # inclination, R_1, R_2, sbratio and q with hard-coded test values
        # (and its debug print), which made every binary identical.
        lc = ellc.lc(self.obsDates[filt], ldc_1=ldc_1, ldc_2=ldc_2,
            t_zero=self.t_zero, period=self.period, a=self.a, q=self.q,
            f_c=self.f_c, f_s=self.f_s, ld_1=self.ld_1, ld_2=self.ld_2,
            radius_1=self.R_1, radius_2=self.R_2, incl=self.inclination, sbratio=self.sbratio,
            shape_1=self.shape_1, shape_2=self.shape_2, grid_1=self.grid_1, grid_2=self.grid_2)
        lc = lc/np.max(lc)

        if min(lc) > 0:
            # magnitude offset about the mean apparent magnitude; this is
            # mathematically the same as recomputing the absolute magnitude
            # from (L1f + L2f)*lc and adding the distance modulus + reddening
            magn = -2.5*np.log10(lc)
            self.appMag[filt] = self.appMagMean[filt] + magn

            # random photometric error (Ivezic 2008, Table 2) combined in
            # quadrature with the systematic floor
            sigma2_rand = getSig2Rand(filt, self.appMag[filt])
            self.appMagObsErr[filt] = ((self.sigma_sys**2.) + (sigma2_rand))**(1./2.)

            # perturb each magnitude by its own uncertainty
            self.appMagObs[filt] = np.array([np.random.normal(loc=x, scale=sig) for (x, sig) in zip(self.appMag[filt], self.appMagObsErr[filt])])
            self.deltaMag[filt] = abs(min(self.appMagObs[filt]) - max(self.appMagObs[filt]))

    # --- for the OpSim database ---

    def getFieldID(self, myRA, myDEC, deglim=3.5/2.):
        """Return the OpSim fieldID whose center is nearest to RA/Dec.

        The field of view is a 3.5-degree diameter; matches farther than
        deglim are flagged with -999.
        """
        RA = self.fieldCursor[:, 1].astype(float)
        Dec = self.fieldCursor[:, 2].astype(float)
        dbCoord = SkyCoord(ra=RA*units.degree, dec=Dec*units.degree, frame='icrs')
        inCoord = SkyCoord(ra=myRA*units.degree, dec=myDEC*units.degree, frame='icrs')
        imin, sep2d, dist3d = inCoord.match_to_catalog_sky(dbCoord)
        dbID = (self.fieldCursor[imin, 0]).astype('int')
        mask = np.where(sep2d.to(units.degree).value > deglim)
        # this check apparently isn't necessary because the entire sky is
        # covered with fieldIDs, but some fieldIDs may have no observation
        # dates (in the northern hemisphere)
        if len(mask[0]) > 0:
            print(mask[0])
            # BUGFIX: was myDec[mask] — an undefined name (NameError on this path)
            print("WARNING: coordinate outside LSST FOV", myRA[mask], myDEC[mask])
            dbID[mask] = -999
        if self.verbose:
            print("have Field ID", dbID)
        return dbID

    def getOpSimDates(self, filtin):
        """Return the OpSim observation dates (days since survey start) for
        this binary's field and the given filter, or [None] if none exist."""
        FieldID = self.summaryCursor[:, 0].astype('int')
        date = self.summaryCursor[:, 1].astype('float')
        filt = self.summaryCursor[:, 2]
        # OpSim filter names lack the trailing underscore used in this class
        posIDFilt = np.where(np.logical_and(FieldID == self.OpSimID, filt == filtin[:-1]))
        if self.verbose:
            print("posIDFilt = ", posIDFilt, filtin)
        OpSimdates = posIDFilt[0]
        if len(OpSimdates) < 1:
            return [None]
        if self.verbose:
            print('OpSimdates =', OpSimdates)
        # converting seconds to days
        return np.array([float(d) for d in date[OpSimdates]])/86400.

    def checkIfObservable(self):
        """Set self.observable = False (and the per-reason flags) when the
        system is too bright/faint, never eclipses, has a period longer
        than the survey, or has unphysical fractional radii."""
        if self.appMagMeanAll <= 11. or self.appMagMeanAll >= 24.:
            self.appmag_failed = 1
        # minimum inclination that still produces an eclipse
        min_incl = 90. - np.arctan2(self.r1 + self.r2, 2.*self.a)*180./np.pi
        if self.inclination <= min_incl:
            self.incl_failed = 1
        if self.period >= self.totaltime:
            self.period_failed = 1
        if self.R_1 <= 0 or self.R_1 >= 1 or self.R_2 <= 0 or self.R_2 >= 1 or self.R_1e >= 1 or self.R_2e >= 1:
            self.radius_failed = 1
        if self.radius_failed or self.period_failed or self.incl_failed or self.appmag_failed:
            self.observable = False

    def initializeSeed(self):
        """Seed numpy's RNG (randomly when self.seed is None)."""
        if self.seed is None:
            np.random.seed()
        else:
            np.random.seed(seed=self.seed)

    def initialize(self):
        """Derive every quantity needed by ellc and the observing code from
        the user-supplied physical parameters.

        Note: the random seed is initialized in LSSTEBworker.py, not here.
        """
        self.q = self.m2/self.m1
        self.T1 = min(50000., max(3500., self.getTeff(self.L1, self.r1)))
        self.T2 = min(50000., max(3500., self.getTeff(self.L2, self.r2)))
        self.g1 = min(5., max(0., self.getlogg(self.m1, self.L1, self.T1)))
        self.g2 = min(5., max(0., self.getlogg(self.m2, self.L2, self.T2)))
        self.a = self.getafromP(self.m1*units.solMass, self.m2*units.solMass, self.period*units.day).to(units.solRad).value
        self.f_c = np.sqrt(self.eccentricity)*np.cos(self.omega*np.pi/180.)
        self.f_s = np.sqrt(self.eccentricity)*np.sin(self.omega*np.pi/180.)
        self.R_1 = (self.r1/self.a)
        self.R_2 = (self.r2/self.a)
        self.sbratio = self.L2/self.L1
        # fractional Roche-lobe filling at pericenter
        self.R_1e = self.r1/self.Eggleton_RL(self.m1/self.m2, self.a * (1. - self.eccentricity))
        self.R_2e = self.r2/self.Eggleton_RL(self.m2/self.m1, self.a * (1. - self.eccentricity))

        # estimate a combined Teff value, as in the N-body codes
        logLb = np.log10(self.L1 + self.L2)
        logRb = 0.5*np.log10(self.r1**2. + self.r2**2.)
        self.T12 = 10.**(3.762 + 0.25*logLb - 0.5*logRb)

        coord = SkyCoord(x=self.xGx, y=self.yGx, z=self.zGx, unit='pc', representation='cartesian', frame='galactocentric')
        self.RA = coord.icrs.ra.to(units.deg).value
        self.Dec = coord.icrs.dec.to(units.deg).value

        self.Mbol = self.MbolSun - 2.5*np.log10(self.L1 + self.L2)

        # account for reddening and the different filter throughput
        # functions (currently related to a blackbody)
        self.appMagMeanAll = 0.
        self.AV = vespa.stars.extinction.get_AV_infinity(self.RA, self.Dec, frame='icrs')
        ext = F99(Rv=self.RV)
        for f in self.filters:
            self.Ared[f] = ext(self.wavelength[f]*units.nm)*self.AV
            BCf1 = self.SED.getBCf(self.T1*units.K, f)
            BCf2 = self.SED.getBCf(self.T2*units.K, f)
            self.L1f[f] = self.L1 * BCf1
            self.L2f[f] = self.L2 * BCf2
            # NOTE(review): this uses the Sun's *bolometric* magnitude with
            # per-filter luminosities in LSun; may not be strictly correct —
            # confirm the intended zero point.
            self.absMagMean[f] = self.MbolSun - 2.5*np.log10(self.L1f[f] + self.L2f[f])
            # dist is in kpc, so dist*100 = d[pc]/10 in the distance modulus
            self.appMagMean[f] = self.absMagMean[f] + 5.*np.log10(self.dist*100.) + self.Ared[f]
            self.LSS[f] = -999.
            self.appMagMeanAll += self.appMagMean[f]
        self.appMagMeanAll /= len(self.filters)

        # check if we can observe this (not accounting for the location in galaxy)
        self.checkIfObservable()

        # if we're using OpSim, get the field ID where this binary is observed
        if self.doOpSim and self.observable:
            self.OpSimID = self.getFieldID(self.RA, self.Dec)

    def observe(self, filt):
        """Generate the observation dates for one filter (from OpSim, or a
        uniform random cadence) and compute the corresponding light curve."""
        if self.doOpSim:
            self.obsDates[filt] = self.getOpSimDates(filt)
        else:
            nobs = int(round(self.totaltime / (self.cadence * self.Nfilters)))
            self.obsDates[filt] = np.sort(self.totaltime * np.random.random(size=nobs))
        self.nobs += len(self.obsDates[filt])
        # get the light curve and related information
        self.setLightCurve(filt)
###########################################################################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
#This is copied from Katie's GxRealizationThinDisk.py, but here we've created a Class
#This will draw the binaries from the Galaxy realization
class BreivikGalaxy(object):
    """Monte-Carlo Galaxy realization of a binary-star population.

    Adapted from Katie Breivik's GxRealizationThinDisk.py.  A gaussian KDE is
    fit (in logit-transformed space) to a fixed binary population read from
    ``GalaxyFile`` and resampled to draw ``n_bin`` binaries, which are then
    assigned positions and orientations in the Galaxy.  Worker processes each
    write a ``gxRealization_<x>_<popID>.dat`` file; ``LSSTsim`` concatenates
    them into one array.
    """

    def __init__(self, *args, **kwargs):
        self.verbose = False
        self.GalaxyFile = '../input/dat_ThinDisk_12_0_12_0.h5'  # for Katie's model
        self.GalaxyFileLogPrefix = '../input/fixedPopLogCm_'
        self.n_bin = 100000   # number of binaries to draw for the realization
        self.n_cores = 4      # number of worker processes
        self.popID = '0012'
        self.seed = None

    def GxSample(self, x, pop, sampleKernel, bw, nEcc, Tobs, output):
        """Draw one sub-sample of binaries (runs in a worker process).

        Resamples ``sampleKernel`` (a KDE fit in logit space), back-transforms
        the draws to physical units, assigns each binary a position and
        orientation in the Galaxy, writes the (nSample, 15) result to
        ``gxRealization_<x>_<popID>.dat`` and puts its shape on ``output``.
        """

        def untransform(dat, datSet):
            # map [0,1]-scaled values back onto the range spanned by datSet
            datMin = min(datSet)
            datMax = max(datSet)
            return dat * (datMax - datMin) + datMin

        # CONSTANTS (SI unless noted)
        ##############################################################################
        G = 6.67384 * math.pow(10, -11.0)
        c = 2.99792458 * math.pow(10, 8.0)
        parsec = 3.08567758 * math.pow(10, 16)
        Rsun = 6.955 * math.pow(10, 8)
        Msun = 1.9891 * math.pow(10, 30)
        day = 86400.0
        rsun_in_au = 215.0954
        day_in_year = 365.242
        sec_in_day = 86400.0
        sec_in_hour = 3600.0
        hrs_in_day = 24.0
        sec_in_year = 3.15569 * 10 ** 7.0
        # NOTE(review): this shadows the Tobs argument, as in the original code
        Tobs = 3.15569 * 10 ** 7.0
        geo_mass = G / c ** 2
        m_in_AU = 1.496 * 10 ** 11.0

        ##### UNITS EXPECTED FOR POPULATION ###################
        # mass: Msun, orbital period: days, Tobs: seconds     #
        #######################################################

        # re-seed the RNG so each worker process gets an independent stream
        np.random.seed()

        # solar coordinates in the galaxy, in parsecs, from
        # (Chapter 8 of Galactic Structure and Stellar Pops book) Yoshii (2013)
        x_sun = 8000.0
        y_sun = 0.0
        z_sun = 25

        # file to save this worker's gx data to
        gxFile = 'gxRealization_' + str(x) + '_' + str(self.popID) + '.dat'

        # resample the KDE: each column is one binary, rows are the 8 parameters
        nSample = int(self.n_bin / float(self.n_cores))
        dataSample = sampleKernel.resample(nSample)

        # invert the logit transform that was applied before the KDE fit
        m1T = ss.expit(dataSample[0, :])
        m2T = ss.expit(dataSample[1, :])
        porbT = ss.expit(dataSample[2, :])
        eccT = ss.expit(dataSample[3, :])
        rad1T = ss.expit(dataSample[4, :])
        rad2T = ss.expit(dataSample[5, :])
        Lum1T = ss.expit(dataSample[6, :])
        Lum2T = ss.expit(dataSample[7, :])

        m1 = untransform(m1T, pop['m1'])
        m2 = untransform(m2T, pop['m2'])
        porb = untransform(porbT, np.log10(pop['porb']))
        ecc = eccT
        # clip eccentricities back into [0, 1]
        for ii, e in enumerate(ecc):
            if e < 0:
                ecc[ii] = abs(e)
            elif e > 1:
                # reflect values > 1 back below 1.
                # BUGFIX: the original assigned the whole array, 1-(ecc-1),
                # to a single element, which raises a numpy ValueError.
                ecc[ii] = 1 - (e - 1)
        rad1 = 10 ** untransform(rad1T, pop['rad1'])
        rad2 = 10 ** untransform(rad2T, pop['rad2'])
        Lum1 = 10 ** untransform(Lum1T, pop['Lum1'])
        Lum2 = 10 ** untransform(Lum2T, pop['Lum2'])

        # COMPUTE THE POSITION AND ORIENTATION OF EACH BINARY
        ##############################################################################
        # radial position: exponential disk, scale length 2.5 (kpc)
        a_0_r = np.random.uniform(0, 1.0, len(m1))
        r = -2.5 * np.log(1 - a_0_r)
        # azimuthal position of the star
        phi = np.random.uniform(0, 2 * np.pi, len(m1))
        # z position of the star, with r as a parameter
        a_0_zr = np.random.uniform(0, 1.0, len(m1))
        z = 1 / 1.42 * np.arctanh(a_0_zr / 0.704 - 1)

        # convert to cartesian coordinates in parsecs
        xGX = r * np.cos(phi) * 1000.0
        yGX = r * np.sin(phi) * 1000.0
        zGX = z * 1000.0

        # distance to Earth/LISA/us in kiloparsecs
        dist = ((xGX - x_sun) ** 2 + (yGX - y_sun) ** 2 + (zGX - z_sun) ** 2) ** (1 / 2.0)
        dist_kpc = dist / 1000.0

        # random orientation angles (isotropic inclination)
        inc = np.arccos(2. * np.random.uniform(0, 1.0, len(m1)) - 1.)
        OMEGA = np.random.uniform(0, 2 * math.pi, len(m1))
        omega = np.random.uniform(0, 2 * math.pi, len(m1))

        binDat = np.vstack((m1, m2, porb, ecc, rad1, rad2, Lum1, Lum2,
                            xGX, yGX, zGX, dist_kpc, inc, OMEGA, omega)).T

        # index of (near-)eclipsing systems; kept from the original for reference
        radTotAU = (rad1 + rad2) / rsun_in_au
        radAng = radTotAU / dist
        binEclipseIndex, = np.where(radAng > inc * 4.8e-6)

        np.savetxt(gxFile, binDat, delimiter=',')
        output.put(np.shape(binDat))

    def LSSTsim(self):
        """Build the full Galaxy realization and return it as one array.

        Fits a gaussian KDE to the logit-transformed fixed population, spawns
        ``n_cores`` workers running :meth:`GxSample`, then concatenates the
        per-process output files into a single (N, 15) array and removes them.
        """

        def paramTransform(dat):
            # scale dat onto (0,1), padded so the logit stays finite at the edges
            datMin = min(dat) - 0.0001
            datMax = max(dat) + 0.0001
            return (dat - datMin) / (datMax - datMin)

        # SET TIME TO TRACK COMPUTATION TIME
        start_time = time.time()

        # CONSTANTS
        ##############################################################################
        G = 6.67384 * 10. ** -11.0
        c = 2.99792458 * 10. ** 8.0
        parsec = 3.08567758 * 10. ** 16.
        Rsun = 6.955 * 10. ** 8.
        Msun = 1.9891 * 10. ** 30.
        day = 86400.0
        rsun_in_au = 215.0954
        day_in_year = 365.242
        sec_in_day = 86400.0
        sec_in_hour = 3600.0
        hrs_in_day = 24.0
        sec_in_year = 3.15569 * 10 ** 7.0
        Tobs = 3.15569 * 10 ** 7.0
        geo_mass = G / c ** 2
        m_in_AU = 1.496 * 10 ** 11.0
        mTotDisk = 2.15 * 10 ** 10  # total mass of the thin disk [Msun]

        # STELLAR TYPES - KW
        # 0..6: MS through AGB; 7..9: naked He stars; 10..12: WDs;
        # 13: neutron star; 14: black hole; 15: massless supernova

        # LOAD THE FIXED POPULATION DATA
        # units: mass [Msun], porb [year], sep [Rsun]
        # NOTE(review): `dts` documents the fixed-population columns but is unused
        dts = {'names': ('binNum', 'tBornBin', 'Time', 'tBorn1', 'tBorn2', 'commonEnv', 'id1', 'id2',
                         'm1', 'm2', 'm1Init', 'm2Init', 'Lum1', 'Lum2', 'rad1', 'rad2',
                         'T1', 'T2', 'massc1', 'massc2', 'radc1', 'radc2', 'menv1', 'menv2',
                         'renv1', 'renv2', 'spin1', 'spin2', 'rrol1', 'rrol2', 'porb', 'sep', 'ecc'),
               'formats': ('i', 'f', 'f', 'f', 'f', 'i', 'i', 'i',
                           'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f',
                           'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f',
                           'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f')}

        FixedPop = pd.read_hdf(self.GalaxyFile, key='bcm')
        FixedPopLog = np.loadtxt(self.GalaxyFileLogPrefix + self.popID + '.dat', delimiter=',')

        # number at present day, normalized by total mass of the Gx component
        mTotFixed = sum(FixedPopLog[:, 2])
        nPop = int(len(FixedPop) * mTotDisk / mTotFixed)
        print('The number of binaries in the Gx for: ' + str(self.popID) + ' is: ' + str(nPop))

        # TRANSFORM THE FIXED POP DATA TO HAVE LIMITS [0,1] AND COMPUTE THE
        # BINWIDTH TO USE FOR THE KDE SAMPLE (see knuth_bin_width in astropy)
        # UNITS: mass [Msun], period [log10(years)], luminosity [Lsun], radius [Rsun]
        FixedPop['m1'] = FixedPop['mass_1']
        FixedPop['m2'] = FixedPop['mass_2']
        FixedPop['Lum1'] = FixedPop['lumin_1']
        FixedPop['Lum2'] = FixedPop['lumin_2']
        FixedPop['rad1'] = FixedPop['rad_1']
        FixedPop['rad2'] = FixedPop['rad_2']

        m1Trans = ss.logit(paramTransform(FixedPop['m1']))
        bwM1 = astroStats.scott_bin_width(m1Trans)
        m2Trans = ss.logit(paramTransform(FixedPop['m2']))
        bwM2 = astroStats.scott_bin_width(m2Trans)
        porbTrans = ss.logit(paramTransform(np.log10(FixedPop['porb'])))
        bwPorb = astroStats.scott_bin_width(porbTrans)
        Lum1Trans = ss.logit(paramTransform(FixedPop['Lum1']))
        # NOTE(review): the luminosity bin widths are computed on the raw columns,
        # unlike the other parameters (which use the transformed data) — confirm intent
        bwLum1 = astroStats.scott_bin_width(FixedPop['Lum1'])
        Lum2Trans = ss.logit(paramTransform(FixedPop['Lum2']))
        bwLum2 = astroStats.scott_bin_width(FixedPop['Lum2'])

        # The eccentricity is already transformed; only fit a KDE to ecc if ecc != 0.0
        eIndex, = np.where(FixedPop['ecc'] > 1e-2)
        if len(eIndex) > 50:
            eccTrans = FixedPop['ecc']
            # clamp away from 0/1 so the logit stays finite
            for jj in eccTrans.keys():
                if eccTrans[jj] > 0.999:
                    eccTrans[jj] = 0.999
                elif eccTrans[jj] < 1e-4:
                    eccTrans[jj] = 1e-4
            eccTrans = ss.logit(eccTrans)
            bwEcc = astroStats.scott_bin_width(eccTrans)
        else:
            bwEcc = 100.0

        rad1Trans = ss.logit(paramTransform(FixedPop['rad1']))
        bwRad1 = astroStats.scott_bin_width(rad1Trans)
        rad2Trans = ss.logit(paramTransform(FixedPop['rad2']))
        bwRad2 = astroStats.scott_bin_width(rad2Trans)
        popBw = min(bwEcc, bwPorb, bwM1, bwM2, bwLum1, bwLum2, bwRad1, bwRad2)

        # GENERATE THE DATA LIST DEPENDING ON THE TYPE OF COMPACT BINARY TYPE
        type1Save = 1
        if type1Save < 14 and len(eIndex) > 50:
            print('both bright stars and eccentric')
            datList = np.array((m1Trans, m2Trans, porbTrans, eccTrans,
                                rad1Trans, rad2Trans, Lum1Trans, Lum2Trans))
        elif type1Save < 14 and len(eIndex) <= 50:
            # BUGFIX: was `< 50`, which left datList undefined when len(eIndex) == 50
            print('both bright stars and circular')
            datList = np.array((m1Trans, m2Trans, porbTrans,
                                rad1Trans, rad2Trans, Lum1Trans, Lum2Trans))

        # GENERATE THE KDE FOR THE DATA LIST
        if self.verbose:
            print(popBw)
            print(datList)
        sampleKernel = scipy.stats.gaussian_kde(datList)  # , bw_method=popBw)

        # CALL THE MONTE CARLO GALAXY SAMPLE CODE
        print('nSample: ' + str(self.n_bin))
        output = multiprocessing.Queue()
        processes = [multiprocessing.Process(target=self.GxSample,
                                             args=(x, FixedPop, sampleKernel, popBw,
                                                   len(eIndex), Tobs, output))
                     for x in range(self.n_cores)]
        for p in processes:
            p.start()
        for p in processes:
            p.join()
        nSRC = [output.get() for p in processes]
        print('The number of sources in pop ' + self.popID + ' is ', nSRC)

        # concatenate the per-process realization files, then clean them up
        gxDatTot = []
        for kk in range(self.n_cores):
            fname = 'gxRealization_' + str(kk) + '_' + str(self.popID) + '.dat'
            if os.path.getsize(fname) > 0:
                gxReal = np.loadtxt(fname, delimiter=',')
            else:
                gxReal = []
            if len(gxReal) > 0:
                gxDatTot.append(gxReal)
            os.remove(fname)
        gxDatSave = np.vstack(gxDatTot)

        return gxDatSave
###########################################################################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
class SED(object):
    """Blackbody SED utilities for per-filter bolometric corrections.

    Holds the LSST filter throughput curves and computes, for a blackbody of
    a given temperature, the fraction of (normalized) flux transmitted by
    each filter.  Could be improved with a Kurucz model.
    """

    def __init__(self, *args, **kwargs):
        self.filterFilesRoot = '../input/filters/'
        # NOTE(review): `filters` is a module-level list of filter names
        # (e.g. 'u_', 'g_', ...) defined elsewhere in this file
        self.filters = filters
        self.filterThroughput = dict()  # filter name -> {'w': wavelengths, 't': throughput}
        self.BCf = dict()               # filter name -> bolometric-correction factor

    def readFilters(self):
        """Read the LSST baseline throughput curve for each filter."""
        for f in self.filters:
            # https://github.com/lsst/throughputs/tree/master/baseline
            fname = self.filterFilesRoot + 'filter_' + f[0] + '.dat'
            df = pd.read_csv(fname, delim_whitespace=True, header=None,
                             names=['w', 't'], skiprows=6)
            self.filterThroughput[f] = {'w': df['w'].values * units.nm,
                                        't': df['t'].values}

    def blackbody(self, w, T):
        """Planck function B_lambda(w, T), in erg/s/cm^2/AA/steradian."""
        Bl = 2. * constants.h * constants.c ** 2. / w ** 5. / \
            (np.exp(constants.h * constants.c / (w * constants.k_B * T)) - 1.)
        return Bl.decompose()

    def getBCf(self, T, filt, dw=0.1, wmin=10, wmax=10000):
        """Return the normalized blackbody flux fraction transmitted by `filt`.

        T is the blackbody temperature; the spectrum is evaluated on a grid
        from wmin to wmax nm with spacing dw nm.
        """
        # could improve this with a Kurucz model
        w = np.arange(wmin, wmax, dw) * units.nm
        f = self.blackbody(w, T).value
        # normalize by the peak flux
        norm = max(f)
        fn = f / norm
        ftot = np.sum(fn)
        ft = np.interp(w, self.filterThroughput[filt]['w'], self.filterThroughput[filt]['t'])
        tot = np.sum(fn * ft)
        return tot / ftot

    def setBCf(self, T, dw=0.1, wmin=10, wmax=10000):
        """Compute and store the BC factor for every filter at temperature T."""
        # could improve this with a Kurucz model
        w = np.arange(wmin, wmax, dw) * units.nm
        f = self.blackbody(w, T).value
        norm = max(f)
        fn = f / norm
        ftot = np.sum(fn)
        for filt in self.filters:
            ft = np.interp(w, self.filterThroughput[filt]['w'], self.filterThroughput[filt]['t'])
            tot = np.sum(fn * ft)
            # BUGFIX: was `self.BCf[f] = ...`, but `f` is the flux array
            # (an unhashable numpy array) — the dict must be keyed by the
            # filter name, as getBCf does
            self.BCf[filt] = tot / ftot
###########################################################################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
class LSSTEBworker(object):
    """Driver for simulating LSST observations of eclipsing binaries.

    Holds the run configuration and OpSim database cursors, builds
    EclipsingBinary objects from galaxy-realization rows, runs the
    per-binary simulation (ellc light curves + gatspy periodograms),
    and writes results to CSV.
    """

    def __init__(self, *args, **kwargs):
        # NOTE: these need to be defined on the command line.
        # The default, if not defined, will be False
        self.do_plot = False
        self.verbose = False
        self.doOpSim = False

        self.useFast = True     # use the fast gatspy Lomb-Scargle variants when possible
        self.doLSM = True       # also run the multiband (LSM) Lomb-Scargle fit
        self.do_parallel = False

        self.years = 10.
        self.totaltime = 365. * self.years  # survey duration [days]
        self.cadence = 3.                   # days between visits (all filters)

        # NOTE(review): `filters` is a module-level list of filter names
        self.filters = filters

        self.n_bin = 100000
        self.n_band = 2   # gatspy Nterms_band
        self.n_base = 2   # gatspy Nterms_base
        self.n_cores = 1

        self.ofile = 'output_file.csv'               # output file name
        self.dbFile = '../db/minion_1016_sqlite.db'  # the OpSim database
        self.filterFilesRoot = '../input/filters/'
        self.db = None
        self.cursor = None

        # dictionaries -- could be handled by the multiprocessing manager,
        # redefined in driver
        self.return_dict = dict()

        self.csvwriter = None  # will hold the csvwriter object

        # counters for how many EBs we could potentially observe with LSST
        self.n_totalrun = 0
        self.n_appmag_failed = 0
        self.n_incl_failed = 0
        self.n_period_failed = 0
        self.n_radius_failed = 0

        self.seed = None
        self.SED = None

    # database manipulation
    def getCursors(self):
        """Open the OpSim SQLite database and cache the summary/field tables."""
        self.db = sqlite3.connect(self.dbFile)
        cursor = self.db.cursor()

        cursor.execute("SELECT fieldid, expDate, filter FROM summary")
        self.summaryCursor = np.array(cursor.fetchall())  # NOTE: this takes a LONG time
        print("have summary cursor.")

        cursor.execute("SELECT fieldid, fieldra, fielddec FROM field")
        self.fieldCursor = np.array(cursor.fetchall())  # NOTE: this takes a LONG time
        print("have field cursor.")

    def make_gatspy_plots(self, j):
        """Plot phased light curves and periodograms for binary j; save a PNG."""
        EB = self.return_dict[j]

        # print([matplotlib.colors.to_hex(c) for c in colors])
        colors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd',
                  u'#8c564b', u'#e377c2', u'#7f7f7f', u'#bcbd22', u'#17becf']

        f, ax = plt.subplots(len(self.filters) + 1, 2)

        LSM = EB.LSM
        period = EB.period
        pds = np.linspace(0.2, 2. * period, 10000)
        for ii, filt in enumerate(self.filters):
            drng = max(EB.obsDates[filt]) - min(EB.obsDates[filt])
            phase_obs = np.array([(tt % period) / period for tt in EB.obsDates[filt]])
            scores = EB.LSSmodel[filt].score(pds)
            mag_obs = EB.appMagObs[filt]
            mag = EB.appMag[filt]
            LSS = EB.LSS[filt]

            # left column: phased light curve (magnitude axis inverted)
            sx = np.argsort(phase_obs)
            ax[ii][0].plot(phase_obs[sx], np.array(mag_obs)[sx], 'o', mfc='none', mec=colors[ii])
            ax[ii][0].plot(phase_obs[sx], np.array(mag)[sx], color="black")
            ax[ii][0].set_ylim(ax[ii][0].get_ylim()[::-1])
            ax[ii][0].set_xlim(0, 1)
            ax[ii][0].set_ylabel(filt)
            ax[ii][0].set_xticklabels([])

            # right column: single-band periodogram, best (LSS) and true periods
            ax[ii][1].plot(pds, scores, color=colors[ii])
            ax[ii][1].plot([LSS, LSS], [0, 1], color="dimgray", lw=2)
            ax[ii][1].plot([period, period], [0, 1], '--', color="black")
            ax[ii][1].set_xlim(0, 2. * period)
            ax[ii][1].set_ylim(0, max(scores))
            ax[ii][1].set_xticklabels([])
            ax[ii][1].set_yticklabels([])

        if (self.doLSM):
            # bottom-right panel: multiband periodogram
            plt.locator_params(axis='y', nticks=2)
            P_multi = EB.LSMmodel.periodogram(pds)
            ii = len(self.filters)
            ax[ii][1].plot(pds, P_multi, color=colors[ii])
            ax[ii][1].set_xlim(0, 2. * period)
            ax[ii][1].set_ylim(0, max(P_multi))
            ax[ii][1].plot([period, period], [0, 1], '--', color="black")
            ax[ii][1].plot([LSM, LSM], [0, 1], ':', color="dimgray")

        f.subplots_adjust(hspace=0.1, wspace=0.1)
        f.delaxes(ax[ii][0])
        ax[ii - 1][0].set_xlabel("phase")
        ax[ii][1].set_xlabel("period (days)")
        ax[ii][1].set_yticklabels([])

        # BUGFIX: was str(seed) — `seed` is not defined in this scope;
        # use the worker's own seed attribute
        f.savefig("lc_gatspy_fig_" + str(self.seed).rjust(10, '0') + ".png",
                  bbox_inches='tight')

    def run_ellc_gatspy(self, j):
        """The general simulation: ellc light curves and gatspy periodograms for binary j."""
        EB = self.return_dict[j]

        # accumulators for the multiband gatspy fit
        allObsDates = np.array([])
        allAppMagObs = np.array([])
        allAppMagObsErr = np.array([])
        allObsFilters = np.array([])

        if (self.verbose):
            print("in run_ellc_gatspy")

        for i, filt in enumerate(self.filters):
            # observe the EB (get dates, create the light curve for this filter)
            EB.observe(filt)
            EB.LSS[filt] = -999.

            if (EB.obsDates[filt][0] is not None and min(EB.appMagObs[filt]) > 0):
                # run gatspy for this filter
                drng = max(EB.obsDates[filt]) - min(EB.obsDates[filt])
                # the fast variant needs enough observations to be reliable
                if (self.useFast and len(EB.obsDates[filt]) > 50):
                    model = LombScargleFast(fit_period=True)
                else:
                    model = LombScargle(fit_period=True)
                model.optimizer.period_range = (0.2, drng)
                model.fit(EB.obsDates[filt], EB.appMagObs[filt], EB.appMagObsErr[filt])
                EB.LSS[filt] = model.best_period
                EB.LSSmodel[filt] = model
                EB.maxDeltaMag = max(EB.deltaMag[filt], EB.maxDeltaMag)

                # to use for the multiband fit
                allObsDates = np.append(allObsDates, EB.obsDates[filt])
                allAppMagObs = np.append(allAppMagObs, EB.appMagObs[filt])
                allAppMagObsErr = np.append(allAppMagObsErr, EB.appMagObsErr[filt])
                allObsFilters = np.append(allObsFilters, np.full(len(EB.obsDates[filt]), filt))

                if (self.verbose):
                    print(j, 'filter = ', filt)
                    print(j, 'obsDates = ', EB.obsDates[filt][0:10])
                    print(j, 'appMagObs = ', EB.appMagObs[filt][0:10])
                    print(j, 'delta_mag = ', EB.deltaMag[filt])
                    print(j, 'LSS = ', EB.LSS[filt])

        if (len(allObsDates) > 0 and self.doLSM):
            drng = max(allObsDates) - min(allObsDates)
            if (self.useFast and len(allObsDates) > 50 * len(self.filters)):
                model = LombScargleMultibandFast(fit_period=True)
            else:
                model = LombScargleMultiband(Nterms_band=self.n_band,
                                             Nterms_base=self.n_base, fit_period=True)
            model.optimizer.period_range = (0.2, drng)
            model.fit(allObsDates, allAppMagObs, allAppMagObsErr, allObsFilters)
            EB.LSM = model.best_period
            EB.LSMmodel = model

            if (self.verbose):
                print(j, 'LSM =', EB.LSM)

        # not sure if I need to do this...
        self.return_dict[j] = EB

    def getEB(self, line, i):
        """Build an EclipsingBinary from one row of the galaxy realization.

        Row columns: m1, m2, log10(porb/days), ecc, rad1, rad2, Lum1, Lum2,
        xGX, yGX, zGX [pc], dist [kpc], inc [rad], OMEGA, omega.
        """
        EB = EclipsingBinary()

        # EB.seed = self.seed + i
        EB.initializeSeed()
        EB.SED = self.SED

        # solar units
        EB.m1 = line[0]
        EB.m2 = line[1]
        EB.r1 = line[4]
        EB.r2 = line[5]
        EB.L1 = line[6]
        EB.L2 = line[7]
        EB.period = 10. ** line[2]  # days
        EB.eccentricity = line[3]
        EB.inclination = line[12] * 180. / np.pi  # degrees
        EB.omega = line[13]  # radians
        EB.dist = line[11]  # kpc
        # pc
        EB.xGx = line[8]
        EB.yGx = line[9]
        EB.zGx = line[10]

        # random epoch of primary eclipse within one period
        EB.t_zero = np.random.random() * EB.period

        # for observations
        EB.doOpSim = self.doOpSim
        EB.years = self.years
        EB.totaltime = self.totaltime
        EB.cadence = self.cadence
        EB.Nfilters = len(self.filters)
        EB.verbose = self.verbose
        if (self.doOpSim):
            EB.summaryCursor = self.summaryCursor
            EB.fieldCursor = self.fieldCursor
        EB.initialize()

        # some counters for how many EBs we could potentially observe with LSST
        self.n_totalrun += 1
        self.n_appmag_failed += EB.appmag_failed
        self.n_incl_failed += EB.incl_failed
        self.n_period_failed += EB.period_failed
        self.n_radius_failed += EB.radius_failed

        return EB

    def writeOutputLine(self, EB, header=False):
        """Write one CSV row for EB, or the header row when header=True."""
        if (header):
            self.csvwriter.writerow(['p', 'm1', 'm2', 'r1', 'r2', 'e', 'i', 'RA', 'Dec', 'd',
                                     'nobs', 'appMagMean', 'maxDeltaMag', 'mag_failure',
                                     'incl_failure', 'period_failure', 'radius_failure',
                                     'u_LSS_PERIOD', 'g_LSS_PERIOD', 'r_LSS_PERIOD',
                                     'i_LSS_PERIOD', 'z_LSS_PERIOD', 'y_LSS_PERIOD',
                                     'LSM_PERIOD'])
        else:
            output = [EB.period, EB.m1, EB.m2, EB.r1, EB.r2, EB.eccentricity, EB.inclination,
                      EB.RA, EB.Dec, EB.dist, EB.nobs, EB.appMagMeanAll, EB.maxDeltaMag,
                      EB.appmag_failed, EB.incl_failed, EB.period_failed, EB.radius_failed]

            # per-filter gatspy (LSS) periods, then the multiband (LSM) period
            for filt in self.filters:
                output.append(EB.LSS[filt])
            output.append(EB.LSM)
            self.csvwriter.writerow(output)

    def initialize(self):
        """Seed numpy's RNG and set up the SED filter reader."""
        if (self.seed is None):  # idiom fix: was `== None`
            np.random.seed()
        else:
            np.random.seed(seed=self.seed)

        self.SED = SED()
        self.SED.filterFilesRoot = self.filterFilesRoot
        self.SED.readFilters()
| [
"numpy.random.seed",
"numpy.sum",
"numpy.arctan2",
"pandas.read_csv",
"numpy.shape",
"numpy.argsort",
"scipy.special.logit",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"multiprocessing.Queue",
"numpy.random.normal",
"numpy.interp",
"ellc.ldy.LimbGravityDarkeningCoeffs",
"numpy.arctanh",
... | [((4958, 4972), 'numpy.log10', 'np.log10', (['Teff'], {}), '(Teff)\n', (4966, 4972), True, 'import numpy as np\n'), ((8274, 8319), 'ellc.ldy.LimbGravityDarkeningCoeffs', 'ellc.ldy.LimbGravityDarkeningCoeffs', (['filtellc'], {}), '(filtellc)\n', (8309, 8319), False, 'import ellc\n'), ((8836, 9193), 'ellc.lc', 'ellc.lc', (['self.obsDates[filt]'], {'ldc_1': 'ldc_1', 'ldc_2': 'ldc_2', 't_zero': 'self.t_zero', 'period': 'self.period', 'a': 'self.a', 'q': 'self.q', 'f_c': 'self.f_c', 'f_s': 'self.f_s', 'ld_1': 'self.ld_1', 'ld_2': 'self.ld_2', 'radius_1': 'self.R_1', 'radius_2': 'self.R_2', 'incl': 'self.inclination', 'sbratio': 'self.sbratio', 'shape_1': 'self.shape_1', 'shape_2': 'self.shape_2', 'grid_1': 'self.grid_1', 'grid_2': 'self.grid_2'}), '(self.obsDates[filt], ldc_1=ldc_1, ldc_2=ldc_2, t_zero=self.t_zero,\n period=self.period, a=self.a, q=self.q, f_c=self.f_c, f_s=self.f_s,\n ld_1=self.ld_1, ld_2=self.ld_2, radius_1=self.R_1, radius_2=self.R_2,\n incl=self.inclination, sbratio=self.sbratio, shape_1=self.shape_1,\n shape_2=self.shape_2, grid_1=self.grid_1, grid_2=self.grid_2)\n', (8843, 9193), False, 'import ellc\n'), ((11143, 11211), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': '(RA * units.degree)', 'dec': '(Dec * units.degree)', 'frame': '"""icrs"""'}), "(ra=RA * units.degree, dec=Dec * units.degree, frame='icrs')\n", (11151, 11211), False, 'from astropy.coordinates import SkyCoord, ICRS\n'), ((11224, 11296), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': '(myRA * units.degree)', 'dec': '(myDEC * units.degree)', 'frame': '"""icrs"""'}), "(ra=myRA * units.degree, dec=myDEC * units.degree, frame='icrs')\n", (11232, 11296), False, 'from astropy.coordinates import SkyCoord, ICRS\n'), ((14374, 14401), 'numpy.log10', 'np.log10', (['(self.L1 + self.L2)'], {}), '(self.L1 + self.L2)\n', (14382, 14401), True, 'import numpy as np\n'), ((14569, 14681), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'x': 'self.xGx', 'y': 'self.yGx', 'z': 
'self.zGx', 'unit': '"""pc"""', 'representation': '"""cartesian"""', 'frame': '"""galactocentric"""'}), "(x=self.xGx, y=self.yGx, z=self.zGx, unit='pc', representation=\n 'cartesian', frame='galactocentric')\n", (14577, 14681), False, 'from astropy.coordinates import SkyCoord, ICRS\n'), ((15019, 15090), 'vespa.stars.extinction.get_AV_infinity', 'vespa.stars.extinction.get_AV_infinity', (['self.RA', 'self.Dec'], {'frame': '"""icrs"""'}), "(self.RA, self.Dec, frame='icrs')\n", (15057, 15090), False, 'import vespa\n'), ((15099, 15114), 'dust_extinction.parameter_averages.F99', 'F99', ([], {'Rv': 'self.RV'}), '(Rv=self.RV)\n', (15102, 15114), False, 'from dust_extinction.parameter_averages import F99\n'), ((18816, 18832), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (18830, 18832), True, 'import numpy as np\n'), ((19646, 19672), 'scipy.special.expit', 'ss.expit', (['dataSample[0, :]'], {}), '(dataSample[0, :])\n', (19654, 19672), True, 'import scipy.special as ss\n'), ((19680, 19706), 'scipy.special.expit', 'ss.expit', (['dataSample[1, :]'], {}), '(dataSample[1, :])\n', (19688, 19706), True, 'import scipy.special as ss\n'), ((19716, 19742), 'scipy.special.expit', 'ss.expit', (['dataSample[2, :]'], {}), '(dataSample[2, :])\n', (19724, 19742), True, 'import scipy.special as ss\n'), ((19751, 19777), 'scipy.special.expit', 'ss.expit', (['dataSample[3, :]'], {}), '(dataSample[3, :])\n', (19759, 19777), True, 'import scipy.special as ss\n'), ((19787, 19813), 'scipy.special.expit', 'ss.expit', (['dataSample[4, :]'], {}), '(dataSample[4, :])\n', (19795, 19813), True, 'import scipy.special as ss\n'), ((19823, 19849), 'scipy.special.expit', 'ss.expit', (['dataSample[5, :]'], {}), '(dataSample[5, :])\n', (19831, 19849), True, 'import scipy.special as ss\n'), ((19859, 19885), 'scipy.special.expit', 'ss.expit', (['dataSample[6, :]'], {}), '(dataSample[6, :])\n', (19867, 19885), True, 'import scipy.special as ss\n'), ((19895, 19921), 'scipy.special.expit', 'ss.expit', 
(['dataSample[7, :]'], {}), '(dataSample[7, :])\n', (19903, 19921), True, 'import scipy.special as ss\n'), ((21578, 21610), 'numpy.where', 'np.where', (['(radAng > inc * 4.8e-06)'], {}), '(radAng > inc * 4.8e-06)\n', (21586, 21610), True, 'import numpy as np\n'), ((21611, 21652), 'numpy.savetxt', 'np.savetxt', (['gxFile', 'binDat'], {'delimiter': '""","""'}), "(gxFile, binDat, delimiter=',')\n", (21621, 21652), True, 'import numpy as np\n'), ((22056, 22067), 'time.time', 'time.time', ([], {}), '()\n', (22065, 22067), False, 'import time\n'), ((24112, 24151), 'pandas.read_hdf', 'pd.read_hdf', (['self.GalaxyFile'], {'key': '"""bcm"""'}), "(self.GalaxyFile, key='bcm')\n", (24123, 24151), True, 'import pandas as pd\n'), ((24168, 24241), 'numpy.loadtxt', 'np.loadtxt', (["(self.GalaxyFileLogPrefix + self.popID + '.dat')"], {'delimiter': '""","""'}), "(self.GalaxyFileLogPrefix + self.popID + '.dat', delimiter=',')\n", (24178, 24241), True, 'import numpy as np\n'), ((25326, 25361), 'astropy.stats.scott_bin_width', 'astroStats.scott_bin_width', (['m1Trans'], {}), '(m1Trans)\n', (25352, 25361), True, 'import astropy.stats as astroStats\n'), ((25429, 25464), 'astropy.stats.scott_bin_width', 'astroStats.scott_bin_width', (['m2Trans'], {}), '(m2Trans)\n', (25455, 25464), True, 'import astropy.stats as astroStats\n'), ((25548, 25585), 'astropy.stats.scott_bin_width', 'astroStats.scott_bin_width', (['porbTrans'], {}), '(porbTrans)\n', (25574, 25585), True, 'import astropy.stats as astroStats\n'), ((25659, 25703), 'astropy.stats.scott_bin_width', 'astroStats.scott_bin_width', (["FixedPop['Lum1']"], {}), "(FixedPop['Lum1'])\n", (25685, 25703), True, 'import astropy.stats as astroStats\n'), ((25773, 25817), 'astropy.stats.scott_bin_width', 'astroStats.scott_bin_width', (["FixedPop['Lum2']"], {}), "(FixedPop['Lum2'])\n", (25799, 25817), True, 'import astropy.stats as astroStats\n'), ((25917, 25949), 'numpy.where', 'np.where', (["(FixedPop['ecc'] > 0.01)"], {}), "(FixedPop['ecc'] > 
0.01)\n", (25925, 25949), True, 'import numpy as np\n'), ((26317, 26354), 'astropy.stats.scott_bin_width', 'astroStats.scott_bin_width', (['rad1Trans'], {}), '(rad1Trans)\n', (26343, 26354), True, 'import astropy.stats as astroStats\n'), ((26424, 26461), 'astropy.stats.scott_bin_width', 'astroStats.scott_bin_width', (['rad2Trans'], {}), '(rad2Trans)\n', (26450, 26461), True, 'import astropy.stats as astroStats\n'), ((27573, 27596), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (27594, 27596), False, 'import multiprocessing\n'), ((28338, 28357), 'numpy.vstack', 'np.vstack', (['gxDatTot'], {}), '(gxDatTot)\n', (28347, 28357), True, 'import numpy as np\n'), ((30136, 30146), 'numpy.sum', 'np.sum', (['fn'], {}), '(fn)\n', (30142, 30146), True, 'import numpy as np\n'), ((30155, 30240), 'numpy.interp', 'np.interp', (['w', "self.filterThroughput[filt]['w']", "self.filterThroughput[filt]['t']"], {}), "(w, self.filterThroughput[filt]['w'], self.filterThroughput[filt]['t']\n )\n", (30164, 30240), True, 'import numpy as np\n'), ((30268, 30283), 'numpy.sum', 'np.sum', (['(fn * ft)'], {}), '(fn * ft)\n', (30274, 30283), True, 'import numpy as np\n'), ((30537, 30547), 'numpy.sum', 'np.sum', (['fn'], {}), '(fn)\n', (30543, 30547), True, 'import numpy as np\n'), ((32523, 32551), 'sqlite3.connect', 'sqlite3.connect', (['self.dbFile'], {}), '(self.dbFile)\n', (32538, 32551), False, 'import sqlite3\n'), ((33354, 33391), 'numpy.linspace', 'np.linspace', (['(0.2)', '(2.0 * period)', '(10000)'], {}), '(0.2, 2.0 * period, 10000)\n', (33365, 33391), True, 'import numpy as np\n'), ((35158, 35170), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (35166, 35170), True, 'import numpy as np\n'), ((35188, 35200), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (35196, 35200), True, 'import numpy as np\n'), ((35221, 35233), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (35229, 35233), True, 'import numpy as np\n'), ((35252, 35264), 'numpy.array', 'np.array', 
(['[]'], {}), '([])\n', (35260, 35264), True, 'import numpy as np\n'), ((9203, 9213), 'numpy.max', 'np.max', (['lc'], {}), '(lc)\n', (9209, 9213), True, 'import numpy as np\n'), ((12213, 12273), 'numpy.logical_and', 'np.logical_and', (['(FieldID == self.OpSimID)', '(filt == filtin[:-1])'], {}), '(FieldID == self.OpSimID, filt == filtin[:-1])\n', (12227, 12273), True, 'import numpy as np\n'), ((13232, 13248), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (13246, 13248), True, 'import numpy as np\n'), ((13260, 13290), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'self.seed'}), '(seed=self.seed)\n', (13274, 13290), True, 'import numpy as np\n'), ((13864, 13890), 'numpy.sqrt', 'np.sqrt', (['self.eccentricity'], {}), '(self.eccentricity)\n', (13871, 13890), True, 'import numpy as np\n'), ((13891, 13925), 'numpy.cos', 'np.cos', (['(self.omega * np.pi / 180.0)'], {}), '(self.omega * np.pi / 180.0)\n', (13897, 13925), True, 'import numpy as np\n'), ((13934, 13960), 'numpy.sqrt', 'np.sqrt', (['self.eccentricity'], {}), '(self.eccentricity)\n', (13941, 13960), True, 'import numpy as np\n'), ((13961, 13995), 'numpy.sin', 'np.sin', (['(self.omega * np.pi / 180.0)'], {}), '(self.omega * np.pi / 180.0)\n', (13967, 13995), True, 'import numpy as np\n'), ((14416, 14457), 'numpy.log10', 'np.log10', (['(self.r1 ** 2.0 + self.r2 ** 2.0)'], {}), '(self.r1 ** 2.0 + self.r2 ** 2.0)\n', (14424, 14457), True, 'import numpy as np\n'), ((18169, 18188), 'math.pow', 'math.pow', (['(10)', '(-11.0)'], {}), '(10, -11.0)\n', (18177, 18188), False, 'import math\n'), ((18206, 18223), 'math.pow', 'math.pow', (['(10)', '(8.0)'], {}), '(10, 8.0)\n', (18214, 18223), False, 'import math\n'), ((18246, 18262), 'math.pow', 'math.pow', (['(10)', '(16)'], {}), '(10, 16)\n', (18254, 18262), False, 'import math\n'), ((18278, 18293), 'math.pow', 'math.pow', (['(10)', '(8)'], {}), '(10, 8)\n', (18286, 18293), False, 'import math\n'), ((18310, 18326), 'math.pow', 'math.pow', (['(10)', '(30)'], 
{}), '(10, 30)\n', (18318, 18326), False, 'import math\n'), ((20023, 20044), 'numpy.log10', 'np.log10', (["pop['porb']"], {}), "(pop['porb'])\n", (20031, 20044), True, 'import numpy as np\n'), ((20685, 20702), 'numpy.log', 'np.log', (['(1 - a_0_r)'], {}), '(1 - a_0_r)\n', (20691, 20702), True, 'import numpy as np\n'), ((20933, 20963), 'numpy.arctanh', 'np.arctanh', (['(a_0_zr / 0.704 - 1)'], {}), '(a_0_zr / 0.704 - 1)\n', (20943, 20963), True, 'import numpy as np\n'), ((21395, 21497), 'numpy.vstack', 'np.vstack', (['(m1, m2, porb, ecc, rad1, rad2, Lum1, Lum2, xGX, yGX, zGX, dist_kpc, inc,\n OMEGA, omega)'], {}), '((m1, m2, porb, ecc, rad1, rad2, Lum1, Lum2, xGX, yGX, zGX,\n dist_kpc, inc, OMEGA, omega))\n', (21404, 21497), True, 'import numpy as np\n'), ((21697, 21713), 'numpy.shape', 'np.shape', (['binDat'], {}), '(binDat)\n', (21705, 21713), True, 'import numpy as np\n'), ((26156, 26174), 'scipy.special.logit', 'ss.logit', (['eccTrans'], {}), '(eccTrans)\n', (26164, 26174), True, 'import scipy.special as ss\n'), ((26186, 26222), 'astropy.stats.scott_bin_width', 'astroStats.scott_bin_width', (['eccTrans'], {}), '(eccTrans)\n', (26212, 26222), True, 'import astropy.stats as astroStats\n'), ((26860, 26957), 'numpy.array', 'np.array', (['(m1Trans, m2Trans, porbTrans, eccTrans, rad1Trans, rad2Trans, Lum1Trans,\n Lum2Trans)'], {}), '((m1Trans, m2Trans, porbTrans, eccTrans, rad1Trans, rad2Trans,\n Lum1Trans, Lum2Trans))\n', (26868, 26957), True, 'import numpy as np\n'), ((29541, 29629), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'delim_whitespace': '(True)', 'header': 'None', 'names': "['w', 't']", 'skiprows': '(6)'}), "(fname, delim_whitespace=True, header=None, names=['w', 't'],\n skiprows=6)\n", (29552, 29629), True, 'import pandas as pd\n'), ((30007, 30032), 'numpy.arange', 'np.arange', (['wmin', 'wmax', 'dw'], {}), '(wmin, wmax, dw)\n', (30016, 30032), True, 'import numpy as np\n'), ((30408, 30433), 'numpy.arange', 'np.arange', (['wmin', 'wmax', 'dw'], {}), 
'(wmin, wmax, dw)\n', (30417, 30433), True, 'import numpy as np\n'), ((30585, 30670), 'numpy.interp', 'np.interp', (['w', "self.filterThroughput[filt]['w']", "self.filterThroughput[filt]['t']"], {}), "(w, self.filterThroughput[filt]['w'], self.filterThroughput[filt]['t']\n )\n", (30594, 30670), True, 'import numpy as np\n'), ((30700, 30715), 'numpy.sum', 'np.sum', (['(fn * ft)'], {}), '(fn * ft)\n', (30706, 30715), True, 'import numpy as np\n'), ((33506, 33568), 'numpy.array', 'np.array', (['[(tt % period / period) for tt in EB.obsDates[filt]]'], {}), '([(tt % period / period) for tt in EB.obsDates[filt]])\n', (33514, 33568), True, 'import numpy as np\n'), ((33696, 33717), 'numpy.argsort', 'np.argsort', (['phase_obs'], {}), '(phase_obs)\n', (33706, 33717), True, 'import numpy as np\n'), ((34358, 34396), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""y"""', 'nticks': '(2)'}), "(axis='y', nticks=2)\n", (34376, 34396), True, 'import matplotlib.pyplot as plt\n'), ((37789, 37807), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (37805, 37807), True, 'import numpy as np\n'), ((39209, 39225), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (39223, 39225), True, 'import numpy as np\n'), ((39237, 39267), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'self.seed'}), '(seed=self.seed)\n', (39251, 39267), True, 'import numpy as np\n'), ((6192, 6203), 'numpy.log10', 'np.log10', (['R'], {}), '(R)\n', (6200, 6203), True, 'import numpy as np\n'), ((6376, 6387), 'numpy.log10', 'np.log10', (['L'], {}), '(L)\n', (6384, 6387), True, 'import numpy as np\n'), ((7167, 7197), 'numpy.log', 'np.log', (['(1.0 + q ** (1.0 / 3.0))'], {}), '(1.0 + q ** (1.0 / 3.0))\n', (7173, 7197), True, 'import numpy as np\n'), ((9771, 9783), 'numpy.log10', 'np.log10', (['lc'], {}), '(lc)\n', (9779, 9783), True, 'import numpy as np\n'), ((14805, 14832), 'numpy.log10', 'np.log10', (['(self.L1 + self.L2)'], {}), '(self.L1 + self.L2)\n', (14813, 14832), 
True, 'import numpy as np\n'), ((21009, 21020), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (21015, 21020), True, 'import numpy as np\n'), ((21038, 21049), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (21044, 21049), True, 'import numpy as np\n'), ((25508, 25534), 'numpy.log10', 'np.log10', (["FixedPop['porb']"], {}), "(FixedPop['porb'])\n", (25516, 25534), True, 'import numpy as np\n'), ((27052, 27139), 'numpy.array', 'np.array', (['(m1Trans, m2Trans, porbTrans, rad1Trans, rad2Trans, Lum1Trans, Lum2Trans)'], {}), '((m1Trans, m2Trans, porbTrans, rad1Trans, rad2Trans, Lum1Trans,\n Lum2Trans))\n', (27060, 27139), True, 'import numpy as np\n'), ((29810, 29869), 'numpy.exp', 'np.exp', (['(constants.h * constants.c / (w * constants.k_B * T))'], {}), '(constants.h * constants.c / (w * constants.k_B * T))\n', (29816, 29869), True, 'import numpy as np\n'), ((36158, 36199), 'numpy.append', 'np.append', (['allObsDates', 'EB.obsDates[filt]'], {}), '(allObsDates, EB.obsDates[filt])\n', (36167, 36199), True, 'import numpy as np\n'), ((36219, 36262), 'numpy.append', 'np.append', (['allAppMagObs', 'EB.appMagObs[filt]'], {}), '(allAppMagObs, EB.appMagObs[filt])\n', (36228, 36262), True, 'import numpy as np\n'), ((36285, 36334), 'numpy.append', 'np.append', (['allAppMagObsErr', 'EB.appMagObsErr[filt]'], {}), '(allAppMagObsErr, EB.appMagObsErr[filt])\n', (36294, 36334), True, 'import numpy as np\n'), ((36845, 36886), 'gatspy.periodic.LombScargleMultibandFast', 'LombScargleMultibandFast', ([], {'fit_period': '(True)'}), '(fit_period=True)\n', (36869, 36886), False, 'from gatspy.periodic import LombScargleMultiband, LombScargle, LombScargleFast, LombScargleMultibandFast\n'), ((36910, 37001), 'gatspy.periodic.LombScargleMultiband', 'LombScargleMultiband', ([], {'Nterms_band': 'self.n_band', 'Nterms_base': 'self.n_base', 'fit_period': '(True)'}), '(Nterms_band=self.n_band, Nterms_base=self.n_base,\n fit_period=True)\n', (36930, 37001), False, 'from gatspy.periodic import 
LombScargleMultiband, LombScargle, LombScargleFast, LombScargleMultibandFast\n'), ((6174, 6185), 'numpy.log10', 'np.log10', (['L'], {}), '(L)\n', (6182, 6185), True, 'import numpy as np\n'), ((6345, 6356), 'numpy.log10', 'np.log10', (['m'], {}), '(m)\n', (6353, 6356), True, 'import numpy as np\n'), ((10618, 10652), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'x', 'scale': 'sig'}), '(loc=x, scale=sig)\n', (10634, 10652), True, 'import numpy as np\n'), ((12737, 12780), 'numpy.arctan2', 'np.arctan2', (['(self.r1 + self.r2)', '(2.0 * self.a)'], {}), '(self.r1 + self.r2, 2.0 * self.a)\n', (12747, 12780), True, 'import numpy as np\n'), ((15706, 15741), 'numpy.log10', 'np.log10', (['(self.L1f[f] + self.L2f[f])'], {}), '(self.L1f[f] + self.L2f[f])\n', (15714, 15741), True, 'import numpy as np\n'), ((16766, 16793), 'numpy.random.random', 'np.random.random', ([], {'size': 'nobs'}), '(size=nobs)\n', (16782, 16793), True, 'import numpy as np\n'), ((33751, 33768), 'numpy.array', 'np.array', (['mag_obs'], {}), '(mag_obs)\n', (33759, 33768), True, 'import numpy as np\n'), ((33842, 33855), 'numpy.array', 'np.array', (['mag'], {}), '(mag)\n', (33850, 33855), True, 'import numpy as np\n'), ((35766, 35798), 'gatspy.periodic.LombScargleFast', 'LombScargleFast', ([], {'fit_period': '(True)'}), '(fit_period=True)\n', (35781, 35798), False, 'from gatspy.periodic import LombScargleMultiband, LombScargle, LombScargleFast, LombScargleMultibandFast\n'), ((35824, 35852), 'gatspy.periodic.LombScargle', 'LombScargle', ([], {'fit_period': '(True)'}), '(fit_period=True)\n', (35835, 35852), False, 'from gatspy.periodic import LombScargleMultiband, LombScargle, LombScargleFast, LombScargleMultibandFast\n'), ((6362, 6373), 'numpy.log10', 'np.log10', (['T'], {}), '(T)\n', (6370, 6373), True, 'import numpy as np\n'), ((7618, 7640), 'numpy.log10', 'np.log10', (['(t_vis / 30.0)'], {}), '(t_vis / 30.0)\n', (7626, 7640), True, 'import numpy as np\n'), ((15996, 16023), 'numpy.log10', 
'np.log10', (['(self.dist * 100.0)'], {}), '(self.dist * 100.0)\n', (16004, 16023), True, 'import numpy as np\n'), ((7564, 7610), 'numpy.log10', 'np.log10', (["(0.7 / self.sigmaDict[filt]['seeing'])"], {}), "(0.7 / self.sigmaDict[filt]['seeing'])\n", (7572, 7610), True, 'import numpy as np\n')] |
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from multiprocessing.dummy import Pool as ThreadPool
import os
import time
import pandas as pd
import nltk
import numpy as np
import re
import spacy
from sklearn.feature_extraction.text import CountVectorizer
import progressbar as bar
import extractUnique as xq
import tristream_processor as stream
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier # 248
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score
#_start = time.time()
# Load the SemEval-style restaurant-review data.  Column picks below select
# three columns (review text, an auxiliary column, aspect category) by
# position -- assumes the CSV layouts match; TODO confirm against the headers.
testB = pd.read_csv("CSV/Restaurants_Test_Data_phaseB.csv")
trainB = pd.read_csv("CSV/Restaurants_Train_v2.csv")
trainB_1 = trainB.iloc[:, [0, 7, 5]]
testB_1 = testB.iloc[:, [0, 5, 4]]
del testB
# Train and test are pooled into one frame; a random split is re-done later
# by split_train_test().
fullB = pd.concat([trainB_1, testB_1], axis=0, ignore_index=True)
dataset = fullB  # MAJOR DATA-SET
# --------------------- FUNCTIONS --------------------------
def check_dep_parse(token_dep):
    """Return True when the dependency tag is a determiner relation ('det...').

    Other relations that were once considered (nsubj, amod, rcmod, dobj, neg)
    are currently disabled, so anything not starting with 'det' is rejected.
    """
    return token_dep.startswith('det')
def streamers(full_dataset):
    """Build the three feature streams from the review dataset.

    Runs the tri-stream processors (lemmatization, bigram formation and
    dependency-relation extraction), caches each stream to Wave2/streamN.csv
    so later menu choices can reload them without re-processing, and returns
    the three streams.

    Removed dead code from the original: several never-used accumulator
    lists, an unused ``plot_nlp`` placeholder and an unused
    ``spacy.load('en_core_web_sm')`` call (``stream.dep_rel`` does its own
    NLP work).

    :param full_dataset: DataFrame of pooled train+test reviews
    :return: (stream1, stream2, stream3) -- lemmas, bigrams, dependency rels
    """
    dataset = full_dataset
    # STREAM 1 - lemmatization
    stream1 = stream.lemmatize(dataset)
    # STREAM 2 - bigrams
    stream2 = stream.bigram(dataset)
    # STREAM 3 - dependency features
    stream3 = stream.dep_rel(dataset)
    # Persist each stream for offline reuse.
    stream1.to_csv('Wave2/stream1.csv', index=False)
    stream2.to_csv('Wave2/stream2.csv', index=False)
    stream3.to_csv('Wave2/stream3.csv', index=False)
    return stream1, stream2, stream3
def sheet_generator(s1, s2, s3):
    """Fuse the three feature streams into one uniquely-corrected feature set.

    Side effects: writes Wave2/FeatureSet.csv, Wave2/WholeSet.csv and
    Wave2/Feature.csv.  Relies on the module-level ``dataset`` frame for the
    review-text and aspect-category columns.  Calls ``exit(0)`` on failure of
    either extraction stage (preserved from the original pipeline).

    Removed dead code from the original: a pointless re-read of
    FeatureSet.csv that was immediately deleted, and a commented-out
    browser-launch block.

    :param s1: lemma stream (pd.Series)
    :param s2: bigram stream (pd.Series)
    :param s3: dependency-relation stream (pd.Series)
    :return: (Feature_df, c_list) -- feature frame and per-sentence combined
             lemma + unique-feature lists
    """
    stream1 = s1
    stream2 = s2
    stream3 = s3
    # Persist the raw three-stream table for inspection / offline reuse.
    df = pd.concat([stream1, stream2, stream3], axis=1)
    df = df.rename(columns={0: 'lemmas', 1: 'bigrams', 2: 'depenrel'})
    df.to_csv('Wave2/FeatureSet.csv', index=False)
    # Build the full sheet: review text + streams + aspect category.
    # The CSV round-trip below is deliberate: it normalizes dtypes to what
    # later offline reloads of WholeSet.csv will see.
    whole_df = pd.concat([dataset.iloc[0:, 0], stream1, stream2, stream3, dataset.iloc[0:, 2]], axis=1)
    whole_df = whole_df.rename(columns={'text': 'reviews', 0: 'lemmas', 1: 'bigrams', 2: 'depenrel',
                                        'aspectCategories/aspectCategory/0/_category': 'aspectCategory'})
    whole_df.to_csv('Wave2/WholeSet.csv', index=False)
    whole_df = pd.read_csv('Wave2/WholeSet.csv', sep=',')
    # Unique features from bigrams + dependency relations.
    u_feat = list()
    try:
        u_feat = xq.unique(whole_df=whole_df, bigram_col=2, dep_rel_col=3)
        print("Unique Features Extracted")
    except KeyboardInterrupt:
        print("[STAGE 3] Manual Interrupt to Unique Features")
        exit(0)
    except Exception as e:
        print('[STAGE 3] Improper Termination due to:', e)
        exit(0)
    # DF with Review, Lemmas, U_feat, Aspect Cat
    Feature_df = whole_df[['reviews', 'lemmas']][0:]
    Feature_df = pd.concat([Feature_df, pd.Series(u_feat), whole_df.iloc[0:, -1]], axis=1)
    Feature_df = Feature_df.rename(columns={0: 'ufeat'})
    Feature_df.to_csv('Wave2/Feature.csv', index=False)
    del whole_df
    # Combine lemmas with unique features sentence-wise.
    c_list = list()
    try:
        Feature_df = Feature_df.dropna()
        c_list = xq.combiner(Feature_df=Feature_df, lemma_col=1, uniqueFeat_col=2, use_ast=True)
    except KeyboardInterrupt:
        print("[STAGE 4] Manual Interrupt to Combiner")
        exit(0)
    except Exception as e:
        print("[STAGE 4] Improper Termination due to:", e)
        exit(0)
    return Feature_df, c_list
def corrector(combined_features_list):
    """Spell-correct the combined n-gram features and cache them to CSV.

    Delegates to ``xq.get_correct_spell``; on any of the known failure modes
    the (possibly empty) result collected so far is still written to
    Wave2/ngram_list.csv so later stages can reload it.

    :param combined_features_list: per-sentence ';'-joined feature strings
    :return: list of corrected uni-/bi-grams
    """
    corrected = list()
    try:
        started = time.time()
        corrected = xq.get_correct_spell(word_list=combined_features_list, split_by=';')
        finished = time.time()
        print('Time elapsed %.3f' % float(((finished - started) / 60) / 60))
    except ValueError:
        print("[STAGE 5] Spell Checker | Interrupted")
    except TypeError:
        print("[STAGE 5] Spell Checker | Multi-threading issue")
    except AttributeError:
        print("[STAGE 5] Spell Checker | Attrition")
    except KeyboardInterrupt:
        print("[STAGE 5] Spell Checker | Forced Drop")
    pd.Series(corrected).to_csv('Wave2/ngram_list.csv', index=False)
    return corrected
# Creating Bag of Words Model
def creating_bow(corrected_list, features_dataframe, max_features=33433):
    """Build the Bag-of-Words design matrix from the corrected n-grams.

    :param corrected_list: spell-corrected uni-/bi-gram document strings
    :param features_dataframe: frame holding the 'aspectCategory' target column
    :param max_features: vocabulary cap handed to CountVectorizer
    :return: (X dense count matrix, y target series, fitted vocabulary dict)
    """
    documents = list(corrected_list)
    vectorizer = CountVectorizer(max_features=max_features, ngram_range=(1, 2))
    # ============================== Preparing Train set =============================
    # ML with Bag of Words to Aspect Categories
    X = vectorizer.fit_transform(documents).toarray()
    y = features_dataframe['aspectCategory']
    return X, y, vectorizer.vocabulary_
def split_train_test(X, y):
    """Random train/test split using sklearn defaults (25% held out)."""
    from sklearn.model_selection import train_test_split
    X_tr, X_te, y_tr, y_te = train_test_split(X, y)
    return X_tr, X_te, y_tr, y_te
def evaluator(prf, li2, total):
    """Print a pass/fail comparison of precision/recall/F1 against benchmarks.

    :param prf: output of precision_recall_fscore_support (fractions, 0..1);
                the last element (support) is skipped
    :param li2: class labels followed by 'TOTAL' (at least six entries used)
    :param total: overall [precision, recall, f1] percentages
    """
    metric_names = ['Precision', 'Recall\t', 'F1 Measure']
    print("EVALUATION RESULTS".center(60, '_'))
    # Reference percentages to beat: five per-class targets plus an overall one
    # per metric row (precision / recall / f1).
    benchmarks = [[73.6, 81.3, 90.9, 89.9, 92.2, 87.5],
                  [66.1, 70.5, 83.3, 95.2, 89.0, 80.3],
                  [69.6, 75.5, 86.9, 92.4, 90.5, 83.5]]
    print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
    for row in range(len(prf) - 1):
        scores = prf[row] * 100.0
        bench = benchmarks[row]
        print('%s \t %r \t\t %r \t %r \t %r \t %r \t %r' % (metric_names[row],
                                                            scores[0] >= bench[0], scores[1] >= bench[1],
                                                            scores[2] >= bench[2], scores[3] >= bench[3],
                                                            scores[4] >= bench[4], total[row] >= bench[5]))
def prf_to_csv(prf, fileName):
    """Persist a precision_recall_fscore_support tuple under Results/<fileName>.

    All four rows -- including the support row, as in the original pipeline --
    are multiplied by 100 before saving.
    """
    table = pd.DataFrame(np.array(prf),
                         index=['Precision', 'Recall', 'F1 Measure', 'Support'])
    table = table * 100
    table.to_csv('Results/%s' % fileName)
# ----------------- PREPARING THE MACHINE --------------------------
def the_machine(X_train, X_test, y_train, y_test):
    """Train and report four classifiers on the Bag-of-Words split.

    For each of RandomForest, LinearSVC, MultinomialNB and a hard-voting
    ensemble of the three: fit, predict, print the confusion matrix and a
    per-class precision/recall/F1 table, compare against the paper benchmarks
    via evaluator(), and save the metrics with prf_to_csv().
    """
    # ---------------- Random Forest ----------------
    print("RANDOM FOREST CLASSIFIER RESULTS:")
    rf_Classifier = RandomForestClassifier(n_estimators=50, n_jobs=4)
    rf_Classifier.fit(X_train, y_train)
    y_pred = rf_Classifier.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)
    print(cm)
    # prf is (precision, recall, f1, support), each an array per class.
    prf = precision_recall_fscore_support(y_test, y_pred)
    li2 = list(rf_Classifier.classes_)
    li2.append('TOTAL')
    li = ['Precision', 'Recall\t', 'F1 Measure']
    # 'weighted' averaging is reused by the SVC and NB sections below.
    method = 'weighted'
    total_f1 = f1_score(y_test, y_pred, average=method) * 100
    total_pr = precision_score(y_test, y_pred, average=method) * 100
    total_re = recall_score(y_test, y_pred, average=method) * 100
    total = [total_pr, total_re, total_f1]
    print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
    # Skip the last prf element (support) -- only the three score rows print.
    for i in range(len(prf) - 1):
        x = prf[i] * 100.0
        print(
            '%s \t %.2f \t\t %.2f \t %.2f \t %.2f \t %.2f \t %.1f' % (li[i], x[0], x[1], x[2], x[3], x[4], total[i]))
    evaluator(prf, li2, total)
    prf_to_csv(prf, 'RandomForest_LBD.csv')
    # ---------------- Linear SVC ----------------
    print("SVM RESULTS:")
    from sklearn.svm import LinearSVC
    # classifier = SVC(kernel='sigmoid', degree=3)
    linsvc_classifier = LinearSVC(multi_class='crammer_singer', C=1)
    linsvc_classifier.fit(X_train, y_train)
    y_pred = linsvc_classifier.predict(X_test)
    cm1 = confusion_matrix(y_test, y_pred)
    print(cm1)
    prf = precision_recall_fscore_support(y_test, y_pred)
    li2 = list(linsvc_classifier.classes_)
    li2.append('TOTAL')
    li = ['Precision', 'Recall\t', 'F1 Measure']
    total_f1 = f1_score(y_test, y_pred, average=method) * 100
    total_pr = precision_score(y_test, y_pred, average=method) * 100
    total_re = recall_score(y_test, y_pred, average=method) * 100
    total = [total_pr, total_re, total_f1]
    print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
    for i in range(len(prf) - 1):
        x = prf[i] * 100.0
        print('%s \t %.2f \t\t %.2f \t %.2f \t %.2f \t %.2f \t %.1f' % (li[i], x[0], x[1], x[2], x[3], x[4], total[i]))
    evaluator(prf, li2, total)
    prf_to_csv(prf, 'LinearSVC_LBD.csv')
    # ---------------- Multinomial Naive Bayes ----------------
    print("MULTINOMIAL NB RESULTS:")
    from sklearn.naive_bayes import MultinomialNB
    # classifier = SVC(kernel='sigmoid', degree=3)
    multi_nb_classifier = MultinomialNB()
    multi_nb_classifier.fit(X_train, y_train)
    y_pred = multi_nb_classifier.predict(X_test)
    cm1 = confusion_matrix(y_test, y_pred)
    print(cm1)
    prf = precision_recall_fscore_support(y_test, y_pred)
    li2 = list(multi_nb_classifier.classes_)
    li2.append('TOTAL')
    li = ['Precision', 'Recall\t', 'F1 Measure']
    total_f1 = f1_score(y_test, y_pred, average=method) * 100
    total_pr = precision_score(y_test, y_pred, average=method) * 100
    total_re = recall_score(y_test, y_pred, average=method) * 100
    total = [total_pr, total_re, total_f1]
    print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
    for i in range(len(prf) - 1):
        x = prf[i] * 100.0
        print(
            '%s \t %.2f \t\t %.2f \t %.2f \t %.2f \t %.2f \t %.1f' % (li[i], x[0], x[1], x[2], x[3], x[4], total[i]))
    evaluator(prf, li2, total)
    prf_to_csv(prf, 'MultinomialNB_LBD.csv')
    # ---------------- Hard-voting ensemble of fresh instances ----------------
    print("VOTING CLASSIFIER RESULTS:")
    # BEST CLASSIFIERS
    RFC_C1 = RandomForestClassifier(n_estimators=25, n_jobs=4)
    LSVC_C2 = LinearSVC(multi_class='crammer_singer', C=1)
    MNB_C3 = MultinomialNB()
    from sklearn.ensemble import VotingClassifier
    # classifier = GaussianNB()
    # classifier = MultinomialNB(fit_prior=False)
    classifier = VotingClassifier(estimators=[('lr', RFC_C1), ('rf', LSVC_C2),
                                              ('gnb', MNB_C3)], voting='hard', n_jobs=4)
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    cm1 = confusion_matrix(y_test, y_pred)
    print(cm1)
    prf = precision_recall_fscore_support(y_test, y_pred)
    li2 = list(classifier.classes_)
    li2.append('TOTAL')
    li = ['Precision', 'Recall\t', 'F1 Measure']
    # NOTE(review): this section uses 'macro'/'micro' averaging while the
    # sections above use 'weighted' -- looks inconsistent; confirm intent.
    total_f1 = f1_score(y_test, y_pred, average='macro') * 100
    total_pr = precision_score(y_test, y_pred, average='micro') * 100
    total_re = recall_score(y_test, y_pred, average='micro') * 100
    total = [total_pr, total_re, total_f1]
    print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
    for i in range(len(prf) - 1):
        x = prf[i] * 100.0
        print('%s \t %.2f \t\t %.2f \t %.2f \t %.2f \t %.2f \t %.1f' % (li[i], x[0], x[1], x[2], x[3], x[4],
                                                                        total[i]))
    evaluator(prf, li2, total)
    prf_to_csv(prf, 'VotingClassifier_LBD.csv')
def executor():
    """Interactive driver for the aspect-category classification pipeline.

    Menu choices:
      1  build the three feature streams (lemmas / bigrams / dependency rels)
      2  merge the streams into unique features and spell-correct the n-grams
      3  build the Bag-of-Words matrix (X, y)
      4  random train/test split
      5  train and evaluate the classifiers
      other  exit
    State (streams, feature frame, X/y, split) is carried across loop
    iterations in locals, so the choices must be run in order.

    Bug fixed vs. the original: the cache check was ``'False' in exists``
    where ``exists`` was a list of booleans -- the string 'False' can never
    be a member, so the stream files were never generated when missing.
    """
    all_streams = list()
    X, y = 0, 0
    max_feat = 1000

    def take_feat():
        # Ask the user how many CountVectorizer features to keep.
        max_feat = int(input('Enter No. of features (MIN:MAX) to use in Machine\n (1000:33433) Input:'))
        return max_feat

    def streams_cached():
        # True when at least one 'stream*.csv' cache file is present.
        return any(item.startswith('stream') for item in os.listdir('Wave2'))

    while True:
        global fullB, testB_1
        choice = int(input("""\t\t\t\tMENU\n
             -------- Data Pre-processing ---------(1 Hr 20 Mins)
            1. Perform Lemmatization, Bi-grams formation \n\t\t& Dependency Relations\n
            2. Combine into Unique Features (4Secs)\n
            3. Create Bag of Words Model (2Secs)\n
             -------- Train Test Split ----------(50 Mins)
            4. Perform Pre-processing & Processing on Test Set
             -------- MACHINE LEARNING ------
            5. Call Machine
            6. Exit
             \t Choice:"""))
        if choice == 1:
            if not streams_cached():
                a, b, c = streamers(fullB)
                all_streams.append(a)
                all_streams.append(b)
                all_streams.append(c)
            else:
                print('\t\t\t\t\t\tALREADY PROCESSED: GO TO STEP 2')
        elif choice == 2:
            if not streams_cached():
                print('\t\t\t\t\t\t[CHOICE 2] GENERATING STREAMS')
                streamers(fullB)
            else:
                print('\t\t\t\t\t\tALREADY PROCESSED: GO TO STEP 3')
            # Reload the cached streams and fuse them into unique features.
            a = pd.read_csv('Wave2/stream1.csv', header=None)
            b = pd.read_csv('Wave2/stream2.csv', header=None)
            c = pd.read_csv('Wave2/stream3.csv', header=None)
            all_streams.append(pd.Series(a[0]))
            all_streams.append(pd.Series(b[0]))
            all_streams.append(pd.Series(c[0]))
            features_dataframe, combined_features = sheet_generator(all_streams[0], all_streams[1], all_streams[2])
            if 'ngram_list.csv' not in os.listdir('Wave2'):
                corrector(combined_features_list=combined_features)
            else:
                print('\n\t\t\t\t\t\tAVAILABLE CORRECTED NGRAMS: OFFLINE AVAILABLE')
        elif choice == 3:
            max_feat = take_feat()
            df2 = pd.read_csv('Wave2/ngram_list.csv', header=None)
            # NOTE(review): features_dataframe is only bound after choice 2 has
            # run in this session; running 3 first raises UnboundLocalError.
            X, y, vocab = creating_bow(corrected_list=list(df2[0]), features_dataframe=features_dataframe,
                                        max_features=max_feat)
            print("\t\t\t\t\t\tDATASET IS NOW READY")
        elif choice == 4:
            X_train, X_test, y_train, y_test = split_train_test(X, y)
            print("\t"*6, "TRAIN TEST SPLIT READY")
        elif choice == 5:
            try:
                the_machine(X_train, X_test, y_train, y_test)
            except UnboundLocalError as e:
                print("Execute with choice 3 & 4. Retry Err:", e)
        else:
            return
# Script entry point: launches the interactive menu loop immediately on import.
executor()
# TODO GRAPHS
# 1. X: AspectCategories Y: Percentages
#       I. Lemmas (Paper vs. Results)
#       II. Lemmas + Dependency
#       III. Lemmas + Bigrams + Dependency
# Scratch snippet for counting aspect-category frequencies:
# from collections import Counter
# dictionary = Counter(dataset['aspectCategories/aspectCategory/0/_category'])
# dictionary_ = dict(dictionary)
# f1 = open('category_stack.csv', 'w')
# for k, v in dictionary_.items():
#     print(k, v)
#     f1.write(k + ',' + str(v) + '\n')
# f1.close() | [
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.VotingClassifier",
"sklearn.metrics.f1_score",
"sklearn.metrics.precision_recall_fscore_support",
"pandas.DataFrame",
"extractUnique.unique",
"spacy.load",
"tristream... | [((624, 675), 'pandas.read_csv', 'pd.read_csv', (['"""CSV/Restaurants_Test_Data_phaseB.csv"""'], {}), "('CSV/Restaurants_Test_Data_phaseB.csv')\n", (635, 675), True, 'import pandas as pd\n'), ((685, 728), 'pandas.read_csv', 'pd.read_csv', (['"""CSV/Restaurants_Train_v2.csv"""'], {}), "('CSV/Restaurants_Train_v2.csv')\n", (696, 728), True, 'import pandas as pd\n'), ((821, 878), 'pandas.concat', 'pd.concat', (['[trainB_1, testB_1]'], {'axis': '(0)', 'ignore_index': '(True)'}), '([trainB_1, testB_1], axis=0, ignore_index=True)\n', (830, 878), True, 'import pandas as pd\n'), ((1834, 1862), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (1844, 1862), False, 'import spacy\n'), ((2092, 2117), 'tristream_processor.lemmatize', 'stream.lemmatize', (['dataset'], {}), '(dataset)\n', (2108, 2117), True, 'import tristream_processor as stream\n'), ((2218, 2240), 'tristream_processor.bigram', 'stream.bigram', (['dataset'], {}), '(dataset)\n', (2231, 2240), True, 'import tristream_processor as stream\n'), ((2361, 2384), 'tristream_processor.dep_rel', 'stream.dep_rel', (['dataset'], {}), '(dataset)\n', (2375, 2384), True, 'import tristream_processor as stream\n'), ((2730, 2776), 'pandas.concat', 'pd.concat', (['[stream1, stream2, stream3]'], {'axis': '(1)'}), '([stream1, stream2, stream3], axis=1)\n', (2739, 2776), True, 'import pandas as pd\n'), ((2908, 2952), 'pandas.read_csv', 'pd.read_csv', (['"""Wave2/FeatureSet.csv"""'], {'sep': '""","""'}), "('Wave2/FeatureSet.csv', sep=',')\n", (2919, 2952), True, 'import pandas as pd\n'), ((3463, 3555), 'pandas.concat', 'pd.concat', (['[dataset.iloc[0:, 0], stream1, stream2, stream3, dataset.iloc[0:, 2]]'], {'axis': '(1)'}), '([dataset.iloc[0:, 0], stream1, stream2, stream3, dataset.iloc[0:,\n 2]], axis=1)\n', (3472, 3555), True, 'import pandas as pd\n'), ((3829, 3871), 'pandas.read_csv', 'pd.read_csv', (['"""Wave2/WholeSet.csv"""'], {'sep': '""","""'}), "('Wave2/WholeSet.csv', 
sep=',')\n", (3840, 3871), True, 'import pandas as pd\n'), ((6029, 6085), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'max_features': 'max_ft', 'ngram_range': '(1, 2)'}), '(max_features=max_ft, ngram_range=(1, 2))\n', (6044, 6085), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((6614, 6636), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {}), '(X, y)\n', (6630, 6636), False, 'from sklearn.model_selection import train_test_split\n'), ((7402, 7415), 'numpy.array', 'np.array', (['prf'], {}), '(prf)\n', (7410, 7415), True, 'import numpy as np\n'), ((7429, 7502), 'pandas.DataFrame', 'pd.DataFrame', (['PRF'], {'index': "['Precision', 'Recall', 'F1 Measure', 'Support']"}), "(PRF, index=['Precision', 'Recall', 'F1 Measure', 'Support'])\n", (7441, 7502), True, 'import pandas as pd\n'), ((7769, 7818), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(50)', 'n_jobs': '(4)'}), '(n_estimators=50, n_jobs=4)\n', (7791, 7818), False, 'from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n'), ((7912, 7944), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7928, 7944), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((7969, 8016), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (8000, 8016), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((8910, 8954), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'multi_class': '"""crammer_singer"""', 'C': '(1)'}), "(multi_class='crammer_singer', C=1)\n", (8919, 8954), False, 'from sklearn.svm import LinearSVC\n'), ((9057, 9089), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', 
(['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (9073, 9089), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((9115, 9162), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (9146, 9162), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((10046, 10061), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (10059, 10061), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((10168, 10200), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10184, 10200), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((10226, 10273), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10257, 10273), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((11088, 11137), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(25)', 'n_jobs': '(4)'}), '(n_estimators=25, n_jobs=4)\n', (11110, 11137), False, 'from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n'), ((11152, 11196), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'multi_class': '"""crammer_singer"""', 'C': '(1)'}), "(multi_class='crammer_singer', C=1)\n", (11161, 11196), False, 'from sklearn.svm import LinearSVC\n'), ((11210, 11225), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (11223, 11225), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((11375, 11483), 'sklearn.ensemble.VotingClassifier', 'VotingClassifier', ([], {'estimators': "[('lr', RFC_C1), 
('rf', LSVC_C2), ('gnb', MNB_C3)]", 'voting': '"""hard"""', 'n_jobs': '(4)'}), "(estimators=[('lr', RFC_C1), ('rf', LSVC_C2), ('gnb',\n MNB_C3)], voting='hard', n_jobs=4)\n", (11391, 11483), False, 'from sklearn.ensemble import VotingClassifier\n'), ((11614, 11646), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (11630, 11646), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((11672, 11719), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (11703, 11719), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((3918, 3975), 'extractUnique.unique', 'xq.unique', ([], {'whole_df': 'whole_df', 'bigram_col': '(2)', 'dep_rel_col': '(3)'}), '(whole_df=whole_df, bigram_col=2, dep_rel_col=3)\n', (3927, 3975), True, 'import extractUnique as xq\n'), ((4697, 4776), 'extractUnique.combiner', 'xq.combiner', ([], {'Feature_df': 'Feature_df', 'lemma_col': '(1)', 'uniqueFeat_col': '(2)', 'use_ast': '(True)'}), '(Feature_df=Feature_df, lemma_col=1, uniqueFeat_col=2, use_ast=True)\n', (4708, 4776), True, 'import extractUnique as xq\n'), ((5135, 5146), 'time.time', 'time.time', ([], {}), '()\n', (5144, 5146), False, 'import time\n'), ((5168, 5220), 'extractUnique.get_correct_spell', 'xq.get_correct_spell', ([], {'word_list': 'c_list', 'split_by': '""";"""'}), "(word_list=c_list, split_by=';')\n", (5188, 5220), True, 'import extractUnique as xq\n'), ((5318, 5329), 'time.time', 'time.time', ([], {}), '()\n', (5327, 5329), False, 'import time\n'), ((8169, 8209), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {'average': 'method'}), '(y_test, y_pred, average=method)\n', (8177, 8209), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, 
f1_score, precision_score, recall_score\n'), ((8231, 8278), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': 'method'}), '(y_test, y_pred, average=method)\n', (8246, 8278), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((8300, 8344), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': 'method'}), '(y_test, y_pred, average=method)\n', (8312, 8344), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((9296, 9336), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {'average': 'method'}), '(y_test, y_pred, average=method)\n', (9304, 9336), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((9358, 9405), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': 'method'}), '(y_test, y_pred, average=method)\n', (9373, 9405), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((9427, 9471), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': 'method'}), '(y_test, y_pred, average=method)\n', (9439, 9471), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((10409, 10449), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {'average': 'method'}), '(y_test, y_pred, average=method)\n', (10417, 10449), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((10471, 10518), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': 'method'}), '(y_test, y_pred, average=method)\n', (10486, 10518), False, 'from 
sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((10540, 10584), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': 'method'}), '(y_test, y_pred, average=method)\n', (10552, 10584), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((11846, 11887), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {'average': '"""macro"""'}), "(y_test, y_pred, average='macro')\n", (11854, 11887), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((11909, 11957), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': '"""micro"""'}), "(y_test, y_pred, average='micro')\n", (11924, 11957), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((11979, 12024), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': '"""micro"""'}), "(y_test, y_pred, average='micro')\n", (11991, 12024), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score\n'), ((4372, 4389), 'pandas.Series', 'pd.Series', (['u_feat'], {}), '(u_feat)\n', (4381, 4389), True, 'import pandas as pd\n'), ((5725, 5746), 'pandas.Series', 'pd.Series', (['ngram_list'], {}), '(ngram_list)\n', (5734, 5746), True, 'import pandas as pd\n'), ((14025, 14044), 'os.listdir', 'os.listdir', (['"""Wave2"""'], {}), "('Wave2')\n", (14035, 14044), False, 'import os\n'), ((14462, 14481), 'os.listdir', 'os.listdir', (['"""Wave2"""'], {}), "('Wave2')\n", (14472, 14481), False, 'import os\n'), ((14813, 14858), 'pandas.read_csv', 'pd.read_csv', (['"""Wave2/stream1.csv"""'], {'header': 'None'}), "('Wave2/stream1.csv', header=None)\n", (14824, 14858), True, 'import 
pandas as pd\n'), ((14875, 14920), 'pandas.read_csv', 'pd.read_csv', (['"""Wave2/stream2.csv"""'], {'header': 'None'}), "('Wave2/stream2.csv', header=None)\n", (14886, 14920), True, 'import pandas as pd\n'), ((14937, 14982), 'pandas.read_csv', 'pd.read_csv', (['"""Wave2/stream3.csv"""'], {'header': 'None'}), "('Wave2/stream3.csv', header=None)\n", (14948, 14982), True, 'import pandas as pd\n'), ((15262, 15281), 'os.listdir', 'os.listdir', (['"""Wave2"""'], {}), "('Wave2')\n", (15272, 15281), False, 'import os\n'), ((15014, 15029), 'pandas.Series', 'pd.Series', (['a[0]'], {}), '(a[0])\n', (15023, 15029), True, 'import pandas as pd\n'), ((15062, 15077), 'pandas.Series', 'pd.Series', (['b[0]'], {}), '(b[0])\n', (15071, 15077), True, 'import pandas as pd\n'), ((15110, 15125), 'pandas.Series', 'pd.Series', (['c[0]'], {}), '(c[0])\n', (15119, 15125), True, 'import pandas as pd\n'), ((15576, 15624), 'pandas.read_csv', 'pd.read_csv', (['"""Wave2/ngram_list.csv"""'], {'header': 'None'}), "('Wave2/ngram_list.csv', header=None)\n", (15587, 15624), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 17:42:09 2019
@author: WENDY
"""
import os
import numpy as np
import scipy.sparse as sp
from src.graphviz.func import postprune_init
# 获得所有文件夹目录
def Getdirnext(dirname_list, f=0):
    """Expand each directory in *dirname_list* into its child dirs 0..4.

    For every non-empty entry, probe the five candidate children
    ``<name>/<i>`` (i = 0..4): an existing child is appended as its path,
    a missing one as the placeholder ``[]``.  Entries equal to ``[]``
    contribute nothing.  ``f`` flips to 1 as soon as any child exists.

    :param dirname_list: list of directory paths (or ``[]`` placeholders)
    :param f: incoming flag; returned as 1 if any child directory exists
    :return: tuple ``(dirnext, f)`` with the expanded next level and flag
    """
    dirnext = []
    for parent in dirname_list:
        if parent == []:
            continue
        for child_idx in range(5):
            candidate = parent + os.path.sep + '%d' % child_idx
            if os.path.exists(candidate):
                f = 1
                dirnext.append(candidate)
            else:
                dirnext.append([])
    return dirnext, f
# 归一化
def Getnorm(data):
    """Scale *data* so that its elements sum to one (L1 normalisation)."""
    total = sum(data)
    return [value / total for value in data]
def normalize(data):
    """Row-normalise *data* in place and return the same object.

    Each row is divided (in place) by its own sum; rows summing to zero
    fall through to numpy's divide-by-zero behaviour unchanged.
    """
    for idx in range(len(data)):
        data[idx] /= np.sum(data[idx])
    return data
# 求矩阵的熵(每一个向量距离质心的离散程度)
def Getmatrix_dis(ma):
    """Euclidean distance of every row of *ma* to the column-wise centroid.

    Uses the expansion ||a - c||^2 = ||a||^2 + ||c||^2 - 2 a.c so that the
    sparse matrix never has to be densified row by row.

    :param ma: sp.csr_matrix of shape (n_rows, n_cols)
    :return: tuple ``(distances, ma.shape)`` where ``distances`` is a
             1 x n_rows matrix of per-row distances to the centroid
    """
    # Centroid of all rows (dense 1 x n_cols matrix).
    centroid = sp.csr_matrix.mean(ma, axis=0)
    # Cross term a.c for every row at once (1 x n_rows).
    cross = centroid * ma.T
    # ||c||^2, broadcast to match the cross-term row.
    centroid_sq = np.sum(np.power(centroid, 2), axis=1)
    centroid_sq = np.tile(centroid_sq, (1, cross.shape[1]))
    # ||a||^2 per row (n_rows x 1).
    row_sq = sp.csr_matrix.sum(sp.csr_matrix.power(ma, 2), axis=1)
    # Squared distances, then the Euclidean distances.
    sq_dist = row_sq.T + centroid_sq - 2 * cross
    return np.sqrt(sq_dist), ma.shape
# 得到 sub_list
def Getlist(data, k, U):
    """Build per-cluster index lists from a membership matrix.

    :param data: unused; kept for interface compatibility with callers
    :param k: number of clusters
    :param U: sparse membership matrix (POIs x clusters)
    :return: list of k lists; element i holds the row indices whose
             (row-normalised) membership is largest for cluster i
    """
    # Hoisted out of the loop: toarray/normalize/argmax do not depend on
    # the cluster index, but the original recomputed all three for every
    # one of the k iterations.
    matrix = U.toarray()
    matrix = normalize(matrix)
    # Cluster label per POI = column of the largest membership value.
    class_POI = matrix.argmax(axis=1)
    sub_list = []
    for i in range(k):
        # Row indices assigned to cluster i.
        index = np.where(class_POI == i)
        sub_list.append(index[0].tolist())
    return sub_list
def GetSE(X, U, sub=5, alpha=4):
    """Decide whether splitting a node into *sub* clusters pays off.

    Computes the total distance-to-centroid (SE) of X, partitions X into
    ``sub`` clusters via the membership matrix U, sums the within-cluster
    distances (SSE), and compares the reduction against the penalty
    ``alpha * sub``.

    :param X: sparse data matrix (rows = POIs)
    :param U: sparse membership matrix (POIs x clusters)
    :param sub: number of child clusters
    :param alpha: penalty weight per extra cluster
    :return: ``(X SE, summed child SSE, loss, keep-split flag,
              SE reduction, X.shape)``
    """
    # Total distance of every row to the global centroid.
    X_dis, X_shape = Getmatrix_dis(X)
    X_dis_list = X_dis.tolist()[0]
    X_dis_sum = sum(X_dis_list)
    # Bug fix: the cluster count was hard-coded as the literal 5 here, so a
    # caller passing a different `sub` silently got an inconsistent split.
    sub_list = Getlist(X, sub, U)
    sub_SE = []
    for sub_list_ in sub_list:
        # Stack the rows belonging to this cluster into one sparse matrix.
        sub_data = X[sub_list_[0]]
        for i in sub_list_[1:]:
            sub_data = sp.vstack((sub_data, X[i]))
        # Within-cluster distance sum.
        sub_dis, sub_shape = Getmatrix_dis(sub_data)
        sub_dis_list = sub_dis.tolist()[0]
        sub_SE.append(sum(sub_dis_list))
    sub_SSE = sum(sub_SE)
    # Keep the split only if the SE reduction beats the cluster penalty.
    loss = (X_dis_sum - sub_SSE) - alpha * sub
    result = loss >= 0
    return X_dis_sum, sub_SSE, loss, result, X_dis_sum - sub_SSE, X_shape
def postPrune(data_dir):
    """Post-prune a hierarchical clustering result tree.

    Walks the directory tree produced by a clustering run (levels 0..4 per
    node), recomputes the SSE-based split criterion (GetSE) for every node,
    and deletes the visualisation CSVs of the strict descendants of nodes
    whose split did not pay off.

    :param data_dir: name of the run directory (timestamp string)
    """
    # Resolve the input / output folder paths for this run.
    data_path, visual_data_path, visual_datacut_path = postprune_init(data_dir)
    print("[PostPrune] 待进行后剪枝的结果: {}".format(data_path))
    print("[PostPrune] 后剪枝后结果的保存文件夹: {} ".format(visual_datacut_path))
    # NOTE(review): Windows-style '\\' separators are hard-coded here and in
    # the split('\\') calls below -- this function assumes a Windows setup.
    U_name = '\\model\\1001_U_sp.npz'
    X_name = '\\data\\buchai_POI_matrix.npz'
    dirnext = [data_path]
    dir_all = []
    f = 1
    # Breadth-first expansion: collect every level of the directory tree
    # until Getdirnext reports no more existing children (f == 0).
    while f:
        dir_all.append(dirnext)
        # print(dirnext)
        dirnext, f = Getdirnext(dirnext)
    # Strip the common run-path prefix so each node keeps only its relative
    # path components (e.g. ['0', '3'] for <run>/0/3).
    data_path_text = data_path.split('\\')
    get_dir = []
    for sub_dir in dir_all:
        get_dir_sub = []
        for sub_dir_ in sub_dir:
            if sub_dir_ != []:
                sub_dir_text = sub_dir_.split('\\')
                get_dir_sub.append([i for i in sub_dir_text if i not in data_path_text])
        get_dir.append(get_dir_sub)
    # Per-level accumulators: SE reduction (CZ), loss values, (SE, SSE)
    # pairs, verbose records and the boolean keep/prune decision per node.
    CZ = []
    LOSS = []
    shang = []
    SSE_all = []
    SSE_result = []
    for i in range(len(dir_all)):
        SSE_alli = []
        SSE_resulti = []
        CZi = []
        LOSSi = []
        shangi = []
        dir_ = dir_all[i]
        for file in dir_:
            if file != []:
                # Load this node's membership (U) and data (X) matrices and
                # evaluate the split criterion.
                U_file = file + U_name
                X_file = file + X_name
                U = sp.load_npz(U_file)
                X = sp.load_npz(X_file)
                X_SE_sum, sub_SSE, loss, result, chazhi, m_shape = GetSE(X, U)
                SSE_alli.append(['X_SE_sum:', X_SE_sum, 'sub_SSE:',
                                 sub_SSE, 'loss', loss, 'result', result])
                CZi.append(chazhi)
                LOSSi.append(loss)
                SSE_resulti.append(result)
                shangi.append((X_SE_sum, sub_SSE))
        CZ.append(CZi)
        LOSS.append(LOSSi)
        shang.append(shangi)
        SSE_all.append(SSE_alli)
        SSE_result.append(SSE_resulti)
    # Collect all CSV files written by the visualisation step.
    data_csv_path = visual_datacut_path
    csv_all = []
    for file_csv in os.listdir(data_csv_path):
        csv_all.append(file_csv)
    # csv_all.remove('results.txt')
    # The part before the first '-' encodes the node path the CSV belongs to.
    csv_name = []
    for file_csv in csv_all:
        name = file_csv.split('-')
        csv_name.append(name[0])
    filename_remove = []
    for i in range(len(SSE_result)):
        result = SSE_result[i]
        for j in range(len(result)):
            get_filename = get_dir[i][j]
            if get_filename == []:
                sub_file = ''
            else:
                sub_file = ''.join(get_filename)
            resulti = result[j]
            # A node whose split did not pay off: remove the CSVs of every
            # *strict* descendant (prefix match, but not the node itself).
            if resulti != True:
                length = len(sub_file)
                for t in range(len(csv_name)):
                    name = csv_name[t]
                    if sub_file == name[:length]:
                        if sub_file != name:
                            print(name)
                            filename_sub = data_csv_path + os.path.sep + name + '-feature.csv'
                            filename_sub_word = data_csv_path + os.path.sep + name + '-word.csv'
                            filename_sub_poi = data_csv_path + os.path.sep + name + '-poi.csv'
                            if os.path.exists(filename_sub):
                                os.remove(filename_sub)
                                os.remove(filename_sub_word)
                                os.remove(filename_sub_poi)
                                filename_remove.append(filename_sub)
                                # print('delete', name)
if __name__ == '__main__':
    # Timestamped run directory whose clustering output should be pruned.
    data_path = '2019-06-08-18-45-01'
    postPrune(data_path)
| [
"os.remove",
"numpy.sum",
"scipy.sparse.csr_matrix.mean",
"scipy.sparse.csr_matrix.sum",
"scipy.sparse.vstack",
"numpy.power",
"scipy.sparse.load_npz",
"os.path.exists",
"numpy.where",
"numpy.tile",
"src.graphviz.func.postprune_init",
"numpy.sqrt",
"os.listdir",
"scipy.sparse.csr_matrix.po... | [((966, 996), 'scipy.sparse.csr_matrix.mean', 'sp.csr_matrix.mean', (['ma'], {'axis': '(0)'}), '(ma, axis=0)\n', (984, 996), True, 'import scipy.sparse as sp\n'), ((1107, 1127), 'numpy.power', 'np.power', (['ma_mean', '(2)'], {}), '(ma_mean, 2)\n', (1115, 1127), True, 'import numpy as np\n'), ((1151, 1177), 'numpy.sum', 'np.sum', (['Sq_ma_mean'], {'axis': '(1)'}), '(Sq_ma_mean, axis=1)\n', (1157, 1177), True, 'import numpy as np\n'), ((1209, 1257), 'numpy.tile', 'np.tile', (['sum_Sq_ma_mean_i', '(1, vecProd.shape[1])'], {}), '(sum_Sq_ma_mean_i, (1, vecProd.shape[1]))\n', (1216, 1257), True, 'import numpy as np\n'), ((1288, 1314), 'scipy.sparse.csr_matrix.power', 'sp.csr_matrix.power', (['ma', '(2)'], {}), '(ma, 2)\n', (1307, 1314), True, 'import scipy.sparse as sp\n'), ((1331, 1363), 'scipy.sparse.csr_matrix.sum', 'sp.csr_matrix.sum', (['Sq_ma'], {'axis': '(1)'}), '(Sq_ma, axis=1)\n', (1348, 1363), True, 'import scipy.sparse as sp\n'), ((1457, 1470), 'numpy.sqrt', 'np.sqrt', (['SqED'], {}), '(SqED)\n', (1464, 1470), True, 'import numpy as np\n'), ((2848, 2872), 'src.graphviz.func.postprune_init', 'postprune_init', (['data_dir'], {}), '(data_dir)\n', (2862, 2872), False, 'from src.graphviz.func import postprune_init\n'), ((4720, 4745), 'os.listdir', 'os.listdir', (['data_csv_path'], {}), '(data_csv_path)\n', (4730, 4745), False, 'import os\n'), ((750, 765), 'numpy.sum', 'np.sum', (['data[i]'], {}), '(data[i])\n', (756, 765), True, 'import numpy as np\n'), ((1818, 1842), 'numpy.where', 'np.where', (['(class_POI == i)'], {}), '(class_POI == i)\n', (1826, 1842), True, 'import numpy as np\n'), ((2289, 2316), 'scipy.sparse.vstack', 'sp.vstack', (['(sub_data, X[i])'], {}), '((sub_data, X[i]))\n', (2298, 2316), True, 'import scipy.sparse as sp\n'), ((410, 432), 'os.path.exists', 'os.path.exists', (['newdir'], {}), '(newdir)\n', (424, 432), False, 'import os\n'), ((4020, 4039), 'scipy.sparse.load_npz', 'sp.load_npz', (['U_file'], {}), 
'(U_file)\n', (4031, 4039), True, 'import scipy.sparse as sp\n'), ((4060, 4079), 'scipy.sparse.load_npz', 'sp.load_npz', (['X_file'], {}), '(X_file)\n', (4071, 4079), True, 'import scipy.sparse as sp\n'), ((5879, 5907), 'os.path.exists', 'os.path.exists', (['filename_sub'], {}), '(filename_sub)\n', (5893, 5907), False, 'import os\n'), ((5941, 5964), 'os.remove', 'os.remove', (['filename_sub'], {}), '(filename_sub)\n', (5950, 5964), False, 'import os\n'), ((5997, 6025), 'os.remove', 'os.remove', (['filename_sub_word'], {}), '(filename_sub_word)\n', (6006, 6025), False, 'import os\n'), ((6058, 6085), 'os.remove', 'os.remove', (['filename_sub_poi'], {}), '(filename_sub_poi)\n', (6067, 6085), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import os
import time
import argparse
import numpy as np
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
from matplotlib import widgets
# Command-line interface for the interactive visible/IR image alignment tool.
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--vis", help="visible image dir",
                    default='../../TT/hres_vis')
parser.add_argument("-r", "--ir", help="IR image dir",
                    default='../../TT/hres_tir')
parser.add_argument("-i", "--input", help="input npy file")
parser.add_argument("-o", "--out", help="output file")
parser.add_argument("-s", "--start", help="start of file name",
                    type=int)
parser.add_argument("-f", "--first", help="first image num in npy file",
                    type=int, default=0)
args = parser.parse_args()
vis_base=args.vis
ir_base=args.ir
# Physical length represented by one pixel of shift.
# NOTE(review): 120, 0.014 and 17 presumably encode distance, pixel pitch
# and half field-of-view angle -- confirm against the camera geometry.
lnpixel = 120*0.014/math.tan(math.radians(17))
# Image-number list and per-image alignment scan maps, loaded eagerly at
# import time (nlst.npy must exist in the working directory).
nlst = np.load('nlst.npy')
scans = np.load(args.input)
'''
l25m: the list of 25 point test of each image shape(nimage, 25)
l45mn: list 25 to 45 point test image number
l45m: shape(nimage, 20)
logtemp: [RAWlargmax, 'str', yshift, xshift]
'''
class align():
    """Interactive tool for aligning visible-light and IR image pairs.

    Relies on module-level globals: ``nlst`` (image numbers), ``scans``
    (per-image alignment score maps), ``vis_base``/``ir_base`` (image
    directories), the four matplotlib subplots (``vis_show``, ``ir_show``,
    ``grad_show``, ``scan_show``) and ``lnpixel``.  Button callbacks record
    a verdict for the current pair and advance to the next image.
    """
    def __init__(self, filename, start_num, first_img):
        """Open log file *filename* in append mode and stamp a session header.

        *start_num* indexes ``nlst`` just before the first image to show
        (``next_img`` pre-increments); *first_img* is the offset of the
        first image stored in the ``scans`` array.
        """
        timestruct = time.localtime()
        strtime = time.strftime('%Y-%m-%d %H:%M:%S', timestruct)
        self.outfn = filename
        self.outf = open(self.outfn, 'a')
        self.outf.write('-----'+strtime+'-----\n')
        self.outf.close()
        self.img_num = start_num
        self.first_img = first_img
        # Pending log entry for the current image: [yshift, xshift, verdict]
        # (optionally followed by two corrected shift values).
        self.logtemp = []
        self.yshiftNpix = None
        self.xshiftNpix = 0
    #print(num) is a speed test
    def next_img(self):
        """Flush the verdict for the current image to the log, then load,
        analyse and display the next visible/IR pair.

        The best (y, x) pixel shift is taken from the argmax of the
        pre-computed scan map (40 columns of x-shifts, re-centred by -20).
        """
        self.yshiftNpix = None
        self.xshiftNpix = 0
        self.outf = open(self.outfn, 'a')
        self.outf.write('FLIR%d'%(nlst[self.img_num]))
        # logtemp holds [yshift, xshift, verdict] or the 5-element variant
        # with corrected values; an empty list means nothing to log yet.
        if len(self.logtemp) == 3:
            self.outf.write(' %2d %2d %8s\n'%tuple(self.logtemp))
        elif len(self.logtemp) == 5:
            self.outf.write(' %2d %2d %8s %2d %2d\n'%tuple(self.logtemp))
        elif len(self.logtemp) == 0:
            pass
        else:
            raise ValueError('len logtemp not 3 or 5')
        self.outf.close()
        self.logtemp = []
        self.img_num +=1
        n = nlst[self.img_num]
        self.scan = scans[self.img_num - self.first_img]
        #print(1)
        vis_name = os.path.join(vis_base, 'FLIR'+str(n)+'.jpg')
        ir_name = os.path.join(ir_base, 'FLIR'+str(n)+'.png')
        vis_img = Image.open(vis_name)
        vis_img = vis_img.resize((427,320), Image.ANTIALIAS)
        self.vis_img = np.array(vis_img).astype(np.float32)
        ir_img = np.array(Image.open(ir_name)).astype(np.float32)
        #print(2)
        # Gradient fields: the visible frame is Gaussian-smoothed first,
        # then both images are reduced to per-pixel edge strength below.
        vis_G = np.gradient(gaussian_filter(self.vis_img, 1), axis=(0,1))
        ir_G = np.gradient(ir_img, axis=(0,1))
        #print(3)
        vis_dy = vis_G[0]
        vis_dx = vis_G[1]
        vis_r = np.sqrt(vis_dx*vis_dx + vis_dy*vis_dy)
        vis_r = np.linalg.norm(vis_r, axis=2)
        ir_dy = ir_G[0]
        ir_dx = ir_G[1]
        ir_r = np.sqrt(ir_dx*ir_dx + ir_dy*ir_dy)
        # Normalise by the 99th percentile so outlier pixels do not dominate.
        self.vis_norm = vis_r/np.percentile(vis_r, 99)
        self.ir_norm = ir_r/np.percentile(ir_r, 99)
        #print(4)
        # Flat argmax of the scan map -> (row, col) with 40 columns, then
        # re-centre both shifts by subtracting 20.
        self.yshiftNpix, self.xshiftNpix = divmod(self.scan.argmax(), 40)
        self.yshiftNpix -= 20
        self.xshiftNpix -= 20
        #print(5)
        self.logtemp.append(self.yshiftNpix)
        self.logtemp.append(self.xshiftNpix)
        # subplot(223): gradient overlay -- IR edges in green, shifted
        # visible edges in magenta (R and B channels).
        vis_norm_crop = self.vis_norm[40+self.yshiftNpix:
                                      40+self.yshiftNpix+240,
                                      53+self.xshiftNpix:
                                      53+self.xshiftNpix+320]
        grad_img = np.stack((vis_norm_crop,self.ir_norm,vis_norm_crop), axis=2)
        grad_show.clear()
        grad_show.imshow(grad_img)
        # subplot(221): shifted visible-light crop.
        vis_crop = self.vis_img[40+self.yshiftNpix:
                                40+self.yshiftNpix+240,
                                53+self.xshiftNpix:
                                53+self.xshiftNpix+320]/255.0
        vis_show.clear()
        vis_show.imshow(vis_crop)
        # subplot(222): raw IR frame.
        ir_show.clear()
        ir_show.imshow(ir_img)
        # subplot(224): the scan score map with the chosen shift marked.
        scan_show.clear()
        scan_show.imshow(self.scan,
                         extent=(-20.5,19.5,39.5,-20.5), cmap='gist_ncar')
        scan_show.scatter(self.xshiftNpix, self.yshiftNpix, color='red')
        #print(6)
        # Pixel shift -> physical length scale; a zero shift maps to inf.
        try:
            show_yl = lnpixel / (self.yshiftNpix)
        except ZeroDivisionError:
            show_yl = float('inf')
        try:
            show_xl = lnpixel / (self.xshiftNpix)
        except ZeroDivisionError:
            show_xl = float('inf')
        plt.draw()
        print('FLIR%d'%n)
        print('y %2d %.2f'%(self.yshiftNpix, show_yl))
        print('x %2d %.2f'%(self.xshiftNpix, show_xl))
    def pass_click(self, event):
        """Button callback: accept the automatic shift and move on."""
        self.logtemp.append('pass')
        self.next_img()
    def not_pass_click(self, event):
        """Button callback: reject the automatic shift and move on."""
        self.logtemp.append('not pass')
        self.next_img()
    def change_value_click(self, event):
        """Button callback: mark the shift as manually changed and move on."""
        self.logtemp.append('change')
        #self.logtemp.append(int(input('yshift:')))
        #self.logtemp.append(int(input('xshift:')))
        self.next_img()
    def update_show(self):
        """Redraw the visible crop and gradient overlay for the current
        shift values (35 + n35 equals the 40 + yshift origin of next_img).
        """
        n35 = self.yshiftNpix + 5
        xsp = self.xshiftNpix
        vis_crop = self.vis_img[35+n35:35+n35+240, 53+xsp:53+xsp+320]/255.0
        vis_show.clear()
        vis_show.imshow(vis_crop)
        vis_norm_crop = self.vis_norm[35+n35:35+n35+240, 53+xsp:53+xsp+320]
        grad_img = np.stack((vis_norm_crop,self.ir_norm,vis_norm_crop), axis=2)
        grad_show.clear()
        grad_show.imshow(grad_img)
# Resume support: start from the image number given on the command line,
# otherwise from the beginning (next_img() pre-increments, hence -1).
if args.start is None:
    start_num = -1
else:
    start_num = np.where(np.array(nlst) == args.start)[0][0]
aligner = align(args.out, start_num, args.first)
# 2x2 figure: visible crop / IR frame / gradient overlay / scan score map.
vis_show = plt.subplot(221)
ir_show = plt.subplot(222)
grad_show = plt.subplot(223)
scan_show = plt.subplot(224)
# Verdict buttons wired to the aligner callbacks.
pass_ax = plt.axes([0.81, 0.01, 0.09, 0.05])
pass_button = widgets.Button(pass_ax, 'pass')
pass_button.on_clicked(aligner.pass_click)
not_pass_ax = plt.axes([0.71, 0.01, 0.05, 0.03])
not_pass_button = widgets.Button(not_pass_ax, 'not pass')
not_pass_button.on_clicked(aligner.not_pass_click)
change_value_ax = plt.axes([0.71, 0.05, 0.05, 0.03])
change_value_button = widgets.Button(change_value_ax, 'change value')
change_value_button.on_clicked(aligner.change_value_click)
# TextBox.set_val is slow, so it is no longer used here.
aligner.next_img()
plt.show()
| [
"numpy.stack",
"matplotlib.pyplot.subplot",
"numpy.load",
"scipy.ndimage.filters.gaussian_filter",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.axes",
"math.radians",
"time.strftime",
"matplotlib.widgets.Button",
"PIL.Image.open",
"matplotlib.pyplot.draw",
"numpy.g... | [((846, 871), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (869, 871), False, 'import argparse\n'), ((1528, 1547), 'numpy.load', 'np.load', (['"""nlst.npy"""'], {}), "('nlst.npy')\n", (1535, 1547), True, 'import numpy as np\n'), ((1556, 1575), 'numpy.load', 'np.load', (['args.input'], {}), '(args.input)\n', (1563, 1575), True, 'import numpy as np\n'), ((6648, 6664), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (6659, 6664), True, 'import matplotlib.pyplot as plt\n'), ((6675, 6691), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (6686, 6691), True, 'import matplotlib.pyplot as plt\n'), ((6704, 6720), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (6715, 6720), True, 'import matplotlib.pyplot as plt\n'), ((6733, 6749), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (6744, 6749), True, 'import matplotlib.pyplot as plt\n'), ((6760, 6794), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.81, 0.01, 0.09, 0.05]'], {}), '([0.81, 0.01, 0.09, 0.05])\n', (6768, 6794), True, 'import matplotlib.pyplot as plt\n'), ((6809, 6840), 'matplotlib.widgets.Button', 'widgets.Button', (['pass_ax', '"""pass"""'], {}), "(pass_ax, 'pass')\n", (6823, 6840), False, 'from matplotlib import widgets\n'), ((6899, 6933), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.71, 0.01, 0.05, 0.03]'], {}), '([0.71, 0.01, 0.05, 0.03])\n', (6907, 6933), True, 'import matplotlib.pyplot as plt\n'), ((6952, 6991), 'matplotlib.widgets.Button', 'widgets.Button', (['not_pass_ax', '"""not pass"""'], {}), "(not_pass_ax, 'not pass')\n", (6966, 6991), False, 'from matplotlib import widgets\n'), ((7062, 7096), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.71, 0.05, 0.05, 0.03]'], {}), '([0.71, 0.05, 0.05, 0.03])\n', (7070, 7096), True, 'import matplotlib.pyplot as plt\n'), ((7119, 7166), 'matplotlib.widgets.Button', 'widgets.Button', (['change_value_ax', '"""change value"""'], {}), 
"(change_value_ax, 'change value')\n", (7133, 7166), False, 'from matplotlib import widgets\n'), ((7293, 7303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7301, 7303), True, 'import matplotlib.pyplot as plt\n'), ((1503, 1519), 'math.radians', 'math.radians', (['(17)'], {}), '(17)\n', (1515, 1519), False, 'import math\n'), ((1855, 1871), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1869, 1871), False, 'import time\n'), ((1890, 1936), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""', 'timestruct'], {}), "('%Y-%m-%d %H:%M:%S', timestruct)\n", (1903, 1936), False, 'import time\n'), ((3118, 3138), 'PIL.Image.open', 'Image.open', (['vis_name'], {}), '(vis_name)\n', (3128, 3138), False, 'from PIL import Image\n'), ((3433, 3465), 'numpy.gradient', 'np.gradient', (['ir_img'], {'axis': '(0, 1)'}), '(ir_img, axis=(0, 1))\n', (3444, 3465), True, 'import numpy as np\n'), ((3551, 3593), 'numpy.sqrt', 'np.sqrt', (['(vis_dx * vis_dx + vis_dy * vis_dy)'], {}), '(vis_dx * vis_dx + vis_dy * vis_dy)\n', (3558, 3593), True, 'import numpy as np\n'), ((3606, 3635), 'numpy.linalg.norm', 'np.linalg.norm', (['vis_r'], {'axis': '(2)'}), '(vis_r, axis=2)\n', (3620, 3635), True, 'import numpy as np\n'), ((3699, 3737), 'numpy.sqrt', 'np.sqrt', (['(ir_dx * ir_dx + ir_dy * ir_dy)'], {}), '(ir_dx * ir_dx + ir_dy * ir_dy)\n', (3706, 3737), True, 'import numpy as np\n'), ((4384, 4446), 'numpy.stack', 'np.stack', (['(vis_norm_crop, self.ir_norm, vis_norm_crop)'], {'axis': '(2)'}), '((vis_norm_crop, self.ir_norm, vis_norm_crop), axis=2)\n', (4392, 4446), True, 'import numpy as np\n'), ((5440, 5450), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5448, 5450), True, 'import matplotlib.pyplot as plt\n'), ((6355, 6417), 'numpy.stack', 'np.stack', (['(vis_norm_crop, self.ir_norm, vis_norm_crop)'], {'axis': '(2)'}), '((vis_norm_crop, self.ir_norm, vis_norm_crop), axis=2)\n', (6363, 6417), True, 'import numpy as np\n'), ((3372, 3404), 
'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['self.vis_img', '(1)'], {}), '(self.vis_img, 1)\n', (3387, 3404), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((3764, 3788), 'numpy.percentile', 'np.percentile', (['vis_r', '(99)'], {}), '(vis_r, 99)\n', (3777, 3788), True, 'import numpy as np\n'), ((3817, 3840), 'numpy.percentile', 'np.percentile', (['ir_r', '(99)'], {}), '(ir_r, 99)\n', (3830, 3840), True, 'import numpy as np\n'), ((3223, 3240), 'numpy.array', 'np.array', (['vis_img'], {}), '(vis_img)\n', (3231, 3240), True, 'import numpy as np\n'), ((3286, 3305), 'PIL.Image.open', 'Image.open', (['ir_name'], {}), '(ir_name)\n', (3296, 3305), False, 'from PIL import Image\n'), ((6551, 6565), 'numpy.array', 'np.array', (['nlst'], {}), '(nlst)\n', (6559, 6565), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook gee_score_test_simulation.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # GEE score tests
#
# This notebook uses simulation to demonstrate robust GEE score tests.
# These tests can be used in a GEE analysis to compare nested hypotheses
# about the mean structure. The tests are robust to miss-specification of
# the working correlation model, and to certain forms of misspecification of
# the variance structure (e.g. as captured by the scale parameter in a
# quasi-Poisson analysis).
#
# The data are simulated as clusters, where there is dependence within but
# not between clusters. The cluster-wise dependence is induced using a
# copula approach. The data marginally follow a negative binomial
# (gamma/Poisson) mixture.
#
# The level and power of the tests are considered below to assess the
# performance of the tests.
import pandas as pd
import numpy as np
from scipy.stats.distributions import norm, poisson
import statsmodels.api as sm
import matplotlib.pyplot as plt
# The function defined in the following cell uses a copula approach to
# simulate correlated random values that marginally follow a negative
# binomial distribution. The input parameter `u` is an array of values in
# (0, 1). The elements of `u` must be marginally uniformly distributed on
# (0, 1). Correlation in `u` will induce correlations in the returned
# negative binomial values. The array parameter `mu` gives the marginal
# means, and the scalar parameter `scale` defines the mean/variance
# relationship (the variance is `scale` times the mean). The lengths of `u`
# and `mu` must be the same.
def negbinom(u, mu, scale):
p = (scale - 1) / scale
r = mu * (1 - p) / p
x = np.random.gamma(r, p / (1 - p), len(u))
return poisson.ppf(u, mu=x)
# Below are some parameters that govern the data used in the simulation.
# Sample size
n = 1000
# Number of covariates (including intercept) in the alternative hypothesis
# model
p = 5
# Cluster size
m = 10
# Intraclass correlation (controls strength of clustering)
r = 0.5
# Group indicators: m consecutive observations share one cluster label
# (0, 0, ..., 1, 1, ... for n/m clusters of size m).
grp = np.kron(np.arange(n / m), np.ones(m))
# The simulation uses a fixed design matrix.
# Build a design matrix for the alternative (more complex) model
x = np.random.normal(size=(n, p))
x[:, 0] = 1
# The null design matrix is nested in the alternative design matrix. It
# has rank two less than the alternative design matrix.
x0 = x[:, 0:3]
# The GEE score test is robust to dependence and overdispersion. Here we
# set the overdispersion parameter. The variance of the negative binomial
# distribution for each observation is equal to `scale` times its mean
# value.
# Scale parameter for negative binomial distribution
scale = 10
# In the next cell, we set up the mean structures for the null and
# alternative models
# The coefficients used to define the linear predictors
coeff = [[4, 0.4, -0.2], [4, 0.4, -0.2, 0, -0.04]]
# The linear predictors (design matrix @ coefficients per hypothesis)
lp = [np.dot(x0, coeff[0]), np.dot(x, coeff[1])]
# The mean values (log link: mu = exp(linear predictor))
mu = [np.exp(lp[0]), np.exp(lp[1])]
# Below is a function that carries out the simulation.
# hyp = 0 is the null hypothesis, hyp = 1 is the alternative hypothesis.
# cov_struct is a statsmodels covariance structure
def dosim(hyp, cov_struct=None, mcrep=500):
    """Monte Carlo simulation of the robust GEE score test.

    Uses the module-level design (`x`, `x0`, `grp`, `mu`, `scale`, `n`,
    `m`, `r`) to generate clustered negative-binomial responses, fits the
    null and alternative GEE models, and collects score-test p-values.

    :param hyp: 0 to simulate under the null mean structure, 1 under the
        alternative (selects ``mu[hyp]``)
    :param cov_struct: statsmodels working covariance structure instance
    :param mcrep: number of Monte Carlo repetitions
    :return: ``([mean p-value, fraction of p-values < 0.1],
              [null-model scale estimates, alt-model scale estimates])``
    """
    # Storage for the simulation results
    scales = [[], []]
    # P-values from the score test
    pv = []
    # Monte Carlo loop
    for k in range(mcrep):
        # Generate random "probability points" u that are uniformly
        # distributed, and correlated within clusters
        z = np.random.normal(size=n)
        u = np.random.normal(size=n // m)
        u = np.kron(u, np.ones(m))
        # Equicorrelated Gaussian copula: mix iid noise with a shared
        # per-cluster effect, then map to (0, 1) via the normal CDF.
        z = r * z + np.sqrt(1 - r**2) * u
        u = norm.cdf(z)
        # Generate the observed responses
        y = negbinom(u, mu=mu[hyp], scale=scale)
        # Fit the null model
        m0 = sm.GEE(y,
                    x0,
                    groups=grp,
                    cov_struct=cov_struct,
                    family=sm.families.Poisson())
        r0 = m0.fit(scale='X2')
        scales[0].append(r0.scale)
        # Fit the alternative model
        m1 = sm.GEE(y,
                    x,
                    groups=grp,
                    cov_struct=cov_struct,
                    family=sm.families.Poisson())
        r1 = m1.fit(scale='X2')
        scales[1].append(r1.scale)
        # Carry out the score test
        st = m1.compare_score_test(r0)
        pv.append(st["p-value"])
    pv = np.asarray(pv)
    rslt = [np.mean(pv), np.mean(pv < 0.1)]
    return rslt, scales
# Run the simulation using the independence working covariance structure.
# We expect the mean to be around 0 under the null hypothesis, and much
# lower under the alternative hypothesis. Similarly, we expect that under
# the null hypothesis, around 10% of the p-values are less than 0.1, and a
# much greater fraction of the p-values are less than 0.1 under the
# alternative hypothesis.
# Run both hypotheses with the independence working correlation; each
# entry of `rslt` is [mean p-value, Prop(p < 0.1)] from dosim.
rslt, scales = [], []
for hyp in 0, 1:
    s, t = dosim(hyp, sm.cov_struct.Independence())
    rslt.append(s)
    scales.append(t)
rslt = pd.DataFrame(rslt, index=["H0", "H1"], columns=["Mean", "Prop(p<0.1)"])
print(rslt)
# Next we check to make sure that the scale parameter estimates are
# reasonable. We are assessing the robustness of the GEE score test to
# dependence and overdispersion, so here we are confirming that the
# overdispersion is present as expected.
_ = plt.boxplot([scales[0][0], scales[0][1], scales[1][0], scales[1][1]])
plt.ylabel("Estimated scale")
# Next we conduct the same analysis using an exchangeable working
# correlation model. Note that this will be slower than the example above
# using independent working correlation, so we use fewer Monte Carlo
# repetitions.
rslt, scales = [], []
for hyp in 0, 1:
    s, t = dosim(hyp, sm.cov_struct.Exchangeable(), mcrep=100)
    rslt.append(s)
    scales.append(t)
rslt = pd.DataFrame(rslt, index=["H0", "H1"], columns=["Mean", "Prop(p<0.1)"])
print(rslt)
| [
"pandas.DataFrame",
"matplotlib.pyplot.boxplot",
"scipy.stats.distributions.norm.cdf",
"numpy.asarray",
"numpy.ones",
"statsmodels.api.cov_struct.Independence",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"numpy.random.normal",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"scipy.stats.distributio... | [((2358, 2387), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, p)'}), '(size=(n, p))\n', (2374, 2387), True, 'import numpy as np\n'), ((5231, 5302), 'pandas.DataFrame', 'pd.DataFrame', (['rslt'], {'index': "['H0', 'H1']", 'columns': "['Mean', 'Prop(p<0.1)']"}), "(rslt, index=['H0', 'H1'], columns=['Mean', 'Prop(p<0.1)'])\n", (5243, 5302), True, 'import pandas as pd\n'), ((5570, 5639), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['[scales[0][0], scales[0][1], scales[1][0], scales[1][1]]'], {}), '([scales[0][0], scales[0][1], scales[1][0], scales[1][1]])\n', (5581, 5639), True, 'import matplotlib.pyplot as plt\n'), ((5640, 5669), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Estimated scale"""'], {}), "('Estimated scale')\n", (5650, 5669), True, 'import matplotlib.pyplot as plt\n'), ((6048, 6119), 'pandas.DataFrame', 'pd.DataFrame', (['rslt'], {'index': "['H0', 'H1']", 'columns': "['Mean', 'Prop(p<0.1)']"}), "(rslt, index=['H0', 'H1'], columns=['Mean', 'Prop(p<0.1)'])\n", (6060, 6119), True, 'import pandas as pd\n'), ((1877, 1897), 'scipy.stats.distributions.poisson.ppf', 'poisson.ppf', (['u'], {'mu': 'x'}), '(u, mu=x)\n', (1888, 1897), False, 'from scipy.stats.distributions import norm, poisson\n'), ((2212, 2228), 'numpy.arange', 'np.arange', (['(n / m)'], {}), '(n / m)\n', (2221, 2228), True, 'import numpy as np\n'), ((2230, 2240), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (2237, 2240), True, 'import numpy as np\n'), ((3069, 3089), 'numpy.dot', 'np.dot', (['x0', 'coeff[0]'], {}), '(x0, coeff[0])\n', (3075, 3089), True, 'import numpy as np\n'), ((3091, 3110), 'numpy.dot', 'np.dot', (['x', 'coeff[1]'], {}), '(x, coeff[1])\n', (3097, 3110), True, 'import numpy as np\n'), ((3137, 3150), 'numpy.exp', 'np.exp', (['lp[0]'], {}), '(lp[0])\n', (3143, 3150), True, 'import numpy as np\n'), ((3152, 3165), 'numpy.exp', 'np.exp', (['lp[1]'], {}), '(lp[1])\n', (3158, 3165), True, 'import numpy as np\n'), ((4614, 4628), 
'numpy.asarray', 'np.asarray', (['pv'], {}), '(pv)\n', (4624, 4628), True, 'import numpy as np\n'), ((3692, 3716), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n'}), '(size=n)\n', (3708, 3716), True, 'import numpy as np\n'), ((3729, 3758), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n // m)'}), '(size=n // m)\n', (3745, 3758), True, 'import numpy as np\n'), ((3848, 3859), 'scipy.stats.distributions.norm.cdf', 'norm.cdf', (['z'], {}), '(z)\n', (3856, 3859), False, 'from scipy.stats.distributions import norm, poisson\n'), ((4641, 4652), 'numpy.mean', 'np.mean', (['pv'], {}), '(pv)\n', (4648, 4652), True, 'import numpy as np\n'), ((4654, 4671), 'numpy.mean', 'np.mean', (['(pv < 0.1)'], {}), '(pv < 0.1)\n', (4661, 4671), True, 'import numpy as np\n'), ((5153, 5181), 'statsmodels.api.cov_struct.Independence', 'sm.cov_struct.Independence', ([], {}), '()\n', (5179, 5181), True, 'import statsmodels.api as sm\n'), ((5959, 5987), 'statsmodels.api.cov_struct.Exchangeable', 'sm.cov_struct.Exchangeable', ([], {}), '()\n', (5985, 5987), True, 'import statsmodels.api as sm\n'), ((3782, 3792), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (3789, 3792), True, 'import numpy as np\n'), ((3814, 3833), 'numpy.sqrt', 'np.sqrt', (['(1 - r ** 2)'], {}), '(1 - r ** 2)\n', (3821, 3833), True, 'import numpy as np\n'), ((4131, 4152), 'statsmodels.api.families.Poisson', 'sm.families.Poisson', ([], {}), '()\n', (4150, 4152), True, 'import statsmodels.api as sm\n'), ((4406, 4427), 'statsmodels.api.families.Poisson', 'sm.families.Poisson', ([], {}), '()\n', (4425, 4427), True, 'import statsmodels.api as sm\n')] |
"""
## Conditional Deep Feature Consistent VAE model
--------------------------------------------------
## Author: <NAME>.
## Email: <EMAIL>
## Version: 1.0.0
--------------------------------------------------
## License: MIT
## Copyright: Copyright <NAME> & <NAME> 2020, ICSG3D
-------------------------------------------------
"""
import argparse
import os
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from keras import backend as K
from keras.callbacks import Callback, ModelCheckpoint
from keras.datasets import mnist
from keras.layers import (
BatchNormalization,
Concatenate,
Conv2D,
Conv3D,
Conv3DTranspose,
Dense,
Dropout,
Flatten,
Input,
Lambda,
LeakyReLU,
MaxPool3D,
ReLU,
Reshape,
UpSampling3D,
)
from keras.losses import binary_crossentropy, mse
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras.utils import Sequence, plot_model, to_categorical
from sklearn.manifold import TSNE
import tensorflow as tf
from unet.unet import custom_objects
from viz import imscatter, viz
# Non-interactive Agg backend so figures can be rendered without a display.
matplotlib.use("Agg")
def sampling(args):
    """Reparameterization trick: sample z ~ N(z_mean, exp(z_log_var)).

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)
    # Returns
        z (tensor): sampled latent vector
    """
    mean, log_var = args
    n_rows = K.shape(mean)[0]
    n_cols = K.int_shape(mean)[1]
    # random_normal defaults to mean=0 and std=1.0.
    noise = K.random_normal(shape=(n_rows, n_cols))
    std = K.exp(0.5 * log_var)
    return mean + std * noise
class LatticeDFCVAE:
"""
Conditional Variational Auto-encoder with a deep feature consistent model
params:
input_shape: Dimensions of the density matrix inputs, default (32,32,32,4)
kernel_size: kernel dim for Conv3D layers
pool_size: max pool dimensions
filters: Number of filters at each conv layer
latent_dim: size of bottleneck
beta: Weighting of KLD loss term
alpha: Weightin of DFC loss term
perceptual_model: Path to pre-trained Unet model (.h5)
pm_layers: Which layers to use of unet for DFC calcs
pm_layer_Weights: Weights of each DFC layer
cond_shape: Dimension of condition vector
custom_objects: Losses and metrics for the unet
"""
def __init__(
self,
input_shape=(32, 32, 32, 4),
kernel_size=(3, 3, 3),
pool_size=(2, 2, 2),
filters=[16, 32, 64, 128],
latent_dim=256,
beta=3e-4,
alpha=0.5,
optimizer=Adam(5e-4),
perceptual_model="saved_models/unet.h5",
pm_layers=["re_lu_2", "re_lu_4", "re_lu_6", "re_lu_8"],
pm_layer_weights=[1.0, 1.0, 1.0, 1.0],
cond_shape=10,
custom_objects=custom_objects,
output_dir = 'output'
):
self.input_shape = input_shape
self.kernel_size = kernel_size
self.pool_size = pool_size
self.filters = filters
self.latent_dim = latent_dim
self.channels = self.input_shape[-1]
self.optimizer = optimizer
self.beta = beta
self.alpha = alpha
self.batch_size = None
self.cond_shape = cond_shape
self.losses = []
self.sdir = output_dir
self.pm = load_model(perceptual_model, custom_objects=custom_objects)
self.pm_layers = pm_layers
self.pm_layer_weights = pm_layer_weights
self.metrics = [self.perceptual_loss, self.mse_loss, self.kld_loss]
self.metric_names = ["Loss", "PM", "MSE", "KLD"]
def _set_model(self, weights=None, batch_size=20):
self.encoder = self.build_encoder()
self.decoder = self.build_decoder()
M_input = Input(batch_shape=(self.batch_size,) + self.input_shape)
cond_input = Input(batch_shape=(self.batch_size, self.cond_shape))
z_mean, z_log_var, z = self.encoder([M_input, cond_input])
reconstructed = self.decoder([z, cond_input])
self.model = Model(inputs=[M_input, cond_input], outputs=reconstructed)
self.z = z
self.z_mean = z_mean
self.z_log_var = z_log_var
self.model.compile(
optimizer=self.optimizer,
loss=self._vae_dfc_loss(self.alpha, self.beta),
metrics=self.metrics,
)
# self.decoder.compile(optimizer=self.optimizer, loss='mse')
self.batch_size = batch_size
if weights and os.path.exists(weights):
self.model.load_weights(weights)
self.filepath = weights
elif weights and not os.path.exists(weights):
self.filepath = weights
else:
self.filepath = "saved_models/lattice_dfc_vae_weights.best.hdf5"
# print("Model setup complete...")
return
def build_encoder(self):
"""
Encoder model
"""
M_input = Input(batch_shape=(self.batch_size,) + self.input_shape)
cond_input = Input(batch_shape=(self.batch_size, self.cond_shape))
cond = Reshape((1, 1, 1, self.cond_shape))(cond_input)
cond = Lambda(K.tile, arguments={"n": self.input_shape})(cond)
x = Concatenate()([M_input, cond])
for i in range(len(self.filters)):
f = self.filters[i]
x = Conv3D(filters=f, kernel_size=self.kernel_size, padding="same")(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = MaxPool3D(pool_size=self.pool_size)(x)
x = Conv3D(filters=4, kernel_size=self.kernel_size, padding="same")(x)
x = LeakyReLU()(x)
# generate latent vector Q(z|X)
x = Flatten()(x)
x = Dense(self.latent_dim, activation="relu")(x)
z_mean = Dense(self.latent_dim, name="z_mean")(x)
z_log_var = Dense(self.latent_dim, name="z_log_var")(x)
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(self.latent_dim,), name="z")(
[z_mean, z_log_var]
)
# instantiate encoder model
encoder = Model([M_input, cond_input], [z_mean, z_log_var, z], name="encoder")
return encoder
def build_decoder(self):
"""
Decoder model
"""
latent_inputs = Input(
batch_shape=(self.batch_size, self.latent_dim), name="decoder_input"
) # (None, 256)
cond_inputs = Input(batch_shape=(self.batch_size, self.cond_shape))
# concatenate the condition
z_cond = Concatenate()([latent_inputs, cond_inputs])
x = Dense(self.latent_dim)(z_cond)
x = Reshape((4, 4, 4, 4))(x)
for i in range(len(self.filters)):
f = self.filters[::-1][i]
x = Conv3D(filters=f, kernel_size=self.kernel_size, padding="same")(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
if i < len(self.filters) - 1:
x = UpSampling3D(self.pool_size)(x)
outputs = Conv3D(
filters=self.channels,
kernel_size=self.kernel_size,
padding="same",
name="decoder_output",
)(x)
outputs = BatchNormalization()(outputs)
outputs = ReLU()(outputs)
# instantiate decoder model
decoder = Model([latent_inputs, cond_inputs], outputs, name="decoder")
return decoder
def mse_loss(self, inputs, outputs):
return mse(K.flatten(inputs), K.flatten(outputs))
def kld_loss(self, y_true=None, y_pred=None):
kl_loss = 1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
return kl_loss
def _vae_dfc_loss(self, alpha, beta):
""" Combined loss function """
alpha = K.variable(alpha)
beta = K.variable(beta)
def loss(x, t_decoded):
"""Total loss for the DFC VAE"""
rs_loss = self.mse_loss(x, t_decoded)
kl_loss = self.kld_loss()
pm_loss = self.perceptual_loss(x, t_decoded)
return K.mean(rs_loss + (alpha * pm_loss) + (beta * kl_loss))
return loss
def perceptual_loss(self, y_true, y_pred):
""" Perceptual model loss """
outputs = [self.pm.get_layer(l).output for l in self.pm_layers]
model = Model(self.pm.input, outputs)
h1_list = model(y_true)
h2_list = model(y_pred)
rc_loss = 0.0
for h1, h2, weight in zip(h1_list, h2_list, self.pm_layer_weights):
h1 = K.batch_flatten(h1)
h2 = K.batch_flatten(h2)
rc_loss += weight * K.mean(K.square(h1 - h2), axis=-1)
return rc_loss
    def train(self, train_gen, val_gen, epochs, weights=None):
        """Train the VAE for ``epochs`` epochs, tracking metrics per epoch.

        Every 10 epochs the loss curves are plotted; whenever the validation
        loss improves, diagnostic plots are refreshed and the best weights are
        checkpointed to ``self.filepath``. After training the best checkpoint
        is reloaded and exported as a full ``.h5`` model.

        :param train_gen: indexable batch generator yielding (batch, cond);
            must expose ``batch_size`` and ``list_IDs``
        :param val_gen: validation generator with the same interface
        :param epochs: number of epochs to run
        :param weights: optional checkpoint path forwarded to ``_set_model``
        """
        best_loss = np.inf
        self.train_batch_size = train_gen.batch_size
        self.val_batch_size = val_gen.batch_size
        # Model inputs use a fixed batch_shape, so build with the train size.
        self.batch_size = self.train_batch_size
        self.num_epochs = epochs
        train_steps_per_epoch = int(len(train_gen.list_IDs) / self.train_batch_size)
        val_steps_per_epoch = int(len(val_gen.list_IDs) / self.val_batch_size)
        print(
            "Data size %d, batch_size %d steps per epoch %d"
            % (len(train_gen.list_IDs), self.train_batch_size, train_steps_per_epoch)
        )
        self._set_model(weights)
        # Per-epoch [train_loss, val_loss] history, consumed by plot_losses.
        self.losses = np.empty((self.num_epochs, 2))
        for e in range(self.num_epochs):
            print("Epoch %s:" % e)
            t0 = time.time()
            # Training
            # Storing losses for computing mean
            train_metrics = []
            for batch_idx in range(train_steps_per_epoch):
                train_batch, train_cond = train_gen[batch_idx]
                # Inputs are [voxels, condition]; the target is the input itself.
                batch_metrics = self.model.train_on_batch(
                    [train_batch, train_cond], train_batch
                )
                train_metrics.append(np.array(list(batch_metrics)))
            # Mean over batches -> one row of [loss, PM, MSE, KLD] per epoch.
            train_metrics = np.mean(train_metrics, axis=0)
            epoch_train_loss = train_metrics[0]
            # Validation
            val_metrics = []
            for batch_idx in range(val_steps_per_epoch):
                val_batch, val_cond = val_gen[batch_idx]
                batch_metrics = self.model.test_on_batch(
                    [val_batch, val_cond], val_batch
                )
                val_metrics.append(np.array(list(batch_metrics)))
            val_metrics = np.mean(val_metrics, axis=0)
            t1 = time.time()
            epoch_str = "Time: %.3f s " % (t1 - t0)
            # +1 because metric_names also covers the combined "Loss" entry.
            for m in range(len(self.metrics) + 1):
                epoch_str += "Train %s: %.3f " % (
                    self.metric_names[m],
                    train_metrics[m],
                )
            for m in range(len(self.metrics) + 1):
                epoch_str += "Val %s: %.3f " % (self.metric_names[m], val_metrics[m])
            print(epoch_str)
            epoch_val_loss = val_metrics[0]
            self.losses[
                e,
            ] = [epoch_train_loss, epoch_val_loss]
            if e % 10 == 0:
                self.plot_losses(epoch=e, name=os.path.join(self.sdir, "vae_losses.png"))
            # Checkpoint only on validation improvement.
            if epoch_val_loss < best_loss:
                best_loss = epoch_val_loss
                self.plot_reconstructions(
                    val_gen, epoch=e, name=os.path.join(self.sdir, "vae_reconstructions.png")
                )
                self.plot_samples(
                    self.batch_size, epoch=e, name=os.path.join(self.sdir, "vae_samples.png")
                )
                self.plot_kde(val_gen, epoch=e, name=os.path.join(self.sdir, "vae_kde.png"))
                print("Saving Model")
                self.model.save_weights(self.filepath)
        # Restore the best weights and export the full model alongside them.
        self.model.load_weights(self.filepath)
        self.model.save(os.path.splitext(self.filepath)[0] + ".h5")
        print("Model saved")
def save_(self, weights, model="saved_models/vae.h5"):
self.model.load_weights(weights)
self.model.save(model)
return
def sample_vae(self, n_samples, cond=None, var=1.0):
if cond is None:
cond = np.random.randint(low=0, high=self.cond_shape, size=n_samples)
cond_tensor = to_categorical(cond, num_classes=self.cond_shape)
cond_tensor = np.tile(cond_tensor, (n_samples, 1))
z_sample = np.random.normal(0, var, size=(n_samples, self.latent_dim))
output = self.decoder.predict([z_sample, cond_tensor])
return z_sample, output
def plot_samples(self, n_samples=20, epoch=0, name=None):
_, samples = self.sample_vae(n_samples)
fig, axes = plt.subplots(10, 2)
ax = 0
for i in range(0, n_samples, 2): # 0, 2, 4, 6, 8
axes[ax][0].imshow(samples[i, :, :, 16, 0])
axes[ax][1].imshow(samples[i + 1, :, :, 16, 0])
axes[ax][0].set_xticks([])
axes[ax][0].set_yticks([])
axes[ax][1].set_xticks([])
axes[ax][1].set_yticks([])
ax += 1
if ax == 10:
break
if name is None:
name = "output/vae/samples_epoch_%d.png" % epoch
plt.tight_layout()
plt.savefig(name)
plt.close()
return
def plot_reconstructions(self, val_gen, epoch, name=None):
fig, axes = plt.subplots(int(self.batch_size/2), 2)
ax = 0
for M, cond in val_gen:
recon = self.model.predict([M, cond])
axes[ax][0].imshow(M[0, :, :, 16, 0])
axes[ax][1].imshow(recon[0, :, :, 16, 0])
axes[ax][0].set_xticks([])
axes[ax][0].set_yticks([])
axes[ax][1].set_xticks([])
axes[ax][1].set_yticks([])
ax += 1
if ax == int(self.batch_size/2):
break
if name is None:
name = "output/vae/reconstructions_epoch_%d.png" % epoch
plt.tight_layout()
plt.savefig(name)
plt.close()
return
def plot_kde(self, val_gen, epoch, name=None, maxz=1000):
z = []
# Real samples
for M, p in val_gen:
_, _, z_m = self.encoder.predict([M, p])
for iz in range(len(z_m)):
z.append(z_m[iz])
if len(z) >= maxz:
break
z = np.array(z)
x = np.linspace(-3, 3, 50)
fig, ax = plt.subplots(1, 1)
for i in range(self.latent_dim):
density = stats.gaussian_kde(z[:, i])
ax.plot(x, density(x))
plt.xlabel("$x$")
plt.ylabel("Density")
plt.show(block=True)
plt.savefig(name, format="svg")
plt.savefig(name, format="png")
plt.close()
return
def plot_losses(self, epoch, name="output/vae/loss.png"):
fig, ax = plt.subplots(1, 1)
ax.plot(self.losses[: epoch + 1, 0], label="Train Loss")
ax.plot(self.losses[: epoch + 1, 1], label="Val loss")
ax.set_xlabel("Epoch #")
ax.set_ylabel("Loss")
plt.legend(loc="best")
plt.savefig(name)
plt.close()
return
| [
"keras.models.load_model",
"keras.layers.UpSampling3D",
"numpy.empty",
"keras.models.Model",
"numpy.mean",
"keras.backend.shape",
"numpy.tile",
"numpy.random.normal",
"numpy.random.randint",
"keras.layers.Input",
"keras.layers.Reshape",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"ke... | [((1149, 1170), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1163, 1170), False, 'import matplotlib\n'), ((1569, 1604), 'keras.backend.random_normal', 'K.random_normal', ([], {'shape': '(batch, dim)'}), '(shape=(batch, dim))\n', (1584, 1604), True, 'from keras import backend as K\n'), ((1448, 1463), 'keras.backend.shape', 'K.shape', (['z_mean'], {}), '(z_mean)\n', (1455, 1463), True, 'from keras import backend as K\n'), ((1477, 1496), 'keras.backend.int_shape', 'K.int_shape', (['z_mean'], {}), '(z_mean)\n', (1488, 1496), True, 'from keras import backend as K\n'), ((2615, 2627), 'keras.optimizers.Adam', 'Adam', (['(0.0005)'], {}), '(0.0005)\n', (2619, 2627), False, 'from keras.optimizers import Adam\n'), ((3342, 3401), 'keras.models.load_model', 'load_model', (['perceptual_model'], {'custom_objects': 'custom_objects'}), '(perceptual_model, custom_objects=custom_objects)\n', (3352, 3401), False, 'from keras.models import Model, load_model\n'), ((3783, 3839), 'keras.layers.Input', 'Input', ([], {'batch_shape': '((self.batch_size,) + self.input_shape)'}), '(batch_shape=(self.batch_size,) + self.input_shape)\n', (3788, 3839), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((3861, 3914), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(self.batch_size, self.cond_shape)'}), '(batch_shape=(self.batch_size, self.cond_shape))\n', (3866, 3914), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((4057, 4115), 'keras.models.Model', 'Model', ([], {'inputs': '[M_input, cond_input]', 'outputs': 'reconstructed'}), '(inputs=[M_input, cond_input], outputs=reconstructed)\n', (4062, 4115), False, 'from keras.models import Model, load_model\n'), ((4942, 4998), 
'keras.layers.Input', 'Input', ([], {'batch_shape': '((self.batch_size,) + self.input_shape)'}), '(batch_shape=(self.batch_size,) + self.input_shape)\n', (4947, 4998), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5020, 5073), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(self.batch_size, self.cond_shape)'}), '(batch_shape=(self.batch_size, self.cond_shape))\n', (5025, 5073), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((6211, 6279), 'keras.models.Model', 'Model', (['[M_input, cond_input]', '[z_mean, z_log_var, z]'], {'name': '"""encoder"""'}), "([M_input, cond_input], [z_mean, z_log_var, z], name='encoder')\n", (6216, 6279), False, 'from keras.models import Model, load_model\n'), ((6404, 6479), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(self.batch_size, self.latent_dim)', 'name': '"""decoder_input"""'}), "(batch_shape=(self.batch_size, self.latent_dim), name='decoder_input')\n", (6409, 6479), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((6539, 6592), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(self.batch_size, self.cond_shape)'}), '(batch_shape=(self.batch_size, self.cond_shape))\n', (6544, 6592), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((7417, 7477), 'keras.models.Model', 'Model', (['[latent_inputs, cond_inputs]', 'outputs'], {'name': '"""decoder"""'}), "([latent_inputs, cond_inputs], outputs, name='decoder')\n", (7422, 7477), False, 'from 
keras.models import Model, load_model\n'), ((7755, 7778), 'keras.backend.sum', 'K.sum', (['kl_loss'], {'axis': '(-1)'}), '(kl_loss, axis=-1)\n', (7760, 7778), True, 'from keras import backend as K\n'), ((7925, 7942), 'keras.backend.variable', 'K.variable', (['alpha'], {}), '(alpha)\n', (7935, 7942), True, 'from keras import backend as K\n'), ((7958, 7974), 'keras.backend.variable', 'K.variable', (['beta'], {}), '(beta)\n', (7968, 7974), True, 'from keras import backend as K\n'), ((8469, 8498), 'keras.models.Model', 'Model', (['self.pm.input', 'outputs'], {}), '(self.pm.input, outputs)\n', (8474, 8498), False, 'from keras.models import Model, load_model\n'), ((9499, 9529), 'numpy.empty', 'np.empty', (['(self.num_epochs, 2)'], {}), '((self.num_epochs, 2))\n', (9507, 9529), True, 'import numpy as np\n'), ((12355, 12404), 'keras.utils.to_categorical', 'to_categorical', (['cond'], {'num_classes': 'self.cond_shape'}), '(cond, num_classes=self.cond_shape)\n', (12369, 12404), False, 'from keras.utils import Sequence, plot_model, to_categorical\n'), ((12427, 12463), 'numpy.tile', 'np.tile', (['cond_tensor', '(n_samples, 1)'], {}), '(cond_tensor, (n_samples, 1))\n', (12434, 12463), True, 'import numpy as np\n'), ((12483, 12542), 'numpy.random.normal', 'np.random.normal', (['(0)', 'var'], {'size': '(n_samples, self.latent_dim)'}), '(0, var, size=(n_samples, self.latent_dim))\n', (12499, 12542), True, 'import numpy as np\n'), ((12769, 12788), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(10)', '(2)'], {}), '(10, 2)\n', (12781, 12788), True, 'import matplotlib.pyplot as plt\n'), ((13295, 13313), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13311, 13313), True, 'import matplotlib.pyplot as plt\n'), ((13322, 13339), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {}), '(name)\n', (13333, 13339), True, 'import matplotlib.pyplot as plt\n'), ((13348, 13359), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13357, 13359), True, 'import 
matplotlib.pyplot as plt\n'), ((14046, 14064), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14062, 14064), True, 'import matplotlib.pyplot as plt\n'), ((14073, 14090), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {}), '(name)\n', (14084, 14090), True, 'import matplotlib.pyplot as plt\n'), ((14099, 14110), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14108, 14110), True, 'import matplotlib.pyplot as plt\n'), ((14447, 14458), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (14455, 14458), True, 'import numpy as np\n'), ((14471, 14493), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(50)'], {}), '(-3, 3, 50)\n', (14482, 14493), True, 'import numpy as np\n'), ((14513, 14531), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (14525, 14531), True, 'import matplotlib.pyplot as plt\n'), ((14666, 14683), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (14676, 14683), True, 'import matplotlib.pyplot as plt\n'), ((14692, 14713), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (14702, 14713), True, 'import matplotlib.pyplot as plt\n'), ((14722, 14742), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (14730, 14742), True, 'import matplotlib.pyplot as plt\n'), ((14751, 14782), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {'format': '"""svg"""'}), "(name, format='svg')\n", (14762, 14782), True, 'import matplotlib.pyplot as plt\n'), ((14791, 14822), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {'format': '"""png"""'}), "(name, format='png')\n", (14802, 14822), True, 'import matplotlib.pyplot as plt\n'), ((14831, 14842), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14840, 14842), True, 'import matplotlib.pyplot as plt\n'), ((14940, 14958), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (14952, 14958), True, 'import matplotlib.pyplot 
as plt\n'), ((15158, 15180), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (15168, 15180), True, 'import matplotlib.pyplot as plt\n'), ((15189, 15206), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {}), '(name)\n', (15200, 15206), True, 'import matplotlib.pyplot as plt\n'), ((15215, 15226), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15224, 15226), True, 'import matplotlib.pyplot as plt\n'), ((1625, 1647), 'keras.backend.exp', 'K.exp', (['(0.5 * z_log_var)'], {}), '(0.5 * z_log_var)\n', (1630, 1647), True, 'from keras import backend as K\n'), ((4501, 4524), 'os.path.exists', 'os.path.exists', (['weights'], {}), '(weights)\n', (4515, 4524), False, 'import os\n'), ((5089, 5124), 'keras.layers.Reshape', 'Reshape', (['(1, 1, 1, self.cond_shape)'], {}), '((1, 1, 1, self.cond_shape))\n', (5096, 5124), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5152, 5201), 'keras.layers.Lambda', 'Lambda', (['K.tile'], {'arguments': "{'n': self.input_shape}"}), "(K.tile, arguments={'n': self.input_shape})\n", (5158, 5201), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5220, 5233), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (5231, 5233), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5549, 5612), 'keras.layers.Conv3D', 'Conv3D', ([], {'filters': '(4)', 'kernel_size': 'self.kernel_size', 'padding': '"""same"""'}), "(filters=4, kernel_size=self.kernel_size, padding='same')\n", (5555, 5612), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, 
Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5628, 5639), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (5637, 5639), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5696, 5705), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5703, 5705), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5721, 5762), 'keras.layers.Dense', 'Dense', (['self.latent_dim'], {'activation': '"""relu"""'}), "(self.latent_dim, activation='relu')\n", (5726, 5762), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5783, 5820), 'keras.layers.Dense', 'Dense', (['self.latent_dim'], {'name': '"""z_mean"""'}), "(self.latent_dim, name='z_mean')\n", (5788, 5820), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5844, 5884), 'keras.layers.Dense', 'Dense', (['self.latent_dim'], {'name': '"""z_log_var"""'}), "(self.latent_dim, name='z_log_var')\n", (5849, 5884), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((6053, 6112), 'keras.layers.Lambda', 'Lambda', (['sampling'], {'output_shape': '(self.latent_dim,)', 'name': '"""z"""'}), "(sampling, output_shape=(self.latent_dim,), name='z')\n", (6059, 6112), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, 
Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((6646, 6659), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (6657, 6659), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((6702, 6724), 'keras.layers.Dense', 'Dense', (['self.latent_dim'], {}), '(self.latent_dim)\n', (6707, 6724), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((6745, 6766), 'keras.layers.Reshape', 'Reshape', (['(4, 4, 4, 4)'], {}), '((4, 4, 4, 4))\n', (6752, 6766), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((7119, 7221), 'keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'self.channels', 'kernel_size': 'self.kernel_size', 'padding': '"""same"""', 'name': '"""decoder_output"""'}), "(filters=self.channels, kernel_size=self.kernel_size, padding='same',\n name='decoder_output')\n", (7125, 7221), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((7298, 7318), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7316, 7318), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((7346, 7352), 'keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (7350, 7352), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, 
LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((7562, 7579), 'keras.backend.flatten', 'K.flatten', (['inputs'], {}), '(inputs)\n', (7571, 7579), True, 'from keras import backend as K\n'), ((7581, 7599), 'keras.backend.flatten', 'K.flatten', (['outputs'], {}), '(outputs)\n', (7590, 7599), True, 'from keras import backend as K\n'), ((7715, 7736), 'keras.backend.exp', 'K.exp', (['self.z_log_var'], {}), '(self.z_log_var)\n', (7720, 7736), True, 'from keras import backend as K\n'), ((8218, 8268), 'keras.backend.mean', 'K.mean', (['(rs_loss + alpha * pm_loss + beta * kl_loss)'], {}), '(rs_loss + alpha * pm_loss + beta * kl_loss)\n', (8224, 8268), True, 'from keras import backend as K\n'), ((8679, 8698), 'keras.backend.batch_flatten', 'K.batch_flatten', (['h1'], {}), '(h1)\n', (8694, 8698), True, 'from keras import backend as K\n'), ((8716, 8735), 'keras.backend.batch_flatten', 'K.batch_flatten', (['h2'], {}), '(h2)\n', (8731, 8735), True, 'from keras import backend as K\n'), ((9623, 9634), 'time.time', 'time.time', ([], {}), '()\n', (9632, 9634), False, 'import time\n'), ((10092, 10122), 'numpy.mean', 'np.mean', (['train_metrics'], {'axis': '(0)'}), '(train_metrics, axis=0)\n', (10099, 10122), True, 'import numpy as np\n'), ((10560, 10588), 'numpy.mean', 'np.mean', (['val_metrics'], {'axis': '(0)'}), '(val_metrics, axis=0)\n', (10567, 10588), True, 'import numpy as np\n'), ((10606, 10617), 'time.time', 'time.time', ([], {}), '()\n', (10615, 10617), False, 'import time\n'), ((12269, 12331), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'self.cond_shape', 'size': 'n_samples'}), '(low=0, high=self.cond_shape, size=n_samples)\n', (12286, 12331), True, 'import numpy as np\n'), ((14595, 14622), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['z[:, i]'], {}), '(z[:, i])\n', (14613, 14622), True, 'import scipy.stats as stats\n'), ((5343, 5406), 'keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'f', 'kernel_size': 'self.kernel_size', 
'padding': '"""same"""'}), "(filters=f, kernel_size=self.kernel_size, padding='same')\n", (5349, 5406), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5426, 5446), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5444, 5446), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5466, 5477), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (5475, 5477), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((5497, 5532), 'keras.layers.MaxPool3D', 'MaxPool3D', ([], {'pool_size': 'self.pool_size'}), '(pool_size=self.pool_size)\n', (5506, 5532), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((6868, 6931), 'keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'f', 'kernel_size': 'self.kernel_size', 'padding': '"""same"""'}), "(filters=f, kernel_size=self.kernel_size, padding='same')\n", (6874, 6931), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((6951, 6971), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6969, 6971), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((6991, 7002), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (7000, 7002), 
False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((7691, 7712), 'keras.backend.square', 'K.square', (['self.z_mean'], {}), '(self.z_mean)\n', (7699, 7712), True, 'from keras import backend as K\n'), ((4636, 4659), 'os.path.exists', 'os.path.exists', (['weights'], {}), '(weights)\n', (4650, 4659), False, 'import os\n'), ((7068, 7096), 'keras.layers.UpSampling3D', 'UpSampling3D', (['self.pool_size'], {}), '(self.pool_size)\n', (7080, 7096), False, 'from keras.layers import BatchNormalization, Concatenate, Conv2D, Conv3D, Conv3DTranspose, Dense, Dropout, Flatten, Input, Lambda, LeakyReLU, MaxPool3D, ReLU, Reshape, UpSampling3D\n'), ((8775, 8792), 'keras.backend.square', 'K.square', (['(h1 - h2)'], {}), '(h1 - h2)\n', (8783, 8792), True, 'from keras import backend as K\n'), ((11947, 11978), 'os.path.splitext', 'os.path.splitext', (['self.filepath'], {}), '(self.filepath)\n', (11963, 11978), False, 'import os\n'), ((11259, 11300), 'os.path.join', 'os.path.join', (['self.sdir', '"""vae_losses.png"""'], {}), "(self.sdir, 'vae_losses.png')\n", (11271, 11300), False, 'import os\n'), ((11474, 11524), 'os.path.join', 'os.path.join', (['self.sdir', '"""vae_reconstructions.png"""'], {}), "(self.sdir, 'vae_reconstructions.png')\n", (11486, 11524), False, 'import os\n'), ((11629, 11671), 'os.path.join', 'os.path.join', (['self.sdir', '"""vae_samples.png"""'], {}), "(self.sdir, 'vae_samples.png')\n", (11641, 11671), False, 'import os\n'), ((11743, 11781), 'os.path.join', 'os.path.join', (['self.sdir', '"""vae_kde.png"""'], {}), "(self.sdir, 'vae_kde.png')\n", (11755, 11781), False, 'import os\n')] |
"""
Quantify regulation cost.
First run simulations to obtain historical records.
Then analyze the historical records to get cost coefficients by regression.
"""
import logging
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from mpc_coordinator import predict_agc, CementPlant, EnergyStorage, BuildOptModel
# Module logger: passes everything (DEBUG) to handlers; the console handler
# below filters to INFO and above.
log = logging.getLogger('cement')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('%(module)s %(levelname)s %(lineno)d: - %(message)s'))
log.addHandler(ch)
def test_simulate_mpc_hours(log_file='history_records.txt'):
    """Run MPC simulations over many hours and record the hourly summary.

    With various settings of R/B and unique setting of MPC config.

    :param log_file: path of the text file the hourly summaries are written to
    """
    agc_days = np.loadtxt('data/regD-13-01-04-Fri.txt')
    # agc_days = np.hstack((agc, np.loadtxt('data/regD-13-01-05-Sat.txt')))
    config = {'beta': 10,
              'gamma': 10,
              'switch_limit': 15,
              'mpc_horizon': 15}
    base2reg = {4: [3.5, 4, 4.5, 5, 5.5, 6, 6.5],
                2: [3.75, 4.25],
                6: [3.75, 4.25],
                3: [3.5, 4, 4.5]}
    # Restrict the study to the first few hours and a single base power.
    agc_days = agc_days[:1800*5]
    base2reg = {k: base2reg[k] for k in [4]}
    # Context manager guarantees the log file is closed even on failure.
    with open(log_file, 'w+') as fo:
        msg = 'alpha 10 beta %d gamma %d e_ratio %.1f switch_limit %d horizon %d' % \
              (config['beta'], config['gamma'], -9.9, config['switch_limit'], config['mpc_horizon'])
        fo.write(msg + '\n')
        for base_mw in base2reg:
            for reg_mw in base2reg[base_mw]:
                # Bug fix: `/` yields a float under Python 3 and range()
                # raises TypeError; floor division keeps the Python 2 result.
                for hour in range(1, len(agc_days) // 1800 - 2, 1):
                    [preds, agc] = predict_agc(agc_days, mpc_horizon=config['mpc_horizon'], hour=hour)
                    cement = CementPlant(config['switch_limit'])
                    storage = EnergyStorage()
                    opt = BuildOptModel(cement, storage)
                    opt.set_base_reg(base_mw, reg_mw)
                    opt.mpc_obj_beta = config['beta']
                    opt.mpc_obj_gamma = config['gamma']
                    cement_p_mw, storage_p_mw, storage_e_mwh, cpu_list = opt.simulate_mpc(agc, preds)
                    # Infeasible hour: solver returned nothing, skip it.
                    if cement_p_mw is None:
                        continue
                    reg_vio_mwh, switch_mw, storage_de_mwh, cement_mwh = opt.simulation_summary(agc, cement_p_mw, storage_p_mw, storage_e_mwh, cpu_list)
                    # Weighted penalty: violations dominate, switching and
                    # storage energy deviation contribute less.
                    penalty = reg_vio_mwh*10 + switch_mw*0.1 + 2*abs(storage_de_mwh)
                    msg = 'R %.1f B %d hour %2d P %7.1f = (%.3f,%.1f,%.1f) Cem %.1f' \
                          % (reg_mw, base_mw, hour, penalty, reg_vio_mwh, switch_mw, storage_de_mwh, cement_mwh)
                    fo.write(msg + '\n')
def test_analyze_records(file_name='history_records.txt'):
    """Analyze the hourly summary (violation, switching, etc.) of regulation provision over days.

    Re-parses the fixed-format text log written by test_simulate_mpc_hours,
    groups the hourly metrics by (base MW, regulation MW) setting, and for
    each cost metric produces a per-hour plot and an hourly-average plot.
    NOTE(review): relies on `plt` (matplotlib.pyplot) being imported at
    module level elsewhere in this file — confirm.
    """
    costs = ['Regulation Violation MWh', 'Storage Deviation MWh', 'Cement Energy', 'Switch MW']
    for cost in costs:
        records = dict()
        # Fields are positional tokens in the fixed-format log lines;
        # the first line of the file is the config header and is skipped.
        with open(file_name, 'r+') as f:
            for line in f.readlines()[1:]:
                words = line.split()
                reg = float(words[1])
                base = float(words[3])
                hour = int(words[5])
                penalty = float(words[7])
                # words[9] looks like "(v,s,e)" -> strip parens, split on ','
                vio_reg_mwh = float(words[9][1:-1].split(',')[0])
                switch_mw = float(words[9][1:-1].split(',')[1])
                e_deviation_mwh = float(words[9][1:-1].split(',')[2])
                cement_energy = float(words[11])
                if base not in records:
                    records[base] = dict()
                if reg not in records[base]:
                    records[base][reg] = dict()
                if 'Penalty' not in records[base][reg]:
                    # First line for this (base, reg): create all metric maps.
                    records[base][reg]['Penalty'] = dict()
                    records[base][reg]['Regulation Violation MWh'] = dict()
                    records[base][reg]['Switch MW'] = dict()
                    records[base][reg]['Storage Deviation MWh'] = dict()
                    records[base][reg]['Cement Energy'] = dict()
                records[base][reg]['Penalty'][hour] = penalty
                records[base][reg]['Regulation Violation MWh'][hour] = vio_reg_mwh
                records[base][reg]['Switch MW'][hour] = switch_mw
                records[base][reg]['Storage Deviation MWh'][hour] = e_deviation_mwh
                records[base][reg]['Cement Energy'][hour] = cement_energy
        expectation = dict()
        matplotlib.rcParams['font.size'] = 16
        plt.figure(figsize=(12, 3))
        for base in sorted(records.keys()):
            expectation[base] = dict()
            for reg in sorted(records[base].keys()):
                # for reg in [4, 5, 6]:
                curve = []
                hours = sorted(records[base][reg][cost].keys())
                for hour in hours:
                    curve += [records[base][reg][cost][hour]]
                log.info('R %.1f B %.1f %s %f' % (reg, base, cost, np.mean(curve)))
                expectation[base][reg] = np.mean(curve)
                curve = curve[:48]  # plot at most the first 48 hours
                hours = range(len(curve))
                plt.plot(hours, curve, label='R %.1f B %.1f' % (reg, base), ls='-', marker='s', linewidth=2,
                         markersize=9)
        # Tick every 3rd hour; `hours` still holds the last curve's range.
        hours = [hours[i] for i in range(0, len(hours), 3)]
        plt.xticks(hours)
        # plt.legend(loc='upper center', prop={'size': 12})
        plt.legend(loc='upper right', prop={'size': 16})
        plt.xlabel('Hour')
        plt.ylabel(cost)
        plt.grid()
        # Fixed y-limits per metric so figures are comparable across runs.
        if cost == 'Regulation Violation MWh':
            plt.ylim([-0.0005, 0.0055])
        elif cost == 'Switch MW':
            plt.ylim([-3, 53])
        elif cost == 'Cement Energy':
            plt.ylim([0, 8])
        elif cost == 'Storage Deviation MWh':
            plt.ylim([-0.55, 0.55])
        plt.savefig('%s.pdf' % cost.replace(' ', ''))
        plt.show()
        # hourly average cost
        colors = ['b', 'r', 'g', 'k', 'm']
        plt.figure(figsize=(12, 6))
        for base in expectation.keys():
            regs = sorted(expectation[base].keys())
            vals = [expectation[base][reg] for reg in regs]
            plt.plot(regs, vals, ls='None', marker='s', linewidth=2, markersize=9,
                     color=colors[int(base/2)-1])
            # Linear fit of average cost vs. regulation capacity.
            p = np.polyfit(regs, vals, 1)
            xx = np.linspace(3.0, max(regs)+0.5)
            yy = np.polyval(p, xx)
            plt.plot(xx, yy, ls='--', linewidth=3, color=colors[int(base/2)-1], label='B %.1f' % base)
            log.info('B%d p %.3f %.3f' % (base, p[0], p[1]))
        plt.legend(loc='upper center')
        plt.xlabel('Regulation MW')
        plt.ylabel(cost)
        plt.grid()
        plt.savefig('Avg%s.pdf' % cost.replace(' ', ''))
        plt.show()
if __name__ == "__main__":
    # Script entry point: run the full simulation sweep, then analyze it.
    # NOTE(review): `datetime` is not imported in this chunk — presumably
    # imported earlier in the file; confirm.
    log.info(datetime.datetime.today())
    test_simulate_mpc_hours()
    test_analyze_records()
| [
"numpy.polyfit",
"logging.Formatter",
"matplotlib.pyplot.figure",
"numpy.mean",
"mpc_coordinator.predict_agc",
"numpy.polyval",
"mpc_coordinator.EnergyStorage",
"numpy.loadtxt",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"datetime.datetime.today",
"matplotlib.pyplot.ylim",
"matplo... | [((370, 397), 'logging.getLogger', 'logging.getLogger', (['"""cement"""'], {}), "('cement')\n", (387, 397), False, 'import logging\n'), ((433, 456), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (454, 456), False, 'import logging\n'), ((501, 572), 'logging.Formatter', 'logging.Formatter', (['"""%(module)s %(levelname)s %(lineno)d: - %(message)s"""'], {}), "('%(module)s %(levelname)s %(lineno)d: - %(message)s')\n", (518, 572), False, 'import logging\n'), ((830, 870), 'numpy.loadtxt', 'np.loadtxt', (['"""data/regD-13-01-04-Fri.txt"""'], {}), "('data/regD-13-01-04-Fri.txt')\n", (840, 870), True, 'import numpy as np\n'), ((4647, 4674), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (4657, 4674), True, 'import matplotlib.pyplot as plt\n'), ((5588, 5636), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'prop': "{'size': 16}"}), "(loc='upper right', prop={'size': 16})\n", (5598, 5636), True, 'import matplotlib.pyplot as plt\n'), ((5646, 5664), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Hour"""'], {}), "('Hour')\n", (5656, 5664), True, 'import matplotlib.pyplot as plt\n'), ((5674, 5690), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['cost'], {}), '(cost)\n', (5684, 5690), True, 'import matplotlib.pyplot as plt\n'), ((5700, 5710), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5708, 5710), True, 'import matplotlib.pyplot as plt\n'), ((6084, 6094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6092, 6094), True, 'import matplotlib.pyplot as plt\n'), ((6181, 6208), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (6191, 6208), True, 'import matplotlib.pyplot as plt\n'), ((6803, 6833), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""'}), "(loc='upper center')\n", (6813, 6833), True, 'import matplotlib.pyplot as plt\n'), ((6843, 6870), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""Regulation MW"""'], {}), "('Regulation MW')\n", (6853, 6870), True, 'import matplotlib.pyplot as plt\n'), ((6880, 6896), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['cost'], {}), '(cost)\n', (6890, 6896), True, 'import matplotlib.pyplot as plt\n'), ((6906, 6916), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6914, 6916), True, 'import matplotlib.pyplot as plt\n'), ((6984, 6994), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6992, 6994), True, 'import matplotlib.pyplot as plt\n'), ((7041, 7066), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (7064, 7066), False, 'import datetime\n'), ((5772, 5799), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.0005, 0.0055]'], {}), '([-0.0005, 0.0055])\n', (5780, 5799), True, 'import matplotlib.pyplot as plt\n'), ((6516, 6541), 'numpy.polyfit', 'np.polyfit', (['regs', 'vals', '(1)'], {}), '(regs, vals, 1)\n', (6526, 6541), True, 'import numpy as np\n'), ((6610, 6627), 'numpy.polyval', 'np.polyval', (['p', 'xx'], {}), '(p, xx)\n', (6620, 6627), True, 'import numpy as np\n'), ((1725, 1792), 'mpc_coordinator.predict_agc', 'predict_agc', (['agc_days'], {'mpc_horizon': "config['mpc_horizon']", 'hour': 'hour'}), "(agc_days, mpc_horizon=config['mpc_horizon'], hour=hour)\n", (1736, 1792), False, 'from mpc_coordinator import predict_agc, CementPlant, EnergyStorage, BuildOptModel\n'), ((1819, 1854), 'mpc_coordinator.CementPlant', 'CementPlant', (["config['switch_limit']"], {}), "(config['switch_limit'])\n", (1830, 1854), False, 'from mpc_coordinator import predict_agc, CementPlant, EnergyStorage, BuildOptModel\n'), ((1882, 1897), 'mpc_coordinator.EnergyStorage', 'EnergyStorage', ([], {}), '()\n', (1895, 1897), False, 'from mpc_coordinator import predict_agc, CementPlant, EnergyStorage, BuildOptModel\n'), ((1921, 1951), 'mpc_coordinator.BuildOptModel', 'BuildOptModel', (['cement', 'storage'], {}), '(cement, storage)\n', (1934, 1951), False, 'from mpc_coordinator import 
predict_agc, CementPlant, EnergyStorage, BuildOptModel\n'), ((5170, 5184), 'numpy.mean', 'np.mean', (['curve'], {}), '(curve)\n', (5177, 5184), True, 'import numpy as np\n'), ((5281, 5392), 'matplotlib.pyplot.plot', 'plt.plot', (['hours', 'curve'], {'label': "('R %.1f B %.1f' % (reg, base))", 'ls': '"""-"""', 'marker': '"""s"""', 'linewidth': '(2)', 'markersize': '(9)'}), "(hours, curve, label='R %.1f B %.1f' % (reg, base), ls='-', marker=\n 's', linewidth=2, markersize=9)\n", (5289, 5392), True, 'import matplotlib.pyplot as plt\n'), ((5500, 5517), 'matplotlib.pyplot.xticks', 'plt.xticks', (['hours'], {}), '(hours)\n', (5510, 5517), True, 'import matplotlib.pyplot as plt\n'), ((5848, 5866), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-3, 53]'], {}), '([-3, 53])\n', (5856, 5866), True, 'import matplotlib.pyplot as plt\n'), ((5919, 5935), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 8]'], {}), '([0, 8])\n', (5927, 5935), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6019), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.55, 0.55]'], {}), '([-0.55, 0.55])\n', (6004, 6019), True, 'import matplotlib.pyplot as plt\n'), ((5111, 5125), 'numpy.mean', 'np.mean', (['curve'], {}), '(curve)\n', (5118, 5125), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 16:49:59 2015
@author: yuki

Plot two 243x243 effective-distance matrices side by side (first-order
contacts vs. up-to-third-order contacts) and save the figure as a PNG.
"""
import matplotlib.pyplot as plt
import numpy as np
# Left panel: direct (first-order) distances; right panel: Deff including
# higher-order contacts. Both loaded from precomputed .npy files.
Deff1=np.load('D.npy')
Deff2=np.load('Deff.npy')
spars=1
if spars==1:
    # x runs backwards so that, combined with np.rot90 below, the matrix is
    # rendered in the intended orientation.
    x = range(242,-1,-1)
    y = range(243)
    x, y = np.meshgrid(x, y)
    fig=plt.figure(figsize=(9, 4.5))
    plt.rcParams.update({'font.size': 9})
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    plt.subplot(1,2,1)
    z=Deff1
    # Replace unreachable (zero) entries with 1.1*max so they render at the
    # far end of the color scale, then zero the diagonal (self-distance).
    z[z==0]=1.1*np.max(z)
    z[np.diag_indices(242)]=0.0
    plt.pcolormesh(x, y, np.rot90(z),clim=(243,243))
    plt.colorbar() #need a colorbar to show the intensity scale
    plt.title('First order contacts (inf distance scaled to value %s)'%np.round(np.max(z),1))
    plt.xlim(xmax=243) #or xl
    plt.ylim(ymax=243) #or yl
    plt.subplot(1,2,2)
    z=Deff2
    z[np.diag_indices(242)]=0.0
    plt.pcolormesh(x,y,np.rot90(z), clim=(243,243))
    plt.colorbar() #need a colorbar to show the intensity scale
    plt.title('Up to third order contacts')
    plt.xlim(xmax=243) #or xl
    plt.ylim(ymax=243) #or yl
    plt.show()
    plt.subplots_adjust(hspace=0.6)
    plt.suptitle('Effective distances (1-log(P_{mn}))',fontsize=12)
    fig.savefig('effDistMatricesDeff.png', dpi=500)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.load",
"numpy.meshgrid",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.suptitle",
"numpy.diag_indices",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"matplotlib.pyplot... | [((142, 158), 'numpy.load', 'np.load', (['"""D.npy"""'], {}), "('D.npy')\n", (149, 158), True, 'import numpy as np\n'), ((165, 184), 'numpy.load', 'np.load', (['"""Deff.npy"""'], {}), "('Deff.npy')\n", (172, 184), True, 'import numpy as np\n'), ((277, 294), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (288, 294), True, 'import numpy as np\n'), ((308, 336), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 4.5)'}), '(figsize=(9, 4.5))\n', (318, 336), True, 'import matplotlib.pyplot as plt\n'), ((341, 378), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 9}"], {}), "({'font.size': 9})\n", (360, 378), True, 'import matplotlib.pyplot as plt\n'), ((383, 410), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (389, 410), True, 'import matplotlib.pyplot as plt\n'), ((415, 445), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (421, 445), True, 'import matplotlib.pyplot as plt\n'), ((450, 470), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (461, 470), True, 'import matplotlib.pyplot as plt\n'), ((601, 615), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (613, 615), True, 'import matplotlib.pyplot as plt\n'), ((759, 777), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmax': '(243)'}), '(xmax=243)\n', (767, 777), True, 'import matplotlib.pyplot as plt\n'), ((789, 807), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymax': '(243)'}), '(ymax=243)\n', (797, 807), True, 'import matplotlib.pyplot as plt\n'), ((824, 844), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (835, 844), True, 'import matplotlib.pyplot as plt\n'), ((943, 957), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (955, 957), True, 'import matplotlib.pyplot as plt\n'), ((1007, 1046), 
'matplotlib.pyplot.title', 'plt.title', (['"""Up to third order contacts"""'], {}), "('Up to third order contacts')\n", (1016, 1046), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1069), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmax': '(243)'}), '(xmax=243)\n', (1059, 1069), True, 'import matplotlib.pyplot as plt\n'), ((1081, 1099), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymax': '(243)'}), '(ymax=243)\n', (1089, 1099), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1121), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1119, 1121), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1159), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.6)'}), '(hspace=0.6)\n', (1147, 1159), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1228), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Effective distances (1-log(P_{mn}))"""'], {'fontsize': '(12)'}), "('Effective distances (1-log(P_{mn}))', fontsize=12)\n", (1176, 1228), True, 'import matplotlib.pyplot as plt\n'), ((502, 511), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (508, 511), True, 'import numpy as np\n'), ((518, 538), 'numpy.diag_indices', 'np.diag_indices', (['(242)'], {}), '(242)\n', (533, 538), True, 'import numpy as np\n'), ((569, 580), 'numpy.rot90', 'np.rot90', (['z'], {}), '(z)\n', (577, 580), True, 'import numpy as np\n'), ((861, 881), 'numpy.diag_indices', 'np.diag_indices', (['(242)'], {}), '(242)\n', (876, 881), True, 'import numpy as np\n'), ((910, 921), 'numpy.rot90', 'np.rot90', (['z'], {}), '(z)\n', (918, 921), True, 'import numpy as np\n'), ((741, 750), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (747, 750), True, 'import numpy as np\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_data.ipynb (unless otherwise specified).
__all__ = ['DATA_PATH', 'acquire_data', 'rmtree', 'load_custom_data', 'load_data', 'pad_trajectories',
'normalize_trajectory', 'get_custom_dls', 'get_discriminative_dls', 'get_turning_point_dls', 'get_1vall_dls',
'get_validation_dl', 'get_dls_from_ds', 'create_custom_dataset', 'cut_trajectory', 'validate_model',
'validate_task']
# Cell
from pathlib import Path
import urllib.request as u_request
from zipfile import ZipFile
import csv
import pandas as pd
from andi import andi_datasets, normalize
import numpy as np
from fastai.text.all import *
# Cell
# Directory where all competition data (raw downloads, pickles, predictions)
# is stored, relative to the notebook directory.
DATA_PATH = Path("../data")
# Cell
def acquire_data(train=True, val=True):
    """Download and unpack the train and/or validation datasets of the competition.

    The train url might fail (it is a time-limited Google Drive export link);
    if so, get the data manually from
    https://drive.google.com/drive/folders/1RXziMCO4Y0Fmpm5bmjcpy-Genhzv4QJ4

    :param train: when True, fetch and unpack the training set
    :param val: when True, fetch and unpack the validation set
    """
    DATA_PATH.mkdir(exist_ok=True)
    train_url = ("https://doc-4k-88-drive-data-export.googleusercontent.com/download/qh9kfuk2n3khcj0qvrn9t3a4j19nve1a/" +
                 "rqpd3tajosn0gta5f9mmbbb1e4u8csnn/1599642000000/17390da5-4567-4189-8a62-1749e1b19b06/108540842544374891611/" +
                 "ADt3v-N9HwRAxXINIFMKGcsrjzMlrvhOOYitRyphFom1Ma-CUUekLTkDp75fOegXlyeVVrTPjlnqDaK0g6iI7eDL9YJw91-" +
                 "jiityR3iTfrysZP6hpGA62c4lkZbjGp_NJL-XSDUlPcwiVi5Hd5rFtH1YYP0tiiFCoJZsTT4akE8fjdrkZU7vaqFznxuyQDA8YGaiuYlKu" +
                 "-F1HiAc9kG_k9EMgkMncNflNJtlugxH5pFcNDdrYiOzIINRIRivt5ScquQ_s4KyuV-zYOQ_g2_VYri8YAg0IqbBrcO-exlp5j-" +
                 "t02GDh5JZKU3Hky5b70Z8brCL5lvK0SFAFIKOer45ZrFaACA3HGRNJg==?authuser=0&nonce=k5g7m53pp3cqq&user=" +
                 "108540842544374891611&hash=m7kmrh87gmekjhrdcpbhuf1kj13ui0l2")
    val_url = ("https://competitions.codalab.org/my/datasets/download/7ea12913-dfcf-4a50-9f5d-8bf9666e9bb4")
    if train:
        data = _download_bytes(train_url)
        _write_bytes(data, DATA_PATH)
        # Rename the unzipped folder to a stable, short name.
        train_path = DATA_PATH/"Development dataset for Training"
        train_path.rename(train_path.parent/"train")
    if val:
        data = _download_bytes(val_url)
        _write_bytes(data, DATA_PATH)
        val_path = DATA_PATH/"validation_for_scoring"
        val_path.rename(val_path.parent/"val")
        # The zip ships a macOS metadata folder; remove it.
        rmtree(DATA_PATH/"__MACOSX")
def _download_bytes(url):
"Downloads data from `url` as bytes"
u = u_request.urlopen(url)
data = u.read()
u.close()
return data
def _write_bytes(data, path):
    """Persist `data` (bytes of a zip archive) and extract it into `path`."""
    archive = _zip_bytes(data)
    _unzip_file(archive, new_path=path)
def _zip_bytes(data, path=None):
"Saves bytes data as .zip in `path`."
if path is None: path = Path("../temp")
zip_path = path.with_suffix(".zip")
with open(zip_path, "wb") as f:
f.write(data)
return zip_path
def _unzip_file(file_path, new_path=None, purge=True):
"Unzips file in `file_path` to `new_path`."
if new_path is None: new_path = file_path.with_suffix("")
zip_path = file_path.with_suffix(".zip")
with ZipFile(zip_path, 'r') as f:
f.extractall(new_path)
if purge: zip_path.unlink()
def rmtree(root):
    """Recursively delete directory `root` and everything inside it.

    Pure-pathlib replacement for shutil.rmtree.
    """
    for entry in root.iterdir():
        if entry.is_dir():
            rmtree(entry)
        else:
            entry.unlink()
    root.rmdir()
# Cell
def load_custom_data(dim=1, models=None, exps=None, path=None):
    """Load the custom dataset, optionally filtered by model and exponent.

    :param dim: trajectory dimension; selects the default pickle file name
    :param models: iterable of model ids to keep (None keeps all)
    :param exps: iterable of anomalous exponents to keep (None keeps all)
    :param path: explicit pickle path overriding the default location
    :return: filtered DataFrame with a fresh integer index
    """
    path = DATA_PATH/f"custom{dim}.pkl" if path is None else path
    df = pd.read_pickle(path)
    # BUG FIX: the previous masks were built with sum([...]) which yields an
    # *integer* Series; DataFrame.__getitem__ requires a boolean mask and
    # raises KeyError on an int one. Use isin() instead.
    mod_mask = df['model'].isin(models) if models is not None else np.ones(df.shape[0], dtype=bool)
    exp_mask = df['exp'].isin(exps) if exps is not None else np.ones(df.shape[0], dtype=bool)
    mask = mod_mask & exp_mask
    return df[mask].reset_index(drop=True)
def load_data(task, dim=1, ds='train'):
    """Load 'train' or 'val' data of the given dimension for `task`.

    On first use the pickle does not exist yet, so it is built from the raw
    competition text files via `_txt2df`.

    :param task: competition task number
    :param dim: trajectory dimension to select
    :param ds: 'train' or 'val'
    """
    path = DATA_PATH/ds
    try:
        df = pd.read_pickle(path/f"task{task}.pkl")
    except FileNotFoundError:
        # BUG FIX: previously a bare `except:` that also swallowed real
        # errors (corrupt pickle, KeyboardInterrupt); only a missing file
        # should trigger the rebuild.
        _txt2df(task, ds=[ds])
        df = pd.read_pickle(path/f"task{task}.pkl")
    return df[df['dim']==dim].reset_index(drop=True)
def _txt2df(task, ds=['train', 'val']):
    """Convert the raw competition .txt files for `task` into pickled
    DataFrames, downloading the raw data first if necessary.

    Each text row is `dimension; flattened trajectory...`; the trajectory is
    reshaped to (len, dim). NOTE(review): `ds=['train', 'val']` is a mutable
    default argument; it is only read here, but callers should not mutate it.

    :param task: competition task number (task 3 has vector labels)
    :param ds: which splits to convert ('train' and/or 'val')
    """
    if 'train' in ds:
        df = pd.DataFrame(columns=['dim', 'y', 'x', 'len'], dtype=object)
        train_path = DATA_PATH/"train"
        if not (train_path/f"task{task}.txt").exists(): acquire_data(train=True, val=False)
        with open(train_path/f"task{task}.txt", "r") as D, open(train_path/f"ref{task}.txt") as Y:
            trajs = csv.reader(D, delimiter=";", lineterminator="\n", quoting=csv.QUOTE_NONNUMERIC)
            labels = csv.reader(Y, delimiter=";", lineterminator="\n", quoting=csv.QUOTE_NONNUMERIC)
            for t, y in zip(trajs, labels):
                dim, x = int(t[0]), t[1:]
                x = tensor(x).view(dim, -1).T
                # BUG FIX: `task is 3` compared object identity of ints — a
                # CPython small-int caching detail; compare by value.
                label = tensor(y[1:]) if task == 3 else y[1]
                df = df.append({'dim': dim, 'y': label, 'x': x, 'len': len(x)}, ignore_index=True)
        df.to_pickle(train_path/f"task{task}.pkl")
    if 'val' in ds:
        df = pd.DataFrame(columns=['dim', 'x', 'len'], dtype=object)
        val_path = DATA_PATH/"val"
        task_path = val_path/f"task{task}.txt"
        if not task_path.exists(): acquire_data(train=False, val=True)
        with open(task_path, "r") as D:
            trajs = csv.reader(D, delimiter=";", lineterminator="\n", quoting=csv.QUOTE_NONNUMERIC)
            for t in trajs:
                dim, x = int(t[0]), t[1:]
                x = tensor(x).view(dim, -1).T
                df = df.append({'dim': dim, 'x': x, 'len': len(x)}, ignore_index=True)
        # The validation set has no labels; add an empty column so the
        # frame shares its schema with the training frame.
        df['y'] = ""
        df.to_pickle(val_path/f"task{task}.pkl")
# Cell
def pad_trajectories(samples, pad_value=0, pad_first=True, backwards=False):
    """Normalize and zero-pad every trajectory in `samples` to a common
    length, assuming each trajectory has shape (len, dim).

    :param samples: iterable of (trajectory, label) pairs
    :param pad_value: value used for the padding rows
    :param pad_first: pad at the start (True) or at the end (False)
    :param backwards: reverse each padded trajectory along the time axis
    """
    target_len = max(traj.shape[0] for traj, _ in samples)
    # Flipping afterwards swaps which side the padding ends up on, so the
    # requested side is inverted up front.
    prepend = (not pad_first) if backwards else pad_first
    def _pad(traj):
        traj = normalize_trajectory(traj)
        filler = traj.new_zeros((target_len - traj.shape[0], traj.shape[1])) + pad_value
        pieces = [filler, traj] if prepend else [traj, filler]
        padded = torch.cat(pieces)
        return padded.flip(0) if backwards else padded
    return L((_pad(traj), label) for traj, label in samples)
def normalize_trajectory(traj):
    """Rebuild `traj` so its displacements have unit standard deviation
    (per spatial column).

    The first point is anchored at the origin; the remaining points are the
    cumulative sum of the std-normalized displacements. The input tensor is
    not modified.
    """
    steps = traj[1:] - traj[:-1]
    scaled = steps / steps.std(0)
    out = torch.zeros_like(traj)
    out[1:] = scaled.cumsum(0)
    return out
# Cell
@delegates(pad_trajectories)
def get_custom_dls(target='model', dim=1, models=None, exps=None, bs=128, split_pct=0.2, path=None, balance=False, **kwargs):
    """Obtain `DataLoaders` from the custom dataset filtered by `models` and
    `exps` to predict `target`.

    'exp' targets stay float (regression); any other target column is cast
    to int (classification). Extra keyword args are forwarded to
    `pad_trajectories`.
    """
    data = load_custom_data(dim=dim, models=models, exps=exps, path=path)
    if balance: data = _subsample_df(data)
    # BUG FIX: `target is 'exp'` compared string identity, which only works
    # by CPython literal interning; compare by value instead.
    ds = L(zip(data['x'], data[target])) if target == 'exp' else L(zip(data['x'], data[target].astype(int)))
    sorted_dl = partial(SortedDL, before_batch=partial(pad_trajectories, **kwargs), shuffle=True)
    return get_dls_from_ds(ds, sorted_dl, split_pct=split_pct, bs=bs)
@delegates(pad_trajectories)
def get_discriminative_dls(task, dim=1, bs=128, split_pct=0.2, ds='train', **kwargs):
    """Obtain `DataLoaders` for classification/regression models.

    Task 1 keeps float targets; every other task casts the targets to int
    class labels. Extra keyword args are forwarded to `pad_trajectories`.
    """
    frame = load_data(task, dim=dim, ds=ds)
    if task == 1:
        items = L(zip(frame['x'], frame['y']))
    else:
        items = L(zip(frame['x'], frame['y'].astype(int)))
    batcher = partial(pad_trajectories, **kwargs)
    dl_type = partial(SortedDL, before_batch=batcher, shuffle=True)
    return get_dls_from_ds(items, dl_type, split_pct=split_pct, bs=bs)
@delegates(SortedDL.__init__)
def get_turning_point_dls(task=3, dim=1, bs=128, split_pct=0.2, ds='train', **kwargs):
    """Obtain `DataLoaders` to predict change points in trajectories.

    Uses only the first component of each task-3 label vector as the target.
    Extra keyword args are forwarded to `SortedDL`.
    """
    frame = load_data(task, dim=dim, ds=ds)
    targets = torch.stack(list(frame['y'].values))[:, 0]
    items = L(zip(frame['x'], targets))
    dl_type = partial(SortedDL, shuffle=True, **kwargs)
    return get_dls_from_ds(items, dl_type, split_pct=split_pct, bs=bs)
@delegates(pad_trajectories)
def get_1vall_dls(target=0, dim=1, models=None, exps=None, bs=128, split_pct=0.2, **kwargs):
    """Obtain `DataLoaders` for one-vs-all model classification: label 0 for
    model `target`, label 1 for every other model."""
    frame = load_custom_data(dim=dim, models=models, exps=exps)
    labels = (frame['model'] != target).astype(int)
    items = L(zip(frame['x'], labels))
    dl_type = partial(SortedDL, before_batch=partial(pad_trajectories, **kwargs), shuffle=True)
    return get_dls_from_ds(items, dl_type, split_pct=split_pct, bs=bs)
@delegates(pad_trajectories)
def get_validation_dl(task, dim=1, bs=64, ds='val', **kwargs):
    """Obtain a single `DataLoader` over the chosen split (default 'val')
    for `task` and `dim`. Extra keyword args go to `pad_trajectories`."""
    frame = load_data(task, dim=dim, ds=ds)
    items = L(zip(frame['x'], frame['y']))
    batcher = partial(pad_trajectories, **kwargs)
    return DataLoader(items, bs=bs, before_batch=batcher, device=default_device())
def get_dls_from_ds(ds, dl_type, split_pct=0.2, bs=128):
    """Randomly split `ds` into train/validation subsets and wrap them in
    fastai `DataLoaders` built with `dl_type`."""
    order = L(int(i) for i in torch.randperm(len(ds)))
    n_valid = int(len(ds) * split_pct)
    valid_ds = ds[order[:n_valid]]
    train_ds = ds[order[n_valid:]]
    return DataLoaders.from_dsets(train_ds, valid_ds, bs=bs, dl_type=dl_type, device=default_device())
def _subsample_df(df):
"Subsamples df to balance models"
models = df.model.unique()
max_s = min([len(df[df.model==m]) for m in models])
sub_dfs = [df[df.model==m].sample(frac=1)[:max_s] for m in models]
return pd.concat(sub_dfs, ignore_index=True)
# Cell
def create_custom_dataset(N, max_T=1000, min_T=10, dimensions=[1, 2, 3], save=True):
    """Generate a balanced AnDi trajectory dataset and store it as pickles.

    For each dimension: draw trajectories per diffusion model across a grid
    of anomalous exponents, normalize them, add localization noise and random
    diffusion coefficients, and cut each trajectory to a random length in
    [min_T, max_T). NOTE(review): `dimensions=[1, 2, 3]` is a mutable default
    argument; it is only read here, but callers should not mutate it.

    :param N: approximate number of trajectories per model
    :param max_T: generation length and maximum kept trajectory length
    :param min_T: minimum trajectory length after the random cut
    :param dimensions: trajectory dimensions to generate
    :param save: when True, pickle each per-dimension DataFrame to DATA_PATH
    :return: the DataFrame for the last generated dimension
    """
    ad = andi_datasets()
    exponents = np.arange(0.05, 2.01, 0.05)
    n_exp, n_models = len(exponents), len(ad.avail_models_name)
    # Trajectories per model and exponent. Arbitrarely chose to fulfill balanced classes
    N_per_model = np.ceil(1.6*N/5)
    subdif, superdif = n_exp//2, n_exp//2+1
    num_per_class = np.zeros((n_models, n_exp))
    num_per_class[:2,:subdif] = np.ceil(N_per_model/subdif) # ctrw, attm
    num_per_class[2, :] = np.ceil(N_per_model/(n_exp-1)) # fbm
    num_per_class[2, exponents == 2] = 0 # fbm can't be ballistic
    num_per_class[3, subdif:] = np.ceil((N_per_model/superdif)*0.8) # lw
    num_per_class[4, :] = np.ceil(N_per_model/n_exp) # sbm
    for dim in dimensions:
        # Dataset rows: [model_id, exponent, flattened trajectory...]
        dataset = ad.create_dataset(T=max_T, N=num_per_class, exponents=exponents,
                                    dimension=dim, models=np.arange(n_models))
        # Normalize trajectories
        n_traj = dataset.shape[0]
        norm_trajs = normalize(dataset[:, 2:].reshape(n_traj*dim, max_T))
        dataset[:, 2:] = norm_trajs.reshape(dataset[:, 2:].shape)
        # Add localization error, Gaussian noise with sigma = [0.1, 0.5, 1]
        loc_error_amplitude = np.random.choice(np.array([0.1, 0.5, 1]), size=n_traj*dim)
        loc_error = (np.random.randn(n_traj*dim, int(max_T)).transpose()*loc_error_amplitude).transpose()
        dataset = ad.create_noisy_localization_dataset(dataset, dimension=dim, T=max_T, noise_func=loc_error)
        # Add random diffusion coefficients
        trajs = dataset[:, 2:].reshape(n_traj*dim, max_T)
        displacements = trajs[:, 1:] - trajs[:, :-1]
        # Get new diffusion coefficients and displacements
        diffusion_coefficients = np.random.randn(trajs.shape[0])
        new_displacements = (displacements.transpose()*diffusion_coefficients).transpose()
        # Generate new trajectories and add to dataset
        new_trajs = np.cumsum(new_displacements, axis=1)
        new_trajs = np.concatenate((np.zeros((new_trajs.shape[0], 1)), new_trajs), axis=1)
        dataset[:, 2:] = new_trajs.reshape(dataset[:, 2:].shape)
        # Pack each row into a DataFrame entry, cutting to a random length.
        df = pd.DataFrame(columns=['dim', 'model', 'exp', 'x', 'len'], dtype=object)
        for traj in dataset:
            mod, exp, x = int(traj[0]), traj[1], traj[2:]
            x = cut_trajectory(x, np.random.randint(min_T, max_T), dim=dim)
            x = tensor(x).view(dim, -1).T
            df = df.append({'dim': dim, 'model': mod, 'exp': exp, 'x': x, 'len': len(x)}, ignore_index=True)
        if save:
            DATA_PATH.mkdir(exist_ok=True)
            ds_path = DATA_PATH/f"custom{dim}.pkl"
            df.to_pickle(ds_path, protocol=pickle.HIGHEST_PROTOCOL)
    return df
def cut_trajectory(traj, t_cut, dim=1):
    """Truncate a flattened multi-dimensional trajectory to `t_cut` steps.

    `traj` stores the `dim` components concatenated in one row; each
    component is cut to its first `t_cut` samples and the result is
    flattened back to shape (1, dim*t_cut).
    """
    per_dim = traj.reshape(dim, -1)
    return per_dim[:, :t_cut].reshape(1, -1)
# Cell
def validate_model(model, task, dim=1, bs=256, act=False, **kwargs):
    """Run `model` over the validation set of `task`/`dim` and return the
    concatenated, detached predictions.

    :param act: when True, take the first element of the model output and
        apply a softmax over dim 1 before detaching.
    """
    val_dl = get_validation_dl(task, dim=dim, bs=bs, **kwargs)
    outputs = []
    for batch, _ in val_dl:
        raw = model(batch)
        outputs.append(to_detach(raw[0].softmax(1)) if act else to_detach(raw))
    return torch.cat(outputs)
@delegates(validate_model)
def validate_task(models, task, dims, **kwargs):
    """Validate `models` on `task` for `dims` and write the predictions in
    the competition submission format (one `dim; p1;p2;...` line per sample).

    :param models: one model per entry in `dims` (scalars are wrapped)
    :param task: competition task number, used for the output file name
    :param dims: dimension handled by each model
    :raises ValueError: if `models` and `dims` have different lengths
    """
    if not hasattr(models, '__iter__'): models = [models]
    if not hasattr(dims, '__iter__'): dims = [dims]
    if len(models) != len(dims):
        # BUG FIX: `InputError` was never defined anywhere, so this line
        # raised NameError instead of the intended, catchable exception.
        raise ValueError(f"There are {len(models)} models and {len(dims)} dimensions")
    pred_path = DATA_PATH/"preds"
    pred_path.mkdir(exist_ok=True)
    task_path = pred_path/f"task{task}.txt"
    preds_dim = []
    for model, dim in zip(models, dims): preds_dim.append(validate_model(model, task, dim=dim, **kwargs))
    with open(task_path, "w") as f:
        for dim, preds in zip(dims, preds_dim):
            for pred in preds:
                f.write(f"{int(dim)}; {';'.join(str(i.item()) for i in pred)}\n")
"pandas.DataFrame",
"zipfile.ZipFile",
"numpy.ceil",
"andi.andi_datasets",
"numpy.random.randn",
"csv.reader",
"numpy.zeros",
"urllib.request.urlopen",
"numpy.ones",
"numpy.cumsum",
"pathlib.Path",
"numpy.random.randint",
"numpy.arange",
"numpy.array",
"pandas.read_pickle",
"pandas.con... | [((696, 711), 'pathlib.Path', 'Path', (['"""../data"""'], {}), "('../data')\n", (700, 711), False, 'from pathlib import Path\n'), ((2414, 2436), 'urllib.request.urlopen', 'u_request.urlopen', (['url'], {}), '(url)\n', (2431, 2436), True, 'import urllib.request as u_request\n'), ((3487, 3507), 'pandas.read_pickle', 'pd.read_pickle', (['path'], {}), '(path)\n', (3501, 3507), True, 'import pandas as pd\n'), ((9401, 9438), 'pandas.concat', 'pd.concat', (['sub_dfs'], {'ignore_index': '(True)'}), '(sub_dfs, ignore_index=True)\n', (9410, 9438), True, 'import pandas as pd\n'), ((9541, 9556), 'andi.andi_datasets', 'andi_datasets', ([], {}), '()\n', (9554, 9556), False, 'from andi import andi_datasets, normalize\n'), ((9573, 9600), 'numpy.arange', 'np.arange', (['(0.05)', '(2.01)', '(0.05)'], {}), '(0.05, 2.01, 0.05)\n', (9582, 9600), True, 'import numpy as np\n'), ((9772, 9792), 'numpy.ceil', 'np.ceil', (['(1.6 * N / 5)'], {}), '(1.6 * N / 5)\n', (9779, 9792), True, 'import numpy as np\n'), ((9853, 9880), 'numpy.zeros', 'np.zeros', (['(n_models, n_exp)'], {}), '((n_models, n_exp))\n', (9861, 9880), True, 'import numpy as np\n'), ((9913, 9942), 'numpy.ceil', 'np.ceil', (['(N_per_model / subdif)'], {}), '(N_per_model / subdif)\n', (9920, 9942), True, 'import numpy as np\n'), ((9988, 10022), 'numpy.ceil', 'np.ceil', (['(N_per_model / (n_exp - 1))'], {}), '(N_per_model / (n_exp - 1))\n', (9995, 10022), True, 'import numpy as np\n'), ((10161, 10198), 'numpy.ceil', 'np.ceil', (['(N_per_model / superdif * 0.8)'], {}), '(N_per_model / superdif * 0.8)\n', (10168, 10198), True, 'import numpy as np\n'), ((10228, 10256), 'numpy.ceil', 'np.ceil', (['(N_per_model / n_exp)'], {}), '(N_per_model / n_exp)\n', (10235, 10256), True, 'import numpy as np\n'), ((2733, 2748), 'pathlib.Path', 'Path', (['"""../temp"""'], {}), "('../temp')\n", (2737, 2748), False, 'from pathlib import Path\n'), ((3087, 3109), 'zipfile.ZipFile', 'ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 
'r')\n", (3094, 3109), False, 'from zipfile import ZipFile\n'), ((3590, 3622), 'numpy.ones', 'np.ones', (['df.shape[0]'], {'dtype': 'bool'}), '(df.shape[0], dtype=bool)\n', (3597, 3622), True, 'import numpy as np\n'), ((3699, 3731), 'numpy.ones', 'np.ones', (['df.shape[0]'], {'dtype': 'bool'}), '(df.shape[0], dtype=bool)\n', (3706, 3731), True, 'import numpy as np\n'), ((3955, 3995), 'pandas.read_pickle', 'pd.read_pickle', (["(path / f'task{task}.pkl')"], {}), "(path / f'task{task}.pkl')\n", (3969, 3995), True, 'import pandas as pd\n'), ((4265, 4325), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dim', 'y', 'x', 'len']", 'dtype': 'object'}), "(columns=['dim', 'y', 'x', 'len'], dtype=object)\n", (4277, 4325), True, 'import pandas as pd\n'), ((5135, 5190), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dim', 'x', 'len']", 'dtype': 'object'}), "(columns=['dim', 'x', 'len'], dtype=object)\n", (5147, 5190), True, 'import pandas as pd\n'), ((11304, 11335), 'numpy.random.randn', 'np.random.randn', (['trajs.shape[0]'], {}), '(trajs.shape[0])\n', (11319, 11335), True, 'import numpy as np\n'), ((11502, 11538), 'numpy.cumsum', 'np.cumsum', (['new_displacements'], {'axis': '(1)'}), '(new_displacements, axis=1)\n', (11511, 11538), True, 'import numpy as np\n'), ((11709, 11780), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dim', 'model', 'exp', 'x', 'len']", 'dtype': 'object'}), "(columns=['dim', 'model', 'exp', 'x', 'len'], dtype=object)\n", (11721, 11780), True, 'import pandas as pd\n'), ((4050, 4090), 'pandas.read_pickle', 'pd.read_pickle', (["(path / f'task{task}.pkl')"], {}), "(path / f'task{task}.pkl')\n", (4064, 4090), True, 'import pandas as pd\n'), ((4576, 4655), 'csv.reader', 'csv.reader', (['D'], {'delimiter': '""";"""', 'lineterminator': '"""\n"""', 'quoting': 'csv.QUOTE_NONNUMERIC'}), "(D, delimiter=';', lineterminator='\\n', quoting=csv.QUOTE_NONNUMERIC)\n", (4586, 4655), False, 'import csv\n'), ((4677, 4756), 'csv.reader', 
'csv.reader', (['Y'], {'delimiter': '""";"""', 'lineterminator': '"""\n"""', 'quoting': 'csv.QUOTE_NONNUMERIC'}), "(Y, delimiter=';', lineterminator='\\n', quoting=csv.QUOTE_NONNUMERIC)\n", (4687, 4756), False, 'import csv\n'), ((5404, 5483), 'csv.reader', 'csv.reader', (['D'], {'delimiter': '""";"""', 'lineterminator': '"""\n"""', 'quoting': 'csv.QUOTE_NONNUMERIC'}), "(D, delimiter=';', lineterminator='\\n', quoting=csv.QUOTE_NONNUMERIC)\n", (5414, 5483), False, 'import csv\n'), ((10798, 10821), 'numpy.array', 'np.array', (['[0.1, 0.5, 1]'], {}), '([0.1, 0.5, 1])\n', (10806, 10821), True, 'import numpy as np\n'), ((10445, 10464), 'numpy.arange', 'np.arange', (['n_models'], {}), '(n_models)\n', (10454, 10464), True, 'import numpy as np\n'), ((11575, 11608), 'numpy.zeros', 'np.zeros', (['(new_trajs.shape[0], 1)'], {}), '((new_trajs.shape[0], 1))\n', (11583, 11608), True, 'import numpy as np\n'), ((11902, 11933), 'numpy.random.randint', 'np.random.randint', (['min_T', 'max_T'], {}), '(min_T, max_T)\n', (11919, 11933), True, 'import numpy as np\n')] |
import numpy as np
from collections import Counter
import string
import math
def process_data(args):
    """Build the full Europarl en→es training bundle.

    Reads the raw parallel corpora, filters over-long pairs, cleans the
    text, builds vocabularies and the bucket layout, tokenizes, and returns
    one dict with everything downstream code needs. `args` is currently
    unused but kept for interface compatibility.
    """
    source_fname = '../data/raw/europarl-v7.es-en.en'
    target_fname = '../data/raw/europarl-v7.es-en.es'
    src = read_sentences_from_file(source_fname)
    tgt = read_sentences_from_file(target_fname)
    src, tgt = remove_long_sentences(src, tgt)
    src, tgt = clean_sentence_lists(src, tgt)
    src_dict, src_vocab = build_vocabulary(src)
    tgt_dict, tgt_vocab = build_vocabulary(tgt)
    buckets = create_bucket_dict(src, tgt)
    data = add_tokens_to_text(src, tgt, buckets, src_dict, tgt_dict)
    data.update({
        'source_vocabulary': src_vocab,
        'source_dictionary': src_dict,
        'target_vocabulary': tgt_vocab,
        'target_dictionary': tgt_dict,
        'bucket_dictionary': buckets,
        'source_reverse_dictionary': {idx: word for word, idx in src_dict.items()},
        'target_reverse_dictionary': {idx: word for word, idx in tgt_dict.items()},
    })
    return data
def remove_long_sentences(source_sentences, target_sentences):
    """Delete, in place, every pair where either side has >= 50 tokens.

    Both lists are mutated and also returned for convenience.
    """
    doomed = [
        idx
        for idx, pair in enumerate(zip(source_sentences, target_sentences))
        if max(len(pair[0].split()), len(pair[1].split())) >= 50
    ]
    # Delete from the end so earlier indices stay valid.
    for idx in reversed(doomed):
        del source_sentences[idx]
        del target_sentences[idx]
    return source_sentences, target_sentences
def read_sentences_from_file(filename):
    """Read a text file and return its lines with surrounding newlines stripped."""
    with open(filename, 'r') as handle:
        return [line.strip('\n') for line in handle]
def clean_sentence_lists(source_list, target_list, max_len=64):
    """Lower-case and strip punctuation from parallel sentence pairs.

    Pairs are kept only when both sides are non-empty and shorter than
    ``max_len - 1`` tokens (one slot is reserved for control tokens).

    Returns:
        (source_clean, target_clean): two aligned lists of cleaned strings.
    """
    source_clean, target_clean = [], []
    translator = str.maketrans('', '', string.punctuation)
    translator[191] = None  # U+00BF inverted question mark, not in string.punctuation
    for source, target in zip(source_list, target_list):
        if len(source.split()) >= (max_len - 1) or len(target.split()) >= (max_len - 1):
            continue
        # BUG FIX: original used `source is not ''` identity checks, which are a
        # SyntaxWarning and rely on CPython string interning; use truthiness.
        if source and target:
            source = source.translate(translator)
            source = source.replace(" s ", "'s ")  # restore possessive apostrophes
            target = target.translate(translator)
            target = target.replace(" s ", "'s ")
            source_clean.append(source.lower())
            target_clean.append(target.lower())
    return source_clean, target_clean
def build_vocabulary(sentence_list, vocabulary_size=50000):
    """Build word<->id tables with special tokens in the first four slots.

    Returns:
        (dictionary, vocabulary): word->id dict and the id-ordered word array.
    """
    special = [('<UNK>', None), ('<PAD>', None), ('<EOS>', None), ('<GO>', None)]
    n_regular = vocabulary_size - len(special)
    counts = Counter(word for line in sentence_list for word in line.split())
    vocabulary = np.array([word for word, _ in special + counts.most_common(n_regular)])
    dictionary = {word: code for code, word in enumerate(vocabulary)}
    return dictionary, vocabulary
def create_bucket_dict(eng_sentences, span_sentences):
    """Group sample indices into length buckets (multiples of five).

    Each pair's bucket is the longer side's token count rounded up via
    ``roundup``; the result maps bucket size -> list of sample indices.
    """
    rounded_lengths = [
        roundup(max(len(eng.split()), len(span.split())))
        for eng, span in zip(eng_sentences, span_sentences)
    ]
    return {
        size: create_buckets(rounded_lengths, size)
        for size in range(5, max(rounded_lengths) + 1, 5)
    }
def roundup(x):
    """Round up to the next multiple of 5; exact multiples advance a bucket."""
    bumped = x + 1  # x+1 pushes multiples of 5 into the next bucket (room for tokens)
    return int(math.ceil(bumped / 5.0)) * 5
def create_buckets(buckets, bucket_len):
    """Return the indices whose rounded length equals ``bucket_len``."""
    matches = []
    for position, size in enumerate(buckets):
        if size == bucket_len:
            matches.append(position)
    return matches
def add_tokens_to_text(source_list, target_list, bucket_dict, source_dictionary, target_dictionary):
    """Pad every sentence to its bucket size, add control tokens, map to ids.

    Returns a dict with 'source_input', 'target_input' and 'target_output'
    lists of id sequences, ordered like the input sentence lists.
    """
    n = len(source_list)
    padded_src = [None] * n
    padded_tgt_in = [None] * n
    padded_tgt_out = [None] * n
    # invert() maps sample index -> bucket size.
    for index, bucket_size in invert(bucket_dict).items():
        padded_src[index] = pad_source_sentences(source_list[index], bucket_size)
        padded_tgt_in[index] = pad_target_input_sentences(target_list[index], bucket_size)
        padded_tgt_out[index] = pad_target_output_sentences(target_list[index], bucket_size)
    return {
        'source_input': convert_words_to_numerical_id(padded_src, source_dictionary),
        'target_input': convert_words_to_numerical_id(padded_tgt_in, target_dictionary),
        'target_output': convert_words_to_numerical_id(padded_tgt_out, target_dictionary),
    }
def pad_source_sentences(sentence, bucket_size):
    """Right-pad with <PAD> tokens until the sentence fills its bucket."""
    missing = bucket_size - len(sentence.split())
    return sentence + ' <PAD>' * missing
def pad_target_input_sentences(sentence, bucket_size):
    """Prefix <GO>, then right-pad with <PAD> tokens up to the bucket size."""
    missing = bucket_size - len(sentence.split()) - 1  # one slot used by <GO>
    return '<GO> ' + sentence + ' <PAD>' * missing
def pad_target_output_sentences(sentence, bucket_size):
    """Append <EOS>, then right-pad with <PAD> tokens up to the bucket size."""
    missing = bucket_size - len(sentence.split()) - 1  # one slot used by <EOS>
    # NOTE: the trailing space in ' <EOS> ' yields a double space before the
    # first <PAD>; harmless because consumers use str.split().
    return sentence + ' <EOS> ' + ' <PAD>' * missing
def invert(dictionary):
    """Flip {key: [values]} into {value: key} for every listed value."""
    flipped = {}
    for key in dictionary:
        for value in dictionary[key]:
            flipped[value] = key
    return flipped
def convert_words_to_numerical_id(sentence_list, dictionary):
    """Map each word of each sentence to its id; unknown words map to <UNK>."""
    encoded = []
    for sentence in sentence_list:
        ids = []
        for word in sentence.split():
            # Lazy <UNK> lookup: only consulted when a word is out of vocabulary.
            ids.append(dictionary[word] if word in dictionary else dictionary['<UNK>'])
        encoded.append(ids)
    return encoded
| [
"collections.Counter",
"numpy.array",
"math.ceil"
] | [((3147, 3189), 'numpy.array', 'np.array', (['[word for word, _ in vocabulary]'], {}), '([word for word, _ in vocabulary])\n', (3155, 3189), True, 'import numpy as np\n'), ((3825, 3849), 'math.ceil', 'math.ceil', (['((x + 1) / 5.0)'], {}), '((x + 1) / 5.0)\n', (3834, 3849), False, 'import math\n'), ((3082, 3100), 'collections.Counter', 'Counter', (['word_list'], {}), '(word_list)\n', (3089, 3100), False, 'from collections import Counter\n')] |
# -*- coding: utf-8 -*-
##
## @file voc_format_detection_dataset.py
## @brief Pascal VOC Format Detection Dataset Class
## @author Keitetsu
## @date 2020/05/22
## @copyright Copyright (c) 2020 Keitetsu
## @par License
## This software is released under the MIT License.
##
import os
import glob
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import cv2
import chainer
class VOCFormatDetectionDataset(chainer.dataset.DatasetMixin):
    """Object-detection dataset reader for Pascal VOC format annotations.

    Annotation files (XML) and image files are paired at construction time;
    examples are parsed lazily in ``get_example``.

    Args:
        anno_dir (str): Directory holding the VOC XML annotation files.
        img_dir (str): Directory holding the image files.
        classes (list of str): Target class names (lower-case).
        classes_conversion_table (dict or None): Optional {name: name} mapping
            applied to annotation class names before filtering by ``classes``.
        id_list_file_path (str or None): Optional text file listing one
            annotation id per line; when None, every file in ``anno_dir``
            is scanned.
        min_img_size (sequence or None): (w, h); objects smaller than this in
            both dimensions are dropped.
        max_img_size (sequence or None): (w, h); objects larger than this in
            both dimensions are dropped.
        use_difficult (bool): Keep objects flagged ``difficult`` when True.
        return_difficult (bool): Also return per-object difficult flags.
    """

    def __init__(
            self,
            anno_dir,
            img_dir,
            classes,
            classes_conversion_table=None,
            id_list_file_path=None,
            min_img_size=None,
            max_img_size=None,
            use_difficult=False,
            return_difficult=False,
    ):
        self.anno_dir = anno_dir
        self.img_dir = img_dir
        self.classes = classes
        self.classes_conversion_table = classes_conversion_table
        self.min_img_size = min_img_size
        self.max_img_size = max_img_size
        self.use_difficult = use_difficult
        self.return_difficult = return_difficult
        # Pre-scan all annotation files so __len__ and indexing are cheap.
        self.anno_file_paths, self.img_file_paths = \
            self._make_file_paths_lists(id_list_file_path)

    def __len__(self):
        return len(self.anno_file_paths)

    def get_example(self, i):
        """Return ``(img, bboxes, labels[, difficult_flags])`` for sample ``i``."""
        # Parse the annotation file, then load the matching image.
        bboxes, labels, difficult_flags = self._get_annotations(i)
        img = self._get_image(i)
        if self.return_difficult:
            return img, bboxes, labels, difficult_flags
        return img, bboxes, labels

    def _make_file_paths_lists(self, id_list_file_path):
        """Collect annotation/image paths containing at least one target object.

        Also prints a per-class summary of file and object counts
        (non-d = non-difficult).
        """
        # Build the candidate annotation file list.
        if id_list_file_path:
            print("loading file list...: %s" % (id_list_file_path))
            with open(id_list_file_path, mode='rt') as f:
                ids = [x.strip() for x in f.readlines()]
            all_anno_file_paths = [
                os.path.join(self.anno_dir, id_ + ".xml") for id_ in ids
            ]
        else:
            print("getting annotation file paths...")
            anno_file_search_path = os.path.join(self.anno_dir, '*')
            all_anno_file_paths = sorted(glob.glob(anno_file_search_path))
        print("number of annotation files: %d" % len(all_anno_file_paths))
        anno_file_paths = []
        img_file_paths = []
        n_classes = len(self.classes)
        file_count = [0] * n_classes
        non_difficult_file_count = [0] * n_classes
        obj_count = [0] * n_classes
        non_difficult_obj_count = [0] * n_classes
        for anno_file_path in all_anno_file_paths:
            # Keep only annotation files that contain a target-class object.
            img_filename, _, labels, difficult_flags = \
                self._preprocess_xml(anno_file_path)[:4]
            if labels.size == 0:
                continue
            anno_file_paths.append(anno_file_path)
            img_file_paths.append(os.path.join(self.img_dir, img_filename))
            # Tally per-class file and object statistics.
            for i, name in enumerate(self.classes):
                obj_flags = (labels == i)
                non_difficult_obj_flags = np.logical_and(
                    obj_flags, np.logical_not(difficult_flags))
                num_objs = np.count_nonzero(obj_flags, axis=0)
                num_non_difficult_objs = np.count_nonzero(
                    non_difficult_obj_flags, axis=0)
                obj_count[i] += num_objs
                non_difficult_obj_count[i] += num_non_difficult_objs
                if num_objs != 0:
                    file_count[i] += 1
                if num_non_difficult_objs != 0:
                    non_difficult_file_count[i] += 1
        print("number of selected annotation files: %d" % len(anno_file_paths))
        # Print the per-class summary table.
        print("non-d: non-difficult")
        count_df = pd.DataFrame(
            {
                'class': self.classes,
                '# files': file_count,
                '# non-d files': non_difficult_file_count,
                '# objects': obj_count,
                '# non-d objects': non_difficult_obj_count
            }
        )
        pd.set_option('display.max_rows', None)
        print(count_df)
        return anno_file_paths, img_file_paths

    def _get_image(self, i):
        """Load image ``i`` with OpenCV (BGR channel order)."""
        img = cv2.imread(self.img_file_paths[i], cv2.IMREAD_COLOR)
        return img

    def _get_annotations(self, i):
        """Return (bboxes, labels, difficult_flags) parsed from annotation ``i``."""
        return self._preprocess_xml(self.anno_file_paths[i])[1:]

    def _preprocess_xml(self, anno_file_path):
        """Parse one VOC XML annotation file.

        Returns:
            (img_filename, bboxes, labels, difficult_flags): bboxes is float32
            (N, 4) as (ymin, xmin, ymax, xmax), labels is int32 (N,) of class
            indices, difficult_flags is bool (N,). All three are empty arrays
            when no object matches.
        """
        tree = ET.parse(anno_file_path)
        root = tree.getroot()
        img_filename = root.find('filename').text
        bboxes = []
        labels = []
        difficult_flags = []
        for obj in root.findall('object'):
            # Skip difficult objects unless explicitly requested.
            # (Original compared with `is False`, an identity check.)
            if (not self.use_difficult) and (int(obj.find('difficult').text) == 1):
                continue
            # Normalize the class name: lower-case, trimmed.
            name = obj.find('name').text.lower().strip()
            # Apply the optional class-name conversion table.
            if self.classes_conversion_table:
                if (name in self.classes_conversion_table):
                    name = self.classes_conversion_table[name]
            # Keep only target classes.
            if (not (name in self.classes)):
                continue
            bbox = obj.find('bndbox')
            # VOC coordinates are 1-based; shift to 0-based pixel indices.
            xmin = int(bbox.find('xmin').text) - 1
            ymin = int(bbox.find('ymin').text) - 1
            xmax = int(bbox.find('xmax').text) - 1
            ymax = int(bbox.find('ymax').text) - 1
            # Optional object size limits.
            if self.min_img_size:
                dh = ymax - ymin
                dw = xmax - xmin
                if ((self.min_img_size[0] > dw) and (self.min_img_size[1] > dh)):
                    continue
            if self.max_img_size:
                dh = ymax - ymin
                dw = xmax - xmin
                if ((self.max_img_size[0] < dw) and (self.max_img_size[1] < dh)):
                    continue
            bboxes.append([ymin, xmin, ymax, xmax])
            labels.append(self.classes.index(name))
            difficult_flags.append(int(obj.find('difficult').text))
        # Convert to ndarrays when at least one box was kept.
        if bboxes:
            bboxes = np.stack(bboxes).astype(np.float32)
            labels = np.stack(labels).astype(np.int32)
            # BUG FIX: `np.bool` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `bool` is the documented replacement.
            difficult_flags = np.array(difficult_flags, dtype=bool)
        else:
            bboxes = np.empty(0)
            labels = np.empty(0)
            difficult_flags = np.empty(0)
        return img_filename, bboxes, labels, difficult_flags
| [
"pandas.DataFrame",
"xml.etree.ElementTree.parse",
"numpy.stack",
"numpy.count_nonzero",
"os.path.join",
"numpy.empty",
"numpy.logical_not",
"cv2.imread",
"numpy.array",
"glob.glob",
"pandas.set_option"
] | [((3916, 4095), 'pandas.DataFrame', 'pd.DataFrame', (["{'class': self.classes, '# files': file_count, '# non-d files':\n non_difficult_file_count, '# objects': obj_count, '# non-d objects':\n non_difficult_obj_count}"], {}), "({'class': self.classes, '# files': file_count, '# non-d files':\n non_difficult_file_count, '# objects': obj_count, '# non-d objects':\n non_difficult_obj_count})\n", (3928, 4095), True, 'import pandas as pd\n'), ((4213, 4252), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (4226, 4252), True, 'import pandas as pd\n'), ((4393, 4445), 'cv2.imread', 'cv2.imread', (['self.img_file_paths[i]', 'cv2.IMREAD_COLOR'], {}), '(self.img_file_paths[i], cv2.IMREAD_COLOR)\n', (4403, 4445), False, 'import cv2\n'), ((4630, 4654), 'xml.etree.ElementTree.parse', 'ET.parse', (['anno_file_path'], {}), '(anno_file_path)\n', (4638, 4654), True, 'import xml.etree.ElementTree as ET\n'), ((2153, 2185), 'os.path.join', 'os.path.join', (['self.anno_dir', '"""*"""'], {}), "(self.anno_dir, '*')\n", (2165, 2185), False, 'import os\n'), ((2980, 3020), 'os.path.join', 'os.path.join', (['self.img_dir', 'img_filename'], {}), '(self.img_dir, img_filename)\n', (2992, 3020), False, 'import os\n'), ((6590, 6630), 'numpy.array', 'np.array', (['difficult_flags'], {'dtype': 'np.bool'}), '(difficult_flags, dtype=np.bool)\n', (6598, 6630), True, 'import numpy as np\n'), ((6666, 6677), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (6674, 6677), True, 'import numpy as np\n'), ((6699, 6710), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (6707, 6710), True, 'import numpy as np\n'), ((6741, 6752), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (6749, 6752), True, 'import numpy as np\n'), ((2227, 2259), 'glob.glob', 'glob.glob', (['anno_file_search_path'], {}), '(anno_file_search_path)\n', (2236, 2259), False, 'import glob\n'), ((3324, 3359), 'numpy.count_nonzero', 'np.count_nonzero', (['obj_flags'], {'axis': 
'(0)'}), '(obj_flags, axis=0)\n', (3340, 3359), True, 'import numpy as np\n'), ((3403, 3452), 'numpy.count_nonzero', 'np.count_nonzero', (['non_difficult_obj_flags'], {'axis': '(0)'}), '(non_difficult_obj_flags, axis=0)\n', (3419, 3452), True, 'import numpy as np\n'), ((2006, 2047), 'os.path.join', 'os.path.join', (['self.anno_dir', "(id_ + '.xml')"], {}), "(self.anno_dir, id_ + '.xml')\n", (2018, 2047), False, 'import os\n'), ((3264, 3295), 'numpy.logical_not', 'np.logical_not', (['difficult_flags'], {}), '(difficult_flags)\n', (3278, 3295), True, 'import numpy as np\n'), ((6469, 6485), 'numpy.stack', 'np.stack', (['bboxes'], {}), '(bboxes)\n', (6477, 6485), True, 'import numpy as np\n'), ((6526, 6542), 'numpy.stack', 'np.stack', (['labels'], {}), '(labels)\n', (6534, 6542), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 22 14:27:32 2017
@author: virati
Quick file to load in Data Frame
"""
import scipy
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.linear_model import ElasticNet, ElasticNetCV
import pdb
def Phase_List(exprs='all', nmo=-1):
    """Return clinical phase labels for the requested selection.

    exprs: 'all' (A04..C24), 'ephys' (B01..C24), 'Nmo_ephys' or 'Nmo_onStim'
           (prefix slices of the ephys phases controlled by nmo).
    nmo: number of months used by the 'Nmo_*' selections.
    """
    all_phases = ['A04', 'A03', 'A02', 'A01', 'B01', 'B02', 'B03', 'B04']
    # Append C01..C24 with zero-padded two-digit numbers.
    all_phases += ['C' + str(aa).zfill(2) for aa in range(1, 25)]
    ephys_phases = all_phases[4:]
    if exprs == 'all':
        return all_phases
    if exprs == 'ephys':
        return ephys_phases
    if exprs == 'Nmo_ephys':
        return ephys_phases[0:4 * (nmo + 1) - 1]
    if exprs == 'Nmo_onStim':
        return ephys_phases[4:4 * (nmo + 1) - 1]
# The frame is produced by the old "Integrated Ephys" scripts (for now).
# Load that frame; it should contain all the data we need.
# Only the "Chronics" recordings are analyzed here.
exp = 'Chronics'
# NOTE(review): on NumPy >= 1.16.3, loading a pickled object array requires
# np.load(..., allow_pickle=True) — confirm the pinned NumPy version.
DataFrame = np.load('/home/virati/MDD_Data/Data_frame_ChronicsMed.npy').item()
# Load stim-change times; TODO: merge these into DataFrame in the generation script.
StimChange = scipy.io.loadmat('/home/virati/MDD_Data/stim_changes.mat')['StimMatrix']
f_focus = ((0,50))  # frequency band of interest in Hz: (lower, upper)
#%%
# Frequency axis for the 512-bin PSDs spanning 0-211 Hz.
f_vect = np.linspace(0,211,512)
f_trunc = np.where(np.logical_and(f_vect < f_focus[1], f_vect > f_focus[0]))[0]  # bin indices inside f_focus
f_dict = {'FreqVector': f_vect, 'FreqTrunc': f_trunc}
# Fixed 3-test x 3-train patient split (for now).
training_patients = ['905','907','906']
test_patients = ['901','903','908']
# Build the design matrix next:
# get the list of ephys-related clinical phases.
def Phase_StateMatrix(DataFrame, Phases, max_freq=211,
                      training_patients=('901', '903', '905', '906', '907', '908')):
    """Assemble the regression design matrix and clinical target vector.

    For every non-'A' phase of every training patient, the left and right
    channel log-PSDs (truncated to max_freq) are concatenated into one row
    of X; the corresponding HDRS17 score becomes the entry of Y.

    Args:
        DataFrame: nested dict keyed 'DBS<patient>' -> phase -> data.
        Phases: currently unused; phases are taken from DataFrame's keys.
        max_freq: upper frequency bound in Hz (PSDs have 512 bins over 0-211 Hz).
        training_patients: patient ids to include (default was a mutable list;
            a tuple avoids the shared-default pitfall).

    Returns:
        (Xout, Yout): ndarray design matrix and ndarray of HDRS17 targets.
    """
    max_fidx = int(max_freq / 211 * 512)  # number of PSD bins to keep
    dsgnX = []
    dsgnY = []
    for patient in training_patients:
        for phase in DataFrame['DBS' + patient].keys():
            if phase[0] == 'A':
                continue  # skip pre-ephys 'A' phases
            print(patient + ' ' + phase + ' up now')
            try:
                psd = DataFrame['DBS' + patient][phase]['MeanPSD']['LOGPxx']
                state_vector = np.hstack((psd[:max_fidx, 0], psd[:max_fidx, 1]))
                clin_score = DataFrame['DBS' + patient][phase]['HDRS17']
            except Exception:
                # BUG FIX: was a bare `except:` (also caught SystemExit and
                # KeyboardInterrupt). Fetching HDRS17 BEFORE appending also
                # fixes a latent bug where a failed HDRS17 lookup left X and
                # Y misaligned.
                print(patient + ' ' + phase + ' has a problem')
                continue
            dsgnX.append(state_vector)
            dsgnY.append(clin_score)
    # Stack into ndarrays (rows of X stay aligned with entries of Y).
    Xout = np.array(dsgnX)
    Yout = np.array(dsgnY)
    return Xout, Yout
# Build the design matrix (PSDs truncated to 70 Hz) and the clinical targets.
DsgnX, clinY = Phase_StateMatrix(DataFrame,Phase_List(exprs='ephys'),max_freq=70,training_patients=training_patients)
# The actual regression code below.
EN_alpha = 0.2  # elastic-net regularization strength
# Fit the elastic-net model.
# NOTE(review): ElasticNet's `normalize` parameter was deprecated in
# scikit-learn 1.0 and removed in 1.2 — confirm the pinned sklearn version
# or move scaling into a Pipeline with StandardScaler.
DM = ElasticNet(alpha=EN_alpha,tol=0.001,normalize=True,positive=False)
DM.fit(DsgnX,clinY)
plt.figure()
coefs = DM.coef_
csize = coefs.shape[0]
# First half of the coefficients (blue) vs second half (red) — presumably
# the left and right channels.
plt.plot(coefs[0:int(np.ceil(csize/2))],color='blue')
# NOTE(review): the `+ 1` skips one coefficient at the midpoint — possible
# off-by-one, confirm intent.
plt.plot(coefs[int(np.ceil(csize/2) + 1):],color='red')
"numpy.load",
"numpy.ceil",
"numpy.logical_and",
"scipy.io.loadmat",
"sklearn.linear_model.ElasticNet",
"numpy.hstack",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace"
] | [((1406, 1430), 'numpy.linspace', 'np.linspace', (['(0)', '(211)', '(512)'], {}), '(0, 211, 512)\n', (1417, 1430), True, 'import numpy as np\n'), ((3180, 3249), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'alpha': 'EN_alpha', 'tol': '(0.001)', 'normalize': '(True)', 'positive': '(False)'}), '(alpha=EN_alpha, tol=0.001, normalize=True, positive=False)\n', (3190, 3249), False, 'from sklearn.linear_model import ElasticNet, ElasticNetCV\n'), ((3268, 3280), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3278, 3280), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1339), 'scipy.io.loadmat', 'scipy.io.loadmat', (['"""/home/virati/MDD_Data/stim_changes.mat"""'], {}), "('/home/virati/MDD_Data/stim_changes.mat')\n", (1297, 1339), False, 'import scipy\n'), ((2838, 2853), 'numpy.array', 'np.array', (['dsgnX'], {}), '(dsgnX)\n', (2846, 2853), True, 'import numpy as np\n'), ((2865, 2880), 'numpy.array', 'np.array', (['dsgnY'], {}), '(dsgnY)\n', (2873, 2880), True, 'import numpy as np\n'), ((1093, 1152), 'numpy.load', 'np.load', (['"""/home/virati/MDD_Data/Data_frame_ChronicsMed.npy"""'], {}), "('/home/virati/MDD_Data/Data_frame_ChronicsMed.npy')\n", (1100, 1152), True, 'import numpy as np\n'), ((1448, 1504), 'numpy.logical_and', 'np.logical_and', (['(f_vect < f_focus[1])', '(f_vect > f_focus[0])'], {}), '(f_vect < f_focus[1], f_vect > f_focus[0])\n', (1462, 1504), True, 'import numpy as np\n'), ((3342, 3360), 'numpy.ceil', 'np.ceil', (['(csize / 2)'], {}), '(csize / 2)\n', (3349, 3360), True, 'import numpy as np\n'), ((2234, 2389), 'numpy.hstack', 'np.hstack', (["(DataFrame['DBS' + patient][phase]['MeanPSD']['LOGPxx'][:max_fidx, 0],\n DataFrame['DBS' + patient][phase]['MeanPSD']['LOGPxx'][:max_fidx, 1])"], {}), "((DataFrame['DBS' + patient][phase]['MeanPSD']['LOGPxx'][:max_fidx,\n 0], DataFrame['DBS' + patient][phase]['MeanPSD']['LOGPxx'][:max_fidx, 1]))\n", (2243, 2389), True, 'import numpy as np\n'), ((3394, 3412), 'numpy.ceil', 'np.ceil', 
(['(csize / 2)'], {}), '(csize / 2)\n', (3401, 3412), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# *****************************************************************************
# * cloudFPGA
# * Copyright 2016 -- 2022 IBM Corporation
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *----------------------------------------------------------------------------
## @file test_warp_transform_numpi_video_threaded.py
## @author DID
## @date October 2021
## @brief A python script for testing the cF warp_transform kernel in multi-threaded environment.
'''
WarpTransform multithreaded video processing sample.
Usage:
test_warp_transform_numpi_video_threaded.py {<video device number>|<video file name>}
Shows how python threading capabilities can be used
to organize parallel captured frame processing pipeline
for smoother playback.
Keyboard shortcuts:
ESC - exit
f - switch between CPU and cloudFPGA version (pre-programming is required)
space - switch between multi and single threaded processing
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
import os
# Extend sys.path with the cloudFPGA common video helpers (requires cFpRootDir).
video_common_lib=os.environ['cFpRootDir'] + "HOST/vision/common/languages/python/var"
sys.path.append(video_common_lib)
import numpy as np
import cv2 as cv
import multiprocessing
from multiprocessing.pool import ThreadPool
from collections import deque
# Manager to create shared objects (its Lock coordinates FPGA handoff below).
manager = multiprocessing.Manager()
from threading import Lock
from common import clock, draw_str, StatValue
import video
import time
# Extend sys.path with the compiled warp_transform Python bindings.
trieres_lib=os.environ['cFpRootDir'] + "HOST/vision/warp_transform/languages/python/build"
sys.path.append(trieres_lib)
import _trieres_warp_transform_numpi
ROI = True  # when True, process a centered FRAME_WIDTH x FRAME_HEIGHT region of interest
# import the necessary packages
import datetime
class FPS:
    """Measure the approximate frame rate between start() and stop()."""

    def __init__(self):
        # Interval endpoints and the frame tally recorded between them.
        self._start = None
        self._end = None
        self._numFrames = 0

    def start(self):
        """Begin the measurement interval; returns self for chaining."""
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        """End the measurement interval."""
        self._end = datetime.datetime.now()

    def update(self):
        """Count one examined frame."""
        self._numFrames += 1

    def elapsed(self):
        """Seconds elapsed between start() and stop()."""
        interval = self._end - self._start
        return interval.total_seconds()

    def fps(self):
        """Approximate frames per second over the measured interval."""
        return self._numFrames / self.elapsed()
class DummyTask:
    """Synchronous stand-in mimicking the AsyncResult interface."""

    def __init__(self, data):
        # The result is computed eagerly and stored.
        self.data = data

    def ready(self):
        # A dummy task is always already complete.
        return True

    def get(self):
        # Hand back the precomputed result.
        return self.data
def main():
    """Capture video, warp-transform each frame (on cloudFPGA or CPU), and display stats.

    Reads FRAME_WIDTH/FRAME_HEIGHT from the cF config header, then runs a
    pipelined loop: frames are dispatched to a thread pool (one worker per
    FPGA in accel mode, one per CPU otherwise) and results are drained in
    order from a pending deque.
    """
    import sys
    # Parse FRAME_WIDTH / FRAME_HEIGHT out of the C++ config header.
    config_file=os.environ['cFpRootDir'] + "HOST/vision/warp_transform/languages/cplusplus/include/config.h"
    with open(config_file) as cfg:
        for line in cfg:
            if "#define FRAME_WIDTH" in line:
                width = int(line.split()[2])
            elif "#define FRAME_HEIGHT" in line:
                height = int(line.split()[2])
    # If either #define was missing, `width`/`height` are unbound and the
    # prints below raise NameError, which aborts with a message.
    try:
        print("Found in " + config_file + ": width = "+str(width) + ", height = "+str(height))
        total_size = height * width
    except:
        print("Coudln't find FRAME_WIDTH or FRAME_HEIGHT in "+ config_file + ". Aborting...")
        exit(0)
    # Video source: first CLI argument (device number or file), default camera 0.
    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cap = video.create_capture(fn)
    fps = FPS().start()
    # Create a lock shared with the worker threads guarding the `fpgas` deque.
    lock = manager.Lock()
    #lock = Lock()
    def crop_square_roi(img, size, interpolation=cv.INTER_AREA):
        # Cut (or fall back to resizing) a width x height region from the
        # image center; closure over `width`/`height`/`ROI`.
        h, w = img.shape[:2]
        if ROI:
            if (h>height) and (w>width):
                roi_x_pos = int((w-width) /2)
                roi_y_pos = int((h-height)/2)
                crop_img = img[int(roi_y_pos):int(roi_y_pos+height), int(roi_x_pos):int(roi_x_pos+width)]
            else:
                crop_img = img
                print("WARNING: The input image of [", h , " x ", w , "] is not bigger to crop a ROI of [", height , " x ", width, "]. Will just resize")
        else:
            min_size = np.amin([np.amin([h,w]), np.amin([height,width])])
            # Centralize and crop
            crop_img = img[int(h/2-min_size/2):int(h/2+min_size/2), int(w/2-min_size/2):int(w/2+min_size/2)]
        # Adjusting the image file if needed
        if ((crop_img.shape[0] != height) or (crop_img.shape[1] != width)):
            print("WARNING: The image was resized from [", crop_img.shape[0] , " x ", crop_img.shape[1] , "] to [", height , " x ", width, "]")
            resized = cv.resize(crop_img , (size, size), interpolation=interpolation)
        else:
            resized = crop_img
        return resized
    def patch_sqaure_roi(orig, frame, interpolation=cv.INTER_AREA):
        # Embed the processed grayscale ROI back into the center of the
        # original color frame (note: mutates `orig` in place).
        h_orig, w_orig = orig.shape[:2]
        h_frame, w_frame = frame.shape[:2]
        patched_img = orig
        if (h_orig>h_frame) and (w_orig>w_frame):
            roi_x_pos = int((w_orig-w_frame)/2)
            roi_y_pos = int((h_orig-h_frame)/2)
            frame_backtorgb = cv.cvtColor(frame,cv.COLOR_GRAY2RGB)
            patched_img[int(roi_y_pos):int(roi_y_pos+h_frame), int(roi_x_pos):int(roi_x_pos+w_frame),:] = frame_backtorgb
        else:
            patched_img = frame
            print("WARNING: The input image of [", h_orig , " x ", w_orig , "] is not bigger to embed a ROI of [", h_frame , " x ", w_frame, "]. Will just resize")
            print("after 1st if")
        # Adjusting the image file if needed
        if ((patched_img.shape[0] != h_orig) or (patched_img.shape[1] != w_orig)):
            print("WARNING: The image was resized from [", patched_img.shape[0] , " x ", patched_img.shape[1] , "] to [", h_orig , " x ", w_orig, "]")
            resized = cv.resize(patched_img , (w_orig, h_orig), interpolation=interpolation)
        else:
            resized = patched_img
        return resized
    def process_frame(frame, t0, threaded_mode, accel_mode, fpga, fpgas):
        # Worker body: grayscale + crop, then warp-transform on the given
        # FPGA (accel mode) or median-blur locally (CPU mode). Returns the
        # frame back to the `fpgas` pool under `lock` when done.
        # Converting to grayscale
        orig = frame
        frame = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
        # Adjusting the image file if needed
        ##frame = cv.resize(frame, (width, height), interpolation = cv.INTER_LINEAR)
        frame = crop_square_roi(frame, width, interpolation = cv.INTER_AREA)
        if accel_mode:
            #print("Will execute on fpga with ip:port: "+fpga[0]+":"+fpga[1])
            # Flattening the image from 2D to 1D before shipping to the FPGA.
            image = frame.flatten()
            output_array = _trieres_warp_transform_numpi.warp_transform(image, total_size, fpga[0], fpga[1])
            # Convert 1D array to a 2D numpy array
            #time.sleep(1)
            frame = np.reshape(output_array, (height, width))
            print(type(frame))
            print("Declare free the fpga: "+str(fpga))
            lock.acquire()
            if threaded_mode:
                fpgas.append(fpga)
            else:
                fpgas.appendleft(fpga)
            lock.release()
        else:
            #time.sleep(10)
            frame = cv.medianBlur(frame, 9)
        if ROI:
            frame = patch_sqaure_roi(orig, frame, cv.INTER_AREA)
        print("returning from process_frame")
        return frame, t0
    threaded_mode = True
    accel_mode = True
    # Pool of available cloudFPGA instances as (ip, port) pairs.
    fpgas = deque([["10.12.200.225" , "2718"],
                   ["10.12.200.224" , "2719"]])
    #              ["10.12.200.11" , "2720"],
    #              ["10.12.200.19" , "2721"],
    #              ["10.12.200.29" , "2722"]])
    # One worker per FPGA in accel mode, otherwise one per CPU core.
    if accel_mode:
        threadn = len(fpgas)
    else:
        threadn = cv.getNumberOfCPUs()
    pool = ThreadPool(processes = threadn)
    pending = deque()
    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:
        fpga = 0
        print("Before while len(pending)="+str(len(pending)))
        # Drain finished tasks from the head of the pipeline, in order.
        while len(pending) > 0 and pending[0].ready() :
            print("After while len(pending)="+str(len(pending)))
            print("Before pending.popleft().get()")
            res, t0 = pending.popleft().get()
            print("After pending.popleft().get(): len(pending)="+str(len(pending)))
            print(type(fpga))
            print(str(fpga))
            #exit(0)
            latency.update(clock() - t0)
            # Overlay runtime statistics on the processed frame.
            draw_str(res, (20, 20), "threaded : " + str(threaded_mode))
            draw_str(res, (20, 40), "cloudFPA : " + str(accel_mode))
            draw_str(res, (20, 60), "latency : %.1f ms" % (latency.value*1000))
            draw_str(res, (20, 80), "frame interval : %.1f ms" % (frame_interval.value*1000))
            draw_str(res, (20, 100), "FPS : %.1f" % (1.0/frame_interval.value))
            # try:
            #     video_out.write(res)
            # except:
            #     video_name = str(fn)+"_out.avi"
            #     video_out = cv.VideoWriter(video_name, cv.VideoWriter_fourcc('M','J','P','G'), 30, (res.shape[1],res.shape[0]))
            #print("video_out Size is:"+str(res.shape[1])+","+str(res.shape[0]))
            #cv.imshow('threaded video', res)
        # Submit a new frame when a pipeline slot is free.
        if len(pending) < threadn: # and len(fpgas) != 0:
            _ret, frame = cap.read()
            if _ret is False:
                print("Reached EOF.")
                # NOTE(review): `video_name` is only assigned in the
                # commented-out VideoWriter block above, so this print raises
                # NameError at EOF — confirm intent.
                print("Saved video: " + video_name)
                #video_out.release()
                break
            #print("frame Size is:"+str(frame.shape[1])+","+str(frame.shape[0]))
            t = clock()
            frame_interval.update(t - last_frame_time)
            last_frame_time = t
            # update the FPS counter
            fps.update()
            # Reserve an FPGA from the shared pool before dispatching.
            if accel_mode:
                lock.acquire()
                fpga = fpgas.popleft()
                print("Reserved the fpga:"+str(fpga))
                lock.release()
            else:
                fpga = 0
            if threaded_mode:
                task = pool.apply_async(process_frame, (frame.copy(), t, threaded_mode, accel_mode, fpga, fpgas))
                fpga = 0
            else:
                task = DummyTask(process_frame(frame, t, threaded_mode, accel_mode, fpga, fpgas))
            pending.append(task)
        else:
            if accel_mode:
                print("Waiting for a free fpga")
            else:
                print("Waiting for a free thread")
        #if accel_mode and type(fpga) is list:
        #    print("Declare free the fpga: "+str(fpga))
        #    fpgas.appendleft(fpga)
        # Keyboard controls: space toggles threading, 'f' toggles FPGA mode,
        # ESC (27) exits.
        ch = cv.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == ord('f'):
            accel_mode = not accel_mode
        if ch == 27:
            break
    print('Done')
    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
if __name__ == '__main__':
    # Print the usage banner, then run the capture/processing loop.
    print(__doc__)
    main()
#cv.destroyAllWindows()
| [
"sys.path.append",
"video.create_capture",
"cv2.resize",
"multiprocessing.pool.ThreadPool",
"numpy.amin",
"cv2.medianBlur",
"cv2.cvtColor",
"multiprocessing.Manager",
"cv2.waitKey",
"common.clock",
"_trieres_warp_transform_numpi.warp_transform",
"common.StatValue",
"common.draw_str",
"nump... | [((1693, 1726), 'sys.path.append', 'sys.path.append', (['video_common_lib'], {}), '(video_common_lib)\n', (1708, 1726), False, 'import sys\n'), ((1909, 1934), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (1932, 1934), False, 'import multiprocessing\n'), ((2128, 2156), 'sys.path.append', 'sys.path.append', (['trieres_lib'], {}), '(trieres_lib)\n', (2143, 2156), False, 'import sys\n'), ((3912, 3936), 'video.create_capture', 'video.create_capture', (['fn'], {}), '(fn)\n', (3932, 3936), False, 'import video\n'), ((7990, 8051), 'collections.deque', 'deque', (["[['10.12.200.225', '2718'], ['10.12.200.224', '2719']]"], {}), "([['10.12.200.225', '2718'], ['10.12.200.224', '2719']])\n", (7995, 8051), False, 'from collections import deque\n'), ((8325, 8354), 'multiprocessing.pool.ThreadPool', 'ThreadPool', ([], {'processes': 'threadn'}), '(processes=threadn)\n', (8335, 8354), False, 'from multiprocessing.pool import ThreadPool\n'), ((8371, 8378), 'collections.deque', 'deque', ([], {}), '()\n', (8376, 8378), False, 'from collections import deque\n'), ((8394, 8405), 'common.StatValue', 'StatValue', ([], {}), '()\n', (8403, 8405), False, 'from common import clock, draw_str, StatValue\n'), ((8427, 8438), 'common.StatValue', 'StatValue', ([], {}), '()\n', (8436, 8438), False, 'from common import clock, draw_str, StatValue\n'), ((8461, 8468), 'common.clock', 'clock', ([], {}), '()\n', (8466, 8468), False, 'from common import clock, draw_str, StatValue\n'), ((2526, 2549), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2547, 2549), False, 'import datetime\n'), ((2614, 2637), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2635, 2637), False, 'import datetime\n'), ((6686, 6723), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_RGB2GRAY'], {}), '(frame, cv.COLOR_RGB2GRAY)\n', (6697, 6723), True, 'import cv2 as cv\n'), ((8293, 8313), 'cv2.getNumberOfCPUs', 'cv.getNumberOfCPUs', ([], {}), '()\n', (8311, 
8313), True, 'import cv2 as cv\n'), ((11214, 11227), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (11224, 11227), True, 'import cv2 as cv\n'), ((5148, 5210), 'cv2.resize', 'cv.resize', (['crop_img', '(size, size)'], {'interpolation': 'interpolation'}), '(crop_img, (size, size), interpolation=interpolation)\n', (5157, 5210), True, 'import cv2 as cv\n'), ((5669, 5706), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_GRAY2RGB'], {}), '(frame, cv.COLOR_GRAY2RGB)\n', (5680, 5706), True, 'import cv2 as cv\n'), ((6392, 6461), 'cv2.resize', 'cv.resize', (['patched_img', '(w_orig, h_orig)'], {'interpolation': 'interpolation'}), '(patched_img, (w_orig, h_orig), interpolation=interpolation)\n', (6401, 6461), True, 'import cv2 as cv\n'), ((7198, 7283), '_trieres_warp_transform_numpi.warp_transform', '_trieres_warp_transform_numpi.warp_transform', (['image', 'total_size', 'fpga[0]', 'fpga[1]'], {}), '(image, total_size, fpga[0],\n fpga[1])\n', (7242, 7283), False, 'import _trieres_warp_transform_numpi\n'), ((7379, 7420), 'numpy.reshape', 'np.reshape', (['output_array', '(height, width)'], {}), '(output_array, (height, width))\n', (7389, 7420), True, 'import numpy as np\n'), ((7745, 7768), 'cv2.medianBlur', 'cv.medianBlur', (['frame', '(9)'], {}), '(frame, 9)\n', (7758, 7768), True, 'import cv2 as cv\n'), ((9155, 9232), 'common.draw_str', 'draw_str', (['res', '(20, 60)', "('latency : %.1f ms' % (latency.value * 1000))"], {}), "(res, (20, 60), 'latency : %.1f ms' % (latency.value * 1000))\n", (9163, 9232), False, 'from common import clock, draw_str, StatValue\n'), ((9243, 9331), 'common.draw_str', 'draw_str', (['res', '(20, 80)', "('frame interval : %.1f ms' % (frame_interval.value * 1000))"], {}), "(res, (20, 80), 'frame interval : %.1f ms' % (frame_interval.value *\n 1000))\n", (9251, 9331), False, 'from common import clock, draw_str, StatValue\n'), ((9338, 9423), 'common.draw_str', 'draw_str', (['res', '(20, 100)', "('FPS : %.1f' % (1.0 / frame_interval.value))"], 
{}), "(res, (20, 100), 'FPS : %.1f' % (1.0 / frame_interval.value)\n )\n", (9346, 9423), False, 'from common import clock, draw_str, StatValue\n'), ((10174, 10181), 'common.clock', 'clock', ([], {}), '()\n', (10179, 10181), False, 'from common import clock, draw_str, StatValue\n'), ((4671, 4686), 'numpy.amin', 'np.amin', (['[h, w]'], {}), '([h, w])\n', (4678, 4686), True, 'import numpy as np\n'), ((4687, 4711), 'numpy.amin', 'np.amin', (['[height, width]'], {}), '([height, width])\n', (4694, 4711), True, 'import numpy as np\n'), ((8974, 8981), 'common.clock', 'clock', ([], {}), '()\n', (8979, 8981), False, 'from common import clock, draw_str, StatValue\n')] |
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs FFN inference within a dense bounding box.

The bounding box is divided into overlapping sub-volumes that are scattered
across MPI ranks; each rank then segments its share of sub-volumes
sequentially within its own process.
"""
import os
import time
from google.protobuf import text_format
from absl import app
from absl import flags
import logging
import os
import glob
import numpy as np
import sys
import tensorflow as tf
from tqdm import tqdm
from ffn.utils import bounding_box_pb2
from ffn.inference import inference
from ffn.inference import inference_flags
from ffn.utils import bounding_box
from ffn.utils import geom_utils
from mpi4py import MPI
# MPI communicator: rank 0 partitions the work list, every rank runs inference.
mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
mpi_size = mpi_comm.Get_size()

gfile = tf.io.gfile

# The FFN inference code path still relies on TF1-style graph execution.
tf.compat.v1.disable_eager_execution()

FLAGS = flags.FLAGS

flags.DEFINE_string('bounding_box', None,
                    'BoundingBox proto in text format defining the area '
                    'to be segmented.')
flags.DEFINE_list('subvolume_size', '512,512,128',
                  '"valid" subvolume_size to issue to each runner')
flags.DEFINE_list('overlap', '32,32,16', 'overlap of bbox')
flags.DEFINE_string('output_dir', None,
                    'Enforce output dir regardless of config')
flags.DEFINE_boolean('use_cpu', False, 'Use CPU instead of GPU')
flags.DEFINE_integer('num_gpu', 0, 'Allocate on different GPUs')
flags.DEFINE_boolean('resume', False, 'Whether resuming from checkpoints')
flags.DEFINE_boolean('verbose', False, 'logging level')
def divide_bounding_box(bbox, subvolume_size, overlap):
    """divide up into valid subvolumes."""
    # The parsed proto only carries start/size (no "end" attribute), so
    # rebuild a proper BoundingBox object from those two vectors first.
    outer = bounding_box.BoundingBox(
        geom_utils.ToNumpy3Vector(bbox.start),
        geom_utils.ToNumpy3Vector(bbox.size))
    calculator = bounding_box.OrderlyOverlappingCalculator(
        outer_box=outer,
        sub_box_size=subvolume_size,
        overlap=overlap,
        include_small_sub_boxes=True,
        back_shift_small_sub_boxes=False)
    # Materialize the generator so callers can index / split the result.
    return list(calculator.generate_sub_boxes())
def find_unfinished(sub_bboxes, root_output_dir):
    """Return the subset of *sub_bboxes* that have no segmentation output yet.

    A sub-box is considered finished when its per-box output directory
    (named ``seg-<start x>_<start y>_<start z>_<size x>_<size y>_<size z>``
    under *root_output_dir*) already contains at least one ``.npz`` file
    anywhere in its tree.

    Args:
        sub_bboxes: iterable of bounding boxes with ``start`` and ``size``
            3-vectors.
        root_output_dir: directory under which per-box output dirs live.

    Returns:
        List of the bounding boxes that still need to be processed, in the
        original order.
    """
    filtered_sub_bboxes = []
    for sub_bbox in sub_bboxes:
        out_name = 'seg-%d_%d_%d_%d_%d_%d' % (
            sub_bbox.start[0], sub_bbox.start[1], sub_bbox.start[2],
            sub_bbox.size[0], sub_bbox.size[1], sub_bbox.size[2])
        segmentation_output_dir = os.path.join(root_output_dir, out_name)
        npzs = glob.glob(os.path.join(segmentation_output_dir, '**/*.npz'),
                         recursive=True)
        # Keep only boxes with no .npz results on disk (truthiness instead
        # of the original inverted len()/continue flow).
        if not npzs:
            filtered_sub_bboxes.append(sub_bbox)
    logging.warning('pre-post filter: %s %s',
                    len(sub_bboxes), len(filtered_sub_bboxes))
    return filtered_sub_bboxes
def main(unused_argv):
    """Partition the bounding box across MPI ranks and run FFN inference."""
    print('log_level', FLAGS.verbose)
    # Root-logger verbosity is driven by the --verbose flag.
    if FLAGS.verbose:
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
    else:
        logger = logging.getLogger()
        logger.setLevel(logging.WARNING)
    start_time = time.time()
    request = inference_flags.request_from_flags()
    # Only rank 0 prepares the work list; it is then scattered to all ranks.
    if mpi_rank == 0:
        if not gfile.exists(request.segmentation_output_dir):
            gfile.makedirs(request.segmentation_output_dir)
        # --output_dir, when given, overrides the dir from the request config.
        if FLAGS.output_dir is None:
            root_output_dir = request.segmentation_output_dir
        else:
            root_output_dir = FLAGS.output_dir
        bbox = bounding_box_pb2.BoundingBox()
        text_format.Parse(FLAGS.bounding_box, bbox)
        subvolume_size = np.array([int(i) for i in FLAGS.subvolume_size])
        overlap = np.array([int(i) for i in FLAGS.overlap])
        sub_bboxes = divide_bounding_box(bbox, subvolume_size, overlap)
        if FLAGS.resume:
            # Drop sub-boxes that already have .npz output on disk.
            sub_bboxes = find_unfinished(sub_bboxes, root_output_dir)
        # One chunk of sub-boxes per MPI rank.
        sub_bboxes = np.array_split(np.array(sub_bboxes), mpi_size)
    else:
        sub_bboxes = None
        root_output_dir = None
    sub_bboxes = mpi_comm.scatter(sub_bboxes, 0)
    root_output_dir = mpi_comm.bcast(root_output_dir, 0)
    for sub_bbox in sub_bboxes:
        out_name = 'seg-%d_%d_%d_%d_%d_%d' % (
            sub_bbox.start[0], sub_bbox.start[1], sub_bbox.start[2],
            sub_bbox.size[0], sub_bbox.size[1], sub_bbox.size[2])
        segmentation_output_dir = os.path.join(root_output_dir, out_name)
        # Each sub-box writes into its own output directory.
        request.segmentation_output_dir = segmentation_output_dir
        # Round-robin GPU assignment across ranks; '' leaves device selection
        # to the runner's default.
        if FLAGS.num_gpu > 0:
            use_gpu = str(mpi_rank % FLAGS.num_gpu)
        else:
            use_gpu = ''
        runner = inference.Runner(use_cpu=FLAGS.use_cpu, use_gpu=use_gpu)
        cube_start_time = (time.time() - start_time) / 60
        runner.start(request)
        # start/size are reversed ([::-1]) before being handed to the runner —
        # presumably xyz -> zyx axis order; confirm against Runner.run.
        runner.run(sub_bbox.start[::-1], sub_bbox.size[::-1])
        cube_finish_time = (time.time() - start_time) / 60
        print('%s finished in %s min' % (out_name, cube_finish_time - cube_start_time))
        runner.stop_executor()
    # Dump per-rank counters; note request.segmentation_output_dir still points
    # at the last processed sub-volume's directory here.
    counter_path = os.path.join(request.segmentation_output_dir, 'counters_%d.txt' % mpi_rank)
    if not gfile.exists(counter_path):
        runner.counters.dump(counter_path)
if __name__ == '__main__':
    # absl's app.run parses flags before invoking main.
    app.run(main)
| [
"ffn.inference.inference_flags.request_from_flags",
"ffn.utils.bounding_box.BoundingBox",
"ffn.utils.bounding_box.OrderlyOverlappingCalculator",
"ffn.inference.inference.Runner",
"os.path.join",
"ffn.utils.bounding_box_pb2.BoundingBox",
"time.time",
"tensorflow.compat.v1.disable_eager_execution",
"a... | [((1324, 1362), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (1360, 1362), True, 'import tensorflow as tf\n'), ((1385, 1498), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""bounding_box"""', 'None', '"""BoundingBox proto in text format defining the area to segmented."""'], {}), "('bounding_box', None,\n 'BoundingBox proto in text format defining the area to segmented.')\n", (1404, 1498), False, 'from absl import flags\n'), ((1538, 1641), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""subvolume_size"""', '"""512,512,128"""', '""""valid"subvolume_size to issue to each runner"""'], {}), '(\'subvolume_size\', \'512,512,128\',\n \'"valid"subvolume_size to issue to each runner\')\n', (1555, 1641), False, 'from absl import flags\n'), ((1638, 1697), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""overlap"""', '"""32,32,16"""', '"""overlap of bbox"""'], {}), "('overlap', '32,32,16', 'overlap of bbox')\n", (1655, 1697), False, 'from absl import flags\n'), ((1698, 1784), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_dir"""', 'None', '"""Enforce output dir regardless of config"""'], {}), "('output_dir', None,\n 'Enforce output dir regardless of config')\n", (1717, 1784), False, 'from absl import flags\n'), ((1781, 1845), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""use_cpu"""', '(False)', '"""Use CPU instead of GPU"""'], {}), "('use_cpu', False, 'Use CPU instead of GPU')\n", (1801, 1845), False, 'from absl import flags\n'), ((1846, 1910), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_gpu"""', '(0)', '"""Allocate on different GPUs"""'], {}), "('num_gpu', 0, 'Allocate on different GPUs')\n", (1866, 1910), False, 'from absl import flags\n'), ((1911, 1981), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""resume"""', '(False)', '"""Whether resuming from cpoints"""'], {}), "('resume', False, 'Whether resuming from cpoints')\n", (1931, 
1981), False, 'from absl import flags\n'), ((1982, 2037), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""verbose"""', '(False)', '"""logging level"""'], {}), "('verbose', False, 'logging level')\n", (2002, 2037), False, 'from absl import flags\n'), ((2193, 2230), 'ffn.utils.geom_utils.ToNumpy3Vector', 'geom_utils.ToNumpy3Vector', (['bbox.start'], {}), '(bbox.start)\n', (2218, 2230), False, 'from ffn.utils import geom_utils\n'), ((2240, 2276), 'ffn.utils.geom_utils.ToNumpy3Vector', 'geom_utils.ToNumpy3Vector', (['bbox.size'], {}), '(bbox.size)\n', (2265, 2276), False, 'from ffn.utils import geom_utils\n'), ((2287, 2324), 'ffn.utils.bounding_box.BoundingBox', 'bounding_box.BoundingBox', (['start', 'size'], {}), '(start, size)\n', (2311, 2324), False, 'from ffn.utils import bounding_box\n'), ((2335, 2511), 'ffn.utils.bounding_box.OrderlyOverlappingCalculator', 'bounding_box.OrderlyOverlappingCalculator', ([], {'outer_box': 'bbox', 'sub_box_size': 'subvolume_size', 'overlap': 'overlap', 'include_small_sub_boxes': '(True)', 'back_shift_small_sub_boxes': '(False)'}), '(outer_box=bbox, sub_box_size=\n subvolume_size, overlap=overlap, include_small_sub_boxes=True,\n back_shift_small_sub_boxes=False)\n', (2376, 2511), False, 'from ffn.utils import bounding_box\n'), ((3457, 3468), 'time.time', 'time.time', ([], {}), '()\n', (3466, 3468), False, 'import time\n'), ((3481, 3517), 'ffn.inference.inference_flags.request_from_flags', 'inference_flags.request_from_flags', ([], {}), '()\n', (3515, 3517), False, 'from ffn.inference import inference_flags\n'), ((5213, 5288), 'os.path.join', 'os.path.join', (['request.segmentation_output_dir', "('counters_%d.txt' % mpi_rank)"], {}), "(request.segmentation_output_dir, 'counters_%d.txt' % mpi_rank)\n", (5225, 5288), False, 'import os\n'), ((5395, 5408), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (5402, 5408), False, 'from absl import app\n'), ((2884, 2923), 'os.path.join', 'os.path.join', (['root_output_dir', 
'out_name'], {}), '(root_output_dir, out_name)\n', (2896, 2923), False, 'import os\n'), ((3308, 3327), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3325, 3327), False, 'import logging\n'), ((3383, 3402), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3400, 3402), False, 'import logging\n'), ((3802, 3832), 'ffn.utils.bounding_box_pb2.BoundingBox', 'bounding_box_pb2.BoundingBox', ([], {}), '()\n', (3830, 3832), False, 'from ffn.utils import bounding_box_pb2\n'), ((3837, 3880), 'google.protobuf.text_format.Parse', 'text_format.Parse', (['FLAGS.bounding_box', 'bbox'], {}), '(FLAGS.bounding_box, bbox)\n', (3854, 3880), False, 'from google.protobuf import text_format\n'), ((4618, 4657), 'os.path.join', 'os.path.join', (['root_output_dir', 'out_name'], {}), '(root_output_dir, out_name)\n', (4630, 4657), False, 'import os\n'), ((4834, 4890), 'ffn.inference.inference.Runner', 'inference.Runner', ([], {'use_cpu': 'FLAGS.use_cpu', 'use_gpu': 'use_gpu'}), '(use_cpu=FLAGS.use_cpu, use_gpu=use_gpu)\n', (4850, 4890), False, 'from ffn.inference import inference\n'), ((2945, 2994), 'os.path.join', 'os.path.join', (['segmentation_output_dir', '"""**/*.npz"""'], {}), "(segmentation_output_dir, '**/*.npz')\n", (2957, 2994), False, 'import os\n'), ((4194, 4214), 'numpy.array', 'np.array', (['sub_bboxes'], {}), '(sub_bboxes)\n', (4202, 4214), True, 'import numpy as np\n'), ((4914, 4925), 'time.time', 'time.time', ([], {}), '()\n', (4923, 4925), False, 'import time\n'), ((5053, 5064), 'time.time', 'time.time', ([], {}), '()\n', (5062, 5064), False, 'import time\n')] |
import numpy as np
import h0rton.h0_inference.mcmc_utils as mcmc_utils
import unittest
class TestMCMCUtils(unittest.TestCase):
    """A suite of tests for the h0rton.h0_inference.mcmc_utils package
    """

    @classmethod
    def setUpClass(cls):
        # Shared fixture: a Baobab-style dictionary of initial lens/source
        # parameter values used to seed the kwargs builders under test.
        cls.init_dict = dict(
            external_shear_gamma1=0.01,
            external_shear_gamma2=-0.005,
            lens_light_R_sersic=1.0,
            lens_light_center_x=0.01,
            lens_light_center_y=0.01,
            lens_light_e1=-0.01,
            lens_light_e2=-0.2,
            lens_mass_center_x=0.02,
            lens_mass_center_y=0.01,
            lens_mass_e1=-0.1,
            lens_mass_e2=-0.1,
            lens_mass_gamma=2.0,
            lens_mass_theta_E=1.2,
            src_light_center_x=-0.04,
            src_light_center_y=0.08,
        )

    def test_get_lens_kwargs(self):
        """Test if the default lens kwargs dictionary config is plausible
        """
        not_null_spread = mcmc_utils.get_lens_kwargs(self.init_dict, null_spread=False)
        lens_param_names = not_null_spread[-1][0].keys()
        ext_shear_names = not_null_spread[-1][-1].keys()
        # Check that lower is less than upper
        for p in lens_param_names:
            assert not_null_spread[-2][0][p] < not_null_spread[-1][0][p]
        for p in ext_shear_names:
            assert not_null_spread[-2][-1][p] < not_null_spread[-1][-1][p]
        null_spread = mcmc_utils.get_lens_kwargs(self.init_dict, null_spread=True)
        # Fixed, external_shear, ra_0 should equal init, lens_mass, center_x
        assert null_spread[2][-1]['ra_0'] == null_spread[0][0]['center_x']
        assert null_spread[2][-1]['dec_0'] == null_spread[0][0]['center_y']
        # With null_spread=True the proposal sigmas should all be tiny (< 1.e-5).
        for param_name, param_sigma in null_spread[1][0].items():
            assert param_sigma < 1.e-5
        for param_name, param_sigma in null_spread[1][-1].items():
            assert param_sigma < 1.e-5

    def test_get_ps_kwargs(self):
        """Test if the default ps kwargs dictionary config defined on the image plane is plausible
        """
        ps_kwargs = mcmc_utils.get_ps_kwargs(measured_img_ra=np.random.randn(4), measured_img_dec=np.random.randn(4), astrometry_sigma=0.005)
        ps_param_names = ps_kwargs[-1][0].keys()
        # Check that lower is less than upper
        for p in ps_param_names:
            assert np.all(ps_kwargs[-2][0][p] < ps_kwargs[-1][0][p])

    def test_get_ps_kwargs_src_plane(self):
        """Test if the default ps kwargs dictionary config defined on the source plane is plausible
        """
        ps_kwargs = mcmc_utils.get_ps_kwargs_src_plane(self.init_dict, astrometry_sigma=0.005)
        ps_param_names = ps_kwargs[-1][0].keys()
        # Check that lower is less than upper
        for p in ps_param_names:
            assert ps_kwargs[-2][0][p] < ps_kwargs[-1][0][p]

    def test_get_light_kwargs(self):
        """Test if the default light kwargs dictionary config is plausible
        """
        init_R = 0.5
        not_null_spread = mcmc_utils.get_light_kwargs(init_R, null_spread=False)
        lens_light_param_names = not_null_spread[-1][0].keys()
        # Check that lower is less than upper
        for p in lens_light_param_names:
            assert not_null_spread[-2][0][p] < not_null_spread[-1][0][p]
        null_spread = mcmc_utils.get_light_kwargs(init_R, null_spread=True)
        # With null_spread=True the proposal sigmas should all be tiny (< 1.e-5).
        for param_name, param_sigma in null_spread[1][0].items():
            assert param_sigma < 1.e-5

    def test_get_special_kwargs(self):
        """Test if the default special kwargs dictionary config is plausible
        """
        special_kwargs = mcmc_utils.get_special_kwargs(n_img=4, astrometry_sigma=5.e-3)
        special_param_names = special_kwargs[-1].keys()
        # Check that lower is less than upper
        for p in special_param_names:
            assert np.all(special_kwargs[-2][p] < special_kwargs[-1][p])

    def test_postprocess_mcmc_chain(self):
        # TODO: exercise mcmc_utils.postprocess_mcmc_chain once a fixture exists.
        pass

    def test_HybridBNNPenalty(self):
        # TODO: exercise mcmc_utils.HybridBNNPenalty once a fixture exists.
        pass

    def test_get_idx_for_params(self):
        """Test if `get_idx_for_params` returns the right indices
        """
        Y_dim = 4
        # out_dim = mu (Y_dim) + tril (Y_dim*(Y_dim+1)/2) terms for both
        # Gaussian components plus the mixture weight, i.e. Y_dim**2 + 3*Y_dim + 1.
        out_dim = Y_dim**2 + 3*Y_dim + 1
        orig_Y_cols = ['a', 'b', 'c', 'd']
        to_test = mcmc_utils.get_idx_for_params(out_dim, orig_Y_cols, ['a', 'c'], 'DoubleGaussianNLL', debug=True)
        # Expected lower-triangular mask entries for columns 'a' and 'c'.
        tril_mask = np.array([0, 1, 3, 4, 5, 6, 8])
        idx_within_tril1 = Y_dim + tril_mask
        param_idx = [0, 2]
        np.testing.assert_array_equal(to_test['param_idx'], param_idx)
        np.testing.assert_array_equal(np.sort(to_test['tril_mask']), np.sort(tril_mask))
        np.testing.assert_array_equal(np.sort(to_test['idx_within_tril1']), np.sort(idx_within_tril1))

    def test_remove_parameters_from_pred(self):
        """Test if correct parameters are removed from the NN output
        """
        orig_pred = np.arange(20).reshape([4, 5])
        #array([[ 0,  1,  2,  3,  4],
        #       [ 5,  6,  7,  8,  9],
        #       [10, 11, 12, 13, 14],
        #       [15, 16, 17, 18, 19]])
        remove_idx = [1, 3]
        new_pred = mcmc_utils.remove_parameters_from_pred(orig_pred, remove_idx, return_as_tensor=False, device='cpu')
        # Columns 1 and 3 dropped from every row.
        expected_new_pred = np.array([[ 0, 2, 4],
                                      [ 5, 7, 9],
                                      [10, 12, 14],
                                      [15, 17, 19]])
        np.testing.assert_array_equal(new_pred, expected_new_pred, err_msg="test_remove_parameters_from_pred")

    def test_split_component_param(self):
        """Test if string split of Baobab column names is done correctly
        """
        actual = mcmc_utils.split_component_param('lens_mass_theta_E', sep='_', pos=2)
        expected = ('lens_mass', 'theta_E')
        assert actual == expected

    def test_dict_to_array(self):
        """Test if dict from MCMC iteration is converted into array form correctly
        """
        Y_cols = ['lens_mass_gamma', 'lens_mass_theta_E', 'lens_mass_center_x', 'lens_mass_center_y', 'external_shear_gamma1', 'external_shear_gamma2', 'src_light_R_sersic', 'src_light_center_x', 'src_light_center_y']
        kwargs_ps = [{'ra_source': 0.1, 'dec_source': -0.2}]
        kwargs_source = [{'R_sersic': 0.3}]
        kwargs_lens = [{'gamma': 2.0, 'theta_E': 1.5, 'center_x': -0.05, 'center_y': 0.01}, {'gamma1': -0.01, 'gamma2': 0.005}]
        actual = mcmc_utils.dict_to_array(Y_cols, kwargs_lens, kwargs_source, kwargs_lens_light=None, kwargs_ps=kwargs_ps,)
        # Source centers are expected relative to the lens-mass center, hence
        # the offsets in the last two entries.
        expected = np.array([[2.0, 1.5, -0.05, 0.01, -0.01, 0.005, 0.3, 0.1 - (-0.05), -0.2 - 0.01]])
        np.testing.assert_array_equal(actual, expected, err_msg="test_dict_to_array")

    def test_reorder_to_param_class(self):
        """Test if dict from MCMC iteration is converted into array form correctly
        """
        bnn_array = np.arange(20).reshape([1, 4, 5])
        bnn_Y_cols = ['lens_mass_center_x', 'lens_mass_center_y', 'src_light_center_x', 'src_light_center_y', 'lens_mass_theta_E']
        D_dt_array = -np.arange(4).reshape([1, 4, 1])
        param_class_Y_cols = ['ra_source', 'dec_source', 'theta_E_lens0', 'center_x_lens0', 'center_y_lens0', 'D_dt']
        actual = mcmc_utils.reorder_to_param_class(bnn_Y_cols, param_class_Y_cols, bnn_array, D_dt_array)
        # ra/dec_source are the BNN's relative source offsets plus the lens
        # center; D_dt is appended as the final column.
        expected = np.array([[[ 2+0, 3+1, 4, 0, 1, -0],
                              [ 7+5, 8+6, 9, 5, 6, -1],
                              [ 12+10, 13+11, 14, 10, 11, -2],
                              [ 17+15, 18+16, 19, 15, 16, -3]]])
        np.testing.assert_array_equal(actual, expected, err_msg="test_reorder_to_param_class")
if __name__ == '__main__':
    # Run the suite via the standard unittest CLI entry point.
    unittest.main()
"unittest.main",
"h0rton.h0_inference.mcmc_utils.reorder_to_param_class",
"numpy.random.randn",
"numpy.testing.assert_array_equal",
"h0rton.h0_inference.mcmc_utils.split_component_param",
"h0rton.h0_inference.mcmc_utils.dict_to_array",
"numpy.all",
"h0rton.h0_inference.mcmc_utils.get_special_kwargs",
... | [((8100, 8115), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8113, 8115), False, 'import unittest\n'), ((1244, 1305), 'h0rton.h0_inference.mcmc_utils.get_lens_kwargs', 'mcmc_utils.get_lens_kwargs', (['self.init_dict'], {'null_spread': '(False)'}), '(self.init_dict, null_spread=False)\n', (1270, 1305), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((1705, 1765), 'h0rton.h0_inference.mcmc_utils.get_lens_kwargs', 'mcmc_utils.get_lens_kwargs', (['self.init_dict'], {'null_spread': '(True)'}), '(self.init_dict, null_spread=True)\n', (1731, 1765), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((2917, 2991), 'h0rton.h0_inference.mcmc_utils.get_ps_kwargs_src_plane', 'mcmc_utils.get_ps_kwargs_src_plane', (['self.init_dict'], {'astrometry_sigma': '(0.005)'}), '(self.init_dict, astrometry_sigma=0.005)\n', (2951, 2991), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((3354, 3408), 'h0rton.h0_inference.mcmc_utils.get_light_kwargs', 'mcmc_utils.get_light_kwargs', (['init_R'], {'null_spread': '(False)'}), '(init_R, null_spread=False)\n', (3381, 3408), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((3654, 3707), 'h0rton.h0_inference.mcmc_utils.get_light_kwargs', 'mcmc_utils.get_light_kwargs', (['init_R'], {'null_spread': '(True)'}), '(init_R, null_spread=True)\n', (3681, 3707), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((4016, 4078), 'h0rton.h0_inference.mcmc_utils.get_special_kwargs', 'mcmc_utils.get_special_kwargs', ([], {'n_img': '(4)', 'astrometry_sigma': '(0.005)'}), '(n_img=4, astrometry_sigma=0.005)\n', (4045, 4078), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((4669, 4769), 'h0rton.h0_inference.mcmc_utils.get_idx_for_params', 'mcmc_utils.get_idx_for_params', (['out_dim', 'orig_Y_cols', "['a', 'c']", '"""DoubleGaussianNLL"""'], {'debug': '(True)'}), "(out_dim, orig_Y_cols, ['a', 'c'],\n 'DoubleGaussianNLL', debug=True)\n", (4698, 4769), True, 'import 
h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((4786, 4817), 'numpy.array', 'np.array', (['[0, 1, 3, 4, 5, 6, 8]'], {}), '([0, 1, 3, 4, 5, 6, 8])\n', (4794, 4817), True, 'import numpy as np\n'), ((4898, 4960), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["to_test['param_idx']", 'param_idx'], {}), "(to_test['param_idx'], param_idx)\n", (4927, 4960), True, 'import numpy as np\n'), ((5516, 5619), 'h0rton.h0_inference.mcmc_utils.remove_parameters_from_pred', 'mcmc_utils.remove_parameters_from_pred', (['orig_pred', 'remove_idx'], {'return_as_tensor': '(False)', 'device': '"""cpu"""'}), "(orig_pred, remove_idx,\n return_as_tensor=False, device='cpu')\n", (5554, 5619), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((5644, 5704), 'numpy.array', 'np.array', (['[[0, 2, 4], [5, 7, 9], [10, 12, 14], [15, 17, 19]]'], {}), '([[0, 2, 4], [5, 7, 9], [10, 12, 14], [15, 17, 19]])\n', (5652, 5704), True, 'import numpy as np\n'), ((5833, 5940), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['new_pred', 'expected_new_pred'], {'err_msg': '"""test_remove_parameters_from_pred"""'}), "(new_pred, expected_new_pred, err_msg=\n 'test_remove_parameters_from_pred')\n", (5862, 5940), True, 'import numpy as np\n'), ((6082, 6151), 'h0rton.h0_inference.mcmc_utils.split_component_param', 'mcmc_utils.split_component_param', (['"""lens_mass_theta_E"""'], {'sep': '"""_"""', 'pos': '(2)'}), "('lens_mass_theta_E', sep='_', pos=2)\n", (6114, 6151), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((6829, 6938), 'h0rton.h0_inference.mcmc_utils.dict_to_array', 'mcmc_utils.dict_to_array', (['Y_cols', 'kwargs_lens', 'kwargs_source'], {'kwargs_lens_light': 'None', 'kwargs_ps': 'kwargs_ps'}), '(Y_cols, kwargs_lens, kwargs_source,\n kwargs_lens_light=None, kwargs_ps=kwargs_ps)\n', (6853, 6938), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((6955, 7040), 'numpy.array', 'np.array', (['[[2.0, 1.5, -0.05, 0.01, 
-0.01, 0.005, 0.3, 0.1 - -0.05, -0.2 - 0.01]]'], {}), '([[2.0, 1.5, -0.05, 0.01, -0.01, 0.005, 0.3, 0.1 - -0.05, -0.2 - 0.01]]\n )\n', (6963, 7040), True, 'import numpy as np\n'), ((7046, 7123), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['actual', 'expected'], {'err_msg': '"""test_dict_to_array"""'}), "(actual, expected, err_msg='test_dict_to_array')\n", (7075, 7123), True, 'import numpy as np\n'), ((7637, 7729), 'h0rton.h0_inference.mcmc_utils.reorder_to_param_class', 'mcmc_utils.reorder_to_param_class', (['bnn_Y_cols', 'param_class_Y_cols', 'bnn_array', 'D_dt_array'], {}), '(bnn_Y_cols, param_class_Y_cols, bnn_array,\n D_dt_array)\n', (7670, 7729), True, 'import h0rton.h0_inference.mcmc_utils as mcmc_utils\n'), ((7745, 7892), 'numpy.array', 'np.array', (['[[[2 + 0, 3 + 1, 4, 0, 1, -0], [7 + 5, 8 + 6, 9, 5, 6, -1], [12 + 10, 13 + \n 11, 14, 10, 11, -2], [17 + 15, 18 + 16, 19, 15, 16, -3]]]'], {}), '([[[2 + 0, 3 + 1, 4, 0, 1, -0], [7 + 5, 8 + 6, 9, 5, 6, -1], [12 + \n 10, 13 + 11, 14, 10, 11, -2], [17 + 15, 18 + 16, 19, 15, 16, -3]]])\n', (7753, 7892), True, 'import numpy as np\n'), ((7981, 8072), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['actual', 'expected'], {'err_msg': '"""test_reorder_to_param_class"""'}), "(actual, expected, err_msg=\n 'test_reorder_to_param_class')\n", (8010, 8072), True, 'import numpy as np\n'), ((2689, 2738), 'numpy.all', 'np.all', (['(ps_kwargs[-2][0][p] < ps_kwargs[-1][0][p])'], {}), '(ps_kwargs[-2][0][p] < ps_kwargs[-1][0][p])\n', (2695, 2738), True, 'import numpy as np\n'), ((4238, 4291), 'numpy.all', 'np.all', (['(special_kwargs[-2][p] < special_kwargs[-1][p])'], {}), '(special_kwargs[-2][p] < special_kwargs[-1][p])\n', (4244, 4291), True, 'import numpy as np\n'), ((4999, 5028), 'numpy.sort', 'np.sort', (["to_test['tril_mask']"], {}), "(to_test['tril_mask'])\n", (5006, 5028), True, 'import numpy as np\n'), ((5030, 5048), 'numpy.sort', 'np.sort', (['tril_mask'], {}), 
'(tril_mask)\n', (5037, 5048), True, 'import numpy as np\n'), ((5088, 5124), 'numpy.sort', 'np.sort', (["to_test['idx_within_tril1']"], {}), "(to_test['idx_within_tril1'])\n", (5095, 5124), True, 'import numpy as np\n'), ((5126, 5151), 'numpy.sort', 'np.sort', (['idx_within_tril1'], {}), '(idx_within_tril1)\n', (5133, 5151), True, 'import numpy as np\n'), ((2461, 2479), 'numpy.random.randn', 'np.random.randn', (['(4)'], {}), '(4)\n', (2476, 2479), True, 'import numpy as np\n'), ((2498, 2516), 'numpy.random.randn', 'np.random.randn', (['(4)'], {}), '(4)\n', (2513, 2516), True, 'import numpy as np\n'), ((5304, 5317), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (5313, 5317), True, 'import numpy as np\n'), ((7284, 7297), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (7293, 7297), True, 'import numpy as np\n'), ((7470, 7482), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (7479, 7482), True, 'import numpy as np\n')] |
# coding: utf-8
# # Monetary Economics: Chapter 3
# From "Monetary Economics: An Integrated Approach to Credit, Money, Income, Production and Wealth, 2nd ed" by Wynne Godley and Marc Lavoie, 2012.
# ## The Simplest Model with Government Money, Model SIM
# Assumptions
# * No private money, only Government money (no private banks)
# * No profits, *pure labor economy*
# * Fixed price of labor, unlimited quantity of labor, thus the economy is not supply-constrained.
# ## Transactions matrix for Model SIM
# ||1.Households|2.Production|3.Government|Σ|
# |-------|:------:|:--------:|:--:|-----|
# |1.Consumption|-C|+C||0|
# |2.Govt expenditures||+G|-G|0|
# |3.[Output]||[Y]|||
# |4.Factor income (wages)|+WB|-WB||0|
# |5.Taxes|-T||+T|0|
# |6.Change in the stock of money|-ΔH||+ΔH|0|
# |Σ|0|0|0|0|
# Definition of terms
#
# * **C** : Consumption goods demand by households
#
# * **G** : Government expenditures
#
# * **Y** : National income
#
# * **WB** : Wage bill
#
# * **T** : Taxes
#
# * **ΔH** : Change in cash money
#
# In this model, people (as consumers and producers of income) have been separated.
# ## Behavioral (transactions) matrix for Model SIM
# ||1.Households|2.Production|3.Government|Σ|
# |----------|:----------:|:----------:|:---:||
# |1.Consumption|-Cd|+Cs||0|
# |2.Govt expenditures||+Gs|-Gd|0|
# |3.[Output]||[Y]|||
# |4.Factor income (wages)|+W•Ns|-W•Nd||0|
# |5.Taxes|-Ts||+Td|0|
# |6.Change in the stock of money|-ΔHh||+ΔHs|0|
# |Σ|0|0|0|0|
# Differences from previous matrix:
# * Each transaction has a suffix, *s*, *d*, and *h*.
# * *s* supply
# * *d* demand
# * *h* household cash
# * The Wage Bill (WB) has been separated into two parts.
# * *W* Wage rate
# * *N* employment
# Definition of terms
# * **Cd** : Consumption goods demand by households
# * **Cs** : Consumption goods supply by firms
# * **Gs** : Services supplied by the government
# * **Gd** : Services demanded from government
# * **Y** : National income
# * **W** : Wage rate
# * **Ns** : Supply of labor
# * **Nd** : Demand for labor
# * **Ts** : Taxes supplied
# * **Td** : Taxes demanded by government
# * **ΔHh** : Change in cash money held by households
# * **ΔHs** : Change in cash money supplied by the central bank
# ## Model SIM
# > From here, I will be building the model in code.
# > Because this is the first model, the Python code will
# > be explained in more detail also.
# **Important:** Use sympy version 0.7.5
#
# The following piece of code is necessary to show the graphics inline for iPython notebooks. To view the graphs, matplotlib is required.
# In[1]:
# This line configures matplotlib to show figures embedded in the notebook,
# If you are using an old version of IPython, try using '%pylab inline' instead.
from pysolve.model import Model
from pysolve.utils import is_close,round_solution
import matplotlib.pyplot as plt
# ###### Preliminaries
# In order to build the model, we must first start off by importing several modules that will be used to build the model. *pysolve* is a Python module that I have developed to make it easier to specify and solve linear models.
#
# The first line will import the main Model class. The second line imports several utility functions that will prove useful.
# In[2]:
# ###### Create the model
# The first step when developing a pysolve model is to create the model. This is just an empty model for now, but we will be adding the rest of the information to this.
# In[3]:
model = Model()
# ###### Define the variables
# The (endogenous) variables are the quantities the solver is allowed to
# manipulate. Every variable starts from the same default value (0) unless
# changed individually.
model.set_var_default(0)
# Register every variable used by SIM together with a short description.
for _vname, _vdesc in [
        ('Cd', 'Consumption goods demand by households'),
        ('Cs', 'Consumption goods supply'),
        ('Gs', 'Government goods, supply'),
        ('Hh', 'Cash money held by households'),
        ('Hs', 'Cash money supplied by the government'),
        ('Nd', 'Demand for labor'),
        ('Ns', 'Supply of labor'),
        ('Td', 'Taxes, demand'),
        ('Ts', 'Taxes, supply'),
        ('Y', 'Income = GDP'),
        ('YD', 'Disposable income of households')]:
    model.var(_vname, desc=_vdesc)
# As an aside, multiple variables can be created by the following code. But the above is more descriptive.
#
# ```python
# model.vars('Y', 'YD', 'Ts', 'Td', 'Hs', 'Hh', 'Gs', 'Cs',
# 'Cd', 'Ns', 'Nd')
# ```
#
# The value of the variables may also be changed mid-iteration. They will then be used to seed the value of the next iteration.
#
# For example
# ```python
# varx = model.var('x')
#
# # ... later
# varx.value = 22
# # this will also work
# model.variables['x'].value = 22
# ```
# Aside: the semicolon ';' at the end of the last line of code is an iPython artifact, and is not needed by the python code. It is used to suppress output by the iPython interpreter.
# ###### Define the parameters
# The next step is to define the parameters. I do not differentiate between exogenous variables and parameters since both are set outside of the model. The solver will not be able to change these values. However, the user may change these values between calls to the solver.
#
# Like the variables, there is a call that may be made to set a default value for all parameters, but I will be creating the parameters with their default values. The call would look like this
# ```python
# model.set_parameter_initial(1.0)
# ```
#
# In addition the parameter values could be changed like this:
# ```python
# Gd = model.param('Gd', initial=10)
# # ...
# # at some later time
# Gd.value = 20
# # or this would work also
# model.parameters['Gd'].value = 20
# ```
# Some of the parameters (alpha1, alpha2 and theta) have not been explained yet, but will be explained when we add the equations that use them.
# In[6]:
# ###### Define the parameters
# Parameters are exogenous: the solver never changes them, but the user may
# alter them between calls to solve().
model.param('Gd', desc='Government goods, demand', default=20.)
model.param('W', desc='Wage rate', default=1.)
model.param('alpha1', desc='Propensity to consume out of income', default=0.6)
# Fixed description typo ('o of' -> 'out of'); now consistent with the same
# parameter's description in create_sim_model() later in this file.
model.param('alpha2', desc='Propensity to consume out of wealth', default=0.4)
model.param('theta', desc='Tax rate', default=0.2);
# ###### Define the equations
# Adding an equation is just adding the textual form of the equation. There are some restrictions. Linear systems only.
# In[7]:
# ###### Model SIM equations.  The (3.x) numbers follow the same numbering
# scheme used in the comments of create_sim_model() later in this file.
model.add('Cs = Cd')
model.add('Gs = Gd')
model.add('Ts = Td')
model.add('Ns = Nd');
# (3.1-3.4) These four equations imply that demand equals supply for this
# period, no supply constraints of any kind.
model.add('YD = (W*Ns) - Ts');
# (3.5) Disposable income (*YD*) is the wages earned by households minus taxes.
model.add('Td = theta * W * Ns');
# (3.6) Taxes are a fixed proportion (*theta*) of income. *theta* is decided
# by the government and is exogenous to the model.
model.add('Cd = alpha1*YD + alpha2*Hh(-1)');
# (3.7) The consumption function: a combination of consumption out of
# inherited wealth (*Hh(-1)*) and out of post-tax income (*YD*).
model.add('Hs - Hs(-1) = Gd - Td');
# (3.8) The government's budget constraint (from the transaction-flow matrix):
# expenditures not paid for by taxes (*Gd-Td*) must be covered by changes in
# the money supply.
model.add('Hh - Hh(-1) = YD - Cd');
# (3.9) The change in household cash equals income minus consumption.
model.add('Y = Cs + Gs');
# (3.10) The determination of national income.
model.add('Nd = Y/W');
# (3.11) The determination of employment.
# We now have 11 equations and 11 unknowns, each unknown on the left-hand
# side of exactly one equation, so Gauss-Seidel iteration can be applied.
# ###### Solve
# All variables default to 0; that zero vector is the initial guess.
model.solve(iterations=100, threshold=1e-4);
# In[16]:
# Report the rounded levels of the last solved period together with the
# period-to-period changes in the money stock.
prev = round_solution(model.solutions[-2], decimals=1)
solution = round_solution(model.solutions[-1], decimals=1)
for _label, _value in [('Y', solution['Y']),
                       ('T', solution['Ts']),
                       ('YD', solution['YD']),
                       ('C', solution['Cs']),
                       ('Hs-Hs(-1)', solution['Hs'] - prev['Hs']),
                       ('Hh-Hh(-1)', solution['Hh'] - prev['Hh']),
                       ('H', solution['Hh'])]:
    print(_label + " : " + str(_value))
# ### The code for the full model
# To make the model easier to manipulate, I will encapsulate model creation into a single function.
# In[17]:
def create_sim_model():
    """Build and return a fresh, unsolved copy of Model SIM.

    All variables default to 0. Parameter values are left unset; callers
    supply them via ``model.set_values`` before solving.
    """
    model = Model()
    model.set_var_default(0)
    # Endogenous variables.
    for name, desc in (('Cd', 'Consumption goods demand by households'),
                       ('Cs', 'Consumption goods supply'),
                       ('Gs', 'Government goods, supply'),
                       ('Hh', 'Cash money held by households'),
                       ('Hs', 'Cash money supplied by the government'),
                       ('Nd', 'Demand for labor'),
                       ('Ns', 'Supply of labor'),
                       ('Td', 'Taxes, demand'),
                       ('Ts', 'Taxes, supply'),
                       ('Y', 'Income = GDP'),
                       ('YD', 'Disposable income of households')):
        model.var(name, desc=desc)
    # Exogenous parameters (values set by the caller).
    for name, desc in (('Gd', 'Government goods, demand'),
                       ('W', 'Wage rate'),
                       ('alpha1', 'Propensity to consume out of income'),
                       ('alpha2', 'Propensity to consume out of wealth'),
                       ('theta', 'Tax rate')):
        model.param(name, desc=desc)
    # The model's 11 equations, numbered 3.1-3.11.
    for equation in ('Cs = Cd',                           # 3.1
                     'Gs = Gd',                           # 3.2
                     'Ts = Td',                           # 3.3
                     'Ns = Nd',                           # 3.4
                     'YD = (W*Ns) - Ts',                  # 3.5
                     'Td = theta * W * Ns',               # 3.6, theta < 1.0
                     'Cd = alpha1*YD + alpha2*Hh(-1)',    # 3.7, 0 < alpha2 < alpha1 < 1
                     'Hs - Hs(-1) = Gd - Td',             # 3.8
                     'Hh - Hh(-1) = YD - Cd',             # 3.9
                     'Y = Cs + Gs',                       # 3.10
                     'Nd = Y/W'):                         # 3.11
        model.add(equation)
    return model
# Now we can run the simulation using the model.
# In[18]:
# Build the model, assign the baseline parameter values, and solve once.
model = create_sim_model()
model.set_values({'alpha1': 0.6,
                  'alpha2': 0.4,
                  'theta': 0.2,
                  'Gd': 20,
                  'W': 1})
model.solve(iterations=100, threshold=1e-5)
# Report rounded levels of the last period plus the money-stock changes.
prev = round_solution(model.solutions[-2], decimals=1)
solution = round_solution(model.solutions[-1], decimals=1)
for _label, _value in [('Y', solution['Y']),
                       ('T', solution['Ts']),
                       ('YD', solution['YD']),
                       ('C', solution['Cs']),
                       ('Hs-Hs(-1)', solution['Hs'] - prev['Hs']),
                       ('Hh-Hh(-1)', solution['Hh'] - prev['Hh']),
                       ('H', solution['Hh'])]:
    print(_label + " : " + str(_value))
# ### Steady-state solution
# We now generate the steady-state solution by iterating until the solutions converge.
# In[19]:
# ### Steady-state solution
# Re-solve repeatedly, feeding each solution back in as the next starting
# point, until two consecutive solutions agree within tolerance (or 100
# rounds have elapsed).
steady_state = create_sim_model()
steady_state.set_values({'alpha1': 0.6,
                         'alpha2': 0.4,
                         'theta': 0.2,
                         'Gd': 20,
                         'W': 1})
for _ in range(100):
    steady_state.solve(iterations=100, threshold=1e-5)
    if is_close(steady_state.solutions[-2], steady_state.solutions[-1], atol=1e-4):
        break
# Report rounded levels of the converged period plus money-stock changes.
prev = round_solution(steady_state.solutions[-2], decimals=1)
solution = round_solution(steady_state.solutions[-1], decimals=1)
for _label, _value in [('Y', solution['Y']),
                       ('T', solution['Ts']),
                       ('YD', solution['YD']),
                       ('C', solution['Cs']),
                       ('Hs-Hs(-1)', solution['Hs'] - prev['Hs']),
                       ('Hh-Hh(-1)', solution['Hh'] - prev['Hh']),
                       ('H', solution['Hh'])]:
    print(_label + " : " + str(_value))
# ###### Table 3.4
# We can also generate table 3.4
# In[20]:
from IPython.display import HTML
import numpy
from pysolve.utils import generate_html_table
# Table 3.4: the four reported columns are periods 1, 2, 3 and the converged
# steady state, indexed into the accumulated solutions as 0, 1, 2 and -1.
data = list()
# Level/flow rows, read directly from the solution dictionaries.
for var in [('Gd', 'G'), ('Y', 'Y'), ('Ts', 'T'), ('YD', 'YD'), ('Cs', 'C')]:
    rowdata = list()
    rowdata.append(var[1])
    for i in [0, 1, 2, -1]:
        rowdata.append(str(numpy.round(steady_state.solutions[i][var[0]], decimals=1)))
    data.append(rowdata)
# Change rows (ΔHs, ΔHh): the first period reports the level itself (the
# model starts from scratch), later periods report the difference from the
# preceding period.  Note i == -1 pairs with i - 1 == -2, the last two
# solutions.
for var in [('Hs', 'ΔHs'), ('Hh', 'ΔHh')]:
    rowdata = list()
    rowdata.append(var[1])
    rowdata.append(str(numpy.round(steady_state.solutions[0][var[0]], decimals=1)))
    for i in [1, 2, -1]:
        rowdata.append(str(numpy.round(steady_state.solutions[i][var[0]] -
                                       steady_state.solutions[i-1][var[0]], decimals=1)))
    data.append(rowdata)
# The wealth level H itself, reported as a plain level row.
for var in [('Hh', 'H')]:
    rowdata = list()
    rowdata.append(var[1])
    for i in [0, 1, 2, -1]:
        rowdata.append(str(numpy.round(steady_state.solutions[i][var[0]], decimals=1)))
    data.append(rowdata)
s = generate_html_table(['Period', '1', '2', '3', '∞'], data)
HTML(s)
# ### Scenario: Model SIM, increase government expenditures
# In[21]:
# Scenario: permanent increase in government expenditures (Model SIM).
step_model = create_sim_model()
step_model.set_values({'alpha1': 0.6,
                       'alpha2': 0.4,
                       'theta': 0.2,
                       'Gd': 20,
                       'W': 1})
# Use the steady state solution as a starting point
step_model.solutions = steady_state.solutions[-2:]
for period in range(45):
    step_model.solve(iterations=100, threshold=1e-5)
    # After the third solved period, raise government expenditure by 5.
    if period == 2:
        step_model.parameters['Gd'].value += 5
# ###### Figure 3.1
# Calculate the solution but with an permanent increase in government expenditures (+5) and starting from the steady state solution.
# In[22]:
caption = '''
Figure 3.1 Impact on national income Y and the steady state solution Y*,
following a permanent increase in government expenditures ($\\bigtriangleup$G = 5).'''
# G/theta traces the (moving) steady-state income target Y* implied by each
# period's fiscal stance; Y converges towards it after the step in G.
gdata = [s['Gd']/s['theta'] for s in step_model.solutions]
ydata = [s['Y'] for s in step_model.solutions]
# Now graph G/theta and Y
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(97, 129)
axes.plot(gdata, 'r') # plot G/theta
axes.plot(ydata, linestyle='--', color='g') # plot Y
# add labels
plt.text(10, 126, 'Steady-state solution Y*')
plt.text(15, 120, 'Income Y')
fig.text(.1, -.1, caption);
# ###### Figure 3.2
# In[23]:
caption = '''
Figure 3.2 Disposable income and consumption starting from scratch (Table 3.4)'''
# Series taken from the from-scratch run accumulated in steady_state.
cdata = [s['Cd'] for s in steady_state.solutions]
yddata = [s['YD'] for s in steady_state.solutions]
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.0, 1.0])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(0, 85)
axes.set_xlim(-2, 50)
axes.plot(cdata, linestyle=':', color='r') # plot C
axes.plot(yddata, linestyle='--', color='g') # plot YD
# Horizontal reference line at the steady-state level (80).
plt.axhline(y=80, color='k')
# add labels
plt.text(2, 72, 'Disposable')
plt.text(2, 68, 'Income YD')
plt.text(10, 60, 'Consumption C')
fig.text(0.1, 0, caption);
# ###### Figure 3.3
# In[24]:
caption = '''
Figure 3.3 Wealth change and wealth level starting from scratch (Table 3.4)'''
hdata = [s['Hh'] for s in steady_state.solutions]
deltahdata = [s['Hh'] for s in steady_state.solutions]
# Convert the copied level series into first differences in place; element 0
# keeps the initial level (0 when starting from scratch).
for i in range(1, len(steady_state.solutions)):
    deltahdata[i] -= hdata[i-1]
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.0, 1.0])
axes.tick_params(top=False)
# Left axis: the change in H; right (twin) axis: the level of H.
axes.set_ylim(0, 13)
axes.set_xlim(-2, 50)
axes.plot(deltahdata, linestyle='--', color='b')
axes2 = axes.twinx()
axes2.set_ylim(0, 85)
axes2.set_xlim(-2, 50)
axes2.plot(hdata, 'r')
# add labels
plt.text(20, 16, 'Household saving')
plt.text(20, 12, '(the change in the money stock)')
plt.text(20, 70, 'Wealth level H (money stock)')
fig.text(0.1, -0.05, caption);
# ###### Figure 3.4
# In[25]:
caption = '''
Figure 3.4 Evolution of wealth, target wealth, consumption and disposable income
following an increase in government expenditures ($\\bigtriangleup$G = 5) Model SIM '''
hdata = [s['Hh'] for s in step_model.solutions]
cdata = [s['Cs'] for s in step_model.solutions]
# Target wealth V* = YD*(1-alpha1)/alpha2, derived from the consumption
# function (3.7); in the steady state it coincides with disposable income.
vtdata = [s['YD']*(1.-s['alpha1'])/s['alpha2'] for s in step_model.solutions]
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.0, 1.0])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(78, 102)
axes.set_xlim(-2, 50)
# NOTE(review): legend labels below are in Portuguese, unlike the rest of the
# document (Riqueza = wealth, Consumo = consumption) — confirm intended.
axes.plot(hdata, linestyle='-', color='g', label='Riqueza')
axes.plot(cdata, linestyle=':', color='r', linewidth=2, label='Consumo')
axes.plot(vtdata, linestyle='--', color='b', label='Riqueza objetivo (e renda disponível)')
plt.legend(loc=(0.35,0.2), frameon=False)
fig.text(0.1, -0.05, caption);
# ### Scenario: Model SIM, increase propensity to consume
# In[26]:
# Scenario: higher propensity to consume out of income (alpha1: 0.6 -> 0.7).
alpha_model = create_sim_model()
alpha_model.set_values({'alpha1': 0.6,
                        'alpha2': 0.4,
                        'theta': 0.2,
                        'Gd': 20,
                        'W': 1})
# Use the steady state solution as a starting point
alpha_model.solutions = steady_state.solutions[-2:]
for period in range(50):
    alpha_model.solve(iterations=100, threshold=1e-4)
    # After the third solved period, bump alpha1 from 0.6 to 0.7.
    if period == 2:
        alpha_model.parameters['alpha1'].value = 0.7
# ###### Figure 3.8
# We will need to generate solutions that involve a change in alpha1 (from 0.6 to 0.7).
# In[27]:
caption = '''
Figure 3.8 Evolution of consumption, disposable income and wealth following
an increase in the propensity to consume out of current income
($\\alpha_1$ moves from 0.6 to 0.7)'''
hdata = [s['Hh'] for s in alpha_model.solutions]
cdata = [s['Cs'] for s in alpha_model.solutions]
vtdata = [s['YD'] for s in alpha_model.solutions]
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.0, 1.0])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(58, 100)
axes.set_xlim(-2, 50)
axes.plot(hdata, linestyle='-', color='g')  # wealth H
axes.plot(cdata, linestyle=':', color='r', linewidth=2)  # consumption C
axes.plot(vtdata, linestyle='--', color='b')  # disposable income YD
plt.text(6, 97, 'Consumption')
plt.text(8, 79, 'Disposable income')
plt.text(20, 62, 'Wealth')
fig.text(0.1, -0.1, caption);
| [
"pysolve.model.Model",
"matplotlib.pyplot.axhline",
"pysolve.utils.generate_html_table",
"matplotlib.pyplot.legend",
"pysolve.utils.is_close",
"matplotlib.pyplot.text",
"pysolve.utils.round_solution",
"matplotlib.pyplot.figure",
"numpy.round",
"IPython.display.HTML"
] | [((3572, 3579), 'pysolve.model.Model', 'Model', ([], {}), '()\n', (3577, 3579), False, 'from pysolve.model import Model\n'), ((8519, 8566), 'pysolve.utils.round_solution', 'round_solution', (['model.solutions[-2]'], {'decimals': '(1)'}), '(model.solutions[-2], decimals=1)\n', (8533, 8566), False, 'from pysolve.utils import is_close, round_solution\n'), ((8578, 8625), 'pysolve.utils.round_solution', 'round_solution', (['model.solutions[-1]'], {'decimals': '(1)'}), '(model.solutions[-1], decimals=1)\n', (8592, 8625), False, 'from pysolve.utils import is_close, round_solution\n'), ((10814, 10861), 'pysolve.utils.round_solution', 'round_solution', (['model.solutions[-2]'], {'decimals': '(1)'}), '(model.solutions[-2], decimals=1)\n', (10828, 10861), False, 'from pysolve.utils import is_close, round_solution\n'), ((10873, 10920), 'pysolve.utils.round_solution', 'round_solution', (['model.solutions[-1]'], {'decimals': '(1)'}), '(model.solutions[-1], decimals=1)\n', (10887, 10920), False, 'from pysolve.utils import is_close, round_solution\n'), ((11832, 11886), 'pysolve.utils.round_solution', 'round_solution', (['steady_state.solutions[-2]'], {'decimals': '(1)'}), '(steady_state.solutions[-2], decimals=1)\n', (11846, 11886), False, 'from pysolve.utils import is_close, round_solution\n'), ((11898, 11952), 'pysolve.utils.round_solution', 'round_solution', (['steady_state.solutions[-1]'], {'decimals': '(1)'}), '(steady_state.solutions[-1], decimals=1)\n', (11912, 11952), False, 'from pysolve.utils import is_close, round_solution\n'), ((13353, 13416), 'pysolve.utils.generate_html_table', 'generate_html_table', (["['Period', '1', '2', '3', '∞']", 'data'], {}), "(['Period', '1', '2', '3', '∞'], data)\n", (13372, 13416), False, 'from pysolve.utils import generate_html_table\n'), ((13417, 13424), 'IPython.display.HTML', 'HTML', (['s'], {}), '(s)\n', (13421, 13424), False, 'from IPython.display import HTML\n'), ((14441, 14453), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{}), '()\n', (14451, 14453), True, 'import matplotlib.pyplot as plt\n'), ((14704, 14749), 'matplotlib.pyplot.text', 'plt.text', (['(10)', '(126)', '"""Steady-state solution Y*"""'], {}), "(10, 126, 'Steady-state solution Y*')\n", (14712, 14749), True, 'import matplotlib.pyplot as plt\n'), ((14750, 14779), 'matplotlib.pyplot.text', 'plt.text', (['(15)', '(120)', '"""Income Y"""'], {}), "(15, 120, 'Income Y')\n", (14758, 14779), True, 'import matplotlib.pyplot as plt\n'), ((15052, 15064), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15062, 15064), True, 'import matplotlib.pyplot as plt\n'), ((15379, 15407), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(80)', 'color': '"""k"""'}), "(y=80, color='k')\n", (15390, 15407), True, 'import matplotlib.pyplot as plt\n'), ((15422, 15451), 'matplotlib.pyplot.text', 'plt.text', (['(2)', '(72)', '"""Disposable"""'], {}), "(2, 72, 'Disposable')\n", (15430, 15451), True, 'import matplotlib.pyplot as plt\n'), ((15452, 15480), 'matplotlib.pyplot.text', 'plt.text', (['(2)', '(68)', '"""Income YD"""'], {}), "(2, 68, 'Income YD')\n", (15460, 15480), True, 'import matplotlib.pyplot as plt\n'), ((15481, 15514), 'matplotlib.pyplot.text', 'plt.text', (['(10)', '(60)', '"""Consumption C"""'], {}), "(10, 60, 'Consumption C')\n", (15489, 15514), True, 'import matplotlib.pyplot as plt\n'), ((15868, 15880), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15878, 15880), True, 'import matplotlib.pyplot as plt\n'), ((16147, 16183), 'matplotlib.pyplot.text', 'plt.text', (['(20)', '(16)', '"""Household saving"""'], {}), "(20, 16, 'Household saving')\n", (16155, 16183), True, 'import matplotlib.pyplot as plt\n'), ((16184, 16235), 'matplotlib.pyplot.text', 'plt.text', (['(20)', '(12)', '"""(the change in the money stock)"""'], {}), "(20, 12, '(the change in the money stock)')\n", (16192, 16235), True, 'import matplotlib.pyplot as plt\n'), ((16236, 16284), 'matplotlib.pyplot.text', 'plt.text', (['(20)', '(70)', 
'"""Wealth level H (money stock)"""'], {}), "(20, 70, 'Wealth level H (money stock)')\n", (16244, 16284), True, 'import matplotlib.pyplot as plt\n'), ((16725, 16737), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16735, 16737), True, 'import matplotlib.pyplot as plt\n'), ((17170, 17212), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0.35, 0.2)', 'frameon': '(False)'}), '(loc=(0.35, 0.2), frameon=False)\n', (17180, 17212), True, 'import matplotlib.pyplot as plt\n'), ((18265, 18277), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18275, 18277), True, 'import matplotlib.pyplot as plt\n'), ((18629, 18659), 'matplotlib.pyplot.text', 'plt.text', (['(6)', '(97)', '"""Consumption"""'], {}), "(6, 97, 'Consumption')\n", (18637, 18659), True, 'import matplotlib.pyplot as plt\n'), ((18660, 18696), 'matplotlib.pyplot.text', 'plt.text', (['(8)', '(79)', '"""Disposable income"""'], {}), "(8, 79, 'Disposable income')\n", (18668, 18696), True, 'import matplotlib.pyplot as plt\n'), ((18697, 18723), 'matplotlib.pyplot.text', 'plt.text', (['(20)', '(62)', '"""Wealth"""'], {}), "(20, 62, 'Wealth')\n", (18705, 18723), True, 'import matplotlib.pyplot as plt\n'), ((9145, 9152), 'pysolve.model.Model', 'Model', ([], {}), '()\n', (9150, 9152), False, 'from pysolve.model import Model\n'), ((11772, 11810), 'pysolve.utils.is_close', 'is_close', (['prev_soln', 'soln'], {'atol': '(0.0001)'}), '(prev_soln, soln, atol=0.0001)\n', (11780, 11810), False, 'from pysolve.utils import is_close, round_solution\n'), ((12855, 12913), 'numpy.round', 'numpy.round', (['steady_state.solutions[0][var[0]]'], {'decimals': '(1)'}), '(steady_state.solutions[0][var[0]], decimals=1)\n', (12866, 12913), False, 'import numpy\n'), ((12642, 12700), 'numpy.round', 'numpy.round', (['steady_state.solutions[i][var[0]]'], {'decimals': '(1)'}), '(steady_state.solutions[i][var[0]], decimals=1)\n', (12653, 12700), False, 'import numpy\n'), ((12968, 13071), 'numpy.round', 'numpy.round', 
(['(steady_state.solutions[i][var[0]] - steady_state.solutions[i - 1][var[0]])'], {'decimals': '(1)'}), '(steady_state.solutions[i][var[0]] - steady_state.solutions[i - \n 1][var[0]], decimals=1)\n', (12979, 13071), False, 'import numpy\n'), ((13262, 13320), 'numpy.round', 'numpy.round', (['steady_state.solutions[i][var[0]]'], {'decimals': '(1)'}), '(steady_state.solutions[i][var[0]], decimals=1)\n', (13273, 13320), False, 'import numpy\n')] |
"""
Methods and classes for writing data to disk.
- Methods:
- create_zarr_dataset: Creates and returns a Zarr hierarchy/dataset.
- create_zarr_obj_array: Creates and returns a Zarr object array.
- create_zarr_count_assay: Creates and returns a Zarr array with name 'counts'.
- subset_assay_zarr: Selects a subset of the data in an assay in the specified Zarr hierarchy.
- dask_to_zarr: Creates a Zarr hierarchy from a Dask array.
- Classes:
- ZarrMerge: Merge multiple Zarr files into a single Zarr file.
- CrToZarr: A class for converting data in the Cellranger format to a Zarr hierarchy.
- H5adToZarr: A class for converting data in the Cellranger Matrix Market format to a Zarr hierarchy.
- MtxToZarr: A class for converting data in the Cellranger Matrix Market format to a Zarr hierarchy.
- NaboH5ToZarr: A class for converting data in a h5 file generated by Nabo, to a Zarr hierarchy.
- LoomToZarr: A class for converting data in a Loom file to a Zarr hierarchy.
"""
import zarr
from typing import Any, Tuple, List, Union
import numpy as np
from tqdm import tqdm
from .readers import CrReader, H5adReader, NaboH5Reader, LoomReader
import os
import pandas as pd
from .utils import controlled_compute
from .logging_utils import logger
from scipy.sparse import csr_matrix
# Public API of this module (names exported via `from <module> import *`);
# some listed classes are defined further down the file.
__all__ = ['create_zarr_dataset', 'create_zarr_obj_array', 'create_zarr_count_assay',
           'subset_assay_zarr', 'dask_to_zarr', 'ZarrMerge', 'SubsetZarr',
           'CrToZarr', 'H5adToZarr', 'MtxToZarr', 'NaboH5ToZarr', 'LoomToZarr', 'SparseToZarr']
def create_zarr_dataset(g: zarr.Group, name: str, chunks: tuple,
                        dtype: Any, shape: Tuple, overwrite: bool = True) -> zarr.hierarchy:
    """
    Creates and returns a Blosc/LZ4-compressed Zarr array inside a group.

    Args:
        g (zarr.hierarchy): Group under which the dataset is created.
        name (str): Name of the new dataset.
        chunks (tuple): Chunk shape used for on-disk storage.
        dtype (Any): Element data type of the array.
        shape (Tuple): Overall shape of the array.
        overwrite (bool): Replace an existing dataset of the same name.

    Returns:
        A Zarr Array.
    """
    from numcodecs import Blosc

    blosc_lz4 = Blosc(cname='lz4', clevel=5, shuffle=Blosc.BITSHUFFLE)
    return g.create_dataset(name, chunks=chunks, dtype=dtype, shape=shape,
                            compressor=blosc_lz4, overwrite=overwrite)
def create_zarr_obj_array(g: zarr.Group, name: str, data,
                          dtype: Union[str, Any] = None, overwrite: bool = True) -> zarr.hierarchy:
    """
    Creates and returns a Zarr object array.

    A Zarr object array can contain any type of object.
    https://zarr.readthedocs.io/en/stable/tutorial.html#object-arrays

    Args:
        g (zarr.hierarchy): Group under which the array is created.
        name (str): Name of the new array.
        data (): Values to store; converted to a numpy array first.
        dtype (Union[str, Any]): Element dtype. When None or `object`, a
            fixed-width unicode dtype wide enough for the longest element is
            inferred from the data.
        overwrite (bool): Replace an existing array of the same name.

    Returns:
        A Zarr object Array.
    """
    data = np.array(data)
    if dtype is None or dtype == object:
        # `default=1` guards against `max()` raising ValueError when `data`
        # is empty (original code crashed on empty input).
        dtype = 'U' + str(max((len(str(x)) for x in data), default=1))
    if np.issubdtype(data.dtype, np.dtype('S')):
        # Bytes arrays are transparently decoded to unicode.
        data = data.astype('U')
        dtype = data.dtype
    return g.create_dataset(name, data=data, chunks=(100000,),
                            shape=len(data), dtype=dtype, overwrite=overwrite)
def create_zarr_count_assay(z: zarr.Group, assay_name: str, chunk_size: Tuple[int, int], n_cells: int,
                            feat_ids: Union[np.ndarray, List[str]], feat_names: Union[np.ndarray, List[str]],
                            dtype: str = 'uint32') -> zarr.hierarchy:
    """
    Creates an assay group with feature metadata and a 'counts' matrix.

    Args:
        z (zarr.Group): Root group to create the assay under.
        assay_name (str): Name of the assay group.
        chunk_size (Tuple[int, int]): Chunk shape for the counts matrix.
        n_cells (int): Number of rows (cells) of the counts matrix.
        feat_ids (Union[np.ndarray, List[str]]): Feature identifiers.
        feat_names (Union[np.ndarray, List[str]]): Feature display names.
        dtype (str = 'uint32'): dtype of the counts matrix.

    Returns:
        The newly created 'counts' Zarr array.
    """
    assay_grp = z.create_group(assay_name, overwrite=True)
    assay_grp.attrs['is_assay'] = True
    assay_grp.attrs['misc'] = {}
    n_feats = len(feat_ids)
    create_zarr_obj_array(assay_grp, 'featureData/ids', feat_ids)
    create_zarr_obj_array(assay_grp, 'featureData/names', feat_names)
    # All features start flagged as valid (I == True).
    create_zarr_obj_array(assay_grp, 'featureData/I', [True] * n_feats, 'bool')
    return create_zarr_dataset(assay_grp, 'counts', chunk_size, dtype,
                               (n_cells, n_feats), overwrite=True)
class CrToZarr:
    """
    A class for converting data in the Cellranger format to a Zarr hierarchy.

    Attributes:
        cr: A CrReader object, containing the Cellranger data.
        fn: The file name for the Zarr hierarchy.
        chunkSizes: The requested size of chunks to load into memory and process.
        z: The Zarr hierarchy (array or group).
    """

    def __init__(self, cr: CrReader, zarr_fn: str, chunk_size=(1000, 1000), dtype: str = 'uint32'):
        """
        Args:
            cr: A CrReader object, containing the Cellranger data.
            zarr_fn: The file name for the Zarr hierarchy.
            chunk_size: The requested size of chunks to load into memory and process.
            dtype: the dtype of the data.
        """
        self.cr = cr
        self.fn = zarr_fn
        self.chunkSizes = chunk_size
        self.z = zarr.open(self.fn, mode='w')
        self._ini_cell_data()
        # One counts matrix per assay column declared by the reader.
        for assay in self.cr.assayFeats.columns:
            create_zarr_count_assay(self.z, assay, chunk_size, self.cr.nCells,
                                    self.cr.feature_ids(assay), self.cr.feature_names(assay), dtype)

    def _ini_cell_data(self):
        # Cell ids double as display names; every cell starts flagged valid.
        grp = self.z.create_group('cellData')
        create_zarr_obj_array(grp, 'ids', self.cr.cell_names())
        create_zarr_obj_array(grp, 'names', self.cr.cell_names())
        create_zarr_obj_array(grp, 'I', [True] * self.cr.nCells, 'bool')

    def dump(self, batch_size: int = 1000, lines_in_mem: int = 100000) -> None:
        """
        Streams the count matrix into the Zarr file, batch_size cells at a
        time, splitting each dense batch column-wise into the per-assay
        'counts' arrays.

        Raises:
            AssertionError: Catches eventual bugs in the class, if number of cells does not match after transformation.

        Returns:
            None
        """
        stores = [self.z[f"{assay}/counts"] for assay in self.cr.assayFeats.columns]
        # Cumulative feature counts give each assay's [start, end) column slice.
        offsets = [0] + list(self.cr.assayFeats.T.nFeatures.cumsum().values)
        col_slices = list(zip(offsets[:-1], offsets[1:]))
        n_chunks = self.cr.nCells // batch_size + 1
        start = end = 0
        for batch in tqdm(self.cr.consume(batch_size, lines_in_mem), total=n_chunks):
            end += batch.shape[0]
            dense = batch.todense()
            for store, (c0, c1) in zip(stores, col_slices):
                store[start:end] = dense[:, c0:c1]
            start = end
        if end != self.cr.nCells:
            raise AssertionError("ERROR: This is a bug in CrToZarr. All cells might not have been successfully "
                                 "written into the zarr file. Please report this issue")
class MtxToZarr:
    """
    A class for converting data in the Cellranger Matrix Market format to a Zarr hierarchy.

    Unlike CrToZarr, a given assay's features may be split across several
    non-contiguous ranges of the input matrix; this class stitches them back
    together before writing.

    Attributes:
        cr: A CrReader object, containing the Cellranger data.
        fn: The file name for the Zarr hierarchy.
        chunkSizes: The requested size of chunks to load into memory and process.
        z: The Zarr hierarchy (array or group).
    """

    def __init__(self, cr: CrReader, zarr_fn: str, chunk_size=(1000, 1000), dtype: str = 'uint32'):
        """
        Args:
            cr: A CrReader object, containing the Cellranger data.
            zarr_fn: The file name for the Zarr hierarchy.
            chunk_size: The requested size of chunks to load into memory and process.
            dtype: the dtype of the data.
        """
        self.cr = cr
        self.fn = zarr_fn
        self.chunkSizes = chunk_size
        self.z = zarr.open(self.fn, mode='w')
        self._ini_cell_data()
        # set(): an assay name may occur more than once in assayFeats, so
        # create each assay group only once.
        for assay_name in set(self.cr.assayFeats.columns):
            create_zarr_count_assay(self.z, assay_name, chunk_size, self.cr.nCells,
                                    self.cr.feature_ids(assay_name), self.cr.feature_names(assay_name), dtype)

    def _ini_cell_data(self):
        # Cell ids double as display names; every cell starts flagged valid.
        g = self.z.create_group('cellData')
        create_zarr_obj_array(g, 'ids', self.cr.cell_names())
        create_zarr_obj_array(g, 'names', self.cr.cell_names())
        create_zarr_obj_array(g, 'I', [True for _ in range(self.cr.nCells)], 'bool')

    def _prep_assay_ranges(self):
        # Maps each assay name to a list of [start, end) feature-column ranges.
        ret_val = {}
        for assay in set(self.cr.assayFeats.columns):
            temp = []
            # A 2-D selection means the assay occurs in several columns of
            # assayFeats, i.e. its features span non-contiguous ranges;
            # rows 1:3 presumably hold the start/end positions, mirroring the
            # .start/.end fields of the 1-D case — TODO confirm in CrReader.
            if len(self.cr.assayFeats[assay].shape) == 2:
                for i in self.cr.assayFeats[assay].values[1:3].T:
                    temp.append([i[0], i[1]])
            else:
                idx = self.cr.assayFeats[assay]
                temp = [[idx.start, idx.end]]
            ret_val[assay] = temp
        return ret_val

    def dump(self, batch_size: int = 1000, lines_in_mem: int = 100000) -> None:
        """
        Streams the count matrix into the Zarr file, batch_size cells at a time.

        Raises:
            AssertionError: Catches eventual bugs in the class, if number of cells does not match after transformation.

        Returns:
            None
        """
        stores = {x: self.z["%s/counts" % x] for x in set(self.cr.assayFeats.columns)}
        assay_ranges = self._prep_assay_ranges()
        s, e, = 0, 0
        n_chunks = self.cr.nCells // batch_size + 1
        for a in tqdm(self.cr.consume(batch_size, lines_in_mem), total=n_chunks):
            e += a.shape[0]
            a = a.todense()
            b = {x: [] for x in stores.keys()}
            # Gather each assay's (possibly non-contiguous) column ranges and
            # stitch them into one contiguous block per assay.
            for store_name in stores.keys():
                ar = assay_ranges[store_name]
                temp = []
                for i in ar:
                    temp.append(a[:, i[0]:i[1]])
                if len(temp) > 1:
                    b[store_name] = np.hstack(temp)
                else:
                    b[store_name] = temp[0]
            # Write the stitched blocks for this batch of cells.
            for store_name in stores.keys():
                stores[store_name][s:e] = b[store_name]
            s = e
        if e != self.cr.nCells:
            raise AssertionError("ERROR: This is a bug in MtxToZarr. All cells might not have been successfully "
                                 "written into the zarr file. Please report this issue")
class H5adToZarr:
    """
    Converts data read from an AnnData h5ad file into a Zarr hierarchy.

    Attributes:
        h5ad: An H5adReader instance wrapping the input file.
        fn: Output filename of the Zarr hierarchy.
        chunkSizes: Chunk shape used for the counts matrix.
        assayName: Name of the assay group created in the Zarr file.
        z: Root of the output Zarr hierarchy.
    """

    def __init__(self, h5ad: H5adReader, zarr_fn: str, assay_name: str = None,
                 chunk_size=(1000, 1000), dtype: str = 'uint32'):
        """
        Args:
            h5ad: A H5adReader object, containing the Cellranger data.
            zarr_fn: The file name for the Zarr hierarchy.
            assay_name: the name of the assay (e. g. 'RNA'); defaults to 'RNA'.
            chunk_size: The requested size of chunks to load into memory and process.
            dtype: the dtype of the data.
        """
        # TODO: support for multiple assay. One of the `var` datasets can be used to group features in separate assays
        self.h5ad = h5ad
        self.fn = zarr_fn
        self.chunkSizes = chunk_size
        if assay_name is None:
            logger.info(f"No value provided for assay names. Will use default value: 'RNA'")
            self.assayName = 'RNA'
        else:
            self.assayName = assay_name
        self.z = zarr.open(self.fn, mode='w')
        self._ini_cell_data()
        create_zarr_count_assay(self.z, self.assayName, chunk_size, self.h5ad.nCells,
                                self.h5ad.feat_ids(), self.h5ad.feat_names(), dtype)
        # Copy over any extra per-feature columns not created above.
        feat_grp = self.z[self.assayName]['featureData']
        for col, values in self.h5ad.get_feat_columns():
            if col not in feat_grp:
                create_zarr_obj_array(feat_grp, col, values, values.dtype)

    def _ini_cell_data(self):
        # Cell ids double as display names; every cell starts flagged valid.
        grp = self.z.create_group('cellData')
        cell_ids = self.h5ad.cell_ids()
        create_zarr_obj_array(grp, 'ids', cell_ids, cell_ids.dtype)
        create_zarr_obj_array(grp, 'names', cell_ids, cell_ids.dtype)
        create_zarr_obj_array(grp, 'I', [True] * self.h5ad.nCells, 'bool')
        # Copy over any extra per-cell columns provided by the reader.
        for col, values in self.h5ad.get_cell_columns():
            create_zarr_obj_array(grp, col, values, values.dtype)

    def dump(self, batch_size: int = 1000) -> None:
        """
        Streams the count matrix into the Zarr file, batch_size cells at a time.

        Raises:
            AssertionError: Catches eventual bugs in the class, if number of cells does not match after transformation.

        Returns:
            None
        """
        store = self.z[f"{self.assayName}/counts"]
        start = end = 0
        n_chunks = self.h5ad.nCells // batch_size + 1
        for batch in tqdm(self.h5ad.consume(batch_size), total=n_chunks):
            end += batch.shape[0]
            store[start:end] = batch
            start = end
        if end != self.h5ad.nCells:
            raise AssertionError("ERROR: This is a bug in H5adToZarr. All cells might not have been successfully "
                                 "written into the zarr file. Please report this issue")
class NaboH5ToZarr:
    """
    A class for converting data in a h5 file generated by Nabo, to a Zarr hierarchy.
    Attributes:
        h5: A Nabo h5 reader object.
        fn: The file name for the output Zarr hierarchy.
        chunkSizes: Chunk size used for the count matrix.
        assayName: Name of the assay stored in the Zarr file.
        z: The root of the output Zarr hierarchy.
    """
    def __init__(self, h5: NaboH5Reader, zarr_fn: str, assay_name: str = None,
                 chunk_size=(1000, 1000), dtype: str = 'uint32'):
        """
        Args:
            h5: A Nabo h5 object containing the data.
            zarr_fn: The file name for the Zarr hierarchy.
            assay_name: the name of the assay (e. g. 'RNA')
            chunk_size: The requested size of chunks to load into memory and process.
            dtype: the dtype of the data.
        """
        self.h5 = h5
        self.fn = zarr_fn
        self.chunkSizes = chunk_size
        if assay_name is not None:
            self.assayName = assay_name
        else:
            logger.info(f"No value provided for assay names. Will use default value: 'RNA'")
            self.assayName = 'RNA'
        # mode='w' truncates any existing store at this path
        self.z = zarr.open(self.fn, mode='w')
        self._ini_cell_data()
        create_zarr_count_assay(self.z, self.assayName, chunk_size, self.h5.nCells,
                                self.h5.feat_ids(), self.h5.feat_names(), dtype)
    def _ini_cell_data(self):
        """Create the 'cellData' group with ids, names and the validity flag."""
        grp = self.z.create_group('cellData')
        create_zarr_obj_array(grp, 'ids', self.h5.cell_ids())
        create_zarr_obj_array(grp, 'names', self.h5.cell_ids())
        # all cells are initially marked as valid
        create_zarr_obj_array(grp, 'I', [True] * self.h5.nCells, 'bool')
    def dump(self, batch_size: int = 500) -> None:
        """
        Stream the count matrix from the h5 file into the Zarr store.

        Raises:
            AssertionError: Catches eventual bugs in the class, if number of cells does not match after transformation.

        Returns:
            None
        """
        counts = self.z["%s/counts" % self.assayName]
        n_chunks = self.h5.nCells // batch_size + 1
        start = end = 0
        for chunk in tqdm(self.h5.consume(batch_size), total=n_chunks):
            end = start + chunk.shape[0]
            counts[start:end] = chunk
            start = end
        if end != self.h5.nCells:
            raise AssertionError("ERROR: This is a bug in NaboH5ToZarr. All cells might not have been successfully "
                                 "written into the zarr file. Please report this issue")
class LoomToZarr:
    """
    A class for converting data in a Loom file to a Zarr hierarchy.
    Converts a Loom file read using scarf.LoomReader into Scarf's Zarr format.
    Attributes:
        loom: A scarf.LoomReader object used to open Loom format file.
        fn: The file name for the Zarr hierarchy.
        chunkSizes: The requested size of chunks to load into memory and process.
        assayName: Name of the assay stored in the output Zarr file.
        z: The root of the output Zarr hierarchy (array or group).
    """
    def __init__(self, loom: LoomReader, zarr_fn: str, assay_name: str = None,
                 chunk_size=(1000, 1000)):
        """
        Args:
            loom: LoomReader object used to open Loom format file
            zarr_fn: Output Zarr filename with path
            assay_name: Name for the output assay. If not provided then automatically set to RNA
            chunk_size: Chunk size for the count matrix saved in Zarr file.
        """
        # TODO: support for multiple assay. Data from within individual layers can be treated as separate assays
        self.loom = loom
        self.fn = zarr_fn
        self.chunkSizes = chunk_size
        if assay_name is None:
            logger.info(f"No value provided for assay names. Will use default value: 'RNA'")
            self.assayName = 'RNA'
        else:
            self.assayName = assay_name
        # mode='w' truncates any existing store at this path
        self.z = zarr.open(self.fn, mode='w')
        self._ini_cell_data()
        # Create the (still empty) count assay, keeping the Loom matrix dtype; dump() fills it later.
        create_zarr_count_assay(self.z, self.assayName, chunk_size, self.loom.nCells,
                                self.loom.feature_ids(), self.loom.feature_names(), self.loom.matrixDtype)
        # Carry over all per-feature attributes from the Loom file.
        for i, j in self.loom.get_feature_attrs():
            create_zarr_obj_array(self.z[self.assayName]['featureData'], i, j, j.dtype)
    def _ini_cell_data(self):
        # Create the 'cellData' group with ids/names and the validity flag.
        g = self.z.create_group('cellData')
        create_zarr_obj_array(g, 'ids', self.loom.cell_ids())
        create_zarr_obj_array(g, 'names', self.loom.cell_ids())
        # All cells start out flagged as valid.
        create_zarr_obj_array(g, 'I', [True for _ in range(self.loom.nCells)], 'bool')
        # Carry over any additional per-cell attributes.
        for i, j in self.loom.get_cell_attrs():
            create_zarr_obj_array(g, i, j, j.dtype)
    def dump(self, batch_size: int = 1000) -> None:
        # Streams the Loom count matrix into the Zarr store in `batch_size`-cell chunks.
        """
        Raises:
            AssertionError: Catches eventual bugs in the class, if number of cells does not match after transformation.
        Returns:
            None
        """
        store = self.z["%s/counts" % self.assayName]
        s, e, = 0, 0
        n_chunks = self.loom.nCells//batch_size + 1
        for a in tqdm(self.loom.consume(batch_size), total=n_chunks):
            e += a.shape[0]
            store[s:e] = a
            s = e
        # Sanity check: all cells must have been written.
        if e != self.loom.nCells:
            raise AssertionError("ERROR: This is a bug in LoomToZarr. All cells might not have been successfully "
                                 "written into the zarr file. Please report this issue")
class SparseToZarr:
    """
    A class for converting data in a sparse matrix to a Zarr hierarchy.
    Converts a scipy CSR matrix into Scarf's Zarr format.
    Attributes:
        csr_mat: The input CSR matrix (features x cells; see dump()).
        fn: The file name for the Zarr hierarchy.
        chunkSizes: The requested size of chunks to load into memory and process.
        assayName: Name of the assay stored in the output Zarr file.
        z: The root of the output Zarr hierarchy (array or group).
    """
    def __init__(self, csr_mat: csr_matrix, zarr_fn: str, cell_ids: List[str], feature_ids: List[str],
                 assay_name: str = None, chunk_size=(1000, 1000), ):
        """
        Args:
            csr_mat: Input CSR matrix. Rows are treated as features and columns as cells
                     (the matrix is transposed while writing; see dump()).
            zarr_fn: Output Zarr filename with path
            cell_ids: Cell IDs for the cells in the dataset.
            feature_ids: Feature IDs for the features in the dataset.
            assay_name: Name for the output assay. If not provided then automatically set to RNA.
            chunk_size: The requested size of chunks to load into memory and process.
        Raises:
            ValueError: Raised if number of input cell or feature IDs does not match the matrix.
            AssertionError: Catches eventual bugs in the class, if number of cells does not match after transformation.
        """
        self.mat = csr_mat
        self.fn = zarr_fn
        self.chunkSizes = chunk_size
        if assay_name is None:
            logger.info(f"No value provided for assay names. Will use default value: 'RNA'")
            self.assayName = 'RNA'
        else:
            self.assayName = assay_name
        # The matrix is laid out as (features, cells); dump() transposes when writing.
        self.nFeatures, self.nCells = self.mat.shape
        if len(cell_ids) != self.nCells:
            raise ValueError("ERROR: Number of cell ids are not same as number of cells in the matrix")
        if len(feature_ids) != self.nFeatures:
            raise ValueError("ERROR: Number of feature ids are not same as number of features in the matrix")
        # mode='w' truncates any existing store at this path
        self.z = zarr.open(self.fn, mode='w')
        self._ini_cell_data(cell_ids)
        # Feature ids double as feature names here since no names were provided.
        create_zarr_count_assay(self.z, self.assayName, chunk_size, self.nCells,
                                feature_ids, feature_ids, 'int64')
    def _ini_cell_data(self, cell_ids):
        # Create the 'cellData' group with ids/names and the validity flag.
        g = self.z.create_group('cellData')
        create_zarr_obj_array(g, 'ids', cell_ids)
        create_zarr_obj_array(g, 'names', cell_ids)
        # All cells start out flagged as valid.
        create_zarr_obj_array(g, 'I', [True for _ in range(self.nCells)], 'bool')
    def dump(self, batch_size: int = 1000) -> None:
        # Writes the matrix cell-batch by cell-batch, densifying and transposing
        # each (features, batch) slice into (batch, features).
        """
        Raises:
            ValueError: Raised if there is any unexpected errors when writing to the Zarr hierarchy.
            AssertionError: Catches eventual bugs in the class, if number of cells does not match after transformation.
        Returns:
            None
        """
        store = self.z["%s/counts" % self.assayName]
        s, e, = 0, 0
        n_chunks = self.nCells//batch_size + 1
        for e in tqdm(range(batch_size, self.nCells+batch_size, batch_size), total=n_chunks):
            # s == nCells means the previous iteration already wrote everything.
            if s == self.nCells:
                raise ValueError("Unexpected error encountered in writing to Zarr. The last iteration has failed. "
                                 "Please report this issue.")
            # Clamp the final (possibly partial) batch to the matrix bounds.
            if e > self.nCells:
                e = self.nCells
            store[s:e] = self.mat[:, s:e].todense().T
            s = e
        if e != self.nCells:
            raise AssertionError("ERROR: This is a bug in SparseToZarr. All cells might not have been successfully "
                                 "written into the zarr file. Please report this issue")
def subset_assay_zarr(zarr_fn: str, in_grp: str, out_grp: str,
                      cells_idx: np.ndarray, feat_idx: np.ndarray,
                      chunk_size: tuple):
    """
    Selects a subset of the data in an assay in the specified Zarr hierarchy.

    For the arguments `cells_idx` and `feat_idx`, refer to the documentation for numpy.split:
    https://numpy.org/doc/stable/reference/generated/numpy.split.html

    Args:
        zarr_fn: The file name for the Zarr hierarchy.
        in_grp: Group in Zarr hierarchy to subset.
        out_grp: Group name in Zarr hierarchy to write subsetted assay to.
        cells_idx: A list of cell indices to (keep | drop ?).
        feat_idx: A list of feature indices to (keep | drop ?).
        chunk_size: The requested size of chunks to load into memory and process.

    Returns:
        None
    """
    root = zarr.open(zarr_fn, 'r+')
    src = root[in_grp]
    dst = create_zarr_dataset(root, out_grp, chunk_size, 'uint32', (len(cells_idx), len(feat_idx)))
    # Process the cell indices in chunks of roughly chunk_size[0] rows.
    n_splits = len(cells_idx) // chunk_size[0] + 1
    start = end = 0
    for cell_block in tqdm(np.array_split(cells_idx, n_splits)):
        end = start + len(cell_block)
        dst[start:end, :] = src.get_orthogonal_selection((cell_block, feat_idx))
        start = end
    return None
def dask_to_zarr(df, z, loc, chunk_size, nthreads: int, msg: str = None):
    # TODO: perhaps change name of Dask array so it does not get confused with a dataframe
    """
    Creates a Zarr hierarchy from a Dask array.

    Args:
        df (): Dask array holding the data to be written.
        z (): Zarr hierarchy to write into.
        loc (): Location to write data/Zarr hierarchy to.
        chunk_size (): Size of chunks to load into memory and process.
        nthreads (int): Number of threads to use.
        msg (str): Message to use with progress bar (Default: f"Writing data to {loc}").
    """
    progress_msg = f"Writing data to {loc}" if msg is None else msg
    out = create_zarr_dataset(z, loc, chunk_size, 'float64', df.shape)
    start = end = 0
    # Compute and write one Dask block at a time to bound memory usage.
    for block in tqdm(df.blocks, total=df.numblocks[0], desc=progress_msg):
        end = start + block.shape[0]
        out[start:end, :] = controlled_compute(block, nthreads)
        start = end
    return None
class SubsetZarr:
    """
    Write a subset of the cells of one Zarr file into a new Zarr file.

    The cells to keep are selected either via a boolean column in the cell
    metadata (`cell_key`) or via explicit cell indices (`cell_idx`).
    """

    def __init__(self, in_zarr: str, out_zarr: str, cell_key: str = None, cell_idx: np.ndarray = None,
                 reset_cell_filter: bool = True, overwrite_existing_file: bool = False,
                 overwrite_cell_data: bool = False) -> None:
        """
        Split Zarr file using a subset of cells

        Args:
            in_zarr: Path of input Zarr file to be subsetted.
            out_zarr: Path of output Zarr files containing only a subset of cells.
            cell_key: Name of a boolean column in cell metadata. The cells with with value True are included in the
                      subset.
            cell_idx: Indices of the cells to be included in the subsetted. Only used when cell_key is None.
            reset_cell_filter: If True, then the cell filtering information is removed, i.e. even the filtered out cells
                               are set as True as in the 'I' column. To keep the filtering information set the value for
                               this parameter to False. (Default value: True)
            overwrite_existing_file: If True, then overwrites the existing data. (Default value: False)
            overwrite_cell_data: If True, then overwrites cell data (Default value: True)

        Raises:
            ValueError: If neither cell_key nor cell_idx is provided, or if the
                output path equals the input path, or if the output file exists
                and overwrite_existing_file is False.
        """
        if cell_key is None and cell_idx is None:
            raise ValueError("Both 'cell_key' and 'cell_idx' parameters cannot be None")
        self.iZname = in_zarr
        self.oZname = out_zarr
        self.cellKey = cell_key
        self.cellIdx = cell_idx
        self.resetCells = reset_cell_filter
        self.overFn = overwrite_existing_file
        self.overcells = overwrite_cell_data
        self._check_files()
        self.iz = zarr.open(self.iZname)
        self._check_idx()
        # mode='w' truncates any pre-existing store at the output path
        self.oz = zarr.open(self.oZname, mode='w')
        self._prep_cell_data()
        self.assays = self._get_assays()
        self._prep_counts()

    def _check_files(self):
        # Refuse to overwrite the input file with its own subset.
        if self.iZname == self.oZname:
            raise ValueError("You are trying to overwrite the current Zarr file itself with the subset. "
                             "This is not allowed. Please change the name/path of output file, by supplying a "
                             "different value to `out_zarr` parameter. No subsetting was performed")
        if os.path.isdir(self.oZname) and self.overFn is False:
            # Fixed: this condition was previously only logged via logger.error
            # and execution continued, silently overwriting the existing file
            # despite the message claiming "No subsetting was performed".
            # Now it aborts as documented.
            raise ValueError(f"Zarr file with name: {self.oZname} already exists.\nIf you want to overwrite it then "
                             f"please set overwrite_existing_file to True. No subsetting was performed.")
        return None

    def _check_idx(self):
        # Derive cell indices from the boolean metadata column when explicit
        # indices were not supplied.
        if self.cellIdx is None:
            idx = self.iz['cellData'][self.cellKey][:]
            if idx.dtype != bool:
                raise ValueError(f"ERROR: {self.cellKey} is not of boolean type. Cannot perform subsetting")
            self.cellIdx = np.where(idx)[0]

    def _prep_cell_data(self):
        # Copy per-cell metadata for the selected cells into the output file.
        n_cells = len(self.cellIdx)
        if 'cellData' in self.oz:
            g = self.oz['cellData']
        else:
            g = self.oz.create_group('cellData')
        for i in self.iz['cellData'].keys():
            # Do not clobber pre-existing columns unless explicitly requested.
            if i in g and self.overcells is False:
                continue
            if i in ['I'] and self.resetCells:
                # Reset the validity flag: all subsetted cells are marked valid.
                create_zarr_obj_array(g, 'I', [True for _ in range(n_cells)], 'bool')
                continue
            v = self.iz['cellData'][i][:][self.cellIdx]
            create_zarr_obj_array(g, i, v, dtype=v.dtype)

    def _get_assays(self):
        # An input group is an assay iff it carries the 'is_assay' attribute.
        assays = []
        for i in self.iz.group_keys():
            if 'is_assay' in self.iz[i].attrs.keys():
                assays.append(i)
        return assays

    def _get_raw_data(self, assay_name):
        # Lazily expose the input counts as a Dask array (no data is loaded here).
        import dask.array as daskarr
        return daskarr.from_zarr(self.iz[assay_name]['counts'], inline_array=True)

    def _prep_counts(self):
        # Create an (empty) count assay in the output file for each input assay,
        # carrying over the full feature metadata.
        # Fixed: removed leftover debug `print` statements.
        n_cells = len(self.cellIdx)
        for assay_name in self.assays:
            raw_data = self._get_raw_data(assay_name)
            create_zarr_count_assay(self.oz, assay_name, raw_data.chunksize, n_cells,
                                    self.iz[assay_name]['featureData']['ids'][:],
                                    self.iz[assay_name]['featureData']['names'][:], raw_data.dtype)

    def write(self):
        """Stream the subsetted count matrices into the output file, one assay at a time."""
        for assay_name in self.assays:
            raw_data = self._get_raw_data(assay_name)
            store = self.oz[f"{assay_name}/counts"]
            s, e, = 0, 0
            for a in tqdm(raw_data[self.cellIdx].blocks,
                          desc=f"Subsetting assay: {assay_name}", total=raw_data.numblocks[0]):
                # Fancy indexing can yield empty blocks; skip them.
                if a.shape[0] > 0:
                    e += a.shape[0]
                    store[s:e] = a.compute()
                    s = e
class ZarrMerge:
    """
    Merge multiple Zarr files into a single Zarr file.
    Attributes:
        assays: List of assay objects to be merged. For example, [ds1.RNA, ds2.RNA].
        names: Names of the each assay objects in the `assays` parameter.
        mergedCells: DataFrame of concatenated per-cell metadata from all assays.
        nCells: Number of cells in dataset.
        featCollection: Per-assay {feature id: feature name} dictionaries.
        mergedFeats: DataFrame of the union of features across assays, indexed by feature id.
        nFeats: Number of features in the dataset.
        featOrder: Per-assay column positions of each assay's features in the merged feature table.
        z: The merged Zarr file.
        assayGroup: Zarr dataset holding the merged count matrix.
    """
    def __init__(self, zarr_path: str, assays: list, names: List[str], merge_assay_name: str,
                 chunk_size=(1000, 1000), dtype: str = None, overwrite: bool = False,
                 reset_cell_filter: bool = True):
        """
        Args:
            zarr_path: Name of the new, merged Zarr file with path.
            assays: List of assay objects to be merged. For example, [ds1.RNA, ds2.RNA].
            names: Names of the each assay objects in the `assays` parameter. They should be in the same order as in
                   `assays` parameter.
            merge_assay_name: Name of assay in the merged Zarr file. For example, for scRNA-Seq it could be simply,
                              'RNA'.
            chunk_size: Tuple of cell and feature chunk size. (Default value: (1000, 1000)).
            dtype: Dtype of the raw values in the assay. Dtype is automatically inferred from the provided assays. If
                   assays have different dtypes then a float type is used.
            overwrite: If True, then overwrites previously created assay in the Zarr file. (Default value: False).
            reset_cell_filter: If True, then the cell filtering information is removed, i.e. even the filtered out cells
                               are set as True as in the 'I' column. To keep the filtering information set the value for
                               this parameter to False. (Default value: True)
        """
        self.assays = assays
        self.names = names
        self.mergedCells = self._merge_cell_table(reset_cell_filter)
        self.nCells = self.mergedCells.shape[0]
        self.featCollection = self._get_feat_ids(assays)
        self.mergedFeats = self._merge_order_feats()
        self.nFeats = self.mergedFeats.shape[0]
        self.featOrder = self._ref_order_feat_idx()
        self.z = self._use_existing_zarr(zarr_path, merge_assay_name, overwrite)
        self._ini_cell_data()
        # Use a common dtype when all assays agree; otherwise fall back to float.
        if dtype is None:
            if len(set([str(x.rawData.dtype) for x in self.assays])) == 1:
                dtype = str(self.assays[0].rawData.dtype)
            else:
                dtype = 'float'
        self.assayGroup = create_zarr_count_assay(
            self.z['/'], merge_assay_name, chunk_size, self.nCells, list(self.mergedFeats.index),
            list(self.mergedFeats.names.values), dtype
        )
    def _merge_cell_table(self, reset):
        # Concatenate per-cell metadata of all assays; cell ids are prefixed with
        # the assay name ("name__id") to keep them unique across datasets.
        ret_val = []
        if len(self.assays) != len(set(self.names)):
            raise ValueError("ERROR: A unique name should be provided for each of the assay")
        for assay, name in zip(self.assays, self.names):
            a = pd.DataFrame({
                'ids': [f"{name}__{x}" for x in assay.cells.fetch_all('ids')],
                'names': assay.cells.fetch_all('names')
            })
            # Other metadata columns are preserved under an "orig_" prefix.
            for i in assay.cells.columns:
                if i not in ['ids', 'I', 'names']:
                    a[f"orig_{i}"] = assay.cells.fetch_all(i)
            if reset:
                # Mark every cell as valid, discarding previous filtering.
                a['I'] = np.ones(len(a['ids'])).astype(bool)
            else:
                a['I'] = assay.cells.fetch_all('I')
            ret_val.append(a)
        return pd.concat(ret_val).reset_index(drop=True)
    @staticmethod
    def _get_feat_ids(assays):
        # One {feature id: feature name} dict per assay.
        ret_val = []
        for i in assays:
            ret_val.append(i.feats.to_pandas_dataframe(['names', 'ids']).set_index('ids')['names'].to_dict())
        return ret_val
    def _merge_order_feats(self):
        # Union of features across assays, preserving first-seen order.
        union_set = {}
        for ids in self.featCollection:
            for i in ids:
                if i not in union_set:
                    union_set[i] = ids[i]
        return pd.DataFrame({'idx': range(len(union_set)),
                             'names': list(union_set.values()),
                             'ids': list(union_set.keys())}).set_index('ids')
    def _ref_order_feat_idx(self):
        # For each assay: positions of its features within the merged feature table.
        ret_val = []
        for ids in self.featCollection:
            ret_val.append(self.mergedFeats['idx'].reindex(ids).values)
        return ret_val
    def _use_existing_zarr(self, zarr_path, merge_assay_name, overwrite):
        # Reuse an existing Zarr file when its cell order matches; otherwise
        # create a fresh one. ValueError from zarr.open means no store exists yet.
        try:
            z = zarr.open(zarr_path, mode='r')
            if 'cellData' not in z:
                raise ValueError(
                    f"ERROR: Zarr file with name {zarr_path} exists but seems corrupted. Either delete the "
                    "existing file or choose another path")
            if merge_assay_name in z:
                if overwrite is False:
                    raise ValueError(
                        f"ERROR: Zarr file `{zarr_path}` already contains {merge_assay_name} assay. Choose "
                        "a different zarr path or a different assay name. Otherwise set overwrite to True")
            try:
                if not all(z['cellData']['ids'][:] == np.array(self.mergedCells['ids'].values)):
                    raise ValueError(f"ERROR: order of cells does not match the one in existing file: {zarr_path}")
            except KeyError:
                raise ValueError(f"ERROR: 'cell data' in Zarr file {zarr_path} seems corrupted. Either delete the "
                                 "existing file or choose another path")
            return zarr.open(zarr_path, mode='r+')
        except ValueError:
            # So no zarr file with same name exists. Check if a non zarr folder with the same name exists
            if os.path.exists(zarr_path):
                raise ValueError(
                    f"ERROR: Directory/file with name `{zarr_path}`exists. Either delete it or use another name")
            # creating a new zarr file
            return zarr.open(zarr_path, mode='w')
    def _ini_cell_data(self):
        # Write the merged cell metadata, unless the file already has some.
        if 'cellData' not in self.z:
            g = self.z.create_group('cellData')
            for i in self.mergedCells:
                vals = self.mergedCells[i].values
                create_zarr_obj_array(g, i, vals, vals.dtype)
        else:
            logger.info(f"cellData already exists so skipping _ini_cell_data")
    def write(self, nthreads=2):
        # Stream each assay's counts into the merged matrix, block by block,
        # scattering each assay's columns into its merged feature positions.
        #
        # NOTE(review): the buffer is created with np.ones, so features absent
        # from an assay are written as 1 rather than 0 — zeros would seem
        # intended for count data; confirm before relying on merged counts.
        pos_start, pos_end = 0, 0
        for assay, feat_order in zip(self.assays, self.featOrder):
            for i in tqdm(assay.rawData.blocks, total=assay.rawData.numblocks[0],
                          desc=f"Writing data to merged file"):
                pos_end += i.shape[0]
                a = np.ones((i.shape[0], self.nFeats))
                a[:, feat_order] = controlled_compute(i, nthreads)
                self.assayGroup[pos_start:pos_end, :] = a
                pos_start = pos_end
def to_h5ad(assay, h5ad_filename: str) -> None:
    """
    Save an assay as an h5ad file.
    Args:
        assay: Assay to save.
        h5ad_filename: Name for the h5ad file to be created.
    Returns:
        None
    """
    import h5py

    def save_attr(group, col, scarf_col, md):
        # Write a metadata column `scarf_col` of `md` as variable-length strings
        # under `group/col`.
        d = md.fetch_all(scarf_col)
        h5[group].create_dataset(col, data=d.astype(h5py.special_dtype(vlen=str)))

    h5 = h5py.File(h5ad_filename, 'w')
    for i in ['X', 'obs', 'var']:
        h5.create_group(i)
    # NOTE(review): the CSR indptr below is built from the per-cell `nFeatures`
    # statistic — this assumes it equals the number of non-zero entries per
    # cell; confirm against how `nFeatures` is computed.
    n_feats_per_cell = assay.cells.fetch_all(f"{assay.name}_nFeatures").astype(int)
    tot_counts = int(n_feats_per_cell.sum())
    # Pre-create the three CSR arrays (indptr/indices/data) of the final size.
    for i, s in zip(['indptr', 'indices', 'data'],
                    [assay.cells.N + 1, tot_counts, tot_counts]):
        h5['X'].create_dataset(i, (s,), chunks=True, compression='gzip', dtype=int)
    h5['X/indptr'][:] = np.array([0] + list(n_feats_per_cell.cumsum())).astype(int)
    # Stream the dense blocks, converting each to CSR and appending its
    # data/indices at the running offset.
    s, e = 0, 0
    for i in tqdm(assay.rawData.blocks, total=assay.rawData.numblocks[0]):
        i = csr_matrix(i.compute()).astype(int)
        e += i.data.shape[0]
        h5['X/data'][s:e] = i.data
        h5['X/indices'][s:e] = i.indices
        s = e
    save_attr('obs', '_index', 'ids', assay.cells)
    save_attr('var', '_index', 'ids', assay.feats)
    save_attr('var', 'gene_short_name', 'names', assay.feats)
    # AnnData-style metadata attributes for the sparse matrix and the two frames.
    attrs = {
        'encoding-type': 'csr_matrix',
        'encoding-version': '0.1.0',
        'shape': np.array([assay.cells.N, assay.feats.N])
    }
    for i, j in attrs.items():
        h5['X'].attrs[i] = j
    attrs = {
        '_index': '_index',
        'column-order': np.array(['_index'], dtype=object),
        'encoding-type': 'dataframe',
        'encoding-version': '0.1.0'
    }
    for i, j in attrs.items():
        h5['obs'].attrs[i] = j
    attrs = {
        '_index': '_index',
        'column-order': np.array(['_index', 'gene_short_name'], dtype=object),
        'encoding-type': 'dataframe',
        'encoding-version': '0.1.0'
    }
    for i, j in attrs.items():
        h5['var'].attrs[i] = j
    h5.close()
    return None
def to_mtx(assay, mtx_directory: str, compress: bool = False):
    """
    Save an assay as a Matrix Market directory.
    Args:
        assay: Scarf assay. For example: `ds.RNA`
        mtx_directory: Out directory where MTX file will be saved along with barcodes and features file
        compress: If True, then the files are compressed and saved with .gz extension. (Default value: False).
    Returns:
        None
    """
    from scipy.sparse import coo_matrix
    import gzip

    if os.path.isdir(mtx_directory) is False:
        os.mkdir(mtx_directory)
    n_feats_per_cell = assay.cells.fetch_all(f"{assay.name}_nFeatures").astype(int)
    tot_counts = int(n_feats_per_cell.sum())
    if compress:
        barcodes_fn = 'barcodes.tsv.gz'
        features_fn = 'features.tsv.gz'
        h = gzip.open(os.path.join(mtx_directory, 'matrix.mtx.gz'), 'wt')
    else:
        # Fixed: this branch previously set barcodes_fn = 'barcodes.tsv.gz',
        # which made pandas gzip-compress the barcodes file (compression is
        # inferred from the extension) while matrix and genes stayed plain.
        barcodes_fn = 'barcodes.tsv'
        features_fn = 'genes.tsv'
        h = open(os.path.join(mtx_directory, 'matrix.mtx'), 'w')
    h.write("%%MatrixMarket matrix coordinate integer general\n% Generated by Scarf\n")
    # Header line: n_features n_cells n_nonzero
    h.write(f"{assay.feats.N} {assay.cells.N} {tot_counts}\n")
    s = 0
    for i in tqdm(assay.rawData.blocks, total=assay.rawData.numblocks[0]):
        i = coo_matrix((i.compute()))
        # MTX uses 1-based indices; rows are offset by the cells written so far.
        df = pd.DataFrame({'col': i.col + 1, 'row': i.row + s + 1, 'd': i.data})
        df.to_csv(h, sep=' ', header=False, index=False, mode='a')
        s += i.shape[0]
    h.close()
    assay.cells.to_pandas_dataframe(['ids']).to_csv(
        os.path.join(mtx_directory, barcodes_fn),
        sep='\t', header=False, index=False)
    assay.feats.to_pandas_dataframe(['ids', 'names']).to_csv(
        os.path.join(mtx_directory, features_fn),
        sep='\t', header=False, index=False)
| [
"pandas.DataFrame",
"os.mkdir",
"tqdm.tqdm",
"h5py.File",
"zarr.open",
"h5py.special_dtype",
"os.path.isdir",
"dask.array.from_zarr",
"numpy.dtype",
"os.path.exists",
"numpy.ones",
"numpy.hstack",
"numpy.where",
"numpy.array",
"os.path.join",
"pandas.concat",
"numcodecs.Blosc"
] | [((2034, 2088), 'numcodecs.Blosc', 'Blosc', ([], {'cname': '"""lz4"""', 'clevel': '(5)', 'shuffle': 'Blosc.BITSHUFFLE'}), "(cname='lz4', clevel=5, shuffle=Blosc.BITSHUFFLE)\n", (2039, 2088), False, 'from numcodecs import Blosc\n'), ((2773, 2787), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2781, 2787), True, 'import numpy as np\n'), ((23287, 23311), 'zarr.open', 'zarr.open', (['zarr_fn', '"""r+"""'], {}), "(zarr_fn, 'r+')\n", (23296, 23311), False, 'import zarr\n'), ((24427, 24475), 'tqdm.tqdm', 'tqdm', (['df.blocks'], {'total': 'df.numblocks[0]', 'desc': 'msg'}), '(df.blocks, total=df.numblocks[0], desc=msg)\n', (24431, 24475), False, 'from tqdm import tqdm\n'), ((37086, 37115), 'h5py.File', 'h5py.File', (['h5ad_filename', '"""w"""'], {}), "(h5ad_filename, 'w')\n", (37095, 37115), False, 'import h5py\n'), ((37622, 37682), 'tqdm.tqdm', 'tqdm', (['assay.rawData.blocks'], {'total': 'assay.rawData.numblocks[0]'}), '(assay.rawData.blocks, total=assay.rawData.numblocks[0])\n', (37626, 37682), False, 'from tqdm import tqdm\n'), ((39962, 40022), 'tqdm.tqdm', 'tqdm', (['assay.rawData.blocks'], {'total': 'assay.rawData.numblocks[0]'}), '(assay.rawData.blocks, total=assay.rawData.numblocks[0])\n', (39966, 40022), False, 'from tqdm import tqdm\n'), ((2922, 2935), 'numpy.dtype', 'np.dtype', (['"""S"""'], {}), "('S')\n", (2930, 2935), True, 'import numpy as np\n'), ((5133, 5161), 'zarr.open', 'zarr.open', (['self.fn'], {'mode': '"""w"""'}), "(self.fn, mode='w')\n", (5142, 5161), False, 'import zarr\n'), ((7729, 7757), 'zarr.open', 'zarr.open', (['self.fn'], {'mode': '"""w"""'}), "(self.fn, mode='w')\n", (7738, 7757), False, 'import zarr\n'), ((11611, 11639), 'zarr.open', 'zarr.open', (['self.fn'], {'mode': '"""w"""'}), "(self.fn, mode='w')\n", (11620, 11639), False, 'import zarr\n'), ((14472, 14500), 'zarr.open', 'zarr.open', (['self.fn'], {'mode': '"""w"""'}), "(self.fn, mode='w')\n", (14481, 14500), False, 'import zarr\n'), ((17178, 17206), 'zarr.open', 
'zarr.open', (['self.fn'], {'mode': '"""w"""'}), "(self.fn, mode='w')\n", (17187, 17206), False, 'import zarr\n'), ((20735, 20763), 'zarr.open', 'zarr.open', (['self.fn'], {'mode': '"""w"""'}), "(self.fn, mode='w')\n", (20744, 20763), False, 'import zarr\n'), ((26358, 26380), 'zarr.open', 'zarr.open', (['self.iZname'], {}), '(self.iZname)\n', (26367, 26380), False, 'import zarr\n'), ((26425, 26457), 'zarr.open', 'zarr.open', (['self.oZname'], {'mode': '"""w"""'}), "(self.oZname, mode='w')\n", (26434, 26457), False, 'import zarr\n'), ((28438, 28505), 'dask.array.from_zarr', 'daskarr.from_zarr', (["self.iz[assay_name]['counts']"], {'inline_array': '(True)'}), "(self.iz[assay_name]['counts'], inline_array=True)\n", (28455, 28505), True, 'import dask.array as daskarr\n'), ((38123, 38163), 'numpy.array', 'np.array', (['[assay.cells.N, assay.feats.N]'], {}), '([assay.cells.N, assay.feats.N])\n', (38131, 38163), True, 'import numpy as np\n'), ((38297, 38331), 'numpy.array', 'np.array', (["['_index']"], {'dtype': 'object'}), "(['_index'], dtype=object)\n", (38305, 38331), True, 'import numpy as np\n'), ((38542, 38595), 'numpy.array', 'np.array', (["['_index', 'gene_short_name']"], {'dtype': 'object'}), "(['_index', 'gene_short_name'], dtype=object)\n", (38550, 38595), True, 'import numpy as np\n'), ((39267, 39295), 'os.path.isdir', 'os.path.isdir', (['mtx_directory'], {}), '(mtx_directory)\n', (39280, 39295), False, 'import os\n'), ((39314, 39337), 'os.mkdir', 'os.mkdir', (['mtx_directory'], {}), '(mtx_directory)\n', (39322, 39337), False, 'import os\n'), ((40075, 40142), 'pandas.DataFrame', 'pd.DataFrame', (["{'col': i.col + 1, 'row': i.row + s + 1, 'd': i.data}"], {}), "({'col': i.col + 1, 'row': i.row + s + 1, 'd': i.data})\n", (40087, 40142), True, 'import pandas as pd\n'), ((40309, 40349), 'os.path.join', 'os.path.join', (['mtx_directory', 'barcodes_fn'], {}), '(mtx_directory, barcodes_fn)\n', (40321, 40349), False, 'import os\n'), ((40467, 40507), 'os.path.join', 
'os.path.join', (['mtx_directory', 'features_fn'], {}), '(mtx_directory, features_fn)\n', (40479, 40507), False, 'import os\n'), ((26957, 26983), 'os.path.isdir', 'os.path.isdir', (['self.oZname'], {}), '(self.oZname)\n', (26970, 26983), False, 'import os\n'), ((29308, 29416), 'tqdm.tqdm', 'tqdm', (['raw_data[self.cellIdx].blocks'], {'desc': 'f"""Subsetting assay: {assay_name}"""', 'total': 'raw_data.numblocks[0]'}), "(raw_data[self.cellIdx].blocks, desc=f'Subsetting assay: {assay_name}',\n total=raw_data.numblocks[0])\n", (29312, 29416), False, 'from tqdm import tqdm\n'), ((34226, 34256), 'zarr.open', 'zarr.open', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (34235, 34256), False, 'import zarr\n'), ((35295, 35326), 'zarr.open', 'zarr.open', (['zarr_path'], {'mode': '"""r+"""'}), "(zarr_path, mode='r+')\n", (35304, 35326), False, 'import zarr\n'), ((36285, 36387), 'tqdm.tqdm', 'tqdm', (['assay.rawData.blocks'], {'total': 'assay.rawData.numblocks[0]', 'desc': 'f"""Writing data to merged file"""'}), "(assay.rawData.blocks, total=assay.rawData.numblocks[0], desc=\n f'Writing data to merged file')\n", (36289, 36387), False, 'from tqdm import tqdm\n'), ((39587, 39631), 'os.path.join', 'os.path.join', (['mtx_directory', '"""matrix.mtx.gz"""'], {}), "(mtx_directory, 'matrix.mtx.gz')\n", (39599, 39631), False, 'import os\n'), ((39740, 39781), 'os.path.join', 'os.path.join', (['mtx_directory', '"""matrix.mtx"""'], {}), "(mtx_directory, 'matrix.mtx')\n", (39752, 39781), False, 'import os\n'), ((27535, 27548), 'numpy.where', 'np.where', (['idx'], {}), '(idx)\n', (27543, 27548), True, 'import numpy as np\n'), ((33253, 33271), 'pandas.concat', 'pd.concat', (['ret_val'], {}), '(ret_val)\n', (33262, 33271), True, 'import pandas as pd\n'), ((35475, 35500), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (35489, 35500), False, 'import os\n'), ((35708, 35738), 'zarr.open', 'zarr.open', (['zarr_path'], {'mode': '"""w"""'}), "(zarr_path, 
mode='w')\n", (35717, 35738), False, 'import zarr\n'), ((36468, 36502), 'numpy.ones', 'np.ones', (['(i.shape[0], self.nFeats)'], {}), '((i.shape[0], self.nFeats))\n', (36475, 36502), True, 'import numpy as np\n'), ((9788, 9803), 'numpy.hstack', 'np.hstack', (['temp'], {}), '(temp)\n', (9797, 9803), True, 'import numpy as np\n'), ((37045, 37073), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (37063, 37073), False, 'import h5py\n'), ((34899, 34939), 'numpy.array', 'np.array', (["self.mergedCells['ids'].values"], {}), "(self.mergedCells['ids'].values)\n", (34907, 34939), True, 'import numpy as np\n')] |
import torch.nn as nn
import torch
import numpy as np
class GlobalLayerNorm(nn.Module):
    '''
    Global layer normalization (gLN): normalizes over both the channel and
    the time dimensions of a (batch, channel, time) tensor.

        gLN(F) = (F - E[F]) / (Var[F] + eps)**0.5 * gamma + beta
        E[F]   = 1/(NT) * sum_NT(F)
        Var[F] = 1/(NT) * sum_NT((F - E[F])**2)

    N: channel dimension, T: time dimension. gamma and beta are trainable
    parameters in R^{N*1}.

    Args:
        dim: number of channels (size of dimension 1 of the input)
        eps: small constant added to the variance for numerical stability
        elementwise_affine: if True, this module has learnable parameters
            initialized to ones (for weights) and zeros (for bias)
    '''
    def __init__(self, dim, eps=1e-05, elementwise_affine=True):
        super(GlobalLayerNorm, self).__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.ones(self.dim, 1))
            self.bias = nn.Parameter(torch.zeros(self.dim, 1))
        else:
            # keep the attribute names registered (as None) when affine is off
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)

    def forward(self, x):
        """
        Args:
            x: tensor of shape (batch, channel, time)
        Returns:
            normalized tensor of the same shape
        Raises:
            RuntimeError: if the input is not a 3D tensor
        """
        if x.dim() != 3:
            # Fixed: the original formatted `self.__name__`, which does not
            # exist on nn.Module instances and raised AttributeError instead
            # of the intended RuntimeError.
            raise RuntimeError('{} accept 3D tensor as input'.format(
                self.__class__.__name__))
        mean = torch.mean(x, (1, 2), keepdim=True)
        var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
        if self.elementwise_affine:
            x = self.weight * (x - mean) / torch.sqrt(var + self.eps) + self.bias
        else:
            x = (x - mean) / torch.sqrt(var + self.eps)
        return x
class CumulativeLayerNorm(nn.Module):
    '''
    Cumulative layer normalization (cLN).

    For an input of shape (batch, channel, time), each time step t is
    normalized with the mean/std accumulated over the channels and over all
    time steps <= t, making the operation causal.

    Reference:
        https://github.com/naplab/Conv-TasNet/blob/master/utility/models.py

    Args:
        dimension: number of channels
        eps: numerical-stability constant added to the variance
        trainable: if True, gain/bias are learnable nn.Parameters; otherwise
            they are fixed, non-trainable tensors
    '''
    def __init__(self, dimension, eps=1e-8, trainable=True):
        # Fixed: the original called super(cLN, self).__init__() -- `cLN` is
        # undefined here (the class was renamed), raising NameError.
        super(CumulativeLayerNorm, self).__init__()
        self.eps = eps
        if trainable:
            self.gain = nn.Parameter(torch.ones(1, dimension, 1))
            self.bias = nn.Parameter(torch.zeros(1, dimension, 1))
        else:
            # Fixed: `Variable` was never imported (and is deprecated);
            # plain tensors with requires_grad=False are equivalent.
            self.gain = torch.ones(1, dimension, 1, requires_grad=False)
            self.bias = torch.zeros(1, dimension, 1, requires_grad=False)

    def forward(self, input):
        # input size: (Batch, Freq, Time)
        # cumulative mean for each time step
        channel = input.size(1)
        time_step = input.size(2)

        step_sum = input.sum(1)  # B, T
        step_pow_sum = input.pow(2).sum(1)  # B, T
        cum_sum = torch.cumsum(step_sum, dim=1)  # B, T
        cum_pow_sum = torch.cumsum(step_pow_sum, dim=1)  # B, T

        # number of elements contributing to the statistics at each time step
        entry_cnt = np.arange(channel, channel * (time_step + 1), channel)
        entry_cnt = torch.from_numpy(entry_cnt).type(input.type())
        entry_cnt = entry_cnt.view(1, -1).expand_as(cum_sum)

        cum_mean = cum_sum / entry_cnt  # B, T
        # Var[X] = E[X^2] - E[X]^2, expressed via the cumulative sums
        cum_var = (cum_pow_sum - 2 * cum_mean * cum_sum) / entry_cnt + cum_mean.pow(2)  # B, T
        cum_std = (cum_var + self.eps).sqrt()  # B, T

        cum_mean = cum_mean.unsqueeze(1)
        cum_std = cum_std.unsqueeze(1)

        x = (input - cum_mean.expand_as(input)) / cum_std.expand_as(input)
        return x * self.gain.expand_as(x).type(x.type()) + self.bias.expand_as(x).type(x.type())
if __name__ == '__main__':
    # Smoke test for the cumulative layer norm.
    # Fixed: the original referenced `cLN`, which is undefined in this file
    # (the class is named CumulativeLayerNorm), raising NameError.
    x = torch.rand(2, 3, 3)
    m = CumulativeLayerNorm(3)
    print(m(x))
| [
"torch.mean",
"torch.ones",
"torch.sqrt",
"torch.cumsum",
"numpy.arange",
"torch.rand",
"torch.zeros",
"torch.from_numpy"
] | [((3451, 3470), 'torch.rand', 'torch.rand', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (3461, 3470), False, 'import torch\n'), ((1367, 1402), 'torch.mean', 'torch.mean', (['x', '(1, 2)'], {'keepdim': '(True)'}), '(x, (1, 2), keepdim=True)\n', (1377, 1402), False, 'import torch\n'), ((1414, 1463), 'torch.mean', 'torch.mean', (['((x - mean) ** 2)', '(1, 2)'], {'keepdim': '(True)'}), '((x - mean) ** 2, (1, 2), keepdim=True)\n', (1424, 1463), False, 'import torch\n'), ((2635, 2664), 'torch.cumsum', 'torch.cumsum', (['step_sum'], {'dim': '(1)'}), '(step_sum, dim=1)\n', (2647, 2664), False, 'import torch\n'), ((2695, 2728), 'torch.cumsum', 'torch.cumsum', (['step_pow_sum'], {'dim': '(1)'}), '(step_pow_sum, dim=1)\n', (2707, 2728), False, 'import torch\n'), ((2766, 2820), 'numpy.arange', 'np.arange', (['channel', '(channel * (time_step + 1))', 'channel'], {}), '(channel, channel * (time_step + 1), channel)\n', (2775, 2820), True, 'import numpy as np\n'), ((1007, 1030), 'torch.ones', 'torch.ones', (['self.dim', '(1)'], {}), '(self.dim, 1)\n', (1017, 1030), False, 'import torch\n'), ((1068, 1092), 'torch.zeros', 'torch.zeros', (['self.dim', '(1)'], {}), '(self.dim, 1)\n', (1079, 1092), False, 'import torch\n'), ((1604, 1630), 'torch.sqrt', 'torch.sqrt', (['(var + self.eps)'], {}), '(var + self.eps)\n', (1614, 1630), False, 'import torch\n'), ((2012, 2039), 'torch.ones', 'torch.ones', (['(1)', 'dimension', '(1)'], {}), '(1, dimension, 1)\n', (2022, 2039), False, 'import torch\n'), ((2078, 2106), 'torch.zeros', 'torch.zeros', (['(1)', 'dimension', '(1)'], {}), '(1, dimension, 1)\n', (2089, 2106), False, 'import torch\n'), ((2155, 2182), 'torch.ones', 'torch.ones', (['(1)', 'dimension', '(1)'], {}), '(1, dimension, 1)\n', (2165, 2182), False, 'import torch\n'), ((2238, 2266), 'torch.zeros', 'torch.zeros', (['(1)', 'dimension', '(1)'], {}), '(1, dimension, 1)\n', (2249, 2266), False, 'import torch\n'), ((2837, 2864), 'torch.from_numpy', 'torch.from_numpy', (['entry_cnt'], 
{}), '(entry_cnt)\n', (2853, 2864), False, 'import torch\n'), ((1530, 1556), 'torch.sqrt', 'torch.sqrt', (['(var + self.eps)'], {}), '(var + self.eps)\n', (1540, 1556), False, 'import torch\n')] |
import numpy as np
print(np.arange(1,50,3))
| [
"numpy.arange"
] | [((26, 45), 'numpy.arange', 'np.arange', (['(1)', '(50)', '(3)'], {}), '(1, 50, 3)\n', (35, 45), True, 'import numpy as np\n')] |
# coding=utf-8
import numpy as np
class RidgeRegression:
def __init__(self, lambd):
# param stores w
self.param = np.array([])
self.lambd = lambd
def fit(self, X, y):
# least square
X_T_X = np.dot(X.T, X)
self.param = np.array(np.dot(np.dot(np.matrix(X_T_X + np.eye(X_T_X.shape[0]) * self.lambd).I, X.T), y))
def predict(self, X):
result = np.dot(X, self.param)
return result
| [
"numpy.dot",
"numpy.array",
"numpy.eye"
] | [((136, 148), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (144, 148), True, 'import numpy as np\n'), ((242, 256), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (248, 256), True, 'import numpy as np\n'), ((414, 435), 'numpy.dot', 'np.dot', (['X', 'self.param'], {}), '(X, self.param)\n', (420, 435), True, 'import numpy as np\n'), ((319, 341), 'numpy.eye', 'np.eye', (['X_T_X.shape[0]'], {}), '(X_T_X.shape[0])\n', (325, 341), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
class Dataset_from_matrix(Dataset):
"""Face Landmarks dataset."""
def __init__(self, data_matrix):
"""
Args: create a torch dataset from a tensor data_matrix with size n * p
[treatment, features, outcome]
"""
self.data_matrix = data_matrix
self.num_data = data_matrix.shape[0]
def __len__(self):
return self.num_data
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = self.data_matrix[idx, :]
return (sample[0:-1], sample[-1])
class Dataset_ihdp(Dataset):
"""Face Landmarks dataset."""
def __init__(self, path='./dataset/ihdp/ihdp_npci_1-1000.all.npy', replications=10):
"""
Args: create a torch dataset from a tensor data_matrix with size n * p
[treatment, features, outcome]
"""
self.data_matrix = np.reshape(self.data_matrix, (-1, self.data_matrix.shape[1]))
self.num_data = self.data_matrix.shape[0] * replications
def __len__(self):
return self.num_data
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = self.data_matrix[idx, :]
return (sample[0:-1], sample[-1])
def get_iter(data_matrix, batch_size, shuffle=True):
dataset = Dataset_from_matrix(data_matrix)
iterator = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
return iterator
def get_iter_ihdp(path, batch_size, shuffle=True):
dataset = Dataset_ihdp(path)
iterator = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
return iterator
# Dataset_ihdp() | [
"torch.is_tensor",
"numpy.reshape",
"torch.utils.data.DataLoader"
] | [((1459, 1518), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle'}), '(dataset, batch_size=batch_size, shuffle=shuffle)\n', (1469, 1518), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1639, 1698), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle'}), '(dataset, batch_size=batch_size, shuffle=shuffle)\n', (1649, 1698), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((513, 533), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (528, 533), False, 'import torch\n'), ((982, 1043), 'numpy.reshape', 'np.reshape', (['self.data_matrix', '(-1, self.data_matrix.shape[1])'], {}), '(self.data_matrix, (-1, self.data_matrix.shape[1]))\n', (992, 1043), True, 'import numpy as np\n'), ((1206, 1226), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1221, 1226), False, 'import torch\n')] |
# alternative 3-hour intervals at the end of script
import os
import pandas as pd ; import numpy as np
import datetime as dt
###########################################################################################################
### LOADING DATA & format manipulations
###########################################################################################################
os.chdir("/home/hubert/Downloads/Data Cleaned/proxys/proxys")
BCH_med = pd.read_csv("XRP_medmean", sep=',')
BCH_med.interval = pd.to_datetime(BCH_med.interval, format='%Y/%m/%d %H:%M:%S')
##############################################################################################################
### PART I compute single proxies for all 15-min intervals and for all CCIES
### PART II compute market wide proxies
##############################################################################################################
##############################################################################################################
### PART I
##############################################################################################################
# buil columns announcing the year, month, day, hout, minute of the medianazed
Y = pd.DatetimeIndex(np.array(BCH_med.interval)).year
M = pd.DatetimeIndex(np.array(BCH_med.interval)).month
D = pd.DatetimeIndex(np.array(BCH_med.interval)).day
H = pd.DatetimeIndex(np.array(BCH_med.interval)).hour
T = pd.DatetimeIndex(np.array(BCH_med.interval)).minute
G = np.zeros(T.shape)
# for index, (t,g) in enumerate(zip(T,G)):
# if t == 5 or t==10 or t==15:
# g=15
# elif t==20 or t==25 or t==30:
# g=30
# elif t==35 or t==40 or t==45:
# g=45
# elif t==50 or t==55 or t==00:
# g=60
# G[index] = g
for index, (t,g) in enumerate(zip(T,G)):
if t == 0 or t==5 or t==10:
g=15
elif t==15 or t==20 or t==25:
g=30
elif t==30 or t==35 or t==40:
g=45
elif t==45 or t==50 or t==55:
g=60
G[index] = g
BCH_med.insert(len(BCH_med.columns), 'group', G)
BCH_med.insert(len(BCH_med.columns), 'hour', H)
BCH_med.insert(len(BCH_med.columns), 'day', D)
BCH_med.insert(len(BCH_med.columns), 'month', M)
BCH_med.insert(len(BCH_med.columns), 'year', Y)
################################################################################################
### COMPUTE RETURN AND SQUARED RETURNS
################################################################################################
# yeas: 1 , no: 0
inter_interval_returns = 1
#intra_interval_returns = 1
# compute Equally Weighted proxies (reintroduce groupby keys as index: hence 5 times same command)
# in the process we also compute the log return over these 15min intervals
def ewAverage(x):
return sum(x)/len(x)
def ret(x):
y = x[1:]
return sum(y)
f = {'PQS':ewAverage, 'DEPTH':ewAverage, 'return':ret}
# compute log returns over our base : 5 min intervals
if not inter_interval_returns:
BCH_med["return"] = (np.log(BCH_med.price)).diff()
EW = BCH_med.groupby(["year","month","day","hour","group"]).agg(f)
else:
EW = BCH_med.groupby(["year","month","day","hour","group"]).mean()
EW["return"] = (np.log(EW.price)).diff()
EW.reset_index(level=0, inplace=True)
EW.reset_index(level=0, inplace=True)
EW.reset_index(level=0, inplace=True)
EW.reset_index(level=0, inplace=True)
EW.reset_index(level=0, inplace=True)
# subset relevant features and include return as it is also taken as EW average
EW= EW[["year","month","day","hour","group","PQS","DEPTH","return"]]
EW["V"] = np.power(EW["return"],2)
EW = EW.rename(columns={"PQS":"EWPQS", "DEPTH":"EWDEPTH"})
# compute Size Weighted proxies ...
wm_pqs = lambda x: np.average(x, weights=BCH_med.loc[x.index, "DEPTH"])
wm_pes = lambda x: np.average(x, weights=BCH_med.loc[x.index, "amount"])
wm_pts = lambda x: np.average(x, weights=BCH_med.loc[x.index, "amount"])
f = {'PQS': wm_pqs, 'PES': wm_pes, 'PTS': wm_pts}
SW = BCH_med.groupby(["year","month","day","hour","group"]).agg(f)
SW.reset_index(level=0, inplace=True)
SW.reset_index(level=0, inplace=True)
SW.reset_index(level=0, inplace=True)
SW.reset_index(level=0, inplace=True)
SW.reset_index(level=0, inplace=True)
SW = SW[["year","month","day","hour","group", "PQS", "PES", "PTS"]]
SW = SW.rename(columns={"PQS":"SWPQS", "PES":"SWPES", "PTS":"SWPTS"})
# TW
# Define a lambda function to compute the weighted mean:
wm_s = lambda x: np.average(x, weights=BCH_med.loc[x.index, "tw"])
f = {'PQS': wm_s}
TW = BCH_med.groupby(["year","month","day","hour","group"]).agg(f)
TW.reset_index(level=0, inplace=True)
TW.reset_index(level=0, inplace=True)
TW.reset_index(level=0, inplace=True)
TW.reset_index(level=0, inplace=True)
TW.reset_index(level=0, inplace=True)
TW= TW[["year","month","day","hour","group", "PQS"]]
TW = TW.rename(columns={"PQS":"TWPQS"})
# merge everything
WProx = pd.merge(EW, SW, how="left", on=["year","month","day","hour","group"]).merge(TW, how="left", on=["year","month","day","hour","group"])
#WProx.rename(columns={'group':'minute'}, inplace=True)
#WProx["date"] = pd.to_datetime(WProx[['year', 'month', 'day', 'hour', 'minute']])
# retirer la premiere observation comme toujours qd on fais une diff alors quand on n'a pas le premier element
WProx= WProx.iloc[1:]
WProx.reset_index(drop=True, inplace=True)
WProx.rename(columns={'group':'minute'}, inplace=True)
WProx["date"] = pd.to_datetime(WProx[['year', 'month', 'day', 'hour', 'minute']])
# save it
os.chdir("/home/hubert/Downloads/Data Cleaned/proxys/all_proxies")
WProx.to_csv("XRP_all_proxies", index=False)
### ALTERNATIVE just in case ####
# ###########################################################################################################
# ### Classify all intervals for all days into 3-hour intervals
# ###########################################################################################################
# H = pd.DatetimeIndex(np.array(BCH_med.interval)).hour
# G = np.zeros(H.shape)
# for index, (h,g) in enumerate(zip(H,G)):
# if h==0 or h==1 or h==2:
# g=3
# elif h==3 or h==4 or h==5:
# g=6
# elif h==6 or h==7 or h==8:
# g=9
# elif h==9 or h==10 or h==11:
# g=12
# elif h==12 or h==13 or h==14:
# g=15
# elif h==15 or h==16 or h==17:
# g=18
# elif h==18 or h==19 or h==20:
# g=21
# elif h==21 or h==22 or h==23:
# g=24
# G[index] = g
# BCH_med.insert(len(BCH_med.columns), 'group', G)
# ###########################################################################################################
# ### Standadize wrt to intra group MEAN and ST_DEV
# ###########################################################################################################
# BCH_stdized = BCH_med.groupby("group").transform(lambda x: (x - x.mean()) / x.std())
| [
"numpy.average",
"numpy.log",
"pandas.read_csv",
"numpy.power",
"pandas.merge",
"numpy.zeros",
"pandas.to_datetime",
"numpy.array",
"os.chdir"
] | [((384, 445), 'os.chdir', 'os.chdir', (['"""/home/hubert/Downloads/Data Cleaned/proxys/proxys"""'], {}), "('/home/hubert/Downloads/Data Cleaned/proxys/proxys')\n", (392, 445), False, 'import os\n'), ((456, 491), 'pandas.read_csv', 'pd.read_csv', (['"""XRP_medmean"""'], {'sep': '""","""'}), "('XRP_medmean', sep=',')\n", (467, 491), True, 'import pandas as pd\n'), ((513, 573), 'pandas.to_datetime', 'pd.to_datetime', (['BCH_med.interval'], {'format': '"""%Y/%m/%d %H:%M:%S"""'}), "(BCH_med.interval, format='%Y/%m/%d %H:%M:%S')\n", (527, 573), True, 'import pandas as pd\n'), ((1513, 1530), 'numpy.zeros', 'np.zeros', (['T.shape'], {}), '(T.shape)\n', (1521, 1530), True, 'import numpy as np\n'), ((3508, 3533), 'numpy.power', 'np.power', (["EW['return']", '(2)'], {}), "(EW['return'], 2)\n", (3516, 3533), True, 'import numpy as np\n'), ((5347, 5412), 'pandas.to_datetime', 'pd.to_datetime', (["WProx[['year', 'month', 'day', 'hour', 'minute']]"], {}), "(WProx[['year', 'month', 'day', 'hour', 'minute']])\n", (5361, 5412), True, 'import pandas as pd\n'), ((5423, 5489), 'os.chdir', 'os.chdir', (['"""/home/hubert/Downloads/Data Cleaned/proxys/all_proxies"""'], {}), "('/home/hubert/Downloads/Data Cleaned/proxys/all_proxies')\n", (5431, 5489), False, 'import os\n'), ((3649, 3701), 'numpy.average', 'np.average', (['x'], {'weights': "BCH_med.loc[x.index, 'DEPTH']"}), "(x, weights=BCH_med.loc[x.index, 'DEPTH'])\n", (3659, 3701), True, 'import numpy as np\n'), ((3721, 3774), 'numpy.average', 'np.average', (['x'], {'weights': "BCH_med.loc[x.index, 'amount']"}), "(x, weights=BCH_med.loc[x.index, 'amount'])\n", (3731, 3774), True, 'import numpy as np\n'), ((3794, 3847), 'numpy.average', 'np.average', (['x'], {'weights': "BCH_med.loc[x.index, 'amount']"}), "(x, weights=BCH_med.loc[x.index, 'amount'])\n", (3804, 3847), True, 'import numpy as np\n'), ((4374, 4423), 'numpy.average', 'np.average', (['x'], {'weights': "BCH_med.loc[x.index, 'tw']"}), "(x, weights=BCH_med.loc[x.index, 
'tw'])\n", (4384, 4423), True, 'import numpy as np\n'), ((1258, 1284), 'numpy.array', 'np.array', (['BCH_med.interval'], {}), '(BCH_med.interval)\n', (1266, 1284), True, 'import numpy as np\n'), ((1312, 1338), 'numpy.array', 'np.array', (['BCH_med.interval'], {}), '(BCH_med.interval)\n', (1320, 1338), True, 'import numpy as np\n'), ((1367, 1393), 'numpy.array', 'np.array', (['BCH_med.interval'], {}), '(BCH_med.interval)\n', (1375, 1393), True, 'import numpy as np\n'), ((1420, 1446), 'numpy.array', 'np.array', (['BCH_med.interval'], {}), '(BCH_med.interval)\n', (1428, 1446), True, 'import numpy as np\n'), ((1474, 1500), 'numpy.array', 'np.array', (['BCH_med.interval'], {}), '(BCH_med.interval)\n', (1482, 1500), True, 'import numpy as np\n'), ((4821, 4895), 'pandas.merge', 'pd.merge', (['EW', 'SW'], {'how': '"""left"""', 'on': "['year', 'month', 'day', 'hour', 'group']"}), "(EW, SW, how='left', on=['year', 'month', 'day', 'hour', 'group'])\n", (4829, 4895), True, 'import pandas as pd\n'), ((2941, 2962), 'numpy.log', 'np.log', (['BCH_med.price'], {}), '(BCH_med.price)\n', (2947, 2962), True, 'import numpy as np\n'), ((3132, 3148), 'numpy.log', 'np.log', (['EW.price'], {}), '(EW.price)\n', (3138, 3148), True, 'import numpy as np\n')] |
import numpy as np
import scipy.integrate as spi
import matplotlib.pyplot as plt
#total no. agents
n = 50
#fraction of cooperators initial
fc0 = 0.7
#amount of resource available initial
R0 = 100
# Maximum amount of resource
Rmax = 200
# Social parameters
mu = np.linspace(2,4,num=20) # degree of cheating
ec = 0.483/n #level of effort (cooperators) #level of effort (defectors)
w = 15 #cost of harvesting
# Resource parameters
# c inflow, d discharge, q catchability
c, d, q = 50, 50, 1
# Extraction function
def ext(f):
E = n*(f*ec+(1-f)*ed)
return E
# Ostracism function (Gompertz growth)
def gompertz(f):
#parameters (from paper)
h = 0.34
t = -150
g = -10
gomp = h * np.exp(t * np.exp(g*f))
return gomp
#Cobb-Douglas production function
def cobbdoug(E,R):
gamma = 10
a = 0.6
b = 0.2
product = gamma*(E**a)*(R**b)
return product
#Utility functions
def utilCoop(fc,R):
E = ext(fc)
uc = ec*((cobbdoug(E,R)/E)-w)
return uc
def utilDefec(fc,R):
E = ext(fc)
ud = ed*((cobbdoug(E,R)/E)-w)-gompertz(fc)*(ed-ec)/ed
return ud
# harvest-cooperator equations
def deriv(t,y):
F, R = y
dRdt = c - d*(R/Rmax)**2 - q*ext(F)*R
dFdt = F*(1-F)*(utilCoop(F,R)-utilDefec(F,R))
return dFdt, dRdt
# initial condition
y0 = [fc0, R0]
# time points
t = np.linspace(0, 1000,num=1000)
interval = (t[0],t[-1])
# solve ODE
F = np.zeros(len(mu))
R = np.zeros(len(mu))
for j in range(len(mu)):
ed = mu[j]*ec
# Solve TSL model for each value of mu
ret = spi.solve_ivp(deriv, interval, y0, t_eval=t)
# Store final values for F and R (assuming convergence)
F[j] = ret.y[0,-1]
R[j] = ret.y[1,-1]
# Plot
plt.subplot(211)
plt.plot(F, mu, 'o', color='black')
plt.xlabel('Fraction Cooperators')
plt.ylabel('Degree of cheating mu')
plt.show()
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.solve_ivp",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((264, 289), 'numpy.linspace', 'np.linspace', (['(2)', '(4)'], {'num': '(20)'}), '(2, 4, num=20)\n', (275, 289), True, 'import numpy as np\n'), ((1338, 1368), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)'], {'num': '(1000)'}), '(0, 1000, num=1000)\n', (1349, 1368), True, 'import numpy as np\n'), ((1707, 1723), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (1718, 1723), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1759), 'matplotlib.pyplot.plot', 'plt.plot', (['F', 'mu', '"""o"""'], {'color': '"""black"""'}), "(F, mu, 'o', color='black')\n", (1732, 1759), True, 'import matplotlib.pyplot as plt\n'), ((1761, 1795), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction Cooperators"""'], {}), "('Fraction Cooperators')\n", (1771, 1795), True, 'import matplotlib.pyplot as plt\n'), ((1796, 1831), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Degree of cheating mu"""'], {}), "('Degree of cheating mu')\n", (1806, 1831), True, 'import matplotlib.pyplot as plt\n'), ((1834, 1844), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1842, 1844), True, 'import matplotlib.pyplot as plt\n'), ((1547, 1591), 'scipy.integrate.solve_ivp', 'spi.solve_ivp', (['deriv', 'interval', 'y0'], {'t_eval': 't'}), '(deriv, interval, y0, t_eval=t)\n', (1560, 1591), True, 'import scipy.integrate as spi\n'), ((717, 730), 'numpy.exp', 'np.exp', (['(g * f)'], {}), '(g * f)\n', (723, 730), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Int16
from cv_bridge import CvBridge
import cv2
import numpy as np
import tensorflow as tf
import os
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1],
padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def makeCNN(x,keep_prob):
# --- define CNN model
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([3, 3, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
return y_conv
class RosTensorFlow():
def __init__(self):
self._cv_bridge = CvBridge()
self.x = tf.placeholder(tf.float32, [None,28,28,1], name="x")
self.keep_prob = tf.placeholder("float")
self.y_conv = makeCNN(self.x,self.keep_prob)
self._saver = tf.train.Saver()
self._session = tf.InteractiveSession()
init_op = tf.global_variables_initializer()
self._session.run(init_op)
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PATH_TO_CKPT = ROOT_PATH + '/include/mnist/model.ckpt'
self._saver.restore(self._session, PATH_TO_CKPT)
self._sub = rospy.Subscriber('usb_cam/image_raw', Image, self.callback, queue_size=1)
self._pub = rospy.Publisher('/result_ripe', Int16, queue_size=1)
def callback(self, image_msg):
cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
cv_image_gray = cv2.cvtColor(cv_image, cv2.COLOR_RGB2GRAY)
ret,cv_image_binary = cv2.threshold(cv_image_gray,128,255,cv2.THRESH_BINARY_INV)
cv_image_28 = cv2.resize(cv_image_binary,(28,28))
np_image = np.reshape(cv_image_28,(1,28,28,1))
predict_num = self._session.run(self.y_conv, feed_dict={self.x:np_image,self.keep_prob:1.0})
answer = np.argmax(predict_num,1)
rospy.loginfo('%d' % answer)
self._pub.publish(answer)
def main(self):
rospy.spin()
if __name__ == '__main__':
rospy.init_node('ros_tensorflow_mnist')
tensor = RosTensorFlow()
tensor.main()
| [
"rospy.Subscriber",
"numpy.argmax",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.InteractiveSession",
"tensorflow.truncated_normal",
"cv2.cvtColor",
"os.path.dirname",
"tensorflow.placeholder",
"rospy.init_node",
"numpy.reshape",
"cv... | [((237, 275), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (256, 275), True, 'import tensorflow as tf\n'), ((285, 305), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (296, 305), True, 'import tensorflow as tf\n'), ((345, 374), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (356, 374), True, 'import tensorflow as tf\n'), ((384, 404), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (395, 404), True, 'import tensorflow as tf\n'), ((433, 489), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (445, 489), True, 'import tensorflow as tf\n'), ((544, 619), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (558, 619), True, 'import tensorflow as tf\n'), ((1151, 1188), 'tensorflow.reshape', 'tf.reshape', (['h_pool2', '[-1, 7 * 7 * 64]'], {}), '(h_pool2, [-1, 7 * 7 * 64])\n', (1161, 1188), True, 'import tensorflow as tf\n'), ((1274, 1305), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h_fc1', 'keep_prob'], {}), '(h_fc1, keep_prob)\n', (1287, 1305), True, 'import tensorflow as tf\n'), ((2948, 2987), 'rospy.init_node', 'rospy.init_node', (['"""ros_tensorflow_mnist"""'], {}), "('ros_tensorflow_mnist')\n", (2963, 2987), False, 'import rospy\n'), ((1541, 1551), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1549, 1551), False, 'from cv_bridge import CvBridge\n'), ((1570, 1625), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 28, 28, 1]'], {'name': '"""x"""'}), "(tf.float32, [None, 28, 28, 1], name='x')\n", (1584, 1625), True, 'import tensorflow as tf\n'), ((1648, 1671), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], 
{}), "('float')\n", (1662, 1671), True, 'import tensorflow as tf\n'), ((1748, 1764), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1762, 1764), True, 'import tensorflow as tf\n'), ((1789, 1812), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1810, 1812), True, 'import tensorflow as tf\n'), ((1840, 1873), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1871, 1873), True, 'import tensorflow as tf\n'), ((2139, 2212), 'rospy.Subscriber', 'rospy.Subscriber', (['"""usb_cam/image_raw"""', 'Image', 'self.callback'], {'queue_size': '(1)'}), "('usb_cam/image_raw', Image, self.callback, queue_size=1)\n", (2155, 2212), False, 'import rospy\n'), ((2233, 2285), 'rospy.Publisher', 'rospy.Publisher', (['"""/result_ripe"""', 'Int16'], {'queue_size': '(1)'}), "('/result_ripe', Int16, queue_size=1)\n", (2248, 2285), False, 'import rospy\n'), ((2415, 2457), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_image', 'cv2.COLOR_RGB2GRAY'], {}), '(cv_image, cv2.COLOR_RGB2GRAY)\n', (2427, 2457), False, 'import cv2\n'), ((2488, 2549), 'cv2.threshold', 'cv2.threshold', (['cv_image_gray', '(128)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(cv_image_gray, 128, 255, cv2.THRESH_BINARY_INV)\n', (2501, 2549), False, 'import cv2\n'), ((2569, 2606), 'cv2.resize', 'cv2.resize', (['cv_image_binary', '(28, 28)'], {}), '(cv_image_binary, (28, 28))\n', (2579, 2606), False, 'import cv2\n'), ((2624, 2663), 'numpy.reshape', 'np.reshape', (['cv_image_28', '(1, 28, 28, 1)'], {}), '(cv_image_28, (1, 28, 28, 1))\n', (2634, 2663), True, 'import numpy as np\n'), ((2778, 2803), 'numpy.argmax', 'np.argmax', (['predict_num', '(1)'], {}), '(predict_num, 1)\n', (2787, 2803), True, 'import numpy as np\n'), ((2811, 2839), 'rospy.loginfo', 'rospy.loginfo', (["('%d' % answer)"], {}), "('%d' % answer)\n", (2824, 2839), False, 'import rospy\n'), ((2903, 2915), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2913, 2915), False, 'import 
rospy\n'), ((1212, 1242), 'tensorflow.matmul', 'tf.matmul', (['h_pool2_flat', 'W_fc1'], {}), '(h_pool2_flat, W_fc1)\n', (1221, 1242), True, 'import tensorflow as tf\n'), ((1407, 1435), 'tensorflow.matmul', 'tf.matmul', (['h_fc1_drop', 'W_fc2'], {}), '(h_fc1_drop, W_fc2)\n', (1416, 1435), True, 'import tensorflow as tf\n'), ((1959, 1984), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1974, 1984), False, 'import os\n')] |
import numpy as np
n_loops = {
'A': 2, 'B': 3, 'C': 1, 'D': 2, 'E': 1, 'F': 1, 'G': 1, 'H': 1, 'I': 1, 'J': 1, 'K': 1, 'L': 1, 'M': 1, 'N': 1,
'O': 2, 'P': 2, 'Q': 2, 'R': 2, 'S': 1, 'T': 1, 'U': 1, 'V': 1, 'W': 1, 'X': 1, 'Y': 1, 'Z': 1
}
topology = [15, 4, 4]
simple_templates = [
[0.4*x + 0.5 for y in np.linspace(0, 2*np.pi, 30) for x in (np.cos(y), np.sin(y))] + [0.5]*32,
[0.4*x + 0.5 for y in np.linspace(0, 2*np.pi, 30) for x in (np.cos(y), np.sin(y))] + \
[0.2*x + 0.5 for y in np.linspace(0, 2*np.pi, 8) for x in (np.cos(y), np.sin(y))] + [0.5]*16,
[0.4*x + 0.5 for y in np.linspace(0, 2*np.pi, 30) for x in (np.cos(y), np.sin(y))] + \
[0.15*x + 0.5 for y in np.linspace(0, 2*np.pi, 8) for x in (np.cos(y), np.sin(y)+1.1)] + \
[0.15*x + 0.5 for y in np.linspace(0, 2*np.pi, 8) for x in (np.cos(y), np.sin(y)-1.1)]
]
letter_templates = [
[0.17, 0.9, 0.23, 0.9, 0.28, 0.9, 0.32, 0.77, 0.36, 0.65, 0.42, 0.65, 0.5, 0.65, 0.58, 0.65, 0.65,
0.65, 0.68, 0.76, 0.73, 0.9, 0.78, 0.9, 0.84, 0.9, 0.81, 0.79, 0.76, 0.67, 0.74, 0.59, 0.7, 0.48, 0.66,
0.36, 0.63, 0.27, 0.6, 0.2, 0.57, 0.1, 0.52, 0.1, 0.44, 0.1, 0.42, 0.17, 0.39, 0.27, 0.36, 0.34, 0.33,
0.43, 0.3, 0.52, 0.27, 0.6, 0.24, 0.71, 0.48, 0.29, 0.43, 0.42, 0.38, 0.56, 0.5, 0.57, 0.62, 0.57, 0.58,
0.45, 0.54, 0.32, 0.5, 0.19] + [0.5]*16,
[0.25, 0.12, 0.25, 0.21, 0.25, 0.27, 0.25, 0.34, 0.25, 0.41, 0.25, 0.48, 0.25, 0.55, 0.25, 0.62, 0.25,
0.68, 0.25, 0.75, 0.25, 0.89, 0.34, 0.9, 0.43, 0.9, 0.52, 0.89, 0.62, 0.87, 0.69, 0.82, 0.73, 0.75,
0.75, 0.68, 0.73, 0.59, 0.67, 0.52, 0.58, 0.47, 0.66, 0.43, 0.7, 0.36, 0.72, 0.31, 0.7, 0.23, 0.66,
0.17, 0.59, 0.13, 0.53, 0.11, 0.46, 0.1, 0.37, 0.11, 0.35, 0.19, 0.35, 0.31, 0.35, 0.44, 0.5, 0.44, 0.6,
0.38, 0.62, 0.31, 0.56, 0.21, 0.48, 0.18, 0.35, 0.52, 0.35, 0.65, 0.35, 0.82, 0.47, 0.82, 0.6, 0.77,
0.64, 0.68, 0.6, 0.57, 0.51, 0.53],
[0.39, 0.16, 0.27, 0.25, 0.21, 0.41, 0.19, 0.52, 0.22, 0.63, 0.25, 0.76, 0.33, 0.84, 0.43, 0.87, 0.56,
0.9, 0.68, 0.89, 0.8, 0.86, 0.79, 0.82, 0.78, 0.79, 0.64, 0.82, 0.52, 0.8, 0.43, 0.76, 0.36, 0.68, 0.32,
0.57, 0.32, 0.47, 0.34, 0.34, 0.42, 0.26, 0.48, 0.22, 0.57, 0.18, 0.7, 0.19, 0.78, 0.22, 0.79, 0.18,
0.81, 0.13, 0.73, 0.11, 0.62, 0.1, 0.51, 0.1] + [0.5 ]*32,
[0.18, 0.12, 0.18, 0.2, 0.18, 0.26, 0.18, 0.33, 0.18, 0.37, 0.18, 0.45, 0.18, 0.52, 0.18, 0.58, 0.18,
0.65, 0.18, 0.75, 0.18, 0.89, 0.27, 0.9, 0.34, 0.9, 0.44, 0.9, 0.52, 0.88, 0.61, 0.85, 0.67, 0.82, 0.74,
0.76, 0.78, 0.69, 0.81, 0.6, 0.82, 0.52, 0.82, 0.42, 0.79, 0.31, 0.73, 0.22, 0.66, 0.17, 0.6, 0.12,
0.54, 0.11, 0.47, 0.1, 0.38, 0.1, 0.3, 0.1, 0.28, 0.19, 0.28, 0.45, 0.28, 0.81, 0.48, 0.81, 0.67, 0.68,
0.71, 0.5, 0.68, 0.32, 0.58, 0.21] + [0.5]*16,
[0.28, 0.1, 0.28, 0.19, 0.28, 0.27, 0.28, 0.34, 0.28, 0.44, 0.28, 0.52, 0.28, 0.61, 0.28, 0.71, 0.28,
0.9, 0.5, 0.9, 0.72, 0.9, 0.72, 0.85, 0.72, 0.81, 0.56, 0.81, 0.38, 0.81, 0.38, 0.69, 0.38, 0.52, 0.52,
0.52, 0.69, 0.52, 0.69, 0.5, 0.69, 0.44, 0.54, 0.44, 0.38, 0.44, 0.38, 0.32, 0.38, 0.19, 0.54, 0.19,
0.71, 0.19, 0.71, 0.15, 0.71, 0.1, 0.56, 0.1] + [0.5]*32,
[0.28, 0.1, 0.28, 0.19, 0.28, 0.27, 0.28, 0.34, 0.28, 0.44, 0.28, 0.52, 0.28, 0.61, 0.28, 0.71, 0.28,
0.9, 0.33, 0.9, 0.39, 0.9, 0.39, 0.85, 0.39, 0.81, 0.39, 0.77, 0.39, 0.68, 0.39, 0.6, 0.39, 0.52, 0.53,
0.52, 0.7, 0.52, 0.7, 0.5, 0.7, 0.44, 0.55, 0.44, 0.39, 0.44, 0.39, 0.32, 0.39, 0.19, 0.55, 0.19, 0.72,
0.19, 0.72, 0.15, 0.72, 0.1, 0.57, 0.1] + [0.5]*32,
[0.8, 0.14, 0.64, 0.1, 0.46, 0.11, 0.34, 0.17, 0.23, 0.27, 0.19, 0.39, 0.17, 0.53, 0.2, 0.68, 0.33,
0.84, 0.49, 0.9, 0.83, 0.86, 0.83, 0.68, 0.83, 0.48, 0.72, 0.48, 0.57, 0.48, 0.57, 0.52, 0.57, 0.56,
0.62, 0.56, 0.73, 0.56, 0.73, 0.65, 0.73, 0.8, 0.6, 0.82, 0.42, 0.77, 0.3, 0.57, 0.31, 0.38, 0.4, 0.26,
0.48, 0.21, 0.6, 0.2, 0.78, 0.22, 0.79, 0.18] + [0.5]*32,
[0.2, 0.1, 0.2, 0.23, 0.2, 0.42, 0.2, 0.65, 0.2, 0.9, 0.24, 0.9, 0.31, 0.9, 0.31, 0.74, 0.31, 0.52,
0.45, 0.52, 0.69, 0.52, 0.69, 0.64, 0.69, 0.9, 0.74, 0.9, 0.8, 0.9, 0.8, 0.7, 0.8, 0.44, 0.8, 0.31, 0.8,
0.1, 0.75, 0.1, 0.69, 0.1, 0.69, 0.21, 0.69, 0.43, 0.61, 0.43, 0.5, 0.43, 0.4, 0.43, 0.31, 0.43, 0.31,
0.27, 0.31, 0.1, 0.25, 0.1] + [0.5]*32,
[0.45, 0.1, 0.45, 0.15, 0.45, 0.21, 0.45, 0.29, 0.45, 0.34, 0.45, 0.4, 0.45, 0.43, 0.45, 0.5, 0.45,
0.55, 0.45, 0.61, 0.45, 0.65, 0.45, 0.7, 0.45, 0.74, 0.45, 0.8, 0.45, 0.9, 0.49, 0.9, 0.55, 0.9, 0.55,
0.86, 0.55, 0.75, 0.55, 0.68, 0.55, 0.62, 0.55, 0.57, 0.55, 0.51, 0.55, 0.44, 0.55, 0.38, 0.55, 0.32,
0.55, 0.25, 0.55, 0.19, 0.55, 0.1, 0.5, 0.1] + [0.5]*32,
[0.55, 0.1, 0.55, 0.16, 0.55, 0.22, 0.55, 0.28, 0.55, 0.33, 0.55, 0.41, 0.55, 0.46, 0.55, 0.54, 0.55,
0.62, 0.55, 0.69, 0.5, 0.78, 0.45, 0.81, 0.32, 0.8, 0.31, 0.84, 0.31, 0.88, 0.37, 0.9, 0.43, 0.9, 0.55,
0.88, 0.59, 0.83, 0.64, 0.75, 0.65, 0.67, 0.66, 0.59, 0.66, 0.47, 0.66, 0.39, 0.66, 0.3, 0.66, 0.25,
0.66, 0.2, 0.66, 0.15, 0.66, 0.1, 0.59, 0.1] + [0.5]*32,
[0.22, 0.1, 0.22, 0.23, 0.22, 0.35, 0.22, 0.56, 0.22, 0.9, 0.26, 0.9, 0.32, 0.9, 0.32, 0.79, 0.32, 0.6,
0.36, 0.56, 0.4, 0.51, 0.5, 0.66, 0.66, 0.9, 0.71, 0.9, 0.78, 0.9, 0.7, 0.78, 0.65, 0.71, 0.58, 0.61,
0.47, 0.44, 0.57, 0.32, 0.6, 0.29, 0.67, 0.2, 0.76, 0.1, 0.69, 0.1, 0.63, 0.1, 0.52, 0.24, 0.32, 0.49,
0.32, 0.28, 0.32, 0.1, 0.27, 0.1]+ [0.5]*32,
[0.28, 0.1, 0.28, 0.14, 0.28, 0.21, 0.28, 0.27, 0.28, 0.36, 0.28, 0.44, 0.28, 0.51, 0.28, 0.59, 0.28,
0.63, 0.28, 0.72, 0.28, 0.9, 0.41, 0.9, 0.48, 0.9, 0.59, 0.9, 0.72, 0.9, 0.72, 0.85, 0.72, 0.81, 0.63,
0.81, 0.53, 0.81, 0.45, 0.81, 0.38, 0.81, 0.38, 0.63, 0.38, 0.55, 0.38, 0.44, 0.38, 0.36, 0.38, 0.29,
0.38, 0.23, 0.38, 0.16, 0.38, 0.1, 0.32, 0.1] + [0.5]*32,
[0.17, 0.15, 0.16, 0.29, 0.15, 0.44, 0.14, 0.59, 0.12, 0.9, 0.14, 0.9, 0.21, 0.9, 0.23, 0.61, 0.25,
0.25, 0.35, 0.58, 0.45, 0.9, 0.48, 0.9, 0.53, 0.9, 0.64, 0.6, 0.76, 0.25, 0.77, 0.56, 0.79, 0.9, 0.82,
0.9, 0.88, 0.9, 0.87, 0.65, 0.86, 0.53, 0.85, 0.38, 0.83, 0.15, 0.79, 0.15, 0.71, 0.15, 0.64, 0.35,
0.49, 0.75, 0.4, 0.45, 0.29, 0.15, 0.23, 0.15] + [0.5]*32,
[0.2, 0.1, 0.2, 0.33, 0.2, 0.49, 0.2, 0.67, 0.2, 0.9, 0.24, 0.9, 0.3, 0.9, 0.3, 0.7, 0.3, 0.53, 0.3,
0.42, 0.29, 0.23, 0.4, 0.42, 0.46, 0.52, 0.52, 0.62, 0.7, 0.9, 0.74, 0.9, 0.8, 0.9, 0.8, 0.76, 0.8,
0.53, 0.8, 0.34, 0.8, 0.1, 0.75, 0.1, 0.7, 0.1, 0.7, 0.2, 0.7, 0.35, 0.7, 0.47, 0.71, 0.76, 0.53,
0.45, 0.31, 0.1, 0.26, 0.1] + [0.5]*32,
[0.45, 0.1, 0.35, 0.13, 0.29, 0.16, 0.23, 0.22, 0.19, 0.28, 0.16, 0.34, 0.14, 0.42, 0.14, 0.5, 0.14,
0.57, 0.16, 0.66, 0.18, 0.72, 0.23, 0.79, 0.28, 0.84, 0.34, 0.87, 0.43, 0.9, 0.54, 0.9, 0.62, 0.88,
0.69, 0.85, 0.75, 0.8, 0.8, 0.73, 0.83, 0.65, 0.85, 0.53, 0.86, 0.49, 0.86, 0.41, 0.82, 0.31, 0.79,
0.25, 0.75, 0.2, 0.7, 0.15, 0.62, 0.1, 0.54, 0.1, 0.38, 0.21, 0.26, 0.4, 0.29, 0.68, 0.41, 0.81,
0.61, 0.79, 0.73, 0.61, 0.75, 0.4, 0.65, 0.23] + [0.5]*16,
[0.26, 0.12, 0.26, 0.24, 0.26, 0.33, 0.26, 0.39, 0.26, 0.51, 0.26, 0.59, 0.26, 0.65, 0.26, 0.71, 0.26,
0.9, 0.29, 0.9, 0.36, 0.9, 0.36, 0.81, 0.36, 0.74, 0.36, 0.68, 0.36, 0.58, 0.43, 0.58, 0.5, 0.59,
0.55, 0.56, 0.6, 0.54, 0.67, 0.5, 0.7, 0.47, 0.74, 0.42, 0.74, 0.36, 0.74, 0.28, 0.7, 0.21, 0.68,
0.17, 0.61, 0.13, 0.5, 0.1, 0.44, 0.1, 0.38, 0.1, 0.36, 0.19, 0.36, 0.29, 0.36, 0.49, 0.46, 0.51,
0.57, 0.48, 0.64, 0.37, 0.6, 0.23, 0.53, 0.19] + [0.5]*16,
[0.36, 0.13, 0.27, 0.21, 0.2, 0.35, 0.18, 0.44, 0.18, 0.5, 0.2, 0.58, 0.23, 0.65, 0.28, 0.71, 0.33,
0.76, 0.38, 0.79, 0.5, 0.81, 0.59, 0.84, 0.69, 0.87, 0.78, 0.9, 0.8, 0.87, 0.82, 0.83, 0.73, 0.81,
0.62, 0.78, 0.7, 0.73, 0.75, 0.68, 0.79, 0.6, 0.8, 0.5, 0.81, 0.43, 0.8, 0.36, 0.76, 0.25, 0.72,
0.19, 0.67, 0.13, 0.6, 0.1, 0.52, 0.1, 0.45, 0.1, 0.4, 0.2, 0.31, 0.3, 0.28, 0.47, 0.34, 0.66, 0.49,
0.73, 0.66, 0.66, 0.73, 0.43, 0.67, 0.26] + [0.5]*16,
[0.25, 0.11, 0.25, 0.3, 0.25, 0.44, 0.25, 0.59, 0.24, 0.9, 0.27, 0.9, 0.35, 0.9, 0.35, 0.82, 0.35,
0.71, 0.35, 0.66, 0.35, 0.55, 0.47, 0.55, 0.54, 0.58, 0.59, 0.65, 0.6, 0.73, 0.63, 0.8, 0.66, 0.9,
0.7, 0.9, 0.76, 0.9, 0.72, 0.74, 0.7, 0.64, 0.67, 0.59, 0.59, 0.52, 0.69, 0.45, 0.74, 0.35, 0.72,
0.22, 0.67, 0.15, 0.63, 0.13, 0.54, 0.1, 0.43, 0.1, 0.35, 0.18, 0.34, 0.29, 0.35, 0.47, 0.47, 0.47,
0.61, 0.41, 0.64, 0.32, 0.61, 0.24, 0.53, 0.18] + [0.5]*16,
[0.71, 0.14, 0.61, 0.1, 0.5, 0.1, 0.42, 0.12, 0.35, 0.16, 0.3, 0.24, 0.28, 0.31, 0.32, 0.43, 0.38,
0.48, 0.5, 0.54, 0.59, 0.59, 0.65, 0.69, 0.59, 0.79, 0.46, 0.81, 0.29, 0.78, 0.27, 0.86, 0.37, 0.9,
0.49, 0.9, 0.64, 0.87, 0.71, 0.79, 0.73, 0.71, 0.74, 0.65, 0.7, 0.56, 0.62, 0.49, 0.5, 0.45, 0.41,
0.38, 0.43, 0.21, 0.51, 0.19, 0.68, 0.22, 0.71, 0.14] + [0.5]*32,
[0.2, 0.1, 0.2, 0.15, 0.2, 0.19, 0.31, 0.19, 0.45, 0.19, 0.45, 0.3, 0.45, 0.35, 0.45, 0.42, 0.45, 0.5,
0.45, 0.58, 0.45, 0.66, 0.45, 0.73, 0.45, 0.9, 0.5, 0.9, 0.55, 0.9, 0.55, 0.74, 0.55, 0.67, 0.55,
0.59, 0.55, 0.53, 0.55, 0.43, 0.55, 0.37, 0.55, 0.29, 0.55, 0.19, 0.68, 0.19, 0.8, 0.19, 0.8, 0.14,
0.8, 0.1, 0.65, 0.1, 0.51, 0.1, 0.34, 0.1] + [0.5]*32,
[0.2, 0.1, 0.2, 0.32, 0.2, 0.41, 0.2, 0.55, 0.21, 0.67, 0.25, 0.78, 0.3, 0.86, 0.43, 0.9, 0.57, 0.89,
0.69, 0.83, 0.74, 0.76, 0.79, 0.59, 0.8, 0.44, 0.8, 0.29, 0.8, 0.1, 0.72, 0.1, 0.68, 0.1, 0.68,
0.23, 0.68, 0.36, 0.68, 0.54, 0.64, 0.64, 0.62, 0.74, 0.51, 0.78, 0.37, 0.77, 0.32, 0.68, 0.3, 0.55,
0.3, 0.45, 0.3, 0.24, 0.3, 0.1, 0.26, 0.1] + [0.5]*32,
[0.18, 0.1, 0.24, 0.31, 0.28, 0.41, 0.31, 0.52, 0.33, 0.58, 0.37, 0.7, 0.44, 0.9, 0.49, 0.9, 0.55, 0.9,
0.59, 0.79, 0.64, 0.64, 0.68, 0.53, 0.73, 0.4, 0.77, 0.29, 0.84, 0.1, 0.77, 0.1, 0.73, 0.1, 0.69,
0.2, 0.67, 0.26, 0.64, 0.35, 0.61, 0.43, 0.57, 0.57, 0.5, 0.79, 0.45, 0.63, 0.42, 0.5, 0.39, 0.42,
0.35, 0.3, 0.33, 0.22, 0.29, 0.1, 0.24, 0.1] + [0.5]*32,
[0.11, 0.18, 0.15, 0.36, 0.18, 0.47, 0.21, 0.58, 0.27, 0.82, 0.31, 0.82, 0.36, 0.82, 0.44, 0.54, 0.5,
0.27, 0.56, 0.53, 0.63, 0.82, 0.67, 0.82, 0.72, 0.82, 0.78, 0.61, 0.81, 0.49, 0.84, 0.38, 0.9, 0.18,
0.87, 0.18, 0.81, 0.18, 0.76, 0.38, 0.68, 0.73, 0.63, 0.5, 0.55, 0.18, 0.5, 0.18, 0.46, 0.18, 0.4,
0.41, 0.32, 0.73, 0.27, 0.48, 0.2, 0.18, 0.16, 0.18] + [0.5]*32,
[0.21, 0.1, 0.29, 0.23, 0.33, 0.3, 0.37, 0.36, 0.45, 0.5, 0.4, 0.58, 0.35, 0.66, 0.31, 0.72, 0.2, 0.9,
0.27, 0.9, 0.32, 0.9, 0.4, 0.75, 0.51, 0.56, 0.59, 0.71, 0.7, 0.9, 0.76, 0.9, 0.82, 0.9, 0.76, 0.79,
0.69, 0.68, 0.64, 0.6, 0.56, 0.49, 0.69, 0.3, 0.82, 0.1, 0.75, 0.1, 0.7, 0.1, 0.62, 0.23, 0.52,
0.42, 0.42, 0.25, 0.33, 0.1, 0.28, 0.1] + [0.5]*32,
[0.2, 0.1, 0.26, 0.22, 0.32, 0.32, 0.36, 0.4, 0.45, 0.56, 0.45, 0.63, 0.45, 0.71, 0.45, 0.78, 0.45,
0.9, 0.49, 0.9, 0.56, 0.9, 0.56, 0.84, 0.56, 0.72, 0.56, 0.64, 0.56, 0.56, 0.62, 0.45, 0.67, 0.37,
0.73, 0.27, 0.82, 0.1, 0.77, 0.1, 0.71, 0.1, 0.66, 0.18, 0.63, 0.26, 0.6, 0.31, 0.51, 0.48, 0.45,
0.37, 0.4, 0.27, 0.37, 0.2, 0.32, 0.1, 0.27, 0.1] + [0.5]*32,
[0.25, 0.1, 0.25, 0.14, 0.25, 0.19, 0.31, 0.19, 0.41, 0.19, 0.53, 0.19, 0.65, 0.19, 0.53, 0.37, 0.42,
0.53, 0.33, 0.66, 0.21, 0.84, 0.21, 0.87, 0.21, 0.9, 0.33, 0.9, 0.48, 0.9, 0.62, 0.9, 0.79, 0.9,
0.79, 0.87, 0.79, 0.81, 0.7, 0.81, 0.6, 0.81, 0.5, 0.81, 0.34, 0.81, 0.43, 0.68, 0.51, 0.56, 0.64,
0.38, 0.79, 0.16, 0.79, 0.13, 0.79, 0.1, 0.58, 0.1] + [0.5]*32
]
#######################################################################################################################
# Per-template counts — presumably the number of strokes/segments in each Chinese
# character template below; TODO confirm semantics against the consumer of this data
chinese_topology = [8, 10, 4, 6, 4, 4, 9, 4]
# Flattened (x, y) coordinate pairs in [0, 1] tracing each Chinese character template;
# one inner list per character — assumed normalized drawing coordinates, verify against caller
chinese_letter_templates = [
    [0.08, 0.33, 0.1, 0.36, 0.12, 0.38, 0.17, 0.33, 0.21, 0.3, 0.21, 0.43, 0.21, 0.61, 0.25, 0.61, 0.28, 0.61, 0.28,
     0.41, 0.28, 0.21, 0.3, 0.18, 0.35, 0.13, 0.32, 0.09, 0.29, 0.08, 0.18, 0.21, 0.32, 0.32, 0.43, 0.21, 0.5, 0.08,
     0.53, 0.09, 0.57, 0.12, 0.53, 0.16, 0.5, 0.2, 0.7, 0.2, 0.88, 0.2, 0.85, 0.27, 0.82, 0.34, 0.8, 0.34, 0.75,
     0.32, 0.78, 0.29, 0.79, 0.26, 0.63, 0.25, 0.47, 0.25, 0.41, 0.32, 0.37, 0.36, 0.35, 0.33, 0.33, 0.53, 0.41,
     0.44, 0.47, 0.36, 0.5, 0.38, 0.53, 0.39, 0.46, 0.49, 0.38, 0.57, 0.37, 0.55, 0.5, 0.54, 0.52, 0.58, 0.53, 0.61,
     0.6, 0.59, 0.64, 0.55, 0.65, 0.41, 0.65, 0.3, 0.62, 0.3, 0.58, 0.3, 0.59, 0.43, 0.59, 0.53, 0.55, 0.53, 0.71,
     0.39, 0.78, 0.47, 0.84, 0.54, 0.87, 0.52, 0.9, 0.49, 0.83, 0.42, 0.76, 0.35, 0.73, 0.37, 0.71, 0.66, 0.73, 0.63,
     0.76, 0.61, 0.85, 0.71, 0.9, 0.77, 0.87, 0.8, 0.85, 0.82, 0.78, 0.73, 0.67, 0.77, 0.72, 0.78, 0.75, 0.79, 0.72,
     0.86, 0.67, 0.89, 0.52, 0.9, 0.38, 0.9, 0.34, 0.86, 0.31, 0.81, 0.31, 0.73, 0.31, 0.65, 0.36, 0.65, 0.38, 0.65,
     0.38, 0.74, 0.39, 0.82, 0.5, 0.84, 0.62, 0.84, 0.66, 0.81, 0.2, 0.65, 0.23, 0.67, 0.26, 0.68, 0.2, 0.79, 0.16,
     0.87, 0.12, 0.85, 0.09, 0.83, 0.16, 0.73]
]
| [
"numpy.sin",
"numpy.cos",
"numpy.linspace"
] | [((320, 349), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(30)'], {}), '(0, 2 * np.pi, 30)\n', (331, 349), True, 'import numpy as np\n'), ((807, 835), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(8)'], {}), '(0, 2 * np.pi, 8)\n', (818, 835), True, 'import numpy as np\n'), ((358, 367), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (364, 367), True, 'import numpy as np\n'), ((369, 378), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (375, 378), True, 'import numpy as np\n'), ((419, 448), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(30)'], {}), '(0, 2 * np.pi, 30)\n', (430, 448), True, 'import numpy as np\n'), ((514, 542), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(8)'], {}), '(0, 2 * np.pi, 8)\n', (525, 542), True, 'import numpy as np\n'), ((612, 641), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(30)'], {}), '(0, 2 * np.pi, 30)\n', (623, 641), True, 'import numpy as np\n'), ((708, 736), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(8)'], {}), '(0, 2 * np.pi, 8)\n', (719, 736), True, 'import numpy as np\n'), ((844, 853), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (850, 853), True, 'import numpy as np\n'), ((457, 466), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (463, 466), True, 'import numpy as np\n'), ((468, 477), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (474, 477), True, 'import numpy as np\n'), ((551, 560), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (557, 560), True, 'import numpy as np\n'), ((562, 571), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (568, 571), True, 'import numpy as np\n'), ((650, 659), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (656, 659), True, 'import numpy as np\n'), ((661, 670), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (667, 670), True, 'import numpy as np\n'), ((745, 754), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (751, 754), True, 'import numpy as np\n'), ((855, 864), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (861, 864), True, 'import 
numpy as np\n'), ((756, 765), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (762, 765), True, 'import numpy as np\n')] |
import sys
sys.path.append('../')
import unittest
import pydgm
import numpy as np
class TestSOURCES(unittest.TestCase):
    """Check the fixed multigroup source for a one-cell 1D 3-group problem."""

    def setUp(self):
        """Configure a single-cell 1D problem with reflective boundaries."""
        options = {
            'spatial_dimension': 1,
            'fine_mesh_x': [1],
            'coarse_mesh_x': [0.0, 1.0],
            'material_map': [1],
            'xs_name': 'test/3gXS.anlxs'.ljust(256),
            'angle_order': 2,
            'angle_option': pydgm.angle.gl,
            'boundary_east': 1.0,
            'boundary_west': 1.0,
            'boundary_type': [1.0, 1.0],
            'allow_fission': True,
            'outer_print': False,
            'outer_tolerance': 1e-14,
            'equation_type': 'DD',
            'lamb': 1.0,
            'store_psi': False,
            'solver_type': 'fixed'.ljust(256),
            'source_value': 1.0,
            'scatter_leg_order': 0,
            'use_dgm': False,
        }
        for name, value in options.items():
            setattr(pydgm.control, name, value)
        # Initialize the dependencies
        pydgm.solver.initialize_solver()

    def test_compute_source(self):
        """The computed source must match the reference value in every group."""
        expected = [0.652227008050471, 0.500051646579093, 0.5]
        pydgm.sources.compute_source()
        for g, reference in enumerate(expected):
            with self.subTest(g=g):
                source = pydgm.state.mg_source[g]
                np.testing.assert_array_almost_equal(source, reference, 12, 'Failed for g={}'.format(g + 1))

    def tearDown(self):
        """Reset solver and control module state so tests stay independent."""
        pydgm.solver.finalize_solver()
        pydgm.control.finalize_control()
class TestSOURCES_2D(unittest.TestCase):
    """Check source and scattering moments for a one-cell 2D 1-group problem."""

    def setUp(self):
        """Configure a single-cell 2D problem with vacuum boundaries."""
        options = {
            'spatial_dimension': 2,
            'fine_mesh_x': [1],
            'fine_mesh_y': [1],
            'coarse_mesh_x': [0.0, 1.0],
            'coarse_mesh_y': [0.0, 1.0],
            'material_map': [1],
            'xs_name': 'test/1gXS.anlxs'.ljust(256),
            'boundary_east': 0.0,
            'boundary_west': 0.0,
            'boundary_north': 0.0,
            'boundary_south': 0.0,
            'angle_order': 2,
            'angle_option': pydgm.angle.gl,
            'allow_fission': True,
            'outer_print': False,
            'outer_tolerance': 1e-14,
            'equation_type': 'DD',
            'lamb': 1.0,
            'store_psi': False,
            'solver_type': 'fixed'.ljust(256),
            'source_value': 1.0,
            'scatter_leg_order': 1,
            'use_dgm': False,
        }
        for name, value in options.items():
            setattr(pydgm.control, name, value)
        # Initialize the dependencies
        pydgm.solver.initialize_solver()

    def test_compute_source(self):
        """The computed source and scattering source must match the references."""
        expected_source = [(1.0 + 0.5) / (2 * np.pi)]
        pydgm.sources.compute_source()
        source = pydgm.state.mg_source[0]
        np.testing.assert_array_almost_equal(source, expected_source, 12)
        expected_sigphi = np.array([0.3, 0.2, 0.2, 0.2]) / (2 * np.pi)
        np.testing.assert_array_almost_equal(pydgm.state.sigphi.flatten(), expected_sigphi, 12)

    def tearDown(self):
        """Reset solver and control module state so tests stay independent."""
        pydgm.solver.finalize_solver()
        pydgm.control.finalize_control()
class TestSOURCESdgm(unittest.TestCase):
    """Check the computed source for a 1D 4-group problem using the DGM solver.

    The 4 fine groups are collapsed into 2 coarse groups via the energy group
    map and the 'test/4gbasis' expansion basis.
    """

    def setUp(self):
        """Configure a single-cell 1D DGM problem and prepare flux/XS moments."""
        # Set the variables for the test
        pydgm.control.spatial_dimension = 1
        pydgm.control.fine_mesh_x = [1]
        pydgm.control.coarse_mesh_x = [0.0, 1.0]
        pydgm.control.material_map = [1]
        pydgm.control.angle_order = 2
        pydgm.control.angle_option = pydgm.angle.gl
        pydgm.control.boundary_type = [1.0, 1.0]
        pydgm.control.allow_fission = True
        pydgm.control.outer_print = False
        pydgm.control.inner_print = False
        pydgm.control.outer_tolerance = 1e-14
        pydgm.control.inner_tolerance = 1e-14
        pydgm.control.equation_type = 'DD'
        pydgm.control.lamb = 1.0
        # psi is required to compute DGM flux moments below
        pydgm.control.store_psi = True
        pydgm.control.solver_type = 'fixed'.ljust(256)
        pydgm.control.source_value = 1.0
        pydgm.control.scatter_leg_order = 0
        pydgm.control.use_dgm = True
        pydgm.control.xs_name = 'test/4gXS.anlxs'.ljust(256)
        # Collapse 4 fine groups into 2 coarse groups
        pydgm.control.energy_group_map = [1, 1, 2, 2]
        pydgm.control.dgm_basis_name = 'test/4gbasis'.ljust(256)

        # Initialize the dependencies; the call order matters: flux moments
        # must exist before cross-section moments can be computed and sliced
        pydgm.dgmsolver.initialize_dgmsolver()
        pydgm.dgmsolver.compute_flux_moments()
        pydgm.state.mg_phi = pydgm.dgm.phi_m[0]
        pydgm.dgmsolver.compute_xs_moments()
        pydgm.dgmsolver.slice_xs_moments(0)
        pydgm.state.update_fission_density()

    def test_compute_source(self):
        """The coarse-group source must match the reference value per group."""
        test = [0.727975095456, 0.707343815354]
        pydgm.sources.compute_source()
        for g in range(2):
            with self.subTest(g=g):
                source = pydgm.state.mg_source[g]
                np.testing.assert_array_almost_equal(source, test[g], 12, 'Failed for g={}'.format(g + 1))

    def tearDown(self):
        """Reset DGM solver and control module state between tests."""
        pydgm.dgmsolver.finalize_dgmsolver()
        pydgm.control.finalize_control()
if __name__ == '__main__':
    # Discover and run all test cases in this module when executed directly
    unittest.main()
| [
"sys.path.append",
"unittest.main",
"pydgm.sources.compute_source",
"pydgm.control.finalize_control",
"pydgm.dgmsolver.initialize_dgmsolver",
"pydgm.solver.initialize_solver",
"pydgm.solver.finalize_solver",
"pydgm.state.sigphi.flatten",
"pydgm.dgmsolver.slice_xs_moments",
"pydgm.dgmsolver.compute... | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((5193, 5208), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5206, 5208), False, 'import unittest\n'), ((1116, 1148), 'pydgm.solver.initialize_solver', 'pydgm.solver.initialize_solver', ([], {}), '()\n', (1146, 1148), False, 'import pydgm\n'), ((1252, 1282), 'pydgm.sources.compute_source', 'pydgm.sources.compute_source', ([], {}), '()\n', (1280, 1282), False, 'import pydgm\n'), ((1537, 1567), 'pydgm.solver.finalize_solver', 'pydgm.solver.finalize_solver', ([], {}), '()\n', (1565, 1567), False, 'import pydgm\n'), ((1576, 1608), 'pydgm.control.finalize_control', 'pydgm.control.finalize_control', ([], {}), '()\n', (1606, 1608), False, 'import pydgm\n'), ((2771, 2803), 'pydgm.solver.initialize_solver', 'pydgm.solver.initialize_solver', ([], {}), '()\n', (2801, 2803), False, 'import pydgm\n'), ((2892, 2922), 'pydgm.sources.compute_source', 'pydgm.sources.compute_source', ([], {}), '()\n', (2920, 2922), False, 'import pydgm\n'), ((2974, 3028), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['source', 'test', '(12)'], {}), '(source, test, 12)\n', (3010, 3028), True, 'import numpy as np\n'), ((3208, 3238), 'pydgm.solver.finalize_solver', 'pydgm.solver.finalize_solver', ([], {}), '()\n', (3236, 3238), False, 'import pydgm\n'), ((3247, 3279), 'pydgm.control.finalize_control', 'pydgm.control.finalize_control', ([], {}), '()\n', (3277, 3279), False, 'import pydgm\n'), ((4437, 4475), 'pydgm.dgmsolver.initialize_dgmsolver', 'pydgm.dgmsolver.initialize_dgmsolver', ([], {}), '()\n', (4473, 4475), False, 'import pydgm\n'), ((4484, 4522), 'pydgm.dgmsolver.compute_flux_moments', 'pydgm.dgmsolver.compute_flux_moments', ([], {}), '()\n', (4520, 4522), False, 'import pydgm\n'), ((4579, 4615), 'pydgm.dgmsolver.compute_xs_moments', 'pydgm.dgmsolver.compute_xs_moments', ([], {}), '()\n', (4613, 4615), 
False, 'import pydgm\n'), ((4624, 4659), 'pydgm.dgmsolver.slice_xs_moments', 'pydgm.dgmsolver.slice_xs_moments', (['(0)'], {}), '(0)\n', (4656, 4659), False, 'import pydgm\n'), ((4668, 4704), 'pydgm.state.update_fission_density', 'pydgm.state.update_fission_density', ([], {}), '()\n', (4702, 4704), False, 'import pydgm\n'), ((4797, 4827), 'pydgm.sources.compute_source', 'pydgm.sources.compute_source', ([], {}), '()\n', (4825, 4827), False, 'import pydgm\n'), ((5081, 5117), 'pydgm.dgmsolver.finalize_dgmsolver', 'pydgm.dgmsolver.finalize_dgmsolver', ([], {}), '()\n', (5115, 5117), False, 'import pydgm\n'), ((5126, 5158), 'pydgm.control.finalize_control', 'pydgm.control.finalize_control', ([], {}), '()\n', (5156, 5158), False, 'import pydgm\n'), ((3045, 3075), 'numpy.array', 'np.array', (['[0.3, 0.2, 0.2, 0.2]'], {}), '([0.3, 0.2, 0.2, 0.2])\n', (3053, 3075), True, 'import numpy as np\n'), ((3135, 3163), 'pydgm.state.sigphi.flatten', 'pydgm.state.sigphi.flatten', ([], {}), '()\n', (3161, 3163), False, 'import pydgm\n')] |
import json
import json
import logging
import math
import os
from collections import Counter
from itertools import product, chain
import jinja2
import networkx as nx
import numpy as np
import pandas as pd
from bokeh.colors import RGB
from bokeh.io import reset_output
from bokeh.models import ColumnDataSource, ColorBar, PrintfTickFormatter, LinearColorMapper
from bokeh.plotting import figure, output_file, save
from lazy import lazy
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
from pysrc.app.predefined import query_to_folder
from pysrc.papers.analysis.citations import find_top_cited_papers, build_cit_stats_df, merge_citation_stats, \
build_cocit_grouped_df
from pysrc.papers.analysis.graph import build_papers_graph, \
sparse_graph, to_weighted_graph
from pysrc.papers.analysis.node2vec import node2vec
from pysrc.papers.analysis.text import get_frequent_tokens
from pysrc.papers.analysis.text import texts_embeddings, vectorize_corpus, tokens_embeddings
from pysrc.papers.analysis.topics import get_topics_description, compute_topics_similarity_matrix, cluster_and_sort
from pysrc.papers.analyzer import PapersAnalyzer
from pysrc.papers.db.loaders import Loaders
from pysrc.papers.db.search_error import SearchError
from pysrc.papers.plot.plot_preprocessor import PlotPreprocessor
from pysrc.papers.plot.plotter import Plotter, PLOT_WIDTH, SHORT_PLOT_HEIGHT, TALL_PLOT_HEIGHT
from pysrc.papers.utils import cut_authors_list, factors_colormap, color_to_rgb, topics_palette
from pysrc.version import VERSION
# Module-level logger, per the standard getLogger(__name__) convention
logger = logging.getLogger(__name__)

# Identifier of the file-based analysis flavor implemented by AnalyzerFiles
ANALYSIS_FILES_TYPE = 'files'

# Candidate folders for analysis artifacts; the first existing one is used
# (see AnalyzerFiles.search_results_folder). The absolute path presumably
# targets a containerized deployment, the home path local development.
# Deployment and development
SEARCH_RESULTS_PATHS = ['/search_results', os.path.expanduser('~/.pubtrends/search_results')]
class AnalyzerFiles(PapersAnalyzer):
    """
    Papers analyzer that runs the full analysis pipeline and stores every
    artifact (ids, CSV tables, bokeh plots, interactive graphs) as files in
    a dedicated per-query folder instead of serving results interactively.
    """

    def __init__(self, loader, config, test=False):
        """
        :param loader: database loader for publications, citations, etc.
        :param config: analyzer configuration, forwarded to PapersAnalyzer
        :param test: test mode flag, forwarded to Loaders.source
        """
        super(AnalyzerFiles, self).__init__(loader, config)
        self.loader = loader
        self.source = Loaders.source(self.loader, test)

    def total_steps(self):
        # Number of progress steps reported by analyze_ids
        return 16

    def teardown(self):
        # Detach the progress log handler so handlers do not leak between runs
        self.progress.remove_handler()

    def analyze_ids(self, ids, source, query, sort, limit, topics, test=False, task=None):
        """
        Run the complete analysis pipeline for the given paper ids and save
        all artifacts into a per-query folder under the search results dir.

        :param ids: list of paper ids to analyze
        :param source: papers source name (used for folder naming and graph plot)
        :param query: original search query
        :param sort: search sort order (used for folder naming only)
        :param limit: search papers limit (used for folder naming only)
        :param topics: requested topics granularity, see PapersAnalyzer.get_topics_info
        :param test: test mode flag, forwarded to vectorization/embeddings
        :param task: optional task object for progress reporting
        :raise SearchError: if none of the ids is found in the database
        """
        self.query = query
        self.query_folder = os.path.join(self.search_results_folder, f"{VERSION.replace(' ', '_')}",
                                         query_to_folder(source, query, sort, limit))
        if not os.path.exists(self.query_folder):
            os.makedirs(self.query_folder)
        logger.info(f'Query folder: {self.query_folder}')
        path_ids = os.path.join(self.query_folder, 'ids.txt')
        logger.info(f'Ids saved to {path_ids}')
        with open(path_ids, 'w') as f:
            f.write('\n'.join(ids))

        self.progress.info('Loading publications from database', current=2, task=task)
        self.pub_df = self.loader.load_publications(ids)
        self.pub_types = list(set(self.pub_df['type']))
        self.df = self.pub_df
        if len(self.pub_df) == 0:
            raise SearchError('Nothing found in database')
        ids = list(self.pub_df['id'])  # Limit ids to existing papers only!
        self.progress.info(f'Found {len(ids)} papers in database', current=2, task=task)

        self.progress.info('Loading citations statistics for papers', current=3, task=task)
        cits_by_year_df = self.loader.load_citations_by_year(ids)
        self.progress.info(f'Found {len(cits_by_year_df)} records of citations by year', current=3, task=task)
        self.cit_stats_df = build_cit_stats_df(cits_by_year_df, len(ids))
        if len(self.cit_stats_df) == 0:
            logger.warning('No citations of papers were found')
        self.df, self.citation_years = merge_citation_stats(self.pub_df, self.cit_stats_df)

        # Load data about citations between given papers (excluding outer papers)
        # IMPORTANT: cit_df may contain not all the publications for query
        self.progress.info('Loading citations information', current=4, task=task)
        self.cit_df = self.loader.load_citations(ids)
        # Fixed progress step: this message belongs to step 4, not 3
        self.progress.info(f'Found {len(self.cit_df)} citations between papers', current=4, task=task)

        self.progress.info('Identifying top cited papers', current=5, task=task)
        logger.debug('Top cited papers')
        self.top_cited_papers, self.top_cited_df = find_top_cited_papers(self.df, PapersAnalyzer.TOP_CITED_PAPERS)

        self.progress.info('Analyzing title and abstract texts', current=6, task=task)
        self.corpus, self.corpus_tokens, self.corpus_counts = vectorize_corpus(
            self.pub_df,
            max_features=PapersAnalyzer.VECTOR_WORDS,
            min_df=PapersAnalyzer.VECTOR_MIN_DF,
            max_df=PapersAnalyzer.VECTOR_MAX_DF,
            test=test
        )
        logger.debug('Analyzing tokens embeddings')
        self.corpus_tokens_embedding = tokens_embeddings(
            self.corpus, self.corpus_tokens, test=test
        )
        logger.debug('Analyzing texts embeddings')
        self.texts_embeddings = texts_embeddings(
            self.corpus_counts, self.corpus_tokens_embedding
        )

        self.progress.info('Analyzing MESH terms', current=7, task=task)
        mesh_counter = Counter()
        for mesh_terms in self.df['mesh']:
            if mesh_terms:
                for mt in mesh_terms.split(','):
                    mesh_counter[mt] += 1

        path_mesh_terms_freqs = os.path.join(self.query_folder, "mesh_terms_freqs.html")
        logger.info(f'Save frequent MESH terms to file {path_mesh_terms_freqs}')
        output_file(filename=path_mesh_terms_freqs, title="Mesh terms")
        save(plot_mesh_terms(mesh_counter))
        reset_output()

        logger.info('Computing mesh terms')
        mesh_corpus_tokens, mesh_corpus_counts = vectorize_mesh_tokens(self.df, mesh_counter)
        if len(mesh_corpus_tokens) > 0:
            logger.info('Analyzing mesh terms timeline')
            freq_meshs = get_frequent_mesh_terms(self.top_cited_df)
            keywords_df, years = PlotPreprocessor.frequent_keywords_data(
                freq_meshs, self.df, mesh_corpus_tokens, mesh_corpus_counts, 20
            )
            path_mesh_terms_timeline = os.path.join(self.query_folder, 'timeline_mesh_terms.html')
            logger.info(f'Save frequent mesh terms to file {path_mesh_terms_timeline}')
            output_file(filename=path_mesh_terms_timeline, title="Mesh terms timeline")
            save(Plotter._plot_keywords_timeline(keywords_df, years))
            reset_output()

        self.progress.info('Calculating co-citations for selected papers', current=8, task=task)
        self.cocit_df = self.loader.load_cocitations(ids)
        cocit_grouped_df = build_cocit_grouped_df(self.cocit_df)
        logger.debug(f'Found {len(cocit_grouped_df)} co-cited pairs of papers')
        self.cocit_grouped_df = cocit_grouped_df[
            cocit_grouped_df['total'] >= PapersAnalyzer.SIMILARITY_COCITATION_MIN].copy()
        logger.debug(f'Filtered {len(self.cocit_grouped_df)} co-cited pairs of papers, '
                     f'threshold {PapersAnalyzer.SIMILARITY_COCITATION_MIN}')

        self.progress.info('Processing bibliographic coupling for selected papers', current=9, task=task)
        bibliographic_coupling_df = self.loader.load_bibliographic_coupling(ids)
        logger.debug(f'Found {len(bibliographic_coupling_df)} bibliographic coupling pairs of papers')
        self.bibliographic_coupling_df = bibliographic_coupling_df[
            bibliographic_coupling_df['total'] >= PapersAnalyzer.SIMILARITY_BIBLIOGRAPHIC_COUPLING_MIN].copy()
        logger.debug(f'Filtered {len(self.bibliographic_coupling_df)} bibliographic coupling pairs of papers '
                     f'threshold {PapersAnalyzer.SIMILARITY_BIBLIOGRAPHIC_COUPLING_MIN}')

        self.progress.info('Analyzing papers graph', current=10, task=task)
        self.papers_graph = build_papers_graph(
            self.df, self.cit_df, self.cocit_grouped_df, self.bibliographic_coupling_df,
        )
        self.progress.info(f'Built papers graph with {self.papers_graph.number_of_nodes()} nodes '
                           f'and {self.papers_graph.number_of_edges()} edges', current=10, task=task)

        logger.debug('Analyzing papers graph embeddings')
        self.weighted_similarity_graph = to_weighted_graph(self.papers_graph, PapersAnalyzer.similarity)
        gs = sparse_graph(self.weighted_similarity_graph)
        self.graph_embeddings = node2vec(self.df['id'], gs)

        logger.debug('Computing aggregated graph and text embeddings for papers')
        papers_embeddings = np.concatenate(
            (self.graph_embeddings * PapersAnalyzer.GRAPH_EMBEDDINGS_FACTOR,
             self.texts_embeddings * PapersAnalyzer.TEXT_EMBEDDINGS_FACTOR), axis=1)

        # NOTE: the original code computed the PCA projection unconditionally
        # and then recomputed it (or overwrote it with zeros) in the branches
        # below, so the first computation was dead work and has been removed.
        if len(self.df) > 1:
            logger.debug('Computing PCA projection')
            pca = PCA(n_components=min(len(papers_embeddings), PapersAnalyzer.PCA_COMPONENTS))
            t = StandardScaler().fit_transform(papers_embeddings)
            self.pca_coords = pca.fit_transform(t)
            logger.debug(f'Explained variation {int(np.sum(pca.explained_variance_ratio_) * 100)}%')
            logger.debug('Apply TSNE transformation on papers PCA coords')
            tsne_embeddings_2d = TSNE(n_components=2, random_state=42).fit_transform(self.pca_coords)
            self.df['x'] = tsne_embeddings_2d[:, 0]
            self.df['y'] = tsne_embeddings_2d[:, 1]
        else:
            # A single paper: PCA/TSNE are meaningless, use trivial coordinates
            self.pca_coords = np.zeros(shape=(len(self.df), 128))
            self.df['x'] = 0
            self.df['y'] = 0

        self.progress.info(f'Extracting {topics.lower()} number of topics from papers text and graph similarity',
                           current=11, task=task)
        topics_max_number, topic_min_size = PapersAnalyzer.get_topics_info(topics)
        clusters, dendrogram = cluster_and_sort(self.pca_coords, topics_max_number, topic_min_size)
        self.df['comp'] = clusters

        path_topics_sizes = os.path.join(self.query_folder, 'topics_sizes.html')
        logger.info(f'Save topics ratios to file {path_topics_sizes}')
        output_file(filename=path_topics_sizes, title="Topics sizes")
        save(plot_components_ratio(self.df))
        reset_output()

        # Renamed from `topics` to avoid shadowing the method parameter
        similarity_df, topic_labels = topics_similarity_data(
            self.pca_coords, self.df['comp']
        )
        similarity_df['type'] = ['Inside' if x == y else 'Outside'
                                 for (x, y) in zip(similarity_df['comp_x'], similarity_df['comp_y'])]
        path_topics_similarity = os.path.join(self.query_folder, 'topics_similarity.html')
        logger.info(f'Save similarity heatmap to file {path_topics_similarity}')
        output_file(filename=path_topics_similarity, title="Topics mean similarity")
        save(heatmap_topics_similarity(similarity_df, topic_labels))
        reset_output()

        self.progress.info('Analyzing topics descriptions', current=12, task=task)
        logger.debug('Computing clusters keywords')  # was a stray print()
        clusters_pids = self.df[['id', 'comp']].groupby('comp')['id'].apply(list).to_dict()
        clusters_description = get_topics_description(
            self.df, clusters_pids,
            self.corpus, self.corpus_tokens, self.corpus_counts,
            n_words=PapersAnalyzer.TOPIC_DESCRIPTION_WORDS
        )
        kwds = [(comp, ','.join([f'{t}:{v:.3f}' for t, v in vs[:20]])) for comp, vs in clusters_description.items()]
        self.kwd_df = pd.DataFrame(kwds, columns=['comp', 'kwd'])
        t = self.kwd_df.copy()
        t['comp'] += 1  # Topics are 1-based in saved artifacts
        path_tags = os.path.join(self.query_folder, 'tags.csv')
        logger.info(f'Save tags to {path_tags}')
        t.to_csv(path_tags, index=False)
        del t

        self.progress.info('Analyzing topics descriptions with MESH terms', current=13, task=task)
        # Rebuild a corpus-like structure from the MESH counts matrix:
        # each term repeated by its count for every paper
        mesh_corpus = [
            [[mesh_corpus_tokens[i]] * int(mc) for i, mc in
             enumerate(np.asarray(mesh_corpus_counts[pid, :]).reshape(-1)) if mc > 0]
            for pid in range(mesh_corpus_counts.shape[0])
        ]
        mesh_clusters_description = get_topics_description(
            self.df, clusters_pids,
            mesh_corpus, mesh_corpus_tokens, mesh_corpus_counts,
            n_words=PapersAnalyzer.TOPIC_DESCRIPTION_WORDS
        )
        meshs = [(comp, ','.join([f'{t}:{v:.3f}' for t, v in vs[:20]]))
                 for comp, vs in mesh_clusters_description.items()]
        mesh_df = pd.DataFrame(meshs, columns=['comp', 'kwd'])
        t = mesh_df.copy()
        t['comp'] += 1  # Topics are 1-based in saved artifacts
        path_mesh = os.path.join(self.query_folder, 'mesh.csv')
        logger.info(f'Save topic mesh terms to {path_mesh}')
        t.to_csv(path_mesh, index=False)
        del t

        max_year, min_year = self.df['year'].max(), self.df['year'].min()
        plot_components, data = PlotPreprocessor.component_size_summary_data(
            self.df, sorted(set(self.df['comp'])), min_year, max_year
        )
        path_topics_by_years = os.path.join(self.query_folder, 'topics_by_years.html')
        logger.info(f'Save topics years to file {path_topics_by_years}')
        output_file(filename=path_topics_by_years, title="Topics by years")
        save(Plotter._plot_topics_years_distribution(self.df, self.kwd_df, plot_components, data, min_year, max_year))
        reset_output()

        # A dendrogram makes sense only when there is more than one topic
        if len(set(self.df['comp'])) > 1:
            path_topics = os.path.join(self.query_folder, 'topics.html')
            logger.info(f'Save topics hierarchy with keywords to file {path_topics}')
            output_file(filename=path_topics, title="Topics dendrogram")
            save(Plotter._plot_topics_hierarchy_with_keywords(self.df, self.kwd_df, clusters, dendrogram, max_words=3))
            reset_output()
            path_topics_mesh = os.path.join(self.query_folder, 'topics_mesh.html')
            logger.info(f'Save topics hierarchy with mesh keywords to file {path_topics_mesh}')
            output_file(filename=path_topics_mesh, title="Topics dendrogram")
            save(Plotter._plot_topics_hierarchy_with_keywords(self.df, mesh_df, clusters, dendrogram, max_words=3))
            reset_output()

        self.df['topic_tags'] = [','.join(t for t, _ in clusters_description[c][:5]) for c in self.df['comp']]
        self.df['topic_meshs'] = [','.join(t for t, _ in mesh_clusters_description[c][:5]) for c in self.df['comp']]
        path_papers = os.path.join(self.query_folder, 'papers.csv')
        logger.info(f'Saving papers and components dataframes {path_papers}')
        t = self.df.copy()
        t['comp'] += 1  # Topics are 1-based in saved artifacts
        t.to_csv(path_papers, index=False)
        del t

        self.progress.info('Preparing papers graphs', current=14, task=task)
        logger.debug('Prepare sparse graph for visualization')
        self.sparse_papers_graph = self.prepare_sparse_papers_graph(self.papers_graph, self.weighted_similarity_graph)

        path_papers_graph = os.path.join(self.query_folder, 'papers.html')
        logger.info(f'Saving papers graph for bokeh {path_papers_graph}')
        output_file(filename=path_papers_graph, title="Papers graph")
        save(Plotter._plot_papers_graph(source, self.sparse_papers_graph, self.df,
                                        topics_tags=clusters_description, topics_meshs=mesh_clusters_description,
                                        add_callback=False,
                                        plot_width=PLOT_WIDTH, plot_height=PLOT_WIDTH))
        reset_output()

        path_papers_graph_interactive = os.path.join(self.query_folder, 'papers_interactive.html')
        logger.info(f'Saving papers graph for cytoscape.js {path_papers_graph_interactive}')
        template_path = os.path.realpath(os.path.join(__file__, '../../app/templates/papers_template.html'))
        save_sim_papers_graph_interactive(self.sparse_papers_graph, self.df, clusters_description,
                                          mesh_clusters_description, template_path, path_papers_graph_interactive)

        self.progress.info('Other analyses', current=15, task=task)
        plotter = Plotter(self)
        path_timeline = os.path.join(self.query_folder, 'timeline.html')
        logger.info(f'Save timeline to {path_timeline}')
        output_file(filename=path_timeline, title="Timeline")
        save(plotter.plot_papers_by_year())
        reset_output()

        path_terms_timeline = os.path.join(self.query_folder, "timeline_terms.html")
        logger.info(f'Save frequent tokens to file {path_terms_timeline}')
        freq_kwds = get_frequent_tokens(chain(*chain(*self.corpus)))
        output_file(filename=path_terms_timeline, title="Terms timeline")
        keywords_frequencies = plotter.plot_keywords_frequencies(freq_kwds)
        if keywords_frequencies is not None:
            # plot_keywords_frequencies may return None when there is nothing to show
            save(keywords_frequencies)
        reset_output()

        self.progress.done('Done analysis', task=task)

    @lazy
    def search_results_folder(self):
        """
        Return the first existing folder from SEARCH_RESULTS_PATHS (cached by @lazy).

        :raise RuntimeError: if none of the candidate folders exists
        """
        logger.info('Preparing search results folder')
        for path in SEARCH_RESULTS_PATHS:
            if os.path.exists(path):
                logger.info(f'Search results will be stored at {path}')
                return path
        # The loop has no break, so the original for-else was equivalent to this plain raise
        raise RuntimeError(f'Search results folder not found among: {SEARCH_RESULTS_PATHS}')
def plot_mesh_terms(mesh_counter, top=100, plot_width=PLOT_WIDTH, plot_height=TALL_PLOT_HEIGHT):
    """
    Build a bar chart of the most frequent MESH terms.

    :param mesh_counter: Counter of MESH term occurrences
    :param top: number of most common terms to display
    :param plot_width: plot width in pixels
    :param plot_height: plot height in pixels
    :return: bokeh figure with one colored bar per term
    """
    mc_terms = mesh_counter.most_common(top)
    terms = [mc[0] for mc in mc_terms]
    numbers = [mc[1] for mc in mc_terms]
    # Use the actual number of returned terms: the counter may hold fewer than
    # `top`, and ColumnDataSource requires all columns to have equal lengths
    n_terms = len(terms)
    cmap = factors_colormap(n_terms)
    colors = [color_to_rgb(cmap(i)) for i in range(n_terms)]
    source = ColumnDataSource(data=dict(terms=terms, numbers=numbers, colors=colors))
    p = figure(plot_width=plot_width, plot_height=plot_height,
               toolbar_location="right", tools="save", x_range=terms)
    p.vbar(x='terms', top='numbers', width=0.8, fill_alpha=0.5, color='colors', source=source)
    p.hover.tooltips = [("Term", '@terms'), ("Number", '@numbers')]
    p.sizing_mode = 'stretch_width'
    p.xaxis.axis_label = 'Mesh term'
    p.xaxis.major_label_orientation = math.pi / 4
    p.yaxis.axis_label = 'Number of papers'  # fixed typo 'Numbner'
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    p.axis.minor_tick_line_color = None
    p.outline_line_color = None
    return p
def vectorize_mesh_tokens(df, mesh_counter, max_features=1000,
                          min_df=0.1, max_df=PapersAnalyzer.VECTOR_MAX_DF):
    """
    Build a binary papers x MESH-terms matrix for terms with acceptable document frequency.

    :param df: papers dataframe with a 'mesh' column of comma-separated terms (may contain empty values)
    :param mesh_counter: Counter of MESH term occurrences across df
    :param max_features: hard cap on the number of MESH terms kept
    :param min_df: minimum fraction of papers a term must occur in to be kept
    :param max_df: maximum fraction of papers a term may occur in to be kept
    :return: tuple (features, counts) of the kept terms list and a 0/1 matrix
        of shape (len(df), len(features))
    """
    logger.debug(f'Vectorization mesh terms min_df={min_df} max_df={max_df} max_features={max_features}')
    features = []
    # most_common() yields terms by descending frequency, so the cap keeps the most frequent ones
    for mt, count in mesh_counter.most_common():
        if len(features) >= max_features:
            break
        if count < min_df * len(df) or count > max_df * len(df):
            continue
        features.append(mt)
    features_map = dict((f, i) for i, f in enumerate(features))
    # Use the module logger instead of the root logger for consistency
    logger.debug(f'Total {len(features)} mesh terms filtered')
    # NOTE(review): np.matrix is deprecated; callers only index with [i, j] and
    # reshape rows via np.asarray, so migrating to a plain ndarray looks safe — confirm
    counts = np.asmatrix(np.zeros(shape=(len(df), len(features))))
    for i, mesh_tokens in enumerate(df['mesh']):
        if mesh_tokens:
            for mt in mesh_tokens.split(','):
                if mt in features_map:
                    counts[i, features_map[mt]] = 1
    logger.debug(f'Vectorized corpus size {counts.shape}')
    if counts.shape[1] != 0:
        tokens_counts = np.asarray(np.sum(counts, axis=0)).reshape(-1)
        tokens_freqs = tokens_counts / len(df)
        logger.debug(f'Terms frequencies min={tokens_freqs.min()}, max={tokens_freqs.max()}, '
                     f'mean={tokens_freqs.mean()}, std={tokens_freqs.std()}')
    return features, counts
def get_frequent_mesh_terms(df, fraction=0.1, min_tokens=20):
    """
    Collect the most frequent MESH terms of the given papers.

    :param df: papers dataframe with a 'mesh' column of comma-separated terms
    :param fraction: fraction of distinct terms to keep (at least min_tokens)
    :param min_tokens: lower bound on the number of terms kept
    :return: dict mapping each kept term to its count divided by the number of distinct terms
    """
    counter = Counter(
        term
        for mesh_terms in df['mesh'] if mesh_terms
        for term in mesh_terms.split(',')
    )
    distinct = len(counter)
    top_n = max(min_tokens, int(distinct * fraction))
    return {term: cnt / distinct for term, cnt in counter.most_common(top_n)}
def components_ratio_data(df):
    """
    Compute topic labels and their percentage share of all papers.

    :param df: papers dataframe with 'id' and 'comp' columns
    :return: tuple (comps, ratios) of 1-based topic labels (strings) and percentages
    """
    sizes = df.groupby('comp')['id'].count()
    total = len(df)
    ratios = [100 * size / total for size in sizes]
    # Topics are numbered from 1 in the UI while 'comp' is 0-based
    comps = [str(c + 1) for c in sizes.index]
    return comps, ratios
def plot_components_ratio(df, plot_width=PLOT_WIDTH, plot_height=SHORT_PLOT_HEIGHT):
    """
    Build a bar chart showing each topic's share of papers, in percent.

    :param df: papers dataframe with 'comp' and 'id' columns
    :param plot_width: bokeh figure width
    :param plot_height: bokeh figure height
    :return: configured bokeh figure
    """
    labels, percentages = components_ratio_data(df)
    n_topics = len(labels)
    palette = factors_colormap(n_topics)
    bar_colors = [color_to_rgb(palette(i)) for i in range(n_topics)]
    data_source = ColumnDataSource(data=dict(comps=labels, ratios=percentages, colors=bar_colors))
    p = figure(plot_width=plot_width, plot_height=plot_height,
               toolbar_location="right", tools="save", x_range=labels)
    p.vbar(x='comps', top='ratios', width=0.8, fill_alpha=0.5, color='colors', source=data_source)
    p.hover.tooltips = [("Topic", '@comps'), ("Amount", '@ratios %')]
    p.sizing_mode = 'stretch_width'
    p.xaxis.axis_label = 'Topic'
    p.yaxis.axis_label = 'Percentage of papers'
    # Hide grid lines and minor ticks for a cleaner bar chart
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    p.axis.minor_tick_line_color = None
    p.outline_line_color = None
    return p
def topics_similarity_data(papers_embeddings, comps):
    """
    Prepare a long-format dataframe of pairwise topic similarities.

    :param papers_embeddings: embeddings of papers used to compute topic similarity
    :param comps: per-paper topic assignments
    :return: (similarity_topics_df, components) — a dataframe with columns
        'comp_x', 'comp_y' (1-based topic labels as strings) and 'similarity',
        plus the ordered list of topic labels
    """
    sim_matrix = compute_topics_similarity_matrix(papers_embeddings, comps)
    # Topics are renumbered from 1 for display
    components = [str(c + 1) for c in sorted(set(comps))]
    n = len(components)
    records = [{'comp_x': i, 'comp_y': j, 'similarity': sim_matrix[i, j]}
               for i in range(n) for j in range(n)]
    similarity_topics_df = pd.DataFrame(records)
    for col in ('comp_x', 'comp_y'):
        similarity_topics_df[col] = similarity_topics_df[col].apply(lambda x: x + 1).astype(str)
    return similarity_topics_df, components
def heatmap_topics_similarity(similarity_df, topics, plot_width=PLOT_WIDTH, plot_height=TALL_PLOT_HEIGHT):
    """
    Render a topic-to-topic similarity heatmap with an attached color bar.

    :param similarity_df: long-format dataframe with 'comp_x', 'comp_y', 'similarity'
    :param topics: ordered topic labels used for both axes
    :param plot_width: bokeh figure width
    :param plot_height: bokeh figure height
    :return: configured bokeh figure
    """
    logger.debug('Visualizing topics similarity with heatmap')
    n_shades = 10
    pubu_cmap = plt.cm.get_cmap('PuBu', n_shades)
    # Convert matplotlib RGBA (0..1 floats) to bokeh RGB (0..255 ints)
    shades = [RGB(*[round(channel * 255) for channel in pubu_cmap(i)[:3]]) for i in range(n_shades)]
    color_mapper = LinearColorMapper(palette=shades,
                                   low=similarity_df.similarity.min(),
                                   high=similarity_df.similarity.max())
    p = figure(x_range=topics, y_range=topics,
               x_axis_location="below", plot_width=plot_width, plot_height=plot_height,
               tools="hover,pan,tap,wheel_zoom,box_zoom,reset,save", toolbar_location="right",
               tooltips=[('Topic 1', '@comp_x'),
                         ('Topic 2', '@comp_y'),
                         ('Similarity', '@similarity')])
    p.sizing_mode = 'stretch_width'
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    p.axis.major_label_text_font_size = "10pt"
    p.axis.major_label_standoff = 0
    p.rect(x="comp_x", y="comp_y", width=1, height=1,
           source=similarity_df,
           fill_color={'field': 'similarity', 'transform': color_mapper},
           line_color=None)
    color_bar = ColorBar(color_mapper=color_mapper, major_label_text_font_size="10pt",
                         formatter=PrintfTickFormatter(format="%.2f"),
                         label_standoff=11, border_line_color=None, location=(0, 0))
    p.add_layout(color_bar, 'right')
    return p
def save_sim_papers_graph_interactive(gs, df, clusters_description, mesh_clusters_description,
                                      template_path, path):
    """
    Export the similarity papers graph to an interactive cytoscape.js HTML page.

    :param gs: similarity graph (networkx) of papers
    :param df: papers dataframe with 'id', 'comp', 'x', 'y' and metadata columns
    :param clusters_description: topic -> list of (keyword, weight) pairs
    :param mesh_clusters_description: topic -> list of (mesh term, weight) pairs
    :param template_path: path to the Jinja2 HTML template
    :param path: output HTML file path
    """
    # Fixed: was logging.info — use the module logger consistently with the rest of the function
    logger.info('Saving papers graph for cytoscape.js')
    # Comma-separated lines of the top 5 descriptors per topic
    topics_tags = {c: ','.join(t for t, _ in clusters_description[c][:5]) for c in sorted(set(df['comp']))}
    topics_meshs = {c: ','.join(t for t, _ in mesh_clusters_description[c][:5]) for c in sorted(set(df['comp']))}
    logger.debug('Creating graph')
    gss = nx.Graph()
    for (u, v) in gs.edges():
        gss.add_edge(u, v)
    # Keep isolated papers too, so every node of gs appears in the export
    for n in gs.nodes():
        if not gss.has_node(n):
            gss.add_node(n)
    logger.debug('Collect attributes for nodes')
    attrs = {}
    for node in df['id']:
        sel = df[df['id'] == node]
        attrs[node] = dict(
            title=sel['title'].values[0],
            authors=cut_authors_list(sel['authors'].values[0]),
            journal=sel['journal'].values[0],
            year=int(sel['year'].values[0]),
            cited=int(sel['total'].values[0]),
            topic=int(sel['comp'].values[0]),
            # These can be heavy
            abstract=sel['abstract'].values[0],
            mesh=sel['mesh'].values[0],
            keywords=sel['keywords'].values[0]
        )
    nx.set_node_attributes(gss, attrs)
    graph_cs = nx.cytoscape_data(gss)['elements']
    logger.debug('Layout')
    maxy = df['y'].max()
    for node_cs in graph_cs['nodes']:
        nid = node_cs['data']['id']
        sel = df.loc[df['id'] == nid]
        # Adjust vertical axis with bokeh graph
        node_cs['position'] = dict(x=int(sel['x'].values[0] * 8),
                                   y=int((maxy - sel['y'].values[0]) * 6))
    with open(template_path) as f:
        text = f.read()
    # Render the template with topic colors/labels and the serialized graph
    html = jinja2.Environment(loader=jinja2.BaseLoader()).from_string(text).render(
        topics_palette_json=json.dumps(topics_palette(df)),
        topics_tags_json=json.dumps(topics_tags),
        topics_meshs_json=json.dumps(topics_meshs),
        graph_cytoscape_json=json.dumps(graph_cs)
    )
    with open(path, 'w') as f:
        f.write(html)
    logger.debug('Done')
# Human-readable descriptions of the artifacts produced by the analysis,
# keyed by output file name.
FILES_WITH_DESCRIPTIONS = {
    'ids.txt': 'List of ids returned by search request',
    'papers.csv': 'Detailed information about papers used for analysis',
    'mesh.csv': 'Information about MESH terms for each topic',
    'tags.csv': 'Information about keywords for each topic',
    'timeline.html': 'Overall number of papers per year',
    'timeline_terms.html': 'Most popular keywords per year',
    'timeline_mesh_terms.html': 'Most popular MESH terms per year',
    'mesh_terms_freqs.html': 'Frequency of MESH terms used in papers',
    'papers.html': 'Graph representation of papers',
    'papers_interactive.html': 'Interactive version of papers graph with advanced filtering, coloring',
    'topics.html': 'Topics hierarchy',
    'topics_mesh.html': 'Topics hierarchy with MESH terms',
    'topics_by_years.html': 'Topics per year',
    'topics_similarity.html': 'Similarity between papers within topics',
    'topics_sizes.html': 'Topics sizes',
}
| [
"pysrc.papers.plot.plotter.Plotter",
"pysrc.app.predefined.query_to_folder",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"pysrc.papers.analysis.graph.to_weighted_graph",
"json.dumps",
"bokeh.plotting.output_file",
"pysrc.papers.analysis.text.tokens_embeddings",
"pysrc.papers.analysis.node2v... | [((1647, 1674), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1664, 1674), False, 'import logging\n'), ((1779, 1828), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.pubtrends/search_results"""'], {}), "('~/.pubtrends/search_results')\n", (1797, 1828), False, 'import os\n'), ((18042, 18063), 'pysrc.papers.utils.factors_colormap', 'factors_colormap', (['top'], {}), '(top)\n', (18058, 18063), False, 'from pysrc.papers.utils import cut_authors_list, factors_colormap, color_to_rgb, topics_palette\n'), ((18216, 18330), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'plot_width', 'plot_height': 'plot_height', 'toolbar_location': '"""right"""', 'tools': '"""save"""', 'x_range': 'terms'}), "(plot_width=plot_width, plot_height=plot_height, toolbar_location=\n 'right', tools='save', x_range=terms)\n", (18222, 18330), False, 'from bokeh.plotting import figure, output_file, save\n'), ((20206, 20215), 'collections.Counter', 'Counter', ([], {}), '()\n', (20213, 20215), False, 'from collections import Counter\n'), ((21009, 21034), 'pysrc.papers.utils.factors_colormap', 'factors_colormap', (['n_comps'], {}), '(n_comps)\n', (21025, 21034), False, 'from pysrc.papers.utils import cut_authors_list, factors_colormap, color_to_rgb, topics_palette\n'), ((21189, 21303), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'plot_width', 'plot_height': 'plot_height', 'toolbar_location': '"""right"""', 'tools': '"""save"""', 'x_range': 'comps'}), "(plot_width=plot_width, plot_height=plot_height, toolbar_location=\n 'right', tools='save', x_range=comps)\n", (21195, 21303), False, 'from bokeh.plotting import figure, output_file, save\n'), ((21831, 21889), 'pysrc.papers.analysis.topics.compute_topics_similarity_matrix', 'compute_topics_similarity_matrix', (['papers_embeddings', 'comps'], {}), '(papers_embeddings, comps)\n', (21863, 21889), False, 'from pysrc.papers.analysis.topics import get_topics_description, 
compute_topics_similarity_matrix, cluster_and_sort\n'), ((22656, 22685), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""PuBu"""', 'step'], {}), "('PuBu', step)\n", (22671, 22685), True, 'from matplotlib import pyplot as plt\n'), ((22957, 23258), 'bokeh.plotting.figure', 'figure', ([], {'x_range': 'topics', 'y_range': 'topics', 'x_axis_location': '"""below"""', 'plot_width': 'plot_width', 'plot_height': 'plot_height', 'tools': '"""hover,pan,tap,wheel_zoom,box_zoom,reset,save"""', 'toolbar_location': '"""right"""', 'tooltips': "[('Topic 1', '@comp_x'), ('Topic 2', '@comp_y'), ('Similarity', '@similarity')]"}), "(x_range=topics, y_range=topics, x_axis_location='below', plot_width=\n plot_width, plot_height=plot_height, tools=\n 'hover,pan,tap,wheel_zoom,box_zoom,reset,save', toolbar_location=\n 'right', tooltips=[('Topic 1', '@comp_x'), ('Topic 2', '@comp_y'), (\n 'Similarity', '@similarity')])\n", (22963, 23258), False, 'from bokeh.plotting import figure, output_file, save\n'), ((24195, 24247), 'logging.info', 'logging.info', (['"""Saving papers graph for cytoscape.js"""'], {}), "('Saving papers graph for cytoscape.js')\n", (24207, 24247), False, 'import logging\n'), ((24517, 24527), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (24525, 24527), True, 'import networkx as nx\n'), ((25297, 25331), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['gss', 'attrs'], {}), '(gss, attrs)\n', (25319, 25331), True, 'import networkx as nx\n'), ((2034, 2067), 'pysrc.papers.db.loaders.Loaders.source', 'Loaders.source', (['self.loader', 'test'], {}), '(self.loader, test)\n', (2048, 2067), False, 'from pysrc.papers.db.loaders import Loaders\n'), ((2654, 2696), 'os.path.join', 'os.path.join', (['self.query_folder', '"""ids.txt"""'], {}), "(self.query_folder, 'ids.txt')\n", (2666, 2696), False, 'import os\n'), ((3798, 3850), 'pysrc.papers.analysis.citations.merge_citation_stats', 'merge_citation_stats', (['self.pub_df', 'self.cit_stats_df'], {}), '(self.pub_df, 
self.cit_stats_df)\n', (3818, 3850), False, 'from pysrc.papers.analysis.citations import find_top_cited_papers, build_cit_stats_df, merge_citation_stats, build_cocit_grouped_df\n'), ((4422, 4485), 'pysrc.papers.analysis.citations.find_top_cited_papers', 'find_top_cited_papers', (['self.df', 'PapersAnalyzer.TOP_CITED_PAPERS'], {}), '(self.df, PapersAnalyzer.TOP_CITED_PAPERS)\n', (4443, 4485), False, 'from pysrc.papers.analysis.citations import find_top_cited_papers, build_cit_stats_df, merge_citation_stats, build_cocit_grouped_df\n'), ((4636, 4801), 'pysrc.papers.analysis.text.vectorize_corpus', 'vectorize_corpus', (['self.pub_df'], {'max_features': 'PapersAnalyzer.VECTOR_WORDS', 'min_df': 'PapersAnalyzer.VECTOR_MIN_DF', 'max_df': 'PapersAnalyzer.VECTOR_MAX_DF', 'test': 'test'}), '(self.pub_df, max_features=PapersAnalyzer.VECTOR_WORDS,\n min_df=PapersAnalyzer.VECTOR_MIN_DF, max_df=PapersAnalyzer.\n VECTOR_MAX_DF, test=test)\n', (4652, 4801), False, 'from pysrc.papers.analysis.text import texts_embeddings, vectorize_corpus, tokens_embeddings\n'), ((4954, 5015), 'pysrc.papers.analysis.text.tokens_embeddings', 'tokens_embeddings', (['self.corpus', 'self.corpus_tokens'], {'test': 'test'}), '(self.corpus, self.corpus_tokens, test=test)\n', (4971, 5015), False, 'from pysrc.papers.analysis.text import texts_embeddings, vectorize_corpus, tokens_embeddings\n'), ((5121, 5187), 'pysrc.papers.analysis.text.texts_embeddings', 'texts_embeddings', (['self.corpus_counts', 'self.corpus_tokens_embedding'], {}), '(self.corpus_counts, self.corpus_tokens_embedding)\n', (5137, 5187), False, 'from pysrc.papers.analysis.text import texts_embeddings, vectorize_corpus, tokens_embeddings\n'), ((5307, 5316), 'collections.Counter', 'Counter', ([], {}), '()\n', (5314, 5316), False, 'from collections import Counter\n'), ((5511, 5567), 'os.path.join', 'os.path.join', (['self.query_folder', '"""mesh_terms_freqs.html"""'], {}), "(self.query_folder, 'mesh_terms_freqs.html')\n", (5523, 5567), False, 
'import os\n'), ((5657, 5720), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_mesh_terms_freqs', 'title': '"""Mesh terms"""'}), "(filename=path_mesh_terms_freqs, title='Mesh terms')\n", (5668, 5720), False, 'from bokeh.plotting import figure, output_file, save\n'), ((5773, 5787), 'bokeh.io.reset_output', 'reset_output', ([], {}), '()\n', (5785, 5787), False, 'from bokeh.io import reset_output\n'), ((6817, 6854), 'pysrc.papers.analysis.citations.build_cocit_grouped_df', 'build_cocit_grouped_df', (['self.cocit_df'], {}), '(self.cocit_df)\n', (6839, 6854), False, 'from pysrc.papers.analysis.citations import find_top_cited_papers, build_cit_stats_df, merge_citation_stats, build_cocit_grouped_df\n'), ((8018, 8118), 'pysrc.papers.analysis.graph.build_papers_graph', 'build_papers_graph', (['self.df', 'self.cit_df', 'self.cocit_grouped_df', 'self.bibliographic_coupling_df'], {}), '(self.df, self.cit_df, self.cocit_grouped_df, self.\n bibliographic_coupling_df)\n', (8036, 8118), False, 'from pysrc.papers.analysis.graph import build_papers_graph, sparse_graph, to_weighted_graph\n'), ((8437, 8500), 'pysrc.papers.analysis.graph.to_weighted_graph', 'to_weighted_graph', (['self.papers_graph', 'PapersAnalyzer.similarity'], {}), '(self.papers_graph, PapersAnalyzer.similarity)\n', (8454, 8500), False, 'from pysrc.papers.analysis.graph import build_papers_graph, sparse_graph, to_weighted_graph\n'), ((8514, 8558), 'pysrc.papers.analysis.graph.sparse_graph', 'sparse_graph', (['self.weighted_similarity_graph'], {}), '(self.weighted_similarity_graph)\n', (8526, 8558), False, 'from pysrc.papers.analysis.graph import build_papers_graph, sparse_graph, to_weighted_graph\n'), ((8591, 8618), 'pysrc.papers.analysis.node2vec.node2vec', 'node2vec', (["self.df['id']", 'gs'], {}), "(self.df['id'], gs)\n", (8599, 8618), False, 'from pysrc.papers.analysis.node2vec import node2vec\n'), ((8730, 8891), 'numpy.concatenate', 'np.concatenate', (['(self.graph_embeddings * 
PapersAnalyzer.GRAPH_EMBEDDINGS_FACTOR, self.\n texts_embeddings * PapersAnalyzer.TEXT_EMBEDDINGS_FACTOR)'], {'axis': '(1)'}), '((self.graph_embeddings * PapersAnalyzer.\n GRAPH_EMBEDDINGS_FACTOR, self.texts_embeddings * PapersAnalyzer.\n TEXT_EMBEDDINGS_FACTOR), axis=1)\n', (8744, 8891), True, 'import numpy as np\n'), ((10280, 10318), 'pysrc.papers.analyzer.PapersAnalyzer.get_topics_info', 'PapersAnalyzer.get_topics_info', (['topics'], {}), '(topics)\n', (10310, 10318), False, 'from pysrc.papers.analyzer import PapersAnalyzer\n'), ((10350, 10418), 'pysrc.papers.analysis.topics.cluster_and_sort', 'cluster_and_sort', (['self.pca_coords', 'topics_max_number', 'topic_min_size'], {}), '(self.pca_coords, topics_max_number, topic_min_size)\n', (10366, 10418), False, 'from pysrc.papers.analysis.topics import get_topics_description, compute_topics_similarity_matrix, cluster_and_sort\n'), ((10482, 10534), 'os.path.join', 'os.path.join', (['self.query_folder', '"""topics_sizes.html"""'], {}), "(self.query_folder, 'topics_sizes.html')\n", (10494, 10534), False, 'import os\n'), ((10543, 10606), 'logging.info', 'logging.info', (['f"""Save topics ratios to file {path_topics_sizes}"""'], {}), "(f'Save topics ratios to file {path_topics_sizes}')\n", (10555, 10606), False, 'import logging\n'), ((10615, 10676), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_topics_sizes', 'title': '"""Topics sizes"""'}), "(filename=path_topics_sizes, title='Topics sizes')\n", (10626, 10676), False, 'from bokeh.plotting import figure, output_file, save\n'), ((10730, 10744), 'bokeh.io.reset_output', 'reset_output', ([], {}), '()\n', (10742, 10744), False, 'from bokeh.io import reset_output\n'), ((11059, 11116), 'os.path.join', 'os.path.join', (['self.query_folder', '"""topics_similarity.html"""'], {}), "(self.query_folder, 'topics_similarity.html')\n", (11071, 11116), False, 'import os\n'), ((11125, 11198), 'logging.info', 'logging.info', (['f"""Save similarity heatmap to file 
{path_topics_similarity}"""'], {}), "(f'Save similarity heatmap to file {path_topics_similarity}')\n", (11137, 11198), False, 'import logging\n'), ((11207, 11283), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_topics_similarity', 'title': '"""Topics mean similarity"""'}), "(filename=path_topics_similarity, title='Topics mean similarity')\n", (11218, 11283), False, 'from bokeh.plotting import figure, output_file, save\n'), ((11355, 11369), 'bokeh.io.reset_output', 'reset_output', ([], {}), '()\n', (11367, 11369), False, 'from bokeh.io import reset_output\n'), ((11622, 11779), 'pysrc.papers.analysis.topics.get_topics_description', 'get_topics_description', (['self.df', 'clusters_pids', 'self.corpus', 'self.corpus_tokens', 'self.corpus_counts'], {'n_words': 'PapersAnalyzer.TOPIC_DESCRIPTION_WORDS'}), '(self.df, clusters_pids, self.corpus, self.\n corpus_tokens, self.corpus_counts, n_words=PapersAnalyzer.\n TOPIC_DESCRIPTION_WORDS)\n', (11644, 11779), False, 'from pysrc.papers.analysis.topics import get_topics_description, compute_topics_similarity_matrix, cluster_and_sort\n'), ((11956, 11999), 'pandas.DataFrame', 'pd.DataFrame', (['kwds'], {'columns': "['comp', 'kwd']"}), "(kwds, columns=['comp', 'kwd'])\n", (11968, 11999), True, 'import pandas as pd\n'), ((12074, 12117), 'os.path.join', 'os.path.join', (['self.query_folder', '"""tags.csv"""'], {}), "(self.query_folder, 'tags.csv')\n", (12086, 12117), False, 'import os\n'), ((12596, 12752), 'pysrc.papers.analysis.topics.get_topics_description', 'get_topics_description', (['self.df', 'clusters_pids', 'mesh_corpus', 'mesh_corpus_tokens', 'mesh_corpus_counts'], {'n_words': 'PapersAnalyzer.TOPIC_DESCRIPTION_WORDS'}), '(self.df, clusters_pids, mesh_corpus,\n mesh_corpus_tokens, mesh_corpus_counts, n_words=PapersAnalyzer.\n TOPIC_DESCRIPTION_WORDS)\n', (12618, 12752), False, 'from pysrc.papers.analysis.topics import get_topics_description, compute_topics_similarity_matrix, cluster_and_sort\n'), 
((12949, 12993), 'pandas.DataFrame', 'pd.DataFrame', (['meshs'], {'columns': "['comp', 'kwd']"}), "(meshs, columns=['comp', 'kwd'])\n", (12961, 12993), True, 'import pandas as pd\n'), ((13064, 13107), 'os.path.join', 'os.path.join', (['self.query_folder', '"""mesh.csv"""'], {}), "(self.query_folder, 'mesh.csv')\n", (13076, 13107), False, 'import os\n'), ((13488, 13543), 'os.path.join', 'os.path.join', (['self.query_folder', '"""topics_by_years.html"""'], {}), "(self.query_folder, 'topics_by_years.html')\n", (13500, 13543), False, 'import os\n'), ((13552, 13617), 'logging.info', 'logging.info', (['f"""Save topics years to file {path_topics_by_years}"""'], {}), "(f'Save topics years to file {path_topics_by_years}')\n", (13564, 13617), False, 'import logging\n'), ((13626, 13693), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_topics_by_years', 'title': '"""Topics by years"""'}), "(filename=path_topics_by_years, title='Topics by years')\n", (13637, 13693), False, 'from bokeh.plotting import figure, output_file, save\n'), ((13821, 13835), 'bokeh.io.reset_output', 'reset_output', ([], {}), '()\n', (13833, 13835), False, 'from bokeh.io import reset_output\n'), ((14912, 14957), 'os.path.join', 'os.path.join', (['self.query_folder', '"""papers.csv"""'], {}), "(self.query_folder, 'papers.csv')\n", (14924, 14957), False, 'import os\n'), ((14966, 15036), 'logging.info', 'logging.info', (['f"""Saving papers and components dataframes {path_papers}"""'], {}), "(f'Saving papers and components dataframes {path_papers}')\n", (14978, 15036), False, 'import logging\n'), ((15432, 15478), 'os.path.join', 'os.path.join', (['self.query_folder', '"""papers.html"""'], {}), "(self.query_folder, 'papers.html')\n", (15444, 15478), False, 'import os\n'), ((15487, 15553), 'logging.info', 'logging.info', (['f"""Saving papers graph for bokeh {path_papers_graph}"""'], {}), "(f'Saving papers graph for bokeh {path_papers_graph}')\n", (15499, 15553), False, 'import logging\n'), 
((15562, 15623), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_papers_graph', 'title': '"""Papers graph"""'}), "(filename=path_papers_graph, title='Papers graph')\n", (15573, 15623), False, 'from bokeh.plotting import figure, output_file, save\n'), ((15977, 15991), 'bokeh.io.reset_output', 'reset_output', ([], {}), '()\n', (15989, 15991), False, 'from bokeh.io import reset_output\n'), ((16033, 16091), 'os.path.join', 'os.path.join', (['self.query_folder', '"""papers_interactive.html"""'], {}), "(self.query_folder, 'papers_interactive.html')\n", (16045, 16091), False, 'import os\n'), ((16100, 16190), 'logging.info', 'logging.info', (['f"""Saving papers graph for cytoscape.js {path_papers_graph_interactive}"""'], {}), "(\n f'Saving papers graph for cytoscape.js {path_papers_graph_interactive}')\n", (16112, 16190), False, 'import logging\n'), ((16596, 16609), 'pysrc.papers.plot.plotter.Plotter', 'Plotter', (['self'], {}), '(self)\n', (16603, 16609), False, 'from pysrc.papers.plot.plotter import Plotter, PLOT_WIDTH, SHORT_PLOT_HEIGHT, TALL_PLOT_HEIGHT\n'), ((16634, 16682), 'os.path.join', 'os.path.join', (['self.query_folder', '"""timeline.html"""'], {}), "(self.query_folder, 'timeline.html')\n", (16646, 16682), False, 'import os\n'), ((16691, 16740), 'logging.info', 'logging.info', (['f"""Save timeline to {path_timeline}"""'], {}), "(f'Save timeline to {path_timeline}')\n", (16703, 16740), False, 'import logging\n'), ((16749, 16802), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_timeline', 'title': '"""Timeline"""'}), "(filename=path_timeline, title='Timeline')\n", (16760, 16802), False, 'from bokeh.plotting import figure, output_file, save\n'), ((16855, 16869), 'bokeh.io.reset_output', 'reset_output', ([], {}), '()\n', (16867, 16869), False, 'from bokeh.io import reset_output\n'), ((16901, 16955), 'os.path.join', 'os.path.join', (['self.query_folder', '"""timeline_terms.html"""'], {}), "(self.query_folder, 
'timeline_terms.html')\n", (16913, 16955), False, 'import os\n'), ((16964, 17031), 'logging.info', 'logging.info', (['f"""Save frequent tokens to file {path_terms_timeline}"""'], {}), "(f'Save frequent tokens to file {path_terms_timeline}')\n", (16976, 17031), False, 'import logging\n'), ((17109, 17174), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_terms_timeline', 'title': '"""Terms timeline"""'}), "(filename=path_terms_timeline, title='Terms timeline')\n", (17120, 17174), False, 'from bokeh.plotting import figure, output_file, save\n'), ((17343, 17357), 'bokeh.io.reset_output', 'reset_output', ([], {}), '()\n', (17355, 17357), False, 'from bokeh.io import reset_output\n'), ((25347, 25369), 'networkx.cytoscape_data', 'nx.cytoscape_data', (['gss'], {}), '(gss)\n', (25364, 25369), True, 'import networkx as nx\n'), ((2439, 2482), 'pysrc.app.predefined.query_to_folder', 'query_to_folder', (['source', 'query', 'sort', 'limit'], {}), '(source, query, sort, limit)\n', (2454, 2482), False, 'from pysrc.app.predefined import query_to_folder\n'), ((2499, 2532), 'os.path.exists', 'os.path.exists', (['self.query_folder'], {}), '(self.query_folder)\n', (2513, 2532), False, 'import os\n'), ((2546, 2576), 'os.makedirs', 'os.makedirs', (['self.query_folder'], {}), '(self.query_folder)\n', (2557, 2576), False, 'import os\n'), ((3103, 3144), 'pysrc.papers.db.search_error.SearchError', 'SearchError', (['f"""Nothing found in database"""'], {}), "(f'Nothing found in database')\n", (3114, 3144), False, 'from pysrc.papers.db.search_error import SearchError\n'), ((6126, 6234), 'pysrc.papers.plot.plot_preprocessor.PlotPreprocessor.frequent_keywords_data', 'PlotPreprocessor.frequent_keywords_data', (['freq_meshs', 'self.df', 'mesh_corpus_tokens', 'mesh_corpus_counts', '(20)'], {}), '(freq_meshs, self.df,\n mesh_corpus_tokens, mesh_corpus_counts, 20)\n', (6165, 6234), False, 'from pysrc.papers.plot.plot_preprocessor import PlotPreprocessor\n'), ((6300, 6359), 
'os.path.join', 'os.path.join', (['self.query_folder', '"""timeline_mesh_terms.html"""'], {}), "(self.query_folder, 'timeline_mesh_terms.html')\n", (6312, 6359), False, 'import os\n'), ((6372, 6448), 'logging.info', 'logging.info', (['f"""Save frequent mesh terms to file {path_mesh_terms_timeline}"""'], {}), "(f'Save frequent mesh terms to file {path_mesh_terms_timeline}')\n", (6384, 6448), False, 'import logging\n'), ((6461, 6536), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_mesh_terms_timeline', 'title': '"""Mesh terms timeline"""'}), "(filename=path_mesh_terms_timeline, title='Mesh terms timeline')\n", (6472, 6536), False, 'from bokeh.plotting import figure, output_file, save\n'), ((6619, 6633), 'bokeh.io.reset_output', 'reset_output', ([], {}), '()\n', (6631, 6633), False, 'from bokeh.io import reset_output\n'), ((13707, 13815), 'pysrc.papers.plot.plotter.Plotter._plot_topics_years_distribution', 'Plotter._plot_topics_years_distribution', (['self.df', 'self.kwd_df', 'plot_components', 'data', 'min_year', 'max_year'], {}), '(self.df, self.kwd_df,\n plot_components, data, min_year, max_year)\n', (13746, 13815), False, 'from pysrc.papers.plot.plotter import Plotter, PLOT_WIDTH, SHORT_PLOT_HEIGHT, TALL_PLOT_HEIGHT\n'), ((13905, 13951), 'os.path.join', 'os.path.join', (['self.query_folder', '"""topics.html"""'], {}), "(self.query_folder, 'topics.html')\n", (13917, 13951), False, 'import os\n'), ((13964, 14038), 'logging.info', 'logging.info', (['f"""Save topics hierarchy with keywords to file {path_topics}"""'], {}), "(f'Save topics hierarchy with keywords to file {path_topics}')\n", (13976, 14038), False, 'import logging\n'), ((14051, 14111), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_topics', 'title': '"""Topics dendrogram"""'}), "(filename=path_topics, title='Topics dendrogram')\n", (14062, 14111), False, 'from bokeh.plotting import figure, output_file, save\n'), ((14244, 14258), 'bokeh.io.reset_output', 
'reset_output', ([], {}), '()\n', (14256, 14258), False, 'from bokeh.io import reset_output\n'), ((14291, 14342), 'os.path.join', 'os.path.join', (['self.query_folder', '"""topics_mesh.html"""'], {}), "(self.query_folder, 'topics_mesh.html')\n", (14303, 14342), False, 'import os\n'), ((14355, 14444), 'logging.info', 'logging.info', (['f"""Save topics hierarchy with mesh keywords to file {path_topics_mesh}"""'], {}), "(\n f'Save topics hierarchy with mesh keywords to file {path_topics_mesh}')\n", (14367, 14444), False, 'import logging\n'), ((14452, 14517), 'bokeh.plotting.output_file', 'output_file', ([], {'filename': 'path_topics_mesh', 'title': '"""Topics dendrogram"""'}), "(filename=path_topics_mesh, title='Topics dendrogram')\n", (14463, 14517), False, 'from bokeh.plotting import figure, output_file, save\n'), ((14646, 14660), 'bokeh.io.reset_output', 'reset_output', ([], {}), '()\n', (14658, 14660), False, 'from bokeh.io import reset_output\n'), ((15637, 15860), 'pysrc.papers.plot.plotter.Plotter._plot_papers_graph', 'Plotter._plot_papers_graph', (['source', 'self.sparse_papers_graph', 'self.df'], {'topics_tags': 'clusters_description', 'topics_meshs': 'mesh_clusters_description', 'add_callback': '(False)', 'plot_width': 'PLOT_WIDTH', 'plot_height': 'PLOT_WIDTH'}), '(source, self.sparse_papers_graph, self.df,\n topics_tags=clusters_description, topics_meshs=\n mesh_clusters_description, add_callback=False, plot_width=PLOT_WIDTH,\n plot_height=PLOT_WIDTH)\n', (15663, 15860), False, 'from pysrc.papers.plot.plotter import Plotter, PLOT_WIDTH, SHORT_PLOT_HEIGHT, TALL_PLOT_HEIGHT\n'), ((16227, 16293), 'os.path.join', 'os.path.join', (['__file__', '"""../../app/templates/papers_template.html"""'], {}), "(__file__, '../../app/templates/papers_template.html')\n", (16239, 16293), False, 'import os\n'), ((17308, 17334), 'bokeh.plotting.save', 'save', (['keywords_frequencies'], {}), '(keywords_frequencies)\n', (17312, 17334), False, 'from bokeh.plotting import figure, 
output_file, save\n'), ((17574, 17594), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (17588, 17594), False, 'import os\n'), ((23863, 23897), 'bokeh.models.PrintfTickFormatter', 'PrintfTickFormatter', ([], {'format': '"""%.2f"""'}), "(format='%.2f')\n", (23882, 23897), False, 'from bokeh.models import ColumnDataSource, ColorBar, PrintfTickFormatter, LinearColorMapper\n'), ((25966, 25989), 'json.dumps', 'json.dumps', (['topics_tags'], {}), '(topics_tags)\n', (25976, 25989), False, 'import json\n'), ((26017, 26041), 'json.dumps', 'json.dumps', (['topics_meshs'], {}), '(topics_meshs)\n', (26027, 26041), False, 'import json\n'), ((26072, 26092), 'json.dumps', 'json.dumps', (['graph_cs'], {}), '(graph_cs)\n', (26082, 26092), False, 'import json\n'), ((6554, 6605), 'pysrc.papers.plot.plotter.Plotter._plot_keywords_timeline', 'Plotter._plot_keywords_timeline', (['keywords_df', 'years'], {}), '(keywords_df, years)\n', (6585, 6605), False, 'from pysrc.papers.plot.plotter import Plotter, PLOT_WIDTH, SHORT_PLOT_HEIGHT, TALL_PLOT_HEIGHT\n'), ((9061, 9077), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9075, 9077), False, 'from sklearn.preprocessing import StandardScaler\n'), ((14129, 14234), 'pysrc.papers.plot.plotter.Plotter._plot_topics_hierarchy_with_keywords', 'Plotter._plot_topics_hierarchy_with_keywords', (['self.df', 'self.kwd_df', 'clusters', 'dendrogram'], {'max_words': '(3)'}), '(self.df, self.kwd_df, clusters,\n dendrogram, max_words=3)\n', (14173, 14234), False, 'from pysrc.papers.plot.plotter import Plotter, PLOT_WIDTH, SHORT_PLOT_HEIGHT, TALL_PLOT_HEIGHT\n'), ((14535, 14636), 'pysrc.papers.plot.plotter.Plotter._plot_topics_hierarchy_with_keywords', 'Plotter._plot_topics_hierarchy_with_keywords', (['self.df', 'mesh_df', 'clusters', 'dendrogram'], {'max_words': '(3)'}), '(self.df, mesh_df, clusters,\n dendrogram, max_words=3)\n', (14579, 14636), False, 'from pysrc.papers.plot.plotter import Plotter, PLOT_WIDTH, 
SHORT_PLOT_HEIGHT, TALL_PLOT_HEIGHT\n'), ((24886, 24928), 'pysrc.papers.utils.cut_authors_list', 'cut_authors_list', (["sel['authors'].values[0]"], {}), "(sel['authors'].values[0])\n", (24902, 24928), False, 'from pysrc.papers.utils import cut_authors_list, factors_colormap, color_to_rgb, topics_palette\n'), ((25920, 25938), 'pysrc.papers.utils.topics_palette', 'topics_palette', (['df'], {}), '(df)\n', (25934, 25938), False, 'from pysrc.papers.utils import cut_authors_list, factors_colormap, color_to_rgb, topics_palette\n'), ((2369, 2394), 'pysrc.version.VERSION.replace', 'VERSION.replace', (['""" """', '"""_"""'], {}), "(' ', '_')\n", (2384, 2394), False, 'from pysrc.version import VERSION\n'), ((9450, 9466), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9464, 9466), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9760, 9797), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(42)'}), '(n_components=2, random_state=42)\n', (9764, 9797), False, 'from sklearn.manifold import TSNE\n'), ((17079, 17098), 'itertools.chain', 'chain', (['*self.corpus'], {}), '(*self.corpus)\n', (17084, 17098), False, 'from itertools import product, chain\n'), ((19844, 19866), 'numpy.sum', 'np.sum', (['counts'], {'axis': '(0)'}), '(counts, axis=0)\n', (19850, 19866), True, 'import numpy as np\n'), ((9206, 9243), 'numpy.sum', 'np.sum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (9212, 9243), True, 'import numpy as np\n'), ((25834, 25853), 'jinja2.BaseLoader', 'jinja2.BaseLoader', ([], {}), '()\n', (25851, 25853), False, 'import jinja2\n'), ((9603, 9640), 'numpy.sum', 'np.sum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (9609, 9640), True, 'import numpy as np\n'), ((12429, 12467), 'numpy.asarray', 'np.asarray', (['mesh_corpus_counts[pid, :]'], {}), '(mesh_corpus_counts[pid, :])\n', (12439, 12467), True, 'import numpy as np\n')] |
"""
Python 3.6
append_coord_bounds.py
Calculate & append lat, lon, & time bounds variables to CESM timeseries output files.
Usage
------
python append_coord_bounds.py file.nc
<NAME>
3 April 2020
"""
from __future__ import print_function
import sys
import numpy as np
import logging
from netCDF4 import Dataset
from os.path import isfile, join
from os import listdir, remove
def calc_lat_lon_bnds(coord_vals, coord_name):
    """
    Calculate a lat or lon bounds array.

    Assumes uniformly spaced coordinates: each cell's bounds sit half a
    grid step on either side of its center value.

    Parameters
    -----------
    coord_vals : NumPy ndarray
        NetCDF coordinate values.
    coord_name : str
        Name of the coordinate being processed, i.e., "lat" or "lon".
    Return
    -------
    NumPy n x 2 array
        Coordinate bound values.
    """
    name_long = {'lat': 'latitude', 'lon': 'longitude'}
    logger.info('Creating {} bounds...'.format(name_long[coord_name]))
    n_cells = coord_vals.size
    logger.debug('{} size: {}'.format(coord_name, n_cells))
    bnds = np.zeros((n_cells, 2))  # n x 2 matrix of bounds
    logger.debug('Created zeroed {}_bnds array of size {}'.format(coord_name, bnds.shape))
    # Uniform grid step, taken from the first pair of coordinates.
    step = abs(coord_vals[1] - coord_vals[0])
    logger.debug('d_{}: {}'.format(coord_name, step))
    lower_edge = coord_vals[0] - (step / 2)
    for row in range(n_cells):
        bnds[row, 0] = lower_edge
        lower_edge += step
        bnds[row, 1] = lower_edge
    return bnds
def _write_coord_bnds(nc, coord_name, bnds):
    """Create (or overwrite) the <coord>_bnds variable on an open Dataset.

    netCDF4 raises RuntimeError when the variable already exists; in that
    case we log it and fall through to overwrite the existing values.
    """
    var_name = coord_name + '_bnds'
    try:
        nc.createVariable(var_name, 'float64', (coord_name, 'nbnd'))
    except RuntimeError as err_rt:
        logger.error('The following error was caught while trying to create variable: {}'.format(var_name))
        logger.error(err_rt)
        logger.info('Overwriting existing {} values'.format(var_name))
    except Exception as exp:
        # Consistent handling for both coords (the original lat branch used
        # logger.error, losing the traceback).
        logger.exception(exp)
        raise
    nc.variables[var_name][:, :] = bnds[:, :]


def append_lat_lon_bnd(filename):
    """
    Read, create, and write lat_bnds & lon_bnds variables to a netcdf file.

    Parameters
    -----------
    filename : str
        Path of the CESM netCDF file to process.

    Returns
    -------
    filename : str
        Path of the processed CESM output file.
    """
    logger.info('Reading {}'.format(filename))
    nc = Dataset(filename, 'r+')
    name_long = {'lat': 'latitude', 'lon': 'longitude'}
    # Identical logic for both coordinates; previously duplicated inline.
    for coord in ('lat', 'lon'):
        bnds = calc_lat_lon_bnds(nc.variables[coord][:], coord)
        logger.info('Finished creating {} bounds, adding them to netcdf file...'.format(name_long[coord]))
        _write_coord_bnds(nc, coord, bnds)
    nc.close()
    logger.info('Finshed! Closing {}\n'.format(filename))
    return filename
def calc_time_bnds(start_year, end_year):
    """
    Calculate the time bound array. Designed specifically for CESM output, where
    the time variable values fall on the last day of the month.

    Uses a fixed 365-day (noleap) calendar; each bound pair spans from the
    middle of the current month to the middle of the following month
    (cumulative days since the start of the series).

    Parameters
    ----------
    start_year : int
        First year of variable data.
    end_year : int
        Final year of variable data.

    Returns
    -------
    numpy ndarray
        2-D array of time_bnd values.
    """
    days_per_month = { 1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
                       7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    logger.info('Creating time bounds...')
    cum_days = 0
    time_bnd = []
    for _year in range(end_year - start_year + 1):
        for curr_month in days_per_month:
            bnd_0 = (days_per_month[curr_month] / 2) + cum_days
            cum_days += days_per_month[curr_month]
            # December wraps to January; Jan & Dec both have 31 days, so
            # falling back to the current month's length is exact.
            # (Replaces a bare `except:` that swallowed every exception.)
            next_month_days = days_per_month.get(curr_month + 1,
                                                 days_per_month[curr_month])
            bnd_1 = (next_month_days / 2) + cum_days
            time_bnd.append([bnd_0, bnd_1])
    time_bnd = np.asarray(time_bnd)
    logger.info('Finished creating time bounds!')
    return time_bnd
def append_time_bnd(filename):
    """
    Calculate the time_bnd variable and append to a CESM netcdf file.
    Parameters
    ----------
    filename : str
        Path of a CESM output file to append a time bnd to.
    Returns
    -------
    filename : str
        Path of the processed CESM output file.
    """
    # ADJUST THESE AS NEEDED
    # NOTE(review): the year range is hard-coded; it must match the actual
    # span of the timeseries file or the bounds will be wrong — confirm.
    start_year = 1999
    end_year = 2014
    logger.info('Adding time_bnds to {}'.format(filename))
    time_bnds = calc_time_bnds(start_year, end_year)
    nc = Dataset(filename, 'r+')
    try:
        nc.createVariable('time_bnds', 'float64', ('time', 'nbnd'))
    except RuntimeError as err_rt:
        # netCDF4 raises RuntimeError if the variable already exists;
        # log it and fall through to overwrite the values below.
        logger.error('The following error was caught while attempting to create time_bnds variable:')
        logger.error(err_rt)
        logger.info('Overwriting existing time_bnds values')
    except Exception as exp:
        logger.exception(exp)
        raise
    nc.variables['time_bnds'][:] = time_bnds[:, :]
    nc.close()
    logger.info('Finished! Closing {}\n'.format(filename))
    return filename
if __name__ == '__main__':
    # --- Initialize logger
    LOG_LEVEL = 'debug'  # minimum level emitted to both file and console
    log_levels = {'debug': logging.DEBUG,
                  'info' : logging.INFO,
                  'warn' : logging.WARNING,
                  'error': logging.ERROR,
                  'critical': logging.CRITICAL
                  }
    # Remove previous log if it exists as the logging module prefers appending to
    # previous log files instead of overwriting. Leave commented out to append to log
    if isfile('append_coord_bounds.log'):
        remove('append_coord_bounds.log')
    log_format = logging.Formatter("%(asctime)s %(levelname)6s: %(message)s", "%Y-%m-%d %H:%M:%S")
    # File handler
    file_handler = logging.FileHandler('append_coord_bounds.log')
    file_handler.setFormatter(log_format)
    # Console handler
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_format)
    # Configure logger (module functions above reference this global)
    logger = logging.getLogger('main')
    logger.setLevel(log_levels[LOG_LEVEL])
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    logger.info("Log created!\n")
    # --- Find netcdf files
    # First CLI argument: directory containing the .nc files to process.
    root_dir = sys.argv[1]
    logger.info('Looking for NetCDF files in {}'.format(root_dir))
    nc_files = [join(root_dir, f) for f in listdir(root_dir) if isfile(join(root_dir, f))
                and f.endswith('.nc')]
    logger.info('NetCDF files found: {}'.format(len(nc_files)))
    # --- Calculate and append lat & lon bounds
    processed = [append_lat_lon_bnd(nc_file) for nc_file in nc_files]
    # --- Calculate and append time bounds
    processed = [append_time_bnd(nc_file) for nc_file in nc_files]
    logger.info('Finished! {} files processed'.format(len(processed)))
| [
"netCDF4.Dataset",
"os.remove",
"logging.FileHandler",
"numpy.asarray",
"logging.StreamHandler",
"numpy.zeros",
"logging.Formatter",
"os.path.isfile",
"os.path.join",
"os.listdir",
"logging.getLogger"
] | [((1018, 1043), 'numpy.zeros', 'np.zeros', (['(num_coords, 2)'], {}), '((num_coords, 2))\n', (1026, 1043), True, 'import numpy as np\n'), ((1875, 1898), 'netCDF4.Dataset', 'Dataset', (['filename', '"""r+"""'], {}), "(filename, 'r+')\n", (1882, 1898), False, 'from netCDF4 import Dataset\n'), ((4394, 4414), 'numpy.asarray', 'np.asarray', (['time_bnd'], {}), '(time_bnd)\n', (4404, 4414), True, 'import numpy as np\n'), ((5025, 5048), 'netCDF4.Dataset', 'Dataset', (['filename', '"""r+"""'], {}), "(filename, 'r+')\n", (5032, 5048), False, 'from netCDF4 import Dataset\n'), ((6088, 6121), 'os.path.isfile', 'isfile', (['"""append_coord_bounds.log"""'], {}), "('append_coord_bounds.log')\n", (6094, 6121), False, 'from os.path import isfile, join\n'), ((6191, 6276), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)6s: %(message)s"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('%(asctime)s %(levelname)6s: %(message)s',\n '%Y-%m-%d %H:%M:%S')\n", (6208, 6276), False, 'import logging\n'), ((6311, 6357), 'logging.FileHandler', 'logging.FileHandler', (['"""append_coord_bounds.log"""'], {}), "('append_coord_bounds.log')\n", (6330, 6357), False, 'import logging\n'), ((6444, 6467), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (6465, 6467), False, 'import logging\n'), ((6549, 6574), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (6566, 6574), False, 'import logging\n'), ((6131, 6164), 'os.remove', 'remove', (['"""append_coord_bounds.log"""'], {}), "('append_coord_bounds.log')\n", (6137, 6164), False, 'from os import listdir, remove\n'), ((6875, 6892), 'os.path.join', 'join', (['root_dir', 'f'], {}), '(root_dir, f)\n', (6879, 6892), False, 'from os.path import isfile, join\n'), ((6902, 6919), 'os.listdir', 'listdir', (['root_dir'], {}), '(root_dir)\n', (6909, 6919), False, 'from os import listdir, remove\n'), ((6930, 6947), 'os.path.join', 'join', (['root_dir', 'f'], {}), '(root_dir, f)\n', (6934, 6947), False, 
'from os.path import isfile, join\n')] |
import ga
import numpy
"""
The y=target is to maximize this equation ASAP:
y = w1x1+w2x2+w3x3*w4x4+w5x5
where (x1,x2,x3,x4,x5,x6)=(-4,-12,-3,2,8)
What are the best values for the 6 weights w1 to w6?
We are going to use the genetic algorithm for the
best possible values after a number of generations.
"""
equation_inputs = [-4, -12, -3, 2, 8]
num_weights = len(equation_inputs)
sol_per_pop = 8
num_parents_mating = 4
# Defining the population size.
pop_size = (
sol_per_pop,
num_weights,
)
new_population = numpy.random.uniform(low=-4.0, high=4.0, size=pop_size)
num_generations = 30
for generation in range(num_generations):
print("Generation : ", generation)
# the fitness of each chromosome in the population.
fitness = ga.cal_pop_fitness(equation_inputs, new_population)
# Selecting the best parents in the population for mating.
parents = ga.select_mating_pool(new_population, fitness, num_parents_mating)
# Generating next generation using crossover.
offspring_size=(pop_size[0] - parents.shape[0], num_weights)
offspring_crossover = ga.crossover(parents, offspring_size)
# Adding some variations to the offsrping using mutation.
offspring_mutation = ga.mutation(offspring_crossover)
# Creating the new population based on the parents and offspring.
new_population[0 : parents.shape[0], :] = parents
new_population[parents.shape[0] :, :] = offspring_mutation
# The best result in the current iteration.
print(
"Best result : ", numpy.max(numpy.sum(new_population * equation_inputs, axis=1))
)
# Getting the best solution after iterating finishing all generations.
fitness = ga.cal_pop_fitness(equation_inputs, new_population)
# Then return the index of that solution corresponding to the best fitness.
best_match_idx = numpy.where(fitness == numpy.max(fitness))
best_match_idx = best_match_idx[0][0]
print("Best solution : ", new_population[best_match_idx, :])
print("Best solution fitness : ", fitness[best_match_idx]) | [
"numpy.random.uniform",
"ga.crossover",
"numpy.sum",
"ga.cal_pop_fitness",
"ga.mutation",
"numpy.max",
"ga.select_mating_pool"
] | [((563, 618), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-4.0)', 'high': '(4.0)', 'size': 'pop_size'}), '(low=-4.0, high=4.0, size=pop_size)\n', (583, 618), False, 'import numpy\n'), ((1746, 1797), 'ga.cal_pop_fitness', 'ga.cal_pop_fitness', (['equation_inputs', 'new_population'], {}), '(equation_inputs, new_population)\n', (1764, 1797), False, 'import ga\n'), ((801, 852), 'ga.cal_pop_fitness', 'ga.cal_pop_fitness', (['equation_inputs', 'new_population'], {}), '(equation_inputs, new_population)\n', (819, 852), False, 'import ga\n'), ((934, 1000), 'ga.select_mating_pool', 'ga.select_mating_pool', (['new_population', 'fitness', 'num_parents_mating'], {}), '(new_population, fitness, num_parents_mating)\n', (955, 1000), False, 'import ga\n'), ((1147, 1184), 'ga.crossover', 'ga.crossover', (['parents', 'offspring_size'], {}), '(parents, offspring_size)\n', (1159, 1184), False, 'import ga\n'), ((1276, 1308), 'ga.mutation', 'ga.mutation', (['offspring_crossover'], {}), '(offspring_crossover)\n', (1287, 1308), False, 'import ga\n'), ((1916, 1934), 'numpy.max', 'numpy.max', (['fitness'], {}), '(fitness)\n', (1925, 1934), False, 'import numpy\n'), ((1601, 1652), 'numpy.sum', 'numpy.sum', (['(new_population * equation_inputs)'], {'axis': '(1)'}), '(new_population * equation_inputs, axis=1)\n', (1610, 1652), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Utility functions for generating plots for use within templates
"""
import datetime
import plotly
import plotly.graph_objs as go
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
COLORS = ['rgba(93, 164, 214, 0.65)',
'rgba(255, 65, 54, 0.65)',
'rgba(207, 114, 255, 0.65)',
'rgba(127, 96, 0, 0.65)']
def to_unix_time(dt):
    """Convert a naive datetime to milliseconds since the Unix epoch."""
    unix_epoch = datetime.datetime(1970, 1, 1)
    delta = dt - unix_epoch
    return delta.total_seconds() * 1000
def plotly_distplot(df, column):
    """Render a distribution plot (histogram + rug) for one trait column.

    Args:
        df - DataFrame with a 'STRAIN' column and the numeric trait column
        column - name of the column to plot

    Returns:
        HTML div string of the offline plotly figure.
    """
    # Drop rows with missing values and sort by the trait so rug/hover
    # labels line up with the plotted points.
    df = df[['STRAIN', column]].dropna(how='any').sort_values([column])
    labels = df.STRAIN
    data = df[column]
    # Plotly does not do a very good job calculating bin-size,
    # so we will have to help it out here.
    # np.histogram returns (counts, bin_edges); edge spacing gives bin size.
    x, y = np.histogram(data, bins='auto')
    bin_size = y[1] - y[0]
    fig = ff.create_distplot([data],
                             [data.name],
                             bin_size=bin_size,
                             histnorm='count',
                             show_curve=False)
    # Update axis labels
    fig['layout']['yaxis1']['title'] = "Count"
    fig['layout']['xaxis1']['title'] = data.name
    fig['layout']['showlegend'] = False
    fig['layout']['margin'] = {'t': 20, 'r': 0, 'l': 80, 'b': 80}
    # With show_curve=False, create_distplot yields two traces:
    # data[0] is the histogram, data[1] the rug plot — attach strain
    # names to the rug points for hover text.
    fig.data[0]['hoverinfo'] = 'x+y'
    fig.data[1]['text'] = labels
    fig.data[1]['hoverinfo'] = 'x+text'
    plot = plotly.offline.plot(fig,
                               output_type='div',
                               include_plotlyjs=False,
                               show_link=False,
                               config={"displayModeBar": False})
    return plot
def time_series_plot(df, x_title=None, y_title=None, range=None, colors=COLORS):
    """
    Render a multi-series time series plot as an embeddable HTML div.

    Pass in a dataframe (df) with:
        First column - dates (x-axis)
        2nd, 3rd, 4th, etc. columns - values

    Args:
        df - the strain dataset
        x_title - optional x-axis title
        y_title - optional y-axis title
        range - optional x-axis range
                (shadows the builtin; name kept for API compatibility)
        colors - line colors, one per series (defaults to module COLORS)
    """
    trace_set = []
    # Series are plotted in reverse column order so the legend matches
    # the original layout.
    for n, column in enumerate(df.columns[1:][::-1]):
        trace_set.append(go.Scatter(x=df[df.columns[0]],
                                   y=df[column],
                                   name=column,
                                   opacity=0.8,
                                   # BUG FIX: use the `colors` argument; the
                                   # parameter was previously ignored in favor
                                   # of the module-level COLORS constant.
                                   line=dict(color=(colors[n]))
                                   )
                         )
    layout = go.Layout(margin={'t': 0, 'r': 0, 'l': 80, 'b': 60},
                       xaxis={})
    if range:
        layout['xaxis']['range'] = range
    if x_title:
        layout['xaxis']['title'] = x_title
    if y_title:
        layout['yaxis']['title'] = y_title
    fig = go.Figure(data=trace_set, layout=layout)
    return plotly.offline.plot(fig,
                               output_type='div',
                               include_plotlyjs=False,
                               show_link=False,
                               config={"displayModeBar": False})
def pxg_plot(df, trait_name):
    """
    Generates a phenotype x genotype plot
    Must be feed a dataframe with the markers and genotypes.

    For every peak marker, one box plot plus one jittered scatter trace is
    drawn per genotype class; marker names are rendered as text labels on a
    hidden secondary y-axis above the boxes.

    Args:
        df - DataFrame with MARKER, GT, TRAIT and STRAIN columns
        trait_name - The name of the trait (Y-axis)
    """
    peak_markers = set(df.MARKER)
    trace_set = []
    ticktext = []
    tickvals = []
    # Running x-offset so groups for successive markers don't overlap.
    offset = 0.0
    df.GT = pd.Categorical(df.GT)
    for marker_n, marker in enumerate(peak_markers):
        mset = df[df.MARKER == marker]
        for gt_n, gt in enumerate(set(mset.GT)):
            # One x slot per (marker, genotype) pair; label it with the GT.
            x_coord = marker_n + gt_n + offset
            tickvals.append(x_coord)
            ticktext.append(gt)
            gset = mset[mset.GT == gt]
            gset = gset.assign(x=(marker_n + gt_n + offset)*1.0)
            # Horizontal jitter so individual strain points don't overplot.
            gset = gset.assign(x_distr=gset.x + (np.random.standard_normal(len(gset.GT))/15)-0.75)
            trace = go.Box(
                name=marker+str(marker_n) + str(gt_n),
                y=gset.TRAIT,
                x=gset.x,
                xaxis='x1',
                yaxis='y1',
                hoverinfo="all",
                boxpoints='outlier',
                fillcolor=COLORS[gt_n],
                whiskerwidth=0.2,
                marker=dict(
                    color=COLORS[gt_n],
                    opacity=0.5
                ),
                line=dict(width=2)
            )
            trace_jitter = go.Scatter(
                y=gset.TRAIT,
                x=gset.x_distr,
                xaxis='x1',
                yaxis='y1',
                text=gset.STRAIN,
                hoverinfo="text+y",
                mode='markers',
                marker=dict(
                    color=COLORS[gt_n],
                    size=5,
                    opacity=0.8,
                    line=dict(width=1)
                ),
            )
            trace_set.append(trace)
            trace_set.append(trace_jitter)
            offset += 1
        # Add marker labels
        trace_marker_label = go.Scatter(name=marker,
                                        y=[1],
                                        x=[marker_n + offset-1],
                                        yaxis='y2',
                                        text=[marker],
                                        hoverinfo='none',
                                        mode='text',
                                        textposition='bottom',
                                        textfont=dict(
                                            family='courier',
                                            size=25
                                        )
                                        )
        trace_set.append(trace_marker_label)
        # Extra gap between marker groups.
        offset += 2.5
    layout = go.Layout(
        hovermode='closest',
        xaxis=dict(
            title="Genotype",
            tickmode='array',
            tickvals=tickvals,
            ticktext=ticktext,
        ),
        # Main trait axis occupies the lower 70%; the hidden label axis
        # takes the top 30%.
        yaxis1=dict(
            domain=[0, 0.7],
            title=trait_name
        ),
        yaxis2=dict(
            domain=[0.7, 1],
            visible=False
        ),
        margin=dict(
            l=80,
            r=80,
            b=80,
            t=0,
        ),
        showlegend=False
    )
    fig = go.Figure(data=trace_set, layout=layout)
    return plotly.offline.plot(fig,
                               output_type='div',
                               include_plotlyjs=False,
                               show_link=False,
                               config={"displayModeBar": False})
def fine_mapping_plot(df):
    """Placeholder for a fine-mapping plot; not yet implemented."""
    pass
| [
"plotly.graph_objs.Layout",
"plotly.offline.plot",
"datetime.datetime.utcfromtimestamp",
"numpy.histogram",
"pandas.Categorical",
"plotly.figure_factory.create_distplot",
"plotly.graph_objs.Figure"
] | [((531, 568), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(0)'], {}), '(0)\n', (565, 568), False, 'import datetime\n'), ((1111, 1142), 'numpy.histogram', 'np.histogram', (['data'], {'bins': '"""auto"""'}), "(data, bins='auto')\n", (1123, 1142), True, 'import numpy as np\n'), ((1180, 1278), 'plotly.figure_factory.create_distplot', 'ff.create_distplot', (['[data]', '[data.name]'], {'bin_size': 'bin_size', 'histnorm': '"""count"""', 'show_curve': '(False)'}), "([data], [data.name], bin_size=bin_size, histnorm='count',\n show_curve=False)\n", (1198, 1278), True, 'import plotly.figure_factory as ff\n'), ((1741, 1863), 'plotly.offline.plot', 'plotly.offline.plot', (['fig'], {'output_type': '"""div"""', 'include_plotlyjs': '(False)', 'show_link': '(False)', 'config': "{'displayModeBar': False}"}), "(fig, output_type='div', include_plotlyjs=False,\n show_link=False, config={'displayModeBar': False})\n", (1760, 1863), False, 'import plotly\n'), ((2702, 2764), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'margin': "{'t': 0, 'r': 0, 'l': 80, 'b': 60}", 'xaxis': '{}'}), "(margin={'t': 0, 'r': 0, 'l': 80, 'b': 60}, xaxis={})\n", (2711, 2764), True, 'import plotly.graph_objs as go\n'), ((2972, 3012), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'trace_set', 'layout': 'layout'}), '(data=trace_set, layout=layout)\n', (2981, 3012), True, 'import plotly.graph_objs as go\n'), ((3024, 3146), 'plotly.offline.plot', 'plotly.offline.plot', (['fig'], {'output_type': '"""div"""', 'include_plotlyjs': '(False)', 'show_link': '(False)', 'config': "{'displayModeBar': False}"}), "(fig, output_type='div', include_plotlyjs=False,\n show_link=False, config={'displayModeBar': False})\n", (3043, 3146), False, 'import plotly\n'), ((3616, 3637), 'pandas.Categorical', 'pd.Categorical', (['df.GT'], {}), '(df.GT)\n', (3630, 3637), True, 'import pandas as pd\n'), ((6494, 6534), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'trace_set', 'layout': 
'layout'}), '(data=trace_set, layout=layout)\n', (6503, 6534), True, 'import plotly.graph_objs as go\n'), ((6546, 6668), 'plotly.offline.plot', 'plotly.offline.plot', (['fig'], {'output_type': '"""div"""', 'include_plotlyjs': '(False)', 'show_link': '(False)', 'config': "{'displayModeBar': False}"}), "(fig, output_type='div', include_plotlyjs=False,\n show_link=False, config={'displayModeBar': False})\n", (6565, 6668), False, 'import plotly\n')] |
from contextlib import contextmanager
from dataclasses import dataclass, astuple
from typing import Any, Dict, Generator, List, Optional, Tuple
import numpy as np
import torch
from numpy import int32, float32
from numpy.random import RandomState
from .fnv1a import fnv1a
from .network import Network
from .typing import SearchableEnv, GameState
DEBUG = False
# 300 moves/game x 1000 evaluations/move x 30 next moves/eval
MAX_NODES = 10000000
class SearchTreeFull(Exception):
    """Raised when the preallocated MAX_NODES buffer cannot fit new nodes."""
    pass
class SearchTree:
    """
    SearchTree stores the explored game tree and its MCTS statistics.
    Tree nodes are board positions and edges are moves of the game.
    Terminal leaf nodes have
        `num_children[node_id] = 0`.
    Unevaluated leaf nodes have
        `num_children[node_id] = -1`.
    * total_value is stored from current node's active player's perspective
    (opponent of parent node's player)
    * prior_prob is stored from parent node's player's perspective
    (opponent of current node's player)
    """

    def __init__(self):
        # Nodes live in flat preallocated arrays indexed by node id; ids
        # are handed out sequentially as children are created.
        self.num_nodes = 0
        self.root_id: int32 = 0
        # tree edges (-1 == unset)
        self.parent = -np.ones(MAX_NODES, dtype=int32)
        self.first_child = -np.ones(MAX_NODES, dtype=int32)
        self.num_children = -np.ones(MAX_NODES, dtype=int32)
        # MCTS statistics
        self.num_visits = -np.ones(MAX_NODES, dtype=float32)
        self.total_value = -np.ones(MAX_NODES, dtype=float32)
        self.prior_prob = -np.ones(MAX_NODES, dtype=float32)
        self.reset()

    def reset(self):
        """Clear tree.
        """
        # After clearing, the tree holds a single unevaluated root (id 0).
        self.num_nodes = 1
        self.root_id = 0
        # create unevaluated root node
        self.parent[0] = -1
        self.first_child[0] = -1
        self.num_children[0] = -1
        self.num_visits[0] = 0
        self.total_value[0] = 0
        self.prior_prob[0] = 1.0

    def search(self, game: SearchableEnv, network: Network, *,
               num_simulations: int = 100,
               temperature: float = 1.0,
               exploration_coef: float = 1.0,
               exploration_noise_scale: float = 1.0,
               exploration_noise_alpha: float = 1.0,
               batch_size: int = 10,
               rng: Optional[RandomState] = None) \
            -> Tuple[np.ndarray, float, Dict[str, Any]]:
        """Plan for next moves with Monte Carlo tree search (AZ flavor).
        The search is deterministic by default, but you can provide seeded
        random number generators for stochastic results.
        :param temperature: Exploration temperature
        :param exploration_noise_scale: Exploration noise scale
        :param rng: Random number generator (by default seeded with 0)
        :return: Next move probabilities, game value, and debug metrics
        """
        # avoid circular dependency
        from . import mcts
        if rng is None:
            rng = RandomState(0)
        # Inference only: no gradients while evaluating positions.
        with torch.no_grad():
            network.eval()
            metrics = mcts.sample_paths(
                self, game, network,
                num_simulations, batch_size,
                exploration_coef,
                exploration_noise_scale,
                exploration_noise_alpha,
                rng)
        # play
        stats = self.root.move_stats
        move_probs = as_distribution(stats.num_visits, temperature)
        value = self.root.total_value / self.root.num_visits
        metrics['search_root_width'] = np.sum(stats.num_visits > 0)
        metrics['search_root_visits'] = np.mean(stats.num_visits)
        metrics['search_root_children'] = len(stats.num_visits)
        metrics['search_tree_nodes'] = self.num_nodes
        return move_probs, value, metrics

    def move(self, move_id: int) -> None:
        """Commit move and pick new root node.
        Set new tree root to one of current root's child nodes.
        Forget about ancestor and sibling nodes.
        :param move_id: Action id of move that was made
        """
        if not self.root.is_evaluated():
            # step to the unknown, nothing to remember
            self.reset()
            return
        assert self.root_id < self.num_nodes, 'root node is unevaluated'
        node = self.root.child(move_id)
        if not node.is_evaluated():
            # step to the unknown, nothing to remember
            self.reset()
        else:
            self.root_id = node.id

    @property
    def root(self) -> 'SearchNode':
        """Access root node.
        """
        return SearchNode(self, self.root_id)

    @contextmanager
    def search_forward(self, game: SearchableEnv) \
            -> Generator['ForwardSearchIterator', None, None]:
        """Snapshot game state and start search.
        :returns: Iterator for root node
        """
        assert self.root_id >= 0 and self.root_id < self.num_nodes, \
            'invalid root node'
        assert self.root.is_evaluated(), 'unevaluated root'
        assert not self.root.is_terminal(), 'terminal root'
        # Snapshot so the caller may play moves forward during search and
        # have the real game state restored afterwards.
        game.snapshot()
        try:
            yield ForwardSearchIterator(self.root, game)
        finally:
            game.restore()
class SearchNode:
    """Search tree node, incl. MCTS statistics.

    A lightweight view over one slot of a SearchTree's flat arrays; stores
    nothing beyond (tree, node_id).
    """

    def __init__(self, tree: SearchTree, node_id: int) -> None:
        assert node_id >= 0, 'illegal node'
        assert node_id < tree.num_nodes, 'illegal node'
        self.tree = tree
        self._id = node_id

    def __eq__(self, other: 'SearchNode') -> bool:
        # NOTE(review): compares ids only (assumes both nodes share one
        # tree); defining __eq__ without __hash__ also makes nodes
        # unhashable — confirm that is intended.
        return self.id == other.id

    @property
    def id(self) -> int32:
        """Node id (read only).
        """
        return self._id

    @property
    def parent(self) -> 'SearchNode':
        """Get parent node in search tree.
        """
        assert not self.is_root(), 'root has no parent'
        return SearchNode(self.tree, self.tree.parent[self.id])

    def child(self, move_id: int32) -> 'SearchNode':
        """Get N'th child node.
        :param move_id: Index of child node
        """
        assert self.is_evaluated(), 'unevaluated node'
        assert move_id >= 0, 'illegal child'
        assert move_id < self.num_children, 'illegal child'
        # Children occupy a contiguous id range starting at first_child.
        node_id = self.tree.first_child[self.id] + move_id
        return SearchNode(self.tree, node_id)

    @property
    def move_stats(self) -> 'SearchStats':
        """Get vectorized MCTS statistics for all moves from this node.
        :returns: MCTS stats for moves from current player's perspective
        """
        assert self.is_evaluated(), 'unevaluated nodes have no stats'
        first_child = self.tree.first_child[self.id]
        node_ids = slice(first_child, first_child + self.num_children)
        # priors are already stored from parent's perspective, but
        # values are from child nodes' own perspective, so need to be negated
        return SearchStats(self.tree.num_visits[node_ids],
                           -self.tree.total_value[node_ids],
                           self.tree.prior_prob[node_ids])

    @property
    def num_children(self) -> int32:
        return self.tree.num_children[self.id]

    @property
    def num_visits(self) -> float32:
        return self.tree.num_visits[self.id]

    @num_visits.setter
    def num_visits(self, visits: float32) -> None:
        self.tree.num_visits[self.id] = visits

    @property
    def total_value(self) -> float32:
        return self.tree.total_value[self.id]

    @total_value.setter
    def total_value(self, value: float32) -> None:
        self.tree.total_value[self.id] = value

    @property
    def prior_prob(self):
        return self.tree.prior_prob[self.id]

    def is_evaluated(self) -> bool:
        """Test for evaluated nodes.
        Unevaluated nodes are leaves, whose children and MCTS stats
        are undefined.
        """
        return self.num_children >= 0

    def is_terminal(self) -> bool:
        """Test for terminal nodes.
        Terminal nodes are leaves which have been evaluated and
        have no children.
        """
        return self.num_children == 0

    def is_root(self) -> bool:
        """Test for root node.
        """
        return self == self.tree.root

    def is_leaf(self) -> bool:
        """Test for leaf nodes.
        """
        return (not self.is_evaluated()) or self.is_terminal()

    def create_child_nodes(self, num_children: int,
                           prior_prob: np.ndarray) -> None:
        """Create new child nodes and initialize MCTS statistics.
        """
        # Fail early instead of writing past the preallocated arrays.
        if self.tree.num_nodes + num_children > MAX_NODES:
            raise SearchTreeFull('too many nodes')
        first_node_id = self.tree.num_nodes
        self.tree.num_nodes += num_children
        assert self.tree.num_nodes <= MAX_NODES, 'tree nodes buffer overflow'
        self.tree.first_child[self.id] = first_node_id
        self.tree.num_children[self.id] = num_children
        node_ids = slice(first_node_id, first_node_id + num_children)
        self.tree.parent[node_ids] = self.id
        self.tree.first_child[node_ids] = -1
        self.tree.num_children[node_ids] = -1  # mark as unevaluated
        self.tree.num_visits[node_ids] = 0
        self.tree.total_value[node_ids] = 0
        self.tree.prior_prob[node_ids] = prior_prob
@dataclass
class SearchStats:
    """Vectorized MCTS statistics for all moves out of one node."""
    num_visits: np.ndarray  # dim=1, dtype=float32
    total_value: np.ndarray  # dim=1, dtype=float32
    prior_prob: np.ndarray  # dim=1, dtype=float32
class ForwardSearchIterator:
    """Combined game state and tree node when searching forward in game.
    Search forward in the game and track search tree node while searching.
    """

    def __init__(self, node: SearchNode, game: SearchableEnv) -> None:
        self._game = game
        self._node = node
        if DEBUG:
            print(f'search: node {self._node.id} '
                  f'nch {self._node.num_children} '
                  f'game {hash(str(astuple(self._game.state)))} '
                  f'nmv {len(self._game.state.legal_moves)}')

    def step(self, move_id: int) -> None:
        """Move one step forward in game and tree.
        :param move_id: Ordinal move id among legal moves in current node
        """
        state = self._game.state
        assert move_id < len(state.legal_moves), \
            f"move id {move_id} out of range"
        assert not state.result, 'game ended in select'
        # Advance the game and the tree in lockstep: the i-th legal move
        # corresponds to the i-th child node.
        move = state.legal_moves[move_id]
        self._game.step(move)
        self._node = self._node.child(move_id)
        if DEBUG:
            print(f'step {move}: node {self._node.id} '
                  f'nch {self._node.num_children} '
                  f'game {hash(str(astuple(self._game.state)))} '
                  f'nmv {len(self._game.state.legal_moves)}')
            #self._game.io.print_state(self._game.state)

    @property
    def state(self) -> GameState:
        """Retrieve game state for current search position."""
        return self._game.state

    @property
    def node(self) -> SearchNode:
        """Retrieve tree node for current search position."""
        return self._node
def as_distribution(counts: np.ndarray, temperature: float = 1.0) \
        -> np.ndarray:
    """Normalize count vector as a multinomial distribution.

    Counts of zero map to probability zero; a temperature of 0 yields a
    greedy (argmax-only) distribution.

    :returns: Multinomial probability distribution
    """
    assert all(counts >= 0)
    # Work in log space; zero counts are masked to -inf so they get p = 0.
    logits = np.log(counts.clip(min=1))
    logits[counts == 0] = -np.inf
    if not temperature:
        # Zero temperature: keep only the maximal entries.
        logits[logits < logits.max()] = -np.inf
    else:
        logits = logits / temperature
    # avoid numerical issues
    logits = logits.astype(np.float64)
    log_norm = np.logaddexp.reduce(logits)
    return np.exp(logits - log_norm)
def print_tree(tree: SearchTree) -> None:
    """Pretty print search tree, one node per line.

    Example output:
        0
        ├── 5 [0] move 393240 value -0.00/3 (0.43)
        │   ├── [22] move 458757 value 0.00/0 (1.00)
        │   └── [34] move 196636 value 0.00/0 (0.00)
        ├── 3 [1] move 393241 value 0.51/2 (0.00)
        │   ├── 7 [10] move 262149 value 0.69/1 (0.61)
        │   │   ├── [39] move 327682 value 0.00/0 (0.00)
    """
    print('\n'.join(format_tree_lines(tree)))
def format_tree_lines(tree: SearchTree) -> Generator[str, None, None]:
    """Yield one pretty-printed line per tree node, root first."""
    yield format_node(tree.root)
    yield from format_subtree_lines(tree, tree.root, 1, [])
def format_subtree_lines(tree: SearchTree, node: SearchNode, indent: int,
                         last: List[bool]) -> Generator[str, None, None]:
    """Recursively yield formatted lines for all descendants of *node*.

    *last* is a shared stack of booleans marking, per depth, whether the
    ancestor was its parent's final child (it drives the drawn prefix);
    entries are pushed before descending and popped afterwards.
    """
    indent += 1
    n_children = node.num_children
    for idx in range(n_children):
        child = node.child(idx)
        last.append(idx == n_children - 1)
        yield format_tree_prefix(last) + format_node(child, move_id=idx)
        if not child.is_leaf():
            yield from format_subtree_lines(tree, child, indent, last)
        last.pop()
def format_tree_prefix(last: List[bool]) -> str:
    """Build the box-drawing prefix for a node at the depth given by *last*.

    Each entry of *last* says whether the ancestor at that depth was its
    parent's final child; the last entry describes the node itself.
    """
    if not last:
        return ''
    parts = ['    ' if was_final else '│   ' for was_final in last[:-1]]
    parts.append('└── ' if last[-1] else '├── ')
    return ''.join(parts)
def format_node(node: SearchNode, move_id: Optional[int] = None) -> str:
    """Render one node as 'id [move] value V vis N (pri P)'.

    The mean value is the total value divided by visits (at least 1,
    so unvisited nodes show 0.00 rather than dividing by zero).
    """
    visits = node.num_visits
    mean_value = node.total_value / max(1, visits)
    pieces = [f'{node.id} ']
    if move_id is not None:
        pieces.append(f'[{move_id}] ')
    pieces.append(
        f'value {mean_value:.2f} vis {visits:.0f} (pri {node.prior_prob:.2f})')
    return ''.join(pieces)
| [
"numpy.sum",
"numpy.ones",
"numpy.random.RandomState",
"numpy.mean",
"numpy.exp",
"numpy.logaddexp.reduce",
"torch.no_grad",
"dataclasses.astuple"
] | [((11638, 11665), 'numpy.logaddexp.reduce', 'np.logaddexp.reduce', (['log_pi'], {}), '(log_pi)\n', (11657, 11665), True, 'import numpy as np\n'), ((11675, 11697), 'numpy.exp', 'np.exp', (['(log_pi - log_z)'], {}), '(log_pi - log_z)\n', (11681, 11697), True, 'import numpy as np\n'), ((3466, 3494), 'numpy.sum', 'np.sum', (['(stats.num_visits > 0)'], {}), '(stats.num_visits > 0)\n', (3472, 3494), True, 'import numpy as np\n'), ((3535, 3560), 'numpy.mean', 'np.mean', (['stats.num_visits'], {}), '(stats.num_visits)\n', (3542, 3560), True, 'import numpy as np\n'), ((1160, 1191), 'numpy.ones', 'np.ones', (['MAX_NODES'], {'dtype': 'int32'}), '(MAX_NODES, dtype=int32)\n', (1167, 1191), True, 'import numpy as np\n'), ((1220, 1251), 'numpy.ones', 'np.ones', (['MAX_NODES'], {'dtype': 'int32'}), '(MAX_NODES, dtype=int32)\n', (1227, 1251), True, 'import numpy as np\n'), ((1281, 1312), 'numpy.ones', 'np.ones', (['MAX_NODES'], {'dtype': 'int32'}), '(MAX_NODES, dtype=int32)\n', (1288, 1312), True, 'import numpy as np\n'), ((1367, 1400), 'numpy.ones', 'np.ones', (['MAX_NODES'], {'dtype': 'float32'}), '(MAX_NODES, dtype=float32)\n', (1374, 1400), True, 'import numpy as np\n'), ((1429, 1462), 'numpy.ones', 'np.ones', (['MAX_NODES'], {'dtype': 'float32'}), '(MAX_NODES, dtype=float32)\n', (1436, 1462), True, 'import numpy as np\n'), ((1490, 1523), 'numpy.ones', 'np.ones', (['MAX_NODES'], {'dtype': 'float32'}), '(MAX_NODES, dtype=float32)\n', (1497, 1523), True, 'import numpy as np\n'), ((2913, 2927), 'numpy.random.RandomState', 'RandomState', (['(0)'], {}), '(0)\n', (2924, 2927), False, 'from numpy.random import RandomState\n'), ((2942, 2957), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2955, 2957), False, 'import torch\n'), ((9914, 9939), 'dataclasses.astuple', 'astuple', (['self._game.state'], {}), '(self._game.state)\n', (9921, 9939), False, 'from dataclasses import dataclass, astuple\n'), ((10653, 10678), 'dataclasses.astuple', 'astuple', (['self._game.state'], {}), 
'(self._game.state)\n', (10660, 10678), False, 'from dataclasses import dataclass, astuple\n')] |
import os
import sys
import glob
import logging
import datetime
import parse
import shutil
import copy
import numpy as np
import pandas as pd
import warnings
import pickle
from scipy.interpolate import interp1d
logger = logging.getLogger('ceciestunepipe.util.syncutil')
def square_to_edges(x: np.ndarray) -> np.ndarray:
    """Find all edges (transitions) of a square TTL signal.

    Args:
        x: array holding a roughly two-level (square) signal; singleton
           dimensions (e.g. shape (1, n)) are squeezed out first.

    Returns:
        Tuple (ttl_frames, ttl_states): sample indices of each transition and
        the corresponding sign (+1 rising, -1 falling), jointly sorted by
        sample index.
    """
    dig = np.squeeze(x)
    # BUG FIX: the diff was previously computed on the raw input `x` instead
    # of the squeezed signal `dig`, which broke edge detection whenever the
    # input carried singleton dimensions (e.g. shape (1, n)).
    diff_dig = np.diff(dig)
    rising = np.where(diff_dig > 0)[0]
    falling = np.where(diff_dig < 0)[0]
    ttl_frames = np.concatenate((rising, falling))
    ttl_states = np.array([1] * len(rising) + [-1] * len(falling))
    sort_idxs = np.argsort(ttl_frames)
    return ttl_frames[sort_idxs], ttl_states[sort_idxs]
def quick_ttl_threshold(x: np.ndarray) -> float:
    """Return a threshold halfway between the two levels of a TTL signal.

    Assumes values oscillate roughly between two points (no outlier noise),
    so min + peak-to-peak/2 lands midway between the low and high levels.
    """
    # FIX: the return annotation used `np.float`, an alias removed in
    # NumPy 1.24 -- evaluating it raises AttributeError at import time on
    # modern NumPy. Plain `float` is the correct builtin type; `np.array`
    # (a function, not a type) is replaced by `np.ndarray` as well.
    thresh = np.min(x) + np.ptp(x)/2
    return thresh
def sync_to_pattern(x_ttl: np.ndarray, t: np.ndarray, x_0_ttl: np.ndarray, t_0: np.ndarray) -> np.ndarray:
    """Correct the timebase of a recorded stream so its sync-TTL edges
    coincide with those of a reference (pattern) stream.

    Both edge arrays have shape [2, n_transitions]: row 0 holds the sample
    index of each edge, row 1 the transition sign (+1 rising / -1 falling).

    Strategy:
      - check that the first edges share the same sign and that both streams
        contain the same number of edges;
      - take the reference timestamps at the reference edge positions (two
        values per cycle, so T/2 is the actual period);
      - build a linear interpolation (sample -> time) anchored at the target
        edge samples and those reference times;
      - evaluate it at every sample of the target stream.

    Args:
        x_ttl: edge array of the target stream.
        t: per-sample timestamps of the target stream (only its length is used).
        x_0_ttl: edge array of the reference (pattern) stream.
        t_0: per-sample timestamps of the reference stream.

    Returns:
        np.ndarray of corrected timestamps, one per sample of the target
        stream. Extrapolation beyond the first/last edge may yield negative
        times; that is expected and correct.

    Raises:
        ValueError: if the first-edge signs or the edge counts differ between
            the two streams (likely a recording problem; stop and debug).
    """
    n_edges = x_ttl.shape[1]
    n_edges_0 = x_0_ttl.shape[1]
    if x_ttl[1, 0] != x_0_ttl[1, 0]:
        # If the signals don't have the same number of edges there may be an error, better stop and debug
        raise ValueError(
            'Sign of first edge transition of pattern and target dont match')
    if n_edges != n_edges_0:
        # If the signals don't have the same number of edges there may be an error, better stop and debug
        raise ValueError(
            'Number of edges in the syn ttl events of pattern and target dont match')
    # 'True' times at the edges: reference timestamps at the reference edge
    # sample positions; sample positions of the same edges in the target.
    t_0_edge = t_0[x_0_ttl[0]]
    sample_edge = x_ttl[0]
    # fill_value='extrapolate' lets the fit extend before the first and after
    # the last edge. Careful: this can produce negative times, but it is the
    # correct way to do it. The function interpolates time as a target,
    # t_0 = f(sample), with true values at the edges.
    t_interp_f = interp1d(sample_edge, t_0_edge,
                          assume_sorted=True,
                          fill_value='extrapolate')
    n_samples = t.size
    t_prime = t_interp_f(np.arange(n_samples))
    return t_prime
def tuputamadre(x):
    """Debug placeholder: ignores its argument and always returns 3."""
    result = 3
    return result
| [
"numpy.ptp",
"logging.getLogger",
"numpy.argsort",
"numpy.min",
"numpy.diff",
"numpy.where",
"numpy.arange",
"numpy.squeeze",
"scipy.interpolate.interp1d",
"numpy.concatenate"
] | [((222, 271), 'logging.getLogger', 'logging.getLogger', (['"""ceciestunepipe.util.syncutil"""'], {}), "('ceciestunepipe.util.syncutil')\n", (239, 271), False, 'import logging\n'), ((342, 355), 'numpy.squeeze', 'np.squeeze', (['x'], {}), '(x)\n', (352, 355), True, 'import numpy as np\n'), ((375, 385), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (382, 385), True, 'import numpy as np\n'), ((496, 529), 'numpy.concatenate', 'np.concatenate', (['(rising, falling)'], {}), '((rising, falling))\n', (510, 529), True, 'import numpy as np\n'), ((621, 643), 'numpy.argsort', 'np.argsort', (['ttl_frames'], {}), '(ttl_frames)\n', (631, 643), True, 'import numpy as np\n'), ((2540, 2617), 'scipy.interpolate.interp1d', 'interp1d', (['sample_edge', 't_0_edge'], {'assume_sorted': '(True)', 'fill_value': '"""extrapolate"""'}), "(sample_edge, t_0_edge, assume_sorted=True, fill_value='extrapolate')\n", (2548, 2617), False, 'from scipy.interpolate import interp1d\n'), ((404, 426), 'numpy.where', 'np.where', (['(diff_dig > 0)'], {}), '(diff_dig > 0)\n', (412, 426), True, 'import numpy as np\n'), ((448, 470), 'numpy.where', 'np.where', (['(diff_dig < 0)'], {}), '(diff_dig < 0)\n', (456, 470), True, 'import numpy as np\n'), ((844, 853), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (850, 853), True, 'import numpy as np\n'), ((2724, 2744), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (2733, 2744), True, 'import numpy as np\n'), ((856, 865), 'numpy.ptp', 'np.ptp', (['x'], {}), '(x)\n', (862, 865), True, 'import numpy as np\n')] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reader_ops."""
import os.path
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from syntaxnet import dictionary_pb2
from syntaxnet import graph_builder
from syntaxnet import sparse_pb2
from syntaxnet.ops import gen_parser_ops
FLAGS = tf.app.flags.FLAGS
# When run outside the Bazel/TF test harness these flags may be undefined;
# give them usable defaults so the test can still locate its data and a
# writable temp directory.
if not hasattr(FLAGS, 'test_srcdir'):
  FLAGS.test_srcdir = ''
if not hasattr(FLAGS, 'test_tmpdir'):
  FLAGS.test_tmpdir = tf.test.get_temp_dir()
class ParsingReaderOpsTest(test_util.TensorFlowTestCase):
  """End-to-end tests for SyntaxNet's reader, lexicon and embedding ops."""
  def setUp(self):
    """Materializes the task context and builds the lexicon term maps."""
    # Creates a task context with the correct testing paths.
    initial_task_context = os.path.join(FLAGS.test_srcdir,
                                        'syntaxnet/'
                                        'testdata/context.pbtxt')
    self._task_context = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
    # Rewrite the placeholder paths in the checked-in context so the ops read
    # test data from test_srcdir and write outputs to test_tmpdir.
    with open(initial_task_context, 'r') as fin:
      with open(self._task_context, 'w') as fout:
        fout.write(fin.read().replace('SRCDIR', FLAGS.test_srcdir)
                   .replace('OUTPATH', FLAGS.test_tmpdir))
    # Creates necessary term maps.
    with self.test_session() as sess:
      gen_parser_ops.lexicon_builder(task_context=self._task_context,
                                     corpus_name='training-corpus').run()
      self._num_features, self._num_feature_ids, _, self._num_actions = (
          sess.run(gen_parser_ops.feature_size(task_context=self._task_context,
                                              arg_prefix='brain_parser')))
  def GetMaxId(self, sparse_features):
    """Returns the largest feature id found in serialized SparseFeatures."""
    max_id = 0
    for x in sparse_features:
      for y in x:
        f = sparse_pb2.SparseFeatures()
        f.ParseFromString(y)
        for i in f.id:
          max_id = max(i, max_id)
    return max_id
  def testParsingReaderOp(self):
    """Checks the reader yields the same number of steps in two settings."""
    # Runs the reader over the test input for two epochs.
    num_steps_a = 0
    num_actions = 0
    num_word_ids = 0
    num_tag_ids = 0
    num_label_ids = 0
    batch_size = 10
    with self.test_session() as sess:
      (words, tags, labels), epochs, gold_actions = (
          gen_parser_ops.gold_parse_reader(self._task_context,
                                           3,
                                           batch_size,
                                           corpus_name='training-corpus'))
      while True:
        tf_gold_actions, tf_epochs, tf_words, tf_tags, tf_labels = (
            sess.run([gold_actions, epochs, words, tags, labels]))
        num_steps_a += 1
        # Track the vocabulary/action-space sizes actually observed so the
        # second run can be configured consistently.
        num_actions = max(num_actions, max(tf_gold_actions) + 1)
        num_word_ids = max(num_word_ids, self.GetMaxId(tf_words) + 1)
        num_tag_ids = max(num_tag_ids, self.GetMaxId(tf_tags) + 1)
        num_label_ids = max(num_label_ids, self.GetMaxId(tf_labels) + 1)
        self.assertIn(tf_epochs, [0, 1, 2])
        if tf_epochs > 1:
          break
    # Runs the reader again, this time with a lot of added graph nodes.
    num_steps_b = 0
    with self.test_session() as sess:
      num_features = [6, 6, 4]
      num_feature_ids = [num_word_ids, num_tag_ids, num_label_ids]
      embedding_sizes = [8, 8, 8]
      hidden_layer_sizes = [32, 32]
      # Here we aim to test the iteration of the reader op in a complex network,
      # not the GraphBuilder.
      parser = graph_builder.GreedyParser(
          num_actions, num_features, num_feature_ids, embedding_sizes,
          hidden_layer_sizes)
      parser.AddTraining(self._task_context,
                         batch_size,
                         corpus_name='training-corpus')
      sess.run(parser.inits.values())
      while True:
        tf_epochs, tf_cost, _ = sess.run(
            [parser.training['epochs'], parser.training['cost'],
             parser.training['train_op']])
        num_steps_b += 1
        self.assertGreaterEqual(tf_cost, 0)
        self.assertIn(tf_epochs, [0, 1, 2])
        if tf_epochs > 1:
          break
    # Assert that the two runs made the exact same number of steps.
    logging.info('Number of steps in the two runs: %d, %d',
                 num_steps_a, num_steps_b)
    self.assertEqual(num_steps_a, num_steps_b)
  def testParsingReaderOpWhileLoop(self):
    """Checks the reader op re-evaluates correctly inside tf.while_loop."""
    feature_size = 3
    batch_size = 5
    def ParserEndpoints():
      # Fresh reader endpoints; called once per loop body evaluation.
      return gen_parser_ops.gold_parse_reader(self._task_context,
                                              feature_size,
                                              batch_size,
                                              corpus_name='training-corpus')
    with self.test_session() as sess:
      # The 'condition' and 'body' functions expect as many arguments as there
      # are loop variables. 'condition' depends on the 'epoch' loop variable
      # only, so we disregard the remaining unused function arguments. 'body'
      # returns a list of updated loop variables.
      def Condition(epoch, *unused_args):
        return tf.less(epoch, 2)
      def Body(epoch, num_actions, *feature_args):
        # By adding one of the outputs of the reader op ('epoch') as a control
        # dependency to the reader op we force the repeated evaluation of the
        # reader op.
        with epoch.graph.control_dependencies([epoch]):
          features, epoch, gold_actions = ParserEndpoints()
        num_actions = tf.maximum(num_actions,
                                 tf.reduce_max(gold_actions, [0], False) + 1)
        feature_ids = []
        for i in range(len(feature_args)):
          feature_ids.append(features[i])
        return [epoch, num_actions] + feature_ids
      epoch = ParserEndpoints()[-2]
      num_actions = tf.constant(0)
      loop_vars = [epoch, num_actions]
      res = sess.run(
          tf.while_loop(Condition, Body, loop_vars,
                        shape_invariants=[tf.TensorShape(None)] * 2,
                        parallel_iterations=1))
      logging.info('Result: %s', res)
      # The loop condition stops at epoch == 2.
      self.assertEqual(res[0], 2)
  def _token_embedding(self, token, embedding):
    """Serializes one (token, vector) pair as a TokenEmbedding proto."""
    e = dictionary_pb2.TokenEmbedding()
    e.token = token
    e.vector.values.extend(embedding)
    return e.SerializeToString()
  def testWordEmbeddingInitializer(self):
    """Checks embeddings are loaded and L2-normalized per row."""
    # Provide embeddings for the first three words in the word map.
    records_path = os.path.join(FLAGS.test_tmpdir, 'records1')
    writer = tf.python_io.TFRecordWriter(records_path)
    writer.write(self._token_embedding('.', [1, 2]))
    writer.write(self._token_embedding(',', [3, 4]))
    writer.write(self._token_embedding('the', [5, 6]))
    del writer
    with self.test_session():
      embeddings = gen_parser_ops.word_embedding_initializer(
          vectors=records_path,
          task_context=self._task_context).eval()
    # Each expected row is the input vector divided by its L2 norm.
    self.assertAllClose(
        np.array([[1. / (1 + 4) ** .5, 2. / (1 + 4) ** .5],
                  [3. / (9 + 16) ** .5, 4. / (9 + 16) ** .5],
                  [5. / (25 + 36) ** .5, 6. / (25 + 36) ** .5]]),
        embeddings[:3,])
  def testWordEmbeddingInitializerRepeatability(self):
    """Checks the initializer is deterministic for fixed non-zero seeds."""
    records_path = os.path.join(FLAGS.test_tmpdir, 'records2')
    writer = tf.python_io.TFRecordWriter(records_path)
    writer.write(self._token_embedding('.', [1, 2, 3]))  # 3 dims
    del writer
    # As long as there is one non-zero seed, the result should be repeatable.
    for seed1, seed2 in [(0, 1), (1, 0), (123, 456)]:
      with tf.Graph().as_default(), self.test_session():
        embeddings1 = gen_parser_ops.word_embedding_initializer(
            vectors=records_path,
            task_context=self._task_context,
            seed=seed1,
            seed2=seed2)
        embeddings2 = gen_parser_ops.word_embedding_initializer(
            vectors=records_path,
            task_context=self._task_context,
            seed=seed1,
            seed2=seed2)
        # The number of terms is based on the word map, which may change if the
        # test corpus is updated. Just assert that there are some terms.
        self.assertGreater(tf.shape(embeddings1)[0].eval(), 0)
        self.assertGreater(tf.shape(embeddings2)[0].eval(), 0)
        self.assertEqual(tf.shape(embeddings1)[1].eval(), 3)
        self.assertEqual(tf.shape(embeddings2)[1].eval(), 3)
        self.assertAllEqual(embeddings1.eval(), embeddings2.eval())
# Entry point: run all test cases through the Google/TF test runner when
# this file is executed directly.
if __name__ == '__main__':
  googletest.main()
| [
"syntaxnet.dictionary_pb2.TokenEmbedding",
"syntaxnet.ops.gen_parser_ops.gold_parse_reader",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.python.platform.tf_logging.info",
"syntaxnet.sparse_pb2.SparseFeatures",
"tensorflow.less",
"tensorflow.TensorShape",
"tensorflow.constant",
"tensorflow.pyt... | [((1228, 1250), 'tensorflow.test.get_temp_dir', 'tf.test.get_temp_dir', ([], {}), '()\n', (1248, 1250), True, 'import tensorflow as tf\n'), ((9028, 9045), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (9043, 9045), False, 'from tensorflow.python.platform import googletest\n'), ((4776, 4861), 'tensorflow.python.platform.tf_logging.info', 'logging.info', (['"""Number of steps in the two runs: %d, %d"""', 'num_steps_a', 'num_steps_b'], {}), "('Number of steps in the two runs: %d, %d', num_steps_a,\n num_steps_b)\n", (4788, 4861), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((6754, 6785), 'syntaxnet.dictionary_pb2.TokenEmbedding', 'dictionary_pb2.TokenEmbedding', ([], {}), '()\n', (6783, 6785), False, 'from syntaxnet import dictionary_pb2\n'), ((7064, 7105), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['records_path'], {}), '(records_path)\n', (7091, 7105), True, 'import tensorflow as tf\n'), ((7827, 7868), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['records_path'], {}), '(records_path)\n', (7854, 7868), True, 'import tensorflow as tf\n'), ((2881, 2983), 'syntaxnet.ops.gen_parser_ops.gold_parse_reader', 'gen_parser_ops.gold_parse_reader', (['self._task_context', '(3)', 'batch_size'], {'corpus_name': '"""training-corpus"""'}), "(self._task_context, 3, batch_size,\n corpus_name='training-corpus')\n", (2913, 2983), False, 'from syntaxnet.ops import gen_parser_ops\n'), ((4075, 4186), 'syntaxnet.graph_builder.GreedyParser', 'graph_builder.GreedyParser', (['num_actions', 'num_features', 'num_feature_ids', 'embedding_sizes', 'hidden_layer_sizes'], {}), '(num_actions, num_features, num_feature_ids,\n embedding_sizes, hidden_layer_sizes)\n', (4101, 4186), False, 'from syntaxnet import graph_builder\n'), ((5046, 5159), 'syntaxnet.ops.gen_parser_ops.gold_parse_reader', 'gen_parser_ops.gold_parse_reader', (['self._task_context', 
'feature_size', 'batch_size'], {'corpus_name': '"""training-corpus"""'}), "(self._task_context, feature_size,\n batch_size, corpus_name='training-corpus')\n", (5078, 5159), False, 'from syntaxnet.ops import gen_parser_ops\n'), ((6379, 6393), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (6390, 6393), True, 'import tensorflow as tf\n'), ((6631, 6662), 'tensorflow.python.platform.tf_logging.info', 'logging.info', (['"""Result: %s"""', 'res'], {}), "('Result: %s', res)\n", (6643, 6662), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((7490, 7653), 'numpy.array', 'np.array', (['[[1.0 / (1 + 4) ** 0.5, 2.0 / (1 + 4) ** 0.5], [3.0 / (9 + 16) ** 0.5, 4.0 /\n (9 + 16) ** 0.5], [5.0 / (25 + 36) ** 0.5, 6.0 / (25 + 36) ** 0.5]]'], {}), '([[1.0 / (1 + 4) ** 0.5, 2.0 / (1 + 4) ** 0.5], [3.0 / (9 + 16) ** \n 0.5, 4.0 / (9 + 16) ** 0.5], [5.0 / (25 + 36) ** 0.5, 6.0 / (25 + 36) **\n 0.5]])\n', (7498, 7653), True, 'import numpy as np\n'), ((2180, 2272), 'syntaxnet.ops.gen_parser_ops.feature_size', 'gen_parser_ops.feature_size', ([], {'task_context': 'self._task_context', 'arg_prefix': '"""brain_parser"""'}), "(task_context=self._task_context, arg_prefix=\n 'brain_parser')\n", (2207, 2272), False, 'from syntaxnet.ops import gen_parser_ops\n'), ((2432, 2459), 'syntaxnet.sparse_pb2.SparseFeatures', 'sparse_pb2.SparseFeatures', ([], {}), '()\n', (2457, 2459), False, 'from syntaxnet import sparse_pb2\n'), ((5674, 5691), 'tensorflow.less', 'tf.less', (['epoch', '(2)'], {}), '(epoch, 2)\n', (5681, 5691), True, 'import tensorflow as tf\n'), ((8162, 8287), 'syntaxnet.ops.gen_parser_ops.word_embedding_initializer', 'gen_parser_ops.word_embedding_initializer', ([], {'vectors': 'records_path', 'task_context': 'self._task_context', 'seed': 'seed1', 'seed2': 'seed2'}), '(vectors=records_path,\n task_context=self._task_context, seed=seed1, seed2=seed2)\n', (8203, 8287), False, 'from syntaxnet.ops import gen_parser_ops\n'), ((8355, 8480), 
'syntaxnet.ops.gen_parser_ops.word_embedding_initializer', 'gen_parser_ops.word_embedding_initializer', ([], {'vectors': 'records_path', 'task_context': 'self._task_context', 'seed': 'seed1', 'seed2': 'seed2'}), '(vectors=records_path,\n task_context=self._task_context, seed=seed1, seed2=seed2)\n', (8396, 8480), False, 'from syntaxnet.ops import gen_parser_ops\n'), ((1949, 2048), 'syntaxnet.ops.gen_parser_ops.lexicon_builder', 'gen_parser_ops.lexicon_builder', ([], {'task_context': 'self._task_context', 'corpus_name': '"""training-corpus"""'}), "(task_context=self._task_context, corpus_name\n ='training-corpus')\n", (1979, 2048), False, 'from syntaxnet.ops import gen_parser_ops\n'), ((7332, 7432), 'syntaxnet.ops.gen_parser_ops.word_embedding_initializer', 'gen_parser_ops.word_embedding_initializer', ([], {'vectors': 'records_path', 'task_context': 'self._task_context'}), '(vectors=records_path,\n task_context=self._task_context)\n', (7373, 7432), False, 'from syntaxnet.ops import gen_parser_ops\n'), ((6117, 6156), 'tensorflow.reduce_max', 'tf.reduce_max', (['gold_actions', '[0]', '(False)'], {}), '(gold_actions, [0], False)\n', (6130, 6156), True, 'import tensorflow as tf\n'), ((8094, 8104), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8102, 8104), True, 'import tensorflow as tf\n'), ((6550, 6570), 'tensorflow.TensorShape', 'tf.TensorShape', (['None'], {}), '(None)\n', (6564, 6570), True, 'import tensorflow as tf\n'), ((8708, 8729), 'tensorflow.shape', 'tf.shape', (['embeddings1'], {}), '(embeddings1)\n', (8716, 8729), True, 'import tensorflow as tf\n'), ((8771, 8792), 'tensorflow.shape', 'tf.shape', (['embeddings2'], {}), '(embeddings2)\n', (8779, 8792), True, 'import tensorflow as tf\n'), ((8832, 8853), 'tensorflow.shape', 'tf.shape', (['embeddings1'], {}), '(embeddings1)\n', (8840, 8853), True, 'import tensorflow as tf\n'), ((8893, 8914), 'tensorflow.shape', 'tf.shape', (['embeddings2'], {}), '(embeddings2)\n', (8901, 8914), True, 'import tensorflow as 
tf\n')] |
import tensorflow as tf
import numpy as np
from .env import Environment
from .registry import register_env, get_reward_augmentation
@register_env
class CoinRun(Environment):
  """Environment wrapper around OpenAI's CoinRun (single env only).

  Fills the relevant hparams fields (state/action dimensions, state shape,
  action-space type, pixel_input) from the wrapped environment's spaces and
  optionally applies a registered reward augmentation in `step`.
  """

  def __init__(self, hparams):
    # only support 1 environment currently
    super().__init__(hparams)
    try:
      from coinrun import setup_utils, make
      setup_utils.setup_and_load(use_cmd_line_args=False)
      self._env = make('standard', num_envs=1)
    except ImportError as e:
      # CoinRun is an optional dependency with a manual install procedure.
      print(e)
      print("please check README for CoinRun installation instruction")
      exit()
    self.seed(1234)
    self._observation_space = self._env.observation_space
    self._action_space = self._env.action_space
    self._hparams.num_states = self._observation_space.shape[0]
    self._hparams.num_actions = self._action_space.n
    self._hparams.state_shape = list(self._observation_space.shape)
    self._hparams.action_space_type = self._action_space.__class__.__name__
    self._hparams.pixel_input = True
    # BUG FIX: default to None unconditionally. Previously the attribute was
    # only assigned inside the `if` branch below, so step() raised
    # AttributeError whenever hparams.reward_augmentation was None (unless a
    # base class happened to define it).
    self._reward_augmentation = None
    if self._hparams.reward_augmentation is not None:
      self._reward_augmentation = get_reward_augmentation(
          self._hparams.reward_augmentation)

  def step(self, action):
    """Run environment's dynamics one step at a time.

    Args:
      action: scalar or array-like action; scalars are promoted to a
        length-1 batch for the single underlying environment.

    Returns:
      Tuple (state, reward, done, info) with singleton batch dimensions
      squeezed out of state, reward and done.
    """
    action = np.asarray(action)
    if action.ndim < 1:
      action = np.expand_dims(action, axis=0)
    state, reward, done, info = self._env.step(action)
    # remove single dimensional entries
    state = self._process_state(state)
    reward = np.squeeze(reward)
    done = np.squeeze(done)
    if self._reward_augmentation is not None:
      reward = self._reward_augmentation(state, reward, done, info)
    return state, reward, done, info

  def reset(self):
    """Return the current observation without resetting.

    CoinRun has no reset:
    https://github.com/openai/coinrun/blob/master/coinrun/coinrunenv.py#L181
    so we take the observation from a pending step instead.
    """
    state, _, _, _ = self._env.step_wait()
    state = self._process_state(state)
    return state

  def _process_state(self, state):
    """Convert pixel input to int8 and remove single entry at 0 axis."""
    if self._hparams.pixel_input:
      # NOTE(review): int8 overflows for pixel values > 127; uint8 looks like
      # the intended dtype. Kept as-is to preserve behavior -- confirm before
      # changing.
      state = state.astype(np.int8)
    # remove single dimensional entry for CoinRun
    if state.ndim == len(self._hparams.state_shape) + 1:
      state = np.squeeze(state, axis=0)
    return state

  def close(self):
    """Perform any necessary cleanup when environment closes (no-op)."""
    return

  def seed(self, seed):
    """No-op: CoinRun seeding is not wired through this wrapper."""
    pass

  def render(self, mode='human'):
    """Renders the environment."""
    self._env.render()
| [
"numpy.asarray",
"numpy.expand_dims",
"coinrun.make",
"numpy.squeeze",
"coinrun.setup_utils.setup_and_load"
] | [((1248, 1266), 'numpy.asarray', 'np.asarray', (['action'], {}), '(action)\n', (1258, 1266), True, 'import numpy as np\n'), ((1486, 1504), 'numpy.squeeze', 'np.squeeze', (['reward'], {}), '(reward)\n', (1496, 1504), True, 'import numpy as np\n'), ((1516, 1532), 'numpy.squeeze', 'np.squeeze', (['done'], {}), '(done)\n', (1526, 1532), True, 'import numpy as np\n'), ((340, 391), 'coinrun.setup_utils.setup_and_load', 'setup_utils.setup_and_load', ([], {'use_cmd_line_args': '(False)'}), '(use_cmd_line_args=False)\n', (366, 391), False, 'from coinrun import setup_utils, make\n'), ((411, 439), 'coinrun.make', 'make', (['"""standard"""'], {'num_envs': '(1)'}), "('standard', num_envs=1)\n", (415, 439), False, 'from coinrun import setup_utils, make\n'), ((1306, 1336), 'numpy.expand_dims', 'np.expand_dims', (['action'], {'axis': '(0)'}), '(action, axis=0)\n', (1320, 1336), True, 'import numpy as np\n'), ((2223, 2248), 'numpy.squeeze', 'np.squeeze', (['state'], {'axis': '(0)'}), '(state, axis=0)\n', (2233, 2248), True, 'import numpy as np\n')] |
import copy
import ipdb
import math
import os
import torch
import numpy as np
import time
from torch.nn import functional as F
from torch.autograd import Variable
from tqdm import tqdm, trange
from model import Transformer, FastTransformer, INF, TINY, softmax
from data import NormalField, NormalTranslationDataset, TripleTranslationDataset, ParallelDataset, data_path
from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, \
double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged
from time import gmtime, strftime
import copy
from multiset import Multiset
tokenizer = lambda x: x.replace('@@ ', '').split()
def run_fast_transformer(decoder_inputs, decoder_masks,\
                         sources, source_masks,\
                         targets,\
                         encoding,\
                         model, args, use_argmax=True):
    """Decode with a FastTransformer by iterative refinement.

    Runs the model's decoder stack up to `args.valid_repeat_dec` times,
    feeding each iteration's output (embedded argmax/sampled tokens and/or
    decoder hidden states, per `args.next_dec_input`) back in as the next
    iteration's input. Optionally stops early per sentence when
    `args.adaptive_decoding` is set:
      - "oracle":   stop when per-sentence BLEU vs. the reference has
                    converged over the last `args.adaptive_window` iterations;
      - "equality": stop when the token output repeats exactly;
      - "jaccard":  stop when multiset (Jaccard) similarity converges.

    Returns:
        Tuple (decoding, all_decodings, num_iters, all_probs):
        final decoding tensor, list of decodings from every iteration,
        per-sentence iteration counts (0s when adaptive decoding is off),
        and the per-iteration probability tensors.
    """
    # Reference strings; only actually consumed by the "oracle" stopping rule.
    trg_unidx = model.output_decoding( ('trg', targets) )
    batch_size, src_len, hsize = encoding[0].size()
    all_decodings = []
    all_probs = []
    iter_ = 0
    # Per-sentence histories used by the adaptive stopping criteria.
    bleu_hist = [ [] for xx in range(batch_size) ]
    output_hist = [ [] for xx in range(batch_size) ]
    multiset_hist = [ [] for xx in range(batch_size) ]
    num_iters = [ 0 for xx in range(batch_size) ]
    done_ = [False for xx in range(batch_size)]
    final_decoding = [ None for xx in range(batch_size) ]
    while True:
        # Clamp to the number of distinct decoder parameter sets available.
        curr_iter = min(iter_, args.num_decs-1)
        next_iter = min(iter_+1, args.num_decs-1)
        decoding, out, probs = model(encoding, source_masks, decoder_inputs, decoder_masks,
                                     decoding=True, return_probs=True, iter_=curr_iter)
        dec_output = decoding.data.cpu().numpy().tolist()
        """
        if args.trg_len_option != "reference":
            decoder_masks = 0. * decoder_masks
            for bidx in range(batch_size):
                try:
                    decoder_masks[bidx,:(dec_output[bidx].index(3))+1] = 1.
                except:
                    decoder_masks[bidx,:] = 1.
        """
        if args.adaptive_decoding == "oracle":
            # Score this iteration's output against the reference and stop a
            # sentence once its BLEU has converged over the window; the kept
            # decoding is the first one of the converged window.
            out_unidx = model.output_decoding( ('trg', decoding ) )
            sentence_bleus = computeBLEU(out_unidx, trg_unidx, corpus=False, tokenizer=tokenizer)
            for bidx in range(batch_size):
                output_hist[bidx].append( dec_output[bidx] )
                bleu_hist[bidx].append(sentence_bleus[bidx])
            converged = oracle_converged( bleu_hist, num_items=args.adaptive_window )
            for bidx in range(batch_size):
                if not done_[bidx] and converged[bidx] and num_iters[bidx] == 0:
                    num_iters[bidx] = iter_ + 1 - (args.adaptive_window -1)
                    done_[bidx] = True
                    final_decoding[bidx] = output_hist[bidx][-args.adaptive_window]
        elif args.adaptive_decoding == "equality":
            # Stop a sentence once its token sequence repeats exactly across
            # the window; keep the latest decoding.
            for bidx in range(batch_size):
                #if 3 in dec_output[bidx]:
                #    dec_output[bidx] = dec_output[bidx][:dec_output[bidx].index(3)]
                output_hist[bidx].append( dec_output[bidx] )
            converged = equality_converged( output_hist, num_items=args.adaptive_window )
            for bidx in range(batch_size):
                if not done_[bidx] and converged[bidx] and num_iters[bidx] == 0:
                    num_iters[bidx] = iter_ + 1
                    done_[bidx] = True
                    final_decoding[bidx] = output_hist[bidx][-1]
        elif args.adaptive_decoding == "jaccard":
            # Stop a sentence once the multiset (bag-of-tokens) similarity
            # converges across the window; keep the latest decoding.
            for bidx in range(batch_size):
                #if 3 in dec_output[bidx]:
                #    dec_output[bidx] = dec_output[bidx][:dec_output[bidx].index(3)]
                output_hist[bidx].append( dec_output[bidx] )
                multiset_hist[bidx].append( Multiset(dec_output[bidx]) )
            converged = jaccard_converged( multiset_hist, num_items=args.adaptive_window )
            for bidx in range(batch_size):
                if not done_[bidx] and converged[bidx] and num_iters[bidx] == 0:
                    num_iters[bidx] = iter_ + 1
                    done_[bidx] = True
                    final_decoding[bidx] = output_hist[bidx][-1]
        all_decodings.append( decoding )
        all_probs.append(probs)
        # Build the next iteration's decoder input from token embeddings
        # (argmax or sampled) and/or the raw decoder output states.
        decoder_inputs = 0
        if args.next_dec_input in ["both", "emb"]:
            if use_argmax:
                _, argmax = torch.max(probs, dim=-1)
            else:
                probs_sz = probs.size()
                probs_ = Variable(probs.data, requires_grad=False)
                argmax = torch.multinomial(probs_.contiguous().view(-1, probs_sz[-1]), 1).view(*probs_sz[:-1])
            emb = F.embedding(argmax, model.decoder[next_iter].out.weight * math.sqrt(args.d_model))
            decoder_inputs += emb
        if args.next_dec_input in ["both", "out"]:
            decoder_inputs += out
        iter_ += 1
        if iter_ == args.valid_repeat_dec or (False not in done_):
            break
    if args.adaptive_decoding != None:
        # Finalize sentences that never converged: cap their iteration count
        # and fall back to the best (oracle) or last recorded decoding.
        for bidx in range(batch_size):
            if num_iters[bidx] == 0:
                num_iters[bidx] = 20
            if final_decoding[bidx] == None:
                if args.adaptive_decoding == "oracle":
                    final_decoding[bidx] = output_hist[bidx][np.argmax(bleu_hist[bidx])]
                else:
                    final_decoding[bidx] = output_hist[bidx][-1]
        decoding = Variable(torch.LongTensor(np.array(final_decoding)))
        if decoder_masks.is_cuda:
            decoding = decoding.cuda()
    return decoding, all_decodings, num_iters, all_probs
def decode_model(args, model, dev, evaluate=True, trg_len_dic=None,
decoding_path=None, names=None, maxsteps=None):
args.logger.info("decoding, f_size={}, beam_size={}, alpha={}".format(args.f_size, args.beam_size, args.alpha))
dev.train = False # make iterator volatile=True
if not args.no_tqdm:
progressbar = tqdm(total=200, desc='start decoding')
model.eval()
if not args.debug:
decoding_path.mkdir(parents=True, exist_ok=True)
handles = [(decoding_path / name ).open('w') for name in names]
corpus_size = 0
src_outputs, trg_outputs, dec_outputs, timings = [], [], [], []
all_decs = [ [] for idx in range(args.valid_repeat_dec)]
decoded_words, target_words, decoded_info = 0, 0, 0
attentions = None
decoder = model.decoder[0] if args.model is FastTransformer else model.decoder
pad_id = decoder.field.vocab.stoi['<pad>']
eos_id = decoder.field.vocab.stoi['<eos>']
curr_time = 0
cum_sentences = 0
cum_tokens = 0
cum_images = 0 # used for mscoco
num_iters_total = []
for iters, dev_batch in enumerate(dev):
start_t = time.time()
if args.dataset != "mscoco":
decoder_inputs, decoder_masks,\
targets, target_masks,\
sources, source_masks,\
encoding, batch_size, rest = model.quick_prepare(dev_batch, fast=(type(model) is FastTransformer), trg_len_option=args.trg_len_option, trg_len_ratio=args.trg_len_ratio, trg_len_dic=trg_len_dic, bp=args.bp)
else:
# only use first caption for calculating log likelihood
all_captions = dev_batch[1]
dev_batch[1] = dev_batch[1][0]
decoder_inputs, decoder_masks,\
targets, target_masks,\
_, source_masks,\
encoding, batch_size, rest = model.quick_prepare_mscoco(dev_batch, all_captions=all_captions, fast=(type(model) is FastTransformer), inputs_dec=args.inputs_dec, trg_len_option=args.trg_len_option, max_len=args.max_len, trg_len_dic=trg_len_dic, bp=args.bp, gpu=args.gpu>-1)
sources = None
cum_sentences += batch_size
batch_size, src_len, hsize = encoding[0].size()
# for now
if type(model) is Transformer:
all_decodings = []
decoding = model(encoding, source_masks, decoder_inputs, decoder_masks,
beam=args.beam_size, alpha=args.alpha, \
decoding=True, feedback=attentions)
all_decodings.append( decoding )
num_iters = [0]
elif type(model) is FastTransformer:
decoding, all_decodings, num_iters, argmax_all_probs = run_fast_transformer(decoder_inputs, decoder_masks, \
sources, source_masks, targets, encoding, model, args, use_argmax=True)
num_iters_total.extend( num_iters )
if not args.use_argmax:
for _ in range(args.num_samples):
_, _, _, sampled_all_probs = run_fast_transformer(decoder_inputs, decoder_masks, \
sources, source_masks, encoding, model, args, use_argmax=False)
for iter_ in range(args.valid_repeat_dec):
argmax_all_probs[iter_] = argmax_all_probs[iter_] + sampled_all_probs[iter_]
all_decodings = []
for iter_ in range(args.valid_repeat_dec):
argmax_all_probs[iter_] = argmax_all_probs[iter_] / args.num_samples
all_decodings.append(torch.max(argmax_all_probs[iter_], dim=-1)[-1])
decoding = all_decodings[-1]
used_t = time.time() - start_t
curr_time += used_t
if args.dataset != "mscoco":
if args.remove_repeats:
outputs_unidx = [model.output_decoding(d) for d in [('src', sources), ('trg', targets), ('trg', remove_repeats_tensor(decoding))]]
else:
outputs_unidx = [model.output_decoding(d) for d in [('src', sources), ('trg', targets), ('trg', decoding)]]
else:
# make sure that 5 captions per each example
num_captions = len(all_captions[0])
for c in range(1, len(all_captions)):
assert (num_captions == len(all_captions[c]))
# untokenize reference captions
for n_ref in range(len(all_captions)):
n_caps = len(all_captions[0])
for c in range(n_caps):
all_captions[n_ref][c] = all_captions[n_ref][c].replace("@@ ","")
outputs_unidx = [ list(map(list, zip(*all_captions))) ]
if args.remove_repeats:
all_dec_outputs = [model.output_decoding(d) for d in [('trg', remove_repeats_tensor(all_decodings[ii])) for ii in range(len(all_decodings))]]
else:
all_dec_outputs = [model.output_decoding(d) for d in [('trg', all_decodings[ii]) for ii in range(len(all_decodings))]]
corpus_size += batch_size
if args.dataset != "mscoco":
cum_tokens += sum([len(xx.split(" ")) for xx in outputs_unidx[0]]) # NOTE source tokens, not target
if args.dataset != "mscoco":
src_outputs += outputs_unidx[0]
trg_outputs += outputs_unidx[1]
if args.remove_repeats:
dec_outputs += remove_repeats(outputs_unidx[-1])
else:
dec_outputs += outputs_unidx[-1]
else:
trg_outputs += outputs_unidx[0]
for idx, each_output in enumerate(all_dec_outputs):
if args.remove_repeats:
all_decs[idx] += remove_repeats(each_output)
else:
all_decs[idx] += each_output
#if True:
if False and decoding_path is not None:
for sent_i in range(len(outputs_unidx[0])):
if args.dataset != "mscoco":
print ('SRC')
print (outputs_unidx[0][sent_i])
for ii in range(len(all_decodings)):
print ('DEC iter {}'.format(ii))
print (all_dec_outputs[ii][sent_i])
print ('TRG')
print (outputs_unidx[1][sent_i])
else:
print ('TRG')
trg = outputs_unidx[0]
for subsent_i in range(len(trg[sent_i])):
print ('TRG {}'.format(subsent_i))
print (trg[sent_i][subsent_i])
for ii in range(len(all_decodings)):
print ('DEC iter {}'.format(ii))
print (all_dec_outputs[ii][sent_i])
print ('---------------------------')
timings += [used_t]
if not args.debug:
for s, t, d in zip(outputs_unidx[0], outputs_unidx[1], outputs_unidx[2]):
s, t, d = s.replace('@@ ', ''), t.replace('@@ ', ''), d.replace('@@ ', '')
print(s, file=handles[0], flush=True)
print(t, file=handles[1], flush=True)
print(d, file=handles[2], flush=True)
if not args.no_tqdm:
progressbar.update(iters)
progressbar.set_description('finishing sentences={}/batches={}, \
length={}/average iter={}, speed={} sec/batch'.format(\
corpus_size, iters, src_len, np.mean(np.array(num_iters)), curr_time / (1 + iters)))
if evaluate:
for idx, each_dec in enumerate(all_decs):
if len(all_decs[idx]) != len(trg_outputs):
break
if args.dataset != "mscoco":
bleu_output = computeBLEU(each_dec, trg_outputs, corpus=True, tokenizer=tokenizer)
else:
bleu_output = computeBLEUMSCOCO(each_dec, trg_outputs, corpus=True, tokenizer=tokenizer)
args.logger.info("iter {} | {}".format(idx+1, print_bleu(bleu_output)))
if args.adaptive_decoding != None:
args.logger.info("----------------------------------------------")
args.logger.info("Average # iters {}".format(np.mean(num_iters_total)))
bleu_output = computeBLEU(dec_outputs, trg_outputs, corpus=True, tokenizer=tokenizer)
args.logger.info("Adaptive BLEU | {}".format(print_bleu(bleu_output)))
args.logger.info("----------------------------------------------")
args.logger.info("Decoding speed analysis :")
args.logger.info("{} sentences".format(cum_sentences))
if args.dataset != "mscoco":
args.logger.info("{} tokens".format(cum_tokens))
args.logger.info("{:.3f} seconds".format(curr_time))
args.logger.info("{:.3f} ms / sentence".format((curr_time / float(cum_sentences) * 1000)))
if args.dataset != "mscoco":
args.logger.info("{:.3f} ms / token".format((curr_time / float(cum_tokens) * 1000)))
args.logger.info("{:.3f} sentences / s".format(float(cum_sentences) / curr_time))
if args.dataset != "mscoco":
args.logger.info("{:.3f} tokens / s".format(float(cum_tokens) / curr_time))
args.logger.info("----------------------------------------------")
if args.decode_which > 0:
args.logger.info("Writing to special file")
parent = decoding_path / "speed" / "b_{}{}".format(args.beam_size if args.model is Transformer else args.valid_repeat_dec,
"" if args.model is Transformer else "_{}".format(args.adaptive_decoding != None))
args.logger.info(str(parent))
parent.mkdir(parents=True, exist_ok=True)
speed_handle = (parent / "results.{}".format(args.decode_which) ).open('w')
print("----------------------------------------------", file=speed_handle, flush=True)
print("Decoding speed analysis :", file=speed_handle, flush=True)
print("{} sentences".format(cum_sentences), file=speed_handle, flush=True)
if args.dataset != "mscoco":
print("{} tokens".format(cum_tokens), file=speed_handle, flush=True)
print("{:.3f} seconds".format(curr_time), file=speed_handle, flush=True)
print("{:.3f} ms / sentence".format((curr_time / float(cum_sentences) * 1000)), file=speed_handle, flush=True)
if args.dataset != "mscoco":
print("{:.3f} ms / token".format((curr_time / float(cum_tokens) * 1000)), file=speed_handle, flush=True)
print("{:.3f} sentences / s".format(float(cum_sentences) / curr_time), file=speed_handle, flush=True)
if args.dataset != "mscoco":
print("{:.3f} tokens / s".format(float(cum_tokens) / curr_time), file=speed_handle, flush=True)
print("----------------------------------------------", file=speed_handle, flush=True)
| [
"utils.computeBLEUMSCOCO",
"utils.print_bleu",
"tqdm.tqdm",
"utils.jaccard_converged",
"math.sqrt",
"numpy.argmax",
"torch.autograd.Variable",
"utils.remove_repeats",
"utils.equality_converged",
"time.time",
"utils.computeBLEU",
"utils.remove_repeats_tensor",
"numpy.mean",
"torch.max",
"... | [((6127, 6165), 'tqdm.tqdm', 'tqdm', ([], {'total': '(200)', 'desc': '"""start decoding"""'}), "(total=200, desc='start decoding')\n", (6131, 6165), False, 'from tqdm import tqdm, trange\n'), ((6927, 6938), 'time.time', 'time.time', ([], {}), '()\n', (6936, 6938), False, 'import time\n'), ((14016, 14087), 'utils.computeBLEU', 'computeBLEU', (['dec_outputs', 'trg_outputs'], {'corpus': '(True)', 'tokenizer': 'tokenizer'}), '(dec_outputs, trg_outputs, corpus=True, tokenizer=tokenizer)\n', (14027, 14087), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((2326, 2394), 'utils.computeBLEU', 'computeBLEU', (['out_unidx', 'trg_unidx'], {'corpus': '(False)', 'tokenizer': 'tokenizer'}), '(out_unidx, trg_unidx, corpus=False, tokenizer=tokenizer)\n', (2337, 2394), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((2586, 2645), 'utils.oracle_converged', 'oracle_converged', (['bleu_hist'], {'num_items': 'args.adaptive_window'}), '(bleu_hist, num_items=args.adaptive_window)\n', (2602, 2645), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((9501, 9512), 'time.time', 'time.time', ([], {}), '()\n', (9510, 9512), False, 'import time\n'), ((3280, 3343), 'utils.equality_converged', 'equality_converged', (['output_hist'], {'num_items': 'args.adaptive_window'}), '(output_hist, 
num_items=args.adaptive_window)\n', (3298, 3343), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((4556, 4580), 'torch.max', 'torch.max', (['probs'], {'dim': '(-1)'}), '(probs, dim=-1)\n', (4565, 4580), False, 'import torch\n'), ((4664, 4705), 'torch.autograd.Variable', 'Variable', (['probs.data'], {'requires_grad': '(False)'}), '(probs.data, requires_grad=False)\n', (4672, 4705), False, 'from torch.autograd import Variable\n'), ((5618, 5642), 'numpy.array', 'np.array', (['final_decoding'], {}), '(final_decoding)\n', (5626, 5642), True, 'import numpy as np\n'), ((11192, 11225), 'utils.remove_repeats', 'remove_repeats', (['outputs_unidx[-1]'], {}), '(outputs_unidx[-1])\n', (11206, 11225), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((11482, 11509), 'utils.remove_repeats', 'remove_repeats', (['each_output'], {}), '(each_output)\n', (11496, 11509), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((13523, 13591), 'utils.computeBLEU', 'computeBLEU', (['each_dec', 'trg_outputs'], {'corpus': '(True)', 'tokenizer': 'tokenizer'}), '(each_dec, trg_outputs, corpus=True, tokenizer=tokenizer)\n', (13534, 13591), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, 
double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((13640, 13714), 'utils.computeBLEUMSCOCO', 'computeBLEUMSCOCO', (['each_dec', 'trg_outputs'], {'corpus': '(True)', 'tokenizer': 'tokenizer'}), '(each_dec, trg_outputs, corpus=True, tokenizer=tokenizer)\n', (13657, 13714), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((13967, 13991), 'numpy.mean', 'np.mean', (['num_iters_total'], {}), '(num_iters_total)\n', (13974, 13991), True, 'import numpy as np\n'), ((14141, 14164), 'utils.print_bleu', 'print_bleu', (['bleu_output'], {}), '(bleu_output)\n', (14151, 14164), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((4004, 4068), 'utils.jaccard_converged', 'jaccard_converged', (['multiset_hist'], {'num_items': 'args.adaptive_window'}), '(multiset_hist, num_items=args.adaptive_window)\n', (4021, 4068), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((4893, 4916), 'math.sqrt', 'math.sqrt', (['args.d_model'], {}), '(args.d_model)\n', (4902, 4916), False, 'import math\n'), ((13773, 13796), 'utils.print_bleu', 'print_bleu', (['bleu_output'], {}), '(bleu_output)\n', (13783, 13796), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, 
organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((5457, 5483), 'numpy.argmax', 'np.argmax', (['bleu_hist[bidx]'], {}), '(bleu_hist[bidx])\n', (5466, 5483), True, 'import numpy as np\n'), ((13259, 13278), 'numpy.array', 'np.array', (['num_iters'], {}), '(num_iters)\n', (13267, 13278), True, 'import numpy as np\n'), ((3950, 3976), 'multiset.Multiset', 'Multiset', (['dec_output[bidx]'], {}), '(dec_output[bidx])\n', (3958, 3976), False, 'from multiset import Multiset\n'), ((10590, 10630), 'utils.remove_repeats_tensor', 'remove_repeats_tensor', (['all_decodings[ii]'], {}), '(all_decodings[ii])\n', (10611, 10630), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n'), ((9390, 9432), 'torch.max', 'torch.max', (['argmax_all_probs[iter_]'], {'dim': '(-1)'}), '(argmax_all_probs[iter_], dim=-1)\n', (9399, 9432), False, 'import torch\n'), ((9737, 9768), 'utils.remove_repeats_tensor', 'remove_repeats_tensor', (['decoding'], {}), '(decoding)\n', (9758, 9768), False, 'from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged\n')] |
"""
Copyright (R) @huawei.com, all rights reserved
-*- coding:utf-8 -*-
"""
import sys
import os
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path, ".."))
sys.path.append(os.path.join(path, "../../../../common/"))
sys.path.append(os.path.join(path, "../../../../common/acllite"))
import cv2 as cv
import numpy as np
import acl
import base64
import utils
from acllite_imageproc import AclLiteImageProc
import constants as const
from acllite_model import AclLiteModel
from acllite_image import AclLiteImage
from acllite_resource import AclLiteResource
# Input resolution expected by the DeepLabV3+ offline model.
MODEL_WIDTH = 513
MODEL_HEIGHT = 513
# Directory containing this script; used to resolve model/output paths.
SRC_PATH = os.path.realpath(__file__).rsplit("/", 1)[0]
# Compiled Ascend offline model (.om) used for inference.
MODEL_PATH = os.path.join(SRC_PATH, "../model/deeplabv3_plus.om")
# NOTE(review): OUTPUT_DIR is defined but unused — the code below writes to
# SRC_PATH/../out instead; confirm before removing.
OUTPUT_DIR = './out/'
def preprocess(picPath):
    """Load an image and prepare it as model input.

    Returns a tuple ``(orig_shape, img)`` where ``orig_shape`` is the
    original (height, width) of the picture and ``img`` is the picture
    resized to the model resolution and cast to int8.
    """
    source = cv.imread(picPath)
    height_width = source.shape[:2]
    resized = cv.resize(source, (MODEL_WIDTH, MODEL_HEIGHT)).astype(np.int8)
    # the inference runtime expects a C-contiguous buffer
    if not resized.flags['C_CONTIGUOUS']:
        resized = np.ascontiguousarray(resized)
    return height_width, resized
def postprocess(result_list, pic, orig_shape, pic_path):
    """Convert the raw segmentation output into a mask image and save it.

    Parameters
    ----------
    result_list : list
        Inference outputs; ``result_list[0]`` holds the flat class map.
    pic : str
        Path of the input picture, used only to name the output file.
    orig_shape : tuple
        Original (height, width) of the input picture.
    pic_path : str
        Unused; kept for backward compatibility with existing callers.
    """
    # Reshape the flat output to the model resolution (was hard-coded 513x513).
    result_img = result_list[0].reshape(MODEL_HEIGHT, MODEL_WIDTH)
    result_img = result_img.astype('uint8')
    # NOTE: the original code also re-read the input picture here into an
    # unused local (`orig_img`); that wasted disk read has been removed.
    img = cv.merge((result_img, result_img, result_img))
    # cv.resize takes (width, height), hence the swapped indices.
    bgr_img = cv.resize(img, (orig_shape[1], orig_shape[0]))
    # uint8 arithmetic wraps modulo 256: 0 -> 255, 1 -> 0 (inverts the mask).
    bgr_img = (bgr_img + 255)
    output_pic = os.path.join(os.path.join(SRC_PATH, "../out"), os.path.basename(pic))
    print(output_pic)
    cv.imwrite(output_pic, bgr_img)
def main():
    """Run DeepLabV3+ inference on every image in the directory given on argv[1].

    Usage: python <script> <image_dir>
    """
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)
    # Initialize ACL runtime resources and load the offline model.
    acl_resource = AclLiteResource()
    acl_resource.init()
    model = AclLiteModel(MODEL_PATH)
    dvpp = AclLiteImageProc(acl_resource)
    # Collect every file with a recognized image extension from the directory.
    image_dir = sys.argv[1]
    images_list = [os.path.join(image_dir, img)
                   for img in os.listdir(image_dir)
                   if os.path.splitext(img)[1] in const.IMG_EXT]
    # Create the output directory; exist_ok avoids the isdir/mkdir race.
    out_dir = os.path.join(SRC_PATH, "../out")
    os.makedirs(out_dir, exist_ok=True)
    # preprocess -> inference -> postprocess for each picture
    for pic in images_list:
        orig_shape, l_data = preprocess(pic)
        result_list = model.execute([l_data])
        postprocess(result_list, pic, orig_shape, pic)
    print("Execute end")

if __name__ == '__main__':
    main()
| [
"os.listdir",
"os.path.abspath",
"acllite_model.AclLiteModel",
"os.path.basename",
"cv2.imwrite",
"os.path.realpath",
"numpy.ascontiguousarray",
"cv2.imread",
"os.path.splitext",
"acllite_imageproc.AclLiteImageProc",
"cv2.merge",
"os.path.join",
"acllite_resource.AclLiteResource",
"cv2.res... | [((693, 745), 'os.path.join', 'os.path.join', (['SRC_PATH', '"""../model/deeplabv3_plus.om"""'], {}), "(SRC_PATH, '../model/deeplabv3_plus.om')\n", (705, 745), False, 'import os\n'), ((120, 145), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (135, 145), False, 'import os\n'), ((163, 187), 'os.path.join', 'os.path.join', (['path', '""".."""'], {}), "(path, '..')\n", (175, 187), False, 'import os\n'), ((205, 246), 'os.path.join', 'os.path.join', (['path', '"""../../../../common/"""'], {}), "(path, '../../../../common/')\n", (217, 246), False, 'import os\n'), ((264, 312), 'os.path.join', 'os.path.join', (['path', '"""../../../../common/acllite"""'], {}), "(path, '../../../../common/acllite')\n", (276, 312), False, 'import os\n'), ((843, 861), 'cv2.imread', 'cv.imread', (['picPath'], {}), '(picPath)\n', (852, 861), True, 'import cv2 as cv\n'), ((1336, 1355), 'cv2.imread', 'cv.imread', (['pic_path'], {}), '(pic_path)\n', (1345, 1355), True, 'import cv2 as cv\n'), ((1366, 1412), 'cv2.merge', 'cv.merge', (['(result_img, result_img, result_img)'], {}), '((result_img, result_img, result_img))\n', (1374, 1412), True, 'import cv2 as cv\n'), ((1427, 1473), 'cv2.resize', 'cv.resize', (['img', '(orig_shape[1], orig_shape[0])'], {}), '(img, (orig_shape[1], orig_shape[0]))\n', (1436, 1473), True, 'import cv2 as cv\n'), ((1622, 1653), 'cv2.imwrite', 'cv.imwrite', (['output_pic', 'bgr_img'], {}), '(output_pic, bgr_img)\n', (1632, 1653), True, 'import cv2 as cv\n'), ((1800, 1817), 'acllite_resource.AclLiteResource', 'AclLiteResource', ([], {}), '()\n', (1815, 1817), False, 'from acllite_resource import AclLiteResource\n'), ((1854, 1878), 'acllite_model.AclLiteModel', 'AclLiteModel', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1866, 1878), False, 'from acllite_model import AclLiteModel\n'), ((1890, 1920), 'acllite_imageproc.AclLiteImageProc', 'AclLiteImageProc', (['acl_resource'], {}), '(acl_resource)\n', (1906, 1920), False, 'from 
acllite_imageproc import AclLiteImageProc\n'), ((1094, 1119), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (1114, 1119), True, 'import numpy as np\n'), ((1539, 1571), 'os.path.join', 'os.path.join', (['SRC_PATH', '"""../out"""'], {}), "(SRC_PATH, '../out')\n", (1551, 1571), False, 'import os\n'), ((1573, 1594), 'os.path.basename', 'os.path.basename', (['pic'], {}), '(pic)\n', (1589, 1594), False, 'import os\n'), ((2051, 2079), 'os.path.join', 'os.path.join', (['image_dir', 'img'], {}), '(image_dir, img)\n', (2063, 2079), False, 'import os\n'), ((635, 661), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (651, 661), False, 'import os\n'), ((942, 989), 'cv2.resize', 'cv.resize', (['bgr_img', '(MODEL_WIDTH, MODEL_HEIGHT)'], {}), '(bgr_img, (MODEL_WIDTH, MODEL_HEIGHT))\n', (951, 989), True, 'import cv2 as cv\n'), ((2110, 2131), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (2120, 2131), False, 'import os\n'), ((2223, 2255), 'os.path.join', 'os.path.join', (['SRC_PATH', '"""../out"""'], {}), "(SRC_PATH, '../out')\n", (2235, 2255), False, 'import os\n'), ((2275, 2307), 'os.path.join', 'os.path.join', (['SRC_PATH', '"""../out"""'], {}), "(SRC_PATH, '../out')\n", (2287, 2307), False, 'import os\n'), ((2154, 2175), 'os.path.splitext', 'os.path.splitext', (['img'], {}), '(img)\n', (2170, 2175), False, 'import os\n')] |
import os
import time
import numpy as np
import torch.distributed as dist
import helper_torch
import networkarch_torch as net
from mpi4py import MPI
import torch.multiprocessing as processing
from Dependency.Aggregation import *
from GlobalParameters import *
from matplotlib import cm
import matplotlib.pyplot as plt
import copy
import time
import sys
import math
import copy
import numpy as np
import random as r
'''ta_train_tensor.shape[1]
# TODO: check the calculation of num_examples
num_batches = int(np.floor(num_examples / params['batch_size']))
# random shuffle the training data
ind = np.arange(num_examples)
np.random.shuffle(ind)
data_train_tensor = data_train_tensor[:, ind, :]
# loop over batches in this file
train_val_error[count, 0] = train_error
train_val_error[count, 1] = val_error
train_val_error[count, 2] = sess.run(regularized_loss, feed_dict=feed_dict_train_loss)
train_val_error[count, 3] = sess.run(regularized_loss, feed_dict=feed_dict_val)
train_val_error[count, 4] = sess.run(loss1, feed_dict=feed_dict_train_loss)
train_val_error[count, 5] = sess.run(loss1, feed_dict=feed_dict_val)
train_val_error[count, 6] = sess.run(loss2, feed_dict=feed_dict_train_loss)
train_val_error[count, 7] = sess.run(loss2, feed_dict=feed_dict_val)
train_val_error[count, 8] = sess.run(loss3, feed_dict=feed_dict_train_loss)
train_val_error[count, 9] = sess.run(loss3, feed_dict=feed_dict_val)
train_val_error[count, 10] = sess.run(loss_Linf, feed_dict=feed_dict_train_loss)
train_val_error[count, 11] = sess.run(loss_Linf, feed_dict=feed_dict_val)
if np.isnan(train_val_error[count, 10]):
params['stop_condition'] = 'loss_Linf is nan'
finished = 1
break
train_val_error[count, 12] = sess.run(loss_L1, feed_dict=feed_dict_train_loss)
train_val_error[count, 13] = sess.run(loss_L1, feed_dict=feed_dict_val)
train_val_error[count, 14] = sess.run(loss_L2, feed_dict=feed_dict_train_loss)
train_val_error[count, 15] = sess.run(loss_L2, feed_dict=feed_dict_val)
np.savetxt(csv_path, train_val_error, delimiter=',')
finished, save_now = helperfns.check_progress(start, best_error, params)
count = count + 1
if save_now:
train_val_error_trunc = train_val_error[range(count), :]
helperfns.save_files(sess, csv_path, train_val_error_trunc, params, weights, biases)
if finished:
break
if step > params['num_steps_per_file_pass']:
params['stop_condition'] = 'reached num_steps_per_file_pass'
break
# SAVE RESULTS
train_val_error = train_val_error[range(count), :]
print(train_val_error)
params['time_exp'] = time.time() - start
saver.restore(sess, params['model_path'])
helperfns.save_files(sess, csv_path, train_val_error, params, weights, biases)
tf.reset_default_graph()
def main_exp(params):
"""Set up and run one random experiment.
Arguments:
params -- dictionary of parameters for experiment
Side effects:
Changes params dict
If doesn't already exist, creates folder params['folder_name']
Saves files in that folder
"""
helperfns.set_defaults(params)
if not os.path.exists(params['folder_name']):
os.makedirs(params['folder_name'])
tf.set_random_seed(params['seed'])
np.random.seed(params['seed'])
# data is num_steps x num_examples x n but load flattened version (matrix instead of tensor)
data_val = np.loadtxt(('./data/%s_val_x.csv' % (params['data_name'])), delimiter=',', dtype=np.float64)
try_net(data_val, params)'''
params = {}
# settings related to dataset
params['data_name'] = 'SIR'
params['len_time'] = 257
n = 2 # dimension of system (and input layer)
num_initial_conditions = 5000 # per training file
params['delta_t'] = 0.02
# settings related to saving results
params['folder_name'] = 'exp2'
# settings related to network architecture
params['num_real'] = 0
params['num_complex_pairs'] = 1
params['num_evals'] = 2
k = params['num_evals'] # dimension of y-coordinates
# defaults related to initialization of parameters
params['dist_weights'] = 'dl'
params['dist_weights_omega'] = 'dl'
# settings related to loss function
params['num_shifts'] = 30
params['num_shifts_middle'] = params['len_time'] - 1
max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
# each initial condition yields (len_time - max_shifts) usable training windows
num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
params['recon_lam'] = .001
params['L1_lam'] = 0.0
params['auto_first'] = 1
# settings related to training
params['num_passes_per_file'] = 1542 #15 * 6 * 50
params['num_steps_per_batch'] = 2
params['learning_rate'] = 10 ** (-3)
# settings related to timing
params['max_time'] = 6 * 60 * 60 # 6 hours
# wall-clock checkpoint thresholds (presumably minimum error targets that must
# be met by each elapsed-time mark — TODO confirm against helper_torch)
params['min_5min'] = .25
params['min_20min'] = .02
params['min_40min'] = .002
params['min_1hr'] = .0002
params['min_2hr'] = .00002
params['min_3hr'] = .000004
params['min_4hr'] = .0000005
params['min_halfway'] = 1
# settings related to LSTM
params['num_LSTM_input_weights'] = 1
params['num_LSTM_hidden_weights'] = 1
params['LSTM_widths'] = [50]
params['data_train_len'] = 2 #r.randint(3, 6)
params['batch_size'] = int(2 ** 2)#(r.randint(7, 9)))
steps_to_see_all = num_examples / params['batch_size']
params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']
# regularization weights drawn at random (leftover hyperparameter search)
params['L2_lam'] = 10 ** (-r.randint(13, 14))
params['Linf_lam'] = 10 ** (-r.randint(7, 10))
# randomly choose main-network depth (d) and hidden width (w)
d = r.randint(1, 2)
if d == 1:
    wopts = np.arange(100, 200, 5)
    w = wopts[r.randint(0, len(wopts) - 1)]
    params['widths'] = [n, w, k, k, w, n]
elif d == 2:
    wopts = np.arange(30, 90, 5)
    w = wopts[r.randint(0, len(wopts) - 1)]
    params['widths'] = [n, w, w, k, k, w, w, n]
# randomly choose omega auxiliary-network depth (do) and width (wo)
do = r.randint(1, 2)
if do == 1:
    wopts = np.arange(140, 190, 5)
    wo = wopts[r.randint(0, len(wopts) - 1)]
    params['hidden_widths_omega'] = [wo, ]
elif do == 2:
    wopts = np.arange(10, 55, 5)
    wo = wopts[r.randint(0, len(wopts) - 1)]
    params['hidden_widths_omega'] = [wo, wo]
helper_torch.set_defaults(params)
# =================== FL methods (EDITABLE) ====================
def LocalTraining(worker_id: int, init_model: dict, pipe_upload, pipe_download):
    """Worker-side training loop of the federated setup.

    Builds a local network from ``init_model``, waits for the server's start
    signal on ``pipe_download``, then trains forever: after each round of
    ``local_epoch_num`` epochs it sends a tuple of
    (state_dict, training-set size, training-loss history) via ``pipe_upload``.
    """
    local_net = net.koopman_net(params, device=device, task=task)
    local_net.load_state_dict(init_model)
    local_net.SetTrainingSet()
    # block until the server signals the start of training
    pipe_download.recv()
    while True:
        local_net.Train(epoch=local_epoch_num)
        if device == 'cuda':
            # deep-copy to CPU so the state dict can be pickled through the pipe
            cpu_copy = copy.deepcopy(local_net)
            cpu_copy.to('cpu')
            payload = (cpu_copy.state_dict().copy(), cpu_copy.size_trainingset, cpu_copy.history_loss_train)
            pipe_upload.send(payload)
        elif device == 'cpu':
            payload = (local_net.state_dict().copy(), local_net.size_trainingset, local_net.history_loss_train)
            pipe_upload.send(payload)
        print(f'Worker {worker_id} done.')
def Aggregation():
    """Combine the collected local models into one global model.

    Dispatches on the module-level ``aggregation_rule``; returns an empty
    dict copy if the rule is not recognized.
    """
    aggregated = {}
    if aggregation_rule == 'FedAvg':
        aggregated = FedAvg(current_local_models, size_local_dataset)
    return aggregated.copy()
# =================== FL methods (EDITABLE) ====================
# =================== Statistic methods ====================
def Statistic(keep_graph=False):
    """Plot per-worker loss curves plus global/poisoned accuracy histories.

    With ``keep_graph=False`` (default) the figure is refreshed in-place for
    interactive mode; with ``keep_graph=True`` it is shown and kept on screen.
    """
    global test_model
    global local_loss_list
    plt.xlabel('Iteration', fontsize=13)
    plt.ylabel('Accuracy', fontsize=13)
    for worker_idx in range(0, num_worker):
        plt.plot(local_loss_list[worker_idx], color=colors[worker_idx])
    plt.plot(test_model.history_acc_benign, linestyle='--', label='acc_global')
    plt.plot(test_model.history_acc_poisoned, linestyle='--', label='acc_poisoned')
    plt.legend()
    plt.grid()
    if keep_graph:
        plt.ioff()
        plt.show()
    else:
        plt.pause(0.01)
        plt.cla()
# =================== Statistic methods ====================
# main process
if __name__ == '__main__':
    # =================== global variables/containers ====================
    # pool to store parallel threading of training or attacking
    process_pool = []
    # upload link pipelines
    model_pipeline_upload = []
    # download link pipelines
    model_pipeline_download = []
    # local training loss
    local_loss_list = []
    # global gradient
    # NOTE(review): global_gradient is initialized but never used below
    global_gradient = {}
    # global iteration counter
    cnt_global_iteration = 0
    # plot settings: one distinct colormap color per worker
    start = 0.0
    stop = 1.0
    number_of_lines = num_worker
    cm_subsection = np.linspace(start, stop, number_of_lines)
    colors = [cm.jet(x) for x in cm_subsection]
    # =================== global variables/containers ====================
    # =================== Welcome ====================
    print('********************************************************')
    print('** Welcome to PI federated learning system! **')
    print('********************************************************')
    print('')
    # =================== Welcome ====================
    # =================== INITIALIZATION ====================
    # checking global parameters
    print('Checking global parameters......', end='')
    print('Done.')
    # initializing global model
    test_model = net.koopman_net(params, device, task)
    try:
        print('Initializing global model contrainer......', end='')
        #device=device, model=model, task=task
        #test_model.SetTestingSet(f'./data/SIR_train_{task}/test.csv')
        #test_model.to('cpu')
        print('Done.')
    except:
        print('\033[31mFailed\033[0m')
        sys.exit(-1)
    # creating workers
    for i in range(0, num_worker):
        # creating pipelines for model's communication across processes.
        # Note: Pipe() returns two ends of pipe: out, in
        try:
            print(f'Communication link of worker {i}......', end='')
            model_pipeline_upload.append(processing.Pipe())
            model_pipeline_download.append(processing.Pipe())
            if i < num_worker:
                # creating a benign worker process
                process_pool.append(processing.Process(target=LocalTraining, args=(
                    i, test_model.state_dict().copy(), model_pipeline_upload[i][1], model_pipeline_download[i][0])))
                print('Done.')
            time.sleep(0.1)
        except:
            print('\033[31mFailed\033[0m')
            sys.exit(-1)
    test_model.to(device)
    # activate worker processes
    for i in range(0, num_worker):
        try:
            print(f'Activating worker {i}......', end='')
            process_pool[i].start()
            print('Done.')
        except:
            print('\033[31mFailed\033[0m')
            sys.exit(-1)
    # switch plt into iteration mode
    plt.ion()
    # =================== INITIALIZATION ====================
    # =================== Server process ====================
    # release every worker from its initial pipe_download.recv()
    for pipe in model_pipeline_download:
        pipe[1].send('start')
    print('')
    print('\033[32mTraining Start!\033[0m')
    for i in range(0, global_iteration_num):
        print(f'Global iteration {i}......')
        start_time = time.perf_counter()
        current_local_models = []
        size_local_dataset = []
        local_loss_list = []
        # gather (state_dict, dataset size, loss history) from every worker
        for pipe in model_pipeline_upload:
            msg = pipe[0].recv()
            current_local_models.append(msg[0])
            size_local_dataset.append(msg[1])
            local_loss_list.append(msg[2])
        # NOTE(review): the aggregation / evaluation / broadcast step below is a
        # dead string literal, so workers never receive an updated global model.
        '''global_model = Aggregation()
        end_time = time.perf_counter()
        print(f'Done at {time.asctime(time.localtime(time.time()))}, time cost: {end_time - start_time}s.')
        test_model.load_state_dict(global_model.copy())
        test_model.TestOnBenignSet()
        cnt_global_iteration += 1
        if i == global_iteration_num - 1:
            Statistic(keep_graph=True)
        else:
            Statistic()
        for pipe in model_pipeline_download:
            pipe[1].send(global_model.copy())
    # =================== Server process ====================
    print(test_model.history_acc_benign)'''
"numpy.arange",
"torch.multiprocessing.Pipe",
"random.randint",
"matplotlib.pyplot.cla",
"numpy.linspace",
"matplotlib.pyplot.pause",
"copy.deepcopy",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.cm.jet",
"time.perf_counter",
"time.sleep",
"matplotlib.pyplot.ion",
"mat... | [((5875, 5890), 'random.randint', 'r.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (5884, 5890), True, 'import random as r\n'), ((6167, 6182), 'random.randint', 'r.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (6176, 6182), True, 'import random as r\n'), ((6456, 6489), 'helper_torch.set_defaults', 'helper_torch.set_defaults', (['params'], {}), '(params)\n', (6481, 6489), False, 'import helper_torch\n'), ((5914, 5936), 'numpy.arange', 'np.arange', (['(100)', '(200)', '(5)'], {}), '(100, 200, 5)\n', (5923, 5936), True, 'import numpy as np\n'), ((6207, 6229), 'numpy.arange', 'np.arange', (['(140)', '(190)', '(5)'], {}), '(140, 190, 5)\n', (6216, 6229), True, 'import numpy as np\n'), ((6651, 6700), 'networkarch_torch.koopman_net', 'net.koopman_net', (['params'], {'device': 'device', 'task': 'task'}), '(params, device=device, task=task)\n', (6666, 6700), True, 'import networkarch_torch as net\n'), ((7794, 7830), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {'fontsize': '(13)'}), "('Iteration', fontsize=13)\n", (7804, 7830), True, 'import matplotlib.pyplot as plt\n'), ((7835, 7870), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {'fontsize': '(13)'}), "('Accuracy', fontsize=13)\n", (7845, 7870), True, 'import matplotlib.pyplot as plt\n'), ((7966, 8041), 'matplotlib.pyplot.plot', 'plt.plot', (['test_model.history_acc_benign'], {'linestyle': '"""--"""', 'label': '"""acc_global"""'}), "(test_model.history_acc_benign, linestyle='--', label='acc_global')\n", (7974, 8041), True, 'import matplotlib.pyplot as plt\n'), ((8046, 8125), 'matplotlib.pyplot.plot', 'plt.plot', (['test_model.history_acc_poisoned'], {'linestyle': '"""--"""', 'label': '"""acc_poisoned"""'}), "(test_model.history_acc_poisoned, linestyle='--', label='acc_poisoned')\n", (8054, 8125), True, 'import matplotlib.pyplot as plt\n'), ((8130, 8142), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8140, 8142), True, 'import matplotlib.pyplot as plt\n'), ((8147, 
8157), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8155, 8157), True, 'import matplotlib.pyplot as plt\n'), ((8928, 8969), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'number_of_lines'], {}), '(start, stop, number_of_lines)\n', (8939, 8969), True, 'import numpy as np\n'), ((9641, 9678), 'networkarch_torch.koopman_net', 'net.koopman_net', (['params', 'device', 'task'], {}), '(params, device, task)\n', (9656, 9678), True, 'import networkarch_torch as net\n'), ((11168, 11177), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (11175, 11177), True, 'import matplotlib.pyplot as plt\n'), ((5804, 5821), 'random.randint', 'r.randint', (['(13)', '(14)'], {}), '(13, 14)\n', (5813, 5821), True, 'import random as r\n'), ((5852, 5868), 'random.randint', 'r.randint', (['(7)', '(10)'], {}), '(7, 10)\n', (5861, 5868), True, 'import random as r\n'), ((6048, 6068), 'numpy.arange', 'np.arange', (['(30)', '(90)', '(5)'], {}), '(30, 90, 5)\n', (6057, 6068), True, 'import numpy as np\n'), ((6344, 6364), 'numpy.arange', 'np.arange', (['(10)', '(55)', '(5)'], {}), '(10, 55, 5)\n', (6353, 6364), True, 'import numpy as np\n'), ((7914, 7959), 'matplotlib.pyplot.plot', 'plt.plot', (['local_loss_list[l]'], {'color': 'colors[l]'}), '(local_loss_list[l], color=colors[l])\n', (7922, 7959), True, 'import matplotlib.pyplot as plt\n'), ((8189, 8204), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (8198, 8204), True, 'import matplotlib.pyplot as plt\n'), ((8213, 8222), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (8220, 8222), True, 'import matplotlib.pyplot as plt\n'), ((8241, 8251), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (8249, 8251), True, 'import matplotlib.pyplot as plt\n'), ((8260, 8270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8268, 8270), True, 'import matplotlib.pyplot as plt\n'), ((8984, 8993), 'matplotlib.cm.jet', 'cm.jet', (['x'], {}), '(x)\n', (8990, 8993), False, 'from matplotlib import cm\n'), 
((11543, 11562), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11560, 11562), False, 'import time\n'), ((6948, 6970), 'copy.deepcopy', 'copy.deepcopy', (['network'], {}), '(network)\n', (6961, 6970), False, 'import copy\n'), ((9987, 9999), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (9995, 9999), False, 'import sys\n'), ((10716, 10731), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (10726, 10731), False, 'import time\n'), ((10312, 10329), 'torch.multiprocessing.Pipe', 'processing.Pipe', ([], {}), '()\n', (10327, 10329), True, 'import torch.multiprocessing as processing\n'), ((10374, 10391), 'torch.multiprocessing.Pipe', 'processing.Pipe', ([], {}), '()\n', (10389, 10391), True, 'import torch.multiprocessing as processing\n'), ((10803, 10815), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (10811, 10815), False, 'import sys\n'), ((11114, 11126), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (11122, 11126), False, 'import sys\n')] |
import tensorflow as tf
import numpy as np
import scipy as sc
import xarray as xr
import ecubevis as ecv
from . import POSTUPSAMPLING_METHODS
from .utils import crop_array, resize_array, checkarray_ndim
def create_pair_hr_lr(
    array,
    array_lr,
    upsampling,
    scale,
    patch_size,
    static_vars=None,
    predictors=None,
    season=None,
    debug=False,
    interpolation='inter_area'):
    """
    Create a pair of HR and LR square sub-patches. In this case, the LR
    corresponds to a coarsen version of the HR reference with land-ocean mask,
    topography and auxiliary predictors added as "image channels".

    Parameters
    ----------
    array : np.ndarray
        HR gridded data. 3D for spatial samples or 4D for spatio-temporal
        samples (rank is checked below; assumed [time, y, x, (channels)] /
        [y, x, (channels)] — TODO confirm against callers).
    array_lr : np.ndarray
        LR gridded data. If not provided, then implicit/coarsened pairs are
        created from ``array``.
    upsampling : str
        String with the name of the upsampling method.
    scale : int
        Scaling factor.
    patch_size : int or None
        Size of the square patches to be extracted, in pixels for the HR grid.
    static_vars : None or list of 2D ndarrays, optional
        Static variables such as elevation data or a binary land-ocean mask.
    predictors : np.ndarray, optional
        Predictor variables in HR. To be concatenated to the LR version of
        `array`.
    season : str or None, optional
        One of 'winter', 'spring', 'summer', 'autumn'. When given, a one-hot
        season array is concatenated to the auxiliary (and, for spatial
        samples, the LR) arrays.
    debug : bool, optional
        If True, plots and debugging information are shown.
    interpolation : str, optional
        Interpolation used when upsampling/downsampling the training samples.
        By default 'inter_area'.

    Returns
    -------
    hr_array, lr_array : np.ndarray
        The paired HR and LR samples, cast to 'float32'.
    static_array_hr : np.ndarray
        HR auxiliary array; returned only when ``static_vars`` or ``season``
        is given.
    """
    def preproc_static_vars(var):
        # Prepare one static variable: crop it to the HR patch (reusing the
        # crop offsets computed for the HR array below) and, for
        # post-upsampling models, resize its LR counterpart to the LR size.
        if patch_size is not None:
            # `crop_y`, `crop_x` and `patch_size_lr` come from the enclosing
            # scope (closure over the main function body)
            var_hr = crop_array(np.squeeze(var), patch_size, yx=(crop_y, crop_x))
            var_hr = checkarray_ndim(var_hr, 3, -1)
            if upsampling in POSTUPSAMPLING_METHODS:
                var_lr = resize_array(var_hr, (patch_size_lr, patch_size_lr), interpolation)
            else:
                var_lr = var_hr
        else:
            var_hr = checkarray_ndim(var, 3, -1)
            if upsampling in POSTUPSAMPLING_METHODS:
                var_lr = resize_array(var, (lr_x, lr_y), interpolation)
            else:
                var_lr = var_hr
            var_lr = checkarray_ndim(var_lr, 3, -1)
        return var_hr, var_lr
    # --------------------------------------------------------------------------
    hr_array = array
    if array_lr is not None:
        lr_array = array_lr
        lr_is_given = True
    else:
        lr_is_given = False
    # 4D input -> spatio-temporal sample (y/x are axes 1 and 2);
    # 3D input -> spatial sample (y/x are axes 0 and 1)
    if hr_array.ndim == 4:
        is_spatiotemp = True
        hr_y = hr_array.shape[1]
        hr_x = hr_array.shape[2]
    elif hr_array.ndim == 3:
        is_spatiotemp = False
        hr_y = hr_array.shape[0]
        hr_x = hr_array.shape[1]
    # --------------------------------------------------------------------------
    # Cropping/resizing the arrays
    if upsampling == 'pin':
        # Pre-upsampling via interpolation: the LR sample is brought to the
        # HR grid size before entering the network
        if lr_is_given:
            if is_spatiotemp:
                lr_y = array_lr.shape[1]
                lr_x = array_lr.shape[2]
            else:
                lr_y = array_lr.shape[0]
                lr_x = array_lr.shape[1]
            # lr grid is upsampled via interpolation
            if is_spatiotemp:
                lr_array = checkarray_ndim(lr_array, 4, -1)
            lr_array_resized = resize_array(lr_array, (hr_x, hr_y), interpolation, squeezed=False)
        else:
            lr_x, lr_y = int(hr_x / scale), int(hr_y / scale)
            # hr grid is downsampled and upsampled via interpolation
            lr_array_resized = resize_array(hr_array, (lr_x, lr_y), interpolation, squeezed=False)
            # coarsened grid is upsampled via interpolation
            lr_array_resized = resize_array(lr_array_resized, (hr_x, hr_y), interpolation, squeezed=False)
        if patch_size is not None:
            # cropping both hr_array and lr_array (same sizes); the random
            # crop position from the HR array is reused for the LR array
            hr_array, crop_y, crop_x = crop_array(np.squeeze(hr_array), patch_size,
                                                  yx=None, position=True)
            lr_array = crop_array(np.squeeze(lr_array_resized), patch_size, yx=(crop_y, crop_x))
        else:
            # no cropping
            lr_array = lr_array_resized
        if is_spatiotemp:
            hr_array = checkarray_ndim(hr_array, 4, -1)
            lr_array = checkarray_ndim(lr_array, 4, -1)
        else:
            hr_array = checkarray_ndim(hr_array, 3, -1)
            lr_array = checkarray_ndim(lr_array, 3, -1)
        if predictors is not None:
            if predictors.shape[1] != lr_y or predictors.shape[2] != lr_x:
                # we coarsen/interpolate the mid-res or high-res predictors
                predictors = resize_array(predictors, (lr_x, lr_y), interpolation)
            # predictors are then brought to the HR grid size ('pin' works on
            # the interpolated grid)
            predictors = resize_array(predictors, (hr_x, hr_y), interpolation)
            if patch_size is not None:
                # cropping first the predictors
                lr_array_predictors, crop_y, crop_x = crop_array(predictors, patch_size,
                                                                 yx=(crop_y, crop_x), position=True)
            else:
                lr_array_predictors = predictors
            # concatenating the predictors to the lr image
            lr_array = np.concatenate([lr_array, lr_array_predictors], axis=-1)
    elif upsampling in POSTUPSAMPLING_METHODS:
        # Post-upsampling: the LR sample stays at the coarse resolution
        # (HR size divided by `scale`)
        if patch_size is not None:
            patch_size_lr = int(patch_size / scale)
        if lr_is_given:
            if is_spatiotemp:
                lr_y = array_lr.shape[1]
                lr_x = array_lr.shape[2]
            else:
                lr_y = array_lr.shape[0]
                lr_x = array_lr.shape[1]
        else:
            lr_x, lr_y = int(hr_x / scale), int(hr_y / scale)
        if predictors is not None:
            if predictors.shape[1] != lr_y or predictors.shape[2] != lr_x:
                # we coarsen/interpolate the mid-res or high-res predictors
                lr_array_predictors = resize_array(predictors, (lr_x, lr_y), interpolation)
            else:
                lr_array_predictors = predictors
            if patch_size is not None:
                # cropping the lr predictors; the LR crop position is scaled
                # up to locate the matching HR crop
                lr_array_predictors, crop_y, crop_x = crop_array(lr_array_predictors, patch_size_lr,
                                                                 yx=None, position=True)
                crop_y_hr = int(crop_y * scale)
                crop_x_hr = int(crop_x * scale)
                # cropping the hr_array
                hr_array = crop_array(np.squeeze(hr_array), patch_size, yx=(crop_y_hr, crop_x_hr))
                if lr_is_given:
                    lr_array = crop_array(lr_array, patch_size_lr, yx=(crop_y, crop_x))
            # downsampling the hr array to get lr_array when the lr array is not provided
            if not lr_is_given:
                lr_array = resize_array(hr_array, (lr_x, lr_y), interpolation, squeezed=False)
            if is_spatiotemp:
                hr_array = checkarray_ndim(hr_array, 4, -1)
                lr_array = checkarray_ndim(lr_array, 4, -1)
            else:
                hr_array = checkarray_ndim(hr_array, 3, -1)
                lr_array = checkarray_ndim(lr_array, 3, -1)
            # concatenating the predictors to the lr grid
            lr_array = np.concatenate([lr_array, lr_array_predictors], axis=-1)
        else:
            # no predictors given
            if patch_size is not None:
                if lr_is_given:
                    # cropping the lr array
                    lr_array, crop_y, crop_x = crop_array(lr_array, patch_size_lr,
                                                          yx=None, position=True)
                    crop_y_hr = int(crop_y * scale)
                    crop_x_hr = int(crop_x * scale)
                    # cropping the hr_array
                    hr_array = crop_array(np.squeeze(hr_array), patch_size, yx=(crop_y_hr, crop_x_hr))
                else:
                    # cropping the hr array
                    hr_array, crop_y, crop_x = crop_array(hr_array, patch_size, yx=None, position=True)
                    # downsampling the hr array to get lr_array
                    lr_array = resize_array(hr_array, (patch_size_lr, patch_size_lr), interpolation)
            else:
                if not lr_is_given:
                    # downsampling the hr array to get lr_array
                    lr_array = resize_array(hr_array, (lr_x, lr_y), interpolation)
            # NOTE(review): this path only expands to 3D — looks like it
            # assumes spatial samples; confirm for spatio-temporal inputs
            hr_array = checkarray_ndim(hr_array, 3, -1)
            lr_array = checkarray_ndim(lr_array, 3, -1)
    # --------------------------------------------------------------------------
    # Including the static variables and season
    static_array_hr = []
    if static_vars is not None:
        for staticvar in static_vars:
            staticvar_hr, staticvar_lr = preproc_static_vars(staticvar)
            static_array_hr.append(staticvar_hr)
            # for spatial samples, the static array is concatenated to the lr one
            if not is_spatiotemp:
                lr_array = np.concatenate([lr_array, staticvar_lr], axis=-1)
        static_array_hr = np.concatenate(static_array_hr, axis=-1)
    if season is not None:
        # NOTE(review): if ``static_vars`` is None here, ``static_array_hr``
        # is still the empty list — confirm np.concatenate below behaves as
        # intended in that case
        if patch_size is not None:
            season_array_hr = _get_season_array_(season, patch_size, patch_size)
            static_array_hr = np.concatenate([static_array_hr, season_array_hr], axis=-1)
            if upsampling in POSTUPSAMPLING_METHODS:
                season_array_lr = _get_season_array_(season, patch_size_lr, patch_size_lr)
            else:
                season_array_lr = season_array_hr
            lr_array = np.concatenate([lr_array, season_array_lr], axis=-1)
        else:
            season_array_hr = _get_season_array_(season, hr_y, hr_x)
            static_array_hr = np.concatenate([static_array_hr, season_array_hr], axis=-1)
            if upsampling in POSTUPSAMPLING_METHODS:
                season_array_lr = _get_season_array_(season, lr_y, lr_x)
            else:
                season_array_lr = season_array_hr
            # for spatial samples, the season array is concatenated to the lr
            if not is_spatiotemp:
                lr_array = np.concatenate([lr_array, season_array_lr], axis=-1)
    else:
        season_array_lr = None
    hr_array = np.asarray(hr_array, 'float32')
    lr_array = np.asarray(lr_array, 'float32')
    if static_vars is not None or season_array_lr is not None:
        static_array_hr = np.asanyarray(static_array_hr, 'float32')
    else:
        static_array_hr = None
    if debug:
        if is_spatiotemp:
            # NOTE(review): references ``season_array_hr``, which is only
            # bound when ``season`` is not None
            print(f'HR array: {hr_array.shape}, LR array: {lr_array.shape}, Auxiliary array: {season_array_hr.shape}')
            if patch_size is not None:
                print(f'Crop X,Y: {crop_x}, {crop_y}')
            ecv.plot_ndarray(np.squeeze(hr_array), dpi=100, interactive=False, plot_title=('HR array'))
            for i in range(lr_array.shape[-1]):
                ecv.plot_ndarray(np.squeeze(lr_array[:,:,:,i]), dpi=100, interactive=False,
                                 plot_title=(f'LR array, variable {i+1}'))
            if static_array_hr is not None:
                ecv.plot_ndarray(tuple(np.moveaxis(static_array_hr, -1, 0)), interactive=False,
                                 dpi=100,plot_title='Auxiliary array HR')
        else:
            if static_array_hr is not None:
                print(f'HR array: {hr_array.shape}, LR array {lr_array.shape}, Auxiliary array HR {static_array_hr.shape}')
            else:
                print(f'HR array: {hr_array.shape}, LR array {lr_array.shape}')
            if patch_size is not None:
                print(f'Crop X,Y: {crop_x}, {crop_y}')
            ecv.plot_ndarray(np.squeeze(hr_array), dpi=100, interactive=False,
                             subplot_titles='HR array')
            ecv.plot_ndarray(np.moveaxis(np.squeeze(lr_array), -1, 0), dpi=100, interactive=False,
                             plot_title='LR array')
            if static_vars is not None or season is not None:
                ecv.plot_ndarray(np.moveaxis(static_array_hr, -1, 0), interactive=False, dpi=100,
                                 plot_title='HR auxiliary array')
            if predictors is not None:
                ecv.plot_ndarray(np.rollaxis(lr_array_predictors, 2, 0), dpi=100, interactive=False,
                                 plot_title='LR predictors')
    if static_vars is not None or season is not None:
        return hr_array, lr_array, static_array_hr
    else:
        return hr_array, lr_array
def create_batch_hr_lr(
    all_indices,
    index,
    array,
    array_lr,
    upsampling,
    scale=4,
    batch_size=32,
    patch_size=None,
    time_window=None,
    static_vars=None,
    predictors=None,
    interpolation='inter_area',
    time_metadata=None
    ):
    """Create a batch of HR/LR samples.

    Parameters
    ----------
    all_indices : sequence of int
        (Shuffled) sample indices for the whole epoch.
    index : int
        Batch index; the slice ``[index * batch_size:(index + 1) * batch_size]``
        of ``all_indices`` is taken.
    array : np.ndarray
        HR gridded data.
    array_lr : np.ndarray or None
        LR gridded data. If None, coarsened pairs are created from ``array``.
    upsampling : str
        Name of the upsampling method.
    scale : int, optional
        Scaling factor.
    batch_size : int, optional
        Number of samples per batch.
    patch_size : int or None, optional
        Size of the square patches, in HR pixels.
    time_window : int or None, optional
        If not None, each sample is spatio-temporal with ``time_window``
        consecutive time steps.
    static_vars : None or list of 2D ndarrays, optional
        Static variables such as elevation data or a land-ocean mask.
    predictors : np.ndarray, optional
        Predictor variables in HR.
    interpolation : str, optional
        Interpolation used when resizing the samples.
    time_metadata : array-like or None, optional
        Time coordinates used to derive the season of each sample.

    Returns
    -------
    ([batch_lr, batch_aux_hr], [batch_hr]) when static variables or seasonal
    information are present, otherwise ([batch_lr], [batch_hr]).
    """
    # take a batch of indices (`batch_size` indices randomized temporally)
    batch_rand_idx = all_indices[index * batch_size : (index + 1) * batch_size]
    batch_hr = []
    batch_lr = []
    batch_aux_hr = []
    # initialized here so the check after the loop is always well-defined,
    # even when `batch_rand_idx` is empty (previously this raised a
    # NameError on `season_i`)
    season_i = None
    # looping to create a batch of samples
    for i in batch_rand_idx:
        # spatial samples
        if time_window is None:
            data_i = array[i]
            data_lr_i = None if array_lr is None else array_lr[i]
            predictors_i = None if predictors is None else predictors[i]
            season_i = _get_season_(time_metadata[i], time_window) if time_metadata is not None else None
        # spatio-temporal samples
        else:
            data_i = array[i:i+time_window]
            data_lr_i = None if array_lr is None else array_lr[i:i+time_window]
            predictors_i = None if predictors is None else predictors[i:i+time_window]
            season_i = _get_season_(time_metadata[i:i+time_window], time_window) if time_metadata is not None else None
        res = create_pair_hr_lr(
            array=data_i,
            array_lr=data_lr_i,
            upsampling=upsampling,
            scale=scale,
            patch_size=patch_size,
            static_vars=static_vars,
            season=season_i,
            interpolation=interpolation,
            predictors=predictors_i)
        if static_vars is not None or season_i is not None:
            hr_array, lr_array, static_array_hr = res
            batch_aux_hr.append(static_array_hr)
        else:
            hr_array, lr_array = res
        batch_lr.append(lr_array)
        batch_hr.append(hr_array)
    batch_lr = np.asarray(batch_lr)
    batch_hr = np.asarray(batch_hr)
    # `season_i` holds the last sample's season here; it is only used to
    # detect whether seasonal info was produced at all (same for every
    # sample in the batch since `time_metadata` is batch-wide)
    if static_vars is not None or season_i is not None:
        batch_aux_hr = np.asarray(batch_aux_hr)
        return [batch_lr, batch_aux_hr], [batch_hr]
    else:
        return [batch_lr], [batch_hr]
class DataGenerator(tf.keras.utils.Sequence):
    """
    DataGenerator creates batches of paired training samples according to the
    upsampling and other parameters. This class is used within the
    ``dl4ds.SupervisedTrainer``.
    A sequence structure guarantees that the network will only train once on
    each sample per epoch which is not the case with generators.
    Every Sequence must implement the __getitem__ and the __len__ methods. If
    you want to modify your dataset between epochs you may implement
    on_epoch_end. The method __getitem__ should return a complete batch.
    """
    def __init__(
        self,
        array,
        array_lr,
        backbone,
        upsampling,
        scale,
        batch_size=32,
        patch_size=None,
        time_window=None,
        static_vars=None,
        predictors=None,
        interpolation='inter_area',
        repeat=None
        ):
        """
        Parameters
        ----------
        array : np.ndarray
            HR gridded data.
        array_lr : np.ndarray
            LR gridded data. If not provided, then implicit/coarsened pairs are
            created from ``array``.
        backbone : str
            String with the name of the backbone block.
        upsampling : str
            String with the name of the upsampling method.
        scale : int
            Scaling factor.
        batch_size : int, optional
            How many samples are included in each batch.
        patch_size : int or None
            Size of the square patches to be extracted, in pixels for the HR grid.
        time_window : int or None, optional
            If not None, then each sample will have a temporal dimension
            (``time_window`` slices to the past are grabbed for the LR array).
        static_vars : None or list of 2D ndarrays, optional
            Static variables such as elevation data or a binary land-ocean mask.
        predictors : list of ndarray
            List of predictor ndarrays.
        interpolation : str, optional
            Interpolation used when upsampling/downsampling the training samples.
        repeat : int or None, optional
            Factor to repeat the samples in ``array``. Useful when ``patch_size``
            is not None.
        TO-DO
        -----
        * instead of the in-memory array, we could input the path and load the
        netcdf files lazily or memmap a numpy array
        """
        # unwrap xarray containers to plain ndarrays
        # NOTE(review): if `array` is neither xr.DataArray nor np.ndarray,
        # `self.array` is never set and a later AttributeError will occur
        if isinstance(array, xr.DataArray):
            # self.time_metadata = array.time.copy() # grabbing time metadata
            self.time_metadata = None
            self.array = array.values
        elif isinstance(array, np.ndarray):
            self.array = array
            self.time_metadata = None
        if isinstance(array_lr, xr.DataArray):
            self.array_lr = array_lr.values
        else:
            self.array_lr = array_lr
        self.batch_size = batch_size
        self.scale = scale
        self.upsampling = upsampling
        self.backbone = backbone
        self.patch_size = patch_size
        self.time_window = time_window
        self.static_vars = static_vars
        # unwrap static variables as well
        # NOTE(review): this mutates the caller's `static_vars` list in place
        if self.static_vars is not None:
            for i in range(len(self.static_vars)):
                if isinstance(self.static_vars[i], xr.DataArray):
                    self.static_vars[i] = self.static_vars[i].values
        self.predictors = predictors
        # concatenating list of ndarray variables along the last dimension
        if self.predictors is not None:
            self.predictors = np.concatenate(self.predictors, axis=-1)
        self.interpolation = interpolation
        self.repeat = repeat
        # shuffling the order of the available indices (n samples);
        # for spatio-temporal samples the last `time_window` indices cannot
        # start a sample, hence the reduced n
        if self.time_window is not None:
            self.n = self.array.shape[0] - self.time_window
        else:
            self.n = self.array.shape[0]
        self.indices = np.random.permutation(np.arange(self.n))
        if self.repeat is not None and isinstance(self.repeat, int):
            self.indices = np.hstack([self.indices for i in range(self.repeat)])
        if patch_size is not None:
            if self.upsampling in POSTUPSAMPLING_METHODS:
                # the LR patch is patch_size/scale pixels, so it must divide evenly
                if not self.patch_size % self.scale == 0:
                    raise ValueError('`patch_size` must be divisible by `scale`')
    def __len__(self):
        """
        Defines the number of batches the DataGenerator can produce per epoch.
        A common practice is to set this value to n_samples / batch_size so that
        the model sees the training samples at most once per epoch.
        """
        n_batches = self.n // self.batch_size
        if self.repeat:
            return n_batches * self.repeat
        else:
            return n_batches
    def __getitem__(self, index):
        """
        Generate one batch of data as (X, y) value pairs where X represents the
        input and y represents the output.
        """
        # delegates all pairing/cropping/resizing logic to the module-level
        # `create_batch_hr_lr` helper
        res = create_batch_hr_lr(
            self.indices,
            index,
            self.array,
            self.array_lr,
            upsampling=self.upsampling,
            scale=self.scale,
            batch_size=self.batch_size,
            patch_size=self.patch_size,
            time_window=self.time_window,
            static_vars=self.static_vars,
            predictors=self.predictors,
            interpolation=self.interpolation,
            time_metadata=self.time_metadata)
        return res
def _get_season_(time_metadata, time_window):
""" Get the season for a given sample.
"""
if time_window is None:
month_int = time_metadata.dt.month.values
else:
month_int = sc.stats.mode(time_metadata.time.dt.month.values)
month_int = int(month_int.count)
if month_int in [12, 1, 2]:
season = 'winter'
elif month_int in [3, 4, 5]:
season = 'spring'
elif month_int in [6, 7, 8]:
season = 'summer'
elif month_int in [9, 10, 11]:
season = 'autumn'
return season
def _get_season_array_(season, sizey, sizex):
""" Produce a multichannel array encoding the season.
"""
if season not in ['winter', 'spring', 'summer', 'autumn']:
raise ValueError('``season`` not recognized')
season_array = np.zeros((sizey, sizex, 4))
if season == 'winter':
season_array[:,:,0] += 1
elif season == 'spring':
season_array[:,:,1] += 1
elif season == 'summer':
season_array[:,:,2] += 1
elif season == 'autumn':
season_array[:,:,3] += 1
return season_array | [
"numpy.moveaxis",
"scipy.stats.mode",
"numpy.asarray",
"numpy.asanyarray",
"numpy.zeros",
"numpy.arange",
"numpy.rollaxis",
"numpy.squeeze",
"numpy.concatenate"
] | [((10472, 10503), 'numpy.asarray', 'np.asarray', (['hr_array', '"""float32"""'], {}), "(hr_array, 'float32')\n", (10482, 10503), True, 'import numpy as np\n'), ((10519, 10550), 'numpy.asarray', 'np.asarray', (['lr_array', '"""float32"""'], {}), "(lr_array, 'float32')\n", (10529, 10550), True, 'import numpy as np\n'), ((14778, 14798), 'numpy.asarray', 'np.asarray', (['batch_lr'], {}), '(batch_lr)\n', (14788, 14798), True, 'import numpy as np\n'), ((14814, 14834), 'numpy.asarray', 'np.asarray', (['batch_hr'], {}), '(batch_hr)\n', (14824, 14834), True, 'import numpy as np\n'), ((21375, 21402), 'numpy.zeros', 'np.zeros', (['(sizey, sizex, 4)'], {}), '((sizey, sizex, 4))\n', (21383, 21402), True, 'import numpy as np\n'), ((9288, 9328), 'numpy.concatenate', 'np.concatenate', (['static_array_hr'], {'axis': '(-1)'}), '(static_array_hr, axis=-1)\n', (9302, 9328), True, 'import numpy as np\n'), ((10640, 10681), 'numpy.asanyarray', 'np.asanyarray', (['static_array_hr', '"""float32"""'], {}), "(static_array_hr, 'float32')\n", (10653, 10681), True, 'import numpy as np\n'), ((14915, 14939), 'numpy.asarray', 'np.asarray', (['batch_aux_hr'], {}), '(batch_aux_hr)\n', (14925, 14939), True, 'import numpy as np\n'), ((20776, 20825), 'scipy.stats.mode', 'sc.stats.mode', (['time_metadata.time.dt.month.values'], {}), '(time_metadata.time.dt.month.values)\n', (20789, 20825), True, 'import scipy as sc\n'), ((5355, 5411), 'numpy.concatenate', 'np.concatenate', (['[lr_array, lr_array_predictors]'], {'axis': '(-1)'}), '([lr_array, lr_array_predictors], axis=-1)\n', (5369, 5411), True, 'import numpy as np\n'), ((9504, 9563), 'numpy.concatenate', 'np.concatenate', (['[static_array_hr, season_array_hr]'], {'axis': '(-1)'}), '([static_array_hr, season_array_hr], axis=-1)\n', (9518, 9563), True, 'import numpy as np\n'), ((9800, 9852), 'numpy.concatenate', 'np.concatenate', (['[lr_array, season_array_lr]'], {'axis': '(-1)'}), '([lr_array, season_array_lr], axis=-1)\n', (9814, 9852), True, 
'import numpy as np\n'), ((9967, 10026), 'numpy.concatenate', 'np.concatenate', (['[static_array_hr, season_array_hr]'], {'axis': '(-1)'}), '([static_array_hr, season_array_hr], axis=-1)\n', (9981, 10026), True, 'import numpy as np\n'), ((18626, 18666), 'numpy.concatenate', 'np.concatenate', (['self.predictors'], {'axis': '(-1)'}), '(self.predictors, axis=-1)\n', (18640, 18666), True, 'import numpy as np\n'), ((19016, 19033), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (19025, 19033), True, 'import numpy as np\n'), ((1686, 1701), 'numpy.squeeze', 'np.squeeze', (['var'], {}), '(var)\n', (1696, 1701), True, 'import numpy as np\n'), ((3999, 4019), 'numpy.squeeze', 'np.squeeze', (['hr_array'], {}), '(hr_array)\n', (4009, 4019), True, 'import numpy as np\n'), ((4142, 4170), 'numpy.squeeze', 'np.squeeze', (['lr_array_resized'], {}), '(lr_array_resized)\n', (4152, 4170), True, 'import numpy as np\n'), ((7462, 7518), 'numpy.concatenate', 'np.concatenate', (['[lr_array, lr_array_predictors]'], {'axis': '(-1)'}), '([lr_array, lr_array_predictors], axis=-1)\n', (7476, 7518), True, 'import numpy as np\n'), ((9207, 9256), 'numpy.concatenate', 'np.concatenate', (['[lr_array, staticvar_lr]'], {'axis': '(-1)'}), '([lr_array, staticvar_lr], axis=-1)\n', (9221, 9256), True, 'import numpy as np\n'), ((10362, 10414), 'numpy.concatenate', 'np.concatenate', (['[lr_array, season_array_lr]'], {'axis': '(-1)'}), '([lr_array, season_array_lr], axis=-1)\n', (10376, 10414), True, 'import numpy as np\n'), ((11016, 11036), 'numpy.squeeze', 'np.squeeze', (['hr_array'], {}), '(hr_array)\n', (11026, 11036), True, 'import numpy as np\n'), ((11950, 11970), 'numpy.squeeze', 'np.squeeze', (['hr_array'], {}), '(hr_array)\n', (11960, 11970), True, 'import numpy as np\n'), ((11172, 11204), 'numpy.squeeze', 'np.squeeze', (['lr_array[:, :, :, i]'], {}), '(lr_array[:, :, :, i])\n', (11182, 11204), True, 'import numpy as np\n'), ((12110, 12130), 'numpy.squeeze', 'np.squeeze', (['lr_array'], 
{}), '(lr_array)\n', (12120, 12130), True, 'import numpy as np\n'), ((12328, 12363), 'numpy.moveaxis', 'np.moveaxis', (['static_array_hr', '(-1)', '(0)'], {}), '(static_array_hr, -1, 0)\n', (12339, 12363), True, 'import numpy as np\n'), ((12532, 12570), 'numpy.rollaxis', 'np.rollaxis', (['lr_array_predictors', '(2)', '(0)'], {}), '(lr_array_predictors, 2, 0)\n', (12543, 12570), True, 'import numpy as np\n'), ((6670, 6690), 'numpy.squeeze', 'np.squeeze', (['hr_array'], {}), '(hr_array)\n', (6680, 6690), True, 'import numpy as np\n'), ((11402, 11437), 'numpy.moveaxis', 'np.moveaxis', (['static_array_hr', '(-1)', '(0)'], {}), '(static_array_hr, -1, 0)\n', (11413, 11437), True, 'import numpy as np\n'), ((8003, 8023), 'numpy.squeeze', 'np.squeeze', (['hr_array'], {}), '(hr_array)\n', (8013, 8023), True, 'import numpy as np\n')] |
import pytest
from numpy import array
from numpy import all
from numpy import isnan
from directional_clustering.plotters import mesh_to_vertices_xyz
from directional_clustering.plotters import trimesh_face_connect
from directional_clustering.plotters import lines_to_start_end_xyz
from directional_clustering.plotters import lines_xyz_to_tables
from directional_clustering.plotters import coord_start_end_none
from directional_clustering.plotters import lines_start_end_connect
from directional_clustering.plotters import vectors_dict_to_array
from directional_clustering.plotters import face_centroids
def test_mesh_to_vertices_xyz(trimesh_attr):
    """
    Verifies the per-axis coordinate lists come back in x, y, z order.
    """
    xs, ys, zs = mesh_to_vertices_xyz(trimesh_attr)
    expected_x = [0.0, 1.0, 1.0]
    expected_y = [0.0, 0.0, 1.0]
    expected_z = [0.0, 0.0, 0.0]
    assert all([[xs == expected_x], [ys == expected_y], [zs == expected_z]])
def test_trimesh_face_connect(trimesh_attr):
    """
    Verifies the vertex-index lists produced for each triangle face.
    """
    conn_i, conn_j, conn_k = trimesh_face_connect(trimesh_attr)
    checks = [[conn_i == [0]], [conn_j == [1]], [conn_k == [2]]]
    assert all(checks)
def test_lines_to_start_end_xyz(start, end):
    """
    Verifies the ordering of the start/end coordinate outputs.
    """
    line = (start, end)
    sx, sy, sz, ex, ey, ez = lines_to_start_end_xyz([line])
    start_checks = [[sx == start[0]], [sy == start[1]], [sz == start[2]]]
    end_checks = [[ex == end[0]], [ey == end[1]], [ez == end[2]]]
    assert all(start_checks + end_checks)
def test_lines_xyz_to_tables(start, end):
    """
    Verifies each table row pairs the start and end coordinate per axis.
    """
    table_x, table_y, table_z = lines_xyz_to_tables(
        [start[0]], [start[1]], [start[2]], [end[0]], [end[1]], [end[2]])
    checks = [start[0] == table_x[0][0], end[0] == table_x[0][1],
              start[1] == table_y[0][0], end[1] == table_y[0][1],
              start[2] == table_z[0][0], end[2] == table_z[0][1]]
    assert all(checks)
def test_coord_start_end_none():
    """
    Verifies the interleaved start/end/NaN ordering of the output.
    """
    first_coords = [0.0, 1.0]
    second_coords = [2.0, 3.0]
    line_count = 2
    coords = coord_start_end_none(first_coords, second_coords, line_count)
    checks = [0.0 == coords[0], 2.0 == coords[1], isnan(coords[2]),
              1.0 == coords[3], 3.0 == coords[4], isnan(coords[5])]
    assert all(checks)
def test_lines_start_end_connect(start, end):
    """
    Verifies the start/end/NaN triplets produced for a single line.
    """
    conn_x, conn_y, conn_z = lines_start_end_connect(
        [start[0]], [start[1]], [start[2]], [end[0]], [end[1]], [end[2]])
    per_axis = [
        all([start[0] == conn_x[0], end[0] == conn_x[1], isnan(conn_x[2])]),
        all([start[1] == conn_y[0], end[1] == conn_y[1], isnan(conn_y[2])]),
        all([start[2] == conn_z[0], end[2] == conn_z[1], isnan(conn_z[2])]),
    ]
    assert all(per_axis)
def test_vectors_dict_to_array():
    """
    Verifies a TypeError is raised when the input is not a VectorField.
    """
    bad_input = [0, 1, 2]
    with pytest.raises(TypeError):
        vectors_dict_to_array(bad_input, 1)
def test_face_centroids(quadmesh_no_attr):
    """
    Verifies the centroid coordinates returned for the unit quad face.
    """
    cent_x, cent_y, cent_z = face_centroids(quadmesh_no_attr)
    assert all([[cent_x == 0.5], [cent_y == 0.5], [cent_z == 0.0]])
| [
"directional_clustering.plotters.coord_start_end_none",
"directional_clustering.plotters.trimesh_face_connect",
"directional_clustering.plotters.face_centroids",
"numpy.isnan",
"directional_clustering.plotters.vectors_dict_to_array",
"pytest.raises",
"directional_clustering.plotters.mesh_to_vertices_xyz... | [((742, 776), 'directional_clustering.plotters.mesh_to_vertices_xyz', 'mesh_to_vertices_xyz', (['trimesh_attr'], {}), '(trimesh_attr)\n', (762, 776), False, 'from directional_clustering.plotters import mesh_to_vertices_xyz\n'), ((879, 932), 'numpy.all', 'all', (['[[x == check_x], [y == check_y], [z == check_z]]'], {}), '([[x == check_x], [y == check_y], [z == check_z]])\n', (882, 932), False, 'from numpy import all\n'), ((1045, 1079), 'directional_clustering.plotters.trimesh_face_connect', 'trimesh_face_connect', (['trimesh_attr'], {}), '(trimesh_attr)\n', (1065, 1079), False, 'from directional_clustering.plotters import trimesh_face_connect\n'), ((1092, 1133), 'numpy.all', 'all', (['[[i == [0]], [j == [1]], [k == [2]]]'], {}), '([[i == [0]], [j == [1]], [k == [2]]])\n', (1095, 1133), False, 'from numpy import all\n'), ((1304, 1332), 'directional_clustering.plotters.lines_to_start_end_xyz', 'lines_to_start_end_xyz', (['[ln]'], {}), '([ln])\n', (1326, 1332), False, 'from directional_clustering.plotters import lines_to_start_end_xyz\n'), ((1345, 1456), 'numpy.all', 'all', (['[[sx == start[0]], [sy == start[1]], [sz == start[2]], [ex == end[0]], [ey ==\n end[1]], [ez == end[2]]]'], {}), '([[sx == start[0]], [sy == start[1]], [sz == start[2]], [ex == end[0]],\n [ey == end[1]], [ez == end[2]]])\n', (1348, 1456), False, 'from numpy import all\n'), ((1572, 1661), 'directional_clustering.plotters.lines_xyz_to_tables', 'lines_xyz_to_tables', (['[start[0]]', '[start[1]]', '[start[2]]', '[end[0]]', '[end[1]]', '[end[2]]'], {}), '([start[0]], [start[1]], [start[2]], [end[0]], [end[1]],\n [end[2]])\n', (1591, 1661), False, 'from directional_clustering.plotters import lines_xyz_to_tables\n'), ((1678, 1813), 'numpy.all', 'all', (['[start[0] == tx[0][0], end[0] == tx[0][1], start[1] == ty[0][0], end[1] ==\n ty[0][1], start[2] == tz[0][0], end[2] == tz[0][1]]'], {}), '([start[0] == tx[0][0], end[0] == tx[0][1], start[1] == 
ty[0][0], end[1] ==\n ty[0][1], start[2] == tz[0][0], end[2] == tz[0][1]])\n', (1681, 1813), False, 'from numpy import all\n'), ((2021, 2072), 'directional_clustering.plotters.coord_start_end_none', 'coord_start_end_none', (['nums_1st', 'nums_2nd', 'num_lines'], {}), '(nums_1st, nums_2nd, num_lines)\n', (2041, 2072), False, 'from directional_clustering.plotters import coord_start_end_none\n'), ((2314, 2408), 'directional_clustering.plotters.lines_start_end_connect', 'lines_start_end_connect', (['[start[0]]', '[start[1]]', '[start[2]]', '[end[0]]', '[end[1]]', '[end[2]]'], {}), '([start[0]], [start[1]], [start[2]], [end[0]], [end[\n 1]], [end[2]])\n', (2337, 2408), False, 'from directional_clustering.plotters import lines_start_end_connect\n'), ((2640, 2669), 'numpy.all', 'all', (['[test_x, test_y, test_z]'], {}), '([test_x, test_y, test_z])\n', (2643, 2669), False, 'from numpy import all\n'), ((3007, 3039), 'directional_clustering.plotters.face_centroids', 'face_centroids', (['quadmesh_no_attr'], {}), '(quadmesh_no_attr)\n', (3021, 3039), False, 'from directional_clustering.plotters import face_centroids\n'), ((3052, 3096), 'numpy.all', 'all', (['[[cx == 0.5], [cy == 0.5], [cz == 0.0]]'], {}), '([[cx == 0.5], [cy == 0.5], [cz == 0.0]])\n', (3055, 3096), False, 'from numpy import all\n'), ((2793, 2817), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2806, 2817), False, 'import pytest\n'), ((2827, 2862), 'directional_clustering.plotters.vectors_dict_to_array', 'vectors_dict_to_array', (['[0, 1, 2]', '(1)'], {}), '([0, 1, 2], 1)\n', (2848, 2862), False, 'from directional_clustering.plotters import vectors_dict_to_array\n'), ((2112, 2123), 'numpy.isnan', 'isnan', (['c[2]'], {}), '(c[2])\n', (2117, 2123), False, 'from numpy import isnan\n'), ((2163, 2174), 'numpy.isnan', 'isnan', (['c[5]'], {}), '(c[5])\n', (2168, 2174), False, 'from numpy import isnan\n'), ((2483, 2495), 'numpy.isnan', 'isnan', (['cx[2]'], {}), '(cx[2])\n', (2488, 2495), 
False, 'from numpy import isnan\n'), ((2548, 2560), 'numpy.isnan', 'isnan', (['cy[2]'], {}), '(cy[2])\n', (2553, 2560), False, 'from numpy import isnan\n'), ((2613, 2625), 'numpy.isnan', 'isnan', (['cz[2]'], {}), '(cz[2])\n', (2618, 2625), False, 'from numpy import isnan\n')] |
# implemenation of the compute methods for category
import numpy as np
import random
import time
import os.path
from os import path
import matplotlib.pyplot as plt
import scipy.interpolate
from nodeeditor.say import *
import nodeeditor.store as store
import nodeeditor.pfwrap as pfwrap
print ("reloaded: "+ __file__)
from nodeeditor.cointools import *
def run_FreeCAD_VectorArray(self,*args, **kwargs):
    """Compute method for the VectorArray node: build a countA x countB grid
    of (optionally jittered) points and output them either as a polygon grid
    or as a B-spline surface interpolating the point lattice."""
    # grid sizes along the three lattice directions (node input pins)
    countA=self.getData("countA")
    countB=self.getData("countB")
    countC=self.getData("countC")
    # base point and the three direction vectors spanning the lattice
    vO=self.getData("vecBase")
    vA=self.getData("vecA")
    vB=self.getData("vecB")
    vC=self.getData("vecC")
    # amplitude of the random jitter applied per axis
    rx=self.getData("randomX")
    ry=self.getData("randomY")
    rz=self.getData("randomZ")
    # requested B-spline degrees in the two surface directions
    degA=self.getData("degreeA")
    degB=self.getData("degreeB")
    # a degree-d spline needs at least d+1 poles; clamp the degrees
    if countA<degA+1:
        degA=countA-1
    if countB<degB+1:
        degB=countB-1
    # lattice points vO + a*vA + b*vB + c*vC, each shifted by a uniform
    # random offset in [-0.5, 0.5] scaled by (rx, ry, rz)
    points=[vO+vA*a+vB*b+vC*c+FreeCAD.Vector((0.5-random.random())*rx,(0.5-random.random())*ry,(0.5-random.random())*rz)
    for a in range(countA) for b in range(countB) for c in range(countC)]
    # only a single layer along vC is supported
    if countC != 1:
        sayexc("not implemented")
        return
    if degA==0 or degB==0:
        # degree 0 in either direction: emit the lattice as a compound of
        # polylines along both grid directions instead of a surface
        col = []
        poles=np.array(points).reshape(countA,countB,3)
        for ps in poles:
            ps=[FreeCAD.Vector(p) for p in ps]
            col += [Part.makePolygon(ps)]
        for ps in poles.swapaxes(0,1):
            ps=[FreeCAD.Vector(p) for p in ps]
            col += [Part.makePolygon(ps)]
        shape=Part.makeCompound(col)
    else:
        # build a clamped B-spline surface through the pole lattice:
        # end-knot multiplicities of degree+1 clamp the surface to the
        # boundary poles
        poles=np.array(points).reshape(countA,countB,3)
        multA=[degA+1]+[1]*(countA-1-degA)+[degA+1]
        multB=[degB+1]+[1]*(countB-1-degB)+[degB+1]
        knotA=range(len(multA))
        knotB=range(len(multB))
        sf=Part.BSplineSurface()
        sf.buildFromPolesMultsKnots(poles,multA,multB,knotA,knotB,False,False,degA,degB)
        shape=sf.toShape()
    # publish the pole grid and the shape on the node's output pins
    self.setData('vectors_out',poles.tolist())
    #cc=self.getObject()
    #try:
    #	cc.Label=self.objname.getData()
    #except:
    #	pass
    #cc.Shape=shape
    self.setPinObject('Shape_out',shape)
    # Example: setting the node name
    #self.setNodename("HUHU")
    # Setting an error
    #self.setError("raise Exception")
| [
"random.random",
"numpy.array"
] | [((1211, 1227), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1219, 1227), True, 'import numpy as np\n'), ((1560, 1576), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1568, 1576), True, 'import numpy as np\n'), ((933, 948), 'random.random', 'random.random', ([], {}), '()\n', (946, 948), False, 'import random\n'), ((958, 973), 'random.random', 'random.random', ([], {}), '()\n', (971, 973), False, 'import random\n'), ((983, 998), 'random.random', 'random.random', ([], {}), '()\n', (996, 998), False, 'import random\n')] |
import tqdm
import torch
import numpy as np
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.nn.utils import clip_grad_norm_
from .delayed import DelayedKeyboardInterrupt
from .utils import state_dict_to_cpu, TermsHistory
def fit_one_epoch(model, objective, feed, optim, grad_clip=0., callback=None):
    """Perform a single SGD sweep over `feed`, updating `model` in place.

    Parameters
    ----------
    model : torch.nn.Module
        Model being trained; switched to `train` mode and updated in place.
    objective : BaseObjective
        Composite loss (regularized objective) evaluated on each batch.
    feed : batch iterator
        Yields `(data, target)` mini-batches.
    optim : torch.optim.optimizer.Optimizer
        Optimizer bound to the parameters of `model`.
    grad_clip : float, default=0.
        If positive, clip the gradient to an L2 ball of this radius.
    callback : callable, default=None
        Invoked after every step with the current learning rate, gradient
        norm and the objective's loss components.

    Returns
    -------
    list of float
        The running loss value of every processed batch.

    Raises
    ------
    FloatingPointError
        As soon as a batch loss evaluates to NaN.
    """
    model.train()  # ensure train-mode layers (dropout, batchnorm) are active
    running_losses = []
    grad_norm = np.nan
    for batch, labels in feed:
        # shield the backward/step sequence from a mid-update interrupt
        with DelayedKeyboardInterrupt("delay"):
            optim.zero_grad()  # reset grads before building the new graph
            value = objective(model, batch, labels)
            value.backward()
            if grad_clip > 0:
                grad_norm = clip_grad_norm_(model.parameters(), grad_clip)
            # see the closure form of Optimizer.step in the pytorch docs
            optim.step()
        running_losses.append(float(value))
        if callable(callback):
            # report zero-th parameter group's learning rate
            lr_values = [group.get("lr", np.nan) for group in optim.param_groups]
            callback({"lr": lr_values[0], "|g|": grad_norm,
                      **objective.component_values_})
        # stop early on nan -- further compute would be wasted
        if np.isnan(running_losses[-1]):
            raise FloatingPointError
    return running_losses
def fit(model, objective, feed, optim, *, sched=None, early=None,
        n_epochs=100, grad_clip=0., verbose=True):
    """Fit a model to the objective on the data feed for specified number of
    epochs with optimizer, lr-schedule and gradient clipping.
    Parameters
    ----------
    model : torch.nn.Module
        The updated instance of the model to fit.
    objective : BaseObjective
        The objective function to optimize, aka regularized loss.
    feed : batch iterator
        The batch generator to use for SGD in each epoch.
    optim : torch.optim.optimizer.Optimizer
        The optimizer to use during SGD loop. Supposed to be initialized with
        the provided `model`.
    sched : torch.optim.ls_scheduler.*, default=None
        The learning rate schedule to use after each epoch. Expected to be
        already initialized to the provided `optim`.
    early : EarlyStopping callback, default=None
        An object implementing the early fit termination mechanics, based on
        the performance on a held out dataset.
    n_epoch : int, default=100
        The number of passes over the provided `feed`.
    grad_clip : float, default=0.
        The radius of the L2 ball to clip grads to. Disabled if 0.
    verbose : bool, default=True
        Whether to print a progress bar with current learning information.
    Returns
    -------
    model : torch.nn.Module
        The updated instance of the model.
    emergency : bool
        Boolean indicating if either a keyboard interrupt took place, a NAN
        was encountered during fit, or the loop was otherwise aborted.
    history : dict
        A dictionary to the tracked loss components, including the norm of the
        gradient, if `grad_clip` is positive.
    Details
    -------
    Forces the model in `train` mode before the nested SGD loop and forces it
    into `eval` mode afterwards.
    """
    model.train()
    # model_backup is only populated below when `early` is provided
    history, model_backup = TermsHistory(), {}
    with tqdm.tqdm(range(n_epochs), disable=not verbose) as bar:
        # per-step callback: records loss components and refreshes the bar
        def history_append(values):
            history.append(values)
            if verbose:
                # format the components of the loss objective
                status = " ".join(f"{k} {v:.2e}" for k, v in values.items())
                bar.set_postfix_str(status)
        # try-catch for graceful return
        try:
            for epoch in bar:
                # checkpointer and early stopper steps
                if early is not None:
                    early.step(model, epoch)
                    # keep a CPU snapshot to roll back to on a NaN blow-up
                    model_backup = state_dict_to_cpu(model.state_dict())
                epoch_loss = fit_one_epoch(model, objective, feed, optim,
                                           grad_clip=grad_clip,
                                           callback=history_append)
                # scheduler step: the `.step` api isn't standardized
                if sched is not None:
                    if isinstance(sched, ReduceLROnPlateau):
                        sched.step(np.mean(epoch_loss))
                    else:
                        sched.step()
        except FloatingPointError as e:  # thrown by fit_one_epoch
            # NOTE(review): if `early` is None, model_backup is still the
            # empty dict here, and `load_state_dict({})` would itself fail
            # for any model with parameters -- confirm intended behavior.
            model.load_state_dict(model_backup)
            emergency = e  # encountered wild model instability
        except KeyboardInterrupt as e:
            emergency = e  # user requested fit loop termination
        except StopIteration as e:  # thrown by early stopper
            emergency = e  # Early Stopping is a benign `emergency`
        else:  # no exception raised, no loop broken out of -- no emergency
            emergency = None
        # all exceptions, not handled explicitly, are critical faults
        # and constitute severe emergencies.
    model.eval()
    return model, emergency, dict(history)
| [
"numpy.mean",
"numpy.isnan"
] | [((2388, 2408), 'numpy.isnan', 'np.isnan', (['losses[-1]'], {}), '(losses[-1])\n', (2396, 2408), True, 'import numpy as np\n'), ((5486, 5505), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (5493, 5505), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 11:12:25 2020
@author: <NAME>
"""
from kymatio.torch import Scattering2D
import imageio
import numpy as np
import glob
import os
import torch
import tqdm
os.environ["IMAGEIO_FFMPEG_EXE"] = "/usr/bin/ffmpeg"  # point imageio at the system ffmpeg binary
# NOTE(review): `global` at module scope is a no-op; NBINS is simply a module constant
global NBINS
NBINS = 20  # number of histogram bins per scattering subband
def readvid_gr_features(filename, st, normalized=None):
    """Read a video, grayscale every frame, and compute per-frame
    Scattering subband histogram features.

    Parameters
    ----------
    filename : str
        Path of the dynamic-texture video file.
    st : Kymatio Torch module
        2D Scattering transform model.
    normalized : tuple, optional
        Scattering parameters (J, L) when coefficients should be
        normalized; None for the unnormalized variant.

    Returns
    -------
    numpy.ndarray
        Feature sequence of shape (N, S, NBINS): N frames, S scattering
        channels, NBINS histogram bins per channel.
    """
    per_frame = []
    reader = imageio.get_reader(filename, 'ffmpeg')
    for frame in reader.iter_data():
        pixels = np.float32(np.asarray(frame)) / 255.0
        # ITU-R BT.601 luma weights for RGB -> grayscale
        gray = 0.299 * pixels[:, :, 0] + 0.587 * pixels[:, :, 1] \
            + 0.114 * pixels[:, :, 2]
        feats = extract_hist_features(np.expand_dims(gray, 0), st,
                                      normalized=normalized)
        per_frame.append(feats)
    reader.close()
    return np.concatenate(per_frame).reshape(len(per_frame), -1, NBINS)
def extract_hist_features(imgs, st, normalized):
    '''
    Extracts a feature vector of Scattering subband histograms from an image
    tensor
    Parameters
    ----------
    imgs : Numpy Array,
        Image array (N x C x H x W) or (C x H x W).
    st : Kymatio Torch Module,
        2D Scattering transform model.
    normalized : Tuple, optional
        Scattering parameters (J, L) if the Scattering coefficients are to be
        normalized. Otherwise None.
    Returns
    -------
    features : Numpy Array
        Subband histogram features (N x C x S x B), S=# of Scattering channels
        B=NBINS=# of Histogram bins.
    '''
    # collapse any leading batch dims to (N, C, H, W)
    imgs = imgs.reshape(-1, imgs.shape[-3], imgs.shape[-2], imgs.shape[-1])
    # run the scattering transform on GPU, then bring the coefficients back
    stimgs = st(torch.tensor(np.float32(imgs), device='cuda')).cpu().numpy()
    nchannels = stimgs.shape[-3]
    features = np.zeros((imgs.shape[0], imgs.shape[1], nchannels, NBINS))
    for i in range(len(imgs)):
        for j in range(imgs.shape[1]):
            for l in range(nchannels):
                if normalized is not None:
                    J, L = normalized
                    # uniform bins on [0, 1] for normalized coefficients
                    bns = np.arange(NBINS+1)/NBINS
                    if l == 0:
                        smpls = stimgs[i, j, l]
                    elif l < L*J+1:
                        # first-order bands: normalize by mean |image| intensity
                        smpls = stimgs[i, j, l]/np.abs(imgs[i, j]).mean()
                    else:
                        # second-order bands: divide by the corresponding
                        # first-order parent band (epsilon avoids div-by-zero).
                        # NOTE(review): the parent-index formula assumes the
                        # standard kymatio channel ordering -- verify.
                        smpls = stimgs[i, j, l]/(stimgs[i, j, 2*(l-L*J-1)
                                                        // (L*(J-1))+1]+1e-16)
                else:
                    # unnormalized: wider bin range for non-zeroth channels
                    if l == 0:
                        bns = np.arange(NBINS+1)/NBINS
                    else:
                        bns = np.arange(NBINS+1)/(NBINS*16)
                    smpls = stimgs[i, j, l]
                # `range=` is ignored by numpy when bins is an array of edges
                h, _ = np.histogram(smpls, bns, range=(bns[0], bns[-1]))
                # fold samples beyond the last edge into the last bin
                h[-1] += (smpls > bns[-1]).sum()
                features[i, j, l] = h/h.sum()
    return features
# Scattering transform for 288x352 frames: J=4 scales, L=4 orientations, on GPU
st = Scattering2D(4, (288, 352), L=4).cuda()
# Precompute features for every DynTex split and cache them next to each video
for dt in ['alpha', 'beta', 'gamma']:
    print('Processing split', dt)
    features = []  # NOTE(review): never filled below -- appears unused
    labels = []
    for i in tqdm.tqdm(range(len(glob.glob('data/dyntex_' + dt + '/*')))):
        # all clips of class i+1 in this split
        files = glob.glob('data/dyntex_' + dt + '/c' + str(i+1) + '_*/*.avi')
        for f in files:
            labels.append(i)  # class index of this clip
            # Normalized Scattering Transform:
            np.save(f[:-4] + '_nst.npy', readvid_gr_features(f, st, (4, 4)))
            # Regular Scattering Transform
            np.save(f[:-4] + '_st.npy', readvid_gr_features(f, st))
| [
"numpy.abs",
"kymatio.torch.Scattering2D",
"numpy.asarray",
"numpy.float32",
"numpy.zeros",
"numpy.expand_dims",
"numpy.histogram",
"numpy.arange",
"imageio.get_reader",
"glob.glob",
"numpy.concatenate"
] | [((1062, 1100), 'imageio.get_reader', 'imageio.get_reader', (['filename', '"""ffmpeg"""'], {}), "(filename, 'ffmpeg')\n", (1080, 1100), False, 'import imageio\n'), ((2354, 2412), 'numpy.zeros', 'np.zeros', (['(imgs.shape[0], imgs.shape[1], nchannels, NBINS)'], {}), '((imgs.shape[0], imgs.shape[1], nchannels, NBINS))\n', (2362, 2412), True, 'import numpy as np\n'), ((3455, 3487), 'kymatio.torch.Scattering2D', 'Scattering2D', (['(4)', '(288, 352)'], {'L': '(4)'}), '(4, (288, 352), L=4)\n', (3467, 3487), False, 'from kymatio.torch import Scattering2D\n'), ((1434, 1451), 'numpy.concatenate', 'np.concatenate', (['V'], {}), '(V)\n', (1448, 1451), True, 'import numpy as np\n'), ((1161, 1178), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1171, 1178), True, 'import numpy as np\n'), ((1302, 1326), 'numpy.expand_dims', 'np.expand_dims', (['grarr', '(0)'], {}), '(grarr, 0)\n', (1316, 1326), True, 'import numpy as np\n'), ((3283, 3332), 'numpy.histogram', 'np.histogram', (['smpls', 'bns'], {'range': '(bns[0], bns[-1])'}), '(smpls, bns, range=(bns[0], bns[-1]))\n', (3295, 3332), True, 'import numpy as np\n'), ((3635, 3672), 'glob.glob', 'glob.glob', (["('data/dyntex_' + dt + '/*')"], {}), "('data/dyntex_' + dt + '/*')\n", (3644, 3672), False, 'import glob\n'), ((2629, 2649), 'numpy.arange', 'np.arange', (['(NBINS + 1)'], {}), '(NBINS + 1)\n', (2638, 2649), True, 'import numpy as np\n'), ((2258, 2274), 'numpy.float32', 'np.float32', (['imgs'], {}), '(imgs)\n', (2268, 2274), True, 'import numpy as np\n'), ((3105, 3125), 'numpy.arange', 'np.arange', (['(NBINS + 1)'], {}), '(NBINS + 1)\n', (3114, 3125), True, 'import numpy as np\n'), ((3186, 3206), 'numpy.arange', 'np.arange', (['(NBINS + 1)'], {}), '(NBINS + 1)\n', (3195, 3206), True, 'import numpy as np\n'), ((2817, 2835), 'numpy.abs', 'np.abs', (['imgs[i, j]'], {}), '(imgs[i, j])\n', (2823, 2835), True, 'import numpy as np\n')] |
import argparse
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import PIL.Image as Image
from matplotlib import pyplot as plt
from skimage.feature import peak_local_max
from skimage.morphology import watershed
from scipy import ndimage
from skimage.measure import label
from skimage.measure import regionprops
def adjustment(ImageName, Alpha=1.7, Beta=0):
    """Load an image as grayscale, adjust contrast/brightness, and detect edges.

    Parameters
    ----------
    ImageName : str
        Path of the image file to load.
    Alpha : float, default=1.7
        Contrast gain applied by ``cv2.convertScaleAbs``.
    Beta : float, default=0
        Brightness offset applied by ``cv2.convertScaleAbs``.

    Returns
    -------
    tuple
        (Canny edge map, contrast-adjusted grayscale image).
    """
    raw = np.array(cv2.imread(ImageName, 0), dtype=np.uint8)
    adjusted = cv2.convertScaleAbs(raw, alpha=Alpha, beta=Beta)
    # 5x5 morphological opening suppresses small speckle before edge detection
    opened = cv2.morphologyEx(adjusted, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
    edges = cv2.Canny(opened, 100, 150, 3, L2gradient=True)
    return edges, adjusted
def compare(major_axis, minor_axis):
    """Scatter-plot ellipse major vs. minor axes against the identity line.

    Points on the diagonal correspond to perfect circles; deviation from
    it measures how ellipsoidal the detected particles are.

    Arguments:
        major_axis = long axes of the fitted ellipses
        minor_axis = short axes of the fitted ellipses"""
    plt.clf()
    diag = np.array(range(100))
    plt.scatter(major_axis, minor_axis)
    plt.plot(diag, diag)  # y == x reference: where an ellipse is a circle
    plt.xlim(0, 80)
    plt.ylim(0, 80)
    plt.xlabel("Maximum")
    plt.ylabel("Minimum")
    plt.title("Plot of Minimum vs Maximum")
    plt.legend(["Theoretical circle", "Predicted Circle"])
    return
def get_circles(img, dp=3, minDist=20, para1=150, para2=50, minradius=0, maxradius=30):
    """Detect circles in a grayscale image with the Hough transform and
    annotate them in place.

    Arguments:
    image: Gray scale image input
    dp: Inverse ratio of the accumulator resolution to the image resolution.
    minDist: Minimum distance between the centers of the detected circles.
    para1 : It is the higher threshold of the two passed to the Canny edge
    para2 : It is the accumulator threshold for the circle centers at the detection stage.
    minRadius : Minimum circle radius.
    maxRadius : Maximum circle radius.

    Returns:
    (circle_radii, img) -- radii of the detected circles (empty array when
    none are found) and the image with circles/centers drawn on it.
    """
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp, minDist,
                               param1=para1, param2=para2, minRadius=minradius, maxRadius=maxradius)
    # BUG FIX: HoughCircles returns None (not an empty array) when no circle
    # is detected; np.around(None) then raised a TypeError. Return an empty
    # result instead so callers can handle "no circles" gracefully.
    if circles is None:
        return np.array([], dtype=np.uint16), img
    circles = np.uint16(np.around(circles))
    circle_radii = circles[0][:, 2]
    for i in circles[0, :]:
        # draw the outer circle
        cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
        # draw the center of the circle
        cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)
    return (circle_radii, img)
def getConv(name, mag=0):
    """Return the nm-per-pixel conversion factor for an SEM image.

    Determines the magnification via ``getMag`` and looks the factor up in
    a fixed calibration table (scale-bar pixel count vs. physical length).
    """
    mag = getMag(name, mag)
    # calibration table: magnification -> scale-bar pixels/length -> nm/pixel
    table = pd.DataFrame(
        [[0, 0, 0, 0],
         [35000, 157, 2000, 12.7388535],
         [25000, 111, 2000, 18.01801802],
         [15000, 167, 5000, 29.94011976],
         [12000, 133, 5000, 37.59398496],
         [10000, 111, 5000, 45.04504505],
         [6500, 15, 10000, 68.96551724]],
        columns=['Mag', 'Pixels', 'Length [nm]', 'Conversion'])
    # row matching the detected magnification; its factor is the answer
    match = table.loc[table['Mag'] == mag]
    convFactor = match.iloc[0]['Conversion']
    print("Magnification Level: " + str(mag) + "x")
    print("Conversion Factor [nm/pixel]: " + str(convFactor))
    print("-----------------------------------------------------")
    return convFactor
def get_ellipse(canny_image, mean_contour_Area):
    """Fit ellipses to the contours of an edge image and return their axes.

    Draws the contours and accepted ellipses onto ``canny_image`` in place.

    Arguments:
    canny_image : The image whose edges are delected
    mean_countour_Area : The mean area of the contours found using the image segmentation """
    _, mask = cv2.threshold(canny_image, 120, 255, cv2.THRESH_BINARY)
    # dilate-then-erode closes small gaps along the detected edges
    mask = cv2.erode(cv2.dilate(mask, None), None)
    contours = cv2.findContours(mask, cv2.RETR_CCOMP,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    cv2.drawContours(canny_image, contours, -1, (157, 0, 78), 1, cv2.LINE_AA)
    # accept contours whose area lies within +/-10% of the expected mean
    lower = mean_contour_Area - 0.1 * mean_contour_Area
    upper = mean_contour_Area + 0.1 * mean_contour_Area
    major_axis = []
    minor_axis = []
    for contour in contours:
        if contour.size in range(100, 200) or lower <= cv2.contourArea(contour) <= upper:
            ellipse = cv2.fitEllipse(contour)
            cv2.ellipse(canny_image, ellipse, (255, 0, 0), 2, cv2.LINE_AA)
            # ellipse[1] is (minor, major) axis lengths
            minor_axis.append(ellipse[1][0])
            major_axis.append(ellipse[1][1])
    return (major_axis, minor_axis)
def getMag(name, mag=0):
    """Determine the microscope magnification level for an image.

    Tries three strategies in order:
    1. OCR the magnification caption burned into the image (pytesseract).
    2. Parse the trailing number from the file name (e.g. ``..._35000.TIF``).
    3. Fall back to the ``mag`` argument supplied by the caller.

    Parameters
    ----------
    name : str
        Path of the image file to analyze.
    mag : int, default=0
        Fallback magnification used if OCR and file-name parsing both fail.

    Returns
    -------
    int
        The detected (or fallback) magnification level.
    """
    img = cv2.imread(name)  # image to be analyzed (used by the OCR path)
    # FIX: replaced the pointless `while True` (every branch broke on the
    # first pass) with straight-line code, and the bare `except:` clauses
    # (which also swallowed KeyboardInterrupt/SystemExit) with
    # `except Exception`, keeping the same best-effort fallback chain.
    try:
        import pytesseract
        pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract'
        # Crops image to magn. details, increases reliability of OCR
        magCrop = img[443:465, 168:240]
        # Inverts colors, easier for OCR software to read
        magCropInv = cv2.bitwise_not(magCrop)
        # Image to text, removes 'x', makes integer
        mag = int(pytesseract.image_to_string(Image.fromarray(magCropInv))[:-1])
        print("Using tesseract OCR...")
        return mag
    except Exception:
        pass
    try:
        # File names look like 'prefix_<mag>.<ext>'; take the number
        # between the last underscore and the extension.
        spl1 = name.split('_')
        spl2 = spl1[-1].split('.')
        mag = int(spl2[0])
        print("Using file name...")
        return mag
    except Exception:
        #**********USER INPUT**********
        print("Using user input...")
        print(
            "If you did not manually enter this magnification level, results will likely be wrong!")
        return mag
def imageseg(Cont_Image):
    """Segment an edge-detected image into individual particles.

    Builds a binary mask via Otsu thresholding, finds particle centers
    through a Euclidean distance transform and its local maxima (chosen
    over iterative erosion for better handling of overlapping and small
    particles), then applies the watershed transform.

    Returns the labelled segment image, a dataframe of segment positions
    (via ``segmentparser``), the morphologically opened image, and its
    Canny edge map.

    Methodology ideas pulled from the SciKit Image example pages
    (https://scikit-image.org), the Open CV example pages
    (https://opencv.org) and the pyimagesearch blog."""
    # NOTE(review): this uint8 copy is never used afterwards -- kept for parity
    proccessedImage = np.array(Cont_Image, dtype=np.uint8)
    opened = cv2.morphologyEx(Cont_Image, cv2.MORPH_OPEN,
                              np.ones((5, 6), np.uint8))
    edges = cv2.Canny(opened, 100, 150, 3, L2gradient=True)
    # Otsu threshold gives an inverted binary particle mask
    _, mask = cv2.threshold(
        Cont_Image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    distances = ndimage.distance_transform_bf(mask)
    peaks = peak_local_max(
        distances, indices=False, min_distance=20, labels=mask)
    # `markers` avoids shadowing skimage.measure.label imported at file level
    markers = ndimage.label(peaks)[0]
    segments = watershed(-distances, markers, mask=mask)
    positions = segmentparser(segments, mask)
    return segments, positions, opened, edges
def predict_shape(major_axis, minor_axis):
    """Classify each detected particle as circular or ellipsoidal.

    A particle counts as circular when its minor axis lies within +/-10%
    of its major axis; anything outside that band is ellipsoidal.

    Arguments:
    major_axis = It is the long axis of the ellipse
    minor_axis = It is the short axis of the ellipse

    Returns the pair (number of circles, number of ellipses)."""
    n_circles = 0
    n_ellipses = 0
    for idx, major in enumerate(major_axis):
        minor = minor_axis[idx]
        # tolerance band written exactly as in the calibration convention
        upper = 0.1 * major + major
        lower = major - 0.1 * major
        if lower <= minor <= upper:
            n_circles += 1
        else:
            n_ellipses += 1
    return (n_circles, n_ellipses)
def segmentparser(segmented_image, binary):
    """Extract per-segment centroids and areas into a Pandas dataframe.

    Parameters:
    -----------
    segmented_image : 2D array
        Labelled segment image (e.g. output of a watershed transform).
    binary : 2D array
        Bitmap/binary image of the same size (only 1's and 0's), used as
        the intensity image for ``regionprops``.

    Returns:
    --------
    segment_properties : Pandas dataframe with columns 'X', 'Y', 'Area'
        holding each segment's centroid row, centroid column, and pixel
        area, one row per segment.

    Code courtesy of the L9 Image Proccessing Lecture, with a fix applied.
    """
    props = regionprops(segmented_image, intensity_image=binary)
    # BUG FIX: the original used a single chained assignment
    #   x = y = area = perimeter = intensity = np.zeros(len(props))
    # which binds every name to the SAME array, so the X, Y and Area
    # columns all ended up identical (visible in the old docstring example).
    # Allocate independent arrays instead; the unused perimeter/intensity
    # placeholders are dropped.
    n = len(props)
    x = np.zeros(n)
    y = np.zeros(n)
    area = np.zeros(n)
    for i, prop in enumerate(props):
        x[i] = prop.centroid[0]
        y[i] = prop.centroid[1]
        area[i] = prop.area
    segment_properties = pd.DataFrame({'X': x, 'Y': y, 'Area': area})
    return segment_properties
| [
"matplotlib.pyplot.title",
"skimage.feature.peak_local_max",
"matplotlib.pyplot.clf",
"numpy.ones",
"numpy.around",
"cv2.ellipse",
"cv2.erode",
"skimage.measure.regionprops",
"pandas.DataFrame",
"cv2.contourArea",
"cv2.dilate",
"cv2.fitEllipse",
"cv2.convertScaleAbs",
"cv2.drawContours",
... | [((516, 540), 'cv2.imread', 'cv2.imread', (['ImageName', '(0)'], {}), '(ImageName, 0)\n', (526, 540), False, 'import cv2\n'), ((552, 581), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (560, 581), True, 'import numpy as np\n'), ((594, 642), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['img'], {'alpha': 'Alpha', 'beta': 'Beta'}), '(img, alpha=Alpha, beta=Beta)\n', (613, 642), False, 'import cv2\n'), ((657, 682), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (664, 682), True, 'import numpy as np\n'), ((699, 745), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img2', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img2, cv2.MORPH_OPEN, kernel)\n', (715, 745), False, 'import cv2\n'), ((760, 809), 'cv2.Canny', 'cv2.Canny', (['opening2', '(100)', '(150)', '(3)'], {'L2gradient': '(True)'}), '(opening2, 100, 150, 3, L2gradient=True)\n', (769, 809), False, 'import cv2\n'), ((1127, 1136), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1134, 1136), True, 'from matplotlib import pyplot as plt\n'), ((1202, 1237), 'matplotlib.pyplot.scatter', 'plt.scatter', (['major_axis', 'minor_axis'], {}), '(major_axis, minor_axis)\n', (1213, 1237), True, 'from matplotlib import pyplot as plt\n'), ((1243, 1257), 'matplotlib.pyplot.plot', 'plt.plot', (['p', 'q'], {}), '(p, q)\n', (1251, 1257), True, 'from matplotlib import pyplot as plt\n'), ((1263, 1278), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(80)'], {}), '(0, 80)\n', (1271, 1278), True, 'from matplotlib import pyplot as plt\n'), ((1284, 1299), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(80)'], {}), '(0, 80)\n', (1292, 1299), True, 'from matplotlib import pyplot as plt\n'), ((1305, 1326), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Maximum"""'], {}), "('Maximum')\n", (1315, 1326), True, 'from matplotlib import pyplot as plt\n'), ((1332, 1353), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Minimum"""'], {}), "('Minimum')\n", (1342, 1353), True, 
'from matplotlib import pyplot as plt\n'), ((1359, 1398), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot of Minimum vs Maximum"""'], {}), "('Plot of Minimum vs Maximum')\n", (1368, 1398), True, 'from matplotlib import pyplot as plt\n'), ((1404, 1458), 'matplotlib.pyplot.legend', 'plt.legend', (["['Theoretical circle', 'Predicted Circle']"], {}), "(['Theoretical circle', 'Predicted Circle'])\n", (1414, 1458), True, 'from matplotlib import pyplot as plt\n'), ((2151, 2280), 'cv2.HoughCircles', 'cv2.HoughCircles', (['img', 'cv2.HOUGH_GRADIENT', 'dp', 'minDist'], {'param1': 'para1', 'param2': 'para2', 'minRadius': 'minradius', 'maxRadius': 'maxradius'}), '(img, cv2.HOUGH_GRADIENT, dp, minDist, param1=para1, param2\n =para2, minRadius=minradius, maxRadius=maxradius)\n', (2167, 2280), False, 'import cv2\n'), ((2718, 2956), 'pandas.DataFrame', 'pd.DataFrame', (['[[0, 0, 0, 0], [35000, 157, 2000, 12.7388535], [25000, 111, 2000, \n 18.01801802], [15000, 167, 5000, 29.94011976], [12000, 133, 5000, \n 37.59398496], [10000, 111, 5000, 45.04504505], [6500, 15, 10000, \n 68.96551724]]'], {}), '([[0, 0, 0, 0], [35000, 157, 2000, 12.7388535], [25000, 111, \n 2000, 18.01801802], [15000, 167, 5000, 29.94011976], [12000, 133, 5000,\n 37.59398496], [10000, 111, 5000, 45.04504505], [6500, 15, 10000, \n 68.96551724]])\n', (2730, 2956), True, 'import pandas as pd\n'), ((3738, 3793), 'cv2.threshold', 'cv2.threshold', (['canny_image', '(120)', '(255)', 'cv2.THRESH_BINARY'], {}), '(canny_image, 120, 255, cv2.THRESH_BINARY)\n', (3751, 3793), False, 'import cv2\n'), ((3810, 3836), 'cv2.dilate', 'cv2.dilate', (['threshed', 'None'], {}), '(threshed, None)\n', (3820, 3836), False, 'import cv2\n'), ((3853, 3878), 'cv2.erode', 'cv2.erode', (['threshed', 'None'], {}), '(threshed, None)\n', (3862, 3878), False, 'import cv2\n'), ((4065, 4134), 'cv2.drawContours', 'cv2.drawContours', (['canny_image', 'cnts', '(-1)', '(157, 0, 78)', '(1)', 'cv2.LINE_AA'], {}), '(canny_image, cnts, -1, (157, 0, 78), 
1, cv2.LINE_AA)\n', (4081, 4134), False, 'import cv2\n'), ((4926, 4942), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (4936, 4942), False, 'import cv2\n'), ((7336, 7372), 'numpy.array', 'np.array', (['Cont_Image'], {'dtype': 'np.uint8'}), '(Cont_Image, dtype=np.uint8)\n', (7344, 7372), True, 'import numpy as np\n'), ((7389, 7414), 'numpy.ones', 'np.ones', (['(5, 6)', 'np.uint8'], {}), '((5, 6), np.uint8)\n', (7396, 7414), True, 'import numpy as np\n'), ((7430, 7482), 'cv2.morphologyEx', 'cv2.morphologyEx', (['Cont_Image', 'cv2.MORPH_OPEN', 'kernel'], {}), '(Cont_Image, cv2.MORPH_OPEN, kernel)\n', (7446, 7482), False, 'import cv2\n'), ((7496, 7544), 'cv2.Canny', 'cv2.Canny', (['opening', '(100)', '(150)', '(3)'], {'L2gradient': '(True)'}), '(opening, 100, 150, 3, L2gradient=True)\n', (7505, 7544), False, 'import cv2\n'), ((7566, 7640), 'cv2.threshold', 'cv2.threshold', (['Cont_Image', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(Cont_Image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (7579, 7640), False, 'import cv2\n'), ((7672, 7709), 'scipy.ndimage.distance_transform_bf', 'ndimage.distance_transform_bf', (['binary'], {}), '(binary)\n', (7701, 7709), False, 'from scipy import ndimage\n'), ((7726, 7802), 'skimage.feature.peak_local_max', 'peak_local_max', (['distTransform'], {'indices': '(False)', 'min_distance': '(20)', 'labels': 'binary'}), '(distTransform, indices=False, min_distance=20, labels=binary)\n', (7740, 7802), False, 'from skimage.feature import peak_local_max\n'), ((7869, 7914), 'skimage.morphology.watershed', 'watershed', (['(-distTransform)', 'label'], {'mask': 'binary'}), '(-distTransform, label, mask=binary)\n', (7878, 7914), False, 'from skimage.morphology import watershed\n'), ((9834, 9886), 'skimage.measure.regionprops', 'regionprops', (['segmented_image'], {'intensity_image': 'binary'}), '(segmented_image, intensity_image=binary)\n', (9845, 9886), False, 'from skimage.measure import regionprops\n'), 
((10183, 10227), 'pandas.DataFrame', 'pd.DataFrame', (["{'X': x, 'Y': y, 'Area': area}"], {}), "({'X': x, 'Y': y, 'Area': area})\n", (10195, 10227), True, 'import pandas as pd\n'), ((2333, 2351), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (2342, 2351), True, 'import numpy as np\n'), ((2461, 2512), 'cv2.circle', 'cv2.circle', (['img', '(i[0], i[1])', 'i[2]', '(0, 255, 0)', '(2)'], {}), '(img, (i[0], i[1]), i[2], (0, 255, 0), 2)\n', (2471, 2512), False, 'import cv2\n'), ((2563, 2611), 'cv2.circle', 'cv2.circle', (['img', '(i[0], i[1])', '(2)', '(0, 0, 255)', '(3)'], {}), '(img, (i[0], i[1]), 2, (0, 0, 255), 3)\n', (2573, 2611), False, 'import cv2\n'), ((3933, 4000), 'cv2.findContours', 'cv2.findContours', (['threshed', 'cv2.RETR_CCOMP', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(threshed, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n', (3949, 4000), False, 'import cv2\n'), ((7826, 7849), 'scipy.ndimage.label', 'ndimage.label', (['localMax'], {}), '(localMax)\n', (7839, 7849), False, 'from scipy import ndimage\n'), ((4495, 4514), 'cv2.fitEllipse', 'cv2.fitEllipse', (['cnt'], {}), '(cnt)\n', (4509, 4514), False, 'import cv2\n'), ((4604, 4666), 'cv2.ellipse', 'cv2.ellipse', (['canny_image', 'Ellipse', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(canny_image, Ellipse, (255, 0, 0), 2, cv2.LINE_AA)\n', (4615, 4666), False, 'import cv2\n'), ((5343, 5367), 'cv2.bitwise_not', 'cv2.bitwise_not', (['magCrop'], {}), '(magCrop)\n', (5358, 5367), False, 'import cv2\n'), ((4399, 4419), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (4414, 4419), False, 'import cv2\n'), ((5419, 5446), 'PIL.Image.fromarray', 'Image.fromarray', (['magCropInv'], {}), '(magCropInv)\n', (5434, 5446), True, 'import PIL.Image as Image\n')] |
import collections
import numpy as np
from nengo_loihi.block import Config
# Per-core limits on the number of configuration registers -- presumably
# Loihi hardware constraints; confirm against the NxSDK documentation.
MAX_COMPARTMENT_CFGS = 32
MAX_VTH_CFGS = 8
MAX_SYNAPSE_CFGS = 8
class Board:
    """A full Loihi board hosting one or more chips.

    Tracks the chips allocated on the board, externally supplied inputs,
    probes, and a lookup table mapping each synapse to its location
    (chip index, core index, axon ids).
    """

    def __init__(self, board_id=1):
        self.board_id = board_id
        self.chips = []  # Chip objects on this board
        self.chip_idxs = {}  # Chip -> position in self.chips
        self.synapse_index = {}  # Synapse -> (chip_idx, core_idx, axon ids)
        self.inputs = []
        self.probes = []
        # When using snips, this maps to a ProbeSnip instance
        # When not using snips, this maps to an NxSDK probe
        self.probe_map = {}

    @property
    def n_chips(self):
        """Number of chips currently on the board."""
        return len(self.chips)

    @property
    def n_cores_per_chip(self):
        """Core count of each chip, in board order."""
        return [c.n_cores for c in self.chips]

    @property
    def n_synapses_per_core(self):
        """Nested list of synapse counts: one list of cores per chip."""
        return [[core.n_synapses for core in c.cores] for c in self.chips]

    def _add_chip(self, chip):
        """Register `chip` on the board, recording its index (added once)."""
        assert chip not in self.chips
        self.chip_idxs[chip] = len(self.chips)
        self.chips.append(chip)

    def add_input(self, input):
        """Record an external input feeding this board."""
        self.inputs.append(input)

    def new_chip(self):
        """Allocate a fresh Chip on this board and return it."""
        fresh = Chip(board=self)
        self._add_chip(fresh)
        return fresh

    def chip_index(self, chip):
        """Return the position of `chip` within this board's chip list."""
        return self.chip_idxs[chip]

    def index_synapse(self, synapse, chip, core, idxs):
        """Register where `synapse` lives: (chip index, core index, axon ids)."""
        self.synapse_index[synapse] = (self.chip_index(chip), chip.core_index(core), idxs)

    def find_block(self, target_block):
        """Locate `target_block` on the board.

        Returns (chip index, core index, block index within the core,
        compartment indices, axon range), or a tuple of Nones if absent.
        """
        for chip in self.chips:
            for core in chip.cores:
                if target_block not in core.blocks:
                    continue  # cheap membership pre-check for speed
                for block, compartment_idxs, ax_range in core.iterate_blocks():
                    if block is target_block:
                        return (
                            self.chip_index(chip),
                            chip.core_index(core),
                            core.blocks.index(block),
                            compartment_idxs,
                            ax_range,
                        )
                raise RuntimeError(
                    "Block is in core, but not found?!"
                )  # pragma: no cover
        return (None, None, None, None, None)  # pragma: no cover

    def find_synapse(self, synapse):
        """Return the registered (chip_idx, core_idx, axon ids) for `synapse`."""
        return self.synapse_index[synapse]
class Chip:
    """A single Loihi chip on a board, holding multiple cores."""

    def __init__(self, board):
        self.board = board
        self.cores = []  # Core objects allocated on this chip
        self.core_idxs = {}  # Core -> position in self.cores

    @property
    def n_cores(self):
        """Number of cores currently allocated on this chip."""
        return len(self.cores)

    def _add_core(self, core):
        """Register `core` on the chip, recording its index (added once)."""
        assert core not in self.cores
        self.core_idxs[core] = len(self.cores)
        self.cores.append(core)

    def new_core(self):
        """Allocate a fresh Core on this chip and return it."""
        fresh = Core(chip=self)
        self._add_core(fresh)
        return fresh

    def core_index(self, core):
        """Return the position of `core` within this chip's core list."""
        return self.core_idxs[core]
class Core:
    """A Loihi Core, implementing one or more Blocks."""
    def __init__(self, chip):
        self.chip = chip
        self.blocks = []
        # per-core configuration tables (indices returned by the add_* methods)
        self.compartment_cfgs = []
        self.vth_cfgs = []
        self.synapse_cfgs = [None]  # keep index 0 unused
        self.stdp_pre_cfgs = []
        self.synapse_cfg_idxs = {}  # one synfmt per Synapse, for now
        # for each Synapse, provides a map from axon index to axon id
        self.synapse_axons = collections.OrderedDict()
        # for each Synapse, provides the indices occupied in the synapse weight table
        self.synapse_entries = collections.OrderedDict()
        self.learning_coreid = None
    @property
    def board(self):
        """The Board this core's chip belongs to."""
        return self.chip.board
    @property
    def synapses(self):
        """All synapses registered on this core, in insertion order."""
        return list(self.synapse_axons)
    @property
    def n_synapses(self):
        """Total synapse weight count across all blocks on this core."""
        return sum(
            synapse.size() for block in self.blocks for synapse in block.synapses
        )
    def iterate_blocks(self):
        """Yield (block, compartment indices, axon range) for each block,
        assigning consecutive compartment and axon index ranges."""
        i0 = 0
        a0 = 0
        for block in self.blocks:
            i1 = i0 + block.compartment.n_compartments
            a1 = a0 + sum(ax.n_axons for ax in block.axons)
            compartment_idxs = np.arange(i0, i1)
            ax_range = (a0, a1)
            yield block, compartment_idxs, ax_range
            i0 = i1
            a0 = a1
    def iterate_synapses(self):
        """Yield every synapse of every block on this core."""
        for block in self.blocks:
            for synapse in block.synapses:
                yield synapse
    def add_block(self, block):
        """Place `block` on this core."""
        self.blocks.append(block)
    def add_compartment_cfg(self, compartment_cfg):
        """Append a compartment config and return its table index."""
        self.compartment_cfgs.append(compartment_cfg)
        return len(self.compartment_cfgs) - 1  # index
    def add_vth_cfg(self, vth_cfg):
        """Append a voltage-threshold config and return its table index."""
        self.vth_cfgs.append(vth_cfg)
        return len(self.vth_cfgs) - 1  # index
    def add_stdp_pre_cfg(self, stdp_pre_cfg):
        """Append an STDP pre-trace config and return its table index."""
        self.stdp_pre_cfgs.append(stdp_pre_cfg)
        return len(self.stdp_pre_cfgs) - 1  # index
    def add_synapse(self, synapse):
        """Register `synapse` on this core, assigning axon ids and its
        slice of the synapse weight table, and index it on the board."""
        synapse_cfg_idx = self.get_synapse_cfg_idx(synapse.synapse_cfg)
        self.synapse_cfg_idxs[synapse] = synapse_cfg_idx
        # determine starting ID for this synapse's axons
        id0 = 0
        if len(self.synapse_axons) > 0:
            last = next(reversed(self.synapse_axons))
            # NOTE(review): assumes the previous synapse's last axon id is not
            # None (i.e. its final axon was not a dummy) -- confirm upstream.
            id0 = self.synapse_axons[last][-1] + 1
        # determine the ID for each synapse axon index
        idxs_per_synapse = synapse.idxs_per_synapse()
        i = id0
        ids = []
        for idx in range(synapse.n_axons):
            base = synapse.axon_compartment_base(idx)
            w, _ = synapse.axon_weights_indices(idx)
            if base is None or w.size == 0:
                # dummy axon, which we will not build
                ids.append(None)
            else:
                ids.append(i)
                i += idxs_per_synapse
        self.synapse_axons[synapse] = ids
        self.board.index_synapse(synapse, self.chip, self, ids)
        # determine the indices in the synapse weight table that this synapse occupies
        s0 = 0
        if len(self.synapse_entries) > 0:
            last = next(reversed(self.synapse_entries))
            s0 = self.synapse_entries[last][1]
        s1 = s0 + synapse.size()
        self.synapse_entries[synapse] = (s0, s1)
    def get_synapse_cfg(self, synapse):
        """Return the synapse config registered for `synapse`."""
        return self.synapse_cfgs[self.synapse_cfg_idxs[synapse]]
    def get_synapse_cfg_idx(self, synapse_cfg):
        """Return the index of `synapse_cfg`, appending it if not present."""
        try:
            return self.synapse_cfgs.index(synapse_cfg)
        except ValueError:
            self.synapse_cfgs.append(synapse_cfg)
            return len(self.synapse_cfgs) - 1  # index
class LoihiSpikeInput:
    """Stores information needed to send spikes to the actual chip.

    This acts as a bridge between a SpikeInput and the actual chip.
    It maps positions in the spike input to actual locations on the chip.

    Attributes
    ----------
    axon_map : {int: ndarray(dtype=spike_dtype)}
        Map from axon indices in the SpikeInput to LoihiAxons targeting
        particular locations on the chip.

    Notes
    -----
    spike_dtype is a numpy datatype to represent a Loihi axon or spike.
    It represents the following information.

    t : np.int32
        The timestep at which to send the spike to the chip (unused for axons)
    axon_type : np.int32
        The population type of axon. discrete = 0, pop16 = 16, pop32 = 32.
    chip_id : np.int32
        The actual ID of the target chip on the board.
    core_id : np.int32
        The actual ID of the target core on the board.
    axon_id : np.int32
        The actual ID of the target axon on the board.
    atom : np.int32
        The population index (atom), used if this axon sends population spikes
        (i.e. axon_type != 0).
    atom_bits_extra : np.int32
        The number of extra bits used for the atom (pop16 axons only).
    """

    # Structured record type for one axon/spike; field meanings are
    # documented in the class docstring above.
    spike_dtype = np.dtype(
        [
            ("t", np.int32),
            ("axon_type", np.int32),
            ("chip_id", np.int32),
            ("core_id", np.int32),
            ("axon_id", np.int32),
            ("atom", np.int32),
            ("atom_bits_extra", np.int32),
        ]
    )

    @classmethod
    def add_spikes_to_generator(cls, t, spikes, basic_spike_generator):
        """Queue ``spikes`` on ``basic_spike_generator`` for timestep ``t``.

        Dispatches on each spike record's ``axon_type`` field to the matching
        nxsdk generator method (discrete = 0, pop16 = 16, pop32 = 32).
        """
        methods = {
            0: basic_spike_generator.addSpike,
            16: basic_spike_generator.addPop16Spike,
            32: basic_spike_generator.addPop32Spike,
        }
        for spike in spikes:
            axon_type = int(spike["axon_type"])
            # .item() converts numpy scalars to plain Python ints for nxsdk
            kwargs = dict(
                time=t,
                chipId=spike["chip_id"].item(),
                coreId=spike["core_id"].item(),
                axonId=spike["axon_id"].item(),
            )
            if axon_type == 0:
                assert spike["atom"] == 0, "Atom must be zero for discrete spikes"
            else:
                # population spikes carry the atom (population index)
                kwargs["srcAtom"] = spike["atom"]
                if axon_type == 16:
                    kwargs["atomBits"] = spike["atom_bits_extra"]
            methods[axon_type](**kwargs)

    def __init__(self):
        self.axon_map = {}  # maps spike_input idx to axon in self.axons

    def set_axons(self, board, nxsdk_board, spike_input):
        """Initialize the axon map for this object.

        Parameters
        ----------
        board : Board
            The nengo_loihi object representing the Loihi board.
        nxsdk_board : NxsdkBoard
            The nxsdk object representing the Loihi board.
        spike_input : SpikeInput
            The SpikeInput containing information about which axons are
            to be targeted.
        """
        # May only be called once per instance (the map must still be empty).
        assert len(self.axon_map) == 0
        input_idxs = np.arange(spike_input.n_neurons)
        for axon in spike_input.axons:
            synapse = axon.target
            atom_bits_extra = synapse.atom_bits_extra()
            # resolve the synapse to its physical (chip, core, axon IDs) location
            tchip_idx, tcore_idx, taxon_ids = board.find_synapse(synapse)
            tchip = nxsdk_board.n2Chips[tchip_idx]
            tcore = tchip.n2Cores[tcore_idx]
            spikes = axon.map_spikes(input_idxs)
            for input_idx, spike in zip(input_idxs, spikes):
                # ensure every input index has an entry, even if it ends up empty
                self.axon_map.setdefault(input_idx, [])
                taxon_id = taxon_ids[spike.axon_idx] if spike is not None else None
                if taxon_id is None:
                    continue  # this is a dummy axon, so do not connect
                self.axon_map[input_idx].append(
                    np.array(
                        (
                            # t = -1: unused for axons (see class docstring)
                            -1,
                            axon.pop_type,
                            tchip.id,
                            tcore.id,
                            taxon_id,
                            spike.atom,
                            atom_bits_extra,
                        ),
                        dtype=self.spike_dtype,
                    )
                )

    def spikes_to_loihi(self, input_idxs):
        """Map spike input indices to axons targeting chip locations.

        Parameters
        ----------
        input_idxs : list of int
            Indices of positions in the SpikeInput that are currently spiking.

        Returns
        -------
        axons : generator of ndarray(dtype=spike_dtype)
            Axons targeting physical locations on the chip.
        """
        return (axon for i in input_idxs for axon in self.axon_map[i])
class CompartmentConfig(Config):
    """Compartment configuration: decay values, refractory delay, noise enable."""

    # Hardware limits for the corresponding parameters.
    DECAY_U_MAX = 4095
    DECAY_V_MAX = 4095
    REFRACT_DELAY_MAX = 63

    params = ("decay_u", "decay_v", "refract_delay", "enable_noise")

    def __init__(self, decay_v, decay_u, refract_delay, enable_noise):
        super().__init__()
        # stored in the same order as ``params``
        self.decay_u = decay_u
        self.decay_v = decay_v
        self.refract_delay = refract_delay
        self.enable_noise = enable_noise
class VthConfig(Config):
    """Voltage-threshold configuration of a compartment.

    Attributes
    ----------
    vth : int
        The mantissa of the voltage threshold for a compartment. The actual
        voltage threshold is this value multiplied by VTH_EXP.
    """

    params = ("vth",)

    def __init__(self, vth):
        super().__init__()
        self.vth = vth
class TraceConfig(Config):
    """Trace configuration: ``tau`` plus the integer/fractional spike parts."""

    params = ("tau", "spike_int", "spike_frac")

    def __init__(self, tau=0, spike_int=0, spike_frac=0):
        super().__init__()
        self.tau = tau
        self.spike_int = spike_int
        self.spike_frac = spike_frac
| [
"collections.OrderedDict",
"numpy.dtype",
"numpy.arange",
"numpy.array"
] | [((7909, 8090), 'numpy.dtype', 'np.dtype', (["[('t', np.int32), ('axon_type', np.int32), ('chip_id', np.int32), (\n 'core_id', np.int32), ('axon_id', np.int32), ('atom', np.int32), (\n 'atom_bits_extra', np.int32)]"], {}), "([('t', np.int32), ('axon_type', np.int32), ('chip_id', np.int32),\n ('core_id', np.int32), ('axon_id', np.int32), ('atom', np.int32), (\n 'atom_bits_extra', np.int32)])\n", (7917, 8090), True, 'import numpy as np\n'), ((3436, 3461), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (3459, 3461), False, 'import collections\n'), ((3580, 3605), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (3603, 3605), False, 'import collections\n'), ((9709, 9741), 'numpy.arange', 'np.arange', (['spike_input.n_neurons'], {}), '(spike_input.n_neurons)\n', (9718, 9741), True, 'import numpy as np\n'), ((4183, 4200), 'numpy.arange', 'np.arange', (['i0', 'i1'], {}), '(i0, i1)\n', (4192, 4200), True, 'import numpy as np\n'), ((10471, 10587), 'numpy.array', 'np.array', (['(-1, axon.pop_type, tchip.id, tcore.id, taxon_id, spike.atom, atom_bits_extra)'], {'dtype': 'self.spike_dtype'}), '((-1, axon.pop_type, tchip.id, tcore.id, taxon_id, spike.atom,\n atom_bits_extra), dtype=self.spike_dtype)\n', (10479, 10587), True, 'import numpy as np\n')] |
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
# -----------------------------------------------------------------------
# Author: <NAME> (Clockmender), <NAME> (ermo) Copyright (c) 2019
# -----------------------------------------------------------------------
#
# Common Functions used in more than one place in PDT Operations
import bpy
import bmesh
import bgl
import gpu
import numpy as np
from mathutils import Vector, Quaternion
from gpu_extras.batch import batch_for_shader
from math import cos, sin, pi
from .pdt_msg_strings import (
PDT_ERR_VERT_MODE,
PDT_ERR_SEL_2_V_1_E,
PDT_ERR_SEL_2_OBJS,
PDT_ERR_NO_ACT_OBJ,
PDT_ERR_SEL_1_EDGEM,
)
from . import pdt_exception
PDT_ShaderError = pdt_exception.ShaderError
def debug(msg, prefix=""):
"""Print a debug message to the console if PDT's or Blender's debug flags are set.
Note:
The printed message will be of the form:
{prefix}{caller file name:line number}| {msg}
Args:
msg: Incomming message to display
prefix: Always Blank
Returns:
Nothing.
"""
pdt_debug = bpy.context.preferences.addons[__package__].preferences.debug
if bpy.app.debug or bpy.app.debug_python or pdt_debug:
import traceback
def extract_filename(fullpath):
"""Return only the filename part of fullpath (excluding its path).
Args:
fullpath: Filename's full path
Returns:
filename.
"""
# Expected to end up being a string containing only the filename
# (i.e. excluding its preceding '/' separated path)
filename = fullpath.split('/')[-1]
#print(filename)
# something went wrong
if len(filename) < 1:
return fullpath
# since this is a string, just return it
return filename
# stack frame corresponding to the line where debug(msg) was called
#print(traceback.extract_stack()[-2])
laststack = traceback.extract_stack()[-2]
#print(laststack[0])
# laststack[0] is the caller's full file name, laststack[1] is the line number
print(f"{prefix}{extract_filename(laststack[0])}:{laststack[1]}| {msg}")
def oops(self, context):
"""Error Routine.
Note:
Displays error message in a popup.
Args:
context: Blender bpy.context instance.
Returns:
Nothing.
"""
scene = context.scene
pg = scene.pdt_pg
self.layout.label(text=pg.error)
def set_mode(mode_pl):
"""Sets Active Axes for View Orientation.
Note:
Sets indices of axes for locational vectors:
a3 is normal to screen, or depth
"XY": a1 = x, a2 = y, a3 = z
"XZ": a1 = x, a2 = z, a3 = y
"YZ": a1 = y, a2 = z, a3 = x
Args:
mode_pl: Plane Selector variable as input
Returns:
3 Integer indices.
"""
order = {
"XY": (0, 1, 2),
"XZ": (0, 2, 1),
"YZ": (1, 2, 0),
"LO": (0, 1, 2),
}
return order[mode_pl]
def set_axis(mode_pl):
"""Sets Active Axes for View Orientation.
Note:
Sets indices for axes from taper vectors
Axis order: Rotate Axis, Move Axis, Height Axis
Args:
mode_pl: Taper Axis Selector variable as input
Returns:
3 Integer Indicies.
"""
order = {
"RX-MY": (0, 1, 2),
"RX-MZ": (0, 2, 1),
"RY-MX": (1, 0, 2),
"RY-MZ": (1, 2, 0),
"RZ-MX": (2, 0, 1),
"RZ-MY": (2, 1, 0),
}
return order[mode_pl]
def check_selection(num, bm, obj):
"""Check that the Object's select_history has sufficient entries.
Note:
If selection history is not Verts, clears selection and history.
Args:
num: The number of entries required for each operation
bm: The Bmesh from the Object
obj: The Object
Returns:
list of 3D points as Vectors.
"""
if len(bm.select_history) < num:
return None
active_vertex = bm.select_history[-1]
if isinstance(active_vertex, bmesh.types.BMVert):
vector_a = active_vertex.co
if num == 1:
return vector_a
if num == 2:
vector_b = bm.select_history[-2].co
return vector_a, vector_b
if num == 3:
vector_b = bm.select_history[-2].co
vector_c = bm.select_history[-3].co
return vector_a, vector_b, vector_c
if num == 4:
vector_b = bm.select_history[-2].co
vector_c = bm.select_history[-3].co
vector_d = bm.select_history[-4].co
return vector_a, vector_b, vector_c, vector_d
else:
for f in bm.faces:
f.select_set(False)
for e in bm.edges:
e.select_set(False)
for v in bm.verts:
v.select_set(False)
bmesh.update_edit_mesh(obj.data)
bm.select_history.clear()
return None
def update_sel(bm, verts, edges, faces):
"""Updates Vertex, Edge and Face Selections following a function.
Args:
bm: Object Bmesh
verts: New Selection for Vertices
edges: The Edges on which to operate
faces: The Faces on which to operate
Returns:
Nothing.
"""
for f in bm.faces:
f.select_set(False)
for e in bm.edges:
e.select_set(False)
for v in bm.verts:
v.select_set(False)
for v in verts:
v.select_set(True)
for e in edges:
e.select_set(True)
for f in faces:
f.select_set(True)
def view_coords(x_loc, y_loc, z_loc):
"""Converts input Vector values to new Screen Oriented Vector.
Args:
x_loc: X coordinate from vector
y_loc: Y coordinate from vector
z_loc: Z coordinate from vector
Returns:
Vector adjusted to View's Inverted Tranformation Matrix.
"""
areas = [a for a in bpy.context.screen.areas if a.type == "VIEW_3D"]
if len(areas) > 0:
view_matrix = areas[0].spaces.active.region_3d.view_matrix
view_matrix = view_matrix.to_3x3().normalized().inverted()
view_location = Vector((x_loc, y_loc, z_loc))
new_view_location = view_matrix @ view_location
return new_view_location
return Vector((0, 0, 0))
def view_coords_i(x_loc, y_loc, z_loc):
"""Converts Screen Oriented input Vector values to new World Vector.
Note:
Converts View tranformation Matrix to Rotational Matrix
Args:
x_loc: X coordinate from vector
y_loc: Y coordinate from vector
z_loc: Z coordinate from vector
Returns:
Vector adjusted to View's Transformation Matrix.
"""
areas = [a for a in bpy.context.screen.areas if a.type == "VIEW_3D"]
if len(areas) > 0:
view_matrix = areas[0].spaces.active.region_3d.view_matrix
view_matrix = view_matrix.to_3x3().normalized()
view_location = Vector((x_loc, y_loc, z_loc))
new_view_location = view_matrix @ view_location
return new_view_location
return Vector((0, 0, 0))
def view_dir(dis_v, ang_v):
"""Converts Distance and Angle to View Oriented Vector.
Note:
Converts View Transformation Matrix to Rotational Matrix (3x3)
Angles are Converts to Radians from degrees.
Args:
dis_v: Scene PDT distance
ang_v: Scene PDT angle
Returns:
World Vector.
"""
areas = [a for a in bpy.context.screen.areas if a.type == "VIEW_3D"]
if len(areas) > 0:
view_matrix = areas[0].spaces.active.region_3d.view_matrix
view_matrix = view_matrix.to_3x3().normalized().inverted()
view_location = Vector((0, 0, 0))
view_location.x = dis_v * cos(ang_v * pi / 180)
view_location.y = dis_v * sin(ang_v * pi / 180)
new_view_location = view_matrix @ view_location
return new_view_location
return Vector((0, 0, 0))
def euler_to_quaternion(roll, pitch, yaw):
"""Converts Euler Rotation to Quaternion Rotation.
Args:
roll: Roll in Euler rotation
pitch: Pitch in Euler rotation
yaw: Yaw in Euler rotation
Returns:
Quaternion Rotation.
"""
# fmt: off
quat_x = (np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2)
- np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2))
quat_y = (np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)
+ np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2))
quat_z = (np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)
- np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2))
quat_w = (np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2)
+ np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2))
# fmt: on
return Quaternion((quat_w, quat_x, quat_y, quat_z))
def arc_centre(vector_a, vector_b, vector_c):
"""Calculates Centre of Arc from 3 Vector Locations using standard Numpy routine
Args:
vector_a: Active vector location
vector_b: Second vector location
vector_c: Third vector location
Returns:
Vector representing Arc Centre and Float representing Arc Radius.
"""
coord_a = np.array([vector_a.x, vector_a.y, vector_a.z])
coord_b = np.array([vector_b.x, vector_b.y, vector_b.z])
coord_c = np.array([vector_c.x, vector_c.y, vector_c.z])
line_a = np.linalg.norm(coord_c - coord_b)
line_b = np.linalg.norm(coord_c - coord_a)
line_c = np.linalg.norm(coord_b - coord_a)
# fmt: off
line_s = (line_a+line_b+line_c) / 2
radius = (
line_a*line_b*line_c/4
/ np.sqrt(line_s
* (line_s-line_a)
* (line_s-line_b)
* (line_s-line_c))
)
base_1 = line_a*line_a * (line_b*line_b + line_c*line_c - line_a*line_a)
base_2 = line_b*line_b * (line_a*line_a + line_c*line_c - line_b*line_b)
base_3 = line_c*line_c * (line_a*line_a + line_b*line_b - line_c*line_c)
# fmt: on
intersect_coord = np.column_stack((coord_a, coord_b, coord_c))
intersect_coord = intersect_coord.dot(np.hstack((base_1, base_2, base_3)))
intersect_coord /= base_1 + base_2 + base_3
return Vector((intersect_coord[0], intersect_coord[1], intersect_coord[2])), radius
def intersection(vertex_a, vertex_b, vertex_c, vertex_d, plane):
"""Calculates Intersection Point of 2 Imagined Lines from 4 Vectors.
Note:
Calculates Converging Intersect Location and indication of
whether the lines are convergent using standard Numpy Routines
Args:
vertex_a: Active vector location of first line
vertex_b: Second vector location of first line
vertex_c: Third vector location of 2nd line
vertex_d: Fourth vector location of 2nd line
plane: Working Plane 4 Vector Locations representing 2 lines and Working Plane
Returns:
Intersection Vector and Boolean for convergent state.
"""
if plane == "LO":
vertex_offset = vertex_b - vertex_a
vertex_b = view_coords_i(vertex_offset.x, vertex_offset.y, vertex_offset.z)
vertex_offset = vertex_d - vertex_a
vertex_d = view_coords_i(vertex_offset.x, vertex_offset.y, vertex_offset.z)
vertex_offset = vertex_c - vertex_a
vertex_c = view_coords_i(vertex_offset.x, vertex_offset.y, vertex_offset.z)
vector_ref = Vector((0, 0, 0))
coord_a = (vertex_c.x, vertex_c.y)
coord_b = (vertex_d.x, vertex_d.y)
coord_c = (vertex_b.x, vertex_b.y)
coord_d = (vector_ref.x, vector_ref.y)
else:
a1, a2, a3 = set_mode(plane)
coord_a = (vertex_c[a1], vertex_c[a2])
coord_b = (vertex_d[a1], vertex_d[a2])
coord_c = (vertex_a[a1], vertex_a[a2])
coord_d = (vertex_b[a1], vertex_b[a2])
v_stack = np.vstack([coord_a, coord_b, coord_c, coord_d])
h_stack = np.hstack((v_stack, np.ones((4, 1))))
line_a = np.cross(h_stack[0], h_stack[1])
line_b = np.cross(h_stack[2], h_stack[3])
x_loc, y_loc, z_loc = np.cross(line_a, line_b)
if z_loc == 0:
return Vector((0, 0, 0)), False
new_x_loc = x_loc / z_loc
new_z_loc = y_loc / z_loc
if plane == "LO":
new_y_loc = 0
else:
new_y_loc = vertex_a[a3]
# Order Vector Delta
if plane == "XZ":
vector_delta = Vector((new_x_loc, new_y_loc, new_z_loc))
elif plane == "XY":
vector_delta = Vector((new_x_loc, new_z_loc, new_y_loc))
elif plane == "YZ":
vector_delta = Vector((new_y_loc, new_x_loc, new_z_loc))
else:
# Must be Local View Plane
vector_delta = view_coords(new_x_loc, new_z_loc, new_y_loc) + vertex_a
return vector_delta, True
def get_percent(obj, flip_percent, per_v, data, scene):
"""Calculates a Percentage Distance between 2 Vectors.
Note:
Calculates a point that lies a set percentage between two given points
using standard Numpy Routines.
Works for either 2 vertices for an object in Edit mode
or 2 selected objects in Object mode.
Args:
obj: The Object under consideration
flip_percent: Setting this to True measures the percentage starting from the second vector
per_v: Percentage Input Value
data: pg.flip, pg.percent scene variables & Operational Mode
scene: Context Scene
Returns:
World Vector.
"""
pg = scene.pdt_pg
if obj.mode == "EDIT":
bm = bmesh.from_edit_mesh(obj.data)
verts = [v for v in bm.verts if v.select]
if len(verts) == 2:
vector_a = verts[0].co
vector_b = verts[1].co
if vector_a is None:
pg.error = PDT_ERR_VERT_MODE
bpy.context.window_manager.popup_menu(oops, title="Error", icon="ERROR")
return None
else:
pg.error = PDT_ERR_SEL_2_V_1_E + str(len(verts)) + " Vertices"
bpy.context.window_manager.popup_menu(oops, title="Error", icon="ERROR")
return None
coord_a = np.array([vector_a.x, vector_a.y, vector_a.z])
coord_b = np.array([vector_b.x, vector_b.y, vector_b.z])
if obj.mode == "OBJECT":
objs = bpy.context.view_layer.objects.selected
if len(objs) != 2:
pg.error = PDT_ERR_SEL_2_OBJS + str(len(objs)) + ")"
bpy.context.window_manager.popup_menu(oops, title="Error", icon="ERROR")
return None
coord_a = np.array(
[
objs[-1].matrix_world.decompose()[0].x,
objs[-1].matrix_world.decompose()[0].y,
objs[-1].matrix_world.decompose()[0].z,
]
)
coord_b = np.array(
[
objs[-2].matrix_world.decompose()[0].x,
objs[-2].matrix_world.decompose()[0].y,
objs[-2].matrix_world.decompose()[0].z,
]
)
coord_c = coord_b - coord_a
coord_d = np.array([0, 0, 0])
_per_v = per_v
if (flip_percent and data != "MV") or data == "MV":
_per_v = 100 - per_v
coord_out = (coord_d+coord_c) * (_per_v / 100) + coord_a
return Vector((coord_out[0], coord_out[1], coord_out[2]))
def obj_check(obj, scene, operation):
"""Check Object & Selection Validity.
Args:
obj: Active Object
scene: Active Scene
operation: The Operation e.g. Create New Vertex
Returns:
Object Bmesh
Validity Boolean.
"""
pg = scene.pdt_pg
_operation = operation.upper()
if obj is None:
pg.error = PDT_ERR_NO_ACT_OBJ
bpy.context.window_manager.popup_menu(oops, title="Error", icon="ERROR")
return None, False
if obj.mode == "EDIT":
bm = bmesh.from_edit_mesh(obj.data)
if _operation == "S":
if len(bm.edges) < 1:
pg.error = f"{PDT_ERR_SEL_1_EDGEM} {len(bm.edges)})"
bpy.context.window_manager.popup_menu(oops, title="Error", icon="ERROR")
return None, False
return bm, True
if len(bm.select_history) >= 1:
vector_a = None
if _operation not in {"D", "E", "F", "G", "N", "S"}:
vector_a = check_selection(1, bm, obj)
else:
verts = [v for v in bm.verts if v.select]
if len(verts) > 0:
vector_a = verts[0]
if vector_a is None:
pg.error = PDT_ERR_VERT_MODE
bpy.context.window_manager.popup_menu(oops, title="Error", icon="ERROR")
return None, False
return bm, True
return None, True
def dis_ang(values, flip_angle, plane, scene):
"""Set Working Axes when using Direction command.
Args:
values: Input Arguments
flip_angle: Whether to flip the angle
plane: Working Plane
scene: Current Scene
Returns:
Directional Offset as a Vector.
"""
pg = scene.pdt_pg
dis_v = float(values[0])
ang_v = float(values[1])
if flip_angle:
if ang_v > 0:
ang_v = ang_v - 180
else:
ang_v = ang_v + 180
pg.angle = ang_v
if plane == "LO":
vector_delta = view_dir(dis_v, ang_v)
else:
a1, a2, _ = set_mode(plane)
vector_delta = Vector((0, 0, 0))
# fmt: off
vector_delta[a1] = vector_delta[a1] + (dis_v * cos(ang_v * pi/180))
vector_delta[a2] = vector_delta[a2] + (dis_v * sin(ang_v * pi/180))
# fmt: on
return vector_delta
# Shader for displaying the Pivot Point as Graphics.
#
SHADER = gpu.shader.from_builtin("3D_UNIFORM_COLOR") if not bpy.app.background else None
def draw_3d(coords, gtype, rgba, context):
"""Draw Pivot Point Graphics.
Note:
Draws either Lines Points, or Tris using defined shader
Args:
coords: Input Coordinates List
gtype: Graphic Type
rgba: Colour in RGBA format
context: Blender bpy.context instance.
Returns:
Nothing.
"""
batch = batch_for_shader(SHADER, gtype, {"pos": coords})
try:
if coords is not None:
bgl.glEnable(bgl.GL_BLEND)
SHADER.bind()
SHADER.uniform_float("color", rgba)
batch.draw(SHADER)
except:
raise PDT_ShaderError
def draw_callback_3d(self, context):
"""Create Coordinate List for Pivot Point Graphic.
Note:
Creates coordinates for Pivot Point Graphic consisting of 6 Tris
and one Point colour coded Red; X axis, Green; Y axis, Blue; Z axis
and a yellow point based upon screen scale
Args:
context: Blender bpy.context instance.
Returns:
Nothing.
"""
scene = context.scene
pg = scene.pdt_pg
region_width = context.region.width
x_loc = pg.pivot_loc.x
y_loc = pg.pivot_loc.y
z_loc = pg.pivot_loc.z
# Scale it from view
areas = [a for a in context.screen.areas if a.type == "VIEW_3D"]
if len(areas) > 0:
scale_factor = abs(areas[0].spaces.active.region_3d.window_matrix.decompose()[2][1])
# Check for orhtographic view and resize
#if areas[0].spaces.active.region_3d.is_orthographic_side_view:
# dim_a = region_width / sf / 60000 * pg.pivot_size
#else:
# dim_a = region_width / sf / 5000 * pg.pivot_size
dim_a = region_width / scale_factor / 50000 * pg.pivot_size
dim_b = dim_a * 0.65
dim_c = dim_a * 0.05 + (pg.pivot_width * dim_a * 0.02)
dim_o = dim_c / 3
# fmt: off
# X Axis
coords = [
(x_loc, y_loc, z_loc),
(x_loc+dim_b, y_loc-dim_o, z_loc),
(x_loc+dim_b, y_loc+dim_o, z_loc),
(x_loc+dim_a, y_loc, z_loc),
(x_loc+dim_b, y_loc+dim_c, z_loc),
(x_loc+dim_b, y_loc-dim_c, z_loc),
]
# fmt: on
colour = (1.0, 0.0, 0.0, pg.pivot_alpha)
draw_3d(coords, "TRIS", colour, context)
coords = [(x_loc, y_loc, z_loc), (x_loc+dim_a, y_loc, z_loc)]
draw_3d(coords, "LINES", colour, context)
# fmt: off
# Y Axis
coords = [
(x_loc, y_loc, z_loc),
(x_loc-dim_o, y_loc+dim_b, z_loc),
(x_loc+dim_o, y_loc+dim_b, z_loc),
(x_loc, y_loc+dim_a, z_loc),
(x_loc+dim_c, y_loc+dim_b, z_loc),
(x_loc-dim_c, y_loc+dim_b, z_loc),
]
# fmt: on
colour = (0.0, 1.0, 0.0, pg.pivot_alpha)
draw_3d(coords, "TRIS", colour, context)
coords = [(x_loc, y_loc, z_loc), (x_loc, y_loc + dim_a, z_loc)]
draw_3d(coords, "LINES", colour, context)
# fmt: off
# Z Axis
coords = [
(x_loc, y_loc, z_loc),
(x_loc-dim_o, y_loc, z_loc+dim_b),
(x_loc+dim_o, y_loc, z_loc+dim_b),
(x_loc, y_loc, z_loc+dim_a),
(x_loc+dim_c, y_loc, z_loc+dim_b),
(x_loc-dim_c, y_loc, z_loc+dim_b),
]
# fmt: on
colour = (0.2, 0.5, 1.0, pg.pivot_alpha)
draw_3d(coords, "TRIS", colour, context)
coords = [(x_loc, y_loc, z_loc), (x_loc, y_loc, z_loc + dim_a)]
draw_3d(coords, "LINES", colour, context)
# Centre
coords = [(x_loc, y_loc, z_loc)]
colour = (1.0, 1.0, 0.0, pg.pivot_alpha)
draw_3d(coords, "POINTS", colour, context)
def scale_set(self, context):
"""Sets Scale by dividing Pivot Distance by System Distance.
Note:
Sets Pivot Point Scale Factors by Measurement
Uses pg.pivotdis & pg.distance scene variables
Args:
context: Blender bpy.context instance.
Returns:
Status Set.
"""
scene = context.scene
pg = scene.pdt_pg
sys_distance = pg.distance
scale_distance = pg.pivot_dis
if scale_distance > 0:
scale_factor = scale_distance / sys_distance
pg.pivot_scale = Vector((scale_factor, scale_factor, scale_factor))
| [
"mathutils.Quaternion",
"numpy.ones",
"numpy.sin",
"numpy.linalg.norm",
"gpu.shader.from_builtin",
"bgl.glEnable",
"bmesh.update_edit_mesh",
"bpy.context.window_manager.popup_menu",
"math.cos",
"numpy.cross",
"math.sin",
"numpy.hstack",
"bmesh.from_edit_mesh",
"numpy.cos",
"numpy.vstack"... | [((7080, 7097), 'mathutils.Vector', 'Vector', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (7086, 7097), False, 'from mathutils import Vector, Quaternion\n'), ((7873, 7890), 'mathutils.Vector', 'Vector', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (7879, 7890), False, 'from mathutils import Vector, Quaternion\n'), ((8722, 8739), 'mathutils.Vector', 'Vector', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (8728, 8739), False, 'from mathutils import Vector, Quaternion\n'), ((9570, 9614), 'mathutils.Quaternion', 'Quaternion', (['(quat_w, quat_x, quat_y, quat_z)'], {}), '((quat_w, quat_x, quat_y, quat_z))\n', (9580, 9614), False, 'from mathutils import Vector, Quaternion\n'), ((9992, 10038), 'numpy.array', 'np.array', (['[vector_a.x, vector_a.y, vector_a.z]'], {}), '([vector_a.x, vector_a.y, vector_a.z])\n', (10000, 10038), True, 'import numpy as np\n'), ((10053, 10099), 'numpy.array', 'np.array', (['[vector_b.x, vector_b.y, vector_b.z]'], {}), '([vector_b.x, vector_b.y, vector_b.z])\n', (10061, 10099), True, 'import numpy as np\n'), ((10114, 10160), 'numpy.array', 'np.array', (['[vector_c.x, vector_c.y, vector_c.z]'], {}), '([vector_c.x, vector_c.y, vector_c.z])\n', (10122, 10160), True, 'import numpy as np\n'), ((10174, 10207), 'numpy.linalg.norm', 'np.linalg.norm', (['(coord_c - coord_b)'], {}), '(coord_c - coord_b)\n', (10188, 10207), True, 'import numpy as np\n'), ((10221, 10254), 'numpy.linalg.norm', 'np.linalg.norm', (['(coord_c - coord_a)'], {}), '(coord_c - coord_a)\n', (10235, 10254), True, 'import numpy as np\n'), ((10268, 10301), 'numpy.linalg.norm', 'np.linalg.norm', (['(coord_b - coord_a)'], {}), '(coord_b - coord_a)\n', (10282, 10301), True, 'import numpy as np\n'), ((10814, 10858), 'numpy.column_stack', 'np.column_stack', (['(coord_a, coord_b, coord_c)'], {}), '((coord_a, coord_b, coord_c))\n', (10829, 10858), True, 'import numpy as np\n'), ((12629, 12676), 'numpy.vstack', 'np.vstack', (['[coord_a, coord_b, coord_c, coord_d]'], {}), '([coord_a, coord_b, 
coord_c, coord_d])\n', (12638, 12676), True, 'import numpy as np\n'), ((12742, 12774), 'numpy.cross', 'np.cross', (['h_stack[0]', 'h_stack[1]'], {}), '(h_stack[0], h_stack[1])\n', (12750, 12774), True, 'import numpy as np\n'), ((12788, 12820), 'numpy.cross', 'np.cross', (['h_stack[2]', 'h_stack[3]'], {}), '(h_stack[2], h_stack[3])\n', (12796, 12820), True, 'import numpy as np\n'), ((12847, 12871), 'numpy.cross', 'np.cross', (['line_a', 'line_b'], {}), '(line_a, line_b)\n', (12855, 12871), True, 'import numpy as np\n'), ((15777, 15796), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (15785, 15796), True, 'import numpy as np\n'), ((15973, 16023), 'mathutils.Vector', 'Vector', (['(coord_out[0], coord_out[1], coord_out[2])'], {}), '((coord_out[0], coord_out[1], coord_out[2]))\n', (15979, 16023), False, 'from mathutils import Vector, Quaternion\n'), ((18436, 18479), 'gpu.shader.from_builtin', 'gpu.shader.from_builtin', (['"""3D_UNIFORM_COLOR"""'], {}), "('3D_UNIFORM_COLOR')\n", (18459, 18479), False, 'import gpu\n'), ((18883, 18931), 'gpu_extras.batch.batch_for_shader', 'batch_for_shader', (['SHADER', 'gtype', "{'pos': coords}"], {}), "(SHADER, gtype, {'pos': coords})\n", (18899, 18931), False, 'from gpu_extras.batch import batch_for_shader\n'), ((5672, 5704), 'bmesh.update_edit_mesh', 'bmesh.update_edit_mesh', (['obj.data'], {}), '(obj.data)\n', (5694, 5704), False, 'import bmesh\n'), ((6949, 6978), 'mathutils.Vector', 'Vector', (['(x_loc, y_loc, z_loc)'], {}), '((x_loc, y_loc, z_loc))\n', (6955, 6978), False, 'from mathutils import Vector, Quaternion\n'), ((7742, 7771), 'mathutils.Vector', 'Vector', (['(x_loc, y_loc, z_loc)'], {}), '((x_loc, y_loc, z_loc))\n', (7748, 7771), False, 'from mathutils import Vector, Quaternion\n'), ((8491, 8508), 'mathutils.Vector', 'Vector', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (8497, 8508), False, 'from mathutils import Vector, Quaternion\n'), ((10413, 10488), 'numpy.sqrt', 'np.sqrt', (['(line_s * (line_s - line_a) * 
(line_s - line_b) * (line_s - line_c))'], {}), '(line_s * (line_s - line_a) * (line_s - line_b) * (line_s - line_c))\n', (10420, 10488), True, 'import numpy as np\n'), ((10901, 10936), 'numpy.hstack', 'np.hstack', (['(base_1, base_2, base_3)'], {}), '((base_1, base_2, base_3))\n', (10910, 10936), True, 'import numpy as np\n'), ((10997, 11065), 'mathutils.Vector', 'Vector', (['(intersect_coord[0], intersect_coord[1], intersect_coord[2])'], {}), '((intersect_coord[0], intersect_coord[1], intersect_coord[2]))\n', (11003, 11065), False, 'from mathutils import Vector, Quaternion\n'), ((12186, 12203), 'mathutils.Vector', 'Vector', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (12192, 12203), False, 'from mathutils import Vector, Quaternion\n'), ((13148, 13189), 'mathutils.Vector', 'Vector', (['(new_x_loc, new_y_loc, new_z_loc)'], {}), '((new_x_loc, new_y_loc, new_z_loc))\n', (13154, 13189), False, 'from mathutils import Vector, Quaternion\n'), ((14276, 14306), 'bmesh.from_edit_mesh', 'bmesh.from_edit_mesh', (['obj.data'], {}), '(obj.data)\n', (14296, 14306), False, 'import bmesh\n'), ((14866, 14912), 'numpy.array', 'np.array', (['[vector_a.x, vector_a.y, vector_a.z]'], {}), '([vector_a.x, vector_a.y, vector_a.z])\n', (14874, 14912), True, 'import numpy as np\n'), ((14931, 14977), 'numpy.array', 'np.array', (['[vector_b.x, vector_b.y, vector_b.z]'], {}), '([vector_b.x, vector_b.y, vector_b.z])\n', (14939, 14977), True, 'import numpy as np\n'), ((16422, 16494), 'bpy.context.window_manager.popup_menu', 'bpy.context.window_manager.popup_menu', (['oops'], {'title': '"""Error"""', 'icon': '"""ERROR"""'}), "(oops, title='Error', icon='ERROR')\n", (16459, 16494), False, 'import bpy\n'), ((16562, 16592), 'bmesh.from_edit_mesh', 'bmesh.from_edit_mesh', (['obj.data'], {}), '(obj.data)\n', (16582, 16592), False, 'import bmesh\n'), ((18139, 18156), 'mathutils.Vector', 'Vector', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (18145, 18156), False, 'from mathutils import Vector, Quaternion\n'), 
((22550, 22600), 'mathutils.Vector', 'Vector', (['(scale_factor, scale_factor, scale_factor)'], {}), '((scale_factor, scale_factor, scale_factor))\n', (22556, 22600), False, 'from mathutils import Vector, Quaternion\n'), ((2782, 2807), 'traceback.extract_stack', 'traceback.extract_stack', ([], {}), '()\n', (2805, 2807), False, 'import traceback\n'), ((8543, 8564), 'math.cos', 'cos', (['(ang_v * pi / 180)'], {}), '(ang_v * pi / 180)\n', (8546, 8564), False, 'from math import cos, sin, pi\n'), ((8599, 8620), 'math.sin', 'sin', (['(ang_v * pi / 180)'], {}), '(ang_v * pi / 180)\n', (8602, 8620), False, 'from math import cos, sin, pi\n'), ((9078, 9093), 'numpy.cos', 'np.cos', (['(yaw / 2)'], {}), '(yaw / 2)\n', (9084, 9093), True, 'import numpy as np\n'), ((9143, 9158), 'numpy.sin', 'np.sin', (['(yaw / 2)'], {}), '(yaw / 2)\n', (9149, 9158), True, 'import numpy as np\n'), ((9207, 9222), 'numpy.cos', 'np.cos', (['(yaw / 2)'], {}), '(yaw / 2)\n', (9213, 9222), True, 'import numpy as np\n'), ((9272, 9287), 'numpy.sin', 'np.sin', (['(yaw / 2)'], {}), '(yaw / 2)\n', (9278, 9287), True, 'import numpy as np\n'), ((9336, 9351), 'numpy.sin', 'np.sin', (['(yaw / 2)'], {}), '(yaw / 2)\n', (9342, 9351), True, 'import numpy as np\n'), ((9401, 9416), 'numpy.cos', 'np.cos', (['(yaw / 2)'], {}), '(yaw / 2)\n', (9407, 9416), True, 'import numpy as np\n'), ((9465, 9480), 'numpy.cos', 'np.cos', (['(yaw / 2)'], {}), '(yaw / 2)\n', (9471, 9480), True, 'import numpy as np\n'), ((9530, 9545), 'numpy.sin', 'np.sin', (['(yaw / 2)'], {}), '(yaw / 2)\n', (9536, 9545), True, 'import numpy as np\n'), ((12711, 12726), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (12718, 12726), True, 'import numpy as np\n'), ((12906, 12923), 'mathutils.Vector', 'Vector', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (12912, 12923), False, 'from mathutils import Vector, Quaternion\n'), ((13237, 13278), 'mathutils.Vector', 'Vector', (['(new_x_loc, new_z_loc, new_y_loc)'], {}), '((new_x_loc, new_z_loc, 
new_y_loc))\n', (13243, 13278), False, 'from mathutils import Vector, Quaternion\n'), ((14751, 14823), 'bpy.context.window_manager.popup_menu', 'bpy.context.window_manager.popup_menu', (['oops'], {'title': '"""Error"""', 'icon': '"""ERROR"""'}), "(oops, title='Error', icon='ERROR')\n", (14788, 14823), False, 'import bpy\n'), ((15166, 15238), 'bpy.context.window_manager.popup_menu', 'bpy.context.window_manager.popup_menu', (['oops'], {'title': '"""Error"""', 'icon': '"""ERROR"""'}), "(oops, title='Error', icon='ERROR')\n", (15203, 15238), False, 'import bpy\n'), ((18985, 19011), 'bgl.glEnable', 'bgl.glEnable', (['bgl.GL_BLEND'], {}), '(bgl.GL_BLEND)\n', (18997, 19011), False, 'import bgl\n'), ((9043, 9059), 'numpy.sin', 'np.sin', (['(roll / 2)'], {}), '(roll / 2)\n', (9049, 9059), True, 'import numpy as np\n'), ((9060, 9077), 'numpy.cos', 'np.cos', (['(pitch / 2)'], {}), '(pitch / 2)\n', (9066, 9077), True, 'import numpy as np\n'), ((9108, 9124), 'numpy.cos', 'np.cos', (['(roll / 2)'], {}), '(roll / 2)\n', (9114, 9124), True, 'import numpy as np\n'), ((9125, 9142), 'numpy.sin', 'np.sin', (['(pitch / 2)'], {}), '(pitch / 2)\n', (9131, 9142), True, 'import numpy as np\n'), ((9172, 9188), 'numpy.cos', 'np.cos', (['(roll / 2)'], {}), '(roll / 2)\n', (9178, 9188), True, 'import numpy as np\n'), ((9189, 9206), 'numpy.sin', 'np.sin', (['(pitch / 2)'], {}), '(pitch / 2)\n', (9195, 9206), True, 'import numpy as np\n'), ((9237, 9253), 'numpy.sin', 'np.sin', (['(roll / 2)'], {}), '(roll / 2)\n', (9243, 9253), True, 'import numpy as np\n'), ((9254, 9271), 'numpy.cos', 'np.cos', (['(pitch / 2)'], {}), '(pitch / 2)\n', (9260, 9271), True, 'import numpy as np\n'), ((9301, 9317), 'numpy.cos', 'np.cos', (['(roll / 2)'], {}), '(roll / 2)\n', (9307, 9317), True, 'import numpy as np\n'), ((9318, 9335), 'numpy.cos', 'np.cos', (['(pitch / 2)'], {}), '(pitch / 2)\n', (9324, 9335), True, 'import numpy as np\n'), ((9366, 9382), 'numpy.sin', 'np.sin', (['(roll / 2)'], {}), '(roll / 2)\n', 
(9372, 9382), True, 'import numpy as np\n'), ((9383, 9400), 'numpy.sin', 'np.sin', (['(pitch / 2)'], {}), '(pitch / 2)\n', (9389, 9400), True, 'import numpy as np\n'), ((9430, 9446), 'numpy.cos', 'np.cos', (['(roll / 2)'], {}), '(roll / 2)\n', (9436, 9446), True, 'import numpy as np\n'), ((9447, 9464), 'numpy.cos', 'np.cos', (['(pitch / 2)'], {}), '(pitch / 2)\n', (9453, 9464), True, 'import numpy as np\n'), ((9495, 9511), 'numpy.sin', 'np.sin', (['(roll / 2)'], {}), '(roll / 2)\n', (9501, 9511), True, 'import numpy as np\n'), ((9512, 9529), 'numpy.sin', 'np.sin', (['(pitch / 2)'], {}), '(pitch / 2)\n', (9518, 9529), True, 'import numpy as np\n'), ((13326, 13367), 'mathutils.Vector', 'Vector', (['(new_y_loc, new_x_loc, new_z_loc)'], {}), '((new_y_loc, new_x_loc, new_z_loc))\n', (13332, 13367), False, 'from mathutils import Vector, Quaternion\n'), ((14549, 14621), 'bpy.context.window_manager.popup_menu', 'bpy.context.window_manager.popup_menu', (['oops'], {'title': '"""Error"""', 'icon': '"""ERROR"""'}), "(oops, title='Error', icon='ERROR')\n", (14586, 14621), False, 'import bpy\n'), ((16742, 16814), 'bpy.context.window_manager.popup_menu', 'bpy.context.window_manager.popup_menu', (['oops'], {'title': '"""Error"""', 'icon': '"""ERROR"""'}), "(oops, title='Error', icon='ERROR')\n", (16779, 16814), False, 'import bpy\n'), ((17311, 17383), 'bpy.context.window_manager.popup_menu', 'bpy.context.window_manager.popup_menu', (['oops'], {'title': '"""Error"""', 'icon': '"""ERROR"""'}), "(oops, title='Error', icon='ERROR')\n", (17348, 17383), False, 'import bpy\n'), ((18231, 18252), 'math.cos', 'cos', (['(ang_v * pi / 180)'], {}), '(ang_v * pi / 180)\n', (18234, 18252), False, 'from math import cos, sin, pi\n'), ((18307, 18328), 'math.sin', 'sin', (['(ang_v * pi / 180)'], {}), '(ang_v * pi / 180)\n', (18310, 18328), False, 'from math import cos, sin, pi\n')] |
import numpy as np
import pandas as pd
class RidgeRegression:
    """Ridge (L2-regularised) linear regression trained with mini-batch SGD."""
    def __init__(self, num_feature):
        """Initialise weights (shape (num_feature, 1)) and bias from N(0, 1).

        Args:
            num_feature: number of input features per sample.
        """
        self.num_feature = num_feature
        self.weights = np.random.randn(num_feature, 1)
        self.bias = np.random.randn()
    def __repr__(self):
        return 'RidgeRegression(weight = %r, bias = %r)' % (self.weights, self.bias)
    def mse(self, data, labels):
        """Evaluate the model; despite the name this returns the SUM of
        squared errors (it is never divided by the number of samples)."""
        predictions = data.dot(self.weights) + self.bias
        return sum(np.square(predictions - labels.reshape(labels.shape[0], 1)))
    def sgd(self, data, labels, epochs, batch_size, learning_rate, lam):
        """Optimise weights and bias with stochastic gradient descent.

        Args:
            data: feature matrix of shape (n_samples, num_feature).
            labels: target vector of shape (n_samples,).
            epochs: number of passes over the shuffled data.
            batch_size: mini-batch size.
            learning_rate: initial step size; decayed per batch below.
            lam: L2 regularisation strength.
        """
        # Append labels as a final column so rows and targets shuffle together.
        ds = np.append(data, labels.reshape(len(labels), 1), axis=1)
        for epoch in range(epochs):
            np.random.shuffle(ds)
            batches = [ds[k: k + batch_size] for k in range(0, len(data), batch_size)]
            for batch in batches:
                # Geometric decay of the step size with a tiny additive floor.
                learning_rate = learning_rate * 0.999 + 0.0000001
                self.__update_batch(batch, learning_rate, lam)
    def __update_batch(self, batch, learning_rate, lam):
        """Apply one gradient step on a batch whose last column is the label."""
        data = batch[:, :-1]
        labels = batch[:, -1]
        errors = data.dot(self.weights) + self.bias - labels.reshape(labels.shape[0], 1)
        # NOTE(review): sum(errors) over a (batch, 1) array yields a length-1
        # array, so self.bias becomes an ndarray after the first update, and
        # gradients are summed (not averaged) over the batch.
        self.bias -= learning_rate * sum(errors)
        self.weights -= learning_rate * (data.T.dot(errors) + lam * self.weights)
def process_data():
    """Read the house-price data, drop unhelpful features, fill missing values.

    Returns:
        tuple: (train feature matrix, log1p-transformed train prices,
        test feature matrix) as numpy arrays.
    """
    '''load data'''
    train = pd.read_csv('../data/train.csv')
    test = pd.read_csv('../data/test.csv')
    # Concatenate with keys so train/test rows are recoverable via .loc below.
    data = pd.concat([train, test], keys=('train', 'test'))
    '''drop the features whose values are almost null'''
    data = data.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu', 'LotFrontage'], axis=1)
    '''split the features into numeric and categorical categories and
    fill the null value with mean value or the most-appearing value'''
    # NOTE(review): iloc[:, :-1] assumes the target is the last column, and
    # _get_numeric_data is a private pandas API — confirm both still hold.
    numeric_data = data.iloc[:, :-1]._get_numeric_data().columns.tolist()
    categorical_data = set(data.columns) - set(numeric_data)
    for col in numeric_data:
        data[col].fillna(data[col].mean(), inplace=True)
    for col in categorical_data:
        data[col].fillna(data[col].mode()[0], inplace=True)
    '''remove features dominated by a single value'''
    data = data.drop(["LandSlope", "Condition2", "LandContour", "Street", "ExterCond",
                      "Condition1", "Functional", "Electrical", "CentralAir",
                      "Heating", "GarageQual", "RoofMatl", "BsmtCond", "PavedDrive",
                      "Utilities", "GarageCond", "BsmtFinType2"], axis=1)
    '''standardize the data'''
    for col in numeric_data:
        col_mean = data[col].mean()
        col_std = data[col].std()
        data[col] = (data[col] - col_mean) / col_std
    '''make the price obey normal distribution to make better predictions'''
    train['SalePrice'] = np.log1p(train['SalePrice'])
    data['SalePrice'] = np.log1p(data['SalePrice'])
    '''check the correlation to drop features irrelevant to price'''
    # Correlations are computed against the raw training frame loaded above.
    corr = train.corr()["SalePrice"]
    irrelevant_features = [corr.index[i] for i in range(len(corr)) if abs(corr[i]) < 0.1]
    data = data.drop(irrelevant_features, axis=1)
    '''get one-hot encoding for the categorical features'''
    data = pd.get_dummies(data)
    prices = np.array(data.loc['train']['SalePrice'])
    train = np.array(data.loc['train'].drop(['SalePrice'], axis=1))
    test = np.array(data.loc['test'].drop(['SalePrice'], axis=1))
    return train, prices, test
def test(mod, tes):
return np.expm1(tes.dot(mod.weights) + mod.bias)
"""load and process data"""
train_data, train_prices, test_data = process_data()
"""train the code"""
model = RidgeRegression(train_data.shape[1])
print(model.mse(train_data, train_prices))
model.sgd(train_data, train_prices, 50000, 10, 0.0001, 0.05)
"""evaluate the trained code"""
result = test(model, test_data)
print(model.mse(train_data, train_prices))
"""dump to csv"""
result = result.reshape(len(result),)
ids = np.arange(1461, 2920)
df = pd.DataFrame({'Id': ids, 'SalePrice': result})
df.to_csv('result.csv')
| [
"pandas.DataFrame",
"numpy.random.randn",
"pandas.read_csv",
"pandas.get_dummies",
"numpy.arange",
"numpy.array",
"pandas.concat",
"numpy.random.shuffle",
"numpy.log1p"
] | [((4264, 4285), 'numpy.arange', 'np.arange', (['(1461)', '(2920)'], {}), '(1461, 2920)\n', (4273, 4285), True, 'import numpy as np\n'), ((4291, 4337), 'pandas.DataFrame', 'pd.DataFrame', (["{'Id': ids, 'SalePrice': result}"], {}), "({'Id': ids, 'SalePrice': result})\n", (4303, 4337), True, 'import pandas as pd\n'), ((1713, 1745), 'pandas.read_csv', 'pd.read_csv', (['"""../data/train.csv"""'], {}), "('../data/train.csv')\n", (1724, 1745), True, 'import pandas as pd\n'), ((1757, 1788), 'pandas.read_csv', 'pd.read_csv', (['"""../data/test.csv"""'], {}), "('../data/test.csv')\n", (1768, 1788), True, 'import pandas as pd\n'), ((1800, 1848), 'pandas.concat', 'pd.concat', (['[train, test]'], {'keys': "('train', 'test')"}), "([train, test], keys=('train', 'test'))\n", (1809, 1848), True, 'import pandas as pd\n'), ((3129, 3157), 'numpy.log1p', 'np.log1p', (["train['SalePrice']"], {}), "(train['SalePrice'])\n", (3137, 3157), True, 'import numpy as np\n'), ((3182, 3209), 'numpy.log1p', 'np.log1p', (["data['SalePrice']"], {}), "(data['SalePrice'])\n", (3190, 3209), True, 'import numpy as np\n'), ((3527, 3547), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {}), '(data)\n', (3541, 3547), True, 'import pandas as pd\n'), ((3561, 3601), 'numpy.array', 'np.array', (["data.loc['train']['SalePrice']"], {}), "(data.loc['train']['SalePrice'])\n", (3569, 3601), True, 'import numpy as np\n'), ((268, 299), 'numpy.random.randn', 'np.random.randn', (['num_feature', '(1)'], {}), '(num_feature, 1)\n', (283, 299), True, 'import numpy as np\n'), ((320, 337), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (335, 337), True, 'import numpy as np\n'), ((927, 948), 'numpy.random.shuffle', 'np.random.shuffle', (['ds'], {}), '(ds)\n', (944, 948), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from src.fast_scboot.c.sample_index_helper import count_clusts, make_index_matrix
# def test_make_index_matrix():
# strat_array = np.asarray([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
# clust_array = np.asarray([0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 6])
# clust_val = np.asarray([0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 6])
# array = np.squeeze(np.dstack([strat_array, clust_array, clust_val])).astype(np.int32)
# result = make_index_matrix(array, 7)
# answer = np.asarray(
# [
# [0, 0, 0, 0, 1],
# [0, 1, 1, 1, 2],
# [1, 2, 2, 3, 1],
# [1, 3, 3, 4, 1],
# [1, 4, 4, 5, 2],
# [2, 5, 5, 7, 1],
# [2, 6, 6, 8, 3],
# ]
# )
# assert np.all(np.isclose(result, answer))
# strat_array = np.asarray([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
# clust_array = np.asarray([0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7])
# array = np.squeeze(np.dstack([strat_array, clust_array, clust_array])).astype(np.int32)
# result = make_index_matrix(array, 8)
# answer = np.asarray(
# [
# [0, 0, 0, 0, 1],
# [0, 1, 1, 1, 2],
# [1, 2, 2, 3, 1],
# [1, 3, 3, 4, 1],
# [1, 4, 4, 5, 2],
# [2, 5, 5, 7, 1],
# [2, 6, 6, 8, 2],
# [2, 7, 7, 10, 1],
# ]
# )
# assert np.all(np.isclose(result, answer))
def test_count_clust_array():
    """Check count_clusts over several stratum/cluster layouts."""
    cases = [
        ([0, 0, 1, 1, 1], [0, 1, 2, 2, 3], 2, [2, 2]),
        ([0, 0, 1, 1, 1], [0, 1, 2, 3, 4], 2, [2, 3]),
        ([0, 0, 1, 1, 2], [0, 1, 2, 3, 4], 3, [2, 2, 1]),
        ([0, 0, 1, 1, 1], [0, 1, 3, 3, 3], 2, [2, 1]),
        (list(range(5)), list(range(5)), 5, [1, 1, 1, 1, 1]),
    ]
    for strata, clusters, num_strata, expected in cases:
        strat_array = np.asarray(strata).astype(np.int32)
        clust_array = np.asarray(clusters).astype(np.int32)
        result = count_clusts(strat_array, clust_array, num_strata,
                              len(clust_array))
        assert np.all(np.isclose(result, np.asarray(expected)))
| [
"numpy.asarray",
"numpy.arange",
"numpy.isclose"
] | [((1684, 1702), 'numpy.asarray', 'np.asarray', (['[2, 2]'], {}), '([2, 2])\n', (1694, 1702), True, 'import numpy as np\n'), ((1965, 1983), 'numpy.asarray', 'np.asarray', (['[2, 3]'], {}), '([2, 3])\n', (1975, 1983), True, 'import numpy as np\n'), ((2246, 2267), 'numpy.asarray', 'np.asarray', (['[2, 2, 1]'], {}), '([2, 2, 1])\n', (2256, 2267), True, 'import numpy as np\n'), ((2530, 2548), 'numpy.asarray', 'np.asarray', (['[2, 1]'], {}), '([2, 1])\n', (2540, 2548), True, 'import numpy as np\n'), ((2781, 2808), 'numpy.asarray', 'np.asarray', (['[1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1])\n', (2791, 2808), True, 'import numpy as np\n'), ((1722, 1748), 'numpy.isclose', 'np.isclose', (['result', 'answer'], {}), '(result, answer)\n', (1732, 1748), True, 'import numpy as np\n'), ((2003, 2029), 'numpy.isclose', 'np.isclose', (['result', 'answer'], {}), '(result, answer)\n', (2013, 2029), True, 'import numpy as np\n'), ((2287, 2313), 'numpy.isclose', 'np.isclose', (['result', 'answer'], {}), '(result, answer)\n', (2297, 2313), True, 'import numpy as np\n'), ((2568, 2594), 'numpy.isclose', 'np.isclose', (['result', 'answer'], {}), '(result, answer)\n', (2578, 2594), True, 'import numpy as np\n'), ((2828, 2854), 'numpy.isclose', 'np.isclose', (['result', 'answer'], {}), '(result, answer)\n', (2838, 2854), True, 'import numpy as np\n'), ((1488, 1515), 'numpy.asarray', 'np.asarray', (['[0, 0, 1, 1, 1]'], {}), '([0, 0, 1, 1, 1])\n', (1498, 1515), True, 'import numpy as np\n'), ((1551, 1578), 'numpy.asarray', 'np.asarray', (['[0, 1, 2, 2, 3]'], {}), '([0, 1, 2, 2, 3])\n', (1561, 1578), True, 'import numpy as np\n'), ((1769, 1796), 'numpy.asarray', 'np.asarray', (['[0, 0, 1, 1, 1]'], {}), '([0, 0, 1, 1, 1])\n', (1779, 1796), True, 'import numpy as np\n'), ((1832, 1859), 'numpy.asarray', 'np.asarray', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (1842, 1859), True, 'import numpy as np\n'), ((2050, 2077), 'numpy.asarray', 'np.asarray', (['[0, 0, 1, 1, 2]'], {}), '([0, 0, 1, 
1, 2])\n', (2060, 2077), True, 'import numpy as np\n'), ((2113, 2140), 'numpy.asarray', 'np.asarray', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (2123, 2140), True, 'import numpy as np\n'), ((2334, 2361), 'numpy.asarray', 'np.asarray', (['[0, 0, 1, 1, 1]'], {}), '([0, 0, 1, 1, 1])\n', (2344, 2361), True, 'import numpy as np\n'), ((2397, 2424), 'numpy.asarray', 'np.asarray', (['[0, 1, 3, 3, 3]'], {}), '([0, 1, 3, 3, 3])\n', (2407, 2424), True, 'import numpy as np\n'), ((2615, 2627), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2624, 2627), True, 'import numpy as np\n'), ((2663, 2675), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2672, 2675), True, 'import numpy as np\n')] |
"""
Machine Learning and Statistic Project 2020
server.py
Student: <NAME>
Student ID: G00376332
------------------------------------------------------------------------
This server.py is part of the MLS Project.
The program is designed to work with the following model files:
- neuron.h5
- neuron.json
- poly.sav
"""
from flask import Flask
import numpy as np
# Silence TensorFlow C++ logging before tensorflow is imported.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
import logging
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from tensorflow import keras
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
# To import models from file
import joblib
# Keras models must be loaded and used inside the same TF session/graph,
# so keep one session for the lifetime of the server process.
session = tf.Session(graph=tf.Graph())
with session.graph.as_default():
    keras.backend.set_session(session)
    # Load the neural-network architecture from JSON (saved during model
    # evaluation in the jupyter notebook).
    # NOTE(review): consider a with-statement for the file handle.
    json_file = open('neuron.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    mn = keras.models.model_from_json(loaded_model_json)
    # Load the trained weights into the reconstructed model.
    mn.load_weights("neuron.h5")
    print("Loaded neuron model from file")
# Load the polynomial regression model saved during model evaluation;
# `poly` must use the same degree as at training time.
poly = PolynomialFeatures(degree = 10)
filename = 'poly.sav'
mp = joblib.load(filename)
print("Loaded poly model from file")
# Create the Flask app
app = Flask(__name__,
            static_url_path='',
            static_folder='static')
@app.route("/")
def home():
return app.send_static_file('index.html')
# NN api response
# curl http://127.0.0.1:5000/api/outputnn/12.52
@app.route('/api/outputnn/<speed>')
def outputnn(speed):
    """API endpoint: neural-network prediction for the given speed."""
    return {"outputnn" : nn_output(float(speed))}
#Poly api response
# curl http://127.0.0.1:5000/api/outputpoly/12.52
@app.route('/api/outputpoly/<speed>')
def outputpoly(speed):
    """API endpoint: polynomial-regression prediction for the given speed."""
    return {"outputpoly" : poly_output(float(speed))}
def nn_output(speed):
    """Predict with the neural-network model.

    Args:
        speed (float): input value; predictions only for 0 < speed < 26.

    Returns:
        Rounded prediction as a nested list, 0 for out-of-range inputs,
        or the string "Model Error" if prediction fails.
    """
    with session.graph.as_default():
        keras.backend.set_session(session)
        try:
            if 0 < speed < 26:
                s = np.array([speed])
                r_neuron = mn.predict(s.tolist()).tolist()
                return np.round(r_neuron, 2).tolist()
            else:
                return 0
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        except Exception:
            return("Model Error")
def poly_output(speed):
    """Predict with the polynomial-regression model.

    Args:
        speed (float): input value; predictions only for 0 < speed < 26.

    Returns:
        Rounded prediction as a nested list, 0 for out-of-range inputs,
        or the string "Model Error" if prediction fails.
    """
    with session.graph.as_default():
        keras.backend.set_session(session)
        try:
            if 0 < speed < 26:
                s = np.array([speed])
                r_poly = mp.predict(poly.fit_transform([s]).tolist()).tolist()
                return np.round(r_poly, 2).tolist()
            else:
                return 0
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        except Exception:
            return("Model Error")
#Run Flask
if __name__ == '__main__' :
    # Development server only (debug mode); not for production use.
    app.run(debug= True)
"flask.Flask",
"tensorflow.keras.backend.set_session",
"sklearn.preprocessing.PolynomialFeatures",
"numpy.array",
"tensorflow.Graph",
"joblib.load",
"numpy.round",
"logging.getLogger",
"tensorflow.keras.models.model_from_json"
] | [((1449, 1478), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(10)'}), '(degree=10)\n', (1467, 1478), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1508, 1529), 'joblib.load', 'joblib.load', (['filename'], {}), '(filename)\n', (1519, 1529), False, 'import joblib\n'), ((1597, 1656), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '""""""', 'static_folder': '"""static"""'}), "(__name__, static_url_path='', static_folder='static')\n", (1602, 1656), False, 'from flask import Flask\n'), ((541, 572), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (558, 572), False, 'import logging\n'), ((945, 979), 'tensorflow.keras.backend.set_session', 'keras.backend.set_session', (['session'], {}), '(session)\n', (970, 979), False, 'from tensorflow import keras\n'), ((1183, 1230), 'tensorflow.keras.models.model_from_json', 'keras.models.model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (1211, 1230), False, 'from tensorflow import keras\n'), ((898, 908), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (906, 908), True, 'import tensorflow as tf\n'), ((2193, 2227), 'tensorflow.keras.backend.set_session', 'keras.backend.set_session', (['session'], {}), '(session)\n', (2218, 2227), False, 'from tensorflow import keras\n'), ((2533, 2567), 'tensorflow.keras.backend.set_session', 'keras.backend.set_session', (['session'], {}), '(session)\n', (2558, 2567), False, 'from tensorflow import keras\n'), ((2285, 2302), 'numpy.array', 'np.array', (['[speed]'], {}), '([speed])\n', (2293, 2302), True, 'import numpy as np\n'), ((2624, 2641), 'numpy.array', 'np.array', (['[speed]'], {}), '([speed])\n', (2632, 2641), True, 'import numpy as np\n'), ((2369, 2390), 'numpy.round', 'np.round', (['r_neuron', '(2)'], {}), '(r_neuron, 2)\n', (2377, 2390), True, 'import numpy as np\n'), ((2728, 2747), 'numpy.round', 'np.round', (['r_poly', '(2)'], {}), '(r_poly, 2)\n', (2736, 
2747), True, 'import numpy as np\n')] |
"""
=====================================
Custom tick formatter for time series
=====================================
When plotting time series, e.g., financial time series, one often wants
to leave out days on which there is no data, i.e. weekends. The example
below shows how to use an 'index formatter' to achieve the desired plot
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
# Load a numpy record array from yahoo csv data with fields date, open, close,
# volume, adj_close from the mpl-data/example directory. The record array
# stores the date as an np.datetime64 with a day unit ('D') in the date column.
with cbook.get_sample_data('goog.npz') as datafile:
    r = np.load(datafile)['price_data'].view(np.recarray)
r = r[-30:]  # get the last 30 days
# Matplotlib works better with datetime.datetime than np.datetime64, but the
# latter is more portable.
date = r.date.astype('O')
# first we'll do it the default way, with gaps on weekends
fig, axes = plt.subplots(ncols=2, figsize=(8, 4))
ax = axes[0]
ax.plot(date, r.adj_close, 'o-')
ax.set_title("Default")
# Slant the shared x tick labels so the dates do not overlap.
fig.autofmt_xdate()
# next we'll write a custom formatter
N = len(r)
ind = np.arange(N)  # the evenly spaced plot indices
def format_date(x, pos=None):
    """Map a plot index back to its date label, clamped to the valid range."""
    idx = int(x + 0.5)
    idx = max(0, min(idx, N - 1))
    return date[idx].strftime('%Y-%m-%d')
ax = axes[1]
ax.plot(ind, r.adj_close, 'o-')
# Ticks sit at integer indices; format_date maps them back to date strings,
# so weekend gaps disappear from the plot.
ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
ax.set_title("Custom tick formatter")
fig.autofmt_xdate()
plt.show()
| [
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.cbook.get_sample_data",
"matplotlib.ticker.FuncFormatter",
"numpy.arange",
"matplotlib.pyplot.subplots"
] | [((1041, 1078), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(8, 4)'}), '(ncols=2, figsize=(8, 4))\n', (1053, 1078), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1237), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1234, 1237), True, 'import numpy as np\n'), ((1565, 1575), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1573, 1575), True, 'import matplotlib.pyplot as plt\n'), ((698, 731), 'matplotlib.cbook.get_sample_data', 'cbook.get_sample_data', (['"""goog.npz"""'], {}), "('goog.npz')\n", (719, 731), True, 'import matplotlib.cbook as cbook\n'), ((1471, 1504), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['format_date'], {}), '(format_date)\n', (1491, 1504), True, 'import matplotlib.ticker as ticker\n'), ((753, 770), 'numpy.load', 'np.load', (['datafile'], {}), '(datafile)\n', (760, 770), True, 'import numpy as np\n')] |
import sys,os
sys.path.append(os.getcwd())
from src.QDT import QdtClassifier
from src.utility_factors import valueFunction1, weightingFunction, logitFunc, CPT_logit
from src.attraction_factors import dummy_attraction, ambiguity_aversion,QDT_attraction
import numpy as np
class QdtPredicter():
    """Ensemble of QDT classifiers, one fitted per CPC-18 game block."""
    def __init__(self, names, params, num_util_params):
        """Build one QdtClassifier per block (five blocks in total).

        Args:
            names: parameter names forwarded to each classifier.
            params: per-block parameter vectors; the first ``num_util_params``
                entries are utility parameters, the remainder attraction ones.
            num_util_params: split point between utility and attraction params.
        """
        self.clfs = [
            QdtClassifier(
                "clf1",
                "data/cpc18/train_block{}_games.csv".format(block + 1),
                names,
                params[block][:num_util_params],
                CPT_logit,
                params[block][num_util_params:],
                QDT_attraction,
            )
            for block in range(5)
        ]
    def classify(self, Ha, pHa, La, LotShapeA, LotNumA, Hb, pHb, Lb, LotShapeB, LotNumB, Amb, Corr):
        """Classify one game with every per-block classifier.

        Returns:
            np.ndarray: one prediction per block, in block order.
        """
        args = (Ha, pHa, La, LotShapeA, LotNumA,
                Hb, pHb, Lb, LotShapeB, LotNumB, Amb, Corr)
        return np.array([clf.classify(*args) for clf in self.clfs])
def main():
    """Build a 5-block QDT predicter from pre-fitted parameters and classify one game."""
    num_util_param=6 #Number of utility parameters
    # Per-block fitted parameters: the first 6 entries are the utility
    # parameters (alpha, _lambda, beta, delta, gamma, phi), the last 2 the
    # attraction parameters (ambAve, tanhCoe).
    param1=[0.82423076,0.70935712,0.94564408,1.06578517,0.89884976,0.30544674,
    0.03596667 ,3.28094825]
    param2=[0.87921886 ,0.74456607 ,0.97137993 ,0.98078196, 0.98707779 ,0.31050745,
    0.04692646 ,2.71401311]
    param3=[0.88313371 ,0.70030474 ,0.98540364 ,0.98057811, 0.9864834 , 0.28892273,
    0.05970801, 1.20828292]
    param4=[0.89381309 ,0.82025816, 0.95580821 ,0.99715025, 0.97985058 ,0.27428034,
    0.02994283 ,1.89892924]
    param5=[0.89283243, 0.74684717 ,0.98413806, 1.00156143 ,0.97659442, 0.28034815,
    0.09734114 ,0.88784065]
    myclf=QdtPredicter("alpha,_lambda,beta,delta,gamma,phi,ambAve,tanhCoe".split(","),
                  [param1,param2,param3,param4,param5],
                  num_util_param)
    # Classify a single sample game and print the per-block predictions.
    print(myclf.classify(10,0.5,0,"-",1,10,0.9,0,"-",1,0,0))
if __name__=="__main__":
    main()
| [
"os.getcwd",
"numpy.array"
] | [((30, 41), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (39, 41), False, 'import sys, os\n'), ((1258, 1271), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (1266, 1271), True, 'import numpy as np\n')] |
import os
import pandas as pd
import numpy as np
import random
from leo_segmentation.utils import meta_classes_selector, print_to_string_io, \
train_logger, val_logger, numpy_to_tensor, load_config
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils, datasets
from PIL import Image
from collections import defaultdict
config = load_config()
# Root data directory for the dataset selected in the config file.
DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)),\
            "leo_segmentation", "data", config.selected_data)
TRAIN_DIR = os.path.join(DATA_DIR, "train")
VAL_DIR = os.path.join(DATA_DIR, "val")
if config.selected_data == "pascal_5i":
    # The 20 PASCAL VOC object classes, indexed 0..19. pascal-5i folds
    # partition them into 4 groups of 5 for the meta-train/meta-val split.
    CLASSES = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car",
               "cat", "chair", "cow", "diningtable", "dog", "horse",
               "motorbike", "person", "pottedplant", "sheep", "sofa",
               "train", "tvmonitor"]
    CLASSES_DICT = {i:CLASSES[i] for i in range(len(CLASSES))}
def class_to_img_mapping(fold, mode):
    """Build {class name: [image file names]} plus per-class counts.

    For "meta_train" the three training folds (all folds except `fold`) are
    read; otherwise the validation split of `fold` is read.

    Args:
        fold (int): held-out pascal-5i fold in {0, 1, 2, 3}.
        mode (str): "meta_train" selects the training splits.

    Returns:
        tuple: (defaultdict class->filenames, defaultdict class->count)
    """
    def _parse_split(path):
        # Each line looks like "<filename>__<1-based class index>".
        with open(path, "r") as fh:
            rows = [line.strip("\n").split("__") for line in fh.readlines()]
        return [(row[0], int(row[1])) for row in rows]

    mapping = defaultdict(list)
    counts = defaultdict(int)
    if mode == "meta_train":
        remaining_folds = [f for f in (0, 1, 2, 3) if f != fold]
        split_files = [os.path.join(DATA_DIR, 'Binary_map_aug', 'train',
                                    f'split{f}_train.txt')
                       for f in remaining_folds]
    else:
        split_files = [os.path.join(DATA_DIR, 'Binary_map_aug', 'val',
                                    f'split{fold}_val.txt')]
    for split_path in split_files:
        for fname, class_idx in _parse_split(split_path):
            cls_name = CLASSES_DICT[class_idx - 1]
            mapping[cls_name].append(fname)
            counts[cls_name] += 1
    return mapping, counts
class Transform_image:
    """Preprocess input images into normalized CHW float arrays.

    Args:
        img_width (int): target image width
        img_height (int): target image height
        normalize (bool): apply ImageNet mean/std normalization when True
    """
    def __init__(self, img_width, img_height, normalize=True):
        self.img_width = img_width
        self.img_height = img_height
        self.normalize = normalize

    def __call__(self, im):
        """Rotate portrait images to landscape, resize, scale to [0, 1],
        move channels first and (optionally) normalize.

        Args:
            im (PIL.Image): input image

        Returns:
            np.ndarray: array of shape (3, img_height, img_width)
        """
        width, height = im.size
        if height > width:
            # Force landscape orientation before resizing.
            im = im.transpose(method=Image.ROTATE_270)
        resized = im.resize((self.img_width, self.img_height))
        arr = np.array(resized).astype(np.float32)
        arr = arr / 255.0
        arr = np.transpose(arr, (2, 0, 1))  # HWC -> CHW
        if self.normalize:
            # ImageNet statistics used by all pytorch pretrained models.
            channel_mean = np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
            channel_std = np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)
            arr = (arr - channel_mean) / channel_std
        return arr
class Transform_mask:
    """Performs data preprocessing steps on input masks.

    Args:
        img_width (int): target mask width
        img_height (int): target mask height
    """
    def __init__(self, img_width, img_height):
        self.img_width = img_width
        self.img_height = img_height

    def __call__(self, im):
        """Convert a PIL mask image into a binary uint8 numpy array.

        Args:
            im (PIL.Image): input mask

        Returns:
            np.ndarray: (img_height, img_width) array of 0/1 values
        """
        w, h = im.size
        if h > w:
            # Keep landscape orientation, matching Transform_image.
            im = im.transpose(method=Image.ROTATE_270)
        # resample=0 -> nearest neighbour, preserving hard mask edges.
        im = im.resize((self.img_width, self.img_height), resample=0)
        im = np.array(im) / 255
        if im.ndim > 2:
            # Collapse RGB(A) masks to a single channel. FIX: the original
            # called `rgb2gray`, which is neither defined nor imported in
            # this module (NameError at runtime); use the Rec.709 luma
            # weights (same as skimage.color.rgb2gray) inline instead.
            im = im[..., :3]
            im = np.dot(im, np.array([0.2125, 0.7154, 0.0721]))
        im = (im > 0.5)
        im = im.astype("uint8")
        return im
class PascalDatagenerator(Dataset):
    """Sample task data for Meta-train, Meta-val and Meta-test tasks.

    Args:
        dataset (str): dataset name
        data_type (str): "meta_train", "meta_val" or "meta_test"
    """
    def __init__(self, dataset, data_type="meta_train"):
        fold = config.data_params.fold
        self._dataset = dataset
        self.mode = data_type
        self.class_img_mapping, \
        self.class_counts = class_to_img_mapping(fold, self.mode)
        # Fold f holds out classes [5f, 5f+5) for validation; the rest train.
        val_classes = CLASSES[fold * 5:(fold + 1) * 5]
        self.classes = list(set(CLASSES) - set(val_classes)) \
            if self.mode == "meta_train" else val_classes
        img_dims = config.data_params.img_dims
        self.transform_image = Transform_image(img_dims.width, img_dims.height)
        self.transform_mask = Transform_mask(img_dims.width, img_dims.height)
    def __len__(self):
        # NOTE(review): self._dataset is the dataset *name* passed to
        # __init__, so this returns the length of that string — confirm.
        return len(self._dataset)
    def __getitem__(self, idx):
        _config = config.data_params
        # NOTE(review): dataset_root_path is computed but never used below.
        dataset_root_path = os.path.join(os.path.dirname(__file__),
                                         config.data_path, self._dataset)
        num_classes = _config.num_classes
        n_train_per_class = _config.n_train_per_class[self.mode]
        n_val_per_class = _config.n_val_per_class[self.mode]
        batch_size = _config.num_tasks[self.mode]
        if batch_size > len(self.classes):
            raise ValueError("number of tasks must be less than the number \
                    of available classes")
        tr_imgs = []
        tr_masks = []
        val_imgs = []
        val_masks = []
        classes_selected = []
        classes = self.classes.copy()
        total_tr_fnames = []
        total_vl_fnames = []
        for i in range(batch_size):
            # Draw one not-yet-used class per task (removed from `classes`
            # below, so every task gets a distinct class).
            selected_class = (np.random.choice(classes, num_classes,
                                               replace=False))[0]
            fname_list = self.class_img_mapping[selected_class]
            classes_selected.append(selected_class)
            classes.remove(selected_class)
            tr_img_paths = []
            tr_masks_paths = []
            val_img_paths = []
            val_masks_paths = []
            # Sample image paths belonging to classes
            random.shuffle(fname_list)
            if self.mode == "meta_train":
                img_paths = list(np.random.choice(fname_list,
                    n_train_per_class + n_val_per_class, replace=False))
                tr_img_paths.extend(img_paths[:n_train_per_class])
                tr_masks_paths.extend(img_paths[:n_train_per_class])
                val_img_paths.extend(img_paths[n_train_per_class:])
                val_masks_paths.extend(img_paths[n_train_per_class:])
                total_tr_fnames.extend(img_paths[:n_train_per_class])
                total_vl_fnames.extend(img_paths[n_train_per_class:])
            else:
                # Outside meta-training, every non-sampled image becomes val.
                t_img_paths = list(np.random.choice(fname_list,
                    n_train_per_class, replace=False))
                v_img_paths = list(set(fname_list) - set(t_img_paths))
                tr_img_paths.extend(t_img_paths)
                tr_masks_paths.extend(t_img_paths)
                val_img_paths.extend(v_img_paths)
                val_masks_paths.extend(v_img_paths)
                total_tr_fnames.extend(t_img_paths)
                total_vl_fnames.extend(v_img_paths)
            mode = self.mode.replace("meta_", "")
            # Mask folders are named by 1-based class index.
            tr_img_paths = [os.path.join(DATA_DIR, 'JPEGImages', f'{img}.jpg') for img in tr_img_paths]
            tr_masks_paths = [os.path.join(DATA_DIR, 'Binary_map_aug', mode, \
                f'{CLASSES.index(selected_class) + 1}', f'{mask}.png') for mask in
                tr_masks_paths]
            val_img_paths = [os.path.join(DATA_DIR, 'JPEGImages', f'{img}.jpg') for img in val_img_paths]
            val_masks_paths = [os.path.join(DATA_DIR, 'Binary_map_aug', mode, \
                f'{CLASSES.index(selected_class) + 1}', f'{mask}.png') for mask in
                val_masks_paths]
            # Store np.arrays for train and val images for all data types
            # Store only paths of val images for Meta-val and Meta-test
            # print(tr_img_paths)
            tr_imgs.append(np.array([self.transform_image(Image.open(i))
                                      for i in tr_img_paths]))
            tr_masks.append(np.array([self.transform_mask(Image.open(i))
                                       for i in tr_masks_paths]))
            if self.mode == "meta_train":
                val_imgs.append(np.array([self.transform_image(Image.open(i))
                                           for i in val_img_paths]))
                val_masks.append(np.array([self.transform_mask(Image.open(i))
                                            for i in val_masks_paths]))
            else:
                val_imgs.append(val_img_paths)
                val_masks.append(val_masks_paths)
        assert len(classes_selected) == len(set(classes_selected)), \
            "classes are not unique"
        if self.mode == "meta_train":
            tr_imgs, tr_masks, val_imgs, val_masks = numpy_to_tensor(np.array(tr_imgs)), \
                numpy_to_tensor(np.array(tr_masks)), \
                numpy_to_tensor(np.array(val_imgs)), \
                numpy_to_tensor(np.array(val_masks))
            return tr_imgs, tr_masks, val_imgs, val_masks, \
                classes_selected, total_tr_fnames, total_vl_fnames
        else:
            # val_imgs and val_masks are lists
            tr_imgs, tr_masks = numpy_to_tensor(np.array(tr_imgs)), numpy_to_tensor(np.array(tr_masks))
            return tr_imgs, tr_masks, val_imgs, val_masks, classes_selected, \
                total_tr_fnames, total_vl_fnames
    def get_batch_data(self):
        """Convenience wrapper: sample one batch of tasks."""
        return self.__getitem__(0)
class GeneralDatagenerator(Dataset):
    """Sample task data for Meta-train, Meta-val and Meta-test tasks.

    Args:
        dataset (str): dataset name
        data_type (str): "meta_train", "meta_val" or "meta_test"
    """
    def __init__(self, dataset, data_type):
        self._dataset = dataset
        self._data_type = data_type
        self.classes_dict = meta_classes_selector(config, dataset)
        img_dims = config.data_params.img_dims
        self.transform_image = Transform_image(img_dims.width, img_dims.height)
        self.transform_mask = Transform_mask(img_dims.width, img_dims.height)
    def __len__(self):
        return len(self._dataset)
    def __getitem__(self, idx):
        """Sample a batch of tasks.

        Returns train tensors plus either val tensors (meta_train) or raw
        val path lists (meta_val/meta_test), the selected classes, and the
        combined train/val path lists.
        """
        _config = config.data_params
        dataset_root_path = os.path.join(os.path.dirname(__file__),
                                         config.data_path, self._dataset)
        # Copy the class list: the original aliased the list stored in
        # classes_dict, so the remove() below depleted it across successive
        # __getitem__ calls, eventually raising on np.random.choice.
        classes = list(self.classes_dict[self._data_type])
        num_classes = _config.num_classes
        n_train_per_class = _config.n_train_per_class[self._data_type]
        n_val_per_class = _config.n_val_per_class[self._data_type]
        batch_size = _config.num_tasks[self._data_type] if self._data_type != "meta_test" \
                        else len(classes)
        img_datasets = datasets.ImageFolder(root=os.path.join(
                            dataset_root_path, "images"))
        if batch_size > len(classes):
            raise ValueError("number of tasks must be less than the number \
                             of available classes")
        # Single definition (the original defined this helper twice; the
        # first definition was immediately shadowed by the second).
        def data_path_assertions(data_path, img_or_mask):
            """Make assertions over selected paths."""
            temp = data_path.split(os.sep)
            _img_or_mask, _selected_class = temp[-3], temp[-2]
            assert _img_or_mask == img_or_mask, "wrong data type (image or mask)"
            # assert _selected_class == selected_class, "wrong class (selected class)"
        tr_imgs = []
        tr_masks = []
        val_imgs = []
        val_masks = []
        classes_selected = []
        for i in range(batch_size):
            # Pick one not-yet-used class for this task.
            selected_class = (np.random.choice(classes, num_classes,
                                               replace=False))[0]
            classes_selected.append(selected_class)
            classes.remove(selected_class)
            tr_img_paths = []
            tr_masks_paths = []
            val_img_paths = []
            val_masks_paths = []
            # Sample image paths belonging to classes
            img_paths = [i[0] for i in img_datasets.imgs
                         if selected_class in i[0]]
            random.shuffle(img_paths)
            if self._data_type == "meta_train":
                img_paths = list(np.random.choice(img_paths,
                                 n_train_per_class + n_val_per_class,
                                 replace=False))
            for img_path in img_paths:
                data_path_assertions(img_path, "images")
            # Sample mask paths and convert them to the correct extensions
            mask_paths = [i.replace("images", "masks") for i in img_paths]
            mask_paths = [i.replace("jpg", "png") if not os.path.exists(i)
                          else i for i in mask_paths]
            # Create a list in the case only one image path is created
            img_paths = [img_paths] if type(img_paths) == str else img_paths
            mask_paths = [mask_paths] if type(mask_paths) == str else mask_paths
            # Divide sample paths to train and val splits
            tr_img_paths.extend(img_paths[:n_train_per_class])
            tr_masks_paths.extend(mask_paths[:n_train_per_class])
            val_img_paths.extend(img_paths[n_train_per_class:])
            val_masks_paths.extend(mask_paths[n_train_per_class:])
            # Store np.arrays for train and val images for all data types
            # Store only paths of val images for Meta-val and Meta-test
            tr_imgs.append(np.array([self.transform_image(Image.open(i))
                                      for i in tr_img_paths]))
            tr_masks.append(np.array([self.transform_mask(Image.open(i))
                                       for i in tr_masks_paths]))
            if self._data_type in ["meta_val", "meta_test"]:
                val_imgs.append(val_img_paths)
                val_masks.append(val_masks_paths)
            else:
                val_imgs.append(np.array([self.transform_image(Image.open(i))
                                           for i in val_img_paths]))
                val_masks.append(np.array([self.transform_mask(Image.open(i))
                                            for i in val_masks_paths]))
        assert len(classes_selected) == len(set(classes_selected)),\
            "classes are not unique"
        total_tr_img_paths = tr_imgs + tr_masks
        total_vl_img_paths = val_imgs + val_masks
        if self._data_type == "meta_train":
            tr_data, tr_data_masks, val_data, val_masks = numpy_to_tensor(np.array(tr_imgs)),\
                numpy_to_tensor(np.array(tr_masks)),\
                numpy_to_tensor(np.array(val_imgs)),\
                numpy_to_tensor(np.array(val_masks))
            return tr_data, tr_data_masks, val_data, val_masks,\
                classes_selected, total_tr_img_paths, total_vl_img_paths
        else:
            # val_imgs and val_masks remain path lists for meta_val/meta_test
            tr_data, tr_data_masks = numpy_to_tensor(np.array(tr_imgs)), numpy_to_tensor(np.array(tr_masks))
            return tr_data, tr_data_masks, val_imgs, val_masks,\
                classes_selected, total_tr_img_paths, total_vl_img_paths
    def get_batch_data(self):
        """Return the task batch at a fixed index (0)."""
        return self.__getitem__(0)
class TrainingStats:
    """Stores and reports train/val/test statistics across episodes."""
    def __init__(self):
        # Per-mode loss stats and per-mode IOU dicts accumulated per episode.
        self._meta_train_stats = []
        self._meta_val_stats = []
        self._meta_test_stats = []
        self._meta_train_ious = []
        self._meta_val_ious = []
        self._meta_test_ious = []
        self._stats = []
        self.config = config
        # Best (episode, IOU) observed during meta-validation.
        self._best_episode = 0
        self._best_iou = 0
    @property
    def best_episode(self):
        return self._best_episode
    def set_episode(self, episode):
        self.episode = episode
    def set_mode(self, mode):
        self.mode = mode
    def set_batch(self, batch):
        self.batch = batch
    def update_after_restart(self):
        """Log a restart marker to both train and val logs."""
        msg = f"======= Restarted at Episode {self.episode} ====== \n"
        train_logger.debug(msg)
        val_logger.debug(msg)
        print(msg)
    def update_stats(self, **kwargs):
        """Record one episode's validation loss and mean IOUs and log them.

        Expects kwargs "total_val_loss" and "mean_iou_dict".
        """
        self.total_val_loss = kwargs["total_val_loss"]
        self.mean_iou_dict = kwargs["mean_iou_dict"]
        self.mean_iou_dict["episode"] = self.episode
        _stats = {
            "mode": self.mode,
            "episode": self.episode,
            "total_val_loss": self.total_val_loss,
        }
        if self.mode == "meta_train":
            self._meta_train_stats.append(_stats)
            self._meta_train_ious.append(self.mean_iou_dict)
        elif self.mode == "meta_val":
            self._meta_val_stats.append(_stats)
            self._meta_val_ious.append(self.mean_iou_dict)
        else:
            self._meta_test_stats.append(_stats)
            self._meta_test_ious.append(self.mean_iou_dict)
        # Average over per-class IOUs (episode key excluded from the mean).
        mean_iou_dict = self.mean_iou_dict.copy()
        mean_iou_dict.pop("episode")
        average_iou = np.mean([v for _, v in mean_iou_dict.items()])
        self.update_best_iou(average_iou)
        mean_iou_string = print_to_string_io(mean_iou_dict, True)
        if self.mode == "meta_val":
            start_msg = "\t\t ======= Meta Val IOUs ====== \n"
        else:
            start_msg = ""
        # FIX: removed the stray trailing line-continuation after the last
        # f-string, which joined the following `if` statement into this
        # expression and made the original a SyntaxError.
        msg = f"Mode: {self.mode} | Episode: {self.episode: 03d} | " \
            + f"Total Val Loss: {self.total_val_loss:2f} " \
            + f"\n{mean_iou_string} "
        if self.mode == "meta_val":
            end_msg = f"====> Average of all IOUs: {average_iou}\n" \
                + f" ====> Best Episode: {self._best_episode}"\
                + f" | Best IOU: {self._best_iou}\n"
        else:
            end_msg = "\n"
        self.stats_msg = start_msg + msg + end_msg
        if self.mode == "meta_train":
            train_logger.debug(self.stats_msg)
        else:
            val_logger.debug(self.stats_msg)
        self._stats.append({
            "mode": self.mode,
            "episode": self.episode,
            "total_val_loss": self.total_val_loss,
            "mean_iou_dict": self.mean_iou_dict
        })
    def get_stats(self, mode):
        """Return the recorded loss stats for *mode* as a DataFrame."""
        if mode == "meta_train":
            stats = self._meta_train_stats
        elif mode == "meta_val":
            stats = self._meta_val_stats
        else:
            stats = self._meta_test_stats
        return pd.DataFrame(stats)
    def log_model_stats_to_file(self):
        """Append the latest episode stats to the experiment's log file."""
        model_root = os.path.join(os.path.dirname(__file__), self.config.data_path, "models")
        model_dir = os.path.join(model_root, "experiment_{}" \
                                 .format(self.config.experiment.number))
        log_file = "train_log.txt" if self.mode == "meta_train" else "val_log.txt"
        with open(os.path.join(model_dir, log_file), "a") as f:
            mean_iou_string = print_to_string_io(self.mean_iou_dict, pretty_print=True)
            msg = f"\nmode:{self.mode}, episode:{self.episode:03d}, "
            msg += f"total_val_loss:{self.total_val_loss:2f} \nval_mean_iou:{mean_iou_string}"
            f.write(msg)
    def get_ious(self, mode):
        """Return the recorded per-class IOUs for *mode* as a DataFrame."""
        if mode == "meta_train":
            ious = self._meta_train_ious
        elif mode == "meta_val":
            ious = self._meta_val_ious
        else:
            ious = self._meta_test_ious
        return pd.DataFrame(ious)
    def get_latest_stats(self):
        return self._stats[-1]
    def disp_stats(self):
        print(self.stats_msg)
    def update_best_iou(self, iou):
        """Track the best IOU/episode, only while meta-validating."""
        if self.mode == "meta_val":
            if iou > self._best_iou:
                self._best_iou = iou
                self._best_episode = self.episode
def rgb2gray(rgb):
    """Convert an RGB image array to grayscale.

    Applies the ITU-R 601 luma weights to the first three channels; any
    extra channels (e.g. alpha) are ignored.
    """
    # https://stackoverflow.com/questions/12201577/how-can-i-convert-an-rgb-image-into-grayscale-in-python
    luma_weights = np.array([0.2989, 0.5870, 0.1140])
    return rgb[..., :3] @ luma_weights
| [
"pandas.DataFrame",
"leo_segmentation.utils.meta_classes_selector",
"leo_segmentation.utils.print_to_string_io",
"random.shuffle",
"os.path.dirname",
"numpy.transpose",
"os.path.exists",
"PIL.Image.open",
"collections.defaultdict",
"numpy.array",
"leo_segmentation.utils.load_config",
"leo_segm... | [((372, 385), 'leo_segmentation.utils.load_config', 'load_config', ([], {}), '()\n', (383, 385), False, 'from leo_segmentation.utils import meta_classes_selector, print_to_string_io, train_logger, val_logger, numpy_to_tensor, load_config\n'), ((521, 552), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""train"""'], {}), "(DATA_DIR, 'train')\n", (533, 552), False, 'import os\n'), ((563, 592), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""val"""'], {}), "(DATA_DIR, 'val')\n", (575, 592), False, 'import os\n'), ((1291, 1308), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1302, 1308), False, 'from collections import defaultdict\n'), ((1328, 1344), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1339, 1344), False, 'from collections import defaultdict\n'), ((21167, 21211), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.2989, 0.587, 0.114]'], {}), '(rgb[..., :3], [0.2989, 0.587, 0.114])\n', (21173, 21211), True, 'import numpy as np\n'), ((426, 451), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (441, 451), False, 'import os\n'), ((1870, 1941), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""Binary_map_aug"""', '"""val"""', 'f"""split{fold}_val.txt"""'], {}), "(DATA_DIR, 'Binary_map_aug', 'val', f'split{fold}_val.txt')\n", (1882, 1941), False, 'import os\n'), ((2961, 2973), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (2969, 2973), True, 'import numpy as np\n'), ((3048, 3075), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (3060, 3075), True, 'import numpy as np\n'), ((10732, 10770), 'leo_segmentation.utils.meta_classes_selector', 'meta_classes_selector', (['config', 'dataset'], {}), '(config, dataset)\n', (10753, 10770), False, 'from leo_segmentation.utils import meta_classes_selector, print_to_string_io, train_logger, val_logger, numpy_to_tensor, load_config\n'), ((17294, 17317), 'leo_segmentation.utils.train_logger.debug', 
'train_logger.debug', (['msg'], {}), '(msg)\n', (17312, 17317), False, 'from leo_segmentation.utils import meta_classes_selector, print_to_string_io, train_logger, val_logger, numpy_to_tensor, load_config\n'), ((17326, 17347), 'leo_segmentation.utils.val_logger.debug', 'val_logger.debug', (['msg'], {}), '(msg)\n', (17342, 17347), False, 'from leo_segmentation.utils import meta_classes_selector, print_to_string_io, train_logger, val_logger, numpy_to_tensor, load_config\n'), ((18360, 18399), 'leo_segmentation.utils.print_to_string_io', 'print_to_string_io', (['mean_iou_dict', '(True)'], {}), '(mean_iou_dict, True)\n', (18378, 18399), False, 'from leo_segmentation.utils import meta_classes_selector, print_to_string_io, train_logger, val_logger, numpy_to_tensor, load_config\n'), ((19683, 19702), 'pandas.DataFrame', 'pd.DataFrame', (['stats'], {}), '(stats)\n', (19695, 19702), True, 'import pandas as pd\n'), ((20645, 20663), 'pandas.DataFrame', 'pd.DataFrame', (['ious'], {}), '(ious)\n', (20657, 20663), True, 'import pandas as pd\n'), ((1503, 1589), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""Binary_map_aug"""', '"""train"""', 'f"""split{fold_number}_train.txt"""'], {}), "(DATA_DIR, 'Binary_map_aug', 'train',\n f'split{fold_number}_train.txt')\n", (1515, 1589), False, 'import os\n'), ((4081, 4093), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (4089, 4093), True, 'import numpy as np\n'), ((5296, 5321), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5311, 5321), False, 'import os\n'), ((6529, 6555), 'random.shuffle', 'random.shuffle', (['fname_list'], {}), '(fname_list)\n', (6543, 6555), False, 'import random\n'), ((11145, 11170), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (11160, 11170), False, 'import os\n'), ((13324, 13349), 'random.shuffle', 'random.shuffle', (['img_paths'], {}), '(img_paths)\n', (13338, 13349), False, 'import random\n'), ((19089, 19123), 
'leo_segmentation.utils.train_logger.debug', 'train_logger.debug', (['self.stats_msg'], {}), '(self.stats_msg)\n', (19107, 19123), False, 'from leo_segmentation.utils import meta_classes_selector, print_to_string_io, train_logger, val_logger, numpy_to_tensor, load_config\n'), ((19150, 19182), 'leo_segmentation.utils.val_logger.debug', 'val_logger.debug', (['self.stats_msg'], {}), '(self.stats_msg)\n', (19166, 19182), False, 'from leo_segmentation.utils import meta_classes_selector, print_to_string_io, train_logger, val_logger, numpy_to_tensor, load_config\n'), ((19777, 19802), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (19792, 19802), False, 'import os\n'), ((20151, 20208), 'leo_segmentation.utils.print_to_string_io', 'print_to_string_io', (['self.mean_iou_dict'], {'pretty_print': '(True)'}), '(self.mean_iou_dict, pretty_print=True)\n', (20169, 20208), False, 'from leo_segmentation.utils import meta_classes_selector, print_to_string_io, train_logger, val_logger, numpy_to_tensor, load_config\n'), ((6070, 6123), 'numpy.random.choice', 'np.random.choice', (['classes', 'num_classes'], {'replace': '(False)'}), '(classes, num_classes, replace=False)\n', (6086, 6123), True, 'import numpy as np\n'), ((7802, 7852), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""JPEGImages"""', 'f"""{img}.jpg"""'], {}), "(DATA_DIR, 'JPEGImages', f'{img}.jpg')\n", (7814, 7852), False, 'import os\n'), ((8142, 8192), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""JPEGImages"""', 'f"""{img}.jpg"""'], {}), "(DATA_DIR, 'JPEGImages', f'{img}.jpg')\n", (8154, 8192), False, 'import os\n'), ((11663, 11704), 'os.path.join', 'os.path.join', (['dataset_root_path', '"""images"""'], {}), "(dataset_root_path, 'images')\n", (11675, 11704), False, 'import os\n'), ((12839, 12892), 'numpy.random.choice', 'np.random.choice', (['classes', 'num_classes'], {'replace': '(False)'}), '(classes, num_classes, replace=False)\n', (12855, 12892), True, 'import numpy as np\n'), ((20075, 
20108), 'os.path.join', 'os.path.join', (['model_dir', 'log_file'], {}), '(model_dir, log_file)\n', (20087, 20108), False, 'import os\n'), ((3177, 3208), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (3185, 3208), True, 'import numpy as np\n'), ((3244, 3275), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (3252, 3275), True, 'import numpy as np\n'), ((6631, 6716), 'numpy.random.choice', 'np.random.choice', (['fname_list', '(n_train_per_class + n_val_per_class)'], {'replace': '(False)'}), '(fname_list, n_train_per_class + n_val_per_class, replace=False\n )\n', (6647, 6716), True, 'import numpy as np\n'), ((7230, 7292), 'numpy.random.choice', 'np.random.choice', (['fname_list', 'n_train_per_class'], {'replace': '(False)'}), '(fname_list, n_train_per_class, replace=False)\n', (7246, 7292), True, 'import numpy as np\n'), ((9580, 9597), 'numpy.array', 'np.array', (['tr_imgs'], {}), '(tr_imgs)\n', (9588, 9597), True, 'import numpy as np\n'), ((9671, 9689), 'numpy.array', 'np.array', (['tr_masks'], {}), '(tr_masks)\n', (9679, 9689), True, 'import numpy as np\n'), ((9763, 9781), 'numpy.array', 'np.array', (['val_imgs'], {}), '(val_imgs)\n', (9771, 9781), True, 'import numpy as np\n'), ((9855, 9874), 'numpy.array', 'np.array', (['val_masks'], {}), '(val_masks)\n', (9863, 9874), True, 'import numpy as np\n'), ((10116, 10133), 'numpy.array', 'np.array', (['tr_imgs'], {}), '(tr_imgs)\n', (10124, 10133), True, 'import numpy as np\n'), ((10152, 10170), 'numpy.array', 'np.array', (['tr_masks'], {}), '(tr_masks)\n', (10160, 10170), True, 'import numpy as np\n'), ((13431, 13510), 'numpy.random.choice', 'np.random.choice', (['img_paths', '(n_train_per_class + n_val_per_class)'], {'replace': '(False)'}), '(img_paths, n_train_per_class + n_val_per_class, replace=False)\n', (13447, 13510), True, 'import numpy as np\n'), ((15742, 15759), 'numpy.array', 'np.array', (['tr_imgs'], {}), '(tr_imgs)\n', 
(15750, 15759), True, 'import numpy as np\n'), ((15835, 15853), 'numpy.array', 'np.array', (['tr_masks'], {}), '(tr_masks)\n', (15843, 15853), True, 'import numpy as np\n'), ((15929, 15947), 'numpy.array', 'np.array', (['val_imgs'], {}), '(val_imgs)\n', (15937, 15947), True, 'import numpy as np\n'), ((16023, 16042), 'numpy.array', 'np.array', (['val_masks'], {}), '(val_masks)\n', (16031, 16042), True, 'import numpy as np\n'), ((16249, 16266), 'numpy.array', 'np.array', (['tr_imgs'], {}), '(tr_imgs)\n', (16257, 16266), True, 'import numpy as np\n'), ((16285, 16303), 'numpy.array', 'np.array', (['tr_masks'], {}), '(tr_masks)\n', (16293, 16303), True, 'import numpy as np\n'), ((13882, 13899), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (13896, 13899), False, 'import os\n'), ((8697, 8710), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (8707, 8710), False, 'from PIL import Image\n'), ((8832, 8845), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (8842, 8845), False, 'from PIL import Image\n'), ((14731, 14744), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (14741, 14744), False, 'from PIL import Image\n'), ((14866, 14879), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (14876, 14879), False, 'from PIL import Image\n'), ((9017, 9030), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (9027, 9030), False, 'from PIL import Image\n'), ((9163, 9176), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (9173, 9176), False, 'from PIL import Image\n'), ((15184, 15197), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (15194, 15197), False, 'from PIL import Image\n'), ((15330, 15343), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (15340, 15343), False, 'from PIL import Image\n')] |
"""
Linear model for evaluating model biases, differences, and other thresholds
using explainable AI for historical data
Author : <NAME>
Date : 18 May 2021
Version : 1 - adds extra class (#8), but tries the MMean
"""
### Import packages
import matplotlib.pyplot as plt
import numpy as np
import sys
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import palettable.cartocolors.qualitative as cc
from sklearn.metrics import accuracy_score
import scipy.stats as sts
import cmasher as cmr
import cmocean
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
# NOTE(review): the first assignment is immediately overridden below, so
# only 'P' is analyzed; restore the full list to sweep all three variables.
variablesall = ['T2M','P','SLP']
variablesall = ['P']
# Single empty pick == use all model GCMs (plus the extra random class).
pickSMILEall = [[]]
# L2 (ridge) regularization strengths to compare side by side.
ridge_penalty = [0,0.01,0.1,1,5]
# Main sweep: variable -> SMILE subset -> ridge (L2) penalty.  For each
# penalty the saved linear-model regression weights are read from disk; one
# map panel per penalty is then drawn and the figure saved per SMILE subset.
for va in range(len(variablesall)):
    for m in range(len(pickSMILEall)):
        weights = []
        for ww in range(len(ridge_penalty)):
            ###############################################################################
            ###############################################################################
            ###############################################################################
            ### Data preliminaries
            directorydata = '/Users/zlabe/Documents/Research/ModelComparison/Data/'
            directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/v2-LINEAR/'
            letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n"]
            ###############################################################################
            ###############################################################################
            modelGCMs = ['CanESM2-ens','MPI-ens','CSIRO-MK3.6-ens','KNMI-ecearth-ens',
                         'GFDL-CM3-ens','GFDL-ESM2M-ens','LENS-ens']
            datasetsingle = ['SMILE']
            dataset_obs = 'ERA5BE'
            seasons = ['annual']
            variq = variablesall[va]
            reg_name = 'SMILEGlobe'
            timeper = 'historical'
            ###############################################################################
            ###############################################################################
            # Number of classification classes: the chosen SMILEs (or all
            # GCMs when no subset is picked) plus one extra "random" class.
            pickSMILE = pickSMILEall[m]
            if len(pickSMILE) >= 1:
                lenOfPicks = len(pickSMILE) + 1 # For random class
            else:
                lenOfPicks = len(modelGCMs) + 1 # For random class
            ###############################################################################
            ###############################################################################
            land_only = False
            ocean_only = False
            ###############################################################################
            ###############################################################################
            rm_merid_mean = False
            rm_annual_mean = False
            ###############################################################################
            ###############################################################################
            rm_ensemble_mean = False
            rm_observational_mean = False
            ###############################################################################
            ###############################################################################
            calculate_anomalies = False
            if calculate_anomalies == True:
                baseline = np.arange(1951,1980+1,1)
            ###############################################################################
            ###############################################################################
            window = 0
            ensTypeExperi = 'ENS'
            # shuffletype = 'TIMEENS'
            # shuffletype = 'ALLENSRAND'
            # shuffletype = 'ALLENSRANDrmmean'
            shuffletype = 'RANDGAUSS'
            # integer = 5 # random noise value to add/subtract from each grid point
            sizeOfTwin = 1 # number of classes to add to other models
            ###############################################################################
            ###############################################################################
            if ensTypeExperi == 'ENS':
                if window == 0:
                    rm_standard_dev = False
                    yearsall = np.arange(1950,2019+1,1)
                    ravel_modelens = False
                    ravelmodeltime = False
                else:
                    rm_standard_dev = True
                    yearsall = np.arange(1950+window,2019+1,1)
                    ravelmodeltime = False
                    ravel_modelens = True
            elif ensTypeExperi == 'GCM':
                if window == 0:
                    rm_standard_dev = False
                    yearsall = np.arange(1950,2019+1,1)
                    ravel_modelens = False
                    ravelmodeltime = False
                else:
                    rm_standard_dev = True
                    yearsall = np.arange(1950+window,2019+1,1)
                    ravelmodeltime = False
                    ravel_modelens = True
            ###############################################################################
            ###############################################################################
            numOfEns = 16
            if len(modelGCMs) == 6:
                lensalso = False
            elif len(modelGCMs) == 7:
                lensalso = True
            lentime = len(yearsall)
            ###############################################################################
            ###############################################################################
            ravelyearsbinary = False
            ravelbinary = False
            num_of_class = lenOfPicks
            ###############################################################################
            ###############################################################################
            lrpRule = 'z'
            normLRP = True
            ###############################################################################
            modelGCMsNames = np.append(modelGCMs,['MMmean'])
            ###############################################################################
            ###############################################################################
            ###############################################################################
            ###############################################################################
            ### Picking experiment to save
            # Map the boolean preprocessing flags above onto a named
            # experiment; 'issueWithExperiment' survives (and aborts below)
            # if no combination matches.
            typeOfAnalysis = 'issueWithExperiment'
            # Experiment #1
            if rm_ensemble_mean == True:
                if window > 1:
                    if calculate_anomalies == False:
                        if rm_merid_mean == False:
                            if rm_observational_mean == False:
                                if rm_annual_mean == False:
                                    typeOfAnalysis = 'Experiment-1'
            # Experiment #2
            if rm_ensemble_mean == True:
                if window == 0:
                    if calculate_anomalies == False:
                        if rm_merid_mean == False:
                            if rm_observational_mean == False:
                                if rm_annual_mean == False:
                                    typeOfAnalysis = 'Experiment-2'
            # Experiment #3 (raw data)
            if rm_ensemble_mean == False:
                if window == 0:
                    if calculate_anomalies == False:
                        if rm_merid_mean == False:
                            if rm_observational_mean == False:
                                if rm_annual_mean == False:
                                    typeOfAnalysis = 'Experiment-3'
            # Experiment #4
            if rm_ensemble_mean == False:
                if window == 0:
                    if calculate_anomalies == False:
                        if rm_merid_mean == False:
                            if rm_observational_mean == False:
                                if rm_annual_mean == True:
                                    typeOfAnalysis = 'Experiment-4'
            # Experiment #5
            if rm_ensemble_mean == False:
                if window == 0:
                    if calculate_anomalies == False:
                        if rm_merid_mean == False:
                            if rm_observational_mean == True:
                                if rm_annual_mean == False:
                                    typeOfAnalysis = 'Experiment-5'
            # Experiment #6
            if rm_ensemble_mean == False:
                if window == 0:
                    if calculate_anomalies == False:
                        if rm_merid_mean == False:
                            if rm_observational_mean == True:
                                if rm_annual_mean == True:
                                    typeOfAnalysis = 'Experiment-6'
            # Experiment #7
            if rm_ensemble_mean == False:
                if window == 0:
                    if calculate_anomalies == True:
                        if rm_merid_mean == False:
                            if rm_observational_mean == True:
                                if rm_annual_mean == False:
                                    typeOfAnalysis = 'Experiment-7'
            # Experiment #8
            if rm_ensemble_mean == False:
                if window == 0:
                    if calculate_anomalies == True:
                        if rm_merid_mean == False:
                            if rm_observational_mean == False:
                                if rm_annual_mean == False:
                                    typeOfAnalysis = 'Experiment-8'
            # Experiment #9
            if rm_ensemble_mean == False:
                if window > 1:
                    if calculate_anomalies == True:
                        if rm_merid_mean == False:
                            if rm_observational_mean == False:
                                if rm_annual_mean == False:
                                    typeOfAnalysis = 'Experiment-9'
            print('\n<<<<<<<<<<<< Analysis == %s (%s) ! >>>>>>>>>>>>>>>\n' % (typeOfAnalysis,timeper))
            if typeOfAnalysis == 'issueWithExperiment':
                sys.exit('Wrong parameters selected to analyze')
            ### Select how to save files
            if land_only == True:
                saveData = timeper + '_LAND' + '_LINEAR_MODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
                typemask = 'LAND'
            elif ocean_only == True:
                saveData = timeper + '_OCEAN' + '_LINEAR_MODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
                typemask = 'OCEAN'
            else:
                saveData = timeper + '_LINEAR_MODDIF4_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
                typemask = 'GLOBAL'
            print('*Filename == < %s >' % saveData)
            ### Adding new file name for linear model
            saveData = saveData + '_L2-%s' % ridge_penalty[ww]
            print('\n>>>NEW FILE NAME = %s\n' % saveData)
            ###############################################################################
            ###############################################################################
            ###############################################################################
            ###############################################################################
            ### Read in regression weights
            weightsn = np.genfromtxt(directorydata + 'weights_' + saveData + '.txt')
            weights.append(weightsn)
        ### Read in some latitude and longitudes
        lat1 = np.load('/Users/zlabe/Documents/Research/ModelComparison/Data/Climatologies/annual_T2M_SMILEGlobe_historical_PointByPoint_lats.npz')['arr_0']
        lon1 = np.load('/Users/zlabe/Documents/Research/ModelComparison/Data/Climatologies/annual_T2M_SMILEGlobe_historical_PointByPoint_lons.npz')['arr_0']
        ###############################################################################
        ###############################################################################
        ###############################################################################
        ### Plot subplot of LRP means training
        limit = np.arange(-0.025,0.02501,0.0001)
        barlim = np.round(np.arange(-0.025,0.02501,0.025),3)
        cmap = cmocean.cm.balance
        label = r'\textbf{Linear Regression Weights - [ %s ] - %s}' % (variq,typeOfAnalysis)
        fig = plt.figure(figsize=(10,2))
        for r in range(len(ridge_penalty)):
            var = weights[r]
            ax1 = plt.subplot(1,len(ridge_penalty),r+1)
            # NOTE(review): 'm' here shadows the pick-loop index above; it is
            # harmless because the for statement rebinds it each iteration,
            # but renaming the Basemap handle would be clearer.
            m = Basemap(projection='moll',lon_0=0,resolution='l',area_thresh=10000)
            m.drawcoastlines(color='dimgrey',linewidth=0.27)
            # Wrap the longitude seam and recenter the grid for plotting.
            var, lons_cyclic = addcyclic(var, lon1)
            var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
            lon2d, lat2d = np.meshgrid(lons_cyclic, lat1)
            x, y = m(lon2d, lat2d)
            circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
                              linewidth=0.7)
            circle.set_clip_on(False)
            cs1 = m.contourf(x,y,var,limit,extend='both')
            cs1.set_cmap(cmap)
            if ocean_only == True:
                m.fillcontinents(color='dimgrey',lake_color='dimgrey')
            elif land_only == True:
                m.drawlsmask(land_color=(0,0,0,0),ocean_color='darkgrey',lakes=True,zorder=5)
            ax1.annotate(r'\textbf{L$_{2}$=%s}' % ridge_penalty[r],xy=(0,0),xytext=(0.5,1.10),
                          textcoords='axes fraction',color='dimgrey',fontsize=8,
                          rotation=0,ha='center',va='center')
            ax1.annotate(r'\textbf{[%s]}' % letters[r],xy=(0,0),xytext=(0.86,0.97),
                          textcoords='axes fraction',color='k',fontsize=6,
                          rotation=330,ha='center',va='center')
        ###############################################################################
        cbar_ax1 = fig.add_axes([0.36,0.15,0.3,0.03])
        cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
                            extend='both',extendfrac=0.07,drawedges=False)
        cbar1.set_label(label,fontsize=9,color='dimgrey',labelpad=1.4)
        cbar1.set_ticks(barlim)
        cbar1.set_ticklabels(list(map(str,barlim)))
        cbar1.ax.tick_params(axis='x', size=.01,labelsize=5)
        cbar1.outline.set_edgecolor('dimgrey')
        plt.tight_layout()
        if lenOfPicks == 3:
            plt.subplots_adjust(top=0.85,wspace=0.02,hspace=0.02,bottom=0.24)
        else:
            plt.subplots_adjust(top=0.85,wspace=0.02,hspace=0.02,bottom=0.14)
        plt.savefig(directoryfigure + '%s/LinearWeightsL2_%s.png' % (typeOfAnalysis,saveData),dpi=300)
"numpy.load",
"numpy.meshgrid",
"mpl_toolkits.basemap.shiftgrid",
"mpl_toolkits.basemap.addcyclic",
"numpy.genfromtxt",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots_adjust",
"sys.exit",
"matplotlib.pyplot.tight_layout",
"mpl_... | [((618, 645), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (624, 645), True, 'import matplotlib.pyplot as plt\n'), ((645, 718), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})\n", (651, 718), True, 'import matplotlib.pyplot as plt\n'), ((12958, 12992), 'numpy.arange', 'np.arange', (['(-0.025)', '(0.02501)', '(0.0001)'], {}), '(-0.025, 0.02501, 0.0001)\n', (12967, 12992), True, 'import numpy as np\n'), ((13162, 13189), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 2)'}), '(figsize=(10, 2))\n', (13172, 13189), True, 'import matplotlib.pyplot as plt\n'), ((14990, 15008), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15006, 15008), True, 'import matplotlib.pyplot as plt\n'), ((15177, 15277), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + '%s/LinearWeightsL2_%s.png' % (typeOfAnalysis, saveData))"], {'dpi': '(300)'}), "(directoryfigure + '%s/LinearWeightsL2_%s.png' % (typeOfAnalysis,\n saveData), dpi=300)\n", (15188, 15277), True, 'import matplotlib.pyplot as plt\n'), ((13009, 13042), 'numpy.arange', 'np.arange', (['(-0.025)', '(0.02501)', '(0.025)'], {}), '(-0.025, 0.02501, 0.025)\n', (13018, 13042), True, 'import numpy as np\n'), ((13307, 13377), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""moll"""', 'lon_0': '(0)', 'resolution': '"""l"""', 'area_thresh': '(10000)'}), "(projection='moll', lon_0=0, resolution='l', area_thresh=10000)\n", (13314, 13377), False, 'from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\n'), ((13460, 13480), 'mpl_toolkits.basemap.addcyclic', 'addcyclic', (['var', 'lon1'], {}), '(var, lon1)\n', (13469, 13480), False, 'from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\n'), ((13504, 13551), 'mpl_toolkits.basemap.shiftgrid', 'shiftgrid', (['(180.0)', 'var', 'lons_cyclic'], {'start': '(False)'}), 
'(180.0, var, lons_cyclic, start=False)\n', (13513, 13551), False, 'from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\n'), ((13570, 13600), 'numpy.meshgrid', 'np.meshgrid', (['lons_cyclic', 'lat1'], {}), '(lons_cyclic, lat1)\n', (13581, 13600), True, 'import numpy as np\n'), ((15033, 15101), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.85)', 'wspace': '(0.02)', 'hspace': '(0.02)', 'bottom': '(0.24)'}), '(top=0.85, wspace=0.02, hspace=0.02, bottom=0.24)\n', (15052, 15101), True, 'import matplotlib.pyplot as plt\n'), ((15110, 15178), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.85)', 'wspace': '(0.02)', 'hspace': '(0.02)', 'bottom': '(0.14)'}), '(top=0.85, wspace=0.02, hspace=0.02, bottom=0.14)\n', (15129, 15178), True, 'import matplotlib.pyplot as plt\n'), ((6292, 6324), 'numpy.append', 'np.append', (['modelGCMs', "['MMmean']"], {}), "(modelGCMs, ['MMmean'])\n", (6301, 6324), True, 'import numpy as np\n'), ((12183, 12244), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'weights_' + saveData + '.txt')"], {}), "(directorydata + 'weights_' + saveData + '.txt')\n", (12196, 12244), True, 'import numpy as np\n'), ((3548, 3576), 'numpy.arange', 'np.arange', (['(1951)', '(1980 + 1)', '(1)'], {}), '(1951, 1980 + 1, 1)\n', (3557, 3576), True, 'import numpy as np\n'), ((10588, 10636), 'sys.exit', 'sys.exit', (['"""Wrong parameters selected to analyze"""'], {}), "('Wrong parameters selected to analyze')\n", (10596, 10636), False, 'import sys\n'), ((12367, 12509), 'numpy.load', 'np.load', (['"""/Users/zlabe/Documents/Research/ModelComparison/Data/Climatologies/annual_T2M_SMILEGlobe_historical_PointByPoint_lats.npz"""'], {}), "(\n '/Users/zlabe/Documents/Research/ModelComparison/Data/Climatologies/annual_T2M_SMILEGlobe_historical_PointByPoint_lats.npz'\n )\n", (12374, 12509), True, 'import numpy as np\n'), ((12528, 12670), 'numpy.load', 'np.load', 
(['"""/Users/zlabe/Documents/Research/ModelComparison/Data/Climatologies/annual_T2M_SMILEGlobe_historical_PointByPoint_lons.npz"""'], {}), "(\n '/Users/zlabe/Documents/Research/ModelComparison/Data/Climatologies/annual_T2M_SMILEGlobe_historical_PointByPoint_lons.npz'\n )\n", (12535, 12670), True, 'import numpy as np\n'), ((4462, 4490), 'numpy.arange', 'np.arange', (['(1950)', '(2019 + 1)', '(1)'], {}), '(1950, 2019 + 1, 1)\n', (4471, 4490), True, 'import numpy as np\n'), ((4669, 4706), 'numpy.arange', 'np.arange', (['(1950 + window)', '(2019 + 1)', '(1)'], {}), '(1950 + window, 2019 + 1, 1)\n', (4678, 4706), True, 'import numpy as np\n'), ((4934, 4962), 'numpy.arange', 'np.arange', (['(1950)', '(2019 + 1)', '(1)'], {}), '(1950, 2019 + 1, 1)\n', (4943, 4962), True, 'import numpy as np\n'), ((5141, 5178), 'numpy.arange', 'np.arange', (['(1950 + window)', '(2019 + 1)', '(1)'], {}), '(1950 + window, 2019 + 1, 1)\n', (5150, 5178), True, 'import numpy as np\n')] |
import os
import sys
import signal
import pickle
import subprocess
import hashlib
import numpy as np
import matplotlib.pyplot as plt
import argparse
import logging
logging.basicConfig(format="%(message)s", level=os.getenv("LOG_LEVEL", logging.INFO))
from .. import load_ifo
from ..gwinc import gwinc
from ..gwinc_matlab import gwinc_matlab
try:
import inspiral_range
except ImportError:
inspiral_range = None
FLO = 5
FHI = 6000
NPOINTS = 3000
def path_hash(path):
    """Calculate SHA1 hash of path contents, either directory or file.

    Hashes every regular, non-hidden file under *path* in a stable (sorted)
    order, then hashes the concatenation of the per-file digests, so the
    result changes whenever any file content changes.

    Returns the hex digest string, or None when *path* is empty/unset or
    does not exist.
    """
    import shlex  # local import: only needed when a path is actually hashed

    if not path or not os.path.exists(path):
        return None
    path = os.path.expanduser(path)
    if os.path.isdir(path):
        d = path
        f = "."
    else:
        d = os.path.dirname(path)
        f = os.path.basename(path)
    CWD = os.getcwd()
    os.chdir(d)
    try:
        # shlex.quote guards against file names containing spaces or shell
        # metacharacters being split/interpreted by the shell pipeline.
        cmd = 'find {} -type f ! -wholename "*/.*" -print0 | sort -z | xargs -0 sha1sum | sha1sum'.format(
            shlex.quote(f)
        )
        sha1sum_out = subprocess.check_output(cmd, shell=True)
    finally:
        # Always restore the working directory, even if the subprocess fails.
        os.chdir(CWD)
    sha1sum = sha1sum_out.split()[0]
    return sha1sum.decode()
##################################################
def main():
    """Compare pygwinc noise traces against cached/recomputed matgwinc traces.

    Loads the IFO description, computes (or loads from a pickle cache) the
    matgwinc noise budget, computes the pygwinc budget at the same frequency
    points, reports any traces whose fractional difference exceeds the
    requested tolerance, and optionally plots the differences.

    Returns:
        0 when all compared traces agree within tolerance, 1 otherwise
        (suitable as a process exit code).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--plot", "-p", action="store_true", help="plot differences")
    parser.add_argument("--save", "-s", help="save plot to file")
    parser.add_argument(
        "--recalc", "-r", action="store_true", help="recalculate all traces"
    )
    parser.add_argument(
        "--tolerance", "-t", help="fractional tolerance", type=float, default=1e-6
    )
    parser.add_argument(
        "--skip", "-k", action="append", help="traces to skip comparing"
    )
    parser.add_argument("IFO", help="IFO name or description file")
    args = parser.parse_args()
    logging.info("loading IFO '{}'...".format(args.IFO))
    Budget, ifo, freq, plot_style = load_ifo(args.IFO)
    # The frequency array from load_ifo is deliberately replaced by a fixed
    # grid so both codes are evaluated at identical points.
    freq = np.logspace(np.log10(FLO), np.log10(FHI), NPOINTS)
    ##############################
    # matgwinc processing
    # Cached matgwinc results live next to this module, keyed by IFO name.
    mdata_pkl = os.path.join(os.path.dirname(__file__), "{}.pkl".format(args.IFO))
    ifo_hash = hashlib.sha1(ifo.to_txt().encode()).hexdigest()
    gwinc_hash = path_hash(os.getenv("GWINCPATH"))
    if not gwinc_hash:
        logging.warning(
            "GWINCPATH not specified or does not exist; skipping check for changes to matgwinc code."
        )
    # BUGFIX: recalculate when no cache file exists.  Previously a missing
    # cache without --recalc left `mdata` undefined, crashing with a
    # NameError at the `mdata["noises"]` lookup below.
    mrecalc = args.recalc or not os.path.exists(mdata_pkl)
    if os.path.exists(mdata_pkl) and not mrecalc:
        logging.info("loading matgwinc data {}...".format(mdata_pkl))
        with open(mdata_pkl, "rb") as f:
            if sys.version_info.major > 2:
                # Cache pickles may have been written by Python 2.
                mdata = pickle.load(f, encoding="latin1")
            else:
                mdata = pickle.load(f)
        # Invalidate the cache if the IFO description or the matgwinc
        # source tree changed since the cache was written.
        if mdata["ifo_hash"] != ifo_hash:
            logging.info("ifo hash has changed: {}".format(ifo_hash))
            mrecalc = True
        if gwinc_hash and mdata["gwinc_hash"] != gwinc_hash:
            logging.info("matgwinc hash has changed: {}".format(gwinc_hash))
            mrecalc = True
    if mrecalc:
        logging.info("calculating matgwinc noises...")
        try:
            mscore, mnoises, mifo = gwinc_matlab(freq, ifo)
        except (ImportError, OSError):
            sys.exit("MATLAB engine not available.")
        mdata = dict(
            score=mscore,
            noises=mnoises,
            ifo=mifo,
            ifo_hash=ifo_hash,
            gwinc_hash=gwinc_hash,
        )
        with open(mdata_pkl, "wb") as f:
            pickle.dump(mdata, f)
    mnoises = mdata["noises"]
    ##############################
    # pygwinc processing
    logging.info("calculating pygwinc noises...")
    score, noises, ifo = gwinc(freq, ifo)
    ##############################
    # calc inspiral ranges
    if inspiral_range:
        logging.info("calculating inspiral ranges...")
        range_func = inspiral_range.range
        H = inspiral_range.waveform.CBCWaveform(freq)
        mfom = range_func(freq, mnoises["Total"], H=H)
        _, mnoises["int73"] = inspiral_range.int73(freq, mnoises["Total"])
        logging.info("matgwinc range: {:.2f} Mpc".format(mfom))
        fom = range_func(freq, noises["Total"], H=H)
        _, noises["int73"] = inspiral_range.int73(freq, noises["Total"])
        logging.info("pygwinc range: {:.2f} Mpc".format(fom))
        fom_title = """inspiral {func} {m1}/{m2} Msol:
matgwinc: {mfom:.2f} Mpc
pygwinc: {fom:.2f} Mpc""".format(
            func=range_func.__name__,
            m1=H.params["m1"],
            m2=H.params["m2"],
            mfom=mfom,
            fom=fom,
        )
    else:
        fom_title = ""
    ##############################
    # find differences
    skip = args.skip
    fractional_tolerance = args.tolerance
    diffs = {}
    for name, noise in noises.items():
        if name in ["Freq"]:
            continue
        if skip and name in skip:
            logging.warning("SKIPPING TEST: '{}'".format(name))
            continue
        try:
            mnoise = mnoises[name]
        except KeyError:
            # Trace exists only in pygwinc; nothing to compare against.
            continue
        mn = mnoise
        pn = noise
        # Compare amplitude spectral densities (sqrt of PSD traces).
        diff = np.sqrt(mn) - np.sqrt(pn)
        frac = abs(diff / np.sqrt(pn))
        if max(frac) < fractional_tolerance:
            continue
        logging.warning(
            "EXCESSIVE DIFFERENCE: {:{w}} {:6.1f} ppm".format(
                name, max(frac) * 1e6, w=max([len(n) for n in noises])
            )
        )
        diffs[name] = (mn, pn, frac)
    ##############################
    # plot
    if args.plot:
        # One row per differing trace: left = both spectra, right = fractional diff.
        spec = (len(diffs) + 1, 2)
        sharex = None
        for i, name in enumerate(diffs):
            mn, pn, frac = diffs[name]
            axl = plt.subplot2grid(spec, (i, 0), sharex=sharex)
            axl.loglog(freq, np.sqrt(pn), label="pygwinc")
            axl.loglog(freq, np.sqrt(mn), label="matgwinc")
            axl.grid()
            axl.legend(loc="upper right")
            axl.set_ylabel(name)
            if i == 0:
                sharex = axl
            axr = plt.subplot2grid(spec, (i, 1), sharex=sharex)
            axr.loglog(freq, frac)
            axr.grid()
            axr.axhline(y=max(frac), color="r", linestyle="--")
            axr.text(
                max(freq) + 4000,
                max(frac),
                "{:.1f} ppm".format(max(frac) * 1e6),
                horizontalalignment="left",
                verticalalignment="center",
                color="red",
            )
        if diffs:
            axl.set_xlabel("frequency [Hz]")
            axr.set_xlabel("frequency [Hz]")
            plt.suptitle(
                """{} mat/py gwinc noise comparison
noises that differ by more than {} ppm [(mat-py)/py]
{}""".format(
                    args.IFO, fractional_tolerance * 1e6, fom_title
                )
            )
            if args.save:
                plt.gcf().set_size_inches(11, (len(diffs) + 1) * 4)
                plt.savefig(args.save)
            else:
                plt.show()
        else:
            logging.warning("All tests passed, so no plot was generated")
    ##############################
    if len(diffs) > 0:
        return 1
    return 0
##################################################
if __name__ == "__main__":
    # Restore the default SIGINT handler so Ctrl-C terminates the process
    # immediately instead of raising KeyboardInterrupt inside library code.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    # main() returns 0 when all traces agree within tolerance, 1 otherwise.
    sys.exit(main())
| [
"pickle.dump",
"inspiral_range.int73",
"argparse.ArgumentParser",
"matplotlib.pyplot.subplot2grid",
"pickle.load",
"os.chdir",
"inspiral_range.waveform.CBCWaveform",
"logging.warning",
"os.path.dirname",
"os.path.exists",
"numpy.log10",
"matplotlib.pyplot.show",
"os.path.basename",
"subpro... | [((615, 639), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (633, 639), False, 'import os\n'), ((647, 666), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (660, 666), False, 'import os\n'), ((790, 801), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (799, 801), False, 'import os\n'), ((806, 817), 'os.chdir', 'os.chdir', (['d'], {}), '(d)\n', (814, 817), False, 'import os\n'), ((955, 995), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (978, 995), False, 'import subprocess\n'), ((1037, 1050), 'os.chdir', 'os.chdir', (['CWD'], {}), '(CWD)\n', (1045, 1050), False, 'import os\n'), ((1159, 1184), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1182, 1184), False, 'import argparse\n'), ((3618, 3663), 'logging.info', 'logging.info', (['"""calculating pygwinc noises..."""'], {}), "('calculating pygwinc noises...')\n", (3630, 3663), False, 'import logging\n'), ((7511, 7555), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_DFL'], {}), '(signal.SIGINT, signal.SIG_DFL)\n', (7524, 7555), False, 'import signal\n'), ((214, 250), 'os.getenv', 'os.getenv', (['"""LOG_LEVEL"""', 'logging.INFO'], {}), "('LOG_LEVEL', logging.INFO)\n", (223, 250), False, 'import os\n'), ((723, 744), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (738, 744), False, 'import os\n'), ((757, 779), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (773, 779), False, 'import os\n'), ((1899, 1912), 'numpy.log10', 'np.log10', (['FLO'], {}), '(FLO)\n', (1907, 1912), True, 'import numpy as np\n'), ((1914, 1927), 'numpy.log10', 'np.log10', (['FHI'], {}), '(FHI)\n', (1922, 1927), True, 'import numpy as np\n'), ((2030, 2055), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2045, 2055), False, 'import os\n'), ((2175, 2197), 'os.getenv', 'os.getenv', (['"""GWINCPATH"""'], {}), "('GWINCPATH')\n", (2184, 
2197), False, 'import os\n'), ((2230, 2346), 'logging.warning', 'logging.warning', (['"""GWINCPATH not specified or does not exist; skipping check for changes to matgwinc code."""'], {}), "(\n 'GWINCPATH not specified or does not exist; skipping check for changes to matgwinc code.'\n )\n", (2245, 2346), False, 'import logging\n'), ((2394, 2419), 'os.path.exists', 'os.path.exists', (['mdata_pkl'], {}), '(mdata_pkl)\n', (2408, 2419), False, 'import os\n'), ((3060, 3106), 'logging.info', 'logging.info', (['"""calculating matgwinc noises..."""'], {}), "('calculating matgwinc noises...')\n", (3072, 3106), False, 'import logging\n'), ((3801, 3847), 'logging.info', 'logging.info', (['"""calculating inspiral ranges..."""'], {}), "('calculating inspiral ranges...')\n", (3813, 3847), False, 'import logging\n'), ((3903, 3944), 'inspiral_range.waveform.CBCWaveform', 'inspiral_range.waveform.CBCWaveform', (['freq'], {}), '(freq)\n', (3938, 3944), False, 'import inspiral_range\n'), ((4031, 4075), 'inspiral_range.int73', 'inspiral_range.int73', (['freq', "mnoises['Total']"], {}), "(freq, mnoises['Total'])\n", (4051, 4075), False, 'import inspiral_range\n'), ((4223, 4266), 'inspiral_range.int73', 'inspiral_range.int73', (['freq', "noises['Total']"], {}), "(freq, noises['Total'])\n", (4243, 4266), False, 'import inspiral_range\n'), ((567, 587), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (581, 587), False, 'import os\n'), ((3499, 3520), 'pickle.dump', 'pickle.dump', (['mdata', 'f'], {}), '(mdata, f)\n', (3510, 3520), False, 'import pickle\n'), ((5286, 5297), 'numpy.sqrt', 'np.sqrt', (['mn'], {}), '(mn)\n', (5293, 5297), True, 'import numpy as np\n'), ((5300, 5311), 'numpy.sqrt', 'np.sqrt', (['pn'], {}), '(pn)\n', (5307, 5311), True, 'import numpy as np\n'), ((5943, 5988), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['spec', '(i, 0)'], {'sharex': 'sharex'}), '(spec, (i, 0), sharex=sharex)\n', (5959, 5988), True, 'import matplotlib.pyplot as plt\n'), 
((6277, 6322), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['spec', '(i, 1)'], {'sharex': 'sharex'}), '(spec, (i, 1), sharex=sharex)\n', (6293, 6322), True, 'import matplotlib.pyplot as plt\n'), ((7274, 7335), 'logging.warning', 'logging.warning', (['"""All tests passed, so no plot was generated"""'], {}), "('All tests passed, so no plot was generated')\n", (7289, 7335), False, 'import logging\n'), ((2639, 2672), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (2650, 2672), False, 'import pickle\n'), ((2715, 2729), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2726, 2729), False, 'import pickle\n'), ((3231, 3271), 'sys.exit', 'sys.exit', (['"""MATLAB engine not available."""'], {}), "('MATLAB engine not available.')\n", (3239, 3271), False, 'import sys\n'), ((5338, 5349), 'numpy.sqrt', 'np.sqrt', (['pn'], {}), '(pn)\n', (5345, 5349), True, 'import numpy as np\n'), ((6018, 6029), 'numpy.sqrt', 'np.sqrt', (['pn'], {}), '(pn)\n', (6025, 6029), True, 'import numpy as np\n'), ((6077, 6088), 'numpy.sqrt', 'np.sqrt', (['mn'], {}), '(mn)\n', (6084, 6088), True, 'import numpy as np\n'), ((7179, 7201), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.save'], {}), '(args.save)\n', (7190, 7201), True, 'import matplotlib.pyplot as plt\n'), ((7236, 7246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7244, 7246), True, 'import matplotlib.pyplot as plt\n'), ((7111, 7120), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7118, 7120), True, 'import matplotlib.pyplot as plt\n')] |
import io
import numpy as np
import torch
import matplotlib.pyplot as plt
from PIL import Image
from dp.exact_dp import drop_dtw
from models.losses import compute_all_costs
# One (color, marker) pair is assigned per step id when plotting.
color_code = ['blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'grey', 'olive', 'cyan', 'lime']
shape_code = ["o", "s", "P", "*", "h", ">", 'X', 'd', 'D', 'v', '<']
color_code, shape_code = color_code * 3, shape_code * 3 # to protect against long step sequences
# The final ('black', 'p') entry is reserved for special traces
# (distractor activations and the drop threshold line below).
color_code = color_code + ['black']
shape_code = shape_code + ['p']
def visualize_drop_dtw_matching(samples, distractor=None, gamma_f=10, drop_cost='logit', keep_percentile=0.3, shape=(10, 2)):
    """Plot Drop-DTW frame-to-step matchings for several samples.

    For every sample one subplot is drawn showing the ground-truth step
    assignment (markers at y=2) and the Drop-DTW predicted assignment
    (markers at y=1), using one color/shape pair per step id.

    Args:
        samples: dict mapping sample name -> sample dict; each sample must
            provide 'step_ids', 'step_starts', 'step_ends' plus whatever
            compute_all_costs needs.
        distractor: optional distractor feature forwarded to compute_all_costs.
        gamma_f: temperature(s) for the cost computation; a scalar is
            broadcast to all samples.
        drop_cost: drop-cost type forwarded to compute_all_costs.
        keep_percentile: keep percentile forwarded to compute_all_costs.
        shape: (width, height-per-sample) of the figure in inches.

    Returns:
        The rendered figure as an RGB numpy array.
    """
    if not isinstance(gamma_f, (list, tuple)):
        gamma_f = [gamma_f] * len(samples)
    plt.rcParams["figure.figsize"] = (shape[0], shape[1] * len(samples))
    for plot_idx, (sample_name, sample) in enumerate(samples.items()):
        ax = plt.subplot(len(samples), 1, plot_idx + 1)
        ax.set_title(f"{sample_name}: drop-dtw matching, gamma {gamma_f[plot_idx]}")
        zx_costs, drop_costs, _ = compute_all_costs(
            sample, distractor, gamma_f[plot_idx], drop_cost_type=drop_cost, keep_percentile=keep_percentile)
        zx_costs, drop_costs = [t.detach().cpu().numpy() for t in [zx_costs, drop_costs]]
        _, path, frames_dropped = drop_dtw(zx_costs, drop_costs)
        # Predicted per-frame step labels; -1 marks a dropped/unmatched frame.
        frame_labels = np.zeros_like(drop_costs) - 1
        for label, frame_id in path:
            if frame_id * label > 0 and frame_id not in frames_dropped:
                frame_labels[frame_id - 1] = sample['step_ids'][label - 1].item()
        # Ground-truth per-frame step labels; -1 marks frames with no step.
        # (The loop variable used to be `i`, shadowing the outer subplot
        # index; renamed to frame_idx to remove the shadowing.)
        gt_labels = np.zeros_like(frame_labels) - 1
        for frame_idx in range(gt_labels.shape[0]):
            for sample_id, start, end in zip(sample['step_ids'], sample['step_starts'], sample['step_ends']):
                if (frame_idx >= start.item()) and (frame_idx <= end.item()):
                    gt_labels[frame_idx] = sample_id.item()
        unique_labels = np.unique(sample['step_ids'].numpy())
        step_colors = dict(zip(unique_labels, color_code))
        step_shapes = dict(zip(unique_labels, shape_code))
        tick_freq = 20 if len(frame_labels) > 100 else 10
        plt.xticks(np.arange(0, len(frame_labels) * 3.2, tick_freq))
        plt.xlim(0, len(frame_labels) * 3.2)
        plt.tick_params(bottom=True, top=False, left=True, right=True, labelright=True)
        plt.grid()
        for si, step_id in enumerate(unique_labels):
            gt_x = np.arange(len(gt_labels))[gt_labels == step_id]
            pred_x = np.arange(len(frame_labels))[frame_labels == step_id]
            step_color, step_shape = step_colors[step_id], step_shapes[step_id]
            # The 3.2 factor presumably converts frame index to seconds --
            # TODO confirm against the dataset's frame rate.
            plt.plot(gt_x * 3.2, [2] * len(gt_x), step_shape, color=step_color)
            plt.plot(pred_x * 3.2, [1] * len(pred_x), step_shape, color=step_color)
    # Render the figure into an in-memory PNG and return it as an RGB array.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    plt.close()
    buf.seek(0)
    img = np.array(Image.open(buf).convert('RGB'))
    return img
def visualize_step_strength(samples, distractor=None, gamma_f=10, drop_cost='logit', keep_percentile=0.3, shape=(10, 2)):
    """Plot per-frame step/frame similarity curves for several samples.

    For every sample one subplot is drawn with one similarity curve per
    unique step, a horizontal bar over each step's ground-truth time span,
    and (for the first sample only) the distractor activations and/or the
    logit drop threshold in black.  Returns the rendered figure as an RGB
    numpy array.
    """
    # Broadcast a scalar temperature to one value per sample.
    gamma_f = [gamma_f] * len(samples) if not isinstance(gamma_f, (list, tuple)) else gamma_f
    # Unique-step bookkeeping is derived from the FIRST sample only;
    # presumably all samples share the same step sequence -- TODO confirm.
    step_ids = list(samples.values())[0]['step_ids']
    unique_step_mask = torch.zeros_like(step_ids).to(torch.bool)
    unique_step_ids = []
    for si, step_id in enumerate(step_ids):
        if step_id.item() not in unique_step_ids:
            unique_step_ids.append(step_id.item())
            unique_step_mask[si] = True
    # Colors are assigned to step ids in sorted order for stable coloring.
    step_colors = dict(zip(np.sort(unique_step_ids), color_code))
    plt.rcParams["figure.figsize"] = (shape[0], shape[1] * len(samples))
    for i, (sample_name, sample) in enumerate(samples.items()):
        ax = plt.subplot(len(samples), 1, i + 1)
        ax.set_title(f"{sample_name}: frame-step product, gamma {gamma_f[i]}")
        # NOTE: rebinds `step_ids` from the enclosing scope to this sample's ids.
        step_ids = sample['step_ids']
        frame_features = sample['frame_features']
        step_features = sample['step_features'][unique_step_mask]
        # Temperature-scaled step-x-frame similarity matrix as a numpy array.
        descr_clip_similarity = (step_features @ frame_features.T / gamma_f[i]).detach().cpu().numpy()
        N_frames = frame_features.shape[0]
        tick_freq = 20 if N_frames > 100 else 10
        plt.xticks(np.arange(0, N_frames * 3.2, tick_freq))
        plt.tick_params(bottom=True, top=False, left=True, right=True, labelright=True)
        plt.grid()
        added_step_ids = []
        for si, step_id in enumerate(step_ids):
            step_color = step_colors[step_id.item()]
            # Horizontal bar marking the step's annotated time span, drawn
            # slightly above the largest similarity value.
            plt.plot([sample['step_starts_sec'][si], sample['step_ends_sec'][si]],
                     [descr_clip_similarity.max() + 0.1] * 2, color=step_color)
            # Draw each step's similarity curve only once, even when the
            # step id repeats in the sequence.
            if step_id not in added_step_ids:
                added_step_ids.append(step_id)
                step_id_scores = descr_clip_similarity[np.array(unique_step_ids) == step_id.item()][0]
                # The 3.2 factor presumably converts frame index to
                # seconds -- TODO confirm against the dataset's frame rate.
                plt.plot(np.arange(N_frames) * 3.2, step_id_scores, color=step_color)
        # Distractor activations are plotted for the first sample only.
        if distractor is not None and i == 0:
            distractor_activations = (frame_features @ distractor / gamma_f[i]).detach().cpu().numpy()
            plt.plot(np.arange(N_frames) * 3.2, distractor_activations, color=color_code[-1])
        # For the logit drop cost, show the keep-percentile threshold line.
        if drop_cost == 'logit' and i == 0:
            sim_vec = descr_clip_similarity.reshape([-1])
            k = max([1, int(sim_vec.shape[0] * keep_percentile)])
            baseline_logit = np.sort(sim_vec)[-k]
            drop_threshold = np.ones(N_frames) * baseline_logit
            plt.plot(np.arange(N_frames) * 3.2, drop_threshold, color=color_code[-1])
    # Render the figure into an in-memory PNG and return it as an RGB array.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    plt.close()
    buf.seek(0)
    img = np.array(Image.open(buf).convert('RGB'))
    return img
| [
"io.BytesIO",
"numpy.zeros_like",
"torch.zeros_like",
"models.losses.compute_all_costs",
"matplotlib.pyplot.close",
"numpy.ones",
"PIL.Image.open",
"numpy.sort",
"dp.exact_dp.drop_dtw",
"numpy.arange",
"numpy.array",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.grid",
"matplotlib.py... | [((2938, 2950), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2948, 2950), False, 'import io\n'), ((2955, 2985), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'format': '"""png"""'}), "(buf, format='png')\n", (2966, 2985), True, 'import matplotlib.pyplot as plt\n'), ((2990, 3001), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2999, 3001), True, 'import matplotlib.pyplot as plt\n'), ((5691, 5703), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5701, 5703), False, 'import io\n'), ((5708, 5738), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'format': '"""png"""'}), "(buf, format='png')\n", (5719, 5738), True, 'import matplotlib.pyplot as plt\n'), ((5743, 5754), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5752, 5754), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1249), 'models.losses.compute_all_costs', 'compute_all_costs', (['sample', 'distractor', 'gamma_f[i]'], {'drop_cost_type': 'drop_cost', 'keep_percentile': 'keep_percentile'}), '(sample, distractor, gamma_f[i], drop_cost_type=drop_cost,\n keep_percentile=keep_percentile)\n', (1154, 1249), False, 'from models.losses import compute_all_costs\n'), ((1391, 1421), 'dp.exact_dp.drop_dtw', 'drop_dtw', (['zx_costs', 'drop_costs'], {}), '(zx_costs, drop_costs)\n', (1399, 1421), False, 'from dp.exact_dp import drop_dtw\n'), ((2360, 2439), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'bottom': '(True)', 'top': '(False)', 'left': '(True)', 'right': '(True)', 'labelright': '(True)'}), '(bottom=True, top=False, left=True, right=True, labelright=True)\n', (2375, 2439), True, 'import matplotlib.pyplot as plt\n'), ((2448, 2458), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2456, 2458), True, 'import matplotlib.pyplot as plt\n'), ((4380, 4459), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'bottom': '(True)', 'top': '(False)', 'left': '(True)', 'right': '(True)', 'labelright': '(True)'}), '(bottom=True, top=False, 
left=True, right=True, labelright=True)\n', (4395, 4459), True, 'import matplotlib.pyplot as plt\n'), ((4468, 4478), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4476, 4478), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1470), 'numpy.zeros_like', 'np.zeros_like', (['drop_costs'], {}), '(drop_costs)\n', (1458, 1470), True, 'import numpy as np\n'), ((1699, 1726), 'numpy.zeros_like', 'np.zeros_like', (['frame_labels'], {}), '(frame_labels)\n', (1712, 1726), True, 'import numpy as np\n'), ((3378, 3404), 'torch.zeros_like', 'torch.zeros_like', (['step_ids'], {}), '(step_ids)\n', (3394, 3404), False, 'import torch\n'), ((3657, 3681), 'numpy.sort', 'np.sort', (['unique_step_ids'], {}), '(unique_step_ids)\n', (3664, 3681), True, 'import numpy as np\n'), ((4331, 4370), 'numpy.arange', 'np.arange', (['(0)', '(N_frames * 3.2)', 'tick_freq'], {}), '(0, N_frames * 3.2, tick_freq)\n', (4340, 4370), True, 'import numpy as np\n'), ((3037, 3052), 'PIL.Image.open', 'Image.open', (['buf'], {}), '(buf)\n', (3047, 3052), False, 'from PIL import Image\n'), ((5508, 5524), 'numpy.sort', 'np.sort', (['sim_vec'], {}), '(sim_vec)\n', (5515, 5524), True, 'import numpy as np\n'), ((5559, 5576), 'numpy.ones', 'np.ones', (['N_frames'], {}), '(N_frames)\n', (5566, 5576), True, 'import numpy as np\n'), ((5790, 5805), 'PIL.Image.open', 'Image.open', (['buf'], {}), '(buf)\n', (5800, 5805), False, 'from PIL import Image\n'), ((5238, 5257), 'numpy.arange', 'np.arange', (['N_frames'], {}), '(N_frames)\n', (5247, 5257), True, 'import numpy as np\n'), ((5615, 5634), 'numpy.arange', 'np.arange', (['N_frames'], {}), '(N_frames)\n', (5624, 5634), True, 'import numpy as np\n'), ((5006, 5025), 'numpy.arange', 'np.arange', (['N_frames'], {}), '(N_frames)\n', (5015, 5025), True, 'import numpy as np\n'), ((4933, 4958), 'numpy.array', 'np.array', (['unique_step_ids'], {}), '(unique_step_ids)\n', (4941, 4958), True, 'import numpy as np\n')] |
"""This runs unit tests for functions that can be found in GeneticSearch.py."""
import pytest
import numpy as np
from see import Segmentors
from see import GeneticSearch
from see import base_classes
def test_twoPointCopy():
    """Unit test for the twoPointCopy function.

    Verifies that two-point crossover (with the deterministic flag set)
    returns the expected pair of individuals.
    """
    parent_a = ['FB', 0, 0, 984, 0.09, 92, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                (1, 2), 0, 'checkerboard', 'checkerboard', 0, 0, 0, 0, 0, 0]
    parent_b = ['CT', 0, 0, 0, 0, 0, 0, 0, 8, 10, 12, 0, 0, 0, 0,
                (1, 2), 0, 'checkerboard', 'checkerboard', 0, 0, 0, 0, 0, 0]
    child_a, child_b = GeneticSearch.twoPointCopy(parent_a, parent_b, True)
    assert child_a == ['FB', 0, 0, 984, 0.09, 92, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       (1, 2), 0, 'checkerboard', 'checkerboard', 0, 0, 0, 0, 0, 0]
    assert child_b == ['CT', 0, 0, 0, 0, 0, 0, 0, 8, 10, 12, 0, 0, 0, 0,
                       (1, 2), 0, 'checkerboard', 'checkerboard', 0, 0, 0, 0, 0, 0]
def test_skimageCrossRandom():
    """Unit test for the skimageCrossRandom function.

    Verifies that crossover between two individuals swaps exactly the
    expected genes.
    """
    parent_a = ['FB', 0, 0, 984, 0.09, 92, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                (1, 2), 0, 'checkerboard', 'checkerboard', 0, 0, 0, 0, 0, 0]
    parent_b = ['CT', 0, 0, 0, 0, 0, 0, 0, 8, 10, 12, 0, 0, 0, 0,
                (1, 2), 0, 'checkerboard', 'checkerboard', 0, 0, 0, 0, 0, 0]
    child_a, child_b = GeneticSearch.skimageCrossRandom(parent_a, parent_b)
    assert child_a == ['FB', 0, 0, 984, 0.09, 92, 0, 0, 8, 0, 0, 0, 0, 0, 0,
                       (1, 2), 0, 'checkerboard', 'checkerboard', 0, 0, 0, 0, 0, 0]
    assert child_b == ['CT', 0, 0, 0, 0, 0, 0, 0, 0, 10, 12, 0, 0, 0, 0,
                       (1, 2), 0, 'checkerboard', 'checkerboard', 0, 0, 0, 0, 0, 0]
def test_mutate_individual():
    """Unit test for GeneticSearch.mutate applied to a single individual.

    Checks the output type and checks the test individual to see if
    mutation took place successfully.

    NOTE: this test used to be named ``test_mutate``, which is shadowed by
    a later definition of the same name in this module, so pytest never
    collected it; it has been renamed so both tests run.
    """
    copy_child = ['FB', 0, 0, 984, 0.09, 92, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
        (1, 2), 0, "checkerboard", "checkerboard", 0, 0, 0, 0, 0, 0]
    all_vals = []
    params = Segmentors.parameters()
    for key in params.pkeys:
        # eval() is acceptable here: the range strings come from the
        # package's own parameter definitions, not external input.
        all_vals.append(eval(params.ranges[key]))
    assert isinstance(GeneticSearch.mutate(copy_child, all_vals, 0.5, True), list)
    assert GeneticSearch.mutate(copy_child, all_vals, 0.5, True) ==\
        ['FB', 1390, 0.173, 984, 0.09, 9927, 587, 0, 0.55, 0, 0, 0, 0, 1000, 0,\
         (1, 2), 0, 'disk', 'checkerboard', 9, 2907, -47, (0.0, 0.0, 0.0), 0, 0]
def test_makeToolbox():
    """Unit test for the makeToolbox function.

    Checks that the generated toolbox is configured to build populations
    of the requested size.
    """
    toolbox = GeneticSearch.makeToolbox(10, Segmentors.segmentor)
    assert toolbox.population.keywords['n'] == 10
def test_newpopulation():
    """Unit test for the newpopulation function.

    Checks the type and length of a freshly created population.
    """
    # Synthetic 20x20 RGB image with a white square and its matching mask.
    img = np.zeros((20, 20, 3))
    img[4:10, 4:10] = 1
    data = base_classes.pipedata()
    data.img = img
    data.gmask = img[:, :, 0]
    data.fitness = 2
    evolver = GeneticSearch.Evolver(Segmentors.segmentor, data, pop_size=10)
    population = evolver.tool.population()
    assert isinstance(population, list)
    assert len(population) == 10
# def test_popfitness():
# """Unit test for popfitness function. Checks the type and length of the fitness
# values and population."""
# img = np.zeros((20, 20, 3))
# img[4:10, 4:10, :] = 1
# mask = img[:, :, 0]
# evolv = GeneticSearch.Evolver(img, mask)
# fits, tpop = evolv.popfitness(evolv.tool.population())
# assert isinstance(fits, list)
# assert len(fits) == 10
# assert isinstance(tpop, list)
# assert len(tpop) == 10
def test_mutate():
    """Unit test for the Evolver.mutate method.

    Checks the type and length of the population after mutation.
    """
    # Synthetic 20x20 RGB image with a white square and its matching mask.
    img = np.zeros((20, 20, 3))
    img[4:10, 4:10] = 1
    data = base_classes.pipedata()
    data.img = img
    data.gmask = img[:, :, 0]
    evolver = GeneticSearch.Evolver(Segmentors.segmentor, data, pop_size=10)
    mutated = evolver.mutate(evolver.tool.population())
    assert isinstance(mutated, list)
    assert len(mutated) == 10
def test_nextgen():
    """Unit test for the nextgen function.

    Checks the type and length of the new population, and that the
    population actually changed.
    """
    # Synthetic 20x20 RGB image with a white square and its matching mask.
    img = np.zeros((20, 20, 3))
    img[4:10, 4:10] = 1
    data = base_classes.pipedata()
    data.img = img
    data.gmask = img[:, :, 0]
    evolver = GeneticSearch.Evolver(Segmentors.segmentor, data, pop_size=10)
    initial = evolver.tool.population()
    mutated = evolver.mutate(initial)
    assert isinstance(mutated, list)
    assert len(mutated) == 10
    assert mutated != initial
def test_run():
    """Unit test for the run function.

    Checks the type and length of the final population, and that the
    population evolved away from its starting state.
    """
    # Synthetic 20x20 RGB image with a white square and its matching mask.
    img = np.zeros((20, 20, 3))
    img[4:10, 4:10] = 1
    data = base_classes.pipedata()
    data.img = img
    data.gmask = img[:, :, 0]
    data.fitness = 2
    evolver = GeneticSearch.Evolver(Segmentors.segmentor, data, pop_size=10)
    initial = evolver.tool.population()
    evolved = evolver.run()
    assert isinstance(evolved, list)
    assert len(evolved) == 10
    assert evolved != initial
| [
"see.GeneticSearch.twoPointCopy",
"see.GeneticSearch.makeToolbox",
"see.Segmentors.parameters",
"numpy.zeros",
"see.GeneticSearch.mutate",
"see.GeneticSearch.Evolver",
"see.GeneticSearch.skimageCrossRandom",
"see.base_classes.pipedata"
] | [((624, 666), 'see.GeneticSearch.twoPointCopy', 'GeneticSearch.twoPointCopy', (['np1', 'np2', '(True)'], {}), '(np1, np2, True)\n', (650, 666), False, 'from see import GeneticSearch\n'), ((1393, 1435), 'see.GeneticSearch.skimageCrossRandom', 'GeneticSearch.skimageCrossRandom', (['np1', 'np2'], {}), '(np1, np2)\n', (1425, 1435), False, 'from see import GeneticSearch\n'), ((2046, 2069), 'see.Segmentors.parameters', 'Segmentors.parameters', ([], {}), '()\n', (2067, 2069), False, 'from see import Segmentors\n'), ((2819, 2840), 'numpy.zeros', 'np.zeros', (['(20, 20, 3)'], {}), '((20, 20, 3))\n', (2827, 2840), True, 'import numpy as np\n'), ((2908, 2931), 'see.base_classes.pipedata', 'base_classes.pipedata', ([], {}), '()\n', (2929, 2931), False, 'from see import base_classes\n'), ((3006, 3068), 'see.GeneticSearch.Evolver', 'GeneticSearch.Evolver', (['Segmentors.segmentor', 'data'], {'pop_size': '(10)'}), '(Segmentors.segmentor, data, pop_size=10)\n', (3027, 3068), False, 'from see import GeneticSearch\n'), ((3777, 3798), 'numpy.zeros', 'np.zeros', (['(20, 20, 3)'], {}), '((20, 20, 3))\n', (3785, 3798), True, 'import numpy as np\n'), ((3866, 3889), 'see.base_classes.pipedata', 'base_classes.pipedata', ([], {}), '()\n', (3887, 3889), False, 'from see import base_classes\n'), ((3943, 4005), 'see.GeneticSearch.Evolver', 'GeneticSearch.Evolver', (['Segmentors.segmentor', 'data'], {'pop_size': '(10)'}), '(Segmentors.segmentor, data, pop_size=10)\n', (3964, 4005), False, 'from see import GeneticSearch\n'), ((4288, 4309), 'numpy.zeros', 'np.zeros', (['(20, 20, 3)'], {}), '((20, 20, 3))\n', (4296, 4309), True, 'import numpy as np\n'), ((4377, 4400), 'see.base_classes.pipedata', 'base_classes.pipedata', ([], {}), '()\n', (4398, 4400), False, 'from see import base_classes\n'), ((4454, 4516), 'see.GeneticSearch.Evolver', 'GeneticSearch.Evolver', (['Segmentors.segmentor', 'data'], {'pop_size': '(10)'}), '(Segmentors.segmentor, data, pop_size=10)\n', (4475, 4516), False, 'from 
see import GeneticSearch\n'), ((4826, 4847), 'numpy.zeros', 'np.zeros', (['(20, 20, 3)'], {}), '((20, 20, 3))\n', (4834, 4847), True, 'import numpy as np\n'), ((4915, 4938), 'see.base_classes.pipedata', 'base_classes.pipedata', ([], {}), '()\n', (4936, 4938), False, 'from see import base_classes\n'), ((5013, 5075), 'see.GeneticSearch.Evolver', 'GeneticSearch.Evolver', (['Segmentors.segmentor', 'data'], {'pop_size': '(10)'}), '(Segmentors.segmentor, data, pop_size=10)\n', (5034, 5075), False, 'from see import GeneticSearch\n'), ((2171, 2224), 'see.GeneticSearch.mutate', 'GeneticSearch.mutate', (['copy_child', 'all_vals', '(0.5)', '(True)'], {}), '(copy_child, all_vals, 0.5, True)\n', (2191, 2224), False, 'from see import GeneticSearch\n'), ((2243, 2296), 'see.GeneticSearch.mutate', 'GeneticSearch.mutate', (['copy_child', 'all_vals', '(0.5)', '(True)'], {}), '(copy_child, all_vals, 0.5, True)\n', (2263, 2296), False, 'from see import GeneticSearch\n'), ((2596, 2647), 'see.GeneticSearch.makeToolbox', 'GeneticSearch.makeToolbox', (['(10)', 'Segmentors.segmentor'], {}), '(10, Segmentors.segmentor)\n', (2621, 2647), False, 'from see import GeneticSearch\n')] |
import os
import numpy as np
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import pandas as pd
import argparse
from tqdm import trange, tqdm
def make_image_file(coord_groups, output_path: str, line_width, dpi):
    """Render stroke coordinate groups as a black line image with no margins.

    Args:
        coord_groups: iterable of (N, 2) arrays of x/y coordinates, one per
            stroke.
        output_path: file path the image is written to.
        line_width: matplotlib line width used for every stroke.
        dpi: output resolution passed to savefig, or None for the default.
    """
    ax = plt.gca()
    ax.set_aspect('equal', adjustable='box')
    # Ink coordinates grow downward, while matplotlib's y axis grows upward.
    ax.invert_yaxis()
    ax.set_axis_off()
    # Strip all axes, ticks and margins so the strokes fill the whole canvas.
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())
    for group in coord_groups:
        plt.plot(group[:, 0], group[:, 1], linewidth=line_width, c='black')
    # Only pass dpi when explicitly requested (deduplicates the two
    # previously separate savefig branches).
    save_kwargs = {'bbox_inches': 'tight', 'pad_inches': 0}
    if dpi is not None:
        save_kwargs['dpi'] = dpi
    plt.savefig(output_path, **save_kwargs)
    plt.close()
def convert(ink_files, out_img_dir, out_label_path, line_width=2, dpi=None):
    """Convert InkML files into PNG stroke images plus a TSV label file.

    Each <traceGroup> in every InkML file becomes one image named
    "<inkml basename>_<traceGroup id>.png" in *out_img_dir*, and one row
    (id, label) in the tab-separated file at *out_label_path*.

    Args:
        ink_files: iterable of paths to InkML files.
        out_img_dir: directory the PNG images are written to (created if
            missing).
        out_label_path: destination path of the label TSV.
        line_width: stroke line width forwarded to make_image_file.
        dpi: output resolution forwarded to make_image_file, or None.
    """
    os.makedirs(out_img_dir, exist_ok=True)
    rows = []
    for _, inkml_file in zip(trange(len(ink_files), desc='Progress'), ink_files):
        tree = ET.parse(inkml_file)
        root = tree.getroot()
        for sample in root.findall('traceGroup'):
            # Sample id = "<inkml basename>_<traceGroup id>".
            sample_id = os.path.splitext(os.path.basename(inkml_file))[0] + '_' + sample.get('id')
            sample_label = sample.find('.//Tg_Truth').text
            rows.append({'id': sample_id, 'label': sample_label})
            coord_groups = []
            for trace_tag in sample.findall('trace'):
                coord_group = []
                for coord_text in trace_tag.text.split(','):
                    if coord_text == '':
                        continue
                    coords = coord_text.split(' ')
                    coords = np.array([int(coord) for coord in coords if coord != ''])
                    assert len(coords) == 2
                    coord_group.append(coords)
                coord_groups.append(np.stack(coord_group))
            make_image_file(coord_groups, os.path.join(out_img_dir, sample_id) + '.png', line_width, dpi)
    # Build the DataFrame once at the end: DataFrame.append was deprecated
    # in pandas 1.4 and removed in 2.0, and appending row-by-row is O(n^2).
    annotations = pd.DataFrame(rows, columns=['id', 'label'])
    annotations.to_csv(out_label_path, sep='\t')
def convert_label_only(ink_files, out_label_path):
    """Extract only the ground-truth labels from InkML files into a CSV.

    Writes a tab-separated table with columns ``id`` and ``label``; each id
    combines the InkML file stem with the ``traceGroup`` id, matching the
    image names produced by ``convert``.

    Parameters
    ----------
    ink_files : list of str
        Paths of InkML files to scan.
    out_label_path : str
        Destination CSV (tab-separated) for the labels.
    """
    # Build the frame in one go: DataFrame.append was deprecated in
    # pandas 1.4 and removed in 2.0, and per-row appends are quadratic.
    records = []
    for inkml_file in tqdm(ink_files, desc='Progress'):
        root = ET.parse(inkml_file).getroot()
        file_stem = os.path.splitext(os.path.basename(inkml_file))[0]
        for sample in root.findall('traceGroup'):
            sample_id = file_stem + '_' + sample.get('id')
            sample_label = sample.find('.//Tg_Truth').text
            records.append({'id': sample_id, 'label': sample_label})
    annotations = pd.DataFrame(records, columns=['id', 'label'])
    annotations.to_csv(out_label_path, sep='\t')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(type=str, dest='level', choices=['word', 'line', 'paragraph'])
    parser.add_argument('-w', '--line_width', type=float, dest='line_width', default=2)
    parser.add_argument('-dpi', '--dpi', type=int, dest='dpi', default=None)
    parser.add_argument('--label_only', action='store_true', dest='label_only')
    args = parser.parse_args()

    # config paths
    data_dir = './data'
    level = args.level
    inkml_dir = os.path.join(data_dir, f'InkData_{level}')
    out_label_train = os.path.join(data_dir, f'train_{level}.csv')
    out_label_validation = os.path.join(data_dir, f'validation_{level}.csv')
    out_label_test = os.path.join(data_dir, f'test_{level}.csv')
    out_label_all = os.path.join(data_dir, f'all_{level}.csv')
    icfhr_datasplit_dir = os.path.join(data_dir, 'VNOnDB_ICFHR2018_dataSplit')

    def _read_split(split_name):
        """Return the InkML paths listed (one file name per line) in a split file."""
        with open(os.path.join(icfhr_datasplit_dir, split_name)) as f:
            return [os.path.join(inkml_dir, line.rstrip()) for line in f]

    train_ink_files = _read_split('train_set.txt')
    val_ink_files = _read_split('validation_set.txt')
    test_ink_files = _read_split('test_set.txt')

    print('number train_ink_files:', len(train_ink_files))
    print('number val_ink_files:', len(val_ink_files))
    print('number test_ink_files:', len(test_ink_files))

    if args.label_only:
        convert_label_only(train_ink_files, out_label_train)
        convert_label_only(val_ink_files, out_label_validation)
        convert_label_only(test_ink_files, out_label_test)
    else:
        out_img_train = os.path.join(data_dir, f'train_{level}')
        out_img_validation = os.path.join(data_dir, f'validation_{level}')
        out_img_test = os.path.join(data_dir, f'test_{level}')
        # makedirs(exist_ok=True) replaces the racy exists()/mkdir() pattern
        for img_dir in (out_img_train, out_img_validation, out_img_test):
            os.makedirs(img_dir, exist_ok=True)
        line_width = args.line_width
        dpi = args.dpi
        convert(train_ink_files, out_img_train, out_label_train, line_width, dpi)
        convert(val_ink_files, out_img_validation, out_label_validation, line_width, dpi)
        convert(test_ink_files, out_img_test, out_label_test, line_width, dpi)
    # combined label file covering all three splits (runs in both modes)
    convert_label_only(train_ink_files + val_ink_files + test_ink_files, out_label_all)
"pandas.DataFrame",
"matplotlib.pyplot.NullLocator",
"os.mkdir",
"xml.etree.ElementTree.parse",
"numpy.stack",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"os.path.basename",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplots_adjust",
"... | [((348, 421), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(1)', 'bottom': '(0)', 'right': '(1)', 'left': '(0)', 'hspace': '(0)', 'wspace': '(0)'}), '(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n', (367, 421), True, 'import matplotlib.pyplot as plt\n'), ((824, 835), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (833, 835), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1048), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['id', 'label']"}), "(columns=['id', 'label'])\n", (1023, 1048), True, 'import pandas as pd\n'), ((2312, 2349), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['id', 'label']"}), "(columns=['id', 'label'])\n", (2324, 2349), True, 'import pandas as pd\n'), ((2937, 2962), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2960, 2962), False, 'import argparse\n'), ((3411, 3453), 'os.path.join', 'os.path.join', (['data_dir', 'f"""InkData_{level}"""'], {}), "(data_dir, f'InkData_{level}')\n", (3423, 3453), False, 'import os\n'), ((3476, 3520), 'os.path.join', 'os.path.join', (['data_dir', 'f"""train_{level}.csv"""'], {}), "(data_dir, f'train_{level}.csv')\n", (3488, 3520), False, 'import os\n'), ((3548, 3597), 'os.path.join', 'os.path.join', (['data_dir', 'f"""validation_{level}.csv"""'], {}), "(data_dir, f'validation_{level}.csv')\n", (3560, 3597), False, 'import os\n'), ((3619, 3662), 'os.path.join', 'os.path.join', (['data_dir', 'f"""test_{level}.csv"""'], {}), "(data_dir, f'test_{level}.csv')\n", (3631, 3662), False, 'import os\n'), ((3683, 3725), 'os.path.join', 'os.path.join', (['data_dir', 'f"""all_{level}.csv"""'], {}), "(data_dir, f'all_{level}.csv')\n", (3695, 3725), False, 'import os\n'), ((3753, 3805), 'os.path.join', 'os.path.join', (['data_dir', '"""VNOnDB_ICFHR2018_dataSplit"""'], {}), "(data_dir, 'VNOnDB_ICFHR2018_dataSplit')\n", (3765, 3805), False, 'import os\n'), ((3822, 3872), 'os.path.join', 'os.path.join', (['icfhr_datasplit_dir', 
'"""train_set.txt"""'], {}), "(icfhr_datasplit_dir, 'train_set.txt')\n", (3834, 3872), False, 'import os\n'), ((3887, 3942), 'os.path.join', 'os.path.join', (['icfhr_datasplit_dir', '"""validation_set.txt"""'], {}), "(icfhr_datasplit_dir, 'validation_set.txt')\n", (3899, 3942), False, 'import os\n'), ((3958, 4007), 'os.path.join', 'os.path.join', (['icfhr_datasplit_dir', '"""test_set.txt"""'], {}), "(icfhr_datasplit_dir, 'test_set.txt')\n", (3970, 4007), False, 'import os\n'), ((460, 477), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (475, 477), True, 'import matplotlib.pyplot as plt\n'), ((517, 534), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (532, 534), True, 'import matplotlib.pyplot as plt\n'), ((576, 643), 'matplotlib.pyplot.plot', 'plt.plot', (['group[:, 0]', 'group[:, 1]'], {'linewidth': 'line_width', 'c': '"""black"""'}), "(group[:, 0], group[:, 1], linewidth=line_width, c='black')\n", (584, 643), True, 'import matplotlib.pyplot as plt\n'), ((673, 732), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(output_path, bbox_inches='tight', pad_inches=0)\n", (684, 732), True, 'import matplotlib.pyplot as plt\n'), ((751, 819), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)', 'dpi': 'dpi'}), "(output_path, bbox_inches='tight', pad_inches=0, dpi=dpi)\n", (762, 819), True, 'import matplotlib.pyplot as plt\n'), ((925, 952), 'os.path.exists', 'os.path.exists', (['out_img_dir'], {}), '(out_img_dir)\n', (939, 952), False, 'import os\n'), ((962, 983), 'os.mkdir', 'os.mkdir', (['out_img_dir'], {}), '(out_img_dir)\n', (970, 983), False, 'import os\n'), ((1179, 1199), 'xml.etree.ElementTree.parse', 'ET.parse', (['inkml_file'], {}), '(inkml_file)\n', (1187, 1199), True, 'import xml.etree.ElementTree as ET\n'), ((2480, 2500), 'xml.etree.ElementTree.parse', 'ET.parse', (['inkml_file'], {}), 
'(inkml_file)\n', (2488, 2500), True, 'import xml.etree.ElementTree as ET\n'), ((4754, 4794), 'os.path.join', 'os.path.join', (['data_dir', 'f"""train_{level}"""'], {}), "(data_dir, f'train_{level}')\n", (4766, 4794), False, 'import os\n'), ((4824, 4869), 'os.path.join', 'os.path.join', (['data_dir', 'f"""validation_{level}"""'], {}), "(data_dir, f'validation_{level}')\n", (4836, 4869), False, 'import os\n'), ((4893, 4932), 'os.path.join', 'os.path.join', (['data_dir', 'f"""test_{level}"""'], {}), "(data_dir, f'test_{level}')\n", (4905, 4932), False, 'import os\n'), ((238, 247), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (245, 247), True, 'import matplotlib.pyplot as plt\n'), ((290, 299), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (297, 299), True, 'import matplotlib.pyplot as plt\n'), ((319, 328), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (326, 328), True, 'import matplotlib.pyplot as plt\n'), ((4949, 4978), 'os.path.exists', 'os.path.exists', (['out_img_train'], {}), '(out_img_train)\n', (4963, 4978), False, 'import os\n'), ((4992, 5015), 'os.mkdir', 'os.mkdir', (['out_img_train'], {}), '(out_img_train)\n', (5000, 5015), False, 'import os\n'), ((5031, 5065), 'os.path.exists', 'os.path.exists', (['out_img_validation'], {}), '(out_img_validation)\n', (5045, 5065), False, 'import os\n'), ((5079, 5107), 'os.mkdir', 'os.mkdir', (['out_img_validation'], {}), '(out_img_validation)\n', (5087, 5107), False, 'import os\n'), ((5123, 5151), 'os.path.exists', 'os.path.exists', (['out_img_test'], {}), '(out_img_test)\n', (5137, 5151), False, 'import os\n'), ((5165, 5187), 'os.mkdir', 'os.mkdir', (['out_img_test'], {}), '(out_img_test)\n', (5173, 5187), False, 'import os\n'), ((426, 435), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (433, 435), True, 'import matplotlib.pyplot as plt\n'), ((483, 492), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (490, 492), True, 'import matplotlib.pyplot as plt\n'), ((2063, 2084), 
'numpy.stack', 'np.stack', (['coord_group'], {}), '(coord_group)\n', (2071, 2084), True, 'import numpy as np\n'), ((2128, 2164), 'os.path.join', 'os.path.join', (['out_img_dir', 'sample_id'], {}), '(out_img_dir, sample_id)\n', (2140, 2164), False, 'import os\n'), ((1322, 1350), 'os.path.basename', 'os.path.basename', (['inkml_file'], {}), '(inkml_file)\n', (1338, 1350), False, 'import os\n'), ((2623, 2651), 'os.path.basename', 'os.path.basename', (['inkml_file'], {}), '(inkml_file)\n', (2639, 2651), False, 'import os\n')] |
"""
Source: https://github.com/arnaudvl/differentiable-neural-computer
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Concatenate, Dense, LSTM
from typing import Union
class DNC(tf.keras.Model):
    """Differentiable Neural Computer (Graves et al., Nature 2016).

    An LSTM controller coupled to an external memory matrix through
    differentiable read/write heads: content-based lookup, dynamic memory
    allocation via a usage vector, and a temporal link matrix that allows
    reading memory rows in (reverse) order of writing.

    NOTE(review): state tensors (M, usage, L, W_read, ...) are stored as
    plain attributes and mutated in ``step``, so a single instance is
    stateful across calls — presumably intended for batch size 1.
    """
    def __init__(
        self,
        output_dim: int,
        memory_shape: tuple = (100, 20),
        n_read: int = 3,
        name: str = "dnc",
    ) -> None:
        """
        Initialize DNC object.
        Parameters
        ----------
        output_dim
            Size of output vector.
        memory_shape
            Shape of memory matrix (rows, cols).
        n_read
            Number of read heads.
        name
            Name of DNC.
        """
        super(DNC, self).__init__(name=name)
        # define output data size
        self.output_dim = output_dim  # Y
        # define size of memory matrix
        self.N, self.W = memory_shape  # N, W
        # define number of read heads
        self.R = n_read  # R
        # size of output vector from controller that defines interactions with
        # memory matrix:
        # R read keys + R read strengths + write key + write strength + erase
        # vector +
        # write vector + R free gates + allocation gate + write gate + R read
        # modes
        self.interface_dim = self.R * self.W + 3 * self.W + 5 * self.R + 3  # I
        # neural net output = output of controller + interface vector with
        # memory
        self.controller_dim = self.output_dim + self.interface_dim  # Y+I
        # initialize controller output and interface vector with gaussian
        # normal
        self.output_v = tf.random.truncated_normal(
            [1, self.output_dim], stddev=0.1
        )  # [1,Y]
        self.interface = tf.random.truncated_normal(
            [1, self.interface_dim], stddev=0.1
        )  # [1,I]
        # initialize memory matrix with zeros
        self.M = tf.zeros(memory_shape)  # [N,W]
        # usage vector records which locations in the memory are used and
        # which are free
        self.usage = tf.fill([self.N, 1], 1e-6)  # [N,1]
        # temporal link matrix L[i,j] records to which degree location i was
        # written to after j
        self.L = tf.zeros([self.N, self.N])  # [N,N]
        # precedence vector determines degree to which a memory row was
        # written to at t-1
        self.W_precedence = tf.zeros([self.N, 1])  # [N,1]
        # initialize R read weights and vectors and write weights
        # (small epsilon instead of 0 keeps early normalizations stable)
        self.W_read = tf.fill([self.N, self.R], 1e-6)  # [N,R]
        self.W_write = tf.fill([self.N, 1], 1e-6)  # [N,1]
        self.read_v = tf.fill([self.R, self.W], 1e-6)  # [R,W]
        # controller variables
        # initialize controller hidden state
        self.h = tf.Variable(
            tf.random.truncated_normal([1, self.controller_dim], stddev=0.1),
            name="dnc_h",
        )  # [1,Y+I]
        self.c = tf.Variable(
            tf.random.truncated_normal([1, self.controller_dim], stddev=0.1),
            name="dnc_c",
        )  # [1,Y+I]
        # initialise Dense and LSTM layers of the controller
        self.dense = Dense(self.W, activation=None)
        self.lstm = LSTM(
            self.controller_dim,
            return_sequences=False,
            return_state=True,
            name="dnc_controller",
        )
        # define and initialize weights for controller output and interface
        # vectors
        self.W_output = tf.Variable(  # [Y+I,Y]
            tf.random.truncated_normal(
                [self.controller_dim, self.output_dim], stddev=0.1
            ),
            name="dnc_net_output_weights",
        )
        self.W_interface = tf.Variable(  # [Y+I,I]
            tf.random.truncated_normal(
                [self.controller_dim, self.interface_dim], stddev=0.1
            ),
            name="dnc_interface_weights",
        )
        # output y = v + W_read_out[r(1), ..., r(R)]
        self.W_read_out = tf.Variable(  # [R*W,Y]
            tf.random.truncated_normal(
                [self.R * self.W, self.output_dim], stddev=0.1
            ),
            name="dnc_read_vector_weights",
        )
    def content_lookup(self, key: tf.Tensor, strength: tf.Tensor) -> tf.Tensor:
        """
        Attention mechanism: content based addressing to read from and write
        to the memory.
        Params
        ------
        key
            Key vector emitted by the controller and used to calculate
            row-by-row cosine similarity with the memory matrix.
        strength
            Strength scalar attached to each key vector (1x1 or 1xR).
        Returns
        -------
        Similarity measure for each row in the memory used by the read heads
        for associative
        recall or by the write head to modify a vector in memory.
        """
        # The l2 norm applied to each key and each row in the memory matrix
        norm_mem = tf.nn.l2_normalize(self.M, 1)  # [N,W]
        # [1,W] for write or [R,W] for read
        norm_key = tf.nn.l2_normalize(key, 1)
        # get similarity measure between both vectors, transpose before
        # multiplication
        # write: [N*W]*[W*1] -> [N*1]
        # read: [N*W]*[W*R] -> [N,R]
        sim = tf.matmul(norm_mem, norm_key, transpose_b=True)
        # softmax over rows turns similarities into address weightings
        return tf.nn.softmax(sim * strength, 0)  # [N,1] or [N,R]
    def allocation_weighting(self) -> tf.Tensor:
        """
        Memory needs to be freed up and allocated in a differentiable way.
        The usage vector shows how much each memory row is used.
        Unused rows can be written to. Usage of a row increases if
        we write to it and can decrease if we read from it, depending on the
        free gates.
        Allocation weights are then derived from the usage vector.
        Returns
        -------
        Allocation weights for each row in the memory.
        """
        # sort usage vector in ascending order and keep original indices of
        # sorted usage vector (top_k of the negated usage = ascending sort)
        sorted_usage, free_list = tf.nn.top_k(
            -1 * tf.transpose(self.usage), k=self.N
        )
        sorted_usage *= -1
        cumprod = tf.math.cumprod(sorted_usage, axis=1, exclusive=True)
        unorder = (1 - sorted_usage) * cumprod
        W_alloc = tf.zeros([self.N])
        i = tf.constant(np.identity(self.N, dtype=np.float32))
        # for each usage vec: scatter the allocation weight back to the
        # row's original position using one-hot rows of the identity
        for pos, idx in enumerate(tf.unstack(free_list[0])):
            # flatten
            m = tf.squeeze(tf.slice(i, [idx, 0], [1, -1]))
            # add to allocation weight matrix
            W_alloc += m * unorder[0, pos]
        # return the allocation weighting for each row in memory
        return tf.reshape(W_alloc, [self.N, 1])
    def controller(self, x: tf.Tensor) -> None:
        """ Update the hidden state of the LSTM controller. """
        # flatten input and pass through dense layer to avoid shape mismatch
        x = tf.reshape(x, [1, -1])
        x = self.dense(x)  # [1,W]
        # concatenate input with read vectors
        x_in = tf.expand_dims(
            Concatenate(axis=0)([x, self.read_v]), axis=0
        )  # [1,R+1,W]
        # LSTM controller
        initial_state = [self.h, self.c]
        _, self.h, self.c = self.lstm(x_in, initial_state=initial_state)
    def partition_interface(self):
        """
        Partition the interface vector in the read and write keys and
        strengths, the free, allocation and write gates, read modes and
        erase and write vectors.

        Returns a 10-tuple: (k_read, b_read, k_write, b_write, erase,
        write_v, free_gates, alloc_gate, write_gate, read_modes).
        """
        # convert interface vector into a set of read write vectors
        # (segment ids over the I interface elements, in layout order)
        partition = tf.constant(
            [
                [0] * (self.R * self.W)
                + [1] * self.R
                + [2] * self.W
                + [3]
                + [4] * self.W
                + [5] * self.W
                + [6] * self.R
                + [7]
                + [8]
                + [9] * (self.R * 3)
            ],
            dtype=tf.int32,
        )
        (
            k_read,
            b_read,
            k_write,
            b_write,
            erase,
            write_v,
            free_gates,
            alloc_gate,
            write_gate,
            read_modes,
        ) = tf.dynamic_partition(self.interface, partition, 10)
        # R read keys and strengths
        k_read = tf.reshape(k_read, [self.R, self.W])  # [R,W]
        b_read = 1 + tf.nn.softplus(tf.expand_dims(b_read, 0))  # [1,R]
        # write key, strength, erase and write vectors
        k_write = tf.expand_dims(k_write, 0)  # [1,W]
        b_write = 1 + tf.nn.softplus(tf.expand_dims(b_write, 0))  # [1,1]
        erase = tf.nn.sigmoid(tf.expand_dims(erase, 0))  # [1,W]
        write_v = tf.expand_dims(write_v, 0)  # [1,W]
        # the degree to which locations at read heads will be freed
        free_gates = tf.nn.sigmoid(tf.expand_dims(free_gates, 0))  # [1,R]
        # the fraction of writing that is being allocated in a new location
        alloc_gate = tf.reshape(tf.nn.sigmoid(alloc_gate), [1])  # 1
        # the amount of information to be written to memory
        write_gate = tf.reshape(tf.nn.sigmoid(write_gate), [1])  # 1
        # softmax distribution over the 3 read modes (forward, content lookup,
        # backward)
        read_modes = tf.reshape(read_modes, [3, self.R])  # [3,R]
        read_modes = tf.nn.softmax(read_modes, axis=0)
        return (
            k_read,
            b_read,
            k_write,
            b_write,
            erase,
            write_v,
            free_gates,
            alloc_gate,
            write_gate,
            read_modes,
        )
    def write(
        self,
        free_gates: tf.Tensor,
        alloc_gate: tf.Tensor,
        write_gate: tf.Tensor,
        k_write: tf.Tensor,
        b_write: tf.Tensor,
        erase: tf.Tensor,
        write_v: tf.Tensor,
    ) -> None:
        """ Write to the memory matrix (updates usage, W_write and M). """
        # memory retention vector represents by how much each location will
        # not be freed by the free gates
        retention = tf.reduce_prod(1 - free_gates * self.W_read, axis=1)
        retention = tf.reshape(retention, [self.N, 1])  # [N,1]
        # update usage vector which is used to dynamically allocate memory
        self.usage = (
            self.usage + self.W_write - self.usage * self.W_write
        ) * retention
        # compute allocation weights using dynamic memory allocation
        W_alloc = self.allocation_weighting()  # [N,1]
        # apply content lookup for the write vector to figure out where to
        # write to
        W_lookup = self.content_lookup(k_write, b_write)
        W_lookup = tf.reshape(W_lookup, [self.N, 1])  # [N,1]
        # define our write weights now that we know how much space to allocate
        # for them and where to write to
        self.W_write = write_gate * (
            alloc_gate * W_alloc + (1 - alloc_gate) * W_lookup
        )
        # update memory matrix: erase memory and write using the write weights
        # and vector
        self.M = self.M * (1 - tf.matmul(self.W_write, erase)) + tf.matmul(
            self.W_write, write_v
        )
    def read(
        self, k_read: tf.Tensor, b_read: tf.Tensor, read_modes: tf.Tensor
    ) -> None:
        """ Read from the memory matrix (updates L, W_precedence, W_read, read_v). """
        # update memory link matrix used later for the forward and backward
        # read modes
        W_write_cast = tf.matmul(self.W_write, tf.ones([1, self.N]))  # [N,N]
        self.L = (
            1 - W_write_cast - tf.transpose(W_write_cast)
        ) * self.L + tf.matmul(
            self.W_write, self.W_precedence, transpose_b=True
        )  # [N,N]
        # zero the diagonal: a row is never temporally linked to itself
        self.L *= tf.ones([self.N, self.N]) - tf.constant(
            np.identity(self.N, dtype=np.float32)
        )
        # update precedence vector which determines degree to which a memory
        # row was written to at t-1
        self.W_precedence = (
            1 - tf.reduce_sum(self.W_write, axis=0)
        ) * self.W_precedence + self.W_write
        # apply content lookup for the read vector(s) to figure out where to
        # read from
        W_lookup = self.content_lookup(k_read, b_read)
        W_lookup = tf.reshape(W_lookup, [self.N, self.R])  # [N,R]
        # compute forward and backward read weights using the link matrix
        # forward weights recall information written in sequence and backward
        # weights in reverse
        W_fwd = tf.matmul(self.L, self.W_read)  # [N,N]*[N,R] -> [N,R]
        W_bwd = tf.matmul(self.L, self.W_read, transpose_a=True)  # [N,R]
        # 3 modes: forward, backward and content lookup
        fwd_mode = read_modes[2] * W_fwd
        lookup_mode = read_modes[1] * W_lookup
        bwd_mode = read_modes[0] * W_bwd
        # read weights = backward + content lookup + forward mode weights
        self.W_read = bwd_mode + lookup_mode + fwd_mode  # [N,R]
        # create read vectors by applying read weights to memory matrix
        self.read_v = tf.transpose(
            tf.matmul(self.M, self.W_read, transpose_a=True)
        )  # ([W,N]*[N,R])^T -> [R,W]
    def step(self, x: tf.Tensor) -> tf.Tensor:
        """
        Update the controller, compute the output and interface vectors,
        write to and read from memory and compute the output.
        """
        # update controller
        self.controller(x)
        # compute output and interface vectors
        # [1,Y+I] * [Y+I,Y] -> [1,Y]
        self.output_v = tf.matmul(self.h, self.W_output)
        self.interface = tf.matmul(
            self.h, self.W_interface
        )  # [1,Y+I] * [Y+I,I] -> [1,I]
        # partition the interface vector
        (
            k_read,
            b_read,
            k_write,
            b_write,
            erase,
            write_v,
            free_gates,
            alloc_gate,
            write_gate,
            read_modes,
        ) = self.partition_interface()
        # write to memory
        self.write(
            free_gates,
            alloc_gate,
            write_gate,
            k_write,
            b_write,
            erase,
            write_v,
        )
        # read from memory
        self.read(k_read, b_read, read_modes)
        # flatten read vectors and multiply them with W matrix before adding
        # to controller output
        read_v_out = tf.matmul(
            tf.reshape(self.read_v, [1, self.R * self.W]), self.W_read_out
        )  # [1,RW]*[RW,Y] -> [1,Y]
        # compute output
        y = self.output_v + read_v_out
        return y
    def call(self, x: Union[np.ndarray, tf.Tensor]) -> tf.Tensor:
        """
        Unstack the input, run through the DNC and return the stacked output.
        """
        y = []
        # process the sequence one element at a time (state carries over)
        for x_seq in tf.unstack(x, axis=0):
            x_seq = tf.expand_dims(x_seq, axis=0)
            y_seq = self.step(x_seq)
            y.append(y_seq)
        return tf.expand_dims(tf.squeeze(tf.stack(y, axis=0)), axis=0)
| [
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.nn.l2_normalize",
"tensorflow.matmul",
"tensorflow.dynamic_partition",
"tensorflow.reduce_prod",
"tensorflow.nn.softmax",
"tensorflow.keras.layers.Concatenate",
"numpy.identity",
"tensorflow.stack",
"te... | [((1638, 1698), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[1, self.output_dim]'], {'stddev': '(0.1)'}), '([1, self.output_dim], stddev=0.1)\n', (1664, 1698), True, 'import tensorflow as tf\n'), ((1755, 1818), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[1, self.interface_dim]'], {'stddev': '(0.1)'}), '([1, self.interface_dim], stddev=0.1)\n', (1781, 1818), True, 'import tensorflow as tf\n'), ((1914, 1936), 'tensorflow.zeros', 'tf.zeros', (['memory_shape'], {}), '(memory_shape)\n', (1922, 1936), True, 'import tensorflow as tf\n'), ((2067, 2094), 'tensorflow.fill', 'tf.fill', (['[self.N, 1]', '(1e-06)'], {}), '([self.N, 1], 1e-06)\n', (2074, 2094), True, 'import tensorflow as tf\n'), ((2227, 2253), 'tensorflow.zeros', 'tf.zeros', (['[self.N, self.N]'], {}), '([self.N, self.N])\n', (2235, 2253), True, 'import tensorflow as tf\n'), ((2392, 2413), 'tensorflow.zeros', 'tf.zeros', (['[self.N, 1]'], {}), '([self.N, 1])\n', (2400, 2413), True, 'import tensorflow as tf\n'), ((2512, 2544), 'tensorflow.fill', 'tf.fill', (['[self.N, self.R]', '(1e-06)'], {}), '([self.N, self.R], 1e-06)\n', (2519, 2544), True, 'import tensorflow as tf\n'), ((2576, 2603), 'tensorflow.fill', 'tf.fill', (['[self.N, 1]', '(1e-06)'], {}), '([self.N, 1], 1e-06)\n', (2583, 2603), True, 'import tensorflow as tf\n'), ((2634, 2666), 'tensorflow.fill', 'tf.fill', (['[self.R, self.W]', '(1e-06)'], {}), '([self.R, self.W], 1e-06)\n', (2641, 2666), True, 'import tensorflow as tf\n'), ((3145, 3175), 'tensorflow.keras.layers.Dense', 'Dense', (['self.W'], {'activation': 'None'}), '(self.W, activation=None)\n', (3150, 3175), False, 'from tensorflow.keras.layers import Concatenate, Dense, LSTM\n'), ((3196, 3292), 'tensorflow.keras.layers.LSTM', 'LSTM', (['self.controller_dim'], {'return_sequences': '(False)', 'return_state': '(True)', 'name': '"""dnc_controller"""'}), "(self.controller_dim, return_sequences=False, return_state=True, name=\n 
'dnc_controller')\n", (3200, 3292), False, 'from tensorflow.keras.layers import Concatenate, Dense, LSTM\n'), ((4935, 4964), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.M', '(1)'], {}), '(self.M, 1)\n', (4953, 4964), True, 'import tensorflow as tf\n'), ((5037, 5063), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['key', '(1)'], {}), '(key, 1)\n', (5055, 5063), True, 'import tensorflow as tf\n'), ((5251, 5298), 'tensorflow.matmul', 'tf.matmul', (['norm_mem', 'norm_key'], {'transpose_b': '(True)'}), '(norm_mem, norm_key, transpose_b=True)\n', (5260, 5298), True, 'import tensorflow as tf\n'), ((5314, 5346), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(sim * strength)', '(0)'], {}), '(sim * strength, 0)\n', (5327, 5346), True, 'import tensorflow as tf\n'), ((6158, 6211), 'tensorflow.math.cumprod', 'tf.math.cumprod', (['sorted_usage'], {'axis': '(1)', 'exclusive': '(True)'}), '(sorted_usage, axis=1, exclusive=True)\n', (6173, 6211), True, 'import tensorflow as tf\n'), ((6278, 6296), 'tensorflow.zeros', 'tf.zeros', (['[self.N]'], {}), '([self.N])\n', (6286, 6296), True, 'import tensorflow as tf\n'), ((6701, 6733), 'tensorflow.reshape', 'tf.reshape', (['W_alloc', '[self.N, 1]'], {}), '(W_alloc, [self.N, 1])\n', (6711, 6733), True, 'import tensorflow as tf\n'), ((6936, 6958), 'tensorflow.reshape', 'tf.reshape', (['x', '[1, -1]'], {}), '(x, [1, -1])\n', (6946, 6958), True, 'import tensorflow as tf\n'), ((7617, 7794), 'tensorflow.constant', 'tf.constant', (['[[0] * (self.R * self.W) + [1] * self.R + [2] * self.W + [3] + [4] * self.W +\n [5] * self.W + [6] * self.R + [7] + [8] + [9] * (self.R * 3)]'], {'dtype': 'tf.int32'}), '([[0] * (self.R * self.W) + [1] * self.R + [2] * self.W + [3] + \n [4] * self.W + [5] * self.W + [6] * self.R + [7] + [8] + [9] * (self.R *\n 3)], dtype=tf.int32)\n', (7628, 7794), True, 'import tensorflow as tf\n'), ((8236, 8287), 'tensorflow.dynamic_partition', 'tf.dynamic_partition', (['self.interface', 'partition', '(10)'], 
{}), '(self.interface, partition, 10)\n', (8256, 8287), True, 'import tensorflow as tf\n'), ((8342, 8378), 'tensorflow.reshape', 'tf.reshape', (['k_read', '[self.R, self.W]'], {}), '(k_read, [self.R, self.W])\n', (8352, 8378), True, 'import tensorflow as tf\n'), ((8534, 8560), 'tensorflow.expand_dims', 'tf.expand_dims', (['k_write', '(0)'], {}), '(k_write, 0)\n', (8548, 8560), True, 'import tensorflow as tf\n'), ((8727, 8753), 'tensorflow.expand_dims', 'tf.expand_dims', (['write_v', '(0)'], {}), '(write_v, 0)\n', (8741, 8753), True, 'import tensorflow as tf\n'), ((9304, 9339), 'tensorflow.reshape', 'tf.reshape', (['read_modes', '[3, self.R]'], {}), '(read_modes, [3, self.R])\n', (9314, 9339), True, 'import tensorflow as tf\n'), ((9370, 9403), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['read_modes'], {'axis': '(0)'}), '(read_modes, axis=0)\n', (9383, 9403), True, 'import tensorflow as tf\n'), ((10071, 10123), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['(1 - free_gates * self.W_read)'], {'axis': '(1)'}), '(1 - free_gates * self.W_read, axis=1)\n', (10085, 10123), True, 'import tensorflow as tf\n'), ((10144, 10178), 'tensorflow.reshape', 'tf.reshape', (['retention', '[self.N, 1]'], {}), '(retention, [self.N, 1])\n', (10154, 10178), True, 'import tensorflow as tf\n'), ((10671, 10704), 'tensorflow.reshape', 'tf.reshape', (['W_lookup', '[self.N, 1]'], {}), '(W_lookup, [self.N, 1])\n', (10681, 10704), True, 'import tensorflow as tf\n'), ((12205, 12243), 'tensorflow.reshape', 'tf.reshape', (['W_lookup', '[self.N, self.R]'], {}), '(W_lookup, [self.N, self.R])\n', (12215, 12243), True, 'import tensorflow as tf\n'), ((12451, 12481), 'tensorflow.matmul', 'tf.matmul', (['self.L', 'self.W_read'], {}), '(self.L, self.W_read)\n', (12460, 12481), True, 'import tensorflow as tf\n'), ((12522, 12570), 'tensorflow.matmul', 'tf.matmul', (['self.L', 'self.W_read'], {'transpose_a': '(True)'}), '(self.L, self.W_read, transpose_a=True)\n', (12531, 12570), True, 'import tensorflow as 
tf\n'), ((13485, 13517), 'tensorflow.matmul', 'tf.matmul', (['self.h', 'self.W_output'], {}), '(self.h, self.W_output)\n', (13494, 13517), True, 'import tensorflow as tf\n'), ((13543, 13578), 'tensorflow.matmul', 'tf.matmul', (['self.h', 'self.W_interface'], {}), '(self.h, self.W_interface)\n', (13552, 13578), True, 'import tensorflow as tf\n'), ((14764, 14785), 'tensorflow.unstack', 'tf.unstack', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (14774, 14785), True, 'import tensorflow as tf\n'), ((2794, 2858), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[1, self.controller_dim]'], {'stddev': '(0.1)'}), '([1, self.controller_dim], stddev=0.1)\n', (2820, 2858), True, 'import tensorflow as tf\n'), ((2949, 3013), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[1, self.controller_dim]'], {'stddev': '(0.1)'}), '([1, self.controller_dim], stddev=0.1)\n', (2975, 3013), True, 'import tensorflow as tf\n'), ((3502, 3580), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[self.controller_dim, self.output_dim]'], {'stddev': '(0.1)'}), '([self.controller_dim, self.output_dim], stddev=0.1)\n', (3528, 3580), True, 'import tensorflow as tf\n'), ((3728, 3813), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[self.controller_dim, self.interface_dim]'], {'stddev': '(0.1)'}), '([self.controller_dim, self.interface_dim],\n stddev=0.1)\n', (3754, 3813), True, 'import tensorflow as tf\n'), ((4009, 4083), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[self.R * self.W, self.output_dim]'], {'stddev': '(0.1)'}), '([self.R * self.W, self.output_dim], stddev=0.1)\n', (4035, 4083), True, 'import tensorflow as tf\n'), ((6321, 6358), 'numpy.identity', 'np.identity', (['self.N'], {'dtype': 'np.float32'}), '(self.N, dtype=np.float32)\n', (6332, 6358), True, 'import numpy as np\n'), ((6424, 6448), 'tensorflow.unstack', 'tf.unstack', (['free_list[0]'], {}), '(free_list[0])\n', (6434, 
6448), True, 'import tensorflow as tf\n'), ((8674, 8698), 'tensorflow.expand_dims', 'tf.expand_dims', (['erase', '(0)'], {}), '(erase, 0)\n', (8688, 8698), True, 'import tensorflow as tf\n'), ((8867, 8896), 'tensorflow.expand_dims', 'tf.expand_dims', (['free_gates', '(0)'], {}), '(free_gates, 0)\n', (8881, 8896), True, 'import tensorflow as tf\n'), ((9016, 9041), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['alloc_gate'], {}), '(alloc_gate)\n', (9029, 9041), True, 'import tensorflow as tf\n'), ((9146, 9171), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['write_gate'], {}), '(write_gate)\n', (9159, 9171), True, 'import tensorflow as tf\n'), ((11112, 11144), 'tensorflow.matmul', 'tf.matmul', (['self.W_write', 'write_v'], {}), '(self.W_write, write_v)\n', (11121, 11144), True, 'import tensorflow as tf\n'), ((11452, 11472), 'tensorflow.ones', 'tf.ones', (['[1, self.N]'], {}), '([1, self.N])\n', (11459, 11472), True, 'import tensorflow as tf\n'), ((11581, 11641), 'tensorflow.matmul', 'tf.matmul', (['self.W_write', 'self.W_precedence'], {'transpose_b': '(True)'}), '(self.W_write, self.W_precedence, transpose_b=True)\n', (11590, 11641), True, 'import tensorflow as tf\n'), ((11691, 11716), 'tensorflow.ones', 'tf.ones', (['[self.N, self.N]'], {}), '([self.N, self.N])\n', (11698, 11716), True, 'import tensorflow as tf\n'), ((13027, 13075), 'tensorflow.matmul', 'tf.matmul', (['self.M', 'self.W_read'], {'transpose_a': '(True)'}), '(self.M, self.W_read, transpose_a=True)\n', (13036, 13075), True, 'import tensorflow as tf\n'), ((14378, 14423), 'tensorflow.reshape', 'tf.reshape', (['self.read_v', '[1, self.R * self.W]'], {}), '(self.read_v, [1, self.R * self.W])\n', (14388, 14423), True, 'import tensorflow as tf\n'), ((14807, 14836), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_seq'], {'axis': '(0)'}), '(x_seq, axis=0)\n', (14821, 14836), True, 'import tensorflow as tf\n'), ((6068, 6092), 'tensorflow.transpose', 'tf.transpose', (['self.usage'], {}), '(self.usage)\n', (6080, 
6092), True, 'import tensorflow as tf\n'), ((6500, 6530), 'tensorflow.slice', 'tf.slice', (['i', '[idx, 0]', '[1, -1]'], {}), '(i, [idx, 0], [1, -1])\n', (6508, 6530), True, 'import tensorflow as tf\n'), ((7084, 7103), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(0)'}), '(axis=0)\n', (7095, 7103), False, 'from tensorflow.keras.layers import Concatenate, Dense, LSTM\n'), ((8424, 8449), 'tensorflow.expand_dims', 'tf.expand_dims', (['b_read', '(0)'], {}), '(b_read, 0)\n', (8438, 8449), True, 'import tensorflow as tf\n'), ((8607, 8633), 'tensorflow.expand_dims', 'tf.expand_dims', (['b_write', '(0)'], {}), '(b_write, 0)\n', (8621, 8633), True, 'import tensorflow as tf\n'), ((11744, 11781), 'numpy.identity', 'np.identity', (['self.N'], {'dtype': 'np.float32'}), '(self.N, dtype=np.float32)\n', (11755, 11781), True, 'import numpy as np\n'), ((14943, 14962), 'tensorflow.stack', 'tf.stack', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (14951, 14962), True, 'import tensorflow as tf\n'), ((11078, 11108), 'tensorflow.matmul', 'tf.matmul', (['self.W_write', 'erase'], {}), '(self.W_write, erase)\n', (11087, 11108), True, 'import tensorflow as tf\n'), ((11533, 11559), 'tensorflow.transpose', 'tf.transpose', (['W_write_cast'], {}), '(W_write_cast)\n', (11545, 11559), True, 'import tensorflow as tf\n'), ((11952, 11987), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.W_write'], {'axis': '(0)'}), '(self.W_write, axis=0)\n', (11965, 11987), True, 'import tensorflow as tf\n')] |
import numpy as np
import matplotlib.pyplot as plt
from recipes.logging import LoggingMixin
from scrawl.imagine import _sanitize_data
from scrawl.utils import percentile
def get_bins(data, bins, range=None):
    """Compute histogram bin edges for *data*.

    Superset of the automated binning heuristics from astropy and numpy:
    the astropy-only methods ('blocks', 'knuth', 'freedman') are delegated
    to `astropy.stats.calculate_bin_edges`; everything else (integer counts,
    explicit edge sequences, numpy heuristic names such as 'auto') goes to
    `numpy.histogram_bin_edges`.
    """
    astropy_methods = {'blocks', 'knuth', 'freedman'}
    if isinstance(bins, str) and bins in astropy_methods:
        # imported lazily so astropy is only required for these methods
        from astropy.stats import calculate_bin_edges
        return calculate_bin_edges(data, bins, range)
    return np.histogram_bin_edges(data, bins, range)
class Histogram(LoggingMixin):
    """A histogram that carries some state.

    Computes the histogram of *data* on construction and stores the bin
    edges and counts so that derived quantities (bin centers, bar
    vertices, matplotlib artists) can be produced without recomputing.
    """

    # default binning heuristic / range (see `get_bins`)
    bins = 'auto'
    range = None

    def __init__(self, data, bins=bins, range=None, plims=None, **kws):
        # create
        super().__init__()
        # run
        self(data, bins, range, plims, **kws)

    def __call__(self, data, bins=bins, range=None, plims=None, **kws):
        """Compute the histogram of *data* and store `bin_edges` / `counts`.

        Parameters
        ----------
        data : array-like
            Values to histogram (sanitized via `_sanitize_data` first).
        bins, range :
            Forwarded to `get_bins` / `numpy.histogram`.
        plims : pair of floats, optional
            Percentile limits; if given, *range* is replaced by the
            corresponding percentiles of the data.
        kws :
            Extra keywords for `numpy.histogram`.
        """
        # compute histogram
        data = _sanitize_data(data)
        if plims is not None:
            # choose range based on data percentile limits
            range = percentile(data, plims)
        self.bin_edges = self.auto_bins(data, bins, range)
        self.counts, _ = np.histogram(data, self.bin_edges, range, **kws)

    @property
    def bin_centers(self):
        # Midpoint of each bin: left edge plus *half* the bin width.
        # BUGFIX: previously added the full width (edges[:-1] + diff),
        # which produced the right edges instead of the centers.
        return self.bin_edges[:-1] + 0.5 * np.diff(self.bin_edges)

    @property
    def n(self):
        """Number of bins."""
        return len(self.counts)

    def auto_bins(self, data, bins=bins, range=None):
        """Resolve *bins* into concrete bin edges for *data*."""
        return get_bins(data, bins, range)

    def get_verts(self):
        """vertices for vertical bars"""
        if len(self.counts) == 0:
            # empty histogram
            return []
        # four corners per bar: (left, 0), (right, 0), (right, c), (left, c)
        x01 = [self.bin_edges[:-1], self.bin_edges[1:]]
        x = x01 + x01[::-1]
        ymin = 0
        y = list(np.full((2, self.n), ymin)) + [self.counts] * 2
        return np.array([x, y]).T

    def get_bars(self, **kws):
        # create collection, coloured by normalized bar height
        from matplotlib.collections import PolyCollection
        return PolyCollection(self.get_verts(),
                              array=self.counts / self.counts.max(),
                              **kws)

    def plot(self, ax, **kws):
        """Add the histogram bars to *ax* and return the artist."""
        bars = self.get_bars(**kws)
        ax.add_collection(bars)
        return bars
def hist(x, bins=100, range=None, normed=False, weights=None, **kws):
    """
    Plot a nice looking histogram.

    Parameters
    ----------
    x: sequence
        Values to histogram
    bins:
        Bin count, edge sequence, or heuristic name; resolved by `get_bins`.
    range: tuple, optional
        Lower / upper histogram range.
    normed: bool
        Normalize to a probability density.
    weights: sequence, optional
        Per-point weights, forwarded to `ax.hist`.

    Keywords
    --------
    axes_labels: sequence
        One or two axis labels (x,y)
    title: str
        The figure title
    show_stats: str; option ('mode',)
        Show the given statistic of the distribution
    * Remaining keywords are passed to ax.hist

    Returns
    -------
    h: tuple
        bins, values
    ax: axes
    """
    # https://en.wikipedia.org/wiki/Quantile#Specialized_quantiles
    named_quantiles = {25: 'lower quartile',
                       50: 'median',
                       75: 'upper quartile'}

    # Pull custom options out of **kws before forwarding the rest to ax.hist
    show_stats = kws.pop('show_stats', ())
    show_stats_labels = kws.pop('show_stats_labels', True)
    fmt_stats = kws.pop('fmt_stats', None)
    lbls = kws.pop('axes_labels', ())
    title = kws.pop('title', '')
    # default bar transparency — deliberately left inside kws for ax.hist
    alpha = kws.setdefault('alpha', 0.75)
    ax = kws.pop('ax', None)
    Q = kws.pop('percentile', [])

    # Create figure
    if ax is None:
        _, ax = plt.subplots(tight_layout=True)
    # else:
    #     fig = ax.figure

    # compute bins if heuristic
    bins = get_bins(x, bins, range)

    # Plot the histogram
    # NOTE(review): *normed* is passed positionally into what modern
    # matplotlib calls the `density` slot of `Axes.hist`; confirm against
    # the installed matplotlib version.
    h = counts, bins, patches = ax.hist(x, bins, range, normed, weights, **kws)

    # Make axis labels and title
    xlbl = lbls[0] if len(lbls) else ''
    ylbl = lbls[1] if len(lbls) > 1 else ('Density' if normed else 'Counts')
    ax.set_xlabel(xlbl)
    ax.set_ylabel(ylbl)
    ax.set_title(title)
    ax.grid()

    # Extra summary statistics (point estimators)
    stats = {}
    if 'min' in show_stats:
        stats['min'] = x.min()
    if 'max' in show_stats:
        stats['max'] = x.max()
    if 'mode' in show_stats:
        from scipy.stats import mode
        stats['mode'] = mode(x).mode.item()
    if 'mean' in show_stats:
        stats['mean'] = x.mean()
    if 'median' in show_stats:
        # median is just the 50th percentile; handled by the block below
        Q.append(50)
    # if 'percentile' in show_stats:
    #     pass
    if len(Q):  # 'percentile' in show_stats:
        P = np.percentile(x, Q)
        for p, q in zip(P, Q):
            name = named_quantiles.get(q, '$p_{%i}$' % q)
            stats[name] = p

    if fmt_stats is None:
        # default numeric formatter for the statistic labels
        from recipes.pprint import decimal as fmt_stats

    if stats:
        from matplotlib.transforms import blended_transform_factory as btf
        for key, val in stats.items():
            # mark each statistic with a vertical dashed line in the bar colour
            c = patches[0].get_facecolor()
            ax.axvline(val, color=c, alpha=1, ls='--', lw=2)
            # x in data coords, y in axes coords so labels sit at the top
            trans = btf(ax.transData, ax.transAxes)
            if show_stats_labels:
                txt = '%s = %s' % (key, fmt_stats(val))
                ax.text(val, 1, txt,
                        transform=trans,
                        rotation='vertical', va='top', ha='right')

    return h, ax
| [
"scrawl.imagine._sanitize_data",
"numpy.full",
"scipy.stats.mode",
"numpy.histogram_bin_edges",
"numpy.percentile",
"astropy.stats.calculate_bin_edges",
"numpy.histogram",
"numpy.diff",
"numpy.array",
"scrawl.utils.percentile",
"matplotlib.transforms.blended_transform_factory",
"matplotlib.pyp... | [((415, 453), 'astropy.stats.calculate_bin_edges', 'calculate_bin_edges', (['data', 'bins', 'range'], {}), '(data, bins, range)\n', (434, 453), False, 'from astropy.stats import calculate_bin_edges\n'), ((479, 520), 'numpy.histogram_bin_edges', 'np.histogram_bin_edges', (['data', 'bins', 'range'], {}), '(data, bins, range)\n', (501, 520), True, 'import numpy as np\n'), ((929, 949), 'scrawl.imagine._sanitize_data', '_sanitize_data', (['data'], {}), '(data)\n', (943, 949), False, 'from scrawl.imagine import _sanitize_data\n'), ((1168, 1216), 'numpy.histogram', 'np.histogram', (['data', 'self.bin_edges', 'range'], {}), '(data, self.bin_edges, range, **kws)\n', (1180, 1216), True, 'import numpy as np\n'), ((3376, 3407), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'tight_layout': '(True)'}), '(tight_layout=True)\n', (3388, 3407), True, 'import matplotlib.pyplot as plt\n'), ((4382, 4401), 'numpy.percentile', 'np.percentile', (['x', 'Q'], {}), '(x, Q)\n', (4395, 4401), True, 'import numpy as np\n'), ((1059, 1082), 'scrawl.utils.percentile', 'percentile', (['data', 'plims'], {}), '(data, plims)\n', (1069, 1082), False, 'from scrawl.utils import percentile\n'), ((1296, 1319), 'numpy.diff', 'np.diff', (['self.bin_edges'], {}), '(self.bin_edges)\n', (1303, 1319), True, 'import numpy as np\n'), ((1819, 1835), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1827, 1835), True, 'import numpy as np\n'), ((4856, 4887), 'matplotlib.transforms.blended_transform_factory', 'btf', (['ax.transData', 'ax.transAxes'], {}), '(ax.transData, ax.transAxes)\n', (4859, 4887), True, 'from matplotlib.transforms import blended_transform_factory as btf\n'), ((1755, 1781), 'numpy.full', 'np.full', (['(2, self.n)', 'ymin'], {}), '((2, self.n), ymin)\n', (1762, 1781), True, 'import numpy as np\n'), ((4139, 4146), 'scipy.stats.mode', 'mode', (['x'], {}), '(x)\n', (4143, 4146), False, 'from scipy.stats import mode\n'), ((4962, 4976), 'recipes.pprint.decimal', 
'fmt_stats', (['val'], {}), '(val)\n', (4971, 4976), True, 'from recipes.pprint import decimal as fmt_stats\n')] |
import numpy as np
class SudokuIO:
    """Reads sudoku puzzles from a text file and exports them as DIMACS clauses.

    Each line of the input file encodes one board as a flat string of
    ``board_size ** 2`` characters; non-numeric characters denote empty cells.
    """

    def __init__(self, puzzle_file, board_size=9):
        self.puzzle_file = puzzle_file
        self.board_size = board_size
        self.puzzles = self.read_sudoku(puzzle_file)

    def write_dimacs(self, puzzle=None):
        """Write the given clues of *puzzle* (default: first puzzle) as unit clauses."""
        board = self.puzzles[0] if puzzle is None else puzzle
        name = self.puzzle_file.split("/")[1]
        with open(f'dimacs/puzzles/{name}7', 'w') as out:
            # one clause per pre-filled cell, encoded as <row><col><value>
            for i, row in enumerate(board, start=1):
                for j, cell in enumerate(row, start=1):
                    if cell > 0:
                        out.write(f"{i}{j}{int(cell)} 0\n")

    def read_sudoku(self, filename):
        """Parse every line of *filename* into a board_size x board_size array."""
        with open(filename, 'r') as input_file:
            raw_lines = input_file.readlines()
        size = self.board_size
        boards = []
        for line in raw_lines:
            board = np.zeros(shape=(size, size))
            # walk the flat character string row-major
            for idx in range(size * size):
                ch = line[idx]
                board[idx // size][idx % size] = ch if str.isnumeric(ch) else 0
            boards.append(board)
        return boards
def main():
    """Convert puzzle #7 of the bundled sudoku collection to DIMACS format."""
    # note: renamed local (was `io`) to avoid shadowing the stdlib module name
    sudoku_io = SudokuIO('sudoku/1000_sudokus.txt')
    sudoku_io.write_dimacs(sudoku_io.puzzles[7])


if __name__ == '__main__':
    main()
"numpy.zeros"
] | [((817, 867), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.board_size, self.board_size)'}), '(shape=(self.board_size, self.board_size))\n', (825, 867), True, 'import numpy as np\n')] |
"""Simple example operations that are used to demonstrate some image processing in Xi-CAM.
"""
import numpy as np
from xicam.plugins.operationplugin import (limits, describe_input, describe_output,
operation, opts, output_names, visible)
# Define an operation that inverts the values of an image
@operation  # Required - defines the function below as an OperationPlugin
@output_names("output_image")  # Required - describes the names of the output(s)
@describe_input("image", "The image to invert")  # Optional - Description of an input argument
@describe_output("output_image", "The inverted image")  # Optional - Description of an output
@visible("image", is_visible=False)  # Optional - Prevents the input image arg from showing up in WorkflowEditor
def invert(image: np.ndarray, x=1) -> np.ndarray:
    """Invert *image* by reflecting every pixel about its dtype's maximum value."""
    # NOTE(review): parameter *x* is unused; kept for interface compatibility.
    info = np.iinfo(image.dtype) if issubclass(image.dtype.type, np.integer) \
        else np.finfo(image.dtype)
    return info.max - image
# Define an operation that applies random noise to an image
@operation
@output_names("output_image")
@describe_input("image", "The image to add random noise to")
@describe_input("strength", "The factor of noise to add to the image")
@limits("strength", [0.0, 1.0])  # Optional - Strength can only be from 0.0 to 1.0, inclusive
@opts("strength", step=0.1)  # Optional - When modifying in the WorkflowEditor, values will go up/down by 0.1
@visible("image", is_visible=False)
def random_noise(image: np.ndarray, strength: float = 0.5) -> np.ndarray:
    """Add uniform random noise, scaled by *strength* times the dtype maximum."""
    info = np.iinfo(image.dtype) if issubclass(image.dtype.type, np.integer) \
        else np.finfo(image.dtype)
    noise = np.random.rand(*image.shape) * (strength * info.max)
    return noise + image
"xicam.plugins.operationplugin.describe_input",
"xicam.plugins.operationplugin.describe_output",
"numpy.subtract",
"xicam.plugins.operationplugin.limits",
"xicam.plugins.operationplugin.opts",
"numpy.iinfo",
"xicam.plugins.operationplugin.output_names",
"numpy.finfo",
"numpy.random.rand",
"xicam.p... | [((417, 445), 'xicam.plugins.operationplugin.output_names', 'output_names', (['"""output_image"""'], {}), "('output_image')\n", (429, 445), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((498, 544), 'xicam.plugins.operationplugin.describe_input', 'describe_input', (['"""image"""', '"""The image to invert"""'], {}), "('image', 'The image to invert')\n", (512, 544), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((593, 646), 'xicam.plugins.operationplugin.describe_output', 'describe_output', (['"""output_image"""', '"""The inverted image"""'], {}), "('output_image', 'The inverted image')\n", (608, 646), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((687, 721), 'xicam.plugins.operationplugin.visible', 'visible', (['"""image"""'], {'is_visible': '(False)'}), "('image', is_visible=False)\n", (694, 721), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((1115, 1143), 'xicam.plugins.operationplugin.output_names', 'output_names', (['"""output_image"""'], {}), "('output_image')\n", (1127, 1143), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((1145, 1204), 'xicam.plugins.operationplugin.describe_input', 'describe_input', (['"""image"""', '"""The image to add random noise to"""'], {}), "('image', 'The image to add random noise to')\n", (1159, 1204), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((1206, 1275), 'xicam.plugins.operationplugin.describe_input', 'describe_input', (['"""strength"""', '"""The factor of noise to add to the 
image"""'], {}), "('strength', 'The factor of noise to add to the image')\n", (1220, 1275), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((1277, 1307), 'xicam.plugins.operationplugin.limits', 'limits', (['"""strength"""', '[0.0, 1.0]'], {}), "('strength', [0.0, 1.0])\n", (1283, 1307), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((1371, 1397), 'xicam.plugins.operationplugin.opts', 'opts', (['"""strength"""'], {'step': '(0.1)'}), "('strength', step=0.1)\n", (1375, 1397), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((1481, 1515), 'xicam.plugins.operationplugin.visible', 'visible', (['"""image"""'], {'is_visible': '(False)'}), "('image', is_visible=False)\n", (1488, 1515), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((1011, 1040), 'numpy.subtract', 'np.subtract', (['max_value', 'image'], {}), '(max_value, image)\n', (1022, 1040), True, 'import numpy as np\n'), ((918, 939), 'numpy.iinfo', 'np.iinfo', (['image.dtype'], {}), '(image.dtype)\n', (926, 939), True, 'import numpy as np\n'), ((974, 995), 'numpy.finfo', 'np.finfo', (['image.dtype'], {}), '(image.dtype)\n', (982, 995), True, 'import numpy as np\n'), ((1659, 1680), 'numpy.iinfo', 'np.iinfo', (['image.dtype'], {}), '(image.dtype)\n', (1667, 1680), True, 'import numpy as np\n'), ((1715, 1736), 'numpy.finfo', 'np.finfo', (['image.dtype'], {}), '(image.dtype)\n', (1723, 1736), True, 'import numpy as np\n'), ((1752, 1780), 'numpy.random.rand', 'np.random.rand', (['*image.shape'], {}), '(*image.shape)\n', (1766, 1780), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
plot.py
"""
import sys
import os
# Numeric
import numpy as np
# Gaussian filtering for KDE
from scipy.ndimage import gaussian_filter
# DataFrames
import pandas as pd
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
# Scalebars
try:
from matplotlib_scalebar.scalebar import ScaleBar
scalebar_active = True
except ModuleNotFoundError:
scalebar_active = False
# Internal utilities
from .defoc import f_remain
from .utils import (
rad_disp_histogram,
evaluate_diffusion_model,
coarsen_histogram
)
# Use Arial font
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Arial'
def savefig(out_png, dpi=800, show_result=True):
    """
    Save the current matplotlib Figure to a PNG, then close it.

    args
    ----
    out_png     : str, save path
    dpi         : int, output resolution
    show_result : bool, if True and running on macOS, open the saved
                  image with the system viewer
    """
    plt.tight_layout()
    plt.savefig(out_png, dpi=dpi)
    plt.close()
    if show_result and sys.platform == "darwin":
        # Argument-list form is safe for paths containing spaces or shell
        # metacharacters, unlike the previous os.system("open {}") call.
        import subprocess
        subprocess.run(["open", out_png])
def kill_ticks(axes, spines=True):
    """
    Remove all ticks from a matplotlib.axes.Axes object. If *spines* is
    True, hide the four axes spines as well.
    """
    axes.set_xticks([])
    axes.set_yticks([])
    if not spines:
        return
    for side in ('top', 'bottom', 'left', 'right'):
        axes.spines[side].set_visible(False)
def plot_jump_length_dist(tracks, occs, diff_coefs, out_prefix, pos_cols=["y", "x"],
    n_frames=4, frame_interval=0.00748, pixel_size_um=0.16, loc_error=0.035,
    dz=None, max_jump=5.0, max_jump_pmf=2.0, cmap="gray", figsize_mod=1.0,
    n_gaps=0, use_entire_track=True, max_jumps_per_track=10):
    """
    Given a set of trajectories and a particular diffusive mixture model,
    plot the observed and model radial jump histograms alongside each other.

    args
    ----
        tracks          :   pandas.DataFrame, with columns "trajectory", "frame",
                            and the contents of *pos_cols*
        occs            :   1D ndarray, fractional state occupations
        diff_coefs      :   1D ndarray, diffusion coefficients for each
                            state in um^2 s^-1
        out_prefix      :   str, the prefix for the output plots
        pos_cols        :   list of str, positional coordinate columns
        n_frames        :   int, the number of time points to consider
        frame_interval  :   float, the time between frames in seconds
        pixel_size_um   :   float, size of pixels in um
        loc_error       :   float, localization error in um
        dz              :   float, focal depth in um
        max_jump        :   float, the maximum jump length to show in um
        max_jump_pmf    :   float, the maximum jump length to show in PMF plots
        cmap            :   str, color palette to use for each jump length
        figsize_mod     :   float, modifier for the default figure size
        n_gaps          :   int, the number of gaps allowed during tracking
        use_entire_track:   bool, use all jumps from every trajectory
        max_jumps_per_track:    int. If *use_entire_track* is False, the maximum
                                number of jumps per trajectory to consider

    returns
    -------
        None; plots directly to output plots
    """
    # Calculate radial displacement histograms for the trajectories
    # in this dataset
    # BUGFIX: *use_entire_track* is now forwarded (it was previously
    # hard-coded to True, silently ignoring the parameter)
    H, bin_edges = rad_disp_histogram(tracks, n_frames=n_frames,
        pos_cols=pos_cols, bin_size=0.001, max_jump=max_jump,
        pixel_size_um=pixel_size_um, n_gaps=n_gaps,
        use_entire_track=use_entire_track,
        max_jumps_per_track=max_jumps_per_track)

    # Empirical PMF (normalize each frame interval's histogram)
    H = H.astype(np.float64)
    H = (H.T / H.sum(axis=1)).T

    # Aggregate into coarser bins for display
    H_agg, bin_edges_agg = coarsen_histogram(H, bin_edges, 20)

    # Empirical CDF
    cdf = np.cumsum(H, axis=1)

    # Calculate the model PMF and CDF on the same support
    model_pmf, model_cdf = evaluate_diffusion_model(bin_edges, occs, diff_coefs,
        len(pos_cols), frame_interval=frame_interval, loc_error=loc_error,
        dz=dz, n_frames=n_frames)

    # Plot the jump PMFs
    # BUGFIX: forward *figsize_mod* (was hard-coded to 1.0)
    out_png_pmf = "{}_pmf.png".format(out_prefix)
    plot_jump_length_pmf(bin_edges_agg, H_agg, model_pmfs=model_pmf, model_bin_edges=bin_edges,
        frame_interval=frame_interval, max_jump=max_jump_pmf, cmap=cmap,
        figsize_mod=figsize_mod, out_png=out_png_pmf)

    # Plot the jump CDFs
    out_png_cdf = "{}_cdf.png".format(out_prefix)
    plot_jump_length_cdf(bin_edges, cdf, model_cdfs=model_cdf, model_bin_edges=bin_edges,
        frame_interval=frame_interval, max_jump=max_jump, cmap=cmap,
        figsize_mod=figsize_mod, out_png=out_png_cdf, fontsize=8)
def plot_jump_length_pmf(bin_edges, pmfs, model_pmfs=None, model_bin_edges=None,
    frame_interval=0.01, max_jump=2.0, cmap="gray", figsize_mod=1.0, out_png=None):
    """
    Plot jump length histograms at different frame intervals, possibly with a model
    overlay.

    args
    ----
        bin_edges       :   1D ndarray of shape (n_bins+1), the edges of each jump
                            length bin in um
        pmfs            :   2D ndarray of shape (n_frames, n_bins), the jump length
                            histogram. This is normalized, if not already normalized.
        model_pmfs      :   2D ndarray of shape (n_frames, n_bins_model), the
                            model PMFs for each frame interval in um
        model_bin_edges :   1D ndarray of shape (n_bins_model+1), the edges of each
                            jump length bin for the model PMFs in um. If not given,
                            this function defaults to *bin_edges*.
        frame_interval  :   float, the time between frames in seconds
        max_jump        :   float, the maximum jump length to show in um
        cmap            :   str, color palette to use for each jump length. If a hex color
                            (for instance, "#A1A1A1"), then each frame interval is
                            colored the same.
        figsize_mod     :   float, modifier for the default figure size
        out_png         :   str, a file to save this plot to. If not specified, the plot
                            is not saved.

    returns
    -------
        (
            matplotlib.pyplot.Figure,
            1D ndarray of matplotlib.axes.Axes
        )
    """
    # Check user inputs and get the number of bins and bin size
    assert len(pmfs.shape) == 2
    n_frames, n_bins = pmfs.shape
    exp_bin_size = bin_edges[1] - bin_edges[0]
    bin_centers = bin_edges[:-1] + 0.5 * exp_bin_size
    assert bin_edges.shape[0] == n_bins + 1
    if not model_pmfs is None:
        assert model_pmfs.shape[0] == n_frames
        _, n_bins_model = model_pmfs.shape
        if not model_bin_edges is None:
            assert n_bins_model == model_bin_edges.shape[0] - 1
            model_bin_size = model_bin_edges[1] - model_bin_edges[0]
            model_bin_centers = model_bin_edges[:-1] + model_bin_size * 0.5
        else:
            # model defined on the same support as the data
            assert n_bins_model == n_bins
            model_bin_centers = bin_centers
            model_bin_size = exp_bin_size

        # PMF scaling, accounting for disparate bin sizes between the
        # experimental and model histograms
        scale_factor = exp_bin_size / model_bin_size

    # Bar width for bar plot
    width = exp_bin_size * 0.8

    # Generate the plot axes (one row per frame interval)
    fig, axes = plt.subplots(n_frames, 1, figsize=(4.2*figsize_mod, 0.75*n_frames*figsize_mod),
        sharex=True)
    if n_frames == 1:
        # keep indexing uniform when there is only one subplot
        axes = np.array([axes])

    # Make colors for each frame interval
    assert isinstance(cmap, str)
    if cmap[0] == "#":
        palette = [cmap for j in range(n_frames)]
    else:
        palette = sns.color_palette(cmap, n_frames)

    # Plot the PMF for each frame interval
    for t in range(n_frames):

        # Plot the experimental data (guard against an empty histogram,
        # which would otherwise divide by zero)
        if pmfs[t,:].sum() == 0:
            exp_pmf = np.zeros(pmfs[t,:].shape, dtype=np.float64)
        else:
            exp_pmf = pmfs[t,:].astype(np.float64) / pmfs[t,:].sum()
        axes[t].bar(bin_centers, exp_pmf, color=palette[t], edgecolor="k", linewidth=1,
            width=width, label=None)

        # Plot the model
        if not model_pmfs is None:
            axes[t].plot(model_bin_centers, model_pmfs[t,:]*scale_factor, linestyle='-',
                linewidth=1.5, color='k', label=None)

        # For labels: invisible artist whose legend entry shows the frame interval
        axes[t].plot([], [], linestyle="", marker=None, color="w",
            label="$\Delta t = ${:.4f} sec".format((t+1)*frame_interval))

        axes[t].legend(frameon=False, prop={"size": 6}, loc="upper right")
        axes[t].set_yticks([])

        # Kill some of the plot spines
        for s in ["top", "right", "left"]:
            axes[t].spines[s].set_visible(False)

    # Only show jumps up to the max jump length
    if not max_jump is None:
        axes[0].set_xlim((0, max_jump))
    axes[-1].set_xlabel("2D radial displacement ($\mu$m)", fontsize=10)

    # Save to a file, if desired
    if not out_png is None:
        savefig(out_png)

    return fig, axes
def plot_jump_length_cdf(bin_edges, cdfs, model_cdfs=None, model_bin_edges=None,
    frame_interval=0.01, max_jump=5.0, cmap='gray', figsize_mod=1.0, out_png=None,
    fontsize=8):
    """
    Plot jump length cumulative distribution functions at different frame intervals,
    potentially with a model overlay.

    args
    ----
        bin_edges       :   1D ndarray of shape (n_bins+1), the edges of each jump
                            length bin in um
        cdfs            :   2D ndarray of shape (n_frames, n_bins), the jump length
                            CDFs
        model_cdfs      :   2D ndarray of shape (n_frames, n_bins_model), the
                            model CDFs for each frame interval in um
        model_bin_edges :   1D ndarray of shape (n_bins_model+1), the edges of each
                            jump length bin for the model CDFs in um. If not given,
                            this function defaults to *bin_edges*.
        frame_interval  :   float, the time between frames in seconds
        max_jump        :   float, the maximum jump length to show in um
        cmap            :   str, color palette to use for each jump length. If a hex color
                            (for instance, "#A1A1A1"), then each frame interval is
                            colored the same.
        figsize_mod     :   float, modifier for the default figure size
        out_png         :   str, a file to save this plot to. If not specified, the plot
                            is not saved.
        fontsize        :   int, label/legend font size

    returns
    -------
        (
            matplotlib.pyplot.Figure,
            list of matplotlib.axes.Axes
        )
    """
    # Check user inputs and figure out what kind of plot to make.
    # plot_case == 0: plot the experimental CDFs, model overlay, and model residuals
    # plot_case == 1: plot the experimental CDFs and model overlay, but no residuals
    # plot_case == 2: plot only the experimental CDFs
    n_frames, n_bins = cdfs.shape
    assert bin_edges.shape[0] == n_bins + 1
    bins_right = bin_edges[1:]
    bin_size = bin_edges[1] - bin_edges[0]  # NOTE(review): currently unused
    if not model_cdfs is None:
        n_frames_model, n_bins_model = model_cdfs.shape
        if not model_bin_edges is None:
            assert model_bin_edges.shape[0] == n_bins_model + 1
            model_bin_size = model_bin_edges[1] - model_bin_edges[0]
            model_bins_right = model_bin_edges[1:]
        else:
            assert model_cdfs.shape == cdfs.shape
            model_bins_right = bins_right

        # Choose whether or not to plot the residuals (only possible when
        # model and data share the same binning)
        if model_bins_right.shape == bins_right.shape:
            plot_case = 0
        else:
            plot_case = 1
    else:
        plot_case = 2

    # Configure the colors to use during plotting
    assert isinstance(cmap, str)
    if cmap[0] == "#":
        palette = [cmap for j in range(n_frames)]
    else:
        palette = sns.color_palette(cmap, n_frames)

    # Plot the experimental CDFs with a model overlay and residuals below
    if plot_case == 0:
        fig, ax = plt.subplots(2, 1, figsize=(3*figsize_mod, 3*figsize_mod),
            gridspec_kw={'height_ratios': [3,1]}, sharex=True)

    # Plot the experimental CDFs, potentially with a model overlay, and no residuals
    else:
        fig, ax = plt.subplots(figsize=(3*figsize_mod, 2*figsize_mod))
        # wrap in a list so ax[0] indexing works in both cases
        ax = [ax]

    # Plot the experimental CDFs
    for t in range(n_frames):
        ax[0].plot(bins_right, cdfs[t,:], color=palette[t], linestyle='-',
            label="{:.4f} sec".format((t+1)*frame_interval))

    # Plot the model CDFs
    if plot_case == 0 or plot_case == 1:
        for t in range(n_frames):
            ax[0].plot(model_bins_right, model_cdfs[t,:], color="k",
                linestyle="--", label=None)
        # single legend entry for all model curves
        ax[0].plot([], [], color="k", linestyle="--", label="Model")

    # Plot the model residuals
    if plot_case == 0:
        residuals = cdfs - model_cdfs
        for t in range(n_frames):
            ax[1].plot(bins_right, residuals[t,:], color=palette[t], linestyle='-',
                label="{:.4f} sec".format((t+1)*frame_interval), linewidth=1)
        ax[1].set_xlabel("Jump length ($\mu$m)", fontsize=fontsize)
        ax[1].set_ylabel("Residuals", fontsize=fontsize)

        # Center the residuals on zero
        ax1_ylim = np.abs(residuals).max() * 1.5
        ax[1].set_ylim((-ax1_ylim, ax1_ylim))
        ax[1].set_xlim((0, max_jump))
        ax[1].tick_params(labelsize=fontsize)

    # Axis labels and legend
    ax[0].set_ylabel("CDF", fontsize=fontsize)
    ax[0].set_xlim((0, max_jump))
    ax[0].legend(frameon=False, prop={'size': fontsize}, loc="lower right")
    ax[0].tick_params(labelsize=fontsize)

    # Save to a file, if desired
    if not out_png is None:
        savefig(out_png)

    return fig, ax
def spatial_dist(tracks, attrib_cols, out_png, pixel_size_um=0.16, bin_size=0.01,
    kde_width=0.08, cmap="magma", cmap_perc=99.5):
    """
    Plot the spatial distribution of states for a set of trajectories.

    args
    ----
        tracks          :   pandas.DataFrame, trajectories
        attrib_cols     :   list of str, a set of columns in *tracks*, each
                            corresponding to a diffusive state, giving
                            the likelihood of that state given the corresponding
                            trajectory
        out_png         :   str, output plot path
        pixel_size_um   :   float, size of pixels in um
        bin_size        :   float, size of the pixels in um
        kde_width       :   float, size of the KDE kernel in um
        cmap            :   str, color map
        cmap_perc       :   float, percentile of the density used as the
                            upper color limit (saturates outliers)
    """
    n_states = len(attrib_cols)

    # Spatial limits
    n_pixels_y = int(tracks["y"].max()) + 1
    n_pixels_x = int(tracks["x"].max()) + 1

    # Spatial binning strategy
    n_bins_y = int(n_pixels_y * pixel_size_um / bin_size) + 2
    n_um_y = n_bins_y * bin_size
    n_bins_x = int(n_pixels_x * pixel_size_um / bin_size) + 2
    n_um_x = n_bins_x * bin_size
    bin_edges_y = np.arange(0, n_um_y+bin_size, bin_size)
    bin_edges_x = np.arange(0, n_um_x+bin_size, bin_size)

    # Plot layout: two rows, ceil(n_states/2) state panels plus one
    # leading column for the density / scatter panels
    M = n_states // 2 + 1 if (n_states % 2 == 1) else n_states // 2
    fig, ax = plt.subplots(2, M+1, figsize=((n_states+1)*2.5, 6))

    # Raw localization density
    H = np.histogram2d(
        tracks['y'] * pixel_size_um,
        tracks['x'] * pixel_size_um,
        bins=(bin_edges_y, bin_edges_x)
    )[0].astype(np.float64)

    # KDE
    loc_density = gaussian_filter(H, kde_width/bin_size)
    # BUGFIX: imshow's *origin* must be 'upper' or 'lower'; the previous
    # value "bottom" raises on matplotlib >= 3.3
    ax[0,0].imshow(loc_density, cmap="gray", vmin=0,
        vmax=np.percentile(loc_density, cmap_perc), origin="lower")
    ax[0,0].set_title("Localization density")

    # The maximum likelihood state for each trajectory
    X = np.asarray(tracks[attrib_cols])
    max_l_states = np.argmax(X, axis=1)

    # Scatter plot of maximum likelihood states
    colors = sns.color_palette(cmap, n_states+1)
    for j, attrib_col in enumerate(attrib_cols):
        # colors run in reverse state order
        k = n_states - j - 1
        exclude = pd.isnull(tracks[attrib_col])
        include = np.logical_and(~exclude, max_l_states==k)
        ax[1,0].scatter(
            tracks.loc[include, "x"] * pixel_size_um,
            tracks.loc[include, "y"] * pixel_size_um,
            color=colors[j],
            s=1.5,
        )
    ax[1,0].set_aspect("equal")
    ax[1,0].set_title("Maximum likelihood state")

    # Spatial distribution of likelihoods for each state
    for j, attrib_col in enumerate(attrib_cols):

        # Do not include singlets
        exclude = pd.isnull(tracks[attrib_col])

        # Make a histogram of the likelihood
        H = np.histogram2d(
            tracks.loc[~exclude, 'y'] * pixel_size_um,
            tracks.loc[~exclude, 'x'] * pixel_size_um,
            bins=(bin_edges_y, bin_edges_x),
            weights=tracks.loc[~exclude, attrib_col]
        )[0].astype(np.float64)

        # Kernel density estimate
        kde = gaussian_filter(H, kde_width/bin_size)
        ax_y = j % 2
        ax_x = j // 2
        ax[ax_y, ax_x+1].imshow(kde, cmap="inferno", vmin=0,
            vmax=np.percentile(kde, cmap_perc), origin="lower")
        ax[ax_y, ax_x+1].set_title("State %d" % (j+1))

    # Add scale bars, if matplotlib_scalebar is installed
    if scalebar_active:
        s = ScaleBar(bin_size, "um", frameon=False, color="w",
            location="lower right")
        ax[0,0].add_artist(s)
        for j in range(n_states):
            ax_y = j % 2
            ax_x = j // 2
            s = ScaleBar(bin_size, "um", frameon=False, color="w",
                location="lower right")
            ax[ax_y,ax_x+1].add_artist(s)

    # Remove the ticks
    for i in range(2):
        for j in range(M+1):
            kill_ticks(ax[i,j])

    # Save figure
    savefig(out_png)
| [
"matplotlib.pyplot.tight_layout",
"numpy.abs",
"numpy.logical_and",
"numpy.argmax",
"matplotlib.pyplot.close",
"scipy.ndimage.gaussian_filter",
"numpy.asarray",
"numpy.zeros",
"numpy.histogram2d",
"pandas.isnull",
"numpy.percentile",
"numpy.cumsum",
"numpy.arange",
"numpy.array",
"seabor... | [((907, 925), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (923, 925), True, 'import matplotlib.pyplot as plt\n'), ((930, 959), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_png'], {'dpi': 'dpi'}), '(out_png, dpi=dpi)\n', (941, 959), True, 'import matplotlib.pyplot as plt\n'), ((964, 975), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (973, 975), True, 'import matplotlib.pyplot as plt\n'), ((3863, 3883), 'numpy.cumsum', 'np.cumsum', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (3872, 3883), True, 'import numpy as np\n'), ((7391, 7493), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_frames', '(1)'], {'figsize': '(4.2 * figsize_mod, 0.75 * n_frames * figsize_mod)', 'sharex': '(True)'}), '(n_frames, 1, figsize=(4.2 * figsize_mod, 0.75 * n_frames *\n figsize_mod), sharex=True)\n', (7403, 7493), True, 'import matplotlib.pyplot as plt\n'), ((15139, 15180), 'numpy.arange', 'np.arange', (['(0)', '(n_um_y + bin_size)', 'bin_size'], {}), '(0, n_um_y + bin_size, bin_size)\n', (15148, 15180), True, 'import numpy as np\n'), ((15197, 15238), 'numpy.arange', 'np.arange', (['(0)', '(n_um_x + bin_size)', 'bin_size'], {}), '(0, n_um_x + bin_size, bin_size)\n', (15206, 15238), True, 'import numpy as np\n'), ((15338, 15395), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(M + 1)'], {'figsize': '((n_states + 1) * 2.5, 6)'}), '(2, M + 1, figsize=((n_states + 1) * 2.5, 6))\n', (15350, 15395), True, 'import matplotlib.pyplot as plt\n'), ((15617, 15657), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['H', '(kde_width / bin_size)'], {}), '(H, kde_width / bin_size)\n', (15632, 15657), False, 'from scipy.ndimage import gaussian_filter\n'), ((15888, 15919), 'numpy.asarray', 'np.asarray', (['tracks[attrib_cols]'], {}), '(tracks[attrib_cols])\n', (15898, 15919), True, 'import numpy as np\n'), ((15939, 15959), 'numpy.argmax', 'np.argmax', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (15948, 15959), True, 'import numpy as 
np\n'), ((16022, 16059), 'seaborn.color_palette', 'sns.color_palette', (['cmap', '(n_states + 1)'], {}), '(cmap, n_states + 1)\n', (16039, 16059), True, 'import seaborn as sns\n'), ((7529, 7545), 'numpy.array', 'np.array', (['[axes]'], {}), '([axes])\n', (7537, 7545), True, 'import numpy as np\n'), ((7723, 7756), 'seaborn.color_palette', 'sns.color_palette', (['cmap', 'n_frames'], {}), '(cmap, n_frames)\n', (7740, 7756), True, 'import seaborn as sns\n'), ((12001, 12034), 'seaborn.color_palette', 'sns.color_palette', (['cmap', 'n_frames'], {}), '(cmap, n_frames)\n', (12018, 12034), True, 'import seaborn as sns\n'), ((12151, 12270), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(3 * figsize_mod, 3 * figsize_mod)', 'gridspec_kw': "{'height_ratios': [3, 1]}", 'sharex': '(True)'}), "(2, 1, figsize=(3 * figsize_mod, 3 * figsize_mod), gridspec_kw=\n {'height_ratios': [3, 1]}, sharex=True)\n", (12163, 12270), True, 'import matplotlib.pyplot as plt\n'), ((12387, 12443), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3 * figsize_mod, 2 * figsize_mod)'}), '(figsize=(3 * figsize_mod, 2 * figsize_mod))\n', (12399, 12443), True, 'import matplotlib.pyplot as plt\n'), ((16154, 16183), 'pandas.isnull', 'pd.isnull', (['tracks[attrib_col]'], {}), '(tracks[attrib_col])\n', (16163, 16183), True, 'import pandas as pd\n'), ((16202, 16245), 'numpy.logical_and', 'np.logical_and', (['(~exclude)', '(max_l_states == k)'], {}), '(~exclude, max_l_states == k)\n', (16216, 16245), True, 'import numpy as np\n'), ((16688, 16717), 'pandas.isnull', 'pd.isnull', (['tracks[attrib_col]'], {}), '(tracks[attrib_col])\n', (16697, 16717), True, 'import pandas as pd\n'), ((17081, 17121), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['H', '(kde_width / bin_size)'], {}), '(H, kde_width / bin_size)\n', (17096, 17121), False, 'from scipy.ndimage import gaussian_filter\n'), ((17439, 17513), 'matplotlib_scalebar.scalebar.ScaleBar', 'ScaleBar', (['bin_size', 
'"""um"""'], {'frameon': '(False)', 'color': '"""w"""', 'location': '"""lower right"""'}), "(bin_size, 'um', frameon=False, color='w', location='lower right')\n", (17447, 17513), False, 'from matplotlib_scalebar.scalebar import ScaleBar\n'), ((7924, 7968), 'numpy.zeros', 'np.zeros', (['pmfs[t, :].shape'], {'dtype': 'np.float64'}), '(pmfs[t, :].shape, dtype=np.float64)\n', (7932, 7968), True, 'import numpy as np\n'), ((15722, 15759), 'numpy.percentile', 'np.percentile', (['loc_density', 'cmap_perc'], {}), '(loc_density, cmap_perc)\n', (15735, 15759), True, 'import numpy as np\n'), ((17658, 17732), 'matplotlib_scalebar.scalebar.ScaleBar', 'ScaleBar', (['bin_size', '"""um"""'], {'frameon': '(False)', 'color': '"""w"""', 'location': '"""lower right"""'}), "(bin_size, 'um', frameon=False, color='w', location='lower right')\n", (17666, 17732), False, 'from matplotlib_scalebar.scalebar import ScaleBar\n'), ((15430, 15539), 'numpy.histogram2d', 'np.histogram2d', (["(tracks['y'] * pixel_size_um)", "(tracks['x'] * pixel_size_um)"], {'bins': '(bin_edges_y, bin_edges_x)'}), "(tracks['y'] * pixel_size_um, tracks['x'] * pixel_size_um,\n bins=(bin_edges_y, bin_edges_x))\n", (15444, 15539), True, 'import numpy as np\n'), ((17241, 17270), 'numpy.percentile', 'np.percentile', (['kde', 'cmap_perc'], {}), '(kde, cmap_perc)\n', (17254, 17270), True, 'import numpy as np\n'), ((13417, 13434), 'numpy.abs', 'np.abs', (['residuals'], {}), '(residuals)\n', (13423, 13434), True, 'import numpy as np\n'), ((16776, 16961), 'numpy.histogram2d', 'np.histogram2d', (["(tracks.loc[~exclude, 'y'] * pixel_size_um)", "(tracks.loc[~exclude, 'x'] * pixel_size_um)"], {'bins': '(bin_edges_y, bin_edges_x)', 'weights': 'tracks.loc[~exclude, attrib_col]'}), "(tracks.loc[~exclude, 'y'] * pixel_size_um, tracks.loc[~\n exclude, 'x'] * pixel_size_um, bins=(bin_edges_y, bin_edges_x), weights\n =tracks.loc[~exclude, attrib_col])\n", (16790, 16961), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# - - - - most frequently changed output parameters - - -
# Column index legend for the result CSV files:
# 0:a , 1:b , 2:beta , 3:k , 4:m , 5:l , 6:N , 7:p_0 , 8:q , 9:Adv_strategy
# 10:rateRandomness , 11:deltaWS , 12:gammaWS , 13:maxTermRound, 14:sZipf, 15:Type , 16:X , 17:Y
xcol = 3  # CSV column plotted on the x-axis (see legend above)
xlabel = "k"  # x-axis label; should match the column chosen by xcol
# xlim = [.0, 3.]
xlim = [1, 30]  # x-axis display range
# xscale = 'log'
xscale = 'linear'  # x-axis scale: 'linear' or 'log'
# - - - - parameters unlikely to be changed - - -
ycol = 17  # last column in the csv file
folder = "data/"  # input CSVs are read from (and plots written to) this folder
def main():
    """Generate every plot: success rates, failure rates and termination rounds."""
    for generate_plot in (printDataRates, printDataFailures, printMeanTermRound):
        generate_plot()
def printDataRates():
    """Plot agreement/integrity/termination success rates and save as EPS."""
    plt.figure()
    filename = folder + 'plot_ATI_rate'
    # (legend label, data-file suffix, marker, marker size, colour)
    series = (
        ("Agreement", "AgreementRate", ".", 10, "blue"),
        ("Integrity", "IntegrityRate", "+", 10, "orange"),
        ("Termination", "TerminationRate", "x", 10, "green"),
    )
    for label, datafile, marker, size, color in series:
        partPlot(label, datafile, marker, size, filename, color)
    plt.ylim([0, 1.05])
    plt.xscale(xscale)
    plt.xlim(xlim)
    plt.xlabel(xlabel)
    plt.ylabel("Rate")
    plt.legend(loc='best')
    plt.savefig(filename + '.eps', format='eps')
    plt.clf()
def printMeanTermRound():
    """Plot mean termination round (last node vs. all nodes) and save as EPS."""
    plt.figure()
    filename = folder + 'plot_EndRounds'
    # (legend label, data-file suffix, marker, marker size, colour)
    for label, datafile, marker, size, color in (
            ("Last node", "MeanTerminationRound", ".", 10, "magenta"),
            ("All nodes", "MeanLastRound", "s", 5, "darkgreen")):
        partPlot(label, datafile, marker, size, filename, color)
    plt.xlim(xlim)
    plt.xscale(xscale)
    plt.xlabel(xlabel)
    plt.ylim((0, 210))
    plt.ylabel("Mean Termination Round")
    plt.legend(loc='best')
    plt.savefig(filename + '.eps', format='eps')
    plt.clf()
def partPlot(type, file, marker, markersize, filename, color):
    """Load one (x, y) data series, draw it (line + markers) and save the raw data."""
    xs = loadDatafromRow(file, xcol)
    ys = loadDatafromRow(file, ycol)
    xs, ys = sort2vecs(xs, ys)
    # Two plot calls: a dashed connecting line plus discrete markers for the legend.
    plt.plot(xs, ys, linestyle='dashed', color=color, linewidth=1)
    plt.plot(xs, ys, label=type, marker=marker,
             linestyle='none', color=color, markersize=markersize)
    # Persist exactly what was plotted, for later reuse.
    np.savez(filename + "_" + type, x=xs, y=ys)
def printDataFailures():
    """Plot agreement/integrity/termination failure rates (log y-axis), save as EPS."""
    plt.figure()
    filename = folder + 'plot_ATI_failure'
    # (legend label, data-file suffix, marker, marker size, colour)
    series = (
        ("Agreement", "AgreementRate", ".", 10, "blue"),
        ("Integrity", "IntegrityRate", "+", 10, "orange"),
        ("Termination", "TerminationRate", "x", 10, "green"),
    )
    for label, datafile, marker, size, color in series:
        partPlot2(label, datafile, marker, size, filename, color)
    plt.xscale(xscale)
    plt.yscale('log')
    plt.xlim(xlim)
    plt.xlabel(xlabel)
    plt.ylabel("Failure rate")
    plt.legend(loc='best')
    plt.savefig(filename + '.eps', format='eps')
    plt.clf()
def partPlot2(type, file, marker, markersize, filename, color):
    """Load one data series, plot its failure rate (1 - success rate) and save it.

    :param type: legend label for this series
    :param file: data-file suffix passed to loadDatafromRow
    :param marker, markersize, color: matplotlib marker styling
    :param filename: base path; raw data is saved as <filename>_<type>.npz
    """
    x = loadDatafromRow(file, xcol)
    y = loadDatafromRow(file, ycol)
    x, y = sort2vecs(x, y)
    plt.plot(x, 1-y, linestyle='dashed', color=color, linewidth=1)
    plt.plot(x, 1-y, label=type, marker=marker,
             linestyle='none', color=color, markersize=markersize)
    # BUGFIX: save the x values that were actually plotted. The previous code
    # stored x=1-x (copied from the 1-y transform), which did not match the
    # plotted curve; only the y values are inverted into failure rates.
    np.savez(filename+"_"+type, x=x, y=1-y)
def sort2vecs(x, y):
    """Return x and y reordered so x is ascending; the pairing is preserved."""
    order = np.argsort(x)
    return x[order], y[order]
def loadDatafromRow(datatype, row):
    """Load a single column from ``<folder>result_<datatype>.csv``.

    :param datatype: file-name suffix identifying the result CSV
    :param row: zero-based column index to load
    :return: numpy array with the column values, or ``[]`` if the file is missing
    """
    filestr = folder + 'result_' + datatype + '.csv'
    try:
        # 'with' guarantees the file handle is closed; the previous version
        # opened the file and leaked the handle.
        with open(filestr, "r") as f:
            return np.loadtxt(f, delimiter=",", skiprows=1, usecols=row)
    except FileNotFoundError:
        print(filestr)
        print("File not found.")
        return []
# Entry-point guard; needs to be at the very end of the file so all plotting
# functions are defined before main() runs.
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"numpy.savez",
... | [((600, 612), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (610, 612), True, 'import matplotlib.pyplot as plt\n'), ((877, 896), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.05]'], {}), '([0, 1.05])\n', (885, 896), True, 'import matplotlib.pyplot as plt\n'), ((901, 919), 'matplotlib.pyplot.xscale', 'plt.xscale', (['xscale'], {}), '(xscale)\n', (911, 919), True, 'import matplotlib.pyplot as plt\n'), ((924, 938), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (932, 938), True, 'import matplotlib.pyplot as plt\n'), ((943, 961), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (953, 961), True, 'import matplotlib.pyplot as plt\n'), ((966, 984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rate"""'], {}), "('Rate')\n", (976, 984), True, 'import matplotlib.pyplot as plt\n'), ((989, 1011), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (999, 1011), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1060), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.eps')"], {'format': '"""eps"""'}), "(filename + '.eps', format='eps')\n", (1027, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1072), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1070, 1072), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1123), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1121, 1123), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1350), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (1344, 1350), True, 'import matplotlib.pyplot as plt\n'), ((1355, 1373), 'matplotlib.pyplot.xscale', 'plt.xscale', (['xscale'], {}), '(xscale)\n', (1365, 1373), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1396), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (1388, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1401, 1419), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 210)'], {}), '((0, 210))\n', 
(1409, 1419), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1460), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Termination Round"""'], {}), "('Mean Termination Round')\n", (1434, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1487), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1475, 1487), True, 'import matplotlib.pyplot as plt\n'), ((1492, 1536), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.eps')"], {'format': '"""eps"""'}), "(filename + '.eps', format='eps')\n", (1503, 1536), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1548), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1546, 1548), True, 'import matplotlib.pyplot as plt\n'), ((1717, 1777), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'linestyle': '"""dashed"""', 'color': 'color', 'linewidth': '(1)'}), "(x, y, linestyle='dashed', color=color, linewidth=1)\n", (1725, 1777), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1881), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'type', 'marker': 'marker', 'linestyle': '"""none"""', 'color': 'color', 'markersize': 'markersize'}), "(x, y, label=type, marker=marker, linestyle='none', color=color,\n markersize=markersize)\n", (1790, 1881), True, 'import matplotlib.pyplot as plt\n'), ((1895, 1936), 'numpy.savez', 'np.savez', (["(filename + '_' + type)"], {'x': 'x', 'y': 'y'}), "(filename + '_' + type, x=x, y=y)\n", (1903, 1936), True, 'import numpy as np\n'), ((1970, 1982), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1980, 1982), True, 'import matplotlib.pyplot as plt\n'), ((2279, 2297), 'matplotlib.pyplot.xscale', 'plt.xscale', (['xscale'], {}), '(xscale)\n', (2289, 2297), True, 'import matplotlib.pyplot as plt\n'), ((2302, 2319), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2312, 2319), True, 'import matplotlib.pyplot as plt\n'), ((2324, 2338), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], 
{}), '(xlim)\n', (2332, 2338), True, 'import matplotlib.pyplot as plt\n'), ((2343, 2361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (2353, 2361), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2392), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Failure rate"""'], {}), "('Failure rate')\n", (2376, 2392), True, 'import matplotlib.pyplot as plt\n'), ((2397, 2419), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2407, 2419), True, 'import matplotlib.pyplot as plt\n'), ((2424, 2468), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.eps')"], {'format': '"""eps"""'}), "(filename + '.eps', format='eps')\n", (2435, 2468), True, 'import matplotlib.pyplot as plt\n'), ((2471, 2480), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2478, 2480), True, 'import matplotlib.pyplot as plt\n'), ((2650, 2714), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(1 - y)'], {'linestyle': '"""dashed"""', 'color': 'color', 'linewidth': '(1)'}), "(x, 1 - y, linestyle='dashed', color=color, linewidth=1)\n", (2658, 2714), True, 'import matplotlib.pyplot as plt\n'), ((2717, 2820), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(1 - y)'], {'label': 'type', 'marker': 'marker', 'linestyle': '"""none"""', 'color': 'color', 'markersize': 'markersize'}), "(x, 1 - y, label=type, marker=marker, linestyle='none', color=color,\n markersize=markersize)\n", (2725, 2820), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2881), 'numpy.savez', 'np.savez', (["(filename + '_' + type)"], {'x': '(1 - x)', 'y': '(1 - y)'}), "(filename + '_' + type, x=1 - x, y=1 - y)\n", (2840, 2881), True, 'import numpy as np\n'), ((2905, 2918), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (2915, 2918), True, 'import numpy as np\n'), ((3105, 3158), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""', 'skiprows': '(1)', 'usecols': 'row'}), "(f, delimiter=',', skiprows=1, usecols=row)\n", (3115, 3158), True, 'import 
numpy as np\n')] |
# Module docstring assigned explicitly so the Sphinx autosummary directives
# below are exposed via ``__doc__`` when the module is imported.
__doc__ = \
"""
======================================================================
Multi-modal shale CT imaging analysis (:mod:`mango.application.shale`)
======================================================================
.. currentmodule:: mango.application.shale
Analysis of shale dry, dry-after, Iodine-stained and Diiodomethane-stained CT images.
Functions
=========
.. autosummary::
:toctree: generated/
convertShaleHist2dToTernary - Converts 2D histogram data to ternary Histogram data.
resolveHistogramDuplicateEntries - Resolves duplicate/close ternary coordinates for triangulation.
generateShaleTernaryPlot - Generates ternary histogram plots from micro-porosity segmented data.
"""
from .io import readCsvHistData
from .plot import ternaryPlot
import numpy as np
import scipy as sp
import mango.mpi as mpi
logger, rootLogger = mpi.getLoggers(__name__)
class MicroPorosityBinToPercentMapper:
    """
    Callable mapping a micro-porosity segmentation bin index to a percentage.

    Indices at or below :samp:`bin0percent` map to 0%, indices at or above
    :samp:`bin100percent` map to 100%, and anything in between is linearly
    interpolated.
    """
    def __init__(self, bin0percent, bin100percent):
        self.bin0percent = bin0percent
        self.bin100percent = bin100percent

    def __call__(self, binIdx):
        if binIdx <= self.bin0percent:
            return 0.0
        if binIdx >= self.bin100percent:
            return 100.0
        binSpan = float(self.bin100percent - self.bin0percent)
        return 100.0 * (binIdx - self.bin0percent) / binSpan
# Strategies for histogram points where pore% + organic% exceeds 100%:
INVALID_PROPORTION_RESCALE = 0  # rescale pore/organic so they sum to <= 100%
INVALID_PROPORTION_DISCARD = 1  # drop the offending histogram bins entirely
def convertShaleHist2dToTernary(
    histData,
    invalidProportionMethod=INVALID_PROPORTION_RESCALE,
    cropRange=None,
    cropIndex=None
):
    """
    Returns a :samp:`(N,4)` shaped :obj:`numpy.ndarray` of ternary
    *(mineral,pore,organic,frequency)* data. The input :samp:`histData`
    is 2D histogram data generated from a pair of *micro-porosity* segmented
    images. The x-axis data is assumed to be the CH2I2 differenced data.
    :type histData: :obj:`mango.application.io.HistData`
    :param histData: 2D histogram data of micro-porosity segmentation image pair
       (micro-porosity segmentation of CH2I2-image minus dry-after-image image and
       micro-porosity segmentation of I2-image minus dry-image). Assumes that
       the CH2I2 data is the x-axis of the :samp:`histData`.
    :type invalidProportionMethod: int
    :param invalidProportionMethod: Method used to resolve data
       points where :samp:`pore_percent+organic_percent` exceeds 100%.
    :type cropRange: pair of floats or :samp:`None`
    :param cropRange: When given together with :samp:`cropIndex`, only points whose
       :samp:`cropIndex` component lies in this range are kept; the component is
       then linearly rescaled back to [0,100].
    :type cropIndex: int or :samp:`None`
    :param cropIndex: Component index (0=mineral, 1=pore, 2=organic) that
       :samp:`cropRange` applies to.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`(N,4)` shaped :obj:`numpy.ndarray`, where :samp:`N=num_x_bins*num_y_bins`.
       Each row is :samp:`(mineral-percent, pore-percent, organic-percent, count)`.
    """
    numPorosityBins = histData.hist1dData0.size
    numOrganicityBins = histData.hist1dData1.size
    # First/last histogram bins are treated as the pure 0%/100% classes.
    pMapper = MicroPorosityBinToPercentMapper(1.0, numPorosityBins - 2.0)
    oMapper = MicroPorosityBinToPercentMapper(1.0, numOrganicityBins - 2.0)
    # Cropping is active only when BOTH a range and a component index are given.
    doCrop = (cropRange is not None) and (cropIndex is not None)
    ternList = []
    for pIdx in range(0, numPorosityBins):
        for oIdx in range(0, numOrganicityBins):
            porosity = 100.0 - pMapper(pIdx)
            organicity = 100.0 - oMapper(oIdx)
            if porosity + organicity > 100.0:
                if invalidProportionMethod == INVALID_PROPORTION_RESCALE:
                    # Rescale so pore+organic sums to (just below) 100%.
                    f = 100.0 / (porosity + organicity + 1.0e-3)
                    porosity *= f
                    organicity *= f
                elif invalidProportionMethod == INVALID_PROPORTION_DISCARD:
                    continue
            minerality = 100.0 - porosity - organicity
            valList = [minerality, porosity, organicity]
            if (not doCrop) or (cropRange[0] <= valList[cropIndex] <= cropRange[1]):
                if doCrop:
                    # Stretch the cropped component back onto the full [0,100] range.
                    valList[cropIndex] = \
                        100.0 * (valList[cropIndex] - cropRange[0]) / (cropRange[1] - cropRange[0])
                minerality, porosity, organicity = valList
                ternList.append([minerality, porosity, organicity, histData.hist2dData[oIdx, pIdx]])
    # np.array: the former sp.array was a NumPy alias removed from modern SciPy.
    return np.array(ternList, dtype="float64")
def resolveHistogramDuplicateEntries(ternaryArray, tol=1.0e-4):
    """
    Merge rows of the :samp:`(N,4)` histogram whose ternary coordinates lie
    within Euclidean distance :samp:`tol` of each other, summing their counts.

    :param ternaryArray: :samp:`(N,4)` array of (mineral, pore, organic, count) rows.
    :param tol: coordinates closer than this are considered duplicates.
    :return: de-duplicated :samp:`(M,4)` array (:samp:`M <= N`), same dtype.
    """
    # Uses np.* throughout: the former sp.ones/sp.sum/... were NumPy aliases
    # that have been removed from modern SciPy.
    numCoords = ternaryArray.shape[0]
    unmerged = np.ones((numCoords,), dtype="bool")
    coordArray = ternaryArray[:, 0:3]
    mergedRows = []
    for i in range(0, numCoords):
        if not unmerged[i]:
            continue
        coord = coordArray[i, :]
        delta = coordArray - coord
        dist = np.sqrt(np.sum(delta * delta, axis=1))
        # Indices of still-unmerged coordinates near this one (includes i itself).
        nearCoordIdxs = np.where(np.logical_and(dist < tol, unmerged))
        mergedRows.append(coord.tolist() + [np.sum(ternaryArray[:, -1][nearCoordIdxs])])
        unmerged[nearCoordIdxs] = False
    return np.array(mergedRows, dtype=ternaryArray.dtype)
def generateShaleTernaryPlot(
    histData,
    cropRange = None,
    cropIndex = None,
    invalidProportionMethodList=[INVALID_PROPORTION_RESCALE,INVALID_PROPORTION_DISCARD],
    doLogScale = True,
    cmap = None,
    contourNumLevels = 32,
    contourNumLines = None,
    doContourColourBar = False,
    shading='gouraud'
):
    """
    Returns a list of (:obj:`matplotlib.figure.Figure`, :obj:`str`) pairs with ternary
    *mineral-pore-organic* 2D histogram plots. Four figures are generated per
    entry of :samp:`invalidProportionMethodList`: a coordinate scatter, the
    triangulation, a pseudo-colour histogram and a contour plot.
    :type histData: :obj:`mango.application.io.HistData`
    :param histData: 2D histogram data of micro-porosity segmentation image pair
       (micro-porosity segmentation of CH2I2-image minus dry-after-image image and
       micro-porosity segmentation of I2-image minus dry-image). Assumes that
       the CH2I2 data is the x-axis of the :samp:`histData`.
    :param cropRange: Passed through to :func:`convertShaleHist2dToTernary`.
    :param cropIndex: Passed through to :func:`convertShaleHist2dToTernary`.
    :param invalidProportionMethodList: One set of plots is generated per method.
    :param doLogScale: If :samp:`True`, counts are displayed as :samp:`log(1+count)`.
    :param cmap: matplotlib colour-map; defaults to reversed gray-scale.
    :param contourNumLevels: Number of filled contour levels.
    :param contourNumLines: Number of contour lines (defaults to half the levels).
    :param doContourColourBar: If :samp:`True`, add a colour bar to the contour figure.
    :param shading: Shading mode passed to :samp:`tripcolor`.
    :rtype: :obj:`list` of pairs
    :return: List of (:obj:`matplotlib.figure.Figure`, :obj:`str`) pairs.
    """
    # Imported lazily so the module can be imported without matplotlib configured.
    import matplotlib.pyplot as plt
    if (cmap == None):
        cmap = plt.cm.get_cmap("gray_r")
    figList = []
    origCountSum = sp.sum(histData.hist2dData)
    labels=("mineral", "pore", "organic")
    for invalidProportionMethod in invalidProportionMethodList:
        ternaryArray = convertShaleHist2dToTernary(histData, invalidProportionMethod, cropRange=cropRange, cropIndex=cropIndex)
        # Merge near-identical ternary coordinates so triangulation does not
        # receive degenerate (duplicate) points.
        ternaryArray = resolveHistogramDuplicateEntries(ternaryArray, tol=0.9990)
        countSum = sp.sum(ternaryArray[:,-1])
        percentCountsDiscarded = 100.0*(origCountSum-countSum)/float(origCountSum)
        titleOffset = 1.08
        fontSize = "small"
        if (invalidProportionMethod == INVALID_PROPORTION_DISCARD):
            invalidProportionMethodStr = "discard"
            titleStr = "Percent Counts Discarded = %g%%" % percentCountsDiscarded
        else:
            invalidProportionMethodStr = "rescale"
            titleStr = "Rescaled Points"
        if (cropIndex != None) and (cropRange != None):
            titleStr += " (%s cropped to range [%s%%,%s%%])" % ((labels[cropIndex], ) + cropRange)
            fontSize = "x-small"
        logger.info(titleStr)
        logger.debug("ternaryArray.shape=%s", (ternaryArray.shape,))
        logger.debug("ternaryArray:\n")
        logger.debug(str(ternaryArray))
        logger.debug(
            "ternaryArray (min-x,min-y,min-z)=(%s,%s,%s)"
            %
            (np.min(ternaryArray[:,0]), np.min(ternaryArray[:,1]), np.min(ternaryArray[:,2]))
        )
        logger.debug(
            "ternaryArray (max-x,max-y,max-z)=(%s,%s,%s)"
            %
            (np.max(ternaryArray[:,0]), np.max(ternaryArray[:,1]), np.max(ternaryArray[:,2]))
        )
        # Project the 3-component ternary coordinates onto the 2D plot plane.
        ternaryPlotData, ternAxes = ternaryPlot(ternaryArray[:, 0:3], labels=labels)
        logger.debug("ternaryPlotData.shape=%s", (ternaryPlotData.shape,))
        logger.debug("ternaryPlotData:\n")
        logger.debug(str(ternaryPlotData))
        # Figure 1: raw scatter of the projected coordinates.
        ax, fig = ternAxes.createAxes()
        ax.scatter(ternaryPlotData[:,0], ternaryPlotData[:,1])
        figList.append((fig, "coords_%s" % invalidProportionMethodStr))
        if (doLogScale):
            # Compress the dynamic range of the counts for display.
            ternaryArray[:,-1] = sp.log(1.0+ternaryArray[:,-1])
            pass
        # Figure 2: the triangulation of the coordinates.
        ax, fig = ternAxes.createAxes()
        ax.triplot(ternaryPlotData[:,0], ternaryPlotData[:,1])
        figList.append((fig, "coords_triangulated_%s" % invalidProportionMethodStr))
        # Figure 3: pseudo-colour histogram over the triangulation.
        ax, fig = ternAxes.createAxes()
        ax.tripcolor(ternaryPlotData[:,0], ternaryPlotData[:,1], ternaryArray[:,-1], shading=shading, cmap=cmap)
        t = plt.title(titleStr, fontsize=fontSize)
        t.set_y(titleOffset)
        figList.append((fig, "ternary_triangulated_%s" % invalidProportionMethodStr))
        # Figure 4: filled contour plot with overlaid contour lines.
        ax, fig = ternAxes.createAxes()
        if (contourNumLines == None):
            contourNumLines = contourNumLevels//2
        cs = ax.tricontourf(ternaryPlotData[:,0], ternaryPlotData[:,1], ternaryArray[:,-1],contourNumLevels, cmap=cmap)
        if (doContourColourBar):
            fig.colorbar(cs, shrink=0.9)
        contourPlt = ax.tricontour(ternaryPlotData[:,0], ternaryPlotData[:,1], ternaryArray[:,-1],contourNumLines, colors='k', linewidths=1)
        t = plt.title(titleStr, fontsize=fontSize)
        t.set_y(titleOffset)
        figList.append((fig, "ternary_contour_triangulated_%s" % invalidProportionMethodStr))
    return figList
__all__ = [s for s in dir() if not s.startswith('_')]
| [
"matplotlib.pyplot.title",
"mango.mpi.getLoggers",
"scipy.sum",
"scipy.ones",
"scipy.logical_and",
"scipy.log",
"numpy.min",
"numpy.max",
"scipy.array",
"matplotlib.pyplot.cm.get_cmap"
] | [((868, 892), 'mango.mpi.getLoggers', 'mpi.getLoggers', (['__name__'], {}), '(__name__)\n', (882, 892), True, 'import mango.mpi as mpi\n'), ((4434, 4469), 'scipy.array', 'sp.array', (['ternList'], {'dtype': '"""float64"""'}), "(ternList, dtype='float64')\n", (4442, 4469), True, 'import scipy as sp\n'), ((4702, 4737), 'scipy.ones', 'sp.ones', (['(numCoords,)'], {'dtype': '"""bool"""'}), "((numCoords,), dtype='bool')\n", (4709, 4737), True, 'import scipy as sp\n'), ((5195, 5241), 'scipy.array', 'sp.array', (['nonDupList'], {'dtype': 'ternaryArray.dtype'}), '(nonDupList, dtype=ternaryArray.dtype)\n', (5203, 5241), True, 'import scipy as sp\n'), ((6333, 6360), 'scipy.sum', 'sp.sum', (['histData.hist2dData'], {}), '(histData.hist2dData)\n', (6339, 6360), True, 'import scipy as sp\n'), ((6271, 6296), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""gray_r"""'], {}), "('gray_r')\n", (6286, 6296), True, 'import matplotlib.pyplot as plt\n'), ((6696, 6723), 'scipy.sum', 'sp.sum', (['ternaryArray[:, -1]'], {}), '(ternaryArray[:, -1])\n', (6702, 6723), True, 'import scipy as sp\n'), ((8844, 8882), 'matplotlib.pyplot.title', 'plt.title', (['titleStr'], {'fontsize': 'fontSize'}), '(titleStr, fontsize=fontSize)\n', (8853, 8882), True, 'import matplotlib.pyplot as plt\n'), ((9483, 9521), 'matplotlib.pyplot.title', 'plt.title', (['titleStr'], {'fontsize': 'fontSize'}), '(titleStr, fontsize=fontSize)\n', (9492, 9521), True, 'import matplotlib.pyplot as plt\n'), ((8436, 8469), 'scipy.log', 'sp.log', (['(1.0 + ternaryArray[:, -1])'], {}), '(1.0 + ternaryArray[:, -1])\n', (8442, 8469), True, 'import scipy as sp\n'), ((4946, 4967), 'scipy.sum', 'sp.sum', (['(d * d)'], {'axis': '(1)'}), '(d * d, axis=1)\n', (4952, 4967), True, 'import scipy as sp\n'), ((5017, 5045), 'scipy.logical_and', 'sp.logical_and', (['(d < tol)', 'msk'], {}), '(d < tol, msk)\n', (5031, 5045), True, 'import scipy as sp\n'), ((7660, 7686), 'numpy.min', 'np.min', (['ternaryArray[:, 0]'], {}), 
'(ternaryArray[:, 0])\n', (7666, 7686), True, 'import numpy as np\n'), ((7687, 7713), 'numpy.min', 'np.min', (['ternaryArray[:, 1]'], {}), '(ternaryArray[:, 1])\n', (7693, 7713), True, 'import numpy as np\n'), ((7714, 7740), 'numpy.min', 'np.min', (['ternaryArray[:, 2]'], {}), '(ternaryArray[:, 2])\n', (7720, 7740), True, 'import numpy as np\n'), ((7858, 7884), 'numpy.max', 'np.max', (['ternaryArray[:, 0]'], {}), '(ternaryArray[:, 0])\n', (7864, 7884), True, 'import numpy as np\n'), ((7885, 7911), 'numpy.max', 'np.max', (['ternaryArray[:, 1]'], {}), '(ternaryArray[:, 1])\n', (7891, 7911), True, 'import numpy as np\n'), ((7912, 7938), 'numpy.max', 'np.max', (['ternaryArray[:, 2]'], {}), '(ternaryArray[:, 2])\n', (7918, 7938), True, 'import numpy as np\n'), ((5095, 5137), 'scipy.sum', 'sp.sum', (['ternaryArray[:, -1][nearCoordIdxs]'], {}), '(ternaryArray[:, -1][nearCoordIdxs])\n', (5101, 5137), True, 'import scipy as sp\n')] |
import os
import sys
sys.path.append("..")
import argparse
from pathlib import Path
# Import teaching utils
import pandas as pd
import numpy as np
from utils.neuralnetwork import NeuralNetwork
# Import sklearn metrics
from sklearn import metrics
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
def load_mnist(data_path):
    """Load the MNIST image/label CSVs from data_path, downloading on first use.

    If both CSV files exist they are read from disk; otherwise the dataset is
    fetched via OpenML and cached as CSVs for subsequent runs.

    :param data_path: directory holding (or to hold) mnist_img.csv / mnist_label.csv
    :return: tuple (img, label) of numpy arrays
    """
    img_path = os.path.join(data_path, 'mnist_img.csv')
    label_path = os.path.join(data_path, 'mnist_label.csv')
    if os.path.isfile(img_path) and os.path.isfile(label_path):
        img = pd.read_csv(img_path)
        label = pd.read_csv(label_path).squeeze()  # Squeezes DataFrame into Series
    else:
        # makedirs(exist_ok=True) replaces the old isdir()+mkdir() pair: it also
        # creates missing parents and avoids the race between check and creation.
        os.makedirs(data_path, exist_ok=True)
        img, label = fetch_openml('mnist_784', version=1, return_X_y=True)
        img.to_csv(img_path, sep=',', encoding='utf-8', index=False)
        label.to_csv(label_path, sep=',', encoding='utf-8', index=False)
    # We might need to excplicitly convert to numpy arrays for some versions of pandas and sklearn
    return (np.array(img), np.array(label))
def main(data_path, epochs):
    """Train a feed-forward network on MNIST and print a classification report."""
    images, labels = load_mnist(data_path)
    # Fixed 0-255 normalization (rather than data-driven min-max): a new image
    # with extreme pixel values still maps into the expected [0, 1] range.
    images = images / 255.0
    num_classes = len(sorted(set(labels)))
    # Hold out 20% of the data for evaluation (fixed seed for reproducibility).
    x_train, x_test, y_train, y_test = train_test_split(
        images, labels, random_state=1337, test_size=0.2)
    # One-hot encode the labels (e.g. 2 becomes [0,0,1,0,0,0,0,0,0,0]).
    y_train = LabelBinarizer().fit_transform(y_train)
    y_test = LabelBinarizer().fit_transform(y_test)
    # One input node per pixel, one output node per class.
    network = NeuralNetwork([x_train.shape[1], 32, 16, num_classes])
    network.fit(x_train, y_train, epochs=epochs)
    # argmax converts per-class probabilities back to a single predicted digit.
    predictions = network.predict(x_test).argmax(axis=1)
    print(metrics.classification_report(y_test.argmax(axis=1), predictions))
if __name__ == '__main__':
    # Command-line interface: data location and training length.
    arg_parser = argparse.ArgumentParser(
        description="train neural network on the full MNIST dataset and view the classifier metrics")
    arg_parser.add_argument("-d", "--data_path", type=Path, default=Path('../data/'),
                            help="path to where the MNIST csv-files dataset is saved or where to save it")
    arg_parser.add_argument("-e", "--epochs", type=int, default=5,
                            help="numbers of epochs to train")
    cli_args = arg_parser.parse_args()
    main(data_path=cli_args.data_path, epochs=cli_args.epochs)
"sys.path.append",
"os.mkdir",
"sklearn.preprocessing.LabelBinarizer",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"os.path.isdir",
"utils.neuralnetwork.NeuralNetwork",
"os.path.isfile",
"pathlib.Path",
"numpy.array",
"sklearn.datasets.fetch_openml... | [((21, 42), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (36, 42), False, 'import sys\n'), ((437, 477), 'os.path.join', 'os.path.join', (['data_path', '"""mnist_img.csv"""'], {}), "(data_path, 'mnist_img.csv')\n", (449, 477), False, 'import os\n'), ((495, 537), 'os.path.join', 'os.path.join', (['data_path', '"""mnist_label.csv"""'], {}), "(data_path, 'mnist_label.csv')\n", (507, 537), False, 'import os\n'), ((1780, 1842), 'sklearn.model_selection.train_test_split', 'train_test_split', (['img', 'label'], {'random_state': '(1337)', 'test_size': '(0.2)'}), '(img, label, random_state=1337, test_size=0.2)\n', (1796, 1842), False, 'from sklearn.model_selection import train_test_split\n'), ((2116, 2172), 'utils.neuralnetwork.NeuralNetwork', 'NeuralNetwork', (['[img_train.shape[1], 32, 16, num_classes]'], {}), '([img_train.shape[1], 32, 16, num_classes])\n', (2129, 2172), False, 'from utils.neuralnetwork import NeuralNetwork\n'), ((2724, 2851), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train neural network on the full MNIST dataset and view the classifier metrics"""'}), "(description=\n 'train neural network on the full MNIST dataset and view the classifier metrics'\n )\n", (2747, 2851), False, 'import argparse\n'), ((546, 570), 'os.path.isfile', 'os.path.isfile', (['img_path'], {}), '(img_path)\n', (560, 570), False, 'import os\n'), ((575, 601), 'os.path.isfile', 'os.path.isfile', (['label_path'], {}), '(label_path)\n', (589, 601), False, 'import os\n'), ((617, 638), 'pandas.read_csv', 'pd.read_csv', (['img_path'], {}), '(img_path)\n', (628, 638), True, 'import pandas as pd\n'), ((827, 880), 'sklearn.datasets.fetch_openml', 'fetch_openml', (['"""mnist_784"""'], {'version': '(1)', 'return_X_y': '(True)'}), "('mnist_784', version=1, return_X_y=True)\n", (839, 880), False, 'from sklearn.datasets import fetch_openml\n'), ((1135, 1148), 'numpy.array', 'np.array', (['img'], {}), 
'(img)\n', (1143, 1148), True, 'import numpy as np\n'), ((1150, 1165), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1158, 1165), True, 'import numpy as np\n'), ((747, 771), 'os.path.isdir', 'os.path.isdir', (['data_path'], {}), '(data_path)\n', (760, 771), False, 'import os\n'), ((785, 804), 'os.mkdir', 'os.mkdir', (['data_path'], {}), '(data_path)\n', (793, 804), False, 'import os\n'), ((1947, 1963), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (1961, 1963), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((2008, 2024), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (2022, 2024), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((2900, 2916), 'pathlib.Path', 'Path', (['"""../data/"""'], {}), "('../data/')\n", (2904, 2916), False, 'from pathlib import Path\n'), ((655, 678), 'pandas.read_csv', 'pd.read_csv', (['label_path'], {}), '(label_path)\n', (666, 678), True, 'import pandas as pd\n')] |
from gpflow.actions import Action, Loop
from gpflow.training import NatGradOptimizer, AdamOptimizer, ScipyOptimizer
from gpflow import settings
from gpflow.transforms import Transform
from ..logging import logging
import tensorflow as tf
import os
import numpy as np
class PrintAction(Action):
    """Loop action that logs the current model likelihood with a fixed prefix."""
    def __init__(self, model, text):
        self.model = model
        self.text = text

    def run(self, ctx):
        current_likelihood = ctx.session.run(self.model.likelihood_tensor)
        logging.warning('{}: iteration {} likelihood {:.4f}'.format(self.text, ctx.iteration, current_likelihood))
class SendSummary(Action):
    """
    Loop action that writes TensorBoard summaries for all model parameters
    (and the likelihood) every `write_period` iterations.
    """
    def __init__(self, model, writer, write_period=10):
        # writer: presumably a tf.summary.FileWriter -- only add_summary() is
        # called on it here; TODO confirm against callers.
        self.write_period = write_period
        self.iteration = 0
        self.model = model
        self.writer = writer
    def init(self):
        """Build the merged summary op; must be called before the loop starts."""
        parameters = list(self.model.parameters)
        # Pick up any summaries already registered elsewhere in the graph.
        other_summaries = tf.summary.merge_all()
        if other_summaries is None:
            other_summaries = []
        else:
            if not isinstance(other_summaries, (list,tuple)):
                other_summaries = [other_summaries]
            other_summaries = list(other_summaries)
        # Add scalar parameters: every trainable size-1 parameter becomes a
        # scalar summary (reshape to rank-0 as tf.summary.scalar requires).
        scalar_summaries = [tf.summary.scalar(p.pathname, tf.reshape(p.constrained_tensor, []))
                for p in parameters if (p.size == 1 and p.trainable)]
        scalar_summaries.append(tf.summary.scalar("optimisation/likelihood",
            self.model._likelihood_tensor))
        self.scalar_summary = tf.summary.merge(scalar_summaries)
        # Add non-scalar parameters as histogram summaries.
        hist_summaries = [tf.summary.histogram(p.pathname, p.constrained_tensor)
                for p in parameters if p.size > 1]
        self.hist_summary = tf.summary.merge(hist_summaries)
        self.summary = tf.summary.merge([self.scalar_summary,self.hist_summary] + other_summaries)
    def run(self, ctx):
        # Evaluate and write the merged summary only every write_period-th call.
        if self.iteration % self.write_period == 0:
            summary = ctx.session.run(self.summary)
            self.writer.add_summary(summary,global_step=ctx.iteration)
        self.iteration += 1
class SaveModel(Action):
    """Loop action that checkpoints the TF session every `save_period` iterations."""
    def __init__(self, checkpoint_dir, save_period=1000):
        self.checkpoint_dir = os.path.abspath(checkpoint_dir)
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        self.save_period = save_period
        self.iteration = 0
        self.saver = tf.train.Saver(max_to_keep=1)

    def run(self, ctx):
        # Write a checkpoint only on every save_period-th invocation.
        if self.iteration % self.save_period == 0:
            self.saver.save(
                ctx.session, self.checkpoint_dir, global_step=ctx.iteration)
        self.iteration += 1
def restore_session(session, checkpoint_dir):
    """
    Restores Tensorflow session from the latest checkpoint.

    :param session: The TF session
    :param checkpoint_dir: checkpoint files directory.
    """
    latest_ckpt = tf.train.latest_checkpoint(checkpoint_dir)
    logger = settings.logger()
    if logger.isEnabledFor(logging.INFO):
        logger.info("Restoring session from `%s`.", latest_ckpt)
    tf.train.Saver(max_to_keep=1).restore(session, latest_ckpt)
def train_with_adam(model, iterations, callback=None, **kwargs):
    """Optimize `model` for `iterations` steps with Adam.

    The learning rate starts at 0.03 and decays by a factor 1/1.5 halfway
    through training (staircase exponential decay).

    :param model: gpflow model exposing enquire_session()/anchor().
    :param iterations: number of optimization steps.
    :param callback: optional list/tuple of loop actions run alongside Adam;
        actions may define an optional init() that is called before the loop.
    """
    with tf.variable_scope("learning_rate"):
        global_step = tf.Variable(0, trainable=False)
        starter_learning_rate = 0.03
        decay_steps = int(iterations/2.)
        decay_rate = 1./1.5
        learning_rate = tf.train.exponential_decay(starter_learning_rate,
                tf.assign_add(global_step,1), decay_steps, decay_rate, staircase=True)
        tf.summary.scalar("optimisation/learning_rate",learning_rate)
    sess = model.enquire_session()
    tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='learning_rate')
    sess.run(tf.variables_initializer(var_list=tf_vars))
    # BUGFIX: the old `assert isinstance(callback, (tuple,list))` rejected the
    # documented callback=None default and made the `callback is None` branch
    # unreachable; `actions + callback` also raised TypeError for tuples.
    if callback is None:
        callback = []
    assert isinstance(callback, (tuple, list))
    adam = AdamOptimizer(learning_rate).make_optimize_action(model)
    actions = [adam] + list(callback)
    for c in callback:
        # Callbacks may optionally define init(); call it when present instead
        # of swallowing arbitrary exceptions with a bare except.
        init_fn = getattr(c, "init", None)
        if callable(init_fn):
            init_fn()
    Loop(actions, stop=iterations)()
    model.anchor(model.enquire_session())
def train_with_bfgs(model, learning_rate, iterations, callback=None):
    """Optimize `model` with scipy's optimizer (runs to its own convergence).

    NOTE(review): `learning_rate` and `iterations` are accepted but never used
    -- ScipyOptimizer applies its own termination criterion. Confirm whether
    `iterations` was meant to be passed as Loop(actions, stop=iterations).
    """
    sess = model.enquire_session()
    # NOTE(review): this assert makes the `callback is None` branch below
    # unreachable -- callers must always pass a tuple/list of actions.
    assert isinstance(callback, (tuple,list))
    for c in callback:
        c.init()
    adam = ScipyOptimizer().make_optimize_action(model)
    actions = [adam]
    actions = actions if callback is None else actions + callback
    Loop(actions)()
    model.anchor(model.enquire_session())
class GammaSchedule(Action):
    """Loop action that runs the gamma-increment op once per loop iteration."""

    def __init__(self, op_increment_gamma):
        self.op_increment_gamma = op_increment_gamma

    def run(self, ctx):
        # Executing the op mutates the session-held gamma variable in place.
        ctx.session.run(self.op_increment_gamma)
def train_with_nat_and_adam(model, initial_learning_rate=0.03,learning_rate_steps=2,
                   learning_rate_decay=1.5,gamma_start=1e-5,gamma_add=1e-3,gamma_mul=1.1,
                   gamma_max=0.1,gamma_fallback=1e-1,iterations=500, var_list=None, callback=None, **kwargs):
    """Optimise ``model`` alternating Adam steps with natural-gradient steps.

    The variational parameters (q_mu, q_sqrt) are optimised by natural
    gradient with a step size ``gamma`` that grows each iteration (by the
    smaller of ``gamma_mul``-scaling and ``gamma_add``) up to ``gamma_max``,
    and shrinks by ``gamma_fallback`` whenever a Cholesky failure
    (InvalidArgumentError) occurs. All other parameters use Adam with a
    staircase exponentially-decaying learning rate.

    :param model: gpflow model with ``q_mu``/``q_sqrt`` variational parameters
    :param initial_learning_rate: Adam starting learning rate
    :param learning_rate_steps: number of decay intervals over ``iterations``
    :param learning_rate_decay: per-interval decay factor for the Adam rate
    :param gamma_start: initial natural-gradient step size
    :param gamma_add: additive cap on the per-step gamma increase
    :param gamma_mul: multiplicative gamma growth factor
    :param gamma_max: upper bound on gamma
    :param gamma_fallback: factor applied to gamma after a Cholesky failure
    :param iterations: total number of optimisation steps
    :param var_list: variable pairs for the natural-gradient update
        (defaults to ``[[model.q_mu, model.q_sqrt]]``)
    :param callback: iterable of extra gpflow Actions run each step
        (NOTE(review): the loop below iterates over it, so None would crash)
    """
    # we'll make use of this later when we use a XiTransform
    if var_list is None:
        var_list = [[model.q_mu, model.q_sqrt]]
    # we don't want adam optimizing these
    model.q_mu.set_trainable(False)
    model.q_sqrt.set_trainable(False)
    with tf.variable_scope("learning_rate"):
        global_step = tf.Variable(0, trainable=False)
        starter_learning_rate = initial_learning_rate
        decay_steps = int(iterations/learning_rate_steps)
        decay_rate = 1./learning_rate_decay
        # assign_add bumps the step as a side effect each time the rate is read
        learning_rate = tf.train.exponential_decay(starter_learning_rate,
            tf.assign_add(global_step,1), decay_steps, decay_rate, staircase=True)
    tf.summary.scalar("optimisation/learning_rate",learning_rate)
    sess = model.enquire_session()
    tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='learning_rate')
    sess.run(tf.variables_initializer(var_list=tf_vars))
    with tf.variable_scope("gamma"):
        # gamma = tf.Variable(gamma_start, dtype=tf.float64)
        # beta = tf.Variable(1.,dtype=tf.float64)
        gamma_start = tf.cast(gamma_start,tf.float64)
        gamma_max = tf.cast(gamma_max,tf.float64)
        mul_step = tf.cast(gamma_mul,tf.float64)
        add_step = tf.cast(gamma_add,tf.float64)
        gamma = tf.Variable(gamma_start, dtype=tf.float64)
        gamma_ref = tf.identity(gamma)
        gamma_fallback = tf.cast(gamma_fallback, tf.float64) # we'll reduce by this factor if there's a cholesky failure
        op_fallback_gamma = tf.assign(gamma, gamma * gamma_fallback)
        # grow gamma by min(gamma*mul_step - gamma, add_step), clipped at gamma_max
        diff = tf.where(gamma_ref*mul_step < add_step, gamma_ref*mul_step, add_step)
        op_gamma_inc = tf.assign(gamma, tf.where(gamma_ref + diff > gamma_max, gamma_max, gamma_ref + diff))
    tf.summary.scalar("optimisation/gamma",gamma)
    sess = model.enquire_session()
    tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='gamma')
    sess.run(tf.variables_initializer(var_list=tf_vars))
    natgrad = NatGradOptimizer(gamma_ref).make_optimize_action(model, var_list=var_list)
    adam = AdamOptimizer(learning_rate).make_optimize_action(model)
    actions = [adam, natgrad, GammaSchedule(op_gamma_inc)]
    actions = actions if callback is None else actions + callback
    for c in callback:
        try:
            c.init()
        except:
            pass
    sess = model.enquire_session()
    it = 0
    # restart the loop after every Cholesky failure, resuming from the
    # iteration it reached, with gamma reduced by op_fallback_gamma
    while it < iterations:
        try:
            looper = Loop(actions, start=it, stop=iterations)
            looper()
            it = looper.iteration
        except tf.errors.InvalidArgumentError:
            it = looper.iteration
            g, gf = sess.run([gamma_ref, op_fallback_gamma])
            logging.info('gamma = {} on iteration {} is too big! Falling back to {}'.format(g, it, gf))
    model.anchor(model.enquire_session())
def train_with_nat(model, gamma_start=1e-5,gamma_add=1e-3,gamma_mul=1.04,
                   gamma_max=0.1,gamma_fallback=1e-1,iterations=500, var_list=None, callback=None, **kwargs):
    """Optimise ``model`` with natural gradient only (no Adam).

    ``gamma`` grows each iteration (by the smaller of ``gamma_mul``-scaling
    and ``gamma_add``) up to ``gamma_max`` and is multiplied by
    ``gamma_fallback`` whenever a Cholesky failure occurs.

    :param model: gpflow model with ``q_mu``/``q_sqrt`` variational parameters
    :param gamma_start: initial natural-gradient step size
    :param gamma_add: additive cap on the per-step gamma increase
    :param gamma_mul: multiplicative gamma growth factor
    :param gamma_max: upper bound on gamma
    :param gamma_fallback: factor applied to gamma after a Cholesky failure
    :param iterations: total number of optimisation steps
    :param var_list: variable pairs for the natural-gradient update
        (defaults to ``[[model.q_mu, model.q_sqrt]]``)
    :param callback: iterable of extra gpflow Actions run each step
        (NOTE(review): the loop below iterates over it, so None would crash)
    """
    # we'll make use of this later when we use a XiTransform
    if var_list is None:
        var_list = [[model.q_mu, model.q_sqrt]]
    with tf.variable_scope("gamma"):
        gamma_start = tf.cast(gamma_start,tf.float64)
        gamma_max = tf.cast(gamma_max,tf.float64)
        mul_step = tf.cast(gamma_mul,tf.float64)
        add_step = tf.cast(gamma_add,tf.float64)
        gamma = tf.Variable(gamma_start, dtype=tf.float64, trainable=False)
        gamma_ref = tf.identity(gamma)
        gamma_fallback = tf.cast(gamma_fallback, tf.float64) # we'll reduce by this factor if there's a cholesky failure
        op_fallback_gamma = tf.assign(gamma, gamma_ref * gamma_fallback)
        # grow gamma by min(gamma*mul_step - gamma, add_step), clipped at gamma_max
        diff = tf.where(gamma_ref*mul_step < add_step, gamma_ref*mul_step, add_step)
        op_gamma_inc = tf.assign(gamma, tf.where(gamma_ref + diff > gamma_max, gamma_max, gamma_ref + diff))
    tf.summary.scalar("optimisation/gamma",gamma)
    sess = model.enquire_session()
    tf_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='gamma')
    sess.run(tf.variables_initializer(var_list=tf_vars))
    natgrad = NatGradOptimizer(gamma_ref).make_optimize_action(model, var_list=var_list)
    actions = [natgrad, GammaSchedule(op_gamma_inc)]
    actions = actions if callback is None else actions + callback
    for c in callback:
        try:
            c.init()
        except:
            pass
    sess = model.enquire_session()
    it = 0
    # restart the loop after every Cholesky failure, resuming from the
    # iteration it reached, with gamma reduced by op_fallback_gamma
    while it < iterations:
        try:
            looper = Loop(actions, start=it, stop=iterations)
            looper()
            it = looper.iteration
        except tf.errors.InvalidArgumentError:
            it = looper.iteration
            g,gf = sess.run([gamma_ref, op_fallback_gamma])
            logging.info('gamma = {} on iteration {} is too big! Falling back to {}'.format(g, it, gf))
    model.anchor(model.enquire_session())
class Reshape(Transform):
    """
    Identity transform between two compatible shapes:
        y = x
    where y has shape y_shape, x has shape x_shape, and the two shapes
    hold the same number of elements.
    """

    def __init__(self, y_shape, x_shape):
        self._yshape = y_shape
        self._xshape = x_shape

    def forward_tensor(self, x):
        return tf.reshape(x, self._yshape)

    def backward_tensor(self, y):
        return tf.reshape(y, self._xshape)

    def forward(self, x):
        return np.reshape(x, self._yshape)

    def backward(self, y):
        return np.reshape(y, self._xshape)

    def log_jacobian_tensor(self, x):
        # a reshape is volume-preserving, so the log-Jacobian is zero
        return tf.zeros((1,), settings.float_type)

    def __str__(self):
        return 'Reshape'
class MatrixSquare(Transform):
    """
    The matrix-square transform (batched):
        y = x x^T
    maps a Cholesky-like factor x to the PSD matrix y; the backward
    direction recovers x via the Cholesky factorisation of y.
    """

    def __init__(self):
        pass

    def forward_tensor(self, x):
        return tf.matmul(x, x, transpose_b=True)

    def backward_tensor(self, y):
        return tf.cholesky(y)

    def forward(self, x):
        return np.einsum("bij,bkj->bik", x, x)

    def backward(self, y):
        return np.stack([np.linalg.cholesky(yi) for yi in y], axis=0)

    def log_jacobian_tensor(self, x):
        """
        Input (N,L,L)
        """
        # Fix: the original referenced an undefined name `L` here, raising a
        # NameError at runtime; the diagonal of the input x was intended.
        return tf.reduce_sum(tf.log(tf.matrix_diag_part(x)))

    def __str__(self):
        return 'MatrixSquare'
class RescaleVec(Transform):
    """
    A transform that can linearly rescale parameters:
    .. math::
       y = factor * x
    Use `Chain` to combine this with another transform such as Log1pe:
    `Chain(Rescale(), otherTransform())` gives y = factor * t(x), and
    `Chain(otherTransform(), Rescale())` gives y = t(factor * x).
    Useful for avoiding overly large or small scales in optimization/MCMC;
    for a positive quantity of a given scale use `Rescale(scale)(positive)`.
    """

    def __init__(self, factor=1.0):
        self.factor = np.array(factor)

    def forward_tensor(self, x):
        return self.factor * x

    def forward(self, x):
        return self.factor * x

    def backward_tensor(self, y):
        return y / self.factor

    def backward(self, y):
        return y / self.factor

    def log_jacobian_tensor(self, x):
        # log|dy/dx| summed over elements: sum(log(factor))
        scale = tf.cast(self.factor, dtype=settings.float_type)
        return tf.reduce_sum(tf.log(scale))

    def __str__(self):
        return "{}*".format(self.factor)
class LogisticVec(Transform):
    """
    The logistic transform, useful for keeping variables constrained between
    the limits a and b:
    .. math::
       y = a + (b-a) s(x)
       s(x) = 1 / (1 + \exp(-x))
    """

    def __init__(self, a, b):
        self.a = np.array(a)
        self.b = np.array(b)

    def forward_tensor(self, x):
        denom = 1. + tf.exp(-x)
        return self.a + (self.b - self.a) / denom

    def forward(self, x):
        denom = 1. + np.exp(-x)
        return self.a + (self.b - self.a) / denom

    def backward_tensor(self, y):
        ratio = (self.b - self.a) / (y - self.a)
        return -tf.log(ratio - 1.)

    def backward(self, y):
        ratio = (self.b - self.a) / (y - self.a)
        return -np.log(ratio - 1.)

    def log_jacobian_tensor(self, x):
        # log-det of the logistic Jacobian, summed over elements
        return tf.reduce_sum(x - 2. * tf.log(tf.exp(x) + 1.) + np.log(self.b - self.a))

    def __str__(self):
        return "[{}, {}]".format(self.a, self.b)
| [
"tensorflow.reduce_sum",
"tensorflow.get_collection",
"tensorflow.identity",
"tensorflow.variables_initializer",
"tensorflow.reshape",
"numpy.einsum",
"gpflow.settings.logger",
"tensorflow.matmul",
"tensorflow.assign",
"tensorflow.train.latest_checkpoint",
"tensorflow.Variable",
"numpy.exp",
... | [((3182, 3224), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3208, 3224), True, 'import tensorflow as tf\n'), ((3238, 3255), 'gpflow.settings.logger', 'settings.logger', ([], {}), '()\n', (3253, 3255), False, 'from gpflow import settings\n'), ((3380, 3409), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1)'}), '(max_to_keep=1)\n', (3394, 3409), True, 'import tensorflow as tf\n'), ((4024, 4086), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""optimisation/learning_rate"""', 'learning_rate'], {}), "('optimisation/learning_rate', learning_rate)\n", (4041, 4086), True, 'import tensorflow as tf\n'), ((4135, 4206), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""learning_rate"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='learning_rate')\n", (4152, 4206), True, 'import tensorflow as tf\n'), ((6238, 6300), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""optimisation/learning_rate"""', 'learning_rate'], {}), "('optimisation/learning_rate', learning_rate)\n", (6255, 6300), True, 'import tensorflow as tf\n'), ((6349, 6420), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""learning_rate"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='learning_rate')\n", (6366, 6420), True, 'import tensorflow as tf\n'), ((7344, 7390), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""optimisation/gamma"""', 'gamma'], {}), "('optimisation/gamma', gamma)\n", (7361, 7390), True, 'import tensorflow as tf\n'), ((7439, 7502), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""gamma"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='gamma')\n", (7456, 7502), True, 'import tensorflow as tf\n'), ((9534, 9580), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""optimisation/gamma"""', 'gamma'], {}), "('optimisation/gamma', gamma)\n", 
(9551, 9580), True, 'import tensorflow as tf\n'), ((9629, 9692), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""gamma"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='gamma')\n", (9646, 9692), True, 'import tensorflow as tf\n'), ((897, 919), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (917, 919), True, 'import tensorflow as tf\n'), ((1574, 1608), 'tensorflow.summary.merge', 'tf.summary.merge', (['scalar_summaries'], {}), '(scalar_summaries)\n', (1590, 1608), True, 'import tensorflow as tf\n'), ((2032, 2064), 'tensorflow.summary.merge', 'tf.summary.merge', (['hist_summaries'], {}), '(hist_summaries)\n', (2048, 2064), True, 'import tensorflow as tf\n'), ((2089, 2165), 'tensorflow.summary.merge', 'tf.summary.merge', (['([self.scalar_summary, self.hist_summary] + other_summaries)'], {}), '([self.scalar_summary, self.hist_summary] + other_summaries)\n', (2105, 2165), True, 'import tensorflow as tf\n'), ((2516, 2547), 'os.path.abspath', 'os.path.abspath', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2531, 2547), False, 'import os\n'), ((2556, 2603), 'os.makedirs', 'os.makedirs', (['self.checkpoint_dir'], {'exist_ok': '(True)'}), '(self.checkpoint_dir, exist_ok=True)\n', (2567, 2603), False, 'import os\n'), ((2691, 2720), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1)'}), '(max_to_keep=1)\n', (2705, 2720), True, 'import tensorflow as tf\n'), ((3629, 3663), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""learning_rate"""'], {}), "('learning_rate')\n", (3646, 3663), True, 'import tensorflow as tf\n'), ((3687, 3718), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (3698, 3718), True, 'import tensorflow as tf\n'), ((4220, 4262), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'tf_vars'}), '(var_list=tf_vars)\n', (4244, 4262), True, 'import tensorflow as tf\n'), ((4570, 4600), 
'gpflow.actions.Loop', 'Loop', (['actions'], {'stop': 'iterations'}), '(actions, stop=iterations)\n', (4574, 4600), False, 'from gpflow.actions import Action, Loop\n'), ((4997, 5010), 'gpflow.actions.Loop', 'Loop', (['actions'], {}), '(actions)\n', (5001, 5010), False, 'from gpflow.actions import Action, Loop\n'), ((5793, 5827), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""learning_rate"""'], {}), "('learning_rate')\n", (5810, 5827), True, 'import tensorflow as tf\n'), ((5851, 5882), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (5862, 5882), True, 'import tensorflow as tf\n'), ((6434, 6476), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'tf_vars'}), '(var_list=tf_vars)\n', (6458, 6476), True, 'import tensorflow as tf\n'), ((6493, 6519), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""gamma"""'], {}), "('gamma')\n", (6510, 6519), True, 'import tensorflow as tf\n'), ((6662, 6694), 'tensorflow.cast', 'tf.cast', (['gamma_start', 'tf.float64'], {}), '(gamma_start, tf.float64)\n', (6669, 6694), True, 'import tensorflow as tf\n'), ((6714, 6744), 'tensorflow.cast', 'tf.cast', (['gamma_max', 'tf.float64'], {}), '(gamma_max, tf.float64)\n', (6721, 6744), True, 'import tensorflow as tf\n'), ((6763, 6793), 'tensorflow.cast', 'tf.cast', (['gamma_mul', 'tf.float64'], {}), '(gamma_mul, tf.float64)\n', (6770, 6793), True, 'import tensorflow as tf\n'), ((6812, 6842), 'tensorflow.cast', 'tf.cast', (['gamma_add', 'tf.float64'], {}), '(gamma_add, tf.float64)\n', (6819, 6842), True, 'import tensorflow as tf\n'), ((6858, 6900), 'tensorflow.Variable', 'tf.Variable', (['gamma_start'], {'dtype': 'tf.float64'}), '(gamma_start, dtype=tf.float64)\n', (6869, 6900), True, 'import tensorflow as tf\n'), ((6922, 6940), 'tensorflow.identity', 'tf.identity', (['gamma'], {}), '(gamma)\n', (6933, 6940), True, 'import tensorflow as tf\n'), ((6976, 7011), 'tensorflow.cast', 'tf.cast', 
(['gamma_fallback', 'tf.float64'], {}), '(gamma_fallback, tf.float64)\n', (6983, 7011), True, 'import tensorflow as tf\n'), ((7103, 7143), 'tensorflow.assign', 'tf.assign', (['gamma', '(gamma * gamma_fallback)'], {}), '(gamma, gamma * gamma_fallback)\n', (7112, 7143), True, 'import tensorflow as tf\n'), ((7160, 7233), 'tensorflow.where', 'tf.where', (['(gamma_ref * mul_step < add_step)', '(gamma_ref * mul_step)', 'add_step'], {}), '(gamma_ref * mul_step < add_step, gamma_ref * mul_step, add_step)\n', (7168, 7233), True, 'import tensorflow as tf\n'), ((7516, 7558), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'tf_vars'}), '(var_list=tf_vars)\n', (7540, 7558), True, 'import tensorflow as tf\n'), ((8771, 8797), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""gamma"""'], {}), "('gamma')\n", (8788, 8797), True, 'import tensorflow as tf\n'), ((8831, 8863), 'tensorflow.cast', 'tf.cast', (['gamma_start', 'tf.float64'], {}), '(gamma_start, tf.float64)\n', (8838, 8863), True, 'import tensorflow as tf\n'), ((8883, 8913), 'tensorflow.cast', 'tf.cast', (['gamma_max', 'tf.float64'], {}), '(gamma_max, tf.float64)\n', (8890, 8913), True, 'import tensorflow as tf\n'), ((8932, 8962), 'tensorflow.cast', 'tf.cast', (['gamma_mul', 'tf.float64'], {}), '(gamma_mul, tf.float64)\n', (8939, 8962), True, 'import tensorflow as tf\n'), ((8981, 9011), 'tensorflow.cast', 'tf.cast', (['gamma_add', 'tf.float64'], {}), '(gamma_add, tf.float64)\n', (8988, 9011), True, 'import tensorflow as tf\n'), ((9027, 9086), 'tensorflow.Variable', 'tf.Variable', (['gamma_start'], {'dtype': 'tf.float64', 'trainable': '(False)'}), '(gamma_start, dtype=tf.float64, trainable=False)\n', (9038, 9086), True, 'import tensorflow as tf\n'), ((9108, 9126), 'tensorflow.identity', 'tf.identity', (['gamma'], {}), '(gamma)\n', (9119, 9126), True, 'import tensorflow as tf\n'), ((9162, 9197), 'tensorflow.cast', 'tf.cast', (['gamma_fallback', 'tf.float64'], {}), '(gamma_fallback, 
tf.float64)\n', (9169, 9197), True, 'import tensorflow as tf\n'), ((9289, 9333), 'tensorflow.assign', 'tf.assign', (['gamma', '(gamma_ref * gamma_fallback)'], {}), '(gamma, gamma_ref * gamma_fallback)\n', (9298, 9333), True, 'import tensorflow as tf\n'), ((9350, 9423), 'tensorflow.where', 'tf.where', (['(gamma_ref * mul_step < add_step)', '(gamma_ref * mul_step)', 'add_step'], {}), '(gamma_ref * mul_step < add_step, gamma_ref * mul_step, add_step)\n', (9358, 9423), True, 'import tensorflow as tf\n'), ((9706, 9748), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'tf_vars'}), '(var_list=tf_vars)\n', (9730, 9748), True, 'import tensorflow as tf\n'), ((10871, 10898), 'tensorflow.reshape', 'tf.reshape', (['x', 'self._yshape'], {}), '(x, self._yshape)\n', (10881, 10898), True, 'import tensorflow as tf\n'), ((10949, 10976), 'tensorflow.reshape', 'tf.reshape', (['y', 'self._xshape'], {}), '(y, self._xshape)\n', (10959, 10976), True, 'import tensorflow as tf\n'), ((11019, 11046), 'numpy.reshape', 'np.reshape', (['x', 'self._yshape'], {}), '(x, self._yshape)\n', (11029, 11046), True, 'import numpy as np\n'), ((11089, 11116), 'numpy.reshape', 'np.reshape', (['y', 'self._xshape'], {}), '(y, self._xshape)\n', (11099, 11116), True, 'import numpy as np\n'), ((11170, 11205), 'tensorflow.zeros', 'tf.zeros', (['(1,)', 'settings.float_type'], {}), '((1,), settings.float_type)\n', (11178, 11205), True, 'import tensorflow as tf\n'), ((11504, 11537), 'tensorflow.matmul', 'tf.matmul', (['x', 'x'], {'transpose_b': '(True)'}), '(x, x, transpose_b=True)\n', (11513, 11537), True, 'import tensorflow as tf\n'), ((11586, 11600), 'tensorflow.cholesky', 'tf.cholesky', (['y'], {}), '(y)\n', (11597, 11600), True, 'import tensorflow as tf\n'), ((11643, 11674), 'numpy.einsum', 'np.einsum', (['"""bij,bkj->bik"""', 'x', 'x'], {}), "('bij,bkj->bik', x, x)\n", (11652, 11674), True, 'import numpy as np\n'), ((12585, 12601), 'numpy.array', 'np.array', (['factor'], {}), 
'(factor)\n', (12593, 12601), True, 'import numpy as np\n'), ((12906, 12953), 'tensorflow.cast', 'tf.cast', (['self.factor'], {'dtype': 'settings.float_type'}), '(self.factor, dtype=settings.float_type)\n', (12913, 12953), True, 'import tensorflow as tf\n'), ((12975, 12989), 'tensorflow.log', 'tf.log', (['factor'], {}), '(factor)\n', (12981, 12989), True, 'import tensorflow as tf\n'), ((13005, 13030), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['log_factor'], {}), '(log_factor)\n', (13018, 13030), True, 'import tensorflow as tf\n'), ((13440, 13450), 'tensorflow.exp', 'tf.exp', (['(-x)'], {}), '(-x)\n', (13446, 13450), True, 'import tensorflow as tf\n'), ((13545, 13555), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (13551, 13555), True, 'import numpy as np\n'), ((1420, 1495), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""optimisation/likelihood"""', 'self.model._likelihood_tensor'], {}), "('optimisation/likelihood', self.model._likelihood_tensor)\n", (1437, 1495), True, 'import tensorflow as tf\n'), ((1888, 1942), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['p.pathname', 'p.constrained_tensor'], {}), '(p.pathname, p.constrained_tensor)\n', (1908, 1942), True, 'import tensorflow as tf\n'), ((3949, 3978), 'tensorflow.assign_add', 'tf.assign_add', (['global_step', '(1)'], {}), '(global_step, 1)\n', (3962, 3978), True, 'import tensorflow as tf\n'), ((4330, 4358), 'gpflow.training.AdamOptimizer', 'AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (4343, 4358), False, 'from gpflow.training import NatGradOptimizer, AdamOptimizer, ScipyOptimizer\n'), ((4860, 4876), 'gpflow.training.ScipyOptimizer', 'ScipyOptimizer', ([], {}), '()\n', (4874, 4876), False, 'from gpflow.training import NatGradOptimizer, AdamOptimizer, ScipyOptimizer\n'), ((6163, 6192), 'tensorflow.assign_add', 'tf.assign_add', (['global_step', '(1)'], {}), '(global_step, 1)\n', (6176, 6192), True, 'import tensorflow as tf\n'), ((7270, 7337), 'tensorflow.where', 
'tf.where', (['(gamma_ref + diff > gamma_max)', 'gamma_max', '(gamma_ref + diff)'], {}), '(gamma_ref + diff > gamma_max, gamma_max, gamma_ref + diff)\n', (7278, 7337), True, 'import tensorflow as tf\n'), ((7575, 7602), 'gpflow.training.NatGradOptimizer', 'NatGradOptimizer', (['gamma_ref'], {}), '(gamma_ref)\n', (7591, 7602), False, 'from gpflow.training import NatGradOptimizer, AdamOptimizer, ScipyOptimizer\n'), ((7661, 7689), 'gpflow.training.AdamOptimizer', 'AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (7674, 7689), False, 'from gpflow.training import NatGradOptimizer, AdamOptimizer, ScipyOptimizer\n'), ((8042, 8082), 'gpflow.actions.Loop', 'Loop', (['actions'], {'start': 'it', 'stop': 'iterations'}), '(actions, start=it, stop=iterations)\n', (8046, 8082), False, 'from gpflow.actions import Action, Loop\n'), ((9460, 9527), 'tensorflow.where', 'tf.where', (['(gamma_ref + diff > gamma_max)', 'gamma_max', '(gamma_ref + diff)'], {}), '(gamma_ref + diff > gamma_max, gamma_max, gamma_ref + diff)\n', (9468, 9527), True, 'import tensorflow as tf\n'), ((9765, 9792), 'gpflow.training.NatGradOptimizer', 'NatGradOptimizer', (['gamma_ref'], {}), '(gamma_ref)\n', (9781, 9792), False, 'from gpflow.training import NatGradOptimizer, AdamOptimizer, ScipyOptimizer\n'), ((10163, 10203), 'gpflow.actions.Loop', 'Loop', (['actions'], {'start': 'it', 'stop': 'iterations'}), '(actions, start=it, stop=iterations)\n', (10167, 10203), False, 'from gpflow.actions import Action, Loop\n'), ((13368, 13379), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (13376, 13379), True, 'import numpy as np\n'), ((13381, 13392), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (13389, 13392), True, 'import numpy as np\n'), ((13661, 13707), 'tensorflow.log', 'tf.log', (['((self.b - self.a) / (y - self.a) - 1.0)'], {}), '((self.b - self.a) / (y - self.a) - 1.0)\n', (13667, 13707), True, 'import tensorflow as tf\n'), ((13751, 13797), 'numpy.log', 'np.log', (['((self.b - self.a) / (y - 
self.a) - 1.0)'], {}), '((self.b - self.a) / (y - self.a) - 1.0)\n', (13757, 13797), True, 'import numpy as np\n'), ((1269, 1305), 'tensorflow.reshape', 'tf.reshape', (['p.constrained_tensor', '[]'], {}), '(p.constrained_tensor, [])\n', (1279, 1305), True, 'import tensorflow as tf\n'), ((11726, 11748), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['yi'], {}), '(yi)\n', (11744, 11748), True, 'import numpy as np\n'), ((11891, 11913), 'tensorflow.matrix_diag_part', 'tf.matrix_diag_part', (['L'], {}), '(L)\n', (11910, 11913), True, 'import tensorflow as tf\n'), ((13899, 13922), 'numpy.log', 'np.log', (['(self.b - self.a)'], {}), '(self.b - self.a)\n', (13905, 13922), True, 'import numpy as np\n'), ((13881, 13890), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (13887, 13890), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: <EMAIL>
@Software: PyCharm
@File: LmdbDataset.py
@Time: 2020/8/20 16:55
@Overview:
"""
import os
import random
import lmdb
import numpy as np
from kaldi_io import read_mat
from torch.utils.data import Dataset
from tqdm import tqdm
import Process_Data.constants as c
def _read_data_lmdb(txn, key, size):
"""read data array from lmdb with key (w/ and w/o fixed size)
size: feat-dim"""
# with env.begin(write=False) as txn:
buf = txn.get(key.encode('ascii'))
data_flat = np.frombuffer(buf, dtype=np.float32)
return data_flat.reshape(int(data_flat.shape[0] / size), size)
class LmdbVerifyDataset(Dataset):
    """Speaker-verification trial dataset over precomputed xvector features.

    Reads ``xvectors.scp`` (uid -> feature entry) and a kaldi-style trials
    file (``uid_a uid_b target|nontarget``); pairs are sorted so all
    positive (target) pairs come first.
    """

    def __init__(self, dir, xvectors_dir, trials_file='trials', loader=np.load, return_uid=False):
        """
        :param dir: directory containing the trials file
        :param xvectors_dir: directory containing xvectors.scp
        :param trials_file: name of the trials file inside ``dir``
        :param loader: callable mapping a feature entry to an array
        :param return_uid: if True, __getitem__ also returns the two uids
        """
        feat_scp = xvectors_dir + '/xvectors.scp'
        trials = dir + '/%s' % trials_file

        # NOTE(review): FileExistsError for a *missing* file is unusual
        # (FileNotFoundError would fit); kept for caller compatibility.
        if not os.path.exists(feat_scp):
            raise FileExistsError(feat_scp)
        if not os.path.exists(trials):
            raise FileExistsError(trials)

        uid2feat = {}
        with open(feat_scp, 'r') as f:
            for line in f.readlines():
                uid, feat_offset = line.split()
                uid2feat[uid] = feat_offset

        print('\n==> There are {} utterances in Verification trials.'.format(len(uid2feat)))

        trials_pair = []
        positive_pairs = 0
        with open(trials, 'r') as t:
            all_pairs = t.readlines()
            for line in all_pairs:
                pair = line.split()
                if pair[2] == 'nontarget' or pair[2] == '0':
                    pair_true = False
                else:
                    pair_true = True
                    positive_pairs += 1
                trials_pair.append((pair[0], pair[1], pair_true))

        # np.array stores the bool flags as the strings 'True'/'False';
        # sorting on that column (descending) puts positive pairs first
        trials_pair = np.array(trials_pair)
        trials_pair = trials_pair[trials_pair[:, 2].argsort()[::-1]]

        print('    There are {} pairs in trials with {} positive pairs'.format(len(trials_pair),
                                                                               positive_pairs))
        self.uid2feat = uid2feat
        self.trials_pair = trials_pair
        self.numofpositive = positive_pairs
        self.loader = loader
        self.return_uid = return_uid

    def __getitem__(self, index):
        uid_a, uid_b, label = self.trials_pair[index]

        feat_a = self.uid2feat[uid_a]
        feat_b = self.uid2feat[uid_b]
        data_a = self.loader(feat_a)
        data_b = self.loader(feat_b)

        # labels come back from the numpy array as strings
        if label == 'True' or label == True:
            label = True
        else:
            label = False

        if self.return_uid:
            return data_a, data_b, label, uid_a, uid_b
        return data_a, data_b, label

    def partition(self, num):
        """Subsample the trials down to ``num`` pairs, keeping ~30% positive."""
        if num > len(self.trials_pair):
            # Fix: the format argument was missing, printing the literal '%d'.
            print('%d is greater than the total number of pairs' % num)
        elif num * 0.3 > self.numofpositive:
            # not enough positives for a 30% share: keep all of them
            indices = list(range(self.numofpositive, len(self.trials_pair)))
            random.shuffle(indices)
            indices = indices[:(num - self.numofpositive)]

            positive_idx = list(range(self.numofpositive))
            positive_pairs = self.trials_pair[positive_idx].copy()
            nagative_pairs = self.trials_pair[indices].copy()
            self.trials_pair = np.concatenate((positive_pairs, nagative_pairs), axis=0)
        else:
            indices = list(range(self.numofpositive, len(self.trials_pair)))
            random.shuffle(indices)
            indices = indices[:(num - int(0.3 * num))]

            positive_idx = list(range(self.numofpositive))
            random.shuffle(positive_idx)
            positive_idx = positive_idx[:int(0.3 * num)]

            positive_pairs = self.trials_pair[positive_idx].copy()
            nagative_pairs = self.trials_pair[indices].copy()
            self.numofpositive = len(positive_pairs)
            self.trials_pair = np.concatenate((positive_pairs, nagative_pairs), axis=0)
            assert len(self.trials_pair) == num

        num_positive = 0
        for x, y, z in self.trials_pair:
            if z == 'True':
                num_positive += 1
        assert len(self.trials_pair) == num, '%d != %d' % (len(self.trials_pair), num)
        assert self.numofpositive == num_positive, '%d != %d' % (self.numofpositive, num_positive)
        print('%d positive pairs remain.' % num_positive)

    def __len__(self):
        return len(self.trials_pair)
class LmdbTrainDataset(Dataset):
    """Training dataset of speaker utterances whose features live in an lmdb store.

    Each sample concatenates randomly chosen utterances of one speaker until
    at least ``c.N_SAMPLES`` frames are collected; ``num_valid`` utterances
    per speaker are first held out into ``self.valid_set`` for validation.
    """

    def __init__(self, dir, feat_dim, samples_per_speaker, transform, loader=_read_data_lmdb, num_valid=5,
                 return_uid=False):
        """
        :param dir: kaldi-style data directory with 'spk2utt', 'utt2spk' and
            an lmdb feature store named 'feat'
        :param feat_dim: feature dimensionality passed to ``loader``
        :param samples_per_speaker: samples drawn per speaker per epoch
        :param transform: callable applied to the concatenated feature matrix
        :param loader: callable (env, uid, feat_dim) -> feature matrix
        :param num_valid: utterances held out per speaker for validation
        :param return_uid: kept for interface parity; unused in __getitem__
        """
        # feat_scp = dir + '/feats.scp'
        spk2utt = dir + '/spk2utt'
        utt2spk = dir + '/utt2spk'
        # utt2num_frames = dir + '/utt2num_frames'
        lmdb_file = dir + '/feat'

        # NOTE(review): FileExistsError for a *missing* path is unusual
        if not os.path.exists(lmdb_file):
            raise FileExistsError(lmdb_file)
        if not os.path.exists(spk2utt):
            raise FileExistsError(spk2utt)

        # dataset: spk -> list of its utterance ids
        dataset = {}
        with open(spk2utt, 'r') as u:
            all_cls = u.readlines()
            for line in all_cls:
                spk_utt = line.split()
                spk_name = spk_utt[0]
                if spk_name not in dataset.keys():
                    dataset[spk_name] = [x for x in spk_utt[1:]]
                    # dataset[spk_name] = [x for x in spk_utt[1:] if x not in invalid_uid]

        # utt2spk_dict: uid -> its speaker
        utt2spk_dict = {}
        with open(utt2spk, 'r') as u:
            all_cls = u.readlines()
            for line in all_cls:
                utt_spk = line.split()
                uid = utt_spk[0]
                # if uid in invalid_uid:
                #     continue
                if uid not in utt2spk_dict.keys():
                    utt2spk_dict[uid] = utt_spk[-1]
        # pdb.set_trace()
        speakers = [spk for spk in dataset.keys()]
        speakers.sort()
        print('==> There are {} speakers in Dataset.'.format(len(speakers)))

        # stable spk <-> integer-label mappings (sorted speaker order)
        spk_to_idx = {speakers[i]: i for i in range(len(speakers))}
        idx_to_spk = {i: speakers[i] for i in range(len(speakers))}

        print('    There are {} utterances in Train Dataset'.format(len(utt2spk_dict.keys())))
        if num_valid > 0:
            # hold out up to num_valid utterances per speaker for validation,
            # always keeping at least one training utterance per speaker
            valid_set = {}
            valid_utt2spk_dict = {}

            for spk in speakers:
                if spk not in valid_set.keys():
                    valid_set[spk] = []
                for i in range(num_valid):
                    if len(dataset[spk]) <= 1:
                        break
                    j = np.random.randint(len(dataset[spk]))
                    utt = dataset[spk].pop(j)
                    valid_set[spk].append(utt)
                    valid_utt2spk_dict[utt] = utt2spk_dict[utt]

            print('    Spliting {} utterances for Validation.'.format(len(valid_utt2spk_dict.keys())))
            self.valid_set = valid_set
            self.valid_utt2spk_dict = valid_utt2spk_dict

        self.all_utts = list(utt2spk_dict.keys())

        # for uid in uid2feat.keys():
        #     for i in range(int(np.ceil(utt2len_dict[uid] / c.NUM_FRAMES_SPECT))):
        #         self.all_utts.append(uid)

        # keep a single read-only buffered transaction open for the dataset's lifetime
        env = lmdb.open(lmdb_file, readonly=True, lock=False, readahead=False,
                        meminit=False)
        self.env = env.begin(write=False, buffers=True)  # as txn:
        self.speakers = speakers
        self.dataset = dataset
        self.spk_to_idx = spk_to_idx
        self.idx_to_spk = idx_to_spk
        self.num_spks = len(speakers)
        self.utt2spk_dict = utt2spk_dict

        self.feat_dim = feat_dim
        self.loader = loader
        self.transform = transform
        self.samples_per_speaker = samples_per_speaker
        self.return_uid = return_uid

        # if self.return_uid:
        #     self.utt_dataset = []
        #     for i in range(self.samples_per_speaker * self.num_spks):
        #         sid = i % self.num_spks
        #         spk = self.idx_to_spk[sid]
        #         utts = self.dataset[spk]
        #         uid = utts[random.randrange(0, len(utts))]
        #         self.utt_dataset.append([uid, sid])

    def __getitem__(self, sid):
        """Return (feature, speaker_label) for the speaker indexed by ``sid``.

        Randomly chosen utterances of that speaker are concatenated until at
        least c.N_SAMPLES frames are available.
        """
        # start_time = time.time()
        # if self.return_uid:
        #     uid, label = self.utt_dataset[sid]
        #     y = self.loader(self.uid2feat[uid])
        #     feature = self.transform(y)
        #     return feature, label, uid

        # indices beyond num_spks wrap around to a speaker id
        sid %= self.num_spks
        spk = self.idx_to_spk[sid]
        utts = self.dataset[spk]

        y = np.array([[]]).reshape(0, self.feat_dim)

        while len(y) < c.N_SAMPLES:
            uid = random.randrange(0, len(utts))
            feature = self.loader(self.env, utts[uid], self.feat_dim)

            y = np.concatenate((y, feature), axis=0)

        feature = self.transform(y)
        # print(sid)
        label = sid

        return feature, label

    def __len__(self):
        return self.samples_per_speaker * len(self.speakers)  # number of samples drawn per epoch
class LmdbValidDataset(Dataset):
    """Validation split over the utterances held out by LmdbTrainDataset."""

    def __init__(self, valid_set, spk_to_idx, env, valid_utt2spk_dict, transform, feat_dim, loader=_read_data_lmdb,
                 return_uid=False):
        """
        :param valid_set: dict spk -> list of held-out uids
        :param spk_to_idx: dict spk -> integer label
        :param env: open lmdb read transaction/buffer passed to ``loader``
        :param valid_utt2spk_dict: dict uid -> spk for the held-out utterances
        :param transform: callable applied to the loaded feature matrix
        :param feat_dim: feature dimensionality passed to ``loader``
        :param loader: callable (env, uid, feat_dim) -> feature matrix
        :param return_uid: if True, __getitem__ also returns the uid
        """
        self.env = env
        self.feat_dim = feat_dim

        speakers = [spk for spk in valid_set.keys()]
        speakers.sort()
        self.speakers = speakers
        self.valid_set = valid_set

        uids = list(valid_utt2spk_dict.keys())
        uids.sort()
        # Fix: removed a leftover debug print of the first ten uids.
        self.uids = uids
        self.utt2spk_dict = valid_utt2spk_dict
        self.spk_to_idx = spk_to_idx

        self.num_spks = len(speakers)
        self.loader = loader
        self.transform = transform
        self.return_uid = return_uid

    def __getitem__(self, index):
        uid = self.uids[index]
        spk = self.utt2spk_dict[uid]

        y = self.loader(self.env, uid, self.feat_dim)
        feature = self.transform(y)
        label = self.spk_to_idx[spk]

        if self.return_uid:
            return feature, label, uid
        return feature, label

    def __len__(self):
        return len(self.uids)
class LmdbTestDataset(Dataset):
    """Verification-trial dataset whose features are read from an lmdb store.

    Pairs are sorted so all positive (target) pairs come first, mirroring
    LmdbVerifyDataset.
    """

    def __init__(self, dir, transform, feat_dim, loader=_read_data_lmdb, return_uid=False):
        """
        :param dir: directory containing 'feat' (lmdb), 'spk2utt' and 'trials'
        :param transform: callable applied to each loaded feature matrix
        :param feat_dim: feature dimensionality passed to ``loader``
        :param loader: callable (env, uid, feat_dim) -> feature matrix
        :param return_uid: if True, __getitem__ also returns the two uids
        """
        lmdb_file = dir + '/feat'
        spk2utt = dir + '/spk2utt'
        trials = dir + '/trials'

        # NOTE(review): FileExistsError for a *missing* path is unusual
        if not os.path.exists(lmdb_file):
            raise FileExistsError(lmdb_file)
        if not os.path.exists(spk2utt):
            raise FileExistsError(spk2utt)
        if not os.path.exists(trials):
            raise FileExistsError(trials)

        dataset = {}
        with open(spk2utt, 'r') as u:
            all_cls = u.readlines()
            for line in all_cls:
                spk_utt = line.split(' ')
                spk_name = spk_utt[0]
                if spk_name not in dataset.keys():
                    spk_utt[-1] = spk_utt[-1].rstrip('\n')
                    dataset[spk_name] = spk_utt[1:]

        speakers = [spk for spk in dataset.keys()]
        speakers.sort()
        print('    There are {} speakers in Test Dataset.'.format(len(speakers)))

        trials_pair = []
        positive_pairs = 0
        with open(trials, 'r') as t:
            all_pairs = t.readlines()
            for line in all_pairs:
                pair = line.split()
                if pair[2] == 'nontarget' or pair[2] == '0':
                    pair_true = False
                else:
                    pair_true = True
                    positive_pairs += 1
                trials_pair.append((pair[0], pair[1], pair_true))

        # np.array stores the bool flags as the strings 'True'/'False';
        # sorting on that column (descending) puts positive pairs first
        trials_pair = np.array(trials_pair)
        trials_pair = trials_pair[trials_pair[:, 2].argsort()[::-1]]

        print('==>There are {} pairs in test Dataset with {} positive pairs'.format(len(trials_pair), positive_pairs))

        # keep a single read-only buffered transaction open for the dataset's lifetime
        env = lmdb.open(lmdb_file, readonly=True, lock=False, readahead=False,
                        meminit=False)
        self.env = env.begin(write=False, buffers=True)
        self.feat_dim = feat_dim

        self.speakers = speakers
        self.trials_pair = trials_pair
        self.num_spks = len(speakers)
        self.numofpositive = positive_pairs
        self.loader = loader
        self.transform = transform
        self.return_uid = return_uid

    def __getitem__(self, index):
        uid_a, uid_b, label = self.trials_pair[index]

        y_a = self.loader(self.env, uid_a, self.feat_dim)
        y_b = self.loader(self.env, uid_b, self.feat_dim)

        data_a = self.transform(y_a)
        data_b = self.transform(y_b)

        # labels come back from the numpy array as strings
        if label == 'True' or label == True:
            label = True
        else:
            label = False

        if self.return_uid:
            return data_a, data_b, label, uid_a, uid_b
        return data_a, data_b, label

    def partition(self, num):
        """Subsample the trials down to ``num`` pairs, keeping ~30% positive."""
        if num > len(self.trials_pair):
            # Fix: the format argument was missing, printing the literal '%d'.
            print('%d is greater than the total number of pairs' % num)
        elif num * 0.3 > self.numofpositive:
            # not enough positives for a 30% share: keep all of them
            indices = list(range(self.numofpositive, len(self.trials_pair)))
            random.shuffle(indices)
            indices = indices[:(num - self.numofpositive)]

            positive_idx = list(range(self.numofpositive))
            positive_pairs = self.trials_pair[positive_idx].copy()
            nagative_pairs = self.trials_pair[indices].copy()
            self.trials_pair = np.concatenate((positive_pairs, nagative_pairs), axis=0)
        else:
            indices = list(range(self.numofpositive, len(self.trials_pair)))
            random.shuffle(indices)
            indices = indices[:(num - int(0.3 * num))]

            positive_idx = list(range(self.numofpositive))
            random.shuffle(positive_idx)
            positive_idx = positive_idx[:int(0.3 * num)]

            positive_pairs = self.trials_pair[positive_idx].copy()
            nagative_pairs = self.trials_pair[indices].copy()
            self.numofpositive = len(positive_pairs)
            self.trials_pair = np.concatenate((positive_pairs, nagative_pairs), axis=0)
            assert len(self.trials_pair) == num

        num_positive = 0
        for x, y, z in self.trials_pair:
            if z == 'True':
                num_positive += 1
        assert len(self.trials_pair) == num, '%d != %d' % (len(self.trials_pair), num)
        assert self.numofpositive == num_positive, '%d != %d' % (self.numofpositive, num_positive)
        print('    %d positive pairs remain.' % num_positive)

    def __len__(self):
        return len(self.trials_pair)
class EgsDataset(Dataset):
    """Dataset of pre-extracted training examples listed in `<dir>/feats.scp`.

    Each scp line is either ``<spk> <path>`` or ``<spk> <dom> <path>``;
    the optional domain label supports domain-adversarial training.
    """

    def __init__(self, dir, feat_dim, transform, loader=read_mat, domain=False,
                 random_chunk=None, batch_size=0):
        """
        :param dir: directory containing 'feats.scp'.
        :param feat_dim: feature dimensionality (stored for consumers).
        :param transform: callable applied to each loaded feature matrix.
        :param loader: callable mapping an utterance path to a feature array.
        :param domain: if True, __getitem__ also returns the domain label.
        :param random_chunk: unused here; kept for backward compatibility.
            BUG FIX: the default used to be a mutable list (`[]`), which is
            shared between calls; `None` is equivalent since it is never read.
        :param batch_size: stored for external consumers; unused here.
        """
        feat_scp = dir + '/feats.scp'
        if not os.path.exists(feat_scp):
            raise FileExistsError(feat_scp)
        dataset = []
        spks = set()
        doms = set()
        with open(feat_scp, 'r') as u:
            all_cls_upath = tqdm(u.readlines())
            for line in all_cls_upath:
                try:
                    # Two-column form: "<spk> <path>" (no domain label).
                    cls, upath = line.split()
                    dom_cls = -1
                except ValueError:
                    # Three-column form: "<spk> <dom> <path>".
                    cls, dom_cls, upath = line.split()
                    dom_cls = int(dom_cls)
                cls = int(cls)
                dataset.append((cls, dom_cls, upath))
                doms.add(dom_cls)
                spks.add(cls)
        print('==> There are {} speakers in Dataset.'.format(len(spks)))
        print('    There are {} egs in Dataset'.format(len(dataset)))
        self.dataset = dataset
        self.feat_dim = feat_dim
        self.loader = loader
        self.transform = transform
        self.num_spks = len(spks)
        self.num_doms = len(doms)
        self.domain = domain
        self.chunk_size = []
        self.batch_size = batch_size

    def __getitem__(self, idx):
        """Return (feature, label[, domain_label]) for the example at `idx`."""
        label, dom_label, upath = self.dataset[idx]
        y = self.loader(upath)
        feature = self.transform(y)
        if self.domain:
            return feature, label, dom_label
        else:
            return feature, label

    def __len__(self):
        # Number of samples drawn per epoch.
        return len(self.dataset)
| [
"numpy.frombuffer",
"random.shuffle",
"os.path.exists",
"numpy.array",
"lmdb.open",
"numpy.concatenate"
] | [((567, 603), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.float32'}), '(buf, dtype=np.float32)\n', (580, 603), True, 'import numpy as np\n'), ((1842, 1863), 'numpy.array', 'np.array', (['trials_pair'], {}), '(trials_pair)\n', (1850, 1863), True, 'import numpy as np\n'), ((7330, 7409), 'lmdb.open', 'lmdb.open', (['lmdb_file'], {'readonly': '(True)', 'lock': '(False)', 'readahead': '(False)', 'meminit': '(False)'}), '(lmdb_file, readonly=True, lock=False, readahead=False, meminit=False)\n', (7339, 7409), False, 'import lmdb\n'), ((11758, 11779), 'numpy.array', 'np.array', (['trials_pair'], {}), '(trials_pair)\n', (11766, 11779), True, 'import numpy as np\n'), ((11984, 12063), 'lmdb.open', 'lmdb.open', (['lmdb_file'], {'readonly': '(True)', 'lock': '(False)', 'readahead': '(False)', 'meminit': '(False)'}), '(lmdb_file, readonly=True, lock=False, readahead=False, meminit=False)\n', (11993, 12063), False, 'import lmdb\n'), ((917, 941), 'os.path.exists', 'os.path.exists', (['feat_scp'], {}), '(feat_scp)\n', (931, 941), False, 'import os\n'), ((1002, 1024), 'os.path.exists', 'os.path.exists', (['trials'], {}), '(trials)\n', (1016, 1024), False, 'import os\n'), ((4961, 4986), 'os.path.exists', 'os.path.exists', (['lmdb_file'], {}), '(lmdb_file)\n', (4975, 4986), False, 'import os\n'), ((5048, 5071), 'os.path.exists', 'os.path.exists', (['spk2utt'], {}), '(spk2utt)\n', (5062, 5071), False, 'import os\n'), ((8898, 8934), 'numpy.concatenate', 'np.concatenate', (['(y, feature)'], {'axis': '(0)'}), '((y, feature), axis=0)\n', (8912, 8934), True, 'import numpy as np\n'), ((10507, 10532), 'os.path.exists', 'os.path.exists', (['lmdb_file'], {}), '(lmdb_file)\n', (10521, 10532), False, 'import os\n'), ((10594, 10617), 'os.path.exists', 'os.path.exists', (['spk2utt'], {}), '(spk2utt)\n', (10608, 10617), False, 'import os\n'), ((10677, 10699), 'os.path.exists', 'os.path.exists', (['trials'], {}), '(trials)\n', (10691, 10699), False, 'import os\n'), ((14951, 
14975), 'os.path.exists', 'os.path.exists', (['feat_scp'], {}), '(feat_scp)\n', (14965, 14975), False, 'import os\n'), ((3120, 3143), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (3134, 3143), False, 'import random\n'), ((3424, 3480), 'numpy.concatenate', 'np.concatenate', (['(positive_pairs, nagative_pairs)'], {'axis': '(0)'}), '((positive_pairs, nagative_pairs), axis=0)\n', (3438, 3480), True, 'import numpy as np\n'), ((3584, 3607), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (3598, 3607), False, 'import random\n'), ((3735, 3763), 'random.shuffle', 'random.shuffle', (['positive_idx'], {}), '(positive_idx)\n', (3749, 3763), False, 'import random\n'), ((4035, 4091), 'numpy.concatenate', 'np.concatenate', (['(positive_pairs, nagative_pairs)'], {'axis': '(0)'}), '((positive_pairs, nagative_pairs), axis=0)\n', (4049, 4091), True, 'import numpy as np\n'), ((8684, 8698), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (8692, 8698), True, 'import numpy as np\n'), ((13283, 13306), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (13297, 13306), False, 'import random\n'), ((13587, 13643), 'numpy.concatenate', 'np.concatenate', (['(positive_pairs, nagative_pairs)'], {'axis': '(0)'}), '((positive_pairs, nagative_pairs), axis=0)\n', (13601, 13643), True, 'import numpy as np\n'), ((13747, 13770), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (13761, 13770), False, 'import random\n'), ((13898, 13926), 'random.shuffle', 'random.shuffle', (['positive_idx'], {}), '(positive_idx)\n', (13912, 13926), False, 'import random\n'), ((14198, 14254), 'numpy.concatenate', 'np.concatenate', (['(positive_pairs, nagative_pairs)'], {'axis': '(0)'}), '((positive_pairs, nagative_pairs), axis=0)\n', (14212, 14254), True, 'import numpy as np\n')] |
""" Components can be added to Material objects to change the optical properties of the
volume include: absorption, scattering and luminescence (absorption and reemission).
"""
from dataclasses import replace
import numpy as np
from pvtrace.material.distribution import Distribution
from pvtrace.material.utils import isotropic, gaussian
import logging
# Module-level logger; configure through the standard logging hierarchy.
logger = logging.getLogger(__name__)
# Physical constants: elementary charge (Coulombs) and the Boltzmann
# constant re-expressed in eV per Kelvin (used for the thermal energy kT).
q = 1.60217662e-19  # C
kB = 1.380649e-23 / q  # eV K-1
class Component(object):
    """Base class for anything that can be added to a host material.

    Subclasses override `is_radiative` to decide whether an interaction
    produces a new ray.
    """

    def __init__(self, name="Component"):
        super().__init__()
        self.name = name

    def is_radiative(self, ray):
        """Return True when an interaction with `ray` is radiative; base is never."""
        return False
class Scatterer(Component):
    """Describes a scattering centre with attenuation coefficient per unit length.

    Examples
    --------
    Create `Scatterer` with isotropic and constant probability of scattering::

        Scatterer(1.0)

    With spectrally varying scattering probability using a numpy array::

        arr = numpy.column_stack((x, y))
        Scatterer(arr)

    With spectrally varying scattering probability using `x` lists::

        Scatterer(y, x=x)
    """

    def __init__(
        self,
        coefficient,
        x=None,
        quantum_yield=1.0,
        phase_function=None,
        hist=False,
        name="Scatterer",
    ):
        """
        Parameters
        ----------
        coefficient: float, list, tuple or numpy.ndarray
            Specifies the scattering coefficient per unit length. Constant values
            can be supplied or a spectrum per nanometer per unit length.
        x: list, tuple or numpy.ndarray (optional)
            Wavelength values in nanometers. Required when specifying the
            `coefficient` with a list or tuple.
        quantum_yield: float (optional)
            Default value is 1.0. To include non-radiative scattering use values
            less than 1.0.
        phase_function: callable (optional)
            Determines the direction of scattering. If None is supplied
            scattering is isotropic.
        hist: bool
            Specifies how the coefficient spectrum is sampled. If `True` the
            values are treated as a histogram. If `False` the values are
            linearly interpolated.
        name: str
            A user-defined identifier string.
        """
        super(Scatterer, self).__init__(name=name)
        # Make absorption/scattering spectrum distribution
        self._coefficient = coefficient
        if coefficient is None:
            raise ValueError("Coefficient must be specified.")
        elif isinstance(coefficient, (int, float, np.floating)):
            # BUG FIX: `np.float` was removed in NumPy 1.24; `np.floating`
            # matches NumPy float scalars. Plain ints are accepted as well.
            self._abs_dist = Distribution(x=None, y=coefficient, hist=hist)
        elif isinstance(coefficient, np.ndarray):
            self._abs_dist = Distribution(
                x=coefficient[:, 0], y=coefficient[:, 1], hist=hist
            )
        elif isinstance(coefficient, (list, tuple)):
            if x is None:
                raise ValueError("Requires `x`.")
            self._abs_dist = Distribution.from_functions(x, coefficient, hist=hist)
        self.quantum_yield = quantum_yield
        self.phase_function = (
            phase_function if phase_function is not None else isotropic
        )

    def coefficient(self, wavelength):
        """Return the scattering coefficient at `wavelength` (nanometers)."""
        value = self._abs_dist(wavelength)
        return value

    def is_radiative(self, ray):
        """Monte-Carlo sample: True when the interaction is radiative."""
        return np.random.uniform() < self.quantum_yield

    def emit(self, ray: "Ray", **kwargs) -> "Ray":
        """Return a copy of `ray` scattered into a direction drawn from the phase function."""
        direction = self.phase_function()
        ray = replace(ray, direction=direction, source=self.name)
        return ray
class Absorber(Scatterer):
    """Non-radiative absorber: attenuates light without re-emission.

    A thin specialisation of `Scatterer` with the quantum yield pinned to
    zero, so every absorption event terminates the ray.

    Examples
    --------
    Constant absorption coefficient::

        Absorber(1.0)

    Spectrally varying coefficient from a two-column numpy array::

        arr = numpy.column_stack((x, y))
        Absorber(arr)

    Spectrally varying coefficient from lists::

        Absorber(y, x=x)
    """

    def __init__(self, coefficient, x=None, name="Absorber", hist=False):
        """
        Parameters
        ----------
        coefficient: float, list, tuple or numpy.ndarray
            Absorption coefficient per unit length; either a constant or a
            spectrum per nanometer per unit length.
        x: list, tuple or numpy.ndarray (optional)
            Wavelengths in nanometers; required when `coefficient` is a
            list or tuple.
        hist: bool
            If `True` the spectrum is sampled as a histogram; otherwise the
            values are linearly interpolated.
        name: str
            A user-defined identifier string.
        """
        # Quantum yield 0.0 makes every interaction non-radiative, so the
        # phase function is irrelevant and left as None.
        super(Absorber, self).__init__(
            coefficient,
            x=x,
            quantum_yield=0.0,
            phase_function=None,
            hist=hist,
            name=name,
        )

    def is_radiative(self, ray):
        """Always False: absorbed rays are never re-emitted."""
        return False
class Luminophore(Scatterer):
    """Describes a molecule, nanocrystal or material which absorbs and emits light.

    Examples
    --------
    Create `Luminophore` with absorption coefficient and emission spectrum.
    Emission will be isotropic and the quantum yield is unity::

        absorption_spectrum = np.column_stack((x_abs, y_abs))
        emission_spectrum = np.column_stack((x_ems, y_ems))
        Luminophore(
            coefficient=absorption_spectrum,
            emission=emission_spectrum,
            quantum_yield=1.0
        )

    If input data are histograms rather than continuous spectra use `hist=True`,
    which prevents `pvtrace` from interpolating the data set and preserves any
    discontinuities in the emission or absorption data.
    """

    def __init__(
        self,
        coefficient,
        emission=None,
        x=None,
        hist=False,
        quantum_yield=1.0,
        phase_function=None,
        name="Luminophore",
    ):
        """
        Parameters
        ----------
        coefficient: float, list, tuple or numpy.ndarray
            Absorption coefficient per unit length; constant or a spectrum
            per nanometer per unit length (two-column array or list with `x`).
        emission: float, list, tuple or numpy.ndarray (optional)
            Emission line-shape per nanometer. If `None` a Gaussian centred
            at 600 nm is used.
        x: list, tuple or numpy.ndarray (optional)
            Wavelength values in nanometers. Required when `coefficient` or
            `emission` is given as a list or tuple.
        quantum_yield: float (optional)
            The probability of re-emitting a ray.
        phase_function: callable (optional)
            Specifies the direction of emitted rays.
        hist: bool
            If `True` the spectra are sampled as histograms; otherwise the
            values are linearly interpolated.
        name: str
            A user-defined identifier string.
        """
        super(Luminophore, self).__init__(
            coefficient,
            x=x,
            quantum_yield=quantum_yield,
            phase_function=phase_function,
            hist=hist,
            name=name,
        )
        # Make emission spectrum distribution
        self._emission = emission
        if emission is None:
            # Default: Gaussian emission line centred at 600 nm, width 40 nm.
            self._ems_dist = Distribution.from_functions(
                x, [lambda x: gaussian(x, 1.0, 600.0, 40.0)], hist=hist
            )
        elif isinstance(emission, np.ndarray):
            self._ems_dist = Distribution(x=emission[:, 0], y=emission[:, 1], hist=hist)
        elif isinstance(emission, (tuple, list)):
            if x is None:
                raise ValueError("Requires `x`.")
            self._ems_dist = Distribution.from_functions(x, emission, hist=hist)
        else:
            raise ValueError("Luminophore `emission` arg has wrong type.")

    def emit(self, ray: "Ray", method="kT", T=300.0, **kwargs) -> "Ray":
        """Return a copy of `ray` re-emitted with a new direction and wavelength.

        Parameters
        ----------
        ray: Ray
            The ray when it was absorbed.
        method: str
            Either `'kT'`, `'redshift'` or `'full'`.
            `'kT'` allows emitted rays to have a wavelength within 3kT of
            the absorbed value.
            `'redshift'` ensures the emitted ray has a longer or equal
            wavelength.
            `'full'` samples the full emission spectrum, allowing the
            emitted ray to take any value.
        T: float
            The temperature to use in the `'kT'` method.

        Raises
        ------
        ValueError
            If `method` is not one of the supported options.
        NotImplementedError
            For the reserved `'boltzmann'` method.
        """
        direction = self.phase_function()
        dist = self._ems_dist
        nm = ray.wavelength
        # Different ways of sampling the emission distribution: each method
        # chooses the lower CDF bound p1; the wavelength is then drawn from
        # the emission distribution restricted to [p1, 1).
        if method == "kT":
            # Known issue: this can blue shift outside simulation range!
            # Emission energy can be within 3kT above current value. Simple bolzmann.
            eV = 1240.0 / nm
            eV = eV + 3 / 2 * kB * T  # Assumes 3 dimensional degrees of freedom
            nm = 1240.0 / eV
            p1 = dist.lookup(nm)
        elif method == "boltzmann":
            # Convolve the emission spectrum with a bolzmann factor centered at
            # the current photon energy. This will allow the energy to go up via
            # the tail in the distribution but will favor lower energy states.
            raise NotImplementedError()
        elif method == "redshift":
            # Emission energy must always redshift
            p1 = dist.lookup(nm)
        elif method == "full":
            # Emission energy is sampled from full distribution
            p1 = 0.0
        else:
            # BUG FIX: an unknown `method` previously fell through and raised
            # a confusing NameError on `p1`; fail fast with a clear message.
            raise ValueError("Unknown emission method: {}".format(method))
        p2 = 1.0
        gamma = np.random.uniform(p1, p2)
        wavelength = dist.sample(gamma)
        ray = replace(ray, direction=direction, wavelength=wavelength, source=self.name)
        return ray
| [
"numpy.random.uniform",
"pvtrace.material.distribution.Distribution.from_functions",
"pvtrace.material.utils.gaussian",
"pvtrace.material.distribution.Distribution",
"dataclasses.replace",
"logging.getLogger"
] | [((363, 390), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (380, 390), False, 'import logging\n'), ((3938, 3989), 'dataclasses.replace', 'replace', (['ray'], {'direction': 'direction', 'source': 'self.name'}), '(ray, direction=direction, source=self.name)\n', (3945, 3989), False, 'from dataclasses import replace\n'), ((12053, 12078), 'numpy.random.uniform', 'np.random.uniform', (['p1', 'p2'], {}), '(p1, p2)\n', (12070, 12078), True, 'import numpy as np\n'), ((12133, 12207), 'dataclasses.replace', 'replace', (['ray'], {'direction': 'direction', 'wavelength': 'wavelength', 'source': 'self.name'}), '(ray, direction=direction, wavelength=wavelength, source=self.name)\n', (12140, 12207), False, 'from dataclasses import replace\n'), ((3693, 3712), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3710, 3712), True, 'import numpy as np\n'), ((2786, 2832), 'pvtrace.material.distribution.Distribution', 'Distribution', ([], {'x': 'None', 'y': 'coefficient', 'hist': 'hist'}), '(x=None, y=coefficient, hist=hist)\n', (2798, 2832), False, 'from pvtrace.material.distribution import Distribution\n'), ((9742, 9801), 'pvtrace.material.distribution.Distribution', 'Distribution', ([], {'x': 'emission[:, 0]', 'y': 'emission[:, 1]', 'hist': 'hist'}), '(x=emission[:, 0], y=emission[:, 1], hist=hist)\n', (9754, 9801), False, 'from pvtrace.material.distribution import Distribution\n'), ((2912, 2977), 'pvtrace.material.distribution.Distribution', 'Distribution', ([], {'x': 'coefficient[:, 0]', 'y': 'coefficient[:, 1]', 'hist': 'hist'}), '(x=coefficient[:, 0], y=coefficient[:, 1], hist=hist)\n', (2924, 2977), False, 'from pvtrace.material.distribution import Distribution\n'), ((9957, 10008), 'pvtrace.material.distribution.Distribution.from_functions', 'Distribution.from_functions', (['x', 'emission'], {'hist': 'hist'}), '(x, emission, hist=hist)\n', (9984, 10008), False, 'from pvtrace.material.distribution import Distribution\n'), ((3166, 
3220), 'pvtrace.material.distribution.Distribution.from_functions', 'Distribution.from_functions', (['x', 'coefficient'], {'hist': 'hist'}), '(x, coefficient, hist=hist)\n', (3193, 3220), False, 'from pvtrace.material.distribution import Distribution\n'), ((9610, 9639), 'pvtrace.material.utils.gaussian', 'gaussian', (['x', '(1.0)', '(600.0)', '(40.0)'], {}), '(x, 1.0, 600.0, 40.0)\n', (9618, 9639), False, 'from pvtrace.material.utils import isotropic, gaussian\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import configparser
import numpy as np
from tkinter import filedialog
import tkinter
class Saver_Opener:
    """Open/save CSV data through Tk file dialogs, with a stubbed 'test' mode.

    When the first command-line argument is 'test', every method returns
    canned data instead of touching the file system or opening dialogs.
    The dialogs' starting directories are read from ``config.ini`` located
    one level above this module.
    """

    def __init__(self):
        # Test run parameters: 'test' as the first CLI argument enables stubs.
        if len(sys.argv) > 1:
            self.test_flag = sys.argv[1]
        else:
            self.test_flag = 'None'
        # Directory containing config.ini (one level above this file).
        self.path_to_main = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
        # configuration data
        path_config_file = os.path.join(self.path_to_main, 'config.ini')
        config = configparser.ConfigParser()
        config.read(path_config_file)
        # directories used as starting points for the file dialogs
        self.open_dir = str(config['DEFAULT']['open_dir'])
        self.script_dir = str(config['DEFAULT']['script_dir'])
        if self.test_flag == 'test':
            self.test_header_array = np.array(['header1', 'header2'])
            # NOTE(review): np.arange(1000, 2) yields an EMPTY array because
            # start > stop; np.arange(0, 1000, 2) may have been intended.
            # Kept as-is for backward compatibility.
            self.test_data = np.arange(1000, 2)
            self.test_data_2d = np.meshgrid(self.test_data, self.test_data)
            self.test_file_path = os.path.join(os.path.abspath(os.getcwd()), 'test')
            self.test_file_param_path = os.path.join(os.path.abspath(os.getcwd()), 'test.param')

    def _read_header(self, path, header):
        """Return the first `header` lines of `path`, each split on ':'."""
        header_array = []
        with open(str(path), 'r') as file_to_read:
            for i, line in enumerate(file_to_read):
                # BUG FIX: the original compared integers with `is`
                # (identity), which is unreliable; use value equality.
                if i == header:
                    break
                header_array.append(line.split(":"))
        return header_array

    def open_1D(self, path, header=0):
        """Open a CSV file; return (header_array, data) with data transposed."""
        if self.test_flag != 'test':
            header_array = self._read_header(path, header)
            temp = np.genfromtxt(str(path), dtype=float, delimiter=',')
            data = np.transpose(temp)
            return header_array, data
        return self.test_header_array, self.test_data

    def open_1D_dialog(self, directory='', header=0):
        """Ask for a file via a dialog, then behave like `open_1D`."""
        if self.test_flag != 'test':
            file_path = self.file_dialog(directory=directory, mode='Open')
            header_array = self._read_header(file_path, header)
            temp = np.genfromtxt(file_path, dtype=float, delimiter=',')
            data = np.transpose(temp)
            return header_array, data
        return self.test_header_array, self.test_data

    def save_1D_dialog(self, data, directory='', header=''):
        """Ask for a target file via a dialog and save `data` transposed as CSV."""
        if self.test_flag != 'test':
            file_path = self.file_dialog(directory=directory, mode='Save')
            np.savetxt(file_path, np.transpose(data), fmt='%.4e', delimiter=',',
                       newline='\n', header=header, footer='', comments='# ',
                       encoding=None)

    def open_2D(self, path, header=0):
        """Open a CSV file; return (header_array, data) without transposing."""
        if self.test_flag != 'test':
            header_array = self._read_header(path, header)
            data = np.genfromtxt(str(path), dtype=float, delimiter=',')
            return header_array, data
        # BUG FIX: the test branch used to reference the undefined local
        # `header_array`, raising NameError.
        return self.test_header_array, self.test_data_2d

    def open_2D_dialog(self, directory='', header=0):
        """Ask for a file via a dialog, then behave like `open_2D`."""
        if self.test_flag != 'test':
            file_path = self.file_dialog(directory=directory, mode='Open')
            header_array = self._read_header(file_path, header)
            data = np.genfromtxt(file_path, dtype=float, delimiter=',')
            return header_array, data
        return self.test_header_array, self.test_data_2d

    def open_2D_appended(self, path, header=0, chunk_size=1):
        """Open appended 2D data and split it into `chunk_size` arrays."""
        if self.test_flag != 'test':
            header_array = self._read_header(path, header)
            temp = np.genfromtxt(str(path), dtype=float, delimiter=',')
            data = np.array_split(temp, chunk_size)
            return header_array, data
        return self.test_header_array, self.test_data_2d

    def open_2D_appended_dialog(self, directory='', header=0, chunk_size=1):
        """Ask for a file via a dialog, then behave like `open_2D_appended`."""
        if self.test_flag != 'test':
            file_path = self.file_dialog(directory=directory, mode='Open')
            header_array = self._read_header(file_path, header)
            temp = np.genfromtxt(file_path, dtype=float, delimiter=',')
            data = np.array_split(temp, chunk_size)
            return header_array, data
        return self.test_header_array, self.test_data_2d

    def save_2D_dialog(self, data, directory='', header=''):
        """Ask for a target file via a dialog and save 2D `data` as CSV."""
        if self.test_flag != 'test':
            file_path = self.file_dialog(directory=directory, mode='Save')
            np.savetxt(file_path, data, fmt='%.4e', delimiter=',', newline='\n',
                       header=header, footer='', comments='#', encoding=None)

    def create_file_dialog(self, directory=''):
        """Ask for a new file via a dialog, create it empty and return its path."""
        if self.test_flag != 'test':
            file_path = self.file_dialog(directory=directory, mode='Save')
            open(file_path, "w").close()
            return file_path
        return self.test_file_path

    def create_file_parameters(self, add_name, directory=''):
        """Create a data file via a dialog; return (file_name, parameter_file_name)."""
        if self.test_flag != 'test':
            try:
                file_name = self.create_file_dialog()
                file_save_param = file_name.split('.csv')[0] + str(add_name)
            # Cancelled dialog: Tkinter raises TypeError, PyQt raises
            # FileNotFoundError; fall back to a temp file next to the config.
            except (TypeError, FileNotFoundError):
                file_name = os.path.join(self.path_to_main, 'temp.csv')
                file_save_param = file_name.split('.csv')[0] + str(add_name)
            return file_name, file_save_param
        return self.test_file_path, self.test_file_param_path

    def save_header(self, filename, header='', mode='w'):
        """Write only the commented header lines to `filename` (created if needed)."""
        if self.test_flag != 'test':
            with open(filename, mode) as file_for_save:
                np.savetxt(file_for_save, [], fmt='%.4e', delimiter=',',
                           newline='\n', header=header, footer='', comments='# ',
                           encoding=None)
        else:
            # Test mode still touches the file so downstream code finds it.
            open(filename, mode).close()

    def save_data(self, filename, data, header='', mode='w'):
        """Save 2D `data` to `filename`; 3D data is written one file per slice."""
        if self.test_flag != 'test':
            if len(data.shape) == 2:
                with open(filename, mode) as file_for_save:
                    np.savetxt(file_for_save, data, fmt='%.4e', delimiter=',',
                               newline='\n', header=header, footer='',
                               comments='# ', encoding=None)
            elif len(data.shape) == 3:
                for i in range(0, int(data.shape[0])):
                    # First slice keeps the given name; later slices get a
                    # numeric suffix: name_1.csv, name_2.csv, ...
                    if i == 0:
                        file_for_save_i = filename
                    else:
                        file_for_save_i = filename.split('.csv')[0] + '_' + str(i) + '.csv'
                    with open(file_for_save_i, mode) as file_for_save:
                        np.savetxt(file_for_save, np.transpose(data[i]), fmt='%.4e',
                                   delimiter=',', newline='\n', header=header,
                                   footer='', comments='# ', encoding=None)
        else:
            open(filename, mode).close()

    def file_dialog(self, directory='', mode='Open'):
        """Show a Tk open/save dialog and return the chosen path."""
        root = tkinter.Tk()
        root.withdraw()
        filetypes = [("CSV", "*.csv"), ("TXT", "*.txt"),
                     ("DAT", "*.dat"), ("all", "*.*")]
        try:
            if mode == 'Open':
                file_path = filedialog.askopenfilename(
                    initialdir=self.open_dir, filetypes=filetypes,
                    title='Select file to open')
            elif mode == 'Save':
                file_path = filedialog.asksaveasfilename(
                    initialdir=self.open_dir, filetypes=filetypes,
                    title='Select file to save')
            else:
                # BUG FIX: an unknown mode used to raise NameError on return;
                # fail with a clear message instead.
                raise ValueError("mode must be 'Open' or 'Save'")
        finally:
            # BUG FIX: dispose of the hidden Tk root instead of leaking it.
            root.destroy()
        return file_path
def main():
    """Entry-point placeholder; this module is intended to be imported."""
    pass


if __name__ == '__main__':
    main()
"numpy.meshgrid",
"os.getcwd",
"os.path.dirname",
"numpy.savetxt",
"numpy.transpose",
"numpy.genfromtxt",
"numpy.array",
"numpy.arange",
"numpy.array_split",
"configparser.ConfigParser",
"os.path.join",
"tkinter.Tk"
] | [((775, 820), 'os.path.join', 'os.path.join', (['self.path_to_main', '"""config.ini"""'], {}), "(self.path_to_main, 'config.ini')\n", (787, 820), False, 'import os\n'), ((837, 864), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (862, 864), False, 'import configparser\n'), ((9576, 9588), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (9586, 9588), False, 'import tkinter\n'), ((1122, 1154), 'numpy.array', 'np.array', (["['header1', 'header2']"], {}), "(['header1', 'header2'])\n", (1130, 1154), True, 'import numpy as np\n'), ((1184, 1202), 'numpy.arange', 'np.arange', (['(1000)', '(2)'], {}), '(1000, 2)\n', (1193, 1202), True, 'import numpy as np\n'), ((1235, 1278), 'numpy.meshgrid', 'np.meshgrid', (['self.test_data', 'self.test_data'], {}), '(self.test_data, self.test_data)\n', (1246, 1278), True, 'import numpy as np\n'), ((1920, 1938), 'numpy.transpose', 'np.transpose', (['temp'], {}), '(temp)\n', (1932, 1938), True, 'import numpy as np\n'), ((2555, 2607), 'numpy.genfromtxt', 'np.genfromtxt', (['file_path'], {'dtype': 'float', 'delimiter': '""","""'}), "(file_path, dtype=float, delimiter=',')\n", (2568, 2607), True, 'import numpy as np\n'), ((2632, 2650), 'numpy.transpose', 'np.transpose', (['temp'], {}), '(temp)\n', (2644, 2650), True, 'import numpy as np\n'), ((4280, 4332), 'numpy.genfromtxt', 'np.genfromtxt', (['file_path'], {'dtype': 'float', 'delimiter': '""","""'}), "(file_path, dtype=float, delimiter=',')\n", (4293, 4332), True, 'import numpy as np\n'), ((4984, 5016), 'numpy.array_split', 'np.array_split', (['temp', 'chunk_size'], {}), '(temp, chunk_size)\n', (4998, 5016), True, 'import numpy as np\n'), ((5659, 5711), 'numpy.genfromtxt', 'np.genfromtxt', (['file_path'], {'dtype': 'float', 'delimiter': '""","""'}), "(file_path, dtype=float, delimiter=',')\n", (5672, 5711), True, 'import numpy as np\n'), ((5735, 5767), 'numpy.array_split', 'np.array_split', (['temp', 'chunk_size'], {}), '(temp, chunk_size)\n', (5749, 5767), 
True, 'import numpy as np\n'), ((6101, 6229), 'numpy.savetxt', 'np.savetxt', (['file_path', 'data'], {'fmt': '"""%.4e"""', 'delimiter': '""","""', 'newline': '"""\n"""', 'header': 'header', 'footer': '""""""', 'comments': '"""#"""', 'encoding': 'None'}), "(file_path, data, fmt='%.4e', delimiter=',', newline='\\n', header\n =header, footer='', comments='#', encoding=None)\n", (6111, 6229), True, 'import numpy as np\n'), ((7646, 7776), 'numpy.savetxt', 'np.savetxt', (['file_for_save', '[]'], {'fmt': '"""%.4e"""', 'delimiter': '""","""', 'newline': '"""\n"""', 'header': 'header', 'footer': '""""""', 'comments': '"""# """', 'encoding': 'None'}), "(file_for_save, [], fmt='%.4e', delimiter=',', newline='\\n',\n header=header, footer='', comments='# ', encoding=None)\n", (7656, 7776), True, 'import numpy as np\n'), ((520, 545), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (535, 545), False, 'import os\n'), ((3004, 3022), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (3016, 3022), True, 'import numpy as np\n'), ((8183, 8315), 'numpy.savetxt', 'np.savetxt', (['file_for_save', 'data'], {'fmt': '"""%.4e"""', 'delimiter': '""","""', 'newline': '"""\n"""', 'header': 'header', 'footer': '""""""', 'comments': '"""# """', 'encoding': 'None'}), "(file_for_save, data, fmt='%.4e', delimiter=',', newline='\\n',\n header=header, footer='', comments='# ', encoding=None)\n", (8193, 8315), True, 'import numpy as np\n'), ((1342, 1353), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1351, 1353), False, 'import os\n'), ((1433, 1444), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1442, 1444), False, 'import os\n'), ((6990, 7033), 'os.path.join', 'os.path.join', (['self.path_to_main', '"""temp.csv"""'], {}), "(self.path_to_main, 'temp.csv')\n", (7002, 7033), False, 'import os\n'), ((7211, 7254), 'os.path.join', 'os.path.join', (['self.path_to_main', '"""temp.csv"""'], {}), "(self.path_to_main, 'temp.csv')\n", (7223, 7254), False, 'import os\n'), 
((8697, 8718), 'numpy.transpose', 'np.transpose', (['data[i]'], {}), '(data[i])\n', (8709, 8718), True, 'import numpy as np\n'), ((9157, 9178), 'numpy.transpose', 'np.transpose', (['data[i]'], {}), '(data[i])\n', (9169, 9178), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt
import numpy as np
import math
def find_days_to_final_goal(start_value, end_value, iteration_amount):
    """
    Return the total number of days needed to reach the goal, assuming the
    value grows exponentially at rate `iteration_amount` per day.

    :param start_value: the starting value of the goal.
    :param end_value: the target value of the goal.
    :param iteration_amount: daily growth rate expressed as a fraction, e.g. 0.1.
    :return: whole number of days (non-negative integer).
    """
    growth_ratio = end_value / start_value
    raw_days = math.ceil(math.log(growth_ratio) / iteration_amount)
    return int(abs(raw_days))
def graph_goal_progress(goal_name, iteration_amount, iteration_towards_goal, start_value, end_value, current_value):
    """
    Plot the full exponential goal curve (black) and the user's progress so far
    (red), save the figure to Images/<goal_name>.png, and return the file name.

    :param goal_name: the name of the goal used to create the filename.
    :param iteration_amount: the percentage the starting value grows by per day.
    :param iteration_towards_goal: the days of progress the user has made.
    :param start_value: the value the user started their journey with.
    :param end_value: the value the user wants to achieve.
    :param current_value: the current value of the goal the user has done
        (unused by the plot; kept for interface compatibility).
    :return: the goal_name with the suffix .png; the image itself is written
        under the Images directory.
    """
    total_days = find_days_to_final_goal(start_value, end_value, iteration_amount)
    # Day indices for the whole goal curve and for the progress made so far.
    goal_days = np.linspace(0, total_days, total_days).astype(dtype=np.int64)
    progress_days = np.linspace(0, iteration_towards_goal, iteration_towards_goal).astype(dtype=np.int64)
    # Exponential growth curves: value(day) = start_value * e^(rate * day).
    # (The original also called ygoal_graph.astype(np.int64) and discarded the
    # result — a no-op that has been removed.)
    goal_values = start_value * np.exp(iteration_amount * goal_days)
    progress_values = start_value * np.exp(iteration_amount * progress_days)
    plt.plot(goal_days, goal_values, color='black')
    plt.plot(progress_days, progress_values, color='red')
    # Forward slash works on every OS; the original backslash ('Images\\...')
    # only targeted the Images folder on Windows and relied on an invalid
    # escape sequence in the f-string.
    plt.savefig(f'Images/{goal_name}.png')
    # Close the figure that was actually drawn on; the original opened a brand
    # new figure via plt.figure().clear(), leaking the plotted one.
    plt.close()
    return f'{goal_name}.png'
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.linspace",
"math.log",
"matplotlib.pyplot.savefig"
] | [((1448, 1486), 'numpy.linspace', 'np.linspace', (['(0)', 'total_days', 'total_days'], {}), '(0, total_days, total_days)\n', (1459, 1486), True, 'import numpy as np\n'), ((1859, 1907), 'matplotlib.pyplot.plot', 'plt.plot', (['goal_graph', 'ygoal_graph'], {'color': '"""black"""'}), "(goal_graph, ygoal_graph, color='black')\n", (1867, 1907), True, 'from matplotlib import pyplot as plt\n'), ((1912, 1982), 'matplotlib.pyplot.plot', 'plt.plot', (['current_progress_graph', 'ycurrent_progress_graph'], {'color': '"""red"""'}), "(current_progress_graph, ycurrent_progress_graph, color='red')\n", (1920, 1982), True, 'from matplotlib import pyplot as plt\n'), ((1987, 2026), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""Images\\\\{goal_name}.png"""'], {}), "(f'Images\\\\{goal_name}.png')\n", (1998, 2026), True, 'from matplotlib import pyplot as plt\n'), ((1805, 1854), 'numpy.exp', 'np.exp', (['(iteration_amount * current_progress_graph)'], {}), '(iteration_amount * current_progress_graph)\n', (1811, 1854), True, 'import numpy as np\n'), ((1566, 1628), 'numpy.linspace', 'np.linspace', (['(0)', 'iteration_towards_goal', 'iteration_towards_goal'], {}), '(0, iteration_towards_goal, iteration_towards_goal)\n', (1577, 1628), True, 'import numpy as np\n'), ((2030, 2042), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2040, 2042), True, 'from matplotlib import pyplot as plt\n'), ((543, 576), 'math.log', 'math.log', (['(end_value / start_value)'], {}), '(end_value / start_value)\n', (551, 576), False, 'import math\n')] |
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
# ms-python.python added
# ms-python.python notebook boilerplate: switch the working directory to the
# Chapter3 folder next to the notebook, if it exists.
import os
try:
    os.chdir(os.path.join(os.getcwd(), 'Chapter3'))
    print(os.getcwd())
except OSError:
    # Folder missing (e.g. already inside Chapter3): keep the current
    # directory. The original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt, which is too broad for a chdir attempt.
    pass
#%% [markdown]
# # 第3章 k近邻法
#%% [markdown]
# 1.$k$近邻法是基本且简单的分类与回归方法。$k$近邻法的基本做法是:对给定的训练实例点和输入实例点,首先确定输入实例点的$k$个最近邻训练实例点,然后利用这$k$个训练实例点的类的多数来预测输入实例点的类。
#
# 2.$k$近邻模型对应于基于训练数据集对特征空间的一个划分。$k$近邻法中,当训练集、距离度量、$k$值及分类决策规则确定后,其结果唯一确定。
#
# 3.$k$近邻法三要素:距离度量、$k$值的选择和分类决策规则。常用的距离度量是欧氏距离及更一般的**pL**距离。$k$值小时,$k$近邻模型更复杂;$k$值大时,$k$近邻模型更简单。$k$值的选择反映了对近似误差与估计误差之间的权衡,通常由交叉验证选择最优的$k$。
#
# 常用的分类决策规则是多数表决,对应于经验风险最小化。
#
# 4.$k$近邻法的实现需要考虑如何快速搜索k个最近邻点。**kd**树是一种便于对k维空间中的数据进行快速检索的数据结构。kd树是二叉树,表示对$k$维空间的一个划分,其每个结点对应于$k$维空间划分中的一个超矩形区域。利用**kd**树可以省去对大部分数据点的搜索, 从而减少搜索的计算量。
#%% [markdown]
# ### 距离度量
#%% [markdown]
# 设特征空间$x$是$n$维实数向量空间 ,$x_{i}, x_{j} \in \mathcal{X}$,$x_{i}=\left(x_{i}^{(1)}, x_{i}^{(2)}, \cdots, x_{i}^{(n)}\right)^{\mathrm{T}}$,$x_{j}=\left(x_{j}^{(1)}, x_{j}^{(2)}, \cdots, x_{j}^{(n)}\right)^{\mathrm{T}}$
# ,则:$x_i$,$x_j$的$L_p$距离定义为:
#
#
# $L_{p}\left(x_{i}, x_{j}\right)=\left(\sum_{i=1}^{n}\left|x_{i}^{(i)}-x_{j}^{(l)}\right|^{p}\right)^{\frac{1}{p}}$
#
# - $p= 1$ 曼哈顿距离
# - $p= 2$ 欧氏距离
# - $p= inf$ 闵式距离minkowski_distance
#%%
import math
from itertools import combinations
#%%
def L(x, y, p=2):
    """
    Return the Minkowski (L_p) distance between points x and y.

    Returns 0 when the points have mismatched lengths or fewer than two
    components, mirroring the original contract.

    :param x: first point as a sequence of numbers.
    :param y: second point as a sequence of numbers.
    :param p: order of the metric (1 = Manhattan, 2 = Euclidean, ...).
    """
    if len(x) != len(y) or len(x) <= 1:
        return 0
    # The original accumulated into a local named `sum`, shadowing the builtin;
    # use the builtin sum over a generator instead.
    total = sum(abs(a - b) ** p for a, b in zip(x, y))
    return total ** (1 / p)
#%% [markdown]
# ### Textbook example 3.1
#%%
x1 = [1, 1]
x2 = [5, 1]
x3 = [4, 4]
#%%
# For p = 1..4, report which of x2/x3 is the nearest point to x1 under L_p.
for p in range(1, 5):
    candidates = {'最近点:{}'.format(neighbour): L(x1, neighbour, p=p) for neighbour in [x2, x3]}
    print(min(zip(candidates.values(), candidates.keys())))
#%% [markdown]
# # KNN实现
#%% [markdown]
# ## 1.导入必要的库
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from collections import Counter
#%% [markdown]
# ## 2.导入数据集
#%% [markdown]
# #### 这里使用的是鸢尾属植物数据集,该数据集测量了所有150个样本的4个特征,分别是:
# #### sepal length(花萼长度)、sepal width(花萼宽度)、petal length(花瓣长度)、petal width(花瓣宽度)
# #### 此实验中,我们只考察前两个特征
#%%
# Load the iris data into a DataFrame and give the columns readable names.
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['label'] = iris.target
df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
#%%
df
#%%
# Scatter plot of the first two classes (labels 0 and 1), using only the
# 'sepal length' and 'sepal width' attributes.
plt.scatter(df[:50]['sepal length'], df[:50]['sepal width'], label='0')
plt.scatter(df[50:100]['sepal length'], df[50:100]['sepal width'], label='1')
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.legend()
#%% [markdown]
# ## 3. Splitting the data set
#%%
# Keep only the first two feature columns plus the label column.
data = np.array(df.iloc[:100, [0, 1, -1]])
X, y = data[:,:-1], data[:,-1]
# Randomly split the samples into an 80% training set and a 20% test set.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#%% [markdown]
# ## 4.实现KNN
#%%
class KNN:
    """Brute-force k-nearest-neighbour classifier over a labelled sample set."""

    def __init__(self, X_train, y_train, n_neighbors=3, p=2):
        """
        parameter: n_neighbors  number of neighbours that vote (the k value)
        parameter: p            order of the Minkowski distance metric
        """
        self.n = n_neighbors
        self.p = p
        self.X_train = X_train
        self.y_train = y_train

    def predict(self, X):
        """Predict the class label of a single unlabelled point X."""
        # Seed the candidate list with the first n training points
        # (assumes n <= len(X_train)).
        knn_list = []
        for i in range(self.n):
            dist = np.linalg.norm(X - self.X_train[i], ord=self.p)
            knn_list.append((dist, self.y_train[i]))
        # For every remaining point, replace the current farthest candidate
        # whenever the new point is closer, leaving the n nearest overall.
        for i in range(self.n, len(self.X_train)):
            max_index = knn_list.index(max(knn_list, key=lambda t: t[0]))
            dist = np.linalg.norm(X - self.X_train[i], ord=self.p)
            if knn_list[max_index][0] > dist:
                knn_list[max_index] = (dist, self.y_train[i])
        # Majority vote among the n nearest neighbours decides the label.
        knn = [k[-1] for k in knn_list]
        count_pairs = Counter(knn)
        max_count = sorted(count_pairs.items(), key=lambda t: t[1])[-1][0]
        return max_count

    def score(self, X_test, y_test):
        """Return classification accuracy of predict() on a labelled test set."""
        # The original also set an unused local `n = 10` here; removed.
        right_count = 0
        for X, y in zip(X_test, y_test):
            label = self.predict(X)
            if label == y:
                right_count += 1
        return right_count / len(X_test)
#%% [markdown]
# ## 5. Create a KNN instance
#%%
clf = KNN(X_train, y_train)
#%% [markdown]
# ## 6. Model accuracy
#%%
print('准确率:{:.2%}'.format(clf.score(X_test, y_test)))
#%% [markdown]
# ## 7. Predict the class of a new test point
#%%
test_point = [6.0, 3.0]
print('测试点X所属类别:{}'.format(clf.predict(test_point)))
#%%
# Plot the two classes together with the freshly classified test point.
plt.scatter(df[:50]['sepal length'], df[:50]['sepal width'], label='0')
plt.scatter(df[50:100]['sepal length'], df[50:100]['sepal width'], label='1')
plt.plot(test_point[0], test_point[1], 'bo', label='test_point')
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.legend()
#%% [markdown]
# ### scikit-learn实例
#%%
from sklearn.neighbors import KNeighborsClassifier
#%%
# Same experiment with scikit-learn's built-in KNN implementation;
# fit() returns the estimator itself, so construction and fitting chain.
clf_sk = KNeighborsClassifier().fit(X_train, y_train)
#%%
clf_sk.score(X_test, y_test)
#%% [markdown]
# ### sklearn.neighbors.KNeighborsClassifier
#
# - n_neighbors: 临近点个数
# - p: 距离度量
# - algorithm: 近邻算法,可选{'auto', 'ball_tree', 'kd_tree', 'brute'}
# - weights: 确定近邻的权重
#%% [markdown]
# # kd树
#%% [markdown]
# k近邻法最简单的实现方法是线性扫描,这时要计算输入实例与每一个训练实例的距离,当训练集很大时,计算非常耗时,为了提高k近邻搜索的效率,可以考虑使用特殊的结构存储训练数据,以减少计算距离的次数,kd树就是其中的一种方法
# **kd**树是一种对k维空间中的实例点进行存储以便对其进行快速检索的树形数据结构。
#
# **kd**树是二叉树,表示对$k$维空间的一个划分(partition)。构造**kd**树相当于不断地用垂直于坐标轴的超平面将$k$维空间切分,构成一系列的k维超矩形区域。kd树的每个结点对应于一个$k$维超矩形区域。
#
# 构造**kd**树的方法如下:
#
# 构造根结点,使根结点对应于$k$维空间中包含所有实例点的超矩形区域;通过下面的递归方法,不断地对$k$维空间进行切分,生成子结点。在超矩形区域(结点)上选择一个坐标轴和在此坐标轴上的一个切分点,确定一个超平面,这个超平面通过选定的切分点并垂直于选定的坐标轴,将当前超矩形区域切分为左右两个子区域
# (子结点);这时,实例被分到两个子区域。这个过程直到子区域内没有实例时终止(终止时的结点为叶结点)。在此过程中,将实例保存在相应的结点上。
#
# 通常,依次选择坐标轴对空间切分,选择训练实例点在选定坐标轴上的中位数
# (median)为切分点,这样得到的**kd**树是平衡的。注意,平衡的**kd**树搜索时的效率未必是最优的。
#
#%% [markdown]
# ### 构造平衡kd树算法
# 输入:$k$维空间数据集$T=\{x_1,x_2,…,x_N\}$,
#
# 其中$x_{i}=\left(x_{i}^{(1)}, x_{i}^{(2)}, \cdots, x_{i}^{(k)}\right)^{\mathrm{T}}$ ,$i=1,2,…,N$;
#
# 输出:**kd**树。
#
# (1)开始:构造根结点,根结点对应于包含$T$的$k$维空间的超矩形区域。
#
# 选择$x^{(1)}$为坐标轴,以T中所有实例的$x^{(1)}$坐标的中位数为切分点,将根结点对应的超矩形区域切分为两个子区域。切分由通过切分点并与坐标轴$x^{(1)}$垂直的超平面实现。
#
# 由根结点生成深度为1的左、右子结点:左子结点对应坐标$x^{(1)}$小于切分点的子区域, 右子结点对应于坐标$x^{(1)}$大于切分点的子区域。
#
# 将落在切分超平面上的实例点保存在根结点。
#
# (2)重复:对深度为$j$的结点,选择$x^{(1)}$为切分的坐标轴,$l=j(modk)+1$,以该结点的区域中所有实例的$x^{(1)}$坐标的中位数为切分点,将该结点对应的超矩形区域切分为两个子区域。切分由通过切分点并与坐标轴$x^{(1)}$垂直的超平面实现。
#
# 由该结点生成深度为$j+1$的左、右子结点:左子结点对应坐标$x^{(1)}$小于切分点的子区域,右子结点对应坐标$x^{(1)}$大于切分点的子区域。
#
# 将落在切分超平面上的实例点保存在该结点。
#
# (3)直到两个子区域没有实例存在时停止。从而形成**kd**树的区域划分。
#%% [markdown]
# ## 1.构建kd树
#%%
# The data structure stored in each kd-tree node.
class KdNode(object):
    def __init__(self, dom_elt, split, left, right):
        self.dom_elt = dom_elt  # the k-dimensional sample point stored at this node
        self.split = split      # index of the dimension this node splits on
        self.left = left        # kd-subtree for the half-space left of the split plane
        self.right = right      # kd-subtree for the half-space right of the split plane

    def __repr__(self):
        # Debug-friendly representation; subtrees omitted to keep output short.
        return f'KdNode(dom_elt={self.dom_elt!r}, split={self.split!r})'
# Build a kd-tree over a list of k-dimensional points.
class KdTree(object):
    def __init__(self, data):
        k = len(data[0])  # dimensionality of the samples

        def CreateNode(split, data_set):
            """Recursively build the subtree for data_set, splitting on dimension `split`."""
            if not data_set:  # nothing left to place in this region
                return None
            # Sort on the split dimension (in place, like list.sort with a key
            # callable) and take the median point as the pivot for this node.
            data_set.sort(key=lambda point: point[split])
            median_idx = len(data_set) // 2  # integer division picks the median index
            pivot = data_set[median_idx]
            next_split = (split + 1) % k  # cycle through the dimensions
            left_subtree = CreateNode(next_split, data_set[:median_idx])
            right_subtree = CreateNode(next_split, data_set[median_idx + 1:])
            return KdNode(pivot, split, left_subtree, right_subtree)

        self.root = CreateNode(0, data)  # build from dimension 0 and keep the root
# Pre-order traversal of a kd-tree, printing each node and its children.
def preOrder(root):
    print(root.dom_elt, "split = ", root.split)
    print("*" * 50)
    # Visit the left child then the right child, printing a leaf message when
    # a child is absent — exactly the original branch-by-branch output.
    for child, child_msg, leaf_msg in (
        (root.left, "的下一个左结点是", "的下一个左结点是叶结点。"),
        (root.right, "的下一个右结点是", "的下一个右结点是叶结点。"),
    ):
        if child:
            print(root.dom_elt, child_msg, child.dom_elt)
            preOrder(child)
        else:
            print(root.dom_elt, leaf_msg)
#%% [markdown]
# ### 例3.2
#%%
import pandas as pd
import numpy as np
# The data could also be inlined, e.g. [[2, 3], [2, 1], [2, 4], [100, 0]]
df = pd.read_csv("Chapter3/datasets/test1.csv", header=None)
data = np.array(df).tolist()  # DataFrame -> nested Python lists
#%%
kd = KdTree(data)
preOrder(kd.root)
#%% [markdown]
# ## 2.搜索kd树
#%%
# 对构建好的kd树进行搜索,寻找与目标点最近的样本点:
from math import sqrt
from collections import namedtuple
# A namedtuple holding the nearest point found, its distance, and the number
# of tree nodes visited during the search.
result = namedtuple("Result_tuple","nearest_point nearest_dist nodes_visited")
# Search the kd-tree for the stored point closest to `point`.
def find_nearest(tree, point):
    k = len(point)  # dimensionality of the data
    def travel(kd_node, target, max_dist):
        # Recursive search of the subtree rooted at kd_node, pruning with max_dist.
        if kd_node is None:
            return result([0] * k, float("inf"),0)  # float("inf") / float("-inf") denote +/- infinity
        nodes_visited = 1
        s = kd_node.split  # dimension this node splits on
        pivot = kd_node.dom_elt  # sample point stored at this node
        #-----------------------------------------------------------------------------------
        # Descend towards the leaf of the region that contains the target point.
        if target[s] <= pivot[s]:  # target lies on the left side of the split plane in dimension s
            nearer_node = kd_node.left  # visit the left subtree first
            further_node = kd_node.right  # remember the right subtree for later
        else:  # target is closer to the right subtree
            nearer_node = kd_node.right  # visit the right subtree first
            further_node = kd_node.left
        temp1 = travel(nearer_node, target, max_dist)  # recurse into the region containing the target
        print("temp1 = ", temp1)
        #-------------------------------------------------------------------------------------
        # Use that subtree's result as the current best candidate.
        nearest = temp1.nearest_point
        dist = temp1.nearest_dist  # current best distance
        print("nearest: {}".format(nearest))
        print("dist: {}".format(dist))
        nodes_visited += temp1.nodes_visited
        print("nodes_visited: {}".format(nodes_visited))
        if dist < max_dist:
            max_dist = dist  # the answer must lie inside the ball centred on target with radius max_dist
        temp_dist = abs(pivot[s] - target[s])  # distance from target to the split hyperplane along dimension s
        print("超球体半径:{}".format(temp_dist))
        if max_dist < temp_dist:  # does the ball intersect the split hyperplane?
            return result(nearest, dist, nodes_visited)  # no intersection: the other side cannot help, return early
        #----------------------------------------------------------------------
        # Euclidean distance between the target and this node's point.
        temp_dist = sqrt(sum((p1 - p2)**2 for p1, p2 in zip(pivot, target)))
        print("temp_dist: {}".format(temp_dist))
        if temp_dist < dist:  # this node's point is closer
            nearest = pivot  # update the nearest point
            dist = temp_dist  # update the best distance
            max_dist = dist  # shrink the search ball radius
        # Check whether the other child's region holds an even closer point.
        temp2 = travel(further_node, target, max_dist)
        print("temp2: {}".format(temp2))
        nodes_visited += temp2.nodes_visited
        if temp2.nearest_dist < dist:  # the other subtree found a closer point
            nearest = temp2.nearest_point  # update the nearest point
            dist = temp2.nearest_dist  # update the best distance
        return result(nearest, dist, nodes_visited)
    return travel(tree.root, point, float("inf"))  # start the recursion at the root
#%%
# Query the tree built above: nearest stored point to [3, 1, 4].
ret = find_nearest(kd, [3, 1, 4])
print(ret)
#%%
# time.clock was deprecated in 3.3 and removed in Python 3.8; alias
# perf_counter under the old name so the timing code below keeps working
# on every Python 3 version.
try:
    from time import clock
except ImportError:
    from time import perf_counter as clock
from random import random
# Generate one random k-dimensional vector, each component uniform in [0, 1).
def random_point(k):
    return [random() for _dim in range(k)]
# Generate n random k-dimensional vectors.
def random_points(k, n):
    return [[random() for _dim in range(k)] for _pt in range(n)]
#%%
# Benchmark: kd-tree construction over 400k points plus one nearest query.
# NOTE(review): this relies on `clock` imported from `time`, which was
# removed in Python 3.8 — confirm a perf_counter fallback is in place.
N = 400000
t0 = clock()
kd2 = KdTree(random_points(3, N))  # build a kd-tree over 400,000 random 3-D sample points
ret2 = find_nearest(kd2, [0.1,0.5,0.8])  # search the 400,000 samples for the point nearest the target
t1 = clock()
print ("time: ",t1-t0, "s")
print (ret2)
#%%
| [
"pandas.DataFrame",
"sklearn.datasets.load_iris",
"matplotlib.pyplot.plot",
"math.pow",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"sklearn.model_selection.train_test_split",
"os.getcwd",
"collections.Counter",
"time.clock",
"random.random",
"sklearn.neighbor... | [((2392, 2403), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (2401, 2403), False, 'from sklearn.datasets import load_iris\n'), ((2409, 2460), 'pandas.DataFrame', 'pd.DataFrame', (['iris.data'], {'columns': 'iris.feature_names'}), '(iris.data, columns=iris.feature_names)\n', (2421, 2460), True, 'import pandas as pd\n'), ((2647, 2718), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df[:50]['sepal length']", "df[:50]['sepal width']"], {'label': '"""0"""'}), "(df[:50]['sepal length'], df[:50]['sepal width'], label='0')\n", (2658, 2718), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2796), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df[50:100]['sepal length']", "df[50:100]['sepal width']"], {'label': '"""1"""'}), "(df[50:100]['sepal length'], df[50:100]['sepal width'], label='1')\n", (2730, 2796), True, 'import matplotlib.pyplot as plt\n'), ((2797, 2823), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sepal length"""'], {}), "('sepal length')\n", (2807, 2823), True, 'import matplotlib.pyplot as plt\n'), ((2824, 2849), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""sepal width"""'], {}), "('sepal width')\n", (2834, 2849), True, 'import matplotlib.pyplot as plt\n'), ((2850, 2862), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2860, 2862), True, 'import matplotlib.pyplot as plt\n'), ((2927, 2962), 'numpy.array', 'np.array', (['df.iloc[:100, [0, 1, -1]]'], {}), '(df.iloc[:100, [0, 1, -1]])\n', (2935, 2962), True, 'import numpy as np\n'), ((3061, 3098), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (3077, 3098), False, 'from sklearn.model_selection import train_test_split\n'), ((4817, 4888), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df[:50]['sepal length']", "df[:50]['sepal width']"], {'label': '"""0"""'}), "(df[:50]['sepal length'], df[:50]['sepal width'], label='0')\n", (4828, 4888), True, 'import matplotlib.pyplot as 
plt\n'), ((4889, 4966), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df[50:100]['sepal length']", "df[50:100]['sepal width']"], {'label': '"""1"""'}), "(df[50:100]['sepal length'], df[50:100]['sepal width'], label='1')\n", (4900, 4966), True, 'import matplotlib.pyplot as plt\n'), ((4967, 5031), 'matplotlib.pyplot.plot', 'plt.plot', (['test_point[0]', 'test_point[1]', '"""bo"""'], {'label': '"""test_point"""'}), "(test_point[0], test_point[1], 'bo', label='test_point')\n", (4975, 5031), True, 'import matplotlib.pyplot as plt\n'), ((5032, 5058), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sepal length"""'], {}), "('sepal length')\n", (5042, 5058), True, 'import matplotlib.pyplot as plt\n'), ((5059, 5084), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""sepal width"""'], {}), "('sepal width')\n", (5069, 5084), True, 'import matplotlib.pyplot as plt\n'), ((5085, 5097), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5095, 5097), True, 'import matplotlib.pyplot as plt\n'), ((5206, 5228), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (5226, 5228), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((8789, 8844), 'pandas.read_csv', 'pd.read_csv', (['"""Chapter3/datasets/test1.csv"""'], {'header': 'None'}), "('Chapter3/datasets/test1.csv', header=None)\n", (8800, 8844), True, 'import pandas as pd\n'), ((8852, 8864), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (8860, 8864), True, 'import numpy as np\n'), ((9097, 9169), 'collections.namedtuple', 'namedtuple', (['"""Result_tuple"""', '"""nearest_point nearest_dist nodes_visited"""'], {}), "('Result_tuple', 'nearest_point nearest_dist nodes_visited')\n", (9107, 9169), False, 'from collections import namedtuple\n'), ((12013, 12020), 'time.clock', 'clock', ([], {}), '()\n', (12018, 12020), False, 'from time import clock\n'), ((12158, 12165), 'time.clock', 'clock', ([], {}), '()\n', (12163, 12165), False, 'from time import clock\n'), ((337, 348), 
'os.getcwd', 'os.getcwd', ([], {}), '()\n', (346, 348), False, 'import os\n'), ((1632, 1652), 'math.pow', 'math.pow', (['sum', '(1 / p)'], {}), '(sum, 1 / p)\n', (1640, 1652), False, 'import math\n'), ((4140, 4152), 'collections.Counter', 'Counter', (['knn'], {}), '(knn)\n', (4147, 4152), False, 'from collections import Counter\n'), ((11869, 11877), 'random.random', 'random', ([], {}), '()\n', (11875, 11877), False, 'from random import random\n'), ((304, 315), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (313, 315), False, 'import os\n'), ((3560, 3607), 'numpy.linalg.norm', 'np.linalg.norm', (['(X - self.X_train[i])'], {'ord': 'self.p'}), '(X - self.X_train[i], ord=self.p)\n', (3574, 3607), True, 'import numpy as np\n'), ((3880, 3927), 'numpy.linalg.norm', 'np.linalg.norm', (['(X - self.X_train[i])'], {'ord': 'self.p'}), '(X - self.X_train[i], ord=self.p)\n', (3894, 3927), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.