code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import time
import numpy as np
import multiprocessing as mp
import ctypes
from rlpyt.samplers.base import BaseSampler
from rlpyt.samplers.utils import build_samples_buffer, build_step_buffer
from rlpyt.samplers.parallel_worker import sampling_process
from rlpyt.samplers.gpu.collectors import EvalCollector
from rlpyt.utils.logging import logger
from rlpyt.agents.base import AgentInputs
from rlpyt.utils.collections import AttrDict
EVAL_TRAJ_CHECK = 0.2 # Seconds.
class AsyncGpuSampler(BaseSampler):
    """Asynchronous sampler: central GPU action servers with forked CPU workers.

    The master process builds double-buffered, shared-memory sample arrays.
    A forked sampler runner then launches one action-server process per entry
    in ``affinity``; each server forks its own environment workers and serves
    batched agent actions to them through shared step buffers, synchronizing
    via the barriers/semaphores in ``ctrl``/``sync``.
    """

    ###########################################################################
    # Master runner methods.
    ###########################################################################

    def master_runner_initialize(self, agent, bootstrap_value=False,
            traj_info_kwargs=None):
        """Builds example data and double-buffered shared sample arrays.

        :param agent: Agent instance; initialized here with shared memory.
        :param bootstrap_value: Whether to allocate bootstrap-value arrays.
        :param traj_info_kwargs: Optional attributes to set on ``TrajInfoCls``
            (stored with a leading underscore).
        :return: ``(double_buffer, examples)``.
        """
        # Construct an example of each kind of data that needs to be stored.
        env = self.EnvCls(**self.env_kwargs)
        agent.initialize(env.spaces, share_memory=True)  # Actual agent initialization, keep.
        samples_pyt, samples_np, examples = build_samples_buffer(agent, env,
            self.batch_spec, bootstrap_value, agent_shared=True, env_shared=True,
            subprocess=False)  # Would like subprocess=True, but might hang?
        _, samples_np2, _ = build_samples_buffer(agent, env, self.batch_spec,
            bootstrap_value, agent_shared=True, env_shared=True, subprocess=False)
        env.close()
        del env
        if traj_info_kwargs:
            for k, v in traj_info_kwargs.items():
                # Stored as class attributes so forked workers inherit them.
                setattr(self.TrajInfoCls, "_" + k, v)
        self.double_buffer = double_buffer = (samples_np, samples_np2)
        self.examples = examples
        return double_buffer, examples

    ###########################################################################
    # Sampler runner methods (forked).
    ###########################################################################

    def sample_runner_initialize(self, affinity):
        """Forks one action-server process per affinity entry.

        :param affinity: List of per-server affinity dicts, each holding
            "workers_cpus" (list of CPU ids) and "cuda_idx".
        """
        n_server = len(affinity)
        n_worker = sum(len(aff["workers_cpus"]) for aff in affinity)
        n_envs_list = [self.batch_spec.B // n_worker] * n_worker
        if not self.batch_spec.B % n_worker == 0:
            logger.log("WARNING: unequal number of envs per process, from "
                f"batch_B {self.batch_spec.B} and n_parallel {n_worker} "
                "(possible suboptimal speed).")
            for b in range(self.batch_spec.B % n_worker):
                n_envs_list[b] += 1  # Spread the remainder over first workers.
        if self.eval_n_envs > 0:
            eval_n_envs_per = max(1, self.eval_n_envs // len(n_envs_list))
            eval_n_envs = eval_n_envs_per * n_worker
            logger.log(f"Total parallel evaluation envs: {eval_n_envs}.")
            self.eval_max_T = 1 + int(self.eval_max_steps // eval_n_envs)
            self.eval_n_envs_per = eval_n_envs_per
        else:
            self.eval_n_envs_per = 0
            self.eval_max_T = 0
        ctrl = AttrDict(
            quit=mp.RawValue(ctypes.c_bool, False),
            barrier_in=mp.Barrier(n_server + n_worker + 1),
            barrier_out=mp.Barrier(n_server + n_worker + 1),
            do_eval=mp.RawValue(ctypes.c_bool, False),
            itr=mp.RawValue(ctypes.c_long, 0),
            # Created here, BEFORE forking, so master, servers, and workers
            # all share one early-stop flag for evaluation.  (Previously this
            # lived only in each server's `sync`, created after the fork, so
            # the master's writes could never reach the workers.)
            stop_eval=mp.RawValue(ctypes.c_bool, False),
        )
        traj_infos_queue = mp.Queue()
        common_kwargs = dict(
            ctrl=ctrl,
            traj_infos_queue=traj_infos_queue,
        )
        # Argument order matches assemble_servers_kwargs(double_buffer,
        # affinity, n_envs_list, seed).
        servers_kwargs = assemble_servers_kwargs(self.double_buffer, affinity,
            n_envs_list, self.seed)
        # NOTE: dict.update() returns None, so the merged kwargs dict must be
        # built explicitly (kwargs=s_kwargs.update(...) would pass None).
        servers = [mp.Process(target=self.action_server_process,
            kwargs=dict(s_kwargs, **common_kwargs))
            for s_kwargs in servers_kwargs]
        for s in servers:
            s.start()
        self.servers = servers
        self.ctrl = ctrl
        self.traj_infos_queue = traj_infos_queue

    def obtain_samples(self, itr):
        """Blocks while servers/workers collect one batch; returns traj infos."""
        self.ctrl.barrier_in.wait()
        # Sampling in sub-processes here.
        self.ctrl.barrier_out.wait()
        traj_infos = list()
        while self.traj_infos_queue.qsize():
            traj_infos.append(self.traj_infos_queue.get())
        return traj_infos

    def evaluate_agent(self, itr):
        """Runs evaluation in sub-processes, polling for early stopping."""
        self.ctrl.do_eval.value = True  # RawValue: must assign .value.
        self.ctrl.stop_eval.value = False
        self.ctrl.barrier_in.wait()
        traj_infos = list()
        if self.eval_max_trajectories is not None:
            while True:
                time.sleep(EVAL_TRAJ_CHECK)
                while self.traj_infos_queue.qsize():
                    traj_infos.append(self.traj_infos_queue.get())
                if len(traj_infos) >= self.eval_max_trajectories:
                    self.ctrl.stop_eval.value = True  # Signal servers/workers.
                    logger.log("Evaluation reached max num trajectories "
                        f"({self.eval_max_trajectories}).")
                    break  # Stop possibly before workers reach max_T.
                # Only this process remains outside barrier_out: workers done.
                if self.ctrl.barrier_out.parties - self.ctrl.barrier_out.n_waiting == 1:
                    logger.log("Evaluation reached max num time steps "
                        f"({self.eval_max_T}).")
                    break  # Workers reached max_T.
        self.ctrl.barrier_out.wait()
        while self.traj_infos_queue.qsize():
            traj_infos.append(self.traj_infos_queue.get())
        self.ctrl.do_eval.value = False
        return traj_infos

    def shutdown(self):
        """Signals quit, releases the barrier, and joins server processes."""
        self.ctrl.quit.value = True
        self.ctrl.barrier_in.wait()
        for s in self.servers:
            s.join()

    ###########################################################################
    # Methods in forked action server process.
    ###########################################################################

    def action_server_process(self, double_buffer_slice, ctrl, traj_infos_queue,
            affinity, seed, n_envs_list):
        """Runs in forked process, inherits from original process, so can easily
        pass args to env worker processes, forked from here."""
        self.ctrl = ctrl
        self.launch_workers(double_buffer_slice, traj_infos_queue, affinity,
            seed, n_envs_list)
        self.agent.initialize_cuda(cuda_idx=affinity["cuda_idx"], ddp=False)
        while True:
            self.ctrl.barrier_in.wait()
            if self.ctrl.quit.value:
                break
            self.agent.recv_shared_memory()
            if self.ctrl.do_eval.value:
                self.agent.eval_mode(self.ctrl.itr.value)
                # serve_actions_evaluation requires itr; pass the shared value.
                self.serve_actions_evaluation(self.ctrl.itr.value)
            else:
                self.agent.sample_mode(self.ctrl.itr.value)
                self.serve_actions()
            self.ctrl.barrier_out.wait()
        self.shutdown_workers()

    def serve_actions(self):
        """Serves one batch of actions to workers via shared step buffers."""
        step_blockers, act_waiters = self.sync.step_blockers, self.sync.act_waiters
        step_np, step_pyt = self.step_buffer_np, self.step_buffer_pyt
        agent_inputs = AgentInputs(step_pyt.observation, step_pyt.action,
            step_pyt.reward)  # Fixed buffer objects.
        for t in range(self.batch_spec.T):
            for b in step_blockers:
                b.acquire()  # Workers written obs and rew, first prev_act.
            if self.mid_batch_reset and np.any(step_np.done):
                for b_reset in np.where(step_np.done)[0]:
                    step_np.action[b_reset] = 0  # Null prev_action into agent.
                    step_np.reward[b_reset] = 0  # Null prev_reward into agent.
                    self.agent.reset_one(idx=b_reset)
            action, agent_info = self.agent.step(*agent_inputs)
            step_np.action[:] = action  # Worker applies to env.
            step_np.agent_info[:] = agent_info  # Worker sends to traj_info.
            for w in act_waiters:
                w.release()  # Signal to worker.
        for b in step_blockers:
            b.acquire()  # Wait for final obs/rew writes of the batch.
        if "bootstrap_value" in self.samples_np.agent:
            self.samples_np.agent.bootstrap_value[:] = self.agent.value(
                *agent_inputs)
        if np.any(step_np.done):  # Reset at end of batch; ready for next.
            for b_reset in np.where(step_np.done)[0]:
                step_np.action[b_reset] = 0  # Null prev_action into agent.
                step_np.reward[b_reset] = 0  # Null prev_reward into agent.
                self.agent.reset_one(idx=b_reset)
            # step_np.done[:] = False  # Worker resets at start of next.

    def serve_actions_evaluation(self, itr):
        """Serves actions during evaluation until max_T or early stop.

        :param itr: Current iteration (kept for interface symmetry with
            ``serve_actions``; not used in the body).
        """
        step_blockers, act_waiters = self.sync.step_blockers, self.sync.act_waiters
        step_np, step_pyt = self.eval_step_buffer_np, self.eval_step_buffer_pyt
        self.agent.reset()
        agent_inputs = AgentInputs(step_pyt.observation, step_pyt.action,
            step_pyt.reward)  # Fixed buffer objects.
        for t in range(self.eval_max_T):
            for b in step_blockers:
                b.acquire()
            for b_reset in np.where(step_np.done)[0]:
                step_np.action[b_reset] = 0  # Null prev_action.
                step_np.reward[b_reset] = 0  # Null prev_reward.
                self.agent.reset_one(idx=b_reset)
            action, agent_info = self.agent.step(*agent_inputs)
            step_np.action[:] = action
            step_np.agent_info[:] = agent_info
            for w in act_waiters:
                w.release()
            if self.sync.stop_eval.value:
                break  # TODO: Double-check where this goes relative to semas.
        for b in step_blockers:
            b.acquire()  # Workers always do extra release; drain it.

    def launch_workers(self, double_buffer, traj_infos_queue, affinity,
            seed, n_envs_list, eval_n_envs_per=None):
        """Forks env worker processes (runs inside an action-server process).

        :param eval_n_envs_per: Unused; ``self.eval_n_envs_per`` (inherited
            via fork) is the source of truth.  A default is provided because
            ``action_server_process`` does not pass it.
        """
        n_worker = len(affinity["workers_cpus"])
        sync = AttrDict(
            step_blockers=[mp.Semaphore(0) for _ in range(n_worker)],
            act_waiters=[mp.Semaphore(0) for _ in range(n_worker)],
            # Alias the master-created flag so master writes reach workers.
            stop_eval=self.ctrl.stop_eval,
        )
        step_buffer_pyt, step_buffer_np = build_step_buffer(self.examples,
            sum(n_envs_list))
        if self.eval_n_envs_per > 0:
            eval_n_envs = self.eval_n_envs_per * n_worker
            eval_step_buffer_pyt, eval_step_buffer_np = build_step_buffer(
                self.examples, eval_n_envs)
            self.eval_step_buffer_pyt = eval_step_buffer_pyt
            self.eval_step_buffer_np = eval_step_buffer_np
        else:
            eval_step_buffer_np = None
        common_kwargs = dict(
            EnvCls=self.EnvCls,
            env_kwargs=self.env_kwargs,
            agent=None,
            batch_T=self.batch_spec.T,
            CollectorCls=self.CollectorCls,
            TrajInfoCls=self.TrajInfoCls,
            traj_infos_queue=traj_infos_queue,
            ctrl=self.ctrl,
            max_decorrelation_steps=self.max_decorrelation_steps,
            eval_n_envs=self.eval_n_envs_per,
            eval_CollectorCls=self.eval_CollectorCls or EvalCollector,
            eval_env_kwargs=self.eval_env_kwargs,
            eval_max_T=self.eval_max_T,
        )
        workers_kwargs = assemble_workers_kwargs(affinity, seed, double_buffer,
            n_envs_list, step_buffer_np, sync, self.eval_n_envs_per,
            eval_step_buffer_np)
        workers = [mp.Process(target=sampling_process,
            kwargs=dict(common_kwargs=common_kwargs, worker_kwargs=w_kwargs))
            for w_kwargs in workers_kwargs]
        for w in workers:
            w.start()
        self.workers = workers
        self.step_buffer_pyt = step_buffer_pyt
        self.step_buffer_np = step_buffer_np
        self.sync = sync
        self.mid_batch_reset = self.CollectorCls.mid_batch_reset

    def shutdown_workers(self):
        """Joins worker processes (quit already signaled by central master)."""
        for w in self.workers:
            w.join()  # Already signaled by central master.
def assemble_servers_kwargs(double_buffer, affinity, n_envs_list, seed):
    """Partitions buffers, envs, and seeds among action-server processes.

    :param double_buffer: Tuple of sample buffers; each is sliced along the
        batch (second) dimension per server.
    :param affinity: List of per-server dicts, each with "workers_cpus".
    :param n_envs_list: Number of envs per worker, concatenated over servers.
    :param seed: Base seed; each server's seed is offset by its first
        worker index.
    :return: List of per-server kwargs dicts.
    """
    servers_kwargs = list()
    i_env = 0
    i_worker = 0
    for rank in range(len(affinity)):
        # Bug fix: `affinity` is a list of dicts, so it must be indexed by
        # rank before looking up "workers_cpus".
        n_worker = len(affinity[rank]["workers_cpus"])
        n_env = sum(n_envs_list[i_worker:i_worker + n_worker])
        slice_B = slice(i_env, i_env + n_env)
        server_kwargs = dict(
            double_buffer_slice=tuple(buf[:, slice_B] for buf in double_buffer),
            affinity=affinity[rank],
            n_envs_list=n_envs_list[i_worker:i_worker + n_worker],
            seed=seed + i_worker,
        )
        servers_kwargs.append(server_kwargs)
        i_worker += n_worker
        i_env += n_env
    return servers_kwargs
def assemble_workers_kwargs(affinity, seed, double_buffer, n_envs_list,
        step_buffer_np, sync, eval_n_envs, eval_step_buffer_np):
    """Builds the kwargs dict for each env worker of one action server.

    Slices the shared sample and step buffers along the batch dimension and
    hands each worker its own semaphore pair from ``sync``.
    """
    workers_kwargs = []
    env_offset = 0
    for worker_rank in range(len(affinity["workers_cpus"])):
        worker_n_envs = n_envs_list[worker_rank]
        batch_slice = slice(env_offset, env_offset + worker_n_envs)
        env_offset += worker_n_envs
        per_worker_sync = AttrDict(
            step_blocker=sync.step_blockers[worker_rank],
            act_waiter=sync.act_waiters[worker_rank],
            stop_eval=sync.stop_eval,
        )
        one_worker_kwargs = dict(
            rank=worker_rank,
            seed=seed + worker_rank,
            cpus=affinity["workers_cpus"][worker_rank],
            n_envs=worker_n_envs,
            samples_np=tuple(buf[:, batch_slice] for buf in double_buffer),
            step_buffer_np=step_buffer_np[batch_slice],
            sync=per_worker_sync,
        )
        if eval_n_envs > 0:
            eval_slice = slice(worker_rank * eval_n_envs,
                (worker_rank + 1) * eval_n_envs)
            one_worker_kwargs["eval_step_buffer_np"] = (
                eval_step_buffer_np[eval_slice])
        workers_kwargs.append(one_worker_kwargs)
    return workers_kwargs
| [
"rlpyt.utils.collections.AttrDict",
"rlpyt.samplers.utils.build_samples_buffer",
"numpy.where",
"numpy.any",
"rlpyt.utils.logging.logger.log",
"multiprocessing.RawValue",
"time.sleep",
"multiprocessing.Barrier",
"multiprocessing.Semaphore",
"rlpyt.agents.base.AgentInputs",
"multiprocessing.Queue... | [((1065, 1189), 'rlpyt.samplers.utils.build_samples_buffer', 'build_samples_buffer', (['agent', 'env', 'self.batch_spec', 'bootstrap_value'], {'agent_shared': '(True)', 'env_shared': '(True)', 'subprocess': '(False)'}), '(agent, env, self.batch_spec, bootstrap_value,\n agent_shared=True, env_shared=True, subprocess=False)\n', (1085, 1189), False, 'from rlpyt.samplers.utils import build_samples_buffer, build_step_buffer\n'), ((1285, 1409), 'rlpyt.samplers.utils.build_samples_buffer', 'build_samples_buffer', (['agent', 'env', 'self.batch_spec', 'bootstrap_value'], {'agent_shared': '(True)', 'env_shared': '(True)', 'subprocess': '(False)'}), '(agent, env, self.batch_spec, bootstrap_value,\n agent_shared=True, env_shared=True, subprocess=False)\n', (1305, 1409), False, 'from rlpyt.samplers.utils import build_samples_buffer, build_step_buffer\n'), ((3272, 3282), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (3280, 3282), True, 'import multiprocessing as mp\n'), ((6885, 6952), 'rlpyt.agents.base.AgentInputs', 'AgentInputs', (['step_pyt.observation', 'step_pyt.action', 'step_pyt.reward'], {}), '(step_pyt.observation, step_pyt.action, step_pyt.reward)\n', (6896, 6952), False, 'from rlpyt.agents.base import AgentInputs\n'), ((7996, 8016), 'numpy.any', 'np.any', (['step_np.done'], {}), '(step_np.done)\n', (8002, 8016), True, 'import numpy as np\n'), ((8649, 8716), 'rlpyt.agents.base.AgentInputs', 'AgentInputs', (['step_pyt.observation', 'step_pyt.action', 'step_pyt.reward'], {}), '(step_pyt.observation, step_pyt.action, step_pyt.reward)\n', (8660, 8716), False, 'from rlpyt.agents.base import AgentInputs\n'), ((12829, 12942), 'rlpyt.utils.collections.AttrDict', 'AttrDict', ([], {'step_blocker': 'sync.step_blockers[rank]', 'act_waiter': 'sync.act_waiters[rank]', 'stop_eval': 'sync.stop_eval'}), '(step_blocker=sync.step_blockers[rank], act_waiter=sync.act_waiters\n [rank], stop_eval=sync.stop_eval)\n', (12837, 12942), False, 'from 
rlpyt.utils.collections import AttrDict\n'), ((2210, 2367), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""WARNING: unequal number of envs per process, from batch_B {self.batch_spec.B} and n_parallel {n_worker} (possible suboptimal speed)."""'], {}), "(\n f'WARNING: unequal number of envs per process, from batch_B {self.batch_spec.B} and n_parallel {n_worker} (possible suboptimal speed).'\n )\n", (2220, 2367), False, 'from rlpyt.utils.logging import logger\n'), ((2664, 2725), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""Total parallel evaluation envs: {eval_n_envs}."""'], {}), "(f'Total parallel evaluation envs: {eval_n_envs}.')\n", (2674, 2725), False, 'from rlpyt.utils.logging import logger\n'), ((10187, 10232), 'rlpyt.samplers.utils.build_step_buffer', 'build_step_buffer', (['self.examples', 'eval_n_envs'], {}), '(self.examples, eval_n_envs)\n', (10204, 10232), False, 'from rlpyt.samplers.utils import build_samples_buffer, build_step_buffer\n'), ((2977, 3010), 'multiprocessing.RawValue', 'mp.RawValue', (['ctypes.c_bool', '(False)'], {}), '(ctypes.c_bool, False)\n', (2988, 3010), True, 'import multiprocessing as mp\n'), ((3035, 3070), 'multiprocessing.Barrier', 'mp.Barrier', (['(n_server + n_worker + 1)'], {}), '(n_server + n_worker + 1)\n', (3045, 3070), True, 'import multiprocessing as mp\n'), ((3096, 3131), 'multiprocessing.Barrier', 'mp.Barrier', (['(n_server + n_worker + 1)'], {}), '(n_server + n_worker + 1)\n', (3106, 3131), True, 'import multiprocessing as mp\n'), ((3153, 3186), 'multiprocessing.RawValue', 'mp.RawValue', (['ctypes.c_bool', '(False)'], {}), '(ctypes.c_bool, False)\n', (3164, 3186), True, 'import multiprocessing as mp\n'), ((3204, 3233), 'multiprocessing.RawValue', 'mp.RawValue', (['ctypes.c_long', '(0)'], {}), '(ctypes.c_long, 0)\n', (3215, 3233), True, 'import multiprocessing as mp\n'), ((4403, 4430), 'time.sleep', 'time.sleep', (['EVAL_TRAJ_CHECK'], {}), '(EVAL_TRAJ_CHECK)\n', (4413, 4430), False, 'import time\n'), 
((7186, 7206), 'numpy.any', 'np.any', (['step_np.done'], {}), '(step_np.done)\n', (7192, 7206), True, 'import numpy as np\n'), ((8087, 8109), 'numpy.where', 'np.where', (['step_np.done'], {}), '(step_np.done)\n', (8095, 8109), True, 'import numpy as np\n'), ((8887, 8909), 'numpy.where', 'np.where', (['step_np.done'], {}), '(step_np.done)\n', (8895, 8909), True, 'import numpy as np\n'), ((9885, 9918), 'multiprocessing.RawValue', 'mp.RawValue', (['ctypes.c_bool', '(False)'], {}), '(ctypes.c_bool, False)\n', (9896, 9918), True, 'import multiprocessing as mp\n'), ((4690, 4781), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""Evaluation reached max num trajectories ({self.eval_max_trajectories})."""'], {}), "(\n f'Evaluation reached max num trajectories ({self.eval_max_trajectories}).')\n", (4700, 4781), False, 'from rlpyt.utils.logging import logger\n'), ((4984, 5057), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""Evaluation reached max num time steps ({self.eval_max_T})."""'], {}), "(f'Evaluation reached max num time steps ({self.eval_max_T}).')\n", (4994, 5057), False, 'from rlpyt.utils.logging import logger\n'), ((7239, 7261), 'numpy.where', 'np.where', (['step_np.done'], {}), '(step_np.done)\n', (7247, 7261), True, 'import numpy as np\n'), ((9752, 9767), 'multiprocessing.Semaphore', 'mp.Semaphore', (['(0)'], {}), '(0)\n', (9764, 9767), True, 'import multiprocessing as mp\n'), ((9820, 9835), 'multiprocessing.Semaphore', 'mp.Semaphore', (['(0)'], {}), '(0)\n', (9832, 9835), True, 'import multiprocessing as mp\n')] |
import numpy as np
def _recall_values(labels, x_absolute=False, y_absolute=False):
n_docs = len(labels)
n_pos_docs = sum(labels)
x = np.arange(1, n_docs + 1)
recall = np.cumsum(labels)
if not x_absolute:
x = x / n_docs
if y_absolute:
y = recall
else:
y = recall / n_pos_docs
return x.tolist(), y.tolist()
def _wss_values(labels, x_absolute=False, y_absolute=False):
n_docs = len(labels)
n_pos_docs = sum(labels)
docs_found = np.cumsum(labels)
docs_found_random = np.round(np.linspace(0, n_pos_docs, n_docs))
# Get the first occurrence of 1, 2, 3, ..., n_pos_docs in both arrays.
when_found = np.searchsorted(docs_found, np.arange(1, n_pos_docs + 1))
when_found_random = np.searchsorted(docs_found_random,
np.arange(1, n_pos_docs + 1))
n_found_earlier = when_found_random - when_found
x = np.arange(1, n_pos_docs + 1)
if not x_absolute:
x = x / n_pos_docs
if y_absolute:
y = n_found_earlier
else:
y = n_found_earlier / n_docs
return x.tolist(), y.tolist()
def _erf_values(labels, x_absolute=False, y_absolute=False):
n_docs = len(labels)
n_pos_docs = sum(labels)
docs_found = np.cumsum(labels)
docs_found_random = np.round(np.linspace(0, n_pos_docs, n_docs))
extra_records_found = docs_found - docs_found_random
x = np.arange(1, n_docs + 1)
if not x_absolute:
x = x / n_docs
if y_absolute:
y = extra_records_found
else:
y = extra_records_found / n_pos_docs
return x.tolist(), y.tolist()
| [
"numpy.cumsum",
"numpy.linspace",
"numpy.arange"
] | [((148, 172), 'numpy.arange', 'np.arange', (['(1)', '(n_docs + 1)'], {}), '(1, n_docs + 1)\n', (157, 172), True, 'import numpy as np\n'), ((186, 203), 'numpy.cumsum', 'np.cumsum', (['labels'], {}), '(labels)\n', (195, 203), True, 'import numpy as np\n'), ((502, 519), 'numpy.cumsum', 'np.cumsum', (['labels'], {}), '(labels)\n', (511, 519), True, 'import numpy as np\n'), ((931, 959), 'numpy.arange', 'np.arange', (['(1)', '(n_pos_docs + 1)'], {}), '(1, n_pos_docs + 1)\n', (940, 959), True, 'import numpy as np\n'), ((1276, 1293), 'numpy.cumsum', 'np.cumsum', (['labels'], {}), '(labels)\n', (1285, 1293), True, 'import numpy as np\n'), ((1430, 1454), 'numpy.arange', 'np.arange', (['(1)', '(n_docs + 1)'], {}), '(1, n_docs + 1)\n', (1439, 1454), True, 'import numpy as np\n'), ((553, 587), 'numpy.linspace', 'np.linspace', (['(0)', 'n_pos_docs', 'n_docs'], {}), '(0, n_pos_docs, n_docs)\n', (564, 587), True, 'import numpy as np\n'), ((710, 738), 'numpy.arange', 'np.arange', (['(1)', '(n_pos_docs + 1)'], {}), '(1, n_pos_docs + 1)\n', (719, 738), True, 'import numpy as np\n'), ((839, 867), 'numpy.arange', 'np.arange', (['(1)', '(n_pos_docs + 1)'], {}), '(1, n_pos_docs + 1)\n', (848, 867), True, 'import numpy as np\n'), ((1327, 1361), 'numpy.linspace', 'np.linspace', (['(0)', 'n_pos_docs', 'n_docs'], {}), '(0, n_pos_docs, n_docs)\n', (1338, 1361), True, 'import numpy as np\n')] |
"""Plots Grad-CAM output (guided and unguided class-activation maps)."""
import os
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pyplot
from gewittergefahr.gg_utils import general_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import gradcam
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import cam_plotting
from gewittergefahr.plotting import saliency_plotting
from gewittergefahr.scripts import plot_input_examples as plot_examples
TIME_FORMAT = '%Y-%m-%d-%H%M%S'
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
COLOUR_BAR_FONT_SIZE = plot_examples.DEFAULT_CBAR_FONT_SIZE
FIGURE_RESOLUTION_DPI = 300
INPUT_FILE_ARG_NAME = 'input_file_name'
COLOUR_MAP_ARG_NAME = 'colour_map_name'
MIN_UNGUIDED_VALUE_ARG_NAME = 'min_unguided_value'
MAX_UNGUIDED_VALUE_ARG_NAME = 'max_unguided_value'
NUM_UNGUIDED_CONTOURS_ARG_NAME = 'num_unguided_contours'
MAX_GUIDED_VALUE_ARG_NAME = 'max_guided_value'
HALF_NUM_GUIDED_CONTOURS_ARG_NAME = 'half_num_guided_contours'
SMOOTHING_RADIUS_ARG_NAME = 'smoothing_radius_grid_cells'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
ALLOW_WHITESPACE_ARG_NAME = plot_examples.ALLOW_WHITESPACE_ARG_NAME
PLOT_PANEL_NAMES_ARG_NAME = plot_examples.PLOT_PANEL_NAMES_ARG_NAME
ADD_TITLES_ARG_NAME = plot_examples.ADD_TITLES_ARG_NAME
LABEL_CBARS_ARG_NAME = plot_examples.LABEL_CBARS_ARG_NAME
CBAR_LENGTH_ARG_NAME = plot_examples.CBAR_LENGTH_ARG_NAME
INPUT_FILE_HELP_STRING = (
'Path to input file. Will be read by `gradcam.read_file`.'
)
COLOUR_MAP_HELP_STRING = (
'Name of colour map for class activations. The same colour map will be '
'used for all predictors and examples. This argument supports only pyplot '
'colour maps (those accepted by `pyplot.get_cmap`).'
)
MIN_UNGUIDED_VALUE_HELP_STRING = (
'Minimum value in colour scheme for unguided CAMs. Keep in mind that '
'unguided class activation >= 0 always.'
)
MAX_UNGUIDED_VALUE_HELP_STRING = (
'Max value in colour scheme for unguided CAMs. Keep in mind that unguided '
'class activation >= 0 always.'
)
NUM_UNGUIDED_CONTOURS_HELP_STRING = 'Number of contours for unguided CAMs.'
MAX_GUIDED_VALUE_HELP_STRING = (
'Max value in colour scheme for guided CAMs. Keep in mind that the colour '
'scheme encodes *absolute* value, with positive values in solid contours '
'and negative values in dashed contours.'
)
HALF_NUM_GUIDED_CONTOURS_HELP_STRING = (
'Number of contours on each side of zero for guided CAMs.'
)
SMOOTHING_RADIUS_HELP_STRING = (
'e-folding radius for Gaussian smoother (num grid cells). If you do not '
'want to smooth class-activation maps, make this non-positive.'
)
OUTPUT_DIR_HELP_STRING = (
'Path to output directory. Figures will be saved here.'
)
ALLOW_WHITESPACE_HELP_STRING = plot_examples.ALLOW_WHITESPACE_HELP_STRING
PLOT_PANEL_NAMES_HELP_STRING = plot_examples.PLOT_PANEL_NAMES_HELP_STRING
ADD_TITLES_HELP_STRING = plot_examples.ADD_TITLES_HELP_STRING
LABEL_CBARS_HELP_STRING = plot_examples.LABEL_CBARS_HELP_STRING
CBAR_LENGTH_HELP_STRING = plot_examples.CBAR_LENGTH_HELP_STRING
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + INPUT_FILE_ARG_NAME, type=str, required=True,
help=INPUT_FILE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + COLOUR_MAP_ARG_NAME, type=str, required=False, default='gist_yarg',
help=COLOUR_MAP_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + MAX_UNGUIDED_VALUE_ARG_NAME, type=float, required=False,
default=10 ** 1.5, help=MAX_UNGUIDED_VALUE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + MIN_UNGUIDED_VALUE_ARG_NAME, type=float, required=False,
default=0.01, help=MIN_UNGUIDED_VALUE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_UNGUIDED_CONTOURS_ARG_NAME, type=int, required=False,
default=15, help=NUM_UNGUIDED_CONTOURS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + MAX_GUIDED_VALUE_ARG_NAME, type=float, required=False,
default=0.5, help=MAX_GUIDED_VALUE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + HALF_NUM_GUIDED_CONTOURS_ARG_NAME, type=int, required=False,
default=10, help=HALF_NUM_GUIDED_CONTOURS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + SMOOTHING_RADIUS_ARG_NAME, type=float, required=False,
default=2., help=SMOOTHING_RADIUS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
help=OUTPUT_DIR_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + ALLOW_WHITESPACE_ARG_NAME, type=int, required=False, default=1,
help=ALLOW_WHITESPACE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + PLOT_PANEL_NAMES_ARG_NAME, type=int, required=False, default=1,
help=PLOT_PANEL_NAMES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + ADD_TITLES_ARG_NAME, type=int, required=False, default=1,
help=ADD_TITLES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + LABEL_CBARS_ARG_NAME, type=int, required=False, default=0,
help=LABEL_CBARS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + CBAR_LENGTH_ARG_NAME, type=float, required=False, default=0.8,
help=CBAR_LENGTH_HELP_STRING
)
def _plot_3d_radar_cam(
        colour_map_object, min_unguided_value, max_unguided_value,
        num_unguided_contours, max_guided_value, half_num_guided_contours,
        label_colour_bars, colour_bar_length, figure_objects,
        axes_object_matrices, model_metadata_dict, output_dir_name,
        cam_matrix=None, guided_cam_matrix=None, full_storm_id_string=None,
        storm_time_unix_sec=None):
    """Plots class-activation map for 3-D radar data.
    M = number of rows in spatial grid
    N = number of columns in spatial grid
    H = number of heights in spatial grid
    F = number of radar fields
    If this method is plotting a composite rather than single example (storm
    object), `full_storm_id_string` and `storm_time_unix_sec` can be None.
    :param colour_map_object: See documentation at top of file.
    :param min_unguided_value: Same.
    :param max_unguided_value: Same.
    :param num_unguided_contours: Same.
    :param max_guided_value: Same.
    :param half_num_guided_contours: Same.
    :param label_colour_bars: Same.
    :param colour_bar_length: Same.
    :param figure_objects: See doc for
        `plot_input_examples._plot_3d_radar_scan`.
    :param axes_object_matrices: Same.
    :param model_metadata_dict: Dictionary returned by
        `cnn.read_model_metadata`.
    :param output_dir_name: Path to output directory.  Figure(s) will be saved
        here.
    :param cam_matrix: M-by-N-by-H numpy array of unguided class activations.
    :param guided_cam_matrix: [used only if `cam_matrix is None`]
        M-by-N-by-H-by-F numpy array of guided class activations.
    :param full_storm_id_string: Full storm ID.
    :param storm_time_unix_sec: Storm time.
    """
    # No storm metadata means this is a composite (probability-matched mean).
    pmm_flag = full_storm_id_string is None and storm_time_unix_sec is None
    conv_2d3d = model_metadata_dict[cnn.CONV_2D3D_KEY]
    if conv_2d3d:
        # 2D/3D hybrid nets: only one 3-D figure (reflectivity).
        loop_max = 1
        radar_field_names = ['reflectivity']
    else:
        loop_max = len(figure_objects)
        training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
        radar_field_names = training_option_dict[trainval_io.RADAR_FIELDS_KEY]
    # Unguided-CAM contours are spaced evenly in log10 space.
    min_unguided_value_log10 = numpy.log10(min_unguided_value)
    max_unguided_value_log10 = numpy.log10(max_unguided_value)
    contour_interval_log10 = (
        (max_unguided_value_log10 - min_unguided_value_log10) /
        (num_unguided_contours - 1)
    )
    for j in range(loop_max):
        if cam_matrix is None:
            # Guided CAM: one channel per radar field; plotted like saliency.
            # numpy.flip reverses the row axis — presumably to match the
            # underlying radar images' orientation; confirm against
            # plot_input_examples.
            saliency_plotting.plot_many_2d_grids_with_contours(
                saliency_matrix_3d=numpy.flip(
                    guided_cam_matrix[..., j], axis=0
                ),
                axes_object_matrix=axes_object_matrices[j],
                colour_map_object=colour_map_object,
                max_absolute_contour_level=max_guided_value,
                contour_interval=max_guided_value / half_num_guided_contours
            )
            this_colour_bar_object = plotting_utils.plot_linear_colour_bar(
                axes_object_or_matrix=axes_object_matrices[j],
                data_matrix=guided_cam_matrix[..., j],
                colour_map_object=colour_map_object, min_value=0.,
                max_value=max_guided_value, orientation_string='horizontal',
                fraction_of_axis_length=colour_bar_length,
                extend_min=False, extend_max=True,
                font_size=COLOUR_BAR_FONT_SIZE
            )
            if label_colour_bars:
                this_colour_bar_object.set_label(
                    'Absolute guided class activation',
                    fontsize=COLOUR_BAR_FONT_SIZE
                )
        else:
            # Unguided CAM: same spatial map is contoured on every figure.
            cam_matrix_log10 = numpy.log10(cam_matrix)
            cam_plotting.plot_many_2d_grids(
                class_activation_matrix_3d=numpy.flip(cam_matrix_log10, axis=0),
                axes_object_matrix=axes_object_matrices[j],
                colour_map_object=colour_map_object,
                min_contour_level=min_unguided_value_log10,
                max_contour_level=max_unguided_value_log10,
                contour_interval=contour_interval_log10
            )
            this_colour_bar_object = plotting_utils.plot_linear_colour_bar(
                axes_object_or_matrix=axes_object_matrices[j],
                data_matrix=cam_matrix_log10,
                colour_map_object=colour_map_object,
                min_value=min_unguided_value_log10,
                max_value=max_unguided_value_log10,
                orientation_string='horizontal',
                fraction_of_axis_length=colour_bar_length,
                extend_min=True, extend_max=True,
                font_size=COLOUR_BAR_FONT_SIZE
            )
            # Colour bar is linear in log10 space; relabel ticks with the
            # corresponding linear values (truncated to 4 characters).
            these_tick_values = this_colour_bar_object.get_ticks()
            these_tick_strings = [
                '{0:.2f}'.format(10 ** v)[:4] for v in these_tick_values
            ]
            this_colour_bar_object.set_ticks(these_tick_values)
            this_colour_bar_object.set_ticklabels(these_tick_strings)
            if label_colour_bars:
                this_colour_bar_object.set_label(
                    'Class activation', fontsize=COLOUR_BAR_FONT_SIZE
                )
        this_file_name = plot_examples.metadata_to_file_name(
            output_dir_name=output_dir_name, is_sounding=False,
            pmm_flag=pmm_flag, full_storm_id_string=full_storm_id_string,
            storm_time_unix_sec=storm_time_unix_sec,
            radar_field_name=radar_field_names[j]
        )
        print('Saving figure to: "{0:s}"...'.format(this_file_name))
        figure_objects[j].savefig(
            this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
            bbox_inches='tight'
        )
        # Close the figure to free memory; figures are not reused afterward.
        pyplot.close(figure_objects[j])
def _plot_2d_radar_cam(
        colour_map_object, min_unguided_value, max_unguided_value,
        num_unguided_contours, max_guided_value, half_num_guided_contours,
        label_colour_bars, colour_bar_length, figure_objects,
        axes_object_matrices, model_metadata_dict, output_dir_name,
        cam_matrix=None, guided_cam_matrix=None, full_storm_id_string=None,
        storm_time_unix_sec=None):
    """Plots class-activation map for 2-D radar data.
    M = number of rows in spatial grid
    N = number of columns in spatial grid
    F = number of radar fields
    If this method is plotting a composite rather than single example (storm
    object), `full_storm_id_string` and `storm_time_unix_sec` can be None.
    :param colour_map_object: See doc for `_plot_3d_radar_cam`.
    :param min_unguided_value: Same.
    :param max_unguided_value: Same.
    :param num_unguided_contours: Same.
    :param max_guided_value: Same.
    :param half_num_guided_contours: Same.
    :param label_colour_bars: Same.
    :param colour_bar_length: Same.
    :param figure_objects: See doc for
        `plot_input_examples._plot_2d_radar_scan`.
    :param axes_object_matrices: Same.
    :param model_metadata_dict: See doc for `_plot_3d_radar_cam`.
    :param output_dir_name: Same.
    :param cam_matrix: M-by-N numpy array of unguided class activations.
    :param guided_cam_matrix: [used only if `cam_matrix is None`]
        M-by-N-by-F numpy array of guided class activations.
    :param full_storm_id_string: Full storm ID.
    :param storm_time_unix_sec: Storm time.
    """
    # No storm metadata means this is a composite (probability-matched mean).
    pmm_flag = full_storm_id_string is None and storm_time_unix_sec is None
    conv_2d3d = model_metadata_dict[cnn.CONV_2D3D_KEY]
    if conv_2d3d:
        # Hybrid nets: the 2-D figure (index 1) holds the shear fields.
        figure_index = 1
        radar_field_name = 'shear'
    else:
        figure_index = 0
        radar_field_name = None
    # Number of channels comes from layer operations if defined, else from
    # the training radar fields.
    list_of_layer_operation_dicts = model_metadata_dict[
        cnn.LAYER_OPERATIONS_KEY]
    if list_of_layer_operation_dicts is None:
        training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
        radar_field_names = training_option_dict[trainval_io.RADAR_FIELDS_KEY]
        num_channels = len(radar_field_names)
    else:
        num_channels = len(list_of_layer_operation_dicts)
    # Unguided-CAM contours are spaced evenly in log10 space.
    min_unguided_value_log10 = numpy.log10(min_unguided_value)
    max_unguided_value_log10 = numpy.log10(max_unguided_value)
    contour_interval_log10 = (
        (max_unguided_value_log10 - min_unguided_value_log10) /
        (num_unguided_contours - 1)
    )
    if cam_matrix is None:
        # Guided CAM: channel-resolved, plotted like saliency contours.
        saliency_plotting.plot_many_2d_grids_with_contours(
            saliency_matrix_3d=numpy.flip(guided_cam_matrix, axis=0),
            axes_object_matrix=axes_object_matrices[figure_index],
            colour_map_object=colour_map_object,
            max_absolute_contour_level=max_guided_value,
            contour_interval=max_guided_value / half_num_guided_contours,
            row_major=False
        )
        this_colour_bar_object = plotting_utils.plot_linear_colour_bar(
            axes_object_or_matrix=axes_object_matrices[figure_index],
            data_matrix=guided_cam_matrix,
            colour_map_object=colour_map_object, min_value=0.,
            max_value=max_guided_value, orientation_string='horizontal',
            # Halve the colour-bar length for hybrid (2D/3D) layouts.
            fraction_of_axis_length=colour_bar_length / (1 + int(conv_2d3d)),
            extend_min=False, extend_max=True,
            font_size=COLOUR_BAR_FONT_SIZE
        )
        if label_colour_bars:
            this_colour_bar_object.set_label(
                'Absolute guided class activation',
                fontsize=COLOUR_BAR_FONT_SIZE
            )
    else:
        # Unguided CAM is a single M-by-N map; replicate it across channels
        # so each panel shows the same contours.
        this_cam_matrix_log10 = numpy.log10(
            numpy.expand_dims(cam_matrix, axis=-1)
        )
        this_cam_matrix_log10 = numpy.repeat(
            this_cam_matrix_log10, repeats=num_channels, axis=-1
        )
        cam_plotting.plot_many_2d_grids(
            class_activation_matrix_3d=numpy.flip(
                this_cam_matrix_log10, axis=0
            ),
            axes_object_matrix=axes_object_matrices[figure_index],
            colour_map_object=colour_map_object,
            min_contour_level=min_unguided_value_log10,
            max_contour_level=max_unguided_value_log10,
            contour_interval=contour_interval_log10, row_major=False
        )
        this_colour_bar_object = plotting_utils.plot_linear_colour_bar(
            axes_object_or_matrix=axes_object_matrices[figure_index],
            data_matrix=this_cam_matrix_log10,
            colour_map_object=colour_map_object,
            min_value=min_unguided_value_log10,
            max_value=max_unguided_value_log10,
            orientation_string='horizontal',
            fraction_of_axis_length=colour_bar_length / (1 + int(conv_2d3d)),
            extend_min=True, extend_max=True,
            font_size=COLOUR_BAR_FONT_SIZE
        )
        # Colour bar is linear in log10 space; relabel ticks with the
        # corresponding linear values (truncated to 4 characters).
        these_tick_values = this_colour_bar_object.get_ticks()
        these_tick_strings = [
            '{0:.2f}'.format(10 ** v)[:4] for v in these_tick_values
        ]
        this_colour_bar_object.set_ticks(these_tick_values)
        this_colour_bar_object.set_ticklabels(these_tick_strings)
        if label_colour_bars:
            this_colour_bar_object.set_label(
                'Class activation', fontsize=COLOUR_BAR_FONT_SIZE
            )
    output_file_name = plot_examples.metadata_to_file_name(
        output_dir_name=output_dir_name, is_sounding=False, pmm_flag=pmm_flag,
        full_storm_id_string=full_storm_id_string,
        storm_time_unix_sec=storm_time_unix_sec,
        radar_field_name=radar_field_name
    )
    print('Saving figure to: "{0:s}"...'.format(output_file_name))
    figure_objects[figure_index].savefig(
        output_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    # Close the figure to free memory; figures are not reused afterward.
    pyplot.close(figure_objects[figure_index])
def _smooth_maps(cam_matrices, guided_cam_matrices,
                 smoothing_radius_grid_cells):
    """Smooths guided and unguided class-activation maps with a Gaussian filter.

    T = number of input tensors to the model

    :param cam_matrices: length-T list of numpy arrays with unguided class-
        activation maps (CAMs).  List items may be None.
    :param guided_cam_matrices: length-T list of numpy arrays with guided CAMs.
    :param smoothing_radius_grid_cells: e-folding radius (number of grid cells).
    :return: cam_matrices: Smoothed version of input (smoothed in place).
    :return: guided_cam_matrices: Smoothed version of input (smoothed in place).
    """

    print((
        'Smoothing guided and unguided CAMs with Gaussian filter (e-folding '
        'radius of {0:.1f} grid cells)...'
    ).format(smoothing_radius_grid_cells))

    for matrix_index, this_cam_matrix in enumerate(cam_matrices):
        if this_cam_matrix is None:
            continue

        this_guided_matrix = guided_cam_matrices[matrix_index]

        # Smooth each example (and, for guided CAMs, each channel) separately.
        for example_index in range(this_cam_matrix.shape[0]):
            this_cam_matrix[example_index, ...] = (
                general_utils.apply_gaussian_filter(
                    input_matrix=this_cam_matrix[example_index, ...],
                    e_folding_radius_grid_cells=smoothing_radius_grid_cells
                )
            )

            for channel_index in range(this_guided_matrix.shape[-1]):
                this_guided_matrix[example_index, ..., channel_index] = (
                    general_utils.apply_gaussian_filter(
                        input_matrix=this_guided_matrix[
                            example_index, ..., channel_index],
                        e_folding_radius_grid_cells=smoothing_radius_grid_cells
                    )
                )

    return cam_matrices, guided_cam_matrices
def _run(input_file_name, colour_map_name, min_unguided_value,
         max_unguided_value, max_guided_value, num_unguided_contours,
         half_num_guided_contours, smoothing_radius_grid_cells,
         allow_whitespace, plot_panel_names, add_titles, label_colour_bars,
         colour_bar_length, top_output_dir_name):
    """Plots Grad-CAM output (guided and unguided class-activation maps).

    This is effectively the main method.

    :param input_file_name: See documentation at top of file.
    :param colour_map_name: Same.
    :param min_unguided_value: Same.
    :param max_unguided_value: Same.
    :param max_guided_value: Same.
    :param num_unguided_contours: Same.
    :param half_num_guided_contours: Same.
    :param smoothing_radius_grid_cells: Same.
    :param allow_whitespace: Same.
    :param plot_panel_names: Same.
    :param add_titles: Same.
    :param label_colour_bars: Same.
    :param colour_bar_length: Same.
    :param top_output_dir_name: Same.
    """

    # A non-positive radius disables smoothing entirely.
    if smoothing_radius_grid_cells <= 0:
        smoothing_radius_grid_cells = None

    # Unguided and guided CAM figures go to separate subdirectories.
    unguided_cam_dir_name = '{0:s}/main_gradcam'.format(top_output_dir_name)
    guided_cam_dir_name = '{0:s}/guided_gradcam'.format(top_output_dir_name)

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=unguided_cam_dir_name
    )
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=guided_cam_dir_name
    )

    # Check input args.
    colour_map_object = pyplot.get_cmap(colour_map_name)
    error_checking.assert_is_greater(min_unguided_value, 0.)
    error_checking.assert_is_greater(max_unguided_value, min_unguided_value)
    error_checking.assert_is_greater(max_guided_value, 0.)
    error_checking.assert_is_geq(num_unguided_contours, 10)
    error_checking.assert_is_geq(half_num_guided_contours, 5)

    print('Reading data from: "{0:s}"...'.format(input_file_name))
    gradcam_dict, pmm_flag = gradcam.read_file(input_file_name)

    if pmm_flag:
        # Probability-matched mean: one composite example with no storm ID or
        # time.  Add a leading example axis so the plotting loops below can
        # treat it like the multi-example case.
        predictor_matrices = gradcam_dict.pop(
            gradcam.MEAN_PREDICTOR_MATRICES_KEY
        )
        cam_matrices = gradcam_dict.pop(gradcam.MEAN_CAM_MATRICES_KEY)
        guided_cam_matrices = gradcam_dict.pop(
            gradcam.MEAN_GUIDED_CAM_MATRICES_KEY
        )

        full_storm_id_strings = [None]
        storm_times_unix_sec = [None]

        for j in range(len(predictor_matrices)):
            predictor_matrices[j] = numpy.expand_dims(
                predictor_matrices[j], axis=0
            )

            if cam_matrices[j] is None:
                continue

            cam_matrices[j] = numpy.expand_dims(
                cam_matrices[j], axis=0
            )
            guided_cam_matrices[j] = numpy.expand_dims(
                guided_cam_matrices[j], axis=0
            )
    else:
        predictor_matrices = gradcam_dict.pop(gradcam.PREDICTOR_MATRICES_KEY)
        cam_matrices = gradcam_dict.pop(gradcam.CAM_MATRICES_KEY)
        guided_cam_matrices = gradcam_dict.pop(gradcam.GUIDED_CAM_MATRICES_KEY)
        full_storm_id_strings = gradcam_dict[gradcam.FULL_STORM_IDS_KEY]
        storm_times_unix_sec = gradcam_dict[gradcam.STORM_TIMES_KEY]

    if smoothing_radius_grid_cells is not None:
        cam_matrices, guided_cam_matrices = _smooth_maps(
            cam_matrices=cam_matrices, guided_cam_matrices=guided_cam_matrices,
            smoothing_radius_grid_cells=smoothing_radius_grid_cells
        )

    # Read metadata for CNN.
    model_file_name = gradcam_dict[gradcam.MODEL_FILE_KEY]
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0]
    )

    print('Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    print(SEPARATOR_STRING)

    num_examples = predictor_matrices[0].shape[0]
    num_matrices = len(predictor_matrices)

    for i in range(num_examples):
        # First pass: plot the underlying radar images, then overlay the
        # UNGUIDED CAM for every input tensor that has one.
        this_handle_dict = plot_examples.plot_one_example(
            list_of_predictor_matrices=predictor_matrices,
            model_metadata_dict=model_metadata_dict, pmm_flag=pmm_flag,
            example_index=i, plot_sounding=False,
            allow_whitespace=allow_whitespace,
            plot_panel_names=plot_panel_names, add_titles=add_titles,
            label_colour_bars=label_colour_bars,
            colour_bar_length=colour_bar_length
        )

        these_figure_objects = this_handle_dict[plot_examples.RADAR_FIGURES_KEY]
        these_axes_object_matrices = (
            this_handle_dict[plot_examples.RADAR_AXES_KEY]
        )

        for j in range(num_matrices):
            if cam_matrices[j] is None:
                continue

            # Predictor shape is (example, spatial dims..., channel); two
            # non-spatial axes, so subtracting 2 yields the spatial rank.
            this_num_spatial_dim = len(predictor_matrices[j].shape) - 2

            if this_num_spatial_dim == 3:
                _plot_3d_radar_cam(
                    colour_map_object=colour_map_object,
                    min_unguided_value=min_unguided_value,
                    max_unguided_value=max_unguided_value,
                    num_unguided_contours=num_unguided_contours,
                    max_guided_value=max_guided_value,
                    half_num_guided_contours=half_num_guided_contours,
                    label_colour_bars=label_colour_bars,
                    colour_bar_length=colour_bar_length,
                    figure_objects=these_figure_objects,
                    axes_object_matrices=these_axes_object_matrices,
                    model_metadata_dict=model_metadata_dict,
                    output_dir_name=unguided_cam_dir_name,
                    cam_matrix=cam_matrices[j][i, ...],
                    full_storm_id_string=full_storm_id_strings[i],
                    storm_time_unix_sec=storm_times_unix_sec[i]
                )
            else:
                _plot_2d_radar_cam(
                    colour_map_object=colour_map_object,
                    min_unguided_value=min_unguided_value,
                    max_unguided_value=max_unguided_value,
                    num_unguided_contours=num_unguided_contours,
                    max_guided_value=max_guided_value,
                    half_num_guided_contours=half_num_guided_contours,
                    label_colour_bars=label_colour_bars,
                    colour_bar_length=colour_bar_length,
                    figure_objects=these_figure_objects,
                    axes_object_matrices=these_axes_object_matrices,
                    model_metadata_dict=model_metadata_dict,
                    output_dir_name=unguided_cam_dir_name,
                    cam_matrix=cam_matrices[j][i, ...],
                    full_storm_id_string=full_storm_id_strings[i],
                    storm_time_unix_sec=storm_times_unix_sec[i]
                )

        # Second pass: re-plot the radar images on fresh figures (the first
        # set was consumed/closed by the plotting helpers) and overlay the
        # GUIDED CAM for every input tensor that has one.
        this_handle_dict = plot_examples.plot_one_example(
            list_of_predictor_matrices=predictor_matrices,
            model_metadata_dict=model_metadata_dict, pmm_flag=pmm_flag,
            example_index=i, plot_sounding=False,
            allow_whitespace=allow_whitespace,
            plot_panel_names=plot_panel_names, add_titles=add_titles,
            label_colour_bars=label_colour_bars,
            colour_bar_length=colour_bar_length
        )

        these_figure_objects = this_handle_dict[plot_examples.RADAR_FIGURES_KEY]
        these_axes_object_matrices = (
            this_handle_dict[plot_examples.RADAR_AXES_KEY]
        )

        for j in range(num_matrices):
            if guided_cam_matrices[j] is None:
                continue

            this_num_spatial_dim = len(predictor_matrices[j].shape) - 2

            if this_num_spatial_dim == 3:
                _plot_3d_radar_cam(
                    colour_map_object=colour_map_object,
                    min_unguided_value=min_unguided_value,
                    max_unguided_value=max_unguided_value,
                    num_unguided_contours=num_unguided_contours,
                    max_guided_value=max_guided_value,
                    half_num_guided_contours=half_num_guided_contours,
                    label_colour_bars=label_colour_bars,
                    colour_bar_length=colour_bar_length,
                    figure_objects=these_figure_objects,
                    axes_object_matrices=these_axes_object_matrices,
                    model_metadata_dict=model_metadata_dict,
                    output_dir_name=guided_cam_dir_name,
                    guided_cam_matrix=guided_cam_matrices[j][i, ...],
                    full_storm_id_string=full_storm_id_strings[i],
                    storm_time_unix_sec=storm_times_unix_sec[i]
                )
            else:
                _plot_2d_radar_cam(
                    colour_map_object=colour_map_object,
                    min_unguided_value=min_unguided_value,
                    max_unguided_value=max_unguided_value,
                    num_unguided_contours=num_unguided_contours,
                    max_guided_value=max_guided_value,
                    half_num_guided_contours=half_num_guided_contours,
                    label_colour_bars=label_colour_bars,
                    colour_bar_length=colour_bar_length,
                    figure_objects=these_figure_objects,
                    axes_object_matrices=these_axes_object_matrices,
                    model_metadata_dict=model_metadata_dict,
                    output_dir_name=guided_cam_dir_name,
                    guided_cam_matrix=guided_cam_matrices[j][i, ...],
                    full_storm_id_string=full_storm_id_strings[i],
                    storm_time_unix_sec=storm_times_unix_sec[i]
                )
if __name__ == '__main__':
    # Script entry point: parse command-line arguments (parser and *_ARG_NAME
    # constants are defined near the top of the file) and forward them to
    # `_run`.
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()

    _run(
        input_file_name=getattr(INPUT_ARG_OBJECT, INPUT_FILE_ARG_NAME),
        colour_map_name=getattr(INPUT_ARG_OBJECT, COLOUR_MAP_ARG_NAME),
        min_unguided_value=getattr(
            INPUT_ARG_OBJECT, MIN_UNGUIDED_VALUE_ARG_NAME
        ),
        max_unguided_value=getattr(
            INPUT_ARG_OBJECT, MAX_UNGUIDED_VALUE_ARG_NAME
        ),
        num_unguided_contours=getattr(
            INPUT_ARG_OBJECT, NUM_UNGUIDED_CONTOURS_ARG_NAME
        ),
        max_guided_value=getattr(INPUT_ARG_OBJECT, MAX_GUIDED_VALUE_ARG_NAME),
        half_num_guided_contours=getattr(
            INPUT_ARG_OBJECT, HALF_NUM_GUIDED_CONTOURS_ARG_NAME
        ),
        smoothing_radius_grid_cells=getattr(
            INPUT_ARG_OBJECT, SMOOTHING_RADIUS_ARG_NAME
        ),
        # Boolean flags arrive as integers on the command line; coerce here.
        allow_whitespace=bool(getattr(
            INPUT_ARG_OBJECT, ALLOW_WHITESPACE_ARG_NAME
        )),
        plot_panel_names=bool(getattr(
            INPUT_ARG_OBJECT, PLOT_PANEL_NAMES_ARG_NAME
        )),
        add_titles=bool(getattr(INPUT_ARG_OBJECT, ADD_TITLES_ARG_NAME)),
        label_colour_bars=bool(getattr(
            INPUT_ARG_OBJECT, LABEL_CBARS_ARG_NAME
        )),
        colour_bar_length=getattr(INPUT_ARG_OBJECT, CBAR_LENGTH_ARG_NAME),
        top_output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
    )
| [
"gewittergefahr.gg_utils.general_utils.apply_gaussian_filter",
"numpy.flip",
"numpy.log10",
"gewittergefahr.gg_utils.error_checking.assert_is_geq",
"gewittergefahr.deep_learning.cnn.read_model_metadata",
"argparse.ArgumentParser",
"numpy.repeat",
"matplotlib.use",
"gewittergefahr.scripts.plot_input_... | [((131, 152), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (145, 152), False, 'import matplotlib\n'), ((3371, 3396), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3394, 3396), False, 'import argparse\n'), ((7601, 7632), 'numpy.log10', 'numpy.log10', (['min_unguided_value'], {}), '(min_unguided_value)\n', (7612, 7632), False, 'import numpy\n'), ((7664, 7695), 'numpy.log10', 'numpy.log10', (['max_unguided_value'], {}), '(max_unguided_value)\n', (7675, 7695), False, 'import numpy\n'), ((13501, 13532), 'numpy.log10', 'numpy.log10', (['min_unguided_value'], {}), '(min_unguided_value)\n', (13512, 13532), False, 'import numpy\n'), ((13564, 13595), 'numpy.log10', 'numpy.log10', (['max_unguided_value'], {}), '(max_unguided_value)\n', (13575, 13595), False, 'import numpy\n'), ((16601, 16839), 'gewittergefahr.scripts.plot_input_examples.metadata_to_file_name', 'plot_examples.metadata_to_file_name', ([], {'output_dir_name': 'output_dir_name', 'is_sounding': '(False)', 'pmm_flag': 'pmm_flag', 'full_storm_id_string': 'full_storm_id_string', 'storm_time_unix_sec': 'storm_time_unix_sec', 'radar_field_name': 'radar_field_name'}), '(output_dir_name=output_dir_name,\n is_sounding=False, pmm_flag=pmm_flag, full_storm_id_string=\n full_storm_id_string, storm_time_unix_sec=storm_time_unix_sec,\n radar_field_name=radar_field_name)\n', (16636, 16839), True, 'from gewittergefahr.scripts import plot_input_examples as plot_examples\n'), ((17080, 17122), 'matplotlib.pyplot.close', 'pyplot.close', (['figure_objects[figure_index]'], {}), '(figure_objects[figure_index])\n', (17092, 17122), True, 'import matplotlib.pyplot as pyplot\n'), ((20064, 20153), 'gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary', 'file_system_utils.mkdir_recursive_if_necessary', ([], {'directory_name': 'unguided_cam_dir_name'}), '(directory_name=\n unguided_cam_dir_name)\n', (20110, 20153), False, 'from 
gewittergefahr.gg_utils import file_system_utils\n'), ((20167, 20254), 'gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary', 'file_system_utils.mkdir_recursive_if_necessary', ([], {'directory_name': 'guided_cam_dir_name'}), '(directory_name=\n guided_cam_dir_name)\n', (20213, 20254), False, 'from gewittergefahr.gg_utils import file_system_utils\n'), ((20313, 20345), 'matplotlib.pyplot.get_cmap', 'pyplot.get_cmap', (['colour_map_name'], {}), '(colour_map_name)\n', (20328, 20345), True, 'import matplotlib.pyplot as pyplot\n'), ((20350, 20407), 'gewittergefahr.gg_utils.error_checking.assert_is_greater', 'error_checking.assert_is_greater', (['min_unguided_value', '(0.0)'], {}), '(min_unguided_value, 0.0)\n', (20382, 20407), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((20411, 20483), 'gewittergefahr.gg_utils.error_checking.assert_is_greater', 'error_checking.assert_is_greater', (['max_unguided_value', 'min_unguided_value'], {}), '(max_unguided_value, min_unguided_value)\n', (20443, 20483), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((20488, 20543), 'gewittergefahr.gg_utils.error_checking.assert_is_greater', 'error_checking.assert_is_greater', (['max_guided_value', '(0.0)'], {}), '(max_guided_value, 0.0)\n', (20520, 20543), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((20547, 20602), 'gewittergefahr.gg_utils.error_checking.assert_is_geq', 'error_checking.assert_is_geq', (['num_unguided_contours', '(10)'], {}), '(num_unguided_contours, 10)\n', (20575, 20602), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((20607, 20664), 'gewittergefahr.gg_utils.error_checking.assert_is_geq', 'error_checking.assert_is_geq', (['half_num_guided_contours', '(5)'], {}), '(half_num_guided_contours, 5)\n', (20635, 20664), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((20762, 20796), 'gewittergefahr.deep_learning.gradcam.read_file', 'gradcam.read_file', (['input_file_name'], 
{}), '(input_file_name)\n', (20779, 20796), False, 'from gewittergefahr.deep_learning import gradcam\n'), ((22574, 22618), 'gewittergefahr.deep_learning.cnn.read_model_metadata', 'cnn.read_model_metadata', (['model_metafile_name'], {}), '(model_metafile_name)\n', (22597, 22618), False, 'from gewittergefahr.deep_learning import cnn\n'), ((10647, 10889), 'gewittergefahr.scripts.plot_input_examples.metadata_to_file_name', 'plot_examples.metadata_to_file_name', ([], {'output_dir_name': 'output_dir_name', 'is_sounding': '(False)', 'pmm_flag': 'pmm_flag', 'full_storm_id_string': 'full_storm_id_string', 'storm_time_unix_sec': 'storm_time_unix_sec', 'radar_field_name': 'radar_field_names[j]'}), '(output_dir_name=output_dir_name,\n is_sounding=False, pmm_flag=pmm_flag, full_storm_id_string=\n full_storm_id_string, storm_time_unix_sec=storm_time_unix_sec,\n radar_field_name=radar_field_names[j])\n', (10682, 10889), True, 'from gewittergefahr.scripts import plot_input_examples as plot_examples\n'), ((11159, 11190), 'matplotlib.pyplot.close', 'pyplot.close', (['figure_objects[j]'], {}), '(figure_objects[j])\n', (11171, 11190), True, 'import matplotlib.pyplot as pyplot\n'), ((15013, 15079), 'numpy.repeat', 'numpy.repeat', (['this_cam_matrix_log10'], {'repeats': 'num_channels', 'axis': '(-1)'}), '(this_cam_matrix_log10, repeats=num_channels, axis=-1)\n', (15025, 15079), False, 'import numpy\n'), ((22803, 23170), 'gewittergefahr.scripts.plot_input_examples.plot_one_example', 'plot_examples.plot_one_example', ([], {'list_of_predictor_matrices': 'predictor_matrices', 'model_metadata_dict': 'model_metadata_dict', 'pmm_flag': 'pmm_flag', 'example_index': 'i', 'plot_sounding': '(False)', 'allow_whitespace': 'allow_whitespace', 'plot_panel_names': 'plot_panel_names', 'add_titles': 'add_titles', 'label_colour_bars': 'label_colour_bars', 'colour_bar_length': 'colour_bar_length'}), '(list_of_predictor_matrices=\n predictor_matrices, model_metadata_dict=model_metadata_dict, pmm_flag=\n 
pmm_flag, example_index=i, plot_sounding=False, allow_whitespace=\n allow_whitespace, plot_panel_names=plot_panel_names, add_titles=\n add_titles, label_colour_bars=label_colour_bars, colour_bar_length=\n colour_bar_length)\n', (22833, 23170), True, 'from gewittergefahr.scripts import plot_input_examples as plot_examples\n'), ((26333, 26700), 'gewittergefahr.scripts.plot_input_examples.plot_one_example', 'plot_examples.plot_one_example', ([], {'list_of_predictor_matrices': 'predictor_matrices', 'model_metadata_dict': 'model_metadata_dict', 'pmm_flag': 'pmm_flag', 'example_index': 'i', 'plot_sounding': '(False)', 'allow_whitespace': 'allow_whitespace', 'plot_panel_names': 'plot_panel_names', 'add_titles': 'add_titles', 'label_colour_bars': 'label_colour_bars', 'colour_bar_length': 'colour_bar_length'}), '(list_of_predictor_matrices=\n predictor_matrices, model_metadata_dict=model_metadata_dict, pmm_flag=\n pmm_flag, example_index=i, plot_sounding=False, allow_whitespace=\n allow_whitespace, plot_panel_names=plot_panel_names, add_titles=\n add_titles, label_colour_bars=label_colour_bars, colour_bar_length=\n colour_bar_length)\n', (26363, 26700), True, 'from gewittergefahr.scripts import plot_input_examples as plot_examples\n'), ((8382, 8751), 'gewittergefahr.plotting.plotting_utils.plot_linear_colour_bar', 'plotting_utils.plot_linear_colour_bar', ([], {'axes_object_or_matrix': 'axes_object_matrices[j]', 'data_matrix': 'guided_cam_matrix[..., j]', 'colour_map_object': 'colour_map_object', 'min_value': '(0.0)', 'max_value': 'max_guided_value', 'orientation_string': '"""horizontal"""', 'fraction_of_axis_length': 'colour_bar_length', 'extend_min': '(False)', 'extend_max': '(True)', 'font_size': 'COLOUR_BAR_FONT_SIZE'}), "(axes_object_or_matrix=\n axes_object_matrices[j], data_matrix=guided_cam_matrix[..., j],\n colour_map_object=colour_map_object, min_value=0.0, max_value=\n max_guided_value, orientation_string='horizontal',\n fraction_of_axis_length=colour_bar_length, 
extend_min=False, extend_max\n =True, font_size=COLOUR_BAR_FONT_SIZE)\n", (8419, 8751), False, 'from gewittergefahr.plotting import plotting_utils\n'), ((9108, 9131), 'numpy.log10', 'numpy.log10', (['cam_matrix'], {}), '(cam_matrix)\n', (9119, 9131), False, 'import numpy\n'), ((9600, 9987), 'gewittergefahr.plotting.plotting_utils.plot_linear_colour_bar', 'plotting_utils.plot_linear_colour_bar', ([], {'axes_object_or_matrix': 'axes_object_matrices[j]', 'data_matrix': 'cam_matrix_log10', 'colour_map_object': 'colour_map_object', 'min_value': 'min_unguided_value_log10', 'max_value': 'max_unguided_value_log10', 'orientation_string': '"""horizontal"""', 'fraction_of_axis_length': 'colour_bar_length', 'extend_min': '(True)', 'extend_max': '(True)', 'font_size': 'COLOUR_BAR_FONT_SIZE'}), "(axes_object_or_matrix=\n axes_object_matrices[j], data_matrix=cam_matrix_log10,\n colour_map_object=colour_map_object, min_value=min_unguided_value_log10,\n max_value=max_unguided_value_log10, orientation_string='horizontal',\n fraction_of_axis_length=colour_bar_length, extend_min=True, extend_max=\n True, font_size=COLOUR_BAR_FONT_SIZE)\n", (9637, 9987), False, 'from gewittergefahr.plotting import plotting_utils\n'), ((14932, 14970), 'numpy.expand_dims', 'numpy.expand_dims', (['cam_matrix'], {'axis': '(-1)'}), '(cam_matrix, axis=-1)\n', (14949, 14970), False, 'import numpy\n'), ((18252, 18386), 'gewittergefahr.gg_utils.general_utils.apply_gaussian_filter', 'general_utils.apply_gaussian_filter', ([], {'input_matrix': 'cam_matrices[j][i, ...]', 'e_folding_radius_grid_cells': 'smoothing_radius_grid_cells'}), '(input_matrix=cam_matrices[j][i, ...],\n e_folding_radius_grid_cells=smoothing_radius_grid_cells)\n', (18287, 18386), False, 'from gewittergefahr.gg_utils import general_utils\n'), ((21262, 21310), 'numpy.expand_dims', 'numpy.expand_dims', (['predictor_matrices[j]'], {'axis': '(0)'}), '(predictor_matrices[j], axis=0)\n', (21279, 21310), False, 'import numpy\n'), ((21438, 21480), 
'numpy.expand_dims', 'numpy.expand_dims', (['cam_matrices[j]'], {'axis': '(0)'}), '(cam_matrices[j], axis=0)\n', (21455, 21480), False, 'import numpy\n'), ((21548, 21597), 'numpy.expand_dims', 'numpy.expand_dims', (['guided_cam_matrices[j]'], {'axis': '(0)'}), '(guided_cam_matrices[j], axis=0)\n', (21565, 21597), False, 'import numpy\n'), ((22426, 22456), 'os.path.split', 'os.path.split', (['model_file_name'], {}), '(model_file_name)\n', (22439, 22456), False, 'import os\n'), ((13852, 13889), 'numpy.flip', 'numpy.flip', (['guided_cam_matrix'], {'axis': '(0)'}), '(guided_cam_matrix, axis=0)\n', (13862, 13889), False, 'import numpy\n'), ((15183, 15224), 'numpy.flip', 'numpy.flip', (['this_cam_matrix_log10'], {'axis': '(0)'}), '(this_cam_matrix_log10, axis=0)\n', (15193, 15224), False, 'import numpy\n'), ((18551, 18695), 'gewittergefahr.gg_utils.general_utils.apply_gaussian_filter', 'general_utils.apply_gaussian_filter', ([], {'input_matrix': 'guided_cam_matrices[j][i, ..., k]', 'e_folding_radius_grid_cells': 'smoothing_radius_grid_cells'}), '(input_matrix=guided_cam_matrices[j][i,\n ..., k], e_folding_radius_grid_cells=smoothing_radius_grid_cells)\n', (18586, 18695), False, 'from gewittergefahr.gg_utils import general_utils\n'), ((7994, 8039), 'numpy.flip', 'numpy.flip', (['guided_cam_matrix[..., j]'], {'axis': '(0)'}), '(guided_cam_matrix[..., j], axis=0)\n', (8004, 8039), False, 'import numpy\n'), ((9221, 9257), 'numpy.flip', 'numpy.flip', (['cam_matrix_log10'], {'axis': '(0)'}), '(cam_matrix_log10, axis=0)\n', (9231, 9257), False, 'import numpy\n')] |
import os, sys
sys.path.append(os.getcwd())
import time
from utils import load, save_images, Adamp, SGDNM
import numpy as np
import torch
import torchvision
from torch import nn
from torch import autograd
from torch import optim
import cifar10
# Fix all RNG seeds for reproducibility.
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)

# Download CIFAR-10 (Python version) at
# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the
# extracted files here!
DATA_DIR = '/network/data1/cifar-10-batches-py/'
if len(DATA_DIR) == 0:
    raise Exception('Please specify path to data directory in gan_cifar.py!')
print('DATA_DIR: ' + DATA_DIR)

# Command-line interface:
#   argv[1] = mode (loss/optimizer variant, e.g. 'gp', 'dc', 'nm', 'adamp',
#             'mixed_adam', 'mixed_adam_plus'; may also contain 'sat')
#   argv[2] = learning rate
#   argv[3] = momentum / first Adam beta
#   argv[4] = 'yes'/'no' — batch norm in the generator
#   argv[5] = discriminator updates per generator update
mode = str(sys.argv[1])
print('Mode: ' + mode)

DIM = 128             # Base channel width of both networks.
LAMBDA = 0            # Gradient-penalty weight (0 disables the penalty term).
BATCH_SIZE = 64
ITERS = 50000         # Total generator iterations.
OUTPUT_DIM = 3072     # 3 * 32 * 32 flattened image size.

LR = float(sys.argv[2])
print('LR: ' + str(LR))
MOM = float(sys.argv[3])
print('MOM: ' + str(MOM))
bn = str(sys.argv[4])
print('bn: ' + str(bn))
CRITIC_ITERS = int(sys.argv[5])

# All outputs (samples, cost curves, checkpoints) go under this run directory.
name = str(mode) + '_lr_' + str(LR) + '_mom_' + str(MOM) + '_bn_' + bn + '_dis_iter_' + str(CRITIC_ITERS)
if not os.path.exists('results_2/' + str(name)):
    os.makedirs('results_2/' + str(name))
class Generator(nn.Module):
    """DCGAN-style generator: 128-dim noise vector -> 3x32x32 image in [-1, 1].

    Batch norm is inserted after the linear/deconv layers iff the module-level
    flag ``bn`` equals ``'yes'``.
    """

    def __init__(self):
        super(Generator, self).__init__()
        with_bn = (bn == 'yes')

        # Build each stage as a layer list, inserting batch norm only when
        # requested.  Layer creation order matches the original (linear, [BN],
        # ReLU, deconv, [BN], ReLU, deconv, [BN], ReLU, deconv, tanh), so
        # seeded weight initialisation is unchanged.
        preprocess_layers = [nn.Linear(128, 4 * 4 * 4 * DIM)]
        if with_bn:
            preprocess_layers.append(nn.BatchNorm1d(4 * 4 * 4 * DIM))
        preprocess_layers.append(nn.ReLU(True))

        block1_layers = [nn.ConvTranspose2d(4 * DIM, 2 * DIM, 2, stride=2)]
        if with_bn:
            block1_layers.append(nn.BatchNorm2d(2 * DIM))
        block1_layers.append(nn.ReLU(True))

        block2_layers = [nn.ConvTranspose2d(2 * DIM, DIM, 2, stride=2)]
        if with_bn:
            block2_layers.append(nn.BatchNorm2d(DIM))
        block2_layers.append(nn.ReLU(True))

        self.preprocess = nn.Sequential(*preprocess_layers)
        self.block1 = nn.Sequential(*block1_layers)
        self.block2 = nn.Sequential(*block2_layers)
        self.deconv_out = nn.ConvTranspose2d(DIM, 3, 2, stride=2)
        self.tanh = nn.Tanh()

    def forward(self, input):
        """Maps a (batch, 128) noise tensor to a (batch, 3, 32, 32) image."""
        hidden = self.preprocess(input).view(-1, 4 * DIM, 4, 4)
        hidden = self.block2(self.block1(hidden))
        image = self.tanh(self.deconv_out(hidden))
        return image.view(-1, 3, 32, 32)
class Discriminator(nn.Module):
    """DCGAN-style discriminator: 3x32x32 image -> single unbounded logit.

    Three stride-2 convolutions (each followed by LeakyReLU) downsample to a
    4x4 feature map, which a linear layer maps to one score.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        self.main = nn.Sequential(
            nn.Conv2d(3, DIM, 3, 2, padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(DIM, 2 * DIM, 3, 2, padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(2 * DIM, 4 * DIM, 3, 2, padding=1),
            nn.LeakyReLU(),
        )
        self.linear = nn.Linear(4 * 4 * 4 * DIM, 1)

    def forward(self, input):
        """Returns a (batch, 1) logit tensor for a (batch, 3, 32, 32) input."""
        features = self.main(input).view(-1, 4 * 4 * 4 * DIM)
        return self.linear(features)
netG = Generator()
netD = Discriminator()
# print(netG)
# print(netD)

use_cuda = torch.cuda.is_available()
if use_cuda:
    gpu = 0
if use_cuda:
    netD = netD.cuda(gpu)
    netG = netG.cuda(gpu)

# Constant +1/-1 tensors (WGAN-style backward targets).
one = torch.FloatTensor([1])
mone = one * -1
if use_cuda:
    one = one.cuda(gpu)
    mone = mone.cuda(gpu)

# Optimizer selection is keyed off the `mode` CLI argument.  Note the
# checks are substring-based for the custom optimizers, so e.g.
# 'mixed_adam_plus' matches both 'adamp' and 'mixed_adam_plus' branches
# (later assignments win).
if mode == 'gp' or mode == 'dc' or mode == 'cp':
    optimizerD = optim.Adam(netD.parameters(), lr=LR, betas=(0.5, 0.9))
    optimizerG = optim.Adam(netG.parameters(), lr=LR, betas=(0.5, 0.9))
if mode == 'nm':
    # SGD with (possibly negative) momentum, from local utils.
    optimizerD = SGDNM(netD.parameters(), lr=LR, momentum=MOM)
    optimizerG = SGDNM(netG.parameters(), lr=LR, momentum=MOM)
if 'adamp' in mode:
    optimizerD = Adamp(netD.parameters(), lr=LR, betas=(MOM, 0.9))
    optimizerG = Adamp(netG.parameters(), lr=LR, betas=(MOM, 0.9))
if 'mixed_adam_plus' in mode:
    # "Mixed": custom beta1 for D, standard 0.5 for G.
    optimizerD = Adamp(netD.parameters(), lr=LR, betas=(MOM, 0.9))
    optimizerG = Adamp(netG.parameters(), lr=LR, betas=(0.5, 0.9))
if ('mixed_adam' in mode) and ('mixed_adam_plus' not in mode):
    optimizerD = Adamp(netD.parameters(), lr=LR, betas=(MOM, 0.9), md=0)
    optimizerG = Adamp(netG.parameters(), lr=LR, betas=(0.5, 0.9), md=0)
# if 'mixed_sgd' in mode:
#     optimizerD = SGDNM(netD.parameters(), lr=LR, momentum=MOM)
#     optimizerG = SGD(netG.parameters(), lr=LR, momentum=0.9)
def calc_gradient_penalty(netD, real_data, fake_data):
    """WGAN-GP gradient penalty on random interpolates of real and fake data.

    Penalises the squared deviation of the discriminator's gradient norm
    from 1 along straight lines between real and fake samples, scaled by
    the module-level LAMBDA.
    """
    # One mixing coefficient per example, broadcast over the 3x32x32 image.
    alpha = torch.rand(BATCH_SIZE, 1)
    alpha = alpha.expand(
        BATCH_SIZE,
        int(real_data.nelement() / BATCH_SIZE)).contiguous().view(
        BATCH_SIZE, 3, 32, 32)
    if use_cuda:
        alpha = alpha.cuda(gpu)

    interpolates = alpha * real_data + (1 - alpha) * fake_data
    if use_cuda:
        interpolates = interpolates.cuda(gpu)
    interpolates = autograd.Variable(interpolates, requires_grad=True)

    disc_interpolates = netD(interpolates)

    # Single autograd.grad call; only the grad_outputs device differs by
    # use_cuda, so build that tensor first instead of duplicating the call.
    grad_outputs = torch.ones(disc_interpolates.size())
    if use_cuda:
        grad_outputs = grad_outputs.cuda(gpu)
    gradients = autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=grad_outputs,
        create_graph=True, retain_graph=True, only_inputs=True)[0]

    flat_gradients = gradients.view(gradients.size(0), -1)
    return ((flat_gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
# For generating samples
def generate_image(frame, netG):
    """Samples 128 images from *netG* and saves them as one tiled PNG.

    :param frame: Integer tag (training iteration) used in the output
        file name.
    :param netG: Generator network.
    """
    # NOTE(review): despite the name, this noise is re-sampled on every call,
    # so successive sample grids do not share a fixed latent batch.
    fixed_noise_128 = torch.randn(128, 128)
    if use_cuda:
        fixed_noise_128 = fixed_noise_128.cuda(gpu)

    # BUG FIX: previously the no_grad context wrapped only the Variable
    # creation, so the generator forward pass still built an autograd graph.
    # Run the forward pass inside the context as well; output is unchanged.
    with torch.no_grad():
        noisev = autograd.Variable(fixed_noise_128)
        samples = netG(noisev)

    samples = samples.view(-1, 3, 32, 32)
    samples = samples.mul(0.5).add(0.5)   # [-1, 1] -> [0, 1] for image output
    samples = samples.cpu().data.numpy()

    save_images(
        samples, 'results_2/' + str(name) + '/samples_' + str(frame) + '.png')
    # save_images(samples, './samples_{}.jpg'.format(frame))
# Dataset iterator
# train_gen = load(BATCH_SIZE, data_dir=DATA_DIR)
train_gen, dev_gen = cifar10.load(BATCH_SIZE, data_dir=DATA_DIR)


def inf_train_gen():
    # Yields training batches forever by restarting the (finite) epoch
    # iterator returned by train_gen() each time it is exhausted.
    while True:
        for images in train_gen():
            yield images


gen = inf_train_gen()

# Raw uint8 HWC arrays -> float tensors normalised to [-1, 1], matching the
# generator's tanh output range.
preprocess = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# Non-saturating GAN loss on raw logits (Discriminator has no sigmoid).
criterion = nn.BCEWithLogitsLoss()
label = torch.FloatTensor(BATCH_SIZE)
if use_cuda:
    criterion.cuda()
    label = label.cuda()

# Per-logging-step cost histories, saved alongside checkpoints.
G_costs = []
D_costs = []
# Main adversarial training loop: CRITIC_ITERS discriminator steps per
# generator step, with periodic logging and checkpointing.
for iteration in range(ITERS):
    start_time = time.time()

    ############################
    # (1) Update D network
    ###########################
    for p in netD.parameters():  # reset requires_grad
        p.requires_grad = True  # they are set to False below in netG update

    for i in range(CRITIC_ITERS):
        # BUG FIX: generators have no ``.next()`` method in Python 3; the
        # built-in ``next()`` works on both Python 2 and Python 3.
        _data = next(gen)
        netD.zero_grad()

        # train with real
        _data = _data.reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)
        real_data = torch.stack([preprocess(item) for item in _data])
        if use_cuda:
            real_data = real_data.cuda(gpu)
        real_data_v = autograd.Variable(real_data)

        label.resize_(BATCH_SIZE, 1).fill_(1)
        labelv = autograd.Variable(label)
        output = netD(real_data_v)
        D_cost_real = criterion(output, labelv)
        D_cost_real.backward(retain_graph=True)

        # train with fake
        noise = torch.randn(BATCH_SIZE, 128)
        if use_cuda:
            noise = noise.cuda(gpu)
        # BUG FIX: run the generator forward pass inside no_grad — previously
        # only the Variable creation was wrapped, so a throwaway autograd
        # graph was built through netG (it is frozen for this step anyway).
        with torch.no_grad():
            noisev = autograd.Variable(noise)  # totally freeze netG
            fake = autograd.Variable(netG(noisev).data)
        inputv = fake
        label.resize_(BATCH_SIZE, 1).fill_(0)
        labelv = autograd.Variable(label)
        output = netD(inputv)
        D_cost_fake = criterion(output, labelv)
        D_cost_fake.backward(retain_graph=True)

        if 'gp' in mode:
            # train with gradient penalty
            gradient_penalty = calc_gradient_penalty(
                netD, real_data_v.data, fake.data)
            D_cost = D_cost_real + D_cost_fake + gradient_penalty
            gradient_penalty.backward()
        else:
            D_cost = D_cost_real + D_cost_fake

        optimizerD.step()

    ############################
    # (2) Update G network
    ###########################
    for p in netD.parameters():
        p.requires_grad = False  # to avoid computation
    netG.zero_grad()

    noise = torch.randn(BATCH_SIZE, 128)
    if use_cuda:
        noise = noise.cuda(gpu)
    noisev = autograd.Variable(noise)
    fake = netG(noisev)
    label.resize_(BATCH_SIZE, 1).fill_(1)
    labelv = autograd.Variable(label)
    output = netD(fake)
    if 'sat' in mode:
        # Saturating loss: maximise log(1 - D(G(z))) via negated BCE with
        # fake-labels.
        label.resize_(BATCH_SIZE, 1).fill_(0)
        # BUG FIX: the original called ``autograd.Varirble`` (typo), which
        # raised AttributeError whenever a 'sat' mode was actually run.
        labelv = autograd.Variable(label)
        G_cost = -criterion(output, labelv)
    else:
        G_cost = criterion(output, labelv)
    G_cost.backward()
    optimizerG.step()

    # Calculate dev loss and generate samples every 100 iters
    if iteration % 10 == 0:
        print('iter: ' + str(iteration) + ', ' +
              'G_cost: ' + str(G_cost.cpu().data.numpy()) + ', ' +
              'D_cost: ' + str(D_cost.cpu().data.numpy()) + ', ')
        G_costs += [G_cost.cpu().data.numpy()]
        D_costs += [D_cost.cpu().data.numpy()]

    if iteration % 5000 == 0:
        generate_image(iteration, netG)
        np.save('results_2/' + str(name) + '/G_costs', np.array(G_costs))
        np.save('results_2/' + str(name) + '/D_costs', np.array(D_costs))
        torch.save(netG.state_dict(),
                   'results_2/' + str(name) + '/gen_' + str(iteration))
        torch.save(netD.state_dict(),
                   'results_2/' + str(name) + '/dis_' + str(iteration))
| [
"torch.nn.ReLU",
"cifar10.load",
"torch.nn.Tanh",
"numpy.array",
"torch.nn.BatchNorm1d",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.autograd.Varirble",
"numpy.random.seed",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"torch.randn",
"torch.nn.LeakyReLU",
"to... | [((245, 265), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (259, 265), True, 'import numpy as np\n'), ((266, 289), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (283, 289), False, 'import torch\n'), ((290, 322), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(1234)'], {}), '(1234)\n', (316, 322), False, 'import torch\n'), ((3826, 3851), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3849, 3851), False, 'import torch\n'), ((3949, 3971), 'torch.FloatTensor', 'torch.FloatTensor', (['[1]'], {}), '([1])\n', (3966, 3971), False, 'import torch\n'), ((6980, 7023), 'cifar10.load', 'cifar10.load', (['BATCH_SIZE'], {'data_dir': 'DATA_DIR'}), '(BATCH_SIZE, data_dir=DATA_DIR)\n', (6992, 7023), False, 'import cifar10\n'), ((7316, 7338), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (7336, 7338), False, 'from torch import nn\n'), ((7347, 7376), 'torch.FloatTensor', 'torch.FloatTensor', (['BATCH_SIZE'], {}), '(BATCH_SIZE)\n', (7364, 7376), False, 'import torch\n'), ((31, 42), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (40, 42), False, 'import os, sys\n'), ((5205, 5230), 'torch.rand', 'torch.rand', (['BATCH_SIZE', '(1)'], {}), '(BATCH_SIZE, 1)\n', (5215, 5230), False, 'import torch\n'), ((5579, 5630), 'torch.autograd.Variable', 'autograd.Variable', (['interpolates'], {'requires_grad': '(True)'}), '(interpolates, requires_grad=True)\n', (5596, 5630), False, 'from torch import autograd\n'), ((6413, 6434), 'torch.randn', 'torch.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (6424, 6434), False, 'import torch\n'), ((7511, 7522), 'time.time', 'time.time', ([], {}), '()\n', (7520, 7522), False, 'import time\n'), ((9498, 9526), 'torch.randn', 'torch.randn', (['BATCH_SIZE', '(128)'], {}), '(BATCH_SIZE, 128)\n', (9509, 9526), False, 'import torch\n'), ((9589, 9613), 'torch.autograd.Variable', 'autograd.Variable', (['noise'], {}), '(noise)\n', (9606, 9613), 
False, 'from torch import autograd\n'), ((9694, 9718), 'torch.autograd.Variable', 'autograd.Variable', (['label'], {}), '(label)\n', (9711, 9718), False, 'from torch import autograd\n'), ((2589, 2628), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['DIM', '(3)', '(2)'], {'stride': '(2)'}), '(DIM, 3, 2, stride=2)\n', (2607, 2628), False, 'from torch import nn\n'), ((2782, 2791), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2789, 2791), False, 'from torch import nn\n'), ((3539, 3568), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 4 * DIM)', '(1)'], {}), '(4 * 4 * 4 * DIM, 1)\n', (3548, 3568), False, 'from torch import nn\n'), ((6513, 6528), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6526, 6528), False, 'import torch\n'), ((6547, 6581), 'torch.autograd.Variable', 'autograd.Variable', (['fixed_noise_128'], {}), '(fixed_noise_128)\n', (6564, 6581), False, 'from torch import autograd\n'), ((7195, 7228), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (7226, 7228), False, 'import torchvision\n'), ((7234, 7300), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (7266, 7300), False, 'import torchvision\n'), ((8122, 8150), 'torch.autograd.Variable', 'autograd.Variable', (['real_data'], {}), '(real_data)\n', (8139, 8150), False, 'from torch import autograd\n'), ((8215, 8239), 'torch.autograd.Variable', 'autograd.Variable', (['label'], {}), '(label)\n', (8232, 8239), False, 'from torch import autograd\n'), ((8414, 8442), 'torch.randn', 'torch.randn', (['BATCH_SIZE', '(128)'], {}), '(BATCH_SIZE, 128)\n', (8425, 8442), False, 'import torch\n'), ((8737, 8761), 'torch.autograd.Variable', 'autograd.Variable', (['label'], {}), '(label)\n', (8754, 8761), False, 'from torch import autograd\n'), ((9828, 9852), 'torch.autograd.Varirble', 'autograd.Varirble', (['label'], {}), '(label)\n', (9845, 9852), False, 'from torch import 
autograd\n'), ((3249, 3283), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'DIM', '(3)', '(2)'], {'padding': '(1)'}), '(3, DIM, 3, 2, padding=1)\n', (3258, 3283), False, 'from torch import nn\n'), ((3297, 3311), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3309, 3311), False, 'from torch import nn\n'), ((3325, 3365), 'torch.nn.Conv2d', 'nn.Conv2d', (['DIM', '(2 * DIM)', '(3)', '(2)'], {'padding': '(1)'}), '(DIM, 2 * DIM, 3, 2, padding=1)\n', (3334, 3365), False, 'from torch import nn\n'), ((3379, 3393), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3391, 3393), False, 'from torch import nn\n'), ((3407, 3451), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2 * DIM)', '(4 * DIM)', '(3)', '(2)'], {'padding': '(1)'}), '(2 * DIM, 4 * DIM, 3, 2, padding=1)\n', (3416, 3451), False, 'from torch import nn\n'), ((3465, 3479), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3477, 3479), False, 'from torch import nn\n'), ((8513, 8528), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8526, 8528), False, 'import torch\n'), ((8551, 8575), 'torch.autograd.Variable', 'autograd.Variable', (['noise'], {}), '(noise)\n', (8568, 8575), False, 'from torch import autograd\n'), ((10580, 10597), 'numpy.array', 'np.array', (['G_costs'], {}), '(G_costs)\n', (10588, 10597), True, 'import numpy as np\n'), ((10654, 10671), 'numpy.array', 'np.array', (['D_costs'], {}), '(D_costs)\n', (10662, 10671), True, 'import numpy as np\n'), ((1714, 1745), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(4 * 4 * 4 * DIM)'], {}), '(128, 4 * 4 * 4 * DIM)\n', (1723, 1745), False, 'from torch import nn\n'), ((1763, 1794), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(4 * 4 * 4 * DIM)'], {}), '(4 * 4 * 4 * DIM)\n', (1777, 1794), False, 'from torch import nn\n'), ((1812, 1825), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1819, 1825), False, 'from torch import nn\n'), ((1879, 1928), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(4 * DIM)', '(2 * DIM)', '(2)'], {'stride': 
'(2)'}), '(4 * DIM, 2 * DIM, 2, stride=2)\n', (1897, 1928), False, 'from torch import nn\n'), ((1946, 1969), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(2 * DIM)'], {}), '(2 * DIM)\n', (1960, 1969), False, 'from torch import nn\n'), ((1987, 2000), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1994, 2000), False, 'from torch import nn\n'), ((2054, 2099), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(2 * DIM)', 'DIM', '(2)'], {'stride': '(2)'}), '(2 * DIM, DIM, 2, stride=2)\n', (2072, 2099), False, 'from torch import nn\n'), ((2117, 2136), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['DIM'], {}), '(DIM)\n', (2131, 2136), False, 'from torch import nn\n'), ((2154, 2167), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2161, 2167), False, 'from torch import nn\n'), ((2239, 2270), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(4 * 4 * 4 * DIM)'], {}), '(128, 4 * 4 * 4 * DIM)\n', (2248, 2270), False, 'from torch import nn\n'), ((2288, 2301), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2295, 2301), False, 'from torch import nn\n'), ((2355, 2404), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(4 * DIM)', '(2 * DIM)', '(2)'], {'stride': '(2)'}), '(4 * DIM, 2 * DIM, 2, stride=2)\n', (2373, 2404), False, 'from torch import nn\n'), ((2422, 2435), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2429, 2435), False, 'from torch import nn\n'), ((2489, 2534), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(2 * DIM)', 'DIM', '(2)'], {'stride': '(2)'}), '(2 * DIM, DIM, 2, stride=2)\n', (2507, 2534), False, 'from torch import nn\n'), ((2552, 2565), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2559, 2565), False, 'from torch import nn\n')] |
from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt
from sklearn.model_selection import StratifiedShuffleSplit,train_test_split
from tensorflow import keras
import numpy as np
from src.models.embedding import *
from sklearn.datasets import fetch_20newsgroups
class NewsGroups:
    """Wrapper around the scikit-learn 20 Newsgroups corpus.

    Downloads the full corpus, optionally cleans each document with
    `clean_doc`, and holds a stratified train/test split (4500 test docs)
    plus a vocabulary built from the training documents.
    """

    def __init__(self, clean=True):
        """Fetch the corpus and build the stratified train/test split.

        Args:
            clean: If True, run `clean_doc` over every raw document.
        """
        docs = fetch_20newsgroups(subset='all', shuffle=True, random_state=1)  # remove=('headers', 'footers', 'quotes')
        if clean:
            docs_data_clean = [clean_doc(doc) for doc in docs.data]
        else:
            docs_data_clean = docs.data
        # Stratify so train and test keep the same class balance.
        X_docs, X_test_docs, y_labels, y_test_labels = train_test_split(
            docs_data_clean, docs.target,
            stratify=docs.target,
            test_size=4500,
            random_state=42
        )
        self.labels = y_labels
        self.data = X_docs
        self.test_labels = y_test_labels
        self.test_data = X_test_docs
        self.vocab = get_vocab(self.data)

    def getRankedDataSplits(self, vocab_size, max_sequence_length, n_splits=5, test_size=0.5, random_state=1):
        """Return rank-encoded, padded train/test splits plus the eval set."""
        X = text_to_rank(self.data, self.vocab, vocab_size)
        X = pad_sequences(X, maxlen=max_sequence_length)
        y = keras.utils.to_categorical(self.labels)
        X_eval = text_to_rank(self.test_data, self.vocab, vocab_size)
        X_eval = pad_sequences(X_eval, maxlen=max_sequence_length)
        y_eval = keras.utils.to_categorical(self.test_labels)
        X_train, y_train, X_test, y_test = splits(n_splits, X, y, test_size, random_state)
        return X_train, y_train, X_test, y_test, X_eval, y_eval

    def getRawDataSplits(self, n_splits=5, test_size=0.5, random_state=1):
        """Return embedding-indexed splits, the eval set, and the word index."""
        X = self.data
        y = keras.utils.to_categorical(self.labels)
        X_eval = self.test_data
        y_eval = keras.utils.to_categorical(self.test_labels)
        # Tokenize train and eval documents together so both share one
        # word index; the first len(X) sequences belong to the train set.
        seq, word_index = get_data(X + X_eval, len(X))
        X = seq[:len(X)]
        X_eval = seq[-len(X_eval):]
        X_train, y_train, X_test, y_test = splits(n_splits, X, y, test_size, random_state)
        return X_train, y_train, X_test, y_test, X_eval, y_eval, word_index

    def getDataSplits(self, n_splits=5, test_size=0.5, random_state=1):
        """Return raw documents and labels with per-split index lists."""
        X = self.data
        y = self.labels
        X_eval = self.test_data
        y_eval = self.test_labels
        train_index_list, test_index_list = splitsNonInt(n_splits, X, y, test_size, random_state)
        return X, y, train_index_list, test_index_list, X_eval, y_eval

    def getVocab(self):
        """Return the vocabulary built from the training documents."""
        return self.vocab

    def getData(self):
        """Return all documents and labels (train followed by test)."""
        return [*self.data, *self.test_data], [*self.labels, *self.test_labels]
| [
"tensorflow.keras.utils.to_categorical",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.fetch_20newsgroups",
"src.datasets.util.splitsNonInt",
"src.datasets.util.text_to_rank",
"src.datasets.util.clean_doc",
"src.datasets.util.pad_sequences",
"numpy.concatenate",
"src.datasets.util.ge... | [((398, 460), 'sklearn.datasets.fetch_20newsgroups', 'fetch_20newsgroups', ([], {'subset': '"""all"""', 'shuffle': '(True)', 'random_state': '(1)'}), "(subset='all', shuffle=True, random_state=1)\n", (416, 460), False, 'from sklearn.datasets import fetch_20newsgroups\n'), ((780, 885), 'sklearn.model_selection.train_test_split', 'train_test_split', (['docs_data_clean', 'docs.target'], {'stratify': 'docs.target', 'test_size': '(4500)', 'random_state': '(42)'}), '(docs_data_clean, docs.target, stratify=docs.target,\n test_size=4500, random_state=42)\n', (796, 885), False, 'from sklearn.model_selection import StratifiedShuffleSplit, train_test_split\n'), ((1130, 1150), 'src.datasets.util.get_vocab', 'get_vocab', (['self.data'], {}), '(self.data)\n', (1139, 1150), False, 'from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt\n'), ((1288, 1335), 'src.datasets.util.text_to_rank', 'text_to_rank', (['self.data', 'self.vocab', 'vocab_size'], {}), '(self.data, self.vocab, vocab_size)\n', (1300, 1335), False, 'from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt\n'), ((1348, 1392), 'src.datasets.util.pad_sequences', 'pad_sequences', (['X'], {'maxlen': 'max_sequence_length'}), '(X, maxlen=max_sequence_length)\n', (1361, 1392), False, 'from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt\n'), ((1405, 1444), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['self.labels'], {}), '(self.labels)\n', (1431, 1444), False, 'from tensorflow import keras\n'), ((1471, 1523), 'src.datasets.util.text_to_rank', 'text_to_rank', (['self.test_data', 'self.vocab', 'vocab_size'], {}), '(self.test_data, self.vocab, vocab_size)\n', (1483, 1523), False, 'from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt\n'), 
((1541, 1590), 'src.datasets.util.pad_sequences', 'pad_sequences', (['X_eval'], {'maxlen': 'max_sequence_length'}), '(X_eval, maxlen=max_sequence_length)\n', (1554, 1590), False, 'from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt\n'), ((1608, 1652), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['self.test_labels'], {}), '(self.test_labels)\n', (1634, 1652), False, 'from tensorflow import keras\n'), ((1710, 1757), 'src.datasets.util.splits', 'splits', (['n_splits', 'X', 'y', 'test_size', 'random_state'], {}), '(n_splits, X, y, test_size, random_state)\n', (1716, 1757), False, 'from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt\n'), ((1945, 1984), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['self.labels'], {}), '(self.labels)\n', (1971, 1984), False, 'from tensorflow import keras\n'), ((2035, 2079), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['self.test_labels'], {}), '(self.test_labels)\n', (2061, 2079), False, 'from tensorflow import keras\n'), ((2239, 2266), 'numpy.concatenate', 'np.concatenate', (['[X, X_eval]'], {}), '([X, X_eval])\n', (2253, 2266), True, 'import numpy as np\n'), ((2310, 2357), 'src.datasets.util.splits', 'splits', (['n_splits', 'X', 'y', 'test_size', 'random_state'], {}), '(n_splits, X, y, test_size, random_state)\n', (2316, 2357), False, 'from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt\n'), ((2662, 2689), 'numpy.concatenate', 'np.concatenate', (['[X, X_eval]'], {}), '([X, X_eval])\n', (2676, 2689), True, 'import numpy as np\n'), ((2734, 2787), 'src.datasets.util.splitsNonInt', 'splitsNonInt', (['n_splits', 'X', 'y', 'test_size', 'random_state'], {}), '(n_splits, X, y, test_size, random_state)\n', (2746, 2787), False, 'from src.datasets.util import read_files, get_vocab, 
pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt\n'), ((561, 575), 'src.datasets.util.clean_doc', 'clean_doc', (['doc'], {}), '(doc)\n', (570, 575), False, 'from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.dataset.NatConTimeSeriesDataset.py
#
# Copyright (C) 2015-2017 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Represents native contacts as a function of time
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("moldynplot.dataset")
import moldynplot.dataset
from IPython import embed
import h5py
import numpy as np
import pandas as pd
import six
from .TimeSeriesDataset import TimeSeriesDataset
from ..myplotspec import wiprint
################################### CLASSES ###################################
class NatConTimeSeriesDataset(TimeSeriesDataset):
    """
    Represents native contacts as a function of time
    """

    def __init__(self, downsample=None, calc_pdist=True, **kwargs):
        """
        Arguments:
          infile (str): Path to input file, may contain environment
            variables
          usecols (list): Columns to select from DataFrame, once
            dataframe has already been loaded
          dt (float): Time interval between points; units unspecified
          toffset (float): Time offset to be added to all points (i.e.
            time of first point)
          cutoff (float): Minimum distance within which a contact is
            considered to be formed
          downsample (int): Interval by which to downsample points using
            mode
          pdist (bool): Calculate probability distribution
          verbose (int): Level of verbose output
          kwargs (dict): Additional keyword arguments
        """
        verbose = kwargs.get("verbose", 1)

        # Load
        super(NatConTimeSeriesDataset, self).__init__(**kwargs)
        dataframe = self.dataframe
        n_contacts = self.dataframe.shape[1]

        # Convert minimum distances to percent native contacts
        cutoff = kwargs.get("cutoff", 5.5)
        percent = pd.DataFrame(
            data=(dataframe.values <= cutoff).sum(axis=1) / dataframe.shape[1],
            index=dataframe.index, columns=["percent_native_contacts"])
        dataframe = self.dataframe = percent

        # Downsample; flag included in function definition to prevent
        # superclass from downsampling before applying cutoff
        if downsample is not None:
            from scipy.stats.mstats import mode

            if verbose >= 1:
                print("downsampling by factor of {0} using mode".format(
                    downsample))
            # Trim the tail so the length is an exact multiple of downsample
            reduced = dataframe.values[
                      :dataframe.shape[0] - (dataframe.shape[0] % downsample), :]
            new_shape = (int(reduced.shape[0] / downsample), downsample,
                         reduced.shape[1])
            index = np.reshape(dataframe.index.values[
                               :dataframe.shape[0] - (dataframe.shape[0] % downsample)],
                new_shape[:-1]).mean(axis=1)
            reduced = np.reshape(reduced, new_shape)
            reduced = np.squeeze(mode(reduced, axis=1)[0])
            reduced = pd.DataFrame(data=reduced, index=index,
                columns=dataframe.columns.values)
            reduced.index.name = "time"
            dataframe = self.dataframe = reduced

        # Calculate probability distribution
        if calc_pdist:
            if verbose >= 1:
                print("calculating probability distribution using histogram")
            # Bin edges centered on the attainable fractions k / n_contacts
            bins = np.linspace(0 - ((1 / n_contacts) / 2),
                1 + ((1 / n_contacts) / 2), n_contacts + 1)
            pdist, _ = np.histogram(self.dataframe.values, bins)
            # FIX: the np.float alias was removed in NumPy 1.20+; use the
            # builtin float to normalize counts into probabilities.
            pdist = np.array(pdist, float) / pdist.sum()
            # Duplicate edges/counts to trace the histogram as a step outline
            pdist_x = np.zeros(bins.size * 2)
            pdist_y = np.zeros(bins.size * 2)
            pdist_x[::2] = pdist_x[1::2] = bins
            pdist_y[1:-1:2] = pdist_y[2:-1:2] = pdist
            self.pdist_x = pdist_x
            self.pdist_y = pdist_y

        self.timeseries = dataframe
#################################### MAIN #####################################
if __name__ == "__main__":
    # NOTE(review): main() is presumably a classmethod inherited from
    # TimeSeriesDataset / myplotspec — confirm against that module.
    NatConTimeSeriesDataset.main()
"numpy.histogram",
"numpy.reshape",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"scipy.stats.mstats.mode",
"pandas.DataFrame"
] | [((3116, 3146), 'numpy.reshape', 'np.reshape', (['reduced', 'new_shape'], {}), '(reduced, new_shape)\n', (3126, 3146), True, 'import numpy as np\n'), ((3228, 3301), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'reduced', 'index': 'index', 'columns': 'dataframe.columns.values'}), '(data=reduced, index=index, columns=dataframe.columns.values)\n', (3240, 3301), True, 'import pandas as pd\n'), ((3600, 3675), 'numpy.linspace', 'np.linspace', (['(0 - 1 / n_contacts / 2)', '(1 + 1 / n_contacts / 2)', '(n_contacts + 1)'], {}), '(0 - 1 / n_contacts / 2, 1 + 1 / n_contacts / 2, n_contacts + 1)\n', (3611, 3675), True, 'import numpy as np\n'), ((3721, 3762), 'numpy.histogram', 'np.histogram', (['self.dataframe.values', 'bins'], {}), '(self.dataframe.values, bins)\n', (3733, 3762), True, 'import numpy as np\n'), ((3845, 3868), 'numpy.zeros', 'np.zeros', (['(bins.size * 2)'], {}), '(bins.size * 2)\n', (3853, 3868), True, 'import numpy as np\n'), ((3891, 3914), 'numpy.zeros', 'np.zeros', (['(bins.size * 2)'], {}), '(bins.size * 2)\n', (3899, 3914), True, 'import numpy as np\n'), ((3783, 3808), 'numpy.array', 'np.array', (['pdist', 'np.float'], {}), '(pdist, np.float)\n', (3791, 3808), True, 'import numpy as np\n'), ((2946, 3055), 'numpy.reshape', 'np.reshape', (['dataframe.index.values[:dataframe.shape[0] - dataframe.shape[0] % downsample]', 'new_shape[:-1]'], {}), '(dataframe.index.values[:dataframe.shape[0] - dataframe.shape[0] %\n downsample], new_shape[:-1])\n', (2956, 3055), True, 'import numpy as np\n'), ((3180, 3201), 'scipy.stats.mstats.mode', 'mode', (['reduced'], {'axis': '(1)'}), '(reduced, axis=1)\n', (3184, 3201), False, 'from scipy.stats.mstats import mode\n')] |
import tensorflow as tf
import numpy as np
# Capture a TensorFlow profiler trace of the work below; the trace is
# written to the given directory for inspection in TensorBoard.
with tf.profiler.experimental.Profile('/home/Tiexin-RS/profiles'):
    with tf.device('gpu'):
        # Sample input array saved from the segmentation workspace.
        list_data = np.load('/home/Tiexin-RS/code/workspace/wjz/segment-with-nn/serving/load_test/locust_tfserving/a.npy')
        # NOTE(review): `payload` mirrors a TF Serving REST request body but
        # is unused here — presumably kept for reference; confirm before removing.
        payload = {"inputs": {'input_1': list_data.tolist()}}
        tensor_data = tf.constant(list_data)
| [
"tensorflow.device",
"numpy.load",
"tensorflow.constant",
"tensorflow.profiler.experimental.Profile"
] | [((48, 108), 'tensorflow.profiler.experimental.Profile', 'tf.profiler.experimental.Profile', (['"""/home/Tiexin-RS/profiles"""'], {}), "('/home/Tiexin-RS/profiles')\n", (80, 108), True, 'import tensorflow as tf\n'), ((119, 135), 'tensorflow.device', 'tf.device', (['"""gpu"""'], {}), "('gpu')\n", (128, 135), True, 'import tensorflow as tf\n'), ((157, 269), 'numpy.load', 'np.load', (['"""/home/Tiexin-RS/code/workspace/wjz/segment-with-nn/serving/load_test/locust_tfserving/a.npy"""'], {}), "(\n '/home/Tiexin-RS/code/workspace/wjz/segment-with-nn/serving/load_test/locust_tfserving/a.npy'\n )\n", (164, 269), True, 'import numpy as np\n'), ((344, 366), 'tensorflow.constant', 'tf.constant', (['list_data'], {}), '(list_data)\n', (355, 366), True, 'import tensorflow as tf\n')] |
import os
import tkinter as tk
import numpy as np
from src.globals import current_dir
from src.preprocessor import deskew_image, dots_to_image
from src.utils import draw_digit, save_digit
class InputGUI:
    """Tkinter window for drawing a digit and classifying it with a network.

    Args:
        root: Tk root window to build the widgets into.
        n: Optional NeuralNetwork with a ``predict`` method; when None,
           drawing and saving still work but no prediction is made.
    """

    def __init__(self, root, n=None):
        self.root = root
        self.n = n  # NeuralNetwork object
        self.dots = []  # Array to store locations of dots
        self.scale = 15  # Stroke size
        self.size = 28 * 10 - 1

        self.root.minsize(700, 400)
        self.root.title('Digits')

        desc = \
            'Draw a single digit in the canvas.\n' + \
            'For best output, try to ensure it is centered\n' + \
            'in the frame and nearly fills it.'
        self.label_desc = tk.Label(root, text=desc)
        self.label_desc.grid(column=1, row=0, columnspan=3)

        # Result labels, filled in by predict().
        self.label_prediction = tk.Label(root, text='')
        self.label_prediction.grid(column=1, row=2)
        self.label_confidence = tk.Label(root, text='')
        self.label_confidence.grid(column=2, row=2)
        # REVIEW: Perhaps allow user to enter correct answer and save
        #         as additional test data? Perhaps make new training
        #         data out of it?

        self.canvas = tk.Canvas(
            self.root, width=self.size, height=self.size,
            highlightthickness=2, highlightbackground='black'
        )
        self.canvas.grid(column=1, row=1, columnspan=3)
        self.canvas.bind('<Button-1>', self.draw)
        self.canvas.bind('<B1-Motion>', self.draw)

        self.clear_button = tk.Button(
            self.root, text='Clear', command=self.clear
        )
        self.predict_button = tk.Button(
            self.root, text='Predict', command=self.predict
        )
        self.close_button = tk.Button(
            self.root, text='Close', command=self.root.destroy
        )
        self.clear_button.grid(column=1, row=3)
        self.predict_button.grid(column=2, row=3)
        self.close_button.grid(column=3, row=3)

        # Needed for center alignment
        self.root.grid_columnconfigure(0, weight=1)
        self.root.grid_columnconfigure(4, weight=1)

    def clear(self):
        """Erase the canvas and forget the recorded stroke points."""
        self.canvas.delete('all')
        self.dots = []

    def predict(self):
        """Rasterize the drawing, save snapshots, and show the prediction."""
        prediction_dir = get_prediction_dir()
        data = dots_to_image(self.dots, self.scale)
        deskewed = deskew_image(data)
        save_digit(data, os.path.join(prediction_dir, 'raw.png'))
        save_digit(deskewed, os.path.join(prediction_dir, 'deskewed.png'))

        # DEBUG: Draw digits to check deskewing
        draw_digit(data)
        draw_digit(deskewed)

        if self.n:
            # TODO: Save raw as well as deskewed prediction as json
            # prediction = self.n.predict(data.reshape((784,)))
            prediction = self.n.predict(deskewed.reshape((784,)))
            digit = np.argmax(prediction)
            confidence = prediction[digit] * 100
            # FIX: display the result in the previously-unused labels
            # instead of only printing to stdout.
            self.label_prediction.config(text='Prediction: %s' % digit)
            self.label_confidence.config(text='Confidence: %d%%' % confidence)
            print('Prediction: %s, confidence: %d%%' % (
                digit, confidence
            ))
            print(prediction)

    def draw(self, event):
        """Record a stroke point and paint a dot at the mouse position."""
        x, y = event.x, event.y
        if 0 <= x < self.size and 0 <= y < self.size:
            self.dots.append((x, y))
            self.canvas.create_oval(
                x - self.scale, y - self.scale,
                x + self.scale, y + self.scale,
                fill='#222222'
            )
def run_gui(n=None):
    """Launch the digit-drawing window; blocks until the window closes."""
    window = tk.Tk()
    InputGUI(window, n)
    window.mainloop()
def get_prediction_dir():
    """Create and return a fresh numbered directory under predictions/.

    Scans for the lowest unused integer name and creates that directory.

    Returns:
        str: Path of the newly created directory.
    """
    prediction_dir = os.path.join(current_dir, 'predictions')
    # exist_ok avoids the race between a separate isdir check and mkdir.
    os.makedirs(prediction_dir, exist_ok=True)
    i = 1
    while os.path.isdir(os.path.join(prediction_dir, str(i))):
        i += 1
    path = os.path.join(prediction_dir, str(i))
    os.mkdir(path)
    return path
if __name__ == '__main__':
    # Standalone run with no network (n=None): drawing and image saving
    # work, but predict() skips classification.
    run_gui()
| [
"src.preprocessor.dots_to_image",
"os.path.join",
"numpy.argmax",
"tkinter.Button",
"tkinter.Canvas",
"tkinter.Tk",
"os.path.isdir",
"os.mkdir",
"tkinter.Label",
"src.utils.draw_digit",
"src.preprocessor.deskew_image"
] | [((2946, 2953), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (2951, 2953), True, 'import tkinter as tk\n'), ((3036, 3076), 'os.path.join', 'os.path.join', (['current_dir', '"""predictions"""'], {}), "(current_dir, 'predictions')\n", (3048, 3076), False, 'import os\n'), ((3267, 3281), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (3275, 3281), False, 'import os\n'), ((644, 669), 'tkinter.Label', 'tk.Label', (['root'], {'text': 'desc'}), '(root, text=desc)\n', (652, 669), True, 'import tkinter as tk\n'), ((751, 774), 'tkinter.Label', 'tk.Label', (['root'], {'text': '""""""'}), "(root, text='')\n", (759, 774), True, 'import tkinter as tk\n'), ((848, 871), 'tkinter.Label', 'tk.Label', (['root'], {'text': '""""""'}), "(root, text='')\n", (856, 871), True, 'import tkinter as tk\n'), ((1141, 1252), 'tkinter.Canvas', 'tk.Canvas', (['self.root'], {'width': 'self.size', 'height': 'self.size', 'highlightthickness': '(2)', 'highlightbackground': '"""black"""'}), "(self.root, width=self.size, height=self.size, highlightthickness=\n 2, highlightbackground='black')\n", (1150, 1252), True, 'import tkinter as tk\n'), ((1420, 1474), 'tkinter.Button', 'tk.Button', (['self.root'], {'text': '"""Clear"""', 'command': 'self.clear'}), "(self.root, text='Clear', command=self.clear)\n", (1429, 1474), True, 'import tkinter as tk\n'), ((1506, 1564), 'tkinter.Button', 'tk.Button', (['self.root'], {'text': '"""Predict"""', 'command': 'self.predict'}), "(self.root, text='Predict', command=self.predict)\n", (1515, 1564), True, 'import tkinter as tk\n'), ((1594, 1655), 'tkinter.Button', 'tk.Button', (['self.root'], {'text': '"""Close"""', 'command': 'self.root.destroy'}), "(self.root, text='Close', command=self.root.destroy)\n", (1603, 1655), True, 'import tkinter as tk\n'), ((2052, 2088), 'src.preprocessor.dots_to_image', 'dots_to_image', (['self.dots', 'self.scale'], {}), '(self.dots, self.scale)\n', (2065, 2088), False, 'from src.preprocessor import deskew_image, dots_to_image\n'), 
((2102, 2120), 'src.preprocessor.deskew_image', 'deskew_image', (['data'], {}), '(data)\n', (2114, 2120), False, 'from src.preprocessor import deskew_image, dots_to_image\n'), ((2296, 2312), 'src.utils.draw_digit', 'draw_digit', (['data'], {}), '(data)\n', (2306, 2312), False, 'from src.utils import draw_digit, save_digit\n'), ((2315, 2335), 'src.utils.draw_digit', 'draw_digit', (['deskewed'], {}), '(deskewed)\n', (2325, 2335), False, 'from src.utils import draw_digit, save_digit\n'), ((3085, 3114), 'os.path.isdir', 'os.path.isdir', (['prediction_dir'], {}), '(prediction_dir)\n', (3098, 3114), False, 'import os\n'), ((3118, 3142), 'os.mkdir', 'os.mkdir', (['prediction_dir'], {}), '(prediction_dir)\n', (3126, 3142), False, 'import os\n'), ((2141, 2180), 'os.path.join', 'os.path.join', (['prediction_dir', '"""raw.png"""'], {}), "(prediction_dir, 'raw.png')\n", (2153, 2180), False, 'import os\n'), ((2205, 2249), 'os.path.join', 'os.path.join', (['prediction_dir', '"""deskewed.png"""'], {}), "(prediction_dir, 'deskewed.png')\n", (2217, 2249), False, 'import os\n'), ((2532, 2553), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (2541, 2553), True, 'import numpy as np\n')] |
import warnings
from itertools import tee, starmap
from operator import gt
from copy import copy
import numpy as np
import pandas as pd
import bioframe
def assign_view_paired(
    features,
    view_df,
    cols_paired=["chrom1", "start1", "end1", "chrom2", "start2", "end2"],
    cols_view=["chrom", "start", "end"],
    features_view_cols=["region1", "region2"],
    view_name_col="name",
    drop_unassigned=False,
):
    """Label both anchors of each paired interval with view region names.

    A regular 1D view is assigned independently to the left and right sides
    of a bedpe-style dataframe, adding the two columns named in
    ``features_view_cols``.

    Parameters
    ----------
    features : pd.DataFrame
        bedpe-style dataframe
    view_df : pandas.DataFrame
        ViewFrame specifying region starts and ends for assignment;
        dictionary and pd.Series inputs are converted when possible.
    cols_paired : list of str
        The six columns holding chromosome, start and end for the two
        anchors of each interval.
    cols_view : list of str
        The chromosome, start and end columns of the view. The default
        values are 'chrom', 'start', 'end'.
    features_view_cols : list of str
        Output columns that receive the assigned region names.
    view_name_col : str
        Column of ``view_df`` with region names. Default 'name'.
    drop_unassigned : bool
        If True, drop intervals that do not overlap any view region.
        Default False.
    """
    out = features.copy()
    out.reset_index(inplace=True, drop=True)

    anchor1_cols = cols_paired[:3]
    anchor2_cols = cols_paired[3:]
    bioframe.core.checks.is_bedframe(out, raise_errors=True, cols=anchor1_cols)
    bioframe.core.checks.is_bedframe(out, raise_errors=True, cols=anchor2_cols)

    view_df = bioframe.core.construction.make_viewframe(
        view_df, view_name_col=view_name_col, cols=cols_view
    )

    out = bioframe.assign_view(
        out,
        view_df,
        drop_unassigned=drop_unassigned,
        df_view_col=features_view_cols[0],
        view_name_col=view_name_col,
        cols=anchor1_cols,
        cols_view=cols_view,
    )
    # The overlap above can promote the second anchor's coordinates to
    # float; restore integer dtype before assigning that side.
    out[anchor2_cols[1:]] = out[anchor2_cols[1:]].astype(int)

    out = bioframe.assign_view(
        out,
        view_df,
        drop_unassigned=drop_unassigned,
        df_view_col=features_view_cols[1],
        view_name_col=view_name_col,
        cols=anchor2_cols,
        cols_view=cols_view,
    )
    return out
def assign_regions(features, supports):
    """
    Assign the overlapping genomic region (support) to each feature.

    .. deprecated::
        Will be removed in future versions; use ``bioframe.overlap()``
        instead.

    For each feature in the features dataframe assign the genomic region
    (support) that overlaps with it. If a feature overlaps multiple
    supports, the region with the largest overlap is reported. Handles both
    bed-style ("chrom") and bedpe-style ("chrom1"/"chrom2") dataframes; for
    the paired case a "region" is reported only when both sides fall into
    the same region.
    """
    # FIX: the docstring declared this function deprecated but no runtime
    # warning was emitted; surface it to callers.
    warnings.warn(
        "assign_regions is deprecated and will be removed in future "
        "versions; use bioframe.overlap() instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    index_name = features.index.name  # Store the name of index
    features = (
        features.copy()
        .reset_index()
        .rename({"index" if index_name is None else index_name: "native_order"}, axis=1)
    )  # Store the original features' order as a column with original index

    if "chrom" in features.columns:
        overlap = bioframe.overlap(
            features,
            supports,
            how="left",
            cols1=["chrom", "start", "end"],
            cols2=["chrom", "start", "end"],
            keep_order=True,
            return_overlap=True,
            suffixes=("_1", "_2"),
        )
        overlap_columns = overlap.columns  # To filter out duplicates later
        overlap["overlap_length"] = overlap["overlap_end"] - overlap["overlap_start"]
        # Filter out overlaps with multiple regions:
        overlap = (
            overlap.sort_values("overlap_length", ascending=False)
            .drop_duplicates(overlap_columns, keep="first")
            .sort_index()
        )
        # Copy single column with overlapping region name:
        features["region"] = overlap["name_2"]

    if "chrom1" in features.columns:
        for idx in ("1", "2"):
            overlap = bioframe.overlap(
                features,
                supports,
                how="left",
                cols1=[f"chrom{idx}", f"start{idx}", f"end{idx}"],
                cols2=[f"chrom", f"start", f"end"],
                keep_order=True,
                return_overlap=True,
                suffixes=("_1", "_2"),
            )
            overlap_columns = overlap.columns  # To filter out duplicates later
            overlap[f"overlap_length{idx}"] = (
                overlap[f"overlap_end{idx}"] - overlap[f"overlap_start{idx}"]
            )
            # Filter out overlaps with multiple regions:
            overlap = (
                overlap.sort_values(f"overlap_length{idx}", ascending=False)
                .drop_duplicates(overlap_columns, keep="first")
                .sort_index()
            )
            # Copy single column with overlapping region name:
            features[f"region{idx}"] = overlap["name_2"]

        # Form a single column with region names where region1 == region2, and np.nan in other cases:
        features["region"] = np.where(
            features["region1"] == features["region2"], features["region1"], np.nan
        )
        features = features.drop(
            ["region1", "region2"], axis=1
        )  # Remove unnecessary columns

    features = features.set_index("native_order")  # Restore the original index
    features.index.name = index_name  # Restore original index title
    return features
def assign_supports(features, supports, labels=False, suffix=""):
    """
    Assign support regions to a table of genomic intervals.

    Obsolete, replaced by assign_regions now.

    Parameters
    ----------
    features : DataFrame
        Dataframe with columns `chrom`, `start`, `end`
        or `chrom1`, `start1`, `end1`, `chrom2`, `start2`, `end2`
    supports : array-like
        Support areas; each entry is either a single (chrom, start, end[,
        name]) region or a pair of such regions.
    labels : bool
        If True, return the support tuples themselves instead of indices.
    suffix : str
        Suffix appended to the coordinate column names (e.g. "1" or "2").
    """
    features = features.copy()
    chrom_col = "chrom" + suffix
    start_col = "start" + suffix
    end_col = "end" + suffix
    for col in (chrom_col, start_col, end_col):
        if col not in features.columns:
            raise ValueError(
                'Column "{}" not found in features data frame.'.format(col)
            )

    def _region_mask(region):
        # Boolean mask of features overlapping one (chrom, start, end) region;
        # an open-ended region (end is None) extends to the chromosome end.
        mask = (features[chrom_col] == region[0]) & (features[end_col] > region[1])
        if region[2] is not None:
            mask &= features[start_col] < region[2]
        return mask

    assignment = pd.Series(index=features.index, data=np.nan)
    for region_idx, region in enumerate(supports):
        if len(region) in [3, 4]:
            # single-region support
            selected = _region_mask(region)
        elif len(region) == 2:
            # paired-region support: overlap with either side counts
            selected = _region_mask(region[0]) | _region_mask(region[1])
        assignment.loc[selected] = region_idx

    if labels:
        assignment = assignment.map(
            lambda i: supports[int(i)], na_action="ignore"
        )
    return assignment
def assign_regions_to_bins(bin_ids, regions_span):
    """
    Assign a region name to each bin id.

    Parameters
    ----------
    bin_ids : array-like of int
        Bin indices to annotate.
    regions_span : DataFrame
        Table with "bin_start", "bin_end" and "name" columns describing the
        half-open bin range [bin_start, bin_end) of each region, sorted by
        bin position.

    Returns
    -------
    region_ids : pd.array
        Region name per bin; pd.NA for bins not covered by exactly one region.
    """
    # NOTE: a sorted/filtered copy ("regions_binsorted") used to be computed
    # here but was never used; removed as dead code.  The searchsorted calls
    # below still assume "bin_start"/"bin_end" are already sorted.
    bin_reg_idx_lo = regions_span["bin_start"].searchsorted(bin_ids, "right") - 1
    bin_reg_idx_hi = regions_span["bin_end"].searchsorted(bin_ids, "right")
    # A bin is assigned only when both searches agree on a single region
    # (lo == hi) and the bin is not before the first region (lo >= 0).
    mask_assigned = (bin_reg_idx_lo == bin_reg_idx_hi) & (bin_reg_idx_lo >= 0)
    region_ids = pd.array([pd.NA] * len(bin_ids))
    region_ids[mask_assigned] = regions_span["name"][bin_reg_idx_lo[mask_assigned]]
    return region_ids
def make_cooler_view(clr, ucsc_names=False):
    """
    Generate a full-chromosome viewframe from a cooler's chromsizes.

    Parameters
    ----------
    clr : cooler
        cooler object whose chromsizes define the view
    ucsc_names : bool
        If True, keep the full UCSC-formatted region names produced by
        bioframe; otherwise use the short chromosome names.

    Returns
    -------
    cooler_view : viewframe
        full chromosome viewframe
    """
    cooler_view = bioframe.make_viewframe(clr.chromsizes)
    if not ucsc_names:
        # Replace the UCSC-style names with plain chromosome names.
        cooler_view["name"] = cooler_view["chrom"]
    return cooler_view
def view_from_track(track_df):
    """Build a viewframe spanning the full extent of each chromosome in a track."""
    bioframe.core.checks._verify_columns(track_df, ["chrom", "start", "end"])
    # One region per chromosome, covering min(start)..max(end) of that chromosome.
    extents = []
    for chrom, chrom_df in track_df.groupby("chrom"):
        extents.append((chrom, chrom_df.start.min(), chrom_df.end.max()))
    return bioframe.make_viewframe(extents)
def mask_cooler_bad_bins(track, bintable):
    """
    Mask (set to NaN) values in track where bin is masked in bintable.
    Currently used in `cli.get_saddle()`.
    TODO: determine if this should be used elsewhere.

    Parameters
    ----------
    track : tuple of (DataFrame, str)
        bedGraph-like dataframe along with the name of the value column.
    bintable : tuple of (DataFrame, str)
        bedGraph-like dataframe along with the name of the weight column.

    Returns
    -------
    track : DataFrame
        New bedGraph-like dataframe with bad bins masked in the value column
    """
    # TODO: update to new track format
    track_df, value_col = track
    bins_df, clr_weight_name = bintable
    merged = pd.merge(
        track_df[["chrom", "start", "end", value_col]],
        bins_df,
        on=["chrom", "start", "end"],
    )
    # A non-finite weight marks a bad bin; blank out its track value.
    bad_bins = ~np.isfinite(merged[clr_weight_name])
    merged.loc[bad_bins, value_col] = np.nan
    return merged[["chrom", "start", "end", value_col]]
def align_track_with_cooler(
    track, clr, view_df=None, clr_weight_name="weight", mask_bad_bins=True
):
    """
    Sync a track dataframe with a cooler bintable.
    Checks that bin sizes match between a track and a cooler,
    merges the cooler bintable with the track, and
    propagates masked regions from a cooler bintable to a track.
    Parameters
    ----------
    track : pd.DataFrame
        bedGraph-like track DataFrame to check
    clr : cooler
        cooler object to check against
    view_df : bioframe.viewframe or None
        Optional viewframe of regions to check for their number of bins with assigned track values.
        If None, constructs a view_df from cooler chromsizes.
    clr_weight_name : str
        Name of the column in the bin table with weight
    mask_bad_bins : bool
        Whether to propagate null bins from cooler bintable column clr_weight_name
        to the 'value' column of the output clr_track. Default True.
    Returns
    -------
    clr_track
        track dataframe that has been aligned with the cooler bintable
        and has columns ['chrom','start','end','value']
    Raises
    ------
    ValueError
        If the track is invalid, its bin size differs from the cooler's,
        the cooler is not balanced under `clr_weight_name`, or no track
        values land on the bintable (globally or within any view region).
    """
    from .checks import is_track, is_cooler_balanced
    try:
        is_track(track, raise_errors=True)
    except Exception as e:
        raise ValueError("invalid input track") from e
    # since tracks are currently allowed to have flexible column names
    c, s, e, v = track.columns[:4]
    # using median to allow for shorter / longer last bin on any chromosome
    track_bin_width = int((track[e] - track[s]).median())
    if not (track_bin_width == clr.binsize):
        raise ValueError(
            "mismatch between track and cooler bin size, check track resolution"
        )
    # Left-merge keeps every cooler bin; bins without a track value get NaN.
    # Merging on ("chrom", "start") only: "end" is implied by the fixed bin
    # width verified above.  suffixes=("", "_") keeps cooler columns intact
    # if the track duplicates any bintable column name.
    clr_track = (
        (clr.bins()[:])
        .copy()
        .merge(
            track.rename(columns={c: "chrom", s: "start", e: "end", v: "value"}),
            how="left",
            on=["chrom", "start"],
            suffixes=("", "_"),
        )
    )
    if clr_weight_name:
        try:
            is_cooler_balanced(clr, clr_weight_name=clr_weight_name, raise_errors=True)
        except Exception as e:
            raise ValueError(
                f"no column {clr_weight_name} detected in input cooler bintable"
            ) from e
    else:
        # No balancing requested: treat every bin as valid.
        # NOTE(review): a falsy clr_weight_name (e.g. None) is used as the
        # column key here — confirm this is intentional.
        clr_track[clr_weight_name] = 1.0
    # A bin is valid when its weight is non-null; warn if fewer than half of
    # the valid bins actually received a track value.
    valid_bins = clr_track[clr_weight_name].notna()
    num_valid_bins = valid_bins.sum()
    num_assigned_bins = (clr_track["value"][valid_bins].notna()).sum()
    if num_assigned_bins == 0:
        raise ValueError("no track values assigned to cooler bintable")
    elif num_assigned_bins < 0.5 * np.sum(valid_bins):
        warnings.warn("less than 50% of valid bins have been assigned a value")
    # Per-region sanity check: every view region must have at least one
    # assigned value.
    view_df = make_cooler_view(clr) if view_df is None else view_df
    for region in view_df.itertuples(index=False):
        track_region = bioframe.select(clr_track, region)
        num_assigned_region_bins = track_region["value"].notna().sum()
        if num_assigned_region_bins == 0:
            raise ValueError(
                f"no track values assigned to region {bioframe.to_ucsc_string(region)}"
            )
    if mask_bad_bins:
        # Propagate cooler's bad-bin mask into the track values.
        clr_track.loc[~valid_bins, "value"] = np.nan
    return clr_track[["chrom", "start", "end", "value"]]
| [
"pandas.Series",
"bioframe.to_ucsc_string",
"bioframe.core.checks._verify_columns",
"bioframe.core.construction.make_viewframe",
"numpy.where",
"bioframe.make_viewframe",
"pandas.merge",
"bioframe.assign_view",
"numpy.sum",
"bioframe.select",
"numpy.isfinite",
"warnings.warn",
"bioframe.over... | [((1759, 1836), 'bioframe.core.checks.is_bedframe', 'bioframe.core.checks.is_bedframe', (['features'], {'raise_errors': '(True)', 'cols': 'cols_left'}), '(features, raise_errors=True, cols=cols_left)\n', (1791, 1836), False, 'import bioframe\n'), ((1841, 1919), 'bioframe.core.checks.is_bedframe', 'bioframe.core.checks.is_bedframe', (['features'], {'raise_errors': '(True)', 'cols': 'cols_right'}), '(features, raise_errors=True, cols=cols_right)\n', (1873, 1919), False, 'import bioframe\n'), ((1934, 2034), 'bioframe.core.construction.make_viewframe', 'bioframe.core.construction.make_viewframe', (['view_df'], {'view_name_col': 'view_name_col', 'cols': 'cols_view'}), '(view_df, view_name_col=\n view_name_col, cols=cols_view)\n', (1975, 2034), False, 'import bioframe\n'), ((2059, 2241), 'bioframe.assign_view', 'bioframe.assign_view', (['features', 'view_df'], {'drop_unassigned': 'drop_unassigned', 'df_view_col': 'features_view_cols[0]', 'view_name_col': 'view_name_col', 'cols': 'cols_left', 'cols_view': 'cols_view'}), '(features, view_df, drop_unassigned=drop_unassigned,\n df_view_col=features_view_cols[0], view_name_col=view_name_col, cols=\n cols_left, cols_view=cols_view)\n', (2079, 2241), False, 'import bioframe\n'), ((2424, 2607), 'bioframe.assign_view', 'bioframe.assign_view', (['features', 'view_df'], {'drop_unassigned': 'drop_unassigned', 'df_view_col': 'features_view_cols[1]', 'view_name_col': 'view_name_col', 'cols': 'cols_right', 'cols_view': 'cols_view'}), '(features, view_df, drop_unassigned=drop_unassigned,\n df_view_col=features_view_cols[1], view_name_col=view_name_col, cols=\n cols_right, cols_view=cols_view)\n', (2444, 2607), False, 'import bioframe\n'), ((6197, 6241), 'pandas.Series', 'pd.Series', ([], {'index': 'features.index', 'data': 'np.nan'}), '(index=features.index, data=np.nan)\n', (6206, 6241), True, 'import pandas as pd\n'), ((8449, 8488), 'bioframe.make_viewframe', 'bioframe.make_viewframe', (['clr.chromsizes'], {}), 
'(clr.chromsizes)\n', (8472, 8488), False, 'import bioframe\n'), ((8733, 8806), 'bioframe.core.checks._verify_columns', 'bioframe.core.checks._verify_columns', (['track_df', "['chrom', 'start', 'end']"], {}), "(track_df, ['chrom', 'start', 'end'])\n", (8769, 8806), False, 'import bioframe\n'), ((9703, 9795), 'pandas.merge', 'pd.merge', (["track[['chrom', 'start', 'end', name]]", 'bintable'], {'on': "['chrom', 'start', 'end']"}), "(track[['chrom', 'start', 'end', name]], bintable, on=['chrom',\n 'start', 'end'])\n", (9711, 9795), True, 'import pandas as pd\n'), ((3388, 3571), 'bioframe.overlap', 'bioframe.overlap', (['features', 'supports'], {'how': '"""left"""', 'cols1': "['chrom', 'start', 'end']", 'cols2': "['chrom', 'start', 'end']", 'keep_order': '(True)', 'return_overlap': '(True)', 'suffixes': "('_1', '_2')"}), "(features, supports, how='left', cols1=['chrom', 'start',\n 'end'], cols2=['chrom', 'start', 'end'], keep_order=True,\n return_overlap=True, suffixes=('_1', '_2'))\n", (3404, 3571), False, 'import bioframe\n'), ((5344, 5429), 'numpy.where', 'np.where', (["(features['region1'] == features['region2'])", "features['region1']", 'np.nan'], {}), "(features['region1'] == features['region2'], features['region1'],\n np.nan)\n", (5352, 5429), True, 'import numpy as np\n'), ((12826, 12860), 'bioframe.select', 'bioframe.select', (['clr_track', 'region'], {}), '(clr_track, region)\n', (12841, 12860), False, 'import bioframe\n'), ((4266, 4470), 'bioframe.overlap', 'bioframe.overlap', (['features', 'supports'], {'how': '"""left"""', 'cols1': "[f'chrom{idx}', f'start{idx}', f'end{idx}']", 'cols2': "[f'chrom', f'start', f'end']", 'keep_order': '(True)', 'return_overlap': '(True)', 'suffixes': "('_1', '_2')"}), "(features, supports, how='left', cols1=[f'chrom{idx}',\n f'start{idx}', f'end{idx}'], cols2=[f'chrom', f'start', f'end'],\n keep_order=True, return_overlap=True, suffixes=('_1', '_2'))\n", (4282, 4470), False, 'import bioframe\n'), ((12611, 12682), 
'warnings.warn', 'warnings.warn', (['"""less than 50% of valid bins have been assigned a value"""'], {}), "('less than 50% of valid bins have been assigned a value')\n", (12624, 12682), False, 'import warnings\n'), ((9821, 9856), 'numpy.isfinite', 'np.isfinite', (['track[clr_weight_name]'], {}), '(track[clr_weight_name])\n', (9832, 9856), True, 'import numpy as np\n'), ((12583, 12601), 'numpy.sum', 'np.sum', (['valid_bins'], {}), '(valid_bins)\n', (12589, 12601), True, 'import numpy as np\n'), ((13058, 13089), 'bioframe.to_ucsc_string', 'bioframe.to_ucsc_string', (['region'], {}), '(region)\n', (13081, 13089), False, 'import bioframe\n')] |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the MIT License.
# To view a copy of this license, visit https://opensource.org/licenses/MIT
from bongard import LineAction, ArcAction, OneStrokeShape, BongardImage, BongardProblem, BongardImagePainter, \
BongardProblemPainter
from bongard.util_funcs import get_human_designed_shape_annotations, get_attribute_sampling_candidates
from bongard.plot import create_visualized_bongard_problem
import numpy as np
import os
def create_shape(shape_annotation):
    """Build a OneStrokeShape from a human-designed shape annotation.

    shape_annotation layout: (shape_name, super_class, num_actions_computed,
    base_action_func_names, base_action_func_parameters, directions, angles).
    Line/arc rendering types are sampled at random for visual variety.
    """
    line_arc_types = ["normal", "zigzag", "circle", "triangle"]
    func_names = shape_annotation[3]
    func_params = shape_annotation[4]
    turn_directions = shape_annotation[5]
    turn_angles = shape_annotation[6]

    actions = []
    for name, params, direction, angle in zip(
            func_names, func_params, turn_directions, turn_angles):
        if name == "line":
            actions.append(
                LineAction(line_length=params[0],
                           line_type=np.random.choice(line_arc_types),
                           turn_direction=direction, turn_angle=angle))
        elif name == "arc":
            actions.append(
                ArcAction(arc_angle=params[1],
                          arc_type=np.random.choice(line_arc_types),
                          turn_direction=direction, turn_angle=angle,
                          arc_radius=params[0]))
        else:
            raise Exception("Unknown action type!")
    return OneStrokeShape(basic_actions=actions, start_coordinates=None, start_orientation=None)
def create_bongard_problem(positive_shapes, negative_shapes, shape_annocation_dict):
    """Assemble a BongardProblem from positive and negative shape names.

    Typically a Bongard problem consists of seven positive and seven negative
    images; the first six of each are used for "training" and the last is
    reserved for "test".
    """
    bongard_problem_name = "Convex VS Concave"

    def _images_for(shape_names):
        # One single-shape BongardImage per annotated shape name.
        images = []
        for shape_name in shape_names:
            shape = create_shape(shape_annotation=shape_annocation_dict[shape_name])
            images.append(BongardImage(one_stroke_shapes=[shape]))
        return images

    positive_images = _images_for(positive_shapes)
    negative_images = _images_for(negative_shapes)
    return BongardProblem(
        positive_bongard_images=positive_images,
        negative_bongard_images=negative_images,
        problem_name=bongard_problem_name,
        positive_rules=None,
        negative_rules=None,
    )
if __name__ == "__main__":
    # Fixed seed so shape sampling and painting are reproducible.
    random_seed = 0
    bongard_problem_ps_dir = "./demo/ps"
    bongard_problem_png_dir = "./demo/png"
    bongard_problem_vis_filepath = "./demo/bongard_demo.png"
    annotated_shape_table_filepath = "../../data/human_designed_shapes.tsv"
    attribute_table_filepath = "../../data/human_designed_shapes_attributes.tsv"
    if not os.path.exists(bongard_problem_ps_dir):
        os.makedirs(bongard_problem_ps_dir)
    if not os.path.exists(bongard_problem_png_dir):
        os.makedirs(bongard_problem_png_dir)
    np.random.seed(random_seed)
    shape_annocation_dict = get_human_designed_shape_annotations(
        annotated_shape_table_filepath=annotated_shape_table_filepath)
    attribute_candidates = get_attribute_sampling_candidates(attribute_table_filepath=attribute_table_filepath,
                                                       min_num_positives=7, min_num_negatives=7)
    # attribute_candidates["convex"] holds two shape-name lists; [0] appears
    # to be shapes with the attribute (convex) and [1] those without
    # (concave) — TODO confirm against get_attribute_sampling_candidates.
    convex_shapes = attribute_candidates["convex"][0]
    concave_shapes = attribute_candidates["convex"][1]
    positive_shapes = np.random.choice(convex_shapes, size=7, replace=False)
    negative_shapes = np.random.choice(concave_shapes, size=7, replace=False)
    # Create an instance of Bongard problem based our design.
    bongard_problem = create_bongard_problem(positive_shapes=positive_shapes, negative_shapes=negative_shapes,
                                             shape_annocation_dict=shape_annocation_dict)
    # Use Bongard problem painter to draw Bongard problems.
    # The Bongard problem painter supports creating Bongard problems whose image has at most two shapes.
    bongard_problem_painter = BongardProblemPainter(random_seed=random_seed)
    # The Bongard painter will automatically create Bongard problems in the specified directories.
    # The Bongard images created will be save to hard drive
    bongard_problem_painter.create_bongard_problem(bongard_problem=bongard_problem,
                                                   bongard_problem_ps_dir=bongard_problem_ps_dir,
                                                   bongard_problem_png_dir=bongard_problem_png_dir)
    # Create a merged image for Bongard problem human-readable visualization, using the helper function.
    create_visualized_bongard_problem(bongard_problem_dir=bongard_problem_png_dir,
                                      bongard_problem_visualized_filepath=bongard_problem_vis_filepath)
| [
"bongard.util_funcs.get_attribute_sampling_candidates",
"os.path.exists",
"os.makedirs",
"numpy.random.choice",
"bongard.BongardProblemPainter",
"bongard.plot.create_visualized_bongard_problem",
"numpy.random.seed",
"bongard.BongardImage",
"bongard.util_funcs.get_human_designed_shape_annotations",
... | [((1885, 1979), 'bongard.OneStrokeShape', 'OneStrokeShape', ([], {'basic_actions': 'base_actions', 'start_coordinates': 'None', 'start_orientation': 'None'}), '(basic_actions=base_actions, start_coordinates=None,\n start_orientation=None)\n', (1899, 1979), False, 'from bongard import LineAction, ArcAction, OneStrokeShape, BongardImage, BongardProblem, BongardImagePainter, BongardProblemPainter\n'), ((3050, 3264), 'bongard.BongardProblem', 'BongardProblem', ([], {'positive_bongard_images': 'bongard_problem_positive_images', 'negative_bongard_images': 'bongard_problem_negative_images', 'problem_name': 'bongard_problem_name', 'positive_rules': 'None', 'negative_rules': 'None'}), '(positive_bongard_images=bongard_problem_positive_images,\n negative_bongard_images=bongard_problem_negative_images, problem_name=\n bongard_problem_name, positive_rules=None, negative_rules=None)\n', (3064, 3264), False, 'from bongard import LineAction, ArcAction, OneStrokeShape, BongardImage, BongardProblem, BongardImagePainter, BongardProblemPainter\n'), ((3946, 3973), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (3960, 3973), True, 'import numpy as np\n'), ((4003, 4107), 'bongard.util_funcs.get_human_designed_shape_annotations', 'get_human_designed_shape_annotations', ([], {'annotated_shape_table_filepath': 'annotated_shape_table_filepath'}), '(annotated_shape_table_filepath=\n annotated_shape_table_filepath)\n', (4039, 4107), False, 'from bongard.util_funcs import get_human_designed_shape_annotations, get_attribute_sampling_candidates\n'), ((4139, 4270), 'bongard.util_funcs.get_attribute_sampling_candidates', 'get_attribute_sampling_candidates', ([], {'attribute_table_filepath': 'attribute_table_filepath', 'min_num_positives': '(7)', 'min_num_negatives': '(7)'}), '(attribute_table_filepath=\n attribute_table_filepath, min_num_positives=7, min_num_negatives=7)\n', (4172, 4270), False, 'from bongard.util_funcs import 
get_human_designed_shape_annotations, get_attribute_sampling_candidates\n'), ((4460, 4514), 'numpy.random.choice', 'np.random.choice', (['convex_shapes'], {'size': '(7)', 'replace': '(False)'}), '(convex_shapes, size=7, replace=False)\n', (4476, 4514), True, 'import numpy as np\n'), ((4537, 4592), 'numpy.random.choice', 'np.random.choice', (['concave_shapes'], {'size': '(7)', 'replace': '(False)'}), '(concave_shapes, size=7, replace=False)\n', (4553, 4592), True, 'import numpy as np\n'), ((5052, 5098), 'bongard.BongardProblemPainter', 'BongardProblemPainter', ([], {'random_seed': 'random_seed'}), '(random_seed=random_seed)\n', (5073, 5098), False, 'from bongard import LineAction, ArcAction, OneStrokeShape, BongardImage, BongardProblem, BongardImagePainter, BongardProblemPainter\n'), ((5649, 5803), 'bongard.plot.create_visualized_bongard_problem', 'create_visualized_bongard_problem', ([], {'bongard_problem_dir': 'bongard_problem_png_dir', 'bongard_problem_visualized_filepath': 'bongard_problem_vis_filepath'}), '(bongard_problem_dir=\n bongard_problem_png_dir, bongard_problem_visualized_filepath=\n bongard_problem_vis_filepath)\n', (5682, 5803), False, 'from bongard.plot import create_visualized_bongard_problem\n'), ((2626, 2665), 'bongard.BongardImage', 'BongardImage', ([], {'one_stroke_shapes': '[shape]'}), '(one_stroke_shapes=[shape])\n', (2638, 2665), False, 'from bongard import LineAction, ArcAction, OneStrokeShape, BongardImage, BongardProblem, BongardImagePainter, BongardProblemPainter\n'), ((2925, 2964), 'bongard.BongardImage', 'BongardImage', ([], {'one_stroke_shapes': '[shape]'}), '(one_stroke_shapes=[shape])\n', (2937, 2964), False, 'from bongard import LineAction, ArcAction, OneStrokeShape, BongardImage, BongardProblem, BongardImagePainter, BongardProblemPainter\n'), ((3760, 3798), 'os.path.exists', 'os.path.exists', (['bongard_problem_ps_dir'], {}), '(bongard_problem_ps_dir)\n', (3774, 3798), False, 'import os\n'), ((3808, 3843), 'os.makedirs', 
'os.makedirs', (['bongard_problem_ps_dir'], {}), '(bongard_problem_ps_dir)\n', (3819, 3843), False, 'import os\n'), ((3855, 3894), 'os.path.exists', 'os.path.exists', (['bongard_problem_png_dir'], {}), '(bongard_problem_png_dir)\n', (3869, 3894), False, 'import os\n'), ((3904, 3940), 'os.makedirs', 'os.makedirs', (['bongard_problem_png_dir'], {}), '(bongard_problem_png_dir)\n', (3915, 3940), False, 'import os\n'), ((1321, 1353), 'numpy.random.choice', 'np.random.choice', (['line_arc_types'], {}), '(line_arc_types)\n', (1337, 1353), True, 'import numpy as np\n'), ((1588, 1620), 'numpy.random.choice', 'np.random.choice', (['line_arc_types'], {}), '(line_arc_types)\n', (1604, 1620), True, 'import numpy as np\n')] |
"""
Line styles
-----------
The :meth:`pygmt.Figure.plot` method can plot lines in different styles.
The default line style is a 0.25-point wide, black, solid line, and can be
customized via the ``pen`` argument.
A *pen* in GMT has three attributes: *width*, *color*, and *style*.
The *style* attribute controls the appearance of the line.
Giving “dotted” or “.” yields a dotted line, whereas a dashed pen is requested
with “dashed” or “-”. Also combinations of dots and dashes, like “.-” for a
dot-dashed line, are allowed.
For more advanced *pen* attributes, see the GMT cookbook
:gmt-docs:`cookbook/features.html#wpen-attrib`.
"""
import numpy as np
import pygmt

# Generate a sample line for plotting
x = np.linspace(0, 10, 500)
y = np.sin(x)

fig = pygmt.Figure()
fig.basemap(region=[0, 10, -3, 3], projection="X15c/8c", frame=["xaf", "yaf", "WSrt"])

# Plot the line using the default line style
fig.plot(x=x, y=y)

# Plot the lines using different line styles
# (pen strings are "width,color,style"; offsets separate the curves vertically)
fig.plot(x=x, y=y + 1.5, pen="1p,red,-")  # dashed
fig.plot(x=x, y=y + 1.0, pen="2p,blue,.")  # dotted
fig.plot(x=x, y=y + 0.5, pen="1p,red,-.")  # dash-dotted
fig.plot(x=x, y=y - 0.5, pen="2p,blue,..-")  # dot-dot-dashed
fig.plot(x=x, y=y - 1.0, pen="3p,tomato,--.")  # dash-dash-dotted
# Custom pattern: "4_2" = 4-point dash / 2-point gap, ":2p" = phase offset
fig.plot(x=x, y=y - 1.5, pen="3p,tomato,4_2:2p")

fig.show()
| [
"numpy.sin",
"numpy.linspace",
"pygmt.Figure"
] | [((714, 737), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(500)'], {}), '(0, 10, 500)\n', (725, 737), True, 'import numpy as np\n'), ((742, 751), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (748, 751), True, 'import numpy as np\n'), ((759, 773), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (771, 773), False, 'import pygmt\n')] |
'''
This script gets a file with metadata, learning features, and target
values for each line, and plots different features against the target.
'''
# Column names for the learning features, in the same order as they appear in
# the input file between FIRST_FEATURE_ID and FIRST_TARGET_ID.
FEATURE_NAMES = ['query_num_of_columns','query_num_of_rows','query_row_column_ratio','query_max_mean','query_max_outlier_percentage','query_max_skewness','query_max_kurtosis','query_max_unique','candidate_num_of_columns','candidate_num_rows','candidate_row_column_ratio','candidate_max_mean','candidate_max_outlier_percentage','candidate_max_skewness','candidate_max_kurtosis','candidate_max_unique','query_target_max_pearson','query_target_max_spearman','query_target_max_covariance','query_target_max_mutual_info','candidate_target_max_pearson','candidate_target_max_spearman','candidate_target_max_covariance','candidate_target_max_mutual_info','max_pearson_difference', 'containment_fraction']
# Index (from the end of each line) of the target column: gain in R2 score.
TARGET_GAIN_IN_R2_SCORE_ID = -1
# Features start at the 4th comma-separated field (first 3 fields are metadata).
FIRST_FEATURE_ID = 3
# The last 4 fields of each line are target values.
FIRST_TARGET_ID = -4
# Outlier-rejection thresholds, in MAD units and z-score units respectively.
OUTLIER_THRESHOLD_MAD = 2
OUTLIER_THRESHOLD_ZSCORES = 3
import numpy as np
import sys
import matplotlib.pyplot as plt
from scipy.stats import median_absolute_deviation
def normalize(data):
    """Scale *data* so that its maximum value maps to 1.0.

    Returns a new list.  An empty input yields an empty list (the original
    raised ValueError from ``max([])``).  Note: if ``max(data)`` is 0 this
    still raises ZeroDivisionError, as before.
    """
    if not data:
        return []
    max_ = max(data)
    return [i / max_ for i in data]
def remove_outliers_based_on_zscores(x_data, y_data):
    """Drop (x, y) pairs where either coordinate is a z-score outlier.

    A pair is kept only if both |z_x| and |z_y| are below
    OUTLIER_THRESHOLD_ZSCORES; z-scores use the population std (np.std).
    Returns (filtered_x, filtered_y) with pairs kept aligned.
    """
    mean_x, std_x = np.mean(x_data), np.std(x_data)
    mean_y, std_y = np.mean(y_data), np.std(y_data)
    kept_x, kept_y = [], []
    for x, y in zip(x_data, y_data):
        z_x = np.fabs((x - mean_x) / std_x)
        z_y = np.fabs((y - mean_y) / std_y)
        if z_x < OUTLIER_THRESHOLD_ZSCORES and z_y < OUTLIER_THRESHOLD_ZSCORES:
            kept_x.append(x)
            kept_y.append(y)
    return kept_x, kept_y
def remove_outliers_based_on_mad(x_data, y_data):
    """Drop (x, y) pairs where either coordinate deviates from its median by
    more than OUTLIER_THRESHOLD_MAD scaled median absolute deviations.

    Returns (filtered_x, filtered_y) with pairs kept aligned.
    """
    # scipy.stats.median_absolute_deviation was deprecated in SciPy 1.5 and
    # removed in 1.9; median_abs_deviation(..., scale="normal") is the
    # drop-in replacement (same ~1.4826 normal-consistency scaling as the
    # old function's default).
    from scipy.stats import median_abs_deviation
    mad_x = median_abs_deviation(x_data, scale="normal")
    median_x = np.median(x_data)
    mad_y = median_abs_deviation(y_data, scale="normal")
    median_y = np.median(y_data)
    filtered_x = []
    filtered_y = []
    for x, y in zip(x_data, y_data):
        if (np.fabs((x - median_x) / mad_x) < OUTLIER_THRESHOLD_MAD
                and np.fabs((y - median_y) / mad_y) < OUTLIER_THRESHOLD_MAD):
            filtered_x.append(x)
            filtered_y.append(y)
    return filtered_x, filtered_y
def plot_scatterplot(feature_data, target_data, image_name, xlabel, ylabel):
    """Save a scatter plot of one feature against the target to *image_name*.

    Z-score outlier pairs are removed first; if nothing survives the
    filtering, no plot is produced.
    """
    xs, ys = remove_outliers_based_on_zscores(feature_data, target_data)
    if not xs or not ys:
        return
    plt.scatter(xs, ys, alpha=0.5)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.tight_layout()
    plt.savefig(image_name, dpi=300)
    plt.close()
if __name__ == '__main__':
    filename = sys.argv[1]
    # Read the whole input inside a context manager so the file handle is
    # closed (the original leaked the handle and ignored `filename`).
    with open(filename) as input_file:
        lines = input_file.readlines()
    features = []
    target = []
    for line in lines:
        # Line layout: 3 metadata fields, then features, then 4 target
        # fields; the last target is the gain in R2 score.
        fields = line.strip().split(',')
        features.append([float(i) for i in fields[FIRST_FEATURE_ID:FIRST_TARGET_ID]])
        target.append(float(fields[TARGET_GAIN_IN_R2_SCORE_ID]))
    features = np.array(features)
    target = np.array(target)
    num_features = features.shape[1]
    # One scatter plot per feature against the target.
    for i in range(num_features):
        plot_scatterplot(features[:,i], target, FEATURE_NAMES[i] + '_vs_gain_in_r2_score.png', FEATURE_NAMES[i], 'gain_in_r2_score')
| [
"numpy.mean",
"numpy.fabs",
"numpy.median",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"scipy.stats.median_absolute_deviation",
"numpy.array",
"matplotlib.pyplot.scatter",
"numpy.std",
"matplotlib.pyplot.tight_layout"
] | [((1237, 1252), 'numpy.mean', 'np.mean', (['x_data'], {}), '(x_data)\n', (1244, 1252), True, 'import numpy as np\n'), ((1265, 1279), 'numpy.std', 'np.std', (['x_data'], {}), '(x_data)\n', (1271, 1279), True, 'import numpy as np\n'), ((1293, 1308), 'numpy.mean', 'np.mean', (['y_data'], {}), '(y_data)\n', (1300, 1308), True, 'import numpy as np\n'), ((1321, 1335), 'numpy.std', 'np.std', (['y_data'], {}), '(y_data)\n', (1327, 1335), True, 'import numpy as np\n'), ((1704, 1737), 'scipy.stats.median_absolute_deviation', 'median_absolute_deviation', (['x_data'], {}), '(x_data)\n', (1729, 1737), False, 'from scipy.stats import median_absolute_deviation\n'), ((1753, 1770), 'numpy.median', 'np.median', (['x_data'], {}), '(x_data)\n', (1762, 1770), True, 'import numpy as np\n'), ((1783, 1816), 'scipy.stats.median_absolute_deviation', 'median_absolute_deviation', (['y_data'], {}), '(y_data)\n', (1808, 1816), False, 'from scipy.stats import median_absolute_deviation\n'), ((1832, 1849), 'numpy.median', 'np.median', (['y_data'], {}), '(y_data)\n', (1841, 1849), True, 'import numpy as np\n'), ((2461, 2510), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_data', 'target_data'], {'alpha': '(0.5)'}), '(feature_data, target_data, alpha=0.5)\n', (2472, 2510), True, 'import matplotlib.pyplot as plt\n'), ((2515, 2533), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (2525, 2533), True, 'import matplotlib.pyplot as plt\n'), ((2538, 2556), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (2548, 2556), True, 'import matplotlib.pyplot as plt\n'), ((2561, 2579), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2577, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2616), 'matplotlib.pyplot.savefig', 'plt.savefig', (['image_name'], {'dpi': '(300)'}), '(image_name, dpi=300)\n', (2595, 2616), True, 'import matplotlib.pyplot as plt\n'), ((2621, 2632), 'matplotlib.pyplot.close', 'plt.close', ([], {}), 
'()\n', (2630, 2632), True, 'import matplotlib.pyplot as plt\n'), ((3000, 3018), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (3008, 3018), True, 'import numpy as np\n'), ((3032, 3048), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (3040, 3048), True, 'import numpy as np\n'), ((1424, 1453), 'numpy.fabs', 'np.fabs', (['((x - mean_x) / std_x)'], {}), '((x - mean_x) / std_x)\n', (1431, 1453), True, 'import numpy as np\n'), ((1484, 1513), 'numpy.fabs', 'np.fabs', (['((y - mean_y) / std_y)'], {}), '((y - mean_y) / std_y)\n', (1491, 1513), True, 'import numpy as np\n'), ((1938, 1969), 'numpy.fabs', 'np.fabs', (['((x - median_x) / mad_x)'], {}), '((x - median_x) / mad_x)\n', (1945, 1969), True, 'import numpy as np\n'), ((1996, 2027), 'numpy.fabs', 'np.fabs', (['((y - median_y) / mad_y)'], {}), '((y - median_y) / mad_y)\n', (2003, 2027), True, 'import numpy as np\n')] |
from __future__ import annotations
__all__ = ['lock_seed', 'trace', 'trace_module', 'whereami']
import gc
import inspect
import os
import random
import types
from collections.abc import Iterator
from contextlib import suppress
from itertools import islice
from types import FrameType
import numpy as np
import wrapt
from ._import_hook import register_post_import_hook
def _get_module(frame: FrameType) -> str:
if (module := inspect.getmodule(frame)) and module.__spec__:
return module.__spec__.name
return '__main__'
def _get_function(frame: FrameType) -> str:
function = frame.f_code.co_name
function = next(
(f.__qualname__
for f in gc.get_referrers(frame.f_code) if inspect.isfunction(f)),
function)
return '' if function == '<module>' else function
def _stack(frame: FrameType | None) -> Iterator[str]:
    """Yield 'module:function:lineno' for each frame, innermost first."""
    current = frame
    while current:
        yield f'{_get_module(current)}:{_get_function(current)}:{current.f_lineno}'
        # A module-level frame terminates the walk so we never cross into
        # the importing machinery above it.
        if current.f_code.co_name == '<module>':
            return
        current = current.f_back
def stack(skip: int = 0, limit: int | None = None) -> Iterator[str]:
    """Returns iterator of FrameInfos, stopping on module-level scope"""
    # Drop our own frame plus `skip` additional caller frames.
    calls = islice(_stack(inspect.currentframe()), skip + 1, None)
    if limit:
        # Keep at most `limit` outer frames.
        calls = islice(calls, limit)
    return calls
def whereami(skip: int = 0, limit: int | None = None) -> str:
    """Render the current call chain as 'outermost -> ... -> innermost'."""
    frames = list(stack(skip + 1, limit))
    frames.reverse()
    return ' -> '.join(frames)
@wrapt.decorator
def trace(fn, _, args, kwargs):
    # Log the call site chain and the fully qualified name of the wrapped
    # callable, then delegate to it unchanged.  skip=3 presumably hops over
    # the wrapt proxy frames between the real caller and this wrapper --
    # confirm if wrapt internals change.
    print(
        f'<({whereami(3)})> : {fn.__module__ or ""}.{fn.__qualname__}',
        flush=True)
    return fn(*args, **kwargs)
def _set_trace(obj, seen=None, prefix=None, module=None):
    # Recursively wrap every callable reachable from `obj` with `trace`.
    # TODO: rewrite using unittest.mock
    if isinstance(obj, types.ModuleType):
        if seen is None:
            # Root call: remember the package prefix so recursion stays
            # inside it; `seen` guards against revisiting modules.
            seen = set()
            prefix = obj.__name__
        if not obj.__name__.startswith(prefix) or obj.__name__ in seen:
            return
        seen.add(obj.__name__)
        for name in dir(obj):
            _set_trace(
                getattr(obj, name), module=obj, seen=seen, prefix=prefix)
    # Modules fall through here and return (they are not callable).
    if not callable(obj):
        return
    if not hasattr(obj, '__dict__'):
        # Callables without a __dict__ (e.g. builtins) cannot be patched
        # attribute-by-attribute; rebind a traced wrapper on the owning module.
        setattr(module, obj.__qualname__, trace(obj))
        print(f'wraps "{module.__name__}:{obj.__qualname__}"')
        return
    for name in obj.__dict__:
        with suppress(AttributeError, TypeError):
            member = getattr(obj, name)
            if not callable(member):
                continue
            decorated = trace(member)
            # Copy __module__ from the first of (wrapper, member, owner)
            # that exposes one; the for/else falls back to the owning
            # module's name.
            for m in (decorated, member, obj):
                with suppress(AttributeError):
                    decorated.__module__ = m.__module__
                    break
            else:
                decorated.__module__ = getattr(module, '__name__', '')
            setattr(obj, name, decorated)
            print(f'wraps "{module.__name__}:{obj.__qualname__}.{name}"')
def trace_module(name):
    """Enables call logging for each callable inside module name"""
    # Defer instrumentation until the module is imported; presumably the
    # hook also fires for already-imported modules — verify against
    # ._import_hook.register_post_import_hook semantics.
    register_post_import_hook(_set_trace, name)
# ---------------------------------------------------------------------------
def lock_seed(seed: int) -> None:
    """Set seed for all modules: random/numpy/torch.

    random and numpy are seeded immediately; torch is seeded lazily through
    a post-import hook so that importing torch is never forced here.
    """
    random.seed(seed)
    # NOTE: setting PYTHONHASHSEED at runtime does not change the current
    # interpreter's hash randomization; it only affects subprocesses.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

    def _torch_seed(torch):
        # The hook receives the freshly imported torch module as `torch`.
        # The original also re-imported torch here, shadowing the parameter
        # with the same module object — removed as redundant.
        import torch.backends.cudnn  # ensure the cudnn submodule is loaded
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    register_post_import_hook(_torch_seed, 'torch')
| [
"torch.manual_seed",
"itertools.islice",
"inspect.currentframe",
"inspect.getmodule",
"random.seed",
"contextlib.suppress",
"numpy.random.seed",
"gc.get_referrers",
"inspect.isfunction"
] | [((1246, 1268), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1266, 1268), False, 'import inspect\n'), ((1307, 1336), 'itertools.islice', 'islice', (['calls', '(skip + 1)', 'None'], {}), '(calls, skip + 1, None)\n', (1313, 1336), False, 'from itertools import islice\n'), ((1419, 1439), 'itertools.islice', 'islice', (['calls', 'limit'], {}), '(calls, limit)\n', (1425, 1439), False, 'from itertools import islice\n'), ((3412, 3429), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3423, 3429), False, 'import random\n'), ((3479, 3499), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3493, 3499), True, 'import numpy as np\n'), ((3595, 3618), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3612, 3618), False, 'import torch\n'), ((434, 458), 'inspect.getmodule', 'inspect.getmodule', (['frame'], {}), '(frame)\n', (451, 458), False, 'import inspect\n'), ((2536, 2571), 'contextlib.suppress', 'suppress', (['AttributeError', 'TypeError'], {}), '(AttributeError, TypeError)\n', (2544, 2571), False, 'from contextlib import suppress\n'), ((684, 714), 'gc.get_referrers', 'gc.get_referrers', (['frame.f_code'], {}), '(frame.f_code)\n', (700, 714), False, 'import gc\n'), ((718, 739), 'inspect.isfunction', 'inspect.isfunction', (['f'], {}), '(f)\n', (736, 739), False, 'import inspect\n'), ((2782, 2806), 'contextlib.suppress', 'suppress', (['AttributeError'], {}), '(AttributeError)\n', (2790, 2806), False, 'from contextlib import suppress\n')] |
"""
UCF Crime dataset classes:
['Normal','Abuse', 'Arrest', 'Arson', 'Assault', 'Burglary', 'Explosion', 'Fighting', 'RoadAccidents', 'Robbery', 'Shooting',
'Shoplifting', 'Stealing', 'Vandalism']
Two-branch grouping: ['Normal', 'Abnormal']
Loads unmerged per-video C3D features for the self-reasoning framework.
"""
import torch
import torch.nn as nn
from torch.utils.data import Dataset,DataLoader
import os
import glob
import cv2
import numpy as np
from PIL import Image
import joblib
from net.utils.parser import parse_args,load_config
from .build import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class Ucf_Crime_Feature_SRF_Normal(Dataset):
"""
UCF Crime dataset
trianing:
800 normal
810 abnormal
split it to over-lapped seg
each video has multi seg seg_num=video_len//16
"""
def __init__(self,mode,cfg):
assert mode in ["update_epoch"] # only train data is included
self.video_root=r"E:\datasets\UCF_C3D_Features_Npy"
#cfg.UCF_CRIME_FEATURE.PATH_TO_DATA_DIR
self.cfg=cfg
self.mode="train"
self.single_feature_len=4096
self.normal_feature_paths = []
self._consturct()
self.normal_feature_num=len(self.normal_feature_paths)
# print()
def _consturct(self):
"""
Training-Normal-Videos-segment
Training-Abnormal-Videos-segment
laod feature
:return:
"""
self.normal_feature_paths=(
glob.glob(
os.path.join(
self.video_root, self.mode, "Normal", "*.npy"
)
)
)
def __getitem__(self, index):
"""
:param index:
:return: [1,T,4096]
"""
# load normal feature for update memory bank
normal_feature = self.load_feature(self.normal_feature_paths[index])
return normal_feature
def load_feature(self,feature_path):
feature=np.load(feature_path) # [size,4096]
tensor_feature = torch.from_numpy(feature)
return tensor_feature
def __len__(self):
return len(self.normal_feature_paths)
if __name__=="__main__":
args=parse_args()
cfg=load_config(args)
# # print(type(cfg))
# cfg=None
data_loader=DataLoader(Ucf_Crime_Feature_SRF_Normal(mode="train",cfg=cfg),batch_size=1,shuffle=False)
#
for step ,(n_feature) in enumerate(data_loader):
print("step",step)
print(n_feature.shape)
print("")
| [
"net.utils.parser.load_config",
"os.path.join",
"torch.from_numpy",
"net.utils.parser.parse_args",
"numpy.load"
] | [((2199, 2211), 'net.utils.parser.parse_args', 'parse_args', ([], {}), '()\n', (2209, 2211), False, 'from net.utils.parser import parse_args, load_config\n'), ((2220, 2237), 'net.utils.parser.load_config', 'load_config', (['args'], {}), '(args)\n', (2231, 2237), False, 'from net.utils.parser import parse_args, load_config\n'), ((1967, 1988), 'numpy.load', 'np.load', (['feature_path'], {}), '(feature_path)\n', (1974, 1988), True, 'import numpy as np\n'), ((2028, 2053), 'torch.from_numpy', 'torch.from_numpy', (['feature'], {}), '(feature)\n', (2044, 2053), False, 'import torch\n'), ((1488, 1547), 'os.path.join', 'os.path.join', (['self.video_root', 'self.mode', '"""Normal"""', '"""*.npy"""'], {}), "(self.video_root, self.mode, 'Normal', '*.npy')\n", (1500, 1547), False, 'import os\n')] |
import LPRLite as pr
import cv2
import os
import numpy as np
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
def compute_iou(rec1, rec2):
"""
computing IoU
:param rec1: (y0, x0, y1, x1), which reflects
(top, left, bottom, right)
:param rec2: (y0, x0, y1, x1)
:return: scala value of IoU
"""
# computing area of each rectangles
S_rec1 = (rec1[2]) * (rec1[3] )
S_rec2 = (rec2[2] ) * (rec2[3] )
# computing the sum_area
sum_area = S_rec1 + S_rec2
# find the each edge of intersect rectangle
left_line = max(rec1[1], rec2[1])
right_line = min(rec1[1] + rec1[3], rec2[1] + rec2[3])
top_line = max(rec1[0], rec2[0])
bottom_line = min(rec1[0] + rec1[2], rec2[0] + rec2[2])
# judge if there is an intersect
if left_line >= right_line or top_line >= bottom_line:
return 0
else:
intersect = (right_line - left_line) * (bottom_line - top_line)
return intersect / (sum_area - intersect)
fontC = ImageFont.truetype("Font/platech.ttf", 30, 0)
def drawRectBox(image):
img = Image.fromarray(image)
draw = ImageDraw.Draw(img)
for i in range(len(rec_plate)):
x = box_rect[i][0]
y = box_rect[i][1]
w = box_rect[i][0] + box_rect[i][2]
h = box_rect[i][1] + box_rect[i][3]
draw.line([x, y, w, y], (0, 255, 0), width=5)
draw.line([x, y, x, h], (0, 255, 0), width=5)
draw.line([x, h, w, h], (0, 255, 0), width=5)
draw.line([w, y, w, h], (0, 255, 0), width=5)
# cv2.rectangle(image, (int(box_rect[i][0]), int(box_rect[i][1])), (int(box_rect[i][0] + box_rect[i][2]), int(box_rect[i][1] + box_rect[i][3])), (0, 255, 0), 2,
# cv2.LINE_AA)
draw.text((int(box_rect[i][0] + 1), int(box_rect[i][1] - 30)), rec_plate[i].encode("utf-8").decode('utf-8'),
(0, 0, 255),
font=fontC)
imagex = np.array(img)
return imagex
testdata_path = 'image'
model = pr.LPR("model/cascade.xml", "model/model12.h5", "model/ocr_plate_all_gru.h5")
for images in os.listdir(testdata_path):
print("图片名称:", images)
filename = os.path.splitext(os.path.split(images)[1])[0]
file_path = testdata_path + "/" + images
grr = cv2.imread(file_path)
box_rect = []
rec_plate = []
confidencelist = []
remove = []
for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
if confidence > 0.7:
box_rect.append(rect)
# rec_plate.append(pstr + " " + str(round(confidence, 2)))
rec_plate.append(pstr)
confidencelist.append(confidence)
print("plate_str:", pstr)
print("plate_confidence", str(round(confidence, 2)))
#iou去重
for i in range(len(rec_plate)):
for j in range(len(rec_plate)):
iou = compute_iou(box_rect[i], box_rect[j])
# print(iou)
if iou > 0.5 and iou < 0.98:
if confidencelist[i] < confidencelist[j]:
remove.append(i)
else:
remove.append(j)
print(box_rect)
remove = list(set(remove))#列表去重
print(remove)
flag = False
if len(remove) < 2:
for i in range(len(remove)):
box_rect.remove(box_rect[remove[i]])
rec_plate.remove(rec_plate[remove[i]])
else:
for i in range(len(remove)):
if flag == False:
box_rect.remove(box_rect[remove[i]])
rec_plate.remove(rec_plate[remove[i]])
flag = True
else:
box_rect.remove(box_rect[remove[i-1]])
rec_plate.remove(rec_plate[remove[i-1]])
image = drawRectBox(grr)
cv2.imwrite('images_rec/' + filename + '.jpg', image)
| [
"cv2.imwrite",
"PIL.Image.fromarray",
"os.listdir",
"LPRLite.LPR",
"PIL.ImageFont.truetype",
"os.path.split",
"numpy.array",
"PIL.ImageDraw.Draw",
"cv2.imread"
] | [((1029, 1074), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""Font/platech.ttf"""', '(30)', '(0)'], {}), "('Font/platech.ttf', 30, 0)\n", (1047, 1074), False, 'from PIL import ImageFont\n'), ((2022, 2099), 'LPRLite.LPR', 'pr.LPR', (['"""model/cascade.xml"""', '"""model/model12.h5"""', '"""model/ocr_plate_all_gru.h5"""'], {}), "('model/cascade.xml', 'model/model12.h5', 'model/ocr_plate_all_gru.h5')\n", (2028, 2099), True, 'import LPRLite as pr\n'), ((2114, 2139), 'os.listdir', 'os.listdir', (['testdata_path'], {}), '(testdata_path)\n', (2124, 2139), False, 'import os\n'), ((1111, 1133), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1126, 1133), False, 'from PIL import Image\n'), ((1145, 1164), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (1159, 1164), False, 'from PIL import ImageDraw\n'), ((1956, 1969), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1964, 1969), True, 'import numpy as np\n'), ((2284, 2305), 'cv2.imread', 'cv2.imread', (['file_path'], {}), '(file_path)\n', (2294, 2305), False, 'import cv2\n'), ((3765, 3818), 'cv2.imwrite', 'cv2.imwrite', (["('images_rec/' + filename + '.jpg')", 'image'], {}), "('images_rec/' + filename + '.jpg', image)\n", (3776, 3818), False, 'import cv2\n'), ((2200, 2221), 'os.path.split', 'os.path.split', (['images'], {}), '(images)\n', (2213, 2221), False, 'import os\n')] |
import os
import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
# ----- CONFIGURE TENSORFLOW -----
# This step might be needed in case cuDNN
# gives problems with convolutions
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# --------------------------------
from datasets.celeba.dataloader import DataSequence
from utils.file_utils import makedir_if_not_exists
from utils.visualization_utils import save_plot_batch
from callbacks import ImagesLoggingCallback
# Load an experiment
#from experiments.train_base_dcgan import *
from experiments.train_hinge_dcgan_spect_norm_pixelnorm_minibatchstd_self_attention import *
# FILE PARAMETERS
model_save_dir = "saved_models/{}/".format(model_name)
model_images_save_base_dir = "gen/{}".format(model_name)
model_gen_sample_dir = "gen/{}/sample/".format(model_name)
model_gen_real_dir = "gen/{}/real_cond/".format(model_name)
# make model directories if they no exist
makedir_if_not_exists(model_save_dir)
makedir_if_not_exists(model_gen_sample_dir)
makedir_if_not_exists(model_gen_real_dir)
# prepare train data sequence
train_data_df = pd.read_csv(dataset_attr_file_train)
training_generator = DataSequence(train_data_df, dataset_images_path, batch_size=batch_size)
# take first batch of validation dataset for visual results report
# (i.e. conditioned generation based on first batch conditions)
valid_cond_batch = DataSequence(train_data_df, dataset_images_path, batch_size=batch_size, mode="valid")
_, real_view_conditions = next(iter(valid_cond_batch))
real_view_conditions = real_view_conditions[:25]
# take apart a batch for reconstruction
view_cond = np.zeros((25, conditional_dim), dtype=np.float32)
view_cond[:, 31] = 1.0 # all smile
view_cond = view_cond.astype(np.float32)
if load_model:
# fit just for shape (no steps are performed)
gan_model.fit(training_generator, epochs=1, steps_per_epoch=1)
# load model's weights
gan_model.load_weights(model_save_dir+"/model_{}.h5".format(load_epoch))
# load optimizer's state
with open('saved_optimizers/{}_d_optimizer_weights.pkl'.format(model_name), 'rb') as f:
weights = pickle.load(f)
# set manually
for i in range(len(weights)):
gan_model.d_optimizer.weights[i] = weights[i]
with open('saved_optimizers/{}_g_optimizer_weights.pkl'.format(model_name), 'rb') as f:
weights = pickle.load(f)
# set manually
for i in range(len(weights)):
gan_model.g_optimizer.weights[i] = weights[i]
else:
load_epoch = 0
train_callbacks.append(ImagesLoggingCallback(25, latent_dim, view_cond, real_view_conditions, model_images_save_base_dir))
# Train the model
history = gan_model.fit(training_generator,
use_multiprocessing=True,
workers=8,
epochs=n_epochs,
callbacks=train_callbacks,
initial_epoch=load_epoch
)
"""
# Save optimizer's state manually
import pickle
with open('saved_optimizers/{}_d_optimizer_weights.pkl'.format(model_name), 'wb') as f:
pickle.dump(getattr(gan_model.d_optimizer, 'weights'), f)
with open('saved_optimizers/{}_g_optimizer_weights.pkl'.format(model_name), 'wb') as f:
pickle.dump(getattr(gan_model.g_optimizer, 'weights'), f)
# gan_model.save('dcgan_spect_norm/model_dcgan') # should also save optimizer state
""" | [
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.InteractiveSession",
"callbacks.ImagesLoggingCallback",
"pandas.read_csv",
"pickle.load",
"numpy.zeros",
"datasets.celeba.dataloader.DataSequence",
"utils.file_utils.makedir_if_not_exists"
] | [((308, 321), 'tensorflow.compat.v1.ConfigProto', 'ConfigProto', ([], {}), '()\n', (319, 321), False, 'from tensorflow.compat.v1 import ConfigProto\n'), ((371, 404), 'tensorflow.compat.v1.InteractiveSession', 'InteractiveSession', ([], {'config': 'config'}), '(config=config)\n', (389, 404), False, 'from tensorflow.compat.v1 import InteractiveSession\n'), ((1096, 1133), 'utils.file_utils.makedir_if_not_exists', 'makedir_if_not_exists', (['model_save_dir'], {}), '(model_save_dir)\n', (1117, 1133), False, 'from utils.file_utils import makedir_if_not_exists\n'), ((1134, 1177), 'utils.file_utils.makedir_if_not_exists', 'makedir_if_not_exists', (['model_gen_sample_dir'], {}), '(model_gen_sample_dir)\n', (1155, 1177), False, 'from utils.file_utils import makedir_if_not_exists\n'), ((1178, 1219), 'utils.file_utils.makedir_if_not_exists', 'makedir_if_not_exists', (['model_gen_real_dir'], {}), '(model_gen_real_dir)\n', (1199, 1219), False, 'from utils.file_utils import makedir_if_not_exists\n'), ((1267, 1303), 'pandas.read_csv', 'pd.read_csv', (['dataset_attr_file_train'], {}), '(dataset_attr_file_train)\n', (1278, 1303), True, 'import pandas as pd\n'), ((1325, 1396), 'datasets.celeba.dataloader.DataSequence', 'DataSequence', (['train_data_df', 'dataset_images_path'], {'batch_size': 'batch_size'}), '(train_data_df, dataset_images_path, batch_size=batch_size)\n', (1337, 1396), False, 'from datasets.celeba.dataloader import DataSequence\n'), ((1550, 1639), 'datasets.celeba.dataloader.DataSequence', 'DataSequence', (['train_data_df', 'dataset_images_path'], {'batch_size': 'batch_size', 'mode': '"""valid"""'}), "(train_data_df, dataset_images_path, batch_size=batch_size,\n mode='valid')\n", (1562, 1639), False, 'from datasets.celeba.dataloader import DataSequence\n'), ((1794, 1843), 'numpy.zeros', 'np.zeros', (['(25, conditional_dim)'], {'dtype': 'np.float32'}), '((25, conditional_dim), dtype=np.float32)\n', (1802, 1843), True, 'import numpy as np\n'), ((2723, 2825), 
'callbacks.ImagesLoggingCallback', 'ImagesLoggingCallback', (['(25)', 'latent_dim', 'view_cond', 'real_view_conditions', 'model_images_save_base_dir'], {}), '(25, latent_dim, view_cond, real_view_conditions,\n model_images_save_base_dir)\n', (2744, 2825), False, 'from callbacks import ImagesLoggingCallback\n'), ((2296, 2310), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2307, 2310), False, 'import pickle\n'), ((2540, 2554), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2551, 2554), False, 'import pickle\n')] |
# Importar paquetes
import numpy as np
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
from scipy.stats import norm
# Función para descargar precios de cierre ajustados de varios activos a la vez:
def get_closes(tickers, start_date=None, end_date=None, freq=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Frecuencia de muestreo por defecto (freq='d')
# Importamos paquetes necesarios
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
# Creamos DataFrame vacío de precios, con el índice de las fechas
closes = pd.DataFrame(columns = tickers, index=web.YahooDailyReader(symbols=tickers[0], start=start_date, end=end_date, interval=freq).read().index)
# Agregamos cada uno de los precios con YahooDailyReader
for ticker in tickers:
df = web.YahooDailyReader(symbols=ticker, start=start_date, end=end_date, interval=freq).read()
closes[ticker]=df['Adj Close']
closes.index_name = 'Date'
closes = closes.sort_index()
return closes
##metodo parametrico, 1 mes
ticker=['GMEXICO','BECLE','LIVERPOOL', 'VOLARIS']
start,end='2017-08-24','2019-08-24' ##poner un día menos del actual
closes=pd.read_csv('../Data/datos.csv')
ret=closes.pct_change().dropna()
cov=ret.cov()
numberport=int(input("Quantity of stocks: "))
prices=np.empty((numberport,1))
for x in range(0,numberport):
prices[x][0] = closes[ticker[x]].iloc[-1]
titles=np.empty((numberport,1))
for x in range(0,numberport):
titles[x][0]=int(input("Quantity of titles of stock in order: "))
totalmatrix=np.multiply(prices, titles)
exposure = 0
for n in totalmatrix:
exposure += n
exposure=float(exposure)
print(totalmatrix)
print (exposure)
ws=totalmatrix/exposure
wt=np.transpose(ws)
cov=np.matrix(cov)
x=wt*cov*ws
risk=norm.ppf(1-((float(input("risk level in percentage ")))/100))
var=risk*(exposure)*np.sqrt(x)
print(var)
### metodo no parametrico, 1 año
start,end='2018-08-22','2019-08-20'
closes=get_closes(ticker,start,end,freq='d')
ret=closes.pct_change().dropna()
ret=pd.DataFrame(ret)
prodw=[]
for r in range(0,len(ret)):
row=np.matrix(ret.iloc[r])
row=np.transpose(row)
sumpro=np.multiply(ws,row)
sumprod = 0
for n in sumpro:
sumprod += n
prodw.append(float(sumprod))
p = np.percentile(prodw, 2.5)
print(p)
print(exposure)
var2=p*exposure
print(var2) | [
"numpy.multiply",
"numpy.sqrt",
"pandas.read_csv",
"pandas_datareader.data.YahooDailyReader",
"numpy.empty",
"pandas.DataFrame",
"numpy.percentile",
"numpy.matrix",
"numpy.transpose"
] | [((1333, 1365), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/datos.csv"""'], {}), "('../Data/datos.csv')\n", (1344, 1365), True, 'import pandas as pd\n'), ((1466, 1491), 'numpy.empty', 'np.empty', (['(numberport, 1)'], {}), '((numberport, 1))\n', (1474, 1491), True, 'import numpy as np\n'), ((1574, 1599), 'numpy.empty', 'np.empty', (['(numberport, 1)'], {}), '((numberport, 1))\n', (1582, 1599), True, 'import numpy as np\n'), ((1711, 1738), 'numpy.multiply', 'np.multiply', (['prices', 'titles'], {}), '(prices, titles)\n', (1722, 1738), True, 'import numpy as np\n'), ((1885, 1901), 'numpy.transpose', 'np.transpose', (['ws'], {}), '(ws)\n', (1897, 1901), True, 'import numpy as np\n'), ((1906, 1920), 'numpy.matrix', 'np.matrix', (['cov'], {}), '(cov)\n', (1915, 1920), True, 'import numpy as np\n'), ((2193, 2210), 'pandas.DataFrame', 'pd.DataFrame', (['ret'], {}), '(ret)\n', (2205, 2210), True, 'import pandas as pd\n'), ((2431, 2456), 'numpy.percentile', 'np.percentile', (['prodw', '(2.5)'], {}), '(prodw, 2.5)\n', (2444, 2456), True, 'import numpy as np\n'), ((2020, 2030), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (2027, 2030), True, 'import numpy as np\n'), ((2256, 2278), 'numpy.matrix', 'np.matrix', (['ret.iloc[r]'], {}), '(ret.iloc[r])\n', (2265, 2278), True, 'import numpy as np\n'), ((2287, 2304), 'numpy.transpose', 'np.transpose', (['row'], {}), '(row)\n', (2299, 2304), True, 'import numpy as np\n'), ((2316, 2336), 'numpy.multiply', 'np.multiply', (['ws', 'row'], {}), '(ws, row)\n', (2327, 2336), True, 'import numpy as np\n'), ((968, 1055), 'pandas_datareader.data.YahooDailyReader', 'web.YahooDailyReader', ([], {'symbols': 'ticker', 'start': 'start_date', 'end': 'end_date', 'interval': 'freq'}), '(symbols=ticker, start=start_date, end=end_date,\n interval=freq)\n', (988, 1055), True, 'import pandas_datareader.data as web\n'), ((765, 856), 'pandas_datareader.data.YahooDailyReader', 'web.YahooDailyReader', ([], {'symbols': 'tickers[0]', 'start': 
'start_date', 'end': 'end_date', 'interval': 'freq'}), '(symbols=tickers[0], start=start_date, end=end_date,\n interval=freq)\n', (785, 856), True, 'import pandas_datareader.data as web\n')] |
"""Function derivatives for error propagation."""
import sys
import numpy as np
import copy
__all__ = ['derivatives', 'propagate_1', 'propagate_2']
STEP_SIZE = np.sqrt(sys.float_info.epsilon)
@np.vectorize
def _deriv_pow_0(x, y):
"""Partial derivative of x**y in x."""
if y == 0:
return 0.0
if x != 0 or y % 1 == 0:
return y*x**(y-1)
return numerical_derivative(np.power, 0)(x, y)
@np.vectorize
def _deriv_pow_1(x, y):
"""Partial derivative of x**y in y."""
if x == 0 and y > 0:
return 0.0
return np.log(x)*np.power(x, y)
@np.vectorize
def _deriv_mod_1(x, y):
"""Partial derivative of x%y in y."""
if x % y < STEP_SIZE:
return np.inf
else:
return numerical_derivative(np.mod, 1)(x, y)
@np.vectorize
def _deriv_fabs(x):
if x >= 0:
return 1
return -1
@np.vectorize
def _deriv_copysign(x, y):
if x >= 0:
return np.copysign(1, y)
return -np.copysign(1, y)
erf_coef = 2/np.sqrt(np.pi)
# Function partial derivatives for x and y
derivatives = {
'add': (lambda x, y: 1.0,
lambda x, y: 1.0),
'sub': (lambda x, y: 1.0,
lambda x, y: -1.0),
'div': (lambda x, y: 1/y,
lambda x, y: -x/(y**2)),
'truediv': (lambda x, y: 1/y,
lambda x, y: -x/(y**2)),
'floordiv': (lambda x, y: 0.0,
lambda x, y: 0.0),
'mod': (lambda x, y: 1.0,
_deriv_mod_1),
'mul': (lambda x, y: y,
lambda x, y: x),
'pow': (_deriv_pow_0, _deriv_pow_1),
# numpy fixed derivatives
'arccos': (lambda x: -1/np.sqrt(1-x**2)),
'arccosh': (lambda x: 1/np.sqrt(x**2-1)),
'arcsin': (lambda x: 1/np.sqrt(1-x**2)),
'arcsinh': (lambda x: 1/np.sqrt(1+x**2)),
'arctan': (lambda x: 1/(1+x**2)),
'arctan2': (lambda y, x: x/(x**2+y**2), # Correct for x == 0
lambda y, x: -y/(x**2+y**2)), # Correct for x == 0
'arctanh': (lambda x: 1/(1-x**2)),
'copysign': (_deriv_copysign,
lambda x, y: 0),
'cos': (lambda x: -np.sin(x)),
'cosh': (np.sinh),
'exp': (np.exp),
'expm1': (np.exp),
'exp2': (lambda x: np.exp2(x)*np.log(2)),
'fabs': (_deriv_fabs),
'log': (lambda x: 1/x), # for np, log=ln
'log10': (lambda x: 1/x/np.log(10)),
'log2': (lambda x: 1/x/np.log(10)),
'log1p': (lambda x: 1/(1+x)),
'sin': (np.cos),
'sinh': (np.cosh),
'tan': (lambda x: 1+np.tan(x)**2),
'tanh': (lambda x: 1-np.tanh(x)**2)
}
def propagate_1(func, fx, x, sx):
"""Propagate errors using function derivatives.
Parameters
----------
func: string
Function name to perform error propagation. Must be in derivatives
keys.
fxy: float or array_like
Numerical result of f(x, y).
x: float or array_like
Variable of the function.
sx: float or array_like
1-sigma errors of the function variable.
Returns
-------
sf: float or array_like
1-sigma uncorrelated error associated to the operation.
"""
if func not in derivatives.keys():
raise ValueError(f'func {func} not in derivatives.')
if hasattr(derivatives[func], '__len__'):
raise ValueError(f'func {func} is not a 1 variable function.')
try:
deriv = derivatives[func](x)
sf = deriv*sx
return sf
except (ValueError, ZeroDivisionError, OverflowError):
shape = np.shape(fx)
if len(shape) == 0:
return np.nan
else:
return np.empty(shape).fill(np.nan)
def propagate_2(func, fxy, x, y, sx, sy):
"""Propagate errors using function derivatives.
Parameters
----------
func: string
Function name to perform error propagation. Must be in derivatives
keys.
fxy: float or array_like
Numerical result of f(x, y).
x, y: float or array_like
Variables of the function.
sx, sy: float or array_like
1-sigma errors of the function variables.
Returns
-------
sf: float or array_like
1-sigma uncorrelated error associated to the operation.
"""
if func not in derivatives.keys():
raise ValueError(f'func {func} not in derivatives.')
if len(derivatives[func]) != 2:
raise ValueError(f'func {func} is not a 2 variable function.')
deriv_x, deriv_y = derivatives[func]
try:
del_x2 = np.square(deriv_x(x, y))
del_y2 = np.square(deriv_y(x, y))
sx2 = np.square(sx)
sy2 = np.square(sy)
sf = np.sqrt(del_x2*sx2 + del_y2*sy2)
return sf
except (ValueError, ZeroDivisionError, OverflowError):
shape = np.shape(fxy)
if len(shape) == 0:
return np.nan
else:
return np.empty(shape).fill(np.nan)
def numerical_derivative(func, arg_ref, step=STEP_SIZE):
"""Create a function to compute a numerical derivative of func.
Parameters
----------
func: callable
The function to compute the numerical derivative.
arg_ref: int or string
Variable to be used for diferentiation. If int, a position will be
used. If string, a variable name will be used.
step: float (optional)
Epsilon to compute the numerical derivative, using the
(-epsilon, +epsioln) method.
Returns
-------
derivative_wrapper: callable
Partial derivative function.
Notes
-----
- Implementation based on `uncertainties` package.
"""
if not callable(func):
raise ValueError(f'function {func} not callable.')
# If reference variable is in kwargs (str) instead of args (int).
change_kwargs = isinstance(arg_ref, str)
@np.vectorize
def derivative_wrapper(*args, **kwargs):
"""
Partial derivative, calculated with the (-epsilon, +epsilon)
method, which is more precise than the (0, +epsilon) method.
"""
# Compute the epsilon relative to the variyng parameter
if change_kwargs:
var_v = kwargs[arg_ref]
else:
var_v = args[arg_ref]
eps = step*abs(var_v)
if not eps:
# Arbitrary, but "small" with respect to 1:
eps = step
# Copy args and assing the variable function with values plus
# and minus the epsilon
# This will cause more memory to be used, but do not change the
# original objects.
kwargs_p = copy.copy(kwargs)
kwargs_m = copy.copy(kwargs)
args_p = list(args)
args_m = list(args)
if change_kwargs:
kwargs_p[arg_ref] += eps
kwargs_m[arg_ref] -= eps
else:
args_p[arg_ref] += eps
args_m[arg_ref] -= eps
# Compute the function values with shifted vvariable
f_plus = func(*args_p, **kwargs_p)
f_minus = func(*args_m, **kwargs_m)
return (f_plus - f_minus)/2/eps
return derivative_wrapper
| [
"numpy.shape",
"numpy.sqrt",
"numpy.tan",
"numpy.power",
"numpy.exp2",
"numpy.log",
"numpy.tanh",
"numpy.square",
"numpy.empty",
"numpy.sin",
"copy.copy",
"numpy.copysign"
] | [((165, 196), 'numpy.sqrt', 'np.sqrt', (['sys.float_info.epsilon'], {}), '(sys.float_info.epsilon)\n', (172, 196), True, 'import numpy as np\n'), ((994, 1008), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (1001, 1008), True, 'import numpy as np\n'), ((558, 567), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (564, 567), True, 'import numpy as np\n'), ((568, 582), 'numpy.power', 'np.power', (['x', 'y'], {}), '(x, y)\n', (576, 582), True, 'import numpy as np\n'), ((931, 948), 'numpy.copysign', 'np.copysign', (['(1)', 'y'], {}), '(1, y)\n', (942, 948), True, 'import numpy as np\n'), ((961, 978), 'numpy.copysign', 'np.copysign', (['(1)', 'y'], {}), '(1, y)\n', (972, 978), True, 'import numpy as np\n'), ((4505, 4518), 'numpy.square', 'np.square', (['sx'], {}), '(sx)\n', (4514, 4518), True, 'import numpy as np\n'), ((4533, 4546), 'numpy.square', 'np.square', (['sy'], {}), '(sy)\n', (4542, 4546), True, 'import numpy as np\n'), ((4560, 4596), 'numpy.sqrt', 'np.sqrt', (['(del_x2 * sx2 + del_y2 * sy2)'], {}), '(del_x2 * sx2 + del_y2 * sy2)\n', (4567, 4596), True, 'import numpy as np\n'), ((6468, 6485), 'copy.copy', 'copy.copy', (['kwargs'], {}), '(kwargs)\n', (6477, 6485), False, 'import copy\n'), ((6505, 6522), 'copy.copy', 'copy.copy', (['kwargs'], {}), '(kwargs)\n', (6514, 6522), False, 'import copy\n'), ((1619, 1638), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (1626, 1638), True, 'import numpy as np\n'), ((1665, 1684), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 - 1)'], {}), '(x ** 2 - 1)\n', (1672, 1684), True, 'import numpy as np\n'), ((1710, 1729), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (1717, 1729), True, 'import numpy as np\n'), ((1756, 1775), 'numpy.sqrt', 'np.sqrt', (['(1 + x ** 2)'], {}), '(1 + x ** 2)\n', (1763, 1775), True, 'import numpy as np\n'), ((2076, 2085), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2082, 2085), True, 'import numpy as np\n'), ((2178, 2188), 'numpy.exp2', 'np.exp2', (['x'], {}), 
'(x)\n', (2185, 2188), True, 'import numpy as np\n'), ((2189, 2198), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2195, 2198), True, 'import numpy as np\n'), ((2302, 2312), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (2308, 2312), True, 'import numpy as np\n'), ((2342, 2352), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (2348, 2352), True, 'import numpy as np\n'), ((3448, 3460), 'numpy.shape', 'np.shape', (['fx'], {}), '(fx)\n', (3456, 3460), True, 'import numpy as np\n'), ((4686, 4699), 'numpy.shape', 'np.shape', (['fxy'], {}), '(fxy)\n', (4694, 4699), True, 'import numpy as np\n'), ((2457, 2466), 'numpy.tan', 'np.tan', (['x'], {}), '(x)\n', (2463, 2466), True, 'import numpy as np\n'), ((2497, 2507), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (2504, 2507), True, 'import numpy as np\n'), ((3548, 3563), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (3556, 3563), True, 'import numpy as np\n'), ((4787, 4802), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (4795, 4802), True, 'import numpy as np\n')] |
import argparse
from time import sleep
import numpy as np
from smartredis import Client
def init_client(nnDB):
if (nnDB==1):
client = Client(cluster=False)
else:
client = Client(cluster=True)
return client
def main():
# Import and initialize MPI
import mpi4py
mpi4py.rc.initialize = False
mpi4py.rc.threads = True
mpi4py.rc.thread_level = 'multiple'
from mpi4py import MPI
if not MPI.Is_initialized():
MPI.Init_thread()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Parse arguments
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dbnodes',default=1,type=int,help='Number of database nodes')
args = parser.parse_args()
# Initialize SmartRedis clients
client = init_client(args.dbnodes)
comm.Barrier()
if (rank==0):
print('All SmartRedis clients initialized')
# Set parameters for array of random numbers to be set as inference data
# In this example we create inference data for a simple function
# y=f(x), which has 1 input (x) and 1 output (y)
# The domain for the function is from 0 to 10
# The inference data is obtained from a uniform distribution over the domain
nSamples = 64
xmin = 0.0
xmax = 10.0
nInputs = 1
nOutputs = 1
# Send array used to communicate whether to keep running data loader or ML
if (rank==0):
arrMLrun = np.array([1, 1])
client.put_tensor('check-run', arrMLrun)
# Send some information regarding the training data size
if (rank==0):
arrInfo = np.array([nSamples, nInputs+nOutputs, nInputs, size])
client.put_tensor('sizeInfo', arrInfo)
print('Sent size info of training data to database')
# Emulate integration of PDEs with a do loop
numts = 1000
stepInfo = np.zeros(2, dtype=int)
for its in range(numts):
# Sleep for a few seconds to emulate the time required by PDE integration
sleep(10)
# First off check if ML is done training, if so exit from loop
arrMLrun = client.get_tensor('check-run')
if (arrMLrun[0]<0.5):
break
# Generate the training data for the polynomial y=f(x)=x**2 + 3*x + 1
# place output in first column and input in second column
inputs = np.random.uniform(low=xmin, high=xmax, size=(nSamples,1))
outputs = inputs**2 + 3*inputs + 1
sendArr = np.concatenate((outputs, inputs), axis=1)
# Send training data to database
send_key = 'y.'+str(rank)+'.'+str(its+1)
if (rank==0):
print(f'Sending training data with key {send_key} and shape {sendArr.shape}')
client.put_tensor(send_key, sendArr)
comm.Barrier()
if (rank==0):
print(f'All ranks finished sending training data')
# Send the time step number, used by ML program to determine
# when new data is available
if (rank==0):
stepInfo[0] = int(its+1)
client.put_tensor('step', stepInfo)
if (rank==0):
print('Exiting ...')
if __name__ == '__main__':
main()
| [
"mpi4py.MPI.Init_thread",
"argparse.ArgumentParser",
"mpi4py.MPI.Is_initialized",
"smartredis.Client",
"time.sleep",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.random.uniform"
] | [((602, 641), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (625, 641), False, 'import argparse\n'), ((1865, 1887), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'int'}), '(2, dtype=int)\n', (1873, 1887), True, 'import numpy as np\n'), ((147, 168), 'smartredis.Client', 'Client', ([], {'cluster': '(False)'}), '(cluster=False)\n', (153, 168), False, 'from smartredis import Client\n'), ((196, 216), 'smartredis.Client', 'Client', ([], {'cluster': '(True)'}), '(cluster=True)\n', (202, 216), False, 'from smartredis import Client\n'), ((438, 458), 'mpi4py.MPI.Is_initialized', 'MPI.Is_initialized', ([], {}), '()\n', (456, 458), False, 'from mpi4py import MPI\n'), ((468, 485), 'mpi4py.MPI.Init_thread', 'MPI.Init_thread', ([], {}), '()\n', (483, 485), False, 'from mpi4py import MPI\n'), ((1457, 1473), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (1465, 1473), True, 'import numpy as np\n'), ((1621, 1676), 'numpy.array', 'np.array', (['[nSamples, nInputs + nOutputs, nInputs, size]'], {}), '([nSamples, nInputs + nOutputs, nInputs, size])\n', (1629, 1676), True, 'import numpy as np\n'), ((2007, 2016), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (2012, 2016), False, 'from time import sleep\n'), ((2349, 2407), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'xmin', 'high': 'xmax', 'size': '(nSamples, 1)'}), '(low=xmin, high=xmax, size=(nSamples, 1))\n', (2366, 2407), True, 'import numpy as np\n'), ((2468, 2509), 'numpy.concatenate', 'np.concatenate', (['(outputs, inputs)'], {'axis': '(1)'}), '((outputs, inputs), axis=1)\n', (2482, 2509), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
from engine import Engine
from utils import use_cuda, resume_checkpoint
from torchvision.models import resnet18
PADDING_IDX = 0
class BertCNN(torch.nn.Module):
    """CNN-based recommender scoring user/item pairs.

    The outer product of a learned user latent vector (length
    ``latent_dim``) and a pretrained item title embedding (length 128)
    forms a 2-D "interaction map" of shape (latent_dim, 128).  A small
    convolutional stack reduces that map to a single rating in (0, 1).

    Expected ``config`` keys:
        num_users, num_items: embedding table sizes.
        latent_dim: user latent dimension.  Must be 64 so that after the
            six 2x max-poolings the feature map flattens to
            256 * (64/64) * (128/64) = 512, matching the first Linear.
        title_embeddings: path to an ``.npy`` array of shape
            (num_items + 1, 128); the last row is dropped (presumably a
            padding row -- TODO confirm against the preprocessing code).
    """

    def __init__(self, config):
        super(BertCNN, self).__init__()
        self.config = config
        self.num_users = config['num_users']
        self.num_items = config['num_items']
        self.latent_dim = config['latent_dim']
        if config['title_embeddings']:
            title_embeddings = np.load(config['title_embeddings'])
            self.item_title = torch.nn.Embedding(self.num_items, title_embeddings.shape[1])
            # BUGFIX: the original assigned to `.weights` -- a non-existent
            # attribute on nn.Embedding -- so the pretrained vectors were
            # never loaded and the embedding stayed randomly initialised.
            # copy_ writes them into the real `.weight` parameter in place
            # (and casts float64 -> float32 to match the user embedding).
            with torch.no_grad():
                self.item_title.weight.copy_(torch.as_tensor(title_embeddings[:-1, :]))
            # Keep the pretrained embeddings trainable (fine-tuning).
            self.item_title.weight.requires_grad = True
        self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)
        # Six conv -> pool(2) -> BN -> ReLU stages: each halves both
        # spatial dims, so a (latent_dim=64, 128) map ends as (1, 2)
        # with 256 channels, i.e. 512 features after Flatten.
        self.conv_net = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=3, padding=1),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(8),
            nn.ReLU(True),
            nn.Conv2d(8, 16, kernel_size=3, padding=1),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            nn.Conv2d(16, 32, kernel_size=3, padding=1),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.Flatten()
        )
        # Head: 512 flattened conv features -> single logit.
        self.linear = nn.Sequential(
            nn.Linear(512, 128),
            nn.BatchNorm1d(128),
            nn.Dropout(0.1),
            nn.ReLU(True),
            nn.Linear(128, 1)
        )
        self.logistic = torch.nn.Sigmoid()

    def forward(self, user_indices, item_indices):
        """Score each (user, item) pair; returns a (batch, 1) tensor in (0, 1)."""
        # (batch, latent_dim, 1) column vector per user.
        user_embedding = self.embedding_user(user_indices).view(-1, self.latent_dim, 1)
        # (batch, 1, 128) row vector per item title.
        title_embedding = self.item_title(item_indices).view(-1, 1, 128)
        # Batched outer product -> (batch, latent_dim, 128) interaction maps.
        emb_maps = user_embedding @ title_embedding
        # Add a channel dim for Conv2d: (batch, 1, latent_dim, 128).
        conv_net_res = self.conv_net(emb_maps.unsqueeze(1))
        logits = self.linear(conv_net_res)
        rating = self.logistic(logits)
        return rating

    def init_weight(self):
        # Intentionally a no-op: default PyTorch init is used (and title
        # embeddings are loaded in __init__); kept for engine-API parity.
        pass
class BertCNNEngine(Engine):
    """Engine for training & evaluating BertCNN model"""
    def __init__(self, config):
        # Build the model first, then optionally move it to the configured
        # GPU, *before* the base Engine's __init__ wires up the
        # optimizer/criterion against the model's parameters.
        self.model = BertCNN(config)
        if config['use_cuda']:
            # presumably selects the CUDA device by id — verify use_cuda()
            use_cuda(True, config['device_id'])
            self.model.cuda()
        super(BertCNNEngine, self).__init__(config)
        print(self.model) | [
"torch.nn.Sigmoid",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.as_tensor",
"torch.nn.Flatten",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"utils.use_cuda",
"numpy.load",
"torch.nn.Embedding"
] | [((851, 936), 'torch.nn.Embedding', 'torch.nn.Embedding', ([], {'num_embeddings': 'self.num_users', 'embedding_dim': 'self.latent_dim'}), '(num_embeddings=self.num_users, embedding_dim=self.latent_dim\n )\n', (869, 936), False, 'import torch\n'), ((2124, 2142), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (2140, 2142), False, 'import torch\n'), ((527, 562), 'numpy.load', 'np.load', (["config['title_embeddings']"], {}), "(config['title_embeddings'])\n", (534, 562), True, 'import numpy as np\n'), ((593, 654), 'torch.nn.Embedding', 'torch.nn.Embedding', (['self.num_items', 'title_embeddings.shape[1]'], {}), '(self.num_items, title_embeddings.shape[1])\n', (611, 654), False, 'import torch\n'), ((984, 1025), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(1, 8, kernel_size=3, padding=1)\n', (993, 1025), True, 'import torch.nn as nn\n'), ((1039, 1054), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1051, 1054), True, 'import torch.nn as nn\n'), ((1068, 1085), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8)'], {}), '(8)\n', (1082, 1085), True, 'import torch.nn as nn\n'), ((1099, 1112), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1106, 1112), True, 'import torch.nn as nn\n'), ((1127, 1169), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8)', '(16)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(8, 16, kernel_size=3, padding=1)\n', (1136, 1169), True, 'import torch.nn as nn\n'), ((1183, 1198), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1195, 1198), True, 'import torch.nn as nn\n'), ((1212, 1230), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (1226, 1230), True, 'import torch.nn as nn\n'), ((1244, 1257), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1251, 1257), True, 'import torch.nn as nn\n'), ((1272, 1315), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(16, 32, kernel_size=3, 
padding=1)\n', (1281, 1315), True, 'import torch.nn as nn\n'), ((1329, 1344), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1341, 1344), True, 'import torch.nn as nn\n'), ((1358, 1376), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (1372, 1376), True, 'import torch.nn as nn\n'), ((1390, 1403), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1397, 1403), True, 'import torch.nn as nn\n'), ((1418, 1461), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(32, 64, kernel_size=3, padding=1)\n', (1427, 1461), True, 'import torch.nn as nn\n'), ((1475, 1490), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1487, 1490), True, 'import torch.nn as nn\n'), ((1504, 1522), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (1518, 1522), True, 'import torch.nn as nn\n'), ((1536, 1549), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1543, 1549), True, 'import torch.nn as nn\n'), ((1564, 1608), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(64, 128, kernel_size=3, padding=1)\n', (1573, 1608), True, 'import torch.nn as nn\n'), ((1622, 1637), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1634, 1637), True, 'import torch.nn as nn\n'), ((1651, 1670), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (1665, 1670), True, 'import torch.nn as nn\n'), ((1684, 1697), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1691, 1697), True, 'import torch.nn as nn\n'), ((1712, 1757), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(128, 256, kernel_size=3, padding=1)\n', (1721, 1757), True, 'import torch.nn as nn\n'), ((1771, 1786), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1783, 1786), True, 'import torch.nn as nn\n'), ((1800, 1819), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), 
'(256)\n', (1814, 1819), True, 'import torch.nn as nn\n'), ((1833, 1846), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1840, 1846), True, 'import torch.nn as nn\n'), ((1860, 1872), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (1870, 1872), True, 'import torch.nn as nn\n'), ((1941, 1960), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(128)'], {}), '(512, 128)\n', (1950, 1960), True, 'import torch.nn as nn\n'), ((1974, 1993), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (1988, 1993), True, 'import torch.nn as nn\n'), ((2007, 2022), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (2017, 2022), True, 'import torch.nn as nn\n'), ((2036, 2049), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2043, 2049), True, 'import torch.nn as nn\n'), ((2063, 2080), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (2072, 2080), True, 'import torch.nn as nn\n'), ((2813, 2848), 'utils.use_cuda', 'use_cuda', (['(True)', "config['device_id']"], {}), "(True, config['device_id'])\n", (2821, 2848), False, 'from utils import use_cuda, resume_checkpoint\n'), ((712, 753), 'torch.as_tensor', 'torch.as_tensor', (['title_embeddings[:-1, :]'], {}), '(title_embeddings[:-1, :])\n', (727, 753), False, 'import torch\n')] |
from io import BytesIO
import gzip
import os
import os.path as op
import json
from glob import glob
import boto3
import s3fs
import numpy as np
import pandas as pd
import nibabel as nib
import dipy.data as dpd
from dipy.data.fetcher import _make_fetcher
from dipy.io.streamline import load_tractogram, load_trk
from dipy.segment.metric import (AveragePointwiseEuclideanMetric,
ResampleFeature)
from dipy.segment.clustering import QuickBundles
import AFQ.registration as reg
__all__ = ["fetch_callosum_templates", "read_callosum_templates",
"fetch_templates", "read_templates", "fetch_hcp",
"fetch_stanford_hardi_tractography",
"read_stanford_hardi_tractography",
"organize_stanford_data"]
afq_home = op.join(op.expanduser('~'), 'AFQ_data')
baseurl = "https://ndownloader.figshare.com/files/"
callosum_fnames = ["Callosum_midsag.nii.gz",
"L_AntFrontal.nii.gz",
"L_Motor.nii.gz",
"L_Occipital.nii.gz",
"L_Orbital.nii.gz",
"L_PostParietal.nii.gz",
"L_SupFrontal.nii.gz",
"L_SupParietal.nii.gz",
"L_Temporal.nii.gz",
"R_AntFrontal.nii.gz",
"R_Motor.nii.gz",
"R_Occipital.nii.gz",
"R_Orbital.nii.gz",
"R_PostParietal.nii.gz",
"R_SupFrontal.nii.gz",
"R_SupParietal.nii.gz",
"R_Temporal.nii.gz"]
callosum_remote_fnames = ["5273794", "5273797", "5273800", "5273803",
"5273806", "5273809", "5273812", "5273815",
"5273821", "5273818", "5273824", "5273827",
"5273830", "5273833", "5273836", "5273839",
"5273842"]
callosum_md5_hashes = ["709fa90baadeacd64f1d62b5049a4125",
"987c6169de807c4e93dc2cbd7a25d506",
"0da114123d0b0097b96fe450a459550b",
"6d845bd10504f67f1dc17f9000076d7e",
"e16c7873ef4b08d26b77ef746dab8237",
"47193fd4df1ea17367817466de798b90",
"7e78bf9671e6945f4b2f5e7c30595a3c",
"8adbb947377ff7b484c88d8c0ffc2125",
"0fd981a4d0847e0642ff96e84fe44e47",
"87c4855efa406d8fb004cffb8259180e",
"c7969bcf5f2343fd9ce9c49b336cf14c",
"bb4372b88991932150205ffb22aa6cb7",
"d198d4e7db18ddc7236cf143ecb8342e",
"d0f6edef64b0c710c92e634496085dda",
"85eaee44665f244db5adae2e259833f6",
"25f24eb22879a05d12bda007c81ea55a",
"2664e0b8c2d9c59f13649a89bfcce399"]
fetch_callosum_templates = _make_fetcher("fetch_callosum_templates",
op.join(afq_home,
'callosum_templates'),
baseurl, callosum_remote_fnames,
callosum_fnames,
md5_list=callosum_md5_hashes,
doc="Download AFQ callosum templates")
def read_callosum_templates(resample_to=False):
    """
    Load the AFQ callosum templates from disk (downloading if needed).

    Parameters
    ----------
    resample_to : str or nibabel image class instance, optional
        If provided, resample each template into the space of this image
        (a string is interpreted as a path to load the target image from).
        Default: False (no resampling).

    Returns
    -------
    dict
        Keys are the names of the template ROIs and values are nibabel
        Nifti1Image objects read from each of the ROI nifti files.
    """
    template_files, template_folder = fetch_callosum_templates()
    templates = {}
    for fname in template_files:
        roi_img = nib.load(op.join(template_folder, fname))
        if resample_to:
            # A string is taken as a path to the target image:
            if isinstance(resample_to, str):
                resample_to = nib.load(resample_to)
            resampled_data = reg.resample(roi_img.get_fdata(),
                                         resample_to,
                                         roi_img.affine,
                                         resample_to.affine)
            roi_img = nib.Nifti1Image(resampled_data, resample_to.affine)
        templates[fname.split('.')[0]] = roi_img
    return templates
template_fnames = ["ATR_roi1_L.nii.gz",
"ATR_roi1_R.nii.gz",
"ATR_roi2_L.nii.gz",
"ATR_roi2_R.nii.gz",
"ATR_L_prob_map.nii.gz",
"ATR_R_prob_map.nii.gz",
"CGC_roi1_L.nii.gz",
"CGC_roi1_R.nii.gz",
"CGC_roi2_L.nii.gz",
"CGC_roi2_R.nii.gz",
"CGC_L_prob_map.nii.gz",
"CGC_R_prob_map.nii.gz",
"CST_roi1_L.nii.gz",
"CST_roi1_R.nii.gz",
"CST_roi2_L.nii.gz",
"CST_roi2_R.nii.gz",
"CST_L_prob_map.nii.gz",
"CST_R_prob_map.nii.gz",
"FA_L.nii.gz",
"FA_R.nii.gz",
"FA_prob_map.nii.gz",
"FP_L.nii.gz",
"FP_R.nii.gz",
"FP_prob_map.nii.gz",
"HCC_roi1_L.nii.gz",
"HCC_roi1_R.nii.gz",
"HCC_roi2_L.nii.gz",
"HCC_roi2_R.nii.gz",
"HCC_L_prob_map.nii.gz",
"HCC_R_prob_map.nii.gz",
"IFO_roi1_L.nii.gz",
"IFO_roi1_R.nii.gz",
"IFO_roi2_L.nii.gz",
"IFO_roi2_R.nii.gz",
"IFO_L_prob_map.nii.gz",
"IFO_R_prob_map.nii.gz",
"ILF_roi1_L.nii.gz",
"ILF_roi1_R.nii.gz",
"ILF_roi2_L.nii.gz",
"ILF_roi2_R.nii.gz",
"ILF_L_prob_map.nii.gz",
"ILF_R_prob_map.nii.gz",
"SLF_roi1_L.nii.gz",
"SLF_roi1_R.nii.gz",
"SLF_roi2_L.nii.gz",
"SLF_roi2_R.nii.gz",
"SLFt_roi2_L.nii.gz",
"SLFt_roi2_R.nii.gz",
"SLF_L_prob_map.nii.gz",
"SLF_R_prob_map.nii.gz",
"UNC_roi1_L.nii.gz",
"UNC_roi1_R.nii.gz",
"UNC_roi2_L.nii.gz",
"UNC_roi2_R.nii.gz",
"UNC_L_prob_map.nii.gz",
"UNC_R_prob_map.nii.gz",
"ARC_L_prob_map.nii.gz",
"ARC_R_prob_map.nii.gz"]
template_remote_fnames = ["5273680", "5273683", "5273686", "5273689",
"11458274", "11458277",
"5273695", "5273692", "5273698", "5273701",
"11458268", "11458271",
"5273704", "5273707", "5273710", "5273713",
"11458262", "11458265",
"5273716", "5273719",
"11458220",
"5273722", "5273725",
"11458226",
"5273728", "5273731", "5273734", "5273746",
"11458259", "11458256",
"5273737", "5273740", "5273743", "5273749",
"11458250", "11458253",
"5273752", "5273755", "5273758", "5273761",
"11458244", "11458247",
"5273764", "5273767", "5273770", "5273773",
"5273776", "5273791",
"11458238", "11458241",
"5273779", "5273782", "5273785", "5273788",
"11458223", "11458229",
"11458232", "11458235"]
template_md5_hashes = ["6b7aaed1a2982fd0ea436a223133908b",
"fd60d46d4e3cbd906c86e4c9e4fd6e2a",
"3aba60b169a35c38640de4ec29d362c8",
"12716a5688a1809fbaed1d58d2e68b59",
"c5637f471df861d9bbb45604db34770b",
"850cc4c04d7241747063fe3cd440b2ce",
"8e8973bc7838c8744914d402f52d91ca",
"<KEY>",
"e1fab77f21d5303ed52285f015e24f0b",
"<KEY>",
"<KEY>",
"7e73ab02db30a3ad6bd9e82148c2486e",
"<KEY>",
"73941510c798c1ed1b03e2bd481cd5c7",
"660cdc031ee0716d60159c7d933119ea",
"660cdc031ee0716d60159c7d933119ea",
"fd012bc89f6bed7bd54530195496bac4",
"<KEY>",
"<KEY>",
"<KEY>",
"627d7bb2e6d55f8243da815a36d9ff1a",
"55adbe9b8279185eedbe342149e1ff90",
"<KEY>",
"<KEY>",
"ba453196ff179b0e31172806e313b52c",
"<KEY>",
"<KEY>",
"9806e82c250e4604534b96917f87b7e8",
"<KEY>",
"<KEY>",
"<KEY>",
"d45020a87ee4bb496edd350631d91f6a",
"<KEY>",
"55d616ea9e0c646adc1aafa0f5fbe625",
"<KEY>",
"a13eef7059c98568adfefbab660e434e",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"7bdf5111265107091c7a2fca9215de30",
"<KEY>",
"af2bcedf47e193686af329b9a8e259da",
"9a1122943579d11ba169d3ad87a75625",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"d3e068997ebc60407bd6e9576e47dede",
"<KEY>",
"fa141bb2d951bec486916acda3652d95",
"d391d073e86e28588be9a6d01b2e7a82",
"<KEY>",
"d65c67910807504735e034f7ea92d590",
"93cb24a9128db1a6c34a09eaf79fe7f0",
"<KEY>",
"19590c712f1776da1fdba64d4eb7f1f6",
"04d5af0feb2c1b5b52a87ccbbf148e4b",
"53c277be990d00f7de04f2ea35e74d73"]
fetch_templates = _make_fetcher("fetch_templates",
op.join(afq_home, 'templates'),
baseurl, template_remote_fnames,
template_fnames, md5_list=template_md5_hashes,
doc="Download AFQ templates")
def read_templates(resample_to=False):
    """
    Load the AFQ waypoint/probability templates from disk (downloading
    them first, if needed).

    Parameters
    ----------
    resample_to : str or nibabel image class instance, optional
        If provided, resample each template into the space of this image
        (a string is interpreted as a path to load the target image from).
        Default: False (no resampling).

    Returns
    -------
    dict
        Keys are the names of the template ROIs and values are nibabel
        Nifti1Image objects read from each of the ROI nifti files.
    """
    fnames, folder = fetch_templates()
    out = {}
    for fname in fnames:
        template_img = nib.load(op.join(folder, fname))
        if resample_to:
            # Resolve a file-name into the target image:
            if isinstance(resample_to, str):
                resample_to = nib.load(resample_to)
            template_img = nib.Nifti1Image(
                reg.resample(template_img.get_fdata(),
                             resample_to,
                             template_img.affine,
                             resample_to.affine),
                resample_to.affine)
        key = fname.split('.')[0]
        out[key] = template_img
    return out
def fetch_hcp(subjects,
              hcp_bucket='hcp-openaccess',
              profile_name="hcp",
              path=None,
              aws_access_key_id=None,
              aws_secret_access_key=None):
    """
    Fetch HCP diffusion data and arrange it in a manner that resembles the
    BIDS [1]_ specification.

    Parameters
    ----------
    subjects : list
        Each item is an integer, identifying one of the HCP subjects
    hcp_bucket : string, optional
        The name of the HCP S3 bucket. Default: "hcp-openaccess"
    profile_name : string, optional
        The name of the AWS profile used for access. Default: "hcp"
    path : string, optional
        Path to save files into. Default: '~/AFQ_data'
    aws_access_key_id : string, optional
        AWS credentials to HCP AWS S3. Will only be used if `profile_name` is
        set to False.
    aws_secret_access_key : string, optional
        AWS credentials to HCP AWS S3. Will only be used if `profile_name` is
        set to False.

    Returns
    -------
    dict with remote and local names of these files.

    Notes
    -----
    To use this function with its default setting, you need to have a
    file '~/.aws/credentials', that includes a section:

    [hcp]
    AWS_ACCESS_KEY_ID=<KEY>
    AWS_SECRET_ACCESS_KEY=<KEY>

    The keys are credentials that you can get from HCP
    (see https://wiki.humanconnectome.org/display/PublicData/How+To+Connect+to+Connectome+Data+via+AWS) # noqa

    Local filenames are changed to match our expected conventions.

    .. [1] <NAME> al. (2016). The brain imaging data structure,
           a format for organizing and describing outputs of neuroimaging
           experiments. Scientific Data, 3::160044. DOI: 10.1038/sdata.2016.44.
    """
    if profile_name:
        boto3.setup_default_session(profile_name=profile_name)
    elif aws_access_key_id is not None and aws_secret_access_key is not None:
        boto3.setup_default_session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key)
    else:
        # BUG FIX: the message was previously passed as three separate
        # arguments, which made the ValueError carry a tuple instead of a
        # single readable string.
        raise ValueError("Must provide either a `profile_name` or "
                         "both `aws_access_key_id` and "
                         "`aws_secret_access_key` as input to 'fetch_hcp'")
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(hcp_bucket)
    if path is None:
        my_path = afq_home
    else:
        # BUG FIX: this branch previously set `base_dir = path` and left
        # `my_path` unbound, raising NameError whenever a custom `path` was
        # supplied (and `base_dir` was clobbered just below anyway).
        my_path = path
    base_dir = op.join(my_path, 'HCP', 'derivatives', 'dmriprep')
    if not os.path.exists(base_dir):
        os.makedirs(base_dir, exist_ok=True)
    data_files = {}
    for subject in subjects:
        # We make a single session folder per subject for this case, because
        # AFQ api expects session structure:
        sub_dir = op.join(base_dir, 'sub-%s' % subject)
        sess_dir = op.join(sub_dir, "sess-01")
        # NOTE(review): a subject whose folder already exists is skipped
        # entirely (no entries in `data_files`) — presumably "already
        # fetched"; confirm this is the intended contract.
        if not os.path.exists(sub_dir):
            os.makedirs(os.path.join(sess_dir, 'dwi'), exist_ok=True)
            os.makedirs(os.path.join(sess_dir, 'anat'), exist_ok=True)
            data_files[op.join(sess_dir, 'dwi', 'sub-%s_dwi.bval' % subject)] =\
                'HCP_1200/%s/T1w/Diffusion/bvals' % subject
            data_files[op.join(sess_dir, 'dwi', 'sub-%s_dwi.bvec' % subject)] =\
                'HCP_1200/%s/T1w/Diffusion/bvecs' % subject
            data_files[op.join(sess_dir, 'dwi', 'sub-%s_dwi.nii.gz' % subject)] =\
                'HCP_1200/%s/T1w/Diffusion/data.nii.gz' % subject
            data_files[op.join(sess_dir, 'anat', 'sub-%s_T1w.nii.gz' % subject)] =\
                'HCP_1200/%s/T1w/T1w_acpc_dc.nii.gz' % subject
            data_files[op.join(sess_dir, 'anat',
                                'sub-%s_aparc+aseg.nii.gz' % subject)] =\
                'HCP_1200/%s/T1w/aparc+aseg.nii.gz' % subject
    # Only download files that are not already present locally:
    for k in data_files.keys():
        if not op.exists(k):
            bucket.download_file(data_files[k], k)
    # Create the BIDS dataset description file text
    dataset_description = {
        "BIDSVersion": "1.0.0",
        "Name": "HCP",
        "Acknowledgements": """Data were provided by the Human Connectome Project, WU-Minn Consortium (Principal Investigators: <NAME> and <NAME>; 1U54MH091657) funded by the 16 NIH Institutes and Centers that support the NIH Blueprint for Neuroscience Research; and by the McDonnell Center for Systems Neuroscience at Washington University.""",  # noqa
        "Subjects": subjects}
    desc_file = op.join(my_path, 'HCP', 'dataset_description.json')
    with open(desc_file, 'w') as outfile:
        json.dump(dataset_description, outfile)
    return data_files
stanford_hardi_tractography_remote_fnames = ["5325715", "5325718"]
stanford_hardi_tractography_hashes = ['6f4bdae702031a48d1cd3811e7a42ef9',
'f20854b4f710577c58bd01072cfb4de6']
stanford_hardi_tractography_fnames = ['mapping.nii.gz',
'tractography_subsampled.trk']
fetch_stanford_hardi_tractography = _make_fetcher(
"fetch_stanford_hardi_tractography",
op.join(afq_home,
'stanford_hardi_tractography'),
baseurl,
stanford_hardi_tractography_remote_fnames,
stanford_hardi_tractography_fnames,
md5_list=stanford_hardi_tractography_hashes,
doc="""Download Stanford HARDI tractography and mapping. For testing
purposes""")
def read_stanford_hardi_tractography():
    """
    Reads a minimal tractography from the Stanford dataset.

    Returns
    -------
    dict
        'mapping.nii.gz' maps to the nibabel image of the mapping and
        'tractography_subsampled.trk' maps to the loaded streamlines.
    """
    files, folder = fetch_stanford_hardi_tractography()
    data_dir = op.join(afq_home, 'stanford_hardi_tractography')
    files_dict = {}
    files_dict['mapping.nii.gz'] = nib.load(
        op.join(data_dir, 'mapping.nii.gz'))
    # A dummy reference image suffices here, because header/bounding-box
    # checks are disabled when loading the trk file:
    dummy_ref = nib.Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
    tractogram = load_trk(
        op.join(data_dir, 'tractography_subsampled.trk'),
        dummy_ref,
        bbox_valid_check=False,
        trk_header_check=False)
    files_dict['tractography_subsampled.trk'] = tractogram.streamlines
    return files_dict
def organize_stanford_data(path=None):
    """
    Create the expected file-system structure for the Stanford HARDI data-set.
    """
    dpd.fetch_stanford_hardi()
    if path is None:
        if not op.exists(afq_home):
            os.mkdir(afq_home)
        out_path = afq_home
    else:
        out_path = path
    dmriprep_dir = op.join(out_path, 'stanford_hardi',
                           'derivatives', 'dmriprep')
    if not op.exists(dmriprep_dir):
        anat_dir = op.join(dmriprep_dir, 'sub-01', 'sess-01', 'anat')
        dwi_dir = op.join(dmriprep_dir, 'sub-01', 'sess-01', 'dwi')
        os.makedirs(anat_dir, exist_ok=True)
        os.makedirs(dwi_dir, exist_ok=True)
        # Anatomy: T1-weighted image and segmentation volume:
        nib.save(dpd.read_stanford_t1(),
                 op.join(anat_dir, 'sub-01_sess-01_T1w.nii.gz'))
        nib.save(dpd.read_stanford_labels()[-1],
                 op.join(anat_dir,
                         'sub-01_sess-01_aparc+aseg.nii.gz'))
        # Diffusion data, with the gradient table saved alongside:
        dwi_img, gtab = dpd.read_stanford_hardi()
        nib.save(dwi_img, op.join(dwi_dir, 'sub-01_sess-01_dwi.nii.gz'))
        np.savetxt(op.join(dwi_dir, 'sub-01_sess-01_dwi.bvecs'), gtab.bvecs)
        np.savetxt(op.join(dwi_dir, 'sub-01_sess-01_dwi.bvals'), gtab.bvals)
    dataset_description = {
        "BIDSVersion": "1.0.0",
        "Name": "<NAME>",
        "Subjects": ["sub-01"]}
    desc_file = op.join(out_path, 'stanford_hardi', 'dataset_description.json')
    with open(desc_file, 'w') as outfile:
        json.dump(dataset_description, outfile)
fetch_hcp_atlas_16_bundles = _make_fetcher(
"fetch_hcp_atlas_16_bundles",
op.join(afq_home,
'hcp_atlas_16_bundles'),
'https://ndownloader.figshare.com/files/',
["11921522"],
["atlas_16_bundles.zip"],
md5_list=["b071f3e851f21ba1749c02fc6beb3118"],
doc="Download minimal Recobundles atlas",
unzip=True)
def read_hcp_atlas_16_bundles():
    """
    Read the minimal (16-bundle) Recobundles atlas.

    Returns
    -------
    dict
        'whole_brain' maps to the whole-brain streamlines; every other key
        is a bundle name mapping to a dict with 'sl' (streamlines) and
        'centroid' (a 100-point QuickBundles centroid).
    """
    bundle_dict = {}
    _, folder = fetch_hcp_atlas_16_bundles()
    atlas_folder = op.join(folder, 'Atlas_in_MNI_Space_16_bundles')
    wb_file = op.join(atlas_folder, 'whole_brain', 'whole_brain_MNI.trk')
    bundle_dict['whole_brain'] = load_tractogram(
        wb_file, 'same', bbox_valid_check=False).streamlines
    for trk_file in glob(op.join(atlas_folder, "bundles", "*.trk")):
        name = op.splitext(op.split(trk_file)[-1])[0]
        sls = load_tractogram(trk_file, 'same',
                              bbox_valid_check=False).streamlines
        # One representative centroid per bundle, with streamlines
        # resampled to 100 points for the distance computation:
        metric = AveragePointwiseEuclideanMetric(
            ResampleFeature(nb_points=100))
        cluster = QuickBundles(np.inf, metric=metric).cluster(sls)
        bundle_dict[name] = {'sl': sls, 'centroid': cluster.centroids[0]}
    # For some reason, this file-name has a 0 in it, instead of an O:
    bundle_dict["IFOF_R"] = bundle_dict.pop("IF0F_R")
    return bundle_dict
fetch_aal_atlas = _make_fetcher(
"fetch_aal_atlas",
op.join(afq_home,
'aal_atlas'),
'https://digital.lib.washington.edu' + '/researchworks' + \
'/bitstream/handle/1773/44951/',
["MNI_AAL_AndMore.nii.gz",
"MNI_AAL.txt"],
["MNI_AAL_AndMore.nii.gz",
"MNI_AAL.txt"],
md5_list=["69395b75a16f00294a80eb9428bf7855",
"59fd3284b17de2fbe411ca1c7afe8c65"],
doc="Download the AAL atlas",
unzip=False)
def read_aal_atlas():
    """
    Reads the AAL atlas [1]_.

    Returns
    -------
    dict
        'atlas' maps to the atlas image (nibabel) and 'labels' to the
        label table (pandas DataFrame).

    .. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
           <NAME>, <NAME>, <NAME>. (2002). Automated anatomical
           labeling of activations in SPM using a macroscopic anatomical
           parcellation of the MNI MRI single-subject brain. Neuroimage. 2002;
           15(1):273-89.
    """
    file_dict, folder = fetch_aal_atlas()
    out_dict = {}
    for fname in file_dict:
        full_path = op.join(folder, fname)
        # The '.txt' file holds the label table; the other file is the
        # atlas volume itself:
        if fname.endswith('.txt'):
            out_dict['labels'] = pd.read_csv(full_path)
        else:
            out_dict['atlas'] = nib.load(full_path)
    return out_dict
def aal_to_regions(regions, atlas=None):
    """
    Queries for large regions containing multiple AAL ROIs

    Parameters
    ----------
    regions : string or list of strings
        The name of the requested region. This can either be an AAL-defined ROI
        name (e.g, 'Occipital_Sup_L') or one of:
        {'leftfrontal' | 'leftoccipital' | 'lefttemporal' | 'leftparietal'
        | 'leftanttemporal' | 'leftparietal' | 'leftanttemporal'
        | 'leftuncinatefront' | 'leftifoffront' | 'leftinfparietal'
        | 'cerebellum' | 'leftarcfrontal' | 'leftarctemp' | 'leftcingpost'}
        each of which there is an equivalent 'right' region for. In addition,
        there are a few bilateral regions: {'occipital' | 'temporal'}, which
        encompass both the right and left region of this name, as well as:
        {'cstinferior' | 'cstsuperior'}

    atlas : 4D array
        Contains the AAL atlas in the correct coordinate frame with additional
        volumes for CST and cingulate ROIs ("AAL and more").

    Returns
    ------
    3D indices to the requested region in the atlas volume

    Raises
    ------
    ValueError
        If a requested region name is not recognized.

    Notes
    -----
    Several regions can be referred to by multiple names:
           'leftuncinatetemp' = 'leftilftemp'= 'leftanttemporal'
           'rightuncinatetemp' = 'rightilftemp' = 'rightanttemporal'
           'leftslfpar'] = 'leftinfparietal'
           'rightslfpar' = 'rightinfparietal'
           'leftslffrontal' = 'leftarcfrontal'
           'rightslffrontal' = 'rightarcfrontal'
    """
    if atlas is None:
        atlas = read_aal_atlas()['atlas']
    atlas_vals = {'leftfrontal': np.arange(1, 26, 2),
                  # Occipital regions do not include fusiform:
                  'leftoccipital': np.arange(43, 54, 2),
                  # Temporal regions include fusiform:
                  'lefttemporal': np.concatenate([np.arange(37, 42, 2),
                                                  np.array([55]),
                                                  np.arange(79, 90, 2)]),
                  # NOTE(review): [57, 67, 2] looks like it may have been
                  # meant as np.arange(57, 67, 2) — confirm against the AAL
                  # label table before changing.
                  'leftparietal': np.array([57, 67, 2]),
                  'leftanttemporal': np.array([41, 83, 87]),
                  'leftuncinatefront': np.array([5, 9, 15, 25]),
                  'leftifoffront': np.array([3, 5, 7, 9, 13, 15, 25]),
                  'leftinfparietal': np.array([61, 63, 65]),
                  'cerebellum': np.arange(91, 117),
                  'leftarcfrontal': np.array([1, 11, 13]),
                  'leftarctemp': np.array([79, 81, 85, 89]),
                  }

    # Right symmetrical is off by one:
    atlas_vals['rightfrontal'] = atlas_vals['leftfrontal'] + 1
    atlas_vals['rightoccipital'] = atlas_vals['leftoccipital'] + 1
    atlas_vals['righttemporal'] = atlas_vals['lefttemporal'] + 1
    atlas_vals['rightparietal'] = atlas_vals['leftparietal'] + 1
    atlas_vals['rightanttemporal'] = atlas_vals['leftanttemporal'] + 1
    atlas_vals['rightuncinatefront'] = atlas_vals['leftuncinatefront'] + 1
    atlas_vals['rightifoffront'] = atlas_vals['leftifoffront'] + 1
    atlas_vals['rightinfparietal'] = atlas_vals['leftinfparietal'] + 1
    atlas_vals['rightarcfrontal'] = atlas_vals['leftarcfrontal'] + 1
    atlas_vals['rightarctemp'] = atlas_vals['leftarctemp'] + 1

    # Multiply named regions:
    atlas_vals['leftuncinatetemp'] = atlas_vals['leftilftemp'] =\
        atlas_vals['leftanttemporal']
    atlas_vals['rightuncinatetemp'] = atlas_vals['rightilftemp'] =\
        atlas_vals['rightanttemporal']
    atlas_vals['leftslfpar'] = atlas_vals['leftinfparietal']
    atlas_vals['rightslfpar'] = atlas_vals['rightinfparietal']
    atlas_vals['leftslffrontal'] = atlas_vals['leftarcfrontal']
    atlas_vals['rightslffrontal'] = atlas_vals['rightarcfrontal']

    # Bilateral regions:
    atlas_vals['occipital'] = np.union1d(atlas_vals['leftoccipital'],
                                         atlas_vals['rightoccipital'])
    atlas_vals['temporal'] = np.union1d(atlas_vals['lefttemporal'],
                                        atlas_vals['righttemporal'])

    if isinstance(regions, str):
        regions = [regions]

    idxes = []
    for region in regions:
        region = region.lower()  # Just to be sure
        if region in atlas_vals.keys():
            vol_idx = 0
            vals = atlas_vals[region]
        elif region == 'cstinferior':
            vol_idx = 1
            vals = np.array([1])
        elif region == 'cstsuperior':
            vol_idx = 2
            vals = np.array([1])
        elif region == 'leftcingpost':
            vol_idx = 3
            vals = np.array([1])
        elif region == 'rightcingpost':
            vol_idx = 4
            vals = np.array([1])
        else:
            # BUG FIX: an unknown region previously fell through with
            # `vol_idx`/`vals` unbound, raising an opaque
            # UnboundLocalError instead of a clear message.
            raise ValueError("Region name not recognized: %s" % region)

        # Broadcast vals, to test for equality over all three dimensions:
        is_in = atlas[..., vol_idx] == vals[:, None, None, None]

        # Then collapse the 4th dimension (each val), to get the 3D array:
        is_in = np.sum(is_in, 0)

        idxes.append(np.array(np.where(is_in)).T)

    return np.concatenate(idxes, axis=0)
def bundles_to_aal(bundles, atlas=None):
    """
    Given a sequence of AFQ bundle names, give back a sequence of lists
    with [target0, target1] being each NX3 arrays of the endpoint indices
    for the first and last node of the streamlines in this bundle.
    """
    if atlas is None:
        atlas = read_aal_atlas()['atlas']
    # Per-bundle [start, end] endpoint regions; None means "no constraint"
    # for that end of the bundle:
    endpoint_dict = {
        "ATR_L": [None, ['leftfrontal']],
        "ATR_R": [None, ['rightfrontal']],
        "CST_L": [['cstinferior'], ['cstsuperior']],
        "CST_R": [['cstinferior'], ['cstsuperior']],
        "CGC_L": [['leftcingpost'], None],
        "CGC_R": [['rightcingpost'], None],
        "HCC_L": [None, None],
        "HCC_R": [None, None],
        "FP": [['leftoccipital'], ['rightoccipital']],
        "FA": [['leftfrontal'], ['rightfrontal']],
        "IFO_L": [['leftoccipital'], ['leftifoffront']],
        "IFO_R": [['rightoccipital'], ['rightifoffront']],
        "ILF_L": [['leftoccipital'], ['leftilftemp']],
        "ILF_R": [['rightoccipital'], ['rightilftemp']],
        "SLF_L": [['leftinfparietal'], ['leftslffrontal']],
        "SLF_R": [['rightinfparietal'], ['rightslffrontal']],
        "UNC_L": [['leftanttemporal'], ['leftuncinatefront']],
        "UNC_R": [['rightanttemporal'], ['rightuncinatefront']],
        "ARC_L": [['leftfrontal'], ['leftarctemp']],
        "ARC_R": [['rightfrontal'], ['rightarctemp']]}
    targets = []
    for bundle in bundles:
        targets.append(
            [None if region is None
             else aal_to_regions(region, atlas=atlas)
             for region in endpoint_dict[bundle]])
    return targets
def s3fs_nifti_write(img, fname, fs=None):
    """
    Write a nifti file straight to S3

    Parameters
    ----------
    img : nib.Nifti1Image class instance
        The image containing data to be written into S3
    fname : string
        Full path (including bucket name and extension) to the S3 location
        where the file is to be saved.
    fs : an s3fs.S3FileSystem class instance, optional
        A file-system to refer to. Default to create a new file-system
    """
    if fs is None:
        fs = s3fs.S3FileSystem()
    # Serialize the image into a single in-memory buffer (header and image
    # share the same stream):
    buffer = BytesIO()
    file_map = img.make_file_map({'image': buffer, 'header': buffer})
    img.to_file_map(file_map)
    # .nii.gz files are gzip-compressed serializations:
    compressed = gzip.compress(buffer.getvalue())
    with fs.open(fname, 'wb') as remote_file:
        remote_file.write(compressed)
def s3fs_nifti_read(fname, fs=None):
    """
    Lazily reads a nifti image from S3.

    Parameters
    ----------
    fname : string
        Full path (including bucket name and extension) to the S3 location
        of the file to be read.
    fs : an s3fs.S3FileSystem class instance, optional
        A file-system to refer to. Default to create a new file-system.

    Returns
    -------
    nib.Nifti1Image class instance

    Note
    ----
    Because the image is lazily loaded, data stored in the file
    is not transferred until `get_fdata` is called.
    """
    if fs is None:
        fs = s3fs.S3FileSystem()
    # Decompress the remote .nii.gz stream fully into memory:
    with fs.open(fname) as remote_file:
        decompressed = gzip.open(remote_file).read()
    holder = nib.FileHolder(fileobj=BytesIO(decompressed))
    return nib.Nifti1Image.from_file_map({'header': holder, 'image': holder})
def write_json(fname, data):
    """
    Write data to JSON file.

    Parameters
    ----------
    fname : str
        Full path to the file to write.

    data : dict
        A dict containing the data to write.

    Returns
    -------
    None
    """
    serialized = json.dumps(data)
    with open(fname, 'w') as out_file:
        out_file.write(serialized)
def read_json(fname):
    """
    Read data from a JSON file.

    Parameters
    ----------
    fname : str
        Full path to the data-containing file

    Returns
    -------
    dict
        The deserialized contents of the file.
    """
    # BUG FIX: the file was previously opened with mode 'w', which
    # truncates it to zero bytes and then makes json.load fail on the
    # (now empty) stream. Open read-only instead.
    with open(fname, 'r') as ff:
        out = json.load(ff)
    return out
def s3fs_json_read(fname, fs=None):
    """
    Reads json directly from S3

    Parameters
    ----------
    fname : str
        Full path (including bucket name and extension) to the file on S3.
    fs : an s3fs.S3FileSystem class instance, optional
        A file-system to refer to. Default to create a new file-system.
    """
    if fs is None:
        fs = s3fs.S3FileSystem()
    with fs.open(fname) as remote_file:
        return json.load(remote_file)
def s3fs_json_write(data, fname, fs=None):
    """
    Writes json from a dict directly into S3

    Parameters
    ----------
    data : dict
        The json to be written out
    fname : str
        Full path (including bucket name and extension) to the file to
        be written out on S3
    fs : an s3fs.S3FileSystem class instance, optional
        A file-system to refer to. Default to create a new file-system.
    """
    if fs is None:
        fs = s3fs.S3FileSystem()
    with fs.open(fname, 'w') as remote_file:
        json.dump(data, remote_file)
| [
"numpy.union1d",
"s3fs.S3FileSystem",
"gzip.open",
"nibabel.load",
"io.BytesIO",
"numpy.array",
"nibabel.Nifti1Image.from_file_map",
"dipy.data.read_stanford_labels",
"dipy.data.read_stanford_hardi",
"numpy.arange",
"os.path.exists",
"dipy.data.fetch_stanford_hardi",
"numpy.where",
"os.pat... | [((794, 812), 'os.path.expanduser', 'op.expanduser', (['"""~"""'], {}), "('~')\n", (807, 812), True, 'import os.path as op\n'), ((3014, 3053), 'os.path.join', 'op.join', (['afq_home', '"""callosum_templates"""'], {}), "(afq_home, 'callosum_templates')\n", (3021, 3053), True, 'import os.path as op\n'), ((10683, 10713), 'os.path.join', 'op.join', (['afq_home', '"""templates"""'], {}), "(afq_home, 'templates')\n", (10690, 10713), True, 'import os.path as op\n'), ((14074, 14094), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (14088, 14094), False, 'import boto3\n'), ((14229, 14279), 'os.path.join', 'op.join', (['my_path', '"""HCP"""', '"""derivatives"""', '"""dmriprep"""'], {}), "(my_path, 'HCP', 'derivatives', 'dmriprep')\n", (14236, 14279), True, 'import os.path as op\n'), ((16190, 16241), 'os.path.join', 'op.join', (['my_path', '"""HCP"""', '"""dataset_description.json"""'], {}), "(my_path, 'HCP', 'dataset_description.json')\n", (16197, 16241), True, 'import os.path as op\n'), ((16794, 16842), 'os.path.join', 'op.join', (['afq_home', '"""stanford_hardi_tractography"""'], {}), "(afq_home, 'stanford_hardi_tractography')\n", (16801, 16842), True, 'import os.path as op\n'), ((17928, 17954), 'dipy.data.fetch_stanford_hardi', 'dpd.fetch_stanford_hardi', ([], {}), '()\n', (17952, 17954), True, 'import dipy.data as dpd\n'), ((18123, 18184), 'os.path.join', 'op.join', (['my_path', '"""stanford_hardi"""', '"""derivatives"""', '"""dmriprep"""'], {}), "(my_path, 'stanford_hardi', 'derivatives', 'dmriprep')\n", (18130, 18184), True, 'import os.path as op\n'), ((19189, 19251), 'os.path.join', 'op.join', (['my_path', '"""stanford_hardi"""', '"""dataset_description.json"""'], {}), "(my_path, 'stanford_hardi', 'dataset_description.json')\n", (19196, 19251), True, 'import os.path as op\n'), ((19427, 19468), 'os.path.join', 'op.join', (['afq_home', '"""hcp_atlas_16_bundles"""'], {}), "(afq_home, 'hcp_atlas_16_bundles')\n", (19434, 19468), True, 
'import os.path as op\n'), ((21157, 21187), 'os.path.join', 'op.join', (['afq_home', '"""aal_atlas"""'], {}), "(afq_home, 'aal_atlas')\n", (21164, 21187), True, 'import os.path as op\n'), ((25991, 26060), 'numpy.union1d', 'np.union1d', (["atlas_vals['leftoccipital']", "atlas_vals['rightoccipital']"], {}), "(atlas_vals['leftoccipital'], atlas_vals['rightoccipital'])\n", (26001, 26060), True, 'import numpy as np\n'), ((26131, 26198), 'numpy.union1d', 'np.union1d', (["atlas_vals['lefttemporal']", "atlas_vals['righttemporal']"], {}), "(atlas_vals['lefttemporal'], atlas_vals['righttemporal'])\n", (26141, 26198), True, 'import numpy as np\n'), ((27190, 27219), 'numpy.concatenate', 'np.concatenate', (['idxes'], {'axis': '(0)'}), '(idxes, axis=0)\n', (27204, 27219), True, 'import numpy as np\n'), ((29457, 29466), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (29464, 29466), False, 'from io import BytesIO\n'), ((13575, 13629), 'boto3.setup_default_session', 'boto3.setup_default_session', ([], {'profile_name': 'profile_name'}), '(profile_name=profile_name)\n', (13602, 13629), False, 'import boto3\n'), ((14292, 14316), 'os.path.exists', 'os.path.exists', (['base_dir'], {}), '(base_dir)\n', (14306, 14316), False, 'import os\n'), ((14326, 14362), 'os.makedirs', 'os.makedirs', (['base_dir'], {'exist_ok': '(True)'}), '(base_dir, exist_ok=True)\n', (14337, 14362), False, 'import os\n'), ((14553, 14590), 'os.path.join', 'op.join', (['base_dir', "('sub-%s' % subject)"], {}), "(base_dir, 'sub-%s' % subject)\n", (14560, 14590), True, 'import os.path as op\n'), ((14610, 14637), 'os.path.join', 'op.join', (['sub_dir', '"""sess-01"""'], {}), "(sub_dir, 'sess-01')\n", (14617, 14637), True, 'import os.path as op\n'), ((16292, 16331), 'json.dump', 'json.dump', (['dataset_description', 'outfile'], {}), '(dataset_description, outfile)\n', (16301, 16331), False, 'import json\n'), ((17349, 17415), 'os.path.join', 'op.join', (['afq_home', '"""stanford_hardi_tractography"""', 
'"""mapping.nii.gz"""'], {}), "(afq_home, 'stanford_hardi_tractography', 'mapping.nii.gz')\n", (17356, 17415), True, 'import os.path as op\n'), ((18223, 18245), 'os.path.exists', 'op.exists', (['base_folder'], {}), '(base_folder)\n', (18232, 18245), True, 'import os.path as op\n'), ((18269, 18318), 'os.path.join', 'op.join', (['base_folder', '"""sub-01"""', '"""sess-01"""', '"""anat"""'], {}), "(base_folder, 'sub-01', 'sess-01', 'anat')\n", (18276, 18318), True, 'import os.path as op\n'), ((18327, 18366), 'os.makedirs', 'os.makedirs', (['anat_folder'], {'exist_ok': '(True)'}), '(anat_folder, exist_ok=True)\n', (18338, 18366), False, 'import os\n'), ((18388, 18436), 'os.path.join', 'op.join', (['base_folder', '"""sub-01"""', '"""sess-01"""', '"""dwi"""'], {}), "(base_folder, 'sub-01', 'sess-01', 'dwi')\n", (18395, 18436), True, 'import os.path as op\n'), ((18445, 18483), 'os.makedirs', 'os.makedirs', (['dwi_folder'], {'exist_ok': '(True)'}), '(dwi_folder, exist_ok=True)\n', (18456, 18483), False, 'import os\n'), ((18501, 18523), 'dipy.data.read_stanford_t1', 'dpd.read_stanford_t1', ([], {}), '()\n', (18521, 18523), True, 'import dipy.data as dpd\n'), ((18791, 18816), 'dipy.data.read_stanford_hardi', 'dpd.read_stanford_hardi', ([], {}), '()\n', (18814, 18816), True, 'import dipy.data as dpd\n'), ((19303, 19342), 'json.dump', 'json.dump', (['dataset_description', 'outfile'], {}), '(dataset_description, outfile)\n', (19312, 19342), False, 'import json\n'), ((20219, 20287), 'os.path.join', 'op.join', (['folder', '"""Atlas_in_MNI_Space_16_bundles"""', '"""bundles"""', '"""*.trk"""'], {}), "(folder, 'Atlas_in_MNI_Space_16_bundles', 'bundles', '*.trk')\n", (20226, 20287), True, 'import os.path as op\n'), ((20664, 20694), 'dipy.segment.metric.ResampleFeature', 'ResampleFeature', ([], {'nb_points': '(100)'}), '(nb_points=100)\n', (20679, 20694), False, 'from dipy.segment.metric import AveragePointwiseEuclideanMetric, ResampleFeature\n'), ((20712, 20752), 
'dipy.segment.metric.AveragePointwiseEuclideanMetric', 'AveragePointwiseEuclideanMetric', (['feature'], {}), '(feature)\n', (20743, 20752), False, 'from dipy.segment.metric import AveragePointwiseEuclideanMetric, ResampleFeature\n'), ((20766, 20801), 'dipy.segment.clustering.QuickBundles', 'QuickBundles', (['np.inf'], {'metric': 'metric'}), '(np.inf, metric=metric)\n', (20778, 20801), False, 'from dipy.segment.clustering import QuickBundles\n'), ((23808, 23827), 'numpy.arange', 'np.arange', (['(1)', '(26)', '(2)'], {}), '(1, 26, 2)\n', (23817, 23827), True, 'import numpy as np\n'), ((23927, 23947), 'numpy.arange', 'np.arange', (['(43)', '(54)', '(2)'], {}), '(43, 54, 2)\n', (23936, 23947), True, 'import numpy as np\n'), ((24250, 24271), 'numpy.array', 'np.array', (['[57, 67, 2]'], {}), '([57, 67, 2])\n', (24258, 24271), True, 'import numpy as np\n'), ((24310, 24332), 'numpy.array', 'np.array', (['[41, 83, 87]'], {}), '([41, 83, 87])\n', (24318, 24332), True, 'import numpy as np\n'), ((24373, 24397), 'numpy.array', 'np.array', (['[5, 9, 15, 25]'], {}), '([5, 9, 15, 25])\n', (24381, 24397), True, 'import numpy as np\n'), ((24434, 24468), 'numpy.array', 'np.array', (['[3, 5, 7, 9, 13, 15, 25]'], {}), '([3, 5, 7, 9, 13, 15, 25])\n', (24442, 24468), True, 'import numpy as np\n'), ((24507, 24529), 'numpy.array', 'np.array', (['[61, 63, 65]'], {}), '([61, 63, 65])\n', (24515, 24529), True, 'import numpy as np\n'), ((24563, 24581), 'numpy.arange', 'np.arange', (['(91)', '(117)'], {}), '(91, 117)\n', (24572, 24581), True, 'import numpy as np\n'), ((24619, 24640), 'numpy.array', 'np.array', (['[1, 11, 13]'], {}), '([1, 11, 13])\n', (24627, 24640), True, 'import numpy as np\n'), ((24675, 24701), 'numpy.array', 'np.array', (['[79, 81, 85, 89]'], {}), '([79, 81, 85, 89])\n', (24683, 24701), True, 'import numpy as np\n'), ((27111, 27127), 'numpy.sum', 'np.sum', (['is_in', '(0)'], {}), '(is_in, 0)\n', (27117, 27127), True, 'import numpy as np\n'), ((29426, 29445), 
's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {}), '()\n', (29443, 29445), False, 'import s3fs\n'), ((30267, 30286), 's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {}), '()\n', (30284, 30286), False, 'import s3fs\n'), ((30331, 30344), 'gzip.open', 'gzip.open', (['ff'], {}), '(ff)\n', (30340, 30344), False, 'import gzip\n'), ((30381, 30392), 'io.BytesIO', 'BytesIO', (['rr'], {}), '(rr)\n', (30388, 30392), False, 'from io import BytesIO\n'), ((30406, 30432), 'nibabel.FileHolder', 'nib.FileHolder', ([], {'fileobj': 'bb'}), '(fileobj=bb)\n', (30420, 30432), True, 'import nibabel as nib\n'), ((30447, 30505), 'nibabel.Nifti1Image.from_file_map', 'nib.Nifti1Image.from_file_map', (["{'header': fh, 'image': fh}"], {}), "({'header': fh, 'image': fh})\n", (30476, 30505), True, 'import nibabel as nib\n'), ((30821, 30840), 'json.dump', 'json.dump', (['data', 'ff'], {}), '(data, ff)\n', (30830, 30840), False, 'import json\n'), ((31087, 31100), 'json.load', 'json.load', (['ff'], {}), '(ff)\n', (31096, 31100), False, 'import json\n'), ((31482, 31501), 's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {}), '()\n', (31499, 31501), False, 'import s3fs\n'), ((31548, 31561), 'json.load', 'json.load', (['ff'], {}), '(ff)\n', (31557, 31561), False, 'import json\n'), ((32041, 32060), 's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {}), '()\n', (32058, 32060), False, 'import s3fs\n'), ((32105, 32124), 'json.dump', 'json.dump', (['data', 'ff'], {}), '(data, ff)\n', (32114, 32124), False, 'import json\n'), ((3750, 3768), 'os.path.join', 'op.join', (['folder', 'f'], {}), '(folder, f)\n', (3757, 3768), True, 'import os.path as op\n'), ((11257, 11275), 'os.path.join', 'op.join', (['folder', 'f'], {}), '(folder, f)\n', (11264, 11275), True, 'import os.path as op\n'), ((13716, 13829), 'boto3.setup_default_session', 'boto3.setup_default_session', ([], {'aws_access_key_id': 'aws_access_key_id', 'aws_secret_access_key': 'aws_secret_access_key'}), '(aws_access_key_id=aws_access_key_id,\n 
aws_secret_access_key=aws_secret_access_key)\n', (13743, 13829), False, 'import boto3\n'), ((14653, 14676), 'os.path.exists', 'os.path.exists', (['sub_dir'], {}), '(sub_dir)\n', (14667, 14676), False, 'import os\n'), ((14838, 14891), 'os.path.join', 'op.join', (['sess_dir', '"""dwi"""', "('sub-%s_dwi.bval' % subject)"], {}), "(sess_dir, 'dwi', 'sub-%s_dwi.bval' % subject)\n", (14845, 14891), True, 'import os.path as op\n'), ((14971, 15024), 'os.path.join', 'op.join', (['sess_dir', '"""dwi"""', "('sub-%s_dwi.bvec' % subject)"], {}), "(sess_dir, 'dwi', 'sub-%s_dwi.bvec' % subject)\n", (14978, 15024), True, 'import os.path as op\n'), ((15104, 15159), 'os.path.join', 'op.join', (['sess_dir', '"""dwi"""', "('sub-%s_dwi.nii.gz' % subject)"], {}), "(sess_dir, 'dwi', 'sub-%s_dwi.nii.gz' % subject)\n", (15111, 15159), True, 'import os.path as op\n'), ((15245, 15301), 'os.path.join', 'op.join', (['sess_dir', '"""anat"""', "('sub-%s_T1w.nii.gz' % subject)"], {}), "(sess_dir, 'anat', 'sub-%s_T1w.nii.gz' % subject)\n", (15252, 15301), True, 'import os.path as op\n'), ((15384, 15447), 'os.path.join', 'op.join', (['sess_dir', '"""anat"""', "('sub-%s_aparc+aseg.nii.gz' % subject)"], {}), "(sess_dir, 'anat', 'sub-%s_aparc+aseg.nii.gz' % subject)\n", (15391, 15447), True, 'import os.path as op\n'), ((15585, 15597), 'os.path.exists', 'op.exists', (['k'], {}), '(k)\n', (15594, 15597), True, 'import os.path as op\n'), ((17516, 17595), 'os.path.join', 'op.join', (['afq_home', '"""stanford_hardi_tractography"""', '"""tractography_subsampled.trk"""'], {}), "(afq_home, 'stanford_hardi_tractography', 'tractography_subsampled.trk')\n", (17523, 17595), True, 'import os.path as op\n'), ((17992, 18011), 'os.path.exists', 'op.exists', (['afq_home'], {}), '(afq_home)\n', (18001, 18011), True, 'import os.path as op\n'), ((18025, 18043), 'os.mkdir', 'os.mkdir', (['afq_home'], {}), '(afq_home)\n', (18033, 18043), False, 'import os\n'), ((18549, 18598), 'os.path.join', 'op.join', (['anat_folder', 
'"""sub-01_sess-01_T1w.nii.gz"""'], {}), "(anat_folder, 'sub-01_sess-01_T1w.nii.gz')\n", (18556, 18598), True, 'import os.path as op\n'), ((18618, 18644), 'dipy.data.read_stanford_labels', 'dpd.read_stanford_labels', ([], {}), '()\n', (18642, 18644), True, 'import dipy.data as dpd\n'), ((18675, 18731), 'os.path.join', 'op.join', (['anat_folder', '"""sub-01_sess-01_aparc+aseg.nii.gz"""'], {}), "(anat_folder, 'sub-01_sess-01_aparc+aseg.nii.gz')\n", (18682, 18731), True, 'import os.path as op\n'), ((18843, 18891), 'os.path.join', 'op.join', (['dwi_folder', '"""sub-01_sess-01_dwi.nii.gz"""'], {}), "(dwi_folder, 'sub-01_sess-01_dwi.nii.gz')\n", (18850, 18891), True, 'import os.path as op\n'), ((18912, 18959), 'os.path.join', 'op.join', (['dwi_folder', '"""sub-01_sess-01_dwi.bvecs"""'], {}), "(dwi_folder, 'sub-01_sess-01_dwi.bvecs')\n", (18919, 18959), True, 'import os.path as op\n'), ((18992, 19039), 'os.path.join', 'op.join', (['dwi_folder', '"""sub-01_sess-01_dwi.bvals"""'], {}), "(dwi_folder, 'sub-01_sess-01_dwi.bvals')\n", (18999, 19039), True, 'import os.path as op\n'), ((19849, 19939), 'os.path.join', 'op.join', (['folder', '"""Atlas_in_MNI_Space_16_bundles"""', '"""whole_brain"""', '"""whole_brain_MNI.trk"""'], {}), "(folder, 'Atlas_in_MNI_Space_16_bundles', 'whole_brain',\n 'whole_brain_MNI.trk')\n", (19856, 19939), True, 'import os.path as op\n'), ((20454, 20514), 'dipy.io.streamline.load_tractogram', 'load_tractogram', (['bundle_file', '"""same"""'], {'bbox_valid_check': '(False)'}), "(bundle_file, 'same', bbox_valid_check=False)\n", (20469, 20514), False, 'from dipy.io.streamline import load_tractogram, load_trk\n'), ((3869, 3890), 'nibabel.load', 'nib.load', (['resample_to'], {}), '(resample_to)\n', (3877, 3890), True, 'import nibabel as nib\n'), ((11376, 11397), 'nibabel.load', 'nib.load', (['resample_to'], {}), '(resample_to)\n', (11384, 11397), True, 'import nibabel as nib\n'), ((14702, 14731), 'os.path.join', 'os.path.join', (['sess_dir', '"""dwi"""'], 
{}), "(sess_dir, 'dwi')\n", (14714, 14731), False, 'import os\n'), ((14772, 14802), 'os.path.join', 'os.path.join', (['sess_dir', '"""anat"""'], {}), "(sess_dir, 'anat')\n", (14784, 14802), False, 'import os\n'), ((17653, 17675), 'numpy.zeros', 'np.zeros', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (17661, 17675), True, 'import numpy as np\n'), ((17677, 17686), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (17683, 17686), True, 'import numpy as np\n'), ((22081, 22099), 'os.path.join', 'op.join', (['folder', 'f'], {}), '(folder, f)\n', (22088, 22099), True, 'import os.path as op\n'), ((22156, 22174), 'os.path.join', 'op.join', (['folder', 'f'], {}), '(folder, f)\n', (22163, 22174), True, 'import os.path as op\n'), ((24054, 24074), 'numpy.arange', 'np.arange', (['(37)', '(42)', '(2)'], {}), '(37, 42, 2)\n', (24063, 24074), True, 'import numpy as np\n'), ((24126, 24140), 'numpy.array', 'np.array', (['[55]'], {}), '([55])\n', (24134, 24140), True, 'import numpy as np\n'), ((24192, 24212), 'numpy.arange', 'np.arange', (['(79)', '(90)', '(2)'], {}), '(79, 90, 2)\n', (24201, 24212), True, 'import numpy as np\n'), ((26578, 26591), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (26586, 26591), True, 'import numpy as np\n'), ((20355, 20376), 'os.path.split', 'op.split', (['bundle_file'], {}), '(bundle_file)\n', (20363, 20376), True, 'import os.path as op\n'), ((26673, 26686), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (26681, 26686), True, 'import numpy as np\n'), ((27158, 27173), 'numpy.where', 'np.where', (['is_in'], {}), '(is_in)\n', (27166, 27173), True, 'import numpy as np\n'), ((26769, 26782), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (26777, 26782), True, 'import numpy as np\n'), ((26866, 26879), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (26874, 26879), True, 'import numpy as np\n')] |
import csv
import os
import mxnet as mx
import numpy as np
import pylab as plt
from matplotlib import ticker
from mxnet import nd, gluon, autograd
# from mxnet.contrib import amp
from mxnet.gluon.nn import HybridBlock
from mxnet.gluon.utils import split_and_load as sal
from numpy.ma import masked_array
from skimage.measure import regionprops
from skimage.transform import resize
from skimage.util import montage
from sklearn.preprocessing import RobustScaler
from networks.discriminators import Discriminator
from utils import metrics
from utils.dataloader import DataLoader
from utils.batchify_fn import BatchifyFn
from utils.datasets import RadPath
from utils.learning_rate_scheduler import *
from utils.load_pretrained_net import pretrained_net, extract_encoder
from utils.losses import L1Loss_v2, L2Loss_v2, DiceLoss, L2LogLoss, L1LogLoss, LogCoshLoss, PhotometricLoss, \
corrcoefLoss, HuberLoss
from utils.optimizers import get_optimizer_dict
from utils.sampler import BatchSampler, RandomSamplerStratify2Sets
from utils.transformations import joint_transform
from utils.loss_margin import LossMargin
from collections import OrderedDict
from pydicom.dataset import Dataset, FileDataset
from imgaug.augmentables.heatmaps import HeatmapsOnImage
from skimage.color import label2rgb
# from utils import lookahead_optimizer
# amp.init()
# Path (no extension) where unsupervised predictions are dumped for analysis.
filename_unsup_pred = 'dummy/analyze_unsup_pred/data/pred_unsup'
# filename_pretrained_weights = 'dummy/analyze_unsup_pred/netG.params'
# Checkpoint prefix for the pretrained generator weights.
# NOTE: raw string — the path mixes '/' and '\' separators; without the r-prefix
# the backslashes only survived because '\d', '\c', '\i' are not escape
# sequences, which is fragile. Byte value is unchanged.
filename_pretrained_weights = r'results/ImprovedSemi_PostMICCAI_5\drnnGR4_lCL0_ENSL_lUS1_lC0_l2_nc8_stsa0.9_sd11_normal_v2b_check_last_iter_issues_TSA0.90\checkpoints\iter_2499'
# Refer: https://mxnet.incubator.apache.org/versions/master/tutorials/amp/amp_tutorial.html
# Weight-initializer registry, selected by the `initializer` CLI argument.
inits = {
    'none': mx.init.Uniform(),
    'normal': mx.init.Normal(.05),
    'xavier': mx.init.Xavier(magnitude=2.2),
    'he': mx.init.MSRAPrelu(),
}
class Init:
    """Initialize training parameters, result directories, and validation data.

    Copies every attribute of ``args`` onto the instance, creates the result
    folder tree, persists the run settings to CSV, loads the MR volumes from
    disk and builds a validation ``DataLoader`` over them.
    """

    def __init__(self, args):
        # Expose all CLI/config arguments as instance attributes.
        self.__dict__.update(args.__dict__)
        if isinstance(self.run_id, int):
            self.result_folder = 'results/%s/run_%03d/' % (self.experiment_name, self.run_id)
        else:
            self.result_folder = 'results/%s/%s/' % (self.experiment_name, self.run_id)
        self.result_folder_checkpoint = '%s/checkpoints' % self.result_folder
        self.result_folder_figure_train = '%s/figures/train' % self.result_folder
        self.result_folder_figure_train_unsup = '%s/figures/train_unsup' % self.result_folder
        self.result_folder_figure_val = '%s/figures/val' % self.result_folder
        self.result_folder_figure_test = '%s/figures/test' % self.result_folder
        self.result_folder_logs = '%s/logs' % self.result_folder
        # suffice = '_adjacent' if self.use_adjacent else ''
        suffice = '_adjacent_%s' % self.density_type  # input file always has the '_adjacent_' suffix
        self.dataset_file = r'inputs/%s%s.npy' % (self.dataset_file, suffice)
        self.dataset_file_org = r'inputs/%s.npy' % self.dataset_file_org
        # Create every attribute whose name contains 'folder' on disk.
        folders = [field for field in list(self.__dict__.keys()) if 'folder' in field]
        for folder in folders:
            if not os.path.exists(self.__getattribute__(folder)):
                os.makedirs(self.__getattribute__(folder))
        # One mx.gpu context per comma-separated id in `gpu_id`.
        self.ctx = [mx.gpu(int(i)) for i in self.gpu_id.split(',')]
        if not os.path.exists(self.result_folder):
            os.makedirs(self.result_folder)
        self.batch_size = args.batch_size
        self.save_setting()
        # NOTE(review): ImagePool is not imported in this file's visible header —
        # presumably provided by one of the star imports; verify.
        self.image_pool = ImagePool(args.pool_size)
        self.density_range = [-1, 1]
        Aus, wp_us = self.load_data()
        Aus = self.normalize(Aus, mask=wp_us)
        val_dataset = RadPath(
            Aus, Aus, wp_us, wp_us, wp_us,  # duplicate to simplify the modifications
            transform=joint_transform,
            is_val=True,
            input_size=self.input_size,
            density_range=self.density_range,
        )
        self.val_iter = DataLoader(val_dataset,
                                   batch_size=self.batch_size,
                                   num_workers=self.num_workers,
                                   last_batch='keep',
                                   shuffle=False,
                                   thread_pool=False,
                                   prefetch=None,
                                   )

    def load_data(self):
        """Load the MR volume and split it into image channels and mask.

        Returns
        -------
        A : ndarray
            First two channels (input modalities), cast to ``self.dtype``.
        wp : ndarray
            Remaining channel(s) — the whole-prostate mask.
        """
        # Move the slice axis first; keep channels [0, 1, 3] of the last axis.
        x = np.load(r'%s' % self.mr_file).transpose(
            (2, 0, 1, 3)).astype('float32')[..., [0, 1, 3]]  # exclude the target ROI
        x[np.isnan(x)] = 0  # zero-fill NaNs before any scaling
        A = x[..., :2].astype(self.dtype)
        wp = x[..., 2:].astype(self.dtype)
        return A, wp

    def save_setting(self):
        """Save all input settings (one key/value row each) into a CSV file."""
        with open('%s/parameters.csv' % self.result_folder, 'w') as f:
            w = csv.writer(f)
            for key, val in self.__dict__.items():
                w.writerow([key, val])

    @staticmethod
    def normalize(_A, _B=None, _C=None, mask=None, to_11=False, norm_0mean=False, train_idx_A=None, outlier_thr=1,
                  root=1):
        """Norm A (MRI-T2): filtering top 0.1% values by assigning them to the top_thr (the value at the 99th percentage)
        then map values to [0 1] range by dividing by the max intensity within the prostate for each slide.

        When ``_B`` and ``_C`` (density maps) are given, also clips them at an
        outlier threshold and rescales to [-1, 1] (``to_11``) or [0, 1];
        returns ``(_A, _B, _C, density_range, thr)``.  Otherwise returns the
        normalized ``_A`` alone.  Note: ``_B``/``_C`` are modified in place.
        """
        thr = .01  # placeholder; recomputed below when _B/_C are given
        mask = np.ones_like(_A) if mask is None else mask
        if not norm_0mean:
            # Robust-scale each (slice, channel) independently inside the mask.
            x = np.zeros_like(_A)
            for c in range(_A.shape[-1]):
                for i in range(_A.shape[0]):
                    if mask[i, ..., 0].sum() == 0:
                        continue  # empty mask: leave the slice as zeros
                    tmp = _A[i, ..., c][mask[i, ..., 0] == 1].reshape((-1, 1))
                    tmp_n = RobustScaler().fit_transform(X=tmp)[..., 0]
                    tmp_n1 = x[i, ..., c]
                    tmp_n1[np.where(mask[i, ..., 0] == 1)] = tmp_n
                    x[i, ..., c] = tmp_n1
            _A = x.copy()

        def find_threshold(_x=None, _mask=None, _outlier_thr=1):
            # Value at the `_outlier_thr` quantile of the masked voxels.
            _x = _x[_mask == 1] if _mask is not None else _x
            _x = _x.flatten() if _x.ndim > 1 else _x
            _x = np.sort(_x)
            thr_val = _x[int(_outlier_thr * _x.__len__())]
            return thr_val

        if (_B is not None) and (_C is not None):
            """Norm B & C (Density maps): simply map density values to [0 1] range by dividing by the max intensity within the prostate, and mask for each slide, respectively"""
            # Threshold computed on the training subset only, then applied to all.
            thr = 1 if outlier_thr == 1 else find_threshold(_C[train_idx_A], mask[train_idx_A], outlier_thr)
            thr = np.round(thr, 5)
            _C **= (1 / root)  # optional root-compression of densities
            _B[_B > thr] = thr
            _C[_C > thr] = thr
            # _C_tmp = _C[train_idx_A]
            # _C_tmp[_C_tmp > thr] = thr
            # _C[train_idx_A] = _C_tmp
            # print(thr)
            if to_11:
                # original_density_range = [_C[mask == 1].min(), _C[mask == 1].max()]
                _B = (_B - _B.min()) / (_B.max() / 2 - _B.min()) - 1
                _C = (_C - _C.min()) / (_C.max() / 2 - _C.min()) - 1  # _C[train_idx_A].max() will be 1
                # _C = (_C - _C[mask == 1].min()) / (_C[mask == 1].max() - _C[mask == 1].min())
                density_range = [-1, 1]
            else:
                # original_density_range = [_C[mask == 1].min(), _C[mask == 1].max()]
                _B = (_B - _B.min()) / (_B.max() - _B.min())
                # _C = (_C - _C.min()) / (_C.max() - _C.min())
                _C = (_C - _C[mask == 1].min()) / (_C[mask == 1].max() - _C[mask == 1].min())
                density_range = [0, 1]
            # print(thr)
            # _C[train_idx_A == False][mask[train_idx_A == False] > 0][100]
            return _A, _B, _C, density_range, thr
        else:
            return _A
def replace_conv2D(net, first_conv):
    """Replace each top-level ``gluon.nn.Conv2D`` child of *net* by *first_conv*.

    Used to swap a pretrained backbone's stem convolution for one whose
    ``in_channels`` matches the number of input channels (for a stock backbone
    the only Conv2D at this level is the stem).

    Parameters
    ----------
    net : gluon.Block
        Block whose direct children are scanned.
    first_conv : gluon.nn.Conv2D
        Replacement convolution layer.
    """
    # Snapshot the children first: register_child() mutates net._children,
    # and mutating a dict while iterating its items() view is unsafe.
    for key, layer in list(net._children.items()):
        if isinstance(layer, gluon.nn.Conv2D):
            with net.name_scope():
                net.register_child(first_conv, key)
class FeatureComparator(HybridBlock):
    """Pretrained ResNet-18 feature extractor accepting arbitrary input channels.

    The ImageNet model's stem convolution expects 3 channels; it is swapped for
    a Conv2D whose weights are the first ``in_channels`` slices of the
    pretrained stem weights.
    Layer surgery per "https://discuss.mxnet.io/t/modify-structure-of-loaded-network/1537"
    """

    def __init__(self, in_channels=1, ctx=None):
        super(FeatureComparator, self).__init__()
        from mxnet.gluon.model_zoo import vision
        from mxnet.initializer import Constant

        self.net = vision.resnet18_v1(pretrained=True, ctx=ctx)
        # Build a stem conv matching the requested number of input channels.
        stem = gluon.nn.Conv2D(64, kernel_size=7, strides=2,
                               padding=3, in_channels=in_channels)
        # Seed it with the matching slice of the pretrained stem weights.
        pretrained_slice = self.net.features[0].weight.data(ctx=ctx[0])[:, 0:in_channels]
        stem.initialize(init=Constant(pretrained_slice),
                        force_reinit=True, ctx=ctx)
        replace_conv2D(self.net.features, stem)

    def forward(self, x, *args):
        """Return the network's features for input ``x``."""
        return self.net(x)
class PixUDA(Init):
def __init__(self, args):
super(PixUDA, self).__init__(args=args)
self.set_lr_scheduler()
self.set_networks()
self.def_loss()
self.set_metric()
def network_init(self, net):
for param in net.collect_params().values():
self.param_init(param, self.ctx)
    def set_networks(self):
        """Instantiate generator (and optional discriminator), load weights,
        create uncertainty-weight coefficients and their Gluon trainers."""
        # Input channel count is inferred from the dataset file name.
        n_in = 2 if '5channels' in self.dataset_file else 1
        if self.true_density_generator == 'unet':
            from networks.unet_padding import UNet
            self.netG = UNet(base_channel=self.base_channel_unet, backbone_name=self.backbone_name)
        elif self.true_density_generator == 'unetpp':
            from networks.unetpp_padding import UNet
            self.netG = UNet(base_channel=self.base_channel_unet, backbone_name=self.backbone_name)
        elif self.true_density_generator == 'drnn':
            from networks.drnn import DenseMultipathNet, Init as init_net_params
            opts = init_net_params(num_fpg=self.num_fpg, growth_rate=self.growth_rate,
                                   init_channels=self.base_channel_drnn,
                                   num_channels_out=self.num_channels_out)
            self.netG = DenseMultipathNet(opts)
        elif self.true_density_generator == 'deeplabv3':
            from networks.deeplabv3 import DeepLabV3
            self.netG = DeepLabV3(1, backbone='resnet50', pretrained_base=False,
                                   ctx=self.ctx)
        # NOTE(review): plain `if` (not `elif`) — looks like it should join the
        # chain above; harmless as written since the conditions are exclusive.
        if self.true_density_generator == 'deeplabv3plus':
            from networks.deeplabv3b_plus import DeepLabWV3Plus
            self.netG = DeepLabWV3Plus(1, backbone='wideresnet', ctx=self.ctx, base_size=self.input_size,
                                        crop_size=self.input_size)
        self.netG.initialize(inits[self.initializer], ctx=self.ctx, force_reinit=True)
        # Resuming a run takes precedence over generic pretrained weights.
        if self.resumed_it > -1:
            self.load_checkpoints(prefix=self.checkpoint_prefix)
        elif self.use_pretrained:
            self.load_checkpoints(pretrained_dir=filename_pretrained_weights)
        # Learnable per-loss coefficients (uncertainty weighting) for every
        # active loss term (lambda > 0).
        self.use_l_coefs = False
        coefs = []
        for l in ['0', '_aux', '_C', '_consistency', '_D', '_unsup']:
            if self.__getattribute__('lambda%s' % l) > 0:
                self.use_l_coefs = True
                self.netG.__setattr__('coef%s' % l,
                                      gluon.Parameter('coef%s' % l, shape=1, init=mx.init.Constant(.6), lr_mult=1))
                self.netG.__getattribute__('coef%s' % l).initialize(ctx=self.ctx)
                coefs.append(self.netG.__getattribute__('coef%s' % l))
        if self.use_l_coefs:
            self.netG.coef_G = gluon.Parameter('coefG', shape=1, init=mx.init.Constant(.6), lr_mult=1)
            self.netG.coef_G.initialize(ctx=self.ctx)
            coefs.append(self.netG.coef_G)
            # Dedicated trainer for the loss coefficients only.
            self.trainer_coefs = gluon.Trainer(coefs,
                                               optimizer=self.optimizer,
                                               optimizer_params=get_optimizer_dict(
                                                   self.optimizer,
                                                   lr=self.base_lr,
                                                   lr_scheduler=self._lr_scheduler,
                                                   wd=self.wd,
                                                   beta1=self.beta1,
                                               ),
                                               # update_on_kvstore=False,
                                               )
        # Main generator trainer.
        self.trainerG = gluon.Trainer(self.netG.collect_params(),
                                      optimizer=self.optimizer,
                                      optimizer_params=get_optimizer_dict(
                                          self.optimizer,
                                          lr=self.base_lr,
                                          lr_scheduler=self._lr_scheduler,
                                          wd=self.wd,
                                          beta1=self.beta1,
                                      ),
                                      # update_on_kvstore=False,
                                      )
        # amp.init_trainer(self.trainerG)  # automatic mixed precision
        # Per-GPU batch size; unsup batches are `unsup_ratio` times larger.
        largest_batch_size = int(np.ceil(self.batch_size / len(self.gpu_id.split(','))))
        largest_batch_size *= self.unsup_ratio if self.lambda_unsup > 0 else 1
        if self.show_generator_summary:
            [self.netG.summary(
                nd.random.normal(0, 1, shape=(largest_batch_size, n_in, self.input_size, self.input_size), ctx=ctx)) for
                ctx
                in self.ctx]
        # Optional feature comparator for the perceptual (lambda_D) loss.
        self.D_features = FeatureComparator(in_channels=1, ctx=self.ctx) if self.lambda_D > 0 else None
        # Encoder-only view of the generator for embedding-space unsup loss.
        self.netGE = extract_encoder(self.netG if self.lambda_aux <= 0 else self.netG.shared_net,
                                     self.num_downs) if (
                (self.lambda_unsup > 0) and (self.compare_embedding_unsup > 0)) else None
        # GAN discriminator
        if self.lambda0 > 0:
            self.netD = Discriminator(in_channels=self.n_A_channel_idx + 1)
            # Initialize parameters
            self.network_init(self.netD)
            self.trainerD = gluon.Trainer(self.netD.collect_params(), self.optimizer,
                                          {'learning_rate': self.base_lr, 'beta1': self.beta1, 'wd': 1e-5},
                                          update_on_kvstore=False)
            # amp.init_trainer(self.trainerD)  # automatic mixed precision
    def def_loss(self):
        """Instantiate every loss used in training, selected by config flags.

        ``self.l_type`` picks the regression loss from the registry below;
        lambda_* flags decide which auxiliary losses are created at all.
        """
        # GAN loss for the discriminator/generator game.
        self.criterionGAN = gluon.loss.SigmoidBinaryCrossEntropyLoss()
        # self.trueDensity_train = gluon.loss.L1Loss()
        # self.trueDensity_val = gluon.loss.L1Loss()
        # Registry of supported regression losses, keyed by config name.
        loss_fn = {
            'l1': L1Loss_v2,
            'l2': L2Loss_v2,
            'huber': HuberLoss,
            'l2log': L2LogLoss,
            'l1log': L1LogLoss,
            'logcosh': LogCoshLoss,
            'photomtrc': PhotometricLoss,
            'l2org': mx.gluon.loss.L2Loss,
            'rloss': corrcoefLoss,
        }
        self.density_corr = loss_fn['rloss']()
        # self.trueDensity_train = loss_fn[self.l_type](with_DepthAware=self.with_DepthAware)
        self.trueDensity_train = loss_fn[self.l_type]()
        self.trueDensity_val = loss_fn[self.l_type]()
        # Cosine-embedding loss for the perceptual-feature comparison.
        self.feature_difference = gluon.loss.CosineEmbeddingLoss() if self.lambda_D > 0 else None
        if self.compare_embedding_unsup:
            # Unsupervised consistency measured in embedding space.
            self.density_unsup = gluon.loss.CosineEmbeddingLoss() if self.lambda_unsup > 0 else None
        else:
            # self.density_unsup = loss_fn[self.l_type](
            #     with_DepthAware=self.with_DepthAware, scale_invar=False) if self.lambda_unsup > 0 else None
            # Unsupervised consistency measured in output (density) space.
            self.density_unsup = loss_fn[self.l_type]()
            # self.density_unsup = loss_fn['photomtrc'](
            #     with_DepthAware=self.with_DepthAware, scale_invar=False) if self.lambda_unsup > 0 else None
        if self.lambda_aux > 0:
            # Auxiliary head: regression loss if reconstructing the input,
            # Dice loss for segmentation otherwise.
            self.aux_fn = loss_fn[self.l_type]() if self.reconstruct_input else DiceLoss()
def set_metric(self):
self.metric = mx.metric.CustomMetric(self.facc)
def set_inputs(self, **kwargs):
trp = [0, 3, 1, 2]
for key, value in kwargs.items():
# self.__setattr__(key, value.transpose(trp).as_in_context(self.ctx).astype(self.dtype))
self.__setattr__(key, sal(value.transpose(trp), ctx_list=self.ctx, even_split=False))
    def set_lr_scheduler(self):
        """Setup learning rate scheduler.

        Builds every supported scheduler eagerly and stores the one named by
        ``self.lr_scheduler`` in ``self._lr_scheduler``.  The custom schedule
        classes come from ``utils.learning_rate_scheduler`` (star import).
        """
        self.lr_steps = [int(lr) for lr in self.lr_steps.split(',')]
        schedules = {
            'one_cycle': OneCycleSchedule(
                start_lr=self.min_lr, max_lr=self.max_lr, cycle_length=self.cycle_length,
                cooldown_length=self.cooldown_length, finish_lr=self.finish_lr, inc_fraction=self.inc_fraction,
            ),
            'triangular': TriangularSchedule(
                min_lr=self.min_lr, max_lr=self.max_lr, cycle_length=self.cycle_length, inc_fraction=self.inc_fraction,
            ),
            'factor': mx.lr_scheduler.FactorScheduler(
                step=self.lr_step, factor=self.lr_factor, warmup_mode=self.warmup_mode,
                warmup_steps=self.warmup_steps, warmup_begin_lr=self.warmup_begin_lr, base_lr=self.base_lr,
            ),
            'multifactor': mx.lr_scheduler.MultiFactorScheduler(
                step=self.lr_steps, factor=self.lr_factor, base_lr=self.base_lr, warmup_mode=self.warmup_mode,
                warmup_begin_lr=self.warmup_begin_lr, warmup_steps=self.warmup_steps,
            ),
            'poly': mx.lr_scheduler.PolyScheduler(
                max_update=self.cycle_length, base_lr=self.base_lr, pwr=2, final_lr=self.min_lr,
            ),
            'cycle': CyclicalSchedule(
                TriangularSchedule, min_lr=self.min_lr, max_lr=self.max_lr, cycle_length=self.cycle_length,
                inc_fraction=self.inc_fraction,
                cycle_length_decay=self.cycle_length_decay,
                cycle_magnitude_decay=self.cycle_magnitude_decay,
                # stop_decay_iter=self.stop_decay_iter,
                final_drop_iter=self.final_drop_iter,
            ),
            # Cosine-style one-cycle with linear warm-up.
            'cosine': LinearWarmUp(
                OneCycleSchedule(start_lr=self.min_lr, max_lr=self.max_lr, cycle_length=self.cycle_length,
                                 cooldown_length=self.cooldown_length, finish_lr=self.finish_lr),
                start_lr=self.warmup_begin_lr,
                length=self.warmup_steps,
            )
        }
        self._lr_scheduler = schedules[self.lr_scheduler]
    def compare_unsup(self):
        """Get unsupervised loss.

        Computes a consistency loss between the prediction on an unlabeled
        batch and the prediction on its augmented version, either in encoder
        embedding space (``compare_embedding_unsup``) or in output density
        space.  All intermediates are lists with one entry per GPU context.
        """
        if self.compare_embedding_unsup:
            # Compare encoder embeddings of original vs augmented inputs.
            self.fake_out_unsup = [nd.squeeze(self.netGE(A_unsup)) for A_unsup in self.A_unsup]
            self.fake_out_unsup_aug = [nd.squeeze(self.netGE(A_rp_unsup)) for A_rp_unsup in self.A_rp_unsup]
            if self.lambda_aux > 0:
                # With an aux head the network returns a tuple; keep the main output.
                self.fake_out_unsup = [fake_out_unsup[0] for fake_out_unsup in self.fake_out_unsup]
                self.fake_out_unsup_aug = [fake_out_unsup_aug[0] for fake_out_unsup_aug in self.fake_out_unsup_aug]
            # Cosine-embedding loss with target +1 (embeddings should agree).
            self.unsup_loss = [
                self.density_unsup(
                    fake_out_unsup,
                    fake_out_unsup_aug,
                    nd.ones(fake_out_unsup.shape[0], ctx=fake_out_unsup.context),
                )
                for fake_out_unsup, fake_out_unsup_aug
                in zip(self.fake_out_unsup, self.fake_out_unsup_aug, )]
        else:
            # Compare full density outputs; the augmented branch is flipped
            # back (axis 3) to undo the horizontal-flip augmentation.
            self.fake_out_unsup = [self.netG(A_unsup) for A_unsup in self.A_unsup]
            self.fake_out_unsup_aug = [nd.flip(self.netG(A_rp_unsup), 3) for A_rp_unsup in self.A_rp_unsup]
            if self.lambda_aux > 0:
                self.fake_out_unsup = [fake_out_unsup[0] for fake_out_unsup in self.fake_out_unsup]
                self.fake_out_unsup_aug = [fake_out_unsup_aug[0] for fake_out_unsup_aug in self.fake_out_unsup_aug]
            # Outside the prostate mask, force outputs to -1 (background).
            self.fake_out_unsup = [nd.where(wp_unsup, fake_out_unsup, wp_unsup - 1) for wp_unsup, fake_out_unsup in
                                   zip(self.wp_unsup, self.fake_out_unsup)]
            self.fake_out_unsup_aug = [nd.where(wp_unsup, fake_out_unsup_aug, wp_unsup - 1) for
                                       wp_unsup, fake_out_unsup_aug in zip(self.wp_unsup, self.fake_out_unsup_aug)]
            # NOTE(review): _margin_unsup is zipped in but unused in the body
            # (the margin argument is passed as None).
            self.unsup_loss = [
                self.density_unsup(
                    fake_out_unsup,
                    fake_out_unsup_aug,
                    wp_unsup,
                    # _margin_unsup / self.C_thr,
                    None,
                )
                for fake_out_unsup, fake_out_unsup_aug, wp_unsup, _margin_unsup
                in zip(self.fake_out_unsup, self.fake_out_unsup_aug, self.wp_unsup, self._margin_unsup)]
        if self.monitor_unsup_outputs:
            # Save a montage of (prediction | augmented prediction | abs diff).
            im = np.hstack(
                (montage(self.fake_out_unsup[0].asnumpy()[:9, 0]),
                 montage(self.fake_out_unsup_aug[0].asnumpy()[:9, 0]),
                 montage(
                     np.abs(
                         self.fake_out_unsup[0].asnumpy()[:9, 0] - self.fake_out_unsup_aug[0].asnumpy()[:9, 0]))))
            [plt.imsave('%s/ep%04d_%02d_%d' % (
                self.result_folder_figure_train_unsup, self.current_epoch, self.current_it, i), im) for i in
                range(1)]
    def optimize_D(self):
        """One discriminator update: BCE on pooled fake pairs vs real pairs.

        The generator forward pass happens outside ``autograd.record`` so no
        generator gradients are produced here.
        """
        if hasattr(self, 'A_rp_unsup'):  # choose unsup data if avail.
            tmp_input = self.A_rp_unsup
        else:
            tmp_input = self.A_rp
        fake_out = [self.netG(A_rp) for A_rp in tmp_input]
        # With an aux head the generator returns a tuple; keep the main output.
        fake_out = [fo[0] if self.lambda_aux > 0 else fo for fo in fake_out]
        if hasattr(self, 'wp_unsup'):
            tmp_wp = self.wp_unsup
        else:
            tmp_wp = self.wp
        # Outside the mask, force outputs to -1 (background).
        fake_out = [nd.where(wp, fo, wp - 1) for wp, fo in zip(tmp_wp, fake_out)]
        # Conditional-GAN pairs (input, output), drawn through the image pool.
        fake_concat = [self.image_pool.query(nd.concat(A_rp, fo, dim=1)) for A_rp, fo in zip(self.A_rp, fake_out)]
        with autograd.record():
            # Train with fake image
            # Use image pooling to utilize history images
            output = [self.netD(fc) for fc in fake_concat]
            fake_label = [nd.zeros_like(op) for op in output]
            err_DB_fake = [self.criterionGAN(op, fl) for op, fl in zip(output, fake_label)]
            [self.metric.update([fl, ], [op, ]) for fl, op in zip(fake_label, output)]
            # self.metric.update([fake_label[0], ], [output[0], ])
            # Train with real image
            real_concat = [nd.concat(A_rp, _C, dim=1) for A_rp, _C in zip(self.A_rp, self.C)]
            output = [self.netD(rc) for rc in real_concat]
            real_label = [nd.ones_like(op) for op in output]
            err_DB_real = [self.criterionGAN(op, rl) for op, rl in zip(output, real_label)]
            self.err_DB = [(edb + edf) * 0.5 for edb, edf in zip(err_DB_real, err_DB_fake)]
            [self.metric.update([rl, ], [op, ]) for rl, op in zip(real_label, output)]
        for err_DB in self.err_DB:
            err_DB.backward()
        # with amp.scale_loss(self.err_DB, self.trainerD) as scaled_loss:
        #     autograd.backward(scaled_loss)
        self.trainerD.step(self.batch_size)
    def create_net(self, upscale_factor=1):
        """Build a small super-resolution CNN ending in pixel-shuffle upsampling.

        Parameters
        ----------
        upscale_factor : int
            Spatial upscaling factor applied by the final PixelShuffle2D layer.

        Returns
        -------
        SuperResolutionNet
            Uninitialized Gluon HybridBlock.
        """
        from mxnet.gluon import nn
        import mxnet.gluon.contrib.nn as contrib_nn

        def conv_factory(opts, num_filters, kernel_size, stride=1, group=1):
            """A convenience function for convolution with BatchNorm & activation"""
            pad = int((kernel_size - 1) / 2)  # 'same' padding for odd kernels
            out = nn.HybridSequential()
            out.add(nn.BatchNorm())
            if opts.activation == 'leaky':
                # NOTE(review): Options below defines no `alpha`; this branch
                # would raise AttributeError — unreachable with the default
                # activation ('relu').
                out.add(nn.LeakyReLU(opts.alpha))
            else:
                out.add(nn.Activation(opts.activation))
            out.add(nn.Conv2D(channels=num_filters, kernel_size=(kernel_size, kernel_size),
                              strides=(stride, stride), use_bias=opts.use_bias,
                              padding=(pad, pad), groups=group))
            return out

        class Options:
            """Fixed hyper-parameters for conv_factory."""

            def __init__(self):
                super(Options, self).__init__()
                self.activation = 'relu'
                self.use_bias = False

        class SuperResolutionNet(gluon.HybridBlock):
            """Four conv blocks followed by tanh-activated pixel shuffle."""

            def __init__(self, upscale_factor, opts):
                super(SuperResolutionNet, self).__init__()
                with self.name_scope():
                    self.conv1 = conv_factory(opts, num_filters=64, kernel_size=5, stride=1)
                    self.conv2 = conv_factory(opts, num_filters=64, kernel_size=3, stride=1)
                    self.conv3 = conv_factory(opts, num_filters=32, kernel_size=3, stride=1)
                    # upscale_factor**2 channels feed the pixel-shuffle rearrangement.
                    self.conv4 = conv_factory(opts, num_filters=upscale_factor ** 2, kernel_size=3, stride=1)
                    self.pxshuf = contrib_nn.PixelShuffle2D(upscale_factor)

            def hybrid_forward(self, F, x):
                x = self.conv1(x)
                x = self.conv2(x)
                x = self.conv3(x)
                x = self.conv4(x)
                x = F.tanh(self.pxshuf(x))  # outputs in [-1, 1]
                return x

        return SuperResolutionNet(upscale_factor, opts=Options())
def optimize_G(self):
    """Optimize generator.

    Fast path: when every auxiliary lambda is zero, only the supervised
    true-density loss is computed.  Otherwise each enabled loss term is
    weighted by a learned coefficient in the form (1/var)*loss + log(var)
    (uncertainty-style weighting; ``var`` is the squared learned coef).
    All lists are per-device (one NDArray per context).
    """
    if np.array([self.lambda_C, self.lambda_D, self.lambda_consistency, self.lambda_unsup, self.lambda0,
                 self.lambda_aux]).sum() == 0:  # No extra loss
        with autograd.record():
            self.fake_out = [self.netG(A_rp) for A_rp in self.A_rp]
            self.loss_true_density_train = [self.trueDensity_train(fake_out, C, m, margin) for
                                            C, fake_out, m, margin in
                                            zip(self.C, self.fake_out, self.m, self._margin)]
            self.loss_G = self.loss_true_density_train
            [loss_G.backward() for loss_G in self.loss_G]
    else:
        with autograd.record():
            self.fake_out = [self.netG(A_rp) for A_rp in self.A_rp]
            # Supervised learning
            self.var0 = [nd.square(coef) for coef in self.netG.coef_G._data]
            self.loss_true_density_train = [self.trueDensity_train(fake_out, C, m, margin) for
                                            fake_out, C, m, margin in
                                            zip(self.fake_out, self.C, self.m, self._margin)]
            self.loss_G = [((1 / var) * l + nd.log(var)) for l, var in
                           zip(self.loss_true_density_train, self.var0)]
            ############################### Consistency Loss ###############################
            if self.lambda_consistency > 0:
                # Predictions from each single modality (the other channel zeroed)
                # should agree with each other.
                fake_out_T2 = [self.netG(A_rp) for A_rp in
                               [nd.concat(A_rp[:, 0:1], nd.zeros_like(A_rp[:, 0:1]), dim=1) for A_rp in
                                self.A_rp]]  # masked out ADC channel
                fake_out_ADC = [self.netG(A_rp) for A_rp in
                                [nd.concat(nd.zeros_like(A_rp[:, 1:2]), A_rp[:, 1:2], dim=1) for A_rp in
                                 self.A_rp]]  # masked out T2 channel
                self.loss_consistency_train = [self.density_corr(_fake_out_T2, _fake_out_ADC, wp) for
                                               _fake_out_T2, _fake_out_ADC, wp in
                                               zip(fake_out_T2, fake_out_ADC, self.wp)]
                self.var1 = [nd.square(coef) for coef in self.netG.coef_consistency._data]
                self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in
                               zip(self.loss_G, self.loss_consistency_train, self.var1)]
            ############################### Correlation Loss ###############################
            if self.lambda_C > 0:
                self.var2 = [nd.square(coef) for coef in self.netG.coef_C._data]
                self.loss_corr_train = [self.density_corr(fake_out, C, m) for
                                        C, fake_out, m in
                                        zip(self.C, self.fake_out, self.m)]
                self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in
                               zip(self.loss_G, self.loss_corr_train, self.var2)]
            ############################### Unsupervised learning ###############################
            if self.lambda_unsup > 0:
                # compare_unsup() fills self.unsup_loss as a side effect.
                self.compare_unsup()
                self.var3 = [nd.square(coef) for coef in self.netG.coef_unsup._data]
                self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in
                               zip(self.loss_G, self.unsup_loss, self.var3)]
            ############################## Feature Comparision ###############################
            if self.lambda_D > 0:
                # Perceptual-style loss: cosine-type distance between netD features
                # of masked label and masked prediction (extra 0.1 scaling below).
                self.var4 = [nd.square(coef) for coef in self.netG.coef_D._data]
                self.loss_features = [self.feature_difference(
                    self.D_features(nd.where(m, C, m - 1)),
                    self.D_features(nd.where(m, fake_out, m - 1)),
                    nd.ones((C.shape[0]), ctx=C.context)
                ).mean() for m, C, fake_out in zip(self.m, self.C, self.fake_out)]
                self.loss_G = [l0 + ((1 / var) * l1 * .1 + nd.log(var)) for l0, l1, var in
                               zip(self.loss_G, self.loss_features, self.var4)]
            [loss_G.backward() for loss_G in self.loss_G]
    self.trainerG.step(1, ignore_stale_grad=False)
    if self.use_l_coefs:
        # Separate trainer updates the learned loss-weighting coefficients.
        self.trainer_coefs.step(1, ignore_stale_grad=False)
    [self.save_training_outputs(self.A_rp[i], self.fake_out[i], self.C[i], self.m[i], prefix='',
                                suffix='_%02d_%d' % (self.current_it, i)) if self.monitor_training_outputs else None
     for i in range(len(self.ctx))]
def update_running_loss(self, first_iter=False, num_batch=None):
    """Compute running loss.

    Two modes:
      * ``num_batch is None`` (accumulate): add each current batch-mean loss
        to its ``running_*`` counterpart; on ``first_iter`` the set of
        ``running_*`` attributes is discovered from every attribute whose
        name contains 'loss' or 'err' and initialized to 0.
      * ``num_batch`` given (finalize): divide each running sum by the
        number of batches to obtain epoch averages.
    """
    if num_batch is None:
        if first_iter:
            loss_fields = [field for field in self.__dict__.keys() if ('loss' in field) or ('err' in field)]
            self.running_loss_fields = ['running_' + field for field in loss_fields]
            [self.__setattr__(field, 0.) for field in self.running_loss_fields]
        for loss_field in self.running_loss_fields:
            # Concatenate the per-device loss list, then accumulate its mean.
            _loss = nd.concatenate(list(self.__getattribute__(loss_field.replace('running_', ''))))
            self.__setattr__(loss_field, (self.__getattribute__(loss_field) + _loss.mean().asscalar()))
    else:
        for loss_field in self.running_loss_fields:
            self.__setattr__(loss_field, (self.__getattribute__(loss_field) / num_batch))
def update_mxboard(self, sw, epoch, val_data=None):
    """Show stats and images on tensorboard.

    Must run after update_running_loss.  Logs running losses, computes
    validation metrics via ``metrics.update_mxboard_metric_v1``, normalizes
    the input channels of ``val_data`` for display, splits channels into
    separate images, optionally saves a montage, and returns the metric dict.
    """
    for loss_field in self.running_loss_fields:
        _loss = self.__getattribute__(loss_field)
        _loss = _loss.mean().asscalar() if isinstance(_loss, nd.NDArray) else _loss.mean()
        if 'loss_true_density' in loss_field:  # True density
            sw.add_scalar('loss/true_density_loss', _loss, global_step=epoch)
        else:  # GAN loss
            # First three underscore-separated tokens name the loss kind.
            loss_type = loss_field.split('_')[0] + '_' + \
                        loss_field.split('_')[1] + '_' + \
                        loss_field.split('_')[2]
            # sw.add_scalar('loss/' + loss_type, {loss_field: _loss}, global_step=epoch)
            sw.add_scalar('loss/' + loss_type, _loss, global_step=epoch)
    if hasattr(self, 'running_loss_true_density_val'):
        sw.add_scalar('loss/true_density_loss_val', self.running_loss_true_density_val, global_step=epoch)
    metric_list = metrics.update_mxboard_metric_v1(sw, data=val_data, global_step=epoch,
                                                   metric_names=[
                                                       'r_whole', 'l1_whole', 'ssim_whole',
                                                       'rmse_whole', 'rmse_log_whole',
                                                       't1', 't2', 't3',
                                                       'abs_rel_diff', 'sqr_rel_diff',
                                                       'ta1', 'ta2',
                                                   ],
                                                   prefix='validation_',
                                                   num_input_channels=self.n_A_channel_idx,
                                                   c_thr=self.C_thr,
                                                   density_range=self.density_range,
                                                   root=self.root)  # 'r', 'l1', 'ssim', 'nmi',
    # if hasattr(self, 'current_margin'):
    sw.add_scalar('loss_margin', self.current_margin, global_step=epoch)
    #######################################
    # Map input data to 0 - 1 (per channel), then re-apply the wp mask
    # stored in val_data[4].
    for c in range(val_data[0].shape[1]):
        val_data[0][:, c] = (val_data[0][:, c] - val_data[0][:, c].min()) / (
                val_data[0][:, c].max() - val_data[0][:, c].min()) * val_data[4][:, 0]
    """ MULTIPLE CHANNELS OF EACH IMAGE ARE SPLIT INTO SEPARATE IMAGES """
    _val_data = []
    for i in range(len(val_data)):
        for j in range(val_data[i].shape[1]):
            _val_data.append(val_data[i][:, j:j + 1])
    #######################################
    """ NORM TO 0-1 RANGE IF NECESSARY """
    if self.to_11:  # Normalize image from [-1, 1] to [0, 1]
        for i in range(-4, -2):  # prediction and label
            _val_data[i] = self.normalize_01(_val_data[i], [-1, 1]) * _val_data[-1]
    #######################################
    """ SAVE FIRST IMAGE TO FOLDER & UPDATE BEST METRICS """
    to_save_montage = self.update_best_metrics(metric_list)
    print(self.best_metrics)
    if to_save_montage:
        self.save_montage_im(_val_data)
    #######################################
    """ DROP LAST CHANNEL (WP) IN _val_data BECAUSE IT IS NO LONGER NECESSARY """
    _val_data = _val_data[:-1]
    #######################################
    return metric_list
@staticmethod
def linear_scale(x, vmin=-1, vmax=1, tmin=0, tmax=1):
return ((x - vmin) / (vmax - vmin)) * (tmax - tmin) + tmin
def _gen_unsup_pred(self):
    """Generate predictions for unsupervised data.

    Iterates the validation iterator, runs the generator per device,
    merges the per-device arrays, masks predictions with wp, and returns
    (inputs, scaled predictions, wp masks) as numpy arrays concatenated
    over all batches.
    """
    input_list, pred_list, wp_list = [], [], []
    for i, (_, _, C, m, wp, A_rp) in enumerate(self.val_iter):
        # Inputs to GPUs (or CPUs)
        self.set_inputs(A_rp_val=A_rp, C_val=C, m_val=m, wp_val=wp)
        pred = nd.concatenate([self.netG(A_rp_val) for A_rp_val in self.A_rp_val])
        # merge data across all used GPUs
        self.C_val, self.m_val, self.A_rp_val, self.wp_val = [
            nd.concatenate(list(x)) for x in [self.C_val,
                                              self.m_val,
                                              self.A_rp_val,
                                              self.wp_val]
        ]
        # Broadcast the single-channel mask over all prediction channels.
        wp_val = nd.tile(self.wp_val, (1, pred.shape[1], 1, 1))
        pred = nd.where(wp_val, pred, wp_val - 1)  # wp-masked
        input_list.append(self.A_rp_val.asnumpy())
        # linear_scale maps [-1, 1] to [0, 1] by default.
        pred_list.append(self.linear_scale(pred.asnumpy()))
        wp_list.append(self.wp_val.asnumpy())
    return np.concatenate(input_list), \
           np.concatenate([*pred_list]), \
           np.concatenate([*wp_list]),  # duplicate to simplify the modifications
def save_montage_im(self, im, prefix=''):
    """Render all channels of *im* as one montage figure and save it.

    Expects *im* as a list whose last two entries are the contour mask and
    the wp mask; the remaining entries are the images to display.
    NOTE(review): the *prefix* parameter is currently unused.
    """
    _im = np.squeeze(im)[:-2]
    # Tile contour / wp masks so they align with every displayed image.
    _im_contour = np.tile(np.squeeze(im)[-2], (len(im) - 2, 1, 1, 1))
    _im_wp = np.tile(np.squeeze(im)[-1], (len(im) - 2, 1, 1, 1))
    # Use 2 as a sentinel for "inside wp" so it can be masked out below.
    _im_wp[_im_wp == 1] = 2
    for i in range(self.n_A_channel_idx):
        # Input channels are shown as-is (grayscale), not wp-masked.
        _im_wp[i] = _im[i]
    _im_wp = montage(np.concatenate(_im_wp, axis=2))
    _im = montage(np.concatenate(_im, axis=2))
    _im_wp = masked_array(_im_wp, _im_wp == 2)
    # jet underlay (predictions/labels) with gray overlay (inputs) on top.
    plt.imshow(_im, cmap='jet', vmin=0, vmax=.7, interpolation='nearest')
    plt.imshow(_im_wp, cmap='gray', vmin=0, vmax=1, interpolation='nearest')
    plt.contour((montage(np.concatenate(_im_contour, axis=2))).astype(int), linewidths=.14, colors='white')
    self.save_fig(folder=self.result_folder_figure_test) if self.test_mode else self.save_fig(
        folder=self.result_folder_figure_val)
def save_fig(self, folder, prefix='', suffix='', dpi=500):
    """Save the current matplotlib figure to ``<folder>/<prefix>ep<epoch><suffix>.png``.

    Axis tick locators are removed so the image is saved without borders;
    all figures are closed afterwards to free memory.
    """
    ax = plt.gca()
    ax.xaxis.set_major_locator(ticker.NullLocator())
    ax.yaxis.set_major_locator(ticker.NullLocator())
    plt.savefig(
        '%s/%sep%04d%s.png' % (folder, prefix, self.current_epoch, suffix),
        pad_inches=0,
        bbox_inches='tight',
        transparent=True,
        dpi=dpi,
    )
    plt.close('all')
def update_best_metrics(self, metric_list):
    """Refresh the tracked best validation metrics and return the save flag.

    ``self.best_metrics`` is updated in place when the mean correlation
    improves (higher is better) or the mean L1 improves (lower is better).
    The metric-based save flag in the original implementation is always
    reset before return, so a montage is requested exactly when the
    iteration counter equals 1999.
    """
    mean_r = metric_list['r_whole'].mean()
    mean_l1 = metric_list['l1_whole'].mean()
    if mean_r > self.best_metrics['r_whole_best']:
        self.best_metrics['r_whole_best'] = mean_r
    if mean_l1 < self.best_metrics['l1_whole_best']:
        self.best_metrics['l1_whole_best'] = mean_l1
    # Equivalent to the original dead-code chain: every metric-gated flag
    # value was overwritten, leaving only the iteration check effective.
    return self.current_it == 1999
def save_checkpoints(self):
    """Save generator parameters for the current iteration.

    Creates ``<result_folder_checkpoint>/iter_<current_it>`` if needed and
    writes ``netG.params`` inside it; both paths are also cached on self.
    """
    self.result_folder_checkpoint_current_iter = '%s/iter_%04d' % (
        self.result_folder_checkpoint, self.current_it)
    # exist_ok avoids the check-then-create race of the original
    # `os.makedirs(...) if not os.path.exists(...) else None` pattern.
    os.makedirs(self.result_folder_checkpoint_current_iter, exist_ok=True)
    self.netG_filename = '%s/netG.params' % (self.result_folder_checkpoint_current_iter,)
    self.netG.save_parameters(self.netG_filename)
def load_checkpoints(self, prefix='best_', pretrained_dir=None):
    """Load generator parameters.

    If *pretrained_dir* is given, it is used as an explicit parameter file.
    Otherwise, resume from ``iter_<resumed_it>`` when ``resumed_it > -1``,
    else from the ``<prefix>checkpoints`` folder (e.g. ``best_checkpoints``).
    """
    if pretrained_dir:
        self.netG.load_parameters(pretrained_dir, ctx=self.ctx,
                                  ignore_extra=True)
    else:
        self.result_folder_checkpoint_iter = '%s/iter_%04d' % (
            self.result_folder_checkpoint, self.resumed_it) if self.resumed_it > -1 else '%s/%scheckpoints' % (
            self.result_folder_checkpoint, prefix)
        self.netG_filename = '%s/netG.params' % (self.result_folder_checkpoint_iter,)
        """Load parameters from checkpoints"""
        self.netG.load_parameters(self.netG_filename, ctx=self.ctx,
                                  ignore_extra=True)
def hybridize_networks(self):
    """Hybridize the active networks for speed (static alloc and shapes).

    The discriminator and feature extractor are only hybridized when their
    loss weights are enabled; the generator always is.
    """
    if self.lambda0 > 0:
        self.netD.hybridize(static_alloc=True, static_shape=True)
    if self.lambda_D > 0:
        self.D_features.hybridize(static_alloc=True, static_shape=True)
        # self.D_features_unsup.hybridize(static_alloc=True, static_shape=True)
    self.netG.hybridize(static_alloc=True, static_shape=True)
@staticmethod
def param_init(param, ctx):
    """Initialize discriminator parameters.

    DCGAN-style scheme keyed off the parameter name: conv weights are
    N(0, 0.02), conv biases and batchnorm params start at zero, and
    batchnorm gammas are then resampled from N(1, 0.02).
    """
    if param.name.find('conv') != -1:
        if param.name.find('weight') != -1:
            param.initialize(init=mx.init.Normal(0.02), ctx=ctx)
        else:
            param.initialize(init=mx.init.Zero(), ctx=ctx)
    elif param.name.find('batchnorm') != -1:
        param.initialize(init=mx.init.Zero(), ctx=ctx)
        # Initialize gamma from normal distribution with mean 1 and std 0.02
        if param.name.find('gamma') != -1:
            for _ctx in ctx:
                param.set_data(nd.random_normal(1, 0.02, param.data(ctx=_ctx).shape))
@staticmethod
def chunks(l, n):
n = max(1, n)
return (l[i:i + n] for i in range(0, len(l), n))
@staticmethod
def normalize_01(img, predef_range=None, ):
if predef_range is None:
return (img - img.min()) / (img.max() - img.min())
else:
return (img - predef_range[0]) / (predef_range[1] - predef_range[0])
@staticmethod
def facc(label, pred):
pred = pred.ravel()
label = label.ravel()
return ((pred > 0.5) == label).mean()
@staticmethod
def resize_wp(wp, ref):
    """Resize the mask *wp* to the spatial size of *ref*.

    order=0 (nearest neighbor) with anti_aliasing disabled keeps the mask
    values unblended; the result is returned on *ref*'s device.
    """
    wp = resize(wp.asnumpy(), (wp.shape[0], wp.shape[1], ref.shape[-2], ref.shape[-1]), order=0,
                anti_aliasing=False)
    return nd.array(wp, ctx=ref.context)
@staticmethod
def show_im(im, to_show=True):
    """Display channel 0 of an NDArray batch as a grayscale montage.

    When *to_show* is False the figure is drawn but plt.show() is skipped.
    """
    im = im.asnumpy()[:, 0]
    plt.imshow(montage(im), cmap='gray')
    plt.show() if to_show else None
@staticmethod
def create_concat_image(_val_data):
    """Crop every image around the wp bounding box and tile them into one canvas.

    For each sample, all entries of *_val_data* (except the last, the mask)
    are cropped to the mask's bounding box, resized to 200x200, stacked
    horizontally, then sample rows are stacked vertically.  The result is
    clipped to [0, 1].  NOTE(review): assumes at least one sample; with an
    empty batch ``img_concat`` would be unbound.
    """
    for i in range(_val_data[0].shape[0]):
        # Bounding box of the (integer) mask for this sample.
        info = regionprops(_val_data[-1][i, 0].astype(int))
        # masking predictions and ground truth images
        _val_data[-2][i] *= _val_data[-1][i]
        _val_data[-3][i] *= _val_data[-1][i]
        val_data_cropped = []
        for j in range(len(_val_data) - 1):
            val_data_cropped.append(
                _val_data[j][i, :, info[0].bbox[0]: info[0].bbox[2], info[0].bbox[1]:info[0].bbox[3]])
        val_data_cropped_rsz = resize(np.asarray(val_data_cropped),
                                      (val_data_cropped.__len__(), _val_data[-3].shape[1], 200, 200))
        tmp = np.concatenate([val_data_cropped_rsz[k, 0] for k in range(val_data_cropped_rsz.shape[0])], axis=1)
        img_concat = tmp if i == 0 else np.concatenate([img_concat, tmp], axis=0)
    img_concat[img_concat < 0] = 0
    img_concat[img_concat > 1] = 1
    return img_concat
def save_training_outputs(self, img, pred, label, roi, prefix, suffix):
    """Render input / prediction / label side by side with an ROI overlay and save."""
    def my_concat(im1, im2, im3):
        # Stack the three tensors along the channel axis, then flatten all
        # channels into a single 2D numpy canvas.
        im_cc = nd.concatenate([im1, im2, im3], axis=1)
        im_cc = nd.concatenate([*nd.concatenate([*nd.transpose(im_cc, (1, 0, 2, 3))], axis=2)], axis=0).asnumpy()
        return im_cc
    a = my_concat(img, pred, label)
    # roi * -1 + 1 inverts the mask, so the jet overlay shows only inside the ROI.
    roi_masked = my_concat(nd.ones_like(img), roi * -1 + 1, roi * -1 + 1)
    roi_masked = masked_array(a, roi_masked == 1)
    img_masked = my_concat(img, nd.ones_like(roi), nd.ones_like(roi))
    img_masked = masked_array(a, img_masked == 1)
    # Layered rendering: gray base, gray input overlay, jet ROI overlay.
    plt.imshow(a, cmap='gray', vmin=self.density_range[0], vmax=self.density_range[1])
    plt.imshow(img_masked, cmap='gray', vmin=0, vmax=1)
    plt.imshow(roi_masked, cmap='jet', vmin=self.density_range[0], vmax=self.density_range[1])
    plt.axis('off')
    self.save_fig(folder=self.result_folder_figure_train, prefix=prefix, suffix=suffix, dpi=500)
def generate_test_figures(self, val_data):
    """Split channels, normalize, and save a montage of the test data."""
    """ MULTIPLE CHANNELS OF EACH IMAGE ARE SPLIT INTO SEPARATE IMAGES """
    _val_data = []
    for i in range(len(val_data)):
        for j in range(val_data[i].shape[1]):
            _val_data.append(val_data[i][:, j:j + 1])
    #######################################
    """ NORM TO 0-1 RANGE IF NECESSARY """
    if self.to_11:  # Normalize image from [-1, 1] to [0, 1]
        for i in range(-4, -2):  # prediction and label
            _val_data[i] = self.normalize_01(_val_data[i], [-1, 1]) * _val_data[-1]
    if self.norm_0mean:  # norm inputs
        for i in range(val_data.__len__() - 4):  # excludes (pred, label, ROIs and wp)
            _val_data[i] = self.normalize_01(_val_data[i]) * _val_data[-1]
    #######################################
    """ SAVE FIRST IMAGE TO FOLDER & UPDATE BEST METRICS """
    self.save_montage_im(_val_data)
def decay_loss_margin(self, margin):
    """Get loss margin by iteration index.

    Queries the margin scheduler for the current iteration, caches the
    value on ``self.current_margin`` and returns one margin per entry of
    *margin*.

    Bug fix: the original ``for _mg in margin: _mg = self.current_margin``
    only rebound the loop variable and returned *margin* unchanged; the
    decayed margin was never propagated.
    """
    self.current_margin = self.lss_margin.get_margin(self.current_it)
    return [self.current_margin for _ in margin]
def fade_signal(self, m):
    """Remove training signal with respect to the current training iteration.

    The number of samples allowed to carry signal grows stepwise over the
    first 90% of total iterations, capped at the batch size.  Once every
    sample carries signal, *m* is returned unchanged; otherwise a random
    subset of samples is kept and the rest are zeroed.
    """
    num_signal = int(
        min(np.floor(self.current_it / (self.total_iter * (9 / 10) / m.shape[0])) + 1, m.shape[0]))
    if num_signal == self.batch_size:
        return m
    else:
        # Per-sample 0/1 gate broadcast over (C, H, W).
        signal = np.zeros(self.batch_size)[:, np.newaxis, np.newaxis, np.newaxis]
        signal[np.random.permutation(self.batch_size)[:num_signal]] = 1
        return m * nd.array(signal)
def expand_dataset(self):
    """Gradually expand the dataset with respect to the current training iteration.

    Splits the first 90% of total iterations into ``num_expand_level``
    intervals and raises the sampler's effective length by
    ``data_inc_unit`` per completed interval, capped at the dataset size.
    No-op when expansion is disabled (num_expand_level <= 1).
    """
    if self.num_expand_level <= 1:
        return
    # Trainer's optimizer tracks the number of updates performed so far.
    current_it = self.trainerG.optimizer.num_update
    interval = int((self.total_iter * (9 / 10)) / self.num_expand_level)
    self.train_iter._batch_sampler._sampler._length1 = int(min(
        self.data_inc_unit * (np.floor(current_it / interval) + 1),
        self.train_iter._dataset.__len__()))
class ImagePool:
    """History buffer of generated images (image pooling).

    With probability 0.5, ``query`` swaps a freshly generated image for a
    randomly chosen stored one, so the discriminator also trains on older
    generator outputs.
    """
    def __init__(self, pool_size):
        """Initialization.  pool_size: max stored images; 0 disables pooling."""
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0  # current fill level
            self.images = []   # stored images, each kept with a batch dim of 1

    def query(self, images):
        """Return a batch for the discriminator, possibly mixing in pooled history."""
        if self.pool_size == 0:
            # Pooling disabled: pass the batch straight through.
            return images
        ret_imgs = []
        for i in range(images.shape[0]):
            image = nd.expand_dims(images[i], axis=0)
            if self.num_imgs < self.pool_size:
                # Pool not yet full: store the new image and return it as-is.
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                ret_imgs.append(image)
            else:
                p = nd.random_uniform(0, 1, shape=(1,)).asscalar()
                if p > 0.5:
                    # Swap: return a random stored image, keep the new one.
                    # NOTE(review): the uint8 cast limits random_id to 0-255,
                    # so pool sizes above 256 are not fully addressable.
                    random_id = nd.random_uniform(0, self.pool_size - 1, shape=(1,)).astype(np.uint8).asscalar()
                    tmp = self.images[random_id].copy()
                    self.images[random_id] = image
                    ret_imgs.append(tmp.as_in_context(image.context))
                else:
                    ret_imgs.append(image)
        ret_imgs = nd.concat(*ret_imgs, dim=0)
        return ret_imgs
def get_color():
    """
    :return: a colormap jet
    """
    from pylab import cm
    # Sample the jet colormap at 255 points; each sample is an RGBA tuple.
    samples = [cm.jet(i) for i in range(255)]
    rgba = np.reshape(np.concatenate(samples, axis=0), (255, 4))
    # Drop the alpha channel.
    return rgba[:, :3]
JET = get_color()  # 255x3 RGB jet lookup table used by label2rgb below
class ColorBar:
    """Draws an intensity color bar and min/max number labels into images.

    Geometry is precomputed for a fixed (height, width): the 101x10 bar is
    placed near the right edge, vertically centered.  Digit glyphs (9x6)
    are loaded from ``extra_data/mr_collateral_numing.npy``.
    """
    def __init__(self, height, width):
        """Precompute the bar image and its placement for a (height, width) canvas."""
        bar_tmp = np.zeros((101, 10))
        for i in range(100):
            # Vertical gradient: 100 at the top row, decreasing downwards.
            bar_tmp[i] = 99 - i + 1
        # bar position
        self.end_bar_x = int(width - 3)
        self.start_bar_x = int(self.end_bar_x - 10 + 1)
        self.start_bar_y = int(np.floor((height - 101) / 2))
        self.end_bar_y = int(self.start_bar_y + 101 - 1)
        # Index Image generation
        self.bar = bar_tmp * 2.551  # colorbar scale from 0 to 255
        # Digit glyph atlas scaled to [0, 1].
        self.numimg = np.load(r'extra_data/mr_collateral_numing.npy') / 255

    def insert_num_board(self, rgbIMG, maxIMG, maxWWL):
        """
        :param rgbIMG: RGB image that already contains the color bar
        :param maxIMG: maximum image intensity used for the upper label
        :param maxWWL: upper window limit
        :return: rgbIMG with min (0) and max numbers drawn next to the bar
        """
        # insert min number
        for ch in range(rgbIMG.shape[-1]):
            rgbIMG[self.end_bar_y:self.end_bar_y + 9, self.end_bar_x - 6:self.end_bar_x, ch] = self.numimg[..., 0, 0]
        # insert max number
        max_num_str = str((maxIMG * maxWWL * 255).astype('uint8'))
        str_length = len(max_num_str)
        num_board = np.zeros((9, str_length * 6, 3))
        for i in range(str_length):
            selected_num = int(max_num_str[i])
            # Copy the 9x6 glyph of this digit into all three channels.
            num_board[:, i * 6:(i + 1) * 6, 0] = self.numimg[:, :, 0, selected_num]
            num_board[:, i * 6:(i + 1) * 6, 1] = self.numimg[:, :, 0, selected_num]
            num_board[:, i * 6:(i + 1) * 6, 2] = self.numimg[:, :, 0, selected_num]
        rgbIMG[self.start_bar_y - 9:self.start_bar_y, self.end_bar_x - str_length * 6 + 1:self.end_bar_x + 1] = \
            num_board
        return rgbIMG

    def insert_color_bar(self, indIMG):
        """
        :param indIMG: a 2D image
        :return: an image with a color bar on the right side
        """
        # insert bar
        indIMG[self.start_bar_y:self.end_bar_y + 1, self.start_bar_x:self.end_bar_x + 1] = self.bar
        return indIMG
def gen_dicoms(save_dir_path, density, mri, mask, suffix='', insert_bar=True):
    """Export T2 / ADC / predicted density volumes as DICOM series.

    Writes grayscale series for every volume except the last, and a color
    (jet) series for the density map, under
    ``<save_dir_path>/<GrayScale|Color>/<k>_<type>/``.
    """
    # Populate required values for file meta information (pseudo-values)
    file_meta = Dataset()
    file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
    file_meta.MediaStorageSOPInstanceUID = "1.2.3"
    file_meta.ImplementationClassUID = "1.2.3.4"
    hdr = FileDataset('collateral_phase_maps', {}, file_meta=file_meta, preamble=b"\0" * 128)
    print('Generating DICOM files...')
    # Reorder volumes so axis 0 indexes the modality and axis 1 the slices.
    density = density.transpose([1, 0, 2, 3])
    mask = mask.transpose([1, 0, 2, 3])
    T2 = normalize_01(mri.transpose([1, 0, 2, 3])[0:1]) * mask
    ADC = normalize_01(mri.transpose([1, 0, 2, 3])[1:2]) * mask
    combined = np.concatenate((T2, ADC, density), 0)
    number_of_slice = combined.shape[1]
    outlier = [0, 0, 0]  # per-modality outlier percentage for auto-windowing
    col_sf = 1  # color scale factor
    density_dir = 'DICOM' + suffix  # NOTE(review): currently unused
    presentations = ['GrayScale', 'Color']
    types = ['T2', 'ADC', 'EPI-Density']
    density_dir_org = save_dir_path
    density_dir_paths = {presentations[0]: {}, presentations[1]: {}}
    for presentation in presentations:
        for k in range(combined.shape[0]):
            density_dir_paths[presentation][k] = density_dir_org + '/' + presentation + '/' + '%d_%s' % (k, types[k])
            if not os.path.exists(density_dir_paths[presentation][k]):
                os.makedirs(density_dir_paths[presentation][k])
    new_sn_gray = 10000 + 1   # series-number base for grayscale series
    new_sn_color = 15000 + 1  # series-number base for color series
    color_bar = ColorBar(*density.shape[-2:]) if insert_bar else None
    # Save Dicom Files
    # Color export: only the last volume (the density map).
    [save_color_dcm(combined[k, slice_loop], hdr, new_sn_color, k, slice_loop, outlier, density_dir_paths['Color'][k],
                    mask[0, slice_loop], color_bar, col_sf,
                    rescaling_first=False)
     for k in [len(combined) - 1, ] for slice_loop in range(number_of_slice)]
    # Grayscale export: every volume except the last.
    [save_grayscale_dcm(combined[k, slice_loop], hdr, new_sn_gray, k, slice_loop, outlier,
                        density_dir_paths['GrayScale'][k],
                        mask[0, slice_loop], col_sf)
     for k in range(len(combined) - 1) for slice_loop in range(number_of_slice)]
def save_color_dcm(phase_map, ds, new_sn, k, slice_loop, outlier, col_dir_path, mask, color_bar=None, col_sf=1,
                   num_bits=8, rescaling_first=False):
    """
    Save a phase map into a color (RGB, jet) dicom series
    :param num_bits: bits per sample (8 -> uint8 RGB)
    :param rescaling_first: forwarded to mr_collateral_gen_color_image
    :param color_bar: optional ColorBar drawn into the image
    :param phase_map: 2D phase map
    :param ds: pydicom dataset instance
    :param new_sn: new serial number
    :param k: phase index
    :param slice_loop: slice index
    :param outlier: outlier list
    :param col_dir_path: destination directory storing phase maps
    :param mask: a brain mask
    :param col_sf: *
    """
    SeriesDescription = [
        'T2', 'ADC', 'Density-EPI'
    ]
    # Colorize, then quantize to unsigned num_bits integers.
    phase_map = mr_collateral_gen_color_image(phase_map, outlier, mask, rescaling_first, color_bar)
    phase_map = (phase_map * (2 ** num_bits - 1)).astype('uint%d' % num_bits)
    ds.SeriesDescription = SeriesDescription[k]
    ds.SeriesInstanceUID = str(new_sn + k)
    ds.SeriesNumber = new_sn + k
    ds.AcquisitionNumber = slice_loop
    ds.InstanceNumber = slice_loop
    ds.PixelSpacing = [1, 1]
    # tobytes() replaces ndarray.tostring(), which was removed in NumPy >= 1.23.
    ds.PixelData = phase_map.tobytes()
    ds.Rows, ds.Columns = phase_map.shape[:2]
    ds.SamplesPerPixel = 3
    ds.PhotometricInterpretation = 'RGB'
    ds.BitsAllocated = num_bits
    ds.BitsStored = num_bits
    ds.HighBit = 7
    ds.PixelRepresentation = 0
    ds.SmallestImagePixelValue = 0
    ds.LargestImagePixelValue = 255
    ds.WindowCenter = 128
    ds.WindowWidth = 255
    path = "%s/%s_%03d.dcm" % (col_dir_path, ds.SeriesDescription, slice_loop)
    ds.save_as(path)
def mr_collateral_gen_color_image(inIMG, outlier, mask, rescaling_first=False, color_bar=None):
    """
    Convert a 2D map into an RGB (jet) image with an optional color bar.

    :param inIMG: 2D input image
    :param outlier: outlier percentage used for auto window width/level
    :param mask: brain mask; an all-zero mask yields an all-black RGB image
    :param rescaling_first: window the normalized image before colorizing
    :param color_bar: optional ColorBar used to draw the bar and labels
    :return: RGB image
    """
    if mask.sum() == 0:
        rgb = np.zeros((inIMG.shape + (3,)))
        return rgb
    minIMG, maxIMG = inIMG[mask > 0].min(), inIMG[mask > 0].max()
    if rescaling_first:
        if mask is None:
            mask = np.ones_like(inIMG)
        # Image Signal Normalization
        nIMG = inIMG / maxIMG
        outlier_low = (outlier / 100) / 2
        outlier_high = 1 - ((outlier / 100) / 2)
        WWL = stretch_lim(nIMG, [outlier_low, outlier_high])  # auto window width / level
        minWWL, maxWWL = WWL.min(), WWL.max()
        # Rescaled Image: clip-and-stretch [minWWL, maxWWL] -> [0, 1],
        # the equivalent of MATLAB imadjust(nIMG, WWL, []).
        # Bug fix: the original left this line commented out, so rsIMG was
        # undefined and the rescaling_first=True path raised NameError.
        rsIMG = np.clip((nIMG - minWWL) / (maxWWL - minWWL), 0, 1)
    else:
        rsIMG = inIMG
        maxWWL = 1
    indIMG = rsIMG * 255
    indIMG = color_bar.insert_color_bar(indIMG) if color_bar else indIMG
    rgb = label2rgb(indIMG.astype('uint8'), bg_label=0, colors=JET)
    # TODO: I cannot understand why the intensity of the bar is higher than the brain intensity, regarding 'indIMG', but seems to be the equal regarding 'rgb'
    rgb = color_bar.insert_num_board(rgbIMG=rgb, maxIMG=maxIMG, maxWWL=maxWWL) if color_bar else rgb
    return rgb
def stretch_lim(img, tol):
    """
    Mimic the stretchlim function in MATLAB
    :param img: input intensity image
    :param tol: (low, high) cumulative-histogram saturation fractions
    :return: array of [low, high] limits scaled to [0, 1]
    """
    nbins = 65536
    tol_low, tol_high = tol[0], tol[1]
    counts = np.histogram(img, nbins)[0]
    # cumulative distribution function
    cdf = np.cumsum(counts) / counts.sum()
    ilow = np.flatnonzero(cdf > tol_low)[0]
    ihigh = np.flatnonzero(cdf >= tol_high)[0]
    if ilow == ihigh:  # this could happen if img is flat
        ilowhigh = np.array([1, nbins])
    else:
        ilowhigh = np.array([ilow, ihigh])
    return ilowhigh / (nbins - 1)  # convert to range [0 1]
def save_grayscale_dcm(phase_map, ds, new_sn, k, slice_loop, outlier, col_dir_path, mask, col_sf=1, num_bits=16):
    """
    Save a phase map into a gray scale dicom series
    :param num_bits: bits per sample (16 -> uint16)
    :param phase_map: 2D phase map
    :param ds: pydicom dataset instance
    :param new_sn: new serial number
    :param k: phase index
    :param slice_loop: slice index
    :param outlier: outlier list
    :param col_dir_path: destination directory storing phase maps
    :param mask: a brain mask
    :param col_sf: *
    """
    SeriesDescription = [
        'T2', 'ADC', 'Density-EPI'
    ]
    # Quantize to unsigned num_bits integers, then derive the display window.
    phase_map = (phase_map * (2 ** num_bits - 1)).astype('uint%d' % num_bits)
    MIN, MAX, WW, WL = mr_collateral_find_WWL(phase_map * col_sf, mask, outlier[k])
    ds.SeriesDescription = SeriesDescription[k]
    ds.SeriesInstanceUID = str(new_sn + k)
    ds.SeriesNumber = new_sn + k
    ds.AcquisitionNumber = slice_loop
    ds.InstanceNumber = slice_loop
    ds.PixelSpacing = [1, 1]
    # tobytes() replaces ndarray.tostring(), which was removed in NumPy >= 1.23.
    ds.PixelData = phase_map.tobytes()
    ds.Rows, ds.Columns = phase_map.shape[:2]
    ds.SamplesPerPixel = 1
    ds.PhotometricInterpretation = 'MONOCHROME2'
    ds.BitsAllocated = num_bits
    ds.BitsStored = num_bits
    ds.SmallestImagePixelValue = int(MIN)
    ds.LargestImagePixelValue = int(MAX)
    ds.WindowCenter = int(WL)
    ds.WindowWidth = int(WW)
    ds.PixelRepresentation = 0
    path = "%s/%s_%03d.dcm" % (col_dir_path, ds.SeriesDescription, slice_loop)
    ds.save_as(path)
def mr_collateral_find_WWL(inIMG, mask, outlier):
    """
    Derive display window settings for a grayscale DICOM.

    :param inIMG: 2D intensity image
    :param mask: binary brain mask; an empty mask yields (0, 0, 0, 0)
    :param outlier: outlier percentage used for auto-windowing
    :return: (MIN, MAX, WW, WL) pixel extrema, window width and window level
    """
    if mask.sum() == 0:
        return 0, 0, 0, 0
    MIN, MAX = inIMG.min(), inIMG.max()
    # Normalize the signal before estimating the saturation limits.
    scaled = inIMG / MAX
    half_tail = (outlier / 100) / 2
    WWL = stretch_lim(scaled[mask > 0], [half_tail, 1 - half_tail])  # auto window width / level
    minWWL, maxWWL = WWL.min(), WWL.max()
    # Window width / level calculation
    WW = np.floor((MAX * maxWWL) - (MAX * minWWL))
    WL = np.floor(MAX * minWWL) + np.floor(WW / 2)
    return MIN, MAX, WW, WL
def normalize_01(img, predef_range=None, ):
    """Rescale *img* to [0, 1].

    Uses the image's own min/max when *predef_range* is None, otherwise the
    supplied (low, high) pair.
    """
    if predef_range is not None:
        low, high = predef_range[0], predef_range[1]
        return (img - low) / (high - low)
    lo = img.min()
    return (img - lo) / (img.max() - lo)
| [
"mxnet.nd.expand_dims",
"pydicom.dataset.Dataset",
"mxnet.nd.concat",
"mxnet.gluon.model_zoo.vision.resnet18_v1",
"numpy.concatenate",
"numpy.ma.masked_array",
"numpy.round",
"pydicom.dataset.FileDataset",
"csv.writer",
"numpy.floor",
"utils.dataloader.DataLoader",
"mxnet.gluon.loss.CosineEmbe... | [((1775, 1792), 'mxnet.init.Uniform', 'mx.init.Uniform', ([], {}), '()\n', (1790, 1792), True, 'import mxnet as mx\n'), ((1808, 1828), 'mxnet.init.Normal', 'mx.init.Normal', (['(0.05)'], {}), '(0.05)\n', (1822, 1828), True, 'import mxnet as mx\n'), ((1843, 1872), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {'magnitude': '(2.2)'}), '(magnitude=2.2)\n', (1857, 1872), True, 'import mxnet as mx\n'), ((1884, 1903), 'mxnet.init.MSRAPrelu', 'mx.init.MSRAPrelu', ([], {}), '()\n', (1901, 1903), True, 'import mxnet as mx\n'), ((49738, 49747), 'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (49745, 49747), False, 'from pydicom.dataset import Dataset, FileDataset\n'), ((49926, 50016), 'pydicom.dataset.FileDataset', 'FileDataset', (['"""collateral_phase_maps"""', '{}'], {'file_meta': 'file_meta', 'preamble': "(b'\\x00' * 128)"}), "('collateral_phase_maps', {}, file_meta=file_meta, preamble=\n b'\\x00' * 128)\n", (49937, 50016), False, 'from pydicom.dataset import Dataset, FileDataset\n'), ((50278, 50315), 'numpy.concatenate', 'np.concatenate', (['(T2, ADC, density)', '(0)'], {}), '((T2, ADC, density), 0)\n', (50292, 50315), True, 'import numpy as np\n'), ((57271, 57308), 'numpy.floor', 'np.floor', (['(MAX * maxWWL - MAX * minWWL)'], {}), '(MAX * maxWWL - MAX * minWWL)\n', (57279, 57308), True, 'import numpy as np\n'), ((3754, 3899), 'utils.datasets.RadPath', 'RadPath', (['Aus', 'Aus', 'wp_us', 'wp_us', 'wp_us'], {'transform': 'joint_transform', 'is_val': '(True)', 'input_size': 'self.input_size', 'density_range': 'self.density_range'}), '(Aus, Aus, wp_us, wp_us, wp_us, transform=joint_transform, is_val=\n True, input_size=self.input_size, density_range=self.density_range)\n', (3761, 3899), False, 'from utils.datasets import RadPath\n'), ((4034, 4192), 'utils.dataloader.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers', 'last_batch': '"""keep"""', 'shuffle': '(False)', 
'thread_pool': '(False)', 'prefetch': 'None'}), "(val_dataset, batch_size=self.batch_size, num_workers=self.\n num_workers, last_batch='keep', shuffle=False, thread_pool=False,\n prefetch=None)\n", (4044, 4192), False, 'from utils.dataloader import DataLoader\n'), ((8666, 8710), 'mxnet.gluon.model_zoo.vision.resnet18_v1', 'vision.resnet18_v1', ([], {'pretrained': '(True)', 'ctx': 'ctx'}), '(pretrained=True, ctx=ctx)\n', (8684, 8710), False, 'from mxnet.gluon.model_zoo import vision\n'), ((8732, 8818), 'mxnet.gluon.nn.Conv2D', 'gluon.nn.Conv2D', (['(64)'], {'kernel_size': '(7)', 'strides': '(2)', 'padding': '(3)', 'in_channels': 'in_channels'}), '(64, kernel_size=7, strides=2, padding=3, in_channels=\n in_channels)\n', (8747, 8818), False, 'from mxnet import nd, gluon, autograd\n'), ((14833, 14875), 'mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss', 'gluon.loss.SigmoidBinaryCrossEntropyLoss', ([], {}), '()\n', (14873, 14875), False, 'from mxnet import nd, gluon, autograd\n'), ((16392, 16425), 'mxnet.metric.CustomMetric', 'mx.metric.CustomMetric', (['self.facc'], {}), '(self.facc)\n', (16414, 16425), True, 'import mxnet as mx\n'), ((32524, 32894), 'utils.metrics.update_mxboard_metric_v1', 'metrics.update_mxboard_metric_v1', (['sw'], {'data': 'val_data', 'global_step': 'epoch', 'metric_names': "['r_whole', 'l1_whole', 'ssim_whole', 'rmse_whole', 'rmse_log_whole', 't1',\n 't2', 't3', 'abs_rel_diff', 'sqr_rel_diff', 'ta1', 'ta2']", 'prefix': '"""validation_"""', 'num_input_channels': 'self.n_A_channel_idx', 'c_thr': 'self.C_thr', 'density_range': 'self.density_range', 'root': 'self.root'}), "(sw, data=val_data, global_step=epoch,\n metric_names=['r_whole', 'l1_whole', 'ssim_whole', 'rmse_whole',\n 'rmse_log_whole', 't1', 't2', 't3', 'abs_rel_diff', 'sqr_rel_diff',\n 'ta1', 'ta2'], prefix='validation_', num_input_channels=self.\n n_A_channel_idx, c_thr=self.C_thr, density_range=self.density_range,\n root=self.root)\n", (32556, 32894), False, 'from utils import 
metrics\n'), ((36965, 36998), 'numpy.ma.masked_array', 'masked_array', (['_im_wp', '(_im_wp == 2)'], {}), '(_im_wp, _im_wp == 2)\n', (36977, 36998), False, 'from numpy.ma import masked_array\n'), ((37007, 37077), 'pylab.imshow', 'plt.imshow', (['_im'], {'cmap': '"""jet"""', 'vmin': '(0)', 'vmax': '(0.7)', 'interpolation': '"""nearest"""'}), "(_im, cmap='jet', vmin=0, vmax=0.7, interpolation='nearest')\n", (37017, 37077), True, 'import pylab as plt\n'), ((37085, 37157), 'pylab.imshow', 'plt.imshow', (['_im_wp'], {'cmap': '"""gray"""', 'vmin': '(0)', 'vmax': '(1)', 'interpolation': '"""nearest"""'}), "(_im_wp, cmap='gray', vmin=0, vmax=1, interpolation='nearest')\n", (37095, 37157), True, 'import pylab as plt\n'), ((37496, 37505), 'pylab.gca', 'plt.gca', ([], {}), '()\n', (37503, 37505), True, 'import pylab as plt\n'), ((37628, 37773), 'pylab.savefig', 'plt.savefig', (["('%s/%sep%04d%s.png' % (folder, prefix, self.current_epoch, suffix))"], {'pad_inches': '(0)', 'bbox_inches': '"""tight"""', 'transparent': '(True)', 'dpi': 'dpi'}), "('%s/%sep%04d%s.png' % (folder, prefix, self.current_epoch,\n suffix), pad_inches=0, bbox_inches='tight', transparent=True, dpi=dpi)\n", (37639, 37773), True, 'import pylab as plt\n'), ((37849, 37865), 'pylab.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (37858, 37865), True, 'import pylab as plt\n'), ((41770, 41799), 'mxnet.nd.array', 'nd.array', (['wp'], {'ctx': 'ref.context'}), '(wp, ctx=ref.context)\n', (41778, 41799), False, 'from mxnet import nd, gluon, autograd\n'), ((43483, 43515), 'numpy.ma.masked_array', 'masked_array', (['a', '(roi_masked == 1)'], {}), '(a, roi_masked == 1)\n', (43495, 43515), False, 'from numpy.ma import masked_array\n'), ((43611, 43643), 'numpy.ma.masked_array', 'masked_array', (['a', '(img_masked == 1)'], {}), '(a, img_masked == 1)\n', (43623, 43643), False, 'from numpy.ma import masked_array\n'), ((43652, 43739), 'pylab.imshow', 'plt.imshow', (['a'], {'cmap': '"""gray"""', 'vmin': 
'self.density_range[0]', 'vmax': 'self.density_range[1]'}), "(a, cmap='gray', vmin=self.density_range[0], vmax=self.\n density_range[1])\n", (43662, 43739), True, 'import pylab as plt\n'), ((43743, 43794), 'pylab.imshow', 'plt.imshow', (['img_masked'], {'cmap': '"""gray"""', 'vmin': '(0)', 'vmax': '(1)'}), "(img_masked, cmap='gray', vmin=0, vmax=1)\n", (43753, 43794), True, 'import pylab as plt\n'), ((43803, 43898), 'pylab.imshow', 'plt.imshow', (['roi_masked'], {'cmap': '"""jet"""', 'vmin': 'self.density_range[0]', 'vmax': 'self.density_range[1]'}), "(roi_masked, cmap='jet', vmin=self.density_range[0], vmax=self.\n density_range[1])\n", (43813, 43898), True, 'import pylab as plt\n'), ((43902, 43917), 'pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (43910, 43917), True, 'import pylab as plt\n'), ((47362, 47389), 'mxnet.nd.concat', 'nd.concat', (['*ret_imgs'], {'dim': '(0)'}), '(*ret_imgs, dim=0)\n', (47371, 47389), False, 'from mxnet import nd, gluon, autograd\n'), ((47728, 47747), 'numpy.zeros', 'np.zeros', (['(101, 10)'], {}), '((101, 10))\n', (47736, 47747), True, 'import numpy as np\n'), ((48739, 48771), 'numpy.zeros', 'np.zeros', (['(9, str_length * 6, 3)'], {}), '((9, str_length * 6, 3))\n', (48747, 48771), True, 'import numpy as np\n'), ((53553, 53581), 'numpy.zeros', 'np.zeros', (['(inIMG.shape + (3,))'], {}), '(inIMG.shape + (3,))\n', (53561, 53581), True, 'import numpy as np\n'), ((54816, 54840), 'numpy.histogram', 'np.histogram', (['img', 'nbins'], {}), '(img, nbins)\n', (54828, 54840), True, 'import numpy as np\n'), ((54854, 54866), 'numpy.cumsum', 'np.cumsum', (['N'], {}), '(N)\n', (54863, 54866), True, 'import numpy as np\n'), ((55074, 55094), 'numpy.array', 'np.array', (['[1, nbins]'], {}), '([1, nbins])\n', (55082, 55094), True, 'import numpy as np\n'), ((55124, 55147), 'numpy.array', 'np.array', (['[ilow, ihigh]'], {}), '([ilow, ihigh])\n', (55132, 55147), True, 'import numpy as np\n'), ((57322, 57344), 'numpy.floor', 'np.floor', 
(['(MAX * minWWL)'], {}), '(MAX * minWWL)\n', (57330, 57344), True, 'import numpy as np\n'), ((57347, 57363), 'numpy.floor', 'np.floor', (['(WW / 2)'], {}), '(WW / 2)\n', (57355, 57363), True, 'import numpy as np\n'), ((3408, 3442), 'os.path.exists', 'os.path.exists', (['self.result_folder'], {}), '(self.result_folder)\n', (3422, 3442), False, 'import os\n'), ((3456, 3487), 'os.makedirs', 'os.makedirs', (['self.result_folder'], {}), '(self.result_folder)\n', (3467, 3487), False, 'import os\n'), ((4606, 4617), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (4614, 4617), True, 'import numpy as np\n'), ((4894, 4907), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4904, 4907), False, 'import csv\n'), ((5431, 5447), 'numpy.ones_like', 'np.ones_like', (['_A'], {}), '(_A)\n', (5443, 5447), True, 'import numpy as np\n'), ((5517, 5534), 'numpy.zeros_like', 'np.zeros_like', (['_A'], {}), '(_A)\n', (5530, 5534), True, 'import numpy as np\n'), ((6239, 6250), 'numpy.sort', 'np.sort', (['_x'], {}), '(_x)\n', (6246, 6250), True, 'import numpy as np\n'), ((6693, 6709), 'numpy.round', 'np.round', (['thr', '(5)'], {}), '(thr, 5)\n', (6701, 6709), True, 'import numpy as np\n'), ((9689, 9764), 'networks.unetpp_padding.UNet', 'UNet', ([], {'base_channel': 'self.base_channel_unet', 'backbone_name': 'self.backbone_name'}), '(base_channel=self.base_channel_unet, backbone_name=self.backbone_name)\n', (9693, 9764), False, 'from networks.unetpp_padding import UNet\n'), ((10774, 10887), 'networks.deeplabv3b_plus.DeepLabWV3Plus', 'DeepLabWV3Plus', (['(1)'], {'backbone': '"""wideresnet"""', 'ctx': 'self.ctx', 'base_size': 'self.input_size', 'crop_size': 'self.input_size'}), "(1, backbone='wideresnet', ctx=self.ctx, base_size=self.\n input_size, crop_size=self.input_size)\n", (10788, 10887), False, 'from networks.deeplabv3b_plus import DeepLabWV3Plus\n'), ((13993, 14089), 'utils.load_pretrained_net.extract_encoder', 'extract_encoder', (['(self.netG if self.lambda_aux <= 0 else 
self.netG.shared_net)', 'self.num_downs'], {}), '(self.netG if self.lambda_aux <= 0 else self.netG.shared_net,\n self.num_downs)\n', (14008, 14089), False, 'from utils.load_pretrained_net import pretrained_net, extract_encoder\n'), ((14300, 14351), 'networks.discriminators.Discriminator', 'Discriminator', ([], {'in_channels': '(self.n_A_channel_idx + 1)'}), '(in_channels=self.n_A_channel_idx + 1)\n', (14313, 14351), False, 'from networks.discriminators import Discriminator\n'), ((15610, 15642), 'mxnet.gluon.loss.CosineEmbeddingLoss', 'gluon.loss.CosineEmbeddingLoss', ([], {}), '()\n', (15640, 15642), False, 'from mxnet import nd, gluon, autograd\n'), ((17362, 17565), 'mxnet.lr_scheduler.FactorScheduler', 'mx.lr_scheduler.FactorScheduler', ([], {'step': 'self.lr_step', 'factor': 'self.lr_factor', 'warmup_mode': 'self.warmup_mode', 'warmup_steps': 'self.warmup_steps', 'warmup_begin_lr': 'self.warmup_begin_lr', 'base_lr': 'self.base_lr'}), '(step=self.lr_step, factor=self.lr_factor,\n warmup_mode=self.warmup_mode, warmup_steps=self.warmup_steps,\n warmup_begin_lr=self.warmup_begin_lr, base_lr=self.base_lr)\n', (17393, 17565), True, 'import mxnet as mx\n'), ((17633, 17843), 'mxnet.lr_scheduler.MultiFactorScheduler', 'mx.lr_scheduler.MultiFactorScheduler', ([], {'step': 'self.lr_steps', 'factor': 'self.lr_factor', 'base_lr': 'self.base_lr', 'warmup_mode': 'self.warmup_mode', 'warmup_begin_lr': 'self.warmup_begin_lr', 'warmup_steps': 'self.warmup_steps'}), '(step=self.lr_steps, factor=self.\n lr_factor, base_lr=self.base_lr, warmup_mode=self.warmup_mode,\n warmup_begin_lr=self.warmup_begin_lr, warmup_steps=self.warmup_steps)\n', (17669, 17843), True, 'import mxnet as mx\n'), ((17903, 18018), 'mxnet.lr_scheduler.PolyScheduler', 'mx.lr_scheduler.PolyScheduler', ([], {'max_update': 'self.cycle_length', 'base_lr': 'self.base_lr', 'pwr': '(2)', 'final_lr': 'self.min_lr'}), '(max_update=self.cycle_length, base_lr=self.\n base_lr, pwr=2, final_lr=self.min_lr)\n', (17932, 
18018), True, 'import mxnet as mx\n'), ((22210, 22234), 'mxnet.nd.where', 'nd.where', (['wp', 'fo', '(wp - 1)'], {}), '(wp, fo, wp - 1)\n', (22218, 22234), False, 'from mxnet import nd, gluon, autograd\n'), ((22400, 22417), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (22415, 22417), False, 'from mxnet import nd, gluon, autograd\n'), ((23990, 24011), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {}), '()\n', (24009, 24011), False, 'from mxnet.gluon import nn\n'), ((36043, 36089), 'mxnet.nd.tile', 'nd.tile', (['self.wp_val', '(1, pred.shape[1], 1, 1)'], {}), '(self.wp_val, (1, pred.shape[1], 1, 1))\n', (36050, 36089), False, 'from mxnet import nd, gluon, autograd\n'), ((36109, 36143), 'mxnet.nd.where', 'nd.where', (['wp_val', 'pred', '(wp_val - 1)'], {}), '(wp_val, pred, wp_val - 1)\n', (36117, 36143), False, 'from mxnet import nd, gluon, autograd\n'), ((36343, 36369), 'numpy.concatenate', 'np.concatenate', (['input_list'], {}), '(input_list)\n', (36357, 36369), True, 'import numpy as np\n'), ((36388, 36416), 'numpy.concatenate', 'np.concatenate', (['[*pred_list]'], {}), '([*pred_list])\n', (36402, 36416), True, 'import numpy as np\n'), ((36435, 36461), 'numpy.concatenate', 'np.concatenate', (['[*wp_list]'], {}), '([*wp_list])\n', (36449, 36461), True, 'import numpy as np\n'), ((36567, 36581), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (36577, 36581), True, 'import numpy as np\n'), ((36865, 36895), 'numpy.concatenate', 'np.concatenate', (['_im_wp'], {'axis': '(2)'}), '(_im_wp, axis=2)\n', (36879, 36895), True, 'import numpy as np\n'), ((36919, 36946), 'numpy.concatenate', 'np.concatenate', (['_im'], {'axis': '(2)'}), '(_im, axis=2)\n', (36933, 36946), True, 'import numpy as np\n'), ((37541, 37561), 'matplotlib.ticker.NullLocator', 'ticker.NullLocator', ([], {}), '()\n', (37559, 37561), False, 'from matplotlib import ticker\n'), ((37598, 37618), 'matplotlib.ticker.NullLocator', 'ticker.NullLocator', ([], {}), '()\n', 
(37616, 37618), False, 'from matplotlib import ticker\n'), ((38911, 38966), 'os.makedirs', 'os.makedirs', (['self.result_folder_checkpoint_current_iter'], {}), '(self.result_folder_checkpoint_current_iter)\n', (38922, 38966), False, 'import os\n'), ((41905, 41916), 'skimage.util.montage', 'montage', (['im'], {}), '(im)\n', (41912, 41916), False, 'from skimage.util import montage\n'), ((41939, 41949), 'pylab.show', 'plt.show', ([], {}), '()\n', (41947, 41949), True, 'import pylab as plt\n'), ((43160, 43199), 'mxnet.nd.concatenate', 'nd.concatenate', (['[im1, im2, im3]'], {'axis': '(1)'}), '([im1, im2, im3], axis=1)\n', (43174, 43199), False, 'from mxnet import nd, gluon, autograd\n'), ((43415, 43432), 'mxnet.nd.ones_like', 'nd.ones_like', (['img'], {}), '(img)\n', (43427, 43432), False, 'from mxnet import nd, gluon, autograd\n'), ((43552, 43569), 'mxnet.nd.ones_like', 'nd.ones_like', (['roi'], {}), '(roi)\n', (43564, 43569), False, 'from mxnet import nd, gluon, autograd\n'), ((43571, 43588), 'mxnet.nd.ones_like', 'nd.ones_like', (['roi'], {}), '(roi)\n', (43583, 43588), False, 'from mxnet import nd, gluon, autograd\n'), ((46663, 46696), 'mxnet.nd.expand_dims', 'nd.expand_dims', (['images[i]'], {'axis': '(0)'}), '(images[i], axis=0)\n', (46677, 46696), False, 'from mxnet import nd, gluon, autograd\n'), ((47963, 47991), 'numpy.floor', 'np.floor', (['((height - 101) / 2)'], {}), '((height - 101) / 2)\n', (47971, 47991), True, 'import numpy as np\n'), ((48172, 48218), 'numpy.load', 'np.load', (['"""extra_data/mr_collateral_numing.npy"""'], {}), "('extra_data/mr_collateral_numing.npy')\n", (48179, 48218), True, 'import numpy as np\n'), ((53738, 53757), 'numpy.ones_like', 'np.ones_like', (['inIMG'], {}), '(inIMG)\n', (53750, 53757), True, 'import numpy as np\n'), ((54923, 54946), 'numpy.where', 'np.where', (['(cdf > tol_low)'], {}), '(cdf > tol_low)\n', (54931, 54946), True, 'import numpy as np\n'), ((54965, 54990), 'numpy.where', 'np.where', (['(cdf >= tol_high)'], {}), 
'(cdf >= tol_high)\n', (54973, 54990), True, 'import numpy as np\n'), ((9896, 9971), 'networks.unetpp_padding.UNet', 'UNet', ([], {'base_channel': 'self.base_channel_unet', 'backbone_name': 'self.backbone_name'}), '(base_channel=self.base_channel_unet, backbone_name=self.backbone_name)\n', (9900, 9971), False, 'from networks.unetpp_padding import UNet\n'), ((12913, 13032), 'utils.optimizers.get_optimizer_dict', 'get_optimizer_dict', (['self.optimizer'], {'lr': 'self.base_lr', 'lr_scheduler': 'self._lr_scheduler', 'wd': 'self.wd', 'beta1': 'self.beta1'}), '(self.optimizer, lr=self.base_lr, lr_scheduler=self.\n _lr_scheduler, wd=self.wd, beta1=self.beta1)\n', (12931, 13032), False, 'from utils.optimizers import get_optimizer_dict\n'), ((15748, 15780), 'mxnet.gluon.loss.CosineEmbeddingLoss', 'gluon.loss.CosineEmbeddingLoss', ([], {}), '()\n', (15778, 15780), False, 'from mxnet import nd, gluon, autograd\n'), ((16332, 16342), 'utils.losses.DiceLoss', 'DiceLoss', ([], {}), '()\n', (16340, 16342), False, 'from utils.losses import L1Loss_v2, L2Loss_v2, DiceLoss, L2LogLoss, L1LogLoss, LogCoshLoss, PhotometricLoss, corrcoefLoss, HuberLoss\n'), ((20332, 20380), 'mxnet.nd.where', 'nd.where', (['wp_unsup', 'fake_out_unsup', '(wp_unsup - 1)'], {}), '(wp_unsup, fake_out_unsup, wp_unsup - 1)\n', (20340, 20380), False, 'from mxnet import nd, gluon, autograd\n'), ((20529, 20581), 'mxnet.nd.where', 'nd.where', (['wp_unsup', 'fake_out_unsup_aug', '(wp_unsup - 1)'], {}), '(wp_unsup, fake_out_unsup_aug, wp_unsup - 1)\n', (20537, 20581), False, 'from mxnet import nd, gluon, autograd\n'), ((22317, 22343), 'mxnet.nd.concat', 'nd.concat', (['A_rp', 'fo'], {'dim': '(1)'}), '(A_rp, fo, dim=1)\n', (22326, 22343), False, 'from mxnet import nd, gluon, autograd\n'), ((22598, 22615), 'mxnet.nd.zeros_like', 'nd.zeros_like', (['op'], {}), '(op)\n', (22611, 22615), False, 'from mxnet import nd, gluon, autograd\n'), ((22944, 22970), 'mxnet.nd.concat', 'nd.concat', (['A_rp', '_C'], {'dim': '(1)'}), 
'(A_rp, _C, dim=1)\n', (22953, 22970), False, 'from mxnet import nd, gluon, autograd\n'), ((23096, 23112), 'mxnet.nd.ones_like', 'nd.ones_like', (['op'], {}), '(op)\n', (23108, 23112), False, 'from mxnet import nd, gluon, autograd\n'), ((24032, 24046), 'mxnet.gluon.nn.BatchNorm', 'nn.BatchNorm', ([], {}), '()\n', (24044, 24046), False, 'from mxnet.gluon import nn\n'), ((24236, 24399), 'mxnet.gluon.nn.Conv2D', 'nn.Conv2D', ([], {'channels': 'num_filters', 'kernel_size': '(kernel_size, kernel_size)', 'strides': '(stride, stride)', 'use_bias': 'opts.use_bias', 'padding': '(pad, pad)', 'groups': 'group'}), '(channels=num_filters, kernel_size=(kernel_size, kernel_size),\n strides=(stride, stride), use_bias=opts.use_bias, padding=(pad, pad),\n groups=group)\n', (24245, 24399), False, 'from mxnet.gluon import nn\n'), ((25921, 25938), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (25936, 25938), False, 'from mxnet import nd, gluon, autograd\n'), ((26435, 26452), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (26450, 26452), False, 'from mxnet import nd, gluon, autograd\n'), ((36617, 36631), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (36627, 36631), True, 'import numpy as np\n'), ((36686, 36700), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (36696, 36700), True, 'import numpy as np\n'), ((38974, 39032), 'os.path.exists', 'os.path.exists', (['self.result_folder_checkpoint_current_iter'], {}), '(self.result_folder_checkpoint_current_iter)\n', (38988, 39032), False, 'import os\n'), ((42878, 42919), 'numpy.concatenate', 'np.concatenate', (['[img_concat, tmp]'], {'axis': '(0)'}), '([img_concat, tmp], axis=0)\n', (42892, 42919), True, 'import numpy as np\n'), ((45587, 45612), 'numpy.zeros', 'np.zeros', (['self.batch_size'], {}), '(self.batch_size)\n', (45595, 45612), True, 'import numpy as np\n'), ((45751, 45767), 'mxnet.nd.array', 'nd.array', (['signal'], {}), '(signal)\n', (45759, 45767), False, 'from mxnet import nd, gluon, 
autograd\n'), ((47542, 47551), 'pylab.cm.jet', 'cm.jet', (['i'], {}), '(i)\n', (47548, 47551), False, 'from pylab import cm\n'), ((50840, 50890), 'os.path.exists', 'os.path.exists', (['density_dir_paths[presentation][k]'], {}), '(density_dir_paths[presentation][k])\n', (50854, 50890), False, 'import os\n'), ((50908, 50955), 'os.makedirs', 'os.makedirs', (['density_dir_paths[presentation][k]'], {}), '(density_dir_paths[presentation][k])\n', (50919, 50955), False, 'import os\n'), ((10124, 10278), 'networks.drnn.Init', 'init_net_params', ([], {'num_fpg': 'self.num_fpg', 'growth_rate': 'self.growth_rate', 'init_channels': 'self.base_channel_drnn', 'num_channels_out': 'self.num_channels_out'}), '(num_fpg=self.num_fpg, growth_rate=self.growth_rate,\n init_channels=self.base_channel_drnn, num_channels_out=self.\n num_channels_out)\n', (10139, 10278), True, 'from networks.drnn import DenseMultipathNet, Init as init_net_params\n'), ((10364, 10387), 'networks.drnn.DenseMultipathNet', 'DenseMultipathNet', (['opts'], {}), '(opts)\n', (10381, 10387), False, 'from networks.drnn import DenseMultipathNet, Init as init_net_params\n'), ((11862, 11883), 'mxnet.init.Constant', 'mx.init.Constant', (['(0.6)'], {}), '(0.6)\n', (11878, 11883), True, 'import mxnet as mx\n'), ((12183, 12302), 'utils.optimizers.get_optimizer_dict', 'get_optimizer_dict', (['self.optimizer'], {'lr': 'self.base_lr', 'lr_scheduler': 'self._lr_scheduler', 'wd': 'self.wd', 'beta1': 'self.beta1'}), '(self.optimizer, lr=self.base_lr, lr_scheduler=self.\n _lr_scheduler, wd=self.wd, beta1=self.beta1)\n', (12201, 12302), False, 'from utils.optimizers import get_optimizer_dict\n'), ((13713, 13816), 'mxnet.nd.random.normal', 'nd.random.normal', (['(0)', '(1)'], {'shape': '(largest_batch_size, n_in, self.input_size, self.input_size)', 'ctx': 'ctx'}), '(0, 1, shape=(largest_batch_size, n_in, self.input_size,\n self.input_size), ctx=ctx)\n', (13729, 13816), False, 'from mxnet import nd, gluon, autograd\n'), ((19632, 19692), 
'mxnet.nd.ones', 'nd.ones', (['fake_out_unsup.shape[0]'], {'ctx': 'fake_out_unsup.context'}), '(fake_out_unsup.shape[0], ctx=fake_out_unsup.context)\n', (19639, 19692), False, 'from mxnet import nd, gluon, autograd\n'), ((21577, 21698), 'pylab.imsave', 'plt.imsave', (["('%s/ep%04d_%02d_%d' % (self.result_folder_figure_train_unsup, self.\n current_epoch, self.current_it, i))", 'im'], {}), "('%s/ep%04d_%02d_%d' % (self.result_folder_figure_train_unsup,\n self.current_epoch, self.current_it, i), im)\n", (21587, 21698), True, 'import pylab as plt\n'), ((24115, 24139), 'mxnet.gluon.nn.LeakyReLU', 'nn.LeakyReLU', (['opts.alpha'], {}), '(opts.alpha)\n', (24127, 24139), False, 'from mxnet.gluon import nn\n'), ((24183, 24213), 'mxnet.gluon.nn.Activation', 'nn.Activation', (['opts.activation'], {}), '(opts.activation)\n', (24196, 24213), False, 'from mxnet.gluon import nn\n'), ((25309, 25350), 'mxnet.gluon.contrib.nn.PixelShuffle2D', 'contrib_nn.PixelShuffle2D', (['upscale_factor'], {}), '(upscale_factor)\n', (25334, 25350), True, 'import mxnet.gluon.contrib.nn as contrib_nn\n'), ((25738, 25858), 'numpy.array', 'np.array', (['[self.lambda_C, self.lambda_D, self.lambda_consistency, self.lambda_unsup,\n self.lambda0, self.lambda_aux]'], {}), '([self.lambda_C, self.lambda_D, self.lambda_consistency, self.\n lambda_unsup, self.lambda0, self.lambda_aux])\n', (25746, 25858), True, 'import numpy as np\n'), ((26593, 26608), 'mxnet.nd.square', 'nd.square', (['coef'], {}), '(coef)\n', (26602, 26608), False, 'from mxnet import nd, gluon, autograd\n'), ((42573, 42601), 'numpy.asarray', 'np.asarray', (['val_data_cropped'], {}), '(val_data_cropped)\n', (42583, 42601), True, 'import numpy as np\n'), ((45401, 45470), 'numpy.floor', 'np.floor', (['(self.current_it / (self.total_iter * (9 / 10) / m.shape[0]))'], {}), '(self.current_it / (self.total_iter * (9 / 10) / m.shape[0]))\n', (45409, 45470), True, 'import numpy as np\n'), ((45671, 45709), 'numpy.random.permutation', 
'np.random.permutation', (['self.batch_size'], {}), '(self.batch_size)\n', (45692, 45709), True, 'import numpy as np\n'), ((5926, 5956), 'numpy.where', 'np.where', (['(mask[i, ..., 0] == 1)'], {}), '(mask[i, ..., 0] == 1)\n', (5934, 5956), True, 'import numpy as np\n'), ((10522, 10592), 'networks.deeplabv3.DeepLabV3', 'DeepLabV3', (['(1)'], {'backbone': '"""resnet50"""', 'pretrained_base': '(False)', 'ctx': 'self.ctx'}), "(1, backbone='resnet50', pretrained_base=False, ctx=self.ctx)\n", (10531, 10592), False, 'from networks.deeplabv3 import DeepLabV3\n'), ((26964, 26975), 'mxnet.nd.log', 'nd.log', (['var'], {}), '(var)\n', (26970, 26975), False, 'from mxnet import nd, gluon, autograd\n'), ((28022, 28037), 'mxnet.nd.square', 'nd.square', (['coef'], {}), '(coef)\n', (28031, 28037), False, 'from mxnet import nd, gluon, autograd\n'), ((28435, 28450), 'mxnet.nd.square', 'nd.square', (['coef'], {}), '(coef)\n', (28444, 28450), False, 'from mxnet import nd, gluon, autograd\n'), ((29105, 29120), 'mxnet.nd.square', 'nd.square', (['coef'], {}), '(coef)\n', (29114, 29120), False, 'from mxnet import nd, gluon, autograd\n'), ((29502, 29517), 'mxnet.nd.square', 'nd.square', (['coef'], {}), '(coef)\n', (29511, 29517), False, 'from mxnet import nd, gluon, autograd\n'), ((37187, 37222), 'numpy.concatenate', 'np.concatenate', (['_im_contour'], {'axis': '(2)'}), '(_im_contour, axis=2)\n', (37201, 37222), True, 'import numpy as np\n'), ((40567, 40587), 'mxnet.init.Normal', 'mx.init.Normal', (['(0.02)'], {}), '(0.02)\n', (40581, 40587), True, 'import mxnet as mx\n'), ((40654, 40668), 'mxnet.init.Zero', 'mx.init.Zero', ([], {}), '()\n', (40666, 40668), True, 'import mxnet as mx\n'), ((40762, 40776), 'mxnet.init.Zero', 'mx.init.Zero', ([], {}), '()\n', (40774, 40776), True, 'import mxnet as mx\n'), ((46178, 46209), 'numpy.floor', 'np.floor', (['(current_it / interval)'], {}), '(current_it / interval)\n', (46186, 46209), True, 'import numpy as np\n'), ((46913, 46948), 
'mxnet.nd.random_uniform', 'nd.random_uniform', (['(0)', '(1)'], {'shape': '(1,)'}), '(0, 1, shape=(1,))\n', (46930, 46948), False, 'from mxnet import nd, gluon, autograd\n'), ((4469, 4497), 'numpy.load', 'np.load', (["('%s' % self.mr_file)"], {}), "('%s' % self.mr_file)\n", (4476, 4497), True, 'import numpy as np\n'), ((11575, 11596), 'mxnet.init.Constant', 'mx.init.Constant', (['(0.6)'], {}), '(0.6)\n', (11591, 11596), True, 'import mxnet as mx\n'), ((5813, 5827), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (5825, 5827), False, 'from sklearn.preprocessing import RobustScaler\n'), ((28142, 28153), 'mxnet.nd.log', 'nd.log', (['var'], {}), '(var)\n', (28148, 28153), False, 'from mxnet import nd, gluon, autograd\n'), ((28769, 28780), 'mxnet.nd.log', 'nd.log', (['var'], {}), '(var)\n', (28775, 28780), False, 'from mxnet import nd, gluon, autograd\n'), ((29219, 29230), 'mxnet.nd.log', 'nd.log', (['var'], {}), '(var)\n', (29225, 29230), False, 'from mxnet import nd, gluon, autograd\n'), ((29967, 29978), 'mxnet.nd.log', 'nd.log', (['var'], {}), '(var)\n', (29973, 29978), False, 'from mxnet import nd, gluon, autograd\n'), ((27336, 27363), 'mxnet.nd.zeros_like', 'nd.zeros_like', (['A_rp[:, 0:1]'], {}), '(A_rp[:, 0:1])\n', (27349, 27363), False, 'from mxnet import nd, gluon, autograd\n'), ((27569, 27596), 'mxnet.nd.zeros_like', 'nd.zeros_like', (['A_rp[:, 1:2]'], {}), '(A_rp[:, 1:2])\n', (27582, 27596), False, 'from mxnet import nd, gluon, autograd\n'), ((29780, 29814), 'mxnet.nd.ones', 'nd.ones', (['C.shape[0]'], {'ctx': 'C.context'}), '(C.shape[0], ctx=C.context)\n', (29787, 29814), False, 'from mxnet import nd, gluon, autograd\n'), ((47020, 47072), 'mxnet.nd.random_uniform', 'nd.random_uniform', (['(0)', '(self.pool_size - 1)'], {'shape': '(1,)'}), '(0, self.pool_size - 1, shape=(1,))\n', (47037, 47072), False, 'from mxnet import nd, gluon, autograd\n'), ((29661, 29682), 'mxnet.nd.where', 'nd.where', (['m', 'C', '(m - 1)'], {}), '(m, C, m - 
1)\n', (29669, 29682), False, 'from mxnet import nd, gluon, autograd\n'), ((29725, 29753), 'mxnet.nd.where', 'nd.where', (['m', 'fake_out', '(m - 1)'], {}), '(m, fake_out, m - 1)\n', (29733, 29753), False, 'from mxnet import nd, gluon, autograd\n'), ((43254, 43287), 'mxnet.nd.transpose', 'nd.transpose', (['im_cc', '(1, 0, 2, 3)'], {}), '(im_cc, (1, 0, 2, 3))\n', (43266, 43287), False, 'from mxnet import nd, gluon, autograd\n')] |
import os
import random
import shutil
import traceback
from datetime import datetime

import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import skimage.transform as sktransform
import tensorflow as tf
from keras.callbacks import ReduceLROnPlateau, LearningRateScheduler
from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda
from keras.losses import logcosh
from keras.models import Sequential
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn import model_selection
from sklearn.utils import shuffle
from tqdm import tqdm
# Root directory of the driving dataset (images + driving_log.csv).
path = '/opt/carnd_p3/data'
# Column names of the three camera images in driving_log.csv, and the
# steering-angle correction applied for each off-center viewpoint
# (left camera needs a right correction and vice versa).
cameras = ['left', 'center', 'right']
cameras_steering_correction = [0.25, 0.0, -0.25]
def preprocess(image, top_offset=.375, bottom_offset=.125):
    """
    Apply the preprocessing pipeline to an image.

    Crops `top_offset` and `bottom_offset` fractions of the image height
    from the top and bottom respectively, then resizes the result to
    66x200 px (the input size of the network below).  For integer input,
    `skimage.transform.resize` also rescales pixel values to [0, 1].

    Parameters
    ----------
    image : ndarray of shape (H, W, 3)
        Source image.
    top_offset, bottom_offset : float
        Fractions of the image height to crop away.

    Returns
    -------
    ndarray of shape (66, 200, 3)
        Cropped and resized image.
    """
    height = image.shape[0]
    top = int(top_offset * height)
    # Use an absolute bottom index rather than a negative slice bound:
    # the original `image[top:-bottom]` produced an EMPTY array whenever
    # bottom_offset rounded down to zero rows (e.g. bottom_offset=0).
    bottom = height - int(bottom_offset * height)
    return sktransform.resize(image[top:bottom, :], (66, 200, 3))
def flip(x, y):
    """
    Mirror a random half of the dataset in place.

    Picks half of the samples at random, flips those images horizontally
    (reverses the width axis) and negates the matching steering angles,
    so left and right turns end up equally represented.

    Both arrays are mutated in place and also returned as a pair.
    """
    n_samples = x.shape[0]
    chosen = random.sample(range(n_samples), int(n_samples / 2))
    x[chosen] = x[chosen, :, ::-1, :]
    y[chosen] = -y[chosen]
    return x, y
def make_dataset(dataset, train=True, cases=None):
    """
    Build an (images, steering-angles) dataset from a driving log.

    The raw log is dominated by straight driving (steering == 0), so for
    training data every turning frame is additionally replicated up to 17
    times with a random vertical crop shift and a little angle noise.  A
    camera (left/center/right) is picked at random per sample and its
    steering correction applied.  Angles are scaled by 10 throughout so
    the network trains on a wider output range.

    Parameters
    ----------
    dataset : pandas.DataFrame
        Driving-log rows (columns ``left``, ``center``, ``right``,
        ``steering``).
    train : bool
        When True, run the augmentation pipeline; otherwise use only the
        center camera.
    cases : int or None
        Validation only: when an int, sample that many random rows
        instead of using the whole dataset.

    Returns
    -------
    (x, y) : tuple of ndarray
        Shuffled preprocessed images and scaled steering angles.

    Raises
    ------
    ValueError
        If `cases` is neither None nor an int.
    """
    x = []
    y = []
    if train:
        for i in tqdm(range(len(dataset)), desc="Loading"):
            val = dataset.steering.values[i]
            # Pick one of the three cameras at random and compensate the
            # steering angle for the off-center viewpoint.
            j = np.random.randint(len(cameras))
            img = mpimg.imread(os.path.join(path, dataset[cameras[j]].values[i].strip()))
            if val != 0:
                # Oversample turning frames to counter the zero-steering bias.
                for _ in range(np.random.randint(17)):
                    shift = np.random.uniform(-0.07, 0.07)
                    x.append(preprocess(img, top_offset=.375 + shift,
                                        bottom_offset=.125 + shift))
                    y.append((val + round(np.random.normal(shift, 0.0003), 5)
                              + cameras_steering_correction[j]) * 10)
            x.append(preprocess(img))
            y.append((val + cameras_steering_correction[j]) * 10)
        x = np.array(x)
        y = np.array(y)
        x, y = flip(x, y)
    else:
        if cases is None:  # identity test, not `== None`
            x = np.array([preprocess(mpimg.imread(os.path.join(path,
                          dataset[cameras[1]].values[i].strip())))
                          for i in tqdm(range(len(dataset)), desc="Loading")])
            y = np.array([dataset.steering.values[i] * 10 for i in range(len(dataset))])
        elif isinstance(cases, int):
            indices = random.sample(range(len(dataset)), cases)
            x = np.array([preprocess(mpimg.imread(os.path.join(path,
                          dataset[cameras[1]].values[i].strip())))
                          for i in indices])
            # Scale by 10 to match the training targets and the
            # `cases is None` branch (the original omitted the factor here).
            y = np.array([dataset.steering.values[i] * 10 for i in indices])
        else:
            raise ValueError("Invalid type for cases!")
    return shuffle(x, y)
def _build_model():
    """Build and compile the NVIDIA end-to-end driving CNN.

    Architecture from "End to End Learning for Self-Driving Cars"
    (Bojarski et al., NVIDIA): five convolutional layers followed by four
    dense layers, with dropout after the two widest dense layers to
    reduce overfitting.  Only the first layer needs `input_shape`; the
    redundant (ignored) shapes on later layers were dropped.
    """
    model = Sequential()
    model.add(Conv2D(24, 5, strides=(2, 2), input_shape=(66, 200, 3), activation='relu'))
    model.add(Conv2D(36, 5, strides=(2, 2), activation='relu'))
    model.add(Conv2D(48, 5, strides=(2, 2), activation='relu'))
    model.add(Conv2D(64, 3, activation='relu'))
    model.add(Conv2D(64, 3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(1164, activation='relu'))
    model.add(Dropout(0.5))  # avoid overfitting
    model.add(Dense(100, activation='relu'))
    model.add(Dense(50, activation='relu'))
    model.add(Dropout(0.5))  # avoid overfitting
    model.add(Dense(10, activation='relu'))
    model.add(Dense(1))
    model.build()
    model.compile(optimizer=Adam(lr=2e-04), loss="mse")
    return model


def _plot_distribution(train_y):
    """Save a histogram of the (unscaled) steering angles to Data_Distribution.png."""
    plt.figure()
    plt.hist(train_y / 10, bins=101, alpha=0.5)  # /10 undoes the target scaling
    plt.title('Data Distribution after augmentation')
    plt.ylabel('Frequency')
    plt.xlabel('Steering Angle')
    plt.savefig('Data_Distribution.png')
    plt.close()


def _plot_loss(history):
    """Save the training/validation loss curves to model_loss.png."""
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['training loss', 'validation loss'], loc='upper left')
    plt.savefig('model_loss.png')
    plt.close()


if __name__ == '__main__':
    # Append-mode run log: timestamp, model summary, training history.
    # `with` replaces the original manual open()/finally-close() pairing.
    with open("log.txt", "a") as log:
        try:
            log.write(str(datetime.now().strftime('%c')))
            test_size = 0.3
            df = pd.read_csv(os.path.join(path, 'driving_log.csv'))
            train, valid = model_selection.train_test_split(df, test_size=test_size)
            train_x, train_y = make_dataset(train)
            print(len(train_y))
            _plot_distribution(train_y)
            print("Training Data acquired!")
            valid_x, valid_y = make_dataset(valid, False)
            print("Validation Data acquired!")
            model = _build_model()
            model.summary(print_fn=lambda line: log.write(line + '\n'))
            print("Model built!")
            # Further augmentation: brightness only — geometry is already
            # augmented inside make_dataset.
            datagen = ImageDataGenerator(brightness_range=(0.7, 0.9))
            datagen.fit(train_x)
            history = model.fit_generator(
                datagen.flow(train_x, train_y, batch_size=64),
                epochs=64,
                validation_data=(valid_x, valid_y)
            )
            log.write(str(history.history))
            _plot_loss(history)
            model.save("model.h5")
            with open(os.path.join('./', 'model.json'), 'w') as file:
                file.write(model.to_json())
            log.write("\n=================================================================================\n\n")
            print("Finished!")
        except Exception:
            # Record the full traceback (the original wrote only str(e),
            # which loses the failure location entirely).
            log.write(traceback.format_exc())
"keras.layers.Conv2D",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.array",
"keras.layers.Dense",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"keras.optimizers.Adam",
"numpy.random.normal",
"ma... | [((1137, 1192), 'skimage.transform.resize', 'sktransform.resize', (['image[top:-bottom, :]', '(66, 200, 3)'], {}), '(image[top:-bottom, :], (66, 200, 3))\n', (1155, 1192), True, 'import skimage.transform as sktransform\n'), ((3554, 3567), 'sklearn.utils.shuffle', 'shuffle', (['x', 'y'], {}), '(x, y)\n', (3561, 3567), False, 'from sklearn.utils import shuffle\n'), ((2626, 2637), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2634, 2637), True, 'import numpy as np\n'), ((2655, 2666), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2663, 2666), True, 'import numpy as np\n'), ((3832, 3889), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['df'], {'test_size': 'test_size'}), '(df, test_size=test_size)\n', (3864, 3889), False, 'from sklearn import model_selection\n'), ((4007, 4019), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4017, 4019), True, 'import matplotlib.pyplot as plt\n'), ((4029, 4072), 'matplotlib.pyplot.hist', 'plt.hist', (['(train_y / 10)'], {'bins': '(101)', 'alpha': '(0.5)'}), '(train_y / 10, bins=101, alpha=0.5)\n', (4037, 4072), True, 'import matplotlib.pyplot as plt\n'), ((4077, 4126), 'matplotlib.pyplot.title', 'plt.title', (['"""Data Distribution after augmentation"""'], {}), "('Data Distribution after augmentation')\n", (4086, 4126), True, 'import matplotlib.pyplot as plt\n'), ((4135, 4158), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (4145, 4158), True, 'import matplotlib.pyplot as plt\n'), ((4167, 4195), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steering Angle"""'], {}), "('Steering Angle')\n", (4177, 4195), True, 'import matplotlib.pyplot as plt\n'), ((4204, 4240), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Data_Distribution.png"""'], {}), "('Data_Distribution.png')\n", (4215, 4240), True, 'import matplotlib.pyplot as plt\n'), ((4249, 4260), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4258, 4260), True, 'import 
matplotlib.pyplot as plt\n'), ((4483, 4495), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4493, 4495), False, 'from keras.models import Sequential\n'), ((5607, 5654), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'brightness_range': '(0.7, 0.9)'}), '(brightness_range=(0.7, 0.9))\n', (5625, 5654), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((5999, 6032), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (6007, 6032), True, 'import matplotlib.pyplot as plt\n'), ((6041, 6078), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (6049, 6078), True, 'import matplotlib.pyplot as plt\n'), ((6087, 6110), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (6096, 6110), True, 'import matplotlib.pyplot as plt\n'), ((6119, 6137), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (6129, 6137), True, 'import matplotlib.pyplot as plt\n'), ((6146, 6165), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6156, 6165), True, 'import matplotlib.pyplot as plt\n'), ((6174, 6240), 'matplotlib.pyplot.legend', 'plt.legend', (["['training loss', 'validation loss']"], {'loc': '"""upper left"""'}), "(['training loss', 'validation loss'], loc='upper left')\n", (6184, 6240), True, 'import matplotlib.pyplot as plt\n'), ((6249, 6278), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""model_loss.png"""'], {}), "('model_loss.png')\n", (6260, 6278), True, 'import matplotlib.pyplot as plt\n'), ((6287, 6298), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6296, 6298), True, 'import matplotlib.pyplot as plt\n'), ((3770, 3807), 'os.path.join', 'os.path.join', (['path', '"""driving_log.csv"""'], {}), "(path, 'driving_log.csv')\n", (3782, 3807), False, 'import os\n'), ((4591, 4665), 'keras.layers.Conv2D', 'Conv2D', 
(['(24)', '(5)'], {'strides': '(2, 2)', 'input_shape': '(66, 200, 3)', 'activation': '"""relu"""'}), "(24, 5, strides=(2, 2), input_shape=(66, 200, 3), activation='relu')\n", (4597, 4665), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((4686, 4760), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)'], {'strides': '(2, 2)', 'input_shape': '(31, 98, 24)', 'activation': '"""relu"""'}), "(36, 5, strides=(2, 2), input_shape=(31, 98, 24), activation='relu')\n", (4692, 4760), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((4781, 4855), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)'], {'strides': '(2, 2)', 'input_shape': '(14, 47, 36)', 'activation': '"""relu"""'}), "(48, 5, strides=(2, 2), input_shape=(14, 47, 36), activation='relu')\n", (4787, 4855), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((4876, 4933), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'input_shape': '(5, 22, 48)', 'activation': '"""relu"""'}), "(64, 3, input_shape=(5, 22, 48), activation='relu')\n", (4882, 4933), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((4953, 5010), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'input_shape': '(3, 20, 64)', 'activation': '"""relu"""'}), "(64, 3, input_shape=(3, 20, 64), activation='relu')\n", (4959, 5010), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((5030, 5039), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5037, 5039), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((5059, 5089), 'keras.layers.Dense', 'Dense', (['(1164)'], {'activation': '"""relu"""'}), "(1164, activation='relu')\n", (5064, 5089), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((5109, 5121), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5116, 5121), False, 
'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((5183, 5212), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (5188, 5212), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((5232, 5260), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (5237, 5260), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((5280, 5292), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5287, 5292), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((5354, 5382), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5359, 5382), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((5402, 5410), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (5407, 5410), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten, Lambda\n'), ((2142, 2163), 'numpy.random.randint', 'np.random.randint', (['(17)'], {}), '(17)\n', (2159, 2163), True, 'import numpy as np\n'), ((3417, 3472), 'numpy.array', 'np.array', (['[dataset.steering.values[i] for i in indices]'], {}), '([dataset.steering.values[i] for i in indices])\n', (3425, 3472), True, 'import numpy as np\n'), ((5466, 5481), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)'}), '(lr=0.0002)\n', (5470, 5481), False, 'from keras.optimizers import Adam\n'), ((6350, 6382), 'os.path.join', 'os.path.join', (['"""./"""', '"""model.json"""'], {}), "('./', 'model.json')\n", (6362, 6382), False, 'import os\n'), ((2233, 2263), 'numpy.random.uniform', 'np.random.uniform', (['(-0.07)', '(0.07)'], {}), '(-0.07, 0.07)\n', (2250, 2263), True, 'import numpy as np\n'), ((3687, 3701), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3699, 3701), False, 'from datetime import 
datetime\n'), ((2440, 2471), 'numpy.random.normal', 'np.random.normal', (['shift', '(0.0003)'], {}), '(shift, 0.0003)\n', (2456, 2471), True, 'import numpy as np\n')] |
from __future__ import print_function
import os
import sys
import json
import argparse
import re
import numpy as np
from konlpy.tag import Mecab, Kkma
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from ..dataset import Dictionary
from ...utils.registry import dictionary_dict
def tokenize_kvqa(s):
    # Normalize a KVQA question string before morphological tokenization:
    # periods become spaces.
    # NOTE(review): the second `.replace(' ', ' ')` is a no-op as written —
    # it was presumably meant to collapse double spaces ('  ' -> ' ');
    # confirm against the original preprocessing before changing it.
    return s.replace('.', ' ').replace(' ', ' ')
def create_dictionary(dataroot, tk='mecab'):
    """Build a question-word Dictionary from the KVQA annotation files.

    Args:
        dataroot: Directory containing the ``KVQA_annotations_*.json`` files.
        tk: Korean morphological tokenizer to use: ``'mecab'`` or ``'kkma'``.

    Returns:
        A Dictionary populated with the tokens of every question in the
        train/val/test annotation files.

    Raises:
        ValueError: If ``tk`` names an unknown tokenizer.
    """
    # Validate the tokenizer choice first: the original code left `tokenizer`
    # unbound for unknown values and only failed later with a NameError.
    if tk == 'mecab':
        tokenizer = Mecab()
    elif tk == 'kkma':
        tokenizer = Kkma()
    else:
        raise ValueError(
            "unknown tokenizer: %r (expected 'mecab' or 'kkma')" % (tk,))
    dictionary = Dictionary()
    files = [
        'KVQA_annotations_train.json',
        'KVQA_annotations_val.json',
        'KVQA_annotations_test.json'
    ]
    for path in files:
        question_path = os.path.join(dataroot, path)
        qs = json.load(open(question_path, encoding='utf-8'))
        for q in qs:
            dictionary.tokenize(tokenize_kvqa(q['question']), True, tokenizer.morphs)
    return dictionary
def create_embedding_init(idx2word, glove_file, format='stanford'):
    """Build an embedding weight matrix aligned with a vocabulary.

    Args:
        idx2word: Sequence of vocabulary words; row ``i`` of the returned
            matrix corresponds to ``idx2word[i]``.
        glove_file: Path to the pretrained embedding file.
        format: File layout: 'stanford' (word then values per line),
            'fasttext' (same, but with a one-line header), or 'word2vec'
            (tab-separated ``idx\\tword\\t[vec]`` entries).

    Returns:
        Tuple ``(weights, word2emb)`` where ``weights`` is a float32 array of
        shape ``(len(idx2word), emb_dim)`` (rows for out-of-vocabulary words
        stay zero) and ``word2emb`` maps every word in the file to its vector.
    """
    word2emb = {}
    with open(glove_file, 'r', encoding='utf-8') as f:
        entries = f.readlines()
    if format == 'stanford':
        emb_dim = len(entries[0].split(' ')) - 1
    elif format == 'fasttext':
        # fastText files carry a "count dim" header line; skip it.
        entries = entries[1:]
        emb_dim = len(entries[0].strip().split(' ')) - 1
    elif format == 'word2vec':
        # Entries may span lines; rejoin and split on the closing bracket.
        entries = ''.join(entries).replace('\n', '').split(']')
        _, word, vec = entries[0].split('\t')
        # Raw strings for the regex (plain '\s' is an invalid escape in py3).
        vals = list(map(float, re.sub(r'\s+', ' ', vec.replace('[', '')).strip().split(' ')))
        emb_dim = len(vals)
    print('embedding dim is %d' % emb_dim)
    weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32)
    for entry in entries:
        if format in ('stanford', 'fasttext'):
            vals = entry.strip().split(' ')
            word = vals[0]
            vals = list(map(float, vals[1:]))
        else:
            if entry == '':
                continue
            _, word, vec = entry.split('\t')
            vals = list(map(float, re.sub(r'\s+', ' ', vec.replace('[', '')).strip().split(' ')))
        word2emb[word] = np.array(vals)
    notFound = 0
    for idx, word in enumerate(idx2word):
        if word not in word2emb:
            notFound += 1
            print(word)
            continue
        weights[idx] = word2emb[word]
    print('not found %d/%d words' % (notFound, len(idx2word)))
    return weights, word2emb
if __name__ == '__main__':
    # Build the question dictionary and its matching pretrained-embedding
    # matrix for the embedding configuration selected on the command line.
    cli = argparse.ArgumentParser()
    cli.add_argument('--embedding', default='glove-rg', type=str)
    opts = cli.parse_args()
    dataroot = 'data'
    cfg = dictionary_dict[opts.embedding]
    vocab = create_dictionary(dataroot, cfg['tokenizer'])
    dict_path = os.path.join(dataroot, cfg['dict'])
    vocab.dump_to_file(dict_path)
    # Round-trip through disk so the saved dictionary is what gets embedded.
    vocab = Dictionary.load_from_file(dict_path)
    emb_path = os.path.join(dataroot, cfg['path'])
    weights, _ = create_embedding_init(vocab.idx2word, emb_path, cfg['format'])
    np.save(os.path.join(dataroot, cfg['embedding']), weights)
| [
"argparse.ArgumentParser",
"konlpy.tag.Mecab",
"os.path.join",
"numpy.array",
"konlpy.tag.Kkma",
"os.path.abspath"
] | [((2475, 2500), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2498, 2500), False, 'import argparse\n'), ((2736, 2771), 'os.path.join', 'os.path.join', (['dataroot', "emb['dict']"], {}), "(dataroot, emb['dict'])\n", (2748, 2771), False, 'import os\n'), ((2869, 2904), 'os.path.join', 'os.path.join', (['dataroot', "emb['path']"], {}), "(dataroot, emb['path'])\n", (2881, 2904), False, 'import os\n'), ((498, 505), 'konlpy.tag.Mecab', 'Mecab', ([], {}), '()\n', (503, 505), False, 'from konlpy.tag import Mecab, Kkma\n'), ((736, 764), 'os.path.join', 'os.path.join', (['dataroot', 'path'], {}), '(dataroot, path)\n', (748, 764), False, 'import os\n'), ((2124, 2138), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (2132, 2138), True, 'import numpy as np\n'), ((3006, 3046), 'os.path.join', 'os.path.join', (['dataroot', "emb['embedding']"], {}), "(dataroot, emb['embedding'])\n", (3018, 3046), False, 'import os\n'), ((199, 224), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (214, 224), False, 'import os\n'), ((549, 555), 'konlpy.tag.Kkma', 'Kkma', ([], {}), '()\n', (553, 555), False, 'from konlpy.tag import Mecab, Kkma\n')] |
#!/usr/bin/env python2
import os
import argparse
import numpy as np
if __name__ == '__main__':
    # Replace each state estimate's leading id with the groundtruth timestamp
    # of its matched groundtruth id, and write the stamped states next to the
    # input file.
    parser = argparse.ArgumentParser(
        description='stamp state estimate that are marked using id')
    parser.add_argument('state_est',
                        help='state estimate file that starts with id')
    parser.add_argument('--matches', type=str,
                        help='id pairs: state estimate -> groundtruth',
                        default='groundtruth_matches.txt')
    parser.add_argument('--groundtruth', type=str,
                        help='id, stamp, gt', default='groundtruth.txt')
    args = parser.parse_args()

    for required in (args.state_est, args.matches, args.groundtruth):
        assert os.path.exists(required)

    outdir = os.path.dirname(os.path.abspath(args.state_est))
    outfn = os.path.join(outdir, 'stamped_' + os.path.basename(args.state_est))
    print("Going to stamp {0} with {1} and write to {2}".format(args.state_est,
                                                                args.matches,
                                                                outfn))

    id_state_est = np.loadtxt(args.state_est)
    print("Loaded {0} states.".format(id_state_est.shape[0]))

    def _load_id_map(fn, value_type):
        """Read 'id value' pairs from fn, skipping '#' comment lines."""
        with open(fn) as fh:
            rows = [ln.strip().split(' ')
                    for ln in fh.readlines() if not ln.startswith('#')]
        return {int(r[0]): value_type(r[1]) for r in rows}

    est_gt_id_map = _load_id_map(args.matches, int)
    print("Loaded {0} id pairs.".format(len(est_gt_id_map)))

    id_stamp_map = _load_id_map(args.groundtruth, float)
    print("Loaded {0} id pairs.".format(len(id_stamp_map)))

    stamped_states = []
    for row in id_state_est.tolist():
        est_id = int(row[0])
        if est_id in est_gt_id_map:
            stamped = list(row)
            stamped[0] = id_stamp_map[est_gt_id_map[est_id]]
            stamped_states.append(stamped)

    np.savetxt(outfn, stamped_states, header='time x y z qx qy qz qw')
    print("Found matches and written for {0} states.".format(
        len(stamped_states)))
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.path.basename",
"numpy.savetxt",
"os.path.abspath",
"numpy.loadtxt"
] | [((111, 200), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""stamp state estimate that are marked using id"""'}), "(description=\n 'stamp state estimate that are marked using id')\n", (134, 200), False, 'import argparse\n'), ((659, 689), 'os.path.exists', 'os.path.exists', (['args.state_est'], {}), '(args.state_est)\n', (673, 689), False, 'import os\n'), ((701, 729), 'os.path.exists', 'os.path.exists', (['args.matches'], {}), '(args.matches)\n', (715, 729), False, 'import os\n'), ((741, 773), 'os.path.exists', 'os.path.exists', (['args.groundtruth'], {}), '(args.groundtruth)\n', (755, 773), False, 'import os\n'), ((1167, 1193), 'numpy.loadtxt', 'np.loadtxt', (['args.state_est'], {}), '(args.state_est)\n', (1177, 1193), True, 'import numpy as np\n'), ((2256, 2322), 'numpy.savetxt', 'np.savetxt', (['outfn', 'stamped_states'], {'header': '"""time x y z qx qy qz qw"""'}), "(outfn, stamped_states, header='time x y z qx qy qz qw')\n", (2266, 2322), True, 'import numpy as np\n'), ((803, 834), 'os.path.abspath', 'os.path.abspath', (['args.state_est'], {}), '(args.state_est)\n', (818, 834), False, 'import os\n'), ((882, 914), 'os.path.basename', 'os.path.basename', (['args.state_est'], {}), '(args.state_est)\n', (898, 914), False, 'import os\n')] |
# pylint: disable=import-error
from pathlib import Path
import pandas as pd
import numpy as np
import optuna
from optuna.samplers import TPESampler
from argparse import Namespace
import argparse
from PCM.optuna import (
Objective_ST,
Objective_ST_ext,
Objective_MT,
Objective_MT_withPRT,
)
from pytorch_lightning import seed_everything
def arg_parser():
    """Parse the command-line options for an Optuna HP-search run.

    Returns:
        argparse.Namespace with model_dir, method, censored, batch_size
        and noise attributes.
    """
    p = argparse.ArgumentParser(
        description="Run Optune to determine optimal model HPs"
    )
    p.add_argument("--model_dir", type=str, required=True,
                   help="Model directory")
    p.add_argument("--method", type=str, required=True,
                   help="Which approach to use")
    p.add_argument("--censored", action="store_true")
    p.add_argument("--batch_size", type=int, default=512,
                   help="Batch size")
    p.add_argument("--noise", type=float, default=0.05,
                   help="Noise level applied to cmp and prt at training time")
    return p.parse_args()
if __name__ == "__main__":
    # Entry point: build train/val data for the selected method and launch
    # an Optuna hyper-parameter search (2 trials) over the chosen objective.
    args = vars(arg_parser())
    print(args)
    # prepare data
    model_dir = Path(args["model_dir"])
    optuna_dir = Path(model_dir / "OptunaHPSearch")
    # Shared data-loader parameters forwarded to the objective as a Namespace.
    par = {
        "censored": args["censored"],
        "batch_size": args["batch_size"],
        "noise": args["noise"],
    }
    # Get the data
    # NOTE(review): all inputs below are randomly generated placeholders
    # (uniform compound/protein features and pIC50s), presumably to
    # smoke-test the search pipeline rather than train on real data —
    # confirm before relying on the resulting hyper-parameters.
    N_train = 1000
    N_val = 100
    # Compound features in [-1, 1), protein features in [0, 1).
    cmp_tr = np.random.rand(N_train, 512) * 2 - 1.0
    prt_tr = np.random.rand(N_train, 256)
    pIC50_tr = np.random.rand(N_train) + 5
    prefixes_tr = np.random.randint(low=-1, high=2, size=N_train)
    cmp_val = np.random.rand(N_val, 512) * 2 - 1.0
    prt_val = np.random.rand(N_val, 256)
    pIC50_val = np.random.rand(N_val) + 5
    prefixes_val = np.random.randint(low=-1, high=2, size=N_val)
    # Each method variant needs a differently shaped assay/task signal and a
    # matching Objective class from PCM.optuna.
    if args["method"] == "PCM":
        # Alternating one-hot assay indicators for the single-task objective.
        asy_tr = np.vstack([[0, 1], [1, 0]] * int(N_train / 2))
        asy_val = np.vstack([[0, 1], [1, 0]] * int(N_val / 2))
        data_params = Namespace(**par)
        data_train = {
            "prt": prt_tr,
            "cmp": cmp_tr,
            "asy": asy_tr,
            "pIC50": pIC50_tr,
            "prefixes": prefixes_tr,
        }
        data_val = {
            "prt": prt_val,
            "cmp": cmp_val,
            "asy": asy_val,
            "pIC50": pIC50_val,
            "prefixes": prefixes_val,
        }
        objective = Objective_ST(
            optuna_dir, data_params, data_train, data_val, data_test=None
        )
    elif args["method"] == "PCM_ext":
        # Extended single-task objective: integer assay ids over 5 tasks.
        par["num_tasks"] = 5
        data_params = Namespace(**par)
        asy_tr = np.array([0, 1, 2, 3, 4] * int(N_train / 5)).astype(int)
        asy_val = np.array([0, 1, 2, 3, 4] * int(N_val / 5)).astype(int)
        data_train = {
            "prt": prt_tr,
            "cmp": cmp_tr,
            "asy": asy_tr,
            "pIC50": pIC50_tr,
            "prefixes": prefixes_tr,
        }
        data_val = {
            "prt": prt_val,
            "cmp": cmp_val,
            "asy": asy_val,
            "pIC50": pIC50_val,
            "prefixes": prefixes_val,
        }
        objective = Objective_ST_ext(
            optuna_dir, data_params, data_train, data_val, data_test=None
        )
    elif args["method"] == "PCM_MT":
        # Multi-task objective: task indices instead of protein features.
        par["num_tasks"] = 5
        taskind_tr = np.array([0, 1, 2, 3, 4] * int(N_train / 5)).astype(int)
        taskind_val = np.array([0, 1, 2, 3, 4] * int(N_val / 5)).astype(int)
        data_params = Namespace(**par)
        data_train = {
            "cmp": cmp_tr,
            "pIC50": pIC50_tr,
            "prefixes": prefixes_tr,
            "taskind": taskind_tr,
        }
        data_val = {
            "cmp": cmp_val,
            "pIC50": pIC50_val,
            "prefixes": prefixes_val,
            "taskind": taskind_val,
        }
        objective = Objective_MT(
            optuna_dir, data_params, data_train, data_val, data_test=None
        )
    elif args["method"] == "PCM_MT_withPRT":
        # Multi-task objective that also receives protein features.
        par["num_tasks"] = 5
        taskind_tr = np.array([0, 1, 2, 3, 4] * int(N_train / 5)).astype(int)
        taskind_val = np.array([0, 1, 2, 3, 4] * int(N_val / 5)).astype(int)
        data_params = Namespace(**par)
        data_train = {
            "prt": prt_tr,
            "cmp": cmp_tr,
            "pIC50": pIC50_tr,
            "prefixes": prefixes_tr,
            "taskind": taskind_tr,
        }
        data_val = {
            "prt": prt_val,
            "cmp": cmp_val,
            "pIC50": pIC50_val,
            "prefixes": prefixes_val,
            "taskind": taskind_val,
        }
        objective = Objective_MT_withPRT(
            optuna_dir, data_params, data_train, data_val, data_test=None
        )
    # Fix all RNG seeds (incl. dataloader workers) for reproducibility.
    seed_everything(42, workers=True)
    study_name = args["method"]
    sampler = TPESampler(seed=10)
    # Persist the study in a SQLite DB inside the Optuna directory so repeated
    # runs resume the same study (load_if_exists=True).
    study = optuna.create_study(
        study_name=study_name,
        storage="sqlite:///%s/%s.db" % (str(optuna_dir), study_name),
        pruner=optuna.pruners.MedianPruner(n_warmup_steps=10),
        direction="maximize",
        load_if_exists=True,
        sampler=sampler,
    )
    study.optimize(objective, n_trials=2)
    print("Number of finished trials: {}".format(len(study.trials)))
    print("Best trial:")
    trial = study.best_trial
    print("  Value: {}".format(trial.value))
    print("  Params: ")
    for key, value in trial.params.items():
        print("    {}: {}".format(key, value))
| [
"numpy.random.rand",
"argparse.ArgumentParser",
"pathlib.Path",
"pytorch_lightning.seed_everything",
"PCM.optuna.Objective_MT_withPRT",
"PCM.optuna.Objective_ST",
"numpy.random.randint",
"argparse.Namespace",
"PCM.optuna.Objective_MT",
"optuna.pruners.MedianPruner",
"optuna.samplers.TPESampler",... | [((386, 471), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run Optune to determine optimal model HPs"""'}), "(description='Run Optune to determine optimal model HPs'\n )\n", (409, 471), False, 'import argparse\n'), ((1208, 1231), 'pathlib.Path', 'Path', (["args['model_dir']"], {}), "(args['model_dir'])\n", (1212, 1231), False, 'from pathlib import Path\n'), ((1249, 1283), 'pathlib.Path', 'Path', (["(model_dir / 'OptunaHPSearch')"], {}), "(model_dir / 'OptunaHPSearch')\n", (1253, 1283), False, 'from pathlib import Path\n'), ((1535, 1563), 'numpy.random.rand', 'np.random.rand', (['N_train', '(256)'], {}), '(N_train, 256)\n', (1549, 1563), True, 'import numpy as np\n'), ((1625, 1672), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-1)', 'high': '(2)', 'size': 'N_train'}), '(low=-1, high=2, size=N_train)\n', (1642, 1672), True, 'import numpy as np\n'), ((1739, 1765), 'numpy.random.rand', 'np.random.rand', (['N_val', '(256)'], {}), '(N_val, 256)\n', (1753, 1765), True, 'import numpy as np\n'), ((1827, 1872), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-1)', 'high': '(2)', 'size': 'N_val'}), '(low=-1, high=2, size=N_val)\n', (1844, 1872), True, 'import numpy as np\n'), ((4798, 4831), 'pytorch_lightning.seed_everything', 'seed_everything', (['(42)'], {'workers': '(True)'}), '(42, workers=True)\n', (4813, 4831), False, 'from pytorch_lightning import seed_everything\n'), ((4878, 4897), 'optuna.samplers.TPESampler', 'TPESampler', ([], {'seed': '(10)'}), '(seed=10)\n', (4888, 4897), False, 'from optuna.samplers import TPESampler\n'), ((1579, 1602), 'numpy.random.rand', 'np.random.rand', (['N_train'], {}), '(N_train)\n', (1593, 1602), True, 'import numpy as np\n'), ((1782, 1803), 'numpy.random.rand', 'np.random.rand', (['N_val'], {}), '(N_val)\n', (1796, 1803), True, 'import numpy as np\n'), ((2057, 2073), 'argparse.Namespace', 'Namespace', ([], {}), '(**par)\n', (2066, 2073), 
False, 'from argparse import Namespace\n'), ((2461, 2536), 'PCM.optuna.Objective_ST', 'Objective_ST', (['optuna_dir', 'data_params', 'data_train', 'data_val'], {'data_test': 'None'}), '(optuna_dir, data_params, data_train, data_val, data_test=None)\n', (2473, 2536), False, 'from PCM.optuna import Objective_ST, Objective_ST_ext, Objective_MT, Objective_MT_withPRT\n'), ((1483, 1511), 'numpy.random.rand', 'np.random.rand', (['N_train', '(512)'], {}), '(N_train, 512)\n', (1497, 1511), True, 'import numpy as np\n'), ((1688, 1714), 'numpy.random.rand', 'np.random.rand', (['N_val', '(512)'], {}), '(N_val, 512)\n', (1702, 1714), True, 'import numpy as np\n'), ((2650, 2666), 'argparse.Namespace', 'Namespace', ([], {}), '(**par)\n', (2659, 2666), False, 'from argparse import Namespace\n'), ((3202, 3281), 'PCM.optuna.Objective_ST_ext', 'Objective_ST_ext', (['optuna_dir', 'data_params', 'data_train', 'data_val'], {'data_test': 'None'}), '(optuna_dir, data_params, data_train, data_val, data_test=None)\n', (3218, 3281), False, 'from PCM.optuna import Objective_ST, Objective_ST_ext, Objective_MT, Objective_MT_withPRT\n'), ((5047, 5093), 'optuna.pruners.MedianPruner', 'optuna.pruners.MedianPruner', ([], {'n_warmup_steps': '(10)'}), '(n_warmup_steps=10)\n', (5074, 5093), False, 'import optuna\n'), ((3550, 3566), 'argparse.Namespace', 'Namespace', ([], {}), '(**par)\n', (3559, 3566), False, 'from argparse import Namespace\n'), ((3915, 3990), 'PCM.optuna.Objective_MT', 'Objective_MT', (['optuna_dir', 'data_params', 'data_train', 'data_val'], {'data_test': 'None'}), '(optuna_dir, data_params, data_train, data_val, data_test=None)\n', (3927, 3990), False, 'from PCM.optuna import Objective_ST, Objective_ST_ext, Objective_MT, Objective_MT_withPRT\n'), ((4267, 4283), 'argparse.Namespace', 'Namespace', ([], {}), '(**par)\n', (4276, 4283), False, 'from argparse import Namespace\n'), ((4687, 4774), 'PCM.optuna.Objective_MT_withPRT', 'Objective_MT_withPRT', (['optuna_dir', 'data_params', 
'data_train', 'data_val'], {'data_test': 'None'}), '(optuna_dir, data_params, data_train, data_val,\n data_test=None)\n', (4707, 4774), False, 'from PCM.optuna import Objective_ST, Objective_ST_ext, Objective_MT, Objective_MT_withPRT\n')] |
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
import gym
import numpy as np
from dm_control import mjcf
from bisk.helpers import add_box, add_fwd_corridor
from bisk.single_robot import BiskSingleRobotWithBallEnv
log = logging.getLogger(__name__)
class BiskGoalWallEnv(BiskSingleRobotWithBallEnv):
    '''
    Goal wall shooting. In the dense-reward setting we allow for falling over
    since the reward is the negative distance to the closest goal.
    '''
    def __init__(self,
                 robot: str,
                 features: str,
                 init_distance: float,
                 touch_ball_reward: float):
        # init_distance shifts both the wall and the ball away from the robot.
        self.init_distance = init_distance
        super().__init__(robot, features)
        self.touch_ball_reward = touch_ball_reward
        if self.touch_ball_reward > 0:
            # Expose a binary "touched_ball" flag in the observation dict so
            # the agent can see whether the one-time touch bonus was earned.
            self.observation_space = gym.spaces.Dict([
                ('ball', self.observation_space.spaces['ball']),
                ('touched_ball',
                 gym.spaces.Box(low=0, high=1, shape=(1, ), dtype=np.float32)),
                ('observation', self.observation_space.spaces['observation']),
            ])
        # Cache geom handles used in contact checks every frame.
        self.ball_geom = self.p.named.model.body_geomadr['ball']
        self.wall_geom = self.p.named.model.geom_type.axes.row.names.index(
            'wall')

    def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
        # Build the scene: a goal wall, a do-not-cross line marker, goal
        # target(s) on the wall, and a sideline camera.
        # Add wall
        W = 3
        WH = 1
        WD = 4 + self.init_distance
        root.asset.add('material',
                       name='mat_wall',
                       reflectance=0.5,
                       shininess=1,
                       emission=0.5,
                       specular=1)
        root.worldbody.add('geom',
                           type='plane',
                           name='wall',
                           material='mat_wall',
                           xyaxes=[0, -1, 0, 0, 0, 1],
                           size=[W, WH, 1],
                           pos=[WD, 0, WH],
                           rgba=[0, 0.5, 0.1, 1])
        # Add a visual marker
        root.asset.add('texture',
                       name='tex_dnc',
                       builtin='checker',
                       width=50,
                       height=50,
                       rgb1=[0, 0, 0],
                       rgb2=[1, 0.8, 0],
                       type='2d')
        root.asset.add('material',
                       name='mat_dnc',
                       reflectance=0.5,
                       shininess=1,
                       specular=1,
                       texrepeat=[1, 10],
                       texuniform=False,
                       texture='tex_dnc')
        # The "line" site marks the zero-reward boundary checked in step().
        root.worldbody.add('site',
                           type='box',
                           name='line',
                           size=[
                               0.1,
                               W,
                               0.01,
                           ],
                           pos=[1.5 + self.init_distance, 0, 0.02],
                           material='mat_dnc')
        #rgba=[1, 0, 0, 0.3])
        # Add goals on wall
        if self.is_2d:
            # 2D robots get a single centered goal; the "b" sites are
            # slightly larger red backgrounds for visibility only.
            GP = WH
            root.worldbody.add('site',
                               type='ellipsoid',
                               name='goal',
                               material='mat_wall',
                               size=[
                                   0.01,
                                   0.4,
                                   0.4,
                               ],
                               pos=[WD, 0, GP],
                               rgba=[1, 1, 1, 1])
            root.worldbody.add('site',
                               type='ellipsoid',
                               name='goalb',
                               material='mat_wall',
                               size=[
                                   0.005,
                                   0.45,
                                   0.45,
                               ],
                               pos=[WD, 0, GP],
                               rgba=[1, 0, 0, 1])
        else:
            # 3D robots get two goals: goal1 (low/left, score 1) and
            # goal2 (high/right, score 2) — see step().
            root.worldbody.add('site',
                               type='ellipsoid',
                               name='goal1',
                               material='mat_wall',
                               size=[
                                   0.01,
                                   0.4,
                                   0.4,
                               ],
                               pos=[WD, -1, WH - 0.35],
                               rgba=[1, 1, 1, 1])
            root.worldbody.add('site',
                               type='ellipsoid',
                               name='goal1b',
                               material='mat_wall',
                               size=[
                                   0.005,
                                   0.45,
                                   0.45,
                               ],
                               pos=[WD, -1, WH - 0.35],
                               rgba=[1, 0, 0, 1])
            root.worldbody.add('site',
                               type='ellipsoid',
                               name='goal2',
                               material='mat_wall',
                               size=[
                                   0.01,
                                   0.4,
                                   0.4,
                               ],
                               pos=[WD, 1, WH + 0.35],
                               rgba=[1, 1, 1, 1])
            root.worldbody.add('site',
                               type='ellipsoid',
                               name='goal2b',
                               material='mat_wall',
                               size=[
                                   0.005,
                                   0.45,
                                   0.45,
                               ],
                               pos=[WD, 1, WH + 0.35],
                               rgba=[1, 0, 0, 1])
        # This is the camera we'll use by default
        euler = [80, -5, 0]
        if root.compiler.angle == 'radian':
            euler = [np.deg2rad(e) for e in euler]
        root.worldbody.add('camera',
                           name='sideline',
                           mode='fixed',
                           pos=[WD / 3, -9, 2],
                           euler=euler)
        super().init_sim(root, frameskip)

    def get_observation(self):
        # Augment the parent observation with the touch flag when the touch
        # bonus is enabled (mirrors the space defined in __init__).
        obs = super().get_observation()
        if self.touch_ball_reward > 0:
            obs['touched_ball'] = np.array([float(self.ball_touched)])
        return obs

    def reset_state(self) -> None:
        # Reset the episode: place the ball init_distance ahead of the robot,
        # slightly above the floor, and clear per-episode flags.
        super().reset_state()
        # Place ball
        ball_size = self.p.named.model.geom_size['ball'][0]
        if self.is_2d:
            self.p.named.data.qpos['ball-x'] += self.init_distance
            self.p.named.data.qpos['ball-z'] += ball_size + 0.1
        else:
            self.p.named.data.qpos['ball'][0] += self.init_distance
            self.p.named.data.qpos['ball'][2] += ball_size + 0.1
        # ball_yz records where the ball hit the wall (None until it does).
        self.ball_yz = None
        self.ball_touched = False

    def on_step_single_frame(self):
        # Per-frame contact bookkeeping: record the ball's y/z position the
        # moment it touches the wall, and latch whether the robot has ever
        # touched the ball.
        contact = self.p.data.contact
        ball_wall = (np.in1d(contact.geom1, self.wall_geom)
                     & np.in1d(contact.geom2, self.ball_geom))
        touching = contact.dist <= 0
        if np.any(ball_wall & touching):
            if self.is_2d:
                self.ball_yz = [0, self.p.named.data.qpos['ball-z'][0]]
            else:
                self.ball_yz = self.p.named.data.qpos['ball'][1:3].copy()
        if not self.ball_touched:
            for c in contact:
                names = self.p.named.model.name_geomadr.axes.row.names
                if names[c.geom1].startswith('ball') and names[
                        c.geom2].startswith('robot') and c.dist < 0:
                    self.ball_touched = True

    def step(self, action):
        # Advance one env step; score is decided the frame the ball hits the
        # wall: 1 for goal/goal1, 2 for goal2, 0 otherwise.
        self.ball_yz = None
        btbefore = self.ball_touched
        obs, reward, done, info = super().step(action)
        goal_hit = None
        goal_dists = []
        goal_sizes = []
        if self.ball_yz is not None:
            if self.is_2d:
                goals = ('goal', )
            else:
                goals = ('goal1', 'goal2')
            for g in goals:
                # Compare impact point to goal center in the wall plane (y/z).
                d = np.linalg.norm(self.ball_yz -
                                   self.p.named.data.site_xpos[g][1:3])
                goal_dists.append(d)
                goal_sizes.append(self.p.named.model.site_size[g][2])
                if d <= self.p.named.model.site_size[g][2]:
                    goal_hit = g
                    break
        score = 0
        if goal_hit == 'goal' or goal_hit == 'goal1':
            score = 1
        elif goal_hit == 'goal2':
            score = 2
        info['score'] = score
        reward = score
        # One-time bonus the first step in which the robot touches the ball.
        if self.touch_ball_reward > 0 and self.ball_touched != btbefore:
            reward += self.touch_ball_reward
        # Zero reward if we're beyond the line
        lpos = self.p.named.data.site_xpos['line', 'x']
        if self.robot_pos[0] > lpos:
            reward = 0
        # Once we've hit the wall we're done
        if self.ball_yz is not None:
            done = True
        if info.get('fell_over', False):
            reward = -1
            done = True
        return obs, reward, done, info
| [
"logging.getLogger",
"numpy.in1d",
"numpy.any",
"gym.spaces.Box",
"numpy.deg2rad",
"numpy.linalg.norm"
] | [((395, 422), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (412, 422), False, 'import logging\n'), ((7637, 7665), 'numpy.any', 'np.any', (['(ball_wall & touching)'], {}), '(ball_wall & touching)\n', (7643, 7665), True, 'import numpy as np\n'), ((7487, 7525), 'numpy.in1d', 'np.in1d', (['contact.geom1', 'self.wall_geom'], {}), '(contact.geom1, self.wall_geom)\n', (7494, 7525), True, 'import numpy as np\n'), ((7549, 7587), 'numpy.in1d', 'np.in1d', (['contact.geom2', 'self.ball_geom'], {}), '(contact.geom2, self.ball_geom)\n', (7556, 7587), True, 'import numpy as np\n'), ((6395, 6408), 'numpy.deg2rad', 'np.deg2rad', (['e'], {}), '(e)\n', (6405, 6408), True, 'import numpy as np\n'), ((8602, 8668), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.ball_yz - self.p.named.data.site_xpos[g][1:3])'], {}), '(self.ball_yz - self.p.named.data.site_xpos[g][1:3])\n', (8616, 8668), True, 'import numpy as np\n'), ((1150, 1209), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(1,)', 'dtype': 'np.float32'}), '(low=0, high=1, shape=(1,), dtype=np.float32)\n', (1164, 1209), False, 'import gym\n')] |
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, unicode_literals,
print_function)
import json
import logging
import os
import re
import subprocess
import warnings
import dlib
import numpy
import pathlib2
import six
import skimage
import skimage.color
import skimage.exposure
import skimage.feature
import skimage.io
import skimage.transform
import zbar
from PIL import Image
from pycolorname.pantone.pantonepaint import PantonePaint
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from file_metadata.generic_file import GenericFile
from file_metadata.utilities import (DictNoNone, app_dir, bz2_decompress,
download, to_cstr, memoized, DATA_PATH)
# A Decompression Bomb is a small compressed image file which when decompressed
# uses a uge amount of RAM. For example, a monochrome PNG file with 100kx100k
# pixels. This tells PIL to make this warning into an error.
warnings.simplefilter('error', Image.DecompressionBombWarning)
class ImageFile(GenericFile):
mimetypes = ()
def config(self, key, new_defaults=()):
defaults = {
"max_decompressed_size": int(1024 ** 3 / 4 / 3) # In bytes
}
defaults.update(dict(new_defaults)) # Update the defaults from child
return super(ImageFile, self).config(key, new_defaults=defaults)
@classmethod
def create(cls, *args, **kwargs):
cls_file = cls(*args, **kwargs)
mime = cls_file.mime()
_type, subtype = mime.split('/', 1)
if mime == 'image/jpeg':
from file_metadata.image.jpeg_file import JPEGFile
return JPEGFile.create(*args, **kwargs)
elif _type in ('image', 'application') and subtype == 'x-xcf':
from file_metadata.image.xcf_file import XCFFile
return XCFFile.create(*args, **kwargs)
elif mime == 'image/tiff':
from file_metadata.image.tiff_file import TIFFFile
return TIFFFile.create(*args, **kwargs)
elif cls_file.is_type('svg'):
from file_metadata.image.svg_file import SVGFile
return SVGFile.create(*args, **kwargs)
return cls(*args, **kwargs)
def is_type(self, key):
if key == 'alpha':
return self.fetch('pillow').mode in ('LA', 'RGBA')
return super(ImageFile, self).is_type(key)
    @memoized
    def fetch(self, key=''):
        """Fetch (and memoize) a derived representation of the image.

        Supported keys: 'filename_raster' (path to a raster version),
        'filename_zxing' (that path as a file:// URI), 'ndarray' (decoded
        pixel array), 'ndarray_grey' (uint8 greyscale), 'ndarray_hsv'
        (uint8 HSV of the alpha-blended image), 'ndarray_noalpha'
        (alpha-blended onto white if the image has alpha), and 'pillow'
        (an open PIL image, registered for later cleanup). Any other key
        defers to the parent class.
        """
        if key == 'filename_raster':
            # A raster filename holds the file in a raster graphic format
            return self.fetch('filename')
        elif key == 'filename_zxing':
            return pathlib2.Path(self.fetch('filename_raster')).as_uri()
        elif key == 'ndarray':
            # Honor the configured decompression-bomb limit (see config()).
            Image.MAX_IMAGE_PIXELS = self.config('max_decompressed_size')
            try:
                image_array = skimage.io.imread(self.fetch('filename_raster'))
                if image_array.shape == (2,):
                    # Assume this is related to
                    # https://github.com/scikit-image/scikit-image/issues/2154
                    return image_array[0]
                return image_array
            except Image.DecompressionBombWarning:
                logging.warn('The file "{0}" contains a lot of pixels and '
                             'can take a lot of memory when decompressed. '
                             'To allow larger images, modify the '
                             '"max_decompressed_size" config.'
                             .format(self.fetch('filename')))
                # Use empty array as the file cannot be read.
                return numpy.ndarray(0)
        elif key == 'ndarray_grey':
            # img_as_ubyte warns about precision loss; silence it.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                return skimage.img_as_ubyte(
                    skimage.color.rgb2grey(self.fetch('ndarray')))
        elif key == 'ndarray_hsv':
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                return skimage.img_as_ubyte(
                    skimage.color.rgb2hsv(self.fetch('ndarray_noalpha')))
        elif key == 'ndarray_noalpha':
            if self.is_type('alpha'):
                return self.alpha_blend(self.fetch('ndarray'))
            return self.fetch('ndarray')
        elif key == 'pillow':
            # Track the open handle so the file object is closed on cleanup.
            pillow_img = Image.open(self.fetch('filename_raster'))
            self.closables.append(pillow_img)
            return pillow_img
        return super(ImageFile, self).fetch(key)
@staticmethod
def alpha_blend(img, background=255):
"""
Take an image, assume the last channel is a alpha channel and remove it
by using the appropriate background.
:param img: The image to alpha blend into given background.
:param background: The background color to use when alpha blending.
A scalar is expected, which is used for all
the channels.
"""
alpha = img[..., -1] / 255.0
channels = img[..., :-1]
new_img = numpy.zeros_like(channels)
for ichan in range(channels.shape[-1]):
new_img[..., ichan] = numpy.clip(
(1 - alpha) * background + alpha * channels[..., ichan],
a_min=0, a_max=255)
return new_img
    def analyze_geolocation(self, use_nominatim=True):
        """
        Find the location where the photo was taken initially. This is
        information which is got using the latitude/longitude in EXIF data.

        :param use_nominatim: Whether to use reverse geocoding from nominatim
                              or not.
        :return: dict with the keys:
             - Composite:Country - The country the photo was taken.
             - Composite:City - The city the photo was taken.
        """
        exif = self.exiftool()
        data = {}

        def dms2dec(dms_str, sign=None):
            """
            Return decimal representation of DMS string: DDD deg MM' SS.SS"
            """
            dms_regex = r'(?P<deg>-?\d+) deg (?P<min>\d+)\' (?P<sec>\d+\.\d+)"'
            dms = re.match(dms_regex, dms_str.strip().lower()).groups()
            _deg, _min, _sec = map(float, dms)
            dec = _deg + _min / 60 + _sec / 3600
            # NOTE(review): when dms_str contains '-', _deg is already
            # negative but minutes/seconds are still added positively, and
            # `dec` is returned as-is — verify this matches the intended
            # DMS sign convention.
            if '-' in dms_str:  # Use negative sign if given
                return dec
            elif re.search('[sw]', dms_str.lower()):  # Use S/W if given
                return -dec
            elif ((isinstance(sign, (int, float)) and sign > 0) or
                  (isinstance(sign, six.string_types) and
                   sign.strip().lower().startswith('n'))):
                return dec
            elif ((isinstance(sign, (int, float)) and sign < 0) or
                  (isinstance(sign, six.string_types) and
                   sign.strip().lower().startswith('s'))):
                return -dec
            return dec

        # Prefer the first metadata group (EXIF, then XMP) that carries
        # both latitude and longitude.
        lat, lon = None, None
        for grp in ('EXIF', 'XMP'):
            lat_ref = exif.get(grp + ':GPSLatitudeRef', '')
            lon_ref = exif.get(grp + ':GPSLongitudeRef', '')
            lat_dms = exif.get(grp + ':GPSLatitude', '')
            lon_dms = exif.get(grp + ':GPSLongitude', '')
            if not (lat_dms and lon_dms):
                continue
            lat, lon = dms2dec(lat_dms, lat_ref), dms2dec(lon_dms, lon_ref)

        if lat is None or lon is None:
            return {}

        data = DictNoNone({'Composite:GPSLatitude': lat,
                           'Composite:GPSLongitude': lon})
        if use_nominatim:
            # Zoom levels: country = 0, megacity = 10, district = 10,
            # city = 13, village = 15, street = 16, house = 18
            url = ('http://nominatim.openstreetmap.org/reverse?format=json'
                   '&accept-language=en&lat={lat}&lon={lon}&zoom={zoom}'
                   .format(lat=lat, lon=lon, zoom=13))
            try:
                response = urlopen(url)
                location = json.loads(response.read().decode('utf-8'))
            except URLError as err:
                # Network failure: return the coordinates we already have.
                logging.warn('An issue occured while querying nominatim '
                             'with: ' + url)
                logging.exception(err)
                return data

            if isinstance(location, list) and len(location) == 0:
                return data  # No location found

            addr = location.get('address', {})
            data['Composite:GPSCountry'] = addr.get('country')
            data['Composite:GPSState'] = addr.get('state')
            data['Composite:GPSCity'] = addr.get('city')
        return data
    def analyze_color_calibration_target(self):
        """
        Find whether there is a color calibration strip (IT8-style bar) on
        the top or bottom of the image.
        :return: dict which may contain the keys:
            - Color:IT8TopBar / Color:IT8BottomBar - The number of distinct
              intensity steps counted in the suspected bar region.
            - Color:IT8TopBarGreyMSE / Color:IT8BottomBarGreyMSE - Mean
              square error of the remaining image against its greyscale
              version.
        """
        grey_array = self.fetch('ndarray_grey')
        image_array = self.fetch('ndarray')
        if grey_array is None:
            return {}
        # For the images we're testing, the IT8 bar takes about 20% of the
        # image and also in the 20% we need the mid area
        bary = int(0.2 * grey_array.shape[0])
        def bar_intensity(x):
            # Average a thin horizontal band (10% of the rows, at least 2)
            # from the vertical middle of the region, column by column.
            sampley = max(int(0.1 * x.shape[0]), 2)
            return numpy.mean(
                x[(x.shape[0] - sampley) // 2:(x.shape[0] + sampley) // 2,
                  :, ...],
                axis=0)
        topbar = bar_intensity(grey_array[:bary, :, ...])
        botbar = bar_intensity(grey_array[-bary:, :, ...])
        def _merge_near(arr):
            # Collapse runs of equal consecutive values into a single entry
            # so that each step of the bar is counted only once.
            out = []
            last_elem = arr[0]
            out.append(last_elem)
            for elem in arr[1:]:
                if elem != last_elem:
                    out.append(elem)
                    last_elem = elem
            return numpy.asarray(out)
        # Bottom bars seem to have smaller intensity because of the background
        # Hence, we set a smaller threshold for peaks in bottom bars.
        bot_spikes = _merge_near((numpy.diff(botbar)) > -2.5).sum()
        top_spikes = _merge_near((numpy.diff(topbar)) < 3).sum()
        top_grey_mse, bot_grey_mse = 0, 0
        if image_array.ndim == 3:
            for chan in range(image_array.shape[2]):
                top_grey_mse += (
                    (image_array[bary:, :, chan] -
                     grey_array[bary:]) ** 2).mean()
                # NOTE(review): ``[-bary`` below indexes a single row while
                # the top case uses the slice ``[bary:`` -- possibly a
                # missing ``:`` (i.e. ``[:-bary]``); confirm against the
                # intended behavior before changing.
                bot_grey_mse += (
                    (image_array[-bary, :, chan] -
                     grey_array[-bary]) ** 2).mean()
            top_grey_mse /= 3.0
            bot_grey_mse /= 3.0
        data = {}
        # Only report a bar when the step count is in the plausible band.
        if 15 < top_spikes < 25:
            data['Color:IT8TopBar'] = top_spikes
            data['Color:IT8TopBarGreyMSE'] = top_grey_mse
        if 15 < bot_spikes < 25:
            data['Color:IT8BottomBar'] = bot_spikes
            data['Color:IT8BottomBarGreyMSE'] = bot_grey_mse
        return data
def analyze_stereo_card(self):
"""
Find whether the given image is a stereo card or not.
"""
image_array = self.fetch('ndarray_grey')
if image_array is None:
return {}
def _full_histogram(img):
return numpy.histogram(img, bins=range(256))[0]
h, w = image_array.shape[:2]
# Remove corners as that's probably the edges and gradient etc.
roi = image_array[int(0.1 * h):int(0.9 * h),
int(0.1 * w):int(0.9 * w), ...]
_, width = roi.shape[:2]
left = roi[:, :width // 2]
right = roi[:, width // 2 + (width % 2):]
mean_square_err = ((left - right) ** 2).mean()
histogram_mse = (
((_full_histogram(left) - _full_histogram(right)) ** 2).mean() /
left.size)
return {'Misc:StereoCardMSE': mean_square_err,
'Misc:StereoCardHistogramMSE': histogram_mse}
    def analyze_color_info(self,
                           grey_shade_threshold=0.05,
                           freq_colors_threshold=0.1,
                           edge_ratio_gaussian_sigma=1):
        """
        Find the average RGB color of the image and compare with the existing
        Pantone color system to identify the color name.
        :param grey_shade_threshold:
            The threshold to select a grey shade in NumberOfGreyShades.
            Percent of the most frequent shade (Range from 0 to 1).
        :param freq_colors_threshold:
            The threshold to select a peak in PercentFrequentColors.
            Percent of the most frequent shade (Range from 0 to 1).
        :param edge_ratio_gaussian_sigma:
            The sigma to use in gaussian blurring in Canny edge detection
            for EdgeRatio.
        :return: dict with the keys:
            - Color:ClosestLabeledColorRGB - The closest RGB value of the
                color found in the Pantone color palette.
            - Color:ClosestLabeledColor - The name of the closest color
                found in the Pantone color palette.
            - Color:AverageRGB - The average RGB value of the image.
            - Color:NumberOfGreyShades - The number of grey shades that are
                present more than a threshold percent of the most popular
                greyscale in a greyscale image with intensities from 0 - 255.
            - Color:PercentFrequentColors - The ratio of the number of colors
                which occur frequently to the number of colors in the
                palette.
            - Color:EdgeRatio - The percentage of pixels in the picture where
                edges are found.
            - Color:MeanSquareErrorFromGrey - The mean square error of each
                pixel with respect to the greyscale equivalent image.
            - Color:UsesAlpha - True if the alpha channel is present and being
                used.
        """
        image_array = self.fetch('ndarray_noalpha')
        # Average color: handle animated (4D), RGB (3D), and greyscale (2D)
        # arrays; anything else is unsupported.
        if image_array.ndim == 4:  # Animated images
            mean_color = image_array.mean(axis=(0, 1, 2))
        elif image_array.ndim == 3 and image_array.shape[2] == 3:  # Static
            mean_color = image_array.mean(axis=(0, 1))
        elif image_array.ndim == 2:  # Greyscale images
            avg = image_array.mean()
            mean_color = (avg, avg, avg)
        else:
            msg = ('Unsupported image type in "analyze_color_info()". '
                   'Expected animated, greyscale, rgb, or rgba images. '
                   'Found an image with {0} dimensions and shape {1}. '
                   .format(image_array.ndim, image_array.shape))
            logging.warn(msg)
            return {}
        # Find the mean color and the closest color in the known palette
        closest_label, closest_color = PantonePaint().find_closest(mean_color)
        grey_array = self.fetch('ndarray_grey')
        def _full_histogram(img):
            # Full 0-255 intensity histogram of the given array.
            return numpy.histogram(img, bins=range(256))[0]
        if image_array.ndim == 3 or image_array.ndim == 2:
            # Find the edge ratio by applying the canny filter and finding
            # bright spots. Not applicable to animated images.
            scale = max(1.0, numpy.average(image_array.shape[:2]) / 500.0)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                img_shape = map(lambda x: int(x / scale), grey_array.shape[:2])
                grey_img = skimage.transform.resize(grey_array,
                                                     output_shape=img_shape,
                                                     preserve_range=True)
                edge_img = skimage.feature.canny(grey_img,
                                                 sigma=edge_ratio_gaussian_sigma)
            edge_ratio = (edge_img > 0).mean()
            # Find the number of grey shades in the image using the histogram.
            grey_hist = _full_histogram(grey_array)
            grey_hist_max = grey_shade_threshold * grey_hist.max()
            num_grey_shades = (grey_hist > grey_hist_max).sum()
        else:
            edge_ratio = None
            num_grey_shades = None
        # Find the peaks_percent using a histogram
        if image_array.ndim == 4:  # Animated images
            hist = {
                "red": _full_histogram(image_array[:, :, :, 0]),
                "green": _full_histogram(image_array[:, :, :, 1]),
                "blue": _full_histogram(image_array[:, :, :, 2])
            }
        elif image_array.ndim == 3 and image_array.shape[2] == 3:  # Static
            hist = {
                "red": _full_histogram(image_array[:, :, 0]),
                "green": _full_histogram(image_array[:, :, 1]),
                "blue": _full_histogram(image_array[:, :, 2])
            }
        elif image_array.ndim == 2:  # Greyscale images
            hist = {"grey": _full_histogram(image_array)}
        # Calculate peaks by finding the number of colors which occur
        # more than a given threshold. The threshold is chosen to be
        # `freq_colors_threshold` of the count of the most frequent color.
        hist_concat = numpy.concatenate(tuple(hist.values()))
        peaks_hist_max = freq_colors_threshold * hist_concat.max()
        peaks_percent = (hist_concat > peaks_hist_max).mean()
        # Mean square error against the greyscale version; trivially 0 for
        # an image that is already greyscale.
        blackwhite_mean_square_err = None
        if image_array.ndim == 2:  # Greyscale images
            blackwhite_mean_square_err = 0
        elif image_array.ndim == 3:
            blackwhite_mean_square_err = 0
            for chan in range(image_array.shape[2]):
                blackwhite_mean_square_err += (
                    (image_array[:, :, chan] - grey_array) ** 2).mean()
            blackwhite_mean_square_err /= image_array.shape[2]
        # The alpha channel counts as "used" when any pixel is not fully
        # opaque.
        uses_alpha = None
        nd_array = self.fetch('ndarray')
        if (self.is_type('alpha') and nd_array.ndim == 3 and
                nd_array.shape[2] == 4):
            uses_alpha = (nd_array[:, :, 3] < 255).any()
        return DictNoNone({
            'Color:ClosestLabeledColorRGB': closest_color,
            'Color:ClosestLabeledColor': closest_label,
            'Color:AverageRGB': tuple(round(i, 3) for i in mean_color),
            'Color:NumberOfGreyShades': num_grey_shades,
            'Color:PercentFrequentColors': peaks_percent,
            'Color:EdgeRatio': edge_ratio,
            'Color:MeanSquareErrorFromGrey': blackwhite_mean_square_err,
            'Color:UsesAlpha': uses_alpha})
@staticmethod
def _haarcascade(image, filename, directory=None, **kwargs):
"""
Use OpenCV's haarcascade classifiers to detect certain features.
:param image: Image to use when detecting with the haarcascade.
:param filename: The file to create the CascadeClassifier with.
:param directory: The directory of the haarcascade file.
:param kwagrs: Keyword args to pass to cascade's detectMultiScale().
:return: List of rectangles of the detected objects. A rect
is defined by an array with 4 values i the order:
left, top, width, height.
"""
warn_msg = ('HAAR Cascade analysis requires the optional dependencies '
'OpenCV and opencv-data to be installed.')
try:
import cv2
except ImportError:
logging.warn(warn_msg)
return []
haar_paths = [
os.path.abspath(os.path.join(
os.path.realpath(cv2.__file__),
*([os.pardir] * 4 + ['share', 'OpenCV', 'haarcascades']))),
os.path.abspath(os.path.join(
os.path.realpath(cv2.__file__),
*([os.pardir] * 4 + ['share', 'opencv', 'haarcascades'])))]
for _dir in [directory] + haar_paths:
if _dir is not None and os.path.exists(_dir):
directory = _dir
break
if directory is None:
logging.warn(warn_msg)
return []
cascade = cv2.CascadeClassifier(os.path.join(directory, filename),)
features = cascade.detectMultiScale(image, **kwargs)
return features
    def analyze_face_haarcascades(self):
        """
        Use opencv's haar cascade filters to identify faces, right eye, left
        eye, upper body, etc..
        :return: dict with the key:
            - OpenCV:Faces - A list with one dict per detected face. Each
              dict holds the ``position`` rect (left, top, width, height in
              original-image coordinates) and, when detected, the midpoints
              of ``eyes``, ``ears``, ``nose``, ``mouth``, plus a ``glasses``
              flag when the eye-with-glasses cascade matched both eyes.
        """
        try:
            import cv2  # noqa (unused import)
            from cv2 import cv
        except ImportError:
            logging.warn('HAAR Cascade analysis requires the optional '
                         'dependency OpenCV 2.x to be installed.')
            return {}
        image_array = self.fetch('ndarray_grey')
        if image_array.ndim == 3:
            logging.warn('Faces cannot be detected in animated images '
                         'using haarcascades yet.')
            return {}
        # The "scale" given here is relevant for the detection rate.
        scale = max(1.0, numpy.average(image_array.shape) / 500.0)
        # Equalize the histogram and make the size smaller
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            img_shape = map(lambda x: int(x / scale), image_array.shape)
            img = skimage.img_as_ubyte(
                skimage.exposure.equalize_hist(
                    skimage.transform.resize(image_array,
                                             output_shape=img_shape,
                                             preserve_range=True)))
        def haar(im, key, single=False, **kwargs):
            # Run the named cascade over `im`. With `single=True` the search
            # is restricted to the single most prominent object.
            cascades = {
                'frontal_face': 'haarcascade_frontalface_alt.xml',
                'profile_face': 'haarcascade_profileface.xml',
                'nested': 'haarcascade_eye_tree_eyeglasses.xml',
                'mouth': 'haarcascade_mcs_mouth.xml',
                'nose': 'haarcascade_mcs_nose.xml',
                'right_eye': 'haarcascade_righteye_2splits.xml',
                'left_eye': 'haarcascade_lefteye_2splits.xml',
                'left_ear': 'haarcascade_mcs_leftear.xml',
                'right_ear': 'haarcascade_mcs_rightear.xml',
                'upper_body': 'haarcascade_upperbody.xml',
                'lower_body': 'haarcascade_lowerbody.xml'}
            # Set some default kwargs
            kwargs['scaleFactor'] = kwargs.get('scaleFactor', 1.1)
            kwargs['minNeighbors'] = kwargs.get('minNeighbors', 2)
            kwargs['minSize'] = kwargs.get('minSize', (30, 30))
            flags = cv.CV_HAAR_SCALE_IMAGE
            if single:
                flags = (flags | cv.CV_HAAR_FIND_BIGGEST_OBJECT |
                         cv.CV_HAAR_DO_ROUGH_SEARCH)
            kwargs['flags'] = kwargs.get('flags', flags)
            return list(self._haarcascade(im, cascades[key], **kwargs))
        def drop_overlapping_regions(regions):
            # Of any set of mutually overlapping rects, keep only the
            # largest and yield the survivors.
            drop = set()
            # Sort regions by area (leftmost is smallest and dropped first)
            regions = sorted(regions, key=lambda x: x[-1] * x[-2])
            # overlap: Neither range is completely greater than the other
            overlap = (lambda x_min, x_width, y_min, y_width:
                       x_min <= y_min + y_width and y_min <= x_min + x_width)
            for i1, reg1 in enumerate(regions):
                for i2, reg2 in enumerate(regions[:i1]):
                    if (i2 not in drop and
                            overlap(reg1[0], reg1[2], reg2[0], reg2[2]) and
                            overlap(reg1[1], reg1[3], reg2[1], reg2[3])):
                        drop.add(i2)
            for i, reg in enumerate(regions):
                if i not in drop:
                    yield reg
        frontal = haar(img, 'frontal_face')
        profile = haar(img, 'profile_face')
        faces = list(drop_overlapping_regions(frontal + profile))
        if len(faces) == 0:
            return {}
        data = []
        for face in faces:
            # Scale the face rect back into original-image coordinates.
            scaled_face = list(map(lambda x: int(x * scale), face))
            fdata = {'position': {
                'left': scaled_face[0], 'top': scaled_face[1],
                'width': scaled_face[2], 'height': scaled_face[3]}}
            # Grow the face rect by an eighth on every side (clamped by
            # min/max) before searching for features inside it.
            roi = list(map(int, [
                max(0, face[0] - (face[2] / 8)),
                max(0, face[1] - (face[3] / 8)),
                min(img.shape[0], face[2] + (2 * face[2] / 8)),
                min(img.shape[1], face[3] + (2 * face[3] / 8))]))
            face_img = img[roi[1]:roi[1] + roi[3] - 1,
                           roi[0]:roi[0] + roi[2] - 1]
            def feat_mid(rect, offx, offy):
                # Midpoint of a feature rect, translated from face-ROI to
                # original-image coordinates.
                return (int(scale * (roi[0] + rect[0] + offx + rect[2] / 2)),
                        int(scale * (roi[1] + rect[1] + offy + rect[3] // 2)))
            # Eyes are only searched for in the top half of the face.
            eye_img = face_img[:roi[3] // 2, :]
            nested = list(drop_overlapping_regions(haar(eye_img, 'nested')))
            if len(nested) == 2:
                # The eye-with-glasses cascade matched both eyes.
                nested = sorted(nested, key=lambda x: x[0])
                fdata['eyes'] = (feat_mid(nested[0], 0, 0),
                                 feat_mid(nested[1], 0, 0))
                fdata['glasses'] = True
            else:
                eyes_found = []
                for eye in ['left_eye', 'right_eye']:
                    eye_feats = haar(eye_img, eye, single=True)
                    if len(eye_feats) == 1:
                        eyes_found.append(feat_mid(eye_feats[0], 0, 0))
                if len(eyes_found) > 0:
                    fdata['eyes'] = tuple(eyes_found)
            # Ears: search the middle 3/4 (vertically) of the face.
            ear_offy = roi[3] // 8
            ear_img = face_img[ear_offy:roi[3] * 7 // 8, :]
            ears_found = []
            for ear in ['left_ear', 'right_ear']:
                ear_feats = haar(ear_img, ear, single=True)
                if len(ear_feats) == 1:
                    ears_found.append(feat_mid(ear_feats[0], 0, ear_offy))
            if len(ears_found) > 0:
                fdata['ears'] = tuple(ears_found)
            # Nose: search the central window of the face.
            nose_offx, nose_offy = roi[2] // 4, roi[3] // 4
            nose_img = face_img[nose_offy:roi[3] * 3 // 4,
                                nose_offx:roi[2] * 3 // 4]
            nose_feats = haar(nose_img, 'nose', single=True)
            if len(nose_feats) == 1:
                fdata['nose'] = feat_mid(nose_feats[0], nose_offx, nose_offy)
            # Mouth: search the bottom half of the face.
            mouth_offy = roi[3] // 2
            mouth_img = face_img[mouth_offy:, :]
            mouth_feats = haar(mouth_img, 'mouth', single=True)
            if len(mouth_feats) == 1:
                fdata['mouth'] = feat_mid(mouth_feats[0], 0, mouth_offy)
            data.append(fdata)
        return {'OpenCV:Faces': data}
    def analyze_facial_landmarks(self,
                                 with_landmarks=True,
                                 detector_upsample_num_times=0):
        """
        Use ``dlib`` to find the facial landmarks and also detect pose.
        Note: It works only for frontal faces, not for profile faces, etc.
        :param with_landmarks:
            Whether to detect the facial landmarks or not. This also computes
            the location of the other facial features like the nose, mouth,
            and eyes.
        :param detector_upsample_num_times:
            The number of times to upscale the image by when detecting faces.
        :return: dict with the keys:
            - dlib:Faces - A dictionary with information about the face:
                - position - Dict with corner information having the keys
                    left, right, top, bottom.
                - score - A score given on the probability of the given
                    feature being a face.
                If the kwarg `with_landmarks` is provided, it also gives the
                following information:
                - nose - Location of the center of the nose.
                - left eye - Location of the center of the left eye.
                - right eye - Location of the center of the right eye.
                - mouth - Location of the center of the mouth.
        """
        image_array = self.fetch('ndarray_noalpha')
        if (image_array.ndim == 4 or
                (image_array.ndim == 3 and image_array.shape[2] != 3)):
            logging.warn('Facial landmarks of animated images cannot be '
                         'detected yet.')
            return {}
        # The 68-point shape-predictor model is downloaded on first use and
        # cached in the user data directory.
        predictor_dat = 'shape_predictor_68_face_landmarks.dat'
        predictor_arch = predictor_dat + '.bz2'
        dat_path = app_dir('user_data_dir', predictor_dat)
        arch_path = app_dir('user_data_dir', predictor_arch)
        if with_landmarks and not os.path.exists(dat_path):
            logging.warn('Downloading the landmark data file for facial '
                         'landmark detection. Hence, the '
                         'first run may take longer than normal.')
            url = 'http://sourceforge.net/projects/dclib/files/dlib/v18.10/{0}'
            download(url.format(predictor_arch), arch_path)
            bz2_decompress(arch_path, dat_path)
        detector = dlib.get_frontal_face_detector()
        # TODO: Get orientation data from ``orient_id`` and use it.
        faces, scores, orient_id = detector.run(
            image_array,
            upsample_num_times=detector_upsample_num_times)
        if len(faces) == 0:
            return {}
        if with_landmarks:
            predictor = dlib.shape_predictor(to_cstr(dat_path))
        data = []
        for face, score in zip(faces, scores):
            fdata = {
                'position': {'left': face.left(),
                             'top': face.top(),
                             'width': face.right() - face.left() + 1,
                             'height': face.bottom() - face.top() + 1},
                'score': score}
            # dlib's shape detector uses the ibug dataset to detect shape.
            # More info at: http://ibug.doc.ic.ac.uk/resources/300-W/
            if with_landmarks:
                shape = predictor(image_array, face)
                def tup(point):
                    # (x, y) tuple of a dlib point.
                    return point.x, point.y
                def tup2(pt1, pt2):
                    # Integer midpoint of two dlib points.
                    return int((pt1.x + pt2.x) / 2), int((pt1.y + pt2.y) / 2)
                # NOTE(review): ``shape.part()`` is 0-indexed while the ibug
                # landmark numbering referenced in the comments below is
                # 1-based (so ``part(34)`` is ibug point 35) -- confirm the
                # intended indices.
                # Point 34 is the tip of the nose
                fdata['nose'] = tup(shape.part(34))
                # Point 40 and 37 are the two corners of the left eye
                # Point 46 and 43 are the two corners of the right eye
                fdata['eyes'] = (tup2(shape.part(40), shape.part(37)),
                                 tup2(shape.part(46), shape.part(43)))
                # Point 49 and 55 are the two outer corners of the mouth
                fdata['mouth'] = tup2(shape.part(49), shape.part(55))
            data.append(fdata)
        return {'dlib:Faces': data}
def analyze_barcode_zxing(self):
"""
Use ``zxing`` to find barcodes, qr codes, data matrices, etc.
from the image.
:return: dict with the keys:
- zxing:Barcodes - An array containing information about barcodes.
Each barcode is encoded to a dictionary with the keys:
- format - The format of the barcode. Example: QR_CODE,
CODABAR, DATA_MATRIX, etc.
- data - The text data that is encdoded in the barcode.
- bounding box - A dictionary with left, width, top, height.
- points - The detection points of the barcode (4 points for
QR codes and Data matrices and 2 points for barcodes).
"""
image_array = self.fetch('ndarray')
if all(map(lambda x: x < 4, image_array.shape)):
# If the file is less than 4 pixels, it won't contain a barcode.
# Small files cause zxing to crash so, we just return empty.
return {}
if (image_array.ndim == 4 or
(image_array.ndim == 3 and
image_array.shape[2] not in (3, 4))):
logging.warn('Barcode analysis with zxing of animated images '
'or multi page images is not supported yet.')
return {}
filename = self.fetch('filename_zxing')
if filename is None:
return {}
try:
output = subprocess.check_output([
'java', '-cp', os.path.join(DATA_PATH, '*'),
'com.google.zxing.client.j2se.CommandLineRunner', '--multi',
filename],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
if 'java.io.IOException: Could not load file' in err.output:
logging.error(
"`java.io` is unable to read this file. Possibly the file "
"has invalid exifdata or is corrupt. This is required for "
"zxing's barcode analysis.")
else:
logging.error(err.output)
return {}
if 'No barcode found' in output:
return {}
barcodes = []
for section in output.split("\nfile:"):
lines = section.strip().splitlines()
_format = re.search(r'format:\s([^,]+)', lines[0]).group(1)
raw_result = lines[2]
parsed_result = lines[4]
num_pts = int(re.search(r'Found (\d+) result points.', lines[5])
.group(1))
points = []
float_re = r'(\d*[.])?\d+'
for i in range(num_pts):
pt = re.search(r'\(\s*{0}\s*,\s*{0}\s*\)'.format(float_re),
lines[6 + i])
point = float(pt.group(1)), float(pt.group(2))
points.append(point)
bbox = {}
if num_pts == 2: # left, right
l, r = [(int(i), int(j)) for (i, j) in points]
bbox = {"left": l[0], "top": l[1],
"width": r[0] - l[0] + 1, "height": r[1] - l[1] + 1}
elif num_pts == 4: # bottomLeft, topLeft, topRight, bottomRight
lb, lt, rt, rb = [(int(i), int(j)) for (i, j) in points]
bbox = {"left": min(lb[0], lt[0]),
"top": min(lt[1], rt[1]),
"width": max(rb[0] - lb[0], rt[0] - lt[0]),
"height": max(rb[1] - rt[1], lb[1] - lt[1])}
barcodes.append({'format': _format, 'points': points,
'raw_data': raw_result, 'data': parsed_result,
'bounding box': bbox})
return {'zxing:Barcodes': barcodes}
def analyze_barcode_zbar(self):
"""
Use ``zbar`` to find barcodes and qr codes from the image.
:return: dict with the keys:
- zbar:Barcodes - An array containing information about barcodes.
Each barcode is encoded to a dictionary with the keys:
- format - The format of the barcode. Example: QRCODE,
I25, etc.
- data - The text data that is encdoded in the barcode.
- bounding box - A dictionary with left, width, top, height.
- confidence - The quality of the barcode. The higher it is
the more accurate the detection is.
"""
image_array = self.fetch('ndarray_grey')
if image_array.ndim == 3:
logging.warn('Barcodes cannot be detected in animated images '
'using zbar.')
return {}
height, width = image_array.shape
zbar_img = zbar.Image(width, height, 'Y800', image_array.tobytes())
scanner = zbar.ImageScanner()
scanner.parse_config('enable')
if scanner.scan(zbar_img) == 0:
return {}
barcodes = []
for barcode in zbar_img:
p = numpy.array(barcode.location)
bbox = {"left": min(p[:, 0]), "top": min(p[:, 1]),
"width": max(p[:, 0]) - min(p[:, 0]),
"height": max(p[:, 1]) - min(p[:, 1])}
barcodes.append({'data': barcode.data,
'bounding box': bbox,
'confidence': barcode.quality,
'format': str(barcode.type)})
return {'zbar:Barcodes': barcodes}
| [
"file_metadata.utilities.DictNoNone",
"numpy.clip",
"file_metadata.utilities.bz2_decompress",
"file_metadata.image.svg_file.SVGFile.create",
"logging.exception",
"numpy.array",
"logging.error",
"re.search",
"numpy.mean",
"os.path.exists",
"file_metadata.image.tiff_file.TIFFFile.create",
"loggi... | [((999, 1061), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'Image.DecompressionBombWarning'], {}), "('error', Image.DecompressionBombWarning)\n", (1020, 1061), False, 'import warnings\n'), ((5128, 5154), 'numpy.zeros_like', 'numpy.zeros_like', (['channels'], {}), '(channels)\n', (5144, 5154), False, 'import numpy\n'), ((7508, 7581), 'file_metadata.utilities.DictNoNone', 'DictNoNone', (["{'Composite:GPSLatitude': lat, 'Composite:GPSLongitude': lon}"], {}), "({'Composite:GPSLatitude': lat, 'Composite:GPSLongitude': lon})\n", (7518, 7581), False, 'from file_metadata.utilities import DictNoNone, app_dir, bz2_decompress, download, to_cstr, memoized, DATA_PATH\n'), ((28359, 28398), 'file_metadata.utilities.app_dir', 'app_dir', (['"""user_data_dir"""', 'predictor_dat'], {}), "('user_data_dir', predictor_dat)\n", (28366, 28398), False, 'from file_metadata.utilities import DictNoNone, app_dir, bz2_decompress, download, to_cstr, memoized, DATA_PATH\n'), ((28419, 28459), 'file_metadata.utilities.app_dir', 'app_dir', (['"""user_data_dir"""', 'predictor_arch'], {}), "('user_data_dir', predictor_arch)\n", (28426, 28459), False, 'from file_metadata.utilities import DictNoNone, app_dir, bz2_decompress, download, to_cstr, memoized, DATA_PATH\n'), ((28929, 28961), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (28959, 28961), False, 'import dlib\n'), ((35549, 35568), 'zbar.ImageScanner', 'zbar.ImageScanner', ([], {}), '()\n', (35566, 35568), False, 'import zbar\n'), ((1699, 1731), 'file_metadata.image.jpeg_file.JPEGFile.create', 'JPEGFile.create', (['*args'], {}), '(*args, **kwargs)\n', (1714, 1731), False, 'from file_metadata.image.jpeg_file import JPEGFile\n'), ((5237, 5328), 'numpy.clip', 'numpy.clip', (['((1 - alpha) * background + alpha * channels[..., ichan])'], {'a_min': '(0)', 'a_max': '(255)'}), '((1 - alpha) * background + alpha * channels[..., ichan], a_min=0,\n a_max=255)\n', (5247, 5328), False, 
'import numpy\n'), ((9263, 9353), 'numpy.mean', 'numpy.mean', (['x[(x.shape[0] - sampley) // 2:(x.shape[0] + sampley) // 2, :, ...]'], {'axis': '(0)'}), '(x[(x.shape[0] - sampley) // 2:(x.shape[0] + sampley) // 2, :,\n ...], axis=0)\n', (9273, 9353), False, 'import numpy\n'), ((9796, 9814), 'numpy.asarray', 'numpy.asarray', (['out'], {}), '(out)\n', (9809, 9814), False, 'import numpy\n'), ((15592, 15656), 'skimage.feature.canny', 'skimage.feature.canny', (['grey_img'], {'sigma': 'edge_ratio_gaussian_sigma'}), '(grey_img, sigma=edge_ratio_gaussian_sigma)\n', (15613, 15656), False, 'import skimage\n'), ((19907, 19929), 'logging.warn', 'logging.warn', (['warn_msg'], {}), '(warn_msg)\n', (19919, 19929), False, 'import logging\n'), ((19992, 20025), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (20004, 20025), False, 'import os\n'), ((20663, 20751), 'logging.warn', 'logging.warn', (['"""Faces cannot be detected in animated images using haarcascades yet."""'], {}), "(\n 'Faces cannot be detected in animated images using haarcascades yet.')\n", (20675, 20751), False, 'import logging\n'), ((21007, 21032), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (21030, 21032), False, 'import warnings\n'), ((21046, 21077), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (21067, 21077), False, 'import warnings\n'), ((28101, 28176), 'logging.warn', 'logging.warn', (['"""Facial landmarks of animated images cannot be detected yet."""'], {}), "('Facial landmarks of animated images cannot be detected yet.')\n", (28113, 28176), False, 'import logging\n'), ((28533, 28674), 'logging.warn', 'logging.warn', (['"""Downloading the landmark data file for facial landmark detection. Hence, the first run may take longer than normal."""'], {}), "(\n 'Downloading the landmark data file for facial landmark detection. 
Hence, the first run may take longer than normal.'\n )\n", (28545, 28674), False, 'import logging\n'), ((28873, 28908), 'file_metadata.utilities.bz2_decompress', 'bz2_decompress', (['arch_path', 'dat_path'], {}), '(arch_path, dat_path)\n', (28887, 28908), False, 'from file_metadata.utilities import DictNoNone, app_dir, bz2_decompress, download, to_cstr, memoized, DATA_PATH\n'), ((31873, 31988), 'logging.warn', 'logging.warn', (['"""Barcode analysis with zxing of animated images or multi page images is not supported yet."""'], {}), "(\n 'Barcode analysis with zxing of animated images or multi page images is not supported yet.'\n )\n", (31885, 31988), False, 'import logging\n'), ((35288, 35362), 'logging.warn', 'logging.warn', (['"""Barcodes cannot be detected in animated images using zbar."""'], {}), "('Barcodes cannot be detected in animated images using zbar.')\n", (35300, 35362), False, 'import logging\n'), ((35742, 35771), 'numpy.array', 'numpy.array', (['barcode.location'], {}), '(barcode.location)\n', (35753, 35771), False, 'import numpy\n'), ((1883, 1914), 'file_metadata.image.xcf_file.XCFFile.create', 'XCFFile.create', (['*args'], {}), '(*args, **kwargs)\n', (1897, 1914), False, 'from file_metadata.image.xcf_file import XCFFile\n'), ((8017, 8029), 'six.moves.urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (8024, 8029), False, 'from six.moves.urllib.request import urlopen\n'), ((14727, 14741), 'pycolorname.pantone.pantonepaint.PantonePaint', 'PantonePaint', ([], {}), '()\n', (14739, 14741), False, 'from pycolorname.pantone.pantonepaint import PantonePaint\n'), ((15201, 15226), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (15224, 15226), False, 'import warnings\n'), ((15244, 15275), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (15265, 15275), False, 'import warnings\n'), ((15383, 15469), 'skimage.transform.resize', 'skimage.transform.resize', (['grey_array'], {'output_shape': 
'img_shape', 'preserve_range': '(True)'}), '(grey_array, output_shape=img_shape, preserve_range\n =True)\n', (15407, 15469), False, 'import skimage\n'), ((19305, 19327), 'logging.warn', 'logging.warn', (['warn_msg'], {}), '(warn_msg)\n', (19317, 19327), False, 'import logging\n'), ((19788, 19808), 'os.path.exists', 'os.path.exists', (['_dir'], {}), '(_dir)\n', (19802, 19808), False, 'import os\n'), ((20418, 20526), 'logging.warn', 'logging.warn', (['"""HAAR Cascade analysis requires the optional dependency OpenCV 2.x to be installed."""'], {}), "(\n 'HAAR Cascade analysis requires the optional dependency OpenCV 2.x to be installed.'\n )\n", (20430, 20526), False, 'import logging\n'), ((20892, 20924), 'numpy.average', 'numpy.average', (['image_array.shape'], {}), '(image_array.shape)\n', (20905, 20924), False, 'import numpy\n'), ((28495, 28519), 'os.path.exists', 'os.path.exists', (['dat_path'], {}), '(dat_path)\n', (28509, 28519), False, 'import os\n'), ((29289, 29306), 'file_metadata.utilities.to_cstr', 'to_cstr', (['dat_path'], {}), '(dat_path)\n', (29296, 29306), False, 'from file_metadata.utilities import DictNoNone, app_dir, bz2_decompress, download, to_cstr, memoized, DATA_PATH\n'), ((2032, 2064), 'file_metadata.image.tiff_file.TIFFFile.create', 'TIFFFile.create', (['*args'], {}), '(*args, **kwargs)\n', (2047, 2064), False, 'from file_metadata.image.tiff_file import TIFFFile\n'), ((8153, 8223), 'logging.warn', 'logging.warn', (["('An issue occured while querying nominatim with: ' + url)"], {}), "('An issue occured while querying nominatim with: ' + url)\n", (8165, 8223), False, 'import logging\n'), ((8272, 8294), 'logging.exception', 'logging.exception', (['err'], {}), '(err)\n', (8289, 8294), False, 'import logging\n'), ((14574, 14591), 'logging.warn', 'logging.warn', (['msg'], {}), '(msg)\n', (14586, 14591), False, 'import logging\n'), ((15138, 15174), 'numpy.average', 'numpy.average', (['image_array.shape[:2]'], {}), '(image_array.shape[:2])\n', (15151, 
15174), False, 'import numpy\n'), ((19432, 19462), 'os.path.realpath', 'os.path.realpath', (['cv2.__file__'], {}), '(cv2.__file__)\n', (19448, 19462), False, 'import os\n'), ((19598, 19628), 'os.path.realpath', 'os.path.realpath', (['cv2.__file__'], {}), '(cv2.__file__)\n', (19614, 19628), False, 'import os\n'), ((21259, 21345), 'skimage.transform.resize', 'skimage.transform.resize', (['image_array'], {'output_shape': 'img_shape', 'preserve_range': '(True)'}), '(image_array, output_shape=img_shape,\n preserve_range=True)\n', (21283, 21345), False, 'import skimage\n'), ((32221, 32249), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""*"""'], {}), "(DATA_PATH, '*')\n", (32233, 32249), False, 'import os\n'), ((32539, 32705), 'logging.error', 'logging.error', (['"""`java.io` is unable to read this file. Possibly the file has invalid exifdata or is corrupt. This is required for zxing\'s barcode analysis."""'], {}), '(\n "`java.io` is unable to read this file. Possibly the file has invalid exifdata or is corrupt. 
This is required for zxing\'s barcode analysis."\n )\n', (32552, 32705), False, 'import logging\n'), ((32797, 32822), 'logging.error', 'logging.error', (['err.output'], {}), '(err.output)\n', (32810, 32822), False, 'import logging\n'), ((33052, 33092), 're.search', 're.search', (['"""format:\\\\s([^,]+)"""', 'lines[0]'], {}), "('format:\\\\s([^,]+)', lines[0])\n", (33061, 33092), False, 'import re\n'), ((2183, 2214), 'file_metadata.image.svg_file.SVGFile.create', 'SVGFile.create', (['*args'], {}), '(*args, **kwargs)\n', (2197, 2214), False, 'from file_metadata.image.svg_file import SVGFile\n'), ((9999, 10017), 'numpy.diff', 'numpy.diff', (['botbar'], {}), '(botbar)\n', (10009, 10017), False, 'import numpy\n'), ((10067, 10085), 'numpy.diff', 'numpy.diff', (['topbar'], {}), '(topbar)\n', (10077, 10085), False, 'import numpy\n'), ((33199, 33249), 're.search', 're.search', (['"""Found (\\\\d+) result points."""', 'lines[5]'], {}), "('Found (\\\\d+) result points.', lines[5])\n", (33208, 33249), False, 'import re\n'), ((3660, 3676), 'numpy.ndarray', 'numpy.ndarray', (['(0)'], {}), '(0)\n', (3673, 3676), False, 'import numpy\n'), ((3730, 3755), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3753, 3755), False, 'import warnings\n'), ((3773, 3804), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3794, 3804), False, 'import warnings\n'), ((3969, 3994), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3992, 3994), False, 'import warnings\n'), ((4012, 4043), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4033, 4043), False, 'import warnings\n')] |
# Code Generating Discrete Legendre Orthogonal Polynomials and the
# Legendre Delay Network Basis
#
# <NAME>, December 2020
#
# The code in this file is licensed under the Creative Commons Zero license.
# To the extent possible under law, <NAME> has waived all copyright and
# related or neighboring rights to this code. You should have received a
# complete copy of the license along with this code. If not, please visit
#
# https://creativecommons.org/publicdomain/zero/1.0/legalcode-plain.
#
# This work is published from: Canada.
import math

import numpy as np
import scipy.linalg
## Utility functions
def fading_factorial(K, m):
    """Falling ("fading") factorial K * (K - 1) * ... * (K - m + 1), as
    defined in the Neuman and Schonbach paper. Returns 1 for m = 0."""
    result, term = 1, K
    for _ in range(m):
        result *= term
        term -= 1
    return result
def nCr(n, r):
    """
    Binomial coefficient (n choose r; nCr is what my trusty pocket
    calculator calls it).

    Uses math.comb for the common non-negative case (exact integer
    arithmetic at C speed); falls back to the falling-factorial ratio so
    the generalized binomial values of the original implementation are
    preserved for negative arguments.
    """
    if n >= 0 and r >= 0:
        return math.comb(n, r)
    return fading_factorial(n, r) // \
        fading_factorial(r, r)
## Polynomial generation
def _shift_polys(P, domain=(0, 1), window=(-1, 1)):
"""
Shifts and scales a polynomial basis from the source onto the target window
"""
from fractions import Fraction
s0, s1 = Fraction(domain[0]), Fraction(domain[1])
t0, t1 = Fraction(window[0]), Fraction(window[1])
a = (t1 - t0) / (s1 - s0)
b = t0 - a * s0
print(a, b)
Pout = np.zeros(P.shape)
for i in range(P.shape[0]): # Polynomial basis index
for k in range(P.shape[1]): # Coefficient index
beta = Fraction(0)
for n in range(k, P.shape[1]):
beta += nCr(n, k) * Fraction(P[i, n]) * (a**k) * (b**(n - k))
Pout[i, k] = beta
return Pout
def _mk_poly_basis(q, fun):
P = np.zeros((q, q))
for i in range(q):
p = fun([0] * i + [1])
P[i, :len(p)] = p
return P
def mk_power_poly_basis(q, domain=(0, 1), window=(-1, 1), offs=1.0, scale=0.5):
    """Monomial ("power") basis with a constant offset added to every
    polynomial, scaled and shifted from `domain` onto `window`."""
    basis = np.eye(q)
    basis[:, 0] = offs  # constant term of every polynomial
    return _shift_polys(scale * basis, domain=domain, window=window)
def mk_leg_poly_basis(q, domain=(0, 1), window=(-1, 1)):
    """Shifted Legendre polynomial basis (ascending coefficients)."""
    coeffs = _mk_poly_basis(q, np.polynomial.legendre.leg2poly)
    return _shift_polys(coeffs, domain=domain, window=window)
def mk_lag_poly_basis(q, domain=(0, 1), window=(0, 1)):
    """Shifted Laguerre polynomial basis (ascending coefficients)."""
    coeffs = _mk_poly_basis(q, np.polynomial.laguerre.lag2poly)
    return _shift_polys(coeffs, domain=domain, window=window)
def mk_cheb_poly_basis(q, domain=(0, 1), window=(-1, 1)):
    """Shifted Chebyshev polynomial basis (ascending coefficients)."""
    coeffs = _mk_poly_basis(q, np.polynomial.chebyshev.cheb2poly)
    return _shift_polys(coeffs, domain=domain, window=window)
## Generic LTI Code
def discretize_lti(dt, A, B):
    """
    Zero-order-hold (ZOH) discretization of the LTI system (A, B).

    Returns matrices (Ad, Bd) such that
        x[t + 1] = Ad x[t] + Bd u[t],
    i.e. the integration step dt is folded into the returned matrices.
    See https://en.wikipedia.org/wiki/Discretization for more information.
    """
    Ad = scipy.linalg.expm(dt * A)
    eye = np.eye(A.shape[0])
    Bd = np.linalg.solve(A, (Ad - eye) @ B)
    return Ad, Bd
def reconstruct_lti(H,
                    T=1.0,
                    dampen=False,
                    return_discrete=False,
                    rcond=1e-2,
                    dampen_fac=1.0):
    """
    Given a discrete q x N basis transformation matrix H constructs a linear
    time-invariant dynamical system A, B that approximately has this basis
    as an impulse response over [0, T].
    This function can be thought of as the inverse of mk_lti_basis.

    H: Basis transformation matrix for which the LTI system should be
       reconstructed.
    dampen: Determines the dampening mode. If not False (default), dampen may
       be one of "lstsq" or "erasure", or a set containing both strings (both
       methods are used in that case). Setting dampen to True is equivalent
       to "erasure".
       In "lstsq" mode, an appropriately weighted dampening term is added to
       the system of equations, encouraging (but not guaranteeing) the
       system to converge to zero for t > T.
       In "erasure" mode the system is dampened by explicitly erasing
       information beyond the current time-window.
    return_discrete: If True, returns the discretized LTI system; does not
       attempt to convert the system into a continuous system.
    rcond: Regularisation term passed to the least-squares solver.
    dampen_fac: Factor used to weight the dampening term in the least-squares
       solution. Only relevant if `dampen` is set to "lstsq".
    """
    # Fetch the number of state dimensions and the number of samples
    q, N = H.shape

    # Canonicalise "dampen" into a set of mode strings
    if dampen and isinstance(dampen, str):
        dampen = {dampen}
    elif isinstance(dampen, bool):
        dampen = {"erasure"} if dampen else set()
    if (not isinstance(dampen,
                       set)) or (len(dampen - {"lstsq", "erasure"}) > 0):
        raise RuntimeError("Invalid value for \"dampen\"")

    # Construct the least squares problem. If "lstsq" dampening is active,
    # prepend a row of zeros to the targets; the dampening row is weighted to
    # maintain a ratio of 1 : (q - 1) between it and the remaining equations.
    if "lstsq" in dampen:
        X = H.T
        Y = np.concatenate((np.zeros((q, 1)), H[:, :-1]), axis=1).T
        w0 = np.sqrt(dampen_fac * (N - 1) / (q - 1))
        W = np.array([w0] + [1] * (N - 1))
    else:
        X = H[:, 1:].T
        Y = H[:, :-1].T
        W = np.ones(N - 1)

    # Estimate the discrete system At, Bt
    At = np.linalg.lstsq(W[:, None] * X, W[:, None] * Y, rcond=rcond)[0].T
    Bt = H[:, -1]

    # In "erasure" mode subtract the outer product of the longest-delay
    # encoder/decoder pair from the state in each timestep.
    # (Fixed: the original contained the redundant chained assignment
    # "enc, dec = enc, dec = ...".)
    if "erasure" in dampen:
        enc, dec = H[:, 0], np.linalg.pinv(H)[0]
        At = At - np.outer(enc, dec) @ At
        Bt = Bt - np.outer(enc, dec) @ Bt

    # If so desired, return the discrete system
    if return_discrete:
        return At, Bt

    # Undo discretization (this is the inverse of discretize_lti)
    A = np.real(scipy.linalg.logm(At)) * N / T
    B = np.linalg.solve(At - np.eye(q), A @ Bt) / T
    return A, B
def mk_lti_basis(A, B, N=None, N_smpl=None, normalize=True, from_discrete_lti=False):
    """
    Constructs a basis transformation matrix for the given LTI system
    (used internally by mk_ldn_basis).

    A: q x q LTI system feedback matrix
    B: q-dimensional LTI system input vector
    N: window width (defaults to q)
    N_smpl: actual number of samples to return (defaults to N)
    normalize: if True, each row is scaled to unit Euclidean norm
    from_discrete_lti: if True, (A, B) are treated as already-discrete
        matrices and no zero-order-hold discretization is performed
    """
    # A must be square; B must be a length-q vector
    assert (A.ndim == 2) and (A.shape[0] == A.shape[1])
    q = A.shape[0]
    B = B.flatten()
    assert (B.shape[0] == q)
    N = q if N is None else int(N)
    assert N > 0
    N_smpl = N if N_smpl is None else int(N_smpl)
    assert N_smpl > 0
    # Unroll the impulse response, most recent sample in the last column
    if from_discrete_lti:
        At, Bt = A, B
    else:
        At, Bt = discretize_lti(1.0 / N, A, B)
    res = np.zeros((q, N_smpl))
    power = np.eye(q)
    for i in range(N_smpl):
        res[:, N_smpl - i - 1] = power @ Bt
        power = At @ power
    if not normalize:
        return res
    return res / np.linalg.norm(res, axis=1)[:, None]
def mk_poly_basis_lti(P, rcond=None, dampen=False, reencoder_kw={}):
    """
    Converts a square, full-rank set of polynomials (ascending coefficients,
    forming a function basis over [0, 1]) into the corresponding LTI system
    (A, B). Optionally subtracts the delay re-encoder from A (`dampen`).
    """
    assert P.shape[0] == P.shape[1]
    assert np.linalg.matrix_rank(P, tol=1e-6) == P.shape[0]
    q = P.shape[0]
    # Coefficients of the derivative of each polynomial: shift down one
    # degree and scale by the (original) exponent
    shifted = np.concatenate((P[:, 1:], np.zeros((q, 1))), axis=1)
    PD = shifted * np.arange(1, q + 1)[None, :]
    # Express each derivative as a linear combination of the basis itself
    A = np.linalg.lstsq(P.T, PD.T, rcond=rcond)[0].T
    # B is the value of each polynomial at x = 0 (the x-intercept)
    B = P[:, 0]
    # If dampening is requested, subtract the delay re-encoder from A
    if dampen:
        return A - mk_poly_basis_reencoder(P, **reencoder_kw), B
    return A, B
def mk_poly_sys_lti(A, B, dampen=True, reencoder_kw={}):
    """Optionally dampens the LTI system (A, B) by subtracting its delay
    re-encoder from the feedback matrix; B is passed through unchanged."""
    if dampen:
        return A - mk_poly_sys_reencoder(A, B, **reencoder_kw), B
    return A, B
def mk_poly_basis_reencoder_hilbert(P):
    """Delay re-encoder of the polynomial basis P, computed analytically via
    the inverse Hilbert matrix (the inverse Gram matrix of the monomials
    over [0, 1])."""
    N, q = P.shape
    # Basis with the argument direction flipped (window (1, 0))
    P_rev = _shift_polys(P, window=(1, 0))
    # Polynomial values at 0 and 1; coefficients are ascending, so reverse
    # them for np.polyval (which expects highest degree first)
    y = np.array([np.polyval(P[i, ::-1], 0) for i in range(N)])
    e = np.array([np.polyval(P[i, ::-1], 1) for i in range(N)])
    # Inverse Hilbert matrix of the monomial inner products
    QInv = scipy.linalg.invhilbert(P.shape[1])
    d = np.linalg.solve(P.T, QInv @ np.linalg.inv(P_rev) @ y)
    return np.outer(e, d)
def mk_poly_basis_inverse_hilbert(P, rcond=1e-6):
    """
    Computes a polynomial basis (ascending coefficients) that is orthogonal
    to P over the interval [0, 1], using the inverse Hilbert matrix as the
    inverse Gram matrix of the monomials.
    """
    QInv = scipy.linalg.invhilbert(P.shape[1])
    return np.linalg.lstsq(P.T, QInv, rcond=rcond)[0]
def mk_poly_basis_reencoder_hilbert_2(P, rcond=1e-6):
    """Delay re-encoder: outer product of the basis values and the values of
    the (Hilbert-matrix based) orthogonal complement basis at x = 1."""
    N = P.shape[0]
    PI = mk_poly_basis_inverse_hilbert(P, rcond=rcond)
    e = np.array([np.polyval(P[i], 1) for i in range(N)])
    d = np.array([np.polyval(PI[i], 1) for i in range(N)])
    return np.outer(e, d)
def mk_poly_basis_reencoder(P, rcond=1e-6, dt=1e-4):
    """Delay re-encoder obtained numerically: sample the polynomial basis P
    on [0, 1] with step dt, then form the outer product of the last sample's
    encoder and its pseudo-inverse decoder."""
    n_smpl = int((1.0 + dt + 1e-12) / dt)  # 1e-12 guards against rounding
    xs = np.linspace(0, 1, n_smpl)
    H = np.array([np.polyval(row[::-1], xs) for row in P])
    HInv = np.linalg.pinv(H.T, rcond=rcond) / dt
    return np.outer(H[:, -1], HInv[:, -1])
def mk_poly_sys_reencoder(A, B, rcond=1e-6, dt=1e-4):
    """Delay re-encoder of the LTI system (A, B), computed by sampling its
    matrix-exponential impulse response on [0, 1] with step dt."""
    n_smpl = int((1.0 + dt + 1e-12) / dt)  # 1e-12 guards against rounding
    xs = np.linspace(0, 1, n_smpl)
    H = np.array([scipy.linalg.expm(A * x) @ B for x in xs]).T
    HInv = np.linalg.pinv(H.T, rcond=rcond) / dt
    return np.outer(H[:, -1], HInv[:, -1])
## Legendre Delay Network (LDN)
def mk_ldn_lti(q, dtype=float, rescale=False):
    """
    Generates the A, B matrices of the linear time-invariant (LTI) system
    underlying the LDN.
    The returned A is a q x q matrix, the returned B is a vector of length q.
    Divide the returned matrices by the desired window length theta.
    See Voelker's PhD thesis for more information:
    https://hdl.handle.net/10012/14625 (Section 6.1.3, p. 134)
    If `rescale` is True, a less numerically stable version of the LDN is
    returned that exactly traces out the shifted Legendre polynomials,
    including scaling factors.

    Fixed: the default dtype was `np.float`, a deprecated alias of the
    builtin `float` that was removed in NumPy 1.24; the builtin is used
    instead (identical values).
    """
    qs = np.arange(q)
    A = -np.ones((q, q), dtype=dtype)
    for d in range(1, q, 2):  # iterate over odd diagonals (set to +1)
        A[range(d, q), range(0, q - d)] = 1
    B = np.ones((q, ), dtype=dtype)
    B[1::2] = -1  # alternating signs
    if rescale:
        return (2 * qs[None, :] + 1) * A, B
    else:
        return (2 * qs[:, None] + 1) * A, \
               (2 * qs + 1) * B
def mk_leg_lti(q, dtype=float):
    """
    Assembles an LTI system that has the Legendre polynomials as an impulse
    response.

    Fixed: the default dtype was `np.float`, a deprecated alias of the
    builtin `float` that was removed in NumPy 1.24; the builtin is used
    instead (identical values).
    """
    qs = np.arange(q)
    A = np.zeros((q, q), dtype=dtype)
    for d in range(1, q, 2):  # iterate over odd diagonals (set to +1)
        A[range(d, q), range(0, q - d)] = 1
    B = np.ones((q, ), dtype=dtype)
    B[1::2] = -1  # alternating signs
    return (4 * qs[None, :] + 2) * A, B
## Chebyshev LTI code
def mk_cheb_lti(q, dtype=float):
    """
    Assembles an LTI system with the Chebyshev polynomials as an impulse
    response.

    Fixed: the default dtype was `np.float`, a deprecated alias of the
    builtin `float` that was removed in NumPy 1.24; the builtin is used
    instead (identical values).
    """
    qs = np.arange(q)
    A = np.zeros((q, q), dtype=dtype)
    for d in range(1, q, 2):  # iterate over odd diagonals
        A[range(d + 1, q), range(1, q - d)] = 2
        A[d, 0] = 1
    B = np.ones((q, ), dtype=dtype)
    B[1::2] = -1  # alternating signs
    return (2 * qs[:, None]) * A, B
## Legendre Delay Network Basis
def mk_ldn_basis_euler(q, N=None, normalize=True):
    """
    This function is the attempt at generating a LDN basis using naive Euler
    integration. This produces horribly wrong results.
    For reference only, DO NOT USE. Use `mk_ldn_basis` instead.
    """
    # N defaults to q; both are forced to int
    q, N = int(q), int(q) if N is None else int(N)
    A, B = mk_ldn_lti(q)
    # Forward-Euler step: x[t + 1] = (I + A / N) x[t] + (B / N) u[t]
    At, Bt = A / N + np.eye(q), B / N
    res = np.zeros((q, N))
    Aexp = np.eye(q)
    for i in range(N):
        # Impulse response, filled from the most recent sample backwards
        res[:, N - i - 1] = Aexp @ Bt
        Aexp = At @ Aexp
    # Optionally normalise each row (basis function) to unit norm
    return (res / np.linalg.norm(res, axis=1)[:, None]) if normalize else res
def mk_ldn_basis(q, N=None, normalize=True):
    """
    Generates the LDN basis for q basis vectors and N input samples. Set
    `normalize` to `False` to obtain the exact LDN impulse response, otherwise
    a normalized basis transformation matrix as defined in the TR is returned.
    """
    # Discretize the LDN LTI system (ZOH) and unroll its impulse response
    return mk_lti_basis(*mk_ldn_lti(q), N, normalize=normalize)
## Discrete Legendre Orthogonal Polynomial Basis and Related Code
def mk_leg_basis(q, N=None):
    """
    Creates a non-orthogonal basis by sampling the Legendre polynomials:
    each basis function is the integral of a Legendre polynomial (mapped
    onto the domain [1, 0]) over one of N equal sub-intervals of [0, 1];
    rows are normalised to unit norm.
    """
    q, N = int(q), int(q) if N is None else int(N)
    edges = np.linspace(0.0, 1.0, N + 1)
    lo, hi = edges[:-1], edges[1:]
    res = np.zeros((q, N))
    for n in range(q):
        # Antiderivative of the n-th Legendre polynomial on domain [1, 0]
        antider = np.polynomial.Legendre([0] * n + [1], [1, 0]).integ()
        res[n] = antider(hi) - antider(lo)
    return res / np.linalg.norm(res, axis=1)[:, None]
def mk_dlop_basis_linsys(q, N=None):
    """
    Constructs a matrix of "Discrete Legendre Orthogonal Polynomials" (DLOPs).
    q is the number of polynomials to generate, N is the number of samples for
    each polynomial.
    This is function is for reference only and should not be used. It is
    unstable for q > 30 (if N > q the function may be stable longer).
    This function uses a rather inefficient approach that directly relies on
    the definition of a Legendre Polynomial (a set of orthogonal Polynomials
    with Pi(1) = 1.0) to generate the basis.
    In each iteration i, this function adds a new polynomial of degree i to the
    set of already computed polynomials. The polynomial coefficients are
    determined by solving for coefficients that generate discrete sample points
    that are orthogonal to the already sampled basis vectors.
    The returned basis is made orthogonal by dividing by the norm of each
    discrete polynomial.
    """
    # Construct the sample points
    q, N = int(q), int(q) if N is None else int(N)
    qs, Ns = np.arange(q), np.arange(N)
    xs = 2.0 * Ns / (N - 1.0) - 1.0
    # Evaluate the individual monomials (this is a Vandermonde matrix)
    M = np.power(xs[:, None], qs[None, :])
    # Create the matrix. The first basis vector is "all ones"
    res = np.zeros((q, N))
    res[0] = 1.0
    # Solve for polynomial coefficients up to degree q such that the newly
    # added basis vector is orthogonal to the already created basis vectors,
    # and such that the last sample is one.
    for i in range(1, q):
        A = np.zeros((i + 1, i + 1))
        b = np.zeros((i + 1, ))
        b[-1] = 1.0  # right-hand side of the normalisation constraint
        # First i rows: orthogonality to the already computed basis vectors
        A[:i, :] = res[:i, :] @ M[:, :i + 1]
        # Last row: the polynomial value at the first sample must be one
        A[i, :] = M[0, :i + 1]
        coeffs = np.linalg.solve(A, b)
        res[i] = M[:, :i + 1] @ coeffs
    return res / np.linalg.norm(res, axis=1)[:, None]
def mk_dlop_basis_direct(q, N=None):
    """
    Slow, direct implementation of the DLOP basis according to
    Neuman & Schonbach (1974), "Discrete (Legendre) orthogonal
    polynomials -- a survey", International Journal for Numerical Methods
    in Engineering, 8(4), 743-770.
    https://doi.org/10.1002/nme.1620080406

    Note that this code relies on the fact that Python 3 always uses
    "big ints" ("long" in Python 2 terms). The integers used in this
    function will likely not fit into 32- or 64-bit integers; so be careful
    when porting this code to a different programing language.

    Fixed: removed a stray no-op expression statement (`res[m]`) that was
    left at the end of the outer loop.
    """
    q, N = int(q), int(q) if N is None else int(N)
    res = np.zeros((q, N))
    for m in range(q):
        # Use a common denominator instead of dividing each term by
        # fading_factorial(N - 1, j), where "j" is the inner loop variable.
        # Instead we divide all terms by fading_factorial(N - 1, m) and
        # multiply the terms by the additional terms that we're dividing by.
        # This way we can perform the final division numer / denom computing
        # the float output at the very end; everything up to this point is
        # precise integer arithmetic.
        denom = fading_factorial(N - 1, m)
        for K in range(N):
            numer = 0
            for j in range(m + 1):
                # Main equation from the paper. The last term corrects for the
                # common denominator.
                c = nCr(m, j) * nCr(m + j, j) * \
                    fading_factorial(K, j) * \
                    fading_factorial(N - 1 - j, m - j)
                numer += c if (j % 2 == 0) else -c
            res[m, K] = numer / denom
    return res / np.linalg.norm(res, axis=1)[:, None]
def mk_dlop_basis_recurrence(q, N=None):
    """
    Computes the DLOP basis using the Legendre recurrence relation as described
    in the section "Generation Scheme" of Neuman & Schonbach, 1974, pp. 758-759
    (see above for the full reference).
    Do NOT use this function. This function is numerically unstable and only
    included as a reference. Use `mk_dlop_basis` instead
    """
    # Fill the first rows
    q, N = int(q), int(q) if N is None else int(N)
    res = np.zeros((q, N))
    if q > 0:
        res[0] = np.ones(N)
    if q > 1:
        res[1] = np.linspace(1, -1, N)
    # Iterate over all columns
    for K in range(N):
        # Compute the initial coefficients for the recurrence relation;
        # the c's are updated incrementally through the differences δ0-δ2
        c0, c1, c2 = 0, N - 2 * K - 1, N - 1
        δ0, δ1, δ2 = N - 1, 2 * c1, N - 1
        # Iterate over all rows
        for m in range(2, q):
            δ0, δ1, δ2 = δ0 + 2, δ1, δ2 - 2
            c0, c1, c2 = c0 + δ0, c1 + δ1, c2 + δ2
            # Three-term recurrence on the previous two rows of this column
            res[m, K] = (c1 * res[m - 1, K] - c0 * res[m - 2, K]) / c2
    return res / np.linalg.norm(res, axis=1)[:, None]
def mk_dlop_basis(q, N=None, eps=1e-7):
    """
    Same as `mk_dlop_basis_recurrence`, but updates all columns at once using
    numpy.

    Fixed: the deprecated `np.float` and `np.bool` aliases (removed in
    NumPy 1.24) were replaced by the builtin `float`/`bool`; the computed
    values are unchanged.
    """
    # Fill the first rows
    q, N = int(q), int(q) if N is None else int(N)
    res = np.zeros((q, N))
    if q > 0:
        res[0] = np.ones(N) / np.sqrt(N)
    if q > 1:
        res[1] = np.linspace(1, -1, N) * np.sqrt((3 * (N - 1)) / (N * (N + 1)))
    # Pre-compute the coefficients c0, c1. See Section 4.4 of the TR.
    Ks = np.arange(0, N, dtype=float)[None, :]
    ms = np.arange(2, q, dtype=float)[:, None]
    α1s = np.sqrt( ((2 * ms + 1) * (N - ms)) \
               / ((2 * ms - 1) * (N + ms)))
    α2s = np.sqrt( ((2 * ms + 1) * (N - ms) * (N - ms + 1)) \
               / ((2 * ms - 3) * (N + ms) * (N + ms - 1)))
    β1s = α1s * ((2 * ms - 1) * (N - 2 * Ks - 1) / (ms * (N - ms)))
    β2s = α2s * ((ms - 1) * (N + ms - 1) / (ms * (N - ms)))
    # The mask is used to mask out columns that cannot become greater than one
    # again. This prevents numerical instability.
    mask = np.ones((q, N), dtype=bool)
    # Evaluate the recurrence relation
    for m in range(2, q):
        # A column K can only become greater than zero, if one of the
        # cells in the two previous rows was significantly greater than zero.
        mask[m] = np.logical_or(mask[m - 1], mask[m - 2])
        # Apply the recurrence relation
        res[m] = ( (β1s[m - 2]) * res[m - 1] \
                 - (β2s[m - 2]) * res[m - 2]) * mask[m]
        # Mask out cells that are smaller than some epsilon
        mask[m] = np.abs(res[m]) > eps
    return res
## Fourier and Cosine Basis
def mk_fourier_basis(q, N=None):
    """
    Generates the q x N matrix F that can be used to compute a Fourier-like
    transformation of a real-valued input vector of length N. The first
    result dimension is the DC offset; even result dimensions are the
    real (sine) Fourier coefficients, odd dimensions the imaginary (cosine)
    coefficients.
    Beware that this is only a Fourier transformation for q = N, and even
    then not a "proper" one, since the transformation matrix is normalized
    to be orthogonal; be careful when comparing against standard DFTs.
    """
    q, N = int(q), int(q) if N is None else int(N)
    rows = np.arange(q)[:, None]
    cols = np.arange(N)[None, :]
    freq = (rows + 1) // 2  # 0, 1, 1, 2, 2, ...
    phase = rows % 2        # 0, 1, 0, 1, 0, ...
    F = np.cos(2.0 * np.pi * freq * (cols + 0.5) / N + 0.5 * np.pi * phase)
    F[0] /= np.sqrt(2)  # DC row
    if q % 2 == 0 and N == q:
        F[-1] /= np.sqrt(2)  # Nyquist row for even, square matrices
    return F * np.sqrt(2 / N)
def mk_fourier_basis_derivative(q, N=None):
    """
    Returns the derivative of the Fourier series sampled by
    `mk_fourier_basis` (same normalisation conventions).
    """
    q, N = int(q), int(q) if N is None else int(N)
    rows = np.arange(q)[:, None]
    cols = np.arange(N)[None, :]
    freq = (rows + 1) // 2  # 0, 1, 1, 2, 2, ...
    phase = rows % 2        # 0, 1, 0, 1, 0, ...
    F = -2.0 * np.pi * freq * np.sin(
        2.0 * np.pi * freq * (cols + 0.5) / N + 0.5 * np.pi * phase)
    F[0] /= np.sqrt(2)  # DC row (identically zero after the freq factor)
    if q % 2 == 0 and N == q:
        F[-1] /= np.sqrt(2)  # Nyquist row for even, square matrices
    return F * np.sqrt(2 / N)
def mk_cosine_basis(q, N=None):
    """
    Generates the q x N matrix C computing the orthogonal DCT-II,
    everyone's favourite basis transformation. As with `mk_fourier_basis`,
    this only returns a canonical DCT basis for q = N.
    """
    q, N = int(q), int(q) if N is None else int(N)
    rows = np.arange(q)[:, None]
    cols = np.arange(N)[None, :]
    C = np.cos((cols + 0.5) / N * rows * np.pi)
    C[0] /= np.sqrt(2)  # DC row
    return C * np.sqrt(2 / N)
def mk_haar_basis(q, N=None):
    """
    Generates the Haar wavelets. Note that the resulting matrix is not
    orthogonal exactly if N is not a power of two.
    """
    # Recursively yields (start, centre, end) triples splitting [r0, r1)
    # into ever finer halves, interleaving left and right subdivisions.
    def subdiv(r0, r1):
        if r1 - r0 > 0:
            c = r0 + (r1 - r0 + 1) // 2
            yield (r0, c, r1)
            for L, R in zip(subdiv(r0, c), subdiv(c, r1)):
                yield L
                yield R
    q, N = int(q), int(q) if N is None else int(N)
    res = np.zeros((q, N))
    res[0] = 1  # coarsest basis function: the constant
    # NOTE: the loop variable deliberately shadows q; at most q - 1 further
    # wavelets (+1 on the left half, -1 on the right half) are generated.
    for q, (i0, i1, i2) in zip(range(1, q), subdiv(0, N)):
        res[q, i0:i1] = 1
        res[q, i1:i2] = -1
    return res / np.linalg.norm(res, axis=1)[:, None]
## Low-pass filtered bases
def lowpass_filter_basis(T, qp=None, filter_ctor=mk_fourier_basis):
    """
    Takes a basis T with shape q x N and returns a basis that additionally
    low-pass filters the N-dimensional input: the input is first represented
    by qp coefficients of the basis produced by `filter_ctor` (Fourier by
    default), which is equivalent to low-pass filtering T itself.
    For qp = N there is no potential for information loss and the result
    equals T.
    """
    q, N = T.shape
    qp = q if qp is None else int(qp)
    F = filter_ctor(qp, N)
    # Project onto the filter basis and back: T @ pinv(F) @ F
    return T @ np.linalg.pinv(F) @ F
| [
"numpy.linalg.matrix_rank",
"numpy.sqrt",
"numpy.linalg.pinv",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"numpy.arange",
"fractions.Fraction",
"numpy.linspace",
"numpy.polyval",
"numpy.linalg.lstsq",
"numpy.polynomial.Legendre",
"numpy.abs",
"numpy.eye",
"numpy.ones",
"numpy.out... | [((1352, 1369), 'numpy.zeros', 'np.zeros', (['P.shape'], {}), '(P.shape)\n', (1360, 1369), True, 'import numpy as np\n'), ((1721, 1737), 'numpy.zeros', 'np.zeros', (['(q, q)'], {}), '((q, q))\n', (1729, 1737), True, 'import numpy as np\n'), ((1921, 1930), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (1927, 1930), True, 'import numpy as np\n'), ((7492, 7513), 'numpy.zeros', 'np.zeros', (['(q, N_smpl)'], {}), '((q, N_smpl))\n', (7500, 7513), True, 'import numpy as np\n'), ((7525, 7534), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (7531, 7534), True, 'import numpy as np\n'), ((9706, 9722), 'numpy.zeros', 'np.zeros', (['(2, N)'], {}), '((2, N))\n', (9714, 9722), True, 'import numpy as np\n'), ((9820, 9834), 'numpy.outer', 'np.outer', (['e', 'd'], {}), '(e, d)\n', (9828, 9834), True, 'import numpy as np\n'), ((9989, 10009), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (10000, 10009), True, 'import numpy as np\n'), ((10144, 10175), 'numpy.outer', 'np.outer', (['H[:, -1]', 'HInv[:, -1]'], {}), '(H[:, -1], HInv[:, -1])\n', (10152, 10175), True, 'import numpy as np\n'), ((10331, 10351), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (10342, 10351), True, 'import numpy as np\n'), ((10485, 10516), 'numpy.outer', 'np.outer', (['H[:, -1]', 'HInv[:, -1]'], {}), '(H[:, -1], HInv[:, -1])\n', (10493, 10516), True, 'import numpy as np\n'), ((11165, 11177), 'numpy.arange', 'np.arange', (['q'], {}), '(q)\n', (11174, 11177), True, 'import numpy as np\n'), ((11327, 11353), 'numpy.ones', 'np.ones', (['(q,)'], {'dtype': 'dtype'}), '((q,), dtype=dtype)\n', (11334, 11353), True, 'import numpy as np\n'), ((11670, 11682), 'numpy.arange', 'np.arange', (['q'], {}), '(q)\n', (11679, 11682), True, 'import numpy as np\n'), ((11691, 11720), 'numpy.zeros', 'np.zeros', (['(q, q)'], {'dtype': 'dtype'}), '((q, q), dtype=dtype)\n', (11699, 11720), True, 'import numpy as np\n'), ((11832, 11858), 'numpy.ones', 'np.ones', (['(q,)'], 
{'dtype': 'dtype'}), '((q,), dtype=dtype)\n', (11839, 11858), True, 'import numpy as np\n'), ((11988, 12000), 'numpy.arange', 'np.arange', (['q'], {}), '(q)\n', (11997, 12000), True, 'import numpy as np\n'), ((12009, 12038), 'numpy.zeros', 'np.zeros', (['(q, q)'], {'dtype': 'dtype'}), '((q, q), dtype=dtype)\n', (12017, 12038), True, 'import numpy as np\n'), ((12174, 12200), 'numpy.ones', 'np.ones', (['(q,)'], {'dtype': 'dtype'}), '((q,), dtype=dtype)\n', (12181, 12200), True, 'import numpy as np\n'), ((12683, 12699), 'numpy.zeros', 'np.zeros', (['(q, N)'], {}), '((q, N))\n', (12691, 12699), True, 'import numpy as np\n'), ((12711, 12720), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (12717, 12720), True, 'import numpy as np\n'), ((13587, 13603), 'numpy.zeros', 'np.zeros', (['(q, N)'], {}), '((q, N))\n', (13595, 13603), True, 'import numpy as np\n'), ((14998, 15032), 'numpy.power', 'np.power', (['xs[:, None]', 'qs[None, :]'], {}), '(xs[:, None], qs[None, :])\n', (15006, 15032), True, 'import numpy as np\n'), ((15106, 15122), 'numpy.zeros', 'np.zeros', (['(q, N)'], {}), '((q, N))\n', (15114, 15122), True, 'import numpy as np\n'), ((16336, 16352), 'numpy.zeros', 'np.zeros', (['(q, N)'], {}), '((q, N))\n', (16344, 16352), True, 'import numpy as np\n'), ((17884, 17900), 'numpy.zeros', 'np.zeros', (['(q, N)'], {}), '((q, N))\n', (17892, 17900), True, 'import numpy as np\n'), ((18728, 18744), 'numpy.zeros', 'np.zeros', (['(q, N)'], {}), '((q, N))\n', (18736, 18744), True, 'import numpy as np\n'), ((19076, 19136), 'numpy.sqrt', 'np.sqrt', (['((2 * ms + 1) * (N - ms) / ((2 * ms - 1) * (N + ms)))'], {}), '((2 * ms + 1) * (N - ms) / ((2 * ms - 1) * (N + ms)))\n', (19083, 19136), True, 'import numpy as np\n'), ((19171, 19265), 'numpy.sqrt', 'np.sqrt', (['((2 * ms + 1) * (N - ms) * (N - ms + 1) / ((2 * ms - 3) * (N + ms) * (N +\n ms - 1)))'], {}), '((2 * ms + 1) * (N - ms) * (N - ms + 1) / ((2 * ms - 3) * (N + ms) *\n (N + ms - 1)))\n', (19178, 19265), True, 'import numpy as 
np\n'), ((19554, 19584), 'numpy.ones', 'np.ones', (['(q, N)'], {'dtype': 'np.bool'}), '((q, N), dtype=np.bool)\n', (19561, 19584), True, 'import numpy as np\n'), ((21032, 21097), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * freq * (Ns + 0.5) / N + 0.5 * np.pi * phase)'], {}), '(2.0 * np.pi * freq * (Ns + 0.5) / N + 0.5 * np.pi * phase)\n', (21038, 21097), True, 'import numpy as np\n'), ((21129, 21139), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21136, 21139), True, 'import numpy as np\n'), ((21670, 21680), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21677, 21680), True, 'import numpy as np\n'), ((22181, 22216), 'numpy.cos', 'np.cos', (['((Ns + 0.5) / N * qs * np.pi)'], {}), '((Ns + 0.5) / N * qs * np.pi)\n', (22187, 22216), True, 'import numpy as np\n'), ((22229, 22239), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (22236, 22239), True, 'import numpy as np\n'), ((22727, 22743), 'numpy.zeros', 'np.zeros', (['(q, N)'], {}), '((q, N))\n', (22735, 22743), True, 'import numpy as np\n'), ((1180, 1199), 'fractions.Fraction', 'Fraction', (['domain[0]'], {}), '(domain[0])\n', (1188, 1199), False, 'from fractions import Fraction\n'), ((1201, 1220), 'fractions.Fraction', 'Fraction', (['domain[1]'], {}), '(domain[1])\n', (1209, 1220), False, 'from fractions import Fraction\n'), ((1234, 1253), 'fractions.Fraction', 'Fraction', (['window[0]'], {}), '(window[0])\n', (1242, 1253), False, 'from fractions import Fraction\n'), ((1255, 1274), 'fractions.Fraction', 'Fraction', (['window[1]'], {}), '(window[1])\n', (1263, 1274), False, 'from fractions import Fraction\n'), ((5690, 5729), 'numpy.sqrt', 'np.sqrt', (['(dampen_fac * (N - 1) / (q - 1))'], {}), '(dampen_fac * (N - 1) / (q - 1))\n', (5697, 5729), True, 'import numpy as np\n'), ((5742, 5772), 'numpy.array', 'np.array', (['([w0] + [1] * (N - 1))'], {}), '([w0] + [1] * (N - 1))\n', (5750, 5772), True, 'import numpy as np\n'), ((5842, 5856), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (5849, 5856), True, 
'import numpy as np\n'), ((8085, 8120), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['P'], {'tol': '(1e-06)'}), '(P, tol=1e-06)\n', (8106, 8120), True, 'import numpy as np\n'), ((9524, 9563), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['P.T', 'QInv'], {'rcond': 'rcond'}), '(P.T, QInv, rcond=rcond)\n', (9539, 9563), True, 'import numpy as np\n'), ((10095, 10127), 'numpy.linalg.pinv', 'np.linalg.pinv', (['H.T'], {'rcond': 'rcond'}), '(H.T, rcond=rcond)\n', (10109, 10127), True, 'import numpy as np\n'), ((10436, 10468), 'numpy.linalg.pinv', 'np.linalg.pinv', (['H.T'], {'rcond': 'rcond'}), '(H.T, rcond=rcond)\n', (10450, 10468), True, 'import numpy as np\n'), ((11187, 11215), 'numpy.ones', 'np.ones', (['(q, q)'], {'dtype': 'dtype'}), '((q, q), dtype=dtype)\n', (11194, 11215), True, 'import numpy as np\n'), ((13500, 13528), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(N + 1)'], {}), '(0.0, 1.0, N + 1)\n', (13511, 13528), True, 'import numpy as np\n'), ((13544, 13572), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(N + 1)'], {}), '(0.0, 1.0, N + 1)\n', (13555, 13572), True, 'import numpy as np\n'), ((14855, 14867), 'numpy.arange', 'np.arange', (['q'], {}), '(q)\n', (14864, 14867), True, 'import numpy as np\n'), ((14869, 14881), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (14878, 14881), True, 'import numpy as np\n'), ((15375, 15399), 'numpy.zeros', 'np.zeros', (['(i + 1, i + 1)'], {}), '((i + 1, i + 1))\n', (15383, 15399), True, 'import numpy as np\n'), ((15412, 15430), 'numpy.zeros', 'np.zeros', (['(i + 1,)'], {}), '((i + 1,))\n', (15420, 15430), True, 'import numpy as np\n'), ((15545, 15566), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (15560, 15566), True, 'import numpy as np\n'), ((17932, 17942), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (17939, 17942), True, 'import numpy as np\n'), ((17974, 17995), 'numpy.linspace', 'np.linspace', (['(1)', '(-1)', 'N'], {}), '(1, -1, N)\n', (17985, 17995), True, 
'import numpy as np\n'), ((18974, 19005), 'numpy.arange', 'np.arange', (['(0)', 'N'], {'dtype': 'np.float'}), '(0, N, dtype=np.float)\n', (18983, 19005), True, 'import numpy as np\n'), ((19024, 19055), 'numpy.arange', 'np.arange', (['(2)', 'q'], {'dtype': 'np.float'}), '(2, q, dtype=np.float)\n', (19033, 19055), True, 'import numpy as np\n'), ((19817, 19856), 'numpy.logical_or', 'np.logical_or', (['mask[m - 1]', 'mask[m - 2]'], {}), '(mask[m - 1], mask[m - 2])\n', (19830, 19856), True, 'import numpy as np\n'), ((21153, 21163), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21160, 21163), True, 'import numpy as np\n'), ((21215, 21229), 'numpy.sqrt', 'np.sqrt', (['(2 / N)'], {}), '(2 / N)\n', (21222, 21229), True, 'import numpy as np\n'), ((21573, 21638), 'numpy.sin', 'np.sin', (['(2.0 * np.pi * freq * (Ns + 0.5) / N + 0.5 * np.pi * phase)'], {}), '(2.0 * np.pi * freq * (Ns + 0.5) / N + 0.5 * np.pi * phase)\n', (21579, 21638), True, 'import numpy as np\n'), ((21694, 21704), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21701, 21704), True, 'import numpy as np\n'), ((21756, 21770), 'numpy.sqrt', 'np.sqrt', (['(2 / N)'], {}), '(2 / N)\n', (21763, 21770), True, 'import numpy as np\n'), ((22255, 22269), 'numpy.sqrt', 'np.sqrt', (['(2 / N)'], {}), '(2 / N)\n', (22262, 22269), True, 'import numpy as np\n'), ((1504, 1515), 'fractions.Fraction', 'Fraction', (['(0)'], {}), '(0)\n', (1512, 1515), False, 'from fractions import Fraction\n'), ((5909, 5969), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['(W[:, None] * X)', '(W[:, None] * Y)'], {'rcond': 'rcond'}), '(W[:, None] * X, W[:, None] * Y, rcond=rcond)\n', (5924, 5969), True, 'import numpy as np\n'), ((8277, 8296), 'numpy.arange', 'np.arange', (['(1)', '(q + 1)'], {}), '(1, q + 1)\n', (8286, 8296), True, 'import numpy as np\n'), ((8395, 8434), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['P.T', 'PD.T'], {'rcond': 'rcond'}), '(P.T, PD.T, rcond=rcond)\n', (8410, 8434), True, 'import numpy as np\n'), ((9024, 9049), 
'numpy.polyval', 'np.polyval', (['P[i, ::-1]', '(0)'], {}), '(P[i, ::-1], 0)\n', (9034, 9049), True, 'import numpy as np\n'), ((9088, 9113), 'numpy.polyval', 'np.polyval', (['P[i, ::-1]', '(1)'], {}), '(P[i, ::-1], 1)\n', (9098, 9113), True, 'import numpy as np\n'), ((9767, 9786), 'numpy.polyval', 'np.polyval', (['P[i]', '(1)'], {}), '(P[i], 1)\n', (9777, 9786), True, 'import numpy as np\n'), ((9788, 9808), 'numpy.polyval', 'np.polyval', (['PI[i]', '(1)'], {}), '(PI[i], 1)\n', (9798, 9808), True, 'import numpy as np\n'), ((10028, 10054), 'numpy.polyval', 'np.polyval', (['P[i, ::-1]', 'xs'], {}), '(P[i, ::-1], xs)\n', (10038, 10054), True, 'import numpy as np\n'), ((12656, 12665), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (12662, 12665), True, 'import numpy as np\n'), ((13746, 13773), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {'axis': '(1)'}), '(res, axis=1)\n', (13760, 13773), True, 'import numpy as np\n'), ((15624, 15651), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {'axis': '(1)'}), '(res, axis=1)\n', (15638, 15651), True, 'import numpy as np\n'), ((17365, 17392), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {'axis': '(1)'}), '(res, axis=1)\n', (17379, 17392), True, 'import numpy as np\n'), ((18456, 18483), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {'axis': '(1)'}), '(res, axis=1)\n', (18470, 18483), True, 'import numpy as np\n'), ((18776, 18786), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (18783, 18786), True, 'import numpy as np\n'), ((18789, 18799), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (18796, 18799), True, 'import numpy as np\n'), ((18831, 18852), 'numpy.linspace', 'np.linspace', (['(1)', '(-1)', 'N'], {}), '(1, -1, N)\n', (18842, 18852), True, 'import numpy as np\n'), ((18855, 18891), 'numpy.sqrt', 'np.sqrt', (['(3 * (N - 1) / (N * (N + 1)))'], {}), '(3 * (N - 1) / (N * (N + 1)))\n', (18862, 18891), True, 'import numpy as np\n'), ((20082, 20096), 'numpy.abs', 'np.abs', (['res[m]'], {}), '(res[m])\n', (20088, 20096), 
True, 'import numpy as np\n'), ((20887, 20899), 'numpy.arange', 'np.arange', (['q'], {}), '(q)\n', (20896, 20899), True, 'import numpy as np\n'), ((20910, 20922), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (20919, 20922), True, 'import numpy as np\n'), ((21406, 21418), 'numpy.arange', 'np.arange', (['q'], {}), '(q)\n', (21415, 21418), True, 'import numpy as np\n'), ((21429, 21441), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (21438, 21441), True, 'import numpy as np\n'), ((22128, 22140), 'numpy.arange', 'np.arange', (['q'], {}), '(q)\n', (22137, 22140), True, 'import numpy as np\n'), ((22151, 22163), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (22160, 22163), True, 'import numpy as np\n'), ((22888, 22915), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {'axis': '(1)'}), '(res, axis=1)\n', (22902, 22915), True, 'import numpy as np\n'), ((23718, 23735), 'numpy.linalg.pinv', 'np.linalg.pinv', (['F'], {}), '(F)\n', (23732, 23735), True, 'import numpy as np\n'), ((3233, 3251), 'numpy.eye', 'np.eye', (['A.shape[0]'], {}), '(A.shape[0])\n', (3239, 3251), True, 'import numpy as np\n'), ((6190, 6207), 'numpy.linalg.pinv', 'np.linalg.pinv', (['H'], {}), '(H)\n', (6204, 6207), True, 'import numpy as np\n'), ((6229, 6247), 'numpy.outer', 'np.outer', (['enc', 'dec'], {}), '(enc, dec)\n', (6237, 6247), True, 'import numpy as np\n'), ((6271, 6289), 'numpy.outer', 'np.outer', (['enc', 'dec'], {}), '(enc, dec)\n', (6279, 6289), True, 'import numpy as np\n'), ((6533, 6542), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (6539, 6542), True, 'import numpy as np\n'), ((7649, 7676), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {'axis': '(1)'}), '(res, axis=1)\n', (7663, 7676), True, 'import numpy as np\n'), ((8239, 8255), 'numpy.zeros', 'np.zeros', (['(q, 1)'], {}), '((q, 1))\n', (8247, 8255), True, 'import numpy as np\n'), ((12825, 12852), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {'axis': '(1)'}), '(res, axis=1)\n', (12839, 12852), True, 'import numpy 
as np\n'), ((13640, 13685), 'numpy.polynomial.Legendre', 'np.polynomial.Legendre', (['([0] * n + [1])', '[1, 0]'], {}), '([0] * n + [1], [1, 0])\n', (13662, 13685), True, 'import numpy as np\n'), ((5637, 5653), 'numpy.zeros', 'np.zeros', (['(q, 1)'], {}), '((q, 1))\n', (5645, 5653), True, 'import numpy as np\n'), ((9274, 9295), 'numpy.linalg.inv', 'np.linalg.inv', (['P_flip'], {}), '(P_flip)\n', (9287, 9295), True, 'import numpy as np\n'), ((1595, 1612), 'fractions.Fraction', 'Fraction', (['P[i, n]'], {}), '(P[i, n])\n', (1603, 1612), False, 'from fractions import Fraction\n')] |
import torch
import numpy as np
def get_topic_diversity(beta, topk):
    """Print topic diversity: the fraction of distinct words among the
    top-`topk` words collected over every topic in `beta`.

    beta: (num_topics, vocab_size) array of per-topic word weights.
    topk: how many top words to take from each topic.
    """
    n_topics = beta.shape[0]
    top_words = np.zeros((n_topics, topk))
    for t in range(n_topics):
        # Indices of the topk largest weights, in descending order.
        top_words[t, :] = beta[t, :].argsort()[-topk:][::-1]
    n_distinct = np.unique(top_words).size
    TD = n_distinct / (topk * n_topics)
    print('Topic diversity is: {}'.format(TD))
def get_document_frequency(data, wi, wj=None):
    """Count document frequencies over a corpus of (1, n)-shaped documents.

    With only `wi`: return D(wi), the number of documents containing wi;
    single-word documents are skipped in this mode.
    With `wj`: return the pair (D(wj), D(wi, wj)) over all documents
    (single-word documents are included here).
    """
    if wj is None:
        count_wi = 0
        for doc in data:
            words = doc.squeeze(0)
            # One-word documents are ignored for single-word frequency.
            if len(words) == 1:
                continue
            words = words.squeeze()
            if wi in words:
                count_wi += 1
        return count_wi
    count_wj = 0
    count_joint = 0
    for doc in data:
        words = doc.squeeze(0)
        # Keep a one-word document as a singleton list so membership tests work.
        words = [words.squeeze()] if len(words) == 1 else words.squeeze()
        if wj in words:
            count_wj += 1
            if wi in words:
                count_joint += 1
    return count_wj, count_joint
def get_topic_coherence(beta, data, vocab):
    """Print the average NPMI-style coherence over the top-11 words of each topic.

    beta:  per-topic word weight matrix (one row per topic).
    data:  list of documents (arrays of word indices).
    vocab: index-to-word list.
    """
    n_docs = len(data)
    n_topics = len(beta)
    coherences = []
    for t in range(n_topics):
        # Top-11 word indices of this topic, most probable first.
        top_idx = list(beta[t].argsort()[-11:][::-1])
        top_words = [vocab[a] for a in top_idx]  # resolves indices to words (kept for parity; unused)
        topic_score = 0
        counter = 0
        for i, word in enumerate(top_idx):
            # D(w_i): documents containing the i-th top word.
            D_wi = get_document_frequency(data, word)
            pair_sum = 0
            for j in range(i + 1, len(top_idx)):
                # D(w_j) and the joint frequency D(w_i, w_j).
                D_wj, D_wi_wj = get_document_frequency(data, word, top_idx[j])
                if D_wi_wj == 0:
                    # Never co-occur: worst possible pair score.
                    f_wi_wj = -1
                else:
                    f_wi_wj = -1 + (np.log(D_wi) + np.log(D_wj) - 2.0 * np.log(n_docs)) / (np.log(D_wi_wj) - np.log(n_docs))
                pair_sum += f_wi_wj
            counter += 1
            topic_score += pair_sum
        coherences.append(topic_score)
    # `counter` ends at the top-word count of the last topic (11), matching all topics.
    TC = np.mean(coherences) / counter
    print('Topic coherence is: {}'.format(TC))
def nearest_neighbors(word, embeddings, vocab):
    """Return up to 20 vocabulary words ranked by cosine similarity to `word`.

    `embeddings` is a torch tensor with one row per vocab entry (row i is the
    vector for vocab[i]). The query word itself is included in the result,
    since it is always its own nearest neighbor.
    """
    vectors = embeddings.data.cpu().numpy()
    row = vocab.index(word)
    print('vectors: ', vectors.shape)
    query = vectors[row]
    print('query: ', query.shape)
    # Cosine similarity: dot products scaled by sqrt(|q|^2 * |v|^2) = |q||v|.
    similarity = vectors.dot(query).squeeze()
    norms = query.T.dot(query).squeeze() * np.sum(vectors ** 2, 1)
    similarity = similarity / np.sqrt(norms)
    ranked = list(similarity.argsort()[::-1])
    return [vocab[i] for i in ranked[:20]]
import nltk
from nltk.collocations import *
import matplotlib.pyplot as plt
import os
def bigrams(big_document):
    """Return frequent bigram collocations (at least 150 occurrences) from the
    tokenized documents, each joined into a single space-separated string.
    """
    stopwords = nltk.corpus.stopwords.words('english')
    # Domain-specific tokens also excluded from collocations.
    stopwords.extend(['percent', 'governor', 'dont'])
    # bigram_measures = nltk.collocations.BigramAssocMeasures()
    finder = BigramCollocationFinder.from_documents(big_document)
    # Drop short tokens and stop words before counting pairs.
    finder.apply_word_filter(lambda token: len(token) < 3 or token.lower() in stopwords)
    finder.apply_freq_filter(150)
    return [' '.join(pair) for pair in finder.ngram_fd.keys()]
def trigram(big_document):
    """Return frequent trigram collocations (at least 100 occurrences) from the
    tokenized documents, each joined into a single space-separated string.
    """
    stopwords = nltk.corpus.stopwords.words('english')
    # Domain-specific tokens also excluded from collocations.
    stopwords.extend(['percent', 'governor', 'dont'])
    # trigram_measures = nltk.collocations.TrigramAssocMeasures()
    finder = TrigramCollocationFinder.from_documents(big_document)
    # Drop short tokens and stop words before counting triples.
    finder.apply_word_filter(lambda token: len(token) < 3 or token.lower() in stopwords)
    finder.apply_freq_filter(100)
    return [' '.join(triple) for triple in finder.ngram_fd.keys()]
def replace_collocation(string, dict_collocation):
    """Replace each occurrence of a dict key in `string` with its mapped value.

    Replacements are applied sequentially in dict insertion order, so a later
    replacement may act on text produced by an earlier one.
    """
    for old, new in dict_collocation.items():
        string = string.replace(old, new)
    return string
def plot_word_cloud(text, filename='wordcloud.eps', format='eps',
    width=1000, height=500, background_color='white', figsize=(10,6), dpi=100, bbox_inches='tight'):
    """Render a word cloud from `text` (an iterable of token lists) and save it.

    The figure is written to os.path.join(PLOT_PATH, filename).
    NOTE(review): PLOT_PATH is assumed to be a module-level global -- it is not
    defined in this file's visible scope; confirm before use.
    """
    from wordcloud import WordCloud
    joined_text = (" ").join([word for line in text for word in line])
    cloud = WordCloud(width=width, height=height, background_color=background_color).generate(joined_text)
    fig = plt.figure(figsize=figsize)
    # Remove all margins so the cloud fills the canvas.
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
    plt.imshow(cloud)
    plt.axis("off")
    fig.tight_layout()
    plt.savefig(os.path.join(PLOT_PATH, filename), format=format, dpi=dpi, bbox_inches=bbox_inches)
| [
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.sqrt",
"nltk.corpus.stopwords.words",
"numpy.unique",
"numpy.log",
"os.path.join",
"wordcloud.WordCloud",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots_adjust"
] | [((114, 142), 'numpy.zeros', 'np.zeros', (['(num_topics, topk)'], {}), '((num_topics, topk))\n', (122, 142), True, 'import numpy as np\n'), ((2686, 2700), 'numpy.sqrt', 'np.sqrt', (['denom'], {}), '(denom)\n', (2693, 2700), True, 'import numpy as np\n'), ((3084, 3122), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (3111, 3122), False, 'import nltk\n'), ((3588, 3626), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (3615, 3626), False, 'import nltk\n'), ((4637, 4664), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4647, 4664), True, 'import matplotlib.pyplot as plt\n'), ((4669, 4722), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0)', 'right': '(1)', 'top': '(1)', 'bottom': '(0)'}), '(left=0, right=1, top=1, bottom=0)\n', (4688, 4722), True, 'import matplotlib.pyplot as plt\n'), ((4727, 4748), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {}), '(wordcloud)\n', (4737, 4748), True, 'import matplotlib.pyplot as plt\n'), ((4753, 4768), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4761, 4768), True, 'import matplotlib.pyplot as plt\n'), ((268, 285), 'numpy.unique', 'np.unique', (['list_w'], {}), '(list_w)\n', (277, 285), True, 'import numpy as np\n'), ((2259, 2270), 'numpy.mean', 'np.mean', (['TC'], {}), '(TC)\n', (2266, 2270), True, 'import numpy as np\n'), ((2652, 2675), 'numpy.sum', 'np.sum', (['(vectors ** 2)', '(1)'], {}), '(vectors ** 2, 1)\n', (2658, 2675), True, 'import numpy as np\n'), ((4808, 4841), 'os.path.join', 'os.path.join', (['PLOT_PATH', 'filename'], {}), '(PLOT_PATH, filename)\n', (4820, 4841), False, 'import os\n'), ((4529, 4601), 'wordcloud.WordCloud', 'WordCloud', ([], {'width': 'width', 'height': 'height', 'background_color': 'background_color'}), '(width=width, height=height, background_color=background_color)\n', 
(4538, 4601), False, 'from wordcloud import WordCloud\n'), ((1964, 1979), 'numpy.log', 'np.log', (['D_wi_wj'], {}), '(D_wi_wj)\n', (1970, 1979), True, 'import numpy as np\n'), ((1982, 1991), 'numpy.log', 'np.log', (['D'], {}), '(D)\n', (1988, 1991), True, 'import numpy as np\n'), ((1911, 1923), 'numpy.log', 'np.log', (['D_wi'], {}), '(D_wi)\n', (1917, 1923), True, 'import numpy as np\n'), ((1926, 1938), 'numpy.log', 'np.log', (['D_wj'], {}), '(D_wj)\n', (1932, 1938), True, 'import numpy as np\n'), ((1948, 1957), 'numpy.log', 'np.log', (['D'], {}), '(D)\n', (1954, 1957), True, 'import numpy as np\n')] |
# Image-filtering exercise script: applies Gaussian, Laplacian and sharpening
# masks to two input images and compares the results and their histograms.
# Relies on local helper modules myFunctions (masking, histogram) and
# logicFunctions (border padding).
__author__ = '<NAME>'
import myFunctions as mf
import numpy as np
import cv2
from matplotlib import pyplot as plt
import logicFunctions as lf
# Input images are expected in the working directory.
img1 = cv2.imread('img1.jpg')
img3 = cv2.imread(('BlurryImage1.jpg'))
# Assignment 1:
# 5x5 Gaussian smoothing kernel (integer approximation, normalized by 273).
GaussMask = 1.0/273 * np.array([[1, 4, 7, 4, 1],
                              [4, 16, 26, 16, 4],
                              [7, 26, 41, 26, 7],
                              [4, 16, 26, 16, 4],
                              [1, 4, 7, 4, 1]])
# 3x3 Laplacian kernel (4-neighbour edge detector).
LaplacianMask = np.array([[0, -1, 0],
                          [-1, 4, -1],
                          [0, -1, 0]])
# Convolve the padded image with each kernel.
GFmat = mf.myMasking(lf.myExtendedPadding(img1.copy()), GaussMask)
LFmat = mf.myMasking(lf.myExtendedPadding(img1.copy()), LaplacianMask)
# Rescale filter responses to the displayable uint8 range [0, 255].
GFmat2 = GFmat - np.min(GFmat)
GFimg = np.uint8(np.round(GFmat2 * 255 / np.max(GFmat2)))
# The Laplacian response is signed; take its magnitude before rescaling.
LFmat2 = np.abs(LFmat)
LFmat2 = LFmat2 - np.min(LFmat2)
LFimg = np.uint8(np.round(LFmat2 * 255 / np.max(LFmat2)))
# 2x2 grid: original, Gaussian, Laplacian (colour), Laplacian (grayscale max over channels).
# [:,:,::-1] converts OpenCV BGR to matplotlib RGB.
plt.figure()
plt.subplot(221)
plt.axis('off')
plt.title('Original image')
plt.imshow(img1[:,:,::-1])
plt.subplot(222)
plt.axis('off')
plt.title('Gaussian filtered image')
plt.imshow(GFimg[:,:,::-1])
plt.subplot(223)
plt.axis('off')
plt.title('Laplacian filtered image')
plt.imshow(LFimg[:,:,::-1])
plt.subplot(224)
plt.axis('off')
plt.title('Laplacian filtered image')
plt.imshow(np.max(LFimg, axis=2), cmap='gray')
plt.show()
# Assignment 2:
# Sharpening kernel: centre weight 10 minus the 8 neighbours (high-boost style).
myMask = np.array([[-1, -1, -1],
                   [-1, 10, -1],
                   [-1, -1, -1]])
Mimg = np.uint8(np.round(np.abs(mf.myMasking(lf.myExtendedPadding(img3.copy()), myMask))))
# Side-by-side comparison of blurry original vs. sharpened result.
plt.figure()
plt.subplot(121)
plt.axis('off')
plt.title('Original image')
plt.imshow(img3[:,:,::-1])
plt.subplot(122)
plt.axis('off')
plt.title('Masked image')
plt.imshow(Mimg[:,:,::-1])
plt.show()
# Assignment 3:
# Compare grayscale histograms of the original and sharpened images.
img3g = cv2.cvtColor(img3, cv2.COLOR_BGR2GRAY)
Mimgg = cv2.cvtColor(Mimg, cv2.COLOR_BGR2GRAY)
hist1 = mf.myHistPlotUint8(img3g)
hist2 = mf.myHistPlotUint8(Mimgg)
plt.figure()
plt.subplot(121)
plt.title('Original image')
plt.bar(range(256), hist1)
plt.subplot(122)
plt.title('Masked image')
plt.bar(range(256), hist2)
# Same histograms with the y-axis clipped to 1/20 of the peak to expose the tails.
plt.figure()
plt.subplot(121)
plt.title('Original image')
plt.bar(range(256), hist1)
plt.ylim((0, np.max(hist1) / 20))
plt.subplot(122)
plt.title('Masked image')
plt.bar(range(256), hist2)
plt.ylim((0, np.max(hist2) / 20))
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.abs",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"cv2.cvtColor",
"numpy.min",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"myFunctions.myHistPlotUint8",
"cv2.imread"
] | [((159, 181), 'cv2.imread', 'cv2.imread', (['"""img1.jpg"""'], {}), "('img1.jpg')\n", (169, 181), False, 'import cv2\n'), ((190, 220), 'cv2.imread', 'cv2.imread', (['"""BlurryImage1.jpg"""'], {}), "('BlurryImage1.jpg')\n", (200, 220), False, 'import cv2\n'), ((523, 570), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 4, -1], [0, -1, 0]]'], {}), '([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])\n', (531, 570), True, 'import numpy as np\n'), ((872, 885), 'numpy.abs', 'np.abs', (['LFmat'], {}), '(LFmat)\n', (878, 885), True, 'import numpy as np\n'), ((982, 994), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (992, 994), True, 'from matplotlib import pyplot as plt\n'), ((996, 1012), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (1007, 1012), True, 'from matplotlib import pyplot as plt\n'), ((1014, 1029), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1022, 1029), True, 'from matplotlib import pyplot as plt\n'), ((1031, 1058), 'matplotlib.pyplot.title', 'plt.title', (['"""Original image"""'], {}), "('Original image')\n", (1040, 1058), True, 'from matplotlib import pyplot as plt\n'), ((1060, 1088), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img1[:, :, ::-1]'], {}), '(img1[:, :, ::-1])\n', (1070, 1088), True, 'from matplotlib import pyplot as plt\n'), ((1090, 1106), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (1101, 1106), True, 'from matplotlib import pyplot as plt\n'), ((1108, 1123), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1116, 1123), True, 'from matplotlib import pyplot as plt\n'), ((1125, 1161), 'matplotlib.pyplot.title', 'plt.title', (['"""Gaussian filtered image"""'], {}), "('Gaussian filtered image')\n", (1134, 1161), True, 'from matplotlib import pyplot as plt\n'), ((1163, 1192), 'matplotlib.pyplot.imshow', 'plt.imshow', (['GFimg[:, :, ::-1]'], {}), '(GFimg[:, :, ::-1])\n', (1173, 1192), True, 'from matplotlib import pyplot as plt\n'), 
((1194, 1210), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (1205, 1210), True, 'from matplotlib import pyplot as plt\n'), ((1212, 1227), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1220, 1227), True, 'from matplotlib import pyplot as plt\n'), ((1229, 1266), 'matplotlib.pyplot.title', 'plt.title', (['"""Laplacian filtered image"""'], {}), "('Laplacian filtered image')\n", (1238, 1266), True, 'from matplotlib import pyplot as plt\n'), ((1268, 1297), 'matplotlib.pyplot.imshow', 'plt.imshow', (['LFimg[:, :, ::-1]'], {}), '(LFimg[:, :, ::-1])\n', (1278, 1297), True, 'from matplotlib import pyplot as plt\n'), ((1299, 1315), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (1310, 1315), True, 'from matplotlib import pyplot as plt\n'), ((1317, 1332), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1325, 1332), True, 'from matplotlib import pyplot as plt\n'), ((1334, 1371), 'matplotlib.pyplot.title', 'plt.title', (['"""Laplacian filtered image"""'], {}), "('Laplacian filtered image')\n", (1343, 1371), True, 'from matplotlib import pyplot as plt\n'), ((1423, 1433), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1431, 1433), True, 'from matplotlib import pyplot as plt\n'), ((1467, 1519), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 10, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 10, -1], [-1, -1, -1]])\n', (1475, 1519), True, 'import numpy as np\n'), ((1657, 1669), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1667, 1669), True, 'from matplotlib import pyplot as plt\n'), ((1671, 1687), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1682, 1687), True, 'from matplotlib import pyplot as plt\n'), ((1689, 1704), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1697, 1704), True, 'from matplotlib import pyplot as plt\n'), ((1706, 1733), 'matplotlib.pyplot.title', 'plt.title', (['"""Original 
image"""'], {}), "('Original image')\n", (1715, 1733), True, 'from matplotlib import pyplot as plt\n'), ((1735, 1763), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img3[:, :, ::-1]'], {}), '(img3[:, :, ::-1])\n', (1745, 1763), True, 'from matplotlib import pyplot as plt\n'), ((1765, 1781), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1776, 1781), True, 'from matplotlib import pyplot as plt\n'), ((1783, 1798), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1791, 1798), True, 'from matplotlib import pyplot as plt\n'), ((1800, 1825), 'matplotlib.pyplot.title', 'plt.title', (['"""Masked image"""'], {}), "('Masked image')\n", (1809, 1825), True, 'from matplotlib import pyplot as plt\n'), ((1827, 1855), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Mimg[:, :, ::-1]'], {}), '(Mimg[:, :, ::-1])\n', (1837, 1855), True, 'from matplotlib import pyplot as plt\n'), ((1857, 1867), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1865, 1867), True, 'from matplotlib import pyplot as plt\n'), ((1900, 1938), 'cv2.cvtColor', 'cv2.cvtColor', (['img3', 'cv2.COLOR_BGR2GRAY'], {}), '(img3, cv2.COLOR_BGR2GRAY)\n', (1912, 1938), False, 'import cv2\n'), ((1948, 1986), 'cv2.cvtColor', 'cv2.cvtColor', (['Mimg', 'cv2.COLOR_BGR2GRAY'], {}), '(Mimg, cv2.COLOR_BGR2GRAY)\n', (1960, 1986), False, 'import cv2\n'), ((1998, 2023), 'myFunctions.myHistPlotUint8', 'mf.myHistPlotUint8', (['img3g'], {}), '(img3g)\n', (2016, 2023), True, 'import myFunctions as mf\n'), ((2033, 2058), 'myFunctions.myHistPlotUint8', 'mf.myHistPlotUint8', (['Mimgg'], {}), '(Mimgg)\n', (2051, 2058), True, 'import myFunctions as mf\n'), ((2062, 2074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2072, 2074), True, 'from matplotlib import pyplot as plt\n'), ((2076, 2092), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2087, 2092), True, 'from matplotlib import pyplot as plt\n'), ((2094, 2121), 'matplotlib.pyplot.title', 
'plt.title', (['"""Original image"""'], {}), "('Original image')\n", (2103, 2121), True, 'from matplotlib import pyplot as plt\n'), ((2153, 2169), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2164, 2169), True, 'from matplotlib import pyplot as plt\n'), ((2171, 2196), 'matplotlib.pyplot.title', 'plt.title', (['"""Masked image"""'], {}), "('Masked image')\n", (2180, 2196), True, 'from matplotlib import pyplot as plt\n'), ((2228, 2240), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2238, 2240), True, 'from matplotlib import pyplot as plt\n'), ((2242, 2258), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2253, 2258), True, 'from matplotlib import pyplot as plt\n'), ((2260, 2287), 'matplotlib.pyplot.title', 'plt.title', (['"""Original image"""'], {}), "('Original image')\n", (2269, 2287), True, 'from matplotlib import pyplot as plt\n'), ((2354, 2370), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2365, 2370), True, 'from matplotlib import pyplot as plt\n'), ((2372, 2397), 'matplotlib.pyplot.title', 'plt.title', (['"""Masked image"""'], {}), "('Masked image')\n", (2381, 2397), True, 'from matplotlib import pyplot as plt\n'), ((2464, 2474), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2472, 2474), True, 'from matplotlib import pyplot as plt\n'), ((267, 376), 'numpy.array', 'np.array', (['[[1, 4, 7, 4, 1], [4, 16, 26, 16, 4], [7, 26, 41, 26, 7], [4, 16, 26, 16, 4\n ], [1, 4, 7, 4, 1]]'], {}), '([[1, 4, 7, 4, 1], [4, 16, 26, 16, 4], [7, 26, 41, 26, 7], [4, 16, \n 26, 16, 4], [1, 4, 7, 4, 1]])\n', (275, 376), True, 'import numpy as np\n'), ((787, 800), 'numpy.min', 'np.min', (['GFmat'], {}), '(GFmat)\n', (793, 800), True, 'import numpy as np\n'), ((905, 919), 'numpy.min', 'np.min', (['LFmat2'], {}), '(LFmat2)\n', (911, 919), True, 'import numpy as np\n'), ((1384, 1405), 'numpy.max', 'np.max', (['LFimg'], {'axis': '(2)'}), '(LFimg, axis=2)\n', (1390, 1405), True, 
'import numpy as np\n'), ((843, 857), 'numpy.max', 'np.max', (['GFmat2'], {}), '(GFmat2)\n', (849, 857), True, 'import numpy as np\n'), ((962, 976), 'numpy.max', 'np.max', (['LFmat2'], {}), '(LFmat2)\n', (968, 976), True, 'import numpy as np\n'), ((2330, 2343), 'numpy.max', 'np.max', (['hist1'], {}), '(hist1)\n', (2336, 2343), True, 'import numpy as np\n'), ((2440, 2453), 'numpy.max', 'np.max', (['hist2'], {}), '(hist2)\n', (2446, 2453), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import pickle
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from logdeep.dataset.log import log_dataset
from logdeep.dataset.sample import session_window, sliding_window
from logdeep.models import Deeplog, Loganomaly
def generate(output_dir, name):
    """Read the session file `output_dir + name` and return (lines, line_count).

    Note: `output_dir` is concatenated directly with `name`, so it must end
    with a path separator if it denotes a directory.
    """
    path = output_dir + name
    print("Loading", path)
    with open(path, 'r') as session_file:
        lines = session_file.readlines()
    return lines, len(lines)
class Predicter():
    """Evaluates a trained log-anomaly model (Deeplog or Loganomaly).

    Two evaluation modes:
      * predict_unsupervised: next-logkey prediction; a session is flagged
        anomalous when its count of mispredicted keys exceeds a threshold,
        which is tuned on labelled normal/abnormal test sets for the best F1.
      * predict_supervised: binary classification over windowed sessions.
    All configuration is read from the `options` dict passed to __init__.
    """
    def __init__(self, options):
        # Paths and model selection.
        self.output_dir = options['output_dir']
        self.model_dir = options['model_dir']
        self.vocab_path = options["vocab_path"]
        self.model_path = options['model_path']
        self.model_name = options["model_name"]
        self.device = options['device']
        # Sliding-window / evaluation settings.
        self.window_size = options['window_size']
        self.min_len = options["min_len"]
        self.is_logkey = options["is_logkey"]
        self.is_time = options["is_time"]
        self.test_ratio = options["test_ratio"]
        self.num_candidates = options['num_candidates']
        # Network hyper-parameters (must match the trained checkpoint).
        self.input_size = options["input_size"]
        self.hidden_size = options["hidden_size"]
        self.embedding_dim = options["embedding_dim"]
        self.num_layers = options["num_layers"]
        self.batch_size = options['batch_size']
        # Which feature groups the dataset should emit.
        self.sequentials = options['sequentials']
        self.quantitatives = options['quantitatives']
        self.semantics = options['semantics']
        self.parameters = options['parameters']
        # Accepted error band for parameter (value) predictions.
        self.lower_bound = 0
        self.upper_bound = 3

    def detect_logkey_anomaly(self, output, label):
        """Count labels absent from the model's top-`num_candidates` predictions."""
        num_anomaly = 0
        for i in range(len(label)):
            # Candidate keys = indices of the num_candidates largest logits
            # (argsort is ascending, so take the tail).
            predicted = torch.argsort(output[i])[-self.num_candidates:].clone().detach().cpu()
            if label[i] not in predicted:
                num_anomaly += 1
        return num_anomaly

    def detect_params_anomaly(self, output, label):
        """Count predictions whose error lies outside [lower_bound, upper_bound]."""
        num_anomaly = 0
        for i in range(len(label)):
            error = output[i].item() - label[i].item()
            if error < self.lower_bound or error > self.upper_bound:
                num_anomaly += 1
        return num_anomaly

    def compute_anomaly(self, results, threshold=0):
        """Count sessions in `results` flagged anomalous at `threshold`.

        An int threshold is an absolute mispredicted-key count; a float is
        interpreted as a fraction of the session's predicted-logkey count.
        NOTE(review): when `threshold` is a float it is rebound inside the
        loop, so the scaling compounds across sessions -- looks unintended.
        The int path used by find_best_threshold is unaffected.
        """
        total_errors = 0
        for seq_res in results:
            if isinstance(threshold, float):
                threshold = seq_res["predicted_logkey"] * threshold
            # A session is anomalous if either enabled detector exceeds the threshold.
            error = (self.is_logkey and seq_res["logkey_anomaly"] > threshold) or \
                    (self.is_time and seq_res["params_anomaly"] > threshold)
            total_errors += int(error)
        return total_errors

    def find_best_threshold(self, test_normal_results, test_abnormal_results, threshold_range):
        """Sweep `threshold_range` and return the metrics of the best-F1 threshold.

        Returns [threshold, TP, TN, FP, FN, precision, recall, F1].
        Thresholds producing zero true positives are skipped.
        """
        test_abnormal_length = len(test_abnormal_results)
        test_normal_length = len(test_normal_results)
        res = [0, 0, 0, 0, 0, 0, 0, 0]  # th, tp, tn, fp, fn, p, r, f1
        for th in threshold_range:
            # Anomalies flagged on normal data are false positives;
            # on abnormal data they are true positives.
            FP = self.compute_anomaly(test_normal_results, th)
            TP = self.compute_anomaly(test_abnormal_results, th)
            if TP == 0:
                continue
            # Compute precision, recall and F1-measure
            TN = test_normal_length - FP
            FN = test_abnormal_length - TP
            P = 100 * TP / (TP + FP)
            R = 100 * TP / (TP + FN)
            F1 = 2 * P * R / (P + R)
            if F1 > res[-1]:
                res = [th, TP, TN, FP, FN, P, R, F1]
        return res

    def unsupervised_helper(self, model, data_iter, vocab, data_type, min_len=0):
        """Score a random `test_ratio` subset of sessions with the model.

        Each line in `data_iter` is a session of comma-separated tokens; it is
        split into sliding windows and every window's next key is predicted.
        Returns (per-session result dicts, normal_errors) -- normal_errors is
        always left empty here.
        """
        test_results = []
        normal_errors = []
        num_test = len(data_iter)
        # Randomly subsample test_ratio of the sessions to speed up evaluation.
        rand_index = torch.randperm(num_test)
        rand_index = rand_index[:int(num_test * self.test_ratio)]
        with torch.no_grad():
            for idx, line in tqdm(enumerate(data_iter)):
                if idx not in rand_index:
                    continue
                # Tokenize: whitespace-separated entries, each split on commas.
                line = [ln.split(",") for ln in line.split()]
                # Sessions shorter than min_len are skipped.
                if len(line) < min_len:
                    continue
                line = np.array(line)
                logkey = line.squeeze()
                logkeys = [logkey.tolist()] # add next axis
                logs, labels = sliding_window((logkeys), vocab, window_size=self.window_size, is_train=False)
                dataset = log_dataset(logs=logs,
                                      labels=labels,
                                      seq=self.sequentials,
                                      quan=self.quantitatives,
                                      sem=self.semantics,
                                      param=self.parameters)
                data_loader = DataLoader(dataset,
                                         batch_size=min(len(dataset), 64),
                                         shuffle=True,
                                         pin_memory=True)
                # batch_size = len(dataset)
                num_logkey_anomaly = 0
                # params_anomaly stays 0 in this helper: detect_params_anomaly
                # is never invoked here.
                num_params_anomaly = 0
                num_predicted_logkey = 0
                for _, (log, label) in enumerate(data_loader):
                    features = []
                    for value in log.values():
                        features.append(value.clone().detach().to(self.device))
                    output = model(features=features, device=self.device)
                    num_predicted_logkey += len(label)
                    if self.is_logkey:
                        num_logkey_anomaly += self.detect_logkey_anomaly(output, label)
                # result for line at idx
                result = {"logkey_anomaly": num_logkey_anomaly,
                          "params_anomaly": num_params_anomaly,
                          "predicted_logkey": num_predicted_logkey
                          }
                test_results.append(result)
                if idx < 10 or idx % 1000 == 0:
                    print(data_type)
                    print(result)
            return test_results, normal_errors

    def predict_unsupervised(self):
        """Run unsupervised evaluation: score normal and abnormal test sets,
        persist per-session results, and report the best-F1 threshold.
        """
        with open(self.vocab_path, 'rb') as f:
            vocab = pickle.load(f)
        # Rebuild the network architecture before loading the checkpoint.
        if self.model_name == "deeplog":
            model_init = Deeplog(input_size=self.input_size,
                                hidden_size=self.hidden_size,
                                num_layers=self.num_layers,
                                vocab_size=len(vocab),
                                embedding_dim=self.embedding_dim
                                )
        else:
            model_init = Loganomaly(
                input_size=self.input_size,
                hidden_size=self.hidden_size,
                num_layers=self.num_layers,
                vocab_size=len(vocab),
                embedding_dim=self.embedding_dim
            )
        model = model_init.to(self.device)
        model.load_state_dict(torch.load(self.model_path)['state_dict'])
        model.eval()
        print('model_path: {}'.format(self.model_path))
        test_normal_loader, _ = generate(self.output_dir, 'test_normal')
        test_abnormal_loader, _ = generate(self.output_dir, 'test_abnormal')
        # print("testing normal size: {}, testing abnormal size: {}".format(test_normal_length, test_abnormal_length))
        # Test the model
        start_time = time.time()
        test_normal_results, normal_errors = self.unsupervised_helper(model, test_normal_loader, vocab, 'test_normal',
                                                                      min_len=self.min_len)
        test_abnormal_results, abnormal_errors = self.unsupervised_helper(model, test_abnormal_loader, vocab,
                                                                          'test_abnormal',
                                                                          min_len=self.min_len)
        # Persist raw per-session results so thresholds can be re-tuned offline.
        print("Saving test normal results", self.model_dir + "test_normal_results")
        with open(self.model_dir + "test_normal_results", "wb") as f:
            pickle.dump(test_normal_results, f)
        print("Saving test abnormal results", self.model_dir + "test_abnormal_results")
        with open(self.model_dir + "test_abnormal_results", "wb") as f:
            pickle.dump(test_abnormal_results, f)
        # Sweep integer thresholds 0..9 for the best F1.
        TH, TP, TN, FP, FN, P, R, F1 = self.find_best_threshold(test_normal_results,
                                                                test_abnormal_results,
                                                                threshold_range=np.arange(10))
        print('Best threshold', TH)
        print("Confusion matrix")
        print("TP: {}, TN: {}, FP: {}, FN: {}".format(TP, TN, FP, FN))
        print('Precision: {:.3f}%, Recall: {:.3f}%, F1-measure: {:.3f}%'.format(P, R, F1))
        elapsed_time = time.time() - start_time
        print('elapsed_time: {}'.format(elapsed_time))

    def predict_supervised(self):
        """Run supervised evaluation: classify session windows and print
        the confusion counts, precision, recall and F1.

        NOTE(review): relies on self.model, which __init__ never assigns --
        presumably set by the caller before invoking this method; verify.
        """
        model = self.model.to(self.device)
        model.load_state_dict(torch.load(self.model_path)['state_dict'])
        model.eval()
        print('model_path: {}'.format(self.model_path))
        test_logs, test_labels = session_window(self.output_dir, datatype='test')
        test_dataset = log_dataset(logs=test_logs,
                                   labels=test_labels,
                                   seq=self.sequentials,
                                   quan=self.quantitatives,
                                   sem=self.semantics)
        self.test_loader = DataLoader(test_dataset,
                                      batch_size=self.batch_size,
                                      shuffle=False,
                                      pin_memory=True)
        tbar = tqdm(self.test_loader, desc="\r")
        TP, FP, FN, TN = 0, 0, 0, 0
        for i, (log, label) in enumerate(tbar):
            features = []
            for value in log.values():
                features.append(value.clone().to(self.device))
            output = self.model(features=features, device=self.device)
            # Sigmoid score of the first output unit; low score => anomalous.
            output = F.sigmoid(output)[:, 0].cpu().detach().numpy()
            # predicted = torch.argmax(output, dim=1).cpu().numpy()
            predicted = (output < 0.2).astype(int)
            label = np.array([y.cpu() for y in label])
            # Accumulate the confusion matrix over all batches.
            TP += ((predicted == 1) * (label == 1)).sum()
            FP += ((predicted == 1) * (label == 0)).sum()
            FN += ((predicted == 0) * (label == 1)).sum()
            TN += ((predicted == 0) * (label == 0)).sum()
        P = 100 * TP / (TP + FP)
        R = 100 * TP / (TP + FN)
        F1 = 2 * P * R / (P + R)
        print(
            'false positive (FP): {}, false negative (FN): {}, Precision: {:.3f}%, Recall: {:.3f}%, F1-measure: {:.3f}%'
            .format(FP, FN, P, R, F1))
| [
"logdeep.dataset.log.log_dataset",
"torch.randperm",
"pickle.dump",
"torch.load",
"tqdm.tqdm",
"pickle.load",
"logdeep.dataset.sample.session_window",
"logdeep.dataset.sample.sliding_window",
"torch.nn.functional.sigmoid",
"numpy.array",
"torch.argsort",
"torch.utils.data.DataLoader",
"torch... | [((3962, 3986), 'torch.randperm', 'torch.randperm', (['num_test'], {}), '(num_test)\n', (3976, 3986), False, 'import torch\n'), ((7691, 7702), 'time.time', 'time.time', ([], {}), '()\n', (7700, 7702), False, 'import time\n'), ((9523, 9571), 'logdeep.dataset.sample.session_window', 'session_window', (['self.output_dir'], {'datatype': '"""test"""'}), "(self.output_dir, datatype='test')\n", (9537, 9571), False, 'from logdeep.dataset.sample import session_window, sliding_window\n'), ((9596, 9715), 'logdeep.dataset.log.log_dataset', 'log_dataset', ([], {'logs': 'test_logs', 'labels': 'test_labels', 'seq': 'self.sequentials', 'quan': 'self.quantitatives', 'sem': 'self.semantics'}), '(logs=test_logs, labels=test_labels, seq=self.sequentials, quan=\n self.quantitatives, sem=self.semantics)\n', (9607, 9715), False, 'from logdeep.dataset.log import log_dataset\n'), ((9883, 9971), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(test_dataset, batch_size=self.batch_size, shuffle=False,\n pin_memory=True)\n', (9893, 9971), False, 'from torch.utils.data import DataLoader\n'), ((10101, 10134), 'tqdm.tqdm', 'tqdm', (['self.test_loader'], {'desc': "'\\r'"}), "(self.test_loader, desc='\\r')\n", (10105, 10134), False, 'from tqdm import tqdm\n'), ((4070, 4085), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4083, 4085), False, 'import torch\n'), ((6462, 6476), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6473, 6476), False, 'import pickle\n'), ((8389, 8424), 'pickle.dump', 'pickle.dump', (['test_normal_results', 'f'], {}), '(test_normal_results, f)\n', (8400, 8424), False, 'import pickle\n'), ((8602, 8639), 'pickle.dump', 'pickle.dump', (['test_abnormal_results', 'f'], {}), '(test_abnormal_results, f)\n', (8613, 8639), False, 'import pickle\n'), ((9174, 9185), 'time.time', 'time.time', ([], {}), '()\n', (9183, 9185), False, 'import time\n'), ((4382, 4396), 
'numpy.array', 'np.array', (['line'], {}), '(line)\n', (4390, 4396), True, 'import numpy as np\n'), ((4534, 4610), 'logdeep.dataset.sample.sliding_window', 'sliding_window', (['logkeys', 'vocab'], {'window_size': 'self.window_size', 'is_train': '(False)'}), '(logkeys, vocab, window_size=self.window_size, is_train=False)\n', (4548, 4610), False, 'from logdeep.dataset.sample import session_window, sliding_window\n'), ((4640, 4772), 'logdeep.dataset.log.log_dataset', 'log_dataset', ([], {'logs': 'logs', 'labels': 'labels', 'seq': 'self.sequentials', 'quan': 'self.quantitatives', 'sem': 'self.semantics', 'param': 'self.parameters'}), '(logs=logs, labels=labels, seq=self.sequentials, quan=self.\n quantitatives, sem=self.semantics, param=self.parameters)\n', (4651, 4772), False, 'from logdeep.dataset.log import log_dataset\n'), ((7243, 7270), 'torch.load', 'torch.load', (['self.model_path'], {}), '(self.model_path)\n', (7253, 7270), False, 'import torch\n'), ((8897, 8910), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (8906, 8910), True, 'import numpy as np\n'), ((9367, 9394), 'torch.load', 'torch.load', (['self.model_path'], {}), '(self.model_path)\n', (9377, 9394), False, 'import torch\n'), ((1952, 1976), 'torch.argsort', 'torch.argsort', (['output[i]'], {}), '(output[i])\n', (1965, 1976), False, 'import torch\n'), ((10446, 10463), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['output'], {}), '(output)\n', (10455, 10463), True, 'import torch.nn.functional as F\n')] |
import os
import copy
import scipy
import numpy as np
import matplotlib.pyplot as plt
from astropy import wcs
from astropy.io import fits
from astropy.table import Table, Column
import astropy.units as u
from astropy.coordinates import SkyCoord
from .display import display_single, SEG_CMAP
from .utils import img_cutout
from .imtools import imshift, imdelete, magnify, blkavg
class Celestial(object):
'''
Class for ``Celestial`` object.
This class is basically a celestial body from observational perspective.
It has its image, header, WCS. The mask which masks out contaminations can also be stored as an attribute.
Then this ``Celestial`` object can be saved to FITS file, can be shifted, resized, rotated, etc.
What's more, the user could check the image/mask/masked image simply by invoke ``Celestial.display_image()``.
This class can also be inherited to make other classes.
'''
def __init__(self, img, mask=None, header=None, dataset='Dragonfly', scale_bar_length=5):
'''
Initialize ``Celestial`` object.
Please note that all WCS information is derived from header!
We operate on header directly instead of wcs.
Parameters:
img (numpy 2-D array): image array.
mask (numpy 2-D array, optional): mask array. 1 means the pixel will be masked.
header: header of image, containing WCS information.
Typically it is ``astropy.io.fits.header`` object. If ``header=None``, it will create a default WCS.
dataset (str): The description of the input data.
scale_bar_length (float): Scale bar length when displaying.
Returns:
None
'''
self.shape = img.shape # in ndarray format
self.dataset = dataset
hdu = fits.PrimaryHDU(img, header=header)
self._image = hdu.data
if mask is not None:
self._mask = mask
# Sky position
ny, nx = img.shape
self.ny = ny
self.nx = nx
self.header = hdu.header
self.wcs = wcs.WCS(header)
if header is not None:
try:
self.pixel_scale = abs(header['CD1_1'] * 3600)
except:
self.pixel_scale = abs(header['PC1_1'] * 3600)
self.ra_cen, self.dec_cen = list(map(float, self.wcs.wcs_pix2world(ny // 2, nx // 2, 1)))
# This follows lower-left, lower-right, upper-right, upper-left.
self.ra_bounds, self.dec_bounds = self.wcs.wcs_pix2world([0, img.shape[1], img.shape[1], 0],
[0, 0, img.shape[0], img.shape[0]], 1)
self.sky_bounds = np.append(self.ra_bounds[2:], self.dec_bounds[1:3])
c1 = SkyCoord(ra=self.sky_bounds[0], dec=self.sky_bounds[2], unit='deg')
c2 = SkyCoord(ra=self.sky_bounds[1], dec=self.sky_bounds[3], unit='deg')
self.diag_radius = c1.separation(c2) / 2
else:
self.pixel_scale = 1
# initial length for scale bar when displaying
self.scale_bar_length = scale_bar_length
@property
def image(self):
return self._image
@image.setter
def image(self, img_array):
self._image = img_array
@property
def mask(self):
return self._mask
@mask.setter
def mask(self, mask_array):
self._mask = mask_array
@property
def hscmask(self):
return self._mask
@hscmask.setter
def hscmask(self, mask_array):
self._hscmask = mask_array
@property
def variance(self):
return self._variance
@variance.setter
def variance(self, variance_array):
self._variance = variance_array
# Save 2-D numpy array to ``fits``
def save_to_fits(self, fits_file_name, data='image', overwrite=True):
"""
Save image or mask of this ``Celestial`` object to ``fits`` file.
We operate wcs directly on header!
Parameters:
fits_file_name (str): File name of ``fits`` file
data (str): can be 'image' or 'mask'
overwrite (bool): Default is True
Returns:
None
"""
if data == 'image':
data_use = self.image
elif data == 'mask':
data_use = self.mask
else:
raise ValueError('Data can only be "image" or "mask".')
img_hdu = fits.PrimaryHDU(data_use, header=self.header)
if os.path.islink(fits_file_name):
os.unlink(fits_file_name)
img_hdu.writeto(fits_file_name, overwrite=overwrite)
return img_hdu
# Shift image/mask
def shift_image(self, dx, dy, method='spline', order=3, cval=0.0):
'''Shift the image of Celestial object. The WCS of image will also be changed.
Parameters:
dx (float): shift distance (in pixel) along x (horizontal).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the image "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
dy (float): shift distance (in pixel) along y (vertical).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the image "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
method (str): interpolation method. Use 'spline', lanczos' or 'iraf'.
If using 'iraf', default interpolation is 'poly3. 'Lanczos' requires ``GalSim`` installed.
order (int): the order of Spline or Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
shift_image (ndarray): shifted image, the "image" attribute of ``Celestial`` class will also be changed accordingly.
'''
ny, nx = self.image.shape
if abs(dx) > nx or abs(ny) > ny:
raise ValueError('# Shift distance is beyond the image size.')
if method == 'lanczos' or method == 'cubic' or method == 'quintic':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except:
raise ImportError('# Import ``galsim`` failed! Please check if ``galsim`` is installed!')
# Begin shift
assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be positive interger.'
if method == 'lanczos':
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=Lanczos(order))
else:
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=method)
galimg = galimg.shift(dx=dx * self.pixel_scale, dy=dy * self.pixel_scale)
result = galimg.drawImage(scale=self.pixel_scale, nx=nx, ny=ny)#, wcs=AstropyWCS(self.wcs))
self._image = result.array
# Change the WCS of image
hdr = copy.deepcopy(self.header)
if 'CRPIX1' in hdr:
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(self.header)
return result.array
elif method == 'iraf':
self.save_to_fits('./_temp.fits', 'image')
imshift('./_temp.fits', './_shift_temp.fits', dx, dy, interp_type='poly3', boundary_type='constant')
try:
hdu = fits.open('./_shift_temp.fits')
except Exception as e:
raise ValueError('Interpolation using IRAF filed with error "{}". \n Please try another interpolation method!'.format(e))
self.image = hdu[0].data
self.shape = hdu[0].data.shape
self.header = hdu[0].header
self.wcs = wcs.WCS(self.header)
hdu.close()
imdelete('./*temp.fits')
return self.image
elif method == 'spline':
from scipy.ndimage.interpolation import shift
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
result = shift(self.image, [dy, dx], order=order, mode='constant', cval=cval)
self._image = result
# Change the WCS of image
hdr = copy.deepcopy(self.header)
if 'CRPIX1' in hdr:
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(self.header)
return result
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos' or 'iraf'.")
def shift_mask(self, dx, dy, method='spline', order=3, cval=0.0):
'''Shift the mask of Celestial object.
Parameters:
dx (float): shift distance (in pixel) along x (horizontal).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the mask "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
dy (float): shift distance (in pixel) along y (vertical).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the mask "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
method (str): interpolation method. Use 'spline', lanczos' or 'iraf'.
If using 'iraf', default interpolation is 'poly3. 'Lanczos' requires ``GalSim`` installed.
order (int): the order of Spline or Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
shift_mask (ndarray): shifted mask. The "mask" attribute of ``Celestial`` class will also be changed accordingly.
'''
ny, nx = self.mask.shape
if abs(dx) > nx or abs(ny) > ny:
raise ValueError('# Shift distance is beyond the image size.')
if method == 'lanczos' or method == 'cubic' or method == 'quintic':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except:
raise ImportError('# Import ``galsim`` failed! Please check if ``galsim`` is installed!')
# Begin shift
assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be positive interger.'
if method == 'lanczos':
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=Lanczos(order))
else:
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=method)
galimg = galimg.shift(dx=dx * self.pixel_scale, dy=dy * self.pixel_scale)
result = galimg.drawImage(scale=self.pixel_scale, nx=nx, ny=ny)#, wcs=AstropyWCS(self.wcs))
self._mask = result.array
# Change the WCS of image
hdr = copy.deepcopy(self.header)
if 'CRPIX1' in hdr:
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(self.header)
return result.array
elif method == 'iraf':
self.save_to_fits('./_temp.fits', 'mask')
imshift('./_temp.fits', './_shift_temp.fits', dx, dy, interp_type='poly3', boundary_type='constant')
try:
hdu = fits.open('./_shift_temp.fits')
except Exception as e:
raise ValueError('Interpolation using IRAF filed with error "{}". \n Please try another interpolation method!'.format(e))
self.mask = hdu[0].data
self.shape = hdu[0].data.shape
self.header = hdu[0].header
self.wcs = wcs.WCS(self.header)
hdu.close()
imdelete('./*temp.fits')
return self.mask
elif method == 'spline':
from scipy.ndimage.interpolation import shift
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
result = shift(self.mask, [dy, dx], order=order, mode='constant', cval=cval)
self._mask = result
# Change the WCS of image
hdr = copy.deepcopy(self.header)
if 'CRPIX1' in hdr:
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(self.header)
return result
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos' or 'iraf'.")
def shift_Celestial(self, dx, dy, method='spline', order=3, cval=0.0):
'''Shift the Celestial object, including image and mask.
Parameters:
dx (float): shift distance (in pixel) along x (horizontal).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the image "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
dy (float): shift distance (in pixel) along y (vertical).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the image "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
method (str): interpolation method. Use 'spline', lanczos' or 'iraf'.
If using 'iraf', default interpolation is 'poly3. 'Lanczos' requires ``GalSim`` installed.
order (int): the order of Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
None
'''
self.shift_image(dx, dy, method=method, order=order, cval=cval)
if hasattr(self, 'mask'):
if abs(np.sum(self.mask)) > 1e-5:
self.shift_mask(dx, dy, method=method, order=order, cval=cval)
def _resize_header_wcs(self, f):
hdr = copy.deepcopy(self.header)
w = wcs.WCS(hdr)
if f > 1:
hdr['CRPIX1'] = hdr['CRPIX1'] * f # + (1 - f * 1)
# (1 - f * x1), where x1=1 is the starting index
hdr['CRPIX2'] = hdr['CRPIX2'] * f # + (1 - f * 1)
# Delete "CDELT"
if "CDELT1" in hdr or "CDELT2" in hdr:
for i in hdr['CDELT*'].keys():
del hdr[i]
if "PC1_1" in hdr or "PC2_2" in hdr:
for i in hdr['PC?_?'].keys():
del hdr[i]
if "LTV1" in hdr:
for i in hdr['LTV*'].keys():
del hdr[i]
for i in hdr['LTM*'].keys():
del hdr[i]
hdr['CD1_1'] /= f
hdr['CD2_2'] /= f
if "CD1_2" in hdr:
hdr['CD1_2'] /= f
if "CD2_1" in hdr:
hdr['CD2_1'] /= f
else:
b = round(1 / f)
hdr['CRPIX1'] = hdr['CRPIX1'] / b
hdr['CRPIX2'] = hdr['CRPIX2'] / b
# Delete "CDELT"
if "CDELT1" in hdr or "CDELT2" in hdr:
for i in hdr['CDELT*'].keys():
del hdr[i]
if "PC1_1" in hdr or "PC2_2" in hdr:
for i in hdr['PC?_?'].keys():
del hdr[i]
if "LTV1" in hdr:
for i in hdr['LTV*'].keys():
del hdr[i]
for i in hdr['LTM*'].keys():
del hdr[i]
hdr['CD1_1'] *= b
hdr['CD2_2'] *= b
if "CD1_2" in hdr:
hdr['CD1_2'] *= b
if "CD2_1" in hdr:
hdr['CD2_1'] *= b
return hdr
def resize_image(self, f, method='cubic', order=3, cval=0.0):
'''
Zoom/Resize the image of Celestial object.
f > 1 means the image will be resampled (finer)! f < 1 means the image will be degraded.
Parameters:
f (float): the positive factor of zoom. If 0 < f < 1, the image will be resized to smaller one.
method (str): interpolation method. Use 'spline', 'iraf', or 'lanczos', 'cubic', 'quintic'.
We recommend using 'spline' or 'iraf. The last three methods require ``GalSim`` installed.
Other methods are now consistent with "iraf" results.
order (int): the order Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
resize_image (ndarray): resized image. The "image" attribute of ``Celestial`` class will also be changed accordingly.
'''
if method == 'lanczos' or method == 'cubic' or method == 'quintic':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except:
raise ImportError('# Import `galsim` failed! Please check if `galsim` is installed!')
assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be positive interger.'
if method == 'lanczos':
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=Lanczos(order))
else:
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=method)
ny, nx = self.image.shape
if f > 1:
result = galimg.drawImage(scale=self.pixel_scale / f,
nx=int((nx -1) * f + 1), ny=int((ny - 1)* f + 1))
self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += (1 - f * 1)
self.header['CRPIX2'] += (1 - f * 1)
self._image = result.array
self.shape = self.image.shape
self.header['NAXIS1'] = result.array.shape[1]
self.header['NAXIS2'] = result.array.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 2 * (1 - f * 1) % 0.5
self.shift_image(dshift, dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
else:
from math import ceil
b = round(1 / f)
nxout = ceil(nx / b)
nyout = ceil(ny / b)
result = galimg.drawImage(scale=self.pixel_scale * b,
nx=nxout, ny=nyout)
self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += 0.5 - 1 / b / 2
self.header['CRPIX2'] += 0.5 - 1 / b / 2
self._image = result.array
self.shape = self.image.shape
self.header['NAXIS1'] = result.array.shape[1]
self.header['NAXIS2'] = result.array.shape[0]
self.pixel_scale *= b
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 0.5 - 1 / b / 2
self.shift_image(-dshift, -dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
return self.image
elif method == 'iraf':
self.save_to_fits('./_temp.fits', 'image')
if f > 1:
magnify('./_temp.fits', './_resize_temp.fits', f, f)
else:
blkavg('./_temp.fits', './_resize_temp.fits',
round(1/f), round(1/f), option='sum')
try:
hdu = fits.open('./_resize_temp.fits')
except Exception as e:
raise ValueError('Interpolation using IRAF filed with error "{}". \n Please try another interpolation method!'.format(e))
self.image = hdu[0].data
self.shape = hdu[0].data.shape
self.header = hdu[0].header
#### Remove redundant PC keywords ###
for i in self.header['PC*'].keys():
del self.header[i]
#####################################
self.wcs = wcs.WCS(self.header)
self.pixel_scale /= f
hdu.close()
imdelete('./*temp.fits')
return self.image
elif method == 'spline':
ny, nx = self.image.shape
if f > 1:
from scipy import ndimage
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
nx_zoomed = (nx - 1) * f + 1
f_eff = nx_zoomed / nx
result = ndimage.zoom(self.image, f_eff, order=order)
result *= 1/(f_eff**2) # Multiplying by this factor to conserve flux
self.header = self._resize_header_wcs(f)
#self.header['CRPIX1'] += (1 - f * 1)
#self.header['CRPIX2'] += (1 - f * 1)
self._image = result
self.shape = self.image.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 2 * (1 - f * 1) % 0.5
self.shift_image(dshift, dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
else:
b = round(1 / f)
ny_bin = int( ny / b )
nx_bin = int( nx / b )
shape = (ny_bin, b, nx_bin, b)
x_crop = int( nx_bin * b )
y_crop = int( ny_bin * b )
result = self.image[0:y_crop, 0:x_crop].reshape(shape).sum(3).sum(1)
self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += 0.5 - 1 / b / 2
self.header['CRPIX2'] += 0.5 - 1 / b / 2
self._image = result
self.shape = self.image.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale *= b
self.wcs = wcs.WCS(self.header)
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos', 'spline' or 'iraf'.")
def resize_mask(self, f, method='cubic', order=5, cval=0.0):
'''
Zoom/Resize the mask of Celestial object.
f > 1 means the mask will be resampled (finer)! f < 1 means the mask will be degraded.
Parameters:
f (float): the positive factor of zoom. If 0 < f < 1, the mask will be resized to smaller one.
method (str): interpolation method. Use 'spline', 'iraf', or 'lanczos', 'cubic', 'quintic'.
We recommend using 'spline' or 'iraf. The last three methods require ``GalSim`` installed.
Other methods are now consistent with "iraf" results.
order (int): the order Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
resize_mask (ndarray): resized image. The "mask" attribute of ``Celestial`` class will also be changed accordingly.
'''
if not hasattr(self, 'mask'):
raise ValueError("This object doesn't have mask yet!")
if method == 'lanczos' or method == 'cubic' or method == 'quintic':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except:
raise ImportError('# Import `galsim` failed! Please check if `galsim` is installed!')
assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be positive interger.'
if method == 'lanczos':
galimg = InterpolatedImage(Image(self.mask, dtype=float),
scale=self.pixel_scale, x_interpolant=Lanczos(order))
else:
galimg = InterpolatedImage(Image(self.mask, dtype=float),
scale=self.pixel_scale, x_interpolant=method)
ny, nx = self.mask.shape
if f > 1:
result = galimg.drawImage(scale=self.pixel_scale / f,
nx=int((nx -1) * f + 1), ny=int((ny - 1)* f + 1))
self.header = self._resize_header_wcs(self.mask, f)
self.header['CRPIX1'] += (1 - f * 1)
self.header['CRPIX2'] += (1 - f * 1)
self._mask = result.array
self.shape = self.mask.shape
self.header['NAXIS1'] = result.array.shape[1]
self.header['NAXIS2'] = result.array.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 2 * (1 - f * 1) % 0.5
self.shift_mask(dshift, dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
else:
from math import ceil
b = round(1 / f)
nxout = ceil(nx / b)
nyout = ceil(ny / b)
result = galimg.drawImage(scale=self.pixel_scale * b,
nx=nxout, ny=nyout)
self.header = self._resize_header_wcs(self.mask, f)
self.header['CRPIX1'] += 0.5 - 1 / b / 2
self.header['CRPIX2'] += 0.5 - 1 / b / 2
self._mask = result.array
self.shape = self.image.shape
self.header['NAXIS1'] = result.array.shape[1]
self.header['NAXIS2'] = result.array.shape[0]
self.pixel_scale *= b
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 0.5 - 1 / b / 2
self.shift_image(-dshift, -dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
return self.mask
elif method == 'iraf':
self.save_to_fits('./_temp.fits', 'mask')
if f > 1:
magnify('./_temp.fits', './_resize_temp.fits', f, f)
else:
blkavg('./_temp.fits', './_resize_temp.fits',
round(1/f), round(1/f), option='sum')
try:
hdu = fits.open('./_resize_temp.fits')
except Exception as e:
raise ValueError('Interpolation using IRAF filed with error "{}". \n Please try another interpolation method!'.format(e))
self.mask = hdu[0].data
self.shape = hdu[0].data.shape
self.header = hdu[0].header
self.wcs = wcs.WCS(self.header)
self.pixel_scale /= f
hdu.close()
imdelete('./*temp.fits')
return self.mask
elif method == 'spline':
ny, nx = self.mask.shape
if f > 1:
from scipy import ndimage
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
nx_zoomed = (nx - 1) * f + 1
f_eff = nx_zoomed / nx
result = ndimage.zoom(self.image, f_eff, order=order)
result *= 1/(f_eff**2) # Multiplying by this factor to conserve flux
self.header = self._resize_header_wcs(self.mask, f)
self.header['CRPIX1'] += (1 - f * 1)
self.header['CRPIX2'] += (1 - f * 1)
self._mask = result
self.shape = self.mask.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 2 * (1 - f * 1) % 0.5
self.shift_image(dshift, dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
else:
b = round(1 / f)
ny_bin = int( ny / b )
nx_bin = int( nx / b )
shape = (ny_bin, b, nx_bin, b)
x_crop = int( nx_bin * b )
y_crop = int( ny_bin * b )
result = self.mask[0:y_crop, 0:x_crop].reshape(shape).sum(3).sum(1)
self.header = self._resize_header_wcs(self.image, f)
self.header['CRPIX1'] += 0.5 - 1 / b / 2
self.header['CRPIX2'] += 0.5 - 1 / b / 2
self._mask = result
self.shape = self.image.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale *= b
self.wcs = wcs.WCS(self.header)
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos', 'spline' or 'iraf'.")
def resize_Celestial(self, f, method='cubic', order=5, cval=0.0):
'''
Resize the Celestial object, including both image and mask.
f > 1 means the image/mask will be resampled! f < 1 means the image/mask will be degraded.
Parameters:
f (float): the positive factor of zoom. If 0 < f < 1, the mask will be resized to smaller one.
method (str): interpolation method. Use 'lanczos' or 'spline' or 'iraf'. 'Lanczos' requires ``GalSim`` installed.
order (int): the order Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
None
'''
self.resize_image(f, method=method, order=order, cval=cval)
if hasattr(self, 'mask'):
self.resize_mask(f, method=method, order=order, cval=cval)
# Display image/mask
def display_image(self, **kwargs):
"""
Take a peek at the image, using "zscale", "arcsinh" streching and "viridis" colormap. You can change them by adding ``**kwargs``.
Parameters:
``**kwargs``: arguments in ``mrf.display.display_single``.
Returns:
None
"""
display_single(self.image, pixel_scale=self.pixel_scale,
scale_bar_length=self.scale_bar_length, **kwargs)
def display_mask(self, **kwargs):
"""
Take a peek at the mask.
Parameters:
``**kwargs``: arguments in ``mrf.display.display_single``.
Returns:
None
"""
display_single(self.mask, scale='linear', pixel_scale=self.pixel_scale,
cmap=SEG_CMAP, scale_bar_length=self.scale_bar_length, **kwargs)
def display_Celestial(self, **kwargs):
"""
Take a peek at the masked image, using "zscale", "arcsinh" streching and "viridis" colormap. You can change them by adding ``**kwargs``.
Parameters:
``**kwargs``: arguments in ``mrf.display.display_single``.
Returns:
None
"""
if hasattr(self, 'mask'):
display_single(self.image * (~self.mask.astype(bool)), pixel_scale=self.pixel_scale,
scale_bar_length=self.scale_bar_length, **kwargs)
else:
self.display_image()
"""
elif method == 'spline':
## This only works for ZOOM! NEED BKGAVG!
from scipy.ndimage import zoom
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
ny, nx = self.image.shape
print(ny, nx, f)
result = zoom(self.image, float(f), order=order, mode='constant', cval=cval)
result /= f**2 # preserve total flux
self.header = self._resize_header_wcs(self.image, f)
self._image = result
self.shape = self.image.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
#dshift = 2 * (1 - f * 1) % 0.5
#self.shift_image(dshift, dshift, method=method)
# We don't want to shift wcs.
#self.header['CRPIX1'] -= dshift
#self.header['CRPIX2'] -= dshift
#self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
print(result.shape[1], result.shape[0])
dx = int((nx - 1) * f + 1) - result.shape[1]
dy = int((ny - 1) * f + 1) - result.shape[0]
print(dx, dy)
result = self.image
# Pad the image to fit the shape of `iraf` results
if dy != 0:
if dy < 0:
result = result[-dy:, :]
if dx != 0:
if dx < 0:
result = result[:, -dx:]
#result = np.append(result, np.zeros(result.shape[0], dx), axis=1)
self._image = result
#return result
"""
class Star(Celestial):
    """
    The ``Star`` class inherits from ``Celestial``.
    It represents a small cutout of a larger image, typically centered on a star.
    Beyond the functions inherited from ``Celestial``, a ``Star`` object has
    extra functions such as ``centralize`` and ``mask_out_contam``.
    """
    # NOTE(review): ``colnames`` is a mutable default argument; it is only read
    # here, so it is harmless, but consider ``colnames=None`` + a local default.
    def __init__(self, img, header, starobj, colnames=['x', 'y'], halosize=40,
                 padsize=50, mask=None, hscmask=None):
        """
        Initialize a ``Star`` object.

        Parameters:
            img (numpy 2-D array): the image from which the cutout of the star is made.
            header: header of image, containing WCS information. Typically an
                ``astropy.io.fits.Header`` object.
            starobj: a row of ``astropy.table.Table``, containing basic information
                of the star, such as ``ra``, ``dec`` and magnitudes.
            colnames (list of str): the columns containing the position of the star.
                It could be ['x', 'y'] or ['ra', 'dec'].
            halosize (float): the radial size of the cutout. If ``halosize=40``,
                the square cutout will be 81 * 81 pix (2 * halosize + 1 per side).
            padsize (float): the image is padded by this margin so cutouts can be
                made for stars near the edge. ``padsize`` should be equal to or
                larger than ``halosize``.
            mask (numpy 2-D array): the mask of the input big image.
            hscmask (numpy 2-D array): the HSC mask of the input image.

        Returns:
            None
        """
        Celestial.__init__(self, img, mask, header=header)
        if hscmask is not None:
            self.hscmask = hscmask
        self.name = 'star'
        self.scale_bar_length = 3
        # Trim the image to star size
        # starobj should at least contain x, y, (or ra, dec)
        # NOTE(review): if ``colnames`` contains neither positional pair,
        # x_int/y_int/dx/dy are never bound and the slicing below raises
        # NameError — confirm callers always pass one of the two pairs.
        if 'x' in colnames or 'y' in colnames:
            # Position of a star, in numpy convention
            x_int = int(starobj['x'])
            y_int = int(starobj['y'])
            # Sub-pixel offsets (sign flipped so that shifting by (dx, dy)
            # later centers the star on the integer pixel).
            dx = -1.0 * (starobj['x'] - x_int)
            dy = -1.0 * (starobj['y'] - y_int)
        elif 'ra' in colnames or 'dec' in colnames:
            # Convert sky coordinates to pixel coordinates first.
            w = self.wcs
            x, y = w.wcs_world2pix(starobj['ra'], starobj['dec'], 0)
            x_int = int(x)
            y_int = int(y)
            dx = -1.0 * (x - x_int)
            dy = -1.0 * (y - y_int)
        halosize = int(halosize)
        # Make padded image to deal with stars near the edges
        padsize = int(padsize)
        ny, nx = self.image.shape
        im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))
        im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.image
        # Star itself, but no shift here.
        halo = im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1,
                         x_int + padsize - halosize: x_int + padsize + halosize + 1]
        self._image = halo
        self.shape = halo.shape
        self.cen_xy = [x_int, y_int]
        # Remember the sub-pixel offsets for a later ``centralize`` call.
        self.dx = dx
        self.dy = dy
        try:
            # FLux
            self.flux = starobj['flux']
            self.fluxann = starobj['flux_ann']
            self.fluxauto = starobj['flux_auto']
        except:
            pass #raise Warning('No flux assigned to the star!')
        if hasattr(self, 'mask'):
            # Cut the same region out of the mask, with the same padding.
            im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))
            im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.mask
            # Mask itself, but no shift here.
            halo = (im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1,
                              x_int + padsize - halosize: x_int + padsize + halosize + 1])
            self._mask = halo
        if hasattr(self, 'hscmask'):
            # Same cutout for the HSC mask, when one was provided.
            im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))
            im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.hscmask
            # Mask itself, but no shift here.
            halo = (im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1,
                              x_int + padsize - halosize: x_int + padsize + halosize + 1])
            self.hscmask = halo
    def centralize(self, method='spline', order=5, cval=0.0):
        """
        Shift the cutout to the true (sub-pixel) position of the star using
        interpolation, applying the ``self.dx``/``self.dy`` offsets recorded
        at construction time.

        Parameters:
            method (str): interpolation method. Options are "spline", "iraf"
                and "lanczos". "lanczos" requires ``GalSim`` installed.
            order (int): the order of the interpolation (>0).
            cval (float): value to fill the edges. Default is 0.

        Returns:
            None
        """
        self.shift_Celestial(self.dx, self.dy, method=method, order=order, cval=cval)
    def sub_bkg(self, sigma=4.5, deblend_cont=0.0001, verbose=True):
        """
        Subtract the locally-measured background of the ``Star`` object. The
        sky is measured by masking out detected objects using ``sep``.
        Be cautious and be aware of what you do when using this function.

        Parameters:
            sigma (float): the detection threshold passed to ``extract_obj``.
            deblend_cont (float): deblending parameter.
            verbose (bool): whether to print out the background value.

        Returns:
            None
        """
        # Actually this should be estimated in larger cutuouts.
        # So make another cutout (larger)!
        from astropy.convolution import convolve, Box2DKernel
        from .image import extract_obj, seg_remove_cen_obj
        from sep import Background
        # Light smoothing before detection to suppress pixel noise.
        img_blur = convolve(abs(self.image), Box2DKernel(2))
        img_objects, img_segmap = extract_obj(abs(img_blur), b=10, f=4, sigma=sigma, minarea=2, pixel_scale=self.pixel_scale,
                                              deblend_nthresh=32, deblend_cont=deblend_cont,
                                              sky_subtract=False, show_fig=False, verbose=False)
        # Measure the global background on pixels outside any detected object.
        bk = Background(self.image, img_segmap != 0)
        glbbck = bk.globalback
        self.globalback = glbbck
        if verbose:
            print('# Global background: ', glbbck)
        # In-place subtraction via the ``image`` property setter.
        self.image -= glbbck
    def get_masked_image(self, cval=np.nan):
        """
        Return a copy of the image with masked pixels replaced by ``cval``.

        Parameters:
            cval: value to fill the masked pixels. Default is NaN, but
                sometimes NaN is problematic.

        Returns:
            imgcp (numpy 2-D array): masked image (a copy; ``self.image`` is
            left untouched).
        """
        if not hasattr(self, 'mask'):
            print("This ``Star`` object doesn't have a ``mask``!")
            return self.image
        else:
            imgcp = copy.copy(self.image)
            imgcp[self.mask.astype(bool)] = cval
            return imgcp
    def mask_out_contam(self, sigma=4.5, deblend_cont=0.0001, blowup=True,
                        show_fig=True, verbose=True):
        """
        Mask out contamination in the cutout of the star. Contamination may be
        stars, galaxies or artifacts. This function uses ``sep`` to identify
        and mask contamination, storing the result in ``self.mask``.
        ** DO THIS AFTER CENTERIZING! **

        Parameters:
            sigma (float): the detection threshold passed to ``extract_obj``.
                Default is 4.5.
            deblend_cont (float): deblending parameter. Default is 0.0001.
            blowup (bool): whether to blow up the segmentation mask by
                convolving with a 1.5-pixel Gaussian kernel.
            show_fig (bool): whether to show the figure.
            verbose (bool): whether to print out results.

        Returns:
            None
        """
        from astropy.convolution import convolve, Box2DKernel
        from .utils import extract_obj, seg_remove_cen_obj
        # Light smoothing before detection to suppress pixel noise.
        img_blur = convolve(abs(self.image), Box2DKernel(2))
        img_objects, img_segmap = extract_obj(abs(img_blur), b=10, f=3, sigma=sigma, minarea=1, pixel_scale=self.pixel_scale,
                                              deblend_nthresh=72, deblend_cont=deblend_cont, flux_aper=None,
                                              sky_subtract=True, show_fig=show_fig, verbose=verbose)
        # remove central object from segmap
        # NOTE(review): ``img_segmap[shape[1]//2, shape[0]//2]`` indexes rows
        # with the x-size and columns with the y-size — for a non-square cutout
        # this looks swapped; confirm. ``cen_obj`` is also unused below.
        cen_obj = img_objects[img_segmap[img_segmap.shape[1]//2, img_segmap.shape[0]//2] - 1]
        img_segmap = seg_remove_cen_obj(img_segmap)
        detect_mask = (img_segmap != 0).astype(float)
        if blowup is True:
            # Grow the mask slightly so faint object wings are covered too.
            from astropy.convolution import convolve, Gaussian2DKernel
            cv = convolve(1e3 * detect_mask / np.nansum(detect_mask), Gaussian2DKernel(1.5))
            detect_mask = (cv > 0.5).astype(float)
        self.mask = detect_mask
        #imgcp = copy.copy(self.image)
        #imgcp[detect_mask.astype(bool)] = cval
        #self.image = imgcp
        # Shift mask will be very horrible!!! Hence we still don't use self.mask.
        # Instead we directly mask out on the image.
        return
| [
"sep.Background",
"copy.deepcopy",
"astropy.io.fits.open",
"copy.copy",
"os.path.islink",
"scipy.ndimage.zoom",
"scipy.ndimage.interpolation.shift",
"os.unlink",
"astropy.convolution.Box2DKernel",
"astropy.io.fits.PrimaryHDU",
"numpy.nansum",
"math.ceil",
"galsim.interpolant.Lanczos",
"ast... | [((1835, 1870), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['img'], {'header': 'header'}), '(img, header=header)\n', (1850, 1870), False, 'from astropy.io import fits\n'), ((2106, 2121), 'astropy.wcs.WCS', 'wcs.WCS', (['header'], {}), '(header)\n', (2113, 2121), False, 'from astropy import wcs\n'), ((4486, 4531), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['data_use'], {'header': 'self.header'}), '(data_use, header=self.header)\n', (4501, 4531), False, 'from astropy.io import fits\n'), ((4544, 4574), 'os.path.islink', 'os.path.islink', (['fits_file_name'], {}), '(fits_file_name)\n', (4558, 4574), False, 'import os\n'), ((14608, 14634), 'copy.deepcopy', 'copy.deepcopy', (['self.header'], {}), '(self.header)\n', (14621, 14634), False, 'import copy\n'), ((14647, 14659), 'astropy.wcs.WCS', 'wcs.WCS', (['hdr'], {}), '(hdr)\n', (14654, 14659), False, 'from astropy import wcs\n'), ((38272, 38318), 'numpy.zeros', 'np.zeros', (['(ny + 2 * padsize, nx + 2 * padsize)'], {}), '((ny + 2 * padsize, nx + 2 * padsize))\n', (38280, 38318), True, 'import numpy as np\n'), ((41657, 41696), 'sep.Background', 'Background', (['self.image', '(img_segmap != 0)'], {}), '(self.image, img_segmap != 0)\n', (41667, 41696), False, 'from sep import Background\n'), ((2718, 2769), 'numpy.append', 'np.append', (['self.ra_bounds[2:]', 'self.dec_bounds[1:3]'], {}), '(self.ra_bounds[2:], self.dec_bounds[1:3])\n', (2727, 2769), True, 'import numpy as np\n'), ((2787, 2854), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'self.sky_bounds[0]', 'dec': 'self.sky_bounds[2]', 'unit': '"""deg"""'}), "(ra=self.sky_bounds[0], dec=self.sky_bounds[2], unit='deg')\n", (2795, 2854), False, 'from astropy.coordinates import SkyCoord\n'), ((2872, 2939), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'self.sky_bounds[1]', 'dec': 'self.sky_bounds[3]', 'unit': '"""deg"""'}), "(ra=self.sky_bounds[1], dec=self.sky_bounds[3], unit='deg')\n", (2880, 2939), False, 'from 
astropy.coordinates import SkyCoord\n'), ((4588, 4613), 'os.unlink', 'os.unlink', (['fits_file_name'], {}), '(fits_file_name)\n', (4597, 4613), False, 'import os\n'), ((7348, 7374), 'copy.deepcopy', 'copy.deepcopy', (['self.header'], {}), '(self.header)\n', (7361, 7374), False, 'import copy\n'), ((7532, 7552), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (7539, 7552), False, 'from astropy import wcs\n'), ((11591, 11617), 'copy.deepcopy', 'copy.deepcopy', (['self.header'], {}), '(self.header)\n', (11604, 11617), False, 'import copy\n'), ((11775, 11795), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (11782, 11795), False, 'from astropy import wcs\n'), ((39056, 39102), 'numpy.zeros', 'np.zeros', (['(ny + 2 * padsize, nx + 2 * padsize)'], {}), '((ny + 2 * padsize, nx + 2 * padsize))\n', (39064, 39102), True, 'import numpy as np\n'), ((39513, 39559), 'numpy.zeros', 'np.zeros', (['(ny + 2 * padsize, nx + 2 * padsize)'], {}), '((ny + 2 * padsize, nx + 2 * padsize))\n', (39521, 39559), True, 'import numpy as np\n'), ((41307, 41321), 'astropy.convolution.Box2DKernel', 'Box2DKernel', (['(2)'], {}), '(2)\n', (41318, 41321), False, 'from astropy.convolution import convolve, Box2DKernel\n'), ((42332, 42353), 'copy.copy', 'copy.copy', (['self.image'], {}), '(self.image)\n', (42341, 42353), False, 'import copy\n'), ((43388, 43402), 'astropy.convolution.Box2DKernel', 'Box2DKernel', (['(2)'], {}), '(2)\n', (43399, 43402), False, 'from astropy.convolution import convolve, Box2DKernel\n'), ((8185, 8205), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (8192, 8205), False, 'from astropy import wcs\n'), ((12426, 12446), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (12433, 12446), False, 'from astropy import wcs\n'), ((18876, 18896), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (18883, 18896), False, 'from astropy import wcs\n'), ((19313, 19333), 
'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (19320, 19333), False, 'from astropy import wcs\n'), ((19516, 19528), 'math.ceil', 'ceil', (['(nx / b)'], {}), '(nx / b)\n', (19520, 19528), False, 'from math import ceil\n'), ((19553, 19565), 'math.ceil', 'ceil', (['(ny / b)'], {}), '(ny / b)\n', (19557, 19565), False, 'from math import ceil\n'), ((20148, 20168), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (20155, 20168), False, 'from astropy import wcs\n'), ((20581, 20601), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (20588, 20601), False, 'from astropy import wcs\n'), ((21592, 21612), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (21599, 21612), False, 'from astropy import wcs\n'), ((26752, 26772), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (26759, 26772), False, 'from astropy import wcs\n'), ((27188, 27208), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (27195, 27208), False, 'from astropy import wcs\n'), ((27390, 27402), 'math.ceil', 'ceil', (['(nx / b)'], {}), '(nx / b)\n', (27394, 27402), False, 'from math import ceil\n'), ((27427, 27439), 'math.ceil', 'ceil', (['(ny / b)'], {}), '(ny / b)\n', (27431, 27439), False, 'from math import ceil\n'), ((28032, 28052), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (28039, 28052), False, 'from astropy import wcs\n'), ((28465, 28485), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (28472, 28485), False, 'from astropy import wcs\n'), ((29304, 29324), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (29311, 29324), False, 'from astropy import wcs\n'), ((44157, 44178), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['(1.5)'], {}), '(1.5)\n', (44173, 44178), False, 'from astropy.convolution import convolve, Gaussian2DKernel\n'), ((6757, 6787), 'galsim.Image', 'Image', 
(['self.image'], {'dtype': 'float'}), '(self.image, dtype=float)\n', (6762, 6787), False, 'from galsim import Image, InterpolatedImage\n'), ((6944, 6974), 'galsim.Image', 'Image', (['self.image'], {'dtype': 'float'}), '(self.image, dtype=float)\n', (6949, 6974), False, 'from galsim import Image, InterpolatedImage\n'), ((7824, 7855), 'astropy.io.fits.open', 'fits.open', (['"""./_shift_temp.fits"""'], {}), "('./_shift_temp.fits')\n", (7833, 7855), False, 'from astropy.io import fits\n'), ((8518, 8586), 'scipy.ndimage.interpolation.shift', 'shift', (['self.image', '[dy, dx]'], {'order': 'order', 'mode': '"""constant"""', 'cval': 'cval'}), "(self.image, [dy, dx], order=order, mode='constant', cval=cval)\n", (8523, 8586), False, 'from scipy.ndimage.interpolation import shift\n'), ((8676, 8702), 'copy.deepcopy', 'copy.deepcopy', (['self.header'], {}), '(self.header)\n', (8689, 8702), False, 'import copy\n'), ((8860, 8880), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (8867, 8880), False, 'from astropy import wcs\n'), ((11001, 11031), 'galsim.Image', 'Image', (['self.image'], {'dtype': 'float'}), '(self.image, dtype=float)\n', (11006, 11031), False, 'from galsim import Image, InterpolatedImage\n'), ((11188, 11218), 'galsim.Image', 'Image', (['self.image'], {'dtype': 'float'}), '(self.image, dtype=float)\n', (11193, 11218), False, 'from galsim import Image, InterpolatedImage\n'), ((12066, 12097), 'astropy.io.fits.open', 'fits.open', (['"""./_shift_temp.fits"""'], {}), "('./_shift_temp.fits')\n", (12075, 12097), False, 'from astropy.io import fits\n'), ((12758, 12825), 'scipy.ndimage.interpolation.shift', 'shift', (['self.mask', '[dy, dx]'], {'order': 'order', 'mode': '"""constant"""', 'cval': 'cval'}), "(self.mask, [dy, dx], order=order, mode='constant', cval=cval)\n", (12763, 12825), False, 'from scipy.ndimage.interpolation import shift\n'), ((12914, 12940), 'copy.deepcopy', 'copy.deepcopy', (['self.header'], {}), '(self.header)\n', (12927, 
12940), False, 'import copy\n'), ((13098, 13118), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (13105, 13118), False, 'from astropy import wcs\n'), ((14450, 14467), 'numpy.sum', 'np.sum', (['self.mask'], {}), '(self.mask)\n', (14456, 14467), True, 'import numpy as np\n'), ((17908, 17938), 'galsim.Image', 'Image', (['self.image'], {'dtype': 'float'}), '(self.image, dtype=float)\n', (17913, 17938), False, 'from galsim import Image, InterpolatedImage\n'), ((18099, 18129), 'galsim.Image', 'Image', (['self.image'], {'dtype': 'float'}), '(self.image, dtype=float)\n', (18104, 18129), False, 'from galsim import Image, InterpolatedImage\n'), ((21060, 21092), 'astropy.io.fits.open', 'fits.open', (['"""./_resize_temp.fits"""'], {}), "('./_resize_temp.fits')\n", (21069, 21092), False, 'from astropy.io import fits\n'), ((25786, 25815), 'galsim.Image', 'Image', (['self.mask'], {'dtype': 'float'}), '(self.mask, dtype=float)\n', (25791, 25815), False, 'from galsim import Image, InterpolatedImage\n'), ((25972, 26001), 'galsim.Image', 'Image', (['self.mask'], {'dtype': 'float'}), '(self.mask, dtype=float)\n', (25977, 26001), False, 'from galsim import Image, InterpolatedImage\n'), ((28943, 28975), 'astropy.io.fits.open', 'fits.open', (['"""./_resize_temp.fits"""'], {}), "('./_resize_temp.fits')\n", (28952, 28975), False, 'from astropy.io import fits\n'), ((44133, 44155), 'numpy.nansum', 'np.nansum', (['detect_mask'], {}), '(detect_mask)\n', (44142, 44155), True, 'import numpy as np\n'), ((6867, 6881), 'galsim.interpolant.Lanczos', 'Lanczos', (['order'], {}), '(order)\n', (6874, 6881), False, 'from galsim.interpolant import Lanczos\n'), ((11111, 11125), 'galsim.interpolant.Lanczos', 'Lanczos', (['order'], {}), '(order)\n', (11118, 11125), False, 'from galsim.interpolant import Lanczos\n'), ((18022, 18036), 'galsim.interpolant.Lanczos', 'Lanczos', (['order'], {}), '(order)\n', (18029, 18036), False, 'from galsim.interpolant import Lanczos\n'), ((22104, 
22148), 'scipy.ndimage.zoom', 'ndimage.zoom', (['self.image', 'f_eff'], {'order': 'order'}), '(self.image, f_eff, order=order)\n', (22116, 22148), False, 'from scipy import ndimage\n'), ((22660, 22680), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (22667, 22680), False, 'from astropy import wcs\n'), ((23097, 23117), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (23104, 23117), False, 'from astropy import wcs\n'), ((23964, 23984), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (23971, 23984), False, 'from astropy import wcs\n'), ((25895, 25909), 'galsim.interpolant.Lanczos', 'Lanczos', (['order'], {}), '(order)\n', (25902, 25909), False, 'from galsim.interpolant import Lanczos\n'), ((29806, 29850), 'scipy.ndimage.zoom', 'ndimage.zoom', (['self.image', 'f_eff'], {'order': 'order'}), '(self.image, f_eff, order=order)\n', (29818, 29850), False, 'from scipy import ndimage\n'), ((30369, 30389), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (30376, 30389), False, 'from astropy import wcs\n'), ((30806, 30826), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (30813, 30826), False, 'from astropy import wcs\n'), ((31683, 31703), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (31690, 31703), False, 'from astropy import wcs\n')] |
import pandas as pd
import numpy as np
import pdb
import sys
import os
from sklearn.ensemble import GradientBoostingRegressor
from joblib import dump, load
import re
##################################################################3
# (Sept 2020 - Jared) - PG-MTL training script on 145 source lake
# Features and hyperparamters must be manually specified below
# (e.g. feats = ['dif_max_depth', ....]; n_estimators = 5500, etc)
####################################################################3
# Train a GradientBoostingRegressor metamodel that predicts PGDL transfer
# RMSE for a (source, target) lake pair from metadata differences, then
# persist the fitted model with joblib.

# file to save model to
save_file_path = '../../models/metamodel_pgdl_RMSE_GBR.joblib'
#########################################################################################
# paste features found in "pbmtl_feature_selection.py" here
feats = ['n_obs_sp', 'n_obs_su', 'dif_max_depth', 'dif_surface_area',
         'dif_glm_strat_perc', 'perc_dif_max_depth', 'perc_dif_surface_area',
         'perc_dif_sqrt_surface_area']
###################################################################################
# paste hyperparameters found in "pbmtl_hyperparameter_search.py" here
n_estimators = 5500
lr = .05
#####################################################################
ids = pd.read_csv('../../metadata/pball_site_ids.csv', header=None)
ids = ids[0].values

# Every lake with a GLM transfer result is a training target.  Compute the
# unique prefixed ids once and strip the 'nhdhr_' prefix from them, instead
# of running np.unique twice as before.
glm_all_f = pd.read_csv("../../results/glm_transfer/RMSE_transfer_glm_pball.csv")
train_lakes_wp = np.unique(glm_all_f['target_id'].values)  # with 'nhdhr_' prefix
train_lakes = [re.search('nhdhr_(.*)', x).group(1) for x in train_lakes_wp]

# compile training data: one merged frame per target lake, concatenated once
# at the end (concatenating inside the loop is O(n^2) in total copying)
frames = []
for lake_id in train_lakes:
    # get performance results (metatargets); the raw result files repeat
    # their header row, so drop the literal 'source_id' entries
    lake_df_res = pd.read_csv(
        "../../results/transfer_learning/target_" + lake_id + "/resultsPGRNNbasic_pball",
        header=None, names=['source_id', 'rmse'])
    lake_df_res = lake_df_res[lake_df_res.source_id != 'source_id']

    # get metadata differences between target and all the sources,
    # restricted to lakes that are themselves in the training pool
    lake_df = pd.read_feather("../../metadata/diffs/target_nhdhr_" + lake_id + ".feather")
    lake_df = lake_df[np.isin(lake_df['site_id'], train_lakes_wp)]
    lake_df_res = lake_df_res[np.isin(lake_df_res['source_id'], train_lakes)]
    lake_df_res['source_id2'] = ['nhdhr_' + str(x) for x in lake_df_res['source_id'].values]
    frames.append(pd.merge(left=lake_df, right=lake_df_res.astype('object'),
                           left_on='site_id', right_on='source_id2'))
train_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

# train model; 'rmse' comes in as object/str because of the header filtering
# above, so cast explicitly to float
X_trn = pd.DataFrame(train_df[feats])
y_trn = train_df['rmse'].values.astype(float)
model = GradientBoostingRegressor(n_estimators=n_estimators, learning_rate=lr)
print("Training metamodel...")
model.fit(X_trn, y_trn)
dump(model, save_file_path)
print("Training Complete, saved to ", save_file_path)
| [
"pandas.read_feather",
"numpy.unique",
"pandas.read_csv",
"numpy.isin",
"pandas.concat",
"pandas.DataFrame",
"sklearn.ensemble.GradientBoostingRegressor",
"joblib.dump",
"re.search"
] | [((1261, 1322), 'pandas.read_csv', 'pd.read_csv', (['"""../../metadata/pball_site_ids.csv"""'], {'header': 'None'}), "('../../metadata/pball_site_ids.csv', header=None)\n", (1272, 1322), True, 'import pandas as pd\n'), ((1355, 1424), 'pandas.read_csv', 'pd.read_csv', (['"""../../results/glm_transfer/RMSE_transfer_glm_pball.csv"""'], {}), "('../../results/glm_transfer/RMSE_transfer_glm_pball.csv')\n", (1366, 1424), True, 'import pandas as pd\n'), ((1544, 1584), 'numpy.unique', 'np.unique', (["glm_all_f['target_id'].values"], {}), "(glm_all_f['target_id'].values)\n", (1553, 1584), True, 'import numpy as np\n'), ((1635, 1649), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1647, 1649), True, 'import pandas as pd\n'), ((2621, 2650), 'pandas.DataFrame', 'pd.DataFrame', (['train_df[feats]'], {}), '(train_df[feats])\n', (2633, 2650), True, 'import pandas as pd\n'), ((2738, 2808), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'n_estimators': 'n_estimators', 'learning_rate': 'lr'}), '(n_estimators=n_estimators, learning_rate=lr)\n', (2763, 2808), False, 'from sklearn.ensemble import GradientBoostingRegressor\n'), ((2864, 2891), 'joblib.dump', 'dump', (['model', 'save_file_path'], {}), '(model, save_file_path)\n', (2868, 2891), False, 'from joblib import dump, load\n'), ((1705, 1719), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1717, 1719), True, 'import pandas as pd\n'), ((1811, 1950), 'pandas.read_csv', 'pd.read_csv', (["('../../results/transfer_learning/target_' + lake_id +\n '/resultsPGRNNbasic_pball')"], {'header': 'None', 'names': "['source_id', 'rmse']"}), "('../../results/transfer_learning/target_' + lake_id +\n '/resultsPGRNNbasic_pball', header=None, names=['source_id', 'rmse'])\n", (1822, 1950), True, 'import pandas as pd\n'), ((2088, 2164), 'pandas.read_feather', 'pd.read_feather', (["('../../metadata/diffs/target_nhdhr_' + lake_id + '.feather')"], {}), "('../../metadata/diffs/target_nhdhr_' + lake_id + 
'.feather')\n", (2103, 2164), True, 'import pandas as pd\n'), ((2548, 2596), 'pandas.concat', 'pd.concat', (['[train_df, new_df]'], {'ignore_index': '(True)'}), '([train_df, new_df], ignore_index=True)\n', (2557, 2596), True, 'import pandas as pd\n'), ((1485, 1525), 'numpy.unique', 'np.unique', (["glm_all_f['target_id'].values"], {}), "(glm_all_f['target_id'].values)\n", (1494, 1525), True, 'import numpy as np\n'), ((2183, 2226), 'numpy.isin', 'np.isin', (["lake_df['site_id']", 'train_lakes_wp'], {}), "(lake_df['site_id'], train_lakes_wp)\n", (2190, 2226), True, 'import numpy as np\n'), ((2258, 2304), 'numpy.isin', 'np.isin', (["lake_df_res['source_id']", 'train_lakes'], {}), "(lake_df_res['source_id'], train_lakes)\n", (2265, 2304), True, 'import numpy as np\n'), ((1440, 1466), 're.search', 're.search', (['"""nhdhr_(.*)"""', 'x'], {}), "('nhdhr_(.*)', x)\n", (1449, 1466), False, 'import re\n'), ((2696, 2726), 'pandas.DataFrame', 'pd.DataFrame', (["train_df['rmse']"], {}), "(train_df['rmse'])\n", (2708, 2726), True, 'import pandas as pd\n')] |
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import mars.dataframe as md
from mars.tensor.core import CHUNK_TYPE as TENSOR_CHUNK_TYPE
from mars.tests.core import TestBase
from mars.dataframe.core import SERIES_CHUNK_TYPE, Series, DataFrame, DATAFRAME_CHUNK_TYPE
from mars.dataframe.indexing.iloc import DataFrameIlocGetItem, DataFrameIlocSetItem
class Test(TestBase):
    """Graph-construction tests for Mars DataFrame indexing.

    Each test builds a Mars DataFrame/Series over a small pandas object,
    triggers tiling via ``.tiles()``, and then inspects the resulting chunk
    graph: chunk grid shape, per-chunk shapes/indexes, operand types and
    operand parameters.  Nothing is actually executed.
    """
    def testSetIndex(self):
        """set_index re-tiles the columns: the chosen index column is removed
        from (drop=True) or kept in (drop=False) the column chunks."""
        # 3x3 frame with chunk_size=2 -> 2x2 chunk grid
        df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                           index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
        df2 = md.DataFrame(df1, chunk_size=2)
        # drop=True: column 'y' becomes the index, so the first column chunk
        # holds only 'x'
        df3 = df2.set_index('y', drop=True)
        df3.tiles()
        self.assertEqual(df3.chunk_shape, (2, 2))
        pd.testing.assert_index_equal(df3.chunks[0].columns.to_pandas(), pd.Index(['x']))
        pd.testing.assert_index_equal(df3.chunks[1].columns.to_pandas(), pd.Index(['z']))
        # drop=False: 'y' also stays as a data column next to 'x'
        df4 = df2.set_index('y', drop=False)
        df4.tiles()
        self.assertEqual(df4.chunk_shape, (2, 2))
        pd.testing.assert_index_equal(df4.chunks[0].columns.to_pandas(), pd.Index(['x', 'y']))
        pd.testing.assert_index_equal(df4.chunks[1].columns.to_pandas(), pd.Index(['z']))
    def testILocGetItem(self):
        """iloc read access: scalar row, column slice, fancy indexes and a
        single scalar cell, checked against the 2x2 chunk grid."""
        df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                           index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
        df2 = md.DataFrame(df1, chunk_size=2)
        # plain index: df.iloc[1] yields a Series spread over the two column
        # chunks of the first chunk row
        df3 = df2.iloc[1]
        df3.tiles()
        self.assertIsInstance(df3, Series)
        self.assertIsInstance(df3.op, DataFrameIlocGetItem)
        self.assertEqual(df3.shape, (3,))
        self.assertEqual(df3.chunk_shape, (2,))
        self.assertEqual(df3.chunks[0].shape, (2,))
        self.assertEqual(df3.chunks[1].shape, (1,))
        self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
        self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))
        self.assertEqual(df3.chunks[0].inputs[0].index, (0, 0))
        self.assertEqual(df3.chunks[0].inputs[0].shape, (2, 2))
        self.assertEqual(df3.chunks[1].inputs[0].index, (0, 1))
        self.assertEqual(df3.chunks[1].inputs[0].shape, (2, 1))
        # slice index: only the last column chunk column (index 1) is touched
        df4 = df2.iloc[:, 2:4]
        df4.tiles()
        self.assertIsInstance(df4, DataFrame)
        self.assertIsInstance(df4.op, DataFrameIlocGetItem)
        self.assertEqual(df4.shape, (3, 1))
        self.assertEqual(df4.chunk_shape, (2, 1))
        self.assertEqual(df4.chunks[0].shape, (2, 1))
        self.assertEqual(df4.chunks[1].shape, (1, 1))
        self.assertEqual(df4.chunks[0].op.indexes, (slice(None, None, None), slice(None, None, None)))
        self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
        self.assertEqual(df4.chunks[0].inputs[0].index, (0, 1))
        self.assertEqual(df4.chunks[0].inputs[0].shape, (2, 1))
        self.assertEqual(df4.chunks[1].inputs[0].index, (1, 1))
        self.assertEqual(df4.chunks[1].inputs[0].shape, (1, 1))
        # plain fancy index: single row, all columns -> chunk row 0 only,
        # fancy positions re-expressed relative to each chunk
        df5 = df2.iloc[[0], [0, 1, 2]]
        df5.tiles()
        self.assertIsInstance(df5, DataFrame)
        self.assertIsInstance(df5.op, DataFrameIlocGetItem)
        self.assertEqual(df5.shape, (1, 3))
        self.assertEqual(df5.chunk_shape, (1, 2))
        self.assertEqual(df5.chunks[0].shape, (1, 2))
        self.assertEqual(df5.chunks[1].shape, (1, 1))
        np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
        np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
        np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
        np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
        self.assertEqual(df5.chunks[0].inputs[0].index, (0, 0))
        self.assertEqual(df5.chunks[0].inputs[0].shape, (2, 2))
        self.assertEqual(df5.chunks[1].inputs[0].index, (0, 1))
        self.assertEqual(df5.chunks[1].inputs[0].shape, (2, 1))
        # fancy index: rows 1 and 2 straddle both chunk rows, so all four
        # input chunks contribute and the per-chunk indexes are localized
        df6 = df2.iloc[[1, 2], [0, 1, 2]]
        df6.tiles()
        self.assertIsInstance(df6, DataFrame)
        self.assertIsInstance(df6.op, DataFrameIlocGetItem)
        self.assertEqual(df6.shape, (2, 3))
        self.assertEqual(df6.chunk_shape, (2, 2))
        self.assertEqual(df6.chunks[0].shape, (1, 2))
        self.assertEqual(df6.chunks[1].shape, (1, 1))
        self.assertEqual(df6.chunks[2].shape, (1, 2))
        self.assertEqual(df6.chunks[3].shape, (1, 1))
        np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
        np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
        np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
        np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
        np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
        np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
        np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
        np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
        self.assertEqual(df6.chunks[0].inputs[0].index, (0, 0))
        self.assertEqual(df6.chunks[0].inputs[0].shape, (2, 2))
        self.assertEqual(df6.chunks[1].inputs[0].index, (0, 1))
        self.assertEqual(df6.chunks[1].inputs[0].shape, (2, 1))
        self.assertEqual(df6.chunks[2].inputs[0].index, (1, 0))
        self.assertEqual(df6.chunks[2].inputs[0].shape, (1, 2))
        self.assertEqual(df6.chunks[3].inputs[0].index, (1, 1))
        self.assertEqual(df6.chunks[3].inputs[0].shape, (1, 1))
        # plain index: a single scalar cell -> 0-d result from one chunk
        df7 = df2.iloc[1, 2]
        df7.tiles()
        self.assertIsInstance(df7, Series)
        self.assertIsInstance(df7.op, DataFrameIlocGetItem)
        self.assertEqual(df7.shape, ())
        self.assertEqual(df7.chunk_shape, ())
        self.assertEqual(df7.chunks[0].dtype, df7.dtype)
        self.assertEqual(df7.chunks[0].shape, ())
        self.assertEqual(df7.chunks[0].op.indexes, (1, 0))
        self.assertEqual(df7.chunks[0].inputs[0].index, (0, 1))
        self.assertEqual(df7.chunks[0].inputs[0].shape, (2, 1))
    def testILocSetItem(self):
        """iloc write access: each assignment builds a new graph where only
        the chunks actually written get a DataFrameIlocSetItem op; untouched
        chunks are shared (same key) with the original tiling."""
        df1 = pd.DataFrame([[1,3,3], [4,2,6], [7, 8, 9]],
                           index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
        # df2 is the reference tiling every written variant is compared to
        df2 = md.DataFrame(df1, chunk_size=2)
        df2.tiles()
        # plain index
        df3 = md.DataFrame(df1, chunk_size=2)
        df3.iloc[1] = 100
        df3.tiles()
        self.assertIsInstance(df3.op, DataFrameIlocSetItem)
        self.assertEqual(df3.chunk_shape, df2.chunk_shape)
        pd.testing.assert_index_equal(df2.index_value.to_pandas(), df3.index_value.to_pandas())
        pd.testing.assert_index_equal(df2.columns.to_pandas(), df3.columns.to_pandas())
        # written chunks wrap the original chunk as input; unwritten chunks
        # are reused unchanged (identical key)
        for c1, c2 in zip(df2.chunks, df3.chunks):
            self.assertEqual(c1.shape, c2.shape)
            pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
            pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
            if isinstance(c2.op, DataFrameIlocSetItem):
                self.assertEqual(c1.key, c2.inputs[0].key)
            else:
                self.assertEqual(c1.key, c2.key)
        self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
        self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))
        # # slice index
        df4 = md.DataFrame(df1, chunk_size=2)
        df4.iloc[:, 2:4] = 1111
        df4.tiles()
        self.assertIsInstance(df4.op, DataFrameIlocSetItem)
        self.assertEqual(df4.chunk_shape, df2.chunk_shape)
        pd.testing.assert_index_equal(df2.index_value.to_pandas(), df4.index_value.to_pandas())
        pd.testing.assert_index_equal(df2.columns.to_pandas(), df4.columns.to_pandas())
        for c1, c2 in zip(df2.chunks, df4.chunks):
            self.assertEqual(c1.shape, c2.shape)
            pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
            pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
            if isinstance(c2.op, DataFrameIlocSetItem):
                self.assertEqual(c1.key, c2.inputs[0].key)
            else:
                self.assertEqual(c1.key, c2.key)
        # only the right-hand column chunks (indexes 1 and 3) are written
        self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
        self.assertEqual(df4.chunks[3].op.indexes, (slice(None, None, None), slice(None, None, None)))
        # plain fancy index
        df5 = md.DataFrame(df1, chunk_size=2)
        df5.iloc[[0], [0, 1, 2]] = 2222
        df5.tiles()
        self.assertIsInstance(df5.op, DataFrameIlocSetItem)
        self.assertEqual(df5.chunk_shape, df2.chunk_shape)
        pd.testing.assert_index_equal(df2.index_value.to_pandas(), df5.index_value.to_pandas())
        pd.testing.assert_index_equal(df2.columns.to_pandas(), df5.columns.to_pandas())
        for c1, c2 in zip(df2.chunks, df5.chunks):
            self.assertEqual(c1.shape, c2.shape)
            pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
            pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
            if isinstance(c2.op, DataFrameIlocSetItem):
                self.assertEqual(c1.key, c2.inputs[0].key)
            else:
                self.assertEqual(c1.key, c2.key)
        np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
        np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
        np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
        np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
        # fancy index
        df6 = md.DataFrame(df1, chunk_size=2)
        df6.iloc[[1, 2], [0, 1, 2]] = 3333
        df6.tiles()
        self.assertIsInstance(df6.op, DataFrameIlocSetItem)
        self.assertEqual(df6.chunk_shape, df2.chunk_shape)
        pd.testing.assert_index_equal(df2.index_value.to_pandas(), df6.index_value.to_pandas())
        pd.testing.assert_index_equal(df2.columns.to_pandas(), df6.columns.to_pandas())
        for c1, c2 in zip(df2.chunks, df6.chunks):
            self.assertEqual(c1.shape, c2.shape)
            pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
            pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
            if isinstance(c2.op, DataFrameIlocSetItem):
                self.assertEqual(c1.key, c2.inputs[0].key)
            else:
                self.assertEqual(c1.key, c2.key)
        np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
        np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
        np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
        np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
        np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
        np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
        np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
        np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
        # plain index
        df7 = md.DataFrame(df1, chunk_size=2)
        df7.iloc[1, 2] = 4444
        df7.tiles()
        self.assertIsInstance(df7.op, DataFrameIlocSetItem)
        self.assertEqual(df7.chunk_shape, df2.chunk_shape)
        pd.testing.assert_index_equal(df2.index_value.to_pandas(), df7.index_value.to_pandas())
        pd.testing.assert_index_equal(df2.columns.to_pandas(), df7.columns.to_pandas())
        for c1, c2 in zip(df2.chunks, df7.chunks):
            self.assertEqual(c1.shape, c2.shape)
            pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
            pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
            if isinstance(c2.op, DataFrameIlocSetItem):
                self.assertEqual(c1.key, c2.inputs[0].key)
            else:
                self.assertEqual(c1.key, c2.key)
        self.assertEqual(df7.chunks[1].op.indexes, (1, 0))
    def testDataFrameGetitem(self):
        """__getitem__ with a column name yields a Series; with a list of
        names, a narrower DataFrame, both tiled along the original rows."""
        data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
        df = md.DataFrame(data, chunk_size=2)
        # single-column access -> Series, one chunk per row-chunk
        series = df['c3']
        self.assertIsInstance(series, Series)
        self.assertEqual(series.shape, (10,))
        self.assertEqual(series.name, 'c3')
        self.assertEqual(series.dtype, data['c3'].dtype)
        self.assertEqual(series.index_value, df.index_value)
        series.tiles()
        self.assertEqual(series.nsplits, ((2, 2, 2, 2, 2),))
        self.assertEqual(len(series.chunks), 5)
        for i, c in enumerate(series.chunks):
            self.assertIsInstance(c, SERIES_CHUNK_TYPE)
            self.assertEqual(c.index, (i,))
            self.assertEqual(c.shape, (2,))
        # multi-column access -> DataFrame with a 5x2 chunk grid
        # (column chunks of width 2 and 1)
        df1 = df[['c1', 'c2', 'c3']]
        self.assertIsInstance(df1, DataFrame)
        self.assertEqual(df1.shape, (10, 3))
        self.assertEqual(df1.index_value, df.index_value)
        pd.testing.assert_index_equal(df1.columns.to_pandas(), data[['c1', 'c2', 'c3']].columns)
        pd.testing.assert_series_equal(df1.dtypes, data[['c1', 'c2', 'c3']].dtypes)
        df1.tiles()
        self.assertEqual(df1.nsplits, ((2, 2, 2, 2, 2), (2, 1)))
        self.assertEqual(len(df1.chunks), 10)
        # even positions are the first column chunk, odd the second
        for i, c in enumerate(df1.chunks[slice(0, 10, 2)]):
            self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
            self.assertEqual(c.index, (i, 0))
            self.assertEqual(c.shape, (2, 2))
        for i, c in enumerate(df1.chunks[slice(1, 10, 2)]):
            self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
            self.assertEqual(c.index, (i, 1))
            self.assertEqual(c.shape, (2, 1))
    def testDataFrameGetitemBool(self):
        """Boolean-mask selection: the result index is unknown ahead of
        execution, so it must get a fresh index_value key, while the columns
        object is shared with the source frame; the same mask must produce
        the same index key."""
        data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
        df = md.DataFrame(data, chunk_size=2)
        mask_data1 = data.c1 > 0.5
        mask_data2 = data.c1 < 0.5
        mask1 = md.Series(mask_data1, chunk_size=2)
        mask2 = md.Series(mask_data2, chunk_size=2)
        r1 = df[mask1]
        r2 = df[mask2]
        r3 = df[mask1]
        # new, data-dependent index; columns reused from df
        self.assertNotEqual(r1.index_value.key, df.index_value.key)
        self.assertNotEqual(r1.index_value.key, mask1.index_value.key)
        self.assertEqual(r1.columns.key, df.columns.key)
        self.assertIs(r1.columns, df.columns)
        # different masks -> different index keys, same columns
        self.assertNotEqual(r1.index_value.key, r2.index_value.key)
        self.assertEqual(r1.columns.key, r2.columns.key)
        self.assertIs(r1.columns, r2.columns)
        # identical mask -> identical index key (deterministic keying)
        self.assertEqual(r1.index_value.key, r3.index_value.key)
        self.assertEqual(r1.columns.key, r3.columns.key)
        self.assertIs(r1.columns, r3.columns)
    def testSeriesGetitem(self):
        """Series __getitem__ by position and by label, on default and
        string indexes, checking the label routing per chunk."""
        data = pd.Series(np.random.rand(10,), name='a')
        series = md.Series(data, chunk_size=3)
        # scalar positional access -> 0-d tensor chunk
        result1 = series[2]
        self.assertEqual(result1.shape, ())
        result1.tiles()
        self.assertEqual(result1.nsplits, ())
        self.assertEqual(len(result1.chunks), 1)
        self.assertIsInstance(result1.chunks[0], TENSOR_CHUNK_TYPE)
        self.assertEqual(result1.chunks[0].shape, ())
        self.assertEqual(result1.chunks[0].dtype, data.dtype)
        # list of labels spanning several chunks: each output chunk carries
        # only the labels that fall into it, in the requested order
        result2 = series[[4, 5, 1, 2, 3]]
        self.assertEqual(result2.shape, (5,))
        result2.tiles()
        self.assertEqual(result2.nsplits, ((2, 2, 1),))
        self.assertEqual(len(result2.chunks), 3)
        self.assertEqual(result2.chunks[0].op.labels, [4, 5])
        self.assertEqual(result2.chunks[1].op.labels, [1, 2])
        self.assertEqual(result2.chunks[2].op.labels, [3])
        # string index: label-based lookups
        data = pd.Series(np.random.rand(10), index=['i' + str(i) for i in range(10)])
        series = md.Series(data, chunk_size=3)
        result1 = series['i2']
        self.assertEqual(result1.shape, ())
        result1.tiles()
        self.assertEqual(result1.nsplits, ())
        self.assertEqual(result1.chunks[0].dtype, data.dtype)
        # NOTE(review): assertTrue(x, y) treats y as the failure message, so
        # this only checks truthiness of the labels — assertEqual was likely
        # intended here and below; confirm before changing test semantics
        self.assertTrue(result1.chunks[0].op.labels, ['i2'])
        result2 = series[['i2', 'i4']]
        self.assertEqual(result2.shape, (2,))
        result2.tiles()
        self.assertEqual(result2.nsplits, ((2,),))
        self.assertEqual(result2.chunks[0].dtype, data.dtype)
        # NOTE(review): same assertTrue-with-message pattern as above
        self.assertTrue(result2.chunks[0].op.labels, [['i2', 'i4']])
| [
"numpy.random.rand",
"pandas.Index",
"mars.dataframe.DataFrame",
"mars.dataframe.Series",
"pandas.DataFrame",
"pandas.testing.assert_series_equal",
"numpy.testing.assert_array_equal"
] | [((1004, 1106), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 3, 3], [4, 2, 6], [7, 8, 9]]'], {'index': "['a1', 'a2', 'a3']", 'columns': "['x', 'y', 'z']"}), "([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'],\n columns=['x', 'y', 'z'])\n", (1016, 1106), True, 'import pandas as pd\n'), ((1144, 1175), 'mars.dataframe.DataFrame', 'md.DataFrame', (['df1'], {'chunk_size': '(2)'}), '(df1, chunk_size=2)\n', (1156, 1175), True, 'import mars.dataframe as md\n'), ((1818, 1920), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 3, 3], [4, 2, 6], [7, 8, 9]]'], {'index': "['a1', 'a2', 'a3']", 'columns': "['x', 'y', 'z']"}), "([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'],\n columns=['x', 'y', 'z'])\n", (1830, 1920), True, 'import pandas as pd\n'), ((1958, 1989), 'mars.dataframe.DataFrame', 'md.DataFrame', (['df1'], {'chunk_size': '(2)'}), '(df1, chunk_size=2)\n', (1970, 1989), True, 'import mars.dataframe as md\n'), ((4022, 4085), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df5.chunks[0].op.indexes[0]', '[0]'], {}), '(df5.chunks[0].op.indexes[0], [0])\n', (4051, 4085), True, 'import numpy as np\n'), ((4094, 4160), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df5.chunks[0].op.indexes[1]', '[0, 1]'], {}), '(df5.chunks[0].op.indexes[1], [0, 1])\n', (4123, 4160), True, 'import numpy as np\n'), ((4169, 4232), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df5.chunks[1].op.indexes[0]', '[0]'], {}), '(df5.chunks[1].op.indexes[0], [0])\n', (4198, 4232), True, 'import numpy as np\n'), ((4241, 4304), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df5.chunks[1].op.indexes[1]', '[0]'], {}), '(df5.chunks[1].op.indexes[1], [0])\n', (4270, 4304), True, 'import numpy as np\n'), ((5070, 5133), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[0].op.indexes[0]', '[1]'], {}), '(df6.chunks[0].op.indexes[0], [1])\n', (5099, 5133), True, 
'import numpy as np\n'), ((5142, 5208), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[0].op.indexes[1]', '[0, 1]'], {}), '(df6.chunks[0].op.indexes[1], [0, 1])\n', (5171, 5208), True, 'import numpy as np\n'), ((5217, 5280), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[1].op.indexes[0]', '[1]'], {}), '(df6.chunks[1].op.indexes[0], [1])\n', (5246, 5280), True, 'import numpy as np\n'), ((5289, 5352), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[1].op.indexes[1]', '[0]'], {}), '(df6.chunks[1].op.indexes[1], [0])\n', (5318, 5352), True, 'import numpy as np\n'), ((5361, 5424), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[2].op.indexes[0]', '[0]'], {}), '(df6.chunks[2].op.indexes[0], [0])\n', (5390, 5424), True, 'import numpy as np\n'), ((5433, 5499), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[2].op.indexes[1]', '[0, 1]'], {}), '(df6.chunks[2].op.indexes[1], [0, 1])\n', (5462, 5499), True, 'import numpy as np\n'), ((5508, 5571), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[3].op.indexes[0]', '[0]'], {}), '(df6.chunks[3].op.indexes[0], [0])\n', (5537, 5571), True, 'import numpy as np\n'), ((5580, 5643), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[3].op.indexes[1]', '[0]'], {}), '(df6.chunks[3].op.indexes[1], [0])\n', (5609, 5643), True, 'import numpy as np\n'), ((6757, 6859), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 3, 3], [4, 2, 6], [7, 8, 9]]'], {'index': "['a1', 'a2', 'a3']", 'columns': "['x', 'y', 'z']"}), "([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'],\n columns=['x', 'y', 'z'])\n", (6769, 6859), True, 'import pandas as pd\n'), ((6894, 6925), 'mars.dataframe.DataFrame', 'md.DataFrame', (['df1'], {'chunk_size': '(2)'}), '(df1, chunk_size=2)\n', (6906, 6925), True, 'import mars.dataframe 
as md\n'), ((6983, 7014), 'mars.dataframe.DataFrame', 'md.DataFrame', (['df1'], {'chunk_size': '(2)'}), '(df1, chunk_size=2)\n', (6995, 7014), True, 'import mars.dataframe as md\n'), ((8035, 8066), 'mars.dataframe.DataFrame', 'md.DataFrame', (['df1'], {'chunk_size': '(2)'}), '(df1, chunk_size=2)\n', (8047, 8066), True, 'import mars.dataframe as md\n'), ((9141, 9172), 'mars.dataframe.DataFrame', 'md.DataFrame', (['df1'], {'chunk_size': '(2)'}), '(df1, chunk_size=2)\n', (9153, 9172), True, 'import mars.dataframe as md\n'), ((10014, 10077), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df5.chunks[0].op.indexes[0]', '[0]'], {}), '(df5.chunks[0].op.indexes[0], [0])\n', (10043, 10077), True, 'import numpy as np\n'), ((10086, 10152), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df5.chunks[0].op.indexes[1]', '[0, 1]'], {}), '(df5.chunks[0].op.indexes[1], [0, 1])\n', (10115, 10152), True, 'import numpy as np\n'), ((10161, 10224), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df5.chunks[1].op.indexes[0]', '[0]'], {}), '(df5.chunks[1].op.indexes[0], [0])\n', (10190, 10224), True, 'import numpy as np\n'), ((10233, 10296), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df5.chunks[1].op.indexes[1]', '[0]'], {}), '(df5.chunks[1].op.indexes[1], [0])\n', (10262, 10296), True, 'import numpy as np\n'), ((10334, 10365), 'mars.dataframe.DataFrame', 'md.DataFrame', (['df1'], {'chunk_size': '(2)'}), '(df1, chunk_size=2)\n', (10346, 10365), True, 'import mars.dataframe as md\n'), ((11210, 11273), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[0].op.indexes[0]', '[1]'], {}), '(df6.chunks[0].op.indexes[0], [1])\n', (11239, 11273), True, 'import numpy as np\n'), ((11282, 11348), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[0].op.indexes[1]', '[0, 1]'], {}), '(df6.chunks[0].op.indexes[1], [0, 1])\n', (11311, 11348), 
True, 'import numpy as np\n'), ((11357, 11420), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[1].op.indexes[0]', '[1]'], {}), '(df6.chunks[1].op.indexes[0], [1])\n', (11386, 11420), True, 'import numpy as np\n'), ((11429, 11492), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[1].op.indexes[1]', '[0]'], {}), '(df6.chunks[1].op.indexes[1], [0])\n', (11458, 11492), True, 'import numpy as np\n'), ((11501, 11564), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[2].op.indexes[0]', '[0]'], {}), '(df6.chunks[2].op.indexes[0], [0])\n', (11530, 11564), True, 'import numpy as np\n'), ((11573, 11639), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[2].op.indexes[1]', '[0, 1]'], {}), '(df6.chunks[2].op.indexes[1], [0, 1])\n', (11602, 11639), True, 'import numpy as np\n'), ((11648, 11711), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[3].op.indexes[0]', '[0]'], {}), '(df6.chunks[3].op.indexes[0], [0])\n', (11677, 11711), True, 'import numpy as np\n'), ((11720, 11783), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['df6.chunks[3].op.indexes[1]', '[0]'], {}), '(df6.chunks[3].op.indexes[1], [0])\n', (11749, 11783), True, 'import numpy as np\n'), ((11821, 11852), 'mars.dataframe.DataFrame', 'md.DataFrame', (['df1'], {'chunk_size': '(2)'}), '(df1, chunk_size=2)\n', (11833, 11852), True, 'import mars.dataframe as md\n'), ((12876, 12908), 'mars.dataframe.DataFrame', 'md.DataFrame', (['data'], {'chunk_size': '(2)'}), '(data, chunk_size=2)\n', (12888, 12908), True, 'import mars.dataframe as md\n'), ((13805, 13880), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['df1.dtypes', "data[['c1', 'c2', 'c3']].dtypes"], {}), "(df1.dtypes, data[['c1', 'c2', 'c3']].dtypes)\n", (13835, 13880), True, 'import pandas as pd\n'), ((14580, 14612), 'mars.dataframe.DataFrame', 
'md.DataFrame', (['data'], {'chunk_size': '(2)'}), '(data, chunk_size=2)\n', (14592, 14612), True, 'import mars.dataframe as md\n'), ((14700, 14735), 'mars.dataframe.Series', 'md.Series', (['mask_data1'], {'chunk_size': '(2)'}), '(mask_data1, chunk_size=2)\n', (14709, 14735), True, 'import mars.dataframe as md\n'), ((14752, 14787), 'mars.dataframe.Series', 'md.Series', (['mask_data2'], {'chunk_size': '(2)'}), '(mask_data2, chunk_size=2)\n', (14761, 14787), True, 'import mars.dataframe as md\n'), ((15549, 15578), 'mars.dataframe.Series', 'md.Series', (['data'], {'chunk_size': '(3)'}), '(data, chunk_size=3)\n', (15558, 15578), True, 'import mars.dataframe as md\n'), ((16462, 16491), 'mars.dataframe.Series', 'md.Series', (['data'], {'chunk_size': '(3)'}), '(data, chunk_size=3)\n', (16471, 16491), True, 'import mars.dataframe as md\n'), ((1364, 1379), 'pandas.Index', 'pd.Index', (["['x']"], {}), "(['x'])\n", (1372, 1379), True, 'import pandas as pd\n'), ((1454, 1469), 'pandas.Index', 'pd.Index', (["['z']"], {}), "(['z'])\n", (1462, 1469), True, 'import pandas as pd\n'), ((1660, 1680), 'pandas.Index', 'pd.Index', (["['x', 'y']"], {}), "(['x', 'y'])\n", (1668, 1680), True, 'import pandas as pd\n'), ((1755, 1770), 'pandas.Index', 'pd.Index', (["['z']"], {}), "(['z'])\n", (1763, 1770), True, 'import pandas as pd\n'), ((12800, 12821), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (12814, 12821), True, 'import numpy as np\n'), ((14504, 14525), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (14518, 14525), True, 'import numpy as np\n'), ((15501, 15519), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (15515, 15519), True, 'import numpy as np\n'), ((16384, 16402), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (16398, 16402), True, 'import numpy as np\n')] |
"""
Package used to hold all methods related to preprocessing steps
Created on 22/06/2019
@author: nidragedd
"""
import matplotlib
# Set the matplotlib backend to a non-interactive one so figures can be saved in the background
matplotlib.use("agg")
import matplotlib.pyplot as plt
import numpy as np
import torch
from src.preprocess import preprocess
from src.utils import constants
def imshow(image, ax=None):
    """Render a normalized torch image tensor on a matplotlib axis.

    :param image: (torch tensor) image data, channels-first as produced by PyTorch
    :param ax: (matplotlib Axis) optional axis to draw on; one is created when omitted
    :return: (matplotlib Axis) the axis the image was drawn on
    """
    if ax is None:
        _, ax = plt.subplots()
    # PyTorch keeps the color channel first; matplotlib expects it last.
    img = image.numpy().transpose((1, 2, 0))
    # Revert the normalization applied during preprocessing.
    img = np.array(constants.norm_std) * img + np.array(constants.norm_means)
    # Clip into [0, 1]; otherwise the de-normalized image renders as noise.
    ax.imshow(np.clip(img, 0, 1))
    return ax
def display_most_likely_classes(image_path, classnames, probs):
    """Show an image above a horizontal bar chart of its top K predicted classes.

    The figure is written to ``prediction.png`` in the working directory.

    :param image_path: (string) full path to the image to display
    :param classnames: (array) human readable names of the most likely classes
    :param probs: (array) probability value associated with each class
    """
    plt.style.use("ggplot")
    fig, (img_ax, bar_ax) = plt.subplots(2, 1, figsize=(15, 10))
    # Top panel: the image itself, titled with the best guess.
    img_ax.set_title(classnames[0])
    img_ax.set_axis_off()
    # Bottom panel: one horizontal bar per candidate class.
    bar_ax.barh(np.arange(len(probs)), probs, tick_label=classnames)
    bar_ax.set_aspect(0.1)
    bar_ax.invert_yaxis()
    imshow(torch.from_numpy(preprocess.process_image(image_path)), img_ax)
    plt.savefig("prediction.png")
| [
"numpy.clip",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"matplotlib.pyplot.style.use",
"numpy.array",
"src.preprocess.preprocess.process_image",
"matplotlib.pyplot.subplots"
] | [((228, 249), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (242, 249), False, 'import matplotlib\n'), ((916, 946), 'numpy.array', 'np.array', (['constants.norm_means'], {}), '(constants.norm_means)\n', (924, 946), True, 'import numpy as np\n'), ((957, 985), 'numpy.array', 'np.array', (['constants.norm_std'], {}), '(constants.norm_std)\n', (965, 985), True, 'import numpy as np\n'), ((1116, 1136), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (1123, 1136), True, 'import numpy as np\n'), ((1521, 1544), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1534, 1544), True, 'import matplotlib.pyplot as plt\n'), ((1564, 1600), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(15, 10)'}), '(2, 1, figsize=(15, 10))\n', (1576, 1600), True, 'import matplotlib.pyplot as plt\n'), ((1871, 1900), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""prediction.png"""'], {}), "('prediction.png')\n", (1882, 1900), True, 'import matplotlib.pyplot as plt\n'), ((694, 708), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (706, 708), True, 'import matplotlib.pyplot as plt\n'), ((1819, 1855), 'src.preprocess.preprocess.process_image', 'preprocess.process_image', (['image_path'], {}), '(image_path)\n', (1843, 1855), False, 'from src.preprocess import preprocess\n')] |
import gym
from softmax import PolicyGradient
import matplotlib.pyplot as plt
import retro
import numpy as np
import cv2
class SonicDiscretizer(gym.ActionWrapper):
    """
    Wrap a gym-retro environment and make it use discrete
    actions for the Sonic game.

    Each discrete action index maps to a 12-entry boolean array with one
    flag per Genesis controller button.
    """

    def __init__(self, env):
        """Build the lookup table from discrete indices to button arrays.

        :param env: the gym-retro environment to wrap
        """
        super(SonicDiscretizer, self).__init__(env)
        buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"]
        # The four button combinations the agent may choose between.
        actions = [['RIGHT'], ['DOWN', 'B'], ['B'], ['RIGHT', 'DOWN']]
        self._actions = []
        for action in actions:
            arr = np.array([False] * 12)
            for button in action:
                arr[buttons.index(button)] = True
            self._actions.append(arr)
        self.action_space = gym.spaces.Discrete(len(self._actions))

    def action(self, a):
        """Translate a discrete action index into its button array.

        :param a: (int) index into the discrete action table
        :return: (numpy array) a fresh copy, so callers cannot mutate the cache
        """
        return self._actions[a].copy()
# --- Training configuration -------------------------------------------------
DISPLAY_REWARD_THRESHOLD = 400  # renders environment if total episode reward is greater then this threshold
RENDER = False  # rendering wastes time
# Build the retro Sonic environment and restrict it to the discrete action set.
env = retro.make('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1')
env.seed(1)     # reproducible, general Policy gradient has high variance
env = env.unwrapped
env = SonicDiscretizer(env)
print(env.action_space)
print(env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)
# n_features presumably matches the flattened 1/8-downscaled grayscale frame
# computed below (inx * iny) — TODO confirm against the frame shape.
RL = PolicyGradient(
    n_actions=env.action_space.n,
    n_features=1120,
    learning_rate=0.02,
    reward_decay=0.99,
    # output_graph=True,
)
# One policy update per episode; an episode ends on `done`, on reaching the
# end of the level, or after 250 steps without a fitness improvement.
for i_episode in range(3000):
    observation = env.reset()
    # Downscale factor 8 in both spatial dimensions.
    inx, iny, inc = env.observation_space.shape
    inx = int(inx / 8)
    iny = int(iny / 8)
    input_array = []
    output_array = []
    n = 0  # (unused)
    fitness_max_current = 0
    fitness_current = 0
    x_pos_max = 0
    x_pos_end = 0
    counter = 0  # steps since the last fitness improvement
    while True:
        if RENDER:
            env.render()
        # ---------- scale the input state ----------
        # Resize, convert to grayscale and flatten the frame into a 1-D vector.
        ob = cv2.resize(observation, (inx, iny))
        ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
        ob = np.reshape(ob, (inx, iny))
        for x in ob:
            for y in x:
                input_array.append(y)
        current_state = np.asarray(input_array)
        input_array.clear()
        action = RL.choose_action(current_state)
        observation_, reward, done, info = env.step(action)
        # ---------- scale the output state ----------
        # NOTE(review): next_state is built the same way but never used after
        # store_transition — looks like leftover code; verify before removing.
        next_state = cv2.resize(observation_, (inx, iny))
        next_state = cv2.cvtColor(next_state, cv2.COLOR_BGR2GRAY)
        next_state = np.reshape(next_state, (inx, iny))
        for x in next_state:
            for y in x:
                output_array.append(y)
        next_state = np.asarray(output_array)
        output_array.clear()
        RL.store_transition(current_state, action, reward)
        # info keys come from the retro scenario; presumably 'x' is the player's
        # horizontal position and 'screen_x_end' the end of the level — confirm.
        x_pos_end = info['screen_x_end']
        xpos = info['x']
        if xpos > x_pos_max:
            # Reward forward progress: small bonus plus the env reward.
            fitness_current += 1
            fitness_current += reward
            x_pos_max = xpos
        if xpos == x_pos_end and xpos > 500:
            # Reached the end of the level: big bonus, finish the episode.
            fitness_current += 100000
            break
        if fitness_current > fitness_max_current:
            fitness_max_current = fitness_current
            counter = 0
        else:
            counter += 1
        if done or counter == 250:
            # Episode over (or stalled for 250 steps): update the policy.
            ep_rs_sum = sum(RL.ep_rs)
            # Exponential moving average of episode rewards across episodes.
            if 'running_reward' not in globals():
                running_reward = ep_rs_sum
            else:
                running_reward = running_reward * 0.99 + ep_rs_sum * 0.01
            if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True  # rendering
            print("episode:", i_episode, " reward:", int(running_reward))
            vt = RL.learn()
            if i_episode == 0:
                plt.plot(vt)  # plot the episode vt
                plt.xlabel('episode steps')
                plt.ylabel('normalized state-action value')
                plt.show()
            break
        observation = observation_
observation = observation_ | [
"softmax.PolicyGradient",
"numpy.reshape",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.array",
"cv2.cvtColor",
"cv2.resize",
"retro.make",
"matplotlib.pyplot.show"
] | [((1690, 1750), 'retro.make', 'retro.make', (['"""SonicTheHedgehog-Genesis"""', '"""GreenHillZone.Act1"""'], {}), "('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1')\n", (1700, 1750), False, 'import retro\n'), ((2000, 2105), 'softmax.PolicyGradient', 'PolicyGradient', ([], {'n_actions': 'env.action_space.n', 'n_features': '(1120)', 'learning_rate': '(0.02)', 'reward_decay': '(0.99)'}), '(n_actions=env.action_space.n, n_features=1120, learning_rate\n =0.02, reward_decay=0.99)\n', (2014, 2105), False, 'from softmax import PolicyGradient\n'), ((2590, 2625), 'cv2.resize', 'cv2.resize', (['observation', '(inx, iny)'], {}), '(observation, (inx, iny))\n', (2600, 2625), False, 'import cv2\n'), ((2639, 2675), 'cv2.cvtColor', 'cv2.cvtColor', (['ob', 'cv2.COLOR_BGR2GRAY'], {}), '(ob, cv2.COLOR_BGR2GRAY)\n', (2651, 2675), False, 'import cv2\n'), ((2689, 2715), 'numpy.reshape', 'np.reshape', (['ob', '(inx, iny)'], {}), '(ob, (inx, iny))\n', (2699, 2715), True, 'import numpy as np\n'), ((2824, 2847), 'numpy.asarray', 'np.asarray', (['input_array'], {}), '(input_array)\n', (2834, 2847), True, 'import numpy as np\n'), ((3065, 3101), 'cv2.resize', 'cv2.resize', (['observation_', '(inx, iny)'], {}), '(observation_, (inx, iny))\n', (3075, 3101), False, 'import cv2\n'), ((3123, 3167), 'cv2.cvtColor', 'cv2.cvtColor', (['next_state', 'cv2.COLOR_BGR2GRAY'], {}), '(next_state, cv2.COLOR_BGR2GRAY)\n', (3135, 3167), False, 'import cv2\n'), ((3189, 3223), 'numpy.reshape', 'np.reshape', (['next_state', '(inx, iny)'], {}), '(next_state, (inx, iny))\n', (3199, 3223), True, 'import numpy as np\n'), ((3338, 3362), 'numpy.asarray', 'np.asarray', (['output_array'], {}), '(output_array)\n', (3348, 3362), True, 'import numpy as np\n'), ((1253, 1275), 'numpy.array', 'np.array', (['([False] * 12)'], {}), '([False] * 12)\n', (1261, 1275), True, 'import numpy as np\n'), ((4415, 4427), 'matplotlib.pyplot.plot', 'plt.plot', (['vt'], {}), '(vt)\n', (4423, 4427), True, 'import matplotlib.pyplot as 
plt\n'), ((4469, 4496), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""episode steps"""'], {}), "('episode steps')\n", (4479, 4496), True, 'import matplotlib.pyplot as plt\n'), ((4513, 4556), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""normalized state-action value"""'], {}), "('normalized state-action value')\n", (4523, 4556), True, 'import matplotlib.pyplot as plt\n'), ((4573, 4583), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4581, 4583), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
@author: GRANT_I
AstraZeneca, Macclesfield, UK
Questions to <EMAIL>
"""
# Python 3 script to fit equation 1 in manuscript to experimental data
# Total in Plasma over time
import numpy as np
from scipy.optimize import curve_fit
def plasma_total(t, k_res, cmax):
    """Equation 1: total concentration in plasma at time ``t``.

    The released drug's contribution is assumed negligible, so the total
    decays mono-exponentially with the combined hydrolysis (module-level
    ``k_hy``) and RES uptake (``k_res``) rate constants.
    """
    decay_rate = k_hy + k_res
    return cmax * np.exp(-decay_rate * t)
def fit_release(t_data, rel_data):
    """Fit equation 1 (``plasma_total``) to total-in-plasma data.

    :param t_data: time points (hours)
    :param rel_data: measured total plasma concentrations at those times
    :return: tuple (k_res, cmax) — fitted release rate constant and max extent

    Bug fix: the original body ignored its arguments and always fitted the
    module-level ``t_8932``/``c_pl8932`` data; it now fits the data it is given.
    """
    popt, _ = curve_fit(plasma_total, t_data, rel_data)
    k_res = popt[0]
    cmax = popt[1]
    return (k_res, cmax)
# From figure 2a we have a release half-life for SPL-8932 of 4.4 hours
# convert to hydrolysis rate constant (first-order kinetics: k = ln(2) / t_half)
k_hy = np.log(2) / 4.4
# Experimental Data for SPL-8932 (time in hours and total concentration in
# plasma in micrograms per ml)
t_8932 = [1, 3, 6, 9, 16, 28]
c_pl8932 = [116.333, 59.2, 17.467, 2.977, 0.189, 0.03]
# Fit model to experimental data for SPL-8932 total in plasma
k_res, c_max = fit_release(t_8932, c_pl8932)
dose = 250.0  # micrograms (10 mg/kg to a 25 g mouse)
# Effective volume of distribution for dendrimer 'initially'
# (0.025 converts the 25 g mouse mass to kg so Vc comes out per kg)
Vc = dose / (c_max * 0.025)
# Show fitted results (the `int(x * f + 0.5) / f` pattern rounds to f decimals)
print('\n' + 'Fitted Value for k_res RES Uptake Constant' + '\n')
print ('k_res = ', int(100.0 * k_res + 0.5) / 100.0, ' per hr')
print ('Vc = ', int(10.0 * Vc + 0.5) / 10.0, ' ml per kg') | [
"scipy.optimize.curve_fit",
"numpy.exp",
"numpy.log"
] | [((671, 712), 'scipy.optimize.curve_fit', 'curve_fit', (['plasma_total', 't_8932', 'c_pl8932'], {}), '(plasma_total, t_8932, c_pl8932)\n', (680, 712), False, 'from scipy.optimize import curve_fit\n'), ((914, 923), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (920, 923), True, 'import numpy as np\n'), ((467, 494), 'numpy.exp', 'np.exp', (['(-(k_hy + k_res) * t)'], {}), '(-(k_hy + k_res) * t)\n', (473, 494), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def plot_results():
    """Plot five training metrics per epoch for ten dataset sizes.

    Draws a 2x5 grid of subplots (one panel per training dataset) and shows
    the figure. Each panel plots, over 10 epochs: AP_IOU_RANGE (orange),
    AP_IOU_50 (green), AP_IOU_75 (purple), AP_IOU_RANGE_large_area (red)
    and AR_RANGE (blue).
    """
    # From https://www.delftstack.com/howto/matplotlib/how-to-plot-in-real-time-using-matplotlib/
    # From https://matplotlib.org/stable/gallery/subplots_axes_and_figures/subplots_demo.html
    # Per-panel data, listed row-major (top row, then bottom row). Each entry
    # is (title, [AP_IOU_RANGE, AP_IOU_50, AP_IOU_75, AP_IOU_RANGE_large_area,
    # AR_RANGE]) with one value per epoch.
    panels = [
        ('DATASET_60', [
            [0, 0, 0.010, 0.044, 0.097, 0.400, 0.504, 0.562, 0.597, 0.611],
            [0, 0, 0.014, 0.091, 0.193, 0.705, 0.831, 0.875, 0.916, 0.951],
            [0, 0, 0.012, 0.036, 0.076, 0.412, 0.583, 0.691, 0.710, 0.745],
            [0, 0, 0.012, 0.069, 0.149, 0.559, 0.659, 0.691, 0.700, 0.693],
            [0, 0, 0.009, 0.066, 0.148, 0.564, 0.661, 0.693, 0.702, 0.696]]),
        ('DATASET_120', [
            [0, 0.091, 0.397, 0.588, 0.630, 0.651, 0.631, 0.658, 0.652, 0.634],
            [0, 0.162, 0.675, 0.931, 0.982, 0.983, 0.984, 0.975, 0.972, 0.972],
            [0, 0.090, 0.458, 0.714, 0.791, 0.798, 0.776, 0.822, 0.807, 0.774],
            [0, 0.111, 0.510, 0.699, 0.702, 0.720, 0.706, 0.726, 0.722, 0.707],
            [0, 0.151, 0.541, 0.701, 0.703, 0.722, 0.708, 0.729, 0.723, 0.709]]),
        ('DATASET_180', [
            [0.010, 0.413, 0.620, 0.631, 0.615, 0.615, 0.634, 0.647, 0.638, 0.627],
            [0.024, 0.717, 0.942, 0.969, 0.980, 0.985, 0.985, 0.976, 0.972, 0.974],
            [0.005, 0.446, 0.774, 0.785, 0.741, 0.739, 0.775, 0.817, 0.807, 0.778],
            [0.021, 0.569, 0.709, 0.710, 0.694, 0.692, 0.710, 0.714, 0.709, 0.697],
            [0.024, 0.580, 0.710, 0.713, 0.697, 0.693, 0.711, 0.718, 0.711, 0.698]]),
        ('DATASET_240', [
            [0.082, 0.534, 0.636, 0.652, 0.658, 0.618, 0.626, 0.633, 0.638, 0.634],
            [0.155, 0.886, 0.963, 0.980, 0.982, 0.981, 0.984, 0.969, 0.968, 0.969],
            [0.073, 0.591, 0.801, 0.821, 0.845, 0.751, 0.765, 0.770, 0.774, 0.772],
            [0.102, 0.644, 0.717, 0.726, 0.727, 0.697, 0.702, 0.716, 0.721, 0.716],
            [0.172, 0.664, 0.720, 0.727, 0.727, 0.699, 0.702, 0.718, 0.723, 0.719]]),
        ('DATASET_270', [
            [0.162, 0.485, 0.550, 0.613, 0.601, 0.524, 0.582, 0.649, 0.652, 0.653],
            [0.315, 0.939, 0.982, 0.980, 0.987, 0.982, 0.988, 0.978, 0.977, 0.978],
            [0.156, 0.426, 0.545, 0.709, 0.661, 0.465, 0.618, 0.812, 0.809, 0.820],
            [0.295, 0.637, 0.638, 0.686, 0.682, 0.622, 0.667, 0.713, 0.719, 0.718],
            [0.303, 0.637, 0.639, 0.689, 0.684, 0.623, 0.667, 0.718, 0.721, 0.719]]),
        ('X_DATASET_60', [
            [0, 0, 0.006, 0.332, 0.561, 0.654, 0.663, 0.681, 0.681, 0.682],
            [0, 0, 0.010, 0.523, 0.868, 0.972, 0.980, 0.983, 0.986, 0.987],
            [0, 0, 0.007, 0.401, 0.671, 0.808, 0.811, 0.865, 0.865, 0.879],
            [0, 0, 0.006, 0.377, 0.642, 0.724, 0.730, 0.742, 0.739, 0.737],
            [0, 0, 0.004, 0.376, 0.643, 0.726, 0.733, 0.745, 0.743, 0.740]]),
        ('X_DATASET_120', [
            [0, 0.288, 0.623, 0.667, 0.637, 0.657, 0.680, 0.699, 0.697, 0.699],
            [0, 0.486, 0.969, 0.982, 0.981, 0.984, 0.985, 0.988, 0.988, 0.988],
            [0, 0.318, 0.755, 0.838, 0.802, 0.836, 0.861, 0.911, 0.901, 0.902],
            [0, 0.335, 0.712, 0.734, 0.718, 0.728, 0.743, 0.755, 0.753, 0.753],
            [0, 0.337, 0.714, 0.735, 0.719, 0.729, 0.746, 0.758, 0.756, 0.757]]),
        ('X_DATASET_180', [
            [0.010, 0.640, 0.672, 0.696, 0.691, 0.685, 0.708, 0.714, 0.708, 0.708],
            [0.020, 0.969, 0.989, 0.987, 0.987, 0.987, 0.987, 0.988, 0.988, 0.988],
            [0.010, 0.790, 0.887, 0.892, 0.887, 0.889, 0.919, 0.905, 0.908, 0.910],
            [0.011, 0.719, 0.733, 0.750, 0.748, 0.743, 0.762, 0.765, 0.759, 0.758],
            [0.006, 0.722, 0.733, 0.754, 0.752, 0.746, 0.766, 0.768, 0.763, 0.761]]),
        ('X_DATASET_240', [
            [0.200, 0.653, 0.646, 0.683, 0.675, 0.693, 0.685, 0.720, 0.717, 0.721],
            [0.332, 0.985, 0.985, 0.986, 0.985, 0.987, 0.986, 0.988, 0.988, 0.988],
            [0.222, 0.803, 0.804, 0.879, 0.971, 0.898, 0.882, 0.936, 0.923, 0.928],
            [0.229, 0.720, 0.709, 0.740, 0.736, 0.749, 0.744, 0.773, 0.769, 0.771],
            [0.233, 0.722, 0.711, 0.742, 0.740, 0.753, 0.747, 0.776, 0.773, 0.776]]),
        ('X_DATASET_270', [
            [0.338, 0.617, 0.616, 0.630, 0.650, 0.621, 0.633, 0.716, 0.715, 0.717],
            [0.560, 0.987, 0.985, 0.986, 0.987, 0.987, 0.988, 0.988, 0.988, 0.989],
            [0.379, 0.736, 0.718, 0.780, 0.832, 0.768, 0.796, 0.944, 0.947, 0.939],
            [0.391, 0.689, 0.685, 0.696, 0.719, 0.689, 0.699, 0.765, 0.767, 0.766],
            [0.403, 0.690, 0.688, 0.698, 0.721, 0.691, 0.701, 0.769, 0.771, 0.770]]),
    ]
    # Color order matches the metric order in each panel's series list.
    colors = ('tab:orange', 'tab:green', 'tab:purple', 'tab:red', 'tab:blue')
    figure1, axs = plt.subplots(2, 5, figsize=(32, 18),
                               gridspec_kw={'width_ratios': [1, 1, 1, 1, 1],
                                            'height_ratios': [1, 1]})
    plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.35, hspace=0.40)
    figure1.suptitle('Training with multiple size datasets', size=22)
    # canvas.set_window_title was deprecated in Matplotlib 3.4 and removed in
    # 3.6; the supported path goes through the canvas manager.
    figure1.canvas.manager.set_window_title('Training datas')
    x_axis_values = np.linspace(0, 10, 10)
    txt = "AP_IOU_RANGE(Orange),AP_IOU_50(green),AP_IOU_75(purple),AP_IOU_RANGE_large(red),AR_RANGE(blue)"
    plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=24)
    # axs.flat iterates row-major, matching the panel order above.
    for ax, (title, series) in zip(axs.flat, panels):
        ax.set_title(title)
        ax.set(xlabel='EPOCHS', ylabel='Training Metrics')
        for values, color in zip(series, colors):
            ax.plot(x_axis_values, values, color)
    plt.show()
def main():
    """Script entry point: draw the full grid of training-metric plots."""
    plot_results()
if __name__ == "__main__":  # run only when executed directly, not on import
    main()
| [
"matplotlib.pyplot.figtext",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((288, 400), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(5)'], {'figsize': '(32, 18)', 'gridspec_kw': "{'width_ratios': [1, 1, 1, 1, 1], 'height_ratios': [1, 1]}"}), "(2, 5, figsize=(32, 18), gridspec_kw={'width_ratios': [1, 1, 1,\n 1, 1], 'height_ratios': [1, 1]})\n", (300, 400), True, 'import matplotlib.pyplot as plt\n'), ((397, 487), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.1)', 'right': '(0.9)', 'top': '(0.9)', 'wspace': '(0.35)', 'hspace': '(0.4)'}), '(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.35,\n hspace=0.4)\n', (416, 487), True, 'import matplotlib.pyplot as plt\n'), ((629, 651), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(10)'], {}), '(0, 10, 10)\n', (640, 651), True, 'import numpy as np\n'), ((763, 848), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.5)', '(0.01)', 'txt'], {'wrap': '(True)', 'horizontalalignment': '"""center"""', 'fontsize': '(24)'}), "(0.5, 0.01, txt, wrap=True, horizontalalignment='center',\n fontsize=24)\n", (774, 848), True, 'import matplotlib.pyplot as plt\n'), ((10018, 10028), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10026, 10028), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
import gym
gym.logger.set_level(40) # suppress warnings (please remove if gives error)
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
from time import time
import itertools
import torch
torch.manual_seed(0) # set random seed
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from env_player import EnvPlayer
"""
Simple rewards avg steps: 1353.0
Norm disc rewards avg steps: 352.0
"""
class Agent(nn.Module):
    """Two-layer softmax policy network mapping states to action probabilities."""

    def __init__(self, device, s_size=4, h_size=16, a_size=2, name='test'):
        super(Agent, self).__init__()
        self.dev = device
        self.name = name
        # Layer creation order is kept: it determines the RNG-seeded init.
        self.fc1 = nn.Linear(s_size, h_size)
        self.fc2 = nn.Linear(h_size, a_size)
        if device.type == 'cuda':
            self.cuda(device)
        print("Agent init on device {}".format(self.dev))

    def forward(self, x):
        """Return per-action probabilities for a batch of states."""
        hidden = F.relu(self.fc1(x))
        logits = self.fc2(hidden)
        return F.softmax(logits, dim=1)

    def act(self, state, return_proba=False):
        """Sample an action for a single numpy state.

        Returns the action index, or ``(action, log_prob)`` when
        ``return_proba`` is true.
        """
        state_t = torch.from_numpy(state).float().unsqueeze(0).to(self.dev)
        dist = Categorical(self.forward(state_t).cpu())
        sample = dist.sample()
        chosen = sample.item()
        if return_proba:
            return chosen, dist.log_prob(sample)
        return chosen
def discounted_rewards(rewards, gamma, normalize=True):
    """Compute the discounted reward-to-go for every timestep of an episode.

    Because we have a Markov process, the action at time-step t can only affect
    the future reward, so the past reward shouldn't be contributing to the policy
    gradient. So to properly assign credit to an action, each step is credited
    with the discounted sum of the rewards that follow it rather than with the
    total episode reward.

    :param rewards: per-step rewards of one episode
    :param gamma: discount factor
    :param normalize: if True, standardise the result to zero mean / unit std
    :return: numpy array of discounted returns, one entry per input reward
    """
    disc_rewards = np.zeros(len(rewards))
    running = 0
    for i in reversed(range(len(rewards))):
        running = rewards[i] + gamma * running
        disc_rewards[i] = running
    if normalize and len(rewards) > 0:
        disc_rewards -= disc_rewards.mean()
        std = disc_rewards.std()
        # Guard against division by zero for constant returns (e.g. a
        # single-step episode), which previously produced NaN/inf.
        if std > 0:
            disc_rewards /= std
    return disc_rewards
def grid_dict_to_values(params_grid):
    """Expand a grid-search dict into the list of all parameter combinations.

    Returns the combinations together with the parameter names matching each
    position of a combination tuple.
    """
    params = list(params_grid)
    for name in params:
        assert type(params_grid[name]) is list, 'All grid-search params must be lists. Error: {}'.format(name)
    values = [params_grid[name] for name in params]
    combs = list(itertools.product(*values))
    return combs, params
def grid_pos_to_params(grid_data, params):
    """Convert one grid-search combination into a kwargs-style dict.

    ``params[i]`` becomes the key for ``grid_data[i]``.
    """
    return {name: grid_data[position] for position, name in enumerate(params)}
def reinforce(env, agent, n_episodes=2000, max_t=1000,
              gamma=1.0, print_every=100, use_disc_rewards=True):
    """Train ``agent`` on ``env`` with the REINFORCE policy-gradient algorithm.

    :param env: gym-style environment (``reset``/``step``/``seed`` API)
    :param agent: policy exposing ``act(state, return_proba=True)``
    :param n_episodes: maximum number of training episodes
    :param max_t: maximum steps per episode
    :param gamma: reward discount factor
    :param print_every: progress-reporting period, in episodes
    :param use_disc_rewards: if True, credit each step with its normalised
        discounted reward-to-go; otherwise credit every step with the total
        discounted episode return
    :return: tuple ``(solved, scores, i_episode)``
    """
    solved = False
    optimizer = optim.Adam(agent.parameters(), lr=1e-2)
    scores_deque = deque(maxlen=100)  # rolling window used by the solve criterion
    scores = []
    timings = []
    print("Training with normed_disc_rewards={}".format(use_disc_rewards))
    for i_episode in range(1, n_episodes+1):
        t_0 = time()
        saved_log_probs = []
        rewards = []
        state = env.reset()
        # Roll out one episode, recording per-step rewards and log-probs.
        for t in range(max_t):
            action, log_prob = agent.act(state, return_proba=True)
            saved_log_probs.append(log_prob)
            state, reward, done, _ = env.step(action)
            rewards.append(reward)
            if done:
                break
        scores_deque.append(sum(rewards))
        scores.append(sum(rewards))
        if not use_disc_rewards:
            # Credit every step with the same total discounted episode return.
            discounts = [gamma**i for i in range(len(rewards)+1)]
            R = sum([a*b for a,b in zip(discounts, rewards)])
            disc_rewards = [R] * len(saved_log_probs)
        else:
            disc_rewards = discounted_rewards(rewards, gamma)
        policy_loss = []
        """
        our goal is to optimize (max) sum(proba * disc_reward) for all steps
        example 1:
            gamma = 1
            t = 0.5
            P(1 | state) = t
            P(0 | state) = 1 - t
            action = 1     0    1
            reward = 0     0    1
            =>
            disc rewards = [0 + gamma * 1]  [0 + gamma * 1] [1]
            grad = dlogP(1) * 1 + dlogP(0) * 1 + dlogP(0) * 1
            grad = 1 / P(1) * dP(1) * 1 + 1 / P(0) * dP(0) * 1+ 1 / P(0) * dP(0) * 1
            grad = (1/t) * 1  + (1/(1-t) * (-1)) * 1 + (1/(1-t) * (-1)) * 1
        example 2:
            actions: (0,1,0) rewards: (1,0,1)
        conclusions:
            last two step-grads cancel each other and thus using total reward
            will yield the same gradient results
        """
        # Negative sign: optimizers minimise, but we want to maximise the
        # expected (log-prob-weighted) return.
        for i,log_prob in enumerate(saved_log_probs):
            policy_loss.append(-log_prob * disc_rewards[i])
        policy_loss_batch = torch.cat(policy_loss)
        policy_loss = policy_loss_batch.sum()
        optimizer.zero_grad()
        policy_loss.backward()
        optimizer.step()
        t_1 = time()
        timings.append(t_1-t_0)
        if i_episode % print_every == 0:
            print('Episode {}\tAverage Score: {:.2f}\tAverage time/ep: {:.2f}s'.format(
                i_episode, np.mean(scores_deque), np.mean(timings)))
            timings = []
        # CartPole-v0 is considered solved at an average score of 195 over
        # the last 100 episodes.
        if np.mean(scores_deque)>=195.0:
            print('Environment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_deque)))
            solved = True
            break
    return solved, scores, i_episode
# --- Script entry point: grid-search REINFORCE over the reward scheme -------
e = gym.make('CartPole-v0')
play_random = False
if play_random:
    p1 = EnvPlayer(env=e)
    p1.play()
e.seed(0)
print('observation space:', e.observation_space)
print('action space:', e.action_space)
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# The only grid-searched hyper-parameter: whether to use normalised
# discounted rewards or the plain total-return baseline.
grid = {
    "NormDiscRewards" : [False,True]
}
_combs, _params = grid_dict_to_values(grid)
results = []
best_agent = None
best_steps = np.inf
for grid_data in _combs:
    iter_params = grid_pos_to_params(grid_data, _params)
    NormDiscRewards = iter_params['NormDiscRewards']
    a = Agent(device=dev)
    solved, scores, n_ep = reinforce(env=e, agent=a, use_disc_rewards=NormDiscRewards)
    if solved:
        # Keep the agent that solved the environment in the fewest episodes.
        if n_ep < best_steps:
            best_steps = n_ep
            best_agent = a
    results.append((iter_params,n_ep))
results = sorted(results, key=lambda x:x[1])
for result in results:
    # NOTE(review): "Rrsult" is a typo in the printed message; the string is
    # runtime output and is deliberately left unchanged here.
    print("Rrsult: {} avg nr of steps until completion for : {}".format(
        result[1], result[0]))
# NOTE(review): if no configuration solved the env, best_agent is still None
# and EnvPlayer receives agent=None — verify EnvPlayer tolerates that.
p2 = EnvPlayer(env=e, agent=best_agent)
p2.play(cont=False, save_gif='cart_reinforce.gif')
| [
"torch.manual_seed",
"torch.nn.functional.softmax",
"numpy.mean",
"collections.deque",
"torch.distributions.Categorical",
"env_player.EnvPlayer",
"itertools.product",
"torch.from_numpy",
"torch.cuda.is_available",
"gym.logger.set_level",
"torch.nn.Linear",
"time.time",
"gym.make",
"torch.c... | [((36, 60), 'gym.logger.set_level', 'gym.logger.set_level', (['(40)'], {}), '(40)\n', (56, 60), False, 'import gym\n'), ((246, 266), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (263, 266), False, 'import torch\n'), ((5482, 5505), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (5490, 5505), False, 'import gym\n'), ((6452, 6486), 'env_player.EnvPlayer', 'EnvPlayer', ([], {'env': 'e', 'agent': 'best_agent'}), '(env=e, agent=best_agent)\n', (6461, 6486), False, 'from env_player import EnvPlayer\n'), ((3090, 3107), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (3095, 3107), False, 'from collections import deque\n'), ((5551, 5567), 'env_player.EnvPlayer', 'EnvPlayer', ([], {'env': 'e'}), '(env=e)\n', (5560, 5567), False, 'from env_player import EnvPlayer\n'), ((717, 742), 'torch.nn.Linear', 'nn.Linear', (['s_size', 'h_size'], {}), '(s_size, h_size)\n', (726, 742), True, 'import torch.nn as nn\n'), ((758, 783), 'torch.nn.Linear', 'nn.Linear', (['h_size', 'a_size'], {}), '(h_size, a_size)\n', (767, 783), True, 'import torch.nn as nn\n'), ((976, 995), 'torch.nn.functional.softmax', 'F.softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (985, 995), True, 'import torch.nn.functional as F\n'), ((1159, 1177), 'torch.distributions.Categorical', 'Categorical', (['probs'], {}), '(probs)\n', (1170, 1177), False, 'from torch.distributions import Categorical\n'), ((2582, 2608), 'itertools.product', 'itertools.product', (['*values'], {}), '(*values)\n', (2599, 2608), False, 'import itertools\n'), ((3263, 3269), 'time.time', 'time', ([], {}), '()\n', (3267, 3269), False, 'from time import time\n'), ((4842, 4864), 'torch.cat', 'torch.cat', (['policy_loss'], {}), '(policy_loss)\n', (4851, 4864), False, 'import torch\n'), ((5001, 5007), 'time.time', 'time', ([], {}), '()\n', (5005, 5007), False, 'from time import time\n'), ((5710, 5735), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], 
{}), '()\n', (5733, 5735), False, 'import torch\n'), ((5252, 5273), 'numpy.mean', 'np.mean', (['scores_deque'], {}), '(scores_deque)\n', (5259, 5273), True, 'import numpy as np\n'), ((5183, 5204), 'numpy.mean', 'np.mean', (['scores_deque'], {}), '(scores_deque)\n', (5190, 5204), True, 'import numpy as np\n'), ((5206, 5222), 'numpy.mean', 'np.mean', (['timings'], {}), '(timings)\n', (5213, 5222), True, 'import numpy as np\n'), ((5378, 5399), 'numpy.mean', 'np.mean', (['scores_deque'], {}), '(scores_deque)\n', (5385, 5399), True, 'import numpy as np\n'), ((1055, 1078), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (1071, 1078), False, 'import torch\n')] |
# Created by <NAME> on 2019-06-13.
# Copyright © 2019 <NAME>. All rights reserved.
import numpy as np
from numpy import sqrt, sin, cos, pi, e, sqrt, isclose
import matplotlib.pyplot as plt
# good place to check
# http://hyperphysics.phy-astr.gsu.edu/hbase/Kinetic/kintem.html#c2
# speed of sound
# http://hyperphysics.phy-astr.gsu.edu/hbase/Sound/souspe3.html
# some useful gammas
# Heat-capacity ratios (adiabatic index) for common gases.
gamma_air = 1.4
gamma_helium = 5. / 3
# edit these
MOLAR_MASS = 29 # g/mol -- NOTE(review): 29 g/mol is air; gamma below is set to
                # helium (4 g/mol) -- confirm which gas is intended.
gamma = gamma_helium
# constants
na = 6.022 * 10 ** 23  # Avogadro's number, 1/mol
KG_CONVERT = 1000 # g/kg
m = MOLAR_MASS / 1000 / na # atomic mass kg (per molecule)
k = 1.38064852 * 10 ** -23 # Boltzmann constant, J/K
R = 8.314 # molar gas constant, J/mol /K
TEMP = 273.14 + 20 # K -- NOTE(review): 0 degC is 273.15 K, not 273.14
deltaV = 10 # m/s
def maxwell_boltzmann_distribution(v):
    """Probability density of speed ``v`` for the module-level gas settings.

    Implements f(v) = (m/(2*pi*k*T))^(3/2) * 4*pi*v^2 * exp(-m*v^2/(2*k*T)).
    """
    prefactor = abs(sqrt((m / (2 * pi * k * TEMP))) ** 3) * 4 * pi
    boltzmann_factor = e ** (-1 * m * v ** 2 / (2 * k * TEMP))
    return prefactor * v ** 2 * boltzmann_factor
# prints the characteristic speed scales of the configured gas
def get_info():
    """Print v_rms, most probable speed, mean speed and the speed of sound."""
    # Root-mean-square speed: sqrt(3RT/M).
    v_rms = sqrt(3 * R * TEMP / (MOLAR_MASS / KG_CONVERT))
    # Most probable speed sqrt(2kT/m), written via its reciprocal form.
    most_probable_speed = 1 / sqrt(m / (2 * k * TEMP))
    # Mean speed: sqrt(8RT/(pi*M)).
    mean_speed = sqrt(8 * R * TEMP / (pi * MOLAR_MASS / KG_CONVERT))
    speed_sound = v_rms * sqrt(gamma / 3)
    print("vrms = ", v_rms)
    print("most probable speed = ", most_probable_speed)
    print("mean speed =", mean_speed)
    print("speed of sound = ", speed_sound)
    return
get_info()
# Plot the distribution over a 1..3000 m/s speed range.
velocities = np.arange(1, 3000, 1)
plt.plot(velocities, maxwell_boltzmann_distribution(velocities))
plt.title('maxwell–boltzmann distribution')
plt.xlabel('Velocity (m/s)')
plt.ylabel('Probablity ')  # NOTE(review): axis label misspells "Probability"
plt.show()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1410, 1431), 'numpy.arange', 'np.arange', (['(1)', '(3000)', '(1)'], {}), '(1, 3000, 1)\n', (1419, 1431), True, 'import numpy as np\n'), ((1497, 1540), 'matplotlib.pyplot.title', 'plt.title', (['"""maxwell–boltzmann distribution"""'], {}), "('maxwell–boltzmann distribution')\n", (1506, 1540), True, 'import matplotlib.pyplot as plt\n'), ((1541, 1569), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity (m/s)"""'], {}), "('Velocity (m/s)')\n", (1551, 1569), True, 'import matplotlib.pyplot as plt\n'), ((1570, 1595), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probablity """'], {}), "('Probablity ')\n", (1580, 1595), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1606), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1604, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1048, 1094), 'numpy.sqrt', 'sqrt', (['(3 * R * TEMP / (MOLAR_MASS / KG_CONVERT))'], {}), '(3 * R * TEMP / (MOLAR_MASS / KG_CONVERT))\n', (1052, 1094), False, 'from numpy import sqrt, sin, cos, pi, e, sqrt, isclose\n'), ((1112, 1163), 'numpy.sqrt', 'sqrt', (['(8 * R * TEMP / (pi * MOLAR_MASS / KG_CONVERT))'], {}), '(8 * R * TEMP / (pi * MOLAR_MASS / KG_CONVERT))\n', (1116, 1163), False, 'from numpy import sqrt, sin, cos, pi, e, sqrt, isclose\n'), ((1011, 1035), 'numpy.sqrt', 'sqrt', (['(m / (2 * k * TEMP))'], {}), '(m / (2 * k * TEMP))\n', (1015, 1035), False, 'from numpy import sqrt, sin, cos, pi, e, sqrt, isclose\n'), ((1182, 1197), 'numpy.sqrt', 'sqrt', (['(gamma / 3)'], {}), '(gamma / 3)\n', (1186, 1197), False, 'from numpy import sqrt, sin, cos, pi, e, sqrt, isclose\n'), ((814, 843), 'numpy.sqrt', 'sqrt', (['(m / (2 * pi * k * TEMP))'], {}), '(m / (2 * pi * k * TEMP))\n', (818, 843), False, 'from numpy import sqrt, sin, cos, pi, e, sqrt, isclose\n')] |
"""
Solvers
-------
This part of the package provides wrappers around Assimulo solvers.
"""
from assimulo.problem import Explicit_Problem
import numpy as np
import sys
from means.simulation import SensitivityTerm
from means.simulation.trajectory import Trajectory, TrajectoryWithSensitivityData
import inspect
from means.util.memoisation import memoised_property, MemoisableObject
from means.util.sympyhelpers import to_one_dim_array
NP_FLOATING_POINT_PRECISION = np.double
#-- Easy initialisation utilities -------------------------------------------------------------
class UniqueNameInitialisationMixin(object):
    """Mixin for solvers that can be looked up by a unique string name.

    Subclasses must override :meth:`unique_name` to return the identifier
    under which :func:`available_solvers` registers them.
    """
    @classmethod
    def unique_name(cls):
        """Return the solver's unique identifier (``NotImplemented`` here).

        The first parameter is renamed from the original ``self`` to the
        conventional ``cls`` for a classmethod; behaviour is unchanged.
        """
        return NotImplemented
class SolverException(Exception):
    """Wrapper exception raised when an underlying assimulo solver fails.

    Stores the causing exception as (class, kwargs) rather than as an object,
    because some solver exceptions (e.g. CVodeError) do not pickle well.
    """
    # Name-mangled class-level defaults; set per-instance in __init__.
    __base_exception_class = None
    __base_exception_kwargs = None
    def __init__(self, message, base_exception=None):
        """
        :param message: human-readable description (may be None when
            ``base_exception`` is given)
        :param base_exception: the original exception raised by the solver
        """
        if base_exception is not None:
            if message is None:
                message = ''
            # We need to take message argument as otherwise SolverException is unpickleable
            message += '{0.__class__.__name__}: {0!s}'.format(base_exception)
        super(SolverException, self).__init__(message)
        # CVodeError does not serialise well, so let's store it as a set of arguments and create the base exception
        # on the fly, rather than storing the actual object
        if base_exception is not None:
            self.__base_exception_class = base_exception.__class__
            self.__base_exception_kwargs = base_exception.__dict__.copy()
    @property
    def base_exception(self):
        # Recreate the original exception on demand from the stored class and
        # keyword arguments; returns None when no base exception was given.
        if self.__base_exception_class is not None:
            return self.__base_exception_class(**self.__base_exception_kwargs)
    def __eq__(self, other):
        # NOTE(review): relies on Exception.message, which exists on Python 2
        # only (the rest of this module is Python 2 -- see ``iteritems`` use).
        # Name mangling makes the private attributes accessible on ``other``
        # inside this class body.
        return isinstance(other, self.__class__) and \
            self.message == other.message and self.__base_exception_class == other.__base_exception_class and \
            self.__base_exception_kwargs == other.__base_exception_kwargs
def available_solvers(with_sensitivity_support=False):
    """Return a map from unique solver name to solver class in this module.

    Uses introspection to find every class that subclasses both
    :class:`SolverBase` and :class:`UniqueNameInitialisationMixin`, filtered
    on whether it supports parameter sensitivity calculations.

    :param with_sensitivity_support: if True, return only solvers supporting
        sensitivity calculations; if False, return only those that do not.
    :return: dict mapping lowercase unique names to solver classes
    """
    members = inspect.getmembers(sys.modules[__name__])
    initialisable_solvers = {}
    # Some metaprogramming here: look for all classes in this module that are
    # subclasses of `UniqueNameInitialisationMixin` and `SolverBase`.
    # (Loop variable renamed from `object`, which shadowed the builtin.)
    for name, member in members:
        if inspect.isclass(member) and issubclass(member, SolverBase) \
                and issubclass(member, UniqueNameInitialisationMixin) \
                and member != UniqueNameInitialisationMixin:
            if with_sensitivity_support and not issubclass(member, SensitivitySolverBase):
                # If we need sensitivity support, skip all non-sensitivity solvers
                continue
            elif not with_sensitivity_support and issubclass(member, SensitivitySolverBase):
                # If we don't need sensitivity support, skip all solvers with sensitivity support
                continue
            unique_name = member.unique_name().lower()
            # Guard against two classes registering under the same name.
            # BUGFIX: the original asserted on the unbound method object
            # (`member.unique_name`), which could never collide with the
            # string keys and therefore always passed.
            assert(unique_name not in initialisable_solvers)
            initialisable_solvers[unique_name] = member
    return initialisable_solvers
#-- Exception handling utilities -----------------------------------------------------------
def parse_flag(exception_message):
    """
    Parse the flag from the solver exception.

    e.g.
    >>> parse_flag("Exception: Dopri5 failed with flag -3")
    -3

    :param exception_message: message from the exception
    :type exception_message: str
    :return: flag id, or None when no flag can be extracted
    :rtype: int
    """
    import re
    match = re.match('.* failed with flag (-\d+)', exception_message)
    if match is None:
        return None
    try:
        return int(match.group(1))
    except Exception:
        return None
#-- Base solver functionality ---------------------------------------------------------------
def _set_kwargs_as_attributes(instance, **kwargs):
for attribute, value in kwargs.iteritems():
setattr(instance, attribute, value)
return instance
def _wrap_results_to_trajectories(simulated_timepoints, simulated_values, descriptions):
    """Pack raw solver output into one :class:`Trajectory` per equation.

    Each column of ``simulated_values`` corresponds to one entry in
    ``descriptions``.
    """
    number_of_timepoints, number_of_simulated_values = simulated_values.shape
    assert(len(descriptions) == number_of_simulated_values)
    assert(len(simulated_timepoints) == number_of_timepoints)
    # One trajectory per described quantity, in description order.
    return [Trajectory(simulated_timepoints, value_column, description)
            for description, value_column in zip(descriptions, simulated_values.T)]
class SolverBase(MemoisableObject):
    """
    This acts as a base class for ODE solvers used in `means`.
    It wraps around the solvers available in :module:`assimulo` package, and provides some basic functionality
    that allows solvers be used with `means` objects.
    """

    # Class-level defaults; all populated in __init__.
    _parameters = None
    _initial_conditions = None
    _problem = None
    _starting_time = None
    _options = None

    def __init__(self, problem, parameters, initial_conditions, starting_time=0.0, **options):
        """
        :param problem: Problem to simulate
        :type problem: :class:`~means.approximation.ODEProblem`
        :param parameters: Parameters of the solver. One entry for each constant in `problem`
        :type parameters: :class:`iterable`
        :param initial_conditions: Initial conditions of the system. One for each of the equations.
                                   Assumed to be zero, if not specified
        :type initial_conditions: :class:`iterable`
        :param starting_time: Starting time for the solver, defaults to 0.0
        :type starting_time: float
        :param options: Options to be passed to the specific instance of the solver.
        """
        parameters = to_one_dim_array(parameters, dtype=NP_FLOATING_POINT_PRECISION)
        initial_conditions = to_one_dim_array(initial_conditions, dtype=NP_FLOATING_POINT_PRECISION)
        # Sanity-check dimensions against the problem definition.
        assert(parameters.shape == (len(problem.parameters),))
        assert(initial_conditions.shape[0] == problem.number_of_equations)
        self._parameters = parameters
        self._initial_conditions = initial_conditions
        self._starting_time = float(starting_time)
        self._problem = problem
        self._options = options

    def simulate(self, timepoints):
        """
        Simulate initialised solver for the specified timepoints

        :param timepoints: timepoints that will be returned from simulation
        :return: a list of trajectories for each of the equations in the problem.
        """
        solver = self._solver
        last_timepoint = timepoints[-1]
        try:
            simulated_timepoints, simulated_values = solver.simulate(last_timepoint, ncp_list=timepoints)
        except (Exception, self._solver_exception_class) as e:
            # The exceptions thrown by solvers are usually hiding the real cause, try to see if it is
            # our right_hand_side_as_function that is broken first
            try:
                self._problem.right_hand_side_as_function(self._initial_conditions, self._parameters)
            except:
                # If it is broken, throw that exception instead
                raise
            else:
                # If it is not, handle the original exception
                self._handle_solver_exception(e)
        trajectories = self._results_to_trajectories(simulated_timepoints, simulated_values)
        return trajectories

    def _handle_solver_exception(self, solver_exception):
        """
        This function handles any exceptions that occurred in the solver and have been proven not to be
        related to our right_hand_side function.
        Subclasses can override it.

        :param solver_exception: the exception raised by the solver
        :type solver_exception: Exception
        """
        # By default just re-raise it with our wrapper
        raise SolverException(None, solver_exception)

    def _default_solver_instance(self):
        # Subclasses must return a configured assimulo solver instance here.
        raise NotImplementedError

    @property
    def _solver_exception_class(self):
        """
        Property That would return the exception class thrown by a specific solver the subclases can override.
        """
        return None

    @memoised_property
    def _solver(self):
        # Built lazily and cached; 'verbosity' defaults to 50 (quiet) unless
        # overridden via the options passed at construction time.
        solver = self._default_solver_instance()
        verbosity = self._options.pop('verbosity', 50)
        return _set_kwargs_as_attributes(solver, verbosity=verbosity, **self._options)

    @memoised_property
    def _assimulo_problem(self):
        # Wraps the problem's RHS into an assimulo Explicit_Problem, binding
        # the solver parameters into the closure.
        rhs = self._problem.right_hand_side_as_function
        parameters = self._parameters
        initial_conditions = self._initial_conditions
        initial_timepoint = self._starting_time
        model = Explicit_Problem(lambda t, x: rhs(x, parameters),
                                 initial_conditions, initial_timepoint)
        return model

    def _results_to_trajectories(self, simulated_timepoints, simulated_values):
        """
        Convert the resulting results into a list of trajectories

        :param simulated_timepoints: timepoints output from a solver
        :param simulated_values: values returned by the solver
        :return: list of :class:`Trajectory` objects, one per equation
        """
        descriptions = self._problem.left_hand_side_descriptors
        return _wrap_results_to_trajectories(simulated_timepoints, simulated_values, descriptions)
class CVodeMixin(UniqueNameInitialisationMixin, object):
    """Shared behaviour for solvers backed by the SUNDIALS CVode integrator."""

    @classmethod
    def unique_name(cls):
        """Identifier under which this solver family is registered."""
        return 'cvode'

    @property
    def _solver_exception_class(self):
        # Imported lazily so the module can load without the sundials bindings.
        from assimulo.solvers.sundials import CVodeError
        return CVodeError

    def _cvode_instance(self, model, options):
        """Create a CVode solver for ``model``, rejecting the 'usesens' option."""
        from assimulo.solvers.sundials import CVode
        cvode_solver = CVode(model)
        # Sensitivity mode is controlled by the Simulation classes, never by
        # a raw solver option.
        if 'usesens' in options:
            raise AttributeError("Cannot set 'usesens' parameter. Use Simulation or "
                                 "SimulationWithSensitivities for sensitivity calculations")
        return cvode_solver
class CVodeSolver(SolverBase, CVodeMixin):
    """Plain CVode solver, without parameter-sensitivity support."""

    def _default_solver_instance(self):
        solver = self._cvode_instance(self._assimulo_problem, self._options)
        # It is necessary to set usesens to false here as we are non-parametric here
        solver.usesens = False
        return solver
class ODE15sMixin(CVodeMixin):
    """
    A CVODE solver that mimicks the parameters used in `ode15s`_ solver in MATLAB.

    The different parameters that are set differently by default are:

    ``discr``
        Set to ``'BDF'`` by default
    ``atol``
        Set to ``1e-6``
    ``rtol``
        Set to ``1e-3``

    .. _`ode15s`: http://www.mathworks.ch/ch/help/matlab/ref/ode15s.html
    """
    ATOL = 1e-6
    RTOL = 1e-3
    MINH = 5.684342e-14

    @classmethod
    def unique_name(cls):
        return 'ode15s'

    def _cvode_instance(self, model, options):
        solver = super(ODE15sMixin, self)._cvode_instance(model, options)
        # Defaults chosen to match MATLAB's ode15s; user-supplied options take
        # precedence and are consumed (popped) so they are not applied twice.
        ode15s_defaults = [
            # The BDF discretisation is the key similarity to ode15s.
            ('discr', 'BDF'),
            ('atol', self.ATOL),
            ('rtol', self.RTOL),
            ('maxord', 5),
            # Without a minimum step size, CVODE would keep simulating while
            # only issuing a warning; setting minh makes such runs fail.
            ('minh', self.MINH),
        ]
        for option_name, default_value in ode15s_defaults:
            setattr(solver, option_name, options.pop(option_name, default_value))
        return solver
class ODE15sLikeSolver(SolverBase, ODE15sMixin):
    """ode15s-configured CVode solver, without parameter-sensitivity support."""

    def _default_solver_instance(self):
        solver = self._cvode_instance(self._assimulo_problem, self._options)
        # It is necessary to set usesens to false here as we are non-parametric here
        solver.usesens = False
        return solver
class Dopri5Solver(SolverBase, UniqueNameInitialisationMixin):
    """Solver backed by assimulo's Dopri5 (Dormand-Prince) Runge-Kutta method."""

    def _default_solver_instance(self):
        from assimulo.solvers.runge_kutta import Dopri5
        return Dopri5(self._assimulo_problem)

    @classmethod
    def unique_name(self):
        return 'dopri5'

    def _handle_solver_exception(self, solver_exception):
        """Translate Dopri5 failure flags into readable messages before re-raising."""
        # Let's try and parse the exception flag, to add some helpful info
        flag = parse_flag(solver_exception.message)
        FLAG_DOCUMENTATION = {-1: 'Input is not consistent',
                              -2: 'Larger NMAX is needed',
                              -3: 'Step size becomes too small',
                              -4: 'Problem is probably stiff'}
        new_message = None  # NOTE(review): dead store -- reassigned or unused below
        try:
            new_message = 'Dopri5 failed with flag {0}: {1}'.format(flag, FLAG_DOCUMENTATION[flag])
            exception = Exception(new_message)
        except KeyError:
            # We have no documentation for this exception, let's just reraise it
            exception = solver_exception
        # Use the superclass method to rethrow the exception with our wrapper
        super(Dopri5Solver, self)._handle_solver_exception(exception)
class LSODARSolver(SolverBase, UniqueNameInitialisationMixin):
    """Solver backed by ODEPACK's LSODAR routine."""

    @property
    def _solver_exception_class(self):
        from assimulo.exception import ODEPACK_Exception
        return ODEPACK_Exception

    def _default_solver_instance(self):
        from assimulo.solvers import LSODAR
        return LSODAR(self._assimulo_problem)

    @classmethod
    def unique_name(self):
        return 'lsodar'

    def _handle_solver_exception(self, solver_exception):
        """Translate LSODAR failure flags into readable messages before re-raising."""
        flag = parse_flag(solver_exception.message)
        from assimulo.exception import ODEPACK_Exception
        FLAG_DOCUMENTATION = {-1: 'Excess work done on this call (perhaps wrong jt)',
                              -2: 'Excess accuracy requested (tolerances too small)',
                              -3: 'Illegal input detected (see printed message)',
                              -4: 'Repeated error test failures (check all inputs)',
                              -5: 'Repeated convergence failures (perhaps bad jacobian supplied or wrong choice of '
                                  'jt or tolerances)',
                              -6: 'Error weight became zero during problem.',
                              -7: 'Work space insufficient to finish (see messages)'}
        new_message = None  # NOTE(review): dead store -- reassigned or unused below
        try:
            new_message = 'LSODAR failed with flag {0}: {1}'.format(flag, FLAG_DOCUMENTATION[flag])
            exception = ODEPACK_Exception(new_message)
        except KeyError:
            # We have no documentation for this exception, let's just reraise it
            exception = solver_exception
        # Use the superclass method to rethrow the exception with our wrapper
        super(LSODARSolver, self)._handle_solver_exception(exception)
class ExplicitEulerSolver(SolverBase, UniqueNameInitialisationMixin):
    """Fixed-step explicit Euler solver."""

    def _default_solver_instance(self):
        from assimulo.solvers import ExplicitEuler
        return ExplicitEuler(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'euler'

    def simulate(self, timepoints):
        """Simulate, then resample trajectories onto the requested timepoints.

        The Euler solver does not return the correct timepoints for some
        reason, so each trajectory is resampled as a workaround.
        """
        trajectories = super(ExplicitEulerSolver, self).simulate(timepoints)
        return [trajectory.resample(timepoints) for trajectory in trajectories]
class RungeKutta4Solver(SolverBase, UniqueNameInitialisationMixin):
    """Fixed-step fourth-order Runge-Kutta solver."""

    def _default_solver_instance(self):
        from assimulo.solvers import RungeKutta4
        return RungeKutta4(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'rungekutta4'

    def simulate(self, timepoints):
        """Simulate, then resample trajectories onto the requested timepoints.

        The RungeKutta4 solver does not return the correct timepoints for
        some reason, so each trajectory is resampled as a workaround.
        """
        trajectories = super(RungeKutta4Solver, self).simulate(timepoints)
        return [trajectory.resample(timepoints) for trajectory in trajectories]
class RungeKutta34Solver(SolverBase, UniqueNameInitialisationMixin):
    """Adaptive Runge-Kutta solver of order 3(4)."""

    def _default_solver_instance(self):
        from assimulo.solvers import RungeKutta34
        return RungeKutta34(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'rungekutta34'
class Radau5Solver(SolverBase, UniqueNameInitialisationMixin):
    """Implicit Radau5 solver."""

    def _default_solver_instance(self):
        from assimulo.solvers import Radau5ODE
        return Radau5ODE(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'radau5'

    def _handle_solver_exception(self, solver_exception):
        """Translate Radau5 failure flags into readable messages before re-raising."""
        # Let's try and parse the exception flag, to add some helpful info
        flag = parse_flag(solver_exception.message)
        FLAG_DOCUMENTATION = {-1: 'Input is not consistent',
                              -2: 'Larger NMAX is needed',
                              -3: 'Step size becomes too small',
                              -4: 'Matrix is repeatedly singular'}
        new_message = None  # NOTE(review): dead store -- reassigned or unused below
        try:
            new_message = 'Radau5 failed with flag {0}: {1}'.format(flag, FLAG_DOCUMENTATION[flag])
            exception = Exception(new_message)
        except KeyError:
            # We have no documentation for this exception, let's just reraise it
            exception = solver_exception
        # Use the superclass method to rethrow the exception with our wrapper
        super(Radau5Solver, self)._handle_solver_exception(exception)
class RodasSolver(SolverBase, UniqueNameInitialisationMixin):
    """Solver based on the Rodas Rosenbrock method."""

    def _default_solver_instance(self):
        from assimulo.solvers import RodasODE
        return RodasODE(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'rodas'

    def _handle_solver_exception(self, solver_exception):
        """Translate Rodas failure flags into readable messages before re-raising."""
        # Try to parse the exception flag so a readable description can be added.
        flag = parse_flag(solver_exception.message)
        flag_descriptions = {-1: 'Input is not consistent',
                             -2: 'Larger NMAX is needed',
                             -3: 'Step size becomes too small',
                             -4: 'Matrix is repeatedly singular'}
        if flag in flag_descriptions:
            exception = Exception('Rodas failed with flag {0}: {1}'.format(flag, flag_descriptions[flag]))
        else:
            # No documentation for this flag; fall back to the original exception.
            exception = solver_exception
        # Delegate to the superclass to wrap and re-throw the exception.
        super(RodasSolver, self)._handle_solver_exception(exception)
#-- Solvers with sensitivity support -----------------------------------------------------------------------------------
def _add_sensitivity_data_to_trajectories(trajectories, raw_sensitivity_data, parameters):
    """Attach per-parameter sensitivity trajectories to each plain trajectory.

    ``raw_sensitivity_data`` is indexed as [parameter, timepoint, equation],
    matching the order of ``parameters`` and ``trajectories`` respectively.
    """
    trajectories_with_sensitivity_data = []
    for equation_index, trajectory in enumerate(trajectories):
        # Collect one sensitivity trajectory per model parameter.
        sensitivity_trajectories = [
            Trajectory(trajectory.timepoints,
                       raw_sensitivity_data[parameter_index, :, equation_index],
                       SensitivityTerm(trajectory.description, parameter))
            for parameter_index, parameter in enumerate(parameters)]
        trajectories_with_sensitivity_data.append(
            TrajectoryWithSensitivityData.from_trajectory(trajectory, sensitivity_trajectories))
    return trajectories_with_sensitivity_data
class SensitivitySolverBase(SolverBase):
    """Base class for solvers that also compute parameter sensitivities."""

    @property
    def _assimulo_problem(self):
        # NOTE(review): unlike the superclass this is a plain (non-memoised)
        # property, so a fresh problem object is built on each access -- confirm
        # whether that is intentional.
        rhs = self._problem.right_hand_side_as_function
        parameters = self._parameters
        initial_conditions = self._initial_conditions
        initial_timepoint = self._starting_time
        # Solvers with sensitivity support should be able to accept parameters
        # into rhs function directly
        model = Explicit_Problem(lambda t, x, p: rhs(x, p),
                                 initial_conditions, initial_timepoint)
        model.p0 = np.array(parameters)
        return model

    def _results_to_trajectories(self, simulated_timepoints, simulated_values):
        """Wrap solver results into trajectories enriched with sensitivity data."""
        trajectories = super(SensitivitySolverBase, self)._results_to_trajectories(simulated_timepoints,
                                                                                  simulated_values)
        # p_sol holds the sensitivity solution accumulated by the solver.
        sensitivities_raw = np.array(self._solver.p_sol)
        trajectories_with_sensitivity_data = _add_sensitivity_data_to_trajectories(trajectories, sensitivities_raw,
                                                                                  self._problem.parameters)
        return trajectories_with_sensitivity_data
class CVodeSolverWithSensitivities(SensitivitySolverBase, CVodeMixin):
    """CVode solver that computes parameter sensitivities alongside the solution."""

    def _default_solver_instance(self):
        solver = self._cvode_instance(self._assimulo_problem, self._options)
        # It is necessary to set usesens to true here as we are non-parametric here
        solver.usesens = True
        # Continuous reporting is required for the sensitivity data (p_sol).
        solver.report_continuously = True
        return solver
class ODE15sSolverWithSensitivities(SensitivitySolverBase, ODE15sMixin):
    """ode15s-configured CVode solver that also computes parameter sensitivities."""

    def _default_solver_instance(self):
        solver = self._cvode_instance(self._assimulo_problem, self._options)
        # It is necessary to set usesens to true here as we are non-parametric here
        solver.usesens = True
        # Continuous reporting is required for the sensitivity data (p_sol).
        solver.report_continuously = True
        return solver
| [
"inspect.getmembers",
"assimulo.solvers.Radau5ODE",
"assimulo.solvers.RungeKutta34",
"inspect.isclass",
"assimulo.solvers.LSODAR",
"assimulo.solvers.RungeKutta4",
"assimulo.solvers.sundials.CVode",
"means.util.sympyhelpers.to_one_dim_array",
"re.match",
"means.simulation.trajectory.Trajectory",
... | [((2064, 2105), 'inspect.getmembers', 'inspect.getmembers', (['sys.modules[__name__]'], {}), '(sys.modules[__name__])\n', (2082, 2105), False, 'import inspect\n'), ((3553, 3611), 're.match', 're.match', (['""".* failed with flag (-\\\\d+)"""', 'exception_message'], {}), "('.* failed with flag (-\\\\d+)', exception_message)\n", (3561, 3611), False, 'import re\n'), ((5726, 5789), 'means.util.sympyhelpers.to_one_dim_array', 'to_one_dim_array', (['parameters'], {'dtype': 'NP_FLOATING_POINT_PRECISION'}), '(parameters, dtype=NP_FLOATING_POINT_PRECISION)\n', (5742, 5789), False, 'from means.util.sympyhelpers import to_one_dim_array\n'), ((5819, 5890), 'means.util.sympyhelpers.to_one_dim_array', 'to_one_dim_array', (['initial_conditions'], {'dtype': 'NP_FLOATING_POINT_PRECISION'}), '(initial_conditions, dtype=NP_FLOATING_POINT_PRECISION)\n', (5835, 5890), False, 'from means.util.sympyhelpers import to_one_dim_array\n'), ((9718, 9730), 'assimulo.solvers.sundials.CVode', 'CVode', (['model'], {}), '(model)\n', (9723, 9730), False, 'from assimulo.solvers.sundials import CVode\n'), ((11893, 11923), 'assimulo.solvers.runge_kutta.Dopri5', 'Dopri5', (['self._assimulo_problem'], {}), '(self._assimulo_problem)\n', (11899, 11923), False, 'from assimulo.solvers.runge_kutta import Dopri5\n'), ((13221, 13251), 'assimulo.solvers.LSODAR', 'LSODAR', (['self._assimulo_problem'], {}), '(self._assimulo_problem)\n', (13227, 13251), False, 'from assimulo.solvers import LSODAR\n'), ((14835, 14872), 'assimulo.solvers.ExplicitEuler', 'ExplicitEuler', (['self._assimulo_problem'], {}), '(self._assimulo_problem)\n', (14848, 14872), False, 'from assimulo.solvers import ExplicitEuler\n'), ((15536, 15571), 'assimulo.solvers.RungeKutta4', 'RungeKutta4', (['self._assimulo_problem'], {}), '(self._assimulo_problem)\n', (15547, 15571), False, 'from assimulo.solvers import RungeKutta4\n'), ((16246, 16282), 'assimulo.solvers.RungeKutta34', 'RungeKutta34', (['self._assimulo_problem'], {}), 
'(self._assimulo_problem)\n', (16258, 16282), False, 'from assimulo.solvers import RungeKutta34\n'), ((16525, 16558), 'assimulo.solvers.Radau5ODE', 'Radau5ODE', (['self._assimulo_problem'], {}), '(self._assimulo_problem)\n', (16534, 16558), False, 'from assimulo.solvers import Radau5ODE\n'), ((17715, 17747), 'assimulo.solvers.RodasODE', 'RodasODE', (['self._assimulo_problem'], {}), '(self._assimulo_problem)\n', (17723, 17747), False, 'from assimulo.solvers import RodasODE\n'), ((19813, 19900), 'means.simulation.trajectory.TrajectoryWithSensitivityData.from_trajectory', 'TrajectoryWithSensitivityData.from_trajectory', (['trajectory', 'sensitivity_trajectories'], {}), '(trajectory,\n sensitivity_trajectories)\n', (19858, 19900), False, 'from means.simulation.trajectory import Trajectory, TrajectoryWithSensitivityData\n'), ((20666, 20686), 'numpy.array', 'np.array', (['parameters'], {}), '(parameters)\n', (20674, 20686), True, 'import numpy as np\n'), ((21024, 21052), 'numpy.array', 'np.array', (['self._solver.p_sol'], {}), '(self._solver.p_sol)\n', (21032, 21052), True, 'import numpy as np\n'), ((2343, 2366), 'inspect.isclass', 'inspect.isclass', (['object'], {}), '(object)\n', (2358, 2366), False, 'import inspect\n'), ((4419, 4488), 'means.simulation.trajectory.Trajectory', 'Trajectory', (['simulated_timepoints', 'simulated_value_column', 'description'], {}), '(simulated_timepoints, simulated_value_column, description)\n', (4429, 4488), False, 'from means.simulation.trajectory import Trajectory, TrajectoryWithSensitivityData\n'), ((14330, 14360), 'assimulo.exception.ODEPACK_Exception', 'ODEPACK_Exception', (['new_message'], {}), '(new_message)\n', (14347, 14360), False, 'from assimulo.exception import ODEPACK_Exception\n'), ((19719, 19769), 'means.simulation.SensitivityTerm', 'SensitivityTerm', (['trajectory.description', 'parameter'], {}), '(trajectory.description, parameter)\n', (19734, 19769), False, 'from means.simulation import SensitivityTerm\n')] |
"""Implementation of 'Interpretable Counterfactual Explanations Guided by Prototypes'
Based on the original paper authored by <NAME> and <NAME>, and available at
https://arxiv.org/abs/1907.02584
We have used the implementation of the method available in the library ALIBI
(https://github.com/SeldonIO/alibi), with the configuration matching that in the original paper as
far as possible. See the appendix of our paper for details.
"""
import os
from typing import Optional
import numpy as np
import tensorflow as tf
import torch
import uces.utils
from alibi.explainers.cfproto import CounterFactualProto
from tensorflow.keras.layers import (
Conv2D,
Dense,
Dropout,
Flatten,
Input,
MaxPooling2D,
UpSampling2D,
)
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import to_categorical
from torch import Tensor
from uces.datasets import SupportedDataset
from .methods import CEMethod
# ALIBI is not compatible with v2 behaviour, thus disable it.
tf.compat.v1.disable_v2_behavior()
# Sanity prints: confirm the TF version in use and that eager mode is off.
print("TF version: ", tf.__version__)
print("Eager execution enabled: ", tf.executing_eagerly())  # False
class PrototypesMethod(CEMethod):
    """Counterfactual explanations guided by prototypes, via ALIBI's
    ``CounterFactualProto``.

    For image (MNIST-like) datasets an autoencoder/encoder pair guides the
    search; for tabular datasets a kd-tree over the training data is used.
    Trained classifiers and autoencoders are cached on disk under
    ``results_dir`` so repeated runs skip retraining.
    """
    def __init__(self, k: Optional[int] = None, theta: Optional[int] = None) -> None:
        """Constructs a new instance.
        If k and theta are None, will use the default values for the dataset.
        :param k: number of nearest instances used to build class prototypes.
        :param theta: weight of the prototype loss term.
        """
        super().__init__()
        self.k = k
        self.theta = theta
    def _generate_counterfactuals(
        self,
        results_dir: str,
        ensemble_id: int,
        dataset: str,
        train_dataset: SupportedDataset,
        n_classes: int,
        originals: Tensor,
        targets: Tensor,
    ) -> Tensor:
        # NOTE(review): ensemble_id and n_classes are not referenced below;
        # presumably they are part of the CEMethod interface -- confirm.
        # Map concrete dataset names onto the three supported dataset families.
        if dataset == "mnist" or dataset.startswith("simulatedmnist"):
            dataset_type = "mnist"
        elif dataset == "breastcancer" or dataset.startswith("simulatedbc"):
            dataset_type = "breastcancer"
        elif dataset == "bostonhousing":
            dataset_type = "bostonhousing"
        else:
            raise ValueError
        x_train, y_train = uces.utils.get_inputs_and_targets(
            train_dataset, device=originals.device
        )
        # ALIBI / Keras operate on numpy arrays, so detach from torch here.
        x_train = x_train.detach().cpu().numpy()
        y_train = y_train.detach().cpu().numpy()
        x_test = originals.detach().cpu().numpy()
        if dataset_type == "mnist":
            x_train = np.reshape(x_train, (-1, 28, 28, 1))
            x_test = np.reshape(x_test, (-1, 28, 28, 1))
        y_train = to_categorical(y_train)
        # Statistics used to standardise tabular data (and to undo it at the end).
        tabular_train_mean = np.mean(x_train, axis=0)
        tabular_train_std = np.std(x_train, axis=0)
        if dataset_type == "mnist":
            # Images: min-max scale to [-0.5, 0.5].
            x_train = ((x_train - x_train.min()) / (x_train.max() - x_train.min())) - 0.5
            x_test = ((x_test - x_test.min()) / (x_test.max() - x_test.min())) - 0.5
        else:
            # Tabular: standardise with the training mean/std.
            x_train = (x_train - tabular_train_mean) / tabular_train_std
            x_test = (x_test - tabular_train_mean) / tabular_train_std
        base_path = os.path.join(results_dir, self.name)
        if not os.path.exists(base_path):
            os.mkdir(base_path)
        # Load a cached classifier if one exists; otherwise train and cache it.
        classifier_path = os.path.join(results_dir, self.name, f"{dataset}_classifier.h5")
        if os.path.exists(classifier_path):
            classifier = load_model(classifier_path)
        else:
            if dataset_type == "mnist":
                classifier = _mnist_cnn_model()
                classifier.fit(x_train, y_train, batch_size=64, epochs=3, verbose=1)
            elif dataset_type == "breastcancer":
                classifier = _bc_classifier_model()
                classifier.fit(x_train, y_train, batch_size=128, epochs=500, verbose=1)
            elif dataset_type == "bostonhousing":
                classifier = _bh_classifier_model()
                classifier.fit(x_train, y_train, batch_size=128, epochs=500, verbose=1)
            else:
                raise ValueError
            classifier.save(classifier_path, save_format="h5")
        if dataset_type == "mnist":
            # Images: prototype search is guided by an autoencoder/encoder
            # pair, also cached on disk.
            ae_path = os.path.join(results_dir, self.name, f"{dataset}_ae.h5")
            enc_path = os.path.join(results_dir, self.name, f"{dataset}_enc.h5")
            if os.path.exists(ae_path) and os.path.exists(enc_path):
                ae = load_model(ae_path)
                enc = load_model(enc_path, compile=False)
            else:
                ae, enc, dec = _mnist_ae_model()
                ae.fit(
                    x_train,
                    x_train,
                    batch_size=128,
                    epochs=4,
                    validation_data=(x_test, x_test),
                    verbose=1,
                )
                ae.save(ae_path, save_format="h5")
                enc.save(enc_path, save_format="h5")
            cf = CounterFactualProto(
                classifier,
                shape=(1,) + x_train.shape[1:],
                gamma=100.0,
                theta=100.0 if self.theta is None else self.theta,
                ae_model=ae,
                enc_model=enc,
                max_iterations=2000,
                feature_range=(x_train.min(), x_train.max()),
                c_init=1.0,
                c_steps=1,
            )
            cf.fit(x_train)
        else:
            # For breastcancer the hyperparameters are taken from the prototypes paper.
            # For bostonhousing, we used the same hyperparameters as for breastcancer, except for
            # k and theta which we found by grid search (k is set below).
            cf = CounterFactualProto(
                classifier,
                use_kdtree=True,
                shape=(1,) + x_train.shape[1:],
                theta=100.0 if self.theta is None else self.theta,
                max_iterations=2000,
                feature_range=(x_train.min(axis=0), x_train.max(axis=0)),
                c_init=1.0,
                c_steps=1,
            )
            cf.fit(x_train)
        if self.k is None:
            if dataset == "bostonhousing" and self.k is None:
                k: Optional[int] = 10
            else:
                # Setting k=None uses the default value appropriate for the encoder/kd-tree.
                k = None
        else:
            k = self.k
        counterfactuals_list = []
        for j in range(targets.size(0)):
            X = x_test[j].reshape((1,) + x_test[0].shape)
            # NOTE(review): the target class is taken from targets[0] for every
            # original j -- looks like it assumes all targets are equal; confirm.
            explanation = cf.explain(X, target_class=[targets[0].item()], k=k, verbose=True,)
            if explanation.cf is None:
                # No counterfactual found: fall back to the last adversarial
                # candidate held in the solver's TF session.
                print(f"Failed to find CE for original {j}")
                counterfactuals_list.append(torch.tensor(cf.sess.run(cf.adv)))
            else:
                print("Counterfactual prediction: {}".format(explanation.cf["class"]))
                print("Closest prototype class: {}".format(explanation.id_proto))
                counterfactuals_list.append(torch.tensor(explanation.cf["X"]))
        # Prototypes uses different normalization to our code, so we need to undo this.
        if dataset_type == "mnist":
            # We generate CEs normalized between -0.5 and 0.5, but need to return them between 0 and 1.
            counterfactuals_normalized = torch.cat(counterfactuals_list)
            counterfactuals_renormalized = counterfactuals_normalized + 0.5
        else:
            # We generate CEs standardized to mean 0 std dev 1. We need to undo this.
            counterfactuals_normalized = torch.cat(counterfactuals_list)
            counterfactuals_renormalized = (
                counterfactuals_normalized * tabular_train_std + tabular_train_mean
            )
        return counterfactuals_renormalized.view(originals.size())
    @property
    def name(self) -> str:
        # Used as the on-disk cache directory name, so it encodes k and theta.
        return f"alibi_prototypes2_k{self.k}_theta{self.theta}"
    @property
    def use_adversarial_training(self) -> bool:
        return False
def _mnist_cnn_model():
    """Build and compile the small CNN used as the MNIST classifier."""
    inputs = Input(shape=(28, 28, 1))
    # Apply the stack sequentially through the functional API.
    stack = [
        Conv2D(filters=32, kernel_size=2, padding="same", activation="relu"),
        MaxPooling2D(pool_size=2),
        Dropout(0.3),
        Conv2D(filters=64, kernel_size=2, padding="same", activation="relu"),
        MaxPooling2D(pool_size=2),
        Dropout(0.3),
        Flatten(),
        Dense(256, activation="relu"),
        Dropout(0.5),
        Dense(10, activation="softmax"),
    ]
    outputs = inputs
    for layer in stack:
        outputs = layer(outputs)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model
def _mnist_ae_model():
    """Build the MNIST autoencoder.

    Returns a tuple ``(autoencoder, encoder, decoder)`` of uncompiled encoder
    and decoder plus the compiled end-to-end autoencoder (MSE loss).
    """
    # Encoder: 28x28x1 -> 14x14x1 latent map.
    enc_in = Input(shape=(28, 28, 1))
    h = Conv2D(16, (3, 3), activation="relu", padding="same")(enc_in)
    h = Conv2D(16, (3, 3), activation="relu", padding="same")(h)
    h = MaxPooling2D((2, 2), padding="same")(h)
    latent = Conv2D(1, (3, 3), activation=None, padding="same")(h)
    encoder = Model(enc_in, latent)
    # Decoder: 14x14x1 latent map -> 28x28x1 reconstruction.
    dec_in = Input(shape=(14, 14, 1))
    h = Conv2D(16, (3, 3), activation="relu", padding="same")(dec_in)
    h = UpSampling2D((2, 2))(h)
    h = Conv2D(16, (3, 3), activation="relu", padding="same")(h)
    reconstruction = Conv2D(1, (3, 3), activation=None, padding="same")(h)
    decoder = Model(dec_in, reconstruction)
    # End-to-end model: decode(encode(x)).
    autoencoder = Model(enc_in, decoder(encoder(enc_in)))
    autoencoder.compile(loss="mse", optimizer="adam")
    return autoencoder, encoder, decoder
def _bc_classifier_model():
    """Two-hidden-layer softmax MLP over the 30 breast-cancer features."""
    features = Input(shape=(30,))
    hidden = Dense(40, activation="relu")(features)
    hidden = Dense(40, activation="relu")(hidden)
    probs = Dense(2, activation="softmax")(hidden)
    model = Model(features, probs)
    model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])
    return model
def _bh_classifier_model():
    """Two-hidden-layer softmax MLP over the 13 Boston-housing features."""
    features = Input(shape=(13,))
    hidden = Dense(40, activation="relu")(features)
    hidden = Dense(40, activation="relu")(hidden)
    probs = Dense(2, activation="softmax")(hidden)
    model = Model(features, probs)
    model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])
    return model
| [
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Input",
"numpy.mean",
"os.path.exists",
"numpy.reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.executing_eagerly",
"os.mkdir",
"tensorflow.keras.mod... | [((1010, 1044), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.compat.v1.disable_v2_behavior', ([], {}), '()\n', (1042, 1044), True, 'import tensorflow as tf\n'), ((1119, 1141), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (1139, 1141), True, 'import tensorflow as tf\n'), ((7944, 7968), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(28, 28, 1)'}), '(shape=(28, 28, 1))\n', (7949, 7968), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8400, 8433), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'x_in', 'outputs': 'x_out'}), '(inputs=x_in, outputs=x_out)\n', (8405, 8433), False, 'from tensorflow.keras.models import Model, load_model\n'), ((8575, 8599), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(28, 28, 1)'}), '(shape=(28, 28, 1))\n', (8580, 8599), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8863, 8883), 'tensorflow.keras.models.Model', 'Model', (['x_in', 'encoded'], {}), '(x_in, encoded)\n', (8868, 8883), False, 'from tensorflow.keras.models import Model, load_model\n'), ((8898, 8922), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(14, 14, 1)'}), '(shape=(14, 14, 1))\n', (8903, 8922), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9172, 9194), 'tensorflow.keras.models.Model', 'Model', (['dec_in', 'decoded'], {}), '(dec_in, decoded)\n', (9177, 9194), False, 'from tensorflow.keras.models import Model, load_model\n'), ((9249, 9267), 'tensorflow.keras.models.Model', 'Model', (['x_in', 'x_out'], {}), '(x_in, x_out)\n', (9254, 9267), False, 'from tensorflow.keras.models import Model, load_model\n'), ((9405, 9423), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(30,)'}), '(shape=(30,))\n', (9410, 9423), False, 'from tensorflow.keras.layers import 
Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9570, 9588), 'tensorflow.keras.models.Model', 'Model', (['x_in', 'x_out'], {}), '(x_in, x_out)\n', (9575, 9588), False, 'from tensorflow.keras.models import Model, load_model\n'), ((9748, 9766), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(13,)'}), '(shape=(13,))\n', (9753, 9766), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9913, 9931), 'tensorflow.keras.models.Model', 'Model', (['x_in', 'x_out'], {}), '(x_in, x_out)\n', (9918, 9931), False, 'from tensorflow.keras.models import Model, load_model\n'), ((2529, 2552), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train'], {}), '(y_train)\n', (2543, 2552), False, 'from tensorflow.keras.utils import to_categorical\n'), ((2583, 2607), 'numpy.mean', 'np.mean', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (2590, 2607), True, 'import numpy as np\n'), ((2636, 2659), 'numpy.std', 'np.std', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (2642, 2659), True, 'import numpy as np\n'), ((3050, 3086), 'os.path.join', 'os.path.join', (['results_dir', 'self.name'], {}), '(results_dir, self.name)\n', (3062, 3086), False, 'import os\n'), ((3188, 3252), 'os.path.join', 'os.path.join', (['results_dir', 'self.name', 'f"""{dataset}_classifier.h5"""'], {}), "(results_dir, self.name, f'{dataset}_classifier.h5')\n", (3200, 3252), False, 'import os\n'), ((3264, 3295), 'os.path.exists', 'os.path.exists', (['classifier_path'], {}), '(classifier_path)\n', (3278, 3295), False, 'import os\n'), ((7977, 8045), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=2, padding='same', activation='relu')\n", (7983, 8045), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8060, 
8085), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (8072, 8085), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8097, 8109), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (8104, 8109), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8122, 8190), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=2, padding='same', activation='relu')\n", (8128, 8190), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8202, 8227), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (8214, 8227), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8239, 8251), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (8246, 8251), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8264, 8273), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8271, 8273), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8285, 8314), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (8290, 8314), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8326, 8338), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8333, 8338), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8354, 8385), 
'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (8359, 8385), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8608, 8661), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(16, (3, 3), activation='relu', padding='same')\n", (8614, 8661), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8676, 8729), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(16, (3, 3), activation='relu', padding='same')\n", (8682, 8729), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8741, 8777), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (8753, 8777), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8795, 8845), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(1)', '(3, 3)'], {'activation': 'None', 'padding': '"""same"""'}), "(1, (3, 3), activation=None, padding='same')\n", (8801, 8845), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((8931, 8984), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(16, (3, 3), activation='relu', padding='same')\n", (8937, 8984), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9001, 9021), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (9013, 9021), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, 
MaxPooling2D, UpSampling2D\n'), ((9033, 9086), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(16, (3, 3), activation='relu', padding='same')\n", (9039, 9086), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9104, 9154), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(1)', '(3, 3)'], {'activation': 'None', 'padding': '"""same"""'}), "(1, (3, 3), activation=None, padding='same')\n", (9110, 9154), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9432, 9460), 'tensorflow.keras.layers.Dense', 'Dense', (['(40)'], {'activation': '"""relu"""'}), "(40, activation='relu')\n", (9437, 9460), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9475, 9503), 'tensorflow.keras.layers.Dense', 'Dense', (['(40)'], {'activation': '"""relu"""'}), "(40, activation='relu')\n", (9480, 9503), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9519, 9549), 'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (9524, 9549), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9775, 9803), 'tensorflow.keras.layers.Dense', 'Dense', (['(40)'], {'activation': '"""relu"""'}), "(40, activation='relu')\n", (9780, 9803), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9818, 9846), 'tensorflow.keras.layers.Dense', 'Dense', (['(40)'], {'activation': '"""relu"""'}), "(40, activation='relu')\n", (9823, 9846), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((9862, 9892), 
'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (9867, 9892), False, 'from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D\n'), ((2417, 2453), 'numpy.reshape', 'np.reshape', (['x_train', '(-1, 28, 28, 1)'], {}), '(x_train, (-1, 28, 28, 1))\n', (2427, 2453), True, 'import numpy as np\n'), ((2475, 2510), 'numpy.reshape', 'np.reshape', (['x_test', '(-1, 28, 28, 1)'], {}), '(x_test, (-1, 28, 28, 1))\n', (2485, 2510), True, 'import numpy as np\n'), ((3102, 3127), 'os.path.exists', 'os.path.exists', (['base_path'], {}), '(base_path)\n', (3116, 3127), False, 'import os\n'), ((3141, 3160), 'os.mkdir', 'os.mkdir', (['base_path'], {}), '(base_path)\n', (3149, 3160), False, 'import os\n'), ((3322, 3349), 'tensorflow.keras.models.load_model', 'load_model', (['classifier_path'], {}), '(classifier_path)\n', (3332, 3349), False, 'from tensorflow.keras.models import Model, load_model\n'), ((4089, 4145), 'os.path.join', 'os.path.join', (['results_dir', 'self.name', 'f"""{dataset}_ae.h5"""'], {}), "(results_dir, self.name, f'{dataset}_ae.h5')\n", (4101, 4145), False, 'import os\n'), ((4169, 4226), 'os.path.join', 'os.path.join', (['results_dir', 'self.name', 'f"""{dataset}_enc.h5"""'], {}), "(results_dir, self.name, f'{dataset}_enc.h5')\n", (4181, 4226), False, 'import os\n'), ((7225, 7256), 'torch.cat', 'torch.cat', (['counterfactuals_list'], {}), '(counterfactuals_list)\n', (7234, 7256), False, 'import torch\n'), ((7474, 7505), 'torch.cat', 'torch.cat', (['counterfactuals_list'], {}), '(counterfactuals_list)\n', (7483, 7505), False, 'import torch\n'), ((4242, 4265), 'os.path.exists', 'os.path.exists', (['ae_path'], {}), '(ae_path)\n', (4256, 4265), False, 'import os\n'), ((4270, 4294), 'os.path.exists', 'os.path.exists', (['enc_path'], {}), '(enc_path)\n', (4284, 4294), False, 'import os\n'), ((4317, 4336), 'tensorflow.keras.models.load_model', 'load_model', 
(['ae_path'], {}), '(ae_path)\n', (4327, 4336), False, 'from tensorflow.keras.models import Model, load_model\n'), ((4359, 4394), 'tensorflow.keras.models.load_model', 'load_model', (['enc_path'], {'compile': '(False)'}), '(enc_path, compile=False)\n', (4369, 4394), False, 'from tensorflow.keras.models import Model, load_model\n'), ((6920, 6953), 'torch.tensor', 'torch.tensor', (["explanation.cf['X']"], {}), "(explanation.cf['X'])\n", (6932, 6953), False, 'import torch\n')] |
from torch.utils.data import Dataset
import numpy as np
import time
from tqdm import tqdm
def binary_search(arr, k):
    """Return True iff the sorted sequence `arr` contains the value `k`."""
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        pivot = arr[mid]
        if pivot == k:
            return True
        if pivot < k:
            # Target lies in the upper half.
            lo = mid + 1
        else:
            # Target lies in the lower half.
            hi = mid - 1
    return False
def build_ratings(node_subgraph, ratings):
    # Filter the (user, item, label) rows of `ratings` down to those whose
    # item appears in `node_subgraph` (which must be sorted ascending for the
    # binary search below).
    # NOTE(review): the run scan pairs run i with sorted `unique_items[i]`, so
    # this assumes the rows are grouped by item in ascending item order --
    # TODO confirm against callers.
    unique_items = np.unique(ratings.T[1])
    # Boundary scan: indices[i] is the starting row of the i-th contiguous run
    # of equal items; a final entry equal to len(items) closes the last run.
    indices = []
    items = ratings.T[1]
    i, j = 0, 0
    while j != len(items):
        if items[i] != items[j]:
            indices.append(i)
            i = j
        j += 1
    indices.append(i)
    indices.append(j)
    items = len(indices) - 1  # number of runs (reuses the name `items`)
    # Collect the row indices of every run whose item is not in the subgraph.
    remove = []
    for i in range(items):
        if not binary_search(node_subgraph, unique_items[i]):
            remove.extend([j for j in range(indices[i], indices[i + 1])])
    dcm = np.delete(ratings, remove, axis=0)
    return dcm
class SubgraphRating(Dataset):
    """Torch dataset over (user, item, label) rating rows.

    With ``graph='subgraph'`` the rows are filtered via ``build_ratings`` to
    items present in ``node_subgraph``; with ``graph='full'`` all rows are
    kept.  ``node_subgraph`` must be sorted ascending (binary search in
    ``build_ratings`` relies on it).
    """
    def __init__(self, node_subgraph, ratings, graph='subgraph', verbose=False):
        # node_subgraph must be sorted ascending (was `assert 1 == 2` inside a loop).
        assert all(node_subgraph[i] <= node_subgraph[i + 1]
                   for i in range(len(node_subgraph) - 1)), \
            'node_subgraph must be sorted in ascending order'
        assert graph == 'subgraph' or graph == 'full'
        self.graph = graph
        t1 = time.time()
        if self.graph == 'subgraph':
            self.ratings = build_ratings(node_subgraph, ratings)
        else:
            # Bug fix: graph='full' previously left self.ratings unset, so
            # __len__/__getitem__ raised AttributeError.  Keep all ratings.
            self.ratings = ratings
        t2 = time.time()
        if verbose:
            print(f'Building ratings in {t2 - t1}')
            print(f'Number observation: {len(self.ratings)}')
    def __len__(self):
        # Number of rating rows.
        return self.ratings.shape[0]
    def __getitem__(self, idx):
        # One rating row as a dict of scalars.
        return {'user': self.ratings[idx, 0], 'item': self.ratings[idx, 1], 'label': self.ratings[idx, 2]}
class Rating(Dataset):
    """Torch dataset over a full (user, item, label) ratings array."""
    def __init__(self, ratings):
        # Rows are (user, item, label).
        self.ratings = ratings
    def __len__(self):
        return self.ratings.shape[0]
    def __getitem__(self, idx):
        user, item, label = self.ratings[idx, 0], self.ratings[idx, 1], self.ratings[idx, 2]
        return {'user': user, 'item': item, 'label': label}
| [
"numpy.delete",
"time.time",
"numpy.unique"
] | [((434, 457), 'numpy.unique', 'np.unique', (['ratings.T[1]'], {}), '(ratings.T[1])\n', (443, 457), True, 'import numpy as np\n'), ((916, 950), 'numpy.delete', 'np.delete', (['ratings', 'remove'], {'axis': '(0)'}), '(ratings, remove, axis=0)\n', (925, 950), True, 'import numpy as np\n'), ((1306, 1317), 'time.time', 'time.time', ([], {}), '()\n', (1315, 1317), False, 'import time\n'), ((1433, 1444), 'time.time', 'time.time', ([], {}), '()\n', (1442, 1444), False, 'import time\n')] |
#!/usr/bin/env python3
from gensim.models import KeyedVectors
from gensim.utils import tokenize
import numpy as np
from config import random_seed, word2vec_file, word2vec_dim, max_word_num
def load_word2vec():
    """Load the pretrained binary word2vec embeddings configured in config.py."""
    print("Load Word2Vec from {} ...".format(word2vec_file))
    vectors = KeyedVectors.load_word2vec_format(word2vec_file, binary=True)
    return vectors
def word2vec_unknown():
    """Deterministic random vector used for out-of-vocabulary tokens."""
    # Re-seed so every call produces the identical vector.
    np.random.seed(random_seed)
    unknown = np.random.uniform(-1, 1, (word2vec_dim,))
    return unknown
def word2vec_sentence(sentence, wv, arr=None, zero_padding=True, unknown_vec=None):
    """Embed one sentence into `arr`, one word vector per row.

    Returns ``(arr, tokens, skipped_ind)`` where ``skipped_ind`` lists the
    positions of out-of-vocabulary tokens (filled with ``unknown_vec``).
    """
    tokens = list(tokenize(sentence, lowercase=False, deacc=False))
    if arr is None:
        n_used_token = len(tokens)
        arr = np.empty((n_used_token, word2vec_dim))
    else:
        # A caller-provided buffer caps how many tokens can be embedded.
        n_used_token = min(arr.shape[0], len(tokens))
    if unknown_vec is None:
        unknown_vec = word2vec_unknown()
    skipped_ind = []
    for pos in range(n_used_token):
        word = tokens[pos]
        if word in wv:
            arr[pos, :] = wv[word]
        else:
            # Out of vocabulary: remember the position and use the fallback vector.
            skipped_ind.append(pos)
            arr[pos, :] = unknown_vec
    # Zero out unused trailing rows of a caller-provided buffer.
    if zero_padding and n_used_token < arr.shape[0]:
        arr[n_used_token:, :] = 0
    return arr, tokens, skipped_ind
def word2vec_sentences(sentences, wv=None, print_stat=True):
    # Embed every sentence into a fixed-size (len(sentences), max_word_num,
    # word2vec_dim) array, optionally printing token/padding/unknown stats.
    if wv is None:
        wv = load_word2vec()
    data = np.empty((len(sentences), max_word_num, word2vec_dim))
    n = len(sentences)
    unknown_count = dict()
    token_set = set()
    n_unknown = np.empty((n,))
    n_token = np.empty((n,))
    # NOTE(review): `offset` is allocated but never used below -- dead code?
    offset = np.empty((n,))
    print("Transform words to vectors ...", end='')
    for i in range(n):
        # Progress dot every 5000 sentences.
        if (i + 1) % 5000 == 0:
            print(".", end="")
        # Fill data[i] in place; word2vec_sentence clips/pads to max_word_num.
        sentence_arr, tokens, unknown_ind = word2vec_sentence(sentences[i], wv, data[i])
        n_unknown[i] = len(unknown_ind)
        n_token[i] = len(tokens)
        token_set.update(tokens)
        # Tally how often each out-of-vocabulary token appears.
        for ind in unknown_ind:
            unknown_tok = tokens[ind]
            if unknown_tok in unknown_count:
                unknown_count[unknown_tok] += 1
            else:
                unknown_count[unknown_tok] = 1
    print(".")
    if print_stat:
        def print_stat_func(arr, sum_n_token, desc):
            # Summary line: count, share and distribution of a per-sentence stat.
            print("    {} of {} tokens are {} ({:.1f}%), min {}, max {}, mean {:.2f}, median {}"
                  .format(int(arr.sum()), int(sum_n_token), desc, 100*(arr.sum()/sum_n_token), int(arr.min()),
                          int(arr.max()), arr.mean(), int(np.percentile(arr, 50))))
        print("Print statistics ...")
        n_padded = (max_word_num - n_token).clip(0)
        n_clipped = (n_token - max_word_num).clip(0)
        sum_n_token = n_token.sum()
        print("  Dataset contains {} sentences, fixed sentence length is {}, number of unique tokens is {}"
              .format(n, max_word_num, len(token_set)))
        print_stat_func(n_token, sum_n_token, "in dataset sentences")
        print_stat_func(n_clipped, sum_n_token, "clipped")
        print_stat_func(n_padded, max_word_num * n, "padded")
        print_stat_func(n_unknown, sum_n_token, "unknown")
        common_unknowns = sorted(unknown_count.items(), key=lambda x: x[1])[::-1][:10]
        print("  Most common unknowns: {}"
              .format(", ".join(["{} ({})".format(t, c) for t, c in common_unknowns])))
    return data
def main():
    """Demo: embed one sentence and print the resulting matrices."""
    wv = load_word2vec()
    np.random.seed(random_seed)
    sentence = 'The police in Hintertupfingen is slow today!'
    matrix, tokens, skipped = word2vec_sentence(sentence, wv)
    print("Full sentence: ", sentence)
    print("All tokens: ", tokens)
    print("Skipped (zero vec): ", [tokens[i] for i in skipped])
    print("Matrix (only 5D):\n", matrix[:, :5])
    print()
    batch = word2vec_sentences([sentence], wv)
    print("Matrix (only 10x5D):\n", batch[0, :10, :5])
if __name__ == "__main__":
    main()
| [
"gensim.utils.tokenize",
"gensim.models.KeyedVectors.load_word2vec_format",
"numpy.empty",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.percentile"
] | [((285, 346), 'gensim.models.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['word2vec_file'], {'binary': '(True)'}), '(word2vec_file, binary=True)\n', (318, 346), False, 'from gensim.models import KeyedVectors\n'), ((421, 448), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (435, 448), True, 'import numpy as np\n'), ((460, 501), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(word2vec_dim,)'], {}), '(-1, 1, (word2vec_dim,))\n', (477, 501), True, 'import numpy as np\n'), ((1500, 1514), 'numpy.empty', 'np.empty', (['(n,)'], {}), '((n,))\n', (1508, 1514), True, 'import numpy as np\n'), ((1529, 1543), 'numpy.empty', 'np.empty', (['(n,)'], {}), '((n,))\n', (1537, 1543), True, 'import numpy as np\n'), ((1557, 1571), 'numpy.empty', 'np.empty', (['(n,)'], {}), '((n,))\n', (1565, 1571), True, 'import numpy as np\n'), ((3383, 3410), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (3397, 3410), True, 'import numpy as np\n'), ((605, 653), 'gensim.utils.tokenize', 'tokenize', (['sentence'], {'lowercase': '(False)', 'deacc': '(False)'}), '(sentence, lowercase=False, deacc=False)\n', (613, 653), False, 'from gensim.utils import tokenize\n'), ((724, 762), 'numpy.empty', 'np.empty', (['(n_used_token, word2vec_dim)'], {}), '((n_used_token, word2vec_dim))\n', (732, 762), True, 'import numpy as np\n'), ((2486, 2508), 'numpy.percentile', 'np.percentile', (['arr', '(50)'], {}), '(arr, 50)\n', (2499, 2508), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.segmentation import (morphological_chan_vese,
morphological_geodesic_active_contour,
inverse_gaussian_gradient,
checkerboard_level_set)
import skimage
def store_evolution_in(lst):
    """Returns a callback function to store the evolution of the level sets in
    the given list.
    """
    def _store(level_set):
        # Display the current level set, then keep an independent copy.
        plt.imshow(level_set, cmap='gray')
        lst.append(np.copy(level_set))
    return _store
# Morphological ACWE (active contours without edges) on the camera image.
image = img_as_float(data.camera())
# Initial level set: a checkerboard pattern.
init_ls = checkerboard_level_set(image.shape, 6)
# List with intermediate results for plotting the evolution
evolution = []
callback = store_evolution_in(evolution)
# 35 iterations of morphological Chan-Vese; the callback records every step.
ls = morphological_chan_vese(image, 35, init_level_set=init_ls, smoothing=3,
                             iter_callback=callback)
fig, axes = plt.subplots(2, 2, figsize=(8, 8))
ax = axes.flatten()
# Top-left panel: final ACWE contour over the input image.
ax[0].imshow(image, cmap="gray")
ax[0].set_axis_off()
ax[0].contour(ls, [0.5], colors='r')
ax[0].set_title("Morphological ACWE segmentation", fontsize=12)
# Top-right panel: snapshots of the level set at selected iterations.
ax[1].imshow(ls, cmap="gray")
ax[1].set_axis_off()
contour = ax[1].contour(evolution[2], [0.5], colors='g')
contour.collections[0].set_label("Iteration 2")
contour = ax[1].contour(evolution[7], [0.5], colors='y')
contour.collections[0].set_label("Iteration 7")
contour = ax[1].contour(evolution[-1], [0.5], colors='r')
contour.collections[0].set_label("Iteration 35")
ax[1].legend(loc="upper right")
title = "Morphological ACWE evolution"
ax[1].set_title(title, fontsize=12)
# Morphological GAC (geodesic active contours) on the coins image.
image = img_as_float(data.coins())
# GAC operates on a gradient-based edge image, not the raw intensities.
gimage = inverse_gaussian_gradient(image)
# Initial level set: a rectangle inset 10 pixels from the border.
init_ls = np.zeros(image.shape, dtype=np.int8)
init_ls[10:-10, 10:-10] = 1
# List with intermediate results for plotting the evolution
evolution2 = []
callback = store_evolution_in(evolution2)
# balloon=-1 shrinks the contour; it stops where the edge response is strong.
ls = morphological_geodesic_active_contour(gimage, 230, init_ls,
                                           smoothing=1, balloon=-1,
                                           threshold=0.69,
                                           iter_callback=callback)
# Bottom-left panel: final GAC contour over the input image.
ax[2].imshow(image, cmap="gray")
ax[2].set_axis_off()
ax[2].contour(ls, [0.5], colors='r')
ax[2].set_title("Morphological GAC segmentation", fontsize=12)
# Bottom-right panel: snapshots of the level set at selected iterations.
ax[3].imshow(ls, cmap="gray")
ax[3].set_axis_off()
contour = ax[3].contour(evolution2[0], [0.5], colors='g')
contour.collections[0].set_label("Iteration 0")
contour = ax[3].contour(evolution2[100], [0.5], colors='y')
contour.collections[0].set_label("Iteration 100")
contour = ax[3].contour(evolution2[-1], [0.5], colors='r')
contour.collections[0].set_label("Iteration 230")
ax[3].legend(loc="upper right")
title = "Morphological GAC evolution"
ax[3].set_title(title, fontsize=12)
fig.tight_layout()
plt.show()
import pdb
# Debugging breakpoint removed: `pdb.set_trace()` dropped every run of this
# demo into the debugger after the plots were closed.  Re-add locally when
# stepping through is actually needed.
"matplotlib.pyplot.imshow",
"numpy.copy",
"skimage.segmentation.morphological_chan_vese",
"skimage.segmentation.checkerboard_level_set",
"skimage.segmentation.inverse_gaussian_gradient",
"skimage.segmentation.morphological_geodesic_active_contour",
"numpy.zeros",
"pdb.set_trace",
"skimage.data.coins... | [((593, 631), 'skimage.segmentation.checkerboard_level_set', 'checkerboard_level_set', (['image.shape', '(6)'], {}), '(image.shape, 6)\n', (615, 631), False, 'from skimage.segmentation import morphological_chan_vese, morphological_geodesic_active_contour, inverse_gaussian_gradient, checkerboard_level_set\n'), ((753, 852), 'skimage.segmentation.morphological_chan_vese', 'morphological_chan_vese', (['image', '(35)'], {'init_level_set': 'init_ls', 'smoothing': '(3)', 'iter_callback': 'callback'}), '(image, 35, init_level_set=init_ls, smoothing=3,\n iter_callback=callback)\n', (776, 852), False, 'from skimage.segmentation import morphological_chan_vese, morphological_geodesic_active_contour, inverse_gaussian_gradient, checkerboard_level_set\n'), ((870, 904), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(8, 8)'}), '(2, 2, figsize=(8, 8))\n', (882, 904), True, 'import matplotlib.pyplot as plt\n'), ((1623, 1655), 'skimage.segmentation.inverse_gaussian_gradient', 'inverse_gaussian_gradient', (['image'], {}), '(image)\n', (1648, 1655), False, 'from skimage.segmentation import morphological_chan_vese, morphological_geodesic_active_contour, inverse_gaussian_gradient, checkerboard_level_set\n'), ((1687, 1723), 'numpy.zeros', 'np.zeros', (['image.shape'], {'dtype': 'np.int8'}), '(image.shape, dtype=np.int8)\n', (1695, 1723), True, 'import numpy as np\n'), ((1875, 2003), 'skimage.segmentation.morphological_geodesic_active_contour', 'morphological_geodesic_active_contour', (['gimage', '(230)', 'init_ls'], {'smoothing': '(1)', 'balloon': '(-1)', 'threshold': '(0.69)', 'iter_callback': 'callback'}), '(gimage, 230, init_ls, smoothing=1,\n balloon=-1, threshold=0.69, iter_callback=callback)\n', (1912, 2003), False, 'from skimage.segmentation import morphological_chan_vese, morphological_geodesic_active_contour, inverse_gaussian_gradient, checkerboard_level_set\n'), ((2697, 2707), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (2705, 2707), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2734), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (2732, 2734), False, 'import pdb\n'), ((547, 560), 'skimage.data.camera', 'data.camera', ([], {}), '()\n', (558, 560), False, 'from skimage import data, img_as_float\n'), ((1600, 1612), 'skimage.data.coins', 'data.coins', ([], {}), '()\n', (1610, 1612), False, 'from skimage import data, img_as_float\n'), ((433, 459), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x'], {'cmap': '"""gray"""'}), "(x, cmap='gray')\n", (443, 459), True, 'import matplotlib.pyplot as plt\n'), ((475, 485), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (482, 485), True, 'import numpy as np\n')] |
import numpy as np
import pytest
@pytest.fixture
def random_data(size, dtype):
rng = np.random.default_rng(2841)
data = rng.integers(-100, 100, size=size)
data = data.astype(dtype)
return data
| [
"numpy.random.default_rng"
] | [((91, 118), 'numpy.random.default_rng', 'np.random.default_rng', (['(2841)'], {}), '(2841)\n', (112, 118), True, 'import numpy as np\n')] |
import nltk
import random
import spacy
import string
import numpy as np
from nltk import ngrams
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from spacy.tokens import Doc
np.random.seed(1234)
random.seed(1234)
stopWords = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
ps = PorterStemmer()
nlp = spacy.load("en_core_web_sm")
char_vocab_to_id = {'char_PAD': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11,
'l': 12, 'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17, 'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22, 'w': 23,
'x': 24, 'y': 25, 'z': 26,
'$': 27, '-': 28, ':': 29, '@': 30, '.': 31, '/': 32, '\'': 33, '&': 44, '%': 45, '<': 46, '>': 47, '_': 48,
'0': 34, '1': 35, '2': 36, '3': 37, '4': 38, '5': 39, '6': 40, '7': 41, '8': 42, '9': 43}
ent_vocab_to_id = {'ent_PAD': 0, 'GPE': 1, 'LOC': 2, 'DATE': 3, 'TIME': 4, 'MONEY': 5, 'ORDINAL':6, 'CARDINAL': 7}
def replace_punctuations(s, default_char=''):
''' punctuation removal '''
for c in string.punctuation:
if c == '-':
s = s.replace(c, ' ')
if c not in {':', '$', '@', '.', '/', '\'', '&', '%', '<', '>'}:
s = s.replace(c, default_char)
return s
class WhitespaceTokenizer(object):
def __init__(self, vocab):
self.vocab = vocab
def __call__(self, text):
words = text.split()
# All tokens 'own' a subsequent space character in this tokenizer
spaces = [True] * len(words)
return Doc(self.vocab, words=words, spaces=spaces)
nlp = spacy.load('en_core_web_sm')
nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
def get_vectorized_char_seq(phrase, char_vocab_to_id, q_len, q_wd_len):
q_char_vec = []
for wd in phrase.split():
wd_vec = []
for char in wd:
if char in char_vocab_to_id:
wd_vec.append(char_vocab_to_id[char])
else:
wd_vec.append(0)
if len(wd_vec) >= q_wd_len:
wd_vec = wd_vec[:q_wd_len]
else:
wd_vec = pad_arr_seq(wd_vec, q_wd_len, 0)
q_char_vec.append(wd_vec)
if len(q_char_vec) >= q_len:
return q_char_vec[:q_len]
else:
return pad_arr_seq(q_char_vec, q_len, [0] * q_wd_len)
def get_gold_labels_tagger(q_phrase, para_val_sample, max_seq_len):
# q_phrase = ' '.join(q_phrase.split())
para_val_sample = ' '.join(para_val_sample.split())
q_word_list = q_phrase.split()
para_val_sample_word_list = para_val_sample.split()
label_vec = [0] * len(q_phrase.split())
index_list = []
for wd_id, q_word in enumerate(q_word_list):
if q_word == para_val_sample_word_list[0]:
if ' '.join(q_word_list[wd_id:]).startswith(para_val_sample):
for j in range(len(para_val_sample_word_list)):
index_list.append(wd_id+j)
for pos_id in index_list:
label_vec[pos_id] = 1
assert len(label_vec) == len(q_phrase.split())
if len(label_vec) >= max_seq_len:
return label_vec[:max_seq_len], len(q_word_list)
else:
return pad_arr_seq(label_vec, max_seq_len, 0), len(q_word_list)
def get_vectorized_entity_tags(phrase, ent_vocab_to_id, q_len):
q_ent_tag_vec = []
phrase = phrase.strip()
doc = nlp(phrase)
word_tags = []
for i in range(len(doc)):
word_tags.append((doc[i].text, doc[i].ent_iob_, doc[i].ent_type_))
if doc[i].ent_type_ in ent_vocab_to_id:
q_ent_tag_vec.append(ent_vocab_to_id[doc[i].ent_type_])
else:
q_ent_tag_vec.append(ent_vocab_to_id['ent_PAD'])
if len(q_ent_tag_vec) >= q_len:
return q_ent_tag_vec[:q_len]
else:
return pad_arr_seq(q_ent_tag_vec, q_len, 0)
def get_query_n_grams(q_phrase, max_n=3, min_n=1):
q_words = q_phrase.lower().split()
pos_tag_dict = {tup[0]:tup[1] for tup in nltk.pos_tag(q_words)}
exclueded_pos_set = { 'VB', 'VBD', 'VBG', 'VBZ'}
q_uni_bigram_phrases = set()
for n_gr in range(min_n, max_n+1, 1):
n_gram_list = list(ngrams(q_words, n_gr))
for tup in n_gram_list:
n_gram_phrase = ' '.join([wd for wd in list(tup) if wd not in stopWords
and pos_tag_dict[wd] not in exclueded_pos_set])
if n_gram_phrase != '':
q_uni_bigram_phrases.add(n_gram_phrase.strip())
return q_uni_bigram_phrases
def pad_arr_seq(curr_seq, max_len, padding_seq):
for i in range(max_len-len(curr_seq)):
curr_seq.append(padding_seq)
assert len(curr_seq) == max_len
return curr_seq
def get_activity_id(node_DB, activity_name):
for activity_id in node_DB['activity']:
if node_DB['activity'][activity_id]['ActivityName'] == activity_name:
return activity_id
return '-'
def preprocess_text(phrase):
phrase = replace_punctuations(phrase)
if len(phrase) < 3:
return ''
token_list = []
for wd in phrase.split():
# if wd in stopWords:
# continue
if not wd.isdigit():
token_list.append(lemmatizer.lemmatize(wd))
else:
token_list.append(wd)
return ' '.join(token_list)
def has_partial_match(wd, cand_wd_set):
cand_wd = ' '.join(cand_wd_set)
if cand_wd.startswith(wd) or cand_wd.endswith(wd):
sim = (len(wd) * 1.0) / len(cand_wd)
#print(wd, sim, cand_wd)
if 0.5 > sim >= 0.12 and wd.isdigit():
return 1, True
if sim >= 0.5:
return 2, True
return 0, False
def get_match_vec(q_phrase, cand_phrase, max_q_len):
'''
:param q_phrase:
:param cand_phrase:
:param max_q_len:
:return:
'''
q_match_vec = []
cand_wd_set = cand_phrase.lower().split()
for wd in q_phrase.lower().split():
if wd in cand_wd_set:
q_match_vec.append(3)
else:
match_id, is_match = has_partial_match(wd, cand_wd_set)
q_match_vec.append(match_id)
if len(q_match_vec) >= max_q_len:
return q_match_vec[:max_q_len]
else:
return pad_arr_seq(q_match_vec, max_q_len, 0)
def get_vectorized_phrase(phrase, vocab_to_id, max_seq_len):
phrase_vec = []
for wd in phrase.split():
if wd in vocab_to_id:
phrase_vec.append(vocab_to_id[wd])
else:
phrase_vec.append(0)
if len(phrase_vec) >= max_seq_len:
return phrase_vec[:max_seq_len]
else:
return pad_arr_seq(phrase_vec, max_seq_len, 0)
def extract_noun_phrases(sentence):
doc = nlp(sentence)
noun_phrases = set()
exclude_set = set()
for token in doc:
if token.pos_ in {'PRON'}:
exclude_set.add(token.text)
for chunk in doc.noun_chunks:
noun_phrases.add(chunk.text)
noun_phrases = noun_phrases.difference(exclude_set)
return noun_phrases
def get_candidate_query_phrases(sentence):
noun_P = extract_noun_phrases(sentence)
q_n_grams = get_query_n_grams(sentence)
return q_n_grams.union(noun_P)
if __name__ == '__main__':
print(get_gold_labels_tagger('new york hotels for for 10 people','for 10 people' , 10)) | [
"nltk.pos_tag",
"nltk.corpus.stopwords.words",
"spacy.load",
"nltk.stem.WordNetLemmatizer",
"nltk.stem.PorterStemmer",
"random.seed",
"spacy.tokens.Doc",
"numpy.random.seed",
"nltk.ngrams"
] | [((237, 257), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (251, 257), True, 'import numpy as np\n'), ((258, 275), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (269, 275), False, 'import random\n'), ((334, 353), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (351, 353), False, 'from nltk.stem import WordNetLemmatizer\n'), ((359, 374), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (372, 374), False, 'from nltk.stem import PorterStemmer\n'), ((381, 409), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (391, 409), False, 'import spacy\n'), ((1697, 1725), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (1707, 1725), False, 'import spacy\n'), ((293, 319), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (308, 319), False, 'from nltk.corpus import stopwords\n'), ((1645, 1688), 'spacy.tokens.Doc', 'Doc', (['self.vocab'], {'words': 'words', 'spaces': 'spaces'}), '(self.vocab, words=words, spaces=spaces)\n', (1648, 1688), False, 'from spacy.tokens import Doc\n'), ((4037, 4058), 'nltk.pos_tag', 'nltk.pos_tag', (['q_words'], {}), '(q_words)\n', (4049, 4058), False, 'import nltk\n'), ((4216, 4237), 'nltk.ngrams', 'ngrams', (['q_words', 'n_gr'], {}), '(q_words, n_gr)\n', (4222, 4237), False, 'from nltk import ngrams\n')] |
import numpy as np
def integer_sequences(L, S, nondecr=False, m=None, M=None):
"""
Generate sequences of non-negative integers.
Parameters:
L: the length of the sequences
S: the sum of the integers in each sequence
Optional parameters:
nondecr: (boolean) return only non-decreasing sequences (default: False)
m: tuple of length L; gives lower bounds for coefficients of list (default: None)
M: tuple of length L; gives upper bounds for coefficients of list (default: None)
"""
# If M and m are not given, use the following defaults.
if M is None:
M = (S,)*L
if m is None:
m = (0,)*L
# If length=0 and sum=0 then yield the empty tuple.
# Otherwise, yield nothing.
if L==0:
if S==0:
yield tuple()
# If length=1 and sum lies within given boundaries, yield 1-tuple.
elif L==1:
if m[0]<=S<=M[0]:
yield (S,)
# If length>1, then loop through possible values for first coefficient,
# and recursively generate (L-1)-tuples that give the remaining coefficients.
elif L>1:
for first in range(m[0], min(S,M[0])+1):
m_next = m[1:] if nondecr==False else (first,)+m[2:]
for tail in integer_sequences(L=L-1, S=S-first, nondecr=nondecr, m=m_next, M=M[1:]):
yield (first,)+tail
def matrices_from_degree_sequence(deg_seq=tuple()):
"""
Generate all matrices that can occur as adjacency matrices
of r-marked graphs whose degree_sequence() equals deg_seq.
These are symmetric (r×r)-matrices with non-negative integer coefficients
with even coefficients on the diagonal, whose row sum equals deg_seq.
"""
L = len(deg_seq)
M = np.zeros((L,L), dtype=int)
if L==0:
yield M
elif L>0:
# Range over possible values for top left entries:
for top_left in range(0,deg_seq[0]+1,2):
M[0,0] = top_left
# Range over sequences that make up the rest of the first row:
for top_row_rest in integer_sequences(L=L-1, S=deg_seq[0]-top_left, M=deg_seq[1:]):
M[0,1:] = M[1:,0] = top_row_rest
# Compute the row sum of the remaining bottom right square
row_sum_rem = tuple(deg_seq[i] - M[0,i] for i in range(1,L))
# Loop over all possible bottom right squares:
for BRS in matrices_from_degree_sequence(deg_seq=row_sum_rem):
M[1:,1:]=BRS
yield M.copy()
def increasing_injections(r,s,m=1):
"""
Generates all increasing tuples of length r
with values in {1, ..., s}.
Optional argument:
m: minimum value of first entry (default=1, used for recursion)
"""
if r==0:
yield tuple()
elif r==1:
for i in range(m,s+1):
yield (i,)
else:
for first in range(m,s-r+2):
for tail in increasing_injections(r-1,s,m=first+1):
yield (first,)+tail
| [
"numpy.zeros"
] | [((1795, 1822), 'numpy.zeros', 'np.zeros', (['(L, L)'], {'dtype': 'int'}), '((L, L), dtype=int)\n', (1803, 1822), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
comaleName = r'\sc{Clear}'
class EE:
@staticmethod
def fx(x, s=0.0, h=0.5):
Z=(1 + s) * x ** 2 + 2 * (1 + h * s) * x * (1 - x) + (1 - x) ** 2
if Z>0:
return ((1 + s) * x ** 2 + (1 + h * s) * x * (1 - x)) / (Z)
else:
return 0
@staticmethod
def sig(x): return 1. / (1 + np.exp(-x))
@staticmethod
def logit(p): return np.log(p) - np.log(1 - p)
# def logit_(p): return T.log(p) - T.log(1 - p)
# def sig_(x): return 1. / (1 + T.exp(-x))
@staticmethod
def Nu(s, t, nu0, theta, n=2000): return EE.Z(EE.sig(t * s / 2 + EE.logit(nu0)), n, theta)
@staticmethod
def forward(x0=0.005,h=0.5,s=1,t=150):
def f(x,h=0.5,s=1): return ((1+s)*x*x + (1+h*s)*x*(1-x) )/((1+s)*x*x + 2*(1+h*s)*x*(1-x) +(1-x)**2)
x=[x0]
for i in range(t):
x+=[f(x[-1],h,s)]
return pd.Series(x)
floatX = 'float64'
@staticmethod
def Z(nu, n, theta): return theta * (
nu * ((nu + 1) / 2. - 1. / ((1 - nu) * n + 1)) + (1 - nu) * ((n + 1.) / (2 * n) - 1. / ((1 - nu) * n + 1)))
| [
"pandas.Series",
"numpy.exp",
"numpy.log"
] | [((929, 941), 'pandas.Series', 'pd.Series', (['x'], {}), '(x)\n', (938, 941), True, 'import pandas as pd\n'), ((428, 437), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (434, 437), True, 'import numpy as np\n'), ((440, 453), 'numpy.log', 'np.log', (['(1 - p)'], {}), '(1 - p)\n', (446, 453), True, 'import numpy as np\n'), ((372, 382), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (378, 382), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Simulation Utilities."""
# File to contain function necessary for the chi_square simulations
import copy
import logging
import numpy as np
def add_noise(flux, snr, use_mu=False):
"""Using the formulation 1/sigma (default) or mu/sigma from wikipedia.
https://en.wikipedia.org/wiki/Signal-to-noise_ratio#Alternative_definition
Applies noise based on the flux at each pixel.
"""
if not snr:
logging.warning("Assuming SNR=0 means add no noise")
return flux
else:
if use_mu:
sigma = np.median(flux) / snr
else:
sigma = np.ones_like(flux) / snr
# Add normal distributed noise at the snr level.
noisy_flux = flux + np.random.normal(0, sigma)
return noisy_flux
def combine_spectra(star, planet, alpha):
"""Combine the Spectrum objects "star" and "planet".
Strength ratio of alpha
spec = star + planet * alpha
"""
star = copy.copy(star)
planet = copy.copy(planet)
if np.all(star.xaxis == planet.xaxis): # make sure wavelengths even first
pass
else:
planet.interpolate1d_to(star)
# combined_spectrum = star + (planet*alpha)
# Combined spectra with proper normalization
norm_factor = 1 / (1 + alpha)
combined_spectrum = (star + (planet * alpha)) * norm_factor
return combined_spectrum
def spec_max_delta(obs_spec, rvs, gammas):
"""Calculate max doppler shift of a spectrum."""
return max_delta(obs_spec.xaxis, rvs, gammas)
def max_delta(wavelength, rvs, gammas):
"""Calculate max doppler shift.
Given a spectrum, and some doppler shifts, find the wavelength limit
to have full coverage without much wastage computations.
# Currently set at 2*delta.
"""
check_inputs(rvs)
check_inputs(gammas)
shift_max = np.max(np.abs(rvs)) + np.max(np.abs(gammas))
obs_limits = np.array([np.min(wavelength), np.max(wavelength)])
delta = [lim * shift_max / 299792.458 for lim in obs_limits]
return 2 * round(max(delta), 3)
def check_inputs(var):
"""Turn inputs into numpy arrays.
Defaults to zero if None.
"""
if (var is None) or ("None" in str(var)):
var = np.array([0])
elif isinstance(var, (np.float, np.int)):
var = np.asarray([var], dtype=np.float32)
if len(var) == 0: # Empty sequence
raise ValueError("Empty variable vector. Check config.yaml\n"
"var = {0}".format(var))
return var
| [
"numpy.random.normal",
"numpy.abs",
"numpy.ones_like",
"numpy.median",
"logging.warning",
"numpy.asarray",
"copy.copy",
"numpy.max",
"numpy.array",
"numpy.min",
"numpy.all"
] | [((969, 984), 'copy.copy', 'copy.copy', (['star'], {}), '(star)\n', (978, 984), False, 'import copy\n'), ((998, 1015), 'copy.copy', 'copy.copy', (['planet'], {}), '(planet)\n', (1007, 1015), False, 'import copy\n'), ((1024, 1058), 'numpy.all', 'np.all', (['(star.xaxis == planet.xaxis)'], {}), '(star.xaxis == planet.xaxis)\n', (1030, 1058), True, 'import numpy as np\n'), ((448, 500), 'logging.warning', 'logging.warning', (['"""Assuming SNR=0 means add no noise"""'], {}), "('Assuming SNR=0 means add no noise')\n", (463, 500), False, 'import logging\n'), ((2227, 2240), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2235, 2240), True, 'import numpy as np\n'), ((737, 763), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma'], {}), '(0, sigma)\n', (753, 763), True, 'import numpy as np\n'), ((1855, 1866), 'numpy.abs', 'np.abs', (['rvs'], {}), '(rvs)\n', (1861, 1866), True, 'import numpy as np\n'), ((1877, 1891), 'numpy.abs', 'np.abs', (['gammas'], {}), '(gammas)\n', (1883, 1891), True, 'import numpy as np\n'), ((1921, 1939), 'numpy.min', 'np.min', (['wavelength'], {}), '(wavelength)\n', (1927, 1939), True, 'import numpy as np\n'), ((1941, 1959), 'numpy.max', 'np.max', (['wavelength'], {}), '(wavelength)\n', (1947, 1959), True, 'import numpy as np\n'), ((2301, 2336), 'numpy.asarray', 'np.asarray', (['[var]'], {'dtype': 'np.float32'}), '([var], dtype=np.float32)\n', (2311, 2336), True, 'import numpy as np\n'), ((570, 585), 'numpy.median', 'np.median', (['flux'], {}), '(flux)\n', (579, 585), True, 'import numpy as np\n'), ((626, 644), 'numpy.ones_like', 'np.ones_like', (['flux'], {}), '(flux)\n', (638, 644), True, 'import numpy as np\n')] |
from __future__ import division, absolute_import, print_function
__copyright__ = "Copyright (C) 2018 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pyopencl as cl
import pyopencl.clrandom # noqa: F401
import loopy as lp
import pytest
import sys
from pyopencl.tools import ( # noqa: F401
pytest_generate_tests_for_pyopencl
as pytest_generate_tests)
from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa: F401
def test_register_function_lookup(ctx_factory):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
from testlib import register_log2_lookup
x = np.random.rand(10)
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
prog = lp.make_kernel(
"{[i]: 0<=i<10}",
"""
y[i] = log2(x[i])
""")
prog = lp.register_function_id_to_in_knl_callable_mapper(prog,
register_log2_lookup)
evt, (out, ) = prog(queue, x=x)
assert np.linalg.norm(np.log2(x)-out)/np.linalg.norm(np.log2(x)) < 1e-15
@pytest.mark.parametrize("inline", [False, True])
def test_register_knl(ctx_factory, inline):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
n = 2 ** 4
x = np.random.rand(n, n, n, n, n)
y = np.random.rand(n, n, n, n, n)
grandchild_knl = lp.make_function(
"{[i, j]:0<= i, j< 16}",
"""
c[i, j] = 2*a[i, j] + 3*b[i, j]
""", name='linear_combo1')
child_knl = lp.make_function(
"{[i, j]:0<=i, j < 16}",
"""
[i, j]: g[i, j] = linear_combo1([i, j]: e[i, j], [i, j]: f[i, j])
""", name='linear_combo2')
parent_knl = lp.make_kernel(
"{[i, j, k, l, m]: 0<=i, j, k, l, m<16}",
"""
[j, l]: z[i, j, k, l, m] = linear_combo2([j, l]: x[i, j, k, l, m],
[j, l]: y[i, j, k, l, m])
""",
kernel_data=[
lp.GlobalArg(
name='x',
dtype=np.float64,
shape=(16, 16, 16, 16, 16)),
lp.GlobalArg(
name='y',
dtype=np.float64,
shape=(16, 16, 16, 16, 16)), '...'],
)
knl = lp.register_callable_kernel(
parent_knl, child_knl)
knl = lp.register_callable_kernel(
knl, grandchild_knl)
if inline:
knl = lp.inline_callable_kernel(knl, 'linear_combo2')
knl = lp.inline_callable_kernel(knl, 'linear_combo1')
evt, (out, ) = knl(queue, x=x, y=y)
assert (np.linalg.norm(2*x+3*y-out)/(
np.linalg.norm(2*x+3*y))) < 1e-15
@pytest.mark.parametrize("inline", [False, True])
def test_slices_with_negative_step(ctx_factory, inline):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
n = 2 ** 4
x = np.random.rand(n, n, n, n, n)
y = np.random.rand(n, n, n, n, n)
child_knl = lp.make_function(
"{[i, j]:0<=i, j < 16}",
"""
g[i, j] = 2*e[i, j] + 3*f[i, j]
""", name="linear_combo")
parent_knl = lp.make_kernel(
"{[i, k, m]: 0<=i, k, m<16}",
"""
z[i, 15:-1:-1, k, :, m] = linear_combo(x[i, :, k, :, m],
y[i, :, k, :, m])
""",
kernel_data=[
lp.GlobalArg(
name='x',
dtype=np.float64,
shape=(16, 16, 16, 16, 16)),
lp.GlobalArg(
name='y',
dtype=np.float64,
shape=(16, 16, 16, 16, 16)),
lp.GlobalArg(
name='z',
dtype=np.float64,
shape=(16, 16, 16, 16, 16)), '...'],
)
knl = lp.register_callable_kernel(
parent_knl, child_knl)
if inline:
knl = lp.inline_callable_kernel(knl, 'linear_combo')
evt, (out, ) = knl(queue, x=x, y=y)
assert (np.linalg.norm(2*x+3*y-out[:, ::-1, :, :, :])/(
np.linalg.norm(2*x+3*y))) < 1e-15
@pytest.mark.parametrize("inline", [False, True])
def test_register_knl_with_call_with_kwargs(ctx_factory, inline):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
n = 2 ** 2
a_dev = cl.clrandom.rand(queue, (n, n, n, n, n), np.float32)
b_dev = cl.clrandom.rand(queue, (n, n, n, n, n), np.float32)
c_dev = cl.clrandom.rand(queue, (n, n, n, n, n), np.float64)
callee_knl = lp.make_function(
"{[i, j]:0<=i, j < %d}" % n,
"""
h[i, j] = 2 * e[i, j] + 3*f[i, j] + 4*g[i, j]
<>f1[i, j] = 2*f[i, j]
p[i, j] = 7 * e[i, j] + 4*f1[i, j] + 2*g[i, j]
""",
[
lp.GlobalArg('f, e, h, g'), '...'],
name='linear_combo')
caller_knl = lp.make_kernel(
"{[i, j, k, l, m]: 0<=i, j, k, l, m<%d}" % n,
"""
<> d[i, j, k, l, m] = 2*b[i, j, k, l, m]
[j, l]: x[i, j, k, l, m], [j, l]: y[i, j, k, l, m] = linear_combo(
f=[j, l]: a[i, j, k, l, m],
g=[j, l]: d[i, j, k, l, m],
e=[j, l]: c[i, j, k, l, m])
""")
knl = lp.register_callable_kernel(
caller_knl, callee_knl)
if inline:
knl = lp.inline_callable_kernel(knl, 'linear_combo')
evt, (out1, out2, ) = knl(queue, a=a_dev, b=b_dev, c=c_dev)
a = a_dev.get()
b = b_dev.get()
c = c_dev.get()
h = out1.get() # h = 2c + 3a + 8b
p = out2.get() # p = 7c + 8a + 4b
h_exact = 3*a + 8*b + 2*c
p_exact = 8*a + 4*b + 7*c
assert np.linalg.norm(h-h_exact)/np.linalg.norm(h_exact) < 1e-7
assert np.linalg.norm(p-p_exact)/np.linalg.norm(p_exact) < 1e-7
@pytest.mark.parametrize("inline", [False, True])
def test_register_knl_with_hw_axes(ctx_factory, inline):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
n = 2 ** 4
x_dev = cl.clrandom.rand(queue, (n, n, n, n, n), np.float64)
y_dev = cl.clrandom.rand(queue, (n, n, n, n, n), np.float64)
callee_knl = lp.make_function(
"{[i, j]:0<=i, j < 16}",
"""
g[i, j] = 2*e[i, j] + 3*f[i, j]
""", name='linear_combo')
callee_knl = lp.split_iname(callee_knl, "i", 4, inner_tag="l.0", outer_tag="g.0")
caller_knl = lp.make_kernel(
"{[i, j, k, l, m]: 0<=i, j, k, l, m<16}",
"""
[j, l]: z[i, j, k, l, m] = linear_combo([j, l]: x[i, j, k, l, m],
[j, l]: y[i, j, k, l, m])
"""
)
caller_knl = lp.split_iname(caller_knl, "i", 4, inner_tag="l.1", outer_tag="g.1")
knl = lp.register_callable_kernel(
caller_knl, callee_knl)
if inline:
knl = lp.inline_callable_kernel(knl, 'linear_combo')
evt, (out, ) = knl(queue, x=x_dev, y=y_dev)
x_host = x_dev.get()
y_host = y_dev.get()
assert np.linalg.norm(2*x_host+3*y_host-out.get())/np.linalg.norm(
2*x_host+3*y_host) < 1e-15
@pytest.mark.parametrize("inline", [False, True])
def test_shape_translation_through_sub_array_ref(ctx_factory, inline):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
x1 = cl.clrandom.rand(queue, (3, 2), dtype=np.float64)
x2 = cl.clrandom.rand(queue, (6, ), dtype=np.float64)
x3 = cl.clrandom.rand(queue, (6, 6), dtype=np.float64)
callee1 = lp.make_function(
"{[i]: 0<=i<6}",
"""
a[i] = 2*abs(b[i])
""", name="callee_fn1")
callee2 = lp.make_function(
"{[i, j]: 0<=i<3 and 0 <= j < 2}",
"""
a[i, j] = 3*b[i, j]
""", name="callee_fn2")
callee3 = lp.make_function(
"{[i]: 0<=i<6}",
"""
a[i] = 5*b[i]
""", name="callee_fn3")
knl = lp.make_kernel(
"{[i, j, k, l]: 0<= i < 6 and 0 <= j < 3 and 0 <= k < 2 and 0<=l<6}",
"""
[i]: y1[i//2, i%2] = callee_fn1([i]: x1[i//2, i%2])
[j, k]: y2[2*j+k] = callee_fn2([j, k]: x2[2*j+k])
[l]: y3[l, l] = callee_fn3([l]: x3[l, l])
""")
knl = lp.register_callable_kernel(knl, callee1)
knl = lp.register_callable_kernel(knl, callee2)
knl = lp.register_callable_kernel(knl, callee3)
if inline:
knl = lp.inline_callable_kernel(knl, 'callee_fn1')
knl = lp.inline_callable_kernel(knl, 'callee_fn2')
knl = lp.inline_callable_kernel(knl, 'callee_fn3')
knl = lp.set_options(knl, "write_cl")
knl = lp.set_options(knl, "return_dict")
evt, out_dict = knl(queue, x1=x1, x2=x2, x3=x3)
y1 = out_dict['y1'].get()
y2 = out_dict['y2'].get()
y3 = out_dict['y3'].get()
assert (np.linalg.norm(y1-2*x1.get())) < 1e-15
assert (np.linalg.norm(y2-3*x2.get())) < 1e-15
assert (np.linalg.norm(np.diag(y3-5*x3.get()))) < 1e-15
def test_multi_arg_array_call(ctx_factory):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
import pymbolic.primitives as p
n = 10
acc_i = p.Variable("acc_i")[0]
i = p.Variable("i")
index = p.Variable("index")[0]
a_i = p.Subscript(p.Variable("a"), p.Variable("i"))
argmin_kernel = lp.make_function(
"{[i]: 0 <= i < n}",
[
lp.Assignment(id="init2", assignee=index,
expression=0),
lp.Assignment(id="init1", assignee=acc_i,
expression="214748367"),
lp.Assignment(id="insn", assignee=index,
expression=p.If(p.Expression.eq(acc_i, a_i), i, index),
depends_on="update"),
lp.Assignment(id="update", assignee=acc_i,
expression=p.Variable("min")(acc_i, a_i),
depends_on="init1,init2")],
name="custom_argmin")
argmin_kernel = lp.fix_parameters(argmin_kernel, n=n)
knl = lp.make_kernel(
"{[i]:0<=i<n}",
"""
min_val, min_index = custom_argmin([i]:b[i])
""")
knl = lp.fix_parameters(knl, n=n)
knl = lp.set_options(knl, return_dict=True)
knl = lp.register_callable_kernel(knl, argmin_kernel)
b = np.random.randn(n)
evt, out_dict = knl(queue, b=b)
tol = 1e-15
from numpy.linalg import norm
assert(norm(out_dict['min_val'][0] - np.min(b)) < tol)
assert(norm(out_dict['min_index'][0] - np.argmin(b)) < tol)
@pytest.mark.parametrize("inline", [False, True])
def test_packing_unpacking(ctx_factory, inline):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
x1 = cl.clrandom.rand(queue, (3, 2), dtype=np.float64)
x2 = cl.clrandom.rand(queue, (6, ), dtype=np.float64)
callee1 = lp.make_function(
"{[i]: 0<=i<6}",
"""
a[i] = 2*b[i]
""", name="callee_fn1")
callee2 = lp.make_function(
"{[i, j]: 0<=i<2 and 0 <= j < 3}",
"""
a[i, j] = 3*b[i, j]
""", name="callee_fn2")
knl = lp.make_kernel(
"{[i, j, k]: 0<= i < 3 and 0 <= j < 2 and 0 <= k < 6}",
"""
[i, j]: y1[i, j] = callee_fn1([i, j]: x1[i, j])
[k]: y2[k] = callee_fn2([k]: x2[k])
""")
knl = lp.register_callable_kernel(knl, callee1)
knl = lp.register_callable_kernel(knl, callee2)
knl = lp.pack_and_unpack_args_for_call(knl, 'callee_fn1')
knl = lp.pack_and_unpack_args_for_call(knl, 'callee_fn2')
if inline:
knl = lp.inline_callable_kernel(knl, 'callee_fn1')
knl = lp.inline_callable_kernel(knl, 'callee_fn2')
knl = lp.set_options(knl, "write_cl")
knl = lp.set_options(knl, "return_dict")
evt, out_dict = knl(queue, x1=x1, x2=x2)
y1 = out_dict['y1'].get()
y2 = out_dict['y2'].get()
assert np.linalg.norm(2*x1.get()-y1)/np.linalg.norm(
2*x1.get()) < 1e-15
assert np.linalg.norm(3*x2.get()-y2)/np.linalg.norm(
3*x2.get()) < 1e-15
def test_non_sub_array_refs_arguments(ctx_factory):
import loopy as lp
from loopy.transform.callable import _match_caller_callee_argument_dimension_
callee = lp.make_function("{[i] : 0 <= i < 6}", "a[i] = a[i] + j",
[lp.GlobalArg("a", dtype="double", shape=(6,), is_output_only=False),
lp.ValueArg("j", dtype="int")], name="callee")
caller1 = lp.make_kernel("{[j] : 0 <= j < 2}", "callee(a[:], b[0])",
[lp.GlobalArg("a", dtype="double", shape=(6, ), is_output_only=False),
lp.GlobalArg("b", dtype="double", shape=(1, ), is_output_only=False)],
name="caller", target=lp.CTarget())
caller2 = lp.make_kernel("{[j] : 0 <= j < 2}", "callee(a[:], 3.1415926)",
[lp.GlobalArg("a", dtype="double", shape=(6, ),
is_output_only=False)],
name="caller", target=lp.CTarget())
caller3 = lp.make_kernel("{[j] : 0 <= j < 2}", "callee(a[:], kappa)",
[lp.GlobalArg("a", dtype="double", shape=(6, ),
is_output_only=False)],
name="caller", target=lp.CTarget())
registered = lp.register_callable_kernel(caller1, callee)
inlined = _match_caller_callee_argument_dimension_(registered, callee.name)
inlined = lp.inline_callable_kernel(inlined, callee.name)
print(inlined)
registered = lp.register_callable_kernel(caller2, callee)
inlined = _match_caller_callee_argument_dimension_(registered, callee.name)
inlined = lp.inline_callable_kernel(inlined, callee.name)
print(inlined)
registered = lp.register_callable_kernel(caller3, callee)
inlined = _match_caller_callee_argument_dimension_(registered, callee.name)
inlined = lp.inline_callable_kernel(inlined, callee.name)
print(inlined)
@pytest.mark.parametrize("inline", [False, True])
def test_empty_sub_array_refs(ctx_factory, inline):
# See: https://github.com/OP2/PyOP2/pull/559#discussion_r272208618
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
x = np.random.randn(10)
y = np.random.randn(10)
callee = lp.make_function(
"{[d]:0<=d<1}",
"""
a[d] = b[d] - c[d]
""", name='wence_function')
caller = lp.make_kernel("{[i]: 0<=i<10}",
"""
[]:z[i] = wence_function([]:x[i], []:y[i])
""",
[lp.GlobalArg('x, y', dtype=np.float64, shape=(10, )), '...'])
caller = lp.register_callable_kernel(caller, callee)
if inline:
caller = lp.inline_callable_kernel(caller, callee.name)
evt, (out, ) = caller(queue, x=x, y=y)
assert np.allclose(out, x-y)
def test_nested_callable_inline():
callee1 = lp.make_function(
"{[i]: 0<=i<1}",
"""
y[i] = 2*x[i]
""", name='callee1')
callee2 = lp.make_kernel(
"{[i]: 0<=i<1}",
"""
[]:y[i] = callee1([]: x[i])
""", name='callee2')
caller = lp.make_kernel("{[i]: 0<=i<10}",
"""
[]:z[i] = callee2([]:x[i])
""",
[lp.GlobalArg('x', dtype=float, shape=lp.auto),
'...'])
callee2 = lp.register_callable_kernel(callee2, callee1)
callee2 = lp.inline_callable_kernel(callee2, callee1.name)
callee2 = callee2.root_kernel
caller = lp.register_callable_kernel(caller, callee2)
caller = lp.inline_callable_kernel(caller, callee2.name)
print(caller)
if __name__ == "__main__":
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from pytest import main
main([__file__])
# vim: foldmethod=marker
| [
"loopy.register_callable_kernel",
"pyopencl.create_some_context",
"numpy.random.rand",
"loopy.make_function",
"loopy.set_options",
"loopy.GlobalArg",
"numpy.linalg.norm",
"loopy.split_iname",
"loopy.transform.callable._match_caller_callee_argument_dimension_",
"loopy.inline_callable_kernel",
"py... | [((2067, 2115), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inline"""', '[False, True]'], {}), "('inline', [False, True])\n", (2090, 2115), False, 'import pytest\n'), ((3726, 3774), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inline"""', '[False, True]'], {}), "('inline', [False, True])\n", (3749, 3774), False, 'import pytest\n'), ((5185, 5233), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inline"""', '[False, True]'], {}), "('inline', [False, True])\n", (5208, 5233), False, 'import pytest\n'), ((6989, 7037), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inline"""', '[False, True]'], {}), "('inline', [False, True])\n", (7012, 7037), False, 'import pytest\n'), ((8301, 8349), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inline"""', '[False, True]'], {}), "('inline', [False, True])\n", (8324, 8349), False, 'import pytest\n'), ((11719, 11767), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inline"""', '[False, True]'], {}), "('inline', [False, True])\n", (11742, 11767), False, 'import pytest\n'), ((15061, 15109), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inline"""', '[False, True]'], {}), "('inline', [False, True])\n", (15084, 15109), False, 'import pytest\n'), ((1564, 1584), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (1579, 1584), True, 'import pyopencl as cl\n'), ((1640, 1658), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (1654, 1658), True, 'import numpy as np\n'), ((1669, 1693), 'pyopencl.create_some_context', 'cl.create_some_context', ([], {}), '()\n', (1691, 1693), True, 'import pyopencl as cl\n'), ((1706, 1726), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (1721, 1726), True, 'import pyopencl as cl\n'), ((1739, 1826), 'loopy.make_kernel', 'lp.make_kernel', (['"""{[i]: 0<=i<10}"""', '"""\n y[i] = log2(x[i])\n """'], {}), '(\'{[i]: 0<=i<10}\',\n """\n y[i] = log2(x[i])\n """)\n', 
(1753, 1826), True, 'import loopy as lp\n'), ((1859, 1936), 'loopy.register_function_id_to_in_knl_callable_mapper', 'lp.register_function_id_to_in_knl_callable_mapper', (['prog', 'register_log2_lookup'], {}), '(prog, register_log2_lookup)\n', (1908, 1936), True, 'import loopy as lp\n'), ((2196, 2216), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (2211, 2216), True, 'import pyopencl as cl\n'), ((2241, 2270), 'numpy.random.rand', 'np.random.rand', (['n', 'n', 'n', 'n', 'n'], {}), '(n, n, n, n, n)\n', (2255, 2270), True, 'import numpy as np\n'), ((2279, 2308), 'numpy.random.rand', 'np.random.rand', (['n', 'n', 'n', 'n', 'n'], {}), '(n, n, n, n, n)\n', (2293, 2308), True, 'import numpy as np\n'), ((2331, 2468), 'loopy.make_function', 'lp.make_function', (['"""{[i, j]:0<= i, j< 16}"""', '"""\n c[i, j] = 2*a[i, j] + 3*b[i, j]\n """'], {'name': '"""linear_combo1"""'}), '(\'{[i, j]:0<= i, j< 16}\',\n """\n c[i, j] = 2*a[i, j] + 3*b[i, j]\n """, name=\n \'linear_combo1\')\n', (2347, 2468), True, 'import loopy as lp\n'), ((2502, 2673), 'loopy.make_function', 'lp.make_function', (['"""{[i, j]:0<=i, j < 16}"""', '"""\n [i, j]: g[i, j] = linear_combo1([i, j]: e[i, j], [i, j]: f[i, j])\n """'], {'name': '"""linear_combo2"""'}), '(\'{[i, j]:0<=i, j < 16}\',\n """\n [i, j]: g[i, j] = linear_combo1([i, j]: e[i, j], [i, j]: f[i, j])\n """\n , name=\'linear_combo2\')\n', (2518, 2673), True, 'import loopy as lp\n'), ((3322, 3372), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['parent_knl', 'child_knl'], {}), '(parent_knl, child_knl)\n', (3349, 3372), True, 'import loopy as lp\n'), ((3396, 3444), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['knl', 'grandchild_knl'], {}), '(knl, grandchild_knl)\n', (3423, 3444), True, 'import loopy as lp\n'), ((3868, 3888), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (3883, 3888), True, 'import pyopencl as cl\n'), ((3913, 3942), 'numpy.random.rand', 
'np.random.rand', (['n', 'n', 'n', 'n', 'n'], {}), '(n, n, n, n, n)\n', (3927, 3942), True, 'import numpy as np\n'), ((3951, 3980), 'numpy.random.rand', 'np.random.rand', (['n', 'n', 'n', 'n', 'n'], {}), '(n, n, n, n, n)\n', (3965, 3980), True, 'import numpy as np\n'), ((3998, 4134), 'loopy.make_function', 'lp.make_function', (['"""{[i, j]:0<=i, j < 16}"""', '"""\n g[i, j] = 2*e[i, j] + 3*f[i, j]\n """'], {'name': '"""linear_combo"""'}), '(\'{[i, j]:0<=i, j < 16}\',\n """\n g[i, j] = 2*e[i, j] + 3*f[i, j]\n """, name=\n \'linear_combo\')\n', (4014, 4134), True, 'import loopy as lp\n'), ((4898, 4948), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['parent_knl', 'child_knl'], {}), '(parent_knl, child_knl)\n', (4925, 4948), True, 'import loopy as lp\n'), ((5336, 5356), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (5351, 5356), True, 'import pyopencl as cl\n'), ((5386, 5438), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(n, n, n, n, n)', 'np.float32'], {}), '(queue, (n, n, n, n, n), np.float32)\n', (5402, 5438), True, 'import pyopencl as cl\n'), ((5451, 5503), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(n, n, n, n, n)', 'np.float32'], {}), '(queue, (n, n, n, n, n), np.float32)\n', (5467, 5503), True, 'import pyopencl as cl\n'), ((5516, 5568), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(n, n, n, n, n)', 'np.float64'], {}), '(queue, (n, n, n, n, n), np.float64)\n', (5532, 5568), True, 'import pyopencl as cl\n'), ((5948, 6414), 'loopy.make_kernel', 'lp.make_kernel', (["('{[i, j, k, l, m]: 0<=i, j, k, l, m<%d}' % n)", '"""\n <> d[i, j, k, l, m] = 2*b[i, j, k, l, m]\n [j, l]: x[i, j, k, l, m], [j, l]: y[i, j, k, l, m] = linear_combo(\n f=[j, l]: a[i, j, k, l, m],\n g=[j, l]: d[i, j, k, l, m],\n e=[j, l]: c[i, j, k, l, m])\n """'], {}), '(\'{[i, j, k, l, m]: 0<=i, j, k, l, m<%d}\' % n,\n """\n <> d[i, j, k, l, m] = 2*b[i, j, k, l, m]\n [j, l]: x[i, j, k, l, m], [j, l]: y[i, j, k, l, m] = 
linear_combo(\n f=[j, l]: a[i, j, k, l, m],\n g=[j, l]: d[i, j, k, l, m],\n e=[j, l]: c[i, j, k, l, m])\n """\n )\n', (5962, 6414), True, 'import loopy as lp\n'), ((6442, 6493), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['caller_knl', 'callee_knl'], {}), '(caller_knl, callee_knl)\n', (6469, 6493), True, 'import loopy as lp\n'), ((7131, 7151), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (7146, 7151), True, 'import pyopencl as cl\n'), ((7181, 7233), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(n, n, n, n, n)', 'np.float64'], {}), '(queue, (n, n, n, n, n), np.float64)\n', (7197, 7233), True, 'import pyopencl as cl\n'), ((7246, 7298), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(n, n, n, n, n)', 'np.float64'], {}), '(queue, (n, n, n, n, n), np.float64)\n', (7262, 7298), True, 'import pyopencl as cl\n'), ((7317, 7453), 'loopy.make_function', 'lp.make_function', (['"""{[i, j]:0<=i, j < 16}"""', '"""\n g[i, j] = 2*e[i, j] + 3*f[i, j]\n """'], {'name': '"""linear_combo"""'}), '(\'{[i, j]:0<=i, j < 16}\',\n """\n g[i, j] = 2*e[i, j] + 3*f[i, j]\n """, name=\n \'linear_combo\')\n', (7333, 7453), True, 'import loopy as lp\n'), ((7488, 7556), 'loopy.split_iname', 'lp.split_iname', (['callee_knl', '"""i"""', '(4)'], {'inner_tag': '"""l.0"""', 'outer_tag': '"""g.0"""'}), "(callee_knl, 'i', 4, inner_tag='l.0', outer_tag='g.0')\n", (7502, 7556), True, 'import loopy as lp\n'), ((7575, 7818), 'loopy.make_kernel', 'lp.make_kernel', (['"""{[i, j, k, l, m]: 0<=i, j, k, l, m<16}"""', '"""\n [j, l]: z[i, j, k, l, m] = linear_combo([j, l]: x[i, j, k, l, m],\n [j, l]: y[i, j, k, l, m])\n """'], {}), '(\'{[i, j, k, l, m]: 0<=i, j, k, l, m<16}\',\n """\n [j, l]: z[i, j, k, l, m] = linear_combo([j, l]: x[i, j, k, l, m],\n [j, l]: y[i, j, k, l, m])\n """\n )\n', (7589, 7818), True, 'import loopy as lp\n'), ((7865, 7933), 'loopy.split_iname', 'lp.split_iname', (['caller_knl', '"""i"""', '(4)'], {'inner_tag': 
'"""l.1"""', 'outer_tag': '"""g.1"""'}), "(caller_knl, 'i', 4, inner_tag='l.1', outer_tag='g.1')\n", (7879, 7933), True, 'import loopy as lp\n'), ((7945, 7996), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['caller_knl', 'callee_knl'], {}), '(caller_knl, callee_knl)\n', (7972, 7996), True, 'import loopy as lp\n'), ((8457, 8477), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (8472, 8477), True, 'import pyopencl as cl\n'), ((8488, 8537), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(3, 2)'], {'dtype': 'np.float64'}), '(queue, (3, 2), dtype=np.float64)\n', (8504, 8537), True, 'import pyopencl as cl\n'), ((8547, 8594), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(6,)'], {'dtype': 'np.float64'}), '(queue, (6,), dtype=np.float64)\n', (8563, 8594), True, 'import pyopencl as cl\n'), ((8605, 8654), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(6, 6)'], {'dtype': 'np.float64'}), '(queue, (6, 6), dtype=np.float64)\n', (8621, 8654), True, 'import pyopencl as cl\n'), ((8670, 8778), 'loopy.make_function', 'lp.make_function', (['"""{[i]: 0<=i<6}"""', '"""\n a[i] = 2*abs(b[i])\n """'], {'name': '"""callee_fn1"""'}), '(\'{[i]: 0<=i<6}\',\n """\n a[i] = 2*abs(b[i])\n """, name=\'callee_fn1\')\n', (8686, 8778), True, 'import loopy as lp\n'), ((8815, 8942), 'loopy.make_function', 'lp.make_function', (['"""{[i, j]: 0<=i<3 and 0 <= j < 2}"""', '"""\n a[i, j] = 3*b[i, j]\n """'], {'name': '"""callee_fn2"""'}), '(\'{[i, j]: 0<=i<3 and 0 <= j < 2}\',\n """\n a[i, j] = 3*b[i, j]\n """, name=\'callee_fn2\')\n', (8831, 8942), True, 'import loopy as lp\n'), ((8979, 9082), 'loopy.make_function', 'lp.make_function', (['"""{[i]: 0<=i<6}"""', '"""\n a[i] = 5*b[i]\n """'], {'name': '"""callee_fn3"""'}), '(\'{[i]: 0<=i<6}\',\n """\n a[i] = 5*b[i]\n """, name=\'callee_fn3\')\n', (8995, 9082), True, 'import loopy as lp\n'), ((9115, 9415), 'loopy.make_kernel', 'lp.make_kernel', (['"""{[i, j, k, l]: 0<= i < 6 and 0 <= j 
< 3 and 0 <= k < 2 and 0<=l<6}"""', '"""\n [i]: y1[i//2, i%2] = callee_fn1([i]: x1[i//2, i%2])\n [j, k]: y2[2*j+k] = callee_fn2([j, k]: x2[2*j+k])\n [l]: y3[l, l] = callee_fn3([l]: x3[l, l])\n """'], {}), '(\n \'{[i, j, k, l]: 0<= i < 6 and 0 <= j < 3 and 0 <= k < 2 and 0<=l<6}\',\n """\n [i]: y1[i//2, i%2] = callee_fn1([i]: x1[i//2, i%2])\n [j, k]: y2[2*j+k] = callee_fn2([j, k]: x2[2*j+k])\n [l]: y3[l, l] = callee_fn3([l]: x3[l, l])\n """\n )\n', (9129, 9415), True, 'import loopy as lp\n'), ((9438, 9479), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['knl', 'callee1'], {}), '(knl, callee1)\n', (9465, 9479), True, 'import loopy as lp\n'), ((9490, 9531), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['knl', 'callee2'], {}), '(knl, callee2)\n', (9517, 9531), True, 'import loopy as lp\n'), ((9542, 9583), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['knl', 'callee3'], {}), '(knl, callee3)\n', (9569, 9583), True, 'import loopy as lp\n'), ((9788, 9819), 'loopy.set_options', 'lp.set_options', (['knl', '"""write_cl"""'], {}), "(knl, 'write_cl')\n", (9802, 9819), True, 'import loopy as lp\n'), ((9830, 9864), 'loopy.set_options', 'lp.set_options', (['knl', '"""return_dict"""'], {}), "(knl, 'return_dict')\n", (9844, 9864), True, 'import loopy as lp\n'), ((10253, 10273), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (10268, 10273), True, 'import pyopencl as cl\n'), ((10364, 10379), 'pymbolic.primitives.Variable', 'p.Variable', (['"""i"""'], {}), "('i')\n", (10374, 10379), True, 'import pymbolic.primitives as p\n'), ((11151, 11188), 'loopy.fix_parameters', 'lp.fix_parameters', (['argmin_kernel'], {'n': 'n'}), '(argmin_kernel, n=n)\n', (11168, 11188), True, 'import loopy as lp\n'), ((11200, 11317), 'loopy.make_kernel', 'lp.make_kernel', (['"""{[i]:0<=i<n}"""', '"""\n min_val, min_index = custom_argmin([i]:b[i])\n """'], {}), '(\'{[i]:0<=i<n}\',\n """\n min_val, min_index = 
custom_argmin([i]:b[i])\n """\n )\n', (11214, 11317), True, 'import loopy as lp\n'), ((11345, 11372), 'loopy.fix_parameters', 'lp.fix_parameters', (['knl'], {'n': 'n'}), '(knl, n=n)\n', (11362, 11372), True, 'import loopy as lp\n'), ((11383, 11420), 'loopy.set_options', 'lp.set_options', (['knl'], {'return_dict': '(True)'}), '(knl, return_dict=True)\n', (11397, 11420), True, 'import loopy as lp\n'), ((11432, 11479), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['knl', 'argmin_kernel'], {}), '(knl, argmin_kernel)\n', (11459, 11479), True, 'import loopy as lp\n'), ((11488, 11506), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (11503, 11506), True, 'import numpy as np\n'), ((11853, 11873), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (11868, 11873), True, 'import pyopencl as cl\n'), ((11884, 11933), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(3, 2)'], {'dtype': 'np.float64'}), '(queue, (3, 2), dtype=np.float64)\n', (11900, 11933), True, 'import pyopencl as cl\n'), ((11943, 11990), 'pyopencl.clrandom.rand', 'cl.clrandom.rand', (['queue', '(6,)'], {'dtype': 'np.float64'}), '(queue, (6,), dtype=np.float64)\n', (11959, 11990), True, 'import pyopencl as cl\n'), ((12007, 12110), 'loopy.make_function', 'lp.make_function', (['"""{[i]: 0<=i<6}"""', '"""\n a[i] = 2*b[i]\n """'], {'name': '"""callee_fn1"""'}), '(\'{[i]: 0<=i<6}\',\n """\n a[i] = 2*b[i]\n """, name=\'callee_fn1\')\n', (12023, 12110), True, 'import loopy as lp\n'), ((12147, 12274), 'loopy.make_function', 'lp.make_function', (['"""{[i, j]: 0<=i<2 and 0 <= j < 3}"""', '"""\n a[i, j] = 3*b[i, j]\n """'], {'name': '"""callee_fn2"""'}), '(\'{[i, j]: 0<=i<2 and 0 <= j < 3}\',\n """\n a[i, j] = 3*b[i, j]\n """, name=\'callee_fn2\')\n', (12163, 12274), True, 'import loopy as lp\n'), ((12307, 12516), 'loopy.make_kernel', 'lp.make_kernel', (['"""{[i, j, k]: 0<= i < 3 and 0 <= j < 2 and 0 <= k < 6}"""', '"""\n [i, j]: y1[i, j] = callee_fn1([i, j]: 
x1[i, j])\n [k]: y2[k] = callee_fn2([k]: x2[k])\n """'], {}), '(\'{[i, j, k]: 0<= i < 3 and 0 <= j < 2 and 0 <= k < 6}\',\n """\n [i, j]: y1[i, j] = callee_fn1([i, j]: x1[i, j])\n [k]: y2[k] = callee_fn2([k]: x2[k])\n """\n )\n', (12321, 12516), True, 'import loopy as lp\n'), ((12544, 12585), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['knl', 'callee1'], {}), '(knl, callee1)\n', (12571, 12585), True, 'import loopy as lp\n'), ((12596, 12637), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['knl', 'callee2'], {}), '(knl, callee2)\n', (12623, 12637), True, 'import loopy as lp\n'), ((12649, 12700), 'loopy.pack_and_unpack_args_for_call', 'lp.pack_and_unpack_args_for_call', (['knl', '"""callee_fn1"""'], {}), "(knl, 'callee_fn1')\n", (12681, 12700), True, 'import loopy as lp\n'), ((12711, 12762), 'loopy.pack_and_unpack_args_for_call', 'lp.pack_and_unpack_args_for_call', (['knl', '"""callee_fn2"""'], {}), "(knl, 'callee_fn2')\n", (12743, 12762), True, 'import loopy as lp\n'), ((12908, 12939), 'loopy.set_options', 'lp.set_options', (['knl', '"""write_cl"""'], {}), "(knl, 'write_cl')\n", (12922, 12939), True, 'import loopy as lp\n'), ((12950, 12984), 'loopy.set_options', 'lp.set_options', (['knl', '"""return_dict"""'], {}), "(knl, 'return_dict')\n", (12964, 12984), True, 'import loopy as lp\n'), ((14401, 14445), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['caller1', 'callee'], {}), '(caller1, callee)\n', (14428, 14445), True, 'import loopy as lp\n'), ((14460, 14525), 'loopy.transform.callable._match_caller_callee_argument_dimension_', '_match_caller_callee_argument_dimension_', (['registered', 'callee.name'], {}), '(registered, callee.name)\n', (14500, 14525), False, 'from loopy.transform.callable import _match_caller_callee_argument_dimension_\n'), ((14540, 14587), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['inlined', 'callee.name'], {}), '(inlined, callee.name)\n', (14565, 14587), True, 
'import loopy as lp\n'), ((14626, 14670), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['caller2', 'callee'], {}), '(caller2, callee)\n', (14653, 14670), True, 'import loopy as lp\n'), ((14685, 14750), 'loopy.transform.callable._match_caller_callee_argument_dimension_', '_match_caller_callee_argument_dimension_', (['registered', 'callee.name'], {}), '(registered, callee.name)\n', (14725, 14750), False, 'from loopy.transform.callable import _match_caller_callee_argument_dimension_\n'), ((14765, 14812), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['inlined', 'callee.name'], {}), '(inlined, callee.name)\n', (14790, 14812), True, 'import loopy as lp\n'), ((14851, 14895), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['caller3', 'callee'], {}), '(caller3, callee)\n', (14878, 14895), True, 'import loopy as lp\n'), ((14910, 14975), 'loopy.transform.callable._match_caller_callee_argument_dimension_', '_match_caller_callee_argument_dimension_', (['registered', 'callee.name'], {}), '(registered, callee.name)\n', (14950, 14975), False, 'from loopy.transform.callable import _match_caller_callee_argument_dimension_\n'), ((14990, 15037), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['inlined', 'callee.name'], {}), '(inlined, callee.name)\n', (15015, 15037), True, 'import loopy as lp\n'), ((15269, 15289), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (15284, 15289), True, 'import pyopencl as cl\n'), ((15299, 15318), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (15314, 15318), True, 'import numpy as np\n'), ((15327, 15346), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (15342, 15346), True, 'import numpy as np\n'), ((15361, 15473), 'loopy.make_function', 'lp.make_function', (['"""{[d]:0<=d<1}"""', '"""\n a[d] = b[d] - c[d]\n\n """'], {'name': '"""wence_function"""'}), '(\'{[d]:0<=d<1}\',\n """\n a[d] = b[d] - c[d]\n\n """, 
name=\'wence_function\')\n', (15377, 15473), True, 'import loopy as lp\n'), ((15719, 15762), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['caller', 'callee'], {}), '(caller, callee)\n', (15746, 15762), True, 'import loopy as lp\n'), ((15898, 15921), 'numpy.allclose', 'np.allclose', (['out', '(x - y)'], {}), '(out, x - y)\n', (15909, 15921), True, 'import numpy as np\n'), ((15971, 16071), 'loopy.make_function', 'lp.make_function', (['"""{[i]: 0<=i<1}"""', '"""\n y[i] = 2*x[i]\n """'], {'name': '"""callee1"""'}), '(\'{[i]: 0<=i<1}\',\n """\n y[i] = 2*x[i]\n """, name=\'callee1\')\n', (15987, 16071), True, 'import loopy as lp\n'), ((16107, 16224), 'loopy.make_kernel', 'lp.make_kernel', (['"""{[i]: 0<=i<1}"""', '"""\n []:y[i] = callee1([]: x[i])\n """'], {'name': '"""callee2"""'}), '(\'{[i]: 0<=i<1}\',\n """\n []:y[i] = callee1([]: x[i])\n """, name=\'callee2\'\n )\n', (16121, 16224), True, 'import loopy as lp\n'), ((16539, 16584), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['callee2', 'callee1'], {}), '(callee2, callee1)\n', (16566, 16584), True, 'import loopy as lp\n'), ((16599, 16647), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['callee2', 'callee1.name'], {}), '(callee2, callee1.name)\n', (16624, 16647), True, 'import loopy as lp\n'), ((16695, 16739), 'loopy.register_callable_kernel', 'lp.register_callable_kernel', (['caller', 'callee2'], {}), '(caller, callee2)\n', (16722, 16739), True, 'import loopy as lp\n'), ((16753, 16800), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['caller', 'callee2.name'], {}), '(caller, callee2.name)\n', (16778, 16800), True, 'import loopy as lp\n'), ((3487, 3534), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""linear_combo2"""'], {}), "(knl, 'linear_combo2')\n", (3512, 3534), True, 'import loopy as lp\n'), ((3549, 3596), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""linear_combo1"""'], {}), "(knl, 
'linear_combo1')\n", (3574, 3596), True, 'import loopy as lp\n'), ((4991, 5037), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""linear_combo"""'], {}), "(knl, 'linear_combo')\n", (5016, 5037), True, 'import loopy as lp\n'), ((6536, 6582), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""linear_combo"""'], {}), "(knl, 'linear_combo')\n", (6561, 6582), True, 'import loopy as lp\n'), ((8040, 8086), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""linear_combo"""'], {}), "(knl, 'linear_combo')\n", (8065, 8086), True, 'import loopy as lp\n'), ((9614, 9658), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""callee_fn1"""'], {}), "(knl, 'callee_fn1')\n", (9639, 9658), True, 'import loopy as lp\n'), ((9673, 9717), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""callee_fn2"""'], {}), "(knl, 'callee_fn2')\n", (9698, 9717), True, 'import loopy as lp\n'), ((9732, 9776), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""callee_fn3"""'], {}), "(knl, 'callee_fn3')\n", (9757, 9776), True, 'import loopy as lp\n'), ((10333, 10352), 'pymbolic.primitives.Variable', 'p.Variable', (['"""acc_i"""'], {}), "('acc_i')\n", (10343, 10352), True, 'import pymbolic.primitives as p\n'), ((10392, 10411), 'pymbolic.primitives.Variable', 'p.Variable', (['"""index"""'], {}), "('index')\n", (10402, 10411), True, 'import pymbolic.primitives as p\n'), ((10437, 10452), 'pymbolic.primitives.Variable', 'p.Variable', (['"""a"""'], {}), "('a')\n", (10447, 10452), True, 'import pymbolic.primitives as p\n'), ((10454, 10469), 'pymbolic.primitives.Variable', 'p.Variable', (['"""i"""'], {}), "('i')\n", (10464, 10469), True, 'import pymbolic.primitives as p\n'), ((12793, 12837), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""callee_fn1"""'], {}), "(knl, 'callee_fn1')\n", (12818, 12837), True, 'import loopy as lp\n'), ((12852, 12896), 
'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['knl', '"""callee_fn2"""'], {}), "(knl, 'callee_fn2')\n", (12877, 12896), True, 'import loopy as lp\n'), ((15796, 15842), 'loopy.inline_callable_kernel', 'lp.inline_callable_kernel', (['caller', 'callee.name'], {}), '(caller, callee.name)\n', (15821, 15842), True, 'import loopy as lp\n'), ((16950, 16966), 'pytest.main', 'main', (['[__file__]'], {}), '([__file__])\n', (16954, 16966), False, 'from pytest import main\n'), ((3651, 3686), 'numpy.linalg.norm', 'np.linalg.norm', (['(2 * x + 3 * y - out)'], {}), '(2 * x + 3 * y - out)\n', (3665, 3686), True, 'import numpy as np\n'), ((3689, 3718), 'numpy.linalg.norm', 'np.linalg.norm', (['(2 * x + 3 * y)'], {}), '(2 * x + 3 * y)\n', (3703, 3718), True, 'import numpy as np\n'), ((5092, 5145), 'numpy.linalg.norm', 'np.linalg.norm', (['(2 * x + 3 * y - out[:, ::-1, :, :, :])'], {}), '(2 * x + 3 * y - out[:, ::-1, :, :, :])\n', (5106, 5145), True, 'import numpy as np\n'), ((5148, 5177), 'numpy.linalg.norm', 'np.linalg.norm', (['(2 * x + 3 * y)'], {}), '(2 * x + 3 * y)\n', (5162, 5177), True, 'import numpy as np\n'), ((5861, 5887), 'loopy.GlobalArg', 'lp.GlobalArg', (['"""f, e, h, g"""'], {}), "('f, e, h, g')\n", (5873, 5887), True, 'import loopy as lp\n'), ((6861, 6888), 'numpy.linalg.norm', 'np.linalg.norm', (['(h - h_exact)'], {}), '(h - h_exact)\n', (6875, 6888), True, 'import numpy as np\n'), ((6887, 6910), 'numpy.linalg.norm', 'np.linalg.norm', (['h_exact'], {}), '(h_exact)\n', (6901, 6910), True, 'import numpy as np\n'), ((6929, 6956), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - p_exact)'], {}), '(p - p_exact)\n', (6943, 6956), True, 'import numpy as np\n'), ((6955, 6978), 'numpy.linalg.norm', 'np.linalg.norm', (['p_exact'], {}), '(p_exact)\n', (6969, 6978), True, 'import numpy as np\n'), ((8243, 8282), 'numpy.linalg.norm', 'np.linalg.norm', (['(2 * x_host + 3 * y_host)'], {}), '(2 * x_host + 3 * y_host)\n', (8257, 8282), True, 'import numpy as np\n'), 
((10572, 10627), 'loopy.Assignment', 'lp.Assignment', ([], {'id': '"""init2"""', 'assignee': 'index', 'expression': '(0)'}), "(id='init2', assignee=index, expression=0)\n", (10585, 10627), True, 'import loopy as lp\n'), ((10665, 10730), 'loopy.Assignment', 'lp.Assignment', ([], {'id': '"""init1"""', 'assignee': 'acc_i', 'expression': '"""214748367"""'}), "(id='init1', assignee=acc_i, expression='214748367')\n", (10678, 10730), True, 'import loopy as lp\n'), ((13514, 13581), 'loopy.GlobalArg', 'lp.GlobalArg', (['"""a"""'], {'dtype': '"""double"""', 'shape': '(6,)', 'is_output_only': '(False)'}), "('a', dtype='double', shape=(6,), is_output_only=False)\n", (13526, 13581), True, 'import loopy as lp\n'), ((13599, 13628), 'loopy.ValueArg', 'lp.ValueArg', (['"""j"""'], {'dtype': '"""int"""'}), "('j', dtype='int')\n", (13610, 13628), True, 'import loopy as lp\n'), ((13732, 13799), 'loopy.GlobalArg', 'lp.GlobalArg', (['"""a"""'], {'dtype': '"""double"""', 'shape': '(6,)', 'is_output_only': '(False)'}), "('a', dtype='double', shape=(6,), is_output_only=False)\n", (13744, 13799), True, 'import loopy as lp\n'), ((13814, 13881), 'loopy.GlobalArg', 'lp.GlobalArg', (['"""b"""'], {'dtype': '"""double"""', 'shape': '(1,)', 'is_output_only': '(False)'}), "('b', dtype='double', shape=(1,), is_output_only=False)\n", (13826, 13881), True, 'import loopy as lp\n'), ((13919, 13931), 'loopy.CTarget', 'lp.CTarget', ([], {}), '()\n', (13929, 13931), True, 'import loopy as lp\n'), ((14025, 14092), 'loopy.GlobalArg', 'lp.GlobalArg', (['"""a"""'], {'dtype': '"""double"""', 'shape': '(6,)', 'is_output_only': '(False)'}), "('a', dtype='double', shape=(6,), is_output_only=False)\n", (14037, 14092), True, 'import loopy as lp\n'), ((14146, 14158), 'loopy.CTarget', 'lp.CTarget', ([], {}), '()\n', (14156, 14158), True, 'import loopy as lp\n'), ((14248, 14315), 'loopy.GlobalArg', 'lp.GlobalArg', (['"""a"""'], {'dtype': '"""double"""', 'shape': '(6,)', 'is_output_only': '(False)'}), "('a', 
dtype='double', shape=(6,), is_output_only=False)\n", (14260, 14315), True, 'import loopy as lp\n'), ((14369, 14381), 'loopy.CTarget', 'lp.CTarget', ([], {}), '()\n', (14379, 14381), True, 'import loopy as lp\n'), ((15643, 15694), 'loopy.GlobalArg', 'lp.GlobalArg', (['"""x, y"""'], {'dtype': 'np.float64', 'shape': '(10,)'}), "('x, y', dtype=np.float64, shape=(10,))\n", (15655, 15694), True, 'import loopy as lp\n'), ((16437, 16482), 'loopy.GlobalArg', 'lp.GlobalArg', (['"""x"""'], {'dtype': 'float', 'shape': 'lp.auto'}), "('x', dtype=float, shape=lp.auto)\n", (16449, 16482), True, 'import loopy as lp\n'), ((2044, 2054), 'numpy.log2', 'np.log2', (['x'], {}), '(x)\n', (2051, 2054), True, 'import numpy as np\n'), ((3011, 3079), 'loopy.GlobalArg', 'lp.GlobalArg', ([], {'name': '"""x"""', 'dtype': 'np.float64', 'shape': '(16, 16, 16, 16, 16)'}), "(name='x', dtype=np.float64, shape=(16, 16, 16, 16, 16))\n", (3023, 3079), True, 'import loopy as lp\n'), ((3158, 3226), 'loopy.GlobalArg', 'lp.GlobalArg', ([], {'name': '"""y"""', 'dtype': 'np.float64', 'shape': '(16, 16, 16, 16, 16)'}), "(name='y', dtype=np.float64, shape=(16, 16, 16, 16, 16))\n", (3170, 3226), True, 'import loopy as lp\n'), ((4440, 4508), 'loopy.GlobalArg', 'lp.GlobalArg', ([], {'name': '"""x"""', 'dtype': 'np.float64', 'shape': '(16, 16, 16, 16, 16)'}), "(name='x', dtype=np.float64, shape=(16, 16, 16, 16, 16))\n", (4452, 4508), True, 'import loopy as lp\n'), ((4587, 4655), 'loopy.GlobalArg', 'lp.GlobalArg', ([], {'name': '"""y"""', 'dtype': 'np.float64', 'shape': '(16, 16, 16, 16, 16)'}), "(name='y', dtype=np.float64, shape=(16, 16, 16, 16, 16))\n", (4599, 4655), True, 'import loopy as lp\n'), ((4734, 4802), 'loopy.GlobalArg', 'lp.GlobalArg', ([], {'name': '"""z"""', 'dtype': 'np.float64', 'shape': '(16, 16, 16, 16, 16)'}), "(name='z', dtype=np.float64, shape=(16, 16, 16, 16, 16))\n", (4746, 4802), True, 'import loopy as lp\n'), ((11634, 11643), 'numpy.min', 'np.min', (['b'], {}), '(b)\n', (11640, 11643), 
True, 'import numpy as np\n'), ((11695, 11707), 'numpy.argmin', 'np.argmin', (['b'], {}), '(b)\n', (11704, 11707), True, 'import numpy as np\n'), ((2013, 2023), 'numpy.log2', 'np.log2', (['x'], {}), '(x)\n', (2020, 2023), True, 'import numpy as np\n'), ((10845, 10872), 'pymbolic.primitives.Expression.eq', 'p.Expression.eq', (['acc_i', 'a_i'], {}), '(acc_i, a_i)\n', (10860, 10872), True, 'import pymbolic.primitives as p\n'), ((11017, 11034), 'pymbolic.primitives.Variable', 'p.Variable', (['"""min"""'], {}), "('min')\n", (11027, 11034), True, 'import pymbolic.primitives as p\n')] |
from keras.utils import np_utils
import numpy as np
import math
import matplotlib.pyplot as plt
class FixedChunkTest:
    """Generator of chunked, time-decaying input sequences.

    Chunk definitions are read from ``filename``: every line is a
    comma-separated output pattern followed by the number of the chunk it
    belongs to.  Chunk numbers must appear in ascending order and every
    pattern must have the same length.  Chunks are then replayed in random
    order; each output is held for ``time_delay`` steps with an
    exponentially decaying input trace (optionally mixed with the decaying
    trace of the previous output).
    """

    def __init__(self, time_delay, filename="fixed_chunk2.txt"):
        """Load the chunk file and pre-compute chunks and true labels.

        Parameters
        ----------
        time_delay : int
            Number of ``getInput`` calls an output is held before the
            sequence advances to the next pattern.
        filename : str
            Path of the chunk definition file (see class docstring).
        """
        dataset = np.loadtxt(filename, dtype="i", delimiter=",")
        self.time_delay = time_delay
        self.time_counter = 0
        self.current_index = 0
        # Last column is the chunk number; the remaining columns form the
        # output pattern.
        self.output_size = dataset.shape[1] - 1
        self.data = dataset[:, :self.output_size]
        self.data_class = dataset[:, self.output_size]
        # Per-sample "true label".
        # NOTE(review): this broadcasts each pattern against the WHOLE chunk
        # column, which only works when the number of samples equals the
        # pattern length; possibly ``self.data_class[i]`` was intended —
        # preserved as-is, confirm against the data file.
        acc = np.zeros(len(self.data_class), dtype=int)
        for i, sample in enumerate(self.data):
            acc[i] = int((sample * self.data_class).sum())
        acc -= 1
        self.true_labels = acc
        # Group consecutive rows that share a chunk number into one chunk.
        self.chunk = []
        new_chunk = None
        new_chunk_index = None
        for i, sample in enumerate(self.data):
            if new_chunk is None:
                new_chunk_index = self.data_class[i]
                new_chunk = [sample]
            elif new_chunk_index == self.data_class[i]:
                new_chunk.append(sample)
            else:
                self.chunk.append(np.asarray(new_chunk))
                new_chunk = [sample]
                new_chunk_index = self.data_class[i]
        self.chunk.append(np.asarray(new_chunk))
        self.chunk = np.asarray(self.chunk)
        self.number_of_chunks = self.chunk.shape[0]
        self.chunk_index = np.random.randint(self.number_of_chunks)
        # Initialize the output bookkeeping explicitly: with time_delay == 0
        # the first updateTimeDelay() call reads self.output_class before
        # getInput() has ever assigned it.
        self.output_class = None
        self.previous_output_class = None
        self.previous_previous_output_class = None

    def getOutputSize(self):
        """Return the length of one output pattern."""
        return self.output_size

    def trueLabel(self):
        """Return the pre-computed per-sample true labels."""
        return self.true_labels

    def updateTimeDelay(self):
        """Advance the step counter.

        Returns True when ``time_delay`` steps have elapsed, resetting the
        counter and shifting the output-class history by one position.
        """
        self.time_counter += 1
        if self.time_counter > self.time_delay:
            self.time_counter = 0
            self.previous_previous_output_class = self.previous_output_class
            self.previous_output_class = self.output_class
            return True
        return False

    def getInput(self, reset=False):
        """Create one input pattern for the current step.

        Parameters
        ----------
        reset : bool
            When True, restart from the beginning of the current chunk.

        Returns
        -------
        numpy.ndarray
            The current output pattern with an exponential decay applied;
            right after an output transition the decaying trace of the
            previous output is superposed as well.
        """
        if reset:
            self.current_index = 0
            self.time_counter = 0
        update = self.updateTimeDelay()
        if update:
            self.current_index += 1
            # Start a new (randomly chosen) chunk once the current one is
            # exhausted.
            if self.current_index >= self.chunk[self.chunk_index].shape[0]:
                self.chunk_index = np.random.randint(self.number_of_chunks)
                self.current_index = 0
        self.output_class = self.chunk[self.chunk_index][self.current_index]
        noise_intensity = 0  # additive Gaussian noise amplitude (disabled)
        decay = np.exp(-0.1 * self.time_counter)
        noise = np.random.randn(self.output_size) * noise_intensity
        if self.previous_output_class is None or np.array_equal(
                self.previous_output_class, self.output_class):
            input_value = self.output_class * decay + noise
        else:
            # Transition step: the previous output keeps decaying as if
            # time_delay extra steps had already passed.
            input_value = (self.output_class * decay + noise
                           + self.previous_output_class
                           * np.exp(-0.1 * (self.time_counter + self.time_delay)))
        return input_value

    def getSequence(self, sequence_size):
        """Generate ``sequence_size`` inputs and their chunk indices.

        Returns
        -------
        (numpy.ndarray, numpy.ndarray)
            The input sequence (one row per step) and, per step, the index
            of the chunk it was drawn from.
        """
        self.input_sequence = np.empty((sequence_size, self.data.shape[1]))
        self.input_class = np.empty(sequence_size)
        for i in range(sequence_size):
            input_value = self.getInput()
            self.input_class[i] = self.chunk_index
            self.input_sequence[i] = input_value
        return self.input_sequence, self.input_class

    def plot(self, input_class, input_sequence=None, save=False):
        """Plot chunk indices over time, optionally with the argmax of the
        input sequence superposed.

        When ``save`` is True the figure is also written to "plot.png".
        """
        a = np.asarray(input_class)
        t = [i for i, value in enumerate(a)]
        plt.plot(t, a)
        # BUGFIX: "input_sequence != None" is elementwise on numpy arrays
        # and made the truth value ambiguous; use an identity check.
        if input_sequence is not None:
            sequence = [np.argmax(x) for x in input_sequence]
            plt.plot(t, sequence)
        if save:
            plt.savefig("plot.png")
        plt.show()
        plt.close()

    def plotSuperposed(self, input_class, input_sequence=None, save=False):
        """Plot every input dimension over time with the chunk indices
        superposed.

        When ``save`` is True the figure is also written to "plot.png".
        """
        input_sequence = np.asarray(input_sequence)
        t = [i for i, value in enumerate(input_sequence)]
        for i in range(input_sequence.shape[1]):
            plt.plot(t, input_sequence[:, i])
        a = np.asarray(input_class)
        plt.plot(t, a)
        if save:
            plt.savefig("plot.png")
        plt.show()
        plt.close()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.argmax",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.random.randint",
"numpy.empty",
"numpy.array_equal",
"numpy.loadtxt",
"numpy.random.randn",
"matplotlib.pyplot.show"
] | [((486, 532), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'dtype': '"""i"""', 'delimiter': '""","""'}), "(filename, dtype='i', delimiter=',')\n", (496, 532), True, 'import numpy as np\n'), ((1428, 1450), 'numpy.asarray', 'np.asarray', (['self.chunk'], {}), '(self.chunk)\n', (1438, 1450), True, 'import numpy as np\n'), ((1516, 1556), 'numpy.random.randint', 'np.random.randint', (['self.number_of_chunks'], {}), '(self.number_of_chunks)\n', (1533, 1556), True, 'import numpy as np\n'), ((3744, 3789), 'numpy.empty', 'np.empty', (['(sequence_size, self.data.shape[1])'], {}), '((sequence_size, self.data.shape[1]))\n', (3752, 3789), True, 'import numpy as np\n'), ((3811, 3834), 'numpy.empty', 'np.empty', (['sequence_size'], {}), '(sequence_size)\n', (3819, 3834), True, 'import numpy as np\n'), ((4195, 4218), 'numpy.asarray', 'np.asarray', (['input_class'], {}), '(input_class)\n', (4205, 4218), True, 'import numpy as np\n'), ((4260, 4274), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'a'], {}), '(t, a)\n', (4268, 4274), True, 'import matplotlib.pyplot as plt\n'), ((4437, 4447), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4445, 4447), True, 'import matplotlib.pyplot as plt\n'), ((4450, 4461), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4459, 4461), True, 'import matplotlib.pyplot as plt\n'), ((4561, 4587), 'numpy.asarray', 'np.asarray', (['input_sequence'], {}), '(input_sequence)\n', (4571, 4587), True, 'import numpy as np\n'), ((4751, 4774), 'numpy.asarray', 'np.asarray', (['input_class'], {}), '(input_class)\n', (4761, 4774), True, 'import numpy as np\n'), ((4777, 4791), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'a'], {}), '(t, a)\n', (4785, 4791), True, 'import matplotlib.pyplot as plt\n'), ((4844, 4854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4852, 4854), True, 'import matplotlib.pyplot as plt\n'), ((4857, 4868), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4866, 4868), True, 'import matplotlib.pyplot 
as plt\n'), ((1390, 1411), 'numpy.asarray', 'np.asarray', (['new_chunk'], {}), '(new_chunk)\n', (1400, 1411), True, 'import numpy as np\n'), ((3199, 3260), 'numpy.array_equal', 'np.array_equal', (['self.previous_output_class', 'self.output_class'], {}), '(self.previous_output_class, self.output_class)\n', (3213, 3260), True, 'import numpy as np\n'), ((4363, 4384), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sequence'], {}), '(t, sequence)\n', (4371, 4384), True, 'import matplotlib.pyplot as plt\n'), ((4408, 4431), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot.png"""'], {}), "('plot.png')\n", (4419, 4431), True, 'import matplotlib.pyplot as plt\n'), ((4727, 4741), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'a'], {}), '(t, a)\n', (4735, 4741), True, 'import matplotlib.pyplot as plt\n'), ((4815, 4838), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot.png"""'], {}), "('plot.png')\n", (4826, 4838), True, 'import matplotlib.pyplot as plt\n'), ((2856, 2896), 'numpy.random.randint', 'np.random.randint', (['self.number_of_chunks'], {}), '(self.number_of_chunks)\n', (2873, 2896), True, 'import numpy as np\n'), ((4322, 4334), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (4331, 4334), True, 'import numpy as np\n'), ((3297, 3329), 'numpy.exp', 'np.exp', (['(-0.1 * self.time_counter)'], {}), '(-0.1 * self.time_counter)\n', (3303, 3329), True, 'import numpy as np\n'), ((3330, 3363), 'numpy.random.randn', 'np.random.randn', (['self.output_size'], {}), '(self.output_size)\n', (3345, 3363), True, 'import numpy as np\n'), ((3535, 3587), 'numpy.exp', 'np.exp', (['(-0.1 * (self.time_counter + self.time_delay))'], {}), '(-0.1 * (self.time_counter + self.time_delay))\n', (3541, 3587), True, 'import numpy as np\n'), ((1280, 1301), 'numpy.asarray', 'np.asarray', (['new_chunk'], {}), '(new_chunk)\n', (1290, 1301), True, 'import numpy as np\n'), ((3423, 3455), 'numpy.exp', 'np.exp', (['(-0.1 * self.time_counter)'], {}), '(-0.1 * self.time_counter)\n', (3429, 3455), 
True, 'import numpy as np\n'), ((3456, 3489), 'numpy.random.randn', 'np.random.randn', (['self.output_size'], {}), '(self.output_size)\n', (3471, 3489), True, 'import numpy as np\n')] |
# Batch-convert a set of CT scan images to grayscale.
# For every image in `image_list`, each method in `methods` is applied and the
# result is written to "<name>_<method><ext>" in the working directory.
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy
image_ext = ".jpg"
image_list = ["ct1","ct3","ct4","ct5","ct6","ct7","ct8","ct9","ct10","ct11","ct12","ct13",
"ct14","ct15","ct16","ct17","ct18","ct19","ct20","ct21","ct22","ct23","ct24","ct25","ct26",
"ct27","ct28","ct29","ct30","ct31","ct32","ct33","ct34", "ct35"]
# Only the two luma methods run by default; the decomposition branches below
# can be enabled by adding their names to this list.
methods = ["BT601", "BT709"]
for current_image in image_list:
    img = mpimg.imread(current_image + image_ext)  # Load the image
    for method in methods:
        R, G, B = img[:,:,0], img[:,:,1], img[:,:,2] #Split RGB channels
        if method == "BT601":
            # ITU-R BT.601 (SDTV) luma weights.
            imgGray = 0.2989 * R + 0.5870 * G + 0.1140 * B #Convert all channels to grayscale.
        elif method == "BT709":
            # ITU-R BT.709 (HDTV) luma weights.
            imgGray = 0.2126 * R + 0.7152 * G + 0.0722 * B
        elif method == "Decomposition_MAX":
            # Grayscale = per-pixel maximum of the RGB channels, broadcast
            # back to every channel.  Vectorized: the original per-pixel
            # Python loops did the same work in interpreted code.
            imgGray = numpy.copy(img)
            val = img[:, :, :3].max(axis=2)
            for channel in range(3):
                imgGray[:, :, channel] = val
        elif method == "Decomposition_MIN":
            # Grayscale = per-pixel minimum of the RGB channels.
            imgGray = numpy.copy(img)
            val = img[:, :, :3].min(axis=2)
            for channel in range(3):
                imgGray[:, :, channel] = val
        plt.title(current_image + "_" + method + image_ext)
        fig = plt.gcf()
        # FIX: FigureCanvasBase.set_window_title() was deprecated in
        # Matplotlib 3.4 and removed in 3.6; the window title now lives on
        # the figure manager.
        fig.canvas.manager.set_window_title(current_image + "_" + method + image_ext)
        mpimg.imsave(current_image + "_" + method + image_ext, imgGray, cmap='gray')
        # plt.imshow(imgGray, cmap='gray') #Show the image.
        # plt.show()
| [
"numpy.copy",
"matplotlib.pyplot.gcf",
"matplotlib.image.imread",
"matplotlib.image.imsave",
"matplotlib.pyplot.title"
] | [((516, 555), 'matplotlib.image.imread', 'mpimg.imread', (['(current_image + image_ext)'], {}), '(current_image + image_ext)\n', (528, 555), True, 'import matplotlib.image as mpimg\n'), ((1555, 1606), 'matplotlib.pyplot.title', 'plt.title', (["(current_image + '_' + method + image_ext)"], {}), "(current_image + '_' + method + image_ext)\n", (1564, 1606), True, 'from matplotlib import pyplot as plt\n'), ((1615, 1624), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1622, 1624), True, 'from matplotlib import pyplot as plt\n'), ((1699, 1775), 'matplotlib.image.imsave', 'mpimg.imsave', (["(current_image + '_' + method + image_ext)", 'imgGray'], {'cmap': '"""gray"""'}), "(current_image + '_' + method + image_ext, imgGray, cmap='gray')\n", (1711, 1775), True, 'import matplotlib.image as mpimg\n'), ((887, 902), 'numpy.copy', 'numpy.copy', (['img'], {}), '(img)\n', (897, 902), False, 'import numpy\n'), ((1247, 1262), 'numpy.copy', 'numpy.copy', (['img'], {}), '(img)\n', (1257, 1262), False, 'import numpy\n')] |
from typing import Union
class TensorAdapter:
def zeros(self, size: int, dtype: str):
raise NotImplemented()
def argmax(self, arr):
raise NotImplemented()
def get(self, tensor, pos):
raise NotImplemented()
try:
import numpy as np
class NumpyAdapter(TensorAdapter):
def zeros(self, size: int, dtype: Union[str, 'np.dtype']):
return np.zeros(size, dtype=dtype)
def argmax(self, arr):
return np.argmax(arr)
def get(self, arr, pos):
return arr[pos]
except ImportError:
class NumpyAdapter(TensorAdapter):
def zero(self, _size: int, _dtype: str):
raise RuntimeError("numpy library is not installed")
def argmax(self, _arr):
raise RuntimeError("numpy library is not installed")
def get(self, _arr, _pos):
raise RuntimeError("numpy library is not installed")
_numpy_adapter = NumpyAdapter()
try:
import torch
class PyTorchAdapter(TensorAdapter):
def zeros(self, size: int, dtype: Union[str, 'torch.dtype']):
if isinstance(dtype, str):
dtype = torch.__getattribute__(dtype)
return torch.zeros(size, dtype=dtype)
def argmax(self, arr):
return torch.argmax(arr)
def get(self, arr, pos):
return arr[pos].item()
except ImportError:
class PyTorchAdapter(TensorAdapter):
def zero(self, _size: int, _dtype: str):
raise RuntimeError("torch library is not installed")
def argmax(self, _arr):
raise RuntimeError("torch library is not installed")
def get(self, _arr, _pos):
raise RuntimeError("torch library is not installed")
_pytorch_adapter = PyTorchAdapter()
| [
"numpy.argmax",
"torch.__getattribute__",
"numpy.zeros",
"torch.zeros",
"torch.argmax"
] | [((403, 430), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'dtype'}), '(size, dtype=dtype)\n', (411, 430), True, 'import numpy as np\n'), ((482, 496), 'numpy.argmax', 'np.argmax', (['arr'], {}), '(arr)\n', (491, 496), True, 'import numpy as np\n'), ((1226, 1256), 'torch.zeros', 'torch.zeros', (['size'], {'dtype': 'dtype'}), '(size, dtype=dtype)\n', (1237, 1256), False, 'import torch\n'), ((1308, 1325), 'torch.argmax', 'torch.argmax', (['arr'], {}), '(arr)\n', (1320, 1325), False, 'import torch\n'), ((1177, 1206), 'torch.__getattribute__', 'torch.__getattribute__', (['dtype'], {}), '(dtype)\n', (1199, 1206), False, 'import torch\n')] |
# Random-restart search for a solution to the eight-queens puzzle, printed as
# an ASCII chess board.
import numpy as np
import random
# Build the board: each row alternates cell borders '|' and empty squares ' ';
# the eight playable squares of a row live at the odd indices 1,3,...,15.
a=['|',' ','|',' ','|',' ','|',' ','|',' ','|',' ','|',' ','|',' ','|']
chart= np.array([a,a,a,a,a,a,a,a],dtype=object)
chart2= chart.copy()  # pristine empty board kept for "has the board been wiped" checks
list1=[]  # occupied column indices, one entry per already-placed row
# List the candidate columns and place the first queen at random in row 0.
columns_list=[1,3,5,7,9,11,13,15]
n_column=random.choice(columns_list)
chart[0][n_column]='Q'
list1.append(n_column)
columns_list.remove(n_column)# used column is removed from column list
i=1
while i<8:
    y,p=0,0  # y: conflicts of the current pick; p: conflicts accumulated this row
    while True:
        y=0
        # Pick a random still-free column for row i.
        n_column=random.choice(columns_list)
        # Check the pick's diagonals against every queen placed so far
        # (columns are spaced 2 apart, hence the 2*w offset per row of
        # vertical distance w).
        for j in list1:
            w=i-list1.index(j)
            if n_column!=j-(2*w) and n_column!=j+(2*w):
                y=y
            else:
                y=y+1
                p=p+1
        # Too many conflicts in this row: treat the position as unsolvable,
        # take all queens back and flag a restart.
        if p>4:
            mask=chart=='Q'
            chart[mask]=' '
            comp1=chart==chart2
            if comp1.all():
                i=1
                break
        # NOTE(review): comp1 is only ever assigned inside the `if p>4:`
        # branch above, so on the first pass through this loop (p<=4) the
        # next line reads an unbound name -- verify the intended
        # indentation/flow against the original script.
        if comp1.all():
            break
        # Conflict-free pick: place the queen and retire its column.
        if y==0 :
            chart[i][n_column]='Q'
            list1.append(n_column)
            columns_list.remove(n_column)
            i=i+1
            break
    # Restart path: wipe the board, re-seed row 0 with a random queen, and
    # begin the outer search again from scratch.
    # NOTE(review): comp1 is not recomputed after this reset, so the stale
    # all-True comparison may trigger repeated resets -- confirm intent.
    if comp1.all():
        mask=chart=='Q'
        chart[mask]=' '
        list1=[]
        columns_list=[1,3,5,7,9,11,13,15]
        n_column=random.choice(columns_list)
        chart[0][n_column]='Q'
        list1.append(n_column)
        columns_list.remove(n_column)
        i=1
        continue
# Print the finished chess board, one row per line.
for i in range(8):
    print(''.join(chart[i]))
| [
"numpy.array",
"random.choice"
] | [((162, 210), 'numpy.array', 'np.array', (['[a, a, a, a, a, a, a, a]'], {'dtype': 'object'}), '([a, a, a, a, a, a, a, a], dtype=object)\n', (170, 210), True, 'import numpy as np\n'), ((346, 373), 'random.choice', 'random.choice', (['columns_list'], {}), '(columns_list)\n', (359, 373), False, 'import random\n'), ((617, 644), 'random.choice', 'random.choice', (['columns_list'], {}), '(columns_list)\n', (630, 644), False, 'import random\n'), ((1784, 1811), 'random.choice', 'random.choice', (['columns_list'], {}), '(columns_list)\n', (1797, 1811), False, 'import random\n')] |
#!/usr/bin/env python3
import os
import sys
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src')]
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))]
import fire
import json
import numpy as np
import tensorflow as tf
import model, sample, encoder
import torch
import tflex
import argparse
parser=argparse.ArgumentParser(
description="Generate Text from GPT-2 from prompt",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--model_name',metavar='MODEL',type=str,default='117M',help='Pretrained model name')
parser.add_argument('--model_dir',type=str,default="models",help='Directory to use for model. Should have a subdirectory of model_name')
parser.add_argument('--restore_from', type=str, default='latest', help='Either "latest", "fresh", or a path to a checkpoint file')
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped")
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
)
parser.add_argument(
"--repetition_penalty", type=float, default=1.0, help="Float value controlling 'used' penalty. Implements repetition \
reduction (similar to CTRL) if set to a value > 0."
)
parser.add_argument("--k", type=int, default=40, help='Integer value controlling diversity. 1 means only 1 word is considered for each step (token), resulting in deterministic completions, \
while 40 means 40 words are considered at each step. 0 (default) is a special setting meaning no restrictions. (default=40).')
parser.add_argument("--p", type=float, default=0.9,help='Float value controlling diversity. Implements nucleus sampling, \
overriding top_k if set to a value > 0. (default=0.9)')
parser.add_argument("--padding_text", type=str, default="", help="Padding text for Transfo-XL and XLNet.")
parser.add_argument("--xlm_language", type=str, default="", help="Optional language when used with the XLM model.")
parser.add_argument("--max_length",type=int,default=-1,help='Maximum length before context window is cleared (-1 default)')
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.")
parser.add_argument("--length",type=int,default=64,help='How many tokens use to deterime next word (max 1024)')
parser.add_argument("--nsamples",type=int,default=1,help="Number of samples to generate")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
@tflex.register_command
def clear_context():
    """Drop the accumulated sampling context and emit a visual break."""
    tflex.reset_context()
    for _ in range(3):
        print('')
def clear_output(wait=False):
    """Clear the terminal screen.

    The *wait* flag is accepted for API compatibility (IPython-style) but
    is ignored.  On Windows the ``cls`` command is invoked; elsewhere the
    ANSI full-reset escape sequence is written to stdout.
    """
    import platform
    import subprocess
    if platform.system() == "Windows":
        proc = subprocess.Popen("cls", shell=True)
        proc.communicate()
    else:
        print("\033c", end="")
def is_ascii(s):
    """Return True when every character of *s* is a 7-bit ASCII character."""
    return not any(ord(ch) > 127 for ch in s)
def interact_model(
    model_name=args.model_name,
    chkpoint_dir=args.model_dir,
    restore_from=args.restore_from,
    seed=args.seed,
    nsamples=args.nsamples,
    step=1,
    length=args.length,
    prompt=args.prompt,
    clear=None,
    maxlen=args.max_length,
    temperature=args.temperature,
    top_k=args.k,
    top_p=args.p,
    penalize=args.repetition_penalty
):
    """
    Interactively run the model
    :model_name=117M : String, which model to use
    :chkpoint_dir: checkpoint directory
    :restore_from=None: String for which checkpoint to use, must be in the models directory
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :nsamples=1 : Number of samples to return total
    :step=1 : Number of tokens to generate at a time
    :length=64 : Window size; use 1024 for maximum size per sample
    :prompt="\\n" : Prompt to start with. The default of "" prompts with an <|endoftext|> token.
    :clear=None : If this string is encountered, clear the context window.
    :maxlen=-1 : if this many tokens are generated without
     encountering --clear, then print it and clear the context window.
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
    :top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
     overriding top_k if set to a value > 0. A good setting is 0.9.
    :penalize=0.0 : Float value controlling "used" penalty. Implements repetition
     reduction (similar to CTRL) if set to a value > 0. A decent setting might be 0.85
     with temperature 0.3 and top_k 40.
    """
    batch_size = 1
    assert nsamples % batch_size == 0
    # Tokenizer and model hyper-parameters live next to the checkpoint.
    enc = encoder.get_encoder(model_name)
    hparams = model.default_hparams()
    with open(os.path.join(chkpoint_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))
    if length > hparams.n_ctx:
        raise ValueError("Length can't be largeer than n_ctx: %s" % hparams.n_ctx)
    if step > length:
        raise ValueError("Can't get samples longer than length: %s" % length)
    with tflex.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [batch_size, None])
        np.random.seed(seed)
        tf.set_random_seed(seed)
        # Build the sampling graph: each sess.run generates `step` tokens.
        output = sample.sample_sequence(
            hparams=hparams, length=step,
            context=context,
            batch_size=batch_size,
            temperature=temperature, top_k=top_k, top_p=top_p, penalize=penalize
        )
        saver = tflex.Saver(reshape=True)
        if restore_from is None:
            restore_from = os.path.join(chkpoint_dir, model_name)
        ckpt = tflex.latest_checkpoint(restore_from)
        saver.restore(sess, ckpt)
        while True:
            tflex.check_commands()
            if tflex.should_quit():
                break
            # `prompt` may name a file; fall back to treating it as literal
            # text when it cannot be opened (the bare except is deliberate).
            try:
                with open(prompt) as f:
                    tflex.raw_text = f.read()
                    if tflex.raw_text.endswith('\n'):
                        tflex.raw_text = tflex.raw_text[:-1]
                    if tflex.raw_text.endswith('\r'):
                        tflex.raw_text = tflex.raw_text[:-1]
            except:
                tflex.raw_text = prompt
            tflex.raw_text = tflex.raw_text.replace('\\n', '\n')
            tflex.raw_text = tflex.raw_text.replace('\\t', '\t')
            #print(repr(tflex.raw_text))
            # 50256 is the <|endoftext|> token id, used for an empty prompt.
            tflex.context_tokens = enc.encode(tflex.raw_text) if len(tflex.raw_text) > 0 else [50256]
            # Trim the context from the left so it fits the sampling window.
            while len(tflex.context_tokens) > length - step - 1:
                tflex.context_tokens = tflex.context_tokens[1:]
            tflex.prompt_tokens = tflex.context_tokens[:]
            # Streaming state is kept on the tflex module so external
            # commands (see tflex.check_commands) can inspect/reset it.
            tflex.first = True
            tflex.backlog = []
            tflex.backlog_count = 0
            tflex.context_text = ""
            tflex.context_count = 0
            while True:
                for tokens in generate_result(context_tokens=tflex.context_tokens, enc=enc, output=output, context=context, nsamples=1, batch_size=batch_size, sess=sess):
                    tflex.tokens = tokens
                if tflex.first:
                    #clear_output(wait=True)
                    sys.stdout.write(enc.decode(tflex.context_tokens))
                    sys.stdout.flush()
                    tflex.first = False
                tflex.backlog.extend(tflex.tokens)
                tflex.backlog_count += 1
                # Flush the backlog once its last token decodes to pure
                # ASCII (so multi-token characters are not split mid-way) or
                # it grows past 16 tokens.
                if is_ascii(enc.decode([tflex.backlog[-1]])) or tflex.backlog_count > 16:
                    text = enc.decode(tflex.backlog)
                    result = text
                    if clear is not None:
                        result, *rest = text.split(clear)
                    sys.stdout.write(result)
                    sys.stdout.flush()
                    tflex.context_text += text
                    tflex.context_count += len(tflex.backlog)
                    def reset_context():
                        # Restart generation from the original prompt tokens.
                        tflex.context_text = ""
                        tflex.context_count = 0
                        tflex.context_tokens = []
                        tflex.first = True
                        tflex.tokens = tflex.prompt_tokens[:]
                    tflex.reset_context = reset_context
                    if maxlen > 0 and tflex.context_count > maxlen or clear is not None and clear in tflex.context_text:
                        tflex.reset_context()
                    tflex.backlog = []
                    tflex.backlog_count = 0
                tflex.check_commands()
                tflex.context_tokens.extend(tflex.tokens)
                # Slide the context window from the left to stay in bounds.
                while len(tflex.context_tokens) > length - step - 1:
                    tflex.context_tokens = tflex.context_tokens[1:]
def generate_result(context_tokens, enc, output, context, nsamples=1, batch_size=1, sess=None):
    """Yield ``nsamples`` sampled continuations for ``context_tokens``.

    Runs the ``output`` sampling op once per batch, feeding the same context
    to every batch row, and yields each row's newly generated tokens (the
    prompt tokens are stripped off the front).

    ``sess`` defaults to the TensorFlow default session *at call time*.
    BUG FIX: the original signature used ``sess=tf.get_default_session()``,
    which is evaluated once at module import -- capturing whatever session
    (usually ``None``) existed then, the classic eagerly-evaluated-default
    pitfall.  Callers that pass ``sess`` explicitly are unaffected.
    """
    if sess is None:
        sess = tf.get_default_session()
    for _ in range(nsamples // batch_size):
        out = sess.run(output, feed_dict={
            context: [context_tokens for _ in range(batch_size)]
        })[:, len(context_tokens):]
        for i in range(batch_size):
            yield out[i]

if __name__ == '__main__':
    fire.Fire(interact_model)
| [
"fire.Fire",
"tensorflow.get_default_session",
"torch.cuda.device_count",
"torch.cuda.is_available",
"tflex.raw_text.endswith",
"tensorflow.set_random_seed",
"tensorflow.Graph",
"argparse.ArgumentParser",
"subprocess.Popen",
"tensorflow.placeholder",
"tflex.should_quit",
"platform.system",
"... | [((377, 512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate Text from GPT-2 from prompt"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Generate Text from GPT-2 from prompt',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (400, 512), False, 'import argparse\n'), ((2930, 2955), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2953, 2955), False, 'import torch\n'), ((3004, 3025), 'tflex.reset_context', 'tflex.reset_context', ([], {}), '()\n', (3023, 3025), False, 'import tflex\n'), ((5518, 5549), 'encoder.get_encoder', 'encoder.get_encoder', (['model_name'], {}), '(model_name)\n', (5537, 5549), False, 'import model, sample, encoder\n'), ((5564, 5587), 'model.default_hparams', 'model.default_hparams', ([], {}), '()\n', (5585, 5587), False, 'import model, sample, encoder\n'), ((9430, 9454), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (9452, 9454), True, 'import tensorflow as tf\n'), ((9738, 9763), 'fire.Fire', 'fire.Fire', (['interact_model'], {}), '(interact_model)\n', (9747, 9763), False, 'import fire\n'), ((3128, 3145), 'platform.system', 'platform.system', ([], {}), '()\n', (3143, 3145), False, 'import subprocess, platform\n'), ((5997, 6041), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, None]'], {}), '(tf.int32, [batch_size, None])\n', (6011, 6041), True, 'import tensorflow as tf\n'), ((6050, 6070), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6064, 6070), True, 'import numpy as np\n'), ((6079, 6103), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (6097, 6103), True, 'import tensorflow as tf\n'), ((6121, 6292), 'sample.sample_sequence', 'sample.sample_sequence', ([], {'hparams': 'hparams', 'length': 'step', 'context': 'context', 'batch_size': 'batch_size', 'temperature': 'temperature', 'top_k': 'top_k', 'top_p': 'top_p', 
'penalize': 'penalize'}), '(hparams=hparams, length=step, context=context,\n batch_size=batch_size, temperature=temperature, top_k=top_k, top_p=\n top_p, penalize=penalize)\n', (6143, 6292), False, 'import model, sample, encoder\n'), ((6359, 6384), 'tflex.Saver', 'tflex.Saver', ([], {'reshape': '(True)'}), '(reshape=True)\n', (6370, 6384), False, 'import tflex\n'), ((6497, 6534), 'tflex.latest_checkpoint', 'tflex.latest_checkpoint', (['restore_from'], {}), '(restore_from)\n', (6520, 6534), False, 'import tflex\n'), ((2835, 2860), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2858, 2860), False, 'import torch\n'), ((5602, 5656), 'os.path.join', 'os.path.join', (['chkpoint_dir', 'model_name', '"""hparams.json"""'], {}), "(chkpoint_dir, model_name, 'hparams.json')\n", (5614, 5656), False, 'import os\n'), ((5699, 5711), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5708, 5711), False, 'import json\n'), ((6443, 6481), 'os.path.join', 'os.path.join', (['chkpoint_dir', 'model_name'], {}), '(chkpoint_dir, model_name)\n', (6455, 6481), False, 'import os\n'), ((6600, 6622), 'tflex.check_commands', 'tflex.check_commands', ([], {}), '()\n', (6620, 6622), False, 'import tflex\n'), ((6636, 6655), 'tflex.should_quit', 'tflex.should_quit', ([], {}), '()\n', (6653, 6655), False, 'import tflex\n'), ((7041, 7076), 'tflex.raw_text.replace', 'tflex.raw_text.replace', (['"""\\\\n"""', '"""\n"""'], {}), "('\\\\n', '\\n')\n", (7063, 7076), False, 'import tflex\n'), ((7104, 7139), 'tflex.raw_text.replace', 'tflex.raw_text.replace', (['"""\\\\t"""', '"""\t"""'], {}), "('\\\\t', '\\t')\n", (7126, 7139), False, 'import tflex\n'), ((103, 128), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (118, 128), False, 'import os\n'), ((198, 223), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (213, 223), False, 'import os\n'), ((3164, 3199), 'subprocess.Popen', 'subprocess.Popen', (['"""cls"""'], {'shell': '(True)'}), 
"('cls', shell=True)\n", (3180, 3199), False, 'import subprocess, platform\n'), ((5958, 5968), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5966, 5968), True, 'import tensorflow as tf\n'), ((6781, 6810), 'tflex.raw_text.endswith', 'tflex.raw_text.endswith', (['"""\n"""'], {}), "('\\n')\n", (6804, 6810), False, 'import tflex\n'), ((6878, 6907), 'tflex.raw_text.endswith', 'tflex.raw_text.endswith', (["'\\r'"], {}), "('\\r')\n", (6901, 6907), False, 'import tflex\n'), ((8066, 8100), 'tflex.backlog.extend', 'tflex.backlog.extend', (['tflex.tokens'], {}), '(tflex.tokens)\n', (8086, 8100), False, 'import tflex\n'), ((9130, 9152), 'tflex.check_commands', 'tflex.check_commands', ([], {}), '()\n', (9150, 9152), False, 'import tflex\n'), ((9167, 9208), 'tflex.context_tokens.extend', 'tflex.context_tokens.extend', (['tflex.tokens'], {}), '(tflex.tokens)\n', (9194, 9208), False, 'import tflex\n'), ((7997, 8015), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8013, 8015), False, 'import sys\n'), ((8413, 8437), 'sys.stdout.write', 'sys.stdout.write', (['result'], {}), '(result)\n', (8429, 8437), False, 'import sys\n'), ((8454, 8472), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8470, 8472), False, 'import sys\n'), ((9019, 9040), 'tflex.reset_context', 'tflex.reset_context', ([], {}), '()\n', (9038, 9040), False, 'import tflex\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat May 9 15:28:22 2020
@author: Ciaran
"""
def player_displacement_value_tab(events_df, metrica_attack, metrica_defence, bokeh_attack, bokeh_defence, shirt_mapping, match_list):
    """Build the 'Player Displacement - Pitch Value' Bokeh tab.

    Wires together the value surfaces (pitch control PPCF, transition PT,
    scoring opportunity PS, pitch value PPV, relative pitch value RPPV),
    the match/event selection widgets, the editable player tables and the
    re-calculation callback, and returns everything as a bokeh ``Panel``.

    Parameters are the pre-loaded event data frame, attack/defence tracking
    frames, their Bokeh-format counterparts, a per-match shirt-number
    mapping and the list of match names shown in the dropdown.
    """
    import metrica_to_bokeh as mtb
    import Metrica_PitchControl as mpc
    import pitch_value_model as pvm
    from bokeh.models import ColumnDataSource, Select, TextInput, Panel,Div, Button, DataTable, TableColumn, Paragraph
    from bokeh.events import ButtonClick
    from bokeh.layouts import row, column, WidgetBox
    import numpy as np
    from scipy import interpolate
    def make_dataset(play, event_frame, metrica_attack, metrica_defence, bokeh_attack, bokeh_defence, field_dimen = (106.,68.,), new_grid_size = 500):
        # Compute every value surface for one event and wrap each surface,
        # plus the player/ball frames, in a ColumnDataSource for plotting.
        params = mpc.default_model_params(3)
        event = events_df.loc[[(play, int(event_frame))]]
        tracking_frame = event['Start Frame'][0]
        att_frame = bokeh_attack.loc[(play,tracking_frame)]
        att_player_frame = att_frame[att_frame['player'] != "ball"]
        att_player_frame['Shirt Number'] = att_player_frame['player'].map(int).map(shirt_mapping[play]).fillna("")
        def_frame = bokeh_defence.loc[(play,tracking_frame)]
        def_player_frame = def_frame[def_frame['player'] != "ball"]
        def_player_frame['Shirt Number'] = def_player_frame['player'].map(int).map(shirt_mapping[play]).fillna("")
        ball_frame = att_frame[att_frame['player'] == "ball"]
        # Surfaces are computed on a coarse 50-cell grid.
        PPCF,xgrid,ygrid = pvm.lastrow_generate_pitch_control_for_event(play,event_frame, events_df, metrica_attack, metrica_defence, params, field_dimen = (106.,68.,), n_grid_cells_x = 50)
        PT = pvm.generate_relevance_at_event(play,event_frame, events_df, PPCF, params)
        PS = pvm.generate_scoring_opportunity(field_dimen = (106.,68.,),n_grid_cells_x = 50)
        PPV = pvm.generate_pitch_value(PPCF,PT,PS,field_dimen = (106.,68.,),n_grid_cells_x = 50)
        RPPV = pvm.generate_relative_pitch_value(play, event_frame, events_df, metrica_attack, PPV, xgrid, ygrid)
        # Up-sample each coarse surface onto a fine grid for smooth images.
        xgrid_new = np.linspace( -field_dimen[0]/2., field_dimen[0]/2., new_grid_size)
        ygrid_new = np.linspace( -field_dimen[1]/2., field_dimen[1]/2., new_grid_size)
        PPCF_int = interpolate.interp2d(xgrid, ygrid, PPCF, kind = 'cubic')
        PPCF_new = PPCF_int(xgrid_new, ygrid_new)
        PPCF_dict = dict(image = [PPCF_new],x = [xgrid.min()],y = [ygrid.min()],dw = [field_dimen[0]], dh = [field_dimen[1]])
        PT_int = interpolate.interp2d(xgrid, ygrid, PT, kind = 'cubic')
        PT_new = PT_int(xgrid_new, ygrid_new)
        PT_dict = dict(image = [PT_new],x = [xgrid.min()],y = [ygrid.min()],dw = [field_dimen[0]], dh = [field_dimen[1]])
        PS_int = interpolate.interp2d(xgrid, ygrid, PS, kind = 'cubic')
        PS_new = PS_int(xgrid_new, ygrid_new)
        PS_dict = dict(image = [PS_new],x = [xgrid.min()],y = [ygrid.min()],dw = [field_dimen[0]], dh = [field_dimen[1]])
        PPV_int = interpolate.interp2d(xgrid, ygrid, PPV, kind = 'cubic')
        PPV_new = PPV_int(xgrid_new, ygrid_new)
        PPV_dict = dict(image = [PPV_new],x = [xgrid.min()],y = [ygrid.min()],dw = [field_dimen[0]], dh = [field_dimen[1]])
        RPPV_int = interpolate.interp2d(xgrid, ygrid, RPPV, kind = 'cubic')
        RPPV_new = RPPV_int(xgrid_new, ygrid_new)
        RPPV_dict = dict(image = [RPPV_new],x = [xgrid.min()],y = [ygrid.min()],dw = [field_dimen[0]], dh = [field_dimen[1]])
        event_src = ColumnDataSource(event)
        att_src = ColumnDataSource(att_player_frame)
        def_src = ColumnDataSource(def_player_frame)
        ball_src = ColumnDataSource(ball_frame)
        PPCF_src = ColumnDataSource(PPCF_dict)
        PT_src = ColumnDataSource(PT_dict)
        PS_src = ColumnDataSource(PS_dict)
        PPV_src = ColumnDataSource(PPV_dict)
        RPPV_src = ColumnDataSource(RPPV_dict)
        return event_src, att_src, def_src, ball_src, PPCF_src, PT_src, PS_src, PPV_src, RPPV_src, xgrid, ygrid
    def make_plot(event_src, att_src, def_src, ball_src, surface_src, bokeh_attack, bokeh_defence, xgrid, ygrid,field_dimen = (106.,68.,), new_grid_size = 500, point_click = False):
        # Render one surface plus players/ball.  NOTE(review): the
        # point_click parameter is ignored -- True is always passed through.
        surface = mtb.plot_bokeh_surface_at_event(event_src, att_src, def_src, ball_src, surface_src, bokeh_attack, bokeh_defence, xgrid, ygrid, point_click = True)
        return surface
    def update(attr, old, new):
        # Widget callback: refresh both the "original" (og_*) sources and
        # the working sources for the newly selected match/event.
        match_selection = match_select.value
        event_selection = event_select.value
        new_og_event_src, new_og_att_src, new_og_def_src, new_og_ball_src, new_og_PPCF_src, new_og_PT_src, new_og_PS_src, new_og_PPV_src, new_og_RPPV_src, xgrid, ygrid = make_dataset(match_selection, event_selection, metrica_attack, metrica_defence, bokeh_attack, bokeh_defence)
        og_att_src.data.update(new_og_att_src.data)
        og_def_src.data.update(new_og_def_src.data)
        og_ball_src.data.update(new_og_ball_src.data)
        og_PPCF_src.data.update(new_og_PPCF_src.data)
        og_PT_src.data.update(new_og_PT_src.data)
        # NOTE(review): the next line writes PS data into og_PT_src while
        # og_PS_src is never refreshed (og_event_src is not refreshed
        # either) -- looks like a copy-paste slip; confirm intent.
        og_PT_src.data.update(new_og_PS_src.data)
        og_PPV_src.data.update(new_og_PPV_src.data)
        og_RPPV_src.data.update(new_og_RPPV_src.data)
        new_event_src, new_att_src, new_def_src, new_ball_src, new_PPCF_src, new_PT_src, new_PS_src, new_PPV_src, new_RPPV_src, xgrid, ygrid = make_dataset(match_selection, event_selection, metrica_attack, metrica_defence, bokeh_attack, bokeh_defence)
        event_src.data.update(new_event_src.data)
        att_src.data.update(new_att_src.data)
        def_src.data.update(new_def_src.data)
        ball_src.data.update(new_ball_src.data)
        PPCF_src.data.update(new_PPCF_src.data)
        PT_src.data.update(new_PT_src.data)
        # NOTE(review): same PS-into-PT pattern as above; PS_src is never
        # updated.
        PT_src.data.update(new_PS_src.data)
        PPV_src.data.update(new_PPV_src.data)
        RPPV_src.data.update(new_RPPV_src.data)
    match_select = Select(title="Select Match:", value=match_list[0], options=match_list)
    match_select.on_change('value', update)
    event_select = TextInput(title="Event:",value="0")
    event_select.on_change('value', update)
    player_select = TextInput(title="Index of Player Moved:",value="")
    team_select = Select(title="Team of Player Moved:", value="attack", options=["attack", "defence"])
    def recalculate(event):
        # Button callback: measure how far the selected player was dragged
        # from the original (og_*) position, re-run the model with the
        # displaced player, and refresh every data source.
        match_selection = match_select.value
        event_selection = event_select.value
        player_selection = int(player_select.value)
        team_selection = team_select.value
        if team_selection == 'attack':
            player = att_src.data['player'][player_selection]
            shirt = att_src.data['Shirt Number'][player_selection]
            # attack
            selected_att_x = att_src.data['x'][player_selection]
            selected_att_y = att_src.data['y'][player_selection]
            og_selected_att_x = og_att_src.data['x'][player_selection]
            og_selected_att_y = og_att_src.data['y'][player_selection]
            x_displacement = selected_att_x - og_selected_att_x
            y_displacement = selected_att_y - og_selected_att_y
            metrica_attack_new = mtb.player_displacement(match_selection, event_selection, events_df, metrica_attack, metrica_defence,'attack', player, x_displacement, y_displacement)
            bokeh_attack_new = mtb.tracking_to_bokeh_format(metrica_attack_new)
            new_event_src, new_att_src, new_def_src, new_ball_src, new_PPCF_src, new_PT_src, new_PS_src, new_PPV_src, new_RPPV_src, xgrid, ygrid = make_dataset(match_selection, event_selection, metrica_attack_new, metrica_defence, bokeh_attack_new, bokeh_defence)
            event_src.data.update(new_event_src.data)
            att_src.data.update(new_att_src.data)
            def_src.data.update(new_def_src.data)
            ball_src.data.update(new_ball_src.data)
            PPCF_src.data.update(new_PPCF_src.data)
            PT_src.data.update(new_PT_src.data)
            # NOTE(review): PS data written into PT_src again -- see update().
            PT_src.data.update(new_PS_src.data)
            PPV_src.data.update(new_PPV_src.data)
            RPPV_src.data.update(new_RPPV_src.data)
            # NOTE(review): these two plots are local to the callback and are
            # not attached to the laid-out document; the source updates above
            # already drive the visible figures -- confirm they are needed.
            pitch_value = make_plot(event_src, att_src, def_src, ball_src, PPV_src, bokeh_attack, bokeh_defence, xgrid, ygrid, field_dimen = (106.,68.,), new_grid_size = 500)
            relative_pitch_value = make_plot(event_src, att_src, def_src, ball_src, RPPV_src, bokeh_attack, bokeh_defence, xgrid, ygrid, field_dimen = (106.,68.,), new_grid_size = 500)
        elif team_selection == 'defence':
            player = def_src.data['player'][player_selection]
            shirt = def_src.data['Shirt Number'][player_selection]
            # defence
            selected_def_x = def_src.data['x'][player_selection]
            selected_def_y = def_src.data['y'][player_selection]
            og_selected_def_x = og_def_src.data['x'][player_selection]
            og_selected_def_y = og_def_src.data['y'][player_selection]
            x_displacement = selected_def_x - og_selected_def_x
            y_displacement = selected_def_y - og_selected_def_y
            metrica_defence_new = mtb.player_displacement(match_selection, event_selection, events_df, metrica_attack, metrica_defence,'defence', player, x_displacement, y_displacement)
            bokeh_defence_new = mtb.tracking_to_bokeh_format(metrica_defence_new)
            new_event_src, new_att_src, new_def_src, new_ball_src, new_PPCF_src, new_PT_src, new_PS_src, new_PPV_src, new_RPPV_src, xgrid, ygrid = make_dataset(match_selection, event_selection, metrica_attack, metrica_defence_new, bokeh_attack, bokeh_defence_new)
            event_src.data.update(new_event_src.data)
            att_src.data.update(new_att_src.data)
            def_src.data.update(new_def_src.data)
            ball_src.data.update(new_ball_src.data)
            PPCF_src.data.update(new_PPCF_src.data)
            PT_src.data.update(new_PT_src.data)
            # NOTE(review): PS data written into PT_src again -- see update().
            PT_src.data.update(new_PS_src.data)
            PPV_src.data.update(new_PPV_src.data)
            RPPV_src.data.update(new_RPPV_src.data)
            pitch_value = make_plot(event_src, att_src, def_src, ball_src, PPV_src, bokeh_attack, bokeh_defence, xgrid, ygrid, field_dimen = (106.,68.,), new_grid_size = 500)
            relative_pitch_value = make_plot(event_src, att_src, def_src, ball_src, RPPV_src, bokeh_attack, bokeh_defence, xgrid, ygrid, field_dimen = (106.,68.,), new_grid_size = 500)
    recalculate_button = Button(label="Re-Calculate Pitch Value")
    recalculate_button.on_event(ButtonClick, recalculate)
    # Initial match to plot
    play = 'Liverpool [3] - 0 Bournemouth'
    event_frame = 0
    # og_* sources hold the untouched positions used as the displacement
    # baseline; the second set backs the visible, editable plots.
    og_event_src, og_att_src, og_def_src, og_ball_src, og_PPCF_src, og_PT_src, og_PS_src, og_PPV_src, og_RPPV_src, xgrid, ygrid = make_dataset(play, event_frame, metrica_attack, metrica_defence, bokeh_attack, bokeh_defence)
    event_src, att_src, def_src, ball_src, PPCF_src, PT_src, PS_src, PPV_src, RPPV_src, xgrid, ygrid = make_dataset(play, event_frame, metrica_attack, metrica_defence, bokeh_attack, bokeh_defence)
    pitch_value = make_plot(event_src, att_src, def_src, ball_src, PPV_src, bokeh_attack, bokeh_defence, xgrid, ygrid, field_dimen = (106.,68.,), new_grid_size = 500)
    pitch_value.title.text = "Pitch Value"
    pitch_value.title.text_font_size = "20px"
    relative_pitch_value = make_plot(event_src, att_src, def_src, ball_src, RPPV_src, bokeh_attack, bokeh_defence, xgrid, ygrid, field_dimen = (106.,68.,), new_grid_size = 500)
    relative_pitch_value.title.text = "Relative Pitch Value"
    relative_pitch_value.title.text_font_size = "20px"
    # Create data tables for viewing movements
    columns = [
        TableColumn(field="Shirt Number", title = "Shirt Number"),
        TableColumn(field="player", title = "player"),
        TableColumn(field="x", title = "x"),
        TableColumn(field="y", title ="y"),
    ]
    att_table = DataTable(source=att_src, columns=columns, width=400, height=280, editable = True)
    att_title = Div(text="Red Team")
    def_table = DataTable(source=def_src, columns=columns, width=400, height=280, editable = True)
    def_title = Div(text="Blue Team")
    # Paragraph for instructions and disclaimers
    disclaimer = Paragraph(text = "*** Disclaimer - You can click the PointDrawTool in Pitch Value for each team to move a single player at a time. If you wish to re-calculate the Pitch Value based on the new location, you must add in a new player (click anywhere on the pitch with the PointDrawTool) and then remove them again (click on them to highlight only them, then press backspace). This is necessary to do for each team everytime you want to re-calculate Pitch Values. Any advice on correcting this would be appreciated! ***")
    instructions = Paragraph(text = "Select a match from the dropdown list below. Then enter an event number. If a player is moved, enter their Index to the respective table and select their team (Red = attack, Blue = defence). Pitch Value is the same as in 'Events - Pitch Value'. Relative Pitch Value highlights areas that will increase Pitch Value compared to the starting position. Again can turn off some HoverTools if necessary.")
    #notes = column(disclaimer, instructions)
    # Layout setup
    control = WidgetBox(column(match_select, event_select, player_select, team_select, recalculate_button))
    plots = column(pitch_value, relative_pitch_value)
    tables = column(column(att_title, att_table), column(def_title, def_table))
    layout = column(disclaimer, instructions, row(plots, column(control, tables)))
    tab3 = Panel(child = layout, title = 'Player Displacement - Pitch Value')
return tab3 | [
"bokeh.layouts.column",
"pitch_value_model.lastrow_generate_pitch_control_for_event",
"scipy.interpolate.interp2d",
"bokeh.models.Div",
"pitch_value_model.generate_relative_pitch_value",
"metrica_to_bokeh.plot_bokeh_surface_at_event",
"bokeh.models.Paragraph",
"bokeh.models.TableColumn",
"numpy.lins... | [((6028, 6098), 'bokeh.models.Select', 'Select', ([], {'title': '"""Select Match:"""', 'value': 'match_list[0]', 'options': 'match_list'}), "(title='Select Match:', value=match_list[0], options=match_list)\n", (6034, 6098), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((6166, 6202), 'bokeh.models.TextInput', 'TextInput', ([], {'title': '"""Event:"""', 'value': '"""0"""'}), "(title='Event:', value='0')\n", (6175, 6202), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((6278, 6329), 'bokeh.models.TextInput', 'TextInput', ([], {'title': '"""Index of Player Moved:"""', 'value': '""""""'}), "(title='Index of Player Moved:', value='')\n", (6287, 6329), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((6354, 6442), 'bokeh.models.Select', 'Select', ([], {'title': '"""Team of Player Moved:"""', 'value': '"""attack"""', 'options': "['attack', 'defence']"}), "(title='Team of Player Moved:', value='attack', options=['attack',\n 'defence'])\n", (6360, 6442), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((10776, 10816), 'bokeh.models.Button', 'Button', ([], {'label': '"""Re-Calculate Pitch Value"""'}), "(label='Re-Calculate Pitch Value')\n", (10782, 10816), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((12278, 12363), 'bokeh.models.DataTable', 'DataTable', ([], {'source': 'att_src', 'columns': 'columns', 'width': '(400)', 'height': '(280)', 'editable': '(True)'}), '(source=att_src, columns=columns, width=400, height=280, editable=True\n )\n', (12287, 12363), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, 
DataTable, TableColumn, Paragraph\n'), ((12378, 12398), 'bokeh.models.Div', 'Div', ([], {'text': '"""Red Team"""'}), "(text='Red Team')\n", (12381, 12398), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((12416, 12501), 'bokeh.models.DataTable', 'DataTable', ([], {'source': 'def_src', 'columns': 'columns', 'width': '(400)', 'height': '(280)', 'editable': '(True)'}), '(source=def_src, columns=columns, width=400, height=280, editable=True\n )\n', (12425, 12501), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((12516, 12537), 'bokeh.models.Div', 'Div', ([], {'text': '"""Blue Team"""'}), "(text='Blue Team')\n", (12519, 12537), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((12612, 13137), 'bokeh.models.Paragraph', 'Paragraph', ([], {'text': '"""*** Disclaimer - You can click the PointDrawTool in Pitch Value for each team to move a single player at a time. If you wish to re-calculate the Pitch Value based on the new location, you must add in a new player (click anywhere on the pitch with the PointDrawTool) and then remove them again (click on them to highlight only them, then press backspace). This is necessary to do for each team everytime you want to re-calculate Pitch Values. Any advice on correcting this would be appreciated! ***"""'}), "(text=\n '*** Disclaimer - You can click the PointDrawTool in Pitch Value for each team to move a single player at a time. If you wish to re-calculate the Pitch Value based on the new location, you must add in a new player (click anywhere on the pitch with the PointDrawTool) and then remove them again (click on them to highlight only them, then press backspace). This is necessary to do for each team everytime you want to re-calculate Pitch Values. 
Any advice on correcting this would be appreciated! ***'\n )\n", (12621, 13137), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((13150, 13575), 'bokeh.models.Paragraph', 'Paragraph', ([], {'text': '"""Select a match from the dropdown list below. Then enter an event number. If a player is moved, enter their Index to the respective table and select their team (Red = attack, Blue = defence). Pitch Value is the same as in \'Events - Pitch Value\'. Relative Pitch Value highlights areas that will increase Pitch Value compared to the starting position. Again can turn off some HoverTools if necessary."""'}), '(text=\n "Select a match from the dropdown list below. Then enter an event number. If a player is moved, enter their Index to the respective table and select their team (Red = attack, Blue = defence). Pitch Value is the same as in \'Events - Pitch Value\'. Relative Pitch Value highlights areas that will increase Pitch Value compared to the starting position. 
Again can turn off some HoverTools if necessary."\n )\n', (13159, 13575), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((13786, 13827), 'bokeh.layouts.column', 'column', (['pitch_value', 'relative_pitch_value'], {}), '(pitch_value, relative_pitch_value)\n', (13792, 13827), False, 'from bokeh.layouts import row, column, WidgetBox\n'), ((14007, 14069), 'bokeh.models.Panel', 'Panel', ([], {'child': 'layout', 'title': '"""Player Displacement - Pitch Value"""'}), "(child=layout, title='Player Displacement - Pitch Value')\n", (14012, 14069), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((806, 833), 'Metrica_PitchControl.default_model_params', 'mpc.default_model_params', (['(3)'], {}), '(3)\n', (830, 833), True, 'import Metrica_PitchControl as mpc\n'), ((1538, 1707), 'pitch_value_model.lastrow_generate_pitch_control_for_event', 'pvm.lastrow_generate_pitch_control_for_event', (['play', 'event_frame', 'events_df', 'metrica_attack', 'metrica_defence', 'params'], {'field_dimen': '(106.0, 68.0)', 'n_grid_cells_x': '(50)'}), '(play, event_frame, events_df,\n metrica_attack, metrica_defence, params, field_dimen=(106.0, 68.0),\n n_grid_cells_x=50)\n', (1582, 1707), True, 'import pitch_value_model as pvm\n'), ((1715, 1790), 'pitch_value_model.generate_relevance_at_event', 'pvm.generate_relevance_at_event', (['play', 'event_frame', 'events_df', 'PPCF', 'params'], {}), '(play, event_frame, events_df, PPCF, params)\n', (1746, 1790), True, 'import pitch_value_model as pvm\n'), ((1804, 1882), 'pitch_value_model.generate_scoring_opportunity', 'pvm.generate_scoring_opportunity', ([], {'field_dimen': '(106.0, 68.0)', 'n_grid_cells_x': '(50)'}), '(field_dimen=(106.0, 68.0), n_grid_cells_x=50)\n', (1836, 1882), True, 'import pitch_value_model as pvm\n'), ((1899, 1987), 'pitch_value_model.generate_pitch_value', 
'pvm.generate_pitch_value', (['PPCF', 'PT', 'PS'], {'field_dimen': '(106.0, 68.0)', 'n_grid_cells_x': '(50)'}), '(PPCF, PT, PS, field_dimen=(106.0, 68.0),\n n_grid_cells_x=50)\n', (1923, 1987), True, 'import pitch_value_model as pvm\n'), ((1998, 2100), 'pitch_value_model.generate_relative_pitch_value', 'pvm.generate_relative_pitch_value', (['play', 'event_frame', 'events_df', 'metrica_attack', 'PPV', 'xgrid', 'ygrid'], {}), '(play, event_frame, events_df,\n metrica_attack, PPV, xgrid, ygrid)\n', (2031, 2100), True, 'import pitch_value_model as pvm\n'), ((2120, 2191), 'numpy.linspace', 'np.linspace', (['(-field_dimen[0] / 2.0)', '(field_dimen[0] / 2.0)', 'new_grid_size'], {}), '(-field_dimen[0] / 2.0, field_dimen[0] / 2.0, new_grid_size)\n', (2131, 2191), True, 'import numpy as np\n'), ((2208, 2279), 'numpy.linspace', 'np.linspace', (['(-field_dimen[1] / 2.0)', '(field_dimen[1] / 2.0)', 'new_grid_size'], {}), '(-field_dimen[1] / 2.0, field_dimen[1] / 2.0, new_grid_size)\n', (2219, 2279), True, 'import numpy as np\n'), ((2297, 2351), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['xgrid', 'ygrid', 'PPCF'], {'kind': '"""cubic"""'}), "(xgrid, ygrid, PPCF, kind='cubic')\n", (2317, 2351), False, 'from scipy import interpolate\n'), ((2550, 2602), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['xgrid', 'ygrid', 'PT'], {'kind': '"""cubic"""'}), "(xgrid, ygrid, PT, kind='cubic')\n", (2570, 2602), False, 'from scipy import interpolate\n'), ((2793, 2845), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['xgrid', 'ygrid', 'PS'], {'kind': '"""cubic"""'}), "(xgrid, ygrid, PS, kind='cubic')\n", (2813, 2845), False, 'from scipy import interpolate\n'), ((3037, 3090), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['xgrid', 'ygrid', 'PPV'], {'kind': '"""cubic"""'}), "(xgrid, ygrid, PPV, kind='cubic')\n", (3057, 3090), False, 'from scipy import interpolate\n'), ((3287, 3341), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['xgrid', 'ygrid', 
'RPPV'], {'kind': '"""cubic"""'}), "(xgrid, ygrid, RPPV, kind='cubic')\n", (3307, 3341), False, 'from scipy import interpolate\n'), ((3547, 3570), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['event'], {}), '(event)\n', (3563, 3570), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((3590, 3624), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['att_player_frame'], {}), '(att_player_frame)\n', (3606, 3624), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((3644, 3678), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['def_player_frame'], {}), '(def_player_frame)\n', (3660, 3678), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((3699, 3727), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['ball_frame'], {}), '(ball_frame)\n', (3715, 3727), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((3748, 3775), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['PPCF_dict'], {}), '(PPCF_dict)\n', (3764, 3775), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((3794, 3819), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['PT_dict'], {}), '(PT_dict)\n', (3810, 3819), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((3838, 3863), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['PS_dict'], {}), '(PS_dict)\n', (3854, 3863), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((3883, 3909), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['PPV_dict'], {}), '(PPV_dict)\n', (3899, 
3909), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((3930, 3957), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['RPPV_dict'], {}), '(RPPV_dict)\n', (3946, 3957), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((4283, 4431), 'metrica_to_bokeh.plot_bokeh_surface_at_event', 'mtb.plot_bokeh_surface_at_event', (['event_src', 'att_src', 'def_src', 'ball_src', 'surface_src', 'bokeh_attack', 'bokeh_defence', 'xgrid', 'ygrid'], {'point_click': '(True)'}), '(event_src, att_src, def_src, ball_src,\n surface_src, bokeh_attack, bokeh_defence, xgrid, ygrid, point_click=True)\n', (4314, 4431), True, 'import metrica_to_bokeh as mtb\n'), ((12048, 12103), 'bokeh.models.TableColumn', 'TableColumn', ([], {'field': '"""Shirt Number"""', 'title': '"""Shirt Number"""'}), "(field='Shirt Number', title='Shirt Number')\n", (12059, 12103), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((12116, 12159), 'bokeh.models.TableColumn', 'TableColumn', ([], {'field': '"""player"""', 'title': '"""player"""'}), "(field='player', title='player')\n", (12127, 12159), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((12172, 12205), 'bokeh.models.TableColumn', 'TableColumn', ([], {'field': '"""x"""', 'title': '"""x"""'}), "(field='x', title='x')\n", (12183, 12205), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), ((12218, 12251), 'bokeh.models.TableColumn', 'TableColumn', ([], {'field': '"""y"""', 'title': '"""y"""'}), "(field='y', title='y')\n", (12229, 12251), False, 'from bokeh.models import ColumnDataSource, Select, TextInput, Panel, Div, Button, DataTable, TableColumn, Paragraph\n'), 
((13689, 13775), 'bokeh.layouts.column', 'column', (['match_select', 'event_select', 'player_select', 'team_select', 'recalculate_button'], {}), '(match_select, event_select, player_select, team_select,\n recalculate_button)\n', (13695, 13775), False, 'from bokeh.layouts import row, column, WidgetBox\n'), ((13849, 13877), 'bokeh.layouts.column', 'column', (['att_title', 'att_table'], {}), '(att_title, att_table)\n', (13855, 13877), False, 'from bokeh.layouts import row, column, WidgetBox\n'), ((13879, 13907), 'bokeh.layouts.column', 'column', (['def_title', 'def_table'], {}), '(def_title, def_table)\n', (13885, 13907), False, 'from bokeh.layouts import row, column, WidgetBox\n'), ((7328, 7487), 'metrica_to_bokeh.player_displacement', 'mtb.player_displacement', (['match_selection', 'event_selection', 'events_df', 'metrica_attack', 'metrica_defence', '"""attack"""', 'player', 'x_displacement', 'y_displacement'], {}), "(match_selection, event_selection, events_df,\n metrica_attack, metrica_defence, 'attack', player, x_displacement,\n y_displacement)\n", (7351, 7487), True, 'import metrica_to_bokeh as mtb\n'), ((7511, 7559), 'metrica_to_bokeh.tracking_to_bokeh_format', 'mtb.tracking_to_bokeh_format', (['metrica_attack_new'], {}), '(metrica_attack_new)\n', (7539, 7559), True, 'import metrica_to_bokeh as mtb\n'), ((13967, 13990), 'bokeh.layouts.column', 'column', (['control', 'tables'], {}), '(control, tables)\n', (13973, 13990), False, 'from bokeh.layouts import row, column, WidgetBox\n'), ((9379, 9539), 'metrica_to_bokeh.player_displacement', 'mtb.player_displacement', (['match_selection', 'event_selection', 'events_df', 'metrica_attack', 'metrica_defence', '"""defence"""', 'player', 'x_displacement', 'y_displacement'], {}), "(match_selection, event_selection, events_df,\n metrica_attack, metrica_defence, 'defence', player, x_displacement,\n y_displacement)\n", (9402, 9539), True, 'import metrica_to_bokeh as mtb\n'), ((9564, 9613), 
'metrica_to_bokeh.tracking_to_bokeh_format', 'mtb.tracking_to_bokeh_format', (['metrica_defence_new'], {}), '(metrica_defence_new)\n', (9592, 9613), True, 'import metrica_to_bokeh as mtb\n')] |
"""
Generic methods for converting data between different spatial coordinate systems.
Uses pyproj library.
"""
import firedrake as fd
import pyproj
import numpy
from abc import ABC, abstractmethod
# Geographic longitude/latitude reference system on the WGS84 datum; used as
# the common intermediate system for the coordinate conversions below.
LL_WGS84 = pyproj.Proj(proj='latlong', datum='WGS84', errcheck=True)
class CoordinateSystem(ABC):
    """
    Abstract base class for horizontal coordinate systems.

    Concrete subclasses provide coordinate transformations between their
    native (x, y) coordinates and geographic (longitude, latitude)
    coordinates.
    """
    @abstractmethod
    def to_lonlat(self, x, y):
        """Convert (x, y) coordinates to (longitude, latitude)."""
        pass

    @abstractmethod
    def get_vector_rotator(self, x, y):
        """
        Return a vector rotator object.

        The rotator converts vector-valued data to/from longitude, latitude
        coordinates.
        """
        pass
def proj_transform(x, y, trans=None, source=None, destination=None):
    """
    Transform coordinates from source to target system.

    NaN entries in array inputs are passed through untouched, as proj can
    behave erratically on non-finite values.

    :arg x,y: coordinates, float or numpy.array_like
    :kwarg trans: pyproj Transformer object (optional)
    :kwarg source: source coordinate system, Proj object
    :kwarg destination: destination coordinate system, Proj object
    :return: transformed (x, y) coordinates, same shape as the input
    """
    if trans is None:
        assert source is not None and destination is not None, \
            'Either trans or source and destination must be defined'
        # bug fix: previously trans was reset to None here, which made the
        # transform calls below fail; build the transformer instead
        trans = pyproj.Transformer.from_crs(source.srs, destination.srs)
    x_is_array = isinstance(x, numpy.ndarray)
    y_is_array = isinstance(y, numpy.ndarray)
    numpy_inputs = x_is_array or y_is_array
    if numpy_inputs:
        assert x_is_array and y_is_array, 'both x and y must be numpy arrays'
        assert x.shape == y.shape, 'x and y must have same shape'
        # transform only non-nan entries as proj behavior can be erratic
        a = numpy.full_like(x, numpy.nan)
        b = numpy.full_like(y, numpy.nan)
        good_ix = numpy.logical_and(numpy.isfinite(x), numpy.isfinite(y))
        a[good_ix], b[good_ix] = trans.transform(x[good_ix], y[good_ix])
    else:
        a, b = trans.transform(x, y)
    return a, b
class UTMCoordinateSystem(CoordinateSystem):
    """
    Horizontal coordinate system for a single Universal Transverse Mercator
    (UTM) zone.

    Caches pyproj transformer objects for converting between UTM (x, y)
    coordinates and WGS84 (longitude, latitude) coordinates.
    """
    def __init__(self, utm_zone):
        self.proj_obj = pyproj.Proj(proj='utm', zone=utm_zone, datum='WGS84',
                                    units='m', errcheck=True)
        self.transformer_lonlat = pyproj.Transformer.from_crs(
            self.proj_obj.srs, LL_WGS84.srs)
        self.transformer_xy = pyproj.Transformer.from_crs(
            LL_WGS84.srs, self.proj_obj.srs)

    def to_lonlat(self, x, y, positive_lon=False):
        """
        Convert (x, y) coordinates to (longitude, latitude).

        :arg x: x coordinate
        :arg y: y coordinate
        :type x: float or numpy.array_like
        :type y: float or numpy.array_like
        :kwarg positive_lon: should positive longitude be enforced?
        :return: longitude, latitude coordinates
        """
        lon, lat = proj_transform(x, y, trans=self.transformer_lonlat)
        if not positive_lon:
            return lon, lat
        return numpy.mod(lon, 360.0), lat

    def to_xy(self, lon, lat):
        """
        Convert (longitude, latitude) coordinates to (x, y).

        :arg lon: longitude coordinate
        :arg lat: latitude coordinate
        :type lon: float or numpy.array_like
        :type lat: float or numpy.array_like
        :return: x, y coordinates
        """
        return proj_transform(lon, lat, trans=self.transformer_xy)

    def get_mesh_lonlat_function(self, mesh2d):
        """
        Construct a :class:`Function` holding the mesh coordinates in
        longitude-latitude coordinates.

        :arg mesh2d: the 2D mesh
        """
        dim = mesh2d.topological_dimension()
        if dim != 2:
            raise ValueError(f'Expected a mesh of dimension 2, not {dim}')
        if mesh2d.geometric_dimension() != 2:
            raise ValueError('Mesh must reside in 2-dimensional space')
        coords = mesh2d.coordinates
        x_arr = coords.dat.data_ro[:, 0]
        y_arr = coords.dat.data_ro[:, 1]
        lon, lat = self.transformer_lonlat.transform(x_arr, y_arr)
        lonlat = fd.Function(coords.function_space())
        lonlat.dat.data[:, 0] = lon
        lonlat.dat.data[:, 1] = lat
        return lonlat

    def get_vector_rotator(self, lon, lat):
        """
        Return a vector rotator object.

        The rotator converts vector-valued data from longitude, latitude
        coordinates to the mesh coordinate system.
        """
        return VectorCoordSysRotation(LL_WGS84, self.proj_obj, lon, lat)
def convert_coords(source_sys, target_sys, x, y):
    """
    Convert coordinates from source_sys to target_sys.

    This function extends the pyproj.transform method by handling NaNs
    correctly: NaN entries in array inputs are left untouched.

    :arg source_sys: pyproj coordinate system where (x, y) are defined in
    :arg target_sys: target pyproj coordinate system
    :arg x: x coordinate
    :arg y: y coordinate
    :type x: float or numpy.array_like
    :type y: float or numpy.array_like
    """
    if not isinstance(x, numpy.ndarray):
        return pyproj.transform(source_sys, target_sys, x, y)
    # proj may give wrong results if nans are present in the arrays, so
    # transform only the finite entries and keep NaN everywhere else
    lon = numpy.full_like(x, numpy.nan)
    lat = numpy.full_like(y, numpy.nan)
    finite_mask = numpy.logical_and(numpy.isfinite(x), numpy.isfinite(y))
    lon[finite_mask], lat[finite_mask] = pyproj.transform(
        source_sys, target_sys, x[finite_mask], y[finite_mask])
    return lon, lat
def get_vector_rotation_matrix(source_sys, target_sys, x, y, delta=None):
    """
    Estimate the local rotation matrix that converts vectors defined in
    source_sys to target_sys.

    Assume a vector field defined in source_sys: vectors located at (x, y)
    define the x and y components. The vectors can then be rotated to
    represent x2 and y2 components of the target_sys by applying a local
    rotation:

    .. code-block:: python

        R, theta = get_vector_rotation_matrix(source_sys, target_sys, x, lat)
        v_xy = numpy.array([[v_x], [v_y]])
        v_new = numpy.matmul(R, v_xy)
        v_x2, v_y2 = v_new
    """
    if delta is None:
        delta = 1e-6  # ~1 m in LL_WGS84
    # finite difference of the target coordinates along the source y axis
    x1, y1 = pyproj.transform(source_sys, target_sys, x, y)
    x2, y2 = pyproj.transform(source_sys, target_sys, x, y + delta)
    dx = (x2 - x1) / delta
    dy = (y2 - y1) / delta
    theta = numpy.arctan2(-dx, dy)
    cos_t = numpy.cos(theta)
    sin_t = numpy.sin(theta)
    rot = numpy.array([[cos_t, -sin_t], [sin_t, cos_t]])
    return rot, theta
class VectorCoordSysRotation(object):
    """
    Callable object that rotates vectors defined in source_sys coordinates
    to a different coordinate system.
    """
    def __init__(self, source_sys, target_sys, x, y):
        """
        :arg source_sys: pyproj coordinate system where (x, y) are defined in
        :arg target_sys: target pyproj coordinate system
        :arg x: x coordinate
        :arg y: y coordinate
        """
        _, theta = get_vector_rotation_matrix(source_sys, target_sys, x, y)
        self.rotation_sin = numpy.sin(theta)
        self.rotation_cos = numpy.cos(theta)

    def __call__(self, v_x, v_y, i_node=None):
        """
        Rotate vectors defined by the `v_x` and `v_y` components.

        :arg v_x, v_y: vector x, y components
        :kwarg i_node: If not None, rotate the i-th vector instead of the
            whole array
        """
        sel = slice(None) if i_node is None else [i_node]
        cos_t = self.rotation_cos[sel]
        sin_t = self.rotation_sin[sel]
        # 2D rotation: [u, v] = [[c, -s], [s, c]] @ [v_x, v_y]
        u = v_x * cos_t - v_y * sin_t
        v = v_x * sin_t + v_y * cos_t
        return u, v
| [
"numpy.full_like",
"pyproj.transform",
"numpy.array",
"pyproj.Transformer.from_crs",
"numpy.arctan2",
"numpy.cos",
"pyproj.Proj",
"numpy.isfinite",
"numpy.sin",
"numpy.mod"
] | [((209, 266), 'pyproj.Proj', 'pyproj.Proj', ([], {'proj': '"""latlong"""', 'datum': '"""WGS84"""', 'errcheck': '(True)'}), "(proj='latlong', datum='WGS84', errcheck=True)\n", (220, 266), False, 'import pyproj\n'), ((6231, 6277), 'pyproj.transform', 'pyproj.transform', (['source_sys', 'target_sys', 'x', 'y'], {}), '(source_sys, target_sys, x, y)\n', (6247, 6277), False, 'import pyproj\n'), ((6292, 6346), 'pyproj.transform', 'pyproj.transform', (['source_sys', 'target_sys', 'x', '(y + delta)'], {}), '(source_sys, target_sys, x, y + delta)\n', (6308, 6346), False, 'import pyproj\n'), ((6417, 6443), 'numpy.arctan2', 'numpy.arctan2', (['(-dxdl)', 'dydl'], {}), '(-dxdl, dydl)\n', (6430, 6443), False, 'import numpy\n'), ((6453, 6469), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (6462, 6469), False, 'import numpy\n'), ((6478, 6494), 'numpy.sin', 'numpy.sin', (['theta'], {}), '(theta)\n', (6487, 6494), False, 'import numpy\n'), ((6503, 6533), 'numpy.array', 'numpy.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (6514, 6533), False, 'import numpy\n'), ((1721, 1750), 'numpy.full_like', 'numpy.full_like', (['x', 'numpy.nan'], {}), '(x, numpy.nan)\n', (1736, 1750), False, 'import numpy\n'), ((1763, 1792), 'numpy.full_like', 'numpy.full_like', (['y', 'numpy.nan'], {}), '(y, numpy.nan)\n', (1778, 1792), False, 'import numpy\n'), ((2188, 2267), 'pyproj.Proj', 'pyproj.Proj', ([], {'proj': '"""utm"""', 'zone': 'utm_zone', 'datum': '"""WGS84"""', 'units': '"""m"""', 'errcheck': '(True)'}), "(proj='utm', zone=utm_zone, datum='WGS84', units='m', errcheck=True)\n", (2199, 2267), False, 'import pyproj\n'), ((2338, 2398), 'pyproj.Transformer.from_crs', 'pyproj.Transformer.from_crs', (['self.proj_obj.srs', 'LL_WGS84.srs'], {}), '(self.proj_obj.srs, LL_WGS84.srs)\n', (2365, 2398), False, 'import pyproj\n'), ((2442, 2502), 'pyproj.Transformer.from_crs', 'pyproj.Transformer.from_crs', (['LL_WGS84.srs', 'self.proj_obj.srs'], {}), '(LL_WGS84.srs, 
self.proj_obj.srs)\n', (2469, 2502), False, 'import pyproj\n'), ((5156, 5185), 'numpy.full_like', 'numpy.full_like', (['x', 'numpy.nan'], {}), '(x, numpy.nan)\n', (5171, 5185), False, 'import numpy\n'), ((5200, 5229), 'numpy.full_like', 'numpy.full_like', (['y', 'numpy.nan'], {}), '(y, numpy.nan)\n', (5215, 5229), False, 'import numpy\n'), ((5338, 5400), 'pyproj.transform', 'pyproj.transform', (['source_sys', 'target_sys', 'x[goodIx]', 'y[goodIx]'], {}), '(source_sys, target_sys, x[goodIx], y[goodIx])\n', (5354, 5400), False, 'import pyproj\n'), ((5443, 5489), 'pyproj.transform', 'pyproj.transform', (['source_sys', 'target_sys', 'x', 'y'], {}), '(source_sys, target_sys, x, y)\n', (5459, 5489), False, 'import pyproj\n'), ((7079, 7095), 'numpy.sin', 'numpy.sin', (['theta'], {}), '(theta)\n', (7088, 7095), False, 'import numpy\n'), ((7124, 7140), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (7133, 7140), False, 'import numpy\n'), ((1829, 1846), 'numpy.isfinite', 'numpy.isfinite', (['x'], {}), '(x)\n', (1843, 1846), False, 'import numpy\n'), ((1848, 1865), 'numpy.isfinite', 'numpy.isfinite', (['y'], {}), '(y)\n', (1862, 1865), False, 'import numpy\n'), ((3028, 3049), 'numpy.mod', 'numpy.mod', (['lon', '(360.0)'], {}), '(lon, 360.0)\n', (3037, 3049), False, 'import numpy\n'), ((5265, 5282), 'numpy.isfinite', 'numpy.isfinite', (['x'], {}), '(x)\n', (5279, 5282), False, 'import numpy\n'), ((5284, 5301), 'numpy.isfinite', 'numpy.isfinite', (['y'], {}), '(y)\n', (5298, 5301), False, 'import numpy\n')] |
""" Re-parametrization of LASSO regression for ESL."""
import numpy as np
from .esl_regressor import EslRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Lasso, LinearRegression, lars_path
class LassoRegressor(EslRegressor):
    """ LASSO regression re-parametrized by a shrinkage ratio.

    Predictors are standardized (zero mean, unit variance) before the fit;
    the coefficients and intercept are transformed back to the original
    predictor scale afterwards.
    """
    def __init__(self, shrinkage: float):
        """ Constructs a LASSO regressor.
        Args:
            shrinkage: desired ratio between the L1 norm of the coefficients
                under regularization and the L1 norm of the OLS coefficients;
                0 for full shrinkage (all-zero coefficients), 1 for no
                regularization (OLS solution).
        """
        super(LassoRegressor, self).__init__()
        self.shrinkage = shrinkage
        self._fit_response_shape_length = None
        self.coef_ = None
        self.intercept_ = None

    @staticmethod
    def lasso_shrinkage(X: np.ndarray, y: np.ndarray, ols_beta_l1: float, alpha: float):
        """ Computes the lasso shrinkage corresponding to a value of the
        regularization parameter ``alpha``.

        Args:
            X: input features, dimensions ``(N, n_features)``.
            y: responses, dimensions ``(N,)``.
            ols_beta_l1: L1 norm of the OLS coefficients.
            alpha: lasso regularization parameter.
        """
        lasso = Lasso(alpha=alpha)
        lasso.fit(X, y)
        return np.linalg.norm(lasso.coef_, ord=1) / ols_beta_l1

    def _fit(self, X: np.ndarray, Y: np.ndarray) -> None:
        """ Trains the regressor.
        Args:
            X: numpy matrix of input features, dimensions ``(N, n_features)``.
            Y: numpy matrix of responses, dimensions ``(N,)`` or ``(N, 1)``.
        """
        self._fit_response_shape_length = len(Y.shape)
        assert len(Y.shape) == 1 or Y.shape[1] == 1
        # standardize predictors; the intercept absorbs the constant term
        scaler = StandardScaler()
        Xc = scaler.fit_transform(X)
        self.intercept_ = np.average(Y) if len(Y.shape) == 1 else np.average(Y, axis=0)
        if len(Y.shape) == 1:
            # bug fix: fit on the standardized predictors Xc (previously raw
            # X was used, while the coefficients were still rescaled by the
            # scaler statistics below)
            norm_coef = self._fit_single_Y(Xc, Y)
            self.coef_ = norm_coef / scaler.scale_
            self.intercept_ -= np.sum(self.coef_ * scaler.mean_)
        else:
            norm_coef = np.zeros((Y.shape[1], X.shape[1]))
            for i_resp in range(Y.shape[1]):
                norm_coef[i_resp, :] = self._fit_single_Y(Xc, Y[:, i_resp])
            self.coef_ = norm_coef / scaler.scale_[np.newaxis, :]
            self.intercept_ -= np.dot(self.coef_, scaler.mean_)

    def _fit_single_Y(self, X: np.ndarray, y: np.ndarray):
        """ Computes lasso coefficients for a single 1d response.

        Walks the LARS path and linearly interpolates between path knots so
        that the L1 norm of the returned coefficients equals
        ``self.shrinkage`` times the L1 norm of the OLS coefficients.
        """
        if self.shrinkage == 0:
            return np.zeros((X.shape[1],))
        _, _, coefs = lars_path(X, y, method='lasso')
        coefs_ols = coefs[:, -1]  # last knot of the path is the OLS solution
        if self.shrinkage == 1:
            return coefs_ols
        coefs_ols_l1 = np.linalg.norm(coefs_ols, ord=1)
        shrinkages = np.array([np.linalg.norm(coefs[:, i], ord=1)
                               for i in range(coefs.shape[1])]) / coefs_ols_l1
        i = np.searchsorted(shrinkages, self.shrinkage)
        if self.shrinkage == shrinkages[i]:
            return coefs[:, i]
        # linear interpolation between the two neighbouring path knots
        ls, rs = shrinkages[i - 1], shrinkages[i]
        return coefs[:, i - 1] + (self.shrinkage - ls) / (rs - ls) * (coefs[:, i] - coefs[:, i - 1])

    def _predict(self, X: np.ndarray) -> np.ndarray:
        """ Predicts, returning a 2d array of shape ``(N, n_responses)``."""
        # atleast_1d/atleast_2d make the 1d-response case (scalar intercept,
        # 1d coefficient vector) return a proper (N, 1) array instead of
        # failing on the newaxis indexing; the 2d case is unchanged
        coef = np.atleast_2d(self.coef_)
        intercept = np.atleast_1d(self.intercept_)
        return intercept[np.newaxis, :] + np.dot(X, coef.T)

    @property
    def coeffs(self):
        return self.coef_

    @property
    def intercept(self):
        return self.intercept_
return self.intercept_ | [
"sklearn.linear_model.Lasso",
"numpy.average",
"numpy.searchsorted",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.linalg.norm",
"sklearn.linear_model.lars_path"
] | [((1068, 1086), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'alpha'}), '(alpha=alpha)\n', (1073, 1086), False, 'from sklearn.linear_model import Lasso, LinearRegression, lars_path\n'), ((1628, 1644), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1642, 1644), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2465, 2496), 'sklearn.linear_model.lars_path', 'lars_path', (['X', 'y'], {'method': '"""lasso"""'}), "(X, y, method='lasso')\n", (2474, 2496), False, 'from sklearn.linear_model import Lasso, LinearRegression, lars_path\n'), ((2620, 2652), 'numpy.linalg.norm', 'np.linalg.norm', (['coefs_ols'], {'ord': '(1)'}), '(coefs_ols, ord=1)\n', (2634, 2652), True, 'import numpy as np\n'), ((2781, 2824), 'numpy.searchsorted', 'np.searchsorted', (['shrinkages', 'self.shrinkage'], {}), '(shrinkages, self.shrinkage)\n', (2796, 2824), True, 'import numpy as np\n'), ((1127, 1161), 'numpy.linalg.norm', 'np.linalg.norm', (['lasso.coef_'], {'ord': '(1)'}), '(lasso.coef_, ord=1)\n', (1141, 1161), True, 'import numpy as np\n'), ((1709, 1722), 'numpy.average', 'np.average', (['Y'], {}), '(Y)\n', (1719, 1722), True, 'import numpy as np\n'), ((1749, 1770), 'numpy.average', 'np.average', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (1759, 1770), True, 'import numpy as np\n'), ((1936, 1969), 'numpy.sum', 'np.sum', (['(self.coef_ * scaler.mean_)'], {}), '(self.coef_ * scaler.mean_)\n', (1942, 1969), True, 'import numpy as np\n'), ((2009, 2043), 'numpy.zeros', 'np.zeros', (['(Y.shape[1], X.shape[1])'], {}), '((Y.shape[1], X.shape[1]))\n', (2017, 2043), True, 'import numpy as np\n'), ((2263, 2295), 'numpy.dot', 'np.dot', (['self.coef_', 'scaler.mean_'], {}), '(self.coef_, scaler.mean_)\n', (2269, 2295), True, 'import numpy as np\n'), ((2408, 2431), 'numpy.zeros', 'np.zeros', (['(X.shape[1],)'], {}), '((X.shape[1],))\n', (2416, 2431), True, 'import numpy as np\n'), ((3225, 3248), 'numpy.dot', 'np.dot', (['X', 'self.coef_.T'], {}), '(X, 
self.coef_.T)\n', (3231, 3248), True, 'import numpy as np\n'), ((2685, 2719), 'numpy.linalg.norm', 'np.linalg.norm', (['coefs[:, i]'], {'ord': '(1)'}), '(coefs[:, i], ord=1)\n', (2699, 2719), True, 'import numpy as np\n')] |
import collections
import numpy as np
from guesswhat.statistics.abstract_plotter import *
import seaborn as sns
import pandas as pd
class SuccessDialogueLength(AbstractPlotter):
    """Stacked bar plot of dialogue outcome ratios by dialogue length."""
    def __init__(self, path, games, logger, suffix):
        """
        Args:
            path: output directory for the figure.
            games: iterable of game records exposing ``questions`` and
                ``status`` attributes.
            logger: unused here; kept for the common plotter interface.
            suffix: filename suffix forwarded to the base plotter.
        """
        super(SuccessDialogueLength, self).__init__(path, self.__class__.__name__, suffix)

        status_list = []
        length_list = []
        for game in games:
            length_list.append(len(game.questions))
            status_list.append(game.status)

        # one-hot encode each status (the + 0 casts booleans to ints)
        success = np.array([s == "success" for s in status_list]) + 0
        failure = np.array([s == "failure" for s in status_list]) + 0
        incomp = np.array([s == "incomplete" for s in status_list]) + 0

        sns.set_style("whitegrid", {"axes.grid": False})

        # only keep the 'Incomplete' column when such games exist
        if sum(incomp) > 0:
            columns = ['Size of Dialogues', 'Success', 'Failure', 'Incomplete']
            data = np.array([length_list, success, failure, incomp]).transpose()
        else:
            columns = ['Size of Dialogues', 'Success', 'Failure']
            data = np.array([length_list, success, failure]).transpose()

        df = pd.DataFrame(data, columns=columns)
        # bug fix: DataFrame.convert_objects was removed from pandas;
        # pd.to_numeric is the supported replacement
        df = df.apply(pd.to_numeric, errors='coerce')
        df = df.groupby('Size of Dialogues').sum()
        df = df.div(df.sum(axis=1), axis=0)  # normalize each bar to ratios

        f = df.plot(kind="bar", stacked=True, width=1, alpha=0.3)
        f.set_xlim(-0.5, 29.5)

        plt.xlabel("Size of Dialogues", {'size': '14'})
        plt.ylabel("Success ratio", {'size': '14'})
| [
"seaborn.set_style",
"numpy.array",
"collections.defaultdict",
"pandas.DataFrame"
] | [((374, 402), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (397, 402), False, 'import collections\n'), ((821, 869), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'axes.grid': False}"], {}), "('whitegrid', {'axes.grid': False})\n", (834, 869), True, 'import seaborn as sns\n'), ((1227, 1262), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (1239, 1262), True, 'import pandas as pd\n'), ((617, 666), 'numpy.array', 'np.array', (["[(s == 'success') for s in status_list]"], {}), "([(s == 'success') for s in status_list])\n", (625, 666), True, 'import numpy as np\n'), ((687, 736), 'numpy.array', 'np.array', (["[(s == 'failure') for s in status_list]"], {}), "([(s == 'failure') for s in status_list])\n", (695, 736), True, 'import numpy as np\n'), ((757, 809), 'numpy.array', 'np.array', (["[(s == 'incomplete') for s in status_list]"], {}), "([(s == 'incomplete') for s in status_list])\n", (765, 809), True, 'import numpy as np\n'), ((998, 1047), 'numpy.array', 'np.array', (['[length_list, success, failure, incomp]'], {}), '([length_list, success, failure, incomp])\n', (1006, 1047), True, 'import numpy as np\n'), ((1159, 1200), 'numpy.array', 'np.array', (['[length_list, success, failure]'], {}), '([length_list, success, failure])\n', (1167, 1200), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Gamut変換の特性を調べる。
"""
# 外部ライブラリのインポート
import os
import numpy as np
import matplotlib.pyplot as plt
# 自作ライブラリのインポート
import test_pattern_generator2 as tpg
import color_space as cs
import transfer_functions as tf
from CalcParameters import CalcParameters
import plot_utility as pu
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
base_param = {
'revision': 1,
'inner_sample_num': 4,
'outer_sample_num': 6,
'hue_devide_num': 4,
'img_width': 1920,
'img_height': 1080,
'pattern_space_rate': 0.71,
'inner_gamut_name': 'ITR-R BT.709',
'outer_gamut_name': 'ITU-R BT.2020',
'inner_primaries': np.array(tpg.get_primaries(cs.BT709)[0]),
'outer_primaries': np.array(tpg.get_primaries(cs.BT2020)[0]),
'transfer_function': tf.SRGB,
'background_luminance': 2,
'reference_white': 100
}
def plot_chromaticity_diagram(
base_param, outer_xyY, outer_ref_xyY,
xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9):
xy_image = tpg.get_chromaticity_image(
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
cmf_xy = tpg._get_cmfs_xy()
xlim = (min(0, xmin), max(0.8, xmax))
ylim = (min(0, ymin), max(0.9, ymax))
figsize_h = 8 * 1.0
figsize_v = 9 * 1.0
rate = 1.3
# gamut の用意
outer_gamut = base_param['outer_primaries']
inner_gamut = base_param['inner_primaries']
outer_name = base_param['outer_gamut_name']
inner_name = base_param['inner_gamut_name']
ax1 = pu.plot_1_graph(fontsize=20 * rate,
figsize=(figsize_h, figsize_v),
graph_title="CIE1931 Chromaticity Diagram",
xlabel=None, ylabel=None,
legend_size=18 * rate,
xlim=xlim, ylim=ylim,
xtick=[x * 0.1 + xmin for x in
range(int((xlim[1] - xlim[0])/0.1) + 1)],
ytick=[x * 0.1 + ymin for x in
range(int((ylim[1] - ylim[0])/0.1) + 1)],
xtick_size=17 * rate,
ytick_size=17 * rate,
linewidth=4 * rate,
minor_xtick_num=2, minor_ytick_num=2)
ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
'-k', lw=3.5*rate, label=None)
ax1.plot(inner_gamut[:, 0], inner_gamut[:, 1],
c=UNIVERSAL_COLOR_LIST[0], label=inner_name, lw=2.75*rate)
ax1.plot(outer_gamut[:, 0], outer_gamut[:, 1],
c=UNIVERSAL_COLOR_LIST[3], label=outer_name, lw=2.75*rate)
ax1.plot(tpg.D65_WHITE[0], tpg.D65_WHITE[1], marker='x', c='k',
lw=2.75*rate, label='D65', ms=10*rate, mew=2.75*rate)
ax1.plot(outer_ref_xyY[..., 0], outer_ref_xyY[..., 1], ls='', marker='s',
c='#808080', ms=8*rate)
ax1.plot(outer_xyY[..., 0], outer_xyY[..., 1], ls='', marker='o',
c='#808080', ms=8*rate)
# annotation
arrowprops = dict(
facecolor='#333333', shrink=0.0, headwidth=8, headlength=10,
width=2)
# for idx in range(outer_xyY.shape[0]):
# ed_pos = (outer_ref_xyY[idx][0], outer_ref_xyY[idx][1])
# st_pos = (outer_xyY[idx][0], outer_xyY[idx][1])
# ax1.annotate("", xy=ed_pos, xytext=st_pos, xycoords='data',
# textcoords='data', ha='left', va='bottom',
# arrowprops=arrowprops)
for h_idx in range(outer_xyY.shape[0]):
for s_idx in range(outer_xyY.shape[1]):
ed_pos = (outer_ref_xyY[h_idx][s_idx][0],
outer_ref_xyY[h_idx][s_idx][1])
st_pos = (outer_xyY[h_idx][s_idx][0],
outer_xyY[h_idx][s_idx][1])
ax1.annotate("", xy=ed_pos, xytext=st_pos, xycoords='data',
textcoords='data', ha='left', va='bottom',
arrowprops=arrowprops)
ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
plt.legend(loc='upper right')
png_file_name = "./are.png"
plt.savefig(png_file_name, bbox_inches='tight')
plt.show()
def make_outer_xyY(inner_xyY, outer_xyY):
"""
outer_xy の index[0] がちょうど inner_gamut の edge に
なるように、outer_xy にデータを追加する。
"""
start_index = inner_xyY.shape[1] - 1
xyY_array = np.append(inner_xyY, outer_xyY, axis=1)
return xyY_array[:, start_index:, :]
def main_func():
cp = CalcParameters(base_param)
draw_param = cp.calc_parameters()
outer_xyY = make_outer_xyY(
draw_param['inner_xyY'], draw_param['outer_xyY'])
outer_ref_xyY = make_outer_xyY(
draw_param['inner_ref_xyY'], draw_param['outer_ref_xyY'])
# plot_chromaticity_diagram(base_param, outer_xyY[4], outer_ref_xyY[4])
plot_chromaticity_diagram(base_param, outer_xyY, outer_ref_xyY)
print(outer_xyY[4])
print(outer_ref_xyY[4])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
main_func()
| [
"matplotlib.pyplot.savefig",
"test_pattern_generator2.get_chromaticity_image",
"test_pattern_generator2._get_cmfs_xy",
"numpy.append",
"test_pattern_generator2.get_primaries",
"os.path.abspath",
"CalcParameters.CalcParameters",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1086, 1156), 'test_pattern_generator2.get_chromaticity_image', 'tpg.get_chromaticity_image', ([], {'xmin': 'xmin', 'xmax': 'xmax', 'ymin': 'ymin', 'ymax': 'ymax'}), '(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)\n', (1112, 1156), True, 'import test_pattern_generator2 as tpg\n'), ((1179, 1197), 'test_pattern_generator2._get_cmfs_xy', 'tpg._get_cmfs_xy', ([], {}), '()\n', (1195, 1197), True, 'import test_pattern_generator2 as tpg\n'), ((4171, 4200), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4181, 4200), True, 'import matplotlib.pyplot as plt\n'), ((4237, 4284), 'matplotlib.pyplot.savefig', 'plt.savefig', (['png_file_name'], {'bbox_inches': '"""tight"""'}), "(png_file_name, bbox_inches='tight')\n", (4248, 4284), True, 'import matplotlib.pyplot as plt\n'), ((4289, 4299), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4297, 4299), True, 'import matplotlib.pyplot as plt\n'), ((4498, 4537), 'numpy.append', 'np.append', (['inner_xyY', 'outer_xyY'], {'axis': '(1)'}), '(inner_xyY, outer_xyY, axis=1)\n', (4507, 4537), True, 'import numpy as np\n'), ((4607, 4633), 'CalcParameters.CalcParameters', 'CalcParameters', (['base_param'], {}), '(base_param)\n', (4621, 4633), False, 'from CalcParameters import CalcParameters\n'), ((750, 777), 'test_pattern_generator2.get_primaries', 'tpg.get_primaries', (['cs.BT709'], {}), '(cs.BT709)\n', (767, 777), True, 'import test_pattern_generator2 as tpg\n'), ((815, 843), 'test_pattern_generator2.get_primaries', 'tpg.get_primaries', (['cs.BT2020'], {}), '(cs.BT2020)\n', (832, 843), True, 'import test_pattern_generator2 as tpg\n'), ((5120, 5145), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5135, 5145), False, 'import os\n')] |
#! /usr/bin/env python2.7
prefixes = ('af', 'as', 'au', 'ca', 'eu', 'na', 'sa')
import matplotlib
matplotlib.use('Agg')
import dem as d
from matplotlib import pyplot as plt
from demMethods import plotGrids
import numpy as np
import demMethods as dm
kss_to_report = [100.0, 150.0, 200.0]
a = [0, 20000, 0, 2000000]
suffix = '0_4'
dx = 100.0
dy = 10000.0
contours = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]
basin_lengths = [50000, 100000, 200000, 400000]
x_bins = np.arange(a[0],a[1],dx)
y_bins = np.arange(a[2],a[3],dy)
f = open('stats_for_concavity0_4.txt','w')
for basin_length in basin_lengths:
chi_vec = np.array([])
relief_vec = np.array([])
for prefix in prefixes:
print(prefix)
chi = d.GeographicChi.load(prefix + '_chi_' + str(basin_length) + '_' + suffix + '_1000000')
relief = d.ChiScaledRelief.load(prefix + '_relief_' + str(basin_length) + '_' + suffix + '_1000000')
this_chi, this_relief = dm.extract_values_from_grid(chi, relief, ignore_zeros=True)
chi_vec = np.concatenate((chi_vec, this_chi))
relief_vec = np.concatenate((relief_vec, this_relief))
ks_vec = this_relief / this_chi
for ks_to_report in kss_to_report:
i = np.where(ks_vec > ks_to_report)
f.write('Fraction of points exceeding threshold: ' + str(ks_to_report) + '; for dataset: ' + prefix + '; basin length: ' + str(basin_length) + '; concavity: ' + suffix + ': ' + str(float(len(i[0]))/float(len(this_chi))) + '\n')
H, xedges, yedges = dm.create_density(this_chi,this_relief,x_bins,y_bins)
H = np.flipud(H.T)
H = H / np.sum(H) / dx / dy
v = np.ndarray.flatten(H)
v = np.sort(v)
vc = np.cumsum(v) * dx * dy
plt.figure()
plt.imshow(np.log10(H), extent = a)
plt.colorbar()
plt.ion()
plt.axis('normal')
for contour in contours:
i = np.where(vc >= contour)
i = np.min(i)
contour_value = v[i]
plt.contour(np.flipud(H) > contour_value, levels = [0], extent = a)
plt.savefig('density_' + prefix + '_' + str(basin_length) + '_' + suffix + '.eps')
ks_vec = relief_vec / chi_vec
for ks_to_report in kss_to_report:
i = np.where(ks_vec > ks_to_report)
f.write('Fraction of points exceeding threshold: ' + str(ks_to_report) + '; basin length: ' + str(basin_length) + '; concavity: ' + suffix + ': ' + str(float(len(i[0]))/float(len(this_chi))) + '\n')
H, xedges, yedges = dm.create_density(chi_vec,relief_vec,x_bins,y_bins)
H = np.flipud(H.T)
H = H / np.sum(H) / dx / dy
v = np.ndarray.flatten(H)
v = np.sort(v)
vc = np.cumsum(v) * dx * dy
plt.figure()
plt.imshow(np.log10(H), extent = a)
plt.colorbar()
plt.ion()
plt.axis('normal')
for contour in contours:
i = np.where(vc >= contour)
i = np.min(i)
contour_value = v[i]
plt.contour(np.flipud(H) > contour_value, levels = [0], extent = a)
plt.savefig('density_' + str(basin_length) + '_'+ suffix + '.eps')
| [
"numpy.log10",
"demMethods.create_density",
"numpy.flipud",
"matplotlib.use",
"numpy.where",
"numpy.sort",
"matplotlib.pyplot.colorbar",
"demMethods.extract_values_from_grid",
"numpy.array",
"numpy.ndarray.flatten",
"matplotlib.pyplot.figure",
"numpy.sum",
"numpy.cumsum",
"numpy.concatenat... | [((100, 121), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (114, 121), False, 'import matplotlib\n'), ((460, 485), 'numpy.arange', 'np.arange', (['a[0]', 'a[1]', 'dx'], {}), '(a[0], a[1], dx)\n', (469, 485), True, 'import numpy as np\n'), ((493, 518), 'numpy.arange', 'np.arange', (['a[2]', 'a[3]', 'dy'], {}), '(a[2], a[3], dy)\n', (502, 518), True, 'import numpy as np\n'), ((611, 623), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (619, 623), True, 'import numpy as np\n'), ((641, 653), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (649, 653), True, 'import numpy as np\n'), ((2535, 2589), 'demMethods.create_density', 'dm.create_density', (['chi_vec', 'relief_vec', 'x_bins', 'y_bins'], {}), '(chi_vec, relief_vec, x_bins, y_bins)\n', (2552, 2589), True, 'import demMethods as dm\n'), ((2595, 2609), 'numpy.flipud', 'np.flipud', (['H.T'], {}), '(H.T)\n', (2604, 2609), True, 'import numpy as np\n'), ((2650, 2671), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['H'], {}), '(H)\n', (2668, 2671), True, 'import numpy as np\n'), ((2680, 2690), 'numpy.sort', 'np.sort', (['v'], {}), '(v)\n', (2687, 2690), True, 'import numpy as np\n'), ((2737, 2749), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2747, 2749), True, 'from matplotlib import pyplot as plt\n'), ((2794, 2808), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2806, 2808), True, 'from matplotlib import pyplot as plt\n'), ((2813, 2822), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2820, 2822), True, 'from matplotlib import pyplot as plt\n'), ((2827, 2845), 'matplotlib.pyplot.axis', 'plt.axis', (['"""normal"""'], {}), "('normal')\n", (2835, 2845), True, 'from matplotlib import pyplot as plt\n'), ((950, 1009), 'demMethods.extract_values_from_grid', 'dm.extract_values_from_grid', (['chi', 'relief'], {'ignore_zeros': '(True)'}), '(chi, relief, ignore_zeros=True)\n', (977, 1009), True, 'import demMethods as dm\n'), ((1028, 
1063), 'numpy.concatenate', 'np.concatenate', (['(chi_vec, this_chi)'], {}), '((chi_vec, this_chi))\n', (1042, 1063), True, 'import numpy as np\n'), ((1085, 1126), 'numpy.concatenate', 'np.concatenate', (['(relief_vec, this_relief)'], {}), '((relief_vec, this_relief))\n', (1099, 1126), True, 'import numpy as np\n'), ((1526, 1582), 'demMethods.create_density', 'dm.create_density', (['this_chi', 'this_relief', 'x_bins', 'y_bins'], {}), '(this_chi, this_relief, x_bins, y_bins)\n', (1543, 1582), True, 'import demMethods as dm\n'), ((1592, 1606), 'numpy.flipud', 'np.flipud', (['H.T'], {}), '(H.T)\n', (1601, 1606), True, 'import numpy as np\n'), ((1655, 1676), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['H'], {}), '(H)\n', (1673, 1676), True, 'import numpy as np\n'), ((1689, 1699), 'numpy.sort', 'np.sort', (['v'], {}), '(v)\n', (1696, 1699), True, 'import numpy as np\n'), ((1744, 1756), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1754, 1756), True, 'from matplotlib import pyplot as plt\n'), ((1809, 1823), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1821, 1823), True, 'from matplotlib import pyplot as plt\n'), ((1832, 1841), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1839, 1841), True, 'from matplotlib import pyplot as plt\n'), ((1850, 1868), 'matplotlib.pyplot.axis', 'plt.axis', (['"""normal"""'], {}), "('normal')\n", (1858, 1868), True, 'from matplotlib import pyplot as plt\n'), ((2271, 2302), 'numpy.where', 'np.where', (['(ks_vec > ks_to_report)'], {}), '(ks_vec > ks_to_report)\n', (2279, 2302), True, 'import numpy as np\n'), ((2765, 2776), 'numpy.log10', 'np.log10', (['H'], {}), '(H)\n', (2773, 2776), True, 'import numpy as np\n'), ((2892, 2915), 'numpy.where', 'np.where', (['(vc >= contour)'], {}), '(vc >= contour)\n', (2900, 2915), True, 'import numpy as np\n'), ((2928, 2937), 'numpy.min', 'np.min', (['i'], {}), '(i)\n', (2934, 2937), True, 'import numpy as np\n'), ((1226, 1257), 'numpy.where', 'np.where', 
(['(ks_vec > ks_to_report)'], {}), '(ks_vec > ks_to_report)\n', (1234, 1257), True, 'import numpy as np\n'), ((1776, 1787), 'numpy.log10', 'np.log10', (['H'], {}), '(H)\n', (1784, 1787), True, 'import numpy as np\n'), ((1918, 1941), 'numpy.where', 'np.where', (['(vc >= contour)'], {}), '(vc >= contour)\n', (1926, 1941), True, 'import numpy as np\n'), ((1958, 1967), 'numpy.min', 'np.min', (['i'], {}), '(i)\n', (1964, 1967), True, 'import numpy as np\n'), ((2705, 2717), 'numpy.cumsum', 'np.cumsum', (['v'], {}), '(v)\n', (2714, 2717), True, 'import numpy as np\n'), ((1713, 1725), 'numpy.cumsum', 'np.cumsum', (['v'], {}), '(v)\n', (1722, 1725), True, 'import numpy as np\n'), ((2622, 2631), 'numpy.sum', 'np.sum', (['H'], {}), '(H)\n', (2628, 2631), True, 'import numpy as np\n'), ((2987, 2999), 'numpy.flipud', 'np.flipud', (['H'], {}), '(H)\n', (2996, 2999), True, 'import numpy as np\n'), ((1623, 1632), 'numpy.sum', 'np.sum', (['H'], {}), '(H)\n', (1629, 1632), True, 'import numpy as np\n'), ((2025, 2037), 'numpy.flipud', 'np.flipud', (['H'], {}), '(H)\n', (2034, 2037), True, 'import numpy as np\n')] |
from ml_tutor.model import BaseModelRegression
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
class LinearRegression(BaseModelRegression):
def __init__(self, learning_rate=0.0001, num_iter=100000, tol=0.00001, visual_training=True):
"""
Creates the Linear Regression algorithm.
:param learning_rate: Learning rate gives the rate of speed where the gradient moves during gradient descent
:param num_iter: Number of times to go through the dataset to train the model.
:param tol: If the difference between old and new values for model parameters are less than this number, training stops.
:param visual_training: If True - the training process will be visualized [NOTE: only in Jupyter Notebook and Google Colab]
"""
super(BaseModelRegression, self).__init__()
self.learning_rate = learning_rate
self.num_iter = num_iter
self.tol = tol
self.visual_training = visual_training
if not super().__is_visual_on__():
self.visual_training = False
print("Visualization is only supported in Jupyter Notebook and Google Colab.")
self.randn_id = None
# Gradient descent params
self.starting_b = 0
self.starting_m = 0
self.b_history = []
self.m_history = []
print("If your dataset is sparse for visual training, random feature will be selected to match required shape.")
print("Required shape for this algorithm is: [N, 1].")
def fit(self, X, y):
"""
Train the model using features (X) as training data and y as target values.
:param X: Features from a dataset
:param y: Target values (This is what you want to predict)
"""
self.X = X
self.y = y
if len(self.y.shape) < 2:
self.y = np.expand_dims(self.y, axis=1)
if len(self.X.shape) < 2:
self.X = np.expand_dims(self.X, axis=1)
if self.X.shape[1] > 1:
if self.visual_training:
print("The dataset is sparse for visual training. This algorithm works only on shape [N, 1].")
print("Random feature selected to match required size.")
print("Current shape of your data: {}".format(self.X.shape))
self.randn_id = np.random.randint(0, self.X.shape[1])
print("Column selected on id: {}".format(self.randn_id))
self.X = self.X[:, self.randn_id]
if len(self.X.shape) < 2:
self.X = np.expand_dims(self.X, axis=1)
print("New shape of your data: {}".format(self.X.shape))
# calling gradient descent function, and output of it is going to be our the best possible (according to our dataset) M and B
self.__gradient_descent__(self.starting_b, self.starting_m)
def __gradient_descent__(self, b, m):
"""
main function for the gradient descent
:param b: Bias or constant
:param m: coefficient for X
"""
self.new_b = b
self.new_m = m
for i in range(self.num_iter):
candidate_m, candidate_b = self.__gradient_descent_step__(self.new_b, self.new_m)
if all(np.abs(candidate_m - self.new_m) <= self.tol) and \
all(np.abs(candidate_b - self.new_b) <= self.tol):
break
self.new_m = candidate_m
self.new_b = candidate_b
if i % 1000 == 0:
self.b_history.append(self.new_b)
self.m_history.append(self.new_m)
if self.visual_training:
self.__visual_training__()
def __visual_training__(self):
"""
Helper function used to crete real time visualization of the training process.
"""
# Import only relevant libraries for Jupyter Notebook if needed
from IPython import display
for i in range(len(self.b_history)):
plt.close()
plt.clf()
plt.figure(figsize=(12, 10))
plt.scatter(self.X, self.y, c='b', label="Training set")
plt.plot(self.X, np.add(np.multiply(self.X, self.m_history[i]), self.b_history[i]), c='r',
label="Regression line")
plt.title("Linear Regression - Training process")
plt.xlabel("Feature value")
plt.ylabel("Target value")
plt.legend(framealpha=1, frameon=True)
display.display(plt.gcf())
display.display()
time.sleep(1)
display.clear_output(wait=True)
def __gradient_descent_step__(self, b, m):
"""
Helper function for Gradient descent. Performs a single step of the gradient optimization.
"""
candidated_b = b - np.multiply(self.learning_rate,
np.sum(-np.multiply(2 / float(len(self.X)),
np.subtract(self.y,
np.add(np.multiply(self.X, m), b))), axis=0))
candidated_m = m - np.multiply(self.learning_rate,
np.sum(np.multiply(2 / float(len(self.X)),
np.multiply(-self.X,
np.subtract(self.y,
np.add(np.multiply(self.X, m), b)))),
axis=0))
return candidated_m, candidated_b
def predict(self, X):
"""
This method performs predictions on the unseen data from your dataset.
:param X: Data samples used to perform prediction on. (Generally a test set)
:return: Predicted labels for each data sample
"""
if X.shape[1] > 2:
if self.visual_training:
X = X[:, self.randn_id]
if X.shape[1] < 2:
X = np.expand_dims(X, axis=1)
y_pred = np.add(np.multiply(X, self.new_m), self.new_b)
return y_pred
def score(self, real, predicted):
"""
Return the MSE computed on real vs. predicted classes.
:param real: Expected targets(generally found in the dataset)
:param predicted: Predicted values by the algorithm
:return: Mean squared error computed on real vs. predicted classes [0. - 1.]
"""
assert len(real) == len(predicted)
return mean_squared_error(real, predicted)
def sklearn_version(self):
"""
Auto-generates sklearn code for a selected algorithm.
NOTE: This function will automatically add one more code cell to your Jupyter Notebook/Google Colab (with the sklearn code inside).
"""
if not super().__is_visual_on__():
print("Supported only in Jupyter Notebook and Google Colab.")
return NotImplementedError
if super().__is_google_colab__():
return "This method is not supported in Google Colab for now :/"
from IPython.core.getipython import get_ipython
contents = """
# If you don't have Sklearn installed execute line below
# pip install sklearn
# This is how you can import LinearRegression using sklearn library
from sklearn.linear_model import LinearRegression
# Define regressor with selected parameters
model = LinearRegression()
# Train the model using dataset you desire
model.fit(X_train, y_train)
# Finally, use trained model to make predictions
predictions = model.predict(X_test)
# Use Score method to make predictions
print(model.score(X_test, y_test))
"""
shell = get_ipython()
payload = dict(
source='set_next_input',
text=contents,
replace=False,
)
shell.payload_manager.write_payload(payload, single=False)
def how_it_works(self, video=False):
"""
Generates theory on how the algorithm works right in the Jupyter Notebook/Google colab.
:param video: Some people prefer video tutorials over reading version. Set this parameter to True if you want video tutorial instead. :)
"""
if not super().__is_visual_on__():
print("Supported only in Jupyter Notebook and Google Colab.")
return NotImplementedError
from IPython.core.getipython import get_ipython
if not video:
content = u"""
<div>
<h1>Linear Regression — Understanding the Theory</h1>
<br>
<img src="https://miro.medium.com/max/770/0*c39Seo5WzCpU4GAn">
<br>
<br>
<p>
Linear regression is probably the simplest approach for statistical learning. It is a good starting point for more advanced approaches, and in fact, many fancy statistical learning techniques can be seen as an extension of linear regression. Therefore, understanding this simple model will build a good base before moving on to more complex approaches.
<br><br>
Linear regression is very good to answer the following questions:<br><br>
- Is there a relationship between 2 variables?<br>
- How strong is the relationship?<br>
- Which variable contributes the most?<br>
- How accurately can we estimate the effect of each variable?<br>
- How accurately can we predict the target?<br>
- Is the relationship linear? (duh)<br>
- Is there an interaction effect?<br>
</p>
<p>
<h2>Estimating the coefficients</h2><br><br>
Let’s assume we only have one variable and one target. Then, linear regression is expressed as:
<br><br>
<img src="https://miro.medium.com/max/770/1*B-U6j1vxqqaYjgTZgunxIg@2x.png">
<br><br>
In the equation above, the betas are the coefficients. These coefficients are what we need in order to make predictions with our model.<br><br>
So how do we find these parameters?<br><br>
To find the parameters, we need to minimize the least squares or the sum of squared errors. Of course, the linear model is not perfect and it will not predict all the data accurately, meaning that there is a difference between the actual value and the prediction. The error is easily calculated with:
<br><br>
<img src="https://miro.medium.com/max/727/1*ly-QBw2oLDVx9M7MzxkKnw@2x.png">
<br><br>
But why are the errors squared?<br><br>
We square the error, because the prediction can be either above or below the true value, resulting in a negative or positive difference respectively. If we did not square the errors, the sum of errors could decrease because of negative differences and not because the model is a good fit. Also, squaring the errors penalizes large differences, and so the minimizing the squared errors “guarantees” a better model. Let’s take a look at a graph to better understand.
<br><br>
<img src="https://miro.medium.com/max/770/1*3CgiH8QI0ZN5LfdmK2t6XQ@2x.png">
<br><br>
In the graph above, the red dots are the true data and the blue line is linear model. The grey lines illustrate the errors between the predicted and the true values. The blue line is thus the one that minimizes the sum of the squared length of the grey lines.
<br><br>After some math that is too heavy for a blog post, you can finally estimate the coefficients with the following equations:<br><br>
<br><br>
<img src="https://miro.medium.com/max/614/1*YOiQ9UpR-A2jHvGR6JZwtQ@2x.png"><br><br>
<img src="https://miro.medium.com/max/339/1*t9rzyx0zh7o5Zx1Y-IQOvg@2x.png">
<br><br>
Where x bar and y bar represent the mean.
<br><br>
<h2>Estimate the relevancy of the coefficients</h2>
<br><br>
Now that you have coefficients, how can you tell if they are relevant to predict your target?<br><br>
The best way is to find the p-value. The p-value is used to quantify statistical significance; it allows to tell whether the null hypothesis is to be rejected or not.<br><br>
The null hypothesis?<br><br>
For any modelling task, the hypothesis is that there is some correlation between the features and the target. The null hypothesis is therefore the opposite: there is no correlation between the features and the target.<br><br>
So, finding the p-value for each coefficient will tell if the variable is statistically significant to predict the target. As a general rule of thumb, if the p-value is less than 0.05: there is a strong relationship between the variable and the target.
<br><br>
<h2>Assess the accuracy of the model</h2>
<br><br>
You found out that your variable was statistically significant by finding its p-value. Great!<br><br>
Now, how do you know if your linear model is any good?<br><br>
To assess that, we usually use the RSE (residual standard error) and the R² statistic.<br><br>
The first error metric is simple to understand: the lower the residual errors, the better the model fits the data (in this case, the closer the data is to a linear relationship).<br><br>
As for the R² metric, it measures the proportion of variability in the target that can be explained using a feature X. Therefore, assuming a linear relationship, if feature X can explain (predict) the target, then the proportion is high and the R² value will be close to 1. If the opposite is true, the R² value is then closer to 0.
<br><br>
</p>
<h1>Author and source:</h1>
<h2>Author: <a target="_blank" href="https://towardsdatascience.com/@marcopeixeiro"><NAME></a></h2>
<h2>To find more resources go to the source of the post: <a target="_blank" href="https://towardsdatascience.com/linear-regression-understanding-the-theory-7e53ac2831b5">Towards data science post</a></h2>
</div>
"""
get_ipython().run_cell_magic(u'html', u'', content)
else:
content = u"""
<div>
<h1> K-Means - How it works? </h1>
<iframe width="560" height="315" src="https://www.youtube.com/embed/kHwlB_j7Hkc" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</div>
"""
get_ipython().run_cell_magic(u'markdown', u'', content)
def interview_questions(self):
"""
Generates commonly asked interview questions about the algorithm in the Jupyter Notebook/Google colab.
"""
if not super().__is_visual_on__():
print("Supported only in Jupyter Notebook and Google Colab.")
return NotImplementedError
from IPython.core.getipython import get_ipython
content = u"""
<h1> Linear Regression Interview Questions </h1>
<h2> 1. Can you list out the critical assumptions of linear regression?</h2>
<p>
There are three crucial assumptions one has to make in linear regression. They are,
<ol>
<li>It is imperative to have a linear relationship between the dependent and independent A scatter plot can prove handy to check out this fact.</li>
<li>The independent variables in the dataset should not exhibit any multi-collinearity. In case they do, it should be at the barest minimum. There should be a restriction on their value depending on the domain requirement.</li>
<li>Homoscedasticity is one of the most critical It states that there should be an equal distribution of errors.</li>
</ol>
</p>
<h2> 2. What is the primary difference between R square and adjusted R square?</h2>
<p>
In linear regression, you use both these values for model validation. However, there is a clear distinction between the two. R square accounts for the variation of all independent variables on the dependent variable. In other words, it considers each independent variable for explaining the variation. In the case of Adjusted R square, it accounts for the significant variables alone for indicating the percentage of variation in the model. By significant, we refer to the P values less than 0.05.
</p>
<h2>3. What is the importance of the F-test in a linear model?</h2>
<p>
The F-test is a crucial one in the sense that it tests the goodness of the model. When you reiterate the model to improve the accuracy with the changes, the F-test proves its utility in understanding the effect of the overall regression.
</p>
<h2>4. What are the disadvantages of the linear regression model?</h2>
<p>
One of the most significant demerits of the linear model is that it is sensitive and dependent on the outliers. It can affect the overall result. Another notable demerit of the linear model is overfitting. Similarly, underfitting is also a significant disadvantage of the linear model.
</p>
<h3> The questions and answers taken from: [<a href="https://www.digitalvidya.com/blog/most-commonly-asked-interview-questions-on-linear-regression">link</a>]</h3>
<h3> Quiz like questions: <a href="https://www.analyticsvidhya.com/blog/2017/07/30-questions-to-test-a-data-scientist-on-linear-regression/" target="_blank">link</a></h3>
"""
get_ipython().run_cell_magic(u'html', u'', content)
| [
"IPython.display.display",
"numpy.multiply",
"numpy.abs",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"time.sleep",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.close",
"IPython.display.clear_output",
"matplotlib.pyplot.... | [((5750, 5785), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['real', 'predicted'], {}), '(real, predicted)\n', (5768, 5785), False, 'from sklearn.metrics import mean_squared_error\n'), ((6840, 6853), 'IPython.core.getipython.get_ipython', 'get_ipython', ([], {}), '()\n', (6851, 6853), False, 'from IPython.core.getipython import get_ipython\n'), ((1711, 1741), 'numpy.expand_dims', 'np.expand_dims', (['self.y'], {'axis': '(1)'}), '(self.y, axis=1)\n', (1725, 1741), True, 'import numpy as np\n'), ((1783, 1813), 'numpy.expand_dims', 'np.expand_dims', (['self.X'], {'axis': '(1)'}), '(self.X, axis=1)\n', (1797, 1813), True, 'import numpy as np\n'), ((3494, 3505), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3503, 3505), True, 'import matplotlib.pyplot as plt\n'), ((3509, 3518), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3516, 3518), True, 'import matplotlib.pyplot as plt\n'), ((3522, 3550), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (3532, 3550), True, 'import matplotlib.pyplot as plt\n'), ((3555, 3611), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.X', 'self.y'], {'c': '"""b"""', 'label': '"""Training set"""'}), "(self.X, self.y, c='b', label='Training set')\n", (3566, 3611), True, 'import matplotlib.pyplot as plt\n'), ((3746, 3795), 'matplotlib.pyplot.title', 'plt.title', (['"""Linear Regression - Training process"""'], {}), "('Linear Regression - Training process')\n", (3755, 3795), True, 'import matplotlib.pyplot as plt\n'), ((3799, 3826), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Feature value"""'], {}), "('Feature value')\n", (3809, 3826), True, 'import matplotlib.pyplot as plt\n'), ((3830, 3856), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Target value"""'], {}), "('Target value')\n", (3840, 3856), True, 'import matplotlib.pyplot as plt\n'), ((3860, 3898), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'framealpha': '(1)', 
'frameon': '(True)'}), '(framealpha=1, frameon=True)\n', (3870, 3898), True, 'import matplotlib.pyplot as plt\n'), ((3933, 3950), 'IPython.display.display', 'display.display', ([], {}), '()\n', (3948, 3950), False, 'from IPython import display\n'), ((3954, 3967), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3964, 3967), False, 'import time\n'), ((3971, 4002), 'IPython.display.clear_output', 'display.clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (3991, 4002), False, 'from IPython import display\n'), ((5298, 5323), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (5312, 5323), True, 'import numpy as np\n'), ((5343, 5369), 'numpy.multiply', 'np.multiply', (['X', 'self.new_m'], {}), '(X, self.new_m)\n', (5354, 5369), True, 'import numpy as np\n'), ((2114, 2151), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.X.shape[1]'], {}), '(0, self.X.shape[1])\n', (2131, 2151), True, 'import numpy as np\n'), ((3919, 3928), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3926, 3928), True, 'import matplotlib.pyplot as plt\n'), ((15604, 15617), 'IPython.core.getipython.get_ipython', 'get_ipython', ([], {}), '()\n', (15615, 15617), False, 'from IPython.core.getipython import get_ipython\n'), ((2295, 2325), 'numpy.expand_dims', 'np.expand_dims', (['self.X'], {'axis': '(1)'}), '(self.X, axis=1)\n', (2309, 2325), True, 'import numpy as np\n'), ((3639, 3677), 'numpy.multiply', 'np.multiply', (['self.X', 'self.m_history[i]'], {}), '(self.X, self.m_history[i])\n', (3650, 3677), True, 'import numpy as np\n'), ((12515, 12528), 'IPython.core.getipython.get_ipython', 'get_ipython', ([], {}), '()\n', (12526, 12528), False, 'from IPython.core.getipython import get_ipython\n'), ((12851, 12864), 'IPython.core.getipython.get_ipython', 'get_ipython', ([], {}), '()\n', (12862, 12864), False, 'from IPython.core.getipython import get_ipython\n'), ((2897, 2929), 'numpy.abs', 'np.abs', (['(candidate_m - self.new_m)'], {}), 
'(candidate_m - self.new_m)\n', (2903, 2929), True, 'import numpy as np\n'), ((2958, 2990), 'numpy.abs', 'np.abs', (['(candidate_b - self.new_b)'], {}), '(candidate_b - self.new_b)\n', (2964, 2990), True, 'import numpy as np\n'), ((4428, 4450), 'numpy.multiply', 'np.multiply', (['self.X', 'm'], {}), '(self.X, m)\n', (4439, 4450), True, 'import numpy as np\n'), ((4837, 4859), 'numpy.multiply', 'np.multiply', (['self.X', 'm'], {}), '(self.X, m)\n', (4848, 4859), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sentencepiece as spm
import tensorflow as tf
import tensorflow.keras.backend as K
import wget
# silence TensorFlow's C++ info/warning logs before TF spins up
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# seed every RNG in play (python, numpy, tensorflow) for reproducible runs
random_seed = 1234
random.seed(random_seed)
np.random.seed(random_seed)
tf.random.set_seed(random_seed)
print(tf.__version__)
print(tf.config.list_physical_devices('GPU'))
print(tf.test.gpu_device_name())
#
# prepare dir
#
# fall back to ../data when the script is launched from a subdirectory
data_dir = './data'
if not os.path.exists(data_dir):
    data_dir = '../data'
print(os.listdir(data_dir))
songys_dir = os.path.join(data_dir, 'songys')
if not os.path.exists(songys_dir):
    os.makedirs(songys_dir)
# destination path for the downloaded chatbot CSV
train_txt = os.path.join(songys_dir, 'ChatbotData.csv')
#
# file check
#
def print_file(filename, count=10):
    """Print the first ``count`` lines of a text file.

    :param filename: path of the file to print
    :param count: maximum number of lines to print (default 10)
    """
    with open(filename) as f:
        for i, line in enumerate(f):
            # the original tested `count < i` *after* printing, which emitted
            # count + 2 lines; stop before printing once `count` lines are out
            if i >= count:
                break
            print(line.strip())
#
# download file
#
# NOTE(review): the space before '.csv' in the URL looks intentional (upstream
# file name) — verify the URL if the download ever fails
wget.download('https://raw.githubusercontent.com/songys/Chatbot_data/master/ChatbotData .csv', train_txt)
print(os.listdir(songys_dir))
print_file(train_txt)
#
# data read
# https://pandas.pydata.org/pandas-docs/stable/index.html
#
# header=0: the first CSV row holds the column names (Q, A, label)
train_data = pd.read_csv(train_txt, header=0, delimiter=',')
print(f'전체 학습 raw 개수: {len(train_data)}')
train_data = train_data.dropna()
print(f'전체 학습 valid 개수: {len(train_data)}')
train_data = train_data.sample(1000) # keep only 1000 rows for a quick run
print(f'전체 학습 sample 개수: {len(train_data)}')
label_counts = train_data['label'].value_counts()
print(f'전체 학습 label 개수: {label_counts}')
#
# vocabulary
#
# load the pretrained Korean sentencepiece model (32k pieces)
vocab_file = os.path.join(data_dir, 'ko_32000.model')
vocab = spm.SentencePieceProcessor()
vocab.load(vocab_file)
#
# tokenize
#
# split every question (Q) and answer (A) into sentencepiece tokens
questions, answers = [], []
for i, row in train_data.iterrows():
    question = vocab.encode_as_pieces(row['Q'])
    questions.append(question)
    answer = vocab.encode_as_pieces(row['A'])
    answers.append(answer)
assert len(questions) == len(answers)
print(questions[:100])
print(answers[:100])
#
# token to id
#
# map every token piece to its integer vocabulary id
question_ids = [[vocab.piece_to_id(p) for p in question] for question in questions]
answer_ids = [[vocab.piece_to_id(p) for p in answer] for answer in answers]
print(question_ids[:100])
print(answer_ids[:100])
#
# pad
#
# sequences are ragged, so numpy cannot build a rectangular matrix yet
print(np.array(question_ids)[:50])
print(np.array(answer_ids)[:50])
# inspect the per-sequence token lengths
question_length = [len(question_id) for question_id in question_ids]
print(question_length[:100])
answer_length = [len(answer_id) for answer_id in answer_ids]
print(answer_length[:100])
# BUGFIX: the original assigned the two maxima to swapped names; only the
# combined max was used downstream, but the printed values were mislabeled
answer_max_length, question_max_length = max(answer_length), max(question_length)
# fixed sequence length; +2 leaves room for the decoder's BOS/EOS tokens
n_seq = max(answer_max_length, question_max_length) + 2
print(answer_max_length, question_max_length, n_seq)
# pad id
pad_id = vocab.pad_id()
print('pad_id:', pad_id)
#
# inputs
#
# train numpy matrices, pre-filled with zeros (the pad id)
enc_inputs = np.zeros((len(question_ids), n_seq))
dec_inputs = np.zeros((len(answer_ids), n_seq))
dec_labels = np.zeros((len(answer_ids), n_seq))
print(enc_inputs.shape, enc_inputs[0], enc_inputs[-1])
print(dec_inputs.shape, dec_inputs[0], dec_inputs[-1])
print(dec_labels.shape, dec_labels[0], dec_labels[-1])
# assign question_ids to enc_inputs
for i, token_id in enumerate(question_ids):
    # build a padded copy; the original used `+=`, which extended the source
    # lists inside question_ids in place
    token_id = token_id + [0] * (n_seq - len(token_id))
    token_id = token_id[:n_seq]
    assert len(token_id) == n_seq
    enc_inputs[i] = token_id
print(enc_inputs.shape, enc_inputs[0], enc_inputs[-1])
# assign answer_ids to dec_inputs (BOS + tokens) and dec_labels (tokens + EOS)
n_max = n_seq - 1
for i, token_id in enumerate(answer_ids):
    token_id = token_id[:n_max]
    dec_input = [vocab.bos_id()] + token_id
    dec_input += [0] * (n_seq - len(dec_input))
    dec_label = token_id + [vocab.eos_id()]
    dec_label += [0] * (n_seq - len(dec_label))
    assert len(dec_input) == len(dec_label) == n_seq
    dec_inputs[i] = dec_input
    dec_labels[i] = dec_label
# np.int was deprecated in NumPy 1.20 and removed in 1.24; builtin int is the
# documented replacement
print(dec_inputs.shape, dec_inputs[0].astype(int), dec_inputs[-1].astype(int))
print(dec_labels.shape, dec_labels[0].astype(int), dec_labels[-1].astype(int))
train_inputs = (enc_inputs, dec_inputs)
#
# loss and accuracy
#
def lm_loss(y_true, y_pred):
    """Per-token sparse cross-entropy that ignores pad (id 0) positions.

    :param y_true: target token ids
    :param y_pred: predicted token probability distributions
    :return: per-position loss with pad positions zeroed out
    """
    ce = tf.keras.losses.SparseCategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    per_token = ce(y_true, y_pred)
    # 1.0 where a real token sits, 0.0 on padding
    pad_mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
    return per_token * pad_mask
def lm_acc(y_true, y_pred):
    """Token-level accuracy that ignores pad (id 0) positions.

    :param y_true: target token ids
    :param y_pred: predicted token probability distributions
    :return: accuracy over non-pad positions
    """
    predicted = tf.cast(K.argmax(y_pred, axis=-1), tf.float32)
    truth = tf.cast(y_true, tf.float32)
    hits = tf.cast(K.equal(truth, predicted), tf.float32)
    # 1.0 where a real token sits, 0.0 on padding
    pad_mask = tf.cast(tf.not_equal(truth, 0), tf.float32)
    hits = hits * pad_mask
    # K.maximum guards against division by zero on an all-pad batch
    return K.sum(hits) / K.maximum(K.sum(pad_mask), 1)
#
# rnn
#
def build_model_rnn(n_vocab, d_model):
    """Build a plain RNN encoder-decoder language model.

    :param n_vocab: vocabulary size
    :param d_model: hidden dimension
    :return: tf.keras.Model taking (enc_inputs, dec_inputs), emitting
        per-position token probabilities
    """
    enc_in = tf.keras.layers.Input((None,))
    dec_in = tf.keras.layers.Input((None,))
    # one embedding table shared by encoder and decoder
    shared_embed = tf.keras.layers.Embedding(n_vocab, d_model)
    # encoder: only its final state is kept, to seed the decoder
    _, enc_state = tf.keras.layers.SimpleRNN(
        units=d_model, return_state=True)(shared_embed(enc_in))
    # decoder: full sequence of hidden states, initialized from the encoder
    dec_seq = tf.keras.layers.SimpleRNN(units=d_model, return_sequences=True)(
        shared_embed(dec_in), initial_state=[enc_state])
    probs = tf.keras.layers.Dense(n_vocab, activation=tf.nn.softmax)(dec_seq)
    return tf.keras.Model(inputs=(enc_in, dec_in), outputs=probs)
# model build
model_rnn = build_model_rnn(len(vocab), 256)
print(model_rnn.summary())
# compile
# https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam
model_rnn.compile(loss=lm_loss, optimizer=tf.keras.optimizers.Adam(), metrics=[lm_acc])
# stop once the masked token accuracy has not improved for 10 epochs
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='lm_acc', patience=10)
# checkpoint only the best (highest lm_acc) weights, weights-only HDF5
save_rnn_file = os.path.join(songys_dir, 'rnn.hdf5')
save_weights = tf.keras.callbacks.ModelCheckpoint(save_rnn_file, monitor='lm_acc', verbose=1, save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
# train
history = model_rnn.fit(train_inputs, dec_labels, epochs=500, batch_size=128, callbacks=[early_stopping, save_weights])
def draw_history(history, acc='lm_acc'):
    """Plot training loss and the given accuracy metric side by side.

    :param history: keras History object returned by model.fit
    :param acc: key of the accuracy metric inside history.history
    """
    plt.figure(figsize=(12, 4))
    panels = [('loss', 'b-', 'loss'), (acc, 'g-', acc)]
    for idx, (key, fmt, label) in enumerate(panels, start=1):
        plt.subplot(1, 2, idx)
        plt.plot(history.history[key], fmt, label=label)
        plt.xlabel('Epoch')
        plt.legend()
    plt.show()
# plot the loss / accuracy curves of the run
draw_history(history)
#
# chat inference
#
# encode the prompt into sentencepiece tokens, then into ids
string = '안녕 만나서 반가워'
enc_tokens = vocab.encode_as_pieces(string)
print(enc_tokens)
enc_token_id = [vocab.piece_to_id(p) for p in enc_tokens][:n_max]
print(enc_token_id)
# pad the encoder input out to the fixed length n_seq
enc_inputs = enc_token_id
enc_inputs += [0] * (n_seq - len(enc_inputs))
assert len(enc_inputs) == n_seq
print(enc_inputs)
# decoder input starts as BOS followed by pads
dec_inputs = [vocab.bos_id()]
dec_inputs += [0] * (n_seq - len(dec_inputs))
assert len(dec_inputs) == n_seq
print(dec_inputs)
results = []
print(results)
# step 1: sample the first response token from the model's distribution
result = model_rnn.predict((np.array([enc_inputs]), np.array([dec_inputs])))
prob = result[0][0]
print(prob.shape, prob)
word_id = int(np.random.choice(len(vocab), 1, p=prob)[0])
print(word_id)
results.append(word_id)
dec_inputs[1] = word_id
print(dec_inputs)
# step 2: feed the sampled token back in and sample the next one
result = model_rnn.predict((np.array([enc_inputs]), np.array([dec_inputs])))
prob = result[0][1]
print(prob.shape, prob)
word_id = int(np.random.choice(len(vocab), 1, p=prob)[0])
print(word_id)
results.append(word_id)
dec_inputs[2] = word_id
print(dec_inputs)
# step 3: one more manual autoregressive step (do_predict automates this loop)
result = model_rnn.predict((np.array([enc_inputs]), np.array([dec_inputs])))
prob = result[0][2]
print(prob.shape, prob)
word_id = int(np.random.choice(len(vocab), 1, p=prob)[0])
print(word_id)
def do_predict(vocab, model, n_seq, string):
    """Generate a response token-by-token by sampling from the model.

    :param vocab: sentencepiece vocab
    :param model: seq2seq model taking [enc_inputs, dec_inputs]
    :param n_seq: fixed sequence length
    :param string: input utterance
    :return: decoded response string (stops at EOS or after n_seq - 1 steps)
    """
    enc_ids = vocab.encode_as_ids(string)[:n_seq]
    enc_ids += [0] * (n_seq - len(enc_ids))
    assert len(enc_ids) == n_seq
    # decoder starts from BOS followed by pads
    dec_ids = [vocab.bos_id()]
    dec_ids += [0] * (n_seq - len(dec_ids))
    generated = []
    for step in range(n_seq - 1):
        outputs = model.predict([np.array([enc_ids]), np.array([dec_ids])])
        step_prob = outputs[0][step]
        # sample the next token from the predicted distribution
        token = int(np.random.choice(len(vocab), 1, p=step_prob)[0])
        if token == vocab.eos_id():
            break
        generated.append(token)
        dec_ids[step + 1] = token
    return vocab.decode_ids(generated)
# rebuild an untrained RNN and sample a reply before loading weights
model_rnn = build_model_rnn(len(vocab), 256)
print(model_rnn.summary())
string = '안녕 만나서 반가워'
print(do_predict(vocab, model_rnn, n_seq, string))
# restore the best checkpoint and sample again for comparison
model_rnn.load_weights(save_rnn_file)
print(do_predict(vocab, model_rnn, n_seq, string))
#
# bi rnn
#
def build_model_bi_rnn(n_vocab, d_model):
    """Build an encoder-decoder language model with a bidirectional encoder.

    :param n_vocab: vocabulary size
    :param d_model: hidden dimension of each encoder direction
    :return: tf.keras.Model taking (enc_inputs, dec_inputs), emitting
        per-position token probabilities
    """
    enc_in = tf.keras.layers.Input((None,))
    dec_in = tf.keras.layers.Input((None,))
    # one embedding table shared by encoder and decoder
    shared_embed = tf.keras.layers.Embedding(n_vocab, d_model)
    _, fwd_state, bwd_state = tf.keras.layers.Bidirectional(
        tf.keras.layers.SimpleRNN(units=d_model, return_state=True))(
        shared_embed(enc_in))
    # the decoder state is the concat of both directions, hence 2 * d_model
    init_state = tf.concat([fwd_state, bwd_state], axis=-1)
    dec_seq = tf.keras.layers.SimpleRNN(
        units=d_model * 2, return_sequences=True)(
        shared_embed(dec_in), initial_state=[init_state])
    probs = tf.keras.layers.Dense(n_vocab, activation=tf.nn.softmax)(dec_seq)
    return tf.keras.Model(inputs=(enc_in, dec_in), outputs=probs)
# model build
model_bi_rnn = build_model_bi_rnn(len(vocab), 256)
print(model_bi_rnn.summary())
# compile
# https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam
model_bi_rnn.compile(loss=lm_loss, optimizer=tf.keras.optimizers.Adam(), metrics=[lm_acc])
# stop once the masked token accuracy has not improved for 10 epochs
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='lm_acc', patience=10)
# checkpoint only the best (highest lm_acc) weights, weights-only HDF5
save_bi_rnn_file = os.path.join(songys_dir, 'bi_rnn.hdf5')
save_weights = tf.keras.callbacks.ModelCheckpoint(save_bi_rnn_file, monitor='lm_acc', verbose=1, save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
# train
history = model_bi_rnn.fit(train_inputs, dec_labels, epochs=500, batch_size=128, callbacks=[early_stopping, save_weights])
# history
draw_history(history)
# rebuild an untrained bi-RNN, then compare before/after loading the weights
model_bi_rnn = build_model_bi_rnn(len(vocab), 256)
print(model_bi_rnn.summary())
# BUGFIX: the original called do_predict with model_rnn here, so the
# "untrained" sample did not come from the freshly built bi-RNN
print(do_predict(vocab, model_bi_rnn, n_seq, string))
model_bi_rnn.load_weights(save_bi_rnn_file)
print(do_predict(vocab, model_bi_rnn, n_seq, string))
| [
"wget.download",
"pandas.read_csv",
"sentencepiece.SentencePieceProcessor",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.array",
"tensorflow.config.list_physical_devices",
"tensorflow.keras.layers.Dense",
"tensorflow.cast",
"os.path.exists",
"tensorflow.keras.layers.Input",
"os.listdir",
... | [((282, 306), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (293, 306), False, 'import random\n'), ((307, 334), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (321, 334), True, 'import numpy as np\n'), ((335, 366), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['random_seed'], {}), '(random_seed)\n', (353, 366), True, 'import tensorflow as tf\n'), ((608, 640), 'os.path.join', 'os.path.join', (['data_dir', '"""songys"""'], {}), "(data_dir, 'songys')\n", (620, 640), False, 'import os\n'), ((717, 760), 'os.path.join', 'os.path.join', (['songys_dir', '"""ChatbotData.csv"""'], {}), "(songys_dir, 'ChatbotData.csv')\n", (729, 760), False, 'import os\n'), ((1089, 1204), 'wget.download', 'wget.download', (['"""https://raw.githubusercontent.com/songys/Chatbot_data/master/ChatbotData .csv"""', 'train_txt'], {}), "(\n 'https://raw.githubusercontent.com/songys/Chatbot_data/master/ChatbotData .csv'\n , train_txt)\n", (1102, 1204), False, 'import wget\n'), ((1358, 1405), 'pandas.read_csv', 'pd.read_csv', (['train_txt'], {'header': '(0)', 'delimiter': '""","""'}), "(train_txt, header=0, delimiter=',')\n", (1369, 1405), True, 'import pandas as pd\n'), ((1766, 1806), 'os.path.join', 'os.path.join', (['data_dir', '"""ko_32000.model"""'], {}), "(data_dir, 'ko_32000.model')\n", (1778, 1806), False, 'import os\n'), ((1815, 1843), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {}), '()\n', (1841, 1843), True, 'import sentencepiece as spm\n'), ((6345, 6408), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""lm_acc"""', 'patience': '(10)'}), "(monitor='lm_acc', patience=10)\n", (6377, 6408), True, 'import tensorflow as tf\n'), ((6440, 6476), 'os.path.join', 'os.path.join', (['songys_dir', '"""rnn.hdf5"""'], {}), "(songys_dir, 'rnn.hdf5')\n", (6452, 6476), False, 'import os\n'), ((6492, 6655), 'tensorflow.keras.callbacks.ModelCheckpoint', 
'tf.keras.callbacks.ModelCheckpoint', (['save_rnn_file'], {'monitor': '"""lm_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""', 'save_freq': '"""epoch"""', 'save_weights_only': '(True)'}), "(save_rnn_file, monitor='lm_acc', verbose\n =1, save_best_only=True, mode='max', save_freq='epoch',\n save_weights_only=True)\n", (6526, 6655), True, 'import tensorflow as tf\n'), ((10925, 10988), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""lm_acc"""', 'patience': '(10)'}), "(monitor='lm_acc', patience=10)\n", (10957, 10988), True, 'import tensorflow as tf\n'), ((11023, 11062), 'os.path.join', 'os.path.join', (['songys_dir', '"""bi_rnn.hdf5"""'], {}), "(songys_dir, 'bi_rnn.hdf5')\n", (11035, 11062), False, 'import os\n'), ((11078, 11243), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['save_bi_rnn_file'], {'monitor': '"""lm_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""', 'save_freq': '"""epoch"""', 'save_weights_only': '(True)'}), "(save_bi_rnn_file, monitor='lm_acc',\n verbose=1, save_best_only=True, mode='max', save_freq='epoch',\n save_weights_only=True)\n", (11112, 11243), True, 'import tensorflow as tf\n'), ((396, 434), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (427, 434), True, 'import tensorflow as tf\n'), ((442, 467), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (465, 467), True, 'import tensorflow as tf\n'), ((515, 539), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (529, 539), False, 'import os\n'), ((572, 592), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (582, 592), False, 'import os\n'), ((648, 674), 'os.path.exists', 'os.path.exists', (['songys_dir'], {}), '(songys_dir)\n', (662, 674), False, 'import os\n'), ((680, 703), 'os.makedirs', 'os.makedirs', (['songys_dir'], {}), 
'(songys_dir)\n', (691, 703), False, 'import os\n'), ((1201, 1223), 'os.listdir', 'os.listdir', (['songys_dir'], {}), '(songys_dir)\n', (1211, 1223), False, 'import os\n'), ((4919, 4946), 'tensorflow.cast', 'tf.cast', (['y_true', 'tf.float32'], {}), '(y_true, tf.float32)\n', (4926, 4946), True, 'import tensorflow as tf\n'), ((5363, 5393), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(None,)'], {}), '((None,))\n', (5384, 5393), True, 'import tensorflow as tf\n'), ((5411, 5441), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(None,)'], {}), '((None,))\n', (5432, 5441), True, 'import tensorflow as tf\n'), ((5459, 5502), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['n_vocab', 'd_model'], {}), '(n_vocab, d_model)\n', (5484, 5502), True, 'import tensorflow as tf\n'), ((5971, 6035), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '(enc_inputs, dec_inputs)', 'outputs': 'outputs'}), '(inputs=(enc_inputs, dec_inputs), outputs=outputs)\n', (5985, 6035), True, 'import tensorflow as tf\n'), ((6933, 6960), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (6943, 6960), True, 'import matplotlib.pyplot as plt\n'), ((6966, 6986), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (6977, 6986), True, 'import matplotlib.pyplot as plt\n'), ((6991, 7044), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']", '"""b-"""'], {'label': '"""loss"""'}), "(history.history['loss'], 'b-', label='loss')\n", (6999, 7044), True, 'import matplotlib.pyplot as plt\n'), ((7049, 7068), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (7059, 7068), True, 'import matplotlib.pyplot as plt\n'), ((7073, 7085), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7083, 7085), True, 'import matplotlib.pyplot as plt\n'), ((7091, 7111), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 
2)\n', (7102, 7111), True, 'import matplotlib.pyplot as plt\n'), ((7116, 7163), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[acc]', '"""g-"""'], {'label': 'acc'}), "(history.history[acc], 'g-', label=acc)\n", (7124, 7163), True, 'import matplotlib.pyplot as plt\n'), ((7168, 7187), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (7178, 7187), True, 'import matplotlib.pyplot as plt\n'), ((7192, 7204), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7202, 7204), True, 'import matplotlib.pyplot as plt\n'), ((7210, 7220), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7218, 7220), True, 'import matplotlib.pyplot as plt\n'), ((9821, 9851), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(None,)'], {}), '((None,))\n', (9842, 9851), True, 'import tensorflow as tf\n'), ((9869, 9899), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(None,)'], {}), '((None,))\n', (9890, 9899), True, 'import tensorflow as tf\n'), ((9917, 9960), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['n_vocab', 'd_model'], {}), '(n_vocab, d_model)\n', (9942, 9960), True, 'import tensorflow as tf\n'), ((10184, 10216), 'tensorflow.concat', 'tf.concat', (['[fw_h, bw_h]'], {'axis': '(-1)'}), '([fw_h, bw_h], axis=-1)\n', (10193, 10216), True, 'import tensorflow as tf\n'), ((10539, 10603), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '(enc_inputs, dec_inputs)', 'outputs': 'outputs'}), '(inputs=(enc_inputs, dec_inputs), outputs=outputs)\n', (10553, 10603), True, 'import tensorflow as tf\n'), ((2455, 2477), 'numpy.array', 'np.array', (['question_ids'], {}), '(question_ids)\n', (2463, 2477), True, 'import numpy as np\n'), ((2490, 2510), 'numpy.array', 'np.array', (['answer_ids'], {}), '(answer_ids)\n', (2498, 2510), True, 'import numpy as np\n'), ((4479, 4571), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'reduction': 
'tf.keras.losses.Reduction.NONE'}), '(reduction=tf.keras.losses.\n Reduction.NONE)\n', (4524, 4571), True, 'import tensorflow as tf\n'), ((4602, 4625), 'tensorflow.not_equal', 'tf.not_equal', (['y_true', '(0)'], {}), '(y_true, 0)\n', (4614, 4625), True, 'import tensorflow as tf\n'), ((4867, 4892), 'tensorflow.keras.backend.argmax', 'K.argmax', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (4875, 4892), True, 'import tensorflow.keras.backend as K\n'), ((4969, 4998), 'tensorflow.keras.backend.equal', 'K.equal', (['y_true', 'y_pred_class'], {}), '(y_true, y_pred_class)\n', (4976, 4998), True, 'import tensorflow.keras.backend as K\n'), ((5031, 5054), 'tensorflow.not_equal', 'tf.not_equal', (['y_true', '(0)'], {}), '(y_true, 0)\n', (5043, 5054), True, 'import tensorflow as tf\n'), ((5103, 5117), 'tensorflow.keras.backend.sum', 'K.sum', (['matches'], {}), '(matches)\n', (5108, 5117), True, 'import tensorflow.keras.backend as K\n'), ((5588, 5647), 'tensorflow.keras.layers.SimpleRNN', 'tf.keras.layers.SimpleRNN', ([], {'units': 'd_model', 'return_state': '(True)'}), '(units=d_model, return_state=True)\n', (5613, 5647), True, 'import tensorflow as tf\n'), ((5754, 5817), 'tensorflow.keras.layers.SimpleRNN', 'tf.keras.layers.SimpleRNN', ([], {'units': 'd_model', 'return_sequences': '(True)'}), '(units=d_model, return_sequences=True)\n', (5779, 5817), True, 'import tensorflow as tf\n'), ((5889, 5945), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['n_vocab'], {'activation': 'tf.nn.softmax'}), '(n_vocab, activation=tf.nn.softmax)\n', (5910, 5945), True, 'import tensorflow as tf\n'), ((6264, 6290), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (6288, 6290), True, 'import tensorflow as tf\n'), ((7744, 7766), 'numpy.array', 'np.array', (['[enc_inputs]'], {}), '([enc_inputs])\n', (7752, 7766), True, 'import numpy as np\n'), ((7768, 7790), 'numpy.array', 'np.array', (['[dec_inputs]'], {}), '([dec_inputs])\n', (7776, 7790), 
True, 'import numpy as np\n'), ((8005, 8027), 'numpy.array', 'np.array', (['[enc_inputs]'], {}), '([enc_inputs])\n', (8013, 8027), True, 'import numpy as np\n'), ((8029, 8051), 'numpy.array', 'np.array', (['[dec_inputs]'], {}), '([dec_inputs])\n', (8037, 8051), True, 'import numpy as np\n'), ((8266, 8288), 'numpy.array', 'np.array', (['[enc_inputs]'], {}), '([enc_inputs])\n', (8274, 8288), True, 'import numpy as np\n'), ((8290, 8312), 'numpy.array', 'np.array', (['[dec_inputs]'], {}), '([dec_inputs])\n', (8298, 8312), True, 'import numpy as np\n'), ((10315, 10382), 'tensorflow.keras.layers.SimpleRNN', 'tf.keras.layers.SimpleRNN', ([], {'units': '(d_model * 2)', 'return_sequences': '(True)'}), '(units=d_model * 2, return_sequences=True)\n', (10340, 10382), True, 'import tensorflow as tf\n'), ((10457, 10513), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['n_vocab'], {'activation': 'tf.nn.softmax'}), '(n_vocab, activation=tf.nn.softmax)\n', (10478, 10513), True, 'import tensorflow as tf\n'), ((10844, 10870), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (10868, 10870), True, 'import tensorflow as tf\n'), ((5130, 5141), 'tensorflow.keras.backend.sum', 'K.sum', (['mask'], {}), '(mask)\n', (5135, 5141), True, 'import tensorflow.keras.backend as K\n'), ((10082, 10141), 'tensorflow.keras.layers.SimpleRNN', 'tf.keras.layers.SimpleRNN', ([], {'units': 'd_model', 'return_state': '(True)'}), '(units=d_model, return_state=True)\n', (10107, 10141), True, 'import tensorflow as tf\n'), ((9069, 9091), 'numpy.array', 'np.array', (['[enc_inputs]'], {}), '([enc_inputs])\n', (9077, 9091), True, 'import numpy as np\n'), ((9093, 9115), 'numpy.array', 'np.array', (['[dec_inputs]'], {}), '([dec_inputs])\n', (9101, 9115), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from multilayer_perceptron import MLP
from gradient_boosting_decision_tree import GBDT
from xgboost import XGBoost
from random_forest import RandomForest
from adaboost import AdaBoost
from factorization_machines import FactorizationMachines
from support_vector_machine import SVM
from k_nearest_neighbor import kNearestNeighbor
def gen_linear(train_num):
    """Sample points in [-1, 1]^2, labeled 1 when x0 + x1 > 0 (linear split)."""
    points = np.random.random((train_num, 2)) * 2 - 1
    labels = (points[:, 0] + points[:, 1] > 0).astype(int)
    return points, labels
def gen_circle(train_num):
    """Sample points in [-1, 1]^2, labeled 1 outside the circle of squared radius 0.6."""
    points = np.random.random((train_num, 2)) * 2 - 1
    radius_sq = points[:, 0] ** 2 + points[:, 1] ** 2
    return points, (radius_sq > 0.6).astype(int)
def gen_xor(train_num):
    """Sample points in [-1, 1]^2, labeled 1 when both coordinates share a sign (XOR layout)."""
    points = np.random.random((train_num, 2)) * 2 - 1
    labels = (points[:, 0] * points[:, 1] > 0).astype(int)
    return points, labels
def gen_spiral(train_num):
    """Sample two interleaved spiral arms with small uniform noise; label is the arm index."""
    radius = 0.8 * np.arange(train_num) / train_num
    labels = np.arange(train_num) % 2
    # the two arms are offset by pi radians
    angle = 1.75 * radius * 2 * np.pi + labels * np.pi
    x0 = radius * np.sin(angle) + np.random.random(train_num) / 10
    x1 = radius * np.cos(angle) + np.random.random(train_num) / 10
    return np.c_[x0, x1], labels * 1
def gen_moon(train_num):
    """Sample two opposing crescent ("moon") shapes with small uniform noise; label is the crescent."""
    labels = np.arange(train_num) % 2
    arc = np.linspace(0, np.pi, train_num)
    x0 = (labels - 0.5) * (.5 - np.cos(arc)) + np.random.random(train_num) / 10
    x1 = (labels - 0.5) * (.5 - 2 * np.sin(arc)) + np.random.random(train_num) / 10
    return np.c_[x0, x1], labels
# visualize decision boundary change
def boundary_vis_plots(model, x, y, subplot=(1, 1, 1)):
    """Draw a model's decision boundary over its 2-D training points.

    :param model: fitted classifier with a ``predict`` method over (n, 2) input
    :param x: (n, 2) array of training points in [-1, 1]^2
    :param y: (n,) array of 0/1 labels (indexes the red/blue color array)
    :param subplot: (rows, cols, index) grid position; titles go on the first
        row and ``plt.show()`` fires on the last cell. The default is a tuple
        instead of the original list to avoid the mutable-default pitfall;
        callers passing lists are unaffected.
    """
    plt.subplot(subplot[0], subplot[1], subplot[2])
    xx, yy = np.meshgrid(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50))
    pred = model.predict(np.c_[xx.ravel(), yy.ravel()])
    # accept (n,), (n, 1) or (n, k>1) predictions; use column 1 when k > 1
    zz = pred.reshape(xx.shape) if len(pred.shape) == 1 or pred.shape[
        1] == 1 else pred[:, 1].reshape(xx.shape)
    if subplot[2] <= subplot[1]:
        plt.title(type(model).__name__)
    plt.contourf(xx, yy, zz, levels=np.linspace(
        zz.min(), zz.max(), 40), cmap=plt.cm.RdBu)
    plt.contour(xx, yy, zz, levels=[0.5], colors='darkred')
    plt.scatter(x[:, 0], x[:, 1], c=np.array(
        ['red', 'blue'])[y], s=10, edgecolors='k')
    if subplot[2] == subplot[0] * subplot[1]:
        plt.show()
def main():
    """Fit every model on every toy dataset and tile their decision boundaries."""
    data_loaders = [gen_linear, gen_circle, gen_xor, gen_spiral, gen_moon]
    models = [
        (kNearestNeighbor, {'k': 5}),
        (FactorizationMachines, {'learning_rate': 1, 'embedding_dim': 1}),
        (SVM, {}),
        (AdaBoost, {'esti_num': 10}),
        (RandomForest, {'tree_num': 20, 'max_depth': 3}),
        (XGBoost, {'tree_num': 20, 'max_depth': 3}),
        (MLP, {'act_type': 'Tanh', 'opt_type': 'Adam',
               'layers': [2, 8, 7, 2], 'epochs': 200,
               'learning_rate': 0.5, 'lmbda': 1e-4}),
    ]
    # SVM (index 2) and AdaBoost (index 3) train on {-1, +1} labels
    signed_label_indices = {2, 3}
    for row, data_loader in enumerate(data_loaders):
        x, y = data_loader(256)
        for col, (model_cls, params) in enumerate(models):
            clf = model_cls(**params)
            target = 2 * y - 1 if col in signed_label_indices else y
            clf.fit(x, target)
            boundary_vis_plots(clf, x, y, subplot=[
                len(data_loaders), len(models), len(models) * row + 1 + col])
if __name__ == "__main__":
    main()
| [
"numpy.random.random",
"numpy.square",
"matplotlib.pyplot.contour",
"numpy.linspace",
"numpy.array",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1492, 1539), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot[0]', 'subplot[1]', 'subplot[2]'], {}), '(subplot[0], subplot[1], subplot[2])\n', (1503, 1539), True, 'import matplotlib.pyplot as plt\n'), ((1967, 2022), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'zz'], {'levels': '[0.5]', 'colors': '"""darkred"""'}), "(xx, yy, zz, levels=[0.5], colors='darkred')\n", (1978, 2022), True, 'import matplotlib.pyplot as plt\n'), ((845, 865), 'numpy.arange', 'np.arange', (['train_num'], {}), '(train_num)\n', (854, 865), True, 'import numpy as np\n'), ((1094, 1114), 'numpy.arange', 'np.arange', (['train_num'], {}), '(train_num)\n', (1103, 1114), True, 'import numpy as np\n'), ((1565, 1587), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(50)'], {}), '(-1, 1, 50)\n', (1576, 1587), True, 'import numpy as np\n'), ((1589, 1611), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(50)'], {}), '(-1, 1, 50)\n', (1600, 1611), True, 'import numpy as np\n'), ((2174, 2184), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2182, 2184), True, 'import matplotlib.pyplot as plt\n'), ((420, 452), 'numpy.random.random', 'np.random.random', (['(train_num, 2)'], {}), '((train_num, 2))\n', (436, 452), True, 'import numpy as np\n'), ((536, 568), 'numpy.random.random', 'np.random.random', (['(train_num, 2)'], {}), '((train_num, 2))\n', (552, 568), True, 'import numpy as np\n'), ((662, 694), 'numpy.random.random', 'np.random.random', (['(train_num, 2)'], {}), '((train_num, 2))\n', (678, 694), True, 'import numpy as np\n'), ((713, 756), 'numpy.array', 'np.array', (['[(xi[0] * xi[1] > 0) for xi in x]'], {}), '([(xi[0] * xi[1] > 0) for xi in x])\n', (721, 756), True, 'import numpy as np\n'), ((804, 824), 'numpy.arange', 'np.arange', (['train_num'], {}), '(train_num)\n', (813, 824), True, 'import numpy as np\n'), ((1200, 1227), 'numpy.random.random', 'np.random.random', (['train_num'], {}), '(train_num)\n', (1216, 1227), True, 'import numpy as np\n'), ((1331, 
1358), 'numpy.random.random', 'np.random.random', (['train_num'], {}), '(train_num)\n', (1347, 1358), True, 'import numpy as np\n'), ((2059, 2084), 'numpy.array', 'np.array', (["['red', 'blue']"], {}), "(['red', 'blue'])\n", (2067, 2084), True, 'import numpy as np\n'), ((929, 938), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (935, 938), True, 'import numpy as np\n'), ((941, 968), 'numpy.random.random', 'np.random.random', (['train_num'], {}), '(train_num)\n', (957, 968), True, 'import numpy as np\n'), ((993, 1002), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (999, 1002), True, 'import numpy as np\n'), ((1005, 1032), 'numpy.random.random', 'np.random.random', (['train_num'], {}), '(train_num)\n', (1021, 1032), True, 'import numpy as np\n'), ((1153, 1185), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'train_num'], {}), '(0, np.pi, train_num)\n', (1164, 1185), True, 'import numpy as np\n'), ((588, 600), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (597, 600), True, 'import numpy as np\n'), ((1271, 1303), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'train_num'], {}), '(0, np.pi, train_num)\n', (1282, 1303), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
def output(pos, focus, zoom):
    """Map pos in [0, 1] through a piecewise power warp centered on focus.

    Fixed points are 0, focus, and 1; zoom controls how strongly values are
    pulled toward focus.
    """
    if pos > focus:
        return 1 - (1 - focus) * (((1 - pos) / (1 - focus)) ** zoom)
    return focus * ((pos / focus) ** zoom)
def input(pos, focus, zoom):
    """Inverse of output(): undo the piecewise power warp around focus.

    NOTE(review): shadows the builtin ``input``; kept for interface
    compatibility with the animation code below.
    """
    inv_zoom = 1.0 / zoom
    if pos > focus:
        return 1 - ((1 - focus) * (((1 - pos) / (1 - focus)) ** inv_zoom))
    return focus * ((pos / focus) ** inv_zoom)
def output_np(pos, focus, zoom):
    """Vectorized output(): apply the piecewise power warp elementwise.

    Uses pos <= focus to pick the branch, matching the scalar output().
    """
    low = focus * ((pos / focus) ** zoom)
    high = 1 - (1 - focus) * (((1 - pos) / (1 - focus)) ** zoom)
    return np.where(pos <= focus, low, high)
def input_np(pos, focus, zoom):
    """Vectorized inverse of output_np(): undo the power warp elementwise.

    BUGFIX: the original split branches on ``pos < focus`` while output_np()
    and the scalar input() use ``pos <= focus``; at pos == focus the second
    branch returned 1 - (1 - focus), which differs from focus by a float
    rounding error. Using <= makes the boundary exact and consistent.
    """
    return np.where(pos <= focus,
                    focus * ((pos / focus) ** (1.0 / zoom)),
                    (1 - ((1 - focus) * (
                        ((1 - pos) / (1 - focus)) ** (1.0 / zoom)))))
# ffmpeg-backed movie writer used by both animations below
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Polynomial zoom', artist='<NAME>',
                comment='with matplotlib')
writer = FFMpegWriter(fps=15, metadata=metadata)
# figure with two curves: the warp (default color) and a reference line (red)
fig = plt.figure()
l, = plt.plot([], [], '-')
l2, = plt.plot([], [], '-', color='r')
plt.xlim(0, 1)
plt.ylim(0, 1)
t = np.arange(0, 1, 0.001)
focus = 0.2
c = [0, 1]
# animation 1: fixed focus, zoom sweeps from 1 to 10
with writer.saving(fig, "zoom.mp4", 90):
    for zoom in np.arange(1, 10, 0.1):
        l.set_data(t, input_np(t, focus, zoom))
        # reference line through (focus, focus) with slope 1/zoom
        b = np.arange(0, 1, 0.01)
        c = focus + (b - focus) / zoom
        l2.set_data(b, c)
        writer.grab_frame()
# animation 2: fixed zoom, focus sweeps across [0, 1]
zoom = 5
with writer.saving(fig, "focus.mp4", 101):
    for focus in np.arange(0, 1.001, 0.01):
        l.set_data(t, input_np(t, focus, zoom))
        b = np.arange(0, 1, 0.01)
        c = focus + (b - focus) / zoom
        l2.set_data(b, c)
        writer.grab_frame()
| [
"matplotlib.use",
"numpy.where",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.arange"
] | [((37, 58), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (51, 58), False, 'import matplotlib\n'), ((1167, 1179), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1177, 1179), True, 'import matplotlib.pyplot as plt\n'), ((1185, 1206), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]', '"""-"""'], {}), "([], [], '-')\n", (1193, 1206), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1245), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]', '"""-"""'], {'color': '"""r"""'}), "([], [], '-', color='r')\n", (1221, 1245), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1261), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (1255, 1261), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1276), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1270, 1276), True, 'import matplotlib.pyplot as plt\n'), ((1282, 1304), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.001)'], {}), '(0, 1, 0.001)\n', (1291, 1304), True, 'import numpy as np\n'), ((571, 681), 'numpy.where', 'np.where', (['(pos <= focus)', '(focus * (pos / focus) ** zoom)', '(1 - (1 - focus) * ((1 - pos) / (1 - focus)) ** zoom)'], {}), '(pos <= focus, focus * (pos / focus) ** zoom, 1 - (1 - focus) * ((1 -\n pos) / (1 - focus)) ** zoom)\n', (579, 681), True, 'import numpy as np\n'), ((769, 895), 'numpy.where', 'np.where', (['(pos < focus)', '(focus * (pos / focus) ** (1.0 / zoom))', '(1 - (1 - focus) * ((1 - pos) / (1 - focus)) ** (1.0 / zoom))'], {}), '(pos < focus, focus * (pos / focus) ** (1.0 / zoom), 1 - (1 - focus\n ) * ((1 - pos) / (1 - focus)) ** (1.0 / zoom))\n', (777, 895), True, 'import numpy as np\n'), ((1386, 1407), 'numpy.arange', 'np.arange', (['(1)', '(10)', '(0.1)'], {}), '(1, 10, 0.1)\n', (1395, 1407), True, 'import numpy as np\n'), ((1654, 1679), 'numpy.arange', 'np.arange', (['(0)', '(1.001)', '(0.01)'], {}), '(0, 1.001, 0.01)\n', (1663, 1679), True, 'import numpy as np\n'), ((1469, 1490), 
'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (1478, 1490), True, 'import numpy as np\n'), ((1741, 1762), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (1750, 1762), True, 'import numpy as np\n')] |
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from models import Net
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch, logging every ``args.log_interval`` batches."""
    model.train()
    n_samples = len(train_loader.dataset)
    n_batches = len(train_loader)
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        predictions = model(inputs)
        # Summed (not averaged) binary cross-entropy over the batch.
        loss = F.binary_cross_entropy(predictions, labels, reduction='sum')
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(inputs), n_samples,
                100. * batch_idx / n_batches, loss.item()))
def test(args, model, device, test_loader):
    """Evaluate ``model`` on ``test_loader`` and print the averaged losses.

    ``args`` is accepted but unused.
    """
    model.eval()
    total_bce = 0
    total_l1 = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            preds = model(inputs)
            # Accumulate the summed batch loss; averaged once at the end.
            total_bce += F.binary_cross_entropy(preds, labels, reduction='sum').item()
            total_l1 += F.smooth_l1_loss(preds, labels)
    n = len(test_loader.dataset)
    total_bce /= n
    print('\nTest set: Average loss: {:.4f}, ({:.4f})\n'.format(
        total_bce,
        total_l1 / n))
def main():
    """CLI entry point: parse arguments, load CSV data, train and evaluate."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch Vec2Color Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=1500, metavar='N',
                        help='number of epochs to train (default: 1500)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    # Seed before weight init and the train/test split for reproducibility.
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # Features come from one CSV per text-casing variant; the target RGB
    # table is repeated so its rows stay aligned with the concatenated
    # feature frames.
    file_names = ('capitalize', 'lower', 'upper', 'title')
    x_df = pd.concat([pd.read_csv('doc2color/data/{}.csv'.format(file_name)) for file_name in file_names])
    y_df = pd.concat([pd.read_csv('doc2color/data/rgb.csv')] * len(file_names))
    # RGB targets are divided by 255 so they lie in [0, 1], as required by
    # the binary_cross_entropy loss used in train()/test().
    tensor_x = torch.stack([torch.from_numpy(np.array(i)) for i in x_df.values.astype(np.float32)])
    tensor_y = torch.stack([torch.from_numpy(np.array(i)) for i in y_df.values.astype(np.float32) / 255.0])
    x_train, x_test, y_train, y_test = train_test_split(
        tensor_x, tensor_y, test_size=0.01, random_state=args.seed)
    train_dataset = torch.utils.data.TensorDataset(x_train, y_train)
    train_loader = torch.utils.data.DataLoader(train_dataset,
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_dataset = torch.utils.data.TensorDataset(x_test, y_test)
    test_loader = torch.utils.data.DataLoader(test_dataset,
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    # Decay the learning rate by `gamma` once per epoch.
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
        scheduler.step()
    if args.save_model:
        torch.save(model.state_dict(), "doc2color/pt_objects/vec2color.pt")


if __name__ == '__main__':
    main()
"torch.manual_seed",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"torch.nn.functional.binary_cross_entropy",
"torch.optim.lr_scheduler.StepLR",
"torch.utils.data.TensorDataset",
"torch.nn.functional.smooth_l1_loss",
"numpy.array",
"torch.cuda.is_availa... | [((1604, 1668), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Vec2Color Example"""'}), "(description='PyTorch Vec2Color Example')\n", (1627, 1668), False, 'import argparse\n'), ((3057, 3085), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3074, 3085), False, 'import torch\n'), ((3100, 3143), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (3112, 3143), False, 'import torch\n'), ((3713, 3789), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tensor_x', 'tensor_y'], {'test_size': '(0.01)', 'random_state': 'args.seed'}), '(tensor_x, tensor_y, test_size=0.01, random_state=args.seed)\n', (3729, 3789), False, 'from sklearn.model_selection import train_test_split\n'), ((3820, 3868), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (3850, 3868), False, 'import torch\n'), ((3888, 3986), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=True, **kwargs)\n', (3915, 3986), False, 'import torch\n'), ((4011, 4057), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (4041, 4057), False, 'import torch\n'), ((4076, 4178), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(True)'}), '(test_dataset, batch_size=args.test_batch_size,\n shuffle=True, **kwargs)\n', (4103, 4178), False, 'import torch\n'), ((4293, 4341), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': '(1)', 'gamma': 'args.gamma'}), '(optimizer, step_size=1, gamma=args.gamma)\n', (4299, 4341), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((587, 
642), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (609, 642), True, 'import torch.nn.functional as F\n'), ((1062, 1077), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1075, 1077), False, 'import torch\n'), ((3026, 3051), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3049, 3051), False, 'import torch\n'), ((1347, 1379), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['output', 'target'], {}), '(output, target)\n', (1363, 1379), True, 'import torch.nn.functional as F\n'), ((4196, 4201), 'models.Net', 'Net', ([], {}), '()\n', (4199, 4201), False, 'from models import Net\n'), ((3406, 3443), 'pandas.read_csv', 'pd.read_csv', (['"""doc2color/data/rgb.csv"""'], {}), "('doc2color/data/rgb.csv')\n", (3417, 3443), True, 'import pandas as pd\n'), ((3510, 3521), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (3518, 3521), True, 'import numpy as np\n'), ((3610, 3621), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (3618, 3621), True, 'import numpy as np\n'), ((1240, 1295), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (1262, 1295), True, 'import torch.nn.functional as F\n')] |
# Aggregate per-run result files into mean loss / MACE tables per dataset
# and bin-count config, then export them as LaTeX tables.
import numpy as np
import torch
import matplotlib.pyplot as plt
from pathlib import Path
from matplotlib.backends.backend_pdf import PdfPages
import glob
import pandas as pd
import os
from itertools import compress
from results_utils import display_dataset_name, display_decoder_name, coef_variation, metrics, dict_mean, dict_ste, dict_mean_ste
from distinctipy import distinctipy
import pandas as pd  # NOTE(review): duplicate import; pandas is already imported above

res_dir = Path("../work/results/")
log_dir = "../work/out/"
latex_dir = Path("../work/latex/")
pdfs_dir = Path("../work/pdfs/")
latex_dir.mkdir(parents=True, exist_ok=True)
pdfs_dir.mkdir(parents=True, exist_ok=True)

configs_others = ["64", "64b", "64c"]
runs = [1, 2, 3]  # result files exist per (dataset, config, run)
tag = "real"
# NOTE(review): the "both" branch does not set configs_rqs, so tag="both"
# would raise NameError at `configs = configs_rqs` below — confirm intended.
if tag == "real":
    datasets = ['yelp_airport', 'taxi', 'yelp_mississauga', 'twitter', 'wikipedia', 'pubg', 'yelp_toronto', 'reddit_askscience_comments', 'reddit_politics_submissions', 'lastfm', 'mooc', 'reddit']
    #datasets = ['yelp_airport', 'taxi', 'yelp_mississauga', 'twitter']
    #configs_rqs = ["1", "2", "3", "5", "8", "10", "15"]
    configs_rqs = ["1", "2", "3", "8", "10", "15"]
elif tag == "synth":
    datasets = ['synth/poisson', 'synth/renewal', 'synth/self_correcting', 'synth/hawkes2', 'synth/hawkes1']
    configs_rqs = ["3", "5", "8", "10", "15"]
elif tag == "both":
    datasets = ['synth/poisson', 'synth/renewal', 'synth/self_correcting', 'synth/hawkes2', 'synth/hawkes1', 'yelp_airport', 'taxi', 'yelp_mississauga', 'twitter', 'wikipedia', 'pubg', 'yelp_toronto', 'reddit_askscience_comments', 'reddit_politics_submissions', 'lastfm', 'mooc', 'reddit']

# dataset -> {config -> formatted mean over runs}
loss_test_mean_numbin = dict()
loss_val_mean_numbin = dict()
mace_test_mean_numbin = dict()

decoder = "RQS_EXP-crps_qapprox"
configs = configs_rqs
for dataset in datasets: ######
    print("--- ", dataset, " ---")
    dict_config_loss_val_mean = dict()
    dict_config_loss_test_mean = dict()
    dict_config_mace_test_mean = dict()
    for config in configs:
        list_loss_val_mean = []
        list_loss_test_mean = []
        list_mace_test_mean = []
        # Collect one value per run; missing files are skipped, missing
        # predictions are recorded as np.inf so the mean is visibly broken.
        for run in runs:
            suffix = str(dataset).replace("/", "-") + "-" + str(decoder) + '-' + config + '-' + str(run) + '.pt'
            results_file = res_dir / suffix
            if results_file.exists():
                loaded = torch.load(results_file, map_location=torch.device('cpu'))
                list_loss_val_mean.append(loaded['loss_val'].item())
                list_loss_test_mean.append(loaded['loss_test'].item() )
                if 'predictions_test' in loaded.keys():
                    metrics_sampling = metrics(loaded["predictions_test"], mace_only = True)
                    list_mace_test_mean.append(metrics_sampling["mace"])
                else:
                    print("Predictions missing in ", results_file, " !!!!!")
                    list_mace_test_mean.append(np.inf)
                #all_metrics_sampling_best.append(metrics_sampling_best)
            else:
                print(results_file, " missing!")
        print("****")
        print(list_loss_val_mean)
        print("---")
        print(list_loss_test_mean)
        print("---")
        print(list_mace_test_mean)
        #loss_val = np.mean(list_loss_val)
        #loss_test = np.mean(list_loss_test)
        #mace_test = np.mean(list_mace_test)
        # Store pre-formatted strings (3 decimals) for the LaTeX export.
        #dict_config_val[config] = "{:0.3f}".format(np.mean(list_loss_val)) + " (" + "{:0.3f}".format(np.std(list_loss_val)/np.sqrt(len(list_loss_val)) ) + ")"
        dict_config_loss_val_mean[config] = "{:0.3f}".format(np.mean(list_loss_val_mean))
        #dict_config_test[config] = "{:0.3f}".format(np.mean(list_loss_test)) + " (" + "{:0.3f}".format(np.std(list_loss_test)/np.sqrt(len(list_loss_test)) ) + ")"
        dict_config_loss_test_mean[config] = "{:0.3f}".format(np.mean(list_loss_test_mean))
        #dict_config_mace_test[config] = "{:0.3f}".format(np.mean(list_mace_test)) + " (" + "{:0.3f}".format(np.std(list_mace_test)/np.sqrt(len(list_mace_test)) ) + ")"
        dict_config_mace_test_mean[config] = "{:0.3f}".format(np.mean(list_mace_test_mean))
    loss_val_mean_numbin[dataset] = dict_config_loss_val_mean
    loss_test_mean_numbin[dataset] = dict_config_loss_test_mean
    mace_test_mean_numbin[dataset] = dict_config_mace_test_mean

# Rows = datasets, columns = configs (bin counts).
loss_val_mean_numbin = pd.DataFrame.from_dict(loss_val_mean_numbin, orient='index')
loss_test_mean_numbin = pd.DataFrame.from_dict(loss_test_mean_numbin, orient='index')
mace_test_mean_numbin = pd.DataFrame.from_dict(mace_test_mean_numbin, orient='index')

table_loss_mace = loss_test_mean_numbin + " (" + mace_test_mean_numbin + ")"

loss_val_mean_numbin.rename(display_dataset_name, axis = 0, inplace = True)
loss_test_mean_numbin.rename(display_dataset_name, axis = 0, inplace = True)
# NOTE(review): axis=1 renames the *columns* (config strings) with the
# dataset display mapper, unlike the two axis=0 calls above — presumably
# axis=0 was intended; confirm before the MACE table is published.
mace_test_mean_numbin.rename(display_dataset_name, axis = 1, inplace = True)

### Export the tables to .tex files (also echoed to stdout).
table_file = "table_numbin_loss_" + tag + ".tex"
results_file = latex_dir / table_file
with open(results_file, 'w') as tf:
    res = loss_test_mean_numbin.to_latex(float_format="{:0.3f}".format, index_names = False)
    print(res)
    tf.write(res)

table_file = "table_numbin_mace_" + tag + ".tex"
results_file = latex_dir / table_file
with open(results_file, 'w') as tf:
    res = mace_test_mean_numbin.to_latex(float_format="{:0.3f}".format, index_names = False)
    print(res)
    tf.write(res)

if False:  # disabled: combined loss (mace) table
    table_file = "table_numbin_loss_mace_" + tag + ".tex"
    results_file = latex_dir / table_file
    with open(results_file, 'w') as tf:
        res = table_loss_mace.to_latex(float_format="{:0.3f}".format, index_names = False)
        print(res)
        tf.write(res)

# NOTE(review): debugger left in — drops into pdb on every run; remove.
breakpoint()
| [
"numpy.mean",
"pathlib.Path",
"pandas.DataFrame.from_dict",
"results_utils.metrics",
"torch.device"
] | [((414, 438), 'pathlib.Path', 'Path', (['"""../work/results/"""'], {}), "('../work/results/')\n", (418, 438), False, 'from pathlib import Path\n'), ((478, 500), 'pathlib.Path', 'Path', (['"""../work/latex/"""'], {}), "('../work/latex/')\n", (482, 500), False, 'from pathlib import Path\n'), ((513, 534), 'pathlib.Path', 'Path', (['"""../work/pdfs/"""'], {}), "('../work/pdfs/')\n", (517, 534), False, 'from pathlib import Path\n'), ((4351, 4411), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['loss_val_mean_numbin'], {'orient': '"""index"""'}), "(loss_val_mean_numbin, orient='index')\n", (4373, 4411), True, 'import pandas as pd\n'), ((4436, 4497), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['loss_test_mean_numbin'], {'orient': '"""index"""'}), "(loss_test_mean_numbin, orient='index')\n", (4458, 4497), True, 'import pandas as pd\n'), ((4522, 4583), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['mace_test_mean_numbin'], {'orient': '"""index"""'}), "(mace_test_mean_numbin, orient='index')\n", (4544, 4583), True, 'import pandas as pd\n'), ((3575, 3602), 'numpy.mean', 'np.mean', (['list_loss_val_mean'], {}), '(list_loss_val_mean)\n', (3582, 3602), True, 'import numpy as np\n'), ((3840, 3868), 'numpy.mean', 'np.mean', (['list_loss_test_mean'], {}), '(list_loss_test_mean)\n', (3847, 3868), True, 'import numpy as np\n'), ((4103, 4131), 'numpy.mean', 'np.mean', (['list_mace_test_mean'], {}), '(list_mace_test_mean)\n', (4110, 4131), True, 'import numpy as np\n'), ((2614, 2665), 'results_utils.metrics', 'metrics', (["loaded['predictions_test']"], {'mace_only': '(True)'}), "(loaded['predictions_test'], mace_only=True)\n", (2621, 2665), False, 'from results_utils import display_dataset_name, display_decoder_name, coef_variation, metrics, dict_mean, dict_ste, dict_mean_ste\n'), ((2354, 2373), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2366, 2373), False, 'import torch\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest, os
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
@skip_check_grad_ci(reason="DNNL's MatMul doesn't implemend grad kernel.")
class TestDnnlMatMulOp(OpTest):
    """Base oneDNN matmul test: batched 3-D float inputs, alpha = 1."""

    def generate_data(self):
        # Batched matmul: 25 stacked 2x2 operand pairs.
        self.x = np.random.random((25, 2, 2)).astype("float32")
        self.y = np.random.random((25, 2, 2)).astype("float32")
        self.alpha = 1.0
        self.out = self.alpha * np.matmul(self.x, self.y)

    def set_attributes(self):
        # Subclasses may have set alpha in generate_data(); default to 1.
        self.alpha = getattr(self, 'alpha', 1.0)
        self.attrs = {'alpha': self.alpha}

    def setUp(self):
        # Cap the ISA, otherwise the kernel fails on SKX and earlier.
        os.environ["DNNL_MAX_CPU_ISA"] = "AVX"
        self.op_type = "matmul"
        self.use_mkldnn = True
        self._cpu_only = True
        self.generate_data()
        self.set_attributes()
        self.attrs['use_mkldnn'] = True
        self.inputs = {'X': self.x, 'Y': self.y}
        self.outputs = {'Out': self.out}

    def test_check_output(self):
        self.check_output()
class TestDnnlMatMulOpAlpha(TestDnnlMatMulOp):
    """Batched non-square matmul scaled by alpha = 2."""

    def generate_data(self):
        lhs_shape, rhs_shape = (17, 2, 3), (17, 3, 2)
        self.x = np.random.random(lhs_shape).astype("float32")
        self.y = np.random.random(rhs_shape).astype("float32")
        self.alpha = 2.0
        self.out = self.alpha * np.matmul(self.x, self.y)
class TestDnnlMatMulOp2D(TestDnnlMatMulOp):
    """Plain 2-D matmul: no batch dimension, default alpha."""

    def print_tensor(self, name, tensor):
        # Debug helper: dump a labelled tensor to stdout.
        print(name)
        print(tensor)

    def generate_data(self):
        self.x = np.random.random((12, 9)).astype("float32")
        self.y = np.random.random((9, 12)).astype("float32")
        self.out = self.x @ self.y
class TestDnnlMatMulOpTransposeX(TestDnnlMatMulOp):
    """2-D matmul where the op transposes the first operand (transpose_X)."""

    def generate_data(self):
        self.x = np.random.random((12, 9)).astype("float32")
        self.y = np.random.random((12, 9)).astype("float32")
        # Reference result: transpose X ourselves, since the op will.
        self.out = self.x.T @ self.y

    def set_attributes(self):
        self.attrs = {'transpose_X': True}
class TestDnnlMatMulOpTransposeY(TestDnnlMatMulOp):
    """2-D matmul where the op transposes the second operand (transpose_Y)."""

    def generate_data(self):
        self.x = np.random.random((12, 9)).astype("float32")
        self.y = np.random.random((12, 9)).astype("float32")
        # Reference result: transpose Y ourselves, since the op will.
        self.out = self.x @ self.y.T

    def set_attributes(self):
        self.attrs = {'transpose_Y': True}
class TestDnnlMatMulOpTransposeY3D(TestDnnlMatMulOp):
    """Batched matmul with the second operand transposed (transpose_Y)."""

    def generate_data(self):
        self.x = np.random.random((17, 3, 2)).astype("float32")
        self.y = np.random.random((17, 3, 2)).astype("float32")
        # Swap only the last two axes; the batch axis stays first.
        self.out = np.matmul(self.x, self.y.transpose(0, 2, 1))

    def set_attributes(self):
        self.attrs = {'transpose_Y': True}
class TestDnnlMatMulOpInt8NoScales(TestDnnlMatMulOp):
    """int8 x int8 matmul without quantization scale attributes.

    Bug fix: the original cast ``np.random.random`` output — floats in
    [0, 1) — straight to int8, which truncates every entry to 0, so the
    test only ever compared all-zero matrices. Draw small non-negative
    integers instead (matching TestDnnlMatMulOpInt8ForceFP32BasicScales);
    values in [0, 3) keep the int8 accumulation (max 9 * 2 * 2 = 36) far
    from overflow.
    """

    def generate_data(self):
        self.x = np.random.randint(0, 3, (12, 9)).astype("int8")
        self.y = np.random.randint(0, 3, (9, 12)).astype("int8")
        self.out = np.matmul(self.x, self.y)
class TestDnnlMatMulOpInt8(TestDnnlMatMulOp):
    """Quantized int8 matmul with explicit input and output scales."""

    def quantize(self, tensor):
        # Map the tensor onto the int8 range based on its peak value.
        peak = np.abs(np.amax(tensor))
        scale = 127. / peak
        q = np.round(scale * tensor).astype("int8")
        return scale, q

    def generate_data(self):
        x_float = np.random.random((12, 9)).astype("float32")
        self.x_scale, self.x = self.quantize(x_float)
        y_float = np.random.random((9, 12)).astype("float32")
        self.y_scale, self.y = self.quantize(y_float)
        # Reference output is computed in float, then quantized itself.
        out_float = np.matmul(x_float, y_float)
        self.out_scale, self.out = self.quantize(out_float)

    def set_attributes(self):
        self.attrs = {
            'Scale_x': self.x_scale,
            'Scale_y': self.y_scale,
            'Scale_out': self.out_scale,
        }

    def test_check_output(self):
        # Quantization rounding allows an off-by-one difference.
        int_atol = 1
        self.check_output(atol=int_atol)
class TestDnnlMatMulOpInt8ForceFP32(TestDnnlMatMulOpInt8):
    """Quantized int8 inputs, but the kernel must emit fp32 output."""

    def generate_data(self):
        x_float = np.random.random((12, 9)).astype("float32")
        self.x_scale, self.x = self.quantize(x_float)
        y_float = np.random.random((9, 12)).astype("float32")
        self.y_scale, self.y = self.quantize(y_float)
        # Reference output stays float — no output quantization here.
        self.out = np.matmul(x_float, y_float)

    def set_attributes(self):
        self.attrs = {
            'Scale_x': self.x_scale,
            'Scale_y': self.y_scale,
            'force_fp32_output': True
        }
class TestDnnlMatMulOpInt8ForceFP32BasicScales(TestDnnlMatMulOp):
    """Small-integer int8 inputs, fp32 output, no explicit scales."""

    def generate_data(self):
        # Small integers (0..2) keep the int8 matmul far from overflow.
        self.x = np.random.randint(0, 3, (12, 9)).astype("int8")
        self.y = np.random.randint(0, 3, (9, 12)).astype("int8")
        self.out = np.matmul(self.x, self.y).astype("float32")

    def set_attributes(self):
        self.attrs = {'force_fp32_output': True}


if __name__ == "__main__":
    unittest.main()
| [
"numpy.random.random",
"paddle.fluid.tests.unittests.op_test.skip_check_grad_ci",
"numpy.random.randint",
"numpy.matmul",
"unittest.main",
"numpy.transpose",
"numpy.amax",
"numpy.round"
] | [((768, 841), 'paddle.fluid.tests.unittests.op_test.skip_check_grad_ci', 'skip_check_grad_ci', ([], {'reason': '"""DNNL\'s MatMul doesn\'t implemend grad kernel."""'}), '(reason="DNNL\'s MatMul doesn\'t implemend grad kernel.")\n', (786, 841), False, 'from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci\n'), ((5468, 5483), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5481, 5483), False, 'import unittest, os\n'), ((2317, 2342), 'numpy.matmul', 'np.matmul', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (2326, 2342), True, 'import numpy as np\n'), ((3596, 3621), 'numpy.matmul', 'np.matmul', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (3605, 3621), True, 'import numpy as np\n'), ((4125, 4152), 'numpy.matmul', 'np.matmul', (['x_float', 'y_float'], {}), '(x_float, y_float)\n', (4134, 4152), True, 'import numpy as np\n'), ((4832, 4859), 'numpy.matmul', 'np.matmul', (['x_float', 'y_float'], {}), '(x_float, y_float)\n', (4841, 4859), True, 'import numpy as np\n'), ((1088, 1113), 'numpy.matmul', 'np.matmul', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (1097, 1113), True, 'import numpy as np\n'), ((1990, 2015), 'numpy.matmul', 'np.matmul', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (1999, 2015), True, 'import numpy as np\n'), ((2577, 2597), 'numpy.transpose', 'np.transpose', (['self.x'], {}), '(self.x)\n', (2589, 2597), True, 'import numpy as np\n'), ((2923, 2943), 'numpy.transpose', 'np.transpose', (['self.y'], {}), '(self.y)\n', (2935, 2943), True, 'import numpy as np\n'), ((3269, 3300), 'numpy.transpose', 'np.transpose', (['self.y', '(0, 2, 1)'], {}), '(self.y, (0, 2, 1))\n', (3281, 3300), True, 'import numpy as np\n'), ((920, 948), 'numpy.random.random', 'np.random.random', (['(25, 2, 2)'], {}), '((25, 2, 2))\n', (936, 948), True, 'import numpy as np\n'), ((984, 1012), 'numpy.random.random', 'np.random.random', (['(25, 2, 2)'], {}), '((25, 2, 2))\n', (1000, 1012), True, 'import numpy as np\n'), ((1822, 1850), 
'numpy.random.random', 'np.random.random', (['(17, 2, 3)'], {}), '((17, 2, 3))\n', (1838, 1850), True, 'import numpy as np\n'), ((1886, 1914), 'numpy.random.random', 'np.random.random', (['(17, 3, 2)'], {}), '((17, 3, 2))\n', (1902, 1914), True, 'import numpy as np\n'), ((2193, 2218), 'numpy.random.random', 'np.random.random', (['(12, 9)'], {}), '((12, 9))\n', (2209, 2218), True, 'import numpy as np\n'), ((2254, 2279), 'numpy.random.random', 'np.random.random', (['(9, 12)'], {}), '((9, 12))\n', (2270, 2279), True, 'import numpy as np\n'), ((2443, 2468), 'numpy.random.random', 'np.random.random', (['(12, 9)'], {}), '((12, 9))\n', (2459, 2468), True, 'import numpy as np\n'), ((2504, 2529), 'numpy.random.random', 'np.random.random', (['(12, 9)'], {}), '((12, 9))\n', (2520, 2529), True, 'import numpy as np\n'), ((2781, 2806), 'numpy.random.random', 'np.random.random', (['(12, 9)'], {}), '((12, 9))\n', (2797, 2806), True, 'import numpy as np\n'), ((2842, 2867), 'numpy.random.random', 'np.random.random', (['(12, 9)'], {}), '((12, 9))\n', (2858, 2867), True, 'import numpy as np\n'), ((3121, 3149), 'numpy.random.random', 'np.random.random', (['(17, 3, 2)'], {}), '((17, 3, 2))\n', (3137, 3149), True, 'import numpy as np\n'), ((3185, 3213), 'numpy.random.random', 'np.random.random', (['(17, 3, 2)'], {}), '((17, 3, 2))\n', (3201, 3213), True, 'import numpy as np\n'), ((3478, 3503), 'numpy.random.random', 'np.random.random', (['(12, 9)'], {}), '((12, 9))\n', (3494, 3503), True, 'import numpy as np\n'), ((3536, 3561), 'numpy.random.random', 'np.random.random', (['(9, 12)'], {}), '((9, 12))\n', (3552, 3561), True, 'import numpy as np\n'), ((3732, 3747), 'numpy.amax', 'np.amax', (['tensor'], {}), '(tensor)\n', (3739, 3747), True, 'import numpy as np\n'), ((3769, 3793), 'numpy.round', 'np.round', (['(scale * tensor)'], {}), '(scale * tensor)\n', (3777, 3793), True, 'import numpy as np\n'), ((3889, 3914), 'numpy.random.random', 'np.random.random', (['(12, 9)'], {}), '((12, 9))\n', 
(3905, 3914), True, 'import numpy as np\n'), ((4006, 4031), 'numpy.random.random', 'np.random.random', (['(9, 12)'], {}), '((9, 12))\n', (4022, 4031), True, 'import numpy as np\n'), ((4596, 4621), 'numpy.random.random', 'np.random.random', (['(12, 9)'], {}), '((12, 9))\n', (4612, 4621), True, 'import numpy as np\n'), ((4713, 4738), 'numpy.random.random', 'np.random.random', (['(9, 12)'], {}), '((9, 12))\n', (4729, 4738), True, 'import numpy as np\n'), ((5179, 5211), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)', '(12, 9)'], {}), '(0, 3, (12, 9))\n', (5196, 5211), True, 'import numpy as np\n'), ((5244, 5276), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)', '(9, 12)'], {}), '(0, 3, (9, 12))\n', (5261, 5276), True, 'import numpy as np\n'), ((5311, 5336), 'numpy.matmul', 'np.matmul', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (5320, 5336), True, 'import numpy as np\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_constitutive.ipynb (unless otherwise specified).
__all__ = ['Elastic', 'PlaneStrain', 'PlaneStress', 'TransverseIsotropic', 'MMC']
# Cell
import numpy as np
from .base import BaseConstitutive, Properties
from .io import jsonReader
# Cell
class Elastic(BaseConstitutive):
    """Isotropic linear-elastic law in full 3-D (6x6 Voigt stiffness)."""

    def __init__(self, props):
        # Let the base class store the material properties (E, nu, ...).
        BaseConstitutive.__init__(self, props)
        # Assemble Hooke's stiffness matrix in Voigt notation.
        stiff_fac = self.E/((1+self.nu)*(1-2*self.nu))
        shear_fac = (1-2*self.nu)/2
        normal = self.nu*(np.ones(3)-np.eye(3))+(1-self.nu)*np.eye(3)
        coupling = np.zeros((3,3))
        shear = shear_fac*np.eye(3)
        self.De = stiff_fac*np.block([[normal, coupling], [coupling.copy(), shear]])

    def getStress(self, deformation):
        """Return (stress, tangent) for the given Voigt strain vector."""
        return np.matmul(self.De, deformation.eps), self.De

    def getElasticMatrix(self):
        """Return the constant elastic stiffness matrix."""
        return self.De
# Cell
class PlaneStrain(BaseConstitutive):
    """Isotropic linear elasticity under plane-strain conditions (3x3 Voigt)."""

    def __init__(self, props):
        # Let the base class store the material properties.
        BaseConstitutive.__init__(self, props)
        # Plane-strain Hooke matrix: two normal components plus one shear.
        stiff_fac = self.E/((1+self.nu)*(1-2*self.nu))
        shear_term = (1-2*self.nu)/2
        normal = self.nu*(np.ones(2)-np.eye(2))+(1-self.nu)*np.eye(2)
        self.De = stiff_fac*np.block([
            [normal, np.zeros((2, 1))],
            [np.zeros((1, 2)), np.array([[shear_term]])],
        ])

    def getStress(self, deformation):
        """Return (stress, tangent) for the given plane-strain deformation."""
        return np.matmul(self.De, deformation.eps), self.De

    def getElasticMatrix(self):
        """Return the constant elastic stiffness matrix."""
        return self.De
# Cell
class PlaneStress(BaseConstitutive):
    """Isotropic linear elasticity under plane-stress conditions (3x3 Voigt).

    Consistency fix: the sibling models (Elastic, PlaneStrain, ...) expose
    the stiffness via ``getElasticMatrix``, while this class only offered
    ``getTangent``. ``getElasticMatrix`` is added (same value) so all
    constitutive models share the accessor name; ``getTangent`` is kept
    for backward compatibility.
    """

    def __init__(self, props):
        # Let the base class store the material properties.
        BaseConstitutive.__init__(self, props)
        # Plane-stress Hooke matrix: two normal components plus one shear.
        fc1 = self.E/(1-self.nu**2)
        fc2 = 1-self.nu
        D11 = self.nu*(np.ones(2)-np.eye(2))+np.eye(2)
        D12 = np.zeros((2,1))
        D21 = np.zeros((1,2))
        D22 = np.array([fc2]).reshape((1,1))
        self.De = fc1*np.vstack([np.hstack([D11,D12]),np.hstack([D21,D22])])

    def getStress(self, deformation):
        """Return (stress, tangent) for the given plane-stress deformation."""
        sigma = np.matmul(self.De, deformation.eps)
        return sigma, self.De

    def getTangent(self):
        """Return the constant elastic stiffness matrix (legacy name)."""
        return self.De

    def getElasticMatrix(self):
        """Return the constant elastic stiffness matrix (shared accessor name)."""
        return self.De
# Cell
class TransverseIsotropic(BaseConstitutive):
    """Transversely isotropic elasticity: axis 1 distinct from the 2-3 plane."""

    def __init__(self, props):
        # Let the base class store the material properties.
        BaseConstitutive.__init__(self, props)
        # Out-of-plane shear term derived from E2 and nu23.
        g23 = self.E2/(2*(1+self.nu23))
        # Symmetric 3x3 normal-strain block, written out directly instead
        # of element-by-element assignment.
        ax_nrm = 1./self.E1
        tr_nrm = 1./self.E2
        ax_cpl = -1.*self.nu12/self.E2
        tr_cpl = -1.*self.nu23/self.E2
        normal = np.array([
            [ax_nrm, ax_cpl, ax_cpl],
            [ax_cpl, tr_nrm, tr_cpl],
            [ax_cpl, tr_cpl, tr_nrm],
        ])
        zero = np.zeros((3,3))
        shear = np.diagflat([self.G12, self.G12, g23])
        self.De = np.linalg.inv(np.block([[normal, zero], [zero, shear]]))

    def getStress(self, deformation):
        """Return (stress, tangent) for the given Voigt strain vector."""
        return np.matmul(self.De, deformation.eps), self.De

    def getElasticMatrix(self):
        """Return the constant elastic stiffness matrix."""
        return self.De
# Cell
class MMC(BaseConstitutive):
    """Elastic 3-D law with placeholder hooks for plasticity.

    Bug fix: ``getElastoPlasticMatrix`` returned ``self.D``, an attribute
    never assigned anywhere in the class, so calling it always raised
    ``AttributeError``. Since ``PlasticIntegration`` is still a stub, the
    elastoplastic tangent equals the elastic matrix ``self.De``.
    """

    def __init__(self, props):
        # Let the base class store the material properties.
        BaseConstitutive.__init__(self, props)
        # Assemble the elastic Hooke stiffness matrix in Voigt notation.
        fc1 = self.E/((1+self.nu)*(1-2*self.nu))
        fc2 = (1-2*self.nu)/2
        D11 = self.nu*(np.ones(3)-np.eye(3))+(1-self.nu)*np.eye(3)
        D12 = np.zeros((3,3))
        D21 = D12.copy()
        D22 = fc2*np.eye(3)
        self.De = fc1*np.block([[D11,D12],[D21,D22]])

    def getStress(self, deformation):
        """Return (stress, tangent) for the given Voigt strain vector."""
        sigma = np.matmul(self.De, deformation.eps)
        return sigma, self.De

    def getElasticMatrix(self):
        """Return the elastic stiffness matrix."""
        return self.De

    def PlasticIntegration(self):
        """Stub: plastic return-mapping is not implemented yet."""
        pass

    def getElastoPlasticMatrix(self):
        """Return the consistent tangent; equals ``De`` until plasticity exists."""
        # Was ``return self.D`` — an attribute that is never set.
        return self.De
"numpy.block",
"numpy.eye",
"numpy.ones",
"numpy.hstack",
"numpy.array",
"numpy.zeros",
"numpy.matmul",
"numpy.diagflat"
] | [((640, 656), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (648, 656), True, 'import numpy as np\n'), ((819, 854), 'numpy.matmul', 'np.matmul', (['self.De', 'deformation.eps'], {}), '(self.De, deformation.eps)\n', (828, 854), True, 'import numpy as np\n'), ((1305, 1321), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1313, 1321), True, 'import numpy as np\n'), ((1335, 1351), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (1343, 1351), True, 'import numpy as np\n'), ((1529, 1564), 'numpy.matmul', 'np.matmul', (['self.De', 'deformation.eps'], {}), '(self.De, deformation.eps)\n', (1538, 1564), True, 'import numpy as np\n'), ((1984, 2000), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1992, 2000), True, 'import numpy as np\n'), ((2014, 2030), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (2022, 2030), True, 'import numpy as np\n'), ((2208, 2243), 'numpy.matmul', 'np.matmul', (['self.De', 'deformation.eps'], {}), '(self.De, deformation.eps)\n', (2217, 2243), True, 'import numpy as np\n'), ((2591, 2606), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2598, 2606), True, 'import numpy as np\n'), ((2947, 2963), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2955, 2963), True, 'import numpy as np\n'), ((3002, 3040), 'numpy.diagflat', 'np.diagflat', (['[self.G12, self.G12, fc1]'], {}), '([self.G12, self.G12, fc1])\n', (3013, 3040), True, 'import numpy as np\n'), ((3162, 3197), 'numpy.matmul', 'np.matmul', (['self.De', 'deformation.eps'], {}), '(self.De, deformation.eps)\n', (3171, 3197), True, 'import numpy as np\n'), ((3640, 3656), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3648, 3656), True, 'import numpy as np\n'), ((3819, 3854), 'numpy.matmul', 'np.matmul', (['self.De', 'deformation.eps'], {}), '(self.De, deformation.eps)\n', (3828, 3854), True, 'import numpy as np\n'), ((699, 708), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (705, 708), True, 'import numpy as 
np\n'), ((732, 766), 'numpy.block', 'np.block', (['[[D11, D12], [D21, D22]]'], {}), '([[D11, D12], [D21, D22]])\n', (740, 766), True, 'import numpy as np\n'), ((1960, 1969), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1966, 1969), True, 'import numpy as np\n'), ((3074, 3108), 'numpy.block', 'np.block', (['[[D11, D12], [D21, D22]]'], {}), '([[D11, D12], [D21, D22]])\n', (3082, 3108), True, 'import numpy as np\n'), ((3699, 3708), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3705, 3708), True, 'import numpy as np\n'), ((3732, 3766), 'numpy.block', 'np.block', (['[[D11, D12], [D21, D22]]'], {}), '([[D11, D12], [D21, D22]])\n', (3740, 3766), True, 'import numpy as np\n'), ((616, 625), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (622, 625), True, 'import numpy as np\n'), ((1281, 1290), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1287, 1290), True, 'import numpy as np\n'), ((1365, 1380), 'numpy.array', 'np.array', (['[fc2]'], {}), '([fc2])\n', (1373, 1380), True, 'import numpy as np\n'), ((2044, 2059), 'numpy.array', 'np.array', (['[fc2]'], {}), '([fc2])\n', (2052, 2059), True, 'import numpy as np\n'), ((3616, 3625), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3622, 3625), True, 'import numpy as np\n'), ((582, 592), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (589, 592), True, 'import numpy as np\n'), ((593, 602), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (599, 602), True, 'import numpy as np\n'), ((1247, 1257), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1254, 1257), True, 'import numpy as np\n'), ((1258, 1267), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1264, 1267), True, 'import numpy as np\n'), ((1430, 1451), 'numpy.hstack', 'np.hstack', (['[D11, D12]'], {}), '([D11, D12])\n', (1439, 1451), True, 'import numpy as np\n'), ((1451, 1472), 'numpy.hstack', 'np.hstack', (['[D21, D22]'], {}), '([D21, D22])\n', (1460, 1472), True, 'import numpy as np\n'), ((1938, 1948), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1945, 1948), True, 
'import numpy as np\n'), ((1949, 1958), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1955, 1958), True, 'import numpy as np\n'), ((2109, 2130), 'numpy.hstack', 'np.hstack', (['[D11, D12]'], {}), '([D11, D12])\n', (2118, 2130), True, 'import numpy as np\n'), ((2130, 2151), 'numpy.hstack', 'np.hstack', (['[D21, D22]'], {}), '([D21, D22])\n', (2139, 2151), True, 'import numpy as np\n'), ((3582, 3592), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3589, 3592), True, 'import numpy as np\n'), ((3593, 3602), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3599, 3602), True, 'import numpy as np\n')] |
#!/usr/local/sci/bin/python
# PYTHON3
#
# Author: <NAME>
# Created: 07 Jan 2019
# Last update: 07 Jan 2019
# Location: /data/local/hadkw/HADCRUH2/MARINE/EUSTACEMDS/EUSTACE_SST_MAT/
# GitHub: https://github.com/Kate-Willett/HadISDH_Marine_Build/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code reads in the list of frerquencies for AT and DPT for each decimal place for each year and plots
# a two panel time series with a line for each decimal
#
# We are only really interested in 0 and 5 so for clarity the other numbers are the same colour.
#
# -----------------------
# LIST OF MODULES
# -----------------------
# inbuilt:
# import datetime as dt
## Folling two lines should be uncommented if using with SPICE or screen
## import matplotlib
## matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# import numpy as np
# from matplotlib.dates import date2num,num2date
# import sys, os
# import sys, getopt
# from scipy.optimize import curve_fit,fsolve,leastsq
# from scipy import pi,sqrt,exp
# from scipy.special import erf
# import scipy.stats
# from math import sqrt,pi
# import struct
# import pdb # pdb.set_trace() or c
#
# Kates:
# import MDS_basic_KATE as MDStool
#
# -----------------------
# DATA
# -----------------------
# /data/local/hadkw/HADCRUH2/MARINE/LISTS/
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# check the desired bits are uncommented/commented (filepaths etc)
#
# # First load up python 3 which is currently:
# module load scitools/experimental-current
# python PlotTSDecimal_JAN2019.py
#
# This runs the code, outputs the plots
#
# -----------------------
# OUTPUT
# -----------------------
# some plots:
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/SummaryDecimal_OBSclim2NBC_I300_55_JAN2019.png
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 1 (07 Jan 2019)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# START
#************************************************************************
import datetime as dt
# Folling two lines should be uncommented if using with SPICE or screen
## import matplotlib
## matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.dates import date2num,num2date
import sys, os
import sys, getopt
from scipy.optimize import curve_fit,fsolve,leastsq
from scipy import pi,sqrt,exp
from scipy.special import erf
import scipy.stats
from math import sqrt,pi
import struct
import pdb # pdb.set_trace() or c
#*************************************************************************
# READDATA
#*************************************************************************
def ReadData(FileName,typee,delimee):
    ''' Use numpy genfromtxt reading to read in all rows from a complex array '''
    ''' Need to specify format as it is complex '''
    ''' outputs an array of tuples that in turn need to be subscripted by their names defaults f0...f8 '''
    # `typee` is a tuple of per-field dtypes and `delimee` the matching tuple
    # of fixed column widths (genfromtxt names the fields f0, f1, ...).
    # NOTE(review): comments=False is not a documented value for genfromtxt
    # (it expects a string or None); presumably the intent was "no comment
    # character" -- confirm against the numpy version in use before changing.
    return np.genfromtxt(FileName, dtype=typee,delimiter=delimee,comments=False) # ReadData
#************************************************************************
# Main
#************************************************************************
# ICOADS source/threshold and platform selection; these build the input
# list filenames below.
source = 'I300_55' # ICOADS main source and threshold choice
switch = 'ships' # 'all', 'ships', 'buoys'
ittype = 'OBSclim2NBC'
nowmon = 'JAN'
nowyear = '2019'
# Period covered by the annual decimal-frequency files
StYr = 1973
EdYr = 2017
NYr = (EdYr-StYr)+1
INDIR = '/data/local/hadkw/HADCRUH2/MARINE/LISTS/'
INFILAT = 'DecimalFreqStatsAT_'+source+'_'+switch+'_'+ittype+'_'+nowmon+nowyear+'.txt'
INFILDPT = 'DecimalFreqStatsDPT_'+source+'_'+switch+'_'+ittype+'_'+nowmon+nowyear+'.txt'
OUTDIR = '/data/local/hadkw/HADCRUH2/MARINE/IMAGES/'
OutPlt = 'DecimalFreqTS2panel_'+source+'_'+switch+'_'+ittype+'_'+nowmon+nowyear
# create empty arrays for decimal data bundles
Yr = []
nobs = [] # we're looking at all obs, not just those with 'good' data
# Percentage of AT (air temperature) obs ending in each decimal digit 0..9
AT0s = []
AT1s = []
AT2s = []
AT3s = []
AT4s = []
AT5s = []
AT6s = []
AT7s = []
AT8s = []
AT9s = []
# Percentage of DPT (dewpoint) obs ending in each decimal digit 0..9
DPT0s = []
DPT1s = []
DPT2s = []
DPT3s = []
DPT4s = []
DPT5s = []
DPT6s = []
DPT7s = []
DPT8s = []
DPT9s = []
# genfromtxt dtypes for the fixed-format list files: five year/label fields,
# then nobs, then for each decimal digit 0-9 a (label, count, separator,
# percent) group, plus a trailing 2-character field. Fixed-width strings
# are read as bytes (|S..).
typee = ("int","|S5","|S3","|S3","|S7", # year
        "int", # nobs
        "|S4","int","|S2","float", # 0
        "|S7","int","|S2","float", # 1
        "|S7","int","|S2","float", # 2
        "|S7","int","|S2","float", # 3
        "|S7","int","|S2","float", # 4
        "|S7","int","|S2","float", # 5
        "|S7","int","|S2","float", # 6
        "|S7","int","|S2","float", # 7
        "|S7","int","|S2","float", # 8
        "|S7","int","|S2","float", # 9
        "|S2")
# Fixed column widths, field for field with `typee`
delimee = (4,5,3,3,7,
        8,
        4,8,2,6,
        7,8,2,6,
        7,8,2,6,
        7,8,2,6,
        7,8,2,6,
        7,8,2,6,
        7,8,2,6,
        7,8,2,6,
        7,8,2,6,
        7,8,2,6,
        2)
#pdb.set_trace()
# Read in AT Decimal file and populate lists
RawData = ReadData(INDIR+INFILAT,typee,delimee)
# Year, total obs count, then the percentage column of each decimal-digit
# group (every 4th genfromtxt field starting at f9).
Yr = np.array(RawData['f0'][0:NYr])
nobs = np.array(RawData['f5'][0:NYr])
AT0s = np.array(RawData['f9'][0:NYr])
AT1s = np.array(RawData['f13'][0:NYr])
AT2s = np.array(RawData['f17'][0:NYr])
AT3s = np.array(RawData['f21'][0:NYr])
AT4s = np.array(RawData['f25'][0:NYr])
AT5s = np.array(RawData['f29'][0:NYr])
AT6s = np.array(RawData['f33'][0:NYr])
AT7s = np.array(RawData['f37'][0:NYr])
AT8s = np.array(RawData['f41'][0:NYr])
AT9s = np.array(RawData['f45'][0:NYr])
# Indices of years with a non-zero percentage: zero-valued years are simply
# left out of the plots rather than drawn at 0.
gAT0s = np.where(AT0s > 0.)[0]
gAT1s = np.where(AT1s > 0.)[0]
gAT2s = np.where(AT2s > 0.)[0]
gAT3s = np.where(AT3s > 0.)[0]
gAT4s = np.where(AT4s > 0.)[0]
gAT5s = np.where(AT5s > 0.)[0]
gAT6s = np.where(AT6s > 0.)[0]
gAT7s = np.where(AT7s > 0.)[0]
gAT8s = np.where(AT8s > 0.)[0]
gAT9s = np.where(AT9s > 0.)[0]
# Read in DPT Decimal file and populate lists
RawData = ReadData(INDIR+INFILDPT,typee, delimee)
# YEAR and NOBS should be identical to AT
DPT0s = np.array(RawData['f9'][0:NYr])
DPT1s = np.array(RawData['f13'][0:NYr])
DPT2s = np.array(RawData['f17'][0:NYr])
DPT3s = np.array(RawData['f21'][0:NYr])
DPT4s = np.array(RawData['f25'][0:NYr])
DPT5s = np.array(RawData['f29'][0:NYr])
DPT6s = np.array(RawData['f33'][0:NYr])
DPT7s = np.array(RawData['f37'][0:NYr])
DPT8s = np.array(RawData['f41'][0:NYr])
DPT9s = np.array(RawData['f45'][0:NYr])
# Indices of years with a non-zero percentage (same scheme as gAT* above)
gDPT0s = np.where(DPT0s > 0.)[0]
gDPT1s = np.where(DPT1s > 0.)[0]
gDPT2s = np.where(DPT2s > 0.)[0]
gDPT3s = np.where(DPT3s > 0.)[0]
gDPT4s = np.where(DPT4s > 0.)[0]
gDPT5s = np.where(DPT5s > 0.)[0]
gDPT6s = np.where(DPT6s > 0.)[0]
gDPT7s = np.where(DPT7s > 0.)[0]
gDPT8s = np.where(DPT8s > 0.)[0]
gDPT9s = np.where(DPT9s > 0.)[0]
# Two-panel time series: % of obs ending in each decimal digit for AT (left)
# and DPT (right). Digit 0 is red, digit 5 is blue, all other digits black.
gap= 0.04  # (unused) spacing parameter, apparently left over
# New two panel plot
plt.clf()
fig = plt.figure(figsize=(10,5))
ax1 = plt.axes([0.07,0.10,0.41,0.88]) # left, bottom, width, height
ax1.plot(Yr[gAT0s],AT0s[gAT0s],c = 'red',linestyle = 'solid',linewidth = 2)
ax1.plot(Yr[gAT1s],AT1s[gAT1s],c = 'black',linestyle = 'solid',linewidth = 2)
ax1.plot(Yr[gAT2s],AT2s[gAT2s],c = 'black',linestyle = 'solid',linewidth = 2)
ax1.plot(Yr[gAT3s],AT3s[gAT3s],c = 'black',linestyle = 'solid',linewidth = 2)
ax1.plot(Yr[gAT4s],AT4s[gAT4s],c = 'black',linestyle = 'solid',linewidth = 2)
ax1.plot(Yr[gAT5s],AT5s[gAT5s],c = 'blue',linestyle = 'solid',linewidth = 2)
ax1.plot(Yr[gAT6s],AT6s[gAT6s],c = 'black',linestyle = 'solid',linewidth = 2)
ax1.plot(Yr[gAT7s],AT7s[gAT7s],c = 'black',linestyle = 'solid',linewidth = 2)
ax1.plot(Yr[gAT8s],AT8s[gAT8s],c = 'black',linestyle = 'solid',linewidth = 2)
ax1.plot(Yr[gAT9s],AT9s[gAT9s],c = 'black',linestyle = 'solid',linewidth = 2)
ax1.set_xlabel('Year')
ax1.set_ylabel('% of Air Temperatures', color='black')
ax1.set_ylim(0,40)
ax1.set_xlim(StYr-1,EdYr+1)
# Panel label and in-panel colour key
ax1.annotate("a)",xy=(0.04,0.94),xycoords='axes fraction',size=14,color='black')
ax1.annotate("0s",xy=(0.94,0.90),xycoords='axes fraction',size=14,color='red',horizontalalignment='right')
ax1.annotate("5s",xy=(0.94,0.86),xycoords='axes fraction',size=14,color='blue',horizontalalignment='right')
ax1.annotate("Other",xy=(0.94,0.82),xycoords='axes fraction',size=14,color='black',horizontalalignment='right')
ax2 = plt.axes([0.57,0.10,0.41,0.88]) # left, bottom, width, height
ax2.set_ylabel('% of Dewpoint Temperatures', color='black')
ax2.set_xlabel('Year')
ax2.set_xlim(StYr-1,EdYr+1)
ax2.set_ylim(0,40)
ax2.plot(Yr[gDPT0s],DPT0s[gDPT0s],c = 'red',linestyle = 'solid',linewidth = 2)
ax2.plot(Yr[gDPT1s],DPT1s[gDPT1s],c = 'black',linestyle = 'solid',linewidth = 2)
ax2.plot(Yr[gDPT2s],DPT2s[gDPT2s],c = 'black',linestyle = 'solid',linewidth = 2)
ax2.plot(Yr[gDPT3s],DPT3s[gDPT3s],c = 'black',linestyle = 'solid',linewidth = 2)
ax2.plot(Yr[gDPT4s],DPT4s[gDPT4s],c = 'black',linestyle = 'solid',linewidth = 2)
ax2.plot(Yr[gDPT5s],DPT5s[gDPT5s],c = 'blue',linestyle = 'solid',linewidth = 2)
ax2.plot(Yr[gDPT6s],DPT6s[gDPT6s],c = 'black',linestyle = 'solid',linewidth = 2)
ax2.plot(Yr[gDPT7s],DPT7s[gDPT7s],c = 'black',linestyle = 'solid',linewidth = 2)
ax2.plot(Yr[gDPT8s],DPT8s[gDPT8s],c = 'black',linestyle = 'solid',linewidth = 2)
ax2.plot(Yr[gDPT9s],DPT9s[gDPT9s],c = 'black',linestyle = 'solid',linewidth = 2)
ax2.annotate("b)",xy=(0.94,0.94),xycoords='axes fraction',size=14,color='black',horizontalalignment='left')
ax2.annotate("0s",xy=(0.94,0.90),xycoords='axes fraction',size=14,color='red',horizontalalignment='right')
ax2.annotate("5s",xy=(0.94,0.86),xycoords='axes fraction',size=14,color='blue',horizontalalignment='right')
ax2.annotate("Other",xy=(0.94,0.82),xycoords='axes fraction',size=14,color='black',horizontalalignment='right')
#plt.tight_layout()
#plt.savefig(OUTDIR+OutPlt+".eps")
plt.savefig(OUTDIR+OutPlt+".png")
#pdb.set_trace()
print("And were done")
#************************************************************************
| [
"matplotlib.pyplot.savefig",
"numpy.where",
"matplotlib.pyplot.clf",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"numpy.genfromtxt"
] | [((5298, 5328), 'numpy.array', 'np.array', (["RawData['f0'][0:NYr]"], {}), "(RawData['f0'][0:NYr])\n", (5306, 5328), True, 'import numpy as np\n'), ((5336, 5366), 'numpy.array', 'np.array', (["RawData['f5'][0:NYr]"], {}), "(RawData['f5'][0:NYr])\n", (5344, 5366), True, 'import numpy as np\n'), ((5374, 5404), 'numpy.array', 'np.array', (["RawData['f9'][0:NYr]"], {}), "(RawData['f9'][0:NYr])\n", (5382, 5404), True, 'import numpy as np\n'), ((5412, 5443), 'numpy.array', 'np.array', (["RawData['f13'][0:NYr]"], {}), "(RawData['f13'][0:NYr])\n", (5420, 5443), True, 'import numpy as np\n'), ((5451, 5482), 'numpy.array', 'np.array', (["RawData['f17'][0:NYr]"], {}), "(RawData['f17'][0:NYr])\n", (5459, 5482), True, 'import numpy as np\n'), ((5490, 5521), 'numpy.array', 'np.array', (["RawData['f21'][0:NYr]"], {}), "(RawData['f21'][0:NYr])\n", (5498, 5521), True, 'import numpy as np\n'), ((5529, 5560), 'numpy.array', 'np.array', (["RawData['f25'][0:NYr]"], {}), "(RawData['f25'][0:NYr])\n", (5537, 5560), True, 'import numpy as np\n'), ((5568, 5599), 'numpy.array', 'np.array', (["RawData['f29'][0:NYr]"], {}), "(RawData['f29'][0:NYr])\n", (5576, 5599), True, 'import numpy as np\n'), ((5607, 5638), 'numpy.array', 'np.array', (["RawData['f33'][0:NYr]"], {}), "(RawData['f33'][0:NYr])\n", (5615, 5638), True, 'import numpy as np\n'), ((5646, 5677), 'numpy.array', 'np.array', (["RawData['f37'][0:NYr]"], {}), "(RawData['f37'][0:NYr])\n", (5654, 5677), True, 'import numpy as np\n'), ((5685, 5716), 'numpy.array', 'np.array', (["RawData['f41'][0:NYr]"], {}), "(RawData['f41'][0:NYr])\n", (5693, 5716), True, 'import numpy as np\n'), ((5724, 5755), 'numpy.array', 'np.array', (["RawData['f45'][0:NYr]"], {}), "(RawData['f45'][0:NYr])\n", (5732, 5755), True, 'import numpy as np\n'), ((6273, 6303), 'numpy.array', 'np.array', (["RawData['f9'][0:NYr]"], {}), "(RawData['f9'][0:NYr])\n", (6281, 6303), True, 'import numpy as np\n'), ((6312, 6343), 'numpy.array', 'np.array', 
(["RawData['f13'][0:NYr]"], {}), "(RawData['f13'][0:NYr])\n", (6320, 6343), True, 'import numpy as np\n'), ((6352, 6383), 'numpy.array', 'np.array', (["RawData['f17'][0:NYr]"], {}), "(RawData['f17'][0:NYr])\n", (6360, 6383), True, 'import numpy as np\n'), ((6392, 6423), 'numpy.array', 'np.array', (["RawData['f21'][0:NYr]"], {}), "(RawData['f21'][0:NYr])\n", (6400, 6423), True, 'import numpy as np\n'), ((6432, 6463), 'numpy.array', 'np.array', (["RawData['f25'][0:NYr]"], {}), "(RawData['f25'][0:NYr])\n", (6440, 6463), True, 'import numpy as np\n'), ((6472, 6503), 'numpy.array', 'np.array', (["RawData['f29'][0:NYr]"], {}), "(RawData['f29'][0:NYr])\n", (6480, 6503), True, 'import numpy as np\n'), ((6512, 6543), 'numpy.array', 'np.array', (["RawData['f33'][0:NYr]"], {}), "(RawData['f33'][0:NYr])\n", (6520, 6543), True, 'import numpy as np\n'), ((6552, 6583), 'numpy.array', 'np.array', (["RawData['f37'][0:NYr]"], {}), "(RawData['f37'][0:NYr])\n", (6560, 6583), True, 'import numpy as np\n'), ((6592, 6623), 'numpy.array', 'np.array', (["RawData['f41'][0:NYr]"], {}), "(RawData['f41'][0:NYr])\n", (6600, 6623), True, 'import numpy as np\n'), ((6632, 6663), 'numpy.array', 'np.array', (["RawData['f45'][0:NYr]"], {}), "(RawData['f45'][0:NYr])\n", (6640, 6663), True, 'import numpy as np\n'), ((7136, 7145), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7143, 7145), True, 'import matplotlib.pyplot as plt\n'), ((7152, 7179), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (7162, 7179), True, 'import matplotlib.pyplot as plt\n'), ((7185, 7218), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.07, 0.1, 0.41, 0.88]'], {}), '([0.07, 0.1, 0.41, 0.88])\n', (7193, 7218), True, 'import matplotlib.pyplot as plt\n'), ((8566, 8599), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.57, 0.1, 0.41, 0.88]'], {}), '([0.57, 0.1, 0.41, 0.88])\n', (8574, 8599), True, 'import matplotlib.pyplot as plt\n'), ((10059, 10096), 'matplotlib.pyplot.savefig', 
'plt.savefig', (["(OUTDIR + OutPlt + '.png')"], {}), "(OUTDIR + OutPlt + '.png')\n", (10070, 10096), True, 'import matplotlib.pyplot as plt\n'), ((3176, 3247), 'numpy.genfromtxt', 'np.genfromtxt', (['FileName'], {'dtype': 'typee', 'delimiter': 'delimee', 'comments': '(False)'}), '(FileName, dtype=typee, delimiter=delimee, comments=False)\n', (3189, 3247), True, 'import numpy as np\n'), ((5823, 5843), 'numpy.where', 'np.where', (['(AT0s > 0.0)'], {}), '(AT0s > 0.0)\n', (5831, 5843), True, 'import numpy as np\n'), ((5854, 5874), 'numpy.where', 'np.where', (['(AT1s > 0.0)'], {}), '(AT1s > 0.0)\n', (5862, 5874), True, 'import numpy as np\n'), ((5885, 5905), 'numpy.where', 'np.where', (['(AT2s > 0.0)'], {}), '(AT2s > 0.0)\n', (5893, 5905), True, 'import numpy as np\n'), ((5916, 5936), 'numpy.where', 'np.where', (['(AT3s > 0.0)'], {}), '(AT3s > 0.0)\n', (5924, 5936), True, 'import numpy as np\n'), ((5947, 5967), 'numpy.where', 'np.where', (['(AT4s > 0.0)'], {}), '(AT4s > 0.0)\n', (5955, 5967), True, 'import numpy as np\n'), ((5978, 5998), 'numpy.where', 'np.where', (['(AT5s > 0.0)'], {}), '(AT5s > 0.0)\n', (5986, 5998), True, 'import numpy as np\n'), ((6009, 6029), 'numpy.where', 'np.where', (['(AT6s > 0.0)'], {}), '(AT6s > 0.0)\n', (6017, 6029), True, 'import numpy as np\n'), ((6040, 6060), 'numpy.where', 'np.where', (['(AT7s > 0.0)'], {}), '(AT7s > 0.0)\n', (6048, 6060), True, 'import numpy as np\n'), ((6071, 6091), 'numpy.where', 'np.where', (['(AT8s > 0.0)'], {}), '(AT8s > 0.0)\n', (6079, 6091), True, 'import numpy as np\n'), ((6102, 6122), 'numpy.where', 'np.where', (['(AT9s > 0.0)'], {}), '(AT9s > 0.0)\n', (6110, 6122), True, 'import numpy as np\n'), ((6733, 6754), 'numpy.where', 'np.where', (['(DPT0s > 0.0)'], {}), '(DPT0s > 0.0)\n', (6741, 6754), True, 'import numpy as np\n'), ((6766, 6787), 'numpy.where', 'np.where', (['(DPT1s > 0.0)'], {}), '(DPT1s > 0.0)\n', (6774, 6787), True, 'import numpy as np\n'), ((6799, 6820), 'numpy.where', 'np.where', (['(DPT2s > 
0.0)'], {}), '(DPT2s > 0.0)\n', (6807, 6820), True, 'import numpy as np\n'), ((6832, 6853), 'numpy.where', 'np.where', (['(DPT3s > 0.0)'], {}), '(DPT3s > 0.0)\n', (6840, 6853), True, 'import numpy as np\n'), ((6865, 6886), 'numpy.where', 'np.where', (['(DPT4s > 0.0)'], {}), '(DPT4s > 0.0)\n', (6873, 6886), True, 'import numpy as np\n'), ((6898, 6919), 'numpy.where', 'np.where', (['(DPT5s > 0.0)'], {}), '(DPT5s > 0.0)\n', (6906, 6919), True, 'import numpy as np\n'), ((6931, 6952), 'numpy.where', 'np.where', (['(DPT6s > 0.0)'], {}), '(DPT6s > 0.0)\n', (6939, 6952), True, 'import numpy as np\n'), ((6964, 6985), 'numpy.where', 'np.where', (['(DPT7s > 0.0)'], {}), '(DPT7s > 0.0)\n', (6972, 6985), True, 'import numpy as np\n'), ((6997, 7018), 'numpy.where', 'np.where', (['(DPT8s > 0.0)'], {}), '(DPT8s > 0.0)\n', (7005, 7018), True, 'import numpy as np\n'), ((7030, 7051), 'numpy.where', 'np.where', (['(DPT9s > 0.0)'], {}), '(DPT9s > 0.0)\n', (7038, 7051), True, 'import numpy as np\n')] |
import numpy as np

# Homogeneous (x, y, z, w=1) corners of the unit cube.
# Ordering: x varies fastest, then y, then z, so corner index = x + 2y + 4z.
_bbox_corners = np.array(
    [[x, y, z, 1.0] for z in (0, 1) for y in (0, 1) for x in (0, 1)],
    dtype="float32",
)

# The 12 triangles (36 vertices) covering the unit-cube surface, written as
# indices into `_bbox_corners` (one row of 6 indices per pair of triangles).
_CUBE_TRI_CORNERS = [
    0, 4, 6, 3, 0, 2,
    5, 0, 1, 3, 1, 0,
    0, 6, 2, 5, 4, 0,
    6, 4, 5, 7, 1, 3,
    1, 7, 5, 7, 3, 2,
    7, 2, 6, 7, 6, 5,
]
bbox_vertices = _bbox_corners[_CUBE_TRI_CORNERS]

# Two triangles spanning NDC [-1, 1] x [-1, 1] at z = 0, flattened to
# 18 floats (x, y, z per vertex).
FULLSCREEN_QUAD = np.array(
    [(-1.0, -1.0, 0.0), (1.0, -1.0, 0.0), (-1.0, 1.0, 0.0),
     (-1.0, 1.0, 0.0), (1.0, -1.0, 0.0), (1.0, 1.0, 0.0)],
    dtype=np.float32,
).ravel()

# Unit-cube surface as a single 14-vertex triangle strip.
aabb_triangle_strip = _bbox_corners[[6, 7, 4, 5, 1, 7, 3, 6, 2, 4, 0, 1, 2, 3]]
| [
"numpy.array"
] | [((36, 905), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0], [1.0, \n 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0], [1.0, 0.0, \n 1.0, 1.0], [0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, \n 1.0], [1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0, 1.0], [0.0,\n 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 1.0], [0.0, 0.0, \n 1.0, 1.0], [1.0, 0.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, \n 1.0], [1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0],\n [1.0, 0.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0], [0.0,\n 1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0], [0.0, 1.0, \n 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0], [1.0, 0.0, 1.0, 1.0]\n ]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0], [1.0,\n 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [1.0, 1.0, \n 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, \n 1.0], [0.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 1.0], [0.0,\n 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 0.0, \n 0.0, 1.0], [1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, \n 1.0], [1.0, 0.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0],\n [0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0], [0.0,\n 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0], [1.0, 0.0, \n 1.0, 1.0]], dtype=np.float32)\n', (44, 905), True, 'import numpy as np\n'), ((1181, 1316), 'numpy.array', 'np.array', (['[-1.0, -1.0, 0.0, +1.0, -1.0, 0.0, -1.0, +1.0, 0.0, -1.0, +1.0, 0.0, +1.0, \n -1.0, 0.0, +1.0, +1.0, 0.0]'], {'dtype': 'np.float32'}), '([-1.0, -1.0, 0.0, +1.0, -1.0, 
0.0, -1.0, +1.0, 0.0, -1.0, +1.0, \n 0.0, +1.0, -1.0, 0.0, +1.0, +1.0, 0.0], dtype=np.float32)\n', (1189, 1316), True, 'import numpy as np\n'), ((1491, 1656), 'numpy.array', 'np.array', (['[[0, 0, 0, 1.0], [1, 0, 0, 1.0], [0, 1, 0, 1.0], [1, 1, 0, 1.0], [0, 0, 1, \n 1.0], [1, 0, 1, 1.0], [0, 1, 1, 1.0], [1, 1, 1, 1.0]]'], {'dtype': '"""float32"""'}), "([[0, 0, 0, 1.0], [1, 0, 0, 1.0], [0, 1, 0, 1.0], [1, 1, 0, 1.0], [\n 0, 0, 1, 1.0], [1, 0, 1, 1.0], [0, 1, 1, 1.0], [1, 1, 1, 1.0]], dtype=\n 'float32')\n", (1499, 1656), True, 'import numpy as np\n'), ((1752, 1853), 'numpy.array', 'np.array', (['[_bbox_corners[_] for _ in (6, 7, 4, 5, 1, 7, 3, 6, 2, 4, 0, 1, 2, 3)]'], {'dtype': '"""float32"""'}), "([_bbox_corners[_] for _ in (6, 7, 4, 5, 1, 7, 3, 6, 2, 4, 0, 1, 2,\n 3)], dtype='float32')\n", (1760, 1853), True, 'import numpy as np\n')] |
### Author <NAME> - 29 September 2020 ###
import pandas as pd
import numpy as np
import json
from gooey import Gooey, GooeyParser
import _pickle as cPickle
from collections import Counter
import warnings
import webbrowser
import time
from sklearn.ensemble import RandomForestClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
import sys
import os
path_main = "/".join(os.path.realpath(__file__).split("/")[:-1])
sys.path.append(path_main + '/Classes/')
sys.path.append(path_main + '/Utils/')
from media_class import Medium, Supplement, GrowthMedium, Medium_one_hot, Supplement_one_hot, GrowthMedium_one_hot
from gene_one_hot import one_hot
from help_functions import mean, str_to_bool, str_none_check
from message import display_message
@Gooey(dump_build_config=False,
      program_name="CellCulturePy",
      richtext_controls=True,
      required_cols=3,
      optional_cols=1,
      default_size=(1300, 800))
def main():
    """GUI entry point: gather the sample description and TWIST files via
    Gooey, run the media prediction, and show the ranked result (written to
    tmp.html by predict) in the default browser."""
    # Cached feature/choice tables produced at training time
    Cache_pickle = pd.read_pickle('Data/cache_features.pkl')
    with open("Data/cache_features.json", "r") as read_file:
        Cache_json = json.load(read_file)
    # Assign variables for the dropdown choices (sorted for display)
    TumorType_arg = Cache_json["cache_diseases_highest"]
    TumorType_arg.sort()
    Tissue_arg = Cache_json["cache_tumor_site"]
    Tissue_arg.sort()
    parser = GooeyParser(description="Predicting media conditions with genomic profile")
    parser.add_argument("TumorType", help="what is your tumor type", choices=TumorType_arg)
    parser.add_argument("Tissue", help="what is your tissue type", choices=Tissue_arg)
    parser.add_argument("Dimension", help="what is your growing dimension", choices=Cache_json["cache_dimension"])
    parser.add_argument("maf", help="Select maf file from TWIST (mutect1 or 2)", widget="FileChooser")
    parser.add_argument("cnv", help="Select cnv file from TWIST (.tumor.called)", widget="FileChooser")
    parser.add_argument("-m", dest="Media", nargs='+', default=False, choices=Cache_json["cache_media"], help="you can select one or multiple media types you want to look for", widget="Listbox")
    parser.add_argument("-s", dest="Supplements", action="store_true", default=False, help="Do you want to include looking for supplements (default: No)")
    args = parser.parse_args()
    display_message(part=1)
    # Bug fix: honour the -s/Supplements checkbox instead of hard-coding
    # supplements=False (predict currently ignores the flag, so the
    # prediction itself is unchanged, but the user's choice is now passed on).
    predict(Cache_json=Cache_json, Cache_pickle=Cache_pickle, TumorType=args.TumorType, Tissue=args.Tissue, Dimension=args.Dimension, maf=args.maf, cnv=args.cnv, media=str_to_bool(args.Media), supplements=args.Supplements)
    display_message(part=2)
    # Displaying: open the HTML report next to this script, then clean it up
    path_main = ("/".join(os.path.realpath(__file__).split("/")[:-1]))
    webbrowser.open('file://' + path_main + "/tmp.html")
    time.sleep(5)
    os.remove(path_main + "/tmp.html")
def maf_extract(maf):
    """Parse a TWIST maf file into a DataFrame of (likely) coding variants.

    Skips '#' comment lines, uses the first non-comment line as the column
    header, and drops rows whose Variant_Classification is non-coding
    (Intron, lincRNA, IGR, 5'Flank, 5'UTR, Silent, 3'UTR, RNA).

    Returns:
        (maf_frame, file_name): the filtered DataFrame and the basename of
        the input path.
    """
    id_ = []
    data_dict = {}
    file_name = maf.split('/')[-1]
    i = 0
    with open(maf, 'r', encoding="latin-1") as f:
        try:
            for line in f:
                if line.startswith("#"):
                    continue
                elif not id_:
                    # First non-comment line holds the column names
                    id_ = line.replace('\n', '').split('\t')
                else:
                    data_dict[i] = line.replace('\n', '').split('\t')
                    i += 1
        except Exception:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, and DeprecationWarning is hidden
            # by default so the message was never shown -- warn loudly with
            # UserWarning and only catch genuine errors.
            warnings.warn(f"File: {file_name}, had problems with unrecognizable symbols", UserWarning)
    maf_frame = pd.DataFrame.from_dict(data_dict, orient="index", columns=id_)
    maf_frame = maf_frame[~maf_frame["Variant_Classification"].isin(["Intron", "lincRNA", "IGR", "5'Flank", "5'UTR", "Silent", "3'UTR", "RNA"])]
    return maf_frame, file_name
def cnv_extract(cnv):
    """Collapse a TWIST .tumor.called CNV table to one value per chromosome.

    For every chromosome 1-22/X/Y: the Num_Probes-weighted mean of
    Segment_Mean when several segments are present, the single
    Segment_Mean when there is one, and 1 when the chromosome is absent.

    Returns:
        (cnv_data, file_name): a two-column DataFrame (Chromosome, Value)
        and the basename of the input path.
    """
    file_name = cnv.split('/')[-1]
    segments = pd.read_csv(cnv, sep="\t")
    chromosomes = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "X", "Y"]
    values = {}
    for chrom in chromosomes:
        subset = segments[segments["Chromosome"] == chrom]
        if type(subset) == pd.core.series.Series or subset.empty == True:
            # No segment called for this chromosome
            values[chrom] = 1
        elif len(subset) == 1:
            values[chrom] = subset["Segment_Mean"].tolist()[0]
        else:
            # Probe-count weighted mean over all segments
            probes = subset["Num_Probes"]
            values[chrom] = sum(probes * subset["Segment_Mean"]) / sum(probes)
    series = pd.Series(values, name='Value')
    series.index.name = 'Chromosome'
    return series.reset_index(), file_name
def predict(Cache_json, Cache_pickle, TumorType, Tissue, Dimension, maf, cnv, media=False, supplements=False):
    """Encode the sample as a one-hot feature vector, score every medium and
    medium pair with the pickled random-forest model, and write the ranked
    result to tmp.html.

    Returns the ranked predictions as a DataFrame with columns
    Media / value / Media1 / Media2. `supplements` is currently unused;
    `media` selection is accepted but not implemented (all media are scored).
    """
    # NOTE(review): these one-hot objects come out of the cached pickle and
    # are mutated in place below -- the cache row itself is modified as a
    # side effect of calling predict.
    one_hot_features = Cache_pickle.iloc[0]["Features"]
    one_hot_media = Cache_pickle.iloc[0]["Media"]
    #TumorType info: set the matching feature bit
    idx = list(one_hot_features.levels).index(TumorType)
    counts = one_hot_features.counts
    counts[idx] = 1
    one_hot_features.counts = counts
    #Tissue info: the same label can appear twice in `levels`;
    # the second occurrence is preferred when it does.
    levels = list(one_hot_features.levels)
    idx = [i for i in range(len(levels)) if levels[i] == Tissue]
    if len(idx) > 1:
        idx = idx[1]
    else:
        idx = idx[0]
    counts = one_hot_features.counts
    counts[idx] = 1
    one_hot_features.counts = counts
    #Dimension info (same duplicate-label handling as Tissue)
    levels = list(one_hot_features.levels)
    idx = [i for i in range(len(levels)) if levels[i] == Dimension]
    if len(idx) > 1:
        idx = idx[1]
    else:
        idx = idx[0]
    counts = one_hot_features.counts
    counts[idx] = 1
    one_hot_features.counts = counts
    # Maf info: flag every mutated gene that exists among the feature levels
    maf_data, maf_name = maf_extract(maf)
    if maf_data.empty:
        print("Mutation file is empty as nothing was probably protein coding")
    else:
        for i, row in maf_data.iterrows():
            levels = list(one_hot_features.levels)
            idx = [i for i in range(len(levels)) if levels[i] == row["Hugo_Symbol"]]
            if not idx:
                continue
            else:
                counts = one_hot_features.counts
                counts[idx] = 1
                one_hot_features.counts = counts
    # CNV Info: store each chromosome's mean segment value in its feature slot
    cnv_data, cnv_name = cnv_extract(cnv)
    if cnv_data.empty:
        print("CNV file is empty as no copy number variations")
    else:
        for i, row in cnv_data.iterrows():
            idx = list(one_hot_features.levels).index(row["Chromosome"])
            counts = one_hot_features.counts
            counts[idx] = row["Value"]
            one_hot_features.counts = counts
    #Load model (warnings suppressed around unpickling)
    with open('Models/RF_model.sav', 'rb') as f:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            rf = cPickle.load(f)
    # Get media input -- selection is not implemented, all media are scored
    if media != False:
        if len(media) == 1:
            print("you only added 1 media type, so will use all")
        else:
            print("Media selection currently not implemented, using all media types")
    #Loop through media append to features: one candidate feature vector per
    # single medium (i == j) and per ordered medium pair (i != j).
    media = one_hot_media.media
    length_loop = len(media.counts)
    input_data = []
    media_name = []
    for i in range(0, length_loop):
        for j in range(0, length_loop):
            # `media_tmp` aliases media.counts, so each bit set here must be
            # cleared again at the bottom of the iteration (and is).
            media_tmp = media.counts
            if i == j:
                media_tmp[i] = 1
                media_name.append(f"{media.levels[i]}")
            else:
                media_tmp[i] = 1
                media_tmp[j] = 1
                media_name.append(f"{media.levels[i]} and {media.levels[j]}")
            # NOTE(review): `one_hot` shadows the module-level name imported
            # from gene_one_hot inside this function.
            one_hot = np.append(one_hot_features.counts, media_tmp)
            input_data.append(one_hot)
            if i == j:
                media_tmp[i] = 0
            else:
                media_tmp[i] = 0
                media_tmp[j] = 0
        else:
            # for/else on the inner loop: always taken, effectively a no-op
            continue
    predictions_proba = rf.predict_proba(input_data)
    # Get predictions for growing (probability of the positive class)
    predictions = [predictions_proba[i,1] for i in range(len(predictions_proba))]
    # Rank all candidates from most to least likely to grow
    idx = np.argsort(-np.array(predictions))
    order_pred = [predictions[i] for i in idx]
    order_media = [media_name[i] for i in idx]
    # Make this readable and not overly populated with same media
    pred_df = pd.DataFrame({"Media": order_media, "value": order_pred})
    # Split "A and B" names into two columns; singles get Media2 = "NaN"
    dis = [x.split(" and ") for x in pred_df["Media"]]
    dis = [x+["NaN"] if len(x) == 1 else x for x in dis]
    pred_df[["Media1","Media2"]] = pd.DataFrame(dis)
    # Create a readable dataframe: group the pairs under their main medium,
    # blanking the repeated main-medium label after its first row
    ranked_media = pred_df[pred_df["Media2"] == "NaN"]["Media1"].tolist()
    output_df = []
    for med in ranked_media:
        tmp_df = pred_df[pred_df["Media1"] == med]
        media1 = ["" if i >= 1 else x for i,x in enumerate(tmp_df["Media1"])]
        tmp_out = pd.DataFrame({"Main media": media1, "Second media": tmp_df["Media2"].tolist(), "Probability of growing": tmp_df["value"].tolist()})
        output_df.append(tmp_out)
    output_df = pd.concat(output_df)
    output_df.to_html('tmp.html')
    return pred_df
if __name__ == '__main__':
main() | [
"pandas.read_csv",
"webbrowser.open",
"time.sleep",
"numpy.array",
"sys.path.append",
"os.remove",
"pandas.read_pickle",
"pandas.DataFrame.from_dict",
"_pickle.load",
"pandas.concat",
"warnings.simplefilter",
"pandas.DataFrame",
"warnings.warn",
"pandas.Series",
"help_functions.str_to_bo... | [((435, 475), 'sys.path.append', 'sys.path.append', (["(path_main + '/Classes/')"], {}), "(path_main + '/Classes/')\n", (450, 475), False, 'import sys\n'), ((476, 514), 'sys.path.append', 'sys.path.append', (["(path_main + '/Utils/')"], {}), "(path_main + '/Utils/')\n", (491, 514), False, 'import sys\n'), ((762, 915), 'gooey.Gooey', 'Gooey', ([], {'dump_build_config': '(False)', 'program_name': '"""CellCulturePy"""', 'richtext_controls': '(True)', 'required_cols': '(3)', 'optional_cols': '(1)', 'default_size': '(1300, 800)'}), "(dump_build_config=False, program_name='CellCulturePy',\n richtext_controls=True, required_cols=3, optional_cols=1, default_size=\n (1300, 800))\n", (767, 915), False, 'from gooey import Gooey, GooeyParser\n'), ((958, 999), 'pandas.read_pickle', 'pd.read_pickle', (['"""Data/cache_features.pkl"""'], {}), "('Data/cache_features.pkl')\n", (972, 999), True, 'import pandas as pd\n'), ((1294, 1369), 'gooey.GooeyParser', 'GooeyParser', ([], {'description': '"""Predicting media conditions with genomic profile"""'}), "(description='Predicting media conditions with genomic profile')\n", (1305, 1369), False, 'from gooey import Gooey, GooeyParser\n'), ((2257, 2280), 'message.display_message', 'display_message', ([], {'part': '(1)'}), '(part=1)\n', (2272, 2280), False, 'from message import display_message\n'), ((2497, 2520), 'message.display_message', 'display_message', ([], {'part': '(2)'}), '(part=2)\n', (2512, 2520), False, 'from message import display_message\n'), ((2618, 2670), 'webbrowser.open', 'webbrowser.open', (["('file://' + path_main + '/tmp.html')"], {}), "('file://' + path_main + '/tmp.html')\n", (2633, 2670), False, 'import webbrowser\n'), ((2675, 2688), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2685, 2688), False, 'import time\n'), ((2693, 2727), 'os.remove', 'os.remove', (["(path_main + '/tmp.html')"], {}), "(path_main + '/tmp.html')\n", (2702, 2727), False, 'import os\n'), ((3357, 3419), 
'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_dict'], {'orient': '"""index"""', 'columns': 'id_'}), "(data_dict, orient='index', columns=id_)\n", (3379, 3419), True, 'import pandas as pd\n'), ((3694, 3720), 'pandas.read_csv', 'pd.read_csv', (['cnv'], {'sep': '"""\t"""'}), "(cnv, sep='\\t')\n", (3705, 3720), True, 'import pandas as pd\n'), ((4563, 4596), 'pandas.Series', 'pd.Series', (['cnv_data'], {'name': '"""Value"""'}), "(cnv_data, name='Value')\n", (4572, 4596), True, 'import pandas as pd\n'), ((8313, 8370), 'pandas.DataFrame', 'pd.DataFrame', (["{'Media': order_media, 'value': order_pred}"], {}), "({'Media': order_media, 'value': order_pred})\n", (8325, 8370), True, 'import pandas as pd\n'), ((8518, 8535), 'pandas.DataFrame', 'pd.DataFrame', (['dis'], {}), '(dis)\n', (8530, 8535), True, 'import pandas as pd\n'), ((9023, 9043), 'pandas.concat', 'pd.concat', (['output_df'], {}), '(output_df)\n', (9032, 9043), True, 'import pandas as pd\n'), ((1083, 1103), 'json.load', 'json.load', (['read_file'], {}), '(read_file)\n', (1092, 1103), False, 'import json\n'), ((2449, 2472), 'help_functions.str_to_bool', 'str_to_bool', (['args.Media'], {}), '(args.Media)\n', (2460, 2472), False, 'from help_functions import mean, str_to_bool, str_none_check\n'), ((6730, 6755), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (6753, 6755), False, 'import warnings\n'), ((6769, 6800), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (6790, 6800), False, 'import warnings\n'), ((6818, 6833), '_pickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (6830, 6833), True, 'import _pickle as cPickle\n'), ((7636, 7681), 'numpy.append', 'np.append', (['one_hot_features.counts', 'media_tmp'], {}), '(one_hot_features.counts, media_tmp)\n', (7645, 7681), True, 'import numpy as np\n'), ((8115, 8136), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (8123, 8136), True, 'import numpy as np\n'), ((391, 
417), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (407, 417), False, 'import os\n'), ((3242, 3343), 'warnings.warn', 'warnings.warn', (['f"""File: {file_name}, had problems with unrecognizable symbols"""', 'DeprecationWarning'], {}), "(f'File: {file_name}, had problems with unrecognizable symbols',\n DeprecationWarning)\n", (3255, 3343), False, 'import warnings\n'), ((2569, 2595), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2585, 2595), False, 'import os\n')] |
"""
Volumes
=======
A :class:`Volume` represents a 3D region of space with a fixed, scalar volume. It corresponds
to the "box" used in simulations. The following box types have been implemented:
.. autosummary::
:nosignatures:
Parallelepiped
TriclinicBox
Cuboid
Cube
The :class:`TriclinicBox` can be constructed using both the LAMMPS and HOOMD-blue
:class:`TriclinicBox.Convention`\s for applying tilt factors.
Examples
--------
Construct a simulation box with defined basis vectors and volume::
    >>> v = relentless.volume.Cube(L=3)
>>> print(v.a)
[3.0 0.0 0.0]
>>> print(v.b)
[0.0 3.0 0.0]
>>> print(v.c)
[0.0 0.0 3.0]
>>> print(v.volume)
27.0
.. rubric:: Developer notes
To implement your own simulation box, create a class that derives from :class:`Volume`
and define the required methods.
.. autosummary::
:nosignatures:
Volume
.. autoclass:: Volume
:members:
.. autoclass:: Parallelepiped
:members:
.. autoclass:: TriclinicBox
:members:
.. autoclass:: Cuboid
:members:
.. autoclass:: Cube
:members:
"""
import abc
from enum import Enum
import numpy
class Volume(abc.ABC):
    r"""Abstract description of a fixed region of space.

    A Volume is typically a simulation "box". Concrete volumes must expose
    a scalar :attr:`volume` and JSON (de)serialization hooks so that a box
    can be written to disk and rebuilt later.
    """

    @property
    @abc.abstractmethod
    def volume(self):
        r"""float: Scalar volume of the region."""
        pass

    @abc.abstractmethod
    def to_json(self):
        r"""Serialize the volume into a JSON-compatible dictionary.

        Returns
        -------
        dict
            The serialized :class:`Volume` data.

        """
        pass

    @classmethod
    @abc.abstractmethod
    def from_json(cls, data):
        r"""Rebuild a volume from its serialized dictionary form.

        Returns
        -------
        :class:`Volume`
            The object reconstructed from the ``data``.

        """
        pass
class Parallelepiped(Volume):
    r"""Parallelepiped box spanned by three vectors.

    The vectors :math:`\mathbf{a}`, :math:`\mathbf{b}`, and :math:`\mathbf{c}`
    must form a right-handed basis, i.e. the triple product
    :math:`(\mathbf{a} \times \mathbf{b}) \cdot \mathbf{c}` must be positive.

    Parameters
    ----------
    a : array_like
        First vector defining the parallelepiped.
    b : array_like
        Second vector defining the parallelepiped.
    c : array_like
        Third vector defining the parallelepiped.

    Raises
    ------
    TypeError
        If ``a``, ``b``, and ``c`` are not all 3-element vectors.
    ValueError
        If the volume is not positive.

    """
    def __init__(self, a, b, c):
        basis = [numpy.asarray(v, dtype=numpy.float64) for v in (a, b, c)]
        self.a, self.b, self.c = basis
        if not all(v.shape == (3,) for v in basis):
            raise TypeError('a, b, and c must be 3-element vectors.')
        if self.volume <= 0:
            raise ValueError('The volume must be positive.')

    @property
    def volume(self):
        # scalar triple product (a x b) . c
        return numpy.dot(numpy.cross(self.a, self.b), self.c)

    def to_json(self):
        r"""Serialize as a dictionary.

        Returns
        -------
        dict
            The box vectors ``a``, ``b``, and ``c`` as tuples.

        """
        return dict(a=tuple(self.a), b=tuple(self.b), c=tuple(self.c))

    @classmethod
    def from_json(cls, data):
        r"""Deserialize from a dictionary.

        Parameters
        ----------
        data : dict
            Serialized Parallelepiped with keys ``('a','b','c')``, each a
            3-element box vector.

        Returns
        -------
        :class:`Parallelepiped`
            A new Parallelepiped object constructed from the data.

        """
        return Parallelepiped(**data)
class TriclinicBox(Parallelepiped):
    r"""Triclinic box.

    A TriclinicBox is a special type of :class:`Parallelepiped`. The box is
    defined by an orthorhombic box oriented along the Cartesian axes and having
    three vectors of length :math:`L_x`, :math:`L_y`, and :math:`L_z`, respectively.
    The box is then tilted by factors :math:`xy`, :math:`xz`, and :math:`yz`, which
    are upper off-diagonal elements of the matrix of box vectors. As a result,
    the :math:`\mathbf{a}` vector is always aligned along the :math:`x` axis, while
    the other two vectors may be tilted.

    The tilt factors can be defined using one of two :class:`TriclinicBox.Convention`\s.
    By default, the LAMMPS convention is applied to calculate the basis vectors.

    Parameters
    ----------
    Lx : float
        Length along the :math:`x` axis.
    Ly : float
        Length along the :math:`y` axis.
    Lz : float
        Length along the :math:`z` axis.
    xy : float
        First tilt factor.
    xz : float
        Second tilt factor.
    yz : float
        Third tilt factor.

    Raises
    ------
    ValueError
        If ``Lx``, ``Ly``, and ``Lz`` are not all positive.
    ValueError
        If the convention is not ``TriclinicBox.Convention.LAMMPS`` or
        ``TriclinicBox.Convention.HOOMD``.

    """
    class Convention(Enum):
        r"""Convention by which the tilt factors are applied to the basis vectors.

        In the `LAMMPS <https://lammps.sandia.gov/doc/Howto_triclinic.html>`_
        simulation convention, specified using ``TriclinicBox.Convention.LAMMPS``,
        the basis vectors are

        .. math::

            \mathbf{a} = (L_x,0,0)
            \quad \mathbf{b} = (xy,L_y,0)
            \quad \mathbf{c} = (xz,yz,L_z)

        In the `HOOMD-blue <https://hoomd-blue.readthedocs.io/en/stable/box.html>`_
        simulation convention, specified using ``TriclinicBox.Convention.HOOMD``,
        the basis vectors are

        .. math::

            \mathbf{a} = (L_x,0,0)
            \quad \mathbf{b} = (xy \cdot L_y,L_y,0)
            \quad \mathbf{c} = (xz \cdot L_z,yz \cdot L_z,L_z)

        Attributes
        ----------
        LAMMPS : int
            LAMMPS convention for applying the tilt factors.
        HOOMD : int
            HOOMD convention for applying the tilt factors.

        """
        LAMMPS = 1
        HOOMD = 2

    def __init__(self, Lx, Ly, Lz, xy, xz, yz, convention=Convention.LAMMPS):
        if Lx <= 0 or Ly <= 0 or Lz <= 0:
            raise ValueError('All side lengths must be positive.')
        self._convention = convention
        # Build the basis vectors according to the chosen tilt convention.
        if self.convention is TriclinicBox.Convention.LAMMPS:
            a = (Lx, 0, 0)
            b = (xy, Ly, 0)
            c = (xz, yz, Lz)
        elif self.convention is TriclinicBox.Convention.HOOMD:
            a = (Lx, 0, 0)
            b = (xy*Ly, Ly, 0)
            c = (xz*Lz, yz*Lz, Lz)
        else:
            raise ValueError('Triclinic convention must be TriclinicBox.Convention.LAMMPS or TriclinicBox.Convention.HOOMD')
        super().__init__(a, b, c)

    @property
    def convention(self):
        r""":class:`TriclinicBox.Convention`: Convention for tilt factors."""
        return self._convention

    def to_json(self):
        r"""Serialize as a dictionary.

        The dictionary contains the three box lengths ``Lx``, ``Ly``, and ``Lz``,
        the three tilt factors ``xy``, ``xz``, and ``yz``, and the ``convention``
        for the tilt factors.

        Returns
        -------
        dict
            The serialized TriclinicBox.

        """
        # Recover the raw tilt factors from the stored basis vectors by
        # inverting the construction used in __init__.
        if self._convention is TriclinicBox.Convention.LAMMPS:
            xy = self.b[0]
            xz = self.c[0]
            yz = self.c[1]
        elif self._convention is TriclinicBox.Convention.HOOMD:
            xy = self.b[0]/self.b[1]
            xz = self.c[0]/self.c[2]
            yz = self.c[1]/self.c[2]
        return {'Lx': self.a[0],
                'Ly': self.b[1],
                'Lz': self.c[2],
                'xy': xy,
                'xz': xz,
                'yz': yz,
                'convention': self._convention.name
               }

    @classmethod
    def from_json(cls, data):
        r"""Deserialize from a dictionary.

        Parameters
        ----------
        data : dict
            The serialized equivalent of the TriclinicBox object. The keys
            of ``data`` should be ``('Lx','Ly','Lz','xy','xz','yz','convention')``.
            The lengths and tilt factors should be floats, and the convention should
            be a string.

        Returns
        -------
        :class:`TriclinicBox`
            A new TriclinicBox object constructed from the data.

        Raises
        ------
        ValueError
            If the convention specified is not ``'LAMMPS'`` or ``'HOOMD'``.

        """
        data_ = dict(data)
        if data['convention'] == 'LAMMPS':
            data_['convention'] = TriclinicBox.Convention.LAMMPS
        elif data['convention'] == 'HOOMD':
            data_['convention'] = TriclinicBox.Convention.HOOMD
        else:
            # BUG FIX: the original `return ValueError(...)` handed the
            # exception object back to the caller instead of raising it.
            raise ValueError('Only LAMMPS and HOOMD conventions are supported.')
        return TriclinicBox(**data_)
class Cuboid(TriclinicBox):
    r"""Orthorhombic box.

    A Cuboid is a :class:`TriclinicBox` with all tilt factors fixed at zero,
    so the three basis vectors lie along the Cartesian axes with lengths
    :math:`L_x`, :math:`L_y`, and :math:`L_z`.

    Parameters
    ----------
    Lx : float
        Length along the :math:`x` axis.
    Ly : float
        Length along the :math:`y` axis.
    Lz : float
        Length along the :math:`z` axis.

    """
    def __init__(self, Lx, Ly, Lz):
        super().__init__(Lx, Ly, Lz, 0, 0, 0)

    def to_json(self):
        r"""Serialize as a dictionary holding the three box lengths.

        Returns
        -------
        dict
            The serialized Cuboid with keys ``('Lx','Ly','Lz')``.

        """
        return dict(Lx=self.a[0], Ly=self.b[1], Lz=self.c[2])

    @classmethod
    def from_json(cls, data):
        r"""Deserialize from a dictionary.

        Parameters
        ----------
        data : dict
            Serialized Cuboid with float values under ``('Lx','Ly','Lz')``.

        Returns
        -------
        :class:`Cuboid`
            A new Cuboid object constructed from the data.

        """
        return Cuboid(**data)
class Cube(Cuboid):
    r"""Cubic box.

    A Cube is a :class:`Cuboid` whose three edges share one length :math:`L`.

    Parameters
    ----------
    L : float
        The edge length of the cube.

    """
    def __init__(self, L):
        super().__init__(L, L, L)

    def to_json(self):
        r"""Serialize as a dictionary holding the edge length.

        Returns
        -------
        dict
            The serialized Cube with key ``('L',)``.

        """
        return dict(L=self.a[0])

    @classmethod
    def from_json(cls, data):
        r"""Deserialize from a dictionary.

        Parameters
        ----------
        data : dict
            Serialized Cube; its ``'L'`` value should be a float.

        Returns
        -------
        :class:`Cube`
            A new Cube object constructed from the data.

        """
        return Cube(**data)
| [
"numpy.asarray",
"numpy.cross"
] | [((2970, 3007), 'numpy.asarray', 'numpy.asarray', (['a'], {'dtype': 'numpy.float64'}), '(a, dtype=numpy.float64)\n', (2983, 3007), False, 'import numpy\n'), ((3024, 3061), 'numpy.asarray', 'numpy.asarray', (['b'], {'dtype': 'numpy.float64'}), '(b, dtype=numpy.float64)\n', (3037, 3061), False, 'import numpy\n'), ((3078, 3115), 'numpy.asarray', 'numpy.asarray', (['c'], {'dtype': 'numpy.float64'}), '(c, dtype=numpy.float64)\n', (3091, 3115), False, 'import numpy\n'), ((3420, 3447), 'numpy.cross', 'numpy.cross', (['self.a', 'self.b'], {}), '(self.a, self.b)\n', (3431, 3447), False, 'import numpy\n')] |
# Import Libraries
import os
from pathlib import Path
import argparse
import datetime, dateutil
import json
import pickle
import numpy as np
import pandas as pd
import csv
import nltk
from nltk.tokenize import word_tokenize
# Download NLTK resources needed by the tokenizers (no-op if already cached).
nltk.download('stopwords')
nltk.download('punkt')
from nltk.tokenize import TweetTokenizer
# Module-level tweet tokenizer instance.
tt = TweetTokenizer()
import torch
import torchtext
from transformers import AutoModel, AutoTokenizer, AutoConfig
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Cluster path to the hatebase tweet output (unused below; kept for reference).
tweet_df = '/h/ypark/tweet_covid/hatespeech/hatebase/output'
class BERT:
    """Wrapper around the COVID-Twitter-BERT v2 model for word embeddings.

    Loads the pretrained model/tokenizer once and exposes helpers to check
    whether a word is tokenizable and to compute its mean contextual
    embedding over a set of tweets.
    """

    def __init__(self):
        # output_hidden_states kept on so intermediate layers stay accessible.
        self.config = AutoConfig.from_pretrained('digitalepidemiologylab/covid-twitter-bert-v2', output_hidden_states=True)
        self.model = AutoModel.from_pretrained('digitalepidemiologylab/covid-twitter-bert-v2', config=self.config).to(device)
        self.tokenizer = AutoTokenizer.from_pretrained('digitalepidemiologylab/covid-twitter-bert-v2')
        self.unknown_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.unk_token)

    def fine_tune(self):
        # Placeholder: fine-tuning is not implemented.
        return

    def get_model(self):
        """Return the underlying ``(model, tokenizer)`` pair."""
        return self.model, self.tokenizer

    def check_unknown(self, word):
        """Return True when the first sub-token of ``word`` maps to [UNK]."""
        # encode() adds [CLS] at position 0, so index 1 is the first word piece.
        first_token = self.tokenizer.encode(word)[1]
        check = first_token == self.unknown_id
        if check:
            print("!!! Warning !!! Word: {} cannot be processed.".format(word))
        return check

    def get_word_rep(self, word, word_df):
        """Mean contextual embedding of ``word`` over tweets in ``word_df``.

        Parameters
        ----------
        word : str
            Target word; must tokenize to a single known word piece.
        word_df : pandas.DataFrame
            Tweets containing the word; must have a 'cleaned_text' column.

        Returns
        -------
        torch.Tensor or None
            Mean of the word's last-layer representations across tweets,
            or None when the word is found in no tokenized tweet.
        """
        sentences = list(word_df['cleaned_text'])
        word_id = self.tokenizer.encode(word)[1]
        sentences_ids = [self.tokenizer.encode(s) for s in sentences]
        # Keep only sentences whose token stream actually contains the word,
        # recording the word's position in each kept sentence.
        word_idx = []
        ids = []
        for sen_ids in sentences_ids:
            if word_id in sen_ids:
                word_idx.append(sen_ids.index(word_id))
                ids.append(sen_ids)
            else:
                print("Not in tokenizer")
        # BUG FIX: batch size must come from the *kept* sentences, not the raw
        # input; the original used len(sentences) and indexed past ids/word_idx
        # whenever a sentence was dropped above.
        bsz = len(ids)
        all_lens = np.clip([len(s) for s in ids], 0, 510)
        if bsz == 0:
            print("No text to process")
            return None
        maxlen = max(all_lens)
        padded_ids = np.zeros((bsz, maxlen))
        for i in range(bsz):
            # BUG FIX: truncate sequences longer than the clipped length;
            # the original assignment broke for tweets over 510 tokens.
            padded_ids[i][:all_lens[i]] = ids[i][:all_lens[i]]
        padded_ids = torch.tensor(padded_ids).int().to(device)
        with torch.no_grad():
            raw_outputs = self.model(padded_ids)[0]
        # BUG FIX: the original trailing `if word_idx[i] >= 0 else None`
        # referenced a leaked loop variable; list.index never returns a
        # negative value, so the guard is dropped entirely.
        embedded = [raw_outputs[i][word_idx[i]] for i in range(bsz)]
        embedded_mean = torch.stack(embedded, dim=0).mean(dim=0)
        return embedded_mean
def get_mapping(args):
    """Load the hatespeech-term -> DataFrame mapping for the requested chunk.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``unigram_dir`` (directory of pickles) and ``chunk_id``.

    Returns
    -------
    dict
        The unpickled mapping for chunk ``args.chunk_id``.
    """
    chunk_file = Path(args.unigram_dir, f'mapping_chunk_{args.chunk_id}.pkl')
    with open(chunk_file, 'rb') as handle:
        return pickle.load(handle)
def main(args):
    """Compute per-day BERT embeddings for every hate term in the chunk.

    For each term with a non-empty tweet mapping that BERT can tokenize,
    groups its tweets by day, averages the word's contextual embedding per
    day, and pickles {term: {day: embedding}} under ``<export>/embedding/``.
    """
    # Load dictionary (key: hatespeech term, value: dataframe of tweets)
    mapping = get_mapping(args)
    model = BERT()
    result = {}
    hateterms = mapping.keys()
    hateterms = list(hateterms)
    for word in hateterms:
        mapping_word = mapping[word]
        # Skip terms with no tweets or that the tokenizer maps to [UNK].
        if not mapping[word].empty and not model.check_unknown(word.lower()):
            print('****** Word: {} in progress ******'.format(word))
            gb = mapping_word.groupby('day')
            mapping_by_days = [gb.get_group(x) for x in gb.groups]
            word_by_days = {}
            for group in mapping_by_days:
                day = group['day'].iloc[0]
                print("\t Day {} ... ".format(day))
                embedding = model.get_word_rep(word, group)
                # get_word_rep returns None when the word appears in no
                # tokenized tweet of that day; skip those days.
                if embedding is not None:
                    word_by_days[day] = embedding
            result[word] = word_by_days
            # embedding = model.get_word_rep(word, mapping_word)
            # embedding_s = pd.Series(embedding, dtype='object')
            # mapping_word.insert(0, 'embedding', embedding_s)
            # result[word] = mapping_word
    # Persist the per-term, per-day embeddings for this chunk.
    Path(args.export, "embedding").mkdir(exist_ok=True)
    export_fn = Path(args.export, "embedding/chunk_{}_embedding.pkl".format(args.chunk_id))
    with open(export_fn, 'wb') as f:
        pickle.dump(result, f)
if __name__ == "__main__":
    # CLI entry point: which chunk to embed and where to read/write data.
    parser = argparse.ArgumentParser()
    # parser.add_argument("--data_dir", typel=str, defaut="../../data/panacealab_covid_tweets")
    parser.add_argument("--chunk_id", type=int, default=0)
    parser.add_argument("--export", type=str, default="/h/ypark/tweet_covid/hatespeech/output")
    parser.add_argument("--unigram_dir", type=str, default = '/h/ypark/tweet_covid/output/unigrams/mapping')
    args = parser.parse_args()
    main(args)
| [
"transformers.AutoModel.from_pretrained",
"nltk.tokenize.TweetTokenizer",
"pickle.dump",
"transformers.AutoConfig.from_pretrained",
"argparse.ArgumentParser",
"nltk.download",
"pathlib.Path",
"torch.stack",
"pickle.load",
"torch.tensor",
"numpy.zeros",
"torch.cuda.is_available",
"transformer... | [((226, 252), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (239, 252), False, 'import nltk\n'), ((253, 275), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (266, 275), False, 'import nltk\n'), ((322, 338), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {}), '()\n', (336, 338), False, 'from nltk.tokenize import TweetTokenizer\n'), ((468, 493), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (491, 493), False, 'import torch\n'), ((444, 464), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (456, 464), False, 'import torch\n'), ((499, 518), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (511, 518), False, 'import torch\n'), ((4919, 4944), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4942, 4944), False, 'import argparse\n'), ((640, 745), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['"""digitalepidemiologylab/covid-twitter-bert-v2"""'], {'output_hidden_states': '(True)'}), "('digitalepidemiologylab/covid-twitter-bert-v2',\n output_hidden_states=True)\n", (666, 745), False, 'from transformers import AutoModel, AutoTokenizer, AutoConfig\n'), ((893, 970), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""digitalepidemiologylab/covid-twitter-bert-v2"""'], {}), "('digitalepidemiologylab/covid-twitter-bert-v2')\n", (922, 970), False, 'from transformers import AutoModel, AutoTokenizer, AutoConfig\n'), ((2893, 2916), 'numpy.zeros', 'np.zeros', (['(bsz, maxlen)'], {}), '((bsz, maxlen))\n', (2901, 2916), True, 'import numpy as np\n'), ((3488, 3502), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3499, 3502), False, 'import pickle\n'), ((4854, 4876), 'pickle.dump', 'pickle.dump', (['result', 'f'], {}), '(result, f)\n', (4865, 4876), False, 'import pickle\n'), ((3072, 3087), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3085, 3087), 
False, 'import torch\n'), ((4664, 4694), 'pathlib.Path', 'Path', (['args.export', '"""embedding"""'], {}), "(args.export, 'embedding')\n", (4668, 4694), False, 'from pathlib import Path\n'), ((763, 860), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['"""digitalepidemiologylab/covid-twitter-bert-v2"""'], {'config': 'self.config'}), "('digitalepidemiologylab/covid-twitter-bert-v2',\n config=self.config)\n", (788, 860), False, 'from transformers import AutoModel, AutoTokenizer, AutoConfig\n'), ((3265, 3293), 'torch.stack', 'torch.stack', (['embedded'], {'dim': '(0)'}), '(embedded, dim=0)\n', (3276, 3293), False, 'import torch\n'), ((3016, 3040), 'torch.tensor', 'torch.tensor', (['padded_ids'], {}), '(padded_ids)\n', (3028, 3040), False, 'import torch\n')] |
import math
import numpy as np
from data_utils.utils import unicode_csv_reader2
from collections import defaultdict
from sklearn.model_selection import StratifiedShuffleSplit
from utils import UnicodeWriter
class TweetCorpus:
    '''Corpus reader for labeled/unlabeled tweet CSV files.

    Format of tweets_file (comma separated): a header line with columns
    AUTHOR,CONTENT,LABEL,DATE,URL,DESC followed by one tweet per line.
    Each tweet is stored as a dictionary keyed by column name.

    Fine-grained labels are collapsed to three classes:

    * ``'aggress'`` -- insults, threats, bragging, hypervigilance,
      challenges with authority
    * ``'loss'`` -- distress, sadness, loneliness, death
    * ``'other'`` -- everything else
    '''

    def __init__(self, train_file = None, val_file = None, test_file = None, unlabeled_tweets_file = None):
        self.max_len = 0
        self.len_dict = None
        # Substrings that mark a fine-grained label as aggression / loss.
        self.aggress = set(['aggress', 'insult', 'snitch', 'threat', 'brag', 'aod', 'aware', 'authority', 'trust', 'fight', 'pride', 'power', 'lyric'])
        self.loss = set(['loss', 'grief', 'death', 'sad', 'alone', 'reac', 'guns'])
        self.train_tweets = self.read_tweets(train_file, 'CONTENT', ',')
        self.val_tweets = self.read_tweets(val_file, 'CONTENT', ',')
        self.test_tweets = self.read_tweets(test_file, 'CONTENT', ',')
        self.unlabeled_tweets = self.read_tweets(unlabeled_tweets_file, 'text', ',')
        self.char2idx = None
        self.idx2char = None
        self.init_char_dictionaries()
        self.label2idx = None
        self.idx2label = None
        self.init_label_dictionaries()

    def collapsed_label(self, fine_grained):
        '''Map a fine-grained label onto 'aggress', 'loss', or 'other'.'''
        fine_grained = fine_grained.lower()
        # aggression terms take precedence over loss terms
        for a in self.aggress:
            if a in fine_grained: return 'aggress'
        for l in self.loss:
            if l in fine_grained: return 'loss'
        return 'other'

    def read_tweets(self, file_name, column_name, delimiter):
        '''Read tweets from ``file_name`` as a list of dicts (None if no file).

        Rows with empty text or more than 150 characters are dropped; for
        labeled files ('CONTENT' column) the LABEL field is collapsed.
        '''
        if file_name is None:
            return None
        tweets = []
        with open(file_name) as fh:
            reader = unicode_csv_reader2(fh, delimiter = delimiter)
            for row in reader:
                if row[column_name] in (None, ''): continue
                # put a hard cutoff of 150 characters
                if len(row[column_name]) > 150:
                    continue
                if column_name == 'CONTENT':
                    if 'LABEL' in row.keys():
                        label = self.collapsed_label(row['LABEL'].lower())
                        row['LABEL'] = label
                tweets.append(row)
        return tweets

    def write_tweets(self, file_name, tweets, columns):
        '''Write ``tweets`` as CSV: a header row, then one row per tweet.'''
        if file_name is None or tweets is None:
            return None
        # BUG FIX: close the output file (the original leaked the handle).
        with open(file_name, 'w') as fh:
            unicode_writer = UnicodeWriter(fh)
            unicode_writer.writerow(columns)
            for tweet in tweets:
                # missing columns are written as empty strings
                values = [tweet[column] if column in tweet else '' for column in columns]
                unicode_writer.writerow(values)

    def init_char_dictionaries(self):
        '''Build char<->index maps (and length histogram) over all splits.'''
        self.char2idx = defaultdict(int)
        self.len_dict = defaultdict(int)
        self._update_char2idx(self.train_tweets, 'CONTENT')
        self._update_char2idx(self.val_tweets, 'CONTENT')
        self._update_char2idx(self.test_tweets, 'CONTENT')
        self._update_char2idx(self.unlabeled_tweets, 'text')
        # BUG FIX: dict.iteritems() is Python 2 only; items() works on both.
        self.idx2char = {id: c for c, id in self.char2idx.items()}

    def _update_char2idx(self, tweets, column_name):
        '''Register characters/lengths of ``tweets`` into the dictionaries.'''
        if tweets is None:
            return
        for tweet in tweets:
            if column_name in tweet:
                content = tweet[column_name]
                self.len_dict[len(content)] += 1
                self.max_len = max(self.max_len, len(content))
                for char in content:
                    if char not in self.char2idx:
                        # index 0 is reserved for padding
                        self.char2idx[char] = len(self.char2idx) + 1

    def init_label_dictionaries(self):
        '''Build label<->index maps over all labeled splits.'''
        self.label2idx = defaultdict(int)
        self._update_label2idx(self.train_tweets)
        self._update_label2idx(self.val_tweets)
        self._update_label2idx(self.test_tweets)
        # BUG FIX: dict.iteritems() is Python 2 only; items() works on both.
        self.idx2label = {id: c for c, id in self.label2idx.items()}

    def _update_label2idx(self, tweets):
        '''Register any new labels found in ``tweets``.'''
        if tweets is None:
            return
        for tweet in tweets:
            if 'LABEL' in tweet:
                label = tweet['LABEL']
                if label not in self.label2idx:
                    self.label2idx[label] = len(self.label2idx)

    def get_class_names(self):
        '''Return class names ordered by their indices.'''
        class_names = []
        # BUG FIX: xrange is Python 2 only; range works on both.
        for idx in range(len(self.idx2label)):
            class_names.append(self.idx2label[idx])
        return class_names

    def tweet2Indices(self, tweet, column_name):
        '''Encode a tweet's text as a left-zero-padded index array.'''
        indices = [self.char2idx[c] for c in tweet[column_name]]
        return np.asarray([0] * (self.max_len - len(indices)) + indices)

    def label2Index(self, tweet):
        '''Return the integer index of the tweet's collapsed label.'''
        return self.label2idx[tweet['LABEL']]

    def get_splits(self):
        '''Return (X_train, X_val, X_test, y_train, y_val, y_test) arrays.'''
        X_train = []
        X_val = []
        X_test = []
        y_train = []
        y_val = []
        y_test = []
        if self.train_tweets is not None:
            for tweet in self.train_tweets:
                X_train.append(self.tweet2Indices(tweet, 'CONTENT'))
                y_train.append(self.label2Index(tweet))
        if self.val_tweets is not None:
            for tweet in self.val_tweets:
                X_val.append(self.tweet2Indices(tweet, 'CONTENT'))
                y_val.append(self.label2Index(tweet))
        if self.test_tweets is not None:
            for tweet in self.test_tweets:
                X_test.append(self.tweet2Indices(tweet, 'CONTENT'))
                y_test.append(self.label2Index(tweet))
        X_train = np.asarray(X_train)
        y_train = np.asarray(y_train)
        X_val = np.asarray(X_val)
        y_val = np.asarray(y_val)
        X_test = np.asarray(X_test)
        y_test = np.asarray(y_test)
        return X_train, X_val, X_test, y_train, y_val, y_test

    def get_splits_for_lm1(self):
        '''Return encoded unlabeled tweets for language modeling.'''
        X = []
        if self.unlabeled_tweets is not None:
            for tweet in self.unlabeled_tweets:
                X.append(self.tweet2Indices(tweet, 'text'))
        X = np.asarray(X)
        return X

    def get_splits_for_lm2(self):
        '''Return encoded labeled tweets (all splits) for language modeling.'''
        X = []
        if self.train_tweets is not None:
            for tweet in self.train_tweets:
                X.append(self.tweet2Indices(tweet, 'CONTENT'))
        if self.val_tweets is not None:
            for tweet in self.val_tweets:
                X.append(self.tweet2Indices(tweet, 'CONTENT'))
        if self.test_tweets is not None:
            for tweet in self.test_tweets:
                X.append(self.tweet2Indices(tweet, 'CONTENT'))
        X = np.asarray(X)
        return X

    def get_stratified_splits(self, split_ratio = 0.2):
        '''Pool all labeled tweets and return stratified train/val/test splits.

        ``split_ratio`` is the test fraction; the validation fraction is
        scaled so val and test end up the same size.
        '''
        X = []
        y = []
        if self.train_tweets is not None:
            for tweet in self.train_tweets:
                X.append(self.tweet2Indices(tweet, 'CONTENT'))
                y.append(self.label2Index(tweet))
        if self.val_tweets is not None:
            for tweet in self.val_tweets:
                X.append(self.tweet2Indices(tweet, 'CONTENT'))
                y.append(self.label2Index(tweet))
        if self.test_tweets is not None:
            for tweet in self.test_tweets:
                X.append(self.tweet2Indices(tweet, 'CONTENT'))
                y.append(self.label2Index(tweet))
        X = np.asarray(X)
        y = np.asarray(y)
        X_train, X_test, y_train, y_test = self._perform_stratified_shuffle_split(X, y, split_ratio = split_ratio)
        # rescale the ratio so the val split matches the test split size
        X_train, X_val, y_train, y_val = self._perform_stratified_shuffle_split(X_train, y_train, split_ratio = math.floor((split_ratio * 100) / (1.0 - split_ratio)) / 100)
        return X_train, X_val, X_test, y_train, y_val, y_test

    def _perform_stratified_shuffle_split(self, X, y, split_ratio = 0.2):
        '''Single stratified shuffle split of (X, y) with a fixed seed.'''
        sss = StratifiedShuffleSplit(n_splits = 1, test_size = split_ratio, random_state = 0)
        for train_index, test_index in sss.split(X, y):
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
        return X_train, X_test, y_train, y_test

    def get_label_dist(self, y_train, y_val, y_test):
        '''Return per-split {label_index: count} distributions.'''
        train_label_dist = defaultdict(int)
        val_label_dist = defaultdict(int)
        test_label_dist = defaultdict(int)
        for y in y_train:
            train_label_dist[y] += 1
        for y in y_val:
            val_label_dist[y] += 1
        for y in y_test:
            test_label_dist[y] += 1
        return train_label_dist, val_label_dist, test_label_dist
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"math.floor",
"numpy.asarray",
"collections.defaultdict",
"data_utils.utils.unicode_csv_reader2"
] | [((4083, 4099), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4094, 4099), False, 'from collections import defaultdict\n'), ((4125, 4141), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4136, 4141), False, 'from collections import defaultdict\n'), ((5000, 5016), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (5011, 5016), False, 'from collections import defaultdict\n'), ((6802, 6821), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (6812, 6821), True, 'import numpy as np\n'), ((6840, 6859), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (6850, 6859), True, 'import numpy as np\n'), ((6876, 6893), 'numpy.asarray', 'np.asarray', (['X_val'], {}), '(X_val)\n', (6886, 6893), True, 'import numpy as np\n'), ((6910, 6927), 'numpy.asarray', 'np.asarray', (['y_val'], {}), '(y_val)\n', (6920, 6927), True, 'import numpy as np\n'), ((6945, 6963), 'numpy.asarray', 'np.asarray', (['X_test'], {}), '(X_test)\n', (6955, 6963), True, 'import numpy as np\n'), ((6981, 6999), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (6991, 6999), True, 'import numpy as np\n'), ((7282, 7295), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (7292, 7295), True, 'import numpy as np\n'), ((7822, 7835), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (7832, 7835), True, 'import numpy as np\n'), ((8549, 8562), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (8559, 8562), True, 'import numpy as np\n'), ((8575, 8588), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (8585, 8588), True, 'import numpy as np\n'), ((9227, 9300), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': 'split_ratio', 'random_state': '(0)'}), '(n_splits=1, test_size=split_ratio, random_state=0)\n', (9249, 9300), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((9616, 9632), 'collections.defaultdict', 
'defaultdict', (['int'], {}), '(int)\n', (9627, 9632), False, 'from collections import defaultdict\n'), ((9658, 9674), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (9669, 9674), False, 'from collections import defaultdict\n'), ((9701, 9717), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (9712, 9717), False, 'from collections import defaultdict\n'), ((2498, 2542), 'data_utils.utils.unicode_csv_reader2', 'unicode_csv_reader2', (['fh'], {'delimiter': 'delimiter'}), '(fh, delimiter=delimiter)\n', (2517, 2542), False, 'from data_utils.utils import unicode_csv_reader2\n'), ((9013, 9064), 'math.floor', 'math.floor', (['(split_ratio * 100 / (1.0 - split_ratio))'], {}), '(split_ratio * 100 / (1.0 - split_ratio))\n', (9023, 9064), False, 'import math\n')] |
#!/usr/bin/env python3
# Author: <NAME>
import os
import os.path as osp
import time
from typing import Any, Dict, List, Optional
from sklearn.metrics import r2_score, explained_variance_score
import h5py
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from pytorch_transformers import AdamW, WarmupCosineSchedule
from torch.utils import data
from src import (
get_model_class, is_learning_model, is_input_masked_model,
TensorboardWriter,
create_logger,
)
from src.utils import get_inverse_sqrt_schedule
from src.dataset import DATASET_MODES, SpikesDataset
from src.mask import Masker, UNMASKED_LABEL, DEFAULT_MASK_VAL
"""
Runner class for NDT
"""
def get_lightest_gpus(num_gpus):
# TODO update with better CUDA_VISIBLE_DEVICES support (or just use ray)
if torch.cuda.device_count() == 1:
return [0]
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return np.argsort(memory_available)[-num_gpus:].tolist()
def exp_smooth(new_metric, old_metric, mu=0.5):
    r"""Exponentially smooth a scalar metric; higher ``mu`` weights history more."""
    weighted_update = (1.0 - mu) * new_metric
    weighted_history = mu * old_metric
    return weighted_update + weighted_history
def exp_smooth_dict(new_metrics, rolling_metrics, mu=0.5):
    r"""Blend every shared key of ``new_metrics`` into ``rolling_metrics`` in place."""
    shared = (key for key in new_metrics if key in rolling_metrics)
    for key in shared:
        rolling_metrics[key] = exp_smooth(new_metrics[key], rolling_metrics[key], mu)
class Runner:
    r"""
    Training / evaluation harness for NDT models.

    Two paths to inference.
    A:
    Have a config file.
    Load device.
    Load a checkpoint.
    B:
    Pass a checkpoint path (other steps automated)
    We prioritize path A.
    """
    def __init__(self, config=None, checkpoint_path=None):
        # Either a config or a checkpoint (which embeds its config) is required.
        assert config is not None or checkpoint_path is not None
        self.flush_secs = 10
        self.model = None
        self.optimizer = None
        self.lr_scheduler = None
        self.device = None
        self.num_neurons = 0
        self.pth_time = 0
        self.count_updates = 0
        self.count_checkpoints = 0
        self.num_gpus = 0
        self.masker = None
        self.rolling_metrics = {} # For PBT
        if checkpoint_path is not None:
            # Path B: pull the config out of the checkpoint itself.
            tmp_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            ckpt_dict = torch.load(checkpoint_path, map_location=tmp_device)
            config = ckpt_dict["config"]
        self.config = config
        if not osp.exists(config.LOG_DIR):
            os.makedirs(config.LOG_DIR, exist_ok=True)
        logfile_path = osp.join(config.LOG_DIR, f"{config.VARIANT}.log")
        # if osp.exists(logfile_path):
        #     os.remove(logfile_path)
        self.logger = create_logger()
        self.logger.clear_filehandlers()
        self.logger.add_filehandler(logfile_path)
        if hasattr(config.TRAIN, "TUNE_MODE") and config.TRAIN.TUNE_MODE:
            self.logger.clear_streamhandlers()
        # Model-selection bookkeeping: best (lowest) losses / best (highest) R2 seen.
        self.best_val = {
            "value": 100,
            "update": -1,
        }
        self.best_unmasked_val = {
            "value": 100,
            "update": -1,
        }
        self.best_R2 = {
            "value": -100,
            "update": -1,
        }
        if checkpoint_path is not None:
            self.load_device()
            self.load_checkpoint(checkpoint_path, map_location=self.device)
    def setup_model(self, device):
        r""" Creates model and assigns to device. Returns the model's hidden size. """
        self.model = get_model_class(self.config.MODEL.NAME)(
            self.config.MODEL,
            self.trial_length,
            self.num_neurons,
            device,
            max_spikes=self.max_spikes
        )
        num_hidden = self.model.get_hidden_size()
        if self.num_gpus > 1:
            if self.config.SYSTEM.GPU_AUTO_ASSIGN:
                gpu_indices = get_lightest_gpus(self.num_gpus)
            else:
                gpu_indices = list(range(self.num_gpus))
            if self.device_gpu in gpu_indices:
                gpu_indices.remove(self.device_gpu)
            else:
                gpu_indices = gpu_indices[:-1]
            gpu_indices = [self.device_gpu] + gpu_indices # Make sure our primary gpu is first
            self.model = nn.DataParallel(self.model, device_ids=gpu_indices)
        self.model = self.model.to(device)
        return num_hidden
    def _get_parameters(self):
        # All model parameters, for optimizer construction and grad clipping.
        return list(self.model.parameters())
    def _do_log(self, update):
        # Log on every LOG_INTERVAL-th update (never at update 0).
        return (
            update > 0 and update % self.config.TRAIN.LOG_INTERVAL == 0
        )
    def save_checkpoint(
        self, file_name: str, extra_state: Optional[Dict] = None
    ) -> None:
        r"""Save checkpoint with specified name.
        Args:
            file_name: file name for checkpoint; a bare name is placed
                under ``config.CHECKPOINT_DIR``, a path is used as-is.
        Returns:
            None
        """
        checkpoint = {
            "state_dict": self.model.state_dict(),
            "optim_state": None if self.optimizer is None else self.optimizer.state_dict(),
            "lr_scheduler": None if self.lr_scheduler is None else self.lr_scheduler.state_dict(),
            "config": self.config,
            "best_val": self.best_val,
            "best_unmasked_val": self.best_unmasked_val,
            "best_r2": self.best_R2,
            "max_spikes": self.max_spikes,
            "num_neurons": self.num_neurons,
            "trial_length": self.trial_length,
        }
        checkpoint["extra_state"] = dict( # metadata
            update=self.count_updates,
            checkpoint=self.count_checkpoints,
            pth_time=self.pth_time,
            max_spikes=self.max_spikes
        )
        if extra_state is not None:
            checkpoint["extra_state"].update(extra_state)
        if len(osp.split(file_name)[0]) > 0:
            full_path = file_name
        else:
            os.makedirs(self.config.CHECKPOINT_DIR, exist_ok=True)
            full_path = osp.join(self.config.CHECKPOINT_DIR, file_name)
        #self.logger.info("Saving {} with val {}, dropout {}. Decoder weights: {}".format(
        #    full_path,
        #    self.best_val,
        #    self.config.MODEL.DROPOUT,
        #    self.model.state_dict()['decoder.0.bias'][:5]
        #    ))
        torch.save(
            checkpoint, full_path
        )
    def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
        r"""Load checkpoint of specified path as a dict.
        Will fully load model if not already configured. Expects runner devices to be set.
        Args:
            checkpoint_path: path of target checkpoint
            *args: additional positional args
            **kwargs: additional keyword args
        Returns:
            dict containing checkpoint info
        """
        ckpt_dict = torch.load(checkpoint_path, *args, **kwargs)
        # Dataset geometry must be restored before the model can be built.
        if "num_neurons" in ckpt_dict:
            self.num_neurons = ckpt_dict["num_neurons"]
        if "trial_length" in ckpt_dict:
            self.trial_length = ckpt_dict["trial_length"]
        if "max_spikes" in ckpt_dict:
            self.max_spikes = ckpt_dict["max_spikes"]
        if self.model is None:
            self.setup_model(self.device)
        self.model.load_state_dict(ckpt_dict["state_dict"])
        if "optim_state" in ckpt_dict and self.optimizer is not None:
            self.optimizer.load_state_dict(ckpt_dict["optim_state"])
        if "lr_scheduler" in ckpt_dict and self.lr_scheduler is not None:
            self.lr_scheduler.load_state_dict(ckpt_dict["lr_scheduler"])
        if "best_val" in ckpt_dict:
            self.best_val = ckpt_dict["best_val"]
        if "best_unmasked_val" in ckpt_dict:
            self.best_unmasked_val = ckpt_dict["best_unmasked_val"]
        if "best_r2" in ckpt_dict:
            self.best_R2 = ckpt_dict["best_r2"]
        if "extra_state" in ckpt_dict:
            self.count_updates = ckpt_dict["extra_state"]["update"]
            self.logger.info("Update loaded -- {}".format(self.count_updates))
            self.count_checkpoints = ckpt_dict["extra_state"]["checkpoint"]
            self.pth_time = ckpt_dict["extra_state"]["pth_time"]
        #self.logger.info("Loading {} with val {}, dropout {}. Decoder weight {}".format(
        #    checkpoint_path,
        #    self.best_val,
        #    self.config.MODEL.DROPOUT,
        #    self.model.state_dict()['decoder.0.bias'][:5]
        #    ))
        return ckpt_dict
    def load_device(self):
        r"""Pick the torch device (CPU fallback; optionally auto-pick lightest GPU)."""
        if not torch.cuda.is_available():
            self.device = torch.device("cpu")
        else:
            self.num_gpus = min(self.config.SYSTEM.NUM_GPUS, torch.cuda.device_count())
            self.logger.info(f"Using {self.num_gpus} GPUs")
            gpu_id = self.config.SYSTEM.TORCH_GPU_ID
            if self.config.SYSTEM.GPU_AUTO_ASSIGN:
                gpu_id = get_lightest_gpus(1)[0]
            self.device = (
                torch.device("cuda", gpu_id)
            )
            self.device_gpu = gpu_id
        self.logger.info(f"Using {self.device}")
    def update_config(self, config):
        r""" Update config node and propagate through model. Used for pbt.
        """
        # Diff LR
        #self.logger.info(f"\n\n Updating config! {config.TRAIN.LR.SCHEDULE} \n\n")
        if self.config.TRAIN.LR.INIT != config.TRAIN.LR.INIT and self.optimizer is not None:
            for g in self.optimizer.param_groups:
                g['lr'] = config.TRAIN.LR.INIT # Manualy override of LR
        self.config = config
        if self.masker is not None:
            self.masker.config = config.TRAIN
        self.model.update_config(config.MODEL)
    def load_train_val_data_and_masker(self):
        r"""Build the train loader, (optional) validation set, and the masker."""
        training_set = SpikesDataset(self.config, self.config.DATA.TRAIN_FILENAME, mode=DATASET_MODES.train, logger=self.logger)
        self.training_generator = data.DataLoader(training_set,
            batch_size=self.config.TRAIN.BATCH_SIZE, shuffle=True
        )
        # We'll need this to embed spikes. Hoping max spikes for val/train isn't too far off
        self.max_spikes = training_set.get_max_spikes() + 3
        self.logger.info(f"Clipping all spikes to {self.max_spikes}.")
        self.logger.info(f"Training on {len(training_set)} samples.")
        if self.config.TRAIN.DO_VAL:
            self.validation_set = SpikesDataset(self.config, self.config.DATA.VAL_FILENAME, mode=DATASET_MODES.val, logger=self.logger)
            self.validation_set.clip_spikes(self.max_spikes)
            # Typically this is small enough
            # validation_generator = data.DataLoader(validation_set,
            #     batch_size=len(validation_set), shuffle=False,
            # )
        self.num_neurons = training_set.get_num_neurons()
        self.trial_length = training_set.trial_length
        self.masker = Masker(self.config.TRAIN, self.device)
    def load_optimizer(self, num_hidden):
        r"""Create optimizer (for learning models) and LR scheduler per config."""
        train_cfg = self.config.TRAIN
        if is_learning_model(self.config.MODEL.NAME):
            self.optimizer = AdamW(
                list(filter(lambda p: p.requires_grad, self._get_parameters())),
                lr=train_cfg.LR.INIT,
                weight_decay=train_cfg.WEIGHT_DECAY,
                eps=train_cfg.EPS,
            )
            self.logger.info(
                "number of trainable parameters: {}".format(
                    sum(
                        param.numel()
                        for param in self.model.parameters()
                        if param.requires_grad
                    )
                )
            )
        if self.optimizer is not None and train_cfg.LR.SCHEDULE:
            if train_cfg.LR.SCHEDULER == "cosine":
                self.lr_scheduler = WarmupCosineSchedule(
                    self.optimizer,
                    warmup_steps=train_cfg.LR.WARMUP,
                    t_total=train_cfg.NUM_UPDATES
                )
            else:
                self.lr_scheduler = get_inverse_sqrt_schedule(
                    self.optimizer,
                    warmup_steps=train_cfg.LR.WARMUP,
                    lr_max=train_cfg.LR.INIT
                )
    def train(self, checkpoint_path=None) -> None:
        r"""Main method for training model.
        Args:
            checkpoint_path: path of checkpoint to load
        Returns:
            None
        """
        self.load_device()
        train_cfg = self.config.TRAIN
        self.load_train_val_data_and_masker()
        num_hidden = self.setup_model(self.device)
        self.load_optimizer(num_hidden)
        if checkpoint_path is not None:
            self.load_checkpoint(checkpoint_path, map_location="cpu")
        start_updates = self.count_updates
        # Fix: guard against the loop body never running (e.g. resuming a run
        # that already reached NUM_UPDATES), which left `metrics` unbound.
        metrics = {"done": False}
        for update in range(start_updates, train_cfg.NUM_UPDATES):
            metrics = self.train_epoch()
            if metrics["done"]:
                break
        if not metrics["done"]:
            self.logger.info("Reached max updates without early stopping. Consider training some more.")
        if not train_cfg.TUNE_MODE:
            metrics_dict = {
                "Loss": self.best_val["value"],
                "Unmasked Loss": self.best_unmasked_val["value"],
            }
            if train_cfg.DO_R2:
                metrics_dict.update({ "R2": self.best_R2["value"] })
            with TensorboardWriter(
                self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
            ) as writer:
                writer.add_hparams(self.extract_hps_dict(), metrics_dict)
        torch.cuda.empty_cache()
    def train_epoch(self):
        r"""
        One (PBT) epoch of training. Model and data should be set up and on device at this point.
        Note: LFADS runs an epoch every pass through the data. This may be too frequently for transformers.
        i.e. we may need to do multiple passes through the data. For now, we're changing to report every pass through data.
        Returns:
            metrics: Information about the epoch.
                "done" -- should stop this run (e.g. due to early stopping). Keyword for Tune PBT.
        """
        if self.training_generator is None:
            raise Exception("No dataset generator set")
        update = self.count_updates
        #self.logger.info(f"update {update}")
        train_cfg = self.config.TRAIN
        # Linearly ramp the span-expansion probability over the configured window.
        expand_prob = min((update - train_cfg.MASK_SPAN_RAMP_START) / (train_cfg.MASK_SPAN_RAMP_END - train_cfg.MASK_SPAN_RAMP_START), 1)
        self.model.train()
        t_start = time.time()
        for spikes, rates, heldout_spikes, forward_spikes in self.training_generator:
            spikes = spikes.to(self.device)
            rates = rates.to(self.device) if self.config.MODEL.REQUIRES_RATES else None
            if self.training_generator.dataset.has_heldout:
                heldout_spikes = heldout_spikes.to(self.device)
                forward_spikes = forward_spikes.to(self.device)
            else:
                heldout_spikes = None
                forward_spikes = None
            masked_spikes, labels = self.masker.mask_batch(
                spikes,
                max_spikes=self.max_spikes,
                should_mask=is_input_masked_model(self.config.MODEL.NAME),
                expand_prob=expand_prob,
                heldout_spikes=heldout_spikes,
                forward_spikes=forward_spikes
            )
            mlm_loss, _, layer_outputs, *_ = self.model(
                masked_spikes,
                mask_labels=labels,
                rates=rates,
                return_outputs=False,
            )
            loss = mlm_loss.mean()
            if self.optimizer is not None:
                self.optimizer.zero_grad()
                loss.backward()
                params = self._get_parameters()
                nn.utils.clip_grad_norm_(
                    params, train_cfg.MAX_GRAD_NORM
                )
                self.optimizer.step()
        self.pth_time += time.time() - t_start
        self.count_updates += 1
        update = self.count_updates
        if self.optimizer is not None and train_cfg.LR.SCHEDULE:
            self.lr_scheduler.step()
        if self._do_log(update):
            # * Note we're only logging the loss of the last train step
            with TensorboardWriter(
                self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
            ) as writer:
                if self.optimizer is not None and train_cfg.LR.SCHEDULE:
                    # Fix: pass `update` as the global step, consistent with
                    # every other add_scalar call in this class.
                    writer.add_scalar("lr", self.lr_scheduler.get_last_lr()[0], update)
                    self.logger.queue_stat("LR", self.lr_scheduler.get_last_lr()[0])
                writer.add_scalar(
                    "loss", # train loss
                    loss,
                    update,
                )
            self.logger.queue_stat("loss", loss.item())
        metrics_dict = dict(
            done = False,
            epoch = self.count_updates,
            # r2 = self.best_r2["value"],
            best_masked_loss = self.best_val["value"] # Tune will reference this value to select best model.
        )
        if (train_cfg.DO_VAL and update % train_cfg.VAL_INTERVAL == 0):
            self.model.eval()
            with torch.no_grad():
                spikes, rates, heldout_spikes, forward_spikes = self.validation_set.get_dataset()
                spikes = spikes.to(self.device)
                rates = rates.to(self.device)
                if self.validation_set.has_heldout:
                    heldout_spikes = heldout_spikes.to(self.device)
                    forward_spikes = forward_spikes.to(self.device)
                else:
                    heldout_spikes = None
                    forward_spikes = None
                feed_rates = rates if self.config.MODEL.REQUIRES_RATES else None
                masked_spikes, labels = self.masker.mask_batch(
                    spikes,
                    max_spikes=self.max_spikes,
                    should_mask=is_input_masked_model(self.config.MODEL.NAME),
                    heldout_spikes=heldout_spikes,
                    forward_spikes=forward_spikes,
                )
                loss, pred_rates, *_ = self.model(
                    masked_spikes,
                    mask_labels=labels,
                    rates=feed_rates,
                )
                val_loss = loss.mean()
                # no_mask evaluation should still exclude heldout neurons
                if heldout_spikes is not None:
                    spikes = torch.cat([spikes, torch.zeros_like(heldout_spikes)], -1)
                    spikes = torch.cat([spikes, torch.zeros_like(forward_spikes)], 1)
                    no_mask_labels = spikes.clone()
                    # Fix: slice over *all* heldout neurons (trailing ':'),
                    # matching the forward-span line below; the original
                    # indexed only a single neuron.
                    no_mask_labels[..., -heldout_spikes.size(-1):] = -100 # unmasked_label
                    no_mask_labels[:, -forward_spikes.size(1):,:] = -100 # unmasked_label
                else:
                    no_mask_labels = spikes
                no_mask_loss, pred_rates, *_ = self.model(
                    spikes,
                    mask_labels=no_mask_labels,
                    passthrough=True,
                    rates=rates
                )
                no_mask_loss = no_mask_loss.mean()
                metrics_dict["unmasked_loss"] = no_mask_loss.item()
                metrics_dict["masked_loss"] = val_loss.item()
                if "smth_masked_loss" not in self.rolling_metrics:
                    self.rolling_metrics["smth_masked_loss"] = metrics_dict["masked_loss"]
                else:
                    self.rolling_metrics["smth_masked_loss"] = exp_smooth(metrics_dict["masked_loss"], self.rolling_metrics["smth_masked_loss"])
                metrics_dict["smth_masked_loss"] = self.rolling_metrics["smth_masked_loss"]
                if self._do_log(update):
                    with TensorboardWriter(
                        self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
                    ) as writer:
                        writer.add_scalar(
                            "val_loss",
                            val_loss,
                            update,
                        )
                        writer.add_scalar(
                            "unmasked_loss",
                            no_mask_loss,
                            update,
                        )
                        self.logger.queue_stat("val loss", val_loss.item())
                        self.logger.queue_stat("unmasked val loss", no_mask_loss.item())
                        if train_cfg.DO_R2 and self.validation_set.has_rates:
                            r2 = self.neuron_r2(rates, pred_rates)
                            writer.add_scalar("r2", r2, update)
                            self.logger.queue_stat("r2", r2)
                            if self.best_R2["value"] < r2:
                                self.best_R2["value"] = r2
                                self.best_R2["update"] = update
                                self.save_checkpoint(f'{self.config.VARIANT}.gr2.pth') # greatest r2
                            metrics_dict["r2"] = r2
                if no_mask_loss.item() < self.best_unmasked_val["value"]:
                    self.logger.info(f"Overwriting best unmasked val {self.best_unmasked_val['value']} from {self.best_unmasked_val['update']} with {no_mask_loss} at {update}.")
                    self.best_unmasked_val["value"] = no_mask_loss.item()
                    self.best_unmasked_val["update"] = update
                    self.save_checkpoint(f'{self.config.VARIANT}.lfve.pth') # full validation
                if val_loss.item() < self.best_val["value"]:
                    self.logger.info(f"Overwriting best val {self.best_val['value']} from {self.best_val['update']} with {val_loss} at {update}.")
                    self.best_val["value"] = val_loss.item()
                    self.best_val["update"] = update
                    self.save_checkpoint(f'{self.config.VARIANT}.lve.pth')
                elif update - self.best_val["update"] > train_cfg.PATIENCE:
                    self.logger.info(f"Val loss has not improved for {train_cfg.PATIENCE} updates. Stopping...")
                    self.logger.info(f"Best val: {self.best_val['value']} at {self.best_val['update']} updates.")
                    if train_cfg.DO_R2 and self.validation_set.has_rates: # log down for hparams
                        self.logger.info(f"Best R2: {self.best_R2['value']} at {self.best_R2['update']}")
                        r2 = self.neuron_r2(rates, pred_rates)
                        metrics_dict["r2"] = r2
                    metrics_dict["done"] = True
                    metrics_dict["best_masked_loss"] = self.best_val["value"]
        if self._do_log(update):
            self.logger.log_update(update)
            self.logger.info(
                "update: {}\tpth-time: {:.3f}s\t".format(
                    update, self.pth_time
                )
            )
        if update % train_cfg.CHECKPOINT_INTERVAL == 0 and not train_cfg.TUNE_MODE: # Don't save extra checkpoints when sweeping
            self.save_checkpoint(
                f"{self.config.VARIANT}.{self.count_checkpoints}.pth"
            )
            self.count_checkpoints += 1
        return metrics_dict
    def eval(
        self,
        checkpoint_path: str,
        save_path = ""
    ) -> None:
        # * The evaluation code path has legacy code (and will not run).
        # * Evaluation / analysis is done in analysis scripts.
        r"""Evaluates a single checkpoint.
        Runs masking identical to train and calculates PoissonNLL, R2 on masked neurons.
        Args:
            checkpoint_path: path of checkpoint
        Returns:
            None
        """
        self.logger.info(f"Starting evaluation")
        self.load_device()
        self.masker = Masker(self.config.TRAIN, self.device)
        # Not using a generator atm because we can fit the whole set onto GPU
        test_set = SpikesDataset(self.config, self.config.DATA.TEST_FILENAME, mode="test", logger=self.logger)
        self.logger.info(f"Evaluating on {len(test_set)} samples.")
        train_cfg = self.config.TRAIN
        ckpt_dict = self.load_checkpoint(checkpoint_path, map_location="cpu")
        assert test_set.get_num_neurons() == self.num_neurons # Compatibility check
        update = ckpt_dict["extra_state"]["update"]
        test_set.clip_spikes(self.max_spikes)
        self.model.eval()
        with TensorboardWriter(
            self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
        ) as writer:
            with torch.no_grad():
                spikes, rates, heldout_spikes, forward_spikes = test_set.get_dataset()
                spikes = spikes.to(self.device)
                rates = rates.to(self.device)
                if test_set.has_heldout:
                    heldout_spikes = heldout_spikes.to(self.device)
                    forward_spikes = forward_spikes.to(self.device)
                else:
                    heldout_spikes = None
                    forward_spikes = None
                # NOTE(review): legacy call signature — passes train_cfg
                # positionally, unlike the mask_batch calls in train_epoch.
                masked_spikes, labels = self.masker.mask_batch(
                    spikes,
                    train_cfg,
                    max_spikes=self.max_spikes,
                    should_mask=is_input_masked_model(self.config.MODEL.NAME),
                    heldout_spikes=heldout_spikes,
                    forward_spikes=forward_spikes
                )
                loss, pred_rates, *_ = self.model(masked_spikes, mask_labels=labels)
                test_loss = loss.mean()
                writer.add_scalar(
                    "test_loss",
                    test_loss,
                    update,
                )
                # Ideally we could do this just on masked areas
                selected_mask = labels != UNMASKED_LABEL
                masked_rates = torch.masked_select(rates, selected_mask).cpu()
                masked_pred_rates = torch.masked_select(pred_rates, selected_mask).cpu()
                r2 = r2_score(masked_rates, masked_pred_rates, multioutput='uniform_average')
                writer.add_scalar("test_r2", r2, update)
                self.logger.queue_stat("test r2", r2)
        self.logger.queue_stat("test loss", test_loss.item())
        stat_str = "\t".join([f"{stat[0]}: {stat[1]:.3f}" for stat in self.logger.empty_queue()])
        self.logger.info("update: {}\t{}".format(update, stat_str))
    def get_rates(
        self,
        checkpoint_path = None,
        mode = DATASET_MODES.trainval,
        save_path = None,
        keep_layers = -1, # keep last layer
    ) -> None:
        r"""Evaluates model (with checkpoint loaded) on train/val data and retrieves rates and activations (features for downstream tasks).
        Matches LFADS structure - we thus use a single dataset (no train val differentiator).
        Args:
            checkpoint_path: path of checkpoint (will use model on runner if not provided)
            save_path: Path to save activations at (optional). Does not save if nothing provided
        Returns:
            rates: ! confirm shape
            layer_outputs: ! confirm shape
        """
        self.logger.info(f"Getting rates...")
        if self.device is None:
            self.load_device()
        train_cfg = self.config.TRAIN
        self.masker = Masker(train_cfg, self.device) # Unused
        whole_set = SpikesDataset(self.config, self.config.DATA.TRAIN_FILENAME, mode=mode, logger=self.logger)
        self.max_spikes = whole_set.get_max_spikes() + 3
        self.num_neurons = whole_set.get_num_neurons()
        self.logger.info(f"Evaluating on {len(whole_set)} samples.")
        data_generator = data.DataLoader(whole_set,
            batch_size=train_cfg.BATCH_SIZE, shuffle=False
        )
        ckpt_dict = self.load_checkpoint(checkpoint_path, map_location="cpu")
        if self.num_neurons is None:
            # Fix: the original *called* the int (`self.num_neurons(...)`,
            # a TypeError); assignment was clearly intended.
            self.num_neurons = whole_set.get_num_neurons()
        update = ckpt_dict["extra_state"]["update"]
        if self.max_spikes is not None:
            whole_set.clip_spikes(self.max_spikes)
        self.model.eval()
        with torch.no_grad():
            losses = []
            pred_rates = []
            layer_outputs = []
            # all_attentions = []
            for spikes, _, heldout_spikes, forward_spikes in data_generator:
                spikes = spikes.to(self.device)
                if data_generator.dataset.has_heldout:
                    heldout_spikes = heldout_spikes.to(self.device)
                    forward_spikes = forward_spikes.to(self.device)
                    # Do NOT provide privileged eval info
                    spikes = torch.cat([spikes, torch.zeros_like(heldout_spikes)], -1)
                    spikes = torch.cat([spikes, torch.zeros_like(forward_spikes)], 1)
                else:
                    heldout_spikes = None
                    forward_spikes = None
                # Predict everything: labels == spikes, passthrough (no masking).
                loss, batch_rates, batch_layer_outputs, *_ = self.model(
                # loss, batch_rates, batch_layer_outputs, _, _, batch_attn_list, *_ = self.model(
                    spikes,
                    mask_labels=spikes,
                    passthrough=True,
                    return_outputs=True,
                    return_weights=True,
                )
                batch_layer_outputs = batch_layer_outputs[keep_layers:]
                losses.append(loss.mean().item())
                pred_rates.append(batch_rates)
                # batch_layer_outputs is Batch list of Layer list of modules * T x B x H (permuted due to transformer)
                layer_outputs.append(batch_layer_outputs)
                # layer x trial x time x time
                # all_attentions.append(batch_attn_list)
            # trial x time x h
            pred_rates = torch.cat(pred_rates, dim=0)
            if self.config.MODEL.LOGRATE:
                pred_rates = pred_rates.exp()
            # Note this a list
            outputs_per_layer = zip(*layer_outputs) # Now lists of all samples, grouped by layer
            all_layer_outputs = [torch.cat(layer, dim=1).permute(1, 0, 2) for layer in outputs_per_layer]
            # all_layer_outputs is Layer list of B x M*T x H
            # attention_per_layer = zip(*all_attentions) # Lists of all samples, grouped by layer
            # all_attentions = torch.stack([torch.cat(layer, dim=0) for layer in attention_per_layer], dim=0)
        self.logger.queue_stat("test loss", torch.tensor(losses).mean().item())
        self.logger.log_update(update)
        if save_path is not None:
            with h5py.File(save_path, 'w') as f:
                f.create_dataset('rates', data=pred_rates.cpu().numpy())
                f.create_dataset('layer_outputs', data=all_layer_outputs[-1].cpu().numpy()) # Only final layer
                # f.create_dataset('attention', data=all_attentions.cpu().numpy())
        return pred_rates, all_layer_outputs # , all_attentions
    def _clean_rates(self, gt, pred, flatten=False):
        # Flatten trial/time dims and undo log-rates so sklearn metrics apply.
        if gt.size() != pred.size():
            raise Exception(f"Incompatible r2 sizes, GT: {gt.size()}, Pred: {pred.size()}")
        if flatten or len(gt.size()) > 1:
            gt = gt.flatten(end_dim=1)
            pred = pred.flatten(end_dim=1)
        if self.config.MODEL.LOGRATE:
            gt = gt.exp()
            pred = pred.exp()
        return gt.cpu(), pred.cpu()
    def neuron_r2(self, gt, pred, **kwargs):
        r"""Uniform-average R2 between ground-truth and predicted rates."""
        gt, pred = self._clean_rates(gt, pred, **kwargs)
        return r2_score(gt, pred, multioutput='uniform_average')
    def neuron_vaf(self, gt, pred, **kwargs):
        r"""Uniform-average variance-accounted-for between GT and predicted rates."""
        gt, pred = self._clean_rates(gt, pred, **kwargs)
        return explained_variance_score(gt, pred, multioutput='uniform_average')
    # For HParams
    def extract_hps_dict(self):
        r"""Flatten MODEL and TRAIN config into a single dict for hparam logging."""
        hp_dict = {}
        hp_dict.update(self._extract_flat_dict(self.config.MODEL, "MODEL"))
        hp_dict.update(self._extract_flat_dict(self.config.TRAIN, "TRAIN"))
        return hp_dict
    BLACKLIST = ['MODEL/LOSS']
    def _extract_flat_dict(self, config, prefix):
        # Recursively flatten nested config dicts to "PREFIX/KEY" entries,
        # skipping blacklisted prefixes and list-valued entries.
        flat_dict = {}
        if prefix in Runner.BLACKLIST:
            return flat_dict
        for key, value in config.items():
            if isinstance(value, dict):
                flat_dict.update(self._extract_flat_dict(value, f"{prefix}/{key}"))
            elif not isinstance(value, list): # drop lists
                flat_dict[f"{prefix}/{key}"] = value
        return flat_dict
| [
"src.TensorboardWriter",
"torch.nn.utils.clip_grad_norm_",
"torch.cuda.device_count",
"src.get_model_class",
"numpy.argsort",
"torch.cuda.is_available",
"sklearn.metrics.r2_score",
"os.path.exists",
"src.dataset.SpikesDataset",
"pytorch_transformers.WarmupCosineSchedule",
"os.path.split",
"src... | [((871, 936), 'os.system', 'os.system', (['"""nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp"""'], {}), "('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n", (880, 936), False, 'import os\n'), ((816, 841), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (839, 841), False, 'import torch\n'), ((2566, 2615), 'os.path.join', 'osp.join', (['config.LOG_DIR', 'f"""{config.VARIANT}.log"""'], {}), "(config.LOG_DIR, f'{config.VARIANT}.log')\n", (2574, 2615), True, 'import os.path as osp\n'), ((2715, 2730), 'src.create_logger', 'create_logger', ([], {}), '()\n', (2728, 2730), False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((6221, 6254), 'torch.save', 'torch.save', (['checkpoint', 'full_path'], {}), '(checkpoint, full_path)\n', (6231, 6254), False, 'import torch\n'), ((6760, 6804), 'torch.load', 'torch.load', (['checkpoint_path', '*args'], {}), '(checkpoint_path, *args, **kwargs)\n', (6770, 6804), False, 'import torch\n'), ((9682, 9792), 'src.dataset.SpikesDataset', 'SpikesDataset', (['self.config', 'self.config.DATA.TRAIN_FILENAME'], {'mode': 'DATASET_MODES.train', 'logger': 'self.logger'}), '(self.config, self.config.DATA.TRAIN_FILENAME, mode=\n DATASET_MODES.train, logger=self.logger)\n', (9695, 9792), False, 'from src.dataset import DATASET_MODES, SpikesDataset\n'), ((9822, 9910), 'torch.utils.data.DataLoader', 'data.DataLoader', (['training_set'], {'batch_size': 'self.config.TRAIN.BATCH_SIZE', 'shuffle': '(True)'}), '(training_set, batch_size=self.config.TRAIN.BATCH_SIZE,\n shuffle=True)\n', (9837, 9910), False, 'from torch.utils import data\n'), ((10787, 10825), 'src.mask.Masker', 'Masker', (['self.config.TRAIN', 'self.device'], {}), '(self.config.TRAIN, self.device)\n', (10793, 10825), False, 'from src.mask import Masker, UNMASKED_LABEL, DEFAULT_MASK_VAL\n'), ((10918, 10959), 'src.is_learning_model', 'is_learning_model', (['self.config.MODEL.NAME'], 
{}), '(self.config.MODEL.NAME)\n', (10935, 10959), False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((13482, 13506), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13504, 13506), False, 'import torch\n'), ((14489, 14500), 'time.time', 'time.time', ([], {}), '()\n', (14498, 14500), False, 'import time\n'), ((23852, 23890), 'src.mask.Masker', 'Masker', (['self.config.TRAIN', 'self.device'], {}), '(self.config.TRAIN, self.device)\n', (23858, 23890), False, 'from src.mask import Masker, UNMASKED_LABEL, DEFAULT_MASK_VAL\n'), ((23989, 24084), 'src.dataset.SpikesDataset', 'SpikesDataset', (['self.config', 'self.config.DATA.TEST_FILENAME'], {'mode': '"""test"""', 'logger': 'self.logger'}), "(self.config, self.config.DATA.TEST_FILENAME, mode='test',\n logger=self.logger)\n", (24002, 24084), False, 'from src.dataset import DATASET_MODES, SpikesDataset\n'), ((27386, 27416), 'src.mask.Masker', 'Masker', (['train_cfg', 'self.device'], {}), '(train_cfg, self.device)\n', (27392, 27416), False, 'from src.mask import Masker, UNMASKED_LABEL, DEFAULT_MASK_VAL\n'), ((27447, 27541), 'src.dataset.SpikesDataset', 'SpikesDataset', (['self.config', 'self.config.DATA.TRAIN_FILENAME'], {'mode': 'mode', 'logger': 'self.logger'}), '(self.config, self.config.DATA.TRAIN_FILENAME, mode=mode,\n logger=self.logger)\n', (27460, 27541), False, 'from src.dataset import DATASET_MODES, SpikesDataset\n'), ((27744, 27818), 'torch.utils.data.DataLoader', 'data.DataLoader', (['whole_set'], {'batch_size': 'train_cfg.BATCH_SIZE', 'shuffle': '(False)'}), '(whole_set, batch_size=train_cfg.BATCH_SIZE, shuffle=False)\n', (27759, 27818), False, 'from torch.utils import data\n'), ((31644, 31693), 'sklearn.metrics.r2_score', 'r2_score', (['gt', 'pred'], {'multioutput': '"""uniform_average"""'}), "(gt, pred, multioutput='uniform_average')\n", (31652, 31693), False, 'from sklearn.metrics import r2_score, 
explained_variance_score\n'), ((31813, 31878), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['gt', 'pred'], {'multioutput': '"""uniform_average"""'}), "(gt, pred, multioutput='uniform_average')\n", (31837, 31878), False, 'from sklearn.metrics import r2_score, explained_variance_score\n'), ((2322, 2374), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': 'tmp_device'}), '(checkpoint_path, map_location=tmp_device)\n', (2332, 2374), False, 'import torch\n'), ((2460, 2486), 'os.path.exists', 'osp.exists', (['config.LOG_DIR'], {}), '(config.LOG_DIR)\n', (2470, 2486), True, 'import os.path as osp\n'), ((2500, 2542), 'os.makedirs', 'os.makedirs', (['config.LOG_DIR'], {'exist_ok': '(True)'}), '(config.LOG_DIR, exist_ok=True)\n', (2511, 2542), False, 'import os\n'), ((3475, 3514), 'src.get_model_class', 'get_model_class', (['self.config.MODEL.NAME'], {}), '(self.config.MODEL.NAME)\n', (3490, 3514), False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((4230, 4281), 'torch.nn.DataParallel', 'nn.DataParallel', (['self.model'], {'device_ids': 'gpu_indices'}), '(self.model, device_ids=gpu_indices)\n', (4245, 4281), True, 'import torch.nn as nn\n'), ((5827, 5881), 'os.makedirs', 'os.makedirs', (['self.config.CHECKPOINT_DIR'], {'exist_ok': '(True)'}), '(self.config.CHECKPOINT_DIR, exist_ok=True)\n', (5838, 5881), False, 'import os\n'), ((5906, 5953), 'os.path.join', 'osp.join', (['self.config.CHECKPOINT_DIR', 'file_name'], {}), '(self.config.CHECKPOINT_DIR, file_name)\n', (5914, 5953), True, 'import os.path as osp\n'), ((8450, 8475), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8473, 8475), False, 'import torch\n'), ((8503, 8522), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8515, 8522), False, 'import torch\n'), ((8882, 8910), 'torch.device', 'torch.device', (['"""cuda"""', 'gpu_id'], {}), "('cuda', gpu_id)\n", (8894, 
8910), False, 'import torch\n'), ((10294, 10400), 'src.dataset.SpikesDataset', 'SpikesDataset', (['self.config', 'self.config.DATA.VAL_FILENAME'], {'mode': 'DATASET_MODES.val', 'logger': 'self.logger'}), '(self.config, self.config.DATA.VAL_FILENAME, mode=\n DATASET_MODES.val, logger=self.logger)\n', (10307, 10400), False, 'from src.dataset import DATASET_MODES, SpikesDataset\n'), ((15937, 15948), 'time.time', 'time.time', ([], {}), '()\n', (15946, 15948), False, 'import time\n'), ((24488, 24562), 'src.TensorboardWriter', 'TensorboardWriter', (['self.config.TENSORBOARD_DIR'], {'flush_secs': 'self.flush_secs'}), '(self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs)\n', (24505, 24562), False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((28197, 28212), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (28210, 28212), False, 'import torch\n'), ((29924, 29952), 'torch.cat', 'torch.cat', (['pred_rates'], {'dim': '(0)'}), '(pred_rates, dim=0)\n', (29933, 29952), False, 'import torch\n'), ((1029, 1057), 'numpy.argsort', 'np.argsort', (['memory_available'], {}), '(memory_available)\n', (1039, 1057), True, 'import numpy as np\n'), ((8598, 8623), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8621, 8623), False, 'import torch\n'), ((11688, 11793), 'pytorch_transformers.WarmupCosineSchedule', 'WarmupCosineSchedule', (['self.optimizer'], {'warmup_steps': 'train_cfg.LR.WARMUP', 't_total': 'train_cfg.NUM_UPDATES'}), '(self.optimizer, warmup_steps=train_cfg.LR.WARMUP,\n t_total=train_cfg.NUM_UPDATES)\n', (11708, 11793), False, 'from pytorch_transformers import AdamW, WarmupCosineSchedule\n'), ((11922, 12027), 'src.utils.get_inverse_sqrt_schedule', 'get_inverse_sqrt_schedule', (['self.optimizer'], {'warmup_steps': 'train_cfg.LR.WARMUP', 'lr_max': 'train_cfg.LR.INIT'}), '(self.optimizer, warmup_steps=train_cfg.LR.WARMUP,\n lr_max=train_cfg.LR.INIT)\n', (11947, 12027), False, 'from 
src.utils import get_inverse_sqrt_schedule\n'), ((13284, 13358), 'src.TensorboardWriter', 'TensorboardWriter', (['self.config.TENSORBOARD_DIR'], {'flush_secs': 'self.flush_secs'}), '(self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs)\n', (13301, 13358), False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((15777, 15834), 'torch.nn.utils.clip_grad_norm_', 'nn.utils.clip_grad_norm_', (['params', 'train_cfg.MAX_GRAD_NORM'], {}), '(params, train_cfg.MAX_GRAD_NORM)\n', (15801, 15834), True, 'import torch.nn as nn\n'), ((16253, 16327), 'src.TensorboardWriter', 'TensorboardWriter', (['self.config.TENSORBOARD_DIR'], {'flush_secs': 'self.flush_secs'}), '(self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs)\n', (16270, 16327), False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((17189, 17204), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17202, 17204), False, 'import torch\n'), ((24613, 24628), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24626, 24628), False, 'import torch\n'), ((26045, 26117), 'sklearn.metrics.r2_score', 'r2_score', (['masked_rates', 'masked_pred_rates'], {'multioutput': '"""uniform_average"""'}), "(masked_rates, masked_pred_rates, multioutput='uniform_average')\n", (26053, 26117), False, 'from sklearn.metrics import r2_score, explained_variance_score\n'), ((30724, 30749), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (30733, 30749), False, 'import h5py\n'), ((2260, 2285), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2283, 2285), False, 'import torch\n'), ((5737, 5757), 'os.path.split', 'osp.split', (['file_name'], {}), '(file_name)\n', (5746, 5757), True, 'import os.path as osp\n'), ((15157, 15202), 'src.is_input_masked_model', 'is_input_masked_model', (['self.config.MODEL.NAME'], {}), '(self.config.MODEL.NAME)\n', (15178, 15202), 
False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((17945, 17990), 'src.is_input_masked_model', 'is_input_masked_model', (['self.config.MODEL.NAME'], {}), '(self.config.MODEL.NAME)\n', (17966, 17990), False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((19819, 19893), 'src.TensorboardWriter', 'TensorboardWriter', (['self.config.TENSORBOARD_DIR'], {'flush_secs': 'self.flush_secs'}), '(self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs)\n', (19836, 19893), False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((25297, 25342), 'src.is_input_masked_model', 'is_input_masked_model', (['self.config.MODEL.NAME'], {}), '(self.config.MODEL.NAME)\n', (25318, 25342), False, 'from src import get_model_class, is_learning_model, is_input_masked_model, TensorboardWriter, create_logger\n'), ((25887, 25928), 'torch.masked_select', 'torch.masked_select', (['rates', 'selected_mask'], {}), '(rates, selected_mask)\n', (25906, 25928), False, 'import torch\n'), ((25971, 26017), 'torch.masked_select', 'torch.masked_select', (['pred_rates', 'selected_mask'], {}), '(pred_rates, selected_mask)\n', (25990, 26017), False, 'import torch\n'), ((30202, 30225), 'torch.cat', 'torch.cat', (['layer'], {'dim': '(1)'}), '(layer, dim=1)\n', (30211, 30225), False, 'import torch\n'), ((18505, 18537), 'torch.zeros_like', 'torch.zeros_like', (['heldout_spikes'], {}), '(heldout_spikes)\n', (18521, 18537), False, 'import torch\n'), ((18592, 18624), 'torch.zeros_like', 'torch.zeros_like', (['forward_spikes'], {}), '(forward_spikes)\n', (18608, 18624), False, 'import torch\n'), ((28753, 28785), 'torch.zeros_like', 'torch.zeros_like', (['heldout_spikes'], {}), '(heldout_spikes)\n', (28769, 28785), False, 'import torch\n'), ((28840, 28872), 'torch.zeros_like', 'torch.zeros_like', (['forward_spikes'], {}), 
'(forward_spikes)\n', (28856, 28872), False, 'import torch\n'), ((30593, 30613), 'torch.tensor', 'torch.tensor', (['losses'], {}), '(losses)\n', (30605, 30613), False, 'import torch\n')] |
from typing import Optional, Union
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
# Default matplotlib text properties for fragment-ion annotations drawn by
# plot_spectrum(): labels rotated 90 degrees and anchored at the peak apex.
_annotation_kws = {
    "horizontalalignment": "left",  # if not mirror_intensity else "right",
    "verticalalignment": "center",
    "fontsize": 7,
    "rotation": 90,
    "rotation_mode": "anchor",
    "zorder": 5,  # draw on top of the peak stems (which also use zorder=5)
}
def plot_spectrum(spectrum,
                  annotate_ions: bool = False,
                  mirror_intensity: bool = False,
                  grid: Union[bool, str] = True,
                  ax: plt.Axes = None,
                  peak_color="teal",
                  **plt_kwargs) -> plt.Axes:
    """
    Plot a single MS/MS spectrum.
    Code is largely taken from package "spectrum_utils".
    Parameters
    ----------
    spectrum: matchms.Spectrum
        The spectrum to be plotted.
    annotate_ions:
        Flag indicating whether or not to annotate fragment using peak comments
        (if present in the spectrum). The default is False.
    mirror_intensity:
        Flag indicating whether to flip the intensity axis or not.
    grid:
        Draw grid lines or not. Either a boolean to enable/disable both major
        and minor grid lines or 'major'/'minor' to enable major or minor grid
        lines respectively.
    ax:
        Axes instance on which to plot the spectrum. If None the current Axes
        instance is used.
    peak_color:
        Color of the peak stems.
    plt_kwargs:
        Additional keyword arguments forwarded to ``ax.plot``.
    Returns
    -------
    plt.Axes
        The matplotlib Axes instance on which the spectrum is plotted.
    """
    # pylint: disable=too-many-locals, too-many-arguments
    if ax is None:
        ax = plt.gca()
    # Round the m/z window outwards to the nearest 100 so peaks never touch
    # the axis border (peaks are assumed sorted by m/z: mz[0]/mz[-1] used).
    min_mz = max(0, np.floor(spectrum.peaks.mz[0] / 100 - 1) * 100)
    max_mz = np.ceil(spectrum.peaks.mz[-1] / 100 + 1) * 100
    max_intensity = spectrum.peaks.intensities.max()
    intensities = spectrum.peaks.intensities / max_intensity
    def make_stems():
        """calculate where the stems of the spectrum peaks are going to be"""
        x = np.zeros([2, spectrum.peaks.mz.size], dtype="float")
        y = np.zeros(x.shape)
        x[:, :] = np.tile(spectrum.peaks.mz, (2, 1))
        y[1, :] = intensities
        return x, y
    x, y = make_stems()
    if mirror_intensity is True:
        y = -y
    ax.plot(x, y, color=peak_color, linewidth=1.0, marker="", zorder=5, **plt_kwargs)
    if annotate_ions and isinstance(spectrum.get("peak_comments"), dict):
        for mz, comment in spectrum.get("peak_comments").items():
            # Index of the peak closest to the annotated m/z value.
            idx = (-abs(spectrum.peaks.mz - mz)).argmax()
            ax.text(mz, intensities[idx], f"m/z: {mz} \n {comment}",
                    _annotation_kws)
    ax.set_xlim(min_mz, max_mz)
    ax.yaxis.set_major_formatter(mticker.PercentFormatter(xmax=1.0))
    # Leave head-room above the peaks for the annotation labels.
    y_max = 1.25 if annotate_ions else 1.10
    ax.set_ylim(*(0, y_max) if not mirror_intensity else (-y_max, 0))
    # BUGFIX: the minor locators used to be assigned twice (AutoLocator,
    # immediately overwritten by AutoMinorLocator); only the effective
    # AutoMinorLocator calls are kept.
    ax.xaxis.set_minor_locator(mticker.AutoMinorLocator())
    ax.yaxis.set_minor_locator(mticker.AutoMinorLocator())
    if grid in (True, "both", "major"):
        ax.grid(visible=True, which="major", color="#9E9E9E", linewidth=0.2)
    if grid in (True, "both", "minor"):
        ax.grid(visible=True, which="minor", color="#9E9E9E", linewidth=0.2)
    ax.set_axisbelow(True)
    ax.tick_params(axis="both", which="both", labelsize="small")
    # Drop tick labels above 100% (annotations may have extended the limit).
    y_ticks = ax.get_yticks()
    ax.set_yticks(y_ticks[y_ticks <= 1.0])
    ax.set_xlabel("m/z", style="italic")
    ax.set_ylabel("Intensity")
    title = "Spectrum" if spectrum.get("compound_name") is None else spectrum.get("compound_name")
    ax.set_title(title)
    return ax
def plot_spectra_mirror(spec_top,
                        spec_bottom,
                        ax: Optional[plt.Axes] = None,
                        **spectrum_kws) -> plt.Axes:
    """Mirror plot two MS/MS spectra.
    Code is largely taken from package "spectrum_utils".
    Parameters
    ----------
    spec_top: matchms.Spectrum
        The spectrum to be plotted on the top.
    spec_bottom: matchms.Spectrum
        The spectrum to be plotted on the bottom.
    ax:
        Axes instance on which to plot the spectrum. If None the current Axes
        instance is used.
    spectrum_kws:
        Keyword arguments for `plot_spectrum()`.
    Returns
    -------
    plt.Axes
        The matplotlib Axes instance on which the spectra are plotted.
    """
    if ax is None:
        ax = plt.gca()
    if spectrum_kws is None:
        spectrum_kws = {}
    # Draw the top spectrum the normal way up and remember its upper limit.
    plot_spectrum(spec_top, mirror_intensity=False, ax=ax, peak_color="darkblue", **spectrum_kws)
    upper = ax.get_ylim()[1]
    # Draw the bottom spectrum flipped and remember its lower limit.
    plot_spectrum(spec_bottom, mirror_intensity=True, ax=ax, peak_color="teal", **spectrum_kws)
    lower = ax.get_ylim()[0]
    ax.set_ylim(lower, upper)
    ax.axhline(0, color="#9E9E9E", zorder=10)
    # Widen the m/z window so that both spectra fit.
    left = max(
        [
            0,
            np.floor(spec_top.peaks.mz[0] / 100 - 1) * 100,
            np.floor(spec_bottom.peaks.mz[0] / 100 - 1) * 100,
        ]
    )
    right = max(
        [
            np.ceil(spec_top.peaks.mz[-1] / 100 + 1) * 100,
            np.ceil(spec_bottom.peaks.mz[-1] / 100 + 1) * 100,
        ]
    )
    ax.set_xlim(left, right)
    ax.yaxis.set_major_locator(mticker.AutoLocator())
    ax.yaxis.set_minor_locator(mticker.AutoMinorLocator())
    # Show intensities as absolute percentages on both halves.
    ax.yaxis.set_major_formatter(
        mticker.FuncFormatter(lambda val, _pos: f"{abs(val):.0%}")
    )
    label_top = "Spectrum 1" if spec_top.get("compound_name") is None else spec_top.get("compound_name")
    label_bottom = "Spectrum 2" if spec_bottom.get("compound_name") is None else spec_bottom.get("compound_name")
    x_offset = 0.04 * (right - left)
    ax.text(x_offset, upper, label_top, ha="left", va="top", zorder=2, backgroundcolor="white")
    ax.text(x_offset, lower, label_bottom, ha="left", va="bottom", zorder=2, backgroundcolor="white")
    ax.set_title("Spectrum comparison")
    return ax
def plot_spectra_array(spectrums,
                       n_cols: int = 2,
                       peak_color="darkblue",
                       dpi: int = 200,
                       **spectrum_kws) -> plt.Axes:
    """Plot an array of MS/MS spectra in a grid.
    Code is largely taken from package "spectrum_utils".
    Parameters
    ----------
    spectrums: list of matchms.Spectrum
        List of spectra to be plotted in a single figure.
    n_cols:
        Number of spectra to be plotted per row. Default is 2.
    peak_color:
        Color of the peak stems.
    dpi:
        Resolution of the created figure.
    spectrum_kws:
        Keyword arguments for `plot_spectrum()`.
    Returns
    -------
    (plt.Figure, np.ndarray)
        The created figure and the (n_rows, n_cols) array of Axes.
    """
    assert isinstance(spectrums, list), "Expected list of Spectrum objects as input."
    n_spectra = len(spectrums)
    n_rows = int(np.ceil(n_spectra / n_cols))
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(7 * n_cols, 3 * n_rows), dpi=dpi)
    # BUGFIX: plt.subplots squeezes the axes array (a single row/column comes
    # back 1-D and a single panel as a bare Axes), which used to break the
    # axes[i, j] indexing below.  Normalize to a 2-D array first.
    axes = np.asarray(axes).reshape(n_rows, n_cols)
    if spectrum_kws is None:
        spectrum_kws = {}
    for i in range(n_rows):
        for j in range(n_cols):
            counter = i * n_cols + j
            if counter >= n_spectra:
                break
            plot_spectrum(spectrums[counter],
                          mirror_intensity=False, ax=axes[i, j],
                          peak_color=peak_color, **spectrum_kws)
            axes[i, j].set_title("")
            if spectrums[counter].get("compound_name") is None:
                name = f"Spectrum {i * n_cols + j}"
            else:
                name = spectrums[counter].get("compound_name")
            # Write the compound name inside the panel (titles were cleared).
            y_max = axes[i, j].get_ylim()[1]
            x_min = axes[i, j].get_xlim()[0]
            axes[i, j].text(x_min, y_max, name, va="bottom", zorder=2)
    plt.title("Spectrum comparison")
    return fig, axes
| [
"numpy.tile",
"numpy.ceil",
"matplotlib.ticker.PercentFormatter",
"matplotlib.pyplot.gca",
"numpy.floor",
"numpy.zeros",
"matplotlib.ticker.AutoLocator",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.ticker.AutoMinorLocator"
] | [((6808, 6879), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', 'n_cols'], {'figsize': '(7 * n_cols, 3 * n_rows)', 'dpi': 'dpi'}), '(n_rows, n_cols, figsize=(7 * n_cols, 3 * n_rows), dpi=dpi)\n', (6820, 6879), True, 'import matplotlib.pyplot as plt\n'), ((7671, 7703), 'matplotlib.pyplot.title', 'plt.title', (['"""Spectrum comparison"""'], {}), "('Spectrum comparison')\n", (7680, 7703), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1608), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1606, 1608), True, 'import matplotlib.pyplot as plt\n'), ((1691, 1731), 'numpy.ceil', 'np.ceil', (['(spectrum.peaks.mz[-1] / 100 + 1)'], {}), '(spectrum.peaks.mz[-1] / 100 + 1)\n', (1698, 1731), True, 'import numpy as np\n'), ((1966, 2018), 'numpy.zeros', 'np.zeros', (['[2, spectrum.peaks.mz.size]'], {'dtype': '"""float"""'}), "([2, spectrum.peaks.mz.size], dtype='float')\n", (1974, 2018), True, 'import numpy as np\n'), ((2031, 2048), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (2039, 2048), True, 'import numpy as np\n'), ((2067, 2101), 'numpy.tile', 'np.tile', (['spectrum.peaks.mz', '(2, 1)'], {}), '(spectrum.peaks.mz, (2, 1))\n', (2074, 2101), True, 'import numpy as np\n'), ((2681, 2715), 'matplotlib.ticker.PercentFormatter', 'mticker.PercentFormatter', ([], {'xmax': '(1.0)'}), '(xmax=1.0)\n', (2705, 2715), True, 'import matplotlib.ticker as mticker\n'), ((2863, 2884), 'matplotlib.ticker.AutoLocator', 'mticker.AutoLocator', ([], {}), '()\n', (2882, 2884), True, 'import matplotlib.ticker as mticker\n'), ((2917, 2938), 'matplotlib.ticker.AutoLocator', 'mticker.AutoLocator', ([], {}), '()\n', (2936, 2938), True, 'import matplotlib.ticker as mticker\n'), ((2971, 2997), 'matplotlib.ticker.AutoMinorLocator', 'mticker.AutoMinorLocator', ([], {}), '()\n', (2995, 2997), True, 'import matplotlib.ticker as mticker\n'), ((3030, 3056), 'matplotlib.ticker.AutoMinorLocator', 'mticker.AutoMinorLocator', ([], {}), '()\n', (3054, 3056), True, 'import 
matplotlib.ticker as mticker\n'), ((4466, 4475), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4473, 4475), True, 'import matplotlib.pyplot as plt\n'), ((5371, 5392), 'matplotlib.ticker.AutoLocator', 'mticker.AutoLocator', ([], {}), '()\n', (5390, 5392), True, 'import matplotlib.ticker as mticker\n'), ((5425, 5451), 'matplotlib.ticker.AutoMinorLocator', 'mticker.AutoMinorLocator', ([], {}), '()\n', (5449, 5451), True, 'import matplotlib.ticker as mticker\n'), ((6763, 6790), 'numpy.ceil', 'np.ceil', (['(n_spectra / n_cols)'], {}), '(n_spectra / n_cols)\n', (6770, 6790), True, 'import numpy as np\n'), ((1630, 1670), 'numpy.floor', 'np.floor', (['(spectrum.peaks.mz[0] / 100 - 1)'], {}), '(spectrum.peaks.mz[0] / 100 - 1)\n', (1638, 1670), True, 'import numpy as np\n'), ((5014, 5054), 'numpy.floor', 'np.floor', (['(spec_top.peaks.mz[0] / 100 - 1)'], {}), '(spec_top.peaks.mz[0] / 100 - 1)\n', (5022, 5054), True, 'import numpy as np\n'), ((5074, 5117), 'numpy.floor', 'np.floor', (['(spec_bottom.peaks.mz[0] / 100 - 1)'], {}), '(spec_bottom.peaks.mz[0] / 100 - 1)\n', (5082, 5117), True, 'import numpy as np\n'), ((5181, 5221), 'numpy.ceil', 'np.ceil', (['(spec_top.peaks.mz[-1] / 100 + 1)'], {}), '(spec_top.peaks.mz[-1] / 100 + 1)\n', (5188, 5221), True, 'import numpy as np\n'), ((5241, 5284), 'numpy.ceil', 'np.ceil', (['(spec_bottom.peaks.mz[-1] / 100 + 1)'], {}), '(spec_bottom.peaks.mz[-1] / 100 + 1)\n', (5248, 5284), True, 'import numpy as np\n')] |
import convolution
import numpy as np
def kernelAsList():
    """Return the 1-D edge-detection kernel as a one-row nested list."""
    return [[1, 2, 3, -12, 3, 2, 1]]
def midKernel():
    """Return the edge kernel as a 1 x N numpy row vector."""
    values = kernelAsList()
    width = len(values[0])
    return np.array(values).reshape(1, width)
def midKernelTranspose():
    """Return the edge kernel as an N x 1 numpy column vector."""
    values = kernelAsList()
    height = len(values[0])
    return np.array(values).reshape(height, 1)
def edgeDetection(image):
    # NOTE(review): the vertical-edge response `imageEdge` (column kernel) is
    # computed but discarded -- only the horizontal response (row kernel) is
    # returned.  Looks like the two responses were meant to be combined;
    # confirm intent before changing.
    imageEdge = convolution.applyFilter(image, midKernelTranspose())
    return convolution.applyFilter(image,midKernel()) | [
"numpy.array"
] | [((147, 163), 'numpy.array', 'np.array', (['kernel'], {}), '(kernel)\n', (155, 163), True, 'import numpy as np\n'), ((256, 272), 'numpy.array', 'np.array', (['kernel'], {}), '(kernel)\n', (264, 272), True, 'import numpy as np\n')] |
from __future__ import division, absolute_import
import numpy as np
def bb_intersection_over_union(boxA, boxB):
    """Calculate Intersection Over Union"""
    # Overlap rectangle: the tightest box contained in both inputs
    # (boxes are [x0, y0, x1, y1] with inclusive pixel coordinates).
    left, top = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    right, bottom = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
    # Pixel-inclusive overlap area; zero when the boxes are disjoint.
    interArea = max(0, right - left + 1) * max(0, bottom - top + 1)
    # Pixel-inclusive areas of the prediction and ground-truth boxes.
    areaA = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    areaB = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # IoU = overlap / union, where union = A + B - overlap.
    return interArea / float(areaA + areaB - interArea)
def calculate_bbox(mask):
    """Calculate Bounding Box of white pixels in binary image"""
    # Project the mask onto each axis: per-row and per-column pixel sums.
    row_sums = np.squeeze(np.dot(mask, np.ones((mask.shape[1], 1))))
    col_sums = np.squeeze(np.dot(mask.T, np.ones((mask.shape[0], 1))))
    occupied_rows = np.squeeze(np.where(row_sums > 0))
    occupied_cols = np.squeeze(np.where(col_sums > 0))
    assert len(occupied_rows) > 0 and len(occupied_cols) > 0, 'mask should not be empty'
    mask_height, mask_width = mask.shape
    # Normalized [0, 1) coordinates of the first/last occupied row/column.
    ymin = float(occupied_rows[0]) / mask_height
    ymax = float(occupied_rows[-1]) / mask_height
    xmin = float(occupied_cols[0]) / mask_width
    xmax = float(occupied_cols[-1]) / mask_width
    # Sanity-check the normalized coordinates and the resulting extents.
    for coord in (ymin, ymax, xmin, xmax):
        assert coord >= 0.0
        assert coord < 1.0
    height = ymax - ymin
    assert height >= 0.0
    assert height < 1.0
    width = xmax - xmin
    assert width >= 0.0
    assert width < 1.0
    # Scale back to pixel coordinates: [x0, y0, x1, y1].
    x0 = xmin * mask_width
    y0 = ymin * mask_height
    x1 = xmax * mask_width
    y1 = ymax * mask_height
    return [int(x0), int(y0), int(x1), int(y1)]
| [
"numpy.where",
"numpy.ones"
] | [((1244, 1269), 'numpy.where', 'np.where', (['(histogram_y > 0)'], {}), '(histogram_y > 0)\n', (1252, 1269), True, 'import numpy as np\n'), ((1306, 1331), 'numpy.where', 'np.where', (['(histogram_x > 0)'], {}), '(histogram_x > 0)\n', (1314, 1331), True, 'import numpy as np\n'), ((1104, 1131), 'numpy.ones', 'np.ones', (['(mask.shape[1], 1)'], {}), '((mask.shape[1], 1))\n', (1111, 1131), True, 'import numpy as np\n'), ((1178, 1205), 'numpy.ones', 'np.ones', (['(mask.shape[0], 1)'], {}), '((mask.shape[0], 1))\n', (1185, 1205), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import numpy as np
from datetime import datetime
from tempfile import mkdtemp
def read_header(f):
    '''
    Read header information from the first 1024 bytes of an OpenEphys file.
    Parameters
    ----------
    | f : file
    |       An open file handle to an OpenEphys file
    Returns
    -------
    | header : dict
    |       Dictionary of header info
    '''
    header = {}
    # The header is a 1024-byte blob of "header.key = value;" statements;
    # flatten it to one line and drop the redundant "header." prefixes.
    raw = f.read(1024).replace('\n', '').replace('header.', '')
    # Each ";"-separated statement is a single "key = value" pair.
    for statement in raw.split(';'):
        if '=' not in statement:
            continue
        key, value = statement.split(' = ')
        key = key.strip()
        value = value.strip()
        if key in ['bitVolts', 'sampleRate']:
            # Known floating-point fields.
            header[key] = float(value)
        elif key in ['blockLength', 'bufferSize', 'header_bytes']:
            # Known integer fields.
            header[key] = int(value)
        elif key == 'date_created':
            # Timestamps are stored quoted, e.g. '27-Jul-2015 153634'.
            header[key] = datetime.strptime(value, "'%d-%b-%Y %H%M%S'")
        elif "'" in value:
            # Strip the surrounding single quotes from string values.
            header[key] = value[1:-1]
        else:
            # Keep anything else as a plain string.
            header[key] = value
    return header
def get_type(fname):
    """Classify an OpenEphys file by its name or header description.

    (Python 2 code: opens the handle with the `file` builtin.)
    """
    with file(fname, 'rb') as f:
        # Name-based shortcuts: metadata files and message-event files.
        ext = os.path.splitext(fname)[-1].upper()
        if ext == '.OPENEPHYS':
            return 'OPENEPHYS'
        if os.path.basename(fname).endswith('.events'):
            return 'EVENTS_MESSAGE'
        # Otherwise inspect the free-text description in the header.
        try:
            description = read_header(f)['description']
        except:
            description = 'OTHER'
        for marker, kind in (('sample count', 'CONTINUOUS'),
                             ('eventType', 'SPIKE'),
                             ('sample position', 'EVENT')):
            if marker in description:
                return kind
        return description
def load_continuous(fname, tmp_dir):
    """Load an OpenEphys .continuous file into memmap-backed arrays.

    Parameters
    ----------
    fname : str
        Path to the .continuous file.
    tmp_dir : str
        Directory under which the temporary memmap files are created.

    Returns
    -------
    dict
        Keys: 'data' (samples scaled by bitVolts, i.e. in microvolts),
        'timestamps', 'rec_nums', 'header' and 'units'.
    """
    with file(fname,'rb') as f:
        header = read_header(f)
        # Expected end-of-record marker (read but not currently validated).
        spec_record_marker = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 255])
        # Each record: 8-byte timestamp + 2-byte sample count + 2-byte rec
        # number + blockLength int16 samples + 10-byte marker.
        block_len = header['blockLength']
        record_len = 2 * block_len + 22
        file_len = os.fstat(f.fileno()).st_size
        n_records = int((file_len - 1024) / record_len)
        # Back the big arrays with memmap files so channels can be loaded in
        # parallel without exhausting RAM (pkg_dir is cleaned up after import;
        # the channel name keeps parallel workers from colliding).
        pkg_dir = mkdtemp(dir=tmp_dir)
        chan_name = header['channel']
        path = os.path.join(pkg_dir, chan_name)
        samples = np.memmap(path + '_samples.dat', dtype='int16', mode='w+', shape=(int(n_records * block_len)))
        timestamps = np.memmap(path + '_timestamps.dat', dtype='int64', mode='w+', shape=(n_records))
        rec_nums = np.memmap(path + '_rec_nums.dat', dtype='uint16', mode='w+', shape=(n_records))
        for rec in range(n_records):
            timestamps[rec] = np.fromfile(f, np.dtype('<i8'), 1)
            N = np.fromfile(f, np.dtype('<u2'), 1).item()
            rec_nums[rec] = np.fromfile(f, np.dtype('<u2'), 1)
            # blockLength big-endian int16 samples, then the 10-byte marker
            # (read to keep the file pointer aligned).
            data = np.fromfile(f, np.dtype('>i2'), block_len)
            record_marker = np.fromfile(f, np.dtype('<u1'), 10)
            samples[rec * block_len:(rec + 1) * block_len] = data
    data = {
        # BUGFIX: the bitVolts scaling used to be computed and then
        # immediately discarded; 'data' now really is in microvolts,
        # matching the advertised 'units'.
        'data' : samples * header['bitVolts'],
        'timestamps' : timestamps,
        'rec_nums' : rec_nums,
        'header' : header,
        'units' : 'uV'
        }
    return data
def load_spikes(fname):
    """Load an OpenEphys .spikes file into memmap-backed arrays.

    Returns a dict with the spike waveforms (converted to microvolts),
    per-record timestamps, source ids, gains, thresholds, record numbers
    and sorted-unit ids.  (Python 2 code: uses the `file` builtin.)
    """
    with file(fname,'rb') as f:
        header = read_header(f)
        n_channels = int(header['num_channels'])
        n_samples = 40 # **NOT CURRENTLY WRITTEN TO HEADER**
        # Calculate record length and num records
        record_len = 2*n_channels*n_samples + 4*n_channels + 2*n_channels + 44
        file_len = os.fstat(f.fileno()).st_size
        n_records = int((file_len - 1024) / record_len)
        # initialize memory, using memmap arrays
        pkg_dir = mkdtemp() # use pkg_dir for temp files (gets cleaned up after import)
        chan_name = header['electrode'] # use chan name to allow parallelization
        path = os.path.join(pkg_dir,chan_name) # create path
        spikes = np.memmap(path+'_spikes.dat', dtype='uint16', mode='w+', shape=(n_records, n_channels, n_samples))
        timestamps = np.memmap(path+'_timestamps.dat', dtype='int64', mode='w+', shape=(n_records))
        source = np.memmap(path+'_source.dat', dtype='uint16', mode='w+', shape=(n_records))
        gain = np.memmap(path+'_gain.dat', dtype='float32', mode='w+', shape=(n_records, n_channels))
        thresh = np.memmap(path+'_thresh.dat', dtype='uint16', mode='w+', shape=(n_records, n_channels))
        sortedId = np.memmap(path+'_sortedId.dat', dtype='uint16', mode='w+', shape=(n_records, n_channels))
        recNum = np.memmap(path+'_recNum.dat', dtype='uint16', mode='w+', shape=(n_records))
        '''
        spikes = np.zeros((n_records, n_channels, n_samples))
        timestamps = np.zeros(n_records)
        source = np.zeros(n_records)
        gain = np.zeros((n_records, n_channels))
        thresh = np.zeros((n_records, n_channels))
        sortedId = np.zeros((n_records, n_channels))
        recNum = np.zeros(n_records)
        '''
        for rec in range(n_records):
            # Fixed sequential record layout; unused fields are read anyway
            # to keep the file pointer aligned.
            eventType = np.fromfile(f, np.dtype('<u1'), 1) #always equal to 4, discard
            timestamps[rec] = np.fromfile(f, np.dtype('<i8'), 1)
            software_timestamp = np.fromfile(f, np.dtype('<i8'), 1)
            source[rec] = np.fromfile(f, np.dtype('<u2'), 1)
            # NOTE(review): these two reads shadow the scalar n_channels /
            # n_samples computed above with 1-element arrays (unwrapped below).
            n_channels = np.fromfile(f, np.dtype('<u2'), 1)
            n_samples = np.fromfile(f, np.dtype('<u2'), 1)
            sortedId[rec] = np.fromfile(f, np.dtype('<u2'), 1)
            electrodeId = np.fromfile(f, np.dtype('<u2'), 1)
            channel = np.fromfile(f, np.dtype('<u2'), 1)
            color = np.fromfile(f, np.dtype('<u1'), 3)
            pcProj = np.fromfile(f, np.float32, 2)
            sampleFreq = np.fromfile(f, np.dtype('<u2'), 1)
            waveforms = np.fromfile(f, np.dtype('<u2'), n_channels*n_samples)
            gain[rec,:] = np.fromfile(f, np.float32, n_channels)
            thresh[rec,:] = np.fromfile(f, np.dtype('<u2'), n_channels)
            recNum[rec] = np.fromfile(f, np.dtype('<u2'), 1)
            if isinstance(n_channels, np.ndarray):
                n_channels = n_channels[0]
            if isinstance(n_samples, np.ndarray):
                n_samples = n_samples[0]
            waveforms = np.reshape(waveforms, (n_channels, n_samples))
            # Convert raw uint16 samples to microvolts: re-center around the
            # unsigned midpoint (32768), divide by per-channel gain and 1000.
            waveforms = (np.float64(waveforms) - 32768) / gain[rec,:] / 1000
            spikes[rec,:,:] = waveforms
    data = {
        'header' : header,
        'spikes' : spikes,
        'units' : 'uV',
        'timestamps' : timestamps,
        'source' : source,
        'gain' : gain,
        'thresh' : thresh,
        'rec_nums' : recNum,
        'unitID' : sortedId,
    }
    return data
def load_events(fname,pkg):
    """Load an OpenEphys .events file.

    Each fixed-size 16-byte record is unpacked into parallel float arrays
    (timestamps, sample numbers, event/node/channel ids, record numbers).
    (Python 2 code: uses the `file` builtin.)

    NOTE(review): the `pkg` parameter is accepted but never used; kept for
    caller compatibility -- confirm whether it can be dropped.
    """
    with file(fname,'rb') as f:
        header = read_header(f)
        # Fixed 16-byte record size; the header occupies the first 1024 bytes.
        record_len = 16
        file_len = os.fstat(f.fileno()).st_size
        n_records = int((file_len - 1024) / record_len)
        channel = np.zeros(n_records)
        timestamps = np.zeros(n_records)
        sampleNum = np.zeros(n_records)
        nodeId = np.zeros(n_records)
        eventType = np.zeros(n_records)
        eventId = np.zeros(n_records)
        rec_nums = np.zeros(n_records)
        for rec in range(n_records):
            # Little-endian layout: i8 timestamp, i2 sample number, four u1
            # ids, then a trailing u2 record number.
            timestamps[rec] = np.fromfile(f, np.dtype('<i8'), 1)
            sampleNum[rec] = np.fromfile(f, np.dtype('<i2'), 1)
            eventType[rec] = np.fromfile(f, np.dtype('<u1'), 1)
            nodeId[rec] = np.fromfile(f, np.dtype('<u1'), 1)
            eventId[rec] = np.fromfile(f, np.dtype('<u1'), 1)
            channel[rec] = np.fromfile(f, np.dtype('<u1'), 1)
            rec_nums[rec] = np.fromfile(f, np.dtype('<u2'), 1)
        data = {
            'header' : header,
            'channel' : channel,
            'timestamps' : timestamps,
            'eventType' : eventType,
            'nodeId' : nodeId,
            'eventId' : eventId,
            'rec_nums' : rec_nums,
            'sampleNum' : sampleNum,
            }
        return data
| [
"numpy.fromfile",
"numpy.reshape",
"datetime.datetime.strptime",
"numpy.float64",
"numpy.memmap",
"os.path.join",
"os.path.splitext",
"numpy.array",
"numpy.zeros",
"tempfile.mkdtemp",
"os.path.basename",
"numpy.dtype"
] | [((2337, 2379), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 255]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 255])\n', (2345, 2379), True, 'import numpy as np\n'), ((2677, 2697), 'tempfile.mkdtemp', 'mkdtemp', ([], {'dir': 'tmp_dir'}), '(dir=tmp_dir)\n', (2684, 2697), False, 'from tempfile import mkdtemp\n'), ((2869, 2901), 'os.path.join', 'os.path.join', (['pkg_dir', 'chan_name'], {}), '(pkg_dir, chan_name)\n', (2881, 2901), False, 'import os\n'), ((3053, 3131), 'numpy.memmap', 'np.memmap', (["(path + '_timestamps.dat')"], {'dtype': '"""int64"""', 'mode': '"""w+"""', 'shape': 'n_records'}), "(path + '_timestamps.dat', dtype='int64', mode='w+', shape=n_records)\n", (3062, 3131), True, 'import numpy as np\n'), ((3154, 3231), 'numpy.memmap', 'np.memmap', (["(path + '_rec_nums.dat')"], {'dtype': '"""uint16"""', 'mode': '"""w+"""', 'shape': 'n_records'}), "(path + '_rec_nums.dat', dtype='uint16', mode='w+', shape=n_records)\n", (3163, 3231), True, 'import numpy as np\n'), ((4615, 4624), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (4622, 4624), False, 'from tempfile import mkdtemp\n'), ((4802, 4834), 'os.path.join', 'os.path.join', (['pkg_dir', 'chan_name'], {}), '(pkg_dir, chan_name)\n', (4814, 4834), False, 'import os\n'), ((4870, 4974), 'numpy.memmap', 'np.memmap', (["(path + '_spikes.dat')"], {'dtype': '"""uint16"""', 'mode': '"""w+"""', 'shape': '(n_records, n_channels, n_samples)'}), "(path + '_spikes.dat', dtype='uint16', mode='w+', shape=(n_records,\n n_channels, n_samples))\n", (4879, 4974), True, 'import numpy as np\n'), ((4995, 5073), 'numpy.memmap', 'np.memmap', (["(path + '_timestamps.dat')"], {'dtype': '"""int64"""', 'mode': '"""w+"""', 'shape': 'n_records'}), "(path + '_timestamps.dat', dtype='int64', mode='w+', shape=n_records)\n", (5004, 5073), True, 'import numpy as np\n'), ((5097, 5172), 'numpy.memmap', 'np.memmap', (["(path + '_source.dat')"], {'dtype': '"""uint16"""', 'mode': '"""w+"""', 'shape': 'n_records'}), "(path + 
'_source.dat', dtype='uint16', mode='w+', shape=n_records)\n", (5106, 5172), True, 'import numpy as np\n'), ((5199, 5291), 'numpy.memmap', 'np.memmap', (["(path + '_gain.dat')"], {'dtype': '"""float32"""', 'mode': '"""w+"""', 'shape': '(n_records, n_channels)'}), "(path + '_gain.dat', dtype='float32', mode='w+', shape=(n_records,\n n_channels))\n", (5208, 5291), True, 'import numpy as np\n'), ((5313, 5406), 'numpy.memmap', 'np.memmap', (["(path + '_thresh.dat')"], {'dtype': '"""uint16"""', 'mode': '"""w+"""', 'shape': '(n_records, n_channels)'}), "(path + '_thresh.dat', dtype='uint16', mode='w+', shape=(n_records,\n n_channels))\n", (5322, 5406), True, 'import numpy as np\n'), ((5427, 5523), 'numpy.memmap', 'np.memmap', (["(path + '_sortedId.dat')"], {'dtype': '"""uint16"""', 'mode': '"""w+"""', 'shape': '(n_records, n_channels)'}), "(path + '_sortedId.dat', dtype='uint16', mode='w+', shape=(\n n_records, n_channels))\n", (5436, 5523), True, 'import numpy as np\n'), ((5541, 5616), 'numpy.memmap', 'np.memmap', (["(path + '_recNum.dat')"], {'dtype': '"""uint16"""', 'mode': '"""w+"""', 'shape': 'n_records'}), "(path + '_recNum.dat', dtype='uint16', mode='w+', shape=n_records)\n", (5550, 5616), True, 'import numpy as np\n'), ((8123, 8142), 'numpy.zeros', 'np.zeros', (['n_records'], {}), '(n_records)\n', (8131, 8142), True, 'import numpy as np\n'), ((8164, 8183), 'numpy.zeros', 'np.zeros', (['n_records'], {}), '(n_records)\n', (8172, 8183), True, 'import numpy as np\n'), ((8205, 8224), 'numpy.zeros', 'np.zeros', (['n_records'], {}), '(n_records)\n', (8213, 8224), True, 'import numpy as np\n'), ((8246, 8265), 'numpy.zeros', 'np.zeros', (['n_records'], {}), '(n_records)\n', (8254, 8265), True, 'import numpy as np\n'), ((8287, 8306), 'numpy.zeros', 'np.zeros', (['n_records'], {}), '(n_records)\n', (8295, 8306), True, 'import numpy as np\n'), ((8328, 8347), 'numpy.zeros', 'np.zeros', (['n_records'], {}), '(n_records)\n', (8336, 8347), True, 'import numpy as np\n'), ((8369, 
8388), 'numpy.zeros', 'np.zeros', (['n_records'], {}), '(n_records)\n', (8377, 8388), True, 'import numpy as np\n'), ((6780, 6809), 'numpy.fromfile', 'np.fromfile', (['f', 'np.float32', '(2)'], {}), '(f, np.float32, 2)\n', (6791, 6809), True, 'import numpy as np\n'), ((6998, 7036), 'numpy.fromfile', 'np.fromfile', (['f', 'np.float32', 'n_channels'], {}), '(f, np.float32, n_channels)\n', (7009, 7036), True, 'import numpy as np\n'), ((7393, 7439), 'numpy.reshape', 'np.reshape', (['waveforms', '(n_channels, n_samples)'], {}), '(waveforms, (n_channels, n_samples))\n', (7403, 7439), True, 'import numpy as np\n'), ((3480, 3495), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (3488, 3495), True, 'import numpy as np\n'), ((3617, 3632), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (3625, 3632), True, 'import numpy as np\n'), ((3682, 3697), 'numpy.dtype', 'np.dtype', (['""">i2"""'], {}), "('>i2')\n", (3690, 3697), True, 'import numpy as np\n'), ((3790, 3805), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (3798, 3805), True, 'import numpy as np\n'), ((6087, 6102), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (6095, 6102), True, 'import numpy as np\n'), ((6183, 6198), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (6191, 6198), True, 'import numpy as np\n'), ((6251, 6266), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (6259, 6266), True, 'import numpy as np\n'), ((6319, 6334), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6327, 6334), True, 'import numpy as np\n'), ((6387, 6402), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6395, 6402), True, 'import numpy as np\n'), ((6455, 6470), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6463, 6470), True, 'import numpy as np\n'), ((6523, 6538), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6531, 6538), True, 'import numpy as np\n'), ((6591, 6606), 'numpy.dtype', 'np.dtype', 
(['"""<u2"""'], {}), "('<u2')\n", (6599, 6606), True, 'import numpy as np\n'), ((6659, 6674), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6667, 6674), True, 'import numpy as np\n'), ((6727, 6742), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (6735, 6742), True, 'import numpy as np\n'), ((6858, 6873), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6866, 6873), True, 'import numpy as np\n'), ((6926, 6941), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6934, 6941), True, 'import numpy as np\n'), ((7085, 7100), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (7093, 7100), True, 'import numpy as np\n'), ((7162, 7177), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (7170, 7177), True, 'import numpy as np\n'), ((8472, 8487), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (8480, 8487), True, 'import numpy as np\n'), ((8537, 8552), 'numpy.dtype', 'np.dtype', (['"""<i2"""'], {}), "('<i2')\n", (8545, 8552), True, 'import numpy as np\n'), ((8602, 8617), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (8610, 8617), True, 'import numpy as np\n'), ((8667, 8682), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (8675, 8682), True, 'import numpy as np\n'), ((8732, 8747), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (8740, 8747), True, 'import numpy as np\n'), ((8797, 8812), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (8805, 8812), True, 'import numpy as np\n'), ((8862, 8877), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (8870, 8877), True, 'import numpy as np\n'), ((1790, 1813), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (1806, 1813), False, 'import os\n'), ((1303, 1348), 'datetime.datetime.strptime', 'datetime.strptime', (['value', '"""\'%d-%b-%Y %H%M%S\'"""'], {}), '(value, "\'%d-%b-%Y %H%M%S\'")\n', (1320, 1348), False, 'from datetime import datetime\n'), ((1693, 1716), 
'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (1709, 1716), False, 'import os\n'), ((3545, 3560), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (3553, 3560), True, 'import numpy as np\n'), ((7465, 7486), 'numpy.float64', 'np.float64', (['waveforms'], {}), '(waveforms)\n', (7475, 7486), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import re
import datetime
import hashlib
import random
import numpy as np
import keras
from keras.layers import Input, Embedding, Dense, Dropout
from keras.layers import LSTM, GRU
from keras.optimizers import Adam
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, load_model
from keras.callbacks import Callback
import h5py
# Alphabet the language model can emit: lowercase letters, digits, space
# and a small set of URL/path punctuation.
charset = 'abcdefghijklmnopqrstuvwxyz0123456789 .-%_:/\\$'
# Separator-like punctuation; replaced by a single space during normalization.
invalid_sep = re.compile('[\(\)@#$*;"\']+')
# Any character outside the allowed query alphabet; removed entirely.
invalid_char = re.compile('[^a-zA-Z0-9 \.\-%_:/\\\\]+')
# Runs of consecutive spaces, collapsed to one.
multi_space = re.compile(' +')
def normalize(s):
    """Canonicalize a raw query string.

    Separator-like punctuation becomes a space, characters outside the
    allowed alphabet are dropped, runs of spaces collapse to one space,
    and the result is lower-cased.
    """
    replaced = invalid_sep.sub(' ', s)
    cleaned = invalid_char.sub('', replaced)
    collapsed = multi_space.sub(' ', cleaned)
    return collapsed.lower()
# parsing raw AOL input
def parse(fname):
    """Parse a raw AOL query-log file and print tab-separated training rows.

    Each printed row is: unix timestamp, normalized query, a random prefix
    of the query (at least 2 characters), and the md5 hex digest of that
    prefix.  In AOL logs a query of '-' means "repeat of the previous
    query".

    NOTE(review): ``line.decode`` on the result of iterating a text file
    and ``hashlib.md5(prefix)`` on a str only work on Python 2 -- confirm
    the target interpreter before reuse.
    """
    f = open(fname)
    prev_query = ''
    for line in f:
        line = line.decode('utf-8','ignore').encode("utf-8")
        line = line.strip().split('\t')
        # column 2 holds the query timestamp, e.g. "2006-03-01 14:05:11"
        timestamp = datetime.datetime.strptime(line[2], '%Y-%m-%d %H:%M:%S')
        # convert to seconds since the unix epoch
        timestamp = (timestamp - datetime.datetime(1970,1,1)).total_seconds()
        query = normalize(line[1])
        if query == '-':
            # '-' marks a repeat of the previous query
            query = prev_query
        else:
            prev_query = query
        if not query:
            prev_query = ''
            continue
        # a 4th column exists only when the user clicked a result
        # NOTE(review): ``clicked`` is computed but never used
        clicked = len(line) > 3
        # random prefix length in [2, len(query)-1]
        r = 2+random.randint(0,max(0,len(query)-3))
        prefix = query[:r]
        md5 = hashlib.md5(prefix).hexdigest()
        line = '\t'.join([str(int(timestamp)), query, prefix, md5])
        print(line)
class Sequencer(object):
    """Maps query strings to/from integer token sequences.

    Index 0 is reserved for padding and index 1 for the end-of-sequence
    marker; alphabet characters start at index 2.
    """
    PAD, END = 0, 1

    def __init__(self, alphabet=None):
        """Build the token tables.

        ``alphabet`` generalizes the module-level ``charset`` default so
        the sequencer can be reused with other character sets; omitting it
        preserves the original behavior.
        """
        if alphabet is None:
            alphabet = charset
        self.token_to_indice = dict((c, i + 2) for i, c in enumerate(alphabet))
        self.vocabs = ['PAD', 'END'] + list(alphabet)

    def encode(self, line, ending=True):
        """Return the list of token indices for ``line``.

        Fix: a list comprehension replaces ``map`` so that appending the
        END marker also works on Python 3, where ``map`` returns a lazy
        iterator without ``append``.
        """
        seq = [self.token_to_indice[c] for c in line]
        if ending:
            seq.append(self.END)
        return seq

    def decode(self, seq):
        """Inverse of :meth:`encode`; a trailing END marker is dropped."""
        if not seq:
            return ''
        if seq[-1] == self.END:
            seq = seq[:-1]
        return ''.join(self.vocabs[i] for i in seq)
def padding(seq, maxlen):
    """Right-pad (or truncate) every sequence to ``maxlen`` with the PAD id 0."""
    return pad_sequences(seq, maxlen, padding='post', value=0)
class WeightsSaver(Callback):
    """Keras callback that snapshots model weights every ``N`` batches."""

    def __init__(self, model, N):
        self.model = model
        self.N = N
        self.batch = 0

    def on_batch_end(self, batch, logs={}):
        # Save on batches 0, N, 2N, ... so long runs leave periodic
        # checkpoints under weights/.
        if self.batch % self.N == 0:
            self.model.save_weights('weights/weights%08d.hdf5' % self.batch)
        self.batch += 1
class LanguageModel(object):
    """Character-level recurrent language model over query strings."""
    def __init__(self):
        # sequencer converts between strings and token-index sequences
        self.sqn = Sequencer()
    def save(self):
        # NOTE(review): not implemented; persistence is handled elsewhere
        pass
    def load(self):
        # NOTE(review): not implemented; persistence is handled elsewhere
        pass
    def build(self, hid_size, n_hid_layers, drp_rate, batch_size):
        """Assemble the LSTM stack: embedding -> n LSTM+dropout -> softmax.

        hid_size      -- units per LSTM layer
        n_hid_layers  -- number of stacked LSTM layers
        drp_rate      -- dropout rate applied after each LSTM
        batch_size    -- stored for use by ``train``
        """
        # variable-length integer sequences of any batch size
        cin = Input(batch_shape=(None, None))
        voc_size = len(self.sqn.vocabs)
        # A trick to map categories to onehot encoding: a frozen embedding
        # whose weight matrix is the identity.
        emb = Embedding(voc_size, voc_size, trainable=False, weights=[np.identity(voc_size)])(cin)
        prev = emb
        for i in range(n_hid_layers):
            lstm = LSTM(hid_size, return_sequences=True, implementation=2)(prev)
            dropout = Dropout(drp_rate)(lstm)
            prev = dropout
        # per-timestep distribution over the vocabulary
        cout = Dense(voc_size, activation='softmax')(prev)
        self.model = Model(inputs=cin, outputs=cout)
        self.model.summary()
        self.batch_size = batch_size
    def train(self, fname, maxlen, lr=1e-3):
        """Fit the model on one query per line of ``fname``.

        Sequences are padded to ``maxlen``+1 and shifted by one token so
        the network predicts the next character.
        """
        ref = []
        for line in open(fname):
            line = line.strip()
            seq = self.sqn.encode(line)
            ref.append(seq)
        ref = np.array(ref)
        ref = padding(ref, maxlen+1)
        # inputs are tokens 0..n-1, targets are tokens 1..n
        X, Y = ref[:, :-1], ref[:, 1:]
        Y = np.expand_dims(Y, -1)
        # boolean sample weights: ignore PAD/END positions and the first token
        M = X>self.sqn.END
        M[:,0] = 0
        self.model.compile(
            loss='sparse_categorical_crossentropy',
            sample_weight_mode='temporal',
            optimizer=Adam(lr=lr)
        )
        # checkpoint weights every 500 batches via the WeightsSaver callback
        self.model.fit(X, Y, batch_size=self.batch_size, sample_weight=M,
            callbacks=[WeightsSaver(self.model, 500)],
            validation_split=0.01,
            epochs=3
        )
def array_str(arr):
    """Format a 1-D float sequence as one line of a C array initializer."""
    formatted = ('%.8e' % value for value in arr)
    return ', '.join(formatted) + ',\n'
def sanitize_for_tf(name):
    """Strip the TensorFlow ':0' tensor suffix from weight variable names.

    HACK to keep the generated C symbol names consistent between THEANO
    and TENSORFLOW models.
    """
    for suffix in ("KERNEL:0", "BIAS:0"):
        name = name.replace(suffix, suffix[:-2])
    return name
# Dumping the HDF5 weights to a model.c file
# and specifies the dimension in model.h
def dump(fname):
    """Export LSTM/Dense weights from a Keras HDF5 file to C sources.

    Writes ``model.c`` with one ``const float`` array per weight tensor and
    ``model.h`` with matching shape constants and extern declarations.

    Fix: the HDF5 handle and both output files are now closed
    deterministically via ``with`` -- the original leaked all three
    handles, so ``model.c``/``model.h`` could be left partially flushed.
    """
    with h5py.File(fname) as f, \
            open('model.h', 'w') as fheader, \
            open('model.c', 'w') as fctx:
        for name in f.attrs['layer_names']:
            # only recurrent and dense layers carry weights we export
            if name.startswith('lstm') or name.startswith('dense'):
                layer = f[name][name]
                for elem in layer:
                    shape = layer[elem].shape
                    # one "int <NAME>_SHAPE_<i> = <n>;" per dimension
                    for i, n in enumerate(shape):
                        current_row = 'int '+(name+'_%s_shape_%d = %d;\n'%(elem, i, n)).upper()
                        current_row = sanitize_for_tf(current_row)
                        fheader.write(current_row)
                    elem_decl = 'const float '+(name+'_'+elem).upper()+'[]'
                    elem_decl = sanitize_for_tf(elem_decl)
                    fheader.write('extern '+elem_decl+';\n\n')
                    fctx.write(elem_decl+' = {\n')
                    mat = np.array(layer[elem])
                    if len(shape) == 2:
                        # matrices: one formatted row per output line
                        for i in range(shape[0]):
                            fctx.write(array_str(mat[i]))
                    else:
                        fctx.write(array_str(mat))
                    fctx.write('};\n\n')
if __name__ == '__main__':
    # Dispatch on the name this script was invoked as (e.g. via symlinks
    # named "train", "parse" and "dump"), busybox-style.
    prog_name = os.path.basename(sys.argv[0])
    if prog_name == 'train':
        q = LanguageModel()
        # 256 hidden units, 2 LSTM layers, 0.5 dropout, batch size 256
        q.build(256, 2, 0.5, 256)
        # train on one query per line of the given file, max length 60
        q.train(sys.argv[1], 60)
    elif prog_name == 'parse':
        parse(sys.argv[1])
    elif prog_name == 'dump':
        dump(sys.argv[1])
| [
"keras.optimizers.Adam",
"datetime.datetime",
"numpy.identity",
"hashlib.md5",
"re.compile",
"datetime.datetime.strptime",
"h5py.File",
"numpy.array",
"keras.layers.Input",
"keras.layers.LSTM",
"os.path.basename",
"keras.models.Model",
"numpy.expand_dims",
"keras.layers.Dense",
"re.sub",... | [((519, 550), 're.compile', 're.compile', (['"""[\\\\(\\\\)@#$*;"\']+"""'], {}), '(\'[\\\\(\\\\)@#$*;"\\\']+\')\n', (529, 550), False, 'import re\n'), ((564, 606), 're.compile', 're.compile', (['"""[^a-zA-Z0-9 \\\\.\\\\-%_:/\\\\\\\\]+"""'], {}), "('[^a-zA-Z0-9 \\\\.\\\\-%_:/\\\\\\\\]+')\n", (574, 606), False, 'import re\n'), ((619, 635), 're.compile', 're.compile', (['""" +"""'], {}), "(' +')\n", (629, 635), False, 'import re\n'), ((663, 690), 're.sub', 're.sub', (['invalid_sep', '""" """', 's'], {}), "(invalid_sep, ' ', s)\n", (669, 690), False, 'import re\n'), ((699, 726), 're.sub', 're.sub', (['invalid_char', '""""""', 's'], {}), "(invalid_char, '', s)\n", (705, 726), False, 'import re\n'), ((735, 762), 're.sub', 're.sub', (['multi_space', '""" """', 's'], {}), "(multi_space, ' ', s)\n", (741, 762), False, 'import re\n'), ((2211, 2262), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['seq', 'maxlen'], {'padding': '"""post"""', 'value': '(0)'}), "(seq, maxlen, padding='post', value=0)\n", (2224, 2262), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((4622, 4638), 'h5py.File', 'h5py.File', (['fname'], {}), '(fname)\n', (4631, 4638), False, 'import h5py\n'), ((5748, 5777), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (5764, 5777), False, 'import os\n'), ((1007, 1063), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['line[2]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(line[2], '%Y-%m-%d %H:%M:%S')\n", (1033, 1063), False, 'import datetime\n'), ((2839, 2870), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None)'}), '(batch_shape=(None, None))\n', (2844, 2870), False, 'from keras.layers import Input, Embedding, Dense, Dropout\n'), ((3357, 3388), 'keras.models.Model', 'Model', ([], {'inputs': 'cin', 'outputs': 'cout'}), '(inputs=cin, outputs=cout)\n', (3362, 3388), False, 'from keras.models import Model, load_model\n'), ((3667, 3680), 'numpy.array', 'np.array', 
(['ref'], {}), '(ref)\n', (3675, 3680), True, 'import numpy as np\n'), ((3769, 3790), 'numpy.expand_dims', 'np.expand_dims', (['Y', '(-1)'], {}), '(Y, -1)\n', (3783, 3790), True, 'import numpy as np\n'), ((3291, 3328), 'keras.layers.Dense', 'Dense', (['voc_size'], {'activation': '"""softmax"""'}), "(voc_size, activation='softmax')\n", (3296, 3328), False, 'from keras.layers import Input, Embedding, Dense, Dropout\n'), ((1475, 1494), 'hashlib.md5', 'hashlib.md5', (['prefix'], {}), '(prefix)\n', (1486, 1494), False, 'import hashlib\n'), ((3141, 3196), 'keras.layers.LSTM', 'LSTM', (['hid_size'], {'return_sequences': '(True)', 'implementation': '(2)'}), '(hid_size, return_sequences=True, implementation=2)\n', (3145, 3196), False, 'from keras.layers import LSTM, GRU\n'), ((3225, 3242), 'keras.layers.Dropout', 'Dropout', (['drp_rate'], {}), '(drp_rate)\n', (3232, 3242), False, 'from keras.layers import Input, Embedding, Dense, Dropout\n'), ((3995, 4006), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (3999, 4006), False, 'from keras.optimizers import Adam\n'), ((5438, 5459), 'numpy.array', 'np.array', (['layer[elem]'], {}), '(layer[elem])\n', (5446, 5459), True, 'import numpy as np\n'), ((1097, 1126), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (1114, 1126), False, 'import datetime\n'), ((3036, 3057), 'numpy.identity', 'np.identity', (['voc_size'], {}), '(voc_size)\n', (3047, 3057), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.optimize as optimize
# Gas constant for dry air (J/kg/K), heat capacities of dry air and water
# vapor (J/kg/K), and gravitational acceleration (m/s^2).
R = 287.058; Cpd = 1005.7; Cpv = 1875; g = 9.81 # define constants
# Latent heat of vaporization of water (J/kg) as a function of T in Celsius.
Lw = lambda T: (2.501 - 0.00237 * T) * 10**6
# Columns pulled from the sonde table for each sounding.
varsIncluded = ['timestamp', 'Pressure', 'Temperature',
                'Humidity', 'WindDir', 'WindSpeed','Height',
                'WindNorth', 'WindEast']
# Timestamp layout used when passing datetimes into SQL query parameters.
iso_format = '%Y-%m-%d %H:%M:%S.%f'
def available(conn, z_range=None, pct=0.8):
    """List soundings available in the database.

    Without ``z_range``: one row per distinct launch (time and location).
    With ``z_range`` (a [z_min, z_max] pair): per-sounding summaries,
    keeping only soundings that cover more than ``pct`` of the requested
    height interval.
    """
    if z_range is None:
        sql_query = 'select distinct LaunchTime, LaunchLatitude, '\
            + 'LaunchLongitude from sonde'
        return pd.read_sql(sql_query,
                           conn,
                           parse_dates=['LaunchTime'])
    sql_query = \
        "select min(timestamp) as startTime, max(timestamp) as endTime, "\
        + "min(Height) as minHeight, max(Height) as maxHeight, "\
        + "avg(Latitude) as Latitude, avg(Longitude) as Longitude,"\
        + "min(Dropping) as dropping "\
        + "from sonde where Height between ? and ? "\
        + "group by LaunchTime, Dropping"
    summaries = pd.read_sql(sql_query,
                            conn,
                            parse_dates=['startTime', 'endTime'],
                            params=z_range)
    # Strip timezone info so the coverage comparison stays naive-vs-naive.
    for col in ('startTime', 'endTime'):
        summaries[col] = summaries[col].dt.tz_localize(None)
    coverage = '(maxHeight - minHeight)/{z_full} > {pct}'\
        .format(z_full=z_range[1]-z_range[0],pct=pct)
    return summaries.query(coverage).reset_index(drop=True)
def load(conn, meta, ddz_smooth_window=10):
    """Fetch the sounding described by ``meta`` and wrap it as a Radiosonde.

    ``meta`` must provide 'startTime' and 'endTime' datetimes (as produced
    by ``available``); ``ddz_smooth_window`` is forwarded to the vertical
    gradient smoothing.
    """
    sql_query = "select * from sonde where timestamp between ? and ?"
    window = [meta[key].strftime(iso_format)
              for key in ('startTime', 'endTime')]
    raw = pd.read_sql(sql_query,
                    conn,
                    parse_dates=['timestamp'],
                    params=window)
    return Radiosonde(raw[varsIncluded], meta, ddz_smooth_window)
def gradx(f, x):
    """Central-difference gradient of ``f`` on the non-uniform grid ``x``.

    Uses the Lagrange-interpolation stencil for unequal spacing
    (A First Course in Numerical Methods, Ascher & Greif, p. 419).
    Returns the gradient and the grid points it is defined on.
    """
    center = f[1:-2]
    west, east = f[0:-3], f[2:-1]
    h_w = x[1:-2] - x[0:-3]
    h_e = x[2:-1] - x[1:-2]
    grad = (h_e - h_w)/(h_e * h_w) * center \
        + 1 / (h_w + h_e) * (h_w/h_e * east - h_e/h_w * west)
    return grad, x[1:-2]
class Radiosonde(pd.DataFrame):
    """One sounding as a DataFrame indexed by height z (m).

    On construction the raw sonde columns are copied in and derived
    thermodynamic quantities (vapor pressure, mixing ratio, potential
    temperatures, moist static energy, vertical gradients, gradient
    Richardson number) are computed as additional columns.
    """
    def __init__(self, df, meta, ddz_smooth_window=10):
        """df: raw sonde DataFrame with the ``varsIncluded`` columns;
        meta: sounding metadata row (kept for ``__repr__``);
        ddz_smooth_window: rolling-mean window for vertical gradients."""
        pd.DataFrame.__init__(self, index=df['Height'].values)
        self.index.name = 'z'
        setattr(self,'raw_df', df)
        setattr(self, 'meta', meta)
        # compact values for calculation
        # loading variables to numpy array for easier computation
        self['timestamp'] = df['timestamp'].values
        self['z'] = df['Height'].values
        self['U'] = df['WindSpeed'].values
        self['UDir'] = df['WindDir'].values
        self['v'] = df['WindNorth'].values
        self['u'] = df['WindEast'].values
        self['T_K'] = df['Temperature'].values
        self['T'] = self['T_K'] - 273.15
        self['P'] = df['Pressure'].values
        self['RH'] = df['Humidity'].values
        # saturation vapor pressure (hPa), Magnus-type formula over T in C
        self['es'] = 6.11 * 10 ** (7.5 * (self['T']) / (self['T'] + 237.3)) # sat vapor pres
        self['e'] = self['RH'] * self['es'] / 100 # vapor pres
        self['r'] = 0.622 * self['e'] / self['P'] # mixing ratio (kg/kg)
        # specific humidity from mixing ratio
        self['q'] = self['r'] / (1 + self['r'])
        # moist static energy (no contribution from water droplets)
        # Note: only 1.2% drop of g at 40km, so g taken as constant
        self['mse'] = Cpd * self['T_K'] + g * self['z'] \
            + Lw(self['T']) * self['q']
        # potential temperature for moist air
        self['theta_v'] = self['T_K'] * (1000/self['P']) \
            ** (0.2854 * (1-0.28e-3 * self['r'] * 1000))
        # dew point temperature (C), inverse of the Magnus formula
        self['T_D'] = (237.3 * np.log10(self['e']/6.11)) \
                / (7.5 - np.log10(self['e']/6.11))
        self['T_DK'] = self['T_D'] + 273.15
        self._compute_theta_e()
        self._compute_ddz_vars(ddz_smooth_window=ddz_smooth_window)
    # equivalent potential temperature
    ## Computed with iterative calc on dew point and pressure
    def _compute_theta_e(self):
        '''
        Compute equivalent potential temperature with Bolton (1980) method.
        Stores the LCL temperature as 'T_LK' and the result as 'theta_e'.
        '''
        T_DK = self['T_DK']
        T_K = self['T_K']
        r = self['r']
        # temperature at the lifting condensation level (Bolton eq. 15)
        T_LK = 1 / (1 / (T_DK - 56) + np.log(T_K/T_DK)/800) + 56
        P = self['P']
        theta_e = T_K * (1000/P) ** (0.2854 * (1 - 0.28e-3 * r)) \
            * np.exp( (3.376 / T_LK - 0.00254) * r * (1 + 0.81e-3 * r))
        self['T_LK'] = T_LK
        self['theta_e'] = theta_e
    def _compute_ddz_vars(self, ddz_smooth_window=10):
        '''
        Compute vertical buoyancy gradient, velocity gradient and gradient
        Richardson number
        '''
        self['dThetadz'] = self._ddz('theta_e', ddz_smooth_window)
        self['dUdz'] = self._ddz('U', ddz_smooth_window)
        self['dbdz'] = g / self['theta_e'] * self['dThetadz']
        # gradient Richardson number Ri = N^2 / (dU/dz)^2
        self['Ri_g'] = self['dbdz'] / self['dUdz'] ** 2
    def _ddz(self, varName, ddz_smooth_window=10):
        """Vertical derivative of a column after rolling-mean smoothing."""
        ser_tmp_sm = self[varName].rolling(ddz_smooth_window).mean()
        dudz, z_grad = gradx(ser_tmp_sm.values, ser_tmp_sm.index.values)
        return pd.Series(dudz, index=z_grad)
    def lcl(self):
        """Estimate the lifting condensation level pressure (hPa) by a
        fixed-point iteration from the lowest measurement."""
        def _lcl_p(P, T_K, r):
            '''
            One fixed-point step for the LCL pressure.
            Inputs:
                * P : Pressure, in hPa
                * T_K : Temperature in K
                * r : mixing ratio in kg/kg
            '''
            # constants
            R = 287.058; Cp = 1004; # define constants
            # Compute fixed point
            e = r * P / 0.622
            T_dK = (237.3 * np.log10(e/6.11)) / (7.5 - np.log10(e/6.11)) + 273.15
            return P * (T_dK / T_K) ** (Cp/R)
        # iterate from the surface (first) measurement
        T_K = self['T_K'].values[0]
        P = self['P'].values[0]
        r = self['r'].values[0]
        P_lcl = optimize.fixed_point(_lcl_p, x0=P, args=[T_K,r])
        print("This method still needs improvement.")
        return P_lcl.mean()
    def zplot(self, x_vars, ax=None, **kwargs):
        """Plot the named columns against height; returns the axes used.
        Extra keyword arguments are forwarded to ``plt.subplots`` when no
        axes are supplied."""
        if ax == None:
            fig, ax = plt.subplots(**kwargs)
        for varname in x_vars:
            ax.plot(self[varname].values,
                    self['z'].values,
                    label=varname)
        ax.legend()
        return ax
    def __repr__(self):
        # show the sounding metadata rather than the (large) frame itself
        return self.meta.__repr__()+'\n\n'
| [
"pandas.Series",
"numpy.log10",
"numpy.log",
"numpy.exp",
"scipy.optimize.fixed_point",
"pandas.read_sql",
"pandas.DataFrame.__init__",
"matplotlib.pyplot.subplots"
] | [((1956, 2027), 'pandas.read_sql', 'pd.read_sql', (['sql_query', 'conn'], {'parse_dates': "['timestamp']", 'params': 't_range'}), "(sql_query, conn, parse_dates=['timestamp'], params=t_range)\n", (1967, 2027), True, 'import pandas as pd\n'), ((643, 699), 'pandas.read_sql', 'pd.read_sql', (['sql_query', 'conn'], {'parse_dates': "['LaunchTime']"}), "(sql_query, conn, parse_dates=['LaunchTime'])\n", (654, 699), True, 'import pandas as pd\n'), ((1179, 1266), 'pandas.read_sql', 'pd.read_sql', (['sql_query', 'conn'], {'parse_dates': "['startTime', 'endTime']", 'params': 'z_range'}), "(sql_query, conn, parse_dates=['startTime', 'endTime'], params=\n z_range)\n", (1190, 1266), True, 'import pandas as pd\n'), ((2777, 2831), 'pandas.DataFrame.__init__', 'pd.DataFrame.__init__', (['self'], {'index': "df['Height'].values"}), "(self, index=df['Height'].values)\n", (2798, 2831), True, 'import pandas as pd\n'), ((5721, 5750), 'pandas.Series', 'pd.Series', (['dudz'], {'index': 'z_grad'}), '(dudz, index=z_grad)\n', (5730, 5750), True, 'import pandas as pd\n'), ((6378, 6427), 'scipy.optimize.fixed_point', 'optimize.fixed_point', (['_lcl_p'], {'x0': 'P', 'args': '[T_K, r]'}), '(_lcl_p, x0=P, args=[T_K, r])\n', (6398, 6427), True, 'import scipy.optimize as optimize\n'), ((4969, 5025), 'numpy.exp', 'np.exp', (['((3.376 / T_LK - 0.00254) * r * (1 + 0.00081 * r))'], {}), '((3.376 / T_LK - 0.00254) * r * (1 + 0.00081 * r))\n', (4975, 5025), True, 'import numpy as np\n'), ((6604, 6626), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '(**kwargs)\n', (6616, 6626), True, 'import matplotlib.pyplot as plt\n'), ((4257, 4283), 'numpy.log10', 'np.log10', (["(self['e'] / 6.11)"], {}), "(self['e'] / 6.11)\n", (4265, 4283), True, 'import numpy as np\n'), ((4316, 4342), 'numpy.log10', 'np.log10', (["(self['e'] / 6.11)"], {}), "(self['e'] / 6.11)\n", (4324, 4342), True, 'import numpy as np\n'), ((4834, 4852), 'numpy.log', 'np.log', (['(T_K / T_DK)'], {}), '(T_K / T_DK)\n', (4840, 4852), 
True, 'import numpy as np\n'), ((6161, 6179), 'numpy.log10', 'np.log10', (['(e / 6.11)'], {}), '(e / 6.11)\n', (6169, 6179), True, 'import numpy as np\n'), ((6188, 6206), 'numpy.log10', 'np.log10', (['(e / 6.11)'], {}), '(e / 6.11)\n', (6196, 6206), True, 'import numpy as np\n')] |
import cv2
import os
import numpy as np
# Filesystem layout of the bundled Caffe face-detection model.
MODEL_PATH= "models"+os.sep+"face_detection"+os.sep+"caffe"+os.sep+"VGG_ILSVRC_19_layers"
# Network architecture definition (prototxt).
PROTO_TXT = MODEL_PATH+os.sep+"deploy.prototxt.txt"
# Pre-trained weights.
MODEL= MODEL_PATH+os.sep+"VGG_ILSVRC_19_layers.caffemodel"
class Caffe_detector():
    """Face detector backed by an OpenCV-DNN Caffe model."""
    def __init__(self):
        # load the network once; reused for every image
        print("[INFO] loading model... ",MODEL)
        self.detector = cv2.dnn.readNetFromCaffe(PROTO_TXT, MODEL)
    def detect_faces(self,img):
        # NOTE(review): cv2.dnn networks expose setInput()/forward(), not
        # detect_faces(); this call looks like it would raise
        # AttributeError at runtime, and ``img`` is ignored -- confirm
        # intent before relying on this method.
        return self.detector.detect_faces()
    def get_faces(self,image_path,crop=False):
        """Run the detector on the image at ``image_path`` and display
        bounding boxes for every detection with confidence > 0.5.

        NOTE(review): ``crop`` is currently unused and nothing is
        returned; the method only shows an annotated window and blocks on
        a key press.
        """
        image = cv2.imread(image_path)
        (h, w) = image.shape[:2]
        # 300x300 mean-subtracted blob, the input size these SSD-style
        # Caffe models expect
        blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
            (300, 300), (104.0, 177.0, 123.0))
        print("[INFO] computing object detections...")
        self.detector.setInput(blob)
        print("saddas")
        detections = self.detector.forward()
        print("Detections received now calculating ")
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with the
            # prediction
            confidence = detections[0, 0, i, 2]
            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > 0.5:
                # compute the (x, y)-coordinates of the bounding box for the
                # object (detections are normalized to [0, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                # draw the bounding box of the face along with the associated
                # probability
                text = "{:.2f}%".format(confidence * 100)
                y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.rectangle(image, (startX, startY), (endX, endY),
                    (0, 0, 255), 2)
                cv2.putText(image, text, (startX, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        # show the output image
        cv2.imshow("Output", image)
        cv2.waitKey(0)
'''
real_img = cv2.imread(images_path)
img = cv2.cvtColor(real_img, cv2.COLOR_BGR2RGB)
#"img = cv2.imread(images_path)
faces = self.detector.detect_faces(img)
cropped = []
result = {}
boxes = []
for face in faces:
x, y, width, height = face['box']
keypoints = face['keypoints']
cv2.rectangle(real_img,
(x, y),
(x + width, y + height),
(0, 155, 255),
2)
boxes.append((x,y,width,height))
cv2.circle(real_img, (keypoints['left_eye']), 2, (0, 155, 255), 2)
cv2.circle(real_img, (keypoints['right_eye']), 2, (0, 155, 255), 2)
cv2.circle(real_img, (keypoints['nose']), 2, (0, 155, 255), 2)
cv2.circle(real_img, (keypoints['mouth_left']), 2, (0, 155, 255), 2)
cv2.circle(real_img, (keypoints['mouth_right']), 2, (0, 155, 255), 2)
if crop:
# 'box': [x, y, width, height], crop_img = img[y:y+h, x:x+w]
cropped.append( real_img[y:y+height, x:x+width] )
result["original_image"] =real_img
result["cropped"] = cropped
result["boxes"] = boxes
return result
''' | [
"cv2.rectangle",
"cv2.dnn.readNetFromCaffe",
"cv2.imshow",
"cv2.putText",
"numpy.array",
"cv2.resize",
"cv2.waitKey",
"cv2.imread"
] | [((361, 403), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['PROTO_TXT', 'MODEL'], {}), '(PROTO_TXT, MODEL)\n', (385, 403), False, 'import cv2\n'), ((543, 565), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (553, 565), False, 'import cv2\n'), ((2073, 2100), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'image'], {}), "('Output', image)\n", (2083, 2100), False, 'import cv2\n'), ((2109, 2123), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2120, 2123), False, 'import cv2\n'), ((636, 665), 'cv2.resize', 'cv2.resize', (['image', '(300, 300)'], {}), '(image, (300, 300))\n', (646, 665), False, 'import cv2\n'), ((1804, 1872), 'cv2.rectangle', 'cv2.rectangle', (['image', '(startX, startY)', '(endX, endY)', '(0, 0, 255)', '(2)'], {}), '(image, (startX, startY), (endX, endY), (0, 0, 255), 2)\n', (1817, 1872), False, 'import cv2\n'), ((1919, 2008), 'cv2.putText', 'cv2.putText', (['image', 'text', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0,\n 255), 2)\n', (1930, 2008), False, 'import cv2\n'), ((1464, 1486), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1472, 1486), True, 'import numpy as np\n')] |
from src.problems.problem import Problem
from dataclasses import dataclass
import numpy as np
class Minimization2DProblem(Problem):
    """Base class for 2-D minimization benchmarks on a square domain.

    Subclasses set ``vtr`` (value to reach) and implement the fitness.
    """

    x_min: int = -6
    y_min: int = -6
    x_max: int = 6
    y_max: int = 6
    vtr: float

    def is_solved(self, best_solution: np.ndarray):
        """True once the best fitness is within 1e-5 of the value to reach."""
        gap = self.evaluate(best_solution) - self.vtr
        return gap <= 10e-6

    def get_ndim(self):
        """Problems in this family are always two-dimensional."""
        return 2

    def get_lowerbound(self) -> np.ndarray:
        """Lower corner (x_min, y_min) of the search box."""
        return np.array([self.x_min, self.y_min])

    def get_upperbound(self) -> np.ndarray:
        """Upper corner (x_max, y_max) of the search box."""
        return np.array([self.x_max, self.y_max])
@dataclass
class ParabolicRidge(Minimization2DProblem):
    """Parabolic-ridge benchmark; optimal value is 0."""
    vtr: float = 0.0

    def fitness(self, solution: np.ndarray) -> np.ndarray:
        # -x0 plus 100 times the squared norm over all coordinates
        squared_norm = np.sum(solution[:, 0:] ** 2, axis=1)
        return -1 * solution[:, 0] + 100 * squared_norm
@dataclass
class RosenBrock2D(Minimization2DProblem):
    """Classic 2-D Rosenbrock function; global minimum 0 at (1, 1)."""
    vtr: float = 0.0

    def fitness(self, solution: np.ndarray) -> np.ndarray:
        x, y = solution[:, 0], solution[:, 1]
        return (1 - x) ** 2 + ((y - x ** 2) ** 2) * 100.
@dataclass
class DeExample(Minimization2DProblem):
vtr: float = 0.0
def fitness(self, solution: np.ndarray) -> np.ndarray:
return 3 * (1 - solution[:, 0]) ** 2 * np.exp(-1 * solution[:, 0]**2 - (solution[:, 1] + 1)**2) \
- 10 * (1/5 * solution[:, 0] - solution[:, 0]**3 - solution[:, 1]**5) * np.exp(-1 * solution[:, 0]**2 -1 * solution[:, 1]**2) \
- 1/3 * np.exp(-1 * (solution[:, 0] + 1) ** 2 - solution[:, 1] ** 2) | [
"numpy.exp",
"numpy.array",
"numpy.sum"
] | [((471, 505), 'numpy.array', 'np.array', (['[self.x_min, self.y_min]'], {}), '([self.x_min, self.y_min])\n', (479, 505), True, 'import numpy as np\n'), ((573, 607), 'numpy.array', 'np.array', (['[self.x_max, self.y_max]'], {}), '([self.x_max, self.y_max])\n', (581, 607), True, 'import numpy as np\n'), ((796, 832), 'numpy.sum', 'np.sum', (['(solution[:, 0:] ** 2)'], {'axis': '(1)'}), '(solution[:, 0:] ** 2, axis=1)\n', (802, 832), True, 'import numpy as np\n'), ((1472, 1532), 'numpy.exp', 'np.exp', (['(-1 * (solution[:, 0] + 1) ** 2 - solution[:, 1] ** 2)'], {}), '(-1 * (solution[:, 0] + 1) ** 2 - solution[:, 1] ** 2)\n', (1478, 1532), True, 'import numpy as np\n'), ((1259, 1319), 'numpy.exp', 'np.exp', (['(-1 * solution[:, 0] ** 2 - (solution[:, 1] + 1) ** 2)'], {}), '(-1 * solution[:, 0] ** 2 - (solution[:, 1] + 1) ** 2)\n', (1265, 1319), True, 'import numpy as np\n'), ((1399, 1457), 'numpy.exp', 'np.exp', (['(-1 * solution[:, 0] ** 2 - 1 * solution[:, 1] ** 2)'], {}), '(-1 * solution[:, 0] ** 2 - 1 * solution[:, 1] ** 2)\n', (1405, 1457), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
###
# Name: <NAME>
# Student ID: 0022707716
# Email: <EMAIL>
# Course: PHYS/CPSC/MATH 220 Fall 2018
# Assignment: CW 12
###
import numpy as np
import sombrero as sb
def test_sombrero():
"""
Checks if the first 3 and last 3 values of a known function are correct.
"""
knownStartx = np.array([-0.9, -0.9, -0.89999998])
knownStarty = np.array([0.00000000e+00, 8.99884295e-06, 1.79952436e-05])
knownEndx = np.array([-0.81623219, -0.81619339, -0.8161547 ])
knownEndy = np.array([0.03885052, 0.03874837, 0.03864621])
testVals = sb.sombrero(0.18, -0.9, 0, 50)
assert np.allclose(testVals[0,0:3], knownStartx) and np.allclose(testVals[1,0:3], knownStarty) and np.allclose(testVals[0,-4:-1], knownEndx) and np.allclose(testVals[1,-4:-1], knownEndy) | [
"numpy.array",
"sombrero.sombrero",
"numpy.allclose"
] | [((348, 383), 'numpy.array', 'np.array', (['[-0.9, -0.9, -0.89999998]'], {}), '([-0.9, -0.9, -0.89999998])\n', (356, 383), True, 'import numpy as np\n'), ((402, 449), 'numpy.array', 'np.array', (['[0.0, 8.99884295e-06, 1.79952436e-05]'], {}), '([0.0, 8.99884295e-06, 1.79952436e-05])\n', (410, 449), True, 'import numpy as np\n'), ((477, 525), 'numpy.array', 'np.array', (['[-0.81623219, -0.81619339, -0.8161547]'], {}), '([-0.81623219, -0.81619339, -0.8161547])\n', (485, 525), True, 'import numpy as np\n'), ((543, 589), 'numpy.array', 'np.array', (['[0.03885052, 0.03874837, 0.03864621]'], {}), '([0.03885052, 0.03874837, 0.03864621])\n', (551, 589), True, 'import numpy as np\n'), ((605, 635), 'sombrero.sombrero', 'sb.sombrero', (['(0.18)', '(-0.9)', '(0)', '(50)'], {}), '(0.18, -0.9, 0, 50)\n', (616, 635), True, 'import sombrero as sb\n'), ((647, 689), 'numpy.allclose', 'np.allclose', (['testVals[0, 0:3]', 'knownStartx'], {}), '(testVals[0, 0:3], knownStartx)\n', (658, 689), True, 'import numpy as np\n'), ((693, 735), 'numpy.allclose', 'np.allclose', (['testVals[1, 0:3]', 'knownStarty'], {}), '(testVals[1, 0:3], knownStarty)\n', (704, 735), True, 'import numpy as np\n'), ((739, 781), 'numpy.allclose', 'np.allclose', (['testVals[0, -4:-1]', 'knownEndx'], {}), '(testVals[0, -4:-1], knownEndx)\n', (750, 781), True, 'import numpy as np\n'), ((785, 827), 'numpy.allclose', 'np.allclose', (['testVals[1, -4:-1]', 'knownEndy'], {}), '(testVals[1, -4:-1], knownEndy)\n', (796, 827), True, 'import numpy as np\n')] |
import numpy as np
from simforest.criterion import find_split_variance, find_split_theil, find_split_atkinson, find_split_index_gini
from sklearn.preprocessing import LabelEncoder
def find_split(X, y, p, q, criterion, sim_function, gamma=None):
    """ Find split among direction drew on pair of data-points
        Parameters
        ----------
            X : all data-points
            y : output vector
            p : first data-point used for drawing direction of the split
            q : second data-point used for drawing direction of the split
            criterion : criterion, criterion to be minimized when finding for optimal split
            sim_function : function used to measure similarity between data-points
            gamma : optional parameter forwarded to sim_function
        Returns
        -------
            impurity : impurity induced by the split (value of criterion function)
            split_point : split threshold
            similarities : array of shape (n_samples,), values of similarity-values based projection

        Fix: after ``y = y[indices]`` the original theil/atkinson/gini
        branches indexed with ``indices`` a second (and for gini a third)
        time, scrambling the sort order relative to the projected
        similarities.  All branches now use the once-sorted ``y``,
        consistent with the variance branch.
    """
    similarities = sim_function(X, p, q, gamma)
    # order samples along the projection, dropping NaN projections
    indices = sorted([i for i in range(len(y)) if not np.isnan(similarities[i])],
                     key=lambda x: similarities[x])
    y = y[indices]
    n = len(y)
    # similarity values in sorted order, shared by every criterion
    sims_sorted = similarities[indices].astype(np.float32)

    if criterion == 'variance':
        i, impurity = find_split_variance(y.astype(np.float32),
                                           sims_sorted,
                                           np.int32(n - 1))
    elif criterion == 'theil':
        i, impurity = find_split_theil(y.astype(np.float32),
                                        sims_sorted,
                                        np.int32(n - 1))
    elif criterion == 'atkinson':
        i, impurity = find_split_atkinson(y.astype(np.float32),
                                           sims_sorted,
                                           np.int32(n - 1))
    elif criterion == 'gini':
        if y.dtype != int:
            encoder = LabelEncoder()
            y = encoder.fit_transform(y)
        y = np.array(y, dtype=np.int32)
        classes = np.unique(y).astype(np.int32)
        i, impurity = find_split_index_gini(y, np.int32(n - 1), classes)

    # threshold halfway between the two projections straddling the split
    split_point = (similarities[indices[i]] + similarities[indices[i + 1]]) / 2
    return impurity, split_point, similarities
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.unique",
"numpy.int32",
"numpy.array",
"numpy.isnan"
] | [((1438, 1453), 'numpy.int32', 'np.int32', (['(n - 1)'], {}), '(n - 1)\n', (1446, 1453), True, 'import numpy as np\n'), ((1677, 1692), 'numpy.int32', 'np.int32', (['(n - 1)'], {}), '(n - 1)\n', (1685, 1692), True, 'import numpy as np\n'), ((1101, 1126), 'numpy.isnan', 'np.isnan', (['similarities[i]'], {}), '(similarities[i])\n', (1109, 1126), True, 'import numpy as np\n'), ((1928, 1943), 'numpy.int32', 'np.int32', (['(n - 1)'], {}), '(n - 1)\n', (1936, 1943), True, 'import numpy as np\n'), ((2094, 2130), 'numpy.array', 'np.array', (['y[indices]'], {'dtype': 'np.int32'}), '(y[indices], dtype=np.int32)\n', (2102, 2130), True, 'import numpy as np\n'), ((2025, 2039), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2037, 2039), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2236, 2251), 'numpy.int32', 'np.int32', (['(n - 1)'], {}), '(n - 1)\n', (2244, 2251), True, 'import numpy as np\n'), ((2149, 2161), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2158, 2161), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Set up a fixed 20x20 viewport for the animation.
plt.figure(1)
plt.clf()
plt.axis([-10, 10, -10, 10])
# Define properties of the "bouncing balls"
n = 10
# initial positions uniform in [-10, 10) x [-10, 10)
pos = (20 * np.random.sample(n*2) - 10).reshape(n, 2)
# initial velocities drawn from N(0, 0.3)
vel = (0.3 * np.random.normal(size=n*2)).reshape(n, 2)
# marker areas (points^2), uniform in [100, 200)
sizes = 100 * np.random.sample(n) + 100
# Colors where each row is (Red, Green, Blue, Alpha). Each can go
# from 0 to 1. Alpha is the transparency.
colors = np.random.sample([n, 4])
# Draw all the circles and return an object ``circles`` that allows
# manipulation of the plotted circles.
circles = plt.scatter(pos[:,0], pos[:,1], marker='o', s=sizes, c=colors)
# Animate 100 frames: advance, reflect velocities at the walls, redraw.
for i in range(100):
    pos = pos + vel
    bounce = abs(pos) > 10  # Find balls that are outside walls
    vel[bounce] = -vel[bounce]  # Bounce if outside the walls
    circles.set_offsets(pos)  # Change the positions
    plt.pause(0.05)
| [
"numpy.random.normal",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"numpy.random.sample",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.axis"
] | [((52, 65), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (62, 65), True, 'import matplotlib.pyplot as plt\n'), ((66, 75), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (73, 75), True, 'import matplotlib.pyplot as plt\n'), ((76, 104), 'matplotlib.pyplot.axis', 'plt.axis', (['[-10, 10, -10, 10]'], {}), '([-10, 10, -10, 10])\n', (84, 104), True, 'import matplotlib.pyplot as plt\n'), ((426, 450), 'numpy.random.sample', 'np.random.sample', (['[n, 4]'], {}), '([n, 4])\n', (442, 450), True, 'import numpy as np\n'), ((569, 633), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pos[:, 0]', 'pos[:, 1]'], {'marker': '"""o"""', 's': 'sizes', 'c': 'colors'}), "(pos[:, 0], pos[:, 1], marker='o', s=sizes, c=colors)\n", (580, 633), True, 'import matplotlib.pyplot as plt\n'), ((863, 878), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.05)'], {}), '(0.05)\n', (872, 878), True, 'import matplotlib.pyplot as plt\n'), ((280, 299), 'numpy.random.sample', 'np.random.sample', (['n'], {}), '(n)\n', (296, 299), True, 'import numpy as np\n'), ((224, 252), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n * 2)'}), '(size=n * 2)\n', (240, 252), True, 'import numpy as np\n'), ((169, 192), 'numpy.random.sample', 'np.random.sample', (['(n * 2)'], {}), '(n * 2)\n', (185, 192), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from scripts import loss_funcs
from scipy.stats import norm as scipy_normal
# Experiment script: for several candidate scale values, compare the
# empirical CDF of standard-normal deviates against an assumed normal CDF
# and plot a log-cosh discrepancy statistic.
# Grab some normal points
# NOTE(review): ``distribution`` and ``hello`` are never used below.
distribution = loss_funcs.NormalPDFLoss()
hello = {}
# sample sizes to try (currently a single run of 1500 deviates)
to_try = np.asarray([1500] * 1)
#colours = ['r', 'g', 'b', 'm', 'c', 'y']
# NOTE(review): the first ``mean`` list is immediately overwritten by the
# second -- presumably one of the two was meant to be commented out.
mean = [0.0, 0.1, 0.2, 0.3, 0.4]
mean = [0.0, 0.0, 0.0, 0.0, 0.0]
std_d = [1.0, 1.1, 1.2, 1.3, 1.4]
colors = ['r', 'g', 'c', 'b', 'm']
# one max-statistic per (stage, sample-size) pair
means = np.zeros((len(mean), to_try.size))
for stage_i, (a_mean, a_std_d, a_color) in enumerate(zip(mean, std_d, colors)):
    for try_i, i in enumerate(to_try):
        random_variables = scipy_normal.rvs(size=i, loc=0., scale=1.)
        # Evaluate the cdf at each random deviate and sort the array
        cdfs = scipy_normal.cdf(random_variables, loc=0.0, scale=a_std_d)
        # clip to the (a_mean, 1 - a_mean) band
        cdfs = cdfs[np.where(np.logical_and(cdfs > a_mean, cdfs < 1 - a_mean))[0]]
        # Make it median-invariant
        #cdfs = np.abs(cdfs - 0.5)
        cdfs_sorted = np.sort(cdfs)
        # Extend the cdfs and take means
        #cdfs[:] = 0.5
        #np.random.shuffle(cdfs)
        #cdfs = np.mean(cdfs.reshape(random_variables.shape[0], -1), axis=1)
        # Cumulative sum and normalise so last element is 1.0
        # NOTE(review): this cumsum result is dead -- it is overwritten by
        # ``cdfs_expected`` two statements below.
        cdfs_summed = np.cumsum(cdfs)
        cdfs_summed /= 0.5 * i
        # Get expected points: uniform grid the empirical CDF should follow
        cdfs_expected = np.linspace(0., 1., num=cdfs_sorted.size)
        cdfs_summed = cdfs_expected
        # log-cosh discrepancy between expected and observed CDF values
        cdfs_summed = np.log(np.cosh(cdfs_summed - cdfs_sorted))
        # Plot shiz
        plt.plot(cdfs_sorted, cdfs_summed, '-', lw=1, ms=2, color=a_color, alpha=0.1)
        boop = np.max(cdfs_summed)
        #plt.plot([0, 1], [boop, boop], '--', color=c, label='mean^2 of {}'.format(i))
        means[stage_i, try_i] = boop
    # horizontal line at the mean of the max-statistics for this stage
    boop = np.mean(means[stage_i, :])
    plt.plot([0, 1], [boop, boop], '--', color=a_color, label='mean^2 of {}, {}'.format(a_mean, a_std_d))
#plt.plot([0, 1], [0, 0], 'k--')
plt.legend()
plt.xlabel('ci')
plt.ylabel('(F(ci) - ci)^2')
#plt.title('CDF stat for mean {}, std_d {} of model'.format(mean, std_d))
plt.show()
print("Std deviation in means: {}".format(np.std(means, axis=1)))
| [
"numpy.mean",
"numpy.logical_and",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.sort",
"numpy.asarray",
"matplotlib.pyplot.plot",
"scipy.stats.norm.rvs",
"numpy.max",
"scripts.loss_funcs.NormalPDFLoss",
"numpy.linspace",
"numpy.cosh",
"numpy.std",
"numpy.cumsum",
"scipy... | [((169, 195), 'scripts.loss_funcs.NormalPDFLoss', 'loss_funcs.NormalPDFLoss', ([], {}), '()\n', (193, 195), False, 'from scripts import loss_funcs\n'), ((217, 239), 'numpy.asarray', 'np.asarray', (['([1500] * 1)'], {}), '([1500] * 1)\n', (227, 239), True, 'import numpy as np\n'), ((1940, 1952), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1950, 1952), True, 'import matplotlib.pyplot as plt\n'), ((1953, 1969), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ci"""'], {}), "('ci')\n", (1963, 1969), True, 'import matplotlib.pyplot as plt\n'), ((1970, 1998), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""(F(ci) - ci)^2"""'], {}), "('(F(ci) - ci)^2')\n", (1980, 1998), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2083), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2081, 2083), True, 'import matplotlib.pyplot as plt\n'), ((1772, 1798), 'numpy.mean', 'np.mean', (['means[stage_i, :]'], {}), '(means[stage_i, :])\n', (1779, 1798), True, 'import numpy as np\n'), ((609, 653), 'scipy.stats.norm.rvs', 'scipy_normal.rvs', ([], {'size': 'i', 'loc': '(0.0)', 'scale': '(1.0)'}), '(size=i, loc=0.0, scale=1.0)\n', (625, 653), True, 'from scipy.stats import norm as scipy_normal\n'), ((737, 795), 'scipy.stats.norm.cdf', 'scipy_normal.cdf', (['random_variables'], {'loc': '(0.0)', 'scale': 'a_std_d'}), '(random_variables, loc=0.0, scale=a_std_d)\n', (753, 795), True, 'from scipy.stats import norm as scipy_normal\n'), ((973, 986), 'numpy.sort', 'np.sort', (['cdfs'], {}), '(cdfs)\n', (980, 986), True, 'import numpy as np\n'), ((1247, 1262), 'numpy.cumsum', 'np.cumsum', (['cdfs'], {}), '(cdfs)\n', (1256, 1262), True, 'import numpy as np\n'), ((1349, 1392), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'cdfs_sorted.size'}), '(0.0, 1.0, num=cdfs_sorted.size)\n', (1360, 1392), True, 'import numpy as np\n'), ((1522, 1599), 'matplotlib.pyplot.plot', 'plt.plot', (['cdfs_sorted', 'cdfs_summed', '"""-"""'], {'lw': '(1)', 'ms': '(2)', 
'color': 'a_color', 'alpha': '(0.1)'}), "(cdfs_sorted, cdfs_summed, '-', lw=1, ms=2, color=a_color, alpha=0.1)\n", (1530, 1599), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1634), 'numpy.max', 'np.max', (['cdfs_summed'], {}), '(cdfs_summed)\n', (1621, 1634), True, 'import numpy as np\n'), ((2127, 2148), 'numpy.std', 'np.std', (['means'], {'axis': '(1)'}), '(means, axis=1)\n', (2133, 2148), True, 'import numpy as np\n'), ((1457, 1491), 'numpy.cosh', 'np.cosh', (['(cdfs_summed - cdfs_sorted)'], {}), '(cdfs_summed - cdfs_sorted)\n', (1464, 1491), True, 'import numpy as np\n'), ((826, 874), 'numpy.logical_and', 'np.logical_and', (['(cdfs > a_mean)', '(cdfs < 1 - a_mean)'], {}), '(cdfs > a_mean, cdfs < 1 - a_mean)\n', (840, 874), True, 'import numpy as np\n')] |
import os
import sys
import cv2
import csv
import time
import torch
import torchvision
import numpy as np
# import busio
# from board import SCL
# from board import SDA
from uuid import uuid1
# from adafruit_motor import servo
# from adafruit_motor import motor
# from adafruit_pca9685 import PCA9685
from controller import PS4Controller
from controller import get_button_command
from gpio_controller.ServoController import ServoController
import camera
import neural_network
class Autocar:
    """RC car controller: manual PS4-driven mode with training-data capture,
    plus a CNN autopilot that steers from camera frames.

    Steering/throttle are sent through a ServoController; the autopilot model
    is a `neural_network.Net` loaded from 'model/autopilot.pt'.
    """

    def __init__(self):
        # init i2c
        # i2c = busio.I2C(SCL, SDA)
        # init PCA
        # self.pca = PCA9685(i2c)
        # self.pca.frequency = 50
        self.speed = 0
        self.theta = 0
        # Last commanded values, used to avoid re-sending identical commands.
        self.oldSpeed = 0
        self.oldTheta = 0
        self.max_speed = 30
        self.max_theta = 90
        self.servo_controller = ServoController()
        self.servo_controller.init()
        # self.servo_steer = servo.Servo(self.pca.channels[0])
        # self.esc = servo.ContinuousServo(self.pca.channels[1])
        # init model
        model = neural_network.Net()
        self.model = model.eval()
        self.model.load_state_dict(torch.load('model/autopilot.pt'))
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device)
        # init vars
        self.temp = 0  # last camera frame counter seen (for change detection)
        # Per-channel normalisation constants (ImageNet statistics, scaled to 0-255).
        mean = 255.0 * np.array([0.485, 0.456, 0.406])
        stdev = 255.0 * np.array([0.229, 0.224, 0.225])
        self.normalize = torchvision.transforms.Normalize(mean, stdev)
        self.angle_out = 0
        # init Camera
        self.cam = camera.Camera()
        # initial content
        # NOTE(review): the CSV header is (re)written only when the file already
        # exists, which truncates any previously recorded data — confirm intended.
        curr_dir = os.getcwd()
        control_data = os.path.join(curr_dir, 'control_data.csv')
        if os.path.exists(control_data):
            with open(control_data, 'w') as f:
                f.write('date,steering,speed\n')

    def scale_servo(self, x):
        """Map a stick axis in [-1, 1] to a servo angle; the current formula is
        -40*x + 72, i.e. range [32, 112].  Currently unused (caller commented out)."""
        return round((30-70)*x+1/1+1+70, 2)

    def scale_esc(self, x):
        """Map an axis value in [-1, 1] to a throttle fraction in [0, 1/6].
        Currently unused (caller commented out)."""
        return round((x+1)/12, 2)

    def drive(self, axis_data):
        """Convert controller axes to speed/steering and send them to the servos.

        axis_data: mapping with axis 1 = speed stick (inverted) and
        axis 2 = steering stick, both in [-1, 1].
        """
        # self.servo_steer.angle = self.scale_servo(-axis_data[0])
        # sum_inputs = round(-self.scale_esc(axis_data[4]) +
        #                    self.scale_esc(axis_data[3]), 2)
        # self.esc.throttle = sum_inputs
        brake = False  # placeholder: braking is never triggered in this version
        raw_speed = axis_data[1]
        raw_theta = axis_data[2]
        # Rescale stick values into the physical command ranges.
        self.speed = np.interp(-raw_speed, (-1, 1),
                              (-self.max_speed, self.max_speed))
        self.theta = np.interp(raw_theta, (-1, 1),
                              (-self.max_theta, self.max_theta))
        # Only push a command when something actually changed.
        if ((self.oldSpeed != self.speed) or (self.oldTheta != self.theta)):
            self.oldSpeed = self.speed
            self.oldTheta = self.theta
            self.speed = self.servo_controller.set_speed(self.speed)
            self.theta = self.servo_controller.set_steer(self.theta)
        if brake:
            self.speed = self.servo_controller.brake()
            self.oldSpeed = self.speed
            time.sleep(0.5)

    def save_data(self, axis_data):
        """Save the current camera frame (named by a fresh UUID) and append the
        matching (uuid, steering, speed) row to control_data.csv.

        Skips silently when no new frame has arrived since the last call.
        """
        raw_speed = axis_data[1]
        raw_theta = axis_data[2]
        count = self.cam.count
        img = self.cam.value
        if count != self.temp:
            num = uuid1()
            cv2.imwrite('images/' + str(num) + '.jpg', img)
            # append inputs to csv
            with open('control_data.csv', 'a', newline='') as f:
                writer = csv.writer(f)
                # writer.writerow([num, axis_data[0], axis_data[4]])
                writer.writerow([num, raw_theta, raw_speed])
            self.temp = count
            print('Save data!')
        else:
            pass

    def preprocess(self, camera_value):
        """Turn a raw BGR camera frame into a normalised 1x3x224x224 tensor on
        the model's device."""
        x = camera_value
        x = cv2.resize(x, (224, 224))
        x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
        x = x.transpose((2, 0, 1))  # HWC -> CHW
        x = torch.from_numpy(x).float()
        x = self.normalize(x)
        x = x.to(self.device)
        x = x[None, ...]  # add batch dimension
        return x

    def autopilot(self):
        """Run the model on the newest camera frame and steer with its output.

        The predicted steering is clamped to [-1, 1]; speed is fixed at 1.
        """
        img = self.preprocess(self.cam.value)
        count = self.cam.count
        if count != self.temp:
            print('RUN!')
            self.model.eval()
            with torch.no_grad():
                output = self.model(img)
            # outnump = output.cpu().data.numpy()
            outnump = output.cpu().numpy()
            # Clamp the raw prediction so the steering command stays in range.
            if outnump >= 1:
                self.angle_out = [[1]]
            elif outnump <= -1:
                self.angle_out = [[-1]]
            else:
                self.angle_out = outnump
            print(self.angle_out[0][0])
            self.temp = count
        else:
            pass
        # self.drive({0: self.angle_out[0][0],
        #             1: 0.0, 2: 0.0, 3: -1.0, 4: 1, 5: 0.0})
        self.drive({1: 1,  # speed
                    2: self.angle_out[0][0]})  # steering
if __name__ == "__main__":
    # Entry point: pressing controller button 0 toggles between manual
    # training mode (drive + optionally record samples) and autopilot.
    car = Autocar()
    # Initialize controller
    ps4_controller = PS4Controller()
    ps4_controller.start()
    # Start in training mode
    train = True
    trig = True  # edge detector so one button press toggles exactly once

    def toggle(x):
        """Return the boolean negation of x."""
        return not x

    try:
        while True:
            button_data, axis_data, _ = ps4_controller.read()
            if button_data[0] and trig:
                # Rising edge of button 0: flip training/autopilot mode.
                train = toggle(train)
                trig = False
            elif not button_data[0]:
                # Button released: re-arm the toggle.
                trig = True
            if train:
                car.drive(axis_data)
                # Only record a sample while the throttle axis is past a
                # small dead zone, so idle frames are not saved.
                if axis_data[4] >= 0.12:
                    car.save_data(axis_data)
                else:
                    print('Not saving img')
            else:
                car.autopilot()
    except KeyboardInterrupt:
        # BUG FIX: Autocar.__init__ has the PCA9685 setup commented out, so
        # `car.pca` does not exist; the original unconditional
        # `car.pca.deinit()` raised AttributeError on Ctrl-C instead of
        # exiting cleanly.  Guard the de-initialisation.
        pca = getattr(car, 'pca', None)
        if pca is not None:
            pca.deinit()
        sys.exit(0)
| [
"time.sleep",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"sys.exit",
"os.path.exists",
"neural_network.Net",
"csv.writer",
"uuid.uuid1",
"torchvision.transforms.Normalize",
"numpy.interp",
"controller.PS4Controller",
"cv2.cvtColor",
"cv2.resize",
"torch.load",
"camer... | [((5104, 5119), 'controller.PS4Controller', 'PS4Controller', ([], {}), '()\n', (5117, 5119), False, 'from controller import PS4Controller\n'), ((847, 864), 'gpio_controller.ServoController.ServoController', 'ServoController', ([], {}), '()\n', (862, 864), False, 'from gpio_controller.ServoController import ServoController\n'), ((1069, 1089), 'neural_network.Net', 'neural_network.Net', ([], {}), '()\n', (1087, 1089), False, 'import neural_network\n'), ((1503, 1548), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['mean', 'stdev'], {}), '(mean, stdev)\n', (1535, 1548), False, 'import torchvision\n'), ((1618, 1633), 'camera.Camera', 'camera.Camera', ([], {}), '()\n', (1631, 1633), False, 'import camera\n'), ((1680, 1691), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1689, 1691), False, 'import os\n'), ((1715, 1757), 'os.path.join', 'os.path.join', (['curr_dir', '"""control_data.csv"""'], {}), "(curr_dir, 'control_data.csv')\n", (1727, 1757), False, 'import os\n'), ((1769, 1797), 'os.path.exists', 'os.path.exists', (['control_data'], {}), '(control_data)\n', (1783, 1797), False, 'import os\n'), ((2492, 2557), 'numpy.interp', 'np.interp', (['(-raw_speed)', '(-1, 1)', '(-self.max_speed, self.max_speed)'], {}), '(-raw_speed, (-1, 1), (-self.max_speed, self.max_speed))\n', (2501, 2557), True, 'import numpy as np\n'), ((2610, 2674), 'numpy.interp', 'np.interp', (['raw_theta', '(-1, 1)', '(-self.max_theta, self.max_theta)'], {}), '(raw_theta, (-1, 1), (-self.max_theta, self.max_theta))\n', (2619, 2674), True, 'import numpy as np\n'), ((3865, 3890), 'cv2.resize', 'cv2.resize', (['x', '(224, 224)'], {}), '(x, (224, 224))\n', (3875, 3890), False, 'import cv2\n'), ((3903, 3937), 'cv2.cvtColor', 'cv2.cvtColor', (['x', 'cv2.COLOR_BGR2RGB'], {}), '(x, cv2.COLOR_BGR2RGB)\n', (3915, 3937), False, 'import cv2\n'), ((1159, 1191), 'torch.load', 'torch.load', (['"""model/autopilot.pt"""'], {}), "('model/autopilot.pt')\n", (1169, 1191), False, 'import 
torch\n'), ((1390, 1421), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1398, 1421), True, 'import numpy as np\n'), ((1446, 1477), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1454, 1477), True, 'import numpy as np\n'), ((3126, 3141), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3136, 3141), False, 'import time\n'), ((3355, 3362), 'uuid.uuid1', 'uuid1', ([], {}), '()\n', (3360, 3362), False, 'from uuid import uuid1\n'), ((5889, 5900), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5897, 5900), False, 'import sys\n'), ((1251, 1276), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1274, 1276), False, 'import torch\n'), ((3549, 3562), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3559, 3562), False, 'import csv\n'), ((3985, 4004), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (4001, 4004), False, 'import torch\n'), ((4324, 4339), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4337, 4339), False, 'import torch\n')] |
#!/usr/bin/env python
from load import ROOT as R
from matplotlib import pyplot as plt
import numpy as N
from matplotlib.ticker import MaxNLocator
import gna.constructors as C
from gna.bindings import DataType
from gna.unittest import *
from gna import env
from gna import context
# @floatcopy(globals()) # uncomment after porting the histogram
def test_histedges_linear_01():
    """HistEdgesLinear must apply the affine map k*x + b to the input histogram edges."""
    slope, offset = 1.5, 0.5
    nbins = 10
    edges_in_expected = N.arange(nbins, dtype='d')
    edges_out_expected = slope * edges_in_expected + offset
    hist_in = C.Histogram(edges_in_expected)
    hist_out = C.HistEdgesLinear(hist_in, slope, offset)
    edges_in = N.array(hist_in.hist.hist.datatype().edges)
    edges_out = N.array(hist_out.histedges.hist.datatype().edges)
    print(' Edges0 (expected)', edges_in_expected)
    print(' Edges0', edges_in)
    print(' Edges1 (expected)', edges_out_expected)
    print(' Edges1', edges_out)
    assert not (edges_in_expected - edges_in).any()
    assert not (edges_out_expected - edges_out).any()
if __name__ == "__main__":
    # Run every test_* function defined in this module (gna.unittest helper).
    run_unittests(globals())
| [
"gna.constructors.Histogram",
"numpy.arange",
"gna.constructors.HistEdgesLinear"
] | [((425, 450), 'numpy.arange', 'N.arange', (['size'], {'dtype': '"""d"""'}), "(size, dtype='d')\n", (433, 450), True, 'import numpy as N\n'), ((491, 510), 'gna.constructors.Histogram', 'C.Histogram', (['edges0'], {}), '(edges0)\n', (502, 510), True, 'import gna.constructors as C\n'), ((526, 558), 'gna.constructors.HistEdgesLinear', 'C.HistEdgesLinear', (['hist_in', 'k', 'b'], {}), '(hist_in, k, b)\n', (543, 558), True, 'import gna.constructors as C\n')] |
import numpy as np
import scipy.sparse as sp
from pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix import get2DMatrix, getBCHorizontal, \
get2DUpwindMatrix
def getBoussinesq2DUpwindMatrix(N, dx, u_adv, order):
    """Assemble the 4x4 block upwind advection matrix for the 2D Boussinesq system.

    Every unknown is advected by the same operator -u_adv * D_x; the minus sign
    appears because the equations read u_t + u_adv * D_x u = ..., while the
    returned matrix M is used as u_t = M u.
    Returns a CSC sparse matrix of size (4*N[0]*N[1])^2.
    """
    advection = -u_adv * get2DUpwindMatrix(N, dx, order)
    ndof = N[0] * N[1]
    empty = np.zeros((ndof, ndof))
    rows = []
    for pos in range(4):
        blocks = [empty, empty, empty, empty]
        blocks[pos] = advection  # advection acts on the diagonal block only
        rows.append(sp.hstack(tuple(blocks), format="csr"))
    return sp.csc_matrix(sp.vstack(tuple(rows), format="csr"))
def getBoussinesq2DMatrix(N, h, bc_hor, bc_ver, c_s, Nfreq, order):
    """Build (Id, M) for the linear 2D Boussinesq system written as u_t = M u.

    Unknown ordering is (u, w, b, p); c_s is the sound speed and Nfreq the
    buoyancy frequency.  Returns both operators as CSC sparse matrices.
    """
    ndof = N[0] * N[1]
    Dx_u, _ = get2DMatrix(N, h, bc_hor[0], bc_ver[0], order)
    _, Dz_w = get2DMatrix(N, h, bc_hor[1], bc_ver[1], order)
    Dx_p, Dz_p = get2DMatrix(N, h, bc_hor[3], bc_ver[3], order)
    zero = np.zeros((ndof, ndof))
    identity = sp.eye(ndof)
    # All terms are moved to the right-hand side, hence the minus signs.
    row_u = sp.hstack((zero, zero, zero, -Dx_p), format="csr")
    row_w = sp.hstack((zero, zero, identity, -Dz_p), format="csr")
    row_b = sp.hstack((zero, -Nfreq ** 2 * identity, zero, zero), format="csr")
    row_p = sp.hstack((-c_s ** 2 * Dx_u, -c_s ** 2 * Dz_w, zero, zero), format="csr")
    system = sp.vstack((row_u, row_w, row_b, row_p), format="csr")
    return sp.csc_matrix(sp.eye(4 * ndof)), sp.csc_matrix(system)
def getBoussinesqBCHorizontal(value, N, dx, bc_hor):
    """Assemble the left/right horizontal boundary vectors for the Boussinesq system.

    Returns (b_left, b_right); each stacks the pressure contribution twice
    followed by the combined u+w contribution.
    """
    u_bc = getBCHorizontal(value[0], N, dx, bc_hor[0])
    w_bc = getBCHorizontal(value[1], N, dx, bc_hor[1])
    p_bc = getBCHorizontal(value[3], N, dx, bc_hor[3])
    left = np.concatenate((p_bc[0], p_bc[0], u_bc[0] + w_bc[0]))
    right = np.concatenate((p_bc[1], p_bc[1], u_bc[1] + w_bc[1]))
    return left, right
return b_left, b_right
def getBoussinesqBCVertical():
return 0.0
| [
"pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.get2DUpwindMatrix",
"scipy.sparse.vstack",
"scipy.sparse.eye",
"pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.get2DMatrix",
"pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.getBCHori... | [((250, 281), 'pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.get2DUpwindMatrix', 'get2DUpwindMatrix', (['N', 'dx', 'order'], {}), '(N, dx, order)\n', (267, 281), False, 'from pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix import get2DMatrix, getBCHorizontal, get2DUpwindMatrix\n'), ((443, 479), 'numpy.zeros', 'np.zeros', (['(N[0] * N[1], N[0] * N[1])'], {}), '((N[0] * N[1], N[0] * N[1]))\n', (451, 479), True, 'import numpy as np\n'), ((489, 545), 'scipy.sparse.hstack', 'sp.hstack', (['(-u_adv * Dx, Zero, Zero, Zero)'], {'format': '"""csr"""'}), "((-u_adv * Dx, Zero, Zero, Zero), format='csr')\n", (498, 545), True, 'import scipy.sparse as sp\n'), ((555, 611), 'scipy.sparse.hstack', 'sp.hstack', (['(Zero, -u_adv * Dx, Zero, Zero)'], {'format': '"""csr"""'}), "((Zero, -u_adv * Dx, Zero, Zero), format='csr')\n", (564, 611), True, 'import scipy.sparse as sp\n'), ((621, 677), 'scipy.sparse.hstack', 'sp.hstack', (['(Zero, Zero, -u_adv * Dx, Zero)'], {'format': '"""csr"""'}), "((Zero, Zero, -u_adv * Dx, Zero), format='csr')\n", (630, 677), True, 'import scipy.sparse as sp\n'), ((687, 743), 'scipy.sparse.hstack', 'sp.hstack', (['(Zero, Zero, Zero, -u_adv * Dx)'], {'format': '"""csr"""'}), "((Zero, Zero, Zero, -u_adv * Dx), format='csr')\n", (696, 743), True, 'import scipy.sparse as sp\n'), ((752, 793), 'scipy.sparse.vstack', 'sp.vstack', (['(M1, M2, M3, M4)'], {'format': '"""csr"""'}), "((M1, M2, M3, M4), format='csr')\n", (761, 793), True, 'import scipy.sparse as sp\n'), ((806, 822), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['M'], {}), '(M)\n', (819, 822), True, 'import scipy.sparse as sp\n'), ((910, 956), 'pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.get2DMatrix', 'get2DMatrix', (['N', 'h', 'bc_hor[0]', 'bc_ver[0]', 'order'], {}), '(N, h, bc_hor[0], bc_ver[0], order)\n', (921, 956), False, 'from 
pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix import get2DMatrix, getBCHorizontal, get2DUpwindMatrix\n'), ((974, 1020), 'pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.get2DMatrix', 'get2DMatrix', (['N', 'h', 'bc_hor[1]', 'bc_ver[1]', 'order'], {}), '(N, h, bc_hor[1], bc_ver[1], order)\n', (985, 1020), False, 'from pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix import get2DMatrix, getBCHorizontal, get2DUpwindMatrix\n'), ((1104, 1150), 'pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.get2DMatrix', 'get2DMatrix', (['N', 'h', 'bc_hor[3]', 'bc_ver[3]', 'order'], {}), '(N, h, bc_hor[3], bc_ver[3], order)\n', (1115, 1150), False, 'from pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix import get2DMatrix, getBCHorizontal, get2DUpwindMatrix\n'), ((1197, 1233), 'numpy.zeros', 'np.zeros', (['(N[0] * N[1], N[0] * N[1])'], {}), '((N[0] * N[1], N[0] * N[1]))\n', (1205, 1233), True, 'import numpy as np\n'), ((1245, 1264), 'scipy.sparse.eye', 'sp.eye', (['(N[0] * N[1])'], {}), '(N[0] * N[1])\n', (1251, 1264), True, 'import scipy.sparse as sp\n'), ((1375, 1425), 'scipy.sparse.hstack', 'sp.hstack', (['(Zero, Zero, Zero, -Dx_p)'], {'format': '"""csr"""'}), "((Zero, Zero, Zero, -Dx_p), format='csr')\n", (1384, 1425), True, 'import scipy.sparse as sp\n'), ((1435, 1485), 'scipy.sparse.hstack', 'sp.hstack', (['(Zero, Zero, Id_w, -Dz_p)'], {'format': '"""csr"""'}), "((Zero, Zero, Id_w, -Dz_p), format='csr')\n", (1444, 1485), True, 'import scipy.sparse as sp\n'), ((1495, 1558), 'scipy.sparse.hstack', 'sp.hstack', (['(Zero, -Nfreq ** 2 * Id_w, Zero, Zero)'], {'format': '"""csr"""'}), "((Zero, -Nfreq ** 2 * Id_w, Zero, Zero), format='csr')\n", (1504, 1558), True, 'import scipy.sparse as sp\n'), ((1568, 1641), 'scipy.sparse.hstack', 'sp.hstack', (['(-c_s ** 2 * Dx_u, -c_s ** 2 * Dz_w, Zero, Zero)'], {'format': '"""csr"""'}), "((-c_s ** 2 * Dx_u, -c_s ** 2 * Dz_w, 
Zero, Zero), format='csr')\n", (1577, 1641), True, 'import scipy.sparse as sp\n'), ((1650, 1691), 'scipy.sparse.vstack', 'sp.vstack', (['(M1, M2, M3, M4)'], {'format': '"""csr"""'}), "((M1, M2, M3, M4), format='csr')\n", (1659, 1691), True, 'import scipy.sparse as sp\n'), ((1702, 1725), 'scipy.sparse.eye', 'sp.eye', (['(4 * N[0] * N[1])'], {}), '(4 * N[0] * N[1])\n', (1708, 1725), True, 'import scipy.sparse as sp\n'), ((1853, 1896), 'pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.getBCHorizontal', 'getBCHorizontal', (['value[0]', 'N', 'dx', 'bc_hor[0]'], {}), '(value[0], N, dx, bc_hor[0])\n', (1868, 1896), False, 'from pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix import get2DMatrix, getBCHorizontal, get2DUpwindMatrix\n'), ((1921, 1964), 'pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.getBCHorizontal', 'getBCHorizontal', (['value[1]', 'N', 'dx', 'bc_hor[1]'], {}), '(value[1], N, dx, bc_hor[1])\n', (1936, 1964), False, 'from pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix import get2DMatrix, getBCHorizontal, get2DUpwindMatrix\n'), ((2059, 2102), 'pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix.getBCHorizontal', 'getBCHorizontal', (['value[3]', 'N', 'dx', 'bc_hor[3]'], {}), '(value[3], N, dx, bc_hor[3])\n', (2074, 2102), False, 'from pySDC.implementations.problem_classes.boussinesq_helpers.build2DFDMatrix import get2DMatrix, getBCHorizontal, get2DUpwindMatrix\n'), ((2117, 2170), 'numpy.concatenate', 'np.concatenate', (['(bp_left, bp_left, bu_left + bw_left)'], {}), '((bp_left, bp_left, bu_left + bw_left))\n', (2131, 2170), True, 'import numpy as np\n'), ((2185, 2242), 'numpy.concatenate', 'np.concatenate', (['(bp_right, bp_right, bu_right + bw_right)'], {}), '((bp_right, bp_right, bu_right + bw_right))\n', (2199, 2242), True, 'import numpy as np\n'), ((1738, 1755), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['Id'], {}), '(Id)\n', (1751, 
1755), True, 'import scipy.sparse as sp\n'), ((1757, 1773), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['M'], {}), '(M)\n', (1770, 1773), True, 'import scipy.sparse as sp\n')] |
import pickle
import numpy as np
import pandas as pd
from src.src_vvCV_MDMP.vv_CV_MDMP import *
from South_Function.South_function_trainer import *
##
# Example for vv_CV_MDMP
def my_func_1(X):
    """Quadratic 1 + X + X^2 plus a Gaussian-damped sine bump (elementwise on a tensor)."""
    polynomial = 1 + X + X ** 2
    bump = torch.sin(math.pi * X) * torch.exp(-X.pow(2))
    return polynomial + bump
def my_func_2(X):
    """Scaled variant of my_func_1: 1.5 + X + 1.5*X^2 plus a 1.75-weighted damped sine bump."""
    polynomial = 1.5 + X + 1.5 * (X ** 2)
    bump = 1.75 * torch.sin(math.pi * X) * torch.exp(-X.pow(2))
    return polynomial + bump
## Varying one of distributions -- check the effect of the closeness of target distributions
# Five settings ("sit0".."sit4"): the first Gaussian is always N(0, 1) while the
# second Gaussian's variance grows from 1.0 to 1.25, so the two target
# distributions drift apart across settings.
mu_1_sit0 = torch.zeros(1,1)
cov_1_sit0= torch.eye(1)
mu_2_sit0 = torch.zeros(1,1)
cov_2_sit0= torch.eye(1)
means_tuple_sit0 = (mu_1_sit0, mu_2_sit0)
covs_tuple_sit0 = (cov_1_sit0, cov_2_sit0)
mu_1_sit1 = torch.zeros(1,1)
cov_1_sit1= torch.eye(1)
mu_2_sit1 = torch.zeros(1,1)
cov_2_sit1= torch.eye(1) * 1.1
means_tuple_sit1 = (mu_1_sit1, mu_2_sit1)
covs_tuple_sit1 = (cov_1_sit1, cov_2_sit1)
mu_1_sit2 = torch.zeros(1,1)
cov_1_sit2= torch.eye(1)
mu_2_sit2 = torch.zeros(1,1)
cov_2_sit2= torch.eye(1) * 1.15
means_tuple_sit2 = (mu_1_sit2, mu_2_sit2)
covs_tuple_sit2 = (cov_1_sit2, cov_2_sit2)
mu_1_sit3 = torch.zeros(1,1)
cov_1_sit3= torch.eye(1)
mu_2_sit3 = torch.zeros(1,1)
cov_2_sit3= torch.eye(1) * 1.2
means_tuple_sit3 = (mu_1_sit3, mu_2_sit3)
covs_tuple_sit3 = (cov_1_sit3, cov_2_sit3)
mu_1_sit4 = torch.zeros(1,1)
cov_1_sit4= torch.eye(1)
mu_2_sit4 = torch.zeros(1,1)
cov_2_sit4= torch.eye(1) * 1.25
means_tuple_sit4 = (mu_1_sit4, mu_2_sit4)
covs_tuple_sit4 = (cov_1_sit4, cov_2_sit4)
# One ((means), (covs)) pair per setting, consumed by varying_distrbutions_multiruns.
tuple_of_meanscovstuple = ((means_tuple_sit0, covs_tuple_sit0), (means_tuple_sit1, covs_tuple_sit1),(means_tuple_sit2, covs_tuple_sit2), (means_tuple_sit3, covs_tuple_sit3), (means_tuple_sit4, covs_tuple_sit4))
#
# Analytic integral values per setting, one row per setting and one column per
# integrand (my_func_1, my_func_2).
true_vals = torch.Tensor([[2, 3],[2, 3.15], [2, 3.225], [2, 3.3], [2, 3.375]])
true_vals.size() # 2
true_vals[0].size()
# Initialize the class
no_replica = 100  # independent repetitions per setting
set_of_ss = 50    # Monte Carlo sample size per distribution
no_sets = 5       # number of distribution settings (must match tuple_of_meanscovstuple)
my_example = toy_example_MDMP(funcs= (my_func_1, my_func_2), sample_size_per_dist = set_of_ss, num_rep = no_replica, \
                             vv_CV_model=VV_CV_vectorvaluedfuncs_model_MDMP, \
                             vv_CV_obj = penalized_ls_objective_vectorvaluedfunc_MDMP, \
                             prior_kernel = stein_matrix_valued_kernel , base_kernel=rbf_kernel, \
                             batch_size_tune = 5, flag_if_use_medianheuristic=False, beta_cstkernel=0, lr_tune=0.05,\
                             epochs_tune=30, verbose_tune=False, \
                             regularizer_const = 1e-3, regularizer_const_FB=1, batch_size=5, lr=1e-3, epochs=400, \
                             verbose=False)
# Run the algorithm and save outputs
MyvvCV_ests, MysvCV_ests, MysvCV_closed_form_sols = my_example.varying_distrbutions_multiruns(tuple_of_meanscovstuple)
#
# Per-setting mean squared error of each estimator against the true integrals.
MSE_MyvvCV_ests = torch.zeros(len(tuple_of_meanscovstuple))
MSE_MysvCV_ests = torch.zeros(len(tuple_of_meanscovstuple))
MSE_MysvCV_closed_form_sols = torch.zeros(len(tuple_of_meanscovstuple))
#
# Matching standard errors.
# NOTE(review): the divisor is sqrt(len(tuple_of_meanscovstuple)) = sqrt(5),
# not sqrt(no_replica) — confirm this is the intended standard-error scaling.
MSE_MyvvCV_ests_std = torch.zeros(len(tuple_of_meanscovstuple))
MSE_MysvCV_ests_std = torch.zeros(len(tuple_of_meanscovstuple))
MSE_MysvCV_closed_form_sols_std = torch.zeros(len(tuple_of_meanscovstuple))
#
for i in range(len(tuple_of_meanscovstuple)):
    cur_task_true_vals = true_vals[i].unsqueeze(dim=0)
    assert cur_task_true_vals.size() == torch.Size([1, len(tuple_of_meanscovstuple[0][0])])
    MSE_MyvvCV_ests[i] = (MyvvCV_ests[i,:,:] - cur_task_true_vals).pow(2).mean()
    MSE_MyvvCV_ests_std[i] = (MyvvCV_ests[i,:,:] - cur_task_true_vals).pow(2).std()/((len(tuple_of_meanscovstuple) * torch.ones(1)).sqrt())
    MSE_MysvCV_ests[i] = (MysvCV_ests[i,:,:] - cur_task_true_vals).pow(2).mean()
    MSE_MysvCV_ests_std[i] = (MysvCV_ests[i,:,:] - cur_task_true_vals).pow(2).std()/((len(tuple_of_meanscovstuple) * torch.ones(1)).sqrt())
    MSE_MysvCV_closed_form_sols[i] = (MysvCV_closed_form_sols[i,:,:] - cur_task_true_vals).pow(2).mean()
    MSE_MysvCV_closed_form_sols_std[i] = (MysvCV_closed_form_sols[i,:,:] - cur_task_true_vals).pow(2).std()/((len(tuple_of_meanscovstuple) * torch.ones(1)).sqrt())
# Stack into a (3 estimators x 5 settings) numpy array for inspection.
MSE_dat = torch.stack((MSE_MyvvCV_ests, MSE_MysvCV_ests, MSE_MysvCV_closed_form_sols), dim=0).detach().numpy()
MSE_dat
# Plot
# Form a pd.dataframe
# For every setting, build long-format frames of squared errors per replica with
# a method label ('vv-CV' / 'CF' / 'CV') and a 'Set i' tag, for each of the two
# integrands (f1, f2); rows accumulate across settings via DataFrame.append.
for i in range(no_sets):
    # vv-CV
    VV_cvest_funcidx_methodidx_f1 = list(zip(np.abs(MyvvCV_ests[i, :, 0].detach().numpy() - true_vals[i, 0].detach().numpy())**2, np.repeat('vv-CV', no_replica), np.repeat("Set {}".format(i), no_replica)))
    cur_vv_CV_est_f1_df = pd.DataFrame(data=VV_cvest_funcidx_methodidx_f1, columns=['cv_est', 'method_idx', 'setting'])
    if i == 0:
        vv_CV_est_f1_df = cur_vv_CV_est_f1_df
    if i >= 1:
        vv_CV_est_f1_df = vv_CV_est_f1_df.append(cur_vv_CV_est_f1_df)
    VV_cvest_funcidx_methodidx_f2 = list(zip(np.abs(MyvvCV_ests[i, :, 1].detach().numpy() - true_vals[i, 1].detach().numpy())**2, np.repeat('vv-CV', no_replica), np.repeat("Set {}".format(i), no_replica)))
    cur_vv_CV_est_f2_df = pd.DataFrame(data=VV_cvest_funcidx_methodidx_f2, columns=['cv_est', 'method_idx', 'setting'])
    if i == 0:
        vv_CV_est_f2_df = cur_vv_CV_est_f2_df
    if i >= 1:
        vv_CV_est_f2_df = vv_CV_est_f2_df.append(cur_vv_CV_est_f2_df)
    vv_CV_est_giant_f1f2 = vv_CV_est_f1_df.append(vv_CV_est_f2_df)
    # CF -- should use sv-CV_closed form sols
    CF_cvest_funcidx_methodidx_f1 = list(zip(np.abs(MysvCV_closed_form_sols[i, :, 0].detach().numpy() - true_vals[i, 0].detach().numpy())**2, np.repeat('CF', no_replica), np.repeat("Set {}".format(i), no_replica)))
    cur_CF_est_f1_df = pd.DataFrame(data=CF_cvest_funcidx_methodidx_f1, columns=['cv_est', 'method_idx', 'setting'])
    if i == 0:
        CF_est_f1_df = cur_CF_est_f1_df
    if i >= 1:
        CF_est_f1_df = CF_est_f1_df.append(cur_CF_est_f1_df)
    CF_cvest_funcidx_methodidx_f2 = list(zip(np.abs(MysvCV_closed_form_sols[i, :, 1].detach().numpy() - true_vals[i, 1].detach().numpy())**2, np.repeat('CF', no_replica), np.repeat("Set {}".format(i), no_replica)))
    cur_CF_est_f2_df = pd.DataFrame(data=CF_cvest_funcidx_methodidx_f2, columns=['cv_est', 'method_idx', 'setting'])
    if i == 0:
        CF_est_f2_df = cur_CF_est_f2_df
    if i >= 1:
        CF_est_f2_df = CF_est_f2_df.append(cur_CF_est_f2_df)
    CF_est_giant_f1f2 = CF_est_f1_df.append(CF_est_f2_df)
    # sv-CV
    SV_cvest_funcidx_methodidx_f1 = list(zip(np.abs(MysvCV_ests[i, :, 0].detach().numpy() - true_vals[i, 0].detach().numpy())**2, np.repeat('CV', no_replica), np.repeat("Set {}".format(i), no_replica)))
    cur_sv_CV_est_f1_df = pd.DataFrame(data=SV_cvest_funcidx_methodidx_f1, columns=['cv_est', 'method_idx', 'setting'])
    if i == 0:
        sv_CV_est_f1_df = cur_sv_CV_est_f1_df
    if i >= 1:
        sv_CV_est_f1_df = sv_CV_est_f1_df.append(cur_sv_CV_est_f1_df)
    SV_cvest_funcidx_methodidx_f2 = list(zip(np.abs(MysvCV_ests[i, :, 1].detach().numpy()- true_vals[i, 1].detach().numpy())**2, np.repeat('CV', no_replica), np.repeat("Set {}".format(i), no_replica)))
    cur_sv_CV_est_f2_df = pd.DataFrame(data=SV_cvest_funcidx_methodidx_f2, columns=['cv_est', 'method_idx', 'setting'])
    if i == 0:
        sv_CV_est_f2_df = cur_sv_CV_est_f2_df
    if i >= 1:
        sv_CV_est_f2_df = sv_CV_est_f2_df.append(cur_sv_CV_est_f2_df)
    sv_CV_est_giant_f1f2 = sv_CV_est_f1_df.append(sv_CV_est_f2_df)
# Merge into one giant dataset
my_vv_CV_DF_giant_f1f2 = vv_CV_est_giant_f1f2.append([CF_est_giant_f1f2, sv_CV_est_giant_f1f2])
my_vv_CV_DF_giant_f1f2['cv_est']
##################
# Save output
##################
# NOTE: To rerun the experiment above and keep your own results, uncomment the block below; it pickles the estimates, their standard errors, and the distribution settings under ../data/.
#
# my_vv_CV_DF_giant_f1f2 = vv_CV_est_giant_f1f2.append([sv_CV_est_giant_f1f2])
#
# my_vv_CV_DF_giant_f1f2.to_pickle("../data/South_function_pdframe_saved.pkl")
#
# with open('../data/South_function_all_data.pkl', 'wb') as output:
# MSE_MyvvCV_ests_std = MSE_MyvvCV_ests_std
# pickle.dump(MSE_MyvvCV_ests_std, output, pickle.HIGHEST_PROTOCOL)
#
# MSE_MysvCV_ests_std = MSE_MysvCV_ests_std
# pickle.dump(MSE_MysvCV_ests_std, output, pickle.HIGHEST_PROTOCOL)
#
# MSE_MysvCV_closed_form_sols_std = MSE_MysvCV_closed_form_sols_std
# pickle.dump(MSE_MysvCV_closed_form_sols_std, output, pickle.HIGHEST_PROTOCOL)
#
# #
# MyvvCV_ests = MyvvCV_ests
# pickle.dump(MyvvCV_ests, output, pickle.HIGHEST_PROTOCOL)
#
# MysvCV_ests = MysvCV_ests
# pickle.dump(MysvCV_ests, output, pickle.HIGHEST_PROTOCOL)
#
# MysvCV_closed_form_sols = MysvCV_closed_form_sols
# pickle.dump(MysvCV_closed_form_sols, output, pickle.HIGHEST_PROTOCOL)
#
# MSE_dat = MSE_dat
# pickle.dump(MSE_dat, output, pickle.HIGHEST_PROTOCOL)
#
# means_tuple_sit1 = means_tuple_sit1
# pickle.dump(means_tuple_sit1, output, pickle.HIGHEST_PROTOCOL)
# covs_tuple_sit1 = covs_tuple_sit1
# pickle.dump(covs_tuple_sit1, output, pickle.HIGHEST_PROTOCOL)
#
# means_tuple_sit2 = means_tuple_sit2
# pickle.dump(means_tuple_sit2, output, pickle.HIGHEST_PROTOCOL)
# covs_tuple_sit2 = covs_tuple_sit2
# pickle.dump(covs_tuple_sit2, output, pickle.HIGHEST_PROTOCOL)
#
# means_tuple_sit3 = means_tuple_sit3
# pickle.dump(means_tuple_sit3, output, pickle.HIGHEST_PROTOCOL)
# covs_tuple_sit3 = covs_tuple_sit3
# pickle.dump(covs_tuple_sit3, output, pickle.HIGHEST_PROTOCOL)
#
# means_tuple_sit4 = means_tuple_sit4
# pickle.dump(means_tuple_sit4, output, pickle.HIGHEST_PROTOCOL)
# covs_tuple_sit4 = covs_tuple_sit4
# pickle.dump(covs_tuple_sit4, output, pickle.HIGHEST_PROTOCOL)
#
| [
"pandas.DataFrame",
"numpy.repeat"
] | [((4520, 4617), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'VV_cvest_funcidx_methodidx_f1', 'columns': "['cv_est', 'method_idx', 'setting']"}), "(data=VV_cvest_funcidx_methodidx_f1, columns=['cv_est',\n 'method_idx', 'setting'])\n", (4532, 4617), True, 'import pandas as pd\n'), ((4993, 5090), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'VV_cvest_funcidx_methodidx_f2', 'columns': "['cv_est', 'method_idx', 'setting']"}), "(data=VV_cvest_funcidx_methodidx_f2, columns=['cv_est',\n 'method_idx', 'setting'])\n", (5005, 5090), True, 'import pandas as pd\n'), ((5587, 5684), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'CF_cvest_funcidx_methodidx_f1', 'columns': "['cv_est', 'method_idx', 'setting']"}), "(data=CF_cvest_funcidx_methodidx_f1, columns=['cv_est',\n 'method_idx', 'setting'])\n", (5599, 5684), True, 'import pandas as pd\n'), ((6051, 6148), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'CF_cvest_funcidx_methodidx_f2', 'columns': "['cv_est', 'method_idx', 'setting']"}), "(data=CF_cvest_funcidx_methodidx_f2, columns=['cv_est',\n 'method_idx', 'setting'])\n", (6063, 6148), True, 'import pandas as pd\n'), ((6579, 6676), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'SV_cvest_funcidx_methodidx_f1', 'columns': "['cv_est', 'method_idx', 'setting']"}), "(data=SV_cvest_funcidx_methodidx_f1, columns=['cv_est',\n 'method_idx', 'setting'])\n", (6591, 6676), True, 'import pandas as pd\n'), ((7048, 7145), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'SV_cvest_funcidx_methodidx_f2', 'columns': "['cv_est', 'method_idx', 'setting']"}), "(data=SV_cvest_funcidx_methodidx_f2, columns=['cv_est',\n 'method_idx', 'setting'])\n", (7060, 7145), True, 'import pandas as pd\n'), ((4418, 4448), 'numpy.repeat', 'np.repeat', (['"""vv-CV"""', 'no_replica'], {}), "('vv-CV', no_replica)\n", (4427, 4448), True, 'import numpy as np\n'), ((4891, 4921), 'numpy.repeat', 'np.repeat', (['"""vv-CV"""', 'no_replica'], {}), "('vv-CV', no_replica)\n", (4900, 4921), True, 
'import numpy as np\n'), ((5491, 5518), 'numpy.repeat', 'np.repeat', (['"""CF"""', 'no_replica'], {}), "('CF', no_replica)\n", (5500, 5518), True, 'import numpy as np\n'), ((5955, 5982), 'numpy.repeat', 'np.repeat', (['"""CF"""', 'no_replica'], {}), "('CF', no_replica)\n", (5964, 5982), True, 'import numpy as np\n'), ((6480, 6507), 'numpy.repeat', 'np.repeat', (['"""CV"""', 'no_replica'], {}), "('CV', no_replica)\n", (6489, 6507), True, 'import numpy as np\n'), ((6949, 6976), 'numpy.repeat', 'np.repeat', (['"""CV"""', 'no_replica'], {}), "('CV', no_replica)\n", (6958, 6976), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from txros import util
from navigator_missions.navigator import Navigator
import numpy as np
from mil_tools import rosmsg_to_numpy
from twisted.internet import defer
from mil_misc_tools import ThrowingArgumentParser
from mil_msgs.srv import CameraToLidarTransform, CameraToLidarTransformRequest
from mil_msgs.msg import ObjectsInImage
from geometry_msgs.msg import Point
class Docking(Navigator):
    """Dock mission: locate the dock among nearby unclassified objects,
    identify the symbol seen by the camera, drive into the dock, hold
    station for a symbol-dependent time, then back out.
    """
    @classmethod
    def decode_parameters(cls, parameters):
        """Split the raw mission parameter string and parse it as argv."""
        argv = parameters.split()
        return cls.parser.parse_args(argv)
    @classmethod
    def init(cls):
        """One-time setup: argument parser, bounding-box subscriber, and the
        camera-to-lidar transform service client."""
        parser = ThrowingArgumentParser(description='Dock',
                                       usage='''Default parameters: \'runtask Docking
                                       \'''')
        parser.add_argument('-t', '--time', type=int, default=-1)
        cls.parser = parser
        cls.bboxsub = cls.nh.subscribe("/bbox_pub", ObjectsInImage)
        cls.camera_lidar_tf = cls.nh.get_service_client('/camera_to_lidar/front_right_cam', CameraToLidarTransform)
    @util.cancellableInlineCallbacks
    def run(self, args):
        """Full docking sequence (Twisted inline-callbacks coroutine)."""
        # Parse Arguments
        wait_time = args.time
        # Find Dock
        dock_position = None
        largest_size = 0
        boat_pos = (yield self.tx_pose)[0]
        # Get 10 closest unclassified objects
        # NOTE(review): with throw=False this call may yield None/empty when
        # nothing is found -- confirm get_sorted_objects' contract before
        # indexing unclass[0].
        unclass = yield self.get_sorted_objects(name='UNKNOWN', n=10, throw=False)
        for obj in unclass[0]:
            point = rosmsg_to_numpy(obj.pose.position)
            scale = rosmsg_to_numpy(obj.scale)
            # Filter such that we know the dock is closer than 20 meters
            if np.linalg.norm(point - boat_pos) > 20:
                break
            # Keep the largest object (by horizontal footprint) within range.
            size = scale[0] * scale[1]
            if size > largest_size:
                largest_size = size
                dock_position = point
        if dock_position is None:
            self.send_feedback('Cancelling, failed to find dock position')
            return
        self.send_feedback('Found dock, looking for image')
        # Find the camera input
        center_frame = yield self.get_center_frame()
        symbol = center_frame[2].lower()
        self.send_feedback('Identified {}'.format(symbol))
        # Find the target point
        target_pt = yield self.get_target_pt(center_frame)
        self.send_feedback('Identified target')
        # Identify the time to wait in the dock
        if wait_time == -1:
            if 'triangle' in symbol:
                wait_time = 7
            elif 'circle' in symbol:
                wait_time = 17
            else: # Cruciform
                wait_time = 27
        # Go to pose
        self.send_feedback('Moving into dock')
        yield self.move.set_position(target_pt).look_at(dock_position).go(blind=True)
        # Sleep the appropriate amount of time
        self.send_feedback('------------------------------------------------')
        self.send_feedback('!!!!!!!!!!!!! STATION KEEPING !!!!!!!!!!!!!!!!!!')
        yield self.nh.sleep(wait_time)
        self.send_feedback('!!!!!!!!!!!!!!! EXITING DOCK !!!!!!!!!!!!!!!!!!!')
        self.send_feedback('------------------------------------------------')
        # Back out of the dock
        yield self.move.backward(5).go(blind=True)
        yield self.move.backward(5).go(blind=True)
        self.send_feedback('Done with docking!')
    @util.cancellableInlineCallbacks
    def get_center_frame(self):
        """Yield ((u, v) pixel center, full bbox message, symbol name) taken
        from the first object in the next bounding-box message."""
        msgf = yield self.bboxsub.get_next_message()
        msg = msgf.objects[0]
        # print msg
        c1 = rosmsg_to_numpy(msg.points[0])
        c2 = rosmsg_to_numpy(msg.points[1])
        tmp = (((c1 + c2) / 2.0), msgf, msg.name)
        defer.returnValue(tmp)
    @util.cancellableInlineCallbacks
    def get_target_pt(self, center_frame):
        """Project the detected image point into ENU via the camera-to-lidar
        service, then yield a staging point offset from the dock face along
        its outward normal -- whichever side is closer to the boat."""
        msg = CameraToLidarTransformRequest()
        msg.header.stamp = center_frame[1].header.stamp
        msg.header.frame_id = center_frame[1].header.frame_id
        msg.point = Point(x=center_frame[0][0], y=center_frame[0][1], z=0.0)
        msg.tolerance = 500
        pose_offset = yield self.camera_lidar_tf(msg)
        cam_to_enu = yield self.tf_listener.get_transform('enu', center_frame[1].header.frame_id)
        normal = rosmsg_to_numpy(pose_offset.normal)
        normal = cam_to_enu.transform_vector(normal)
        # Project the normal into the horizontal plane and re-normalise.
        normal = normal[0:2] / np.linalg.norm(normal[0:2])
        normal = np.append(normal, [0])
        found_pt = rosmsg_to_numpy(pose_offset.closest)
        found_pt = cam_to_enu.transform_point(found_pt)
        found_pt[2] = 0
        # Extend out by normal multiplier
        normal *= 3
        found_pt_1 = found_pt + normal
        found_pt_2 = found_pt + -1 * normal
        # Which is closer
        boat_pos = (yield self.tx_pose)[0]
        if np.linalg.norm(found_pt_1 - boat_pos) > np.linalg.norm(found_pt_2 - boat_pos):
            found_pt = found_pt_2
        else:
            found_pt = found_pt_1
        defer.returnValue(found_pt)
| [
"mil_msgs.srv.CameraToLidarTransformRequest",
"twisted.internet.defer.returnValue",
"mil_tools.rosmsg_to_numpy",
"numpy.append",
"geometry_msgs.msg.Point",
"mil_misc_tools.ThrowingArgumentParser",
"numpy.linalg.norm"
] | [((614, 759), 'mil_misc_tools.ThrowingArgumentParser', 'ThrowingArgumentParser', ([], {'description': '"""Dock"""', 'usage': '"""Default parameters: \'runtask Docking\n \'"""'}), '(description=\'Dock\', usage=\n """Default parameters: \'runtask Docking\n \'"""\n )\n', (636, 759), False, 'from mil_misc_tools import ThrowingArgumentParser\n'), ((3591, 3621), 'mil_tools.rosmsg_to_numpy', 'rosmsg_to_numpy', (['msg.points[0]'], {}), '(msg.points[0])\n', (3606, 3621), False, 'from mil_tools import rosmsg_to_numpy\n'), ((3635, 3665), 'mil_tools.rosmsg_to_numpy', 'rosmsg_to_numpy', (['msg.points[1]'], {}), '(msg.points[1])\n', (3650, 3665), False, 'from mil_tools import rosmsg_to_numpy\n'), ((3724, 3746), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['tmp'], {}), '(tmp)\n', (3741, 3746), False, 'from twisted.internet import defer\n'), ((3842, 3873), 'mil_msgs.srv.CameraToLidarTransformRequest', 'CameraToLidarTransformRequest', ([], {}), '()\n', (3871, 3873), False, 'from mil_msgs.srv import CameraToLidarTransform, CameraToLidarTransformRequest\n'), ((4012, 4068), 'geometry_msgs.msg.Point', 'Point', ([], {'x': 'center_frame[0][0]', 'y': 'center_frame[0][1]', 'z': '(0.0)'}), '(x=center_frame[0][0], y=center_frame[0][1], z=0.0)\n', (4017, 4068), False, 'from geometry_msgs.msg import Point\n'), ((4268, 4303), 'mil_tools.rosmsg_to_numpy', 'rosmsg_to_numpy', (['pose_offset.normal'], {}), '(pose_offset.normal)\n', (4283, 4303), False, 'from mil_tools import rosmsg_to_numpy\n'), ((4433, 4455), 'numpy.append', 'np.append', (['normal', '[0]'], {}), '(normal, [0])\n', (4442, 4455), True, 'import numpy as np\n'), ((4475, 4511), 'mil_tools.rosmsg_to_numpy', 'rosmsg_to_numpy', (['pose_offset.closest'], {}), '(pose_offset.closest)\n', (4490, 4511), False, 'from mil_tools import rosmsg_to_numpy\n'), ((4989, 5016), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['found_pt'], {}), '(found_pt)\n', (5006, 5016), False, 'from twisted.internet import defer\n'), 
((1488, 1522), 'mil_tools.rosmsg_to_numpy', 'rosmsg_to_numpy', (['obj.pose.position'], {}), '(obj.pose.position)\n', (1503, 1522), False, 'from mil_tools import rosmsg_to_numpy\n'), ((1543, 1569), 'mil_tools.rosmsg_to_numpy', 'rosmsg_to_numpy', (['obj.scale'], {}), '(obj.scale)\n', (1558, 1569), False, 'from mil_tools import rosmsg_to_numpy\n'), ((4388, 4415), 'numpy.linalg.norm', 'np.linalg.norm', (['normal[0:2]'], {}), '(normal[0:2])\n', (4402, 4415), True, 'import numpy as np\n'), ((4819, 4856), 'numpy.linalg.norm', 'np.linalg.norm', (['(found_pt_1 - boat_pos)'], {}), '(found_pt_1 - boat_pos)\n', (4833, 4856), True, 'import numpy as np\n'), ((4859, 4896), 'numpy.linalg.norm', 'np.linalg.norm', (['(found_pt_2 - boat_pos)'], {}), '(found_pt_2 - boat_pos)\n', (4873, 4896), True, 'import numpy as np\n'), ((1659, 1691), 'numpy.linalg.norm', 'np.linalg.norm', (['(point - boat_pos)'], {}), '(point - boat_pos)\n', (1673, 1691), True, 'import numpy as np\n')] |
import time
from subprocess import call
import numpy as np
# Joint name -> index into the robot's joint vectors (e.g. traj_gen.dq.value).
# Prefix r/l = right/left; h = hip, k = knee, a = ankle, s = shoulder,
# e = elbow, w = wrist, h (alone) = hand, t = torso; suffix y/r/p = yaw/roll/pitch.
jID = {
    "rhy": 0,
    "rhr": 1,
    "rhp": 2,
    "rk": 3,
    "rap": 4,
    "rar": 5,
    "lhy": 6,
    "lhr": 7,
    "lhp": 8,
    "lk": 9,
    "lap": 10,
    "lar": 11,
    "ty": 12,
    "tp": 13,
    "hy": 14,
    "hp": 15,
    "rsp": 16,
    "rsr": 17,
    "rsy": 18,
    "re": 19,
    "rwy": 20,
    "rwp": 21,
    "rh": 22,
    "lsp": 23,
    "lsr": 24,
    "lsy": 25,
    "le": 26,
    "lwy": 27,
    "lwp": 28,
    "lh": 29
}
def solve1stOrderLeastSquare(x, y):
    """Fit the line y = a*x + b to the samples (x, y) in the L2 sense.

    Returns:
        (a, b): slope and intercept of the least-squares fit.
    """
    # Design matrix: one constant column (intercept) and one column of x.
    design = np.vstack([np.ones(len(x)), x]).T
    coef = solveLeastSquare(design, y)
    # Column 0 of the design is the intercept, column 1 the slope.
    return (coef[1, 0], coef[0, 0])
def solveLeastSquare(A, b):
    """Solve min_x || A*x - b ||^2 using the Moore-Penrose pseudo-inverse.

    Returns:
        Column np.matrix of shape (A.shape[1], 1).
        NOTE: np.matrix is deprecated in NumPy, but the return type is kept
        so existing callers indexing coef[i, 0] keep working.
    """
    pinv_A = np.linalg.pinv(A)
    rhs = np.matrix(b).T  # b as a column vector
    return pinv_A * rhs
def gentleStop(traj_gen, joint):
    """Wait until the joint's measured velocity is negligible, then stop it."""
    idx = jID[joint]
    # Poll at ~1 kHz until |velocity| drops below the threshold.
    while abs(traj_gen.dq.value[idx]) > 0.0001:
        time.sleep(0.001)
    traj_gen.stop(joint)
def doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode='constAcc'):
    """Cycle `joint` between min_pos and max_pos N times per period.

    For each period in `times` (e.g. [5.0, 4.0, 3.0]) the joint is driven
    with either a constant-acceleration or constant-velocity profile.
    """
    # Start from the lower end of the range.
    traj_gen.moveJoint(joint, min_pos, 3.0)
    time.sleep(3.5)
    for period in times:
        if mode == 'constAcc':
            traj_gen.startConstAcc(joint, max_pos, period)
        elif mode == 'constVel':
            traj_gen.startTriangle(joint, max_pos, period, 0.3)
        # A full cycle lasts 2*period; wake up just before the last one ends.
        time.sleep(period * 2 * N - 1.0)
        gentleStop(traj_gen, joint)
        traj_gen.moveJoint(joint, min_pos, 3.0)
        time.sleep(3.5)
# (-0.785398, 0.523599); #// right hip yaw *****************************
def identify_rhy_static(traj_gen, staticTime=60.0):
    """Static ID capture for right hip yaw: zero pose, rhp to -1.57 rad,
    hold for staticTime seconds, return to zero."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhp', -1.57, 5.0)
    time.sleep(5.0)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_rhy_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: offset lhr to 0.1, then cycle rhy over
    [-0.0, 0.5] rad via doNCycles. (Default `times` list is never mutated.)"""
    (joint, min_pos, max_pos) = ('rhy', -0.0, 0.5)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lhr', 0.1, 3.0)
    time.sleep(3.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-0.610865, 0.349066); #// right hip roll ****************************
def identify_rhr_static(traj_gen, staticTime=60.0):
    """Static ID capture for right hip roll: zero pose, lhr to 0.25 rad,
    hold for staticTime seconds, return to zero."""
    # (joint, min_pos, max_pos) = ('rhr', -0.5, 0.25)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lhr', 0.25, 5.0)
    time.sleep(5.0)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_rhr_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0, 2.5]):
    """Dynamic ID capture: raise both arms, offset lhr to 0.25, then cycle
    rhr over [-0.5, 0.25] rad via doNCycles."""
    (joint, min_pos, max_pos) = ('rhr', -0.5, 0.25)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rsp', -1.57, 5.0)
    traj_gen.moveJoint('lsp', -1.57, 5.0)
    traj_gen.moveJoint('re', -1.57, 5.0)
    traj_gen.moveJoint('le', -1.57, 5.0)
    traj_gen.moveJoint('lhr', 0.25, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-2.18166, 0.733038); #// right hip pitch ***************************:
def identify_rhp_static(traj_gen, staticTime=60.0):
    """Static ID capture for right hip pitch: hold the zero pose for
    staticTime seconds (no extra joint offsets)."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    time.sleep(5.0)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_rhp_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: offset rhr to -0.2, then cycle rhp over
    [-1.7, 0.6] rad via doNCycles."""
    (joint, min_pos, max_pos) = ('rhp', -1.7, 0.6)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhr', -0.2, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-0.0349066, 2.61799); #// right knee ********************************
def identify_rk_static(traj_gen, staticTime=60.0):
    """Static ID capture for right knee: rhp to -1.57 and rk to 1.57 rad,
    hold for staticTime seconds, return to zero."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhp', -1.57, 5.0)
    traj_gen.moveJoint('rk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_rk_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: rhp to -1.57 rad, then cycle rk over
    [0.0, 2.5] rad via doNCycles."""
    (joint, min_pos, max_pos) = ('rk', 0., 2.5)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhp', -1.57, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-1.309, 0.733038); #// right ankle pitch *************************
def identify_rap_static(traj_gen, staticTime=60.0):
    """Static ID capture for right ankle pitch: rhp to -1.57 and rk to
    1.57 rad, hold for staticTime seconds, return to zero."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhp', -1.57, 5.0)
    traj_gen.moveJoint('rk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_rap_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: rhp to -1.57 and rk to 1.57 rad, then cycle rap
    over [-1.2, 0.6] rad via doNCycles."""
    (joint, min_pos, max_pos) = ('rap', -1.2, 0.6)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhp', -1.57, 5.0)
    traj_gen.moveJoint('rk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-0.349066, 0.610865); #// right ankle roll **************************
def identify_rar_static(traj_gen, staticTime=60.0):
    """Static ID capture for right ankle roll: rhp to -1.57 and rk to
    1.57 rad, hold for staticTime seconds, return to zero."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhp', -1.57, 5.0)
    traj_gen.moveJoint('rk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_rar_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: rhp to -1.57 and rk to 1.57 rad, then cycle rar
    over [-0.25, 0.5] rad via doNCycles."""
    (joint, min_pos, max_pos) = ('rar', -0.25, 0.5)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhp', -1.57, 5.0)
    traj_gen.moveJoint('rk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-0.785398, 0.523599); #// left hip yaw *********************INVERTED
def identify_lhy_static(traj_gen, staticTime=60.0):
    """Static ID capture for left hip yaw: zero pose, lhp to -1.57 rad,
    hold for staticTime seconds, return to zero."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lhp', -1.57, 5.0)
    time.sleep(5.0)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_lhy_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: offset rhr to -0.1, then cycle lhy over
    [0.0, -0.5] rad (mirrored sign) via doNCycles."""
    (joint, min_pos, max_pos) = ('lhy', +0.0, -0.5)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhr', -0.1, 3.0)
    time.sleep(3.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-0.610865, 0.349066); #// left hip roll ********************INVERTED
def identify_lhr_static(traj_gen, staticTime=60.0):
    """Static ID capture for left hip roll: zero pose, rhr to -0.25 rad,
    hold for staticTime seconds, return to zero."""
    # (joint, min_pos, max_pos) = ('lhr', +0.5, -0.25)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('rhr', -0.25, 5.0)
    time.sleep(5.0)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_lhr_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0, 2.5]):
    """Dynamic ID capture: raise both arms, offset rhr to -0.25, then cycle
    lhr over [0.5, -0.25] rad (mirrored sign) via doNCycles."""
    (joint, min_pos, max_pos) = ('lhr', +0.5, -0.25)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lsp', -1.57, 5.0)
    traj_gen.moveJoint('rsp', -1.57, 5.0)
    traj_gen.moveJoint('le', -1.57, 5.0)
    traj_gen.moveJoint('re', -1.57, 5.0)
    traj_gen.moveJoint('rhr', -0.25, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-2.18166, 0.733038); #// left hip pitch ***************************:
def identify_lhp_static(traj_gen, staticTime=60.0):
    """Static ID capture for left hip pitch: hold the zero pose for
    staticTime seconds (no extra joint offsets)."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    time.sleep(5.0)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_lhp_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: raise both arms, offset lhr to 0.2, then cycle
    lhp over [-1.7, 0.6] rad via doNCycles."""
    (joint, min_pos, max_pos) = ('lhp', -1.7, 0.6)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lsp', -1.57, 5.0)
    traj_gen.moveJoint('rsp', -1.57, 5.0)
    traj_gen.moveJoint('le', -1.57, 5.0)
    traj_gen.moveJoint('re', -1.57, 5.0)
    traj_gen.moveJoint('lhr', +0.2, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-0.0349066, 2.61799); #// left knee ********************************
def identify_lk_static(traj_gen, staticTime=60.0):
    """Static ID capture for left knee: lhp to -1.57 and lk to 1.57 rad,
    hold for staticTime seconds, return to zero."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lhp', -1.57, 5.0)
    traj_gen.moveJoint('lk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_lk_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: lhp to -1.57 rad, then cycle lk over
    [0.0, 2.5] rad via doNCycles."""
    (joint, min_pos, max_pos) = ('lk', 0., 2.5)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lhp', -1.57, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-1.309, 0.733038); #// left ankle pitch *************************
def identify_lap_static(traj_gen, staticTime=60.0):
    """Static ID capture for left ankle pitch: lhp to -1.57 and lk to
    1.57 rad, hold for staticTime seconds, return to zero."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lhp', -1.57, 5.0)
    traj_gen.moveJoint('lk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_lap_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: lhp to -1.57 and lk to 1.57 rad, then cycle lap
    over [-1.2, 0.6] rad via doNCycles."""
    (joint, min_pos, max_pos) = ('lap', -1.2, 0.6)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lhp', -1.57, 5.0)
    traj_gen.moveJoint('lk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
# (-0.349066, 0.610865); #// left ankle roll ******************INVERTED
def identify_lar_static(traj_gen, staticTime=60.0):
    """Static ID capture for left ankle roll: lhp to -1.57 and lk to
    1.57 rad, hold for staticTime seconds, return to zero."""
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lhp', -1.57, 5.0)
    traj_gen.moveJoint('lk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    time.sleep(staticTime)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_lar_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0]):
    """Dynamic ID capture: lhp to -1.57 and lk to 1.57 rad, then cycle lar
    over [0.25, -0.5] rad (mirrored sign) via doNCycles."""
    (joint, min_pos, max_pos) = ('lar', +0.25, -0.5)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lhp', -1.57, 5.0)
    traj_gen.moveJoint('lk', 1.57, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_tp_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0, 2.5]):
    """Dynamic ID capture for torso pitch: cycle tp over [0.0, 1.0] rad
    via doNCycles from the zero pose."""
    (joint, min_pos, max_pos) = ('tp', 0., 1.)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def identify_ty_dynamic(traj_gen, mode='constAcc', N=3, times=[5.0, 4.0, 3.0, 2.5]):
    """Dynamic ID capture for torso yaw: raise both shoulders, then cycle
    ty over [-0.7, 0.7] rad via doNCycles."""
    (joint, min_pos, max_pos) = ('ty', -0.7, 0.7)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
    traj_gen.moveJoint('lsp', -1.57, 5.0)
    traj_gen.moveJoint('rsp', -1.57, 5.0)
    time.sleep(5.0 + 0.5)
    doNCycles(traj_gen, joint, min_pos, max_pos, N, times, mode)
    go_to_zero_position(traj_gen, 5.0)
    time.sleep(5.0 + 0.5)
def go_to_zero_position(traj_gen, T=10.0):
    """Command every joint to its home position over T seconds.

    All joints go to 0.0 except the hands ('rh', 'lh'), which home at 0.3.
    The command order matches the original hand-written sequence:
    right leg, left leg, torso, head, right arm (incl. hand), left arm.
    """
    # NOTE: an interactive confirmation prompt used to exist here, but the
    # Python interpreter bindings could not handle input(); it stays removed.
    home_targets = (
        # RLEG
        ('rhy', 0.0), ('rhr', 0.0), ('rhp', 0.0), ('rk', 0.0), ('rap', 0.0), ('rar', 0.0),
        # LLEG
        ('lhy', 0.0), ('lhr', 0.0), ('lhp', 0.0), ('lk', 0.0), ('lap', 0.0), ('lar', 0.0),
        # TORSO
        ('ty', 0.0), ('tp', 0.0),
        # HEAD
        ('hy', 0.0), ('hp', 0.0),
        # RARM (hand homes at 0.3)
        ('rsp', 0.0), ('rsr', 0.0), ('rsy', 0.0), ('re', 0.0), ('rwy', 0.0), ('rwp', 0.0), ('rh', 0.3),
        # LARM (hand homes at 0.3)
        ('lsp', 0.0), ('lsr', 0.0), ('lsy', 0.0), ('le', 0.0), ('lwy', 0.0), ('lwp', 0.0), ('lh', 0.3),
    )
    for joint, position in home_targets:
        traj_gen.moveJoint(joint, position, T)
def deleteDatFilesInTmp():
    """Remove all .dat trace files from /tmp.

    shell=True is required here so the shell expands the '*.dat' glob.
    """
    call('rm /tmp/*.dat', shell=True)
def stopTracerAndCopyFiles(tracer, directory):
    """Stop and flush the tracer, then move its /tmp dumps into `directory`.

    The 2 s sleep gives the tracer time to finish writing before the move.
    NOTE(review): `directory` is interpolated into shell commands; only pass
    trusted, shell-safe paths.
    """
    tracer.stop()
    tracer.dump()
    time.sleep(2.0)
    call('mkdir ' + directory, shell=True)
    call('mv /tmp/*.dat ' + directory, shell=True)
# deleteDatFilesInTmp()
# tracer = start_tracer(robot, estimator, torque_ctrl, traj_gen, ctrl_manager, inv_dyn, None)
# do your experiment here
# stopTracerAndCopyFiles(tracer,directory='/tmp/JOINT0_ID_static')
| [
"numpy.linalg.pinv",
"numpy.matrix",
"subprocess.call",
"time.sleep"
] | [((1313, 1328), 'time.sleep', 'time.sleep', (['(3.5)'], {}), '(3.5)\n', (1323, 1328), False, 'import time\n'), ((1841, 1862), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (1851, 1862), False, 'import time\n'), ((1909, 1924), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (1919, 1924), False, 'import time\n'), ((1929, 1951), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (1939, 1951), False, 'import time\n'), ((1995, 2016), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (2005, 2016), False, 'import time\n'), ((2194, 2215), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (2204, 2215), False, 'import time\n'), ((2260, 2281), 'time.sleep', 'time.sleep', (['(3.0 + 0.5)'], {}), '(3.0 + 0.5)\n', (2270, 2281), False, 'import time\n'), ((2390, 2411), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (2400, 2411), False, 'import time\n'), ((2637, 2658), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (2647, 2658), False, 'import time\n'), ((2704, 2719), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (2714, 2719), False, 'import time\n'), ((2724, 2746), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (2734, 2746), False, 'import time\n'), ((2790, 2811), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (2800, 2811), False, 'import time\n'), ((2995, 3016), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (3005, 3016), False, 'import time\n'), ((3228, 3249), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (3238, 3249), False, 'import time\n'), ((3358, 3379), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (3368, 3379), False, 'import time\n'), ((3552, 3573), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (3562, 3573), False, 'import time\n'), ((3578, 3593), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (3588, 
3593), False, 'import time\n'), ((3598, 3620), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (3608, 3620), False, 'import time\n'), ((3664, 3685), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (3674, 3685), False, 'import time\n'), ((3863, 3884), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (3873, 3884), False, 'import time\n'), ((3930, 3951), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (3940, 3951), False, 'import time\n'), ((4060, 4081), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (4070, 4081), False, 'import time\n'), ((4252, 4273), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (4262, 4273), False, 'import time\n'), ((4360, 4381), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (4370, 4381), False, 'import time\n'), ((4386, 4408), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (4396, 4408), False, 'import time\n'), ((4452, 4473), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (4462, 4473), False, 'import time\n'), ((4647, 4668), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (4657, 4668), False, 'import time\n'), ((4715, 4736), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (4725, 4736), False, 'import time\n'), ((4845, 4866), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (4855, 4866), False, 'import time\n'), ((5038, 5059), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (5048, 5059), False, 'import time\n'), ((5146, 5167), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (5156, 5167), False, 'import time\n'), ((5172, 5194), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (5182, 5194), False, 'import time\n'), ((5238, 5259), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (5248, 5259), False, 'import time\n'), ((5437, 5458), 
'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (5447, 5458), False, 'import time\n'), ((5545, 5566), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (5555, 5566), False, 'import time\n'), ((5675, 5696), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (5685, 5696), False, 'import time\n'), ((5868, 5889), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (5878, 5889), False, 'import time\n'), ((5976, 5997), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (5986, 5997), False, 'import time\n'), ((6002, 6024), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (6012, 6024), False, 'import time\n'), ((6068, 6089), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (6078, 6089), False, 'import time\n'), ((6268, 6289), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (6278, 6289), False, 'import time\n'), ((6376, 6397), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (6386, 6397), False, 'import time\n'), ((6506, 6527), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (6516, 6527), False, 'import time\n'), ((6698, 6719), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (6708, 6719), False, 'import time\n'), ((6766, 6781), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (6776, 6781), False, 'import time\n'), ((6786, 6808), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (6796, 6808), False, 'import time\n'), ((6852, 6873), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (6862, 6873), False, 'import time\n'), ((7052, 7073), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (7062, 7073), False, 'import time\n'), ((7119, 7140), 'time.sleep', 'time.sleep', (['(3.0 + 0.5)'], {}), '(3.0 + 0.5)\n', (7129, 7140), False, 'import time\n'), ((7249, 7270), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 
0.5)\n', (7259, 7270), False, 'import time\n'), ((7496, 7517), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (7506, 7517), False, 'import time\n'), ((7564, 7579), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (7574, 7579), False, 'import time\n'), ((7584, 7606), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (7594, 7606), False, 'import time\n'), ((7650, 7671), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (7660, 7671), False, 'import time\n'), ((7856, 7877), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (7866, 7877), False, 'import time\n'), ((8090, 8111), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (8100, 8111), False, 'import time\n'), ((8220, 8241), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (8230, 8241), False, 'import time\n'), ((8413, 8434), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (8423, 8434), False, 'import time\n'), ((8439, 8454), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (8449, 8454), False, 'import time\n'), ((8459, 8481), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (8469, 8481), False, 'import time\n'), ((8525, 8546), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (8535, 8546), False, 'import time\n'), ((8724, 8745), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (8734, 8745), False, 'import time\n'), ((8957, 8978), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (8967, 8978), False, 'import time\n'), ((9087, 9108), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (9097, 9108), False, 'import time\n'), ((9278, 9299), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (9288, 9299), False, 'import time\n'), ((9386, 9407), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (9396, 9407), False, 'import time\n'), ((9412, 9434), 
'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (9422, 9434), False, 'import time\n'), ((9478, 9499), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (9488, 9499), False, 'import time\n'), ((9673, 9694), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (9683, 9694), False, 'import time\n'), ((9741, 9762), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (9751, 9762), False, 'import time\n'), ((9871, 9892), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (9881, 9892), False, 'import time\n'), ((10063, 10084), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (10073, 10084), False, 'import time\n'), ((10171, 10192), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (10181, 10192), False, 'import time\n'), ((10197, 10219), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (10207, 10219), False, 'import time\n'), ((10263, 10284), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (10273, 10284), False, 'import time\n'), ((10462, 10483), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (10472, 10483), False, 'import time\n'), ((10570, 10591), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (10580, 10591), False, 'import time\n'), ((10700, 10721), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (10710, 10721), False, 'import time\n'), ((10892, 10913), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (10902, 10913), False, 'import time\n'), ((11000, 11021), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (11010, 11021), False, 'import time\n'), ((11026, 11048), 'time.sleep', 'time.sleep', (['staticTime'], {}), '(staticTime)\n', (11036, 11048), False, 'import time\n'), ((11092, 11113), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (11102, 11113), False, 'import time\n'), ((11293, 11314), 
'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (11303, 11314), False, 'import time\n'), ((11401, 11422), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (11411, 11422), False, 'import time\n'), ((11531, 11552), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (11541, 11552), False, 'import time\n'), ((11730, 11751), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (11740, 11751), False, 'import time\n'), ((11860, 11881), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (11870, 11881), False, 'import time\n'), ((12062, 12083), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (12072, 12083), False, 'import time\n'), ((12172, 12193), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (12182, 12193), False, 'import time\n'), ((12302, 12323), 'time.sleep', 'time.sleep', (['(5.0 + 0.5)'], {}), '(5.0 + 0.5)\n', (12312, 12323), False, 'import time\n'), ((14181, 14214), 'subprocess.call', 'call', (['"""rm /tmp/*.dat"""'], {'shell': '(True)'}), "('rm /tmp/*.dat', shell=True)\n", (14185, 14214), False, 'from subprocess import call\n'), ((14304, 14319), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (14314, 14319), False, 'import time\n'), ((14324, 14362), 'subprocess.call', 'call', (["('mkdir ' + directory)"], {'shell': '(True)'}), "('mkdir ' + directory, shell=True)\n", (14328, 14362), False, 'from subprocess import call\n'), ((14367, 14413), 'subprocess.call', 'call', (["('mv /tmp/*.dat ' + directory)"], {'shell': '(True)'}), "('mv /tmp/*.dat ' + directory, shell=True)\n", (14371, 14413), False, 'from subprocess import call\n'), ((872, 889), 'numpy.linalg.pinv', 'np.linalg.pinv', (['A'], {}), '(A)\n', (886, 889), True, 'import numpy as np\n'), ((1049, 1066), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (1059, 1066), False, 'import time\n'), ((1534, 1561), 'time.sleep', 'time.sleep', (['(T * 2 * N - 1.0)'], {}), '(T * 2 
* N - 1.0)\n', (1544, 1561), False, 'import time\n'), ((1654, 1669), 'time.sleep', 'time.sleep', (['(3.5)'], {}), '(3.5)\n', (1664, 1669), False, 'import time\n'), ((892, 904), 'numpy.matrix', 'np.matrix', (['b'], {}), '(b)\n', (901, 904), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from timeeval import Algorithm, TrainingType
from timeeval.adapters import FunctionAdapter
class TestAlgorithm(unittest.TestCase):
    """Tests for Algorithm.execute / Algorithm.train across training types."""

    def _make_algorithm(self, training_type):
        # Build a pass-through Algorithm (identity main) of the given training type.
        return Algorithm(
            name="TestAlgorithm",
            main=FunctionAdapter.identity(),
            training_type=training_type,
        )

    def setUp(self) -> None:
        self.data = np.random.rand(10)
        self.unsupervised_algorithm = self._make_algorithm(TrainingType.UNSUPERVISED)
        self.supervised_algorithm = self._make_algorithm(TrainingType.SUPERVISED)
        self.semi_supervised_algorithm = self._make_algorithm(TrainingType.SEMI_SUPERVISED)

    def test_execution(self):
        # execute() with an identity main must hand the data back unchanged,
        # regardless of training type.
        for algorithm in (self.unsupervised_algorithm,
                          self.semi_supervised_algorithm,
                          self.supervised_algorithm):
            np.testing.assert_array_equal(self.data, algorithm.execute(self.data))

    def test_unsupervised_training(self):
        # Training an unsupervised algorithm is not meaningful and must raise.
        with self.assertRaises(ValueError) as e:
            self.unsupervised_algorithm.train(self.data)
        self.assertRegex(str(e.exception), r".*[Cc]alling.*train.*unsupervised algorithm.*not supported.*")

    def test_semi_and_supervised_training(self):
        # train() with an identity main returns the training data unchanged.
        for algorithm in (self.semi_supervised_algorithm, self.supervised_algorithm):
            np.testing.assert_array_equal(self.data, algorithm.train(self.data))
| [
"timeeval.adapters.FunctionAdapter.identity",
"numpy.random.rand",
"numpy.testing.assert_array_equal"
] | [((220, 238), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (234, 238), True, 'import numpy as np\n'), ((914, 962), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.data', 'result'], {}), '(self.data, result)\n', (943, 962), True, 'import numpy as np\n'), ((1039, 1087), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.data', 'result'], {}), '(self.data, result)\n', (1068, 1087), True, 'import numpy as np\n'), ((1159, 1207), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.data', 'result'], {}), '(self.data, result)\n', (1188, 1207), True, 'import numpy as np\n'), ((1588, 1636), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.data', 'result'], {}), '(self.data, result)\n', (1617, 1636), True, 'import numpy as np\n'), ((1706, 1754), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.data', 'result'], {}), '(self.data, result)\n', (1735, 1754), True, 'import numpy as np\n'), ((339, 365), 'timeeval.adapters.FunctionAdapter.identity', 'FunctionAdapter.identity', ([], {}), '()\n', (363, 365), False, 'from timeeval.adapters import FunctionAdapter\n'), ((527, 553), 'timeeval.adapters.FunctionAdapter.identity', 'FunctionAdapter.identity', ([], {}), '()\n', (551, 553), False, 'from timeeval.adapters import FunctionAdapter\n'), ((718, 744), 'timeeval.adapters.FunctionAdapter.identity', 'FunctionAdapter.identity', ([], {}), '()\n', (742, 744), False, 'from timeeval.adapters import FunctionAdapter\n')] |
# Copying of array
"""
# Three ways to duplicate / reference a NumPy array shown below:
    * plain assignment (alias, no copy)
    * view (shallow copy)
    * copy (deep copy)
"""
# importing packages
import numpy as np
print("Adding two Array:")
# adding two array
arr1 = np.array([1, 3, 5, 7, 9])
arr2 = np.array([2, 4, 6, 8, 10])
# adding two array
arr = arr1 + arr2
print(arr)
print("--------------------------------")
print("Shallow copy:")
print("Type 1,")
# shallow copy
A = np.array([1,2,3,4,5])
B = A
print(id(A))
print(id(B))
# Plain assignment makes NO copy at all: B is just another name for the same
# array object (the two ids printed above are identical), so any change made
# through B is visible through A as well.
print("\nType 2,")
A = np.array([1,2,3,4,5])
B = A.view()
print(id(A))
print(id(B))
print("--------------------------------")
# view() returns a NEW array object (different id) that shares A's data
# buffer: element changes made through B are still reflected in A, which is
# why this is only a shallow copy.
print("Deep copy:")
# Deep copy: copy() allocates a new object AND a new data buffer, so B is
# fully independent of A afterwards.
A = np.array([1,2,3,4,5])
B = A.copy()
print(id(A))
print(id(B)) | [
"numpy.array"
] | [((200, 225), 'numpy.array', 'np.array', (['[1, 3, 5, 7, 9]'], {}), '([1, 3, 5, 7, 9])\n', (208, 225), True, 'import numpy as np\n'), ((234, 260), 'numpy.array', 'np.array', (['[2, 4, 6, 8, 10]'], {}), '([2, 4, 6, 8, 10])\n', (242, 260), True, 'import numpy as np\n'), ((422, 447), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (430, 447), True, 'import numpy as np\n'), ((606, 631), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (614, 631), True, 'import numpy as np\n'), ((853, 878), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (861, 878), True, 'import numpy as np\n')] |
from jax.scipy.signal import convolve2d as conv2
import jax, jax.numpy as jnp
import tqdm
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import gridspec
from .helpers import reconstruct, reconstruct_numpy, shift_factors, compute_loadings_percent_power, get_shapes, shifted_matrix_product, trim_shapes
def update_W(W, H, X, Lambda, M, L, K, smooth_kernel, eps, lambda_OrthW, lambda_L1W):
    """One multiplicative update of the factor tensor W in seqNMF.

    Args:
        W: N (features) x K (factors) x L (timepoints) factor tensor.
        H: K x T matrix of factor loadings.
        X: N x T data matrix.
        Lambda: cross-factor orthogonality regularization weight.
        M: binary mask; entries where M == 0 are imputed from the reconstruction.
        L, K: per-factor length and number of factors.
        smooth_kernel: 1 x (2L-1) box kernel used for temporal smoothing.
        eps: small constant guarding against division by zero.
        lambda_OrthW, lambda_L1W: W-orthogonality and L1 penalty weights.

    Returns:
        The updated W (same shape as the input W).
    """
    X_hat = reconstruct(W, H)
    # Impute masked-out entries of X with the current reconstruction.
    X = jnp.where(M==0, X_hat, X)
    # Numerator / denominator of the multiplicative rule: X H^T and X_hat H^T
    # evaluated at every lag 0..L-1.
    XHT = shifted_matrix_product(X,H.T,jnp.arange(L)-1,None,0)
    X_hat_HT = shifted_matrix_product(X_hat,H.T,jnp.arange(L)-1,None,0)
    # Temporally smoothed data, used by the cross-factor penalty term.
    XS = conv2(X, smooth_kernel, 'same')
    XS_HT = shifted_matrix_product(XS, H.T, jnp.arange(L)-1,None,0)
    # Regularization gradients; (1 - eye(K)) zeroes the diagonal so a factor
    # is never penalized against itself.
    dWWdW = jnp.dot(lambda_OrthW * jnp.sum(W, axis=2), 1. - jnp.eye(K))
    dRdW = Lambda * jax.vmap(lambda x: jnp.dot(x, 1-jnp.eye(K)))(XS_HT) + lambda_L1W + dWWdW
    # Multiplicative update; moveaxis restores the lag axis to position 2.
    return W * jnp.moveaxis(jnp.divide(XHT, X_hat_HT + dRdW + eps),0,2)
def seqnmf_iter(W, H, X, X_hat, Lambda, M, L, K, smooth_kernel, shift, eps,
                W_fixed, lambda_OrthW, lambda_OrthH, lambda_L1W, lambda_L1H):
    """Perform one full seqNMF iteration: H update, renormalization, W update.

    Returns:
        (W, H, X, X_hat, cost) where cost is the root-mean-square
        reconstruction error after the update and X has its masked entries
        (M == 0) re-imputed from the fresh reconstruction.
    """
    # W^T X and W^T X_hat accumulated over all L lags -- numerator and
    # denominator of the multiplicative H update.
    WTX = shifted_matrix_product(W.T,X,-jnp.arange(L)+1,0,1).sum(0)
    WTX_hat = shifted_matrix_product(W.T,X_hat,-jnp.arange(L)+1,0,1).sum(0)
    # Regularization gradients for H: cross-factor (Lambda) and H-orthogonality
    # terms; (1 - eye(K)) excludes each factor's own diagonal.
    dRdH = jnp.dot(Lambda * (1 - jnp.eye(K)), conv2(WTX, smooth_kernel, 'same'))
    dHHdH = jnp.dot(lambda_OrthH * (1 - jnp.eye(K)), conv2(H, smooth_kernel, 'same'))
    dRdH += lambda_L1H + dHHdH
    # Multiplicative update of H.
    H = H * jnp.divide(WTX, WTX_hat + dRdH + eps)
    # Optionally re-center the factors in time; eps*shift keeps W strictly
    # positive after shifting (no-op when shift is False).
    W,H = jax.lax.cond(shift, shift_factors, lambda WH: WH, (W,H))
    W = W + eps*shift
    # Rescale so every row of H has unit L2 norm, compensating inside W so the
    # reconstruction W*H is unchanged.
    norms = jnp.sqrt(jnp.sum(jnp.power(H, 2), axis=1)).T
    H = jnp.dot(jnp.diag(jnp.divide(1., norms + eps)), H)
    W = jax.vmap(jnp.dot, in_axes=(2,None), out_axes=2)(W,jnp.diag(norms))
    # Update W unless the factors are held fixed (e.g. for cross-validation).
    update = lambda w: update_W(w, H, X, Lambda, M, L, K, smooth_kernel, eps, lambda_OrthW, lambda_L1W)
    W = jax.lax.cond(not W_fixed, update, lambda w: w, W)
    X_hat = reconstruct(W, H)
    # Re-impute masked entries from the fresh reconstruction.
    X = jnp.where(M==0, X_hat, X)
    cost = jnp.sqrt(jnp.mean(jnp.power(X - X_hat, 2)))
    return W, H, X, X_hat, cost
def seqnmf(X, K=10, L=100, Lambda=.001, W_init=None, H_init=None,
           plot_it=False, max_iter=100, tol=-np.inf, shift=True, sort_factors=True,
           lambda_L1W=0, lambda_L1H=0, lambda_OrthH=0, lambda_OrthW=0, M=None, W_fixed=False):
    '''
    :param X: an N (features) by T (timepoints) data matrix to be factorized using seqNMF
    :param K: the (maximum) number of factors to search for; any unused factors will be set to all zeros
    :param L: the (maximum) number of timepoints to consider in each factor; any unused timepoints will be set to zeros
    :param Lambda: regularization parameter (default: 0.001)
    :param W_init: initial factors (if unspecified, use random initialization)
    :param H_init: initial per-timepoint factor loadings (if unspecified, initialize randomly)
    :param plot_it: if True, display progress in each update using a plot (default: False)
    :param max_iter: maximum number of iterations/updates
    :param tol: if cost is within tol of the average of the previous 5 updates, the algorithm will terminate (default: tol = -inf)
    :param shift: allow timepoint shifts in H
    :param sort_factors: sort factors by time
    :param lambda_L1W: regularization parameter for W (default: 0)
    :param lambda_L1H: regularization parameter for H (default: 0)
    :param lambda_OrthH: regularization parameter for H (default: 0)
    :param lambda_OrthW: regularization parameter for W (default: 0)
    :param M: binary mask of the same size as X, used to ignore a subset of the data during training (default: use all data)
    :param W_fixed: if true, fix factors (W), e.g. for cross validation (default: False)

    :return:
    :W: N (features) by K (factors) by L (per-factor timepoints) tensor of factors
    :H: K (factors) by T (timepoints) matrix of factor loadings (i.e. factor timecourses)
    :cost: a vector of length (number-of-iterations + 1) containing the initial cost and cost after each update (i.e. the reconstruction error)
    :loadings: the per-factor loadings-- i.e. the explanatory power of each individual factor
    :power: the total power (across all factors) explained by the full reconstruction
    '''
    assert np.all(X >= 0), 'all data values must be positive!'

    N = X.shape[0]
    T = X.shape[1] + 2 * L
    # Zero-pad L timepoints on each side so factors can overhang the edges;
    # the padding is stripped again after the optimization loop.
    X = jnp.concatenate((jnp.zeros([N, L]), X, jnp.zeros([N, L])), axis=1)

    # Random non-negative initialization scaled to the data magnitude.
    if W_init is None:
        W_init = jnp.array(np.max(X) * np.random.rand(N, K, L))
    if H_init is None:
        H_init = jnp.array(np.max(X) * np.random.rand(K, T) / np.sqrt(T / 3))
    if M is None:
        M = jnp.ones([N, T])  # no mask: use all data

    W = W_init
    H = H_init

    X_hat = reconstruct(W, H)
    # Impute masked-out entries with the initial reconstruction.
    X = jnp.where(M==0, X_hat, X)

    smooth_kernel = jnp.ones([1, (2 * L) - 1])
    eps = jnp.max(X) * 1e-6  # guards divisions inside the updates
    last_time = False

    costs = np.zeros(max_iter + 1)
    costs[0] = jnp.sqrt(jnp.mean(jnp.power(X - X_hat, 2)))

    # JIT-compile one full seqNMF iteration with all static settings closed over.
    update = jax.jit(lambda W,H,X,X_hat,Lambda: seqnmf_iter(
        W, H, X, X_hat, Lambda, M, L, K, smooth_kernel, shift, eps,
        W_fixed, lambda_OrthW, lambda_OrthH, lambda_L1W, lambda_L1H))

    for i in tqdm.trange(max_iter):
        # Stop on the final iteration or when the cost has plateaued relative
        # to the mean of the preceding window.
        # NOTE(review): costs[i + 1] is read here before any cost has been
        # stored at that index (this loop writes costs[i] below), so the
        # tolerance test compares against a zero entry -- verify the indexing
        # against the reference seqNMF implementation.
        if (i == max_iter - 1) or ((i > 6) and (costs[i + 1] + tol) > np.mean(costs[i - 6:i])):
            costs = costs[:(i + 2)]
            last_time = True
            # Final pass runs unregularized (Lambda = 0).
            if i > 0: Lambda = 0

        W, H, X, X_hat, cost = update(W, H, X, X_hat, Lambda)
        costs[i] = cost

        # Optional live progress plot (one figure per iteration).
        if plot_it:
            if i > 0:
                try:
                    h.close()
                except:
                    pass
            h = plot(W, H)
            h.suptitle(f'iteration {i}', fontsize=8)
            h.show()

        if last_time:
            break

    # Strip the zero-padding added at the start.
    X = X[:, L:-L]
    X_hat = X_hat[:, L:-L]
    H = H[:, L:-L]

    # Fraction of the data's power captured by the reconstruction.
    power = jnp.divide(jnp.sum(jnp.power(X, 2)) - jnp.sum(jnp.power(X - X_hat, 2)), jnp.sum(jnp.power(X, 2)))

    loadings = compute_loadings_percent_power(X, W, H)

    # Convert back to plain NumPy for the caller.
    W = np.array(W)
    H = np.array(H)
    power = np.array(power)
    loadings = np.array(loadings)

    # Optionally order the factors by descending explanatory power.
    if sort_factors:
        inds = np.flip(np.argsort(loadings), 0)
        loadings = loadings[inds]
        W = W[:, inds, :]
        H = H[inds, :]

    return W, H, costs, loadings, power
def plot(W, H, cmap='gray_r', factor_cmap='Spectral'):
    '''
    :param W: N (features) by K (factors) by L (per-factor timepoints) tensor of factors
    :param H: K (factors) by T (timepoints) matrix of factor loadings (i.e. factor timecourses)
    :param cmap: colormap used to draw heatmaps for the factors, factor loadings, and data reconstruction
    :param factor_cmap: colormap used to distinguish individual factors
    :return f: matplotlib figure handle
    '''
    N, K, L, T = get_shapes(W, H)
    W, H = trim_shapes(W, H, N, K, L, T)
    data_recon = reconstruct_numpy(W, H)
    # 2x2 grid: H across the top right, W down the bottom left, and the
    # reconstruction in the large bottom-right panel (top-left stays empty).
    fig = plt.figure(figsize=(5, 5))
    gs = gridspec.GridSpec(2, 2, width_ratios=[1, 4], height_ratios=[1, 4])
    ax_h = plt.subplot(gs[1])
    ax_w = plt.subplot(gs[2])
    ax_data = plt.subplot(gs[3])
    # plot W, H, and data_recon; the K factor slices of W are laid out
    # side by side as one wide heatmap.
    sns.heatmap(np.hstack(list(map(np.squeeze, np.split(W, K, axis=1)))), cmap=cmap, ax=ax_w, cbar=False)
    sns.heatmap(H, cmap=cmap, ax=ax_h, cbar=False)
    sns.heatmap(data_recon, cmap=cmap, ax=ax_data, cbar=False)
    # add dividing bars for factors of W and H, one color per factor
    factor_colors = sns.color_palette(factor_cmap, K)
    for k in np.arange(K):
        plt.sca(ax_w)
        start_w = k * L
        plt.plot([start_w, start_w], [0, N - 1], '-', color=factor_colors[k])
        plt.sca(ax_h)
        plt.plot([0, T - 1], [k, k], '-', color=factor_colors[k])
    return fig | [
"numpy.sqrt",
"numpy.random.rand",
"jax.numpy.max",
"jax.numpy.power",
"numpy.array",
"numpy.argsort",
"numpy.arange",
"numpy.mean",
"jax.numpy.eye",
"seaborn.color_palette",
"jax.numpy.divide",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.gridspec.GridSpec",
"jax.numpy.diag",
"j... | [((474, 501), 'jax.numpy.where', 'jnp.where', (['(M == 0)', 'X_hat', 'X'], {}), '(M == 0, X_hat, X)\n', (483, 501), True, 'import jax, jax.numpy as jnp\n'), ((644, 675), 'jax.scipy.signal.convolve2d', 'conv2', (['X', 'smooth_kernel', '"""same"""'], {}), "(X, smooth_kernel, 'same')\n", (649, 675), True, 'from jax.scipy.signal import convolve2d as conv2\n'), ((1566, 1623), 'jax.lax.cond', 'jax.lax.cond', (['shift', 'shift_factors', '(lambda WH: WH)', '(W, H)'], {}), '(shift, shift_factors, lambda WH: WH, (W, H))\n', (1578, 1623), False, 'import jax, jax.numpy as jnp\n'), ((1953, 2002), 'jax.lax.cond', 'jax.lax.cond', (['(not W_fixed)', 'update', '(lambda w: w)', 'W'], {}), '(not W_fixed, update, lambda w: w, W)\n', (1965, 2002), False, 'import jax, jax.numpy as jnp\n'), ((2042, 2069), 'jax.numpy.where', 'jnp.where', (['(M == 0)', 'X_hat', 'X'], {}), '(M == 0, X_hat, X)\n', (2051, 2069), True, 'import jax, jax.numpy as jnp\n'), ((4348, 4362), 'numpy.all', 'np.all', (['(X >= 0)'], {}), '(X >= 0)\n', (4354, 4362), True, 'import numpy as np\n'), ((4832, 4859), 'jax.numpy.where', 'jnp.where', (['(M == 0)', 'X_hat', 'X'], {}), '(M == 0, X_hat, X)\n', (4841, 4859), True, 'import jax, jax.numpy as jnp\n'), ((4879, 4903), 'jax.numpy.ones', 'jnp.ones', (['[1, 2 * L - 1]'], {}), '([1, 2 * L - 1])\n', (4887, 4903), True, 'import jax, jax.numpy as jnp\n'), ((4969, 4991), 'numpy.zeros', 'np.zeros', (['(max_iter + 1)'], {}), '(max_iter + 1)\n', (4977, 4991), True, 'import numpy as np\n'), ((5269, 5290), 'tqdm.trange', 'tqdm.trange', (['max_iter'], {}), '(max_iter)\n', (5280, 5290), False, 'import tqdm\n'), ((6123, 6134), 'numpy.array', 'np.array', (['W'], {}), '(W)\n', (6131, 6134), True, 'import numpy as np\n'), ((6143, 6154), 'numpy.array', 'np.array', (['H'], {}), '(H)\n', (6151, 6154), True, 'import numpy as np\n'), ((6167, 6182), 'numpy.array', 'np.array', (['power'], {}), '(power)\n', (6175, 6182), True, 'import numpy as np\n'), ((6198, 6216), 'numpy.array', 
'np.array', (['loadings'], {}), '(loadings)\n', (6206, 6216), True, 'import numpy as np\n'), ((7029, 7055), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (7039, 7055), True, 'from matplotlib import pyplot as plt\n'), ((7065, 7131), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(2)'], {'width_ratios': '[1, 4]', 'height_ratios': '[1, 4]'}), '(2, 2, width_ratios=[1, 4], height_ratios=[1, 4])\n', (7082, 7131), False, 'from matplotlib import gridspec\n'), ((7143, 7161), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (7154, 7161), True, 'from matplotlib import pyplot as plt\n'), ((7173, 7191), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (7184, 7191), True, 'from matplotlib import pyplot as plt\n'), ((7206, 7224), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[3]'], {}), '(gs[3])\n', (7217, 7224), True, 'from matplotlib import pyplot as plt\n'), ((7368, 7414), 'seaborn.heatmap', 'sns.heatmap', (['H'], {'cmap': 'cmap', 'ax': 'ax_h', 'cbar': '(False)'}), '(H, cmap=cmap, ax=ax_h, cbar=False)\n', (7379, 7414), True, 'import seaborn as sns\n'), ((7419, 7477), 'seaborn.heatmap', 'sns.heatmap', (['data_recon'], {'cmap': 'cmap', 'ax': 'ax_data', 'cbar': '(False)'}), '(data_recon, cmap=cmap, ax=ax_data, cbar=False)\n', (7430, 7477), True, 'import seaborn as sns\n'), ((7546, 7579), 'seaborn.color_palette', 'sns.color_palette', (['factor_cmap', 'K'], {}), '(factor_cmap, K)\n', (7563, 7579), True, 'import seaborn as sns\n'), ((7593, 7605), 'numpy.arange', 'np.arange', (['K'], {}), '(K)\n', (7602, 7605), True, 'import numpy as np\n'), ((1348, 1381), 'jax.scipy.signal.convolve2d', 'conv2', (['WTX', 'smooth_kernel', '"""same"""'], {}), "(WTX, smooth_kernel, 'same')\n", (1353, 1381), True, 'from jax.scipy.signal import convolve2d as conv2\n'), ((1436, 1467), 'jax.scipy.signal.convolve2d', 'conv2', (['H', 'smooth_kernel', '"""same"""'], {}), "(H, smooth_kernel, 
'same')\n", (1441, 1467), True, 'from jax.scipy.signal import convolve2d as conv2\n'), ((1513, 1550), 'jax.numpy.divide', 'jnp.divide', (['WTX', '(WTX_hat + dRdH + eps)'], {}), '(WTX, WTX_hat + dRdH + eps)\n', (1523, 1550), True, 'import jax, jax.numpy as jnp\n'), ((1769, 1817), 'jax.vmap', 'jax.vmap', (['jnp.dot'], {'in_axes': '(2, None)', 'out_axes': '(2)'}), '(jnp.dot, in_axes=(2, None), out_axes=2)\n', (1777, 1817), False, 'import jax, jax.numpy as jnp\n'), ((1819, 1834), 'jax.numpy.diag', 'jnp.diag', (['norms'], {}), '(norms)\n', (1827, 1834), True, 'import jax, jax.numpy as jnp\n'), ((4745, 4761), 'jax.numpy.ones', 'jnp.ones', (['[N, T]'], {}), '([N, T])\n', (4753, 4761), True, 'import jax, jax.numpy as jnp\n'), ((4916, 4926), 'jax.numpy.max', 'jnp.max', (['X'], {}), '(X)\n', (4923, 4926), True, 'import jax, jax.numpy as jnp\n'), ((7615, 7628), 'matplotlib.pyplot.sca', 'plt.sca', (['ax_w'], {}), '(ax_w)\n', (7622, 7628), True, 'from matplotlib import pyplot as plt\n'), ((7661, 7730), 'matplotlib.pyplot.plot', 'plt.plot', (['[start_w, start_w]', '[0, N - 1]', '"""-"""'], {'color': 'factor_colors[k]'}), "([start_w, start_w], [0, N - 1], '-', color=factor_colors[k])\n", (7669, 7730), True, 'from matplotlib import pyplot as plt\n'), ((7740, 7753), 'matplotlib.pyplot.sca', 'plt.sca', (['ax_h'], {}), '(ax_h)\n', (7747, 7753), True, 'from matplotlib import pyplot as plt\n'), ((7762, 7819), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, T - 1]', '[k, k]', '"""-"""'], {'color': 'factor_colors[k]'}), "([0, T - 1], [k, k], '-', color=factor_colors[k])\n", (7770, 7819), True, 'from matplotlib import pyplot as plt\n'), ((539, 552), 'jax.numpy.arange', 'jnp.arange', (['L'], {}), '(L)\n', (549, 552), True, 'import jax, jax.numpy as jnp\n'), ((611, 624), 'jax.numpy.arange', 'jnp.arange', (['L'], {}), '(L)\n', (621, 624), True, 'import jax, jax.numpy as jnp\n'), ((720, 733), 'jax.numpy.arange', 'jnp.arange', (['L'], {}), '(L)\n', (730, 733), True, 'import jax, jax.numpy as 
jnp\n'), ((779, 797), 'jax.numpy.sum', 'jnp.sum', (['W'], {'axis': '(2)'}), '(W, axis=2)\n', (786, 797), True, 'import jax, jax.numpy as jnp\n'), ((804, 814), 'jax.numpy.eye', 'jnp.eye', (['K'], {}), '(K)\n', (811, 814), True, 'import jax, jax.numpy as jnp\n'), ((937, 975), 'jax.numpy.divide', 'jnp.divide', (['XHT', '(X_hat_HT + dRdW + eps)'], {}), '(XHT, X_hat_HT + dRdW + eps)\n', (947, 975), True, 'import jax, jax.numpy as jnp\n'), ((1728, 1756), 'jax.numpy.divide', 'jnp.divide', (['(1.0)', '(norms + eps)'], {}), '(1.0, norms + eps)\n', (1738, 1756), True, 'import jax, jax.numpy as jnp\n'), ((2097, 2120), 'jax.numpy.power', 'jnp.power', (['(X - X_hat)', '(2)'], {}), '(X - X_hat, 2)\n', (2106, 2120), True, 'import jax, jax.numpy as jnp\n'), ((4476, 4493), 'jax.numpy.zeros', 'jnp.zeros', (['[N, L]'], {}), '([N, L])\n', (4485, 4493), True, 'import jax, jax.numpy as jnp\n'), ((4498, 4515), 'jax.numpy.zeros', 'jnp.zeros', (['[N, L]'], {}), '([N, L])\n', (4507, 4515), True, 'import jax, jax.numpy as jnp\n'), ((5025, 5048), 'jax.numpy.power', 'jnp.power', (['(X - X_hat)', '(2)'], {}), '(X - X_hat, 2)\n', (5034, 5048), True, 'import jax, jax.numpy as jnp\n'), ((6041, 6056), 'jax.numpy.power', 'jnp.power', (['X', '(2)'], {}), '(X, 2)\n', (6050, 6056), True, 'import jax, jax.numpy as jnp\n'), ((6266, 6286), 'numpy.argsort', 'np.argsort', (['loadings'], {}), '(loadings)\n', (6276, 6286), True, 'import numpy as np\n'), ((1335, 1345), 'jax.numpy.eye', 'jnp.eye', (['K'], {}), '(K)\n', (1342, 1345), True, 'import jax, jax.numpy as jnp\n'), ((1423, 1433), 'jax.numpy.eye', 'jnp.eye', (['K'], {}), '(K)\n', (1430, 1433), True, 'import jax, jax.numpy as jnp\n'), ((1675, 1690), 'jax.numpy.power', 'jnp.power', (['H', '(2)'], {}), '(H, 2)\n', (1684, 1690), True, 'import jax, jax.numpy as jnp\n'), ((4577, 4586), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (4583, 4586), True, 'import numpy as np\n'), ((4589, 4612), 'numpy.random.rand', 'np.random.rand', (['N', 'K', 'L'], {}), '(N, K, 
L)\n', (4603, 4612), True, 'import numpy as np\n'), ((4699, 4713), 'numpy.sqrt', 'np.sqrt', (['(T / 3)'], {}), '(T / 3)\n', (4706, 4713), True, 'import numpy as np\n'), ((5980, 5995), 'jax.numpy.power', 'jnp.power', (['X', '(2)'], {}), '(X, 2)\n', (5989, 5995), True, 'import jax, jax.numpy as jnp\n'), ((6007, 6030), 'jax.numpy.power', 'jnp.power', (['(X - X_hat)', '(2)'], {}), '(X - X_hat, 2)\n', (6016, 6030), True, 'import jax, jax.numpy as jnp\n'), ((4664, 4673), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (4670, 4673), True, 'import numpy as np\n'), ((4676, 4696), 'numpy.random.rand', 'np.random.rand', (['K', 'T'], {}), '(K, T)\n', (4690, 4696), True, 'import numpy as np\n'), ((5362, 5385), 'numpy.mean', 'np.mean', (['costs[i - 6:i]'], {}), '(costs[i - 6:i])\n', (5369, 5385), True, 'import numpy as np\n'), ((7305, 7327), 'numpy.split', 'np.split', (['W', 'K'], {'axis': '(1)'}), '(W, K, axis=1)\n', (7313, 7327), True, 'import numpy as np\n'), ((1183, 1196), 'jax.numpy.arange', 'jnp.arange', (['L'], {}), '(L)\n', (1193, 1196), True, 'import jax, jax.numpy as jnp\n'), ((1259, 1272), 'jax.numpy.arange', 'jnp.arange', (['L'], {}), '(L)\n', (1269, 1272), True, 'import jax, jax.numpy as jnp\n'), ((868, 878), 'jax.numpy.eye', 'jnp.eye', (['K'], {}), '(K)\n', (875, 878), True, 'import jax, jax.numpy as jnp\n')] |
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
def tank(ts=120, liters=1000, litersIn=6, litersUit=6, concGroei=0.1):
    """Simulate the concentration (kg/L) in a stirred tank over time.

    Each step, `litersIn` liters with concentration `concGroei` flow into a
    tank holding `liters` liters, and `litersUit` liters of the mixed
    contents flow out.

    Returns a tuple of the concentration after each of the ts+1 steps and
    the matching array of step indices.
    """
    concentrations = []
    level = 0
    for _ in range(ts + 1):
        # Same update rule as before: inflow adds mass, outflow removes a
        # fraction of the current concentration.
        level = level + (litersIn * concGroei / liters) - (litersUit / liters * level)
        concentrations.append(level)
    return concentrations, np.arange(ts + 1)
def plot(ts, liters, litersIn, litersUit, concGroei):
    """Run the tank simulation and plot the concentration over time.

    Parameters mirror `tank`: number of steps, tank volume (L), in-/outflow
    rates (L per step) and the inflow concentration (kg/L).
    """
    conc, tsArr = tank(ts=ts, liters=liters, litersIn=litersIn, litersUit=litersUit, concGroei=concGroei)
    sns.set(context="notebook")
    # seaborn >= 0.12 no longer accepts positional data vectors for lineplot,
    # so pass x/y as keywords (identical behavior on older versions).
    sns.lineplot(x=tsArr, y=conc, label=f"In={litersIn} Uit={litersUit} Groei={concGroei}")
    plt.xlim(0, ts)
    plt.ylim(bottom=0)
    plt.title(f"Concentratie in kg/L in tank van {liters} liters")
    plt.xlabel("Aantal stappen")
    plt.ylabel("Concentratie in kg/L")

    plt.legend()
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
    # Demo runs: first with outflow (5 L/step) smaller than inflow (6 L/step),
    # then with balanced in-/outflow of 6 L/step.
    plot(1200, 1000, 6, 5, 0.1)
    plot(1200, 1000, 6, 6, 0.1)
| [
"matplotlib.pylab.xlim",
"seaborn.set",
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.legend",
"matplotlib.pylab.title",
"matplotlib.pylab.xlabel",
"seaborn.lineplot",
"matplotlib.pylab.show",
"matplotlib.pylab.ylim",
"numpy.arange",
"matplotlib.pylab.ylabel"
] | [((459, 486), 'seaborn.set', 'sns.set', ([], {'context': '"""notebook"""'}), "(context='notebook')\n", (466, 486), True, 'import seaborn as sns\n'), ((491, 579), 'seaborn.lineplot', 'sns.lineplot', (['tsArr', 'conc'], {'label': 'f"""In={litersIn} Uit={litersUit} Groei={concGroei}"""'}), "(tsArr, conc, label=\n f'In={litersIn} Uit={litersUit} Groei={concGroei}')\n", (503, 579), True, 'import seaborn as sns\n'), ((580, 595), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', 'ts'], {}), '(0, ts)\n', (588, 595), True, 'import matplotlib.pylab as plt\n'), ((600, 618), 'matplotlib.pylab.ylim', 'plt.ylim', ([], {'bottom': '(0)'}), '(bottom=0)\n', (608, 618), True, 'import matplotlib.pylab as plt\n'), ((623, 685), 'matplotlib.pylab.title', 'plt.title', (['f"""Concentratie in kg/L in tank van {liters} liters"""'], {}), "(f'Concentratie in kg/L in tank van {liters} liters')\n", (632, 685), True, 'import matplotlib.pylab as plt\n'), ((690, 718), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Aantal stappen"""'], {}), "('Aantal stappen')\n", (700, 718), True, 'import matplotlib.pylab as plt\n'), ((723, 757), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Concentratie in kg/L"""'], {}), "('Concentratie in kg/L')\n", (733, 757), True, 'import matplotlib.pylab as plt\n'), ((763, 775), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (773, 775), True, 'import matplotlib.pylab as plt\n'), ((780, 798), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (796, 798), True, 'import matplotlib.pylab as plt\n'), ((803, 813), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (811, 813), True, 'import matplotlib.pylab as plt\n'), ((275, 292), 'numpy.arange', 'np.arange', (['(ts + 1)'], {}), '(ts + 1)\n', (284, 292), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.