code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, List, Optional, Set, Tuple
import numpy as np
from l5kit.data import filter_agents_by_frames, PERCEPTION_LABEL_TO_INDEX
from l5kit.dataset import EgoDataset
from l5kit.geometry.transform import yaw_as_rotation33
from l5kit.simulation.utils import disable_agents, get_frames_subset, insert_agent
@dataclass
class SimulationConfig:
""" Defines the parameters used for the simulation of ego and agents around it.
:param use_ego_gt: whether to use GT annotations for ego instead of model's outputs
:param use_agents_gt: whether to use GT annotations for agents instead of model's outputs
:param disable_new_agents: whether to disable agents that are not returned at start_frame_index
:param distance_th_far: if a tracked agent is closed than this value to ego, it will be controlled
:param distance_th_close: if a new agent is closer than this value to ego, it will be controlled
:param start_frame_index: the start index of the simulation
:param num_simulation_steps: the number of step to simulate
:param show_info: whether to show info logging during unroll
"""
use_ego_gt: bool = False
use_agents_gt: bool = False
disable_new_agents: bool = False
distance_th_far: float = 30.0
distance_th_close: float = 15.0
start_frame_index: int = 0
num_simulation_steps: Optional[int] = None
show_info: bool = False
class SimulationDataset:
def __init__(self, scene_dataset_batch: Dict[int, EgoDataset], sim_cfg: SimulationConfig) -> None:
"""This class allows to:
- rasterise the same frame across multiple scenes for ego;
- rasterise the same frame across multiple scenes for multiple agents;
- filter agents based on distance to ego;
- set ego in future frames;
- set agents in future frames;
.. note:: only vehicles (car label) are picked as agents
:param scene_dataset_batch: a mapping from scene index to EgoDataset
:param sim_cfg: the simulation config
"""
if not len(scene_dataset_batch):
raise ValueError("can't build a simulation dataset with an empty batch")
self.scene_dataset_batch: Dict[int, EgoDataset] = scene_dataset_batch
self.sim_cfg = sim_cfg
# we must limit the scenes to the part which will be simulated
# we cut each scene so that it starts from there and ends after `num_simulation_steps`
start_frame_idx = self.sim_cfg.start_frame_index
if self.sim_cfg.num_simulation_steps is None:
end_frame_idx = self.get_min_len()
else:
end_frame_idx = start_frame_idx + self.sim_cfg.num_simulation_steps
if end_frame_idx > self.get_min_len():
raise ValueError(f"can't unroll until frame {end_frame_idx}, length is {self.get_min_len()}")
for scene_idx in scene_dataset_batch:
zarr_dt = self.scene_dataset_batch[scene_idx].dataset
self.scene_dataset_batch[scene_idx].dataset = get_frames_subset(zarr_dt, start_frame_idx, end_frame_idx)
# this is the only stateful field we need to change for EgoDataset, it's used in bisect
frame_index_ends = self.scene_dataset_batch[scene_idx].dataset.scenes["frame_index_interval"][:, 1]
self.scene_dataset_batch[scene_idx].cumulative_sizes = frame_index_ends
# buffer used to keep track of tracked agents during unroll as tuples of scene_idx, agent_idx
self._agents_tracked: Set[Tuple[int, int]] = set()
if self.sim_cfg.disable_new_agents:
# we disable all agents that wouldn't be picked at frame 0
for scene_idx, dt_ego in self.scene_dataset_batch.items():
dataset_zarr = dt_ego.dataset
frame = dataset_zarr.frames[0]
ego_pos = frame["ego_translation"][:2]
agents = dataset_zarr.agents
frame_agents = filter_agents_by_frames(frame, agents)[0]
frame_agents = self._filter_agents(scene_idx, frame_agents, ego_pos)
disable_agents(dataset_zarr, allowlist=frame_agents["track_id"])
# keep track of original dataset
self.recorded_scene_dataset_batch = deepcopy(self.scene_dataset_batch)
@staticmethod
def from_dataset_indices(dataset: EgoDataset, scene_indices: List[int],
sim_cfg: SimulationConfig) -> "SimulationDataset":
"""Create a SimulationDataset by picking indices from the provided dataset
:param dataset: the EgoDataset
:param scene_indices: scenes from the EgoDataset to pick
:param sim_cfg: a simulation config
:return: the new SimulationDataset
"""
if len(np.unique(scene_indices)) != len(scene_indices):
raise ValueError(f"can't simulate repeated scenes: {scene_indices}")
if np.any(np.asarray(scene_indices) >= len(dataset.dataset.scenes)):
raise ValueError(
f"can't pick indices {scene_indices} from dataset with length: {len(dataset.dataset.scenes)}")
scene_dataset_batch: Dict[int, EgoDataset] = {} # dicts preserve insertion order
for scene_idx in scene_indices:
scene_dataset = dataset.get_scene_dataset(scene_idx)
scene_dataset_batch[scene_idx] = scene_dataset
return SimulationDataset(scene_dataset_batch, sim_cfg)
def get_min_len(self) -> int:
"""Return the minimum number of frames between the scenes
:return: the minimum number of frames
"""
return min([len(scene_dt.dataset.frames) for scene_dt in self.scene_dataset_batch.values()])
def __len__(self) -> int:
"""
Return the minimum number of frames across scenes
:return: the number of frames
"""
return self.get_min_len()
def rasterise_frame_batch(self, state_index: int) -> List[Dict[str, np.ndarray]]:
"""
Get a frame from all scenes
:param state_index: the frame index
:return: a list of dict from EgoDatasets
"""
frame_batch = []
for scene_idx, scene_dt in self.scene_dataset_batch.items():
frame = scene_dt[state_index]
frame["scene_index"] = scene_idx # set the scene to the right index
frame_batch.append(frame)
return frame_batch
def set_ego(self, state_index: int, output_index: int, ego_translations: np.ndarray,
ego_yaws: np.ndarray) -> None:
"""Mutate future frame position and yaw for ego across scenes. This acts on the underlying dataset
:param state_index: the frame index to mutate
:param output_index: the index in ego_translations and ego_yaws to use
:param ego_translations: output translations (N, T, 2)
:param ego_yaws: output yaws (N, T)
"""
if len(ego_translations) != len(ego_yaws):
raise ValueError("lengths mismatch between translations and yaws")
if len(ego_translations) != len(self.scene_dataset_batch):
raise ValueError("lengths mismatch between scenes and predictions")
if state_index >= len(self):
raise ValueError(f"trying to mutate frame:{state_index} but length is:{len(self)}")
position_m_batch = ego_translations[:, output_index, :]
angle_rad_batch = ego_yaws[:, output_index]
for i, (scene_dataset, position_m, angle_rad) in enumerate(
zip(self.scene_dataset_batch.values(), position_m_batch, angle_rad_batch)
):
scene_dataset.dataset.frames[state_index]["ego_translation"][:2] = position_m
scene_dataset.dataset.frames[state_index]["ego_rotation"] = yaw_as_rotation33(angle_rad)
def set_agents(self, state_index: int, agents_infos: Dict[Tuple[int, int], np.ndarray]) -> None:
"""Set multiple agents in the scene datasets.
:param state_index: the frame index to set (same for all datasets)
:param agents_infos: a dict mapping (scene_idx, agent_idx) to the agent array
"""
for (scene_idx, _), agent in agents_infos.items():
insert_agent(agent, state_index, self.scene_dataset_batch[scene_idx].dataset)
def rasterise_agents_frame_batch(self, state_index: int) -> Dict[Tuple[int, int], Dict[str, np.ndarray]]:
"""Rasterise agents for each scene in the batch at a given frame.
:param state_index: the frame index in the scene
:return: a dict mapping from [scene_id, track_id] to the numpy dict
"""
ret = {}
for scene_index in self.scene_dataset_batch:
ret.update(self._rasterise_agents_frame(scene_index, state_index))
return ret
def _rasterise_agents_frame(self, scene_index: int,
state_index: int) -> Dict[Tuple[int, int], Dict[str, np.ndarray]]:
"""Rasterise agents of interest for a given frame in a given scene.
:param scene_index: index of the scene
:param state_index: frame index
:return: a dict mapping [scene_idx, agent_idx] to dict
"""
# filter agents around ego based on distance and threshold
dataset = self.scene_dataset_batch[scene_index]
frame = dataset.dataset.frames[state_index]
frame_agents = filter_agents_by_frames(frame, dataset.dataset.agents)[0]
frame_agents = self._filter_agents(scene_index, frame_agents, frame["ego_translation"][:2])
# rasterise individual agents
agents_dict: Dict[Tuple[int, int], Dict[str, np.ndarray]] = {}
for agent in frame_agents:
track_id = int(agent["track_id"])
el = dataset.get_frame(scene_index=0, state_index=state_index, track_id=track_id)
# we replace the scene_index here to match the real one (otherwise is 0)
el["scene_index"] = scene_index
agents_dict[scene_index, track_id] = el
self._update_agent_infos(scene_index, frame_agents["track_id"])
return agents_dict
def _update_agent_infos(self, scene_index: int, agent_track_ids: np.ndarray) -> None:
"""Update tracked agents object such that:
- if agent was not there -> add it
- if agent is not here anymore -> remove it
This will be used next frame to control thresholds
:param scene_index: index of the scene
:param agent_track_ids: agents track ids for this frame
"""
agent_track_set = set([(scene_index, int(track_id)) for track_id in agent_track_ids])
self._agents_tracked.update(agent_track_set)
remove_els = set([k for k in self._agents_tracked if k[0] == scene_index]) - agent_track_set
for indices in remove_els:
self._agents_tracked.remove(indices)
def _filter_agents(self, scene_idx: int, frame_agents: np.ndarray,
ego_pos: np.ndarray) -> np.ndarray:
"""Filter agents according to a set of rules:
if new agent (not in tracked_agents) then:
- must be a car
- must be in distance_th_close
if tracked agent:
- must be in distance_th_far
This is to avoid acquiring and releasing the same agents if it is on the boundary of the selection
:param scene_idx: the scene index (used to check for agents_infos)
:param frame_agents: the agents in this frame
:param ego_pos: the ego position in this frame
:return: the filtered agents
"""
# keep only vehicles
car_index = PERCEPTION_LABEL_TO_INDEX["PERCEPTION_LABEL_CAR"]
vehicle_mask = frame_agents["label_probabilities"][:, car_index]
dt_agents_ths = self.scene_dataset_batch[scene_idx].cfg["raster_params"]["filter_agents_threshold"]
vehicle_mask = vehicle_mask > dt_agents_ths
frame_agents = frame_agents[vehicle_mask]
distance_mask = np.zeros(len(frame_agents), dtype=np.bool)
for idx_agent, agent in enumerate(frame_agents):
track_id = int(agent["track_id"])
distance = np.linalg.norm(ego_pos - agent["centroid"])
# for distance use two thresholds
if (scene_idx, track_id) in self._agents_tracked:
# if we're already controlling this agent, th_far
distance_mask[idx_agent] = distance < self.sim_cfg.distance_th_far
else:
# if not, start controlling it only if in th_close
distance_mask[idx_agent] = distance < self.sim_cfg.distance_th_close
return frame_agents[distance_mask]
| [
"copy.deepcopy",
"l5kit.simulation.utils.insert_agent",
"numpy.asarray",
"l5kit.simulation.utils.get_frames_subset",
"l5kit.data.filter_agents_by_frames",
"l5kit.geometry.transform.yaw_as_rotation33",
"numpy.linalg.norm",
"l5kit.simulation.utils.disable_agents",
"numpy.unique"
] | [((4310, 4344), 'copy.deepcopy', 'deepcopy', (['self.scene_dataset_batch'], {}), '(self.scene_dataset_batch)\n', (4318, 4344), False, 'from copy import deepcopy\n'), ((3087, 3145), 'l5kit.simulation.utils.get_frames_subset', 'get_frames_subset', (['zarr_dt', 'start_frame_idx', 'end_frame_idx'], {}), '(zarr_dt, start_frame_idx, end_frame_idx)\n', (3104, 3145), False, 'from l5kit.simulation.utils import disable_agents, get_frames_subset, insert_agent\n'), ((7811, 7839), 'l5kit.geometry.transform.yaw_as_rotation33', 'yaw_as_rotation33', (['angle_rad'], {}), '(angle_rad)\n', (7828, 7839), False, 'from l5kit.geometry.transform import yaw_as_rotation33\n'), ((8241, 8318), 'l5kit.simulation.utils.insert_agent', 'insert_agent', (['agent', 'state_index', 'self.scene_dataset_batch[scene_idx].dataset'], {}), '(agent, state_index, self.scene_dataset_batch[scene_idx].dataset)\n', (8253, 8318), False, 'from l5kit.simulation.utils import disable_agents, get_frames_subset, insert_agent\n'), ((9412, 9466), 'l5kit.data.filter_agents_by_frames', 'filter_agents_by_frames', (['frame', 'dataset.dataset.agents'], {}), '(frame, dataset.dataset.agents)\n', (9435, 9466), False, 'from l5kit.data import filter_agents_by_frames, PERCEPTION_LABEL_TO_INDEX\n'), ((12184, 12227), 'numpy.linalg.norm', 'np.linalg.norm', (["(ego_pos - agent['centroid'])"], {}), "(ego_pos - agent['centroid'])\n", (12198, 12227), True, 'import numpy as np\n'), ((4159, 4223), 'l5kit.simulation.utils.disable_agents', 'disable_agents', (['dataset_zarr'], {'allowlist': "frame_agents['track_id']"}), "(dataset_zarr, allowlist=frame_agents['track_id'])\n", (4173, 4223), False, 'from l5kit.simulation.utils import disable_agents, get_frames_subset, insert_agent\n'), ((4822, 4846), 'numpy.unique', 'np.unique', (['scene_indices'], {}), '(scene_indices)\n', (4831, 4846), True, 'import numpy as np\n'), ((4971, 4996), 'numpy.asarray', 'np.asarray', (['scene_indices'], {}), '(scene_indices)\n', (4981, 4996), True, 'import numpy 
as np\n'), ((4016, 4054), 'l5kit.data.filter_agents_by_frames', 'filter_agents_by_frames', (['frame', 'agents'], {}), '(frame, agents)\n', (4039, 4054), False, 'from l5kit.data import filter_agents_by_frames, PERCEPTION_LABEL_TO_INDEX\n')] |
from .net_s3fd import s3fd
from .bbox import nms, decode
import torch.nn.functional as F
import numpy as np
import cv2
import torch
class SFDDetector:
def __init__(self, device, model_path, image_info):
# Initialise the face detector
model_weights = torch.load(model_path)
torch.backends.cudnn.benchmark = True
self.w, self.h, self.input_scale = image_info
self.device = device
self.face_detector = s3fd().to(self.device)
self.face_detector.load_state_dict(model_weights)
self.face_detector.eval()
def pre_process_frame(self, frame):
img = cv2.resize(frame, (self.h, self.w))
img = img[..., ::-1]
img = img - np.array([104, 117, 123])
img = img.transpose(2, 0, 1)
img = img.reshape((1,) + img.shape)
return img
def detect_rect(self, frame, thresh):
img = self.pre_process_frame(frame)
img = torch.from_numpy(img).float().to(self.device)
with torch.no_grad():
olist = self.face_detector(img)
bboxes = []
olist = [oelem.data.cpu() for oelem in olist]
for i in range(len(olist) // 2):
ocls, oreg = olist[i * 2], olist[i * 2 + 1]
stride = 2 ** (i + 2) # 4,8,16,32,64,128
poss = zip(*np.where(ocls[:, 1, :, :] > 0.05))
for Iindex, hindex, windex in poss:
axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
score = ocls[0, 1, hindex, windex]
if score > thresh:
loc = oreg[0, :, hindex, windex].contiguous().view(1, 4)
priors = torch.Tensor([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
variances = [0.1, 0.2]
box = decode(loc, priors, variances)
x1, y1, x2, y2 = box[0] * 1.0
bboxes.append([x1, y1, x2, y2, score])
bboxes = np.array(bboxes)
return bboxes
def extract(self, frame, thresh):
bboxes = self.detect_rect(frame, thresh)
if len(bboxes) > 0:
keep = nms(bboxes, 0.3)
bboxlist = bboxes[keep, :]
# restore the rect points
detected_faces = []
for ltrb in bboxlist:
l, t, r, b, _ = [x * self.input_scale for x in ltrb]
bt = b - t
if min(r - l, bt) < 40:
continue
b += bt * 0.1
detected_faces.append((l, t, r, b))
else:
return []
# for box in detected_faces:
# box = [int(i) for i in box]
# x, y, x2, y2 = box
# cv2.rectangle(frame, (x, y), (x2, y2), (0, 255, 255))
return detected_faces
| [
"torch.load",
"numpy.where",
"numpy.array",
"torch.Tensor",
"torch.no_grad",
"cv2.resize",
"torch.from_numpy"
] | [((272, 294), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (282, 294), False, 'import torch\n'), ((624, 659), 'cv2.resize', 'cv2.resize', (['frame', '(self.h, self.w)'], {}), '(frame, (self.h, self.w))\n', (634, 659), False, 'import cv2\n'), ((1968, 1984), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (1976, 1984), True, 'import numpy as np\n'), ((709, 734), 'numpy.array', 'np.array', (['[104, 117, 123]'], {}), '([104, 117, 123])\n', (717, 734), True, 'import numpy as np\n'), ((995, 1010), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1008, 1010), False, 'import torch\n'), ((1306, 1339), 'numpy.where', 'np.where', (['(ocls[:, 1, :, :] > 0.05)'], {}), '(ocls[:, 1, :, :] > 0.05)\n', (1314, 1339), True, 'import numpy as np\n'), ((1667, 1741), 'torch.Tensor', 'torch.Tensor', (['[[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]'], {}), '([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])\n', (1679, 1741), False, 'import torch\n'), ((936, 957), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (952, 957), False, 'import torch\n')] |
import pandas as pd
from pandas import *
from keras.models import Sequential
from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Dropout, Masking
from keras import optimizers
import numpy as np
X_batches_train = [np.array([[-1.00612917, 1.47313952, 2.68021318, 1.54875809, 0.98385996,
1.49465265, 0.60429106, 1.12396908, -0.24041602, 1.77266187,
0.1961381, 1.28019637, 1.78803092, 2.05151245, 0.93606708,
0.51554755, 0., 0., 0., 0.],
[-0.97596563, 2.04536053, 0.88367922, 1.013342, -0.16605355,
3.02994344, 2.04080806, -0.25153046, -0.5964068, 2.9607247,
-0.49722121, 0.02734492, 2.16949987, 2.77367066, 0.15628842,
2.19823207, 0., 0., 0., 0.],
[0.31546283, 3.27420503, 3.23550769, -0.63724013, 0.89150128,
0.69774266, 2.76627308, -0.58408384, -0.45681779, 1.98843041,
-0.31850477, 0.83729882, 0.45471165, 3.61974147, -1.45610756,
1.35217453, 0., 0., 0., 0.],
[1.03329532, 1.97471646, 1.33949611, 1.22857243, -1.46890642,
1.74105506, 1.40969261, 0.52465603, -0.18895266, 2.81025597,
2.64901037, -0.83415186, 0.76956826, 1.48730868, -0.16190164,
2.24389007, 0., 0., 0., 0.],
[-1.0676654, 3.08429323, 1.7601179, 0.85448051, 1.15537064,
2.82487842, 0.27891413, 0.57842569, -0.62392063, 1.00343057,
1.15348843, -0.37650332, 3.37355345, 2.22285473, 0.43444434,
0.15743873, 0., 0., 0., 0.]]),
np.array([[1.05258873, -0.17897376, -0.99932932, -1.02854121, 0.85159208,
2.32349131, 1.96526709, -0.08398597, -0.69474809, 1.32820222,
1.19514151, 1.56814867, 0.86013263, 1.48342922, 0.,
0., 0., 0., 0., 0.],
[0.1920635, -0.48702788, 1.24353985, -1.3864121, 0.16713229,
3.10134683, 0.61658271, -0.63360643, 0.86000807, 2.74876157,
2.87604877, 0.16339724, 2.87595396, 3.2846962, 0.,
0., 0., 0., 0., 0.],
[0.1380241, -0.76783029, 0.18814436, -1.18165209, -0.02981728,
1.49908113, 0.61521007, -0.98191097, 0.31250199, 1.39015803,
3.16213211, -0.70891214, 3.83881766, 1.92683533, 0.,
0., 0., 0., 0., 0.],
[1.39080778, -0.59179216, 0.80348201, 0.64638205, -1.40144268,
1.49751413, 3.0092166, 1.33099666, 1.43714841, 2.90734268,
3.09688943, 0.32934884, 1.14592787, 1.58152023, 0.,
0., 0., 0., 0., 0.],
[-0.77164353, 0.50293096, 0.0717377, 0.14487556, -0.90246591,
2.32612179, 1.98628857, 1.29683166, -0.12399569, 2.60184685,
3.20136653, 0.44056647, 0.98283455, 1.79026663, 0.,
0., 0., 0., 0., 0.]]),
np.array([[-0.93359914, 2.31840281, 0.55691601, 1.90930758, -1.58260431,
-1.05801881, 3.28012523, 3.84105406, -1.2127093, 0.00490079,
1.28149304, 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[-1.03105486, 2.7703693, 0.16751813, 1.12127987, -0.44070271,
-0.0789227, 2.79008301, 1.11456745, 1.13982551, -1.10128658,
0.87430834, 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[-0.69710668, 1.72702833, -2.62599502, 2.34730002, 0.77756661,
0.16415884, 3.30712178, 1.67331828, -0.44022431, 0.56837829,
1.1566811, 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[-0.71845983, 1.79908544, 0.37385522, 1.3870915, -1.48823234,
-1.487419, 3.0879945, 1.74617784, -0.91538815, -0.24244522,
0.81393954, 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[-1.38501563, 3.73330047, -0.52494265, 2.37133716, -0.24546709,
-0.28360782, 2.89384717, 2.42891743, 0.40144022, -1.21850571,
2.00370751, 0., 0., 0., 0.,
0., 0., 0., 0., 0.]]),
np.array([[1.27989188, 1.16254538, -0.06889142, 1.84133355, 1.3234908,
1.29611702, 2.0019294, -0.03220116, 1.1085194, 1.96495985,
1.68544302, 1.94503544, 0., 0., 0.,
0., 0., 0., 0., 0.],
[1.3004439, 2.48768923, 0.59809607, 2.38155155, 2.78705889,
1.67018683, 0.21731778, -0.59277191, 2.87427207, 2.63950475,
2.39211459, 0.93083423, 0., 0., 0.,
0., 0., 0., 0., 0.],
[2.39239371, 0.30900383, -0.97307155, 1.98100711, 0.30613735,
1.12827171, 0.16987791, 0.31959096, 1.30366416, 1.45881023,
2.45668401, 0.5218711, 0., 0., 0.,
0., 0., 0., 0., 0.],
[0.0826574, 2.05100254, 0.013161, 2.95120798, 1.15730011,
0.75537024, 0.13708569, -0.44922143, 0.64834001, 2.50640862,
2.00349347, 3.35573624, 0., 0., 0.,
0., 0., 0., 0., 0.],
[0.47135124, 2.10258532, 0.70212032, 2.56063126, 1.62466971,
2.64026892, 0.21309489, -0.57752813, 2.21335957, 0.20453233,
0.03106993, 3.01167822, 0., 0., 0.,
0., 0., 0., 0., 0.]]),
np.array([[-0.42125521, 0.54016939, 1.63016057, 2.01555253, -0.10961255,
-0.42549555, 1.55793753, -0.0998756, 0.36417335, 3.37126414,
1.62151191, 2.84084192, 0.10831384, 0.89293054, -0.08671363,
0.49340353, 0., 0., 0., 0.],
[-0.37615411, 2.00581062, 2.30426605, 2.02205839, 0.65871664,
1.34478836, -0.55379752, -1.42787727, 0.59732227, 0.84969282,
0.54345723, 0.95849568, -0.17131602, -0.70425277, -0.5337757,
1.78207229, 0., 0., 0., 0.],
[-0.13863276, 1.71490034, 2.02677925, 2.60608619, 0.26916522,
0.35928298, -1.26521844, -0.59859219, 1.19162219, 1.64565259,
1.16787165, 2.95245196, 0.48681084, 1.66621053, 0.918077,
-1.10583747, 0., 0., 0., 0.],
[0.87763797, 2.38740754, 2.9111822, 2.21184069, 0.78091173,
-0.53270909, 0.40100338, -0.83375593, 0.9860009, 2.43898437,
-0.64499989, 2.95092003, -1.52360727, 0.44640918, 0.78131922,
-0.24401283, 0., 0., 0., 0.],
[0.92615066, 3.45437746, 3.28808981, 2.87207404, -1.60027223,
-1.14164941, -1.63807699, 0.33084805, 2.92963629, 3.51170824,
-0.3286093, 2.19108385, 0.97812366, -1.82565766, -0.34034678,
-2.0485913, 0., 0., 0., 0.]]),
np.array([[1.96438618e+00, 1.88104784e-01, 1.61114494e+00,
6.99567690e-04, 2.55271963e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00],
[2.41578815e+00, -5.70625661e-01, 2.15545894e+00,
-1.80948908e+00, 1.62049331e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00],
[1.97017040e+00, -1.62556528e+00, 2.49469152e+00,
4.18785985e-02, 2.61875866e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00],
[3.14277819e+00, 3.01098398e-02, 7.40376369e-01,
1.76517344e+00, 2.68922918e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00],
[2.06250296e+00, 4.67605528e-01, 1.55927230e+00,
1.85788889e-01, 1.30359922e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00]]),
np.array([[1.22152427, 3.74926839, 0.64415552, 2.35268329, 1.98754653,
2.89384829, 0.44589817, 3.94228743, 2.72405657, 0.86222004,
0.68681903, 3.89952458, 1.43454512, 0., 0.,
0., 0., 0., 0., 0.],
[-0.02203262, 0.95065123, 0.71669023, 0.02919391, 2.30714524,
1.91843002, 0.73611294, 1.20560482, 0.85206836, -0.74221506,
-0.72886308, 2.39872927, -0.95841402, 0., 0.,
0., 0., 0., 0., 0.],
[0.55775319, 0.33773314, 0.79932151, 1.94966883, 3.2113281,
2.70768249, -0.69745554, 1.23208345, 1.66199957, 1.69894081,
0.13124461, 1.93256147, -0.17787952, 0., 0.,
0., 0., 0., 0., 0.],
[0.45089205, 2.62430534, -1.9517961, 2.24040577, 1.75642049,
1.94962325, 0.26796497, 2.28418304, 1.44944487, 0.28723885,
-0.81081633, 1.54840214, 0.82652939, 0., 0.,
0., 0., 0., 0., 0.],
[1.27678173, 1.17204606, -0.24738322, 1.02761617, 1.81060444,
2.37830861, 0.55260134, 2.50046334, 1.04652821, 0.03467176,
-2.07336654, 1.2628897, 0.61604732, 0., 0.,
0., 0., 0., 0., 0.]]),
np.array([[3.86138405, 2.35068317, -1.90187438, 0.600788, 0.18011722,
1.3469559, -0.54708828, 1.83798823, -0.01957845, 2.88713217,
3.1724991, 2.90802072, 0., 0., 0.,
0., 0., 0., 0., 0.],
[1.26785642, 0.51076756, 0.32070756, 2.33758816, 2.08146669,
-0.60796736, 0.93777509, 2.70474711, 0.44785738, 1.61720609,
1.52890594, 3.03072971, 0., 0., 0.,
0., 0., 0., 0., 0.],
[3.30219394, 3.1515445, 1.16550716, 2.07489374, 0.66441859,
0.97529244, 0.35176367, 1.22593639, -1.80698271, 1.19936482,
3.34017172, 2.15960657, 0., 0., 0.,
0., 0., 0., 0., 0.],
[2.34839018, 2.24827352, -1.61070856, 2.81044265, -1.21423372,
0.24633846, -0.82196609, 2.28616568, 0.033922, 2.7557593,
1.16178372, 3.66959512, 0., 0., 0.,
0., 0., 0., 0., 0.],
[1.32913219, 1.63231852, 0.58642744, 1.55873546, 0.86354741,
2.06654246, -0.44036504, 3.22723595, 1.33279468, 0.05975892,
2.48518999, 3.44690602, 0., 0., 0.,
0., 0., 0., 0., 0.]]),
np.array([[0.61424344, -1.03068819, -1.47929328, 2.91514641, 2.06867196,
1.90384921, -0.45835234, 1.22054782, 0.67931536, 0.,
0., 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[2.76480464, 1.12442631, -2.36004758, 2.91912726, 1.67891181,
3.76873596, -0.93874096, -0.32397781, -0.55732374, 0.,
0., 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[0.39953353, -1.26828104, 0.44482517, 2.85604975, 3.08891062,
2.60268725, -0.15785176, 1.58549879, -0.32948578, 0.,
0., 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[1.65156484, -1.56545168, -1.42771206, 2.74216475, 1.8758154,
3.51169147, 0.18353058, -0.14704149, 0.00442783, 0.,
0., 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[1.27736372, 0.37407608, -1.25713475, 0.53171176, 1.53714914,
0.21015523, -1.06850669, -0.09755327, -0.92373834, 0.,
0., 0., 0., 0., 0.,
0., 0., 0., 0., 0.]]),
np.array([[-1.39160433, 0.21014669, -0.89792475, 2.6702794, 1.54610601,
0.84699037, 2.96726482, 1.84236946, 0.02211578, 0.32842575,
1.02718924, 1.78447936, -1.20056829, 2.26699318, -0.23156537,
2.50124959, 1.93372501, 0.10264369, -1.70813962, 0.],
[0.38823591, -1.30348049, -0.31599117, 2.60044143, 2.32929389,
1.40348483, 3.25758736, 1.92210728, -0.34150988, -1.22336921,
2.3567069, 1.75456835, 0.28295694, 0.68114898, -0.457843,
1.83372069, 2.10177851, -0.26664178, -0.26549595, 0.],
[0.08540346, 0.71507504, 1.78164285, 3.04418137, 1.52975256,
3.55159169, 3.21396003, 3.22720346, 0.68147142, 0.12466013,
-0.4122895, 1.97986653, 1.51671949, 2.06096825, -0.6765908,
2.00145086, 1.73723014, 0.50186043, -2.27525744, 0.],
[0.00632717, 0.3050794, -0.33167875, 1.48109172, 0.19653696,
1.97504239, 2.51595821, 1.74499313, -1.65198805, -1.04424953,
-0.23786945, 1.18639347, -0.03568057, 3.82541131, 2.84039446,
2.88325909, 1.79827675, -0.80230291, 0.08165052, 0.],
[0.89980086, 0.34690991, -0.60806566, 1.69472308, 1.38043417,
0.97139487, 0.21977176, 1.01340944, -1.69946943, -0.01775586,
-0.35851919, 1.81115864, 1.15105661, 1.21410373, 1.50667558,
1.70155313, 3.1410754, -0.54806167, -0.51879299, 0.]])]
y_batches_train = [np.array([1., 2., 2., 1., 1., 2., 2., 1., 1., 2., 1., 1., 2., 2., 1., 2., 0.,
0., 0., 0.]),
np.array([1., 1., 1., 1., 1., 2., 2., 1., 1., 2., 2., 1., 2., 2., 0., 0., 0.,
0., 0., 0.]),
np.array([1., 2., 1., 2., 1., 1., 2., 2., 1., 1., 2., 0., 0., 0., 0., 0., 0.,
0., 0., 0.]),
np.array([2., 2., 1., 2., 2., 2., 1., 1., 2., 2., 2., 2., 0., 0., 0., 0., 0.,
0., 0., 0.]),
np.array([1., 2., 2., 2., 1., 1., 1., 1., 2., 2., 1., 2., 1., 1., 1., 1., 0.,
0., 0., 0.]),
np.array([2., 1., 2., 1., 2., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.]),
np.array([1., 2., 1., 2., 2., 2., 1., 2., 2., 1., 1., 2., 1., 0., 0., 0., 0.,
0., 0., 0.]),
np.array([2., 2., 1., 2., 1., 1., 1., 2., 1., 2., 2., 2., 0., 0., 0., 0., 0.,
0., 0., 0.]),
np.array([2., 1., 1., 2., 2., 2., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.]),
np.array([1., 1., 1., 2., 2., 2., 2., 2., 1., 1., 1., 2., 1., 2., 1., 2., 2.,
1., 1., 0.])]
X_batches_test = [np.array([[0.74119496, 1.97273418, 1.76675805, 0.51484268, 1.39422086,
2.97184667, -1.35274514, 2.08825434, -1.2521965, 1.11556387,
0.19776789, 2.38259223, -0.57140597, -0.79010112, 0.17038974,
1.28075761, 0.696398, 3.0920007, -0.41138503, 0.],
[-1.39081797, 0.41079718, 3.03698894, -2.07333633, 2.05575621,
2.73222939, -0.98182787, 1.06741172, -1.36310914, 0.20174856,
0.35323654, 2.70305775, 0.52549713, -0.7786237, 1.80857093,
0.96830907, -0.23610863, 1.28160768, 0.7026651, 0.],
[1.16357113, 0.43907935, 3.40158623, -0.73923043, 1.484668,
1.52809569, -0.02347205, 1.65349967, 1.79635118, -0.46647772,
-0.78400883, 0.82695404, -1.34932627, -0.3200281, 2.84417045,
0.01534261, 0.10047148, 2.70769609, -1.42669461, 0.],
[-1.05475682, 3.45578027, 1.58589338, -0.55515227, 2.13477478,
1.86777473, 0.61550335, 1.05781415, -0.45297406, -0.04317595,
-0.15255388, 0.74669395, -1.43621979, 1.06229278, 0.99792794,
1.24391783, -1.86484584, 1.92802343, 0.56148011, 0.],
[-0.0835337, 1.89593955, 1.65769335, -0.93622246, 1.05002869,
1.49675624, -0.00821712, 1.71541053, 2.02408452, 0.59011484,
0.72719784, 3.44801858, -0.00957537, 0.37176007, 1.93481168,
2.23125062, 1.67910471, 2.80923862, 0.34516993, 0.]]),
np.array([[0.40691415, 2.31873444, -0.83458005, -0.17018249, -0.39177831,
1.90353251, 2.98241467, 0.32808584, 3.09429553, 2.27183083,
3.09576659, 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[1.6862473, 1.0690102, -0.07415598, -0.09846767, 1.14562424,
2.52211963, 1.71911351, 0.41879894, 1.62787544, 3.50533394,
2.69963456, 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[3.27824216, 2.25067953, 0.40017321, -1.36011162, -1.41010106,
0.98956203, 2.30881584, -0.29496046, 2.29748247, 3.24940966,
1.06431776, 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[2.80167214, 3.88324559, -0.6984172, 0.81889567, 1.86945352,
3.07554419, 3.10357189, 1.31426767, 0.28163147, 2.75559628,
2.00866885, 0., 0., 0., 0.,
0., 0., 0., 0., 0.],
[1.54574419, 1.00720596, -1.55418837, 0.70823839, 0.14715209,
1.03747262, 0.82988672, -0.54006372, 1.4960777, 0.34578788,
1.10558132, 0., 0., 0., 0.,
0., 0., 0., 0., 0.]])]
y_batches_test = [np.array([1., 2., 2., 1., 2., 2., 1., 2., 1., 1., 1., 2., 1., 1., 2., 2., 1.,
2., 1., 0.]),
np.array([2., 2., 1., 1., 1., 2., 2., 1., 2., 2., 2., 0., 0., 0., 0., 0., 0.,
0., 0., 0.])]
X_batches_train = np.array([x.reshape((20, 5)) for x in X_batches_train])
X_batches_test = np.array([x.reshape((20, 5)) for x in X_batches_test])
y_batches_train = np.array([y.reshape((20, 1)) for y in y_batches_train])
y_batches_test = np.array([y.reshape((20, 1)) for y in y_batches_test])
# fix 1: label shifting. i.e., [0, 1, 2] -> [0, 0, 1]
for i in range(len(y_batches_train)):
y_batches_train[i][y_batches_train[i] == 1] = 0
y_batches_train[i][y_batches_train[i] == 2] = 1
for i in range(len(y_batches_test)):
y_batches_test[i][y_batches_test[i] == 1] = 0
y_batches_test[i][y_batches_test[i] == 2] = 1
data_train = list(zip(X_batches_train, y_batches_train))
data_test = list(zip(X_batches_test, y_batches_test))
# *** model initialization ***
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(20, 5))) # <- masking layer here
model.add(Bidirectional(LSTM(20, return_sequences=True), input_shape=(20, 5)))
model.add(Dropout(0.2))
# fix 2: output neuron number 3 -> 2
model.add(TimeDistributed(Dense(2, activation='sigmoid')))
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd, metrics=['mse'])
# *** model training ***
history = model.fit(X_batches_train, y_batches_train, epochs=10, batch_size=20, verbose=1)
| [
"keras.layers.Masking",
"keras.optimizers.SGD",
"keras.layers.Dropout",
"keras.layers.LSTM",
"keras.layers.Dense",
"numpy.array",
"keras.models.Sequential"
] | [((22876, 22888), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (22886, 22888), False, 'from keras.models import Sequential\n'), ((23182, 23247), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (23196, 23247), False, 'from keras import optimizers\n'), ((239, 1398), 'numpy.array', 'np.array', (['[[-1.00612917, 1.47313952, 2.68021318, 1.54875809, 0.98385996, 1.49465265, \n 0.60429106, 1.12396908, -0.24041602, 1.77266187, 0.1961381, 1.28019637,\n 1.78803092, 2.05151245, 0.93606708, 0.51554755, 0.0, 0.0, 0.0, 0.0], [-\n 0.97596563, 2.04536053, 0.88367922, 1.013342, -0.16605355, 3.02994344, \n 2.04080806, -0.25153046, -0.5964068, 2.9607247, -0.49722121, 0.02734492,\n 2.16949987, 2.77367066, 0.15628842, 2.19823207, 0.0, 0.0, 0.0, 0.0], [\n 0.31546283, 3.27420503, 3.23550769, -0.63724013, 0.89150128, 0.69774266,\n 2.76627308, -0.58408384, -0.45681779, 1.98843041, -0.31850477, \n 0.83729882, 0.45471165, 3.61974147, -1.45610756, 1.35217453, 0.0, 0.0, \n 0.0, 0.0], [1.03329532, 1.97471646, 1.33949611, 1.22857243, -1.46890642,\n 1.74105506, 1.40969261, 0.52465603, -0.18895266, 2.81025597, 2.64901037,\n -0.83415186, 0.76956826, 1.48730868, -0.16190164, 2.24389007, 0.0, 0.0,\n 0.0, 0.0], [-1.0676654, 3.08429323, 1.7601179, 0.85448051, 1.15537064, \n 2.82487842, 0.27891413, 0.57842569, -0.62392063, 1.00343057, 1.15348843,\n -0.37650332, 3.37355345, 2.22285473, 0.43444434, 0.15743873, 0.0, 0.0, \n 0.0, 0.0]]'], {}), '([[-1.00612917, 1.47313952, 2.68021318, 1.54875809, 0.98385996, \n 1.49465265, 0.60429106, 1.12396908, -0.24041602, 1.77266187, 0.1961381,\n 1.28019637, 1.78803092, 2.05151245, 0.93606708, 0.51554755, 0.0, 0.0, \n 0.0, 0.0], [-0.97596563, 2.04536053, 0.88367922, 1.013342, -0.16605355,\n 3.02994344, 2.04080806, -0.25153046, -0.5964068, 2.9607247, -0.49722121,\n 0.02734492, 2.16949987, 2.77367066, 0.15628842, 
2.19823207, 0.0, 0.0, \n 0.0, 0.0], [0.31546283, 3.27420503, 3.23550769, -0.63724013, 0.89150128,\n 0.69774266, 2.76627308, -0.58408384, -0.45681779, 1.98843041, -\n 0.31850477, 0.83729882, 0.45471165, 3.61974147, -1.45610756, 1.35217453,\n 0.0, 0.0, 0.0, 0.0], [1.03329532, 1.97471646, 1.33949611, 1.22857243, -\n 1.46890642, 1.74105506, 1.40969261, 0.52465603, -0.18895266, 2.81025597,\n 2.64901037, -0.83415186, 0.76956826, 1.48730868, -0.16190164, \n 2.24389007, 0.0, 0.0, 0.0, 0.0], [-1.0676654, 3.08429323, 1.7601179, \n 0.85448051, 1.15537064, 2.82487842, 0.27891413, 0.57842569, -0.62392063,\n 1.00343057, 1.15348843, -0.37650332, 3.37355345, 2.22285473, 0.43444434,\n 0.15743873, 0.0, 0.0, 0.0, 0.0]])\n', (247, 1398), True, 'import numpy as np\n'), ((1918, 3005), 'numpy.array', 'np.array', (['[[1.05258873, -0.17897376, -0.99932932, -1.02854121, 0.85159208, 2.32349131,\n 1.96526709, -0.08398597, -0.69474809, 1.32820222, 1.19514151, \n 1.56814867, 0.86013263, 1.48342922, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 0.1920635, -0.48702788, 1.24353985, -1.3864121, 0.16713229, 3.10134683,\n 0.61658271, -0.63360643, 0.86000807, 2.74876157, 2.87604877, 0.16339724,\n 2.87595396, 3.2846962, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1380241, -\n 0.76783029, 0.18814436, -1.18165209, -0.02981728, 1.49908113, \n 0.61521007, -0.98191097, 0.31250199, 1.39015803, 3.16213211, -\n 0.70891214, 3.83881766, 1.92683533, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 1.39080778, -0.59179216, 0.80348201, 0.64638205, -1.40144268, \n 1.49751413, 3.0092166, 1.33099666, 1.43714841, 2.90734268, 3.09688943, \n 0.32934884, 1.14592787, 1.58152023, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-\n 0.77164353, 0.50293096, 0.0717377, 0.14487556, -0.90246591, 2.32612179,\n 1.98628857, 1.29683166, -0.12399569, 2.60184685, 3.20136653, 0.44056647,\n 0.98283455, 1.79026663, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[1.05258873, -0.17897376, -0.99932932, -1.02854121, 0.85159208, \n 2.32349131, 1.96526709, -0.08398597, -0.69474809, 1.32820222, \n 
1.19514151, 1.56814867, 0.86013263, 1.48342922, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [0.1920635, -0.48702788, 1.24353985, -1.3864121, 0.16713229, \n 3.10134683, 0.61658271, -0.63360643, 0.86000807, 2.74876157, 2.87604877,\n 0.16339724, 2.87595396, 3.2846962, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 0.1380241, -0.76783029, 0.18814436, -1.18165209, -0.02981728, \n 1.49908113, 0.61521007, -0.98191097, 0.31250199, 1.39015803, 3.16213211,\n -0.70891214, 3.83881766, 1.92683533, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 1.39080778, -0.59179216, 0.80348201, 0.64638205, -1.40144268, \n 1.49751413, 3.0092166, 1.33099666, 1.43714841, 2.90734268, 3.09688943, \n 0.32934884, 1.14592787, 1.58152023, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-\n 0.77164353, 0.50293096, 0.0717377, 0.14487556, -0.90246591, 2.32612179,\n 1.98628857, 1.29683166, -0.12399569, 2.60184685, 3.20136653, 0.44056647,\n 0.98283455, 1.79026663, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (1926, 3005), True, 'import numpy as np\n'), ((3517, 4495), 'numpy.array', 'np.array', (['[[-0.93359914, 2.31840281, 0.55691601, 1.90930758, -1.58260431, -1.05801881,\n 3.28012523, 3.84105406, -1.2127093, 0.00490079, 1.28149304, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.03105486, 2.7703693, 0.16751813,\n 1.12127987, -0.44070271, -0.0789227, 2.79008301, 1.11456745, 1.13982551,\n -1.10128658, 0.87430834, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [-0.69710668, 1.72702833, -2.62599502, 2.34730002, 0.77756661, \n 0.16415884, 3.30712178, 1.67331828, -0.44022431, 0.56837829, 1.1566811,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.71845983, 1.79908544,\n 0.37385522, 1.3870915, -1.48823234, -1.487419, 3.0879945, 1.74617784, -\n 0.91538815, -0.24244522, 0.81393954, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0], [-1.38501563, 3.73330047, -0.52494265, 2.37133716, -\n 0.24546709, -0.28360782, 2.89384717, 2.42891743, 0.40144022, -\n 1.21850571, 2.00370751, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[-0.93359914, 2.31840281, 0.55691601, 
1.90930758, -1.58260431, -\n 1.05801881, 3.28012523, 3.84105406, -1.2127093, 0.00490079, 1.28149304,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.03105486, 2.7703693, \n 0.16751813, 1.12127987, -0.44070271, -0.0789227, 2.79008301, 1.11456745,\n 1.13982551, -1.10128658, 0.87430834, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0], [-0.69710668, 1.72702833, -2.62599502, 2.34730002, \n 0.77756661, 0.16415884, 3.30712178, 1.67331828, -0.44022431, 0.56837829,\n 1.1566811, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.71845983, \n 1.79908544, 0.37385522, 1.3870915, -1.48823234, -1.487419, 3.0879945, \n 1.74617784, -0.91538815, -0.24244522, 0.81393954, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0], [-1.38501563, 3.73330047, -0.52494265, \n 2.37133716, -0.24546709, -0.28360782, 2.89384717, 2.42891743, \n 0.40144022, -1.21850571, 2.00370751, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0]])\n', (3525, 4495), True, 'import numpy as np\n'), ((4997, 5996), 'numpy.array', 'np.array', (['[[1.27989188, 1.16254538, -0.06889142, 1.84133355, 1.3234908, 1.29611702, \n 2.0019294, -0.03220116, 1.1085194, 1.96495985, 1.68544302, 1.94503544, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.3004439, 2.48768923, \n 0.59809607, 2.38155155, 2.78705889, 1.67018683, 0.21731778, -0.59277191,\n 2.87427207, 2.63950475, 2.39211459, 0.93083423, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0], [2.39239371, 0.30900383, -0.97307155, 1.98100711, \n 0.30613735, 1.12827171, 0.16987791, 0.31959096, 1.30366416, 1.45881023,\n 2.45668401, 0.5218711, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 0.0826574, 2.05100254, 0.013161, 2.95120798, 1.15730011, 0.75537024, \n 0.13708569, -0.44922143, 0.64834001, 2.50640862, 2.00349347, 3.35573624,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.47135124, 2.10258532, \n 0.70212032, 2.56063126, 1.62466971, 2.64026892, 0.21309489, -0.57752813,\n 2.21335957, 0.20453233, 0.03106993, 3.01167822, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0]]'], {}), '([[1.27989188, 1.16254538, 
-0.06889142, 1.84133355, 1.3234908, \n 1.29611702, 2.0019294, -0.03220116, 1.1085194, 1.96495985, 1.68544302, \n 1.94503544, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.3004439, \n 2.48768923, 0.59809607, 2.38155155, 2.78705889, 1.67018683, 0.21731778,\n -0.59277191, 2.87427207, 2.63950475, 2.39211459, 0.93083423, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.39239371, 0.30900383, -0.97307155, \n 1.98100711, 0.30613735, 1.12827171, 0.16987791, 0.31959096, 1.30366416,\n 1.45881023, 2.45668401, 0.5218711, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0], [0.0826574, 2.05100254, 0.013161, 2.95120798, 1.15730011, \n 0.75537024, 0.13708569, -0.44922143, 0.64834001, 2.50640862, 2.00349347,\n 3.35573624, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.47135124, \n 2.10258532, 0.70212032, 2.56063126, 1.62466971, 2.64026892, 0.21309489,\n -0.57752813, 2.21335957, 0.20453233, 0.03106993, 3.01167822, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (5005, 5996), True, 'import numpy as np\n'), ((6502, 7670), 'numpy.array', 'np.array', (['[[-0.42125521, 0.54016939, 1.63016057, 2.01555253, -0.10961255, -0.42549555,\n 1.55793753, -0.0998756, 0.36417335, 3.37126414, 1.62151191, 2.84084192,\n 0.10831384, 0.89293054, -0.08671363, 0.49340353, 0.0, 0.0, 0.0, 0.0], [\n -0.37615411, 2.00581062, 2.30426605, 2.02205839, 0.65871664, 1.34478836,\n -0.55379752, -1.42787727, 0.59732227, 0.84969282, 0.54345723, \n 0.95849568, -0.17131602, -0.70425277, -0.5337757, 1.78207229, 0.0, 0.0,\n 0.0, 0.0], [-0.13863276, 1.71490034, 2.02677925, 2.60608619, 0.26916522,\n 0.35928298, -1.26521844, -0.59859219, 1.19162219, 1.64565259, \n 1.16787165, 2.95245196, 0.48681084, 1.66621053, 0.918077, -1.10583747, \n 0.0, 0.0, 0.0, 0.0], [0.87763797, 2.38740754, 2.9111822, 2.21184069, \n 0.78091173, -0.53270909, 0.40100338, -0.83375593, 0.9860009, 2.43898437,\n -0.64499989, 2.95092003, -1.52360727, 0.44640918, 0.78131922, -\n 0.24401283, 0.0, 0.0, 0.0, 0.0], [0.92615066, 3.45437746, 3.28808981, \n 2.87207404, -1.60027223, 
-1.14164941, -1.63807699, 0.33084805, \n 2.92963629, 3.51170824, -0.3286093, 2.19108385, 0.97812366, -1.82565766,\n -0.34034678, -2.0485913, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[-0.42125521, 0.54016939, 1.63016057, 2.01555253, -0.10961255, -\n 0.42549555, 1.55793753, -0.0998756, 0.36417335, 3.37126414, 1.62151191,\n 2.84084192, 0.10831384, 0.89293054, -0.08671363, 0.49340353, 0.0, 0.0, \n 0.0, 0.0], [-0.37615411, 2.00581062, 2.30426605, 2.02205839, 0.65871664,\n 1.34478836, -0.55379752, -1.42787727, 0.59732227, 0.84969282, \n 0.54345723, 0.95849568, -0.17131602, -0.70425277, -0.5337757, \n 1.78207229, 0.0, 0.0, 0.0, 0.0], [-0.13863276, 1.71490034, 2.02677925, \n 2.60608619, 0.26916522, 0.35928298, -1.26521844, -0.59859219, \n 1.19162219, 1.64565259, 1.16787165, 2.95245196, 0.48681084, 1.66621053,\n 0.918077, -1.10583747, 0.0, 0.0, 0.0, 0.0], [0.87763797, 2.38740754, \n 2.9111822, 2.21184069, 0.78091173, -0.53270909, 0.40100338, -0.83375593,\n 0.9860009, 2.43898437, -0.64499989, 2.95092003, -1.52360727, 0.44640918,\n 0.78131922, -0.24401283, 0.0, 0.0, 0.0, 0.0], [0.92615066, 3.45437746, \n 3.28808981, 2.87207404, -1.60027223, -1.14164941, -1.63807699, \n 0.33084805, 2.92963629, 3.51170824, -0.3286093, 2.19108385, 0.97812366,\n -1.82565766, -0.34034678, -2.0485913, 0.0, 0.0, 0.0, 0.0]])\n', (6510, 7670), True, 'import numpy as np\n'), ((8188, 8940), 'numpy.array', 'np.array', (['[[1.96438618, 0.188104784, 1.61114494, 0.00069956769, 2.55271963, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 2.41578815, -0.570625661, 2.15545894, -1.80948908, 1.62049331, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 1.9701704, -1.62556528, 2.49469152, 0.0418785985, 2.61875866, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 3.14277819, 0.0301098398, 0.740376369, 1.76517344, 2.68922918, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 2.06250296, 
0.467605528, 1.5592723, 0.185788889, 1.30359922, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[1.96438618, 0.188104784, 1.61114494, 0.00069956769, 2.55271963, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0], [2.41578815, -0.570625661, 2.15545894, -1.80948908, 1.62049331, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0], [1.9701704, -1.62556528, 2.49469152, 0.0418785985, 2.61875866, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0], [3.14277819, 0.0301098398, 0.740376369, 1.76517344, 2.68922918, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0], [2.06250296, 0.467605528, 1.5592723, 0.185788889, 1.30359922, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (8196, 8940), True, 'import numpy as np\n'), ((10883, 11929), 'numpy.array', 'np.array', (['[[1.22152427, 3.74926839, 0.64415552, 2.35268329, 1.98754653, 2.89384829, \n 0.44589817, 3.94228743, 2.72405657, 0.86222004, 0.68681903, 3.89952458,\n 1.43454512, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.02203262, \n 0.95065123, 0.71669023, 0.02919391, 2.30714524, 1.91843002, 0.73611294,\n 1.20560482, 0.85206836, -0.74221506, -0.72886308, 2.39872927, -\n 0.95841402, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.55775319, 0.33773314,\n 0.79932151, 1.94966883, 3.2113281, 2.70768249, -0.69745554, 1.23208345,\n 1.66199957, 1.69894081, 0.13124461, 1.93256147, -0.17787952, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0], [0.45089205, 2.62430534, -1.9517961, \n 2.24040577, 1.75642049, 1.94962325, 0.26796497, 2.28418304, 1.44944487,\n 0.28723885, -0.81081633, 1.54840214, 0.82652939, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0], [1.27678173, 1.17204606, -0.24738322, 1.02761617, \n 1.81060444, 2.37830861, 0.55260134, 2.50046334, 1.04652821, 0.03467176,\n -2.07336654, 1.2628897, 0.61604732, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[1.22152427, 
3.74926839, 0.64415552, 2.35268329, 1.98754653, \n 2.89384829, 0.44589817, 3.94228743, 2.72405657, 0.86222004, 0.68681903,\n 3.89952458, 1.43454512, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-\n 0.02203262, 0.95065123, 0.71669023, 0.02919391, 2.30714524, 1.91843002,\n 0.73611294, 1.20560482, 0.85206836, -0.74221506, -0.72886308, \n 2.39872927, -0.95841402, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 0.55775319, 0.33773314, 0.79932151, 1.94966883, 3.2113281, 2.70768249, \n -0.69745554, 1.23208345, 1.66199957, 1.69894081, 0.13124461, 1.93256147,\n -0.17787952, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.45089205, \n 2.62430534, -1.9517961, 2.24040577, 1.75642049, 1.94962325, 0.26796497,\n 2.28418304, 1.44944487, 0.28723885, -0.81081633, 1.54840214, 0.82652939,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.27678173, 1.17204606, -\n 0.24738322, 1.02761617, 1.81060444, 2.37830861, 0.55260134, 2.50046334,\n 1.04652821, 0.03467176, -2.07336654, 1.2628897, 0.61604732, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (10891, 11929), True, 'import numpy as np\n'), ((12437, 13439), 'numpy.array', 'np.array', (['[[3.86138405, 2.35068317, -1.90187438, 0.600788, 0.18011722, 1.3469559, -\n 0.54708828, 1.83798823, -0.01957845, 2.88713217, 3.1724991, 2.90802072,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.26785642, 0.51076756, \n 0.32070756, 2.33758816, 2.08146669, -0.60796736, 0.93777509, 2.70474711,\n 0.44785738, 1.61720609, 1.52890594, 3.03072971, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0], [3.30219394, 3.1515445, 1.16550716, 2.07489374, \n 0.66441859, 0.97529244, 0.35176367, 1.22593639, -1.80698271, 1.19936482,\n 3.34017172, 2.15960657, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 2.34839018, 2.24827352, -1.61070856, 2.81044265, -1.21423372, \n 0.24633846, -0.82196609, 2.28616568, 0.033922, 2.7557593, 1.16178372, \n 3.66959512, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.32913219, \n 1.63231852, 0.58642744, 1.55873546, 0.86354741, 2.06654246, -0.44036504,\n 3.22723595, 1.33279468, 0.05975892, 
2.48518999, 3.44690602, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[3.86138405, 2.35068317, -1.90187438, 0.600788, 0.18011722, \n 1.3469559, -0.54708828, 1.83798823, -0.01957845, 2.88713217, 3.1724991,\n 2.90802072, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.26785642, \n 0.51076756, 0.32070756, 2.33758816, 2.08146669, -0.60796736, 0.93777509,\n 2.70474711, 0.44785738, 1.61720609, 1.52890594, 3.03072971, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [3.30219394, 3.1515445, 1.16550716, \n 2.07489374, 0.66441859, 0.97529244, 0.35176367, 1.22593639, -1.80698271,\n 1.19936482, 3.34017172, 2.15960657, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0], [2.34839018, 2.24827352, -1.61070856, 2.81044265, -1.21423372, \n 0.24633846, -0.82196609, 2.28616568, 0.033922, 2.7557593, 1.16178372, \n 3.66959512, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.32913219, \n 1.63231852, 0.58642744, 1.55873546, 0.86354741, 2.06654246, -0.44036504,\n 3.22723595, 1.33279468, 0.05975892, 2.48518999, 3.44690602, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (12445, 13439), True, 'import numpy as np\n'), ((13945, 14853), 'numpy.array', 'np.array', (['[[0.61424344, -1.03068819, -1.47929328, 2.91514641, 2.06867196, 1.90384921,\n -0.45835234, 1.22054782, 0.67931536, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0], [2.76480464, 1.12442631, -2.36004758, 2.91912726, \n 1.67891181, 3.76873596, -0.93874096, -0.32397781, -0.55732374, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.39953353, -1.26828104,\n 0.44482517, 2.85604975, 3.08891062, 2.60268725, -0.15785176, 1.58549879,\n -0.32948578, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 1.65156484, -1.56545168, -1.42771206, 2.74216475, 1.8758154, 3.51169147,\n 0.18353058, -0.14704149, 0.00442783, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0], [1.27736372, 0.37407608, -1.25713475, 0.53171176, \n 1.53714914, 0.21015523, -1.06850669, -0.09755327, -0.92373834, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0]]'], {}), '([[0.61424344, -1.03068819, -1.47929328, 2.91514641, 2.06867196, \n 1.90384921, -0.45835234, 1.22054782, 0.67931536, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.76480464, 1.12442631, -\n 2.36004758, 2.91912726, 1.67891181, 3.76873596, -0.93874096, -\n 0.32397781, -0.55732374, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0], [0.39953353, -1.26828104, 0.44482517, 2.85604975, 3.08891062,\n 2.60268725, -0.15785176, 1.58549879, -0.32948578, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.65156484, -1.56545168, -\n 1.42771206, 2.74216475, 1.8758154, 3.51169147, 0.18353058, -0.14704149,\n 0.00442783, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 1.27736372, 0.37407608, -1.25713475, 0.53171176, 1.53714914, 0.21015523,\n -1.06850669, -0.09755327, -0.92373834, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (13953, 14853), True, 'import numpy as np\n'), ((15348, 16630), 'numpy.array', 'np.array', (['[[-1.39160433, 0.21014669, -0.89792475, 2.6702794, 1.54610601, 0.84699037, \n 2.96726482, 1.84236946, 0.02211578, 0.32842575, 1.02718924, 1.78447936,\n -1.20056829, 2.26699318, -0.23156537, 2.50124959, 1.93372501, \n 0.10264369, -1.70813962, 0.0], [0.38823591, -1.30348049, -0.31599117, \n 2.60044143, 2.32929389, 1.40348483, 3.25758736, 1.92210728, -0.34150988,\n -1.22336921, 2.3567069, 1.75456835, 0.28295694, 0.68114898, -0.457843, \n 1.83372069, 2.10177851, -0.26664178, -0.26549595, 0.0], [0.08540346, \n 0.71507504, 1.78164285, 3.04418137, 1.52975256, 3.55159169, 3.21396003,\n 3.22720346, 0.68147142, 0.12466013, -0.4122895, 1.97986653, 1.51671949,\n 2.06096825, -0.6765908, 2.00145086, 1.73723014, 0.50186043, -2.27525744,\n 0.0], [0.00632717, 0.3050794, -0.33167875, 1.48109172, 0.19653696, \n 1.97504239, 2.51595821, 1.74499313, -1.65198805, -1.04424953, -\n 0.23786945, 1.18639347, -0.03568057, 3.82541131, 2.84039446, 2.88325909,\n 1.79827675, -0.80230291, 0.08165052, 0.0], [0.89980086, 
0.34690991, -\n 0.60806566, 1.69472308, 1.38043417, 0.97139487, 0.21977176, 1.01340944,\n -1.69946943, -0.01775586, -0.35851919, 1.81115864, 1.15105661, \n 1.21410373, 1.50667558, 1.70155313, 3.1410754, -0.54806167, -0.51879299,\n 0.0]]'], {}), '([[-1.39160433, 0.21014669, -0.89792475, 2.6702794, 1.54610601, \n 0.84699037, 2.96726482, 1.84236946, 0.02211578, 0.32842575, 1.02718924,\n 1.78447936, -1.20056829, 2.26699318, -0.23156537, 2.50124959, \n 1.93372501, 0.10264369, -1.70813962, 0.0], [0.38823591, -1.30348049, -\n 0.31599117, 2.60044143, 2.32929389, 1.40348483, 3.25758736, 1.92210728,\n -0.34150988, -1.22336921, 2.3567069, 1.75456835, 0.28295694, 0.68114898,\n -0.457843, 1.83372069, 2.10177851, -0.26664178, -0.26549595, 0.0], [\n 0.08540346, 0.71507504, 1.78164285, 3.04418137, 1.52975256, 3.55159169,\n 3.21396003, 3.22720346, 0.68147142, 0.12466013, -0.4122895, 1.97986653,\n 1.51671949, 2.06096825, -0.6765908, 2.00145086, 1.73723014, 0.50186043,\n -2.27525744, 0.0], [0.00632717, 0.3050794, -0.33167875, 1.48109172, \n 0.19653696, 1.97504239, 2.51595821, 1.74499313, -1.65198805, -\n 1.04424953, -0.23786945, 1.18639347, -0.03568057, 3.82541131, \n 2.84039446, 2.88325909, 1.79827675, -0.80230291, 0.08165052, 0.0], [\n 0.89980086, 0.34690991, -0.60806566, 1.69472308, 1.38043417, 0.97139487,\n 0.21977176, 1.01340944, -1.69946943, -0.01775586, -0.35851919, \n 1.81115864, 1.15105661, 1.21410373, 1.50667558, 1.70155313, 3.1410754, \n -0.54806167, -0.51879299, 0.0]])\n', (15356, 16630), True, 'import numpy as np\n'), ((17154, 17269), 'numpy.array', 'np.array', (['[1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0,\n 2.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, \n 2.0, 1.0, 2.0, 0.0, 0.0, 0.0, 0.0])\n', (17162, 17269), True, 'import numpy as np\n'), ((17296, 17411), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 
0.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, \n 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (17304, 17411), True, 'import numpy as np\n'), ((17438, 17553), 'numpy.array', 'np.array', (['[1.0, 2.0, 1.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 2.0, 1.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (17446, 17553), True, 'import numpy as np\n'), ((17580, 17695), 'numpy.array', 'np.array', (['[2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (17588, 17695), True, 'import numpy as np\n'), ((17722, 17837), 'numpy.array', 'np.array', (['[1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0,\n 1.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, \n 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0])\n', (17730, 17837), True, 'import numpy as np\n'), ((17864, 17979), 'numpy.array', 'np.array', (['[2.0, 1.0, 2.0, 1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([2.0, 1.0, 2.0, 1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (17872, 17979), True, 'import numpy as np\n'), ((18006, 18121), 'numpy.array', 'np.array', (['[1.0, 2.0, 1.0, 2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 2.0, 1.0, 2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 1.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (18014, 18121), True, 'import numpy as np\n'), ((18148, 18263), 'numpy.array', 'np.array', (['[2.0, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([2.0, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 2.0, 2.0, 2.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0])\n', (18156, 18263), True, 'import numpy as np\n'), ((18290, 18405), 'numpy.array', 'np.array', (['[2.0, 1.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([2.0, 1.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (18298, 18405), True, 'import numpy as np\n'), ((18432, 18547), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 2.0, 1.0,\n 2.0, 2.0, 1.0, 1.0, 0.0]'], {}), '([1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, \n 2.0, 1.0, 2.0, 2.0, 1.0, 1.0, 0.0])\n', (18440, 18547), True, 'import numpy as np\n'), ((18573, 19854), 'numpy.array', 'np.array', (['[[0.74119496, 1.97273418, 1.76675805, 0.51484268, 1.39422086, 2.97184667, -\n 1.35274514, 2.08825434, -1.2521965, 1.11556387, 0.19776789, 2.38259223,\n -0.57140597, -0.79010112, 0.17038974, 1.28075761, 0.696398, 3.0920007, \n -0.41138503, 0.0], [-1.39081797, 0.41079718, 3.03698894, -2.07333633, \n 2.05575621, 2.73222939, -0.98182787, 1.06741172, -1.36310914, \n 0.20174856, 0.35323654, 2.70305775, 0.52549713, -0.7786237, 1.80857093,\n 0.96830907, -0.23610863, 1.28160768, 0.7026651, 0.0], [1.16357113, \n 0.43907935, 3.40158623, -0.73923043, 1.484668, 1.52809569, -0.02347205,\n 1.65349967, 1.79635118, -0.46647772, -0.78400883, 0.82695404, -\n 1.34932627, -0.3200281, 2.84417045, 0.01534261, 0.10047148, 2.70769609,\n -1.42669461, 0.0], [-1.05475682, 3.45578027, 1.58589338, -0.55515227, \n 2.13477478, 1.86777473, 0.61550335, 1.05781415, -0.45297406, -\n 0.04317595, -0.15255388, 0.74669395, -1.43621979, 1.06229278, \n 0.99792794, 1.24391783, -1.86484584, 1.92802343, 0.56148011, 0.0], [-\n 0.0835337, 1.89593955, 1.65769335, -0.93622246, 1.05002869, 1.49675624,\n -0.00821712, 1.71541053, 2.02408452, 0.59011484, 0.72719784, 3.44801858,\n -0.00957537, 0.37176007, 1.93481168, 2.23125062, 1.67910471, 2.80923862,\n 0.34516993, 0.0]]'], {}), 
'([[0.74119496, 1.97273418, 1.76675805, 0.51484268, 1.39422086, \n 2.97184667, -1.35274514, 2.08825434, -1.2521965, 1.11556387, 0.19776789,\n 2.38259223, -0.57140597, -0.79010112, 0.17038974, 1.28075761, 0.696398,\n 3.0920007, -0.41138503, 0.0], [-1.39081797, 0.41079718, 3.03698894, -\n 2.07333633, 2.05575621, 2.73222939, -0.98182787, 1.06741172, -\n 1.36310914, 0.20174856, 0.35323654, 2.70305775, 0.52549713, -0.7786237,\n 1.80857093, 0.96830907, -0.23610863, 1.28160768, 0.7026651, 0.0], [\n 1.16357113, 0.43907935, 3.40158623, -0.73923043, 1.484668, 1.52809569, \n -0.02347205, 1.65349967, 1.79635118, -0.46647772, -0.78400883, \n 0.82695404, -1.34932627, -0.3200281, 2.84417045, 0.01534261, 0.10047148,\n 2.70769609, -1.42669461, 0.0], [-1.05475682, 3.45578027, 1.58589338, -\n 0.55515227, 2.13477478, 1.86777473, 0.61550335, 1.05781415, -0.45297406,\n -0.04317595, -0.15255388, 0.74669395, -1.43621979, 1.06229278, \n 0.99792794, 1.24391783, -1.86484584, 1.92802343, 0.56148011, 0.0], [-\n 0.0835337, 1.89593955, 1.65769335, -0.93622246, 1.05002869, 1.49675624,\n -0.00821712, 1.71541053, 2.02408452, 0.59011484, 0.72719784, 3.44801858,\n -0.00957537, 0.37176007, 1.93481168, 2.23125062, 1.67910471, 2.80923862,\n 0.34516993, 0.0]])\n', (18581, 19854), True, 'import numpy as np\n'), ((20359, 21325), 'numpy.array', 'np.array', (['[[0.40691415, 2.31873444, -0.83458005, -0.17018249, -0.39177831, 1.90353251,\n 2.98241467, 0.32808584, 3.09429553, 2.27183083, 3.09576659, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.6862473, 1.0690102, -0.07415598,\n -0.09846767, 1.14562424, 2.52211963, 1.71911351, 0.41879894, 1.62787544,\n 3.50533394, 2.69963456, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 3.27824216, 2.25067953, 0.40017321, -1.36011162, -1.41010106, \n 0.98956203, 2.30881584, -0.29496046, 2.29748247, 3.24940966, 1.06431776,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.80167214, 3.88324559, \n -0.6984172, 0.81889567, 1.86945352, 3.07554419, 3.10357189, 
1.31426767,\n 0.28163147, 2.75559628, 2.00866885, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0], [1.54574419, 1.00720596, -1.55418837, 0.70823839, 0.14715209,\n 1.03747262, 0.82988672, -0.54006372, 1.4960777, 0.34578788, 1.10558132,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.40691415, 2.31873444, -0.83458005, -0.17018249, -0.39177831, \n 1.90353251, 2.98241467, 0.32808584, 3.09429553, 2.27183083, 3.09576659,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.6862473, 1.0690102, -\n 0.07415598, -0.09846767, 1.14562424, 2.52211963, 1.71911351, 0.41879894,\n 1.62787544, 3.50533394, 2.69963456, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0], [3.27824216, 2.25067953, 0.40017321, -1.36011162, -\n 1.41010106, 0.98956203, 2.30881584, -0.29496046, 2.29748247, 3.24940966,\n 1.06431776, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.80167214, \n 3.88324559, -0.6984172, 0.81889567, 1.86945352, 3.07554419, 3.10357189,\n 1.31426767, 0.28163147, 2.75559628, 2.00866885, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0], [1.54574419, 1.00720596, -1.55418837, 0.70823839, \n 0.14715209, 1.03747262, 0.82988672, -0.54006372, 1.4960777, 0.34578788,\n 1.10558132, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (20367, 21325), True, 'import numpy as np\n'), ((21813, 21928), 'numpy.array', 'np.array', (['[1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0,\n 2.0, 1.0, 2.0, 1.0, 0.0]'], {}), '([1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, \n 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, 0.0])\n', (21821, 21928), True, 'import numpy as np\n'), ((21953, 22068), 'numpy.array', 'np.array', (['[2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (21961, 22068), True, 'import numpy as np\n'), ((22900, 22944), 'keras.layers.Masking', 'Masking', ([], {'mask_value': '(0.0)', 
'input_shape': '(20, 5)'}), '(mask_value=0.0, input_shape=(20, 5))\n', (22907, 22944), False, 'from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Dropout, Masking\n'), ((23061, 23073), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (23068, 23073), False, 'from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Dropout, Masking\n'), ((22995, 23026), 'keras.layers.LSTM', 'LSTM', (['(20)'], {'return_sequences': '(True)'}), '(20, return_sequences=True)\n', (22999, 23026), False, 'from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Dropout, Masking\n'), ((23140, 23170), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""sigmoid"""'}), "(2, activation='sigmoid')\n", (23145, 23170), False, 'from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Dropout, Masking\n')] |
'''
@author: rohangupta
References:
https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
https://stackoverflow.com/questions/13063201/how-to-show-the-whole-image-when-using-opencv-warpperspective/20355545#20355545
'''
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math
UBIT = "rgupta24"
def __normalize(point_list):
    # type: (np.ndarray) -> (np.ndarray, np.ndarray)
    """
    Condition a set of homogeneous points for numerically stable estimation:
    translate so the centroid sits at the origin and scale by the largest
    per-axis standard deviation.

    :param point_list: 3xN array of homogeneous points (rows x, y, w)
    :return: tuple (conditioned points, 3x3 conditioning matrix applied)
    """
    centroid = np.mean(point_list[:2], axis=1)
    # Tiny epsilon guards against division by zero for degenerate point sets.
    scale = np.std(point_list[:2], axis=1).max() + 1e-9
    cond = np.diag([1 / scale, 1 / scale, 1])
    cond[0][2] = -centroid[0] / scale
    cond[1][2] = -centroid[1] / scale
    return np.dot(cond, point_list), cond
def getPerspectiveTransformMatrix(p1, p2):
    """
    Estimate the 3x3 homography H mapping points in p1 to points in p2
    using the Direct Linear Transform (DLT).

    Each correspondence (x, y) -> (u, v) contributes two rows to the
    design matrix A; the homography coefficients are the right singular
    vector of A associated with the smallest singular value (the last
    row of Vh in NumPy's SVD convention).

    :param p1: sequence of (x, y) source points, at least 4 in general position
    :param p2: sequence of (u, v) destination points, same length as p1
    :return: 3x3 homography matrix, scaled so that H[2, 2] == 1
    """
    A = []
    # Two linear constraints per point correspondence.
    for (x, y), (u, v) in zip(p1, p2):
        A.append([x, y, 1, 0, 0, 0, -u * x, -u * y, -u])
        A.append([0, 0, 0, x, y, 1, -v * x, -v * y, -v])
    A = np.asarray(A)
    _, _, Vh = np.linalg.svd(A)
    # Null-space direction of A, rescaled so the last entry is 1.
    L = Vh[-1, :] / Vh[-1, -1]
    H = L.reshape(3, 3)
    return H
def getPerspectiveTransformMatrix2(p1, p2):
    """Estimate the homography mapping p1 onto p2 with a negated-row DLT layout.

    Unlike :func:`getPerspectiveTransformMatrix`, the u-rows of all
    correspondences are stacked first, followed by all v-rows, and the result
    is normalized by H[0, 0] instead of H[2, 2]. This version no longer
    mutates numpy's global print options (a display-only side effect the
    original had); callers that print set their own options.

    :param p1: Nx2 array-like of source points (N >= 4 for a unique solution)
    :param p2: Nx2 array-like of destination points, same length as p1
    :return: 3x3 numpy array, the estimated homography with H[0, 0] == 1
    """
    A = []
    for (x, y), (u, _) in zip(p1, p2):
        A.append([-x, -y, -1, 0, 0, 0, u * x, u * y, u])
    for (x, y), (_, v) in zip(p1, p2):
        A.append([0, 0, 0, -x, -y, -1, v * x, v * y, v])
    A = np.asarray(A)
    # Smallest-singular-value right singular vector spans the null space of A.
    U, S, Vh = np.linalg.svd(A)
    H = np.reshape(Vh[-1, :], (3, 3))
    # Fix the projective scale; note this assumes H[0, 0] != 0.
    H = H / H[0, 0]
    return H
# Load the two input photographs (OpenCV reads them as BGR arrays).
mountain1 = cv2.imread("mountain1.jpg")
mountain2 = cv2.imread("mountain2.jpg")
### PART 1
# Convert to gray-scale: SIFT operates on single-channel intensity images.
grayMountain1 = cv2.cvtColor(mountain1, cv2.COLOR_BGR2GRAY)
grayMountain2 = cv2.cvtColor(mountain2, cv2.COLOR_BGR2GRAY)
# Create the SIFT detector/descriptor object.
sift = cv2.xfeatures2d.SIFT_create()
# Detect keypoints and compute their 128-d descriptors for both images.
keyp1, desc1 = sift.detectAndCompute(grayMountain1, None)
keyp2, desc2 = sift.detectAndCompute(grayMountain2, None)
# Draw the detected keypoints (in red) and save the visualisations.
keyImage1 = cv2.drawKeypoints(grayMountain1, keyp1, np.array([]), (0, 0, 255))
keyImage2 = cv2.drawKeypoints(grayMountain2, keyp2, np.array([]), (0, 0, 255))
cv2.imwrite('task1_sift1.jpg', keyImage1)
cv2.imwrite('task1_sift2.jpg', keyImage2)
### PART 2
# Brute-force matching of the SIFT descriptors (default L2 norm).
brutef = cv2.BFMatcher()
# For every descriptor in image 1, find its 2 nearest neighbours in image 2.
matches = brutef.knnMatch(desc1, desc2, k=2)
goodMatch = []
# Lowe's ratio test: keep a match only if its best distance is clearly
# smaller than the second-best, which filters ambiguous correspondences.
for m, n in matches:
	if m.distance < 0.75*n.distance:
		goodMatch.append(m)
# Draw all surviving matches and save the visualisation.
matchImage = cv2.drawMatches(mountain1, keyp1, mountain2, keyp2, goodMatch, np.array([]), (0, 0, 255), flags=2)
cv2.imwrite('task1_matches_knn.jpg', matchImage)
### PART 3
# Collect matched keypoint coordinates: source (image 1) and destination
# (image 2), shaped (N, 1, 2) as OpenCV expects.
srce_pts = np.float32([ keyp1[m.queryIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
dest_pts = np.float32([ keyp2[m.trainIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
# Flat (N, 2) views for the hand-rolled DLT estimator below.
p1=np.reshape(srce_pts,(len(srce_pts),2))
p2=np.reshape(dest_pts,(len(dest_pts),2))
#print(len(dest_pts))
#print(p1)
#	print(dest_pts.shape)
# RANSAC homography: `mask` flags which matches are inliers.
homography, mask = cv2.findHomography(srce_pts, dest_pts, cv2.RANSAC, 5.0)
#homographyMat=getPerspectiveTransformMatrix2(srce_pts, dest_pts)
# NOTE: the printed matrix comes from the hand-rolled least-squares DLT
# (no RANSAC); the warp below also uses this matrix.
homographyMat=getPerspectiveTransformMatrix(p1, p2)
np.set_printoptions(suppress=True)
print(homographyMat)
### PART 4
# Flatten the inlier mask into a plain Python list of 0/1 flags.
matchesMask = mask.ravel().tolist()
h, w = mountain1.shape[:2]
# Corner coordinates of image 1 (computed but unused further down).
pts = np.float32([[0,0], [0,h-1], [w-1,h-1], [w-1,0]]).reshape(-1, 1, 2)
# Indices of the inlier matches.
matchIndex = []
for i in range(len(matchesMask)):
	if (matchesMask[i]):
		matchIndex.append(i)
matchArray = []
for i in matchIndex:
	matchArray.append(goodMatch[i])
# Sample 10 random inlier matches; the seed is derived from the UBIT string
# so the selection is reproducible.
np.random.seed(sum([ord(c) for c in UBIT]))
randomMatch = np.random.choice(matchArray, 10, replace=False)
# Drawing parameters: red match lines, no single-point markers.
draw_params = dict(matchColor=(0, 0, 255),
                   singlePointColor=None,
                   flags=2)
# Draw the 10 sampled matches and save the visualisation.
matchImage = cv2.drawMatches(mountain1, keyp1, mountain2, keyp2, randomMatch, None, **draw_params)
cv2.imwrite('task1_matches.jpg', matchImage)
### PART 5
h1, w1 = mountain2.shape[:2]
h2, w2 = mountain1.shape[:2]
pts1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
pts2 = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)
# Project image 1's corners into image 2's frame to size the panorama canvas.
pts2_ = cv2.perspectiveTransform(pts2, homographyMat)
pts = np.concatenate((pts1, pts2_), axis=0)
# Bounding box of both images in the common frame (0.5 guards rounding).
[xmin, ymin] = np.int32(pts.min(axis=0).ravel() - 0.5)
[xmax, ymax] = np.int32(pts.max(axis=0).ravel() + 0.5)
t = [-xmin, -ymin]
# Translation that shifts everything into non-negative coordinates.
Ht = np.array([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]])
# Warp image 1 with translation*homography into the panorama canvas.
result = cv2.warpPerspective(mountain1, Ht.dot(homographyMat), (xmax-xmin, ymax-ymin))
# Paste image 2 (the reference frame) into its translated position.
result[t[1]:h1+t[1], t[0]:w1+t[0]] = mountain2
cv2.imwrite('task1_pano.jpg', result)
| [
"numpy.mean",
"numpy.linalg.svd",
"numpy.diag",
"numpy.set_printoptions",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.std",
"cv2.BFMatcher",
"numpy.reshape",
"numpy.random.choice",
"numpy.asarray",
"numpy.dot",
"numpy.concatenate",
"cv2.drawMatches",
"numpy.float32",
"cv2.imread",
"numpy.a... | [((1803, 1830), 'cv2.imread', 'cv2.imread', (['"""mountain1.jpg"""'], {}), "('mountain1.jpg')\n", (1813, 1830), False, 'import cv2\n'), ((1843, 1870), 'cv2.imread', 'cv2.imread', (['"""mountain2.jpg"""'], {}), "('mountain2.jpg')\n", (1853, 1870), False, 'import cv2\n'), ((1933, 1976), 'cv2.cvtColor', 'cv2.cvtColor', (['mountain1', 'cv2.COLOR_BGR2GRAY'], {}), '(mountain1, cv2.COLOR_BGR2GRAY)\n', (1945, 1976), False, 'import cv2\n'), ((1993, 2036), 'cv2.cvtColor', 'cv2.cvtColor', (['mountain2', 'cv2.COLOR_BGR2GRAY'], {}), '(mountain2, cv2.COLOR_BGR2GRAY)\n', (2005, 2036), False, 'import cv2\n'), ((2067, 2096), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (2094, 2096), False, 'import cv2\n'), ((2468, 2509), 'cv2.imwrite', 'cv2.imwrite', (['"""task1_sift1.jpg"""', 'keyImage1'], {}), "('task1_sift1.jpg', keyImage1)\n", (2479, 2509), False, 'import cv2\n'), ((2510, 2551), 'cv2.imwrite', 'cv2.imwrite', (['"""task1_sift2.jpg"""', 'keyImage2'], {}), "('task1_sift2.jpg', keyImage2)\n", (2521, 2551), False, 'import cv2\n'), ((2618, 2633), 'cv2.BFMatcher', 'cv2.BFMatcher', ([], {}), '()\n', (2631, 2633), False, 'import cv2\n'), ((3012, 3060), 'cv2.imwrite', 'cv2.imwrite', (['"""task1_matches_knn.jpg"""', 'matchImage'], {}), "('task1_matches_knn.jpg', matchImage)\n", (3023, 3060), False, 'import cv2\n'), ((3477, 3532), 'cv2.findHomography', 'cv2.findHomography', (['srce_pts', 'dest_pts', 'cv2.RANSAC', '(5.0)'], {}), '(srce_pts, dest_pts, cv2.RANSAC, 5.0)\n', (3495, 3532), False, 'import cv2\n'), ((3651, 3685), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (3670, 3685), True, 'import numpy as np\n'), ((4155, 4202), 'numpy.random.choice', 'np.random.choice', (['matchArray', '(10)'], {'replace': '(False)'}), '(matchArray, 10, replace=False)\n', (4171, 4202), True, 'import numpy as np\n'), ((4403, 4493), 'cv2.drawMatches', 'cv2.drawMatches', (['mountain1', 'keyp1', 'mountain2', 
'keyp2', 'randomMatch', 'None'], {}), '(mountain1, keyp1, mountain2, keyp2, randomMatch, None, **\n draw_params)\n', (4418, 4493), False, 'import cv2\n'), ((4490, 4534), 'cv2.imwrite', 'cv2.imwrite', (['"""task1_matches.jpg"""', 'matchImage'], {}), "('task1_matches.jpg', matchImage)\n", (4501, 4534), False, 'import cv2\n'), ((4762, 4807), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['pts2', 'homographyMat'], {}), '(pts2, homographyMat)\n', (4786, 4807), False, 'import cv2\n'), ((4814, 4851), 'numpy.concatenate', 'np.concatenate', (['(pts1, pts2_)'], {'axis': '(0)'}), '((pts1, pts2_), axis=0)\n', (4828, 4851), True, 'import numpy as np\n'), ((5046, 5095), 'numpy.array', 'np.array', (['[[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]]'], {}), '([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]])\n', (5054, 5095), True, 'import numpy as np\n'), ((5301, 5338), 'cv2.imwrite', 'cv2.imwrite', (['"""task1_pano.jpg"""', 'result'], {}), "('task1_pano.jpg', result)\n", (5312, 5338), False, 'import cv2\n'), ((557, 588), 'numpy.mean', 'np.mean', (['point_list[:2]'], {'axis': '(1)'}), '(point_list[:2], axis=1)\n', (564, 588), True, 'import numpy as np\n'), ((654, 692), 'numpy.diag', 'np.diag', (['[1 / max_std, 1 / max_std, 1]'], {}), '([1 / max_std, 1 / max_std, 1])\n', (661, 692), True, 'import numpy as np\n'), ((1083, 1096), 'numpy.asarray', 'np.asarray', (['A'], {}), '(A)\n', (1093, 1096), True, 'import numpy as np\n'), ((1112, 1128), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (1125, 1128), True, 'import numpy as np\n'), ((1592, 1605), 'numpy.asarray', 'np.asarray', (['A'], {}), '(A)\n', (1602, 1605), True, 'import numpy as np\n'), ((1622, 1638), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (1635, 1638), True, 'import numpy as np\n'), ((1643, 1677), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (1662, 1677), True, 'import numpy as np\n'), ((1719, 1740), 'numpy.reshape', 'np.reshape', (['L', '(3, 3)'], 
{}), '(L, (3, 3))\n', (1729, 1740), True, 'import numpy as np\n'), ((2361, 2373), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2369, 2373), True, 'import numpy as np\n'), ((2440, 2452), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2448, 2452), True, 'import numpy as np\n'), ((2975, 2987), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2983, 2987), True, 'import numpy as np\n'), ((764, 785), 'numpy.dot', 'np.dot', (['c', 'point_list'], {}), '(c, point_list)\n', (770, 785), True, 'import numpy as np\n'), ((3124, 3177), 'numpy.float32', 'np.float32', (['[keyp1[m.queryIdx].pt for m in goodMatch]'], {}), '([keyp1[m.queryIdx].pt for m in goodMatch])\n', (3134, 3177), True, 'import numpy as np\n'), ((3208, 3261), 'numpy.float32', 'np.float32', (['[keyp2[m.trainIdx].pt for m in goodMatch]'], {}), '([keyp2[m.trainIdx].pt for m in goodMatch])\n', (3218, 3261), True, 'import numpy as np\n'), ((3821, 3881), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]'], {}), '([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]])\n', (3831, 3881), True, 'import numpy as np\n'), ((4613, 4661), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h1], [w1, h1], [w1, 0]]'], {}), '([[0, 0], [0, h1], [w1, h1], [w1, 0]])\n', (4623, 4661), True, 'import numpy as np\n'), ((4687, 4735), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h2], [w2, h2], [w2, 0]]'], {}), '([[0, 0], [0, h2], [w2, h2], [w2, 0]])\n', (4697, 4735), True, 'import numpy as np\n'), ((607, 637), 'numpy.std', 'np.std', (['point_list[:2]'], {'axis': '(1)'}), '(point_list[:2], axis=1)\n', (613, 637), True, 'import numpy as np\n')] |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
def rand_choice(prob=0.5):
    """Return True with probability `prob` (by default a 50/50 chance)."""
    draw = random.random()
    return draw <= prob
def img_bounds(img):
    """Return the [min, max] indices of the non-zero extent of `img` along each of
    its two axes, concatenated into one length-4 array."""
    nonzero_cols = np.where(np.any(img, axis=0))[0]
    nonzero_rows = np.where(np.any(img, axis=1))[0]
    return np.concatenate((nonzero_cols[[0, -1]], nonzero_rows[[0, -1]]))
def in_bounds(x, y, margin, maxx, maxy):
    """Return True if (x, y) lies within the rectangle
    (margin, margin, maxx-margin, maxy-margin)."""
    x_ok = margin <= x < (maxx - margin)
    y_ok = margin <= y < (maxy - margin)
    return x_ok and y_ok
def is_empty(img):
    """Return True if `img` is empty, i.e. its maximum is not greater than its
    minimum. Phrased with `>` (not `<=`) so an all-NaN image reports True."""
    has_content = img.max() > img.min()
    return not has_content
def ensure_tuple_size(tup, dim):
    """Return a copy of `tup` with exactly `dim` values, truncating or padding
    with zeros as necessary."""
    padded = tuple(tup) + (0,) * dim
    return padded[:dim]
def zero_margins(img, margin):
    """Return True if `img` is all-zero within `margin` indices of its edges in
    dimensions 1 and 2 (dimension 0 is not inspected)."""
    edge_slabs = (
        img[:, :, :margin],
        img[:, :, -margin:],
        img[:, :margin, :],
        img[:, -margin:, :],
    )
    return not any(np.any(slab) for slab in edge_slabs)
def rescale_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):
    """Linearly rescale the values of `arr` into the range [minv, maxv].

    If `dtype` is given the array is cast to it first; a constant array is
    mapped to `minv` everywhere (avoiding a divide-by-zero).
    """
    if dtype is not None:
        arr = arr.astype(dtype)
    lo = np.min(arr)
    hi = np.max(arr)
    if lo == hi:
        # flat image: no spread to normalize, collapse to minv
        return arr * minv
    normalized = (arr - lo) / (hi - lo)  # map [lo, hi] -> [0, 1]
    return (normalized * (maxv - minv)) + minv
def rescale_instance_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):
    """Rescale every slice along the first dimension of `arr` independently
    into the range [minv, maxv]."""
    out = np.zeros(arr.shape, dtype)
    for idx, channel in enumerate(arr):
        out[idx] = rescale_array(channel, minv, maxv, dtype)
    return out
def rescale_array_int_max(arr, dtype=np.uint16):
    """Rescale `arr` to span the full value range of the integer type `dtype`."""
    type_info = np.iinfo(dtype)
    scaled = rescale_array(arr, type_info.min, type_info.max)
    return scaled.astype(dtype)
def copypaste_arrays(src, dest, srccenter, destcenter, dims):
    """
    Calculate the slices for copying a sliced area of array `src` into array `dest`.

    The copied area has dimensions `dims` (use 0 or None to take everything in
    that dimension); it is centered at index `srccenter` in `src` and pasted
    centered at `destcenter` in `dest`. The area is clipped to fit inside both
    arrays, so less than `dims` may be copied. Returns the tuple of slices
    indexing the area in `src` and the tuple indexing the area in `dest`.

    Example:
        src = np.random.randint(0, 10, (6, 6))
        dest = np.zeros_like(src)
        srcslices, destslices = copypaste_arrays(src, dest, (3, 2), (2, 1), (3, 4))
        dest[destslices] = src[srcslices]
    """
    src_slices = [slice(None)] * src.ndim
    dest_slices = [slice(None)] * dest.ndim

    axes = zip(src.shape, dest.shape, srccenter, destcenter, dims)
    for axis, (src_len, dest_len, src_c, dest_c, dim) in enumerate(axes):
        if not dim:
            continue  # 0/None means: keep the default full slice on this axis
        # extent before the midpoint, clipped to fit inside both arrays
        before = np.clip(dim // 2, 0, min(src_c, dest_c))
        # extent after the midpoint, clipped to fit inside both arrays
        after = np.clip(dim // 2 + 1, 0, min(src_len - src_c, dest_len - dest_c))
        src_slices[axis] = slice(src_c - before, src_c + after)
        dest_slices[axis] = slice(dest_c - before, dest_c + after)

    return tuple(src_slices), tuple(dest_slices)
def resize_center(img, *resize_dims, fill_value=0):
    """
    Resize `img` by cropping or padding around its center.

    `resize_dims` are the output dimensions (None falls back to the matching
    dimension of `img`). Dimensions smaller than `img`'s are center-cropped;
    larger ones are padded with `fill_value`. Returns a new array of the
    requested shape with `img`'s values copied into its center.
    """
    final_dims = tuple(d or img.shape[i] for i, d in enumerate(resize_dims))

    dest = np.full(final_dims, fill_value, img.dtype)
    src_center = np.asarray(img.shape) // 2
    dest_center = np.asarray(dest.shape) // 2
    src_slices, dest_slices = copypaste_arrays(img, dest, src_center, dest_center, final_dims)
    dest[dest_slices] = img[src_slices]

    return dest
def one_hot(labels, num_classes):
    """
    Convert label array `labels` into a one-hot encoding with `num_classes`
    channels appended as the last dimension.
    """
    wrapped = labels % num_classes  # fold labels into [0, num_classes)
    identity = np.eye(num_classes)
    flat = identity[wrapped.flatten()]
    return flat.reshape(tuple(wrapped.shape) + (num_classes,)).astype(wrapped.dtype)
| [
"numpy.full",
"numpy.asarray",
"numpy.zeros",
"numpy.iinfo",
"numpy.any",
"random.random",
"numpy.min",
"numpy.max",
"numpy.where",
"numpy.eye"
] | [((943, 962), 'numpy.any', 'np.any', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (949, 962), True, 'import numpy as np\n'), ((973, 992), 'numpy.any', 'np.any', (['img'], {'axis': '(1)'}), '(img, axis=1)\n', (979, 992), True, 'import numpy as np\n'), ((2271, 2282), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (2277, 2282), True, 'import numpy as np\n'), ((2294, 2305), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (2300, 2305), True, 'import numpy as np\n'), ((2703, 2729), 'numpy.zeros', 'np.zeros', (['arr.shape', 'dtype'], {}), '(arr.shape, dtype)\n', (2711, 2729), True, 'import numpy as np\n'), ((3000, 3015), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (3008, 3015), True, 'import numpy as np\n'), ((5542, 5585), 'numpy.full', 'np.full', (['resize_dims', 'fill_value', 'img.dtype'], {}), '(resize_dims, fill_value, img.dtype)\n', (5549, 5585), True, 'import numpy as np\n'), ((6047, 6066), 'numpy.eye', 'np.eye', (['num_classes'], {}), '(num_classes)\n', (6053, 6066), True, 'import numpy as np\n'), ((769, 784), 'random.random', 'random.random', ([], {}), '()\n', (782, 784), False, 'import random\n'), ((1873, 1899), 'numpy.any', 'np.any', (['img[:, :, :margin]'], {}), '(img[:, :, :margin])\n', (1879, 1899), True, 'import numpy as np\n'), ((1903, 1930), 'numpy.any', 'np.any', (['img[:, :, -margin:]'], {}), '(img[:, :, -margin:])\n', (1909, 1930), True, 'import numpy as np\n'), ((1961, 1987), 'numpy.any', 'np.any', (['img[:, :margin, :]'], {}), '(img[:, :margin, :])\n', (1967, 1987), True, 'import numpy as np\n'), ((1991, 2018), 'numpy.any', 'np.any', (['img[:, -margin:, :]'], {}), '(img[:, -margin:, :])\n', (1997, 2018), True, 'import numpy as np\n'), ((5607, 5628), 'numpy.asarray', 'np.asarray', (['img.shape'], {}), '(img.shape)\n', (5617, 5628), True, 'import numpy as np\n'), ((5656, 5678), 'numpy.asarray', 'np.asarray', (['dest.shape'], {}), '(dest.shape)\n', (5666, 5678), True, 'import numpy as np\n'), ((1020, 1033), 'numpy.where', 
'np.where', (['ax0'], {}), '(ax0)\n', (1028, 1033), True, 'import numpy as np\n'), ((1047, 1060), 'numpy.where', 'np.where', (['ax1'], {}), '(ax1)\n', (1055, 1060), True, 'import numpy as np\n')] |
"""
@author: <NAME> <<EMAIL>>
"""
import numpy
from .hosemi_crf_ad import HOSemiCRFADModelRepresentation, HOSemiCRFAD
from .utilities import HO_AStarSearcher, vectorized_logsumexp
class HOCRFADModelRepresentation(HOSemiCRFADModelRepresentation):
    """Model representation holding the data structures used by :class:`HOCRFAD`.

    It includes all attributes of the :class:`HOSemiCRFADModelRepresentation`
    parent class.
    """

    def __init__(self):
        # delegate all state setup to the parent representation
        super().__init__()

    def filter_activated_states(self, activated_states, accum_active_states, boundary):
        """Prune candidate states / y patterns against the per-position active sets.

        Args:
            activated_states: dictionary of candidate active states/y features,
                              of the form {patt_len: {patt_1, patt_2, ...}}
            accum_active_states: dictionary of the states actually active per
                                 position, of the form {pos: {state_1, ...}}
            boundary: tuple (u, v), the current boundary in the sequence
        """
        z_elems_map = self.Z_elems
        surviving = {}
        __, pos = boundary
        for z_len, patterns in activated_states.items():
            # unigram patterns are kept implicitly; only longer ones are pruned
            if z_len == 1:
                continue
            start_pos = pos - z_len + 1
            if (start_pos, start_pos) not in accum_active_states:
                continue
            kept = set()
            for z_patt in patterns:
                elems = z_elems_map[z_patt]
                # a pattern survives only if every one of its states is active
                # at the corresponding position
                if all(
                    (start_pos + i, start_pos + i) in accum_active_states
                    and elems[i] in accum_active_states[(start_pos + i, start_pos + i)]
                    for i in range(z_len)
                ):
                    kept.add(z_patt)
            surviving[z_len] = kept
        return surviving
class HOCRFAD(HOSemiCRFAD):
    """higher-order CRF model that uses algorithmic differentiation in gradient computation

    Args:
        model: an instance of :class:`HOCRFADModelRepresentation` class
        seqs_representer: an instance of :class:`SeqsRepresenter` class
        seqs_info: dictionary holding sequences info

    Keyword Arguments:
        load_info_fromdisk: integer from 0 to 5 specifying number of cached data
                            to be kept in memory. 0 means keep everything while
                            5 means load everything from disk

    Attributes:
        model: an instance of :class:`HOCRFADModelRepresentation` class
        weights: a numpy vector representing feature weights
        seqs_representer: an instance of :class:`pyseqlab.feature_extraction.SeqsRepresenter` class
        seqs_info: dictionary holding sequences info
        beam_size: determines the size of the beam for state pruning
        fun_dict: a function map
        def_cached_entities: a list of the names of cached entities sorted (descending)
                             based on estimated space required in memory

    """

    def __init__(self, model, seqs_representer, seqs_info, load_info_fromdisk=5):
        # all setup is shared with the semi-CRF parent; only the per-method
        # dynamic programs below differ (boundaries are always (j, j) here)
        super().__init__(model, seqs_representer, seqs_info, load_info_fromdisk)

    def compute_fpotential(self, w, active_features):
        """compute the potential of active features in a specified boundary

        Args:
            w: weight vector (numpy vector)
            active_features: dictionary of activated features in a specified boundary

        """
        model = self.model
        pky_codebook = model.pky_codebook
        z_pky_map = model.z_pky_map
        # one potential slot per coded pky entry
        f_potential = numpy.zeros(len(pky_codebook))

        # to consider caching the w_indx and fval as in cached_pf
        for z in active_features:
            w_indx, f_val = active_features[z]
            # weighted sum of the feature values of pattern z
            potential = numpy.dot(w[w_indx], f_val)
            # get all pky's in coded format where z maintains a suffix relation with them
            pky_c_list = z_pky_map[z]
            f_potential[pky_c_list] += potential

        return f_potential

    def compute_forward_vec(self, w, seq_id):
        """compute the forward matrix (alpha matrix)

        Args:
            w: weight vector (numpy vector)
            seq_id: integer representing unique id assigned to the sequence

        .. note::

           activefeatures need to be loaded first in :attr:`seqs.info`
        """
        model = self.model
        pi_pky_map = model.pi_pky_map
        P_codebook = model.P_codebook
        P_len = model.P_len
        T = self.seqs_info[seq_id]["T"]
        active_features = self.seqs_info[seq_id]["activefeatures"]
        # alpha holds log-space scores; initialize to log(0) = -inf
        alpha = numpy.ones((T + 1, len(P_codebook)), dtype="longdouble") * (-numpy.inf)
        # empty-prefix state has probability 1 (log 0) at position 0
        alpha[0, P_codebook[""]] = 0
        # cache per-boundary potentials for reuse in the backward pass
        fpotential_perboundary = {}
        for j in range(1, T + 1):
            boundary = (j, j)
            # compute f_potential
            f_potential = self.compute_fpotential(w, active_features[boundary])
            fpotential_perboundary[boundary] = f_potential
            for pi in pi_pky_map:
                pi_c = P_codebook[pi]
                # pattern pi can only start once j covers its full length
                if j >= P_len[pi]:
                    pky_c_list, pk_c_list = pi_pky_map[pi]
                    # log-space recurrence: sum over predecessor states pk
                    vec = f_potential[pky_c_list] + alpha[j - 1, pk_c_list]
                    alpha[j, pi_c] = vectorized_logsumexp(vec)
        self.seqs_info[seq_id]["fpotential"] = fpotential_perboundary
        return alpha

    def compute_backward_vec(self, w, seq_id):
        """compute the backward matrix (beta matrix)

        Args:
            w: weight vector (numpy vector)
            seq_id: integer representing unique id assigned to the sequence

        .. note::

           fpotential per boundary dictionary should be available in :attr:`seqs.info`
        """
        model = self.model
        pi_pky_map = model.pi_pky_map
        P_codebook = model.P_codebook
        len_P = len(P_codebook)
        T = self.seqs_info[seq_id]["T"]
        # potentials were cached by compute_forward_vec
        fpotential_perboundary = self.seqs_info[seq_id]["fpotential"]
        beta = numpy.ones((T + 2, len(P_codebook)), dtype="longdouble") * (-numpy.inf)
        # all states beyond the sequence end carry log-probability 0
        beta[T + 1, :] = 0

        for j in reversed(range(1, T + 1)):
            # track_comp[pk, pi] accumulates contributions from successor pi
            track_comp = numpy.ones((len_P, len_P), dtype="longdouble") * (-numpy.inf)
            f_potential = fpotential_perboundary[j, j]
            for pi in pi_pky_map:
                pi_c = P_codebook[pi]
                pky_c_list, pk_c_list = pi_pky_map[pi]
                vec = f_potential[pky_c_list] + beta[j + 1, pi_c]
                track_comp[pk_c_list, pi_c] = vec
            for p_c in P_codebook.values():
                # marginalize over successors in log space
                beta[j, p_c] = vectorized_logsumexp(track_comp[p_c, :])

        return beta

    def compute_marginals(self, seq_id):
        """ compute the marginal (i.e. probability of each y pattern at each position)

        Args:
            seq_id: integer representing unique id assigned to the sequence

        .. note::

           - fpotential per boundary dictionary should be available in :attr:`seqs.info`
           - alpha matrix should be available in :attr:`seqs.info`
           - beta matrix should be available in :attr:`seqs.info`
           - Z (i.e. P(x)) should be available in :attr:`seqs.info`
        """
        model = self.model
        Z_codebook = model.Z_codebook
        z_pi_piy = model.z_pi_piy_map
        T = self.seqs_info[seq_id]["T"]
        L = self.model.L

        alpha = self.seqs_info[seq_id]["alpha"]
        beta = self.seqs_info[seq_id]["beta"]
        Z = self.seqs_info[seq_id]["Z"]

        fpotential_perboundary = self.seqs_info[seq_id]["fpotential"]

        P_marginals = numpy.zeros((T + 1, len(model.Z_codebook)), dtype="longdouble")
        for j in range(1, T + 1):
            # L is the maximum segment length considered for the marginal
            for d in range(L):
                u = j
                v = j + d
                if v > T:
                    break
                boundary = (u, v)
                f_potential = fpotential_perboundary[boundary]
                for z in Z_codebook:
                    pi_c, piy_c, pk_c = z_pi_piy[z]
                    # forward score * boundary potential * backward score,
                    # all in log space
                    numerator = (
                        alpha[u - 1, pi_c] + f_potential[piy_c] + beta[v + 1, pk_c]
                    )
                    # normalize by the partition function Z to get a probability
                    P_marginals[j, Z_codebook[z]] = numpy.exp(
                        vectorized_logsumexp(numerator) - Z
                    )
        return P_marginals

    #     def compute_marginals(self, w, seq_id):
    #         model = self.model
    #         P_codebook = model.P_codebook
    #         len_P = len(P_codebook)
    #         pi_z_pk = model.pi_z_pk
    #         Z_codebook = model.Z_codebook
    #         T = self.seqs_info[seq_id]["T"]
    #         alpha = self.seqs_info[seq_id]['alpha']
    #         beta = self.seqs_info[seq_id]['beta']
    #         P_marginals = numpy.zeros((T+1, len(model.Z_codebook)), dtype='longdouble')
    #         print("alpha ", alpha)
    #         Z = self.seqs_info[seq_id]['Z']
    #         print("Z ", Z)
    #         fpotential_perboundary = self.seqs_info[seq_id]['fpotential']
    #         print(pi_z_pk)
    #         print(P_codebook)
    #         f_transition = model.f_transition
    #         pky_z = model.z_pky
    #         pky_codebook = model.pky_codebook
    #         print("pky_z ", pky_z)
    #         for j in reversed(range(1, T+1)):
    #             marginal_dict = {}
    #             f_potential = fpotential_perboundary[j, j]
    #             for pi in f_transition:
    #                 beta_pi = beta[j+1, P_codebook[pi]]
    #                 for pky in f_transition[pi]:
    #                     pk, y = f_transition[pi][pky]
    #                     accum = alpha[j-1, P_codebook[pk]] + f_potential[pky_codebook[pky]] + beta_pi
    #                     for z_patt in pky_z[pky]:
    #                         if(z_patt in marginal_dict):
    #                             marginal_dict[z_patt] = numpy.logaddexp(marginal_dict[z_patt], accum)
    #                         else:
    #                             marginal_dict[z_patt] = accum
    #             print("j ", j)
    #             print("marginal ", marginal_dict)
    #             for z_patt in marginal_dict:
    #                 P_marginals[j, Z_codebook[z_patt]] = numpy.exp(marginal_dict[z_patt]-Z)
    #         self.seqs_info[seq_id]['P_marginal'] = P_marginals
    #         print(P_marginals)
    #         return(P_marginals)

    def compute_feature_expectation(self, seq_id, P_marginals, grad):
        """compute the features expectations (i.e. expected count of the feature based on learned model)

        Args:
            seq_id: integer representing unique id assigned to the sequence
            P_marginals: probability matrix for y patterns at each position in time
            grad: numpy vector with dimension equal to the weight vector. It represents the gradient
                  that will be computed using the feature expectation and the global features of the sequence

        .. note::

          - activefeatures (per boundary) dictionary should be available in :attr:`seqs.info`
          - P_marginal (marginal probability matrix) should be available in :attr:`seqs.info`
        """
        activefeatures = self.seqs_info[seq_id]["activefeatures"]
        Z_codebook = self.model.Z_codebook
        for boundary, features_dict in activefeatures.items():
            u, __ = boundary
            for z_patt in features_dict:
                w_indx, f_val = features_dict[z_patt]
                # expectation = feature value * marginal probability of its pattern
                grad[w_indx] += f_val * P_marginals[u, Z_codebook[z_patt]]

    def prune_states(self, j, delta, beam_size):
        """prune states that fall off the specified beam size

        Args:
            j: current position (integer) in the sequence
            delta: score matrix
            beam_size: specified size of the beam (integer)

        """
        P_codebook_rev = self.model.P_codebook_rev
        P_elems = self.model.P_elems

        #         pi_lendict = self.model.pi_lendict
        #         # sort the pi in descending order of their score
        #         indx_sorted_pi = numpy.argsort(delta[j,:])[::-1]
        #         # identify states falling out of the beam
        #         indx_falling_pi = indx_sorted_pi[beam_size:]
        #         # identify top-k states/pi
        #         indx_topk_pi = indx_sorted_pi[:beam_size]
        #         # remove the effect of states/pi falling out of the beam
        #         delta[j, indx_falling_pi] = -numpy.inf

        # using argpartition as better alternative to argsort
        # (O(n) partial selection instead of a full O(n log n) sort)
        indx_partitioned_pi = numpy.argpartition(-delta[j, :], beam_size)
        # identify top-k states/pi
        indx_topk_pi = indx_partitioned_pi[:beam_size]
        #         # identify states falling out of the beam
        #         indx_falling_pi = indx_partitioned_pi[beam_size:]
        #         # remove the effect of states/pi falling out of the beam
        #         delta[j, indx_falling_pi] = -numpy.inf

        # get topk states
        topk_pi = {P_codebook_rev[indx] for indx in indx_topk_pi}
        topk_states = set()
        for pi in topk_pi:
            # the last element of a pattern pi is its current state
            topk_states.add(P_elems[pi][-1])
        return topk_states

    def viterbi(self, w, seq_id, beam_size, stop_off_beam=False, y_ref=[], K=1):
        """decode sequences using viterbi decoder

        Args:
            w: weight vector (numpy vector)
            seq_id: integer representing unique id assigned to the sequence
            beam_size: integer representing the size of the beam

        Keyword Arguments:
            stop_off_beam: boolean indicating if to stop when the reference state \
                           falls off the beam (used in perceptron/search based learning)
            y_ref: reference sequence list of labels (used while learning)
            K: integer indicating number of decoded sequences required (i.e. top-k list)
        """
        model = self.model
        P_elems = model.P_elems
        pi_pky_map = model.pi_pky_map
        P_codebook = model.P_codebook
        P_codebook_rev = model.P_codebook_rev
        len_P = len(P_codebook)
        P_len = model.P_len
        num_states = model.num_states
        T = self.seqs_info[seq_id]["T"]
        # records max score at every time step
        delta = numpy.ones((T + 1, len_P), dtype="longdouble") * (-numpy.inf)
        # the score for the empty sequence at time 0 is 1
        delta[0, P_codebook[""]] = 0
        back_track = {}
        # records where violation occurs -- it is 1-based indexing
        viol_index = []
        if beam_size == num_states:
            # case of exact search and decoding
            l = {}
            l["activefeatures"] = (seq_id,)
            self.check_cached_info(seq_id, l)
            active_features = self.seqs_info[seq_id]["activefeatures"]
            for j in range(1, T + 1):
                boundary = (j, j)
                # vector of size len(pky)
                f_potential = self.compute_fpotential(w, active_features[boundary])
                # ^print("f_potential ", f_potential)
                for pi in pi_pky_map:
                    pi_c = P_codebook[pi]
                    pky_c_list, pk_c_list = pi_pky_map[pi]
                    # Viterbi recurrence: max (instead of logsumexp) over
                    # predecessor states
                    vec = f_potential[pky_c_list] + delta[j - 1, pk_c_list]
                    delta[j, pi_c] = numpy.max(vec)
                    #                     print("max chosen ", delta[j, P_codebook[pi]])
                    argmax_ind = numpy.argmax(vec)
                    #                     print("argmax chosen ", argmax_ind)
                    pk_c_max = pk_c_list[argmax_ind]
                    pk = P_codebook_rev[pk_c_max]
                    y = P_elems[pk][-1]
                    # remember best predecessor and its emitted label
                    back_track[j, pi_c] = (pk_c_max, y)
        else:
            # case of inexact search and decoding
            l = dict()
            l["seg_features"] = (seq_id,)
            self.check_cached_info(seq_id, l)
            # tracks active states by boundary
            accum_activestates = {}
            for j in range(1, T + 1):
                boundary = (j, j)
                # features are identified on the fly, restricted to the states
                # that survived pruning at earlier positions
                active_features = self.identify_activefeatures(
                    seq_id, boundary, accum_activestates
                )
                # vector of size len(pky)
                f_potential = self.compute_fpotential(w, active_features)
                # ^print("f_potential ", f_potential)
                for pi in pi_pky_map:
                    pi_c = P_codebook[pi]
                    pky_c_list, pk_c_list = pi_pky_map[pi]
                    vec = f_potential[pky_c_list] + delta[j - 1, pk_c_list]
                    delta[j, pi_c] = numpy.max(vec)
                    #                     print("max chosen ", delta[j, P_codebook[pi]])
                    argmax_ind = numpy.argmax(vec)
                    #                     print("argmax chosen ", argmax_ind)
                    pk_c_max = pk_c_list[argmax_ind]
                    pk = P_codebook_rev[pk_c_max]
                    y = P_elems[pk][-1]
                    back_track[j, pi_c] = (pk_c_max, y)

                topk_states = self.prune_states(j, delta, beam_size)
                # update tracked active states -- to consider renaming it
                accum_activestates[boundary] = accum_activestates[
                    boundary
                ].intersection(topk_states)
                # ^print('delta[{},:] = {} '.format(j, delta[j,:]))
                # ^print("topk_states ", topk_states)
                if y_ref:
                    # record positions where the reference label fell off the beam
                    if y_ref[j - 1] not in topk_states:
                        viol_index.append(j)
                        if stop_off_beam:
                            T = j
                            break
        if K == 1:
            # decoding the sequence: follow the back-pointers from the best
            # final state to position 1
            Y_decoded = []

            p_T_c = numpy.argmax(delta[T, :])
            p_T = P_codebook_rev[p_T_c]
            y_T = P_elems[p_T][-1]
            Y_decoded.append((p_T_c, y_T))

            t = T - 1
            while t > 0:
                p_tplus1_c = Y_decoded[-1][0]
                p_t_c, y_t = back_track[(t + 1, p_tplus1_c)]
                Y_decoded.append((p_t_c, y_t))
                t -= 1
            Y_decoded.reverse()

            Y_decoded = [yt for __, yt in Y_decoded]
            #             print("Y_decoded {}".format(Y_decoded))
            #             print('delta ', delta)
            #             print('backtrack ', back_track)
            #             print("P_codebook ", P_codebook)
            return (Y_decoded, viol_index)
        else:
            # top-K decoding via A* search over the back-pointer lattice
            asearcher = HO_AStarSearcher(P_codebook_rev, P_elems)
            topK = asearcher.search(delta, back_track, T, K)
            #             print('topk ', topK)
            return (topK, viol_index)
if __name__ == "__main__":
    # library module: nothing to execute when run directly
    pass
| [
"numpy.argmax",
"numpy.ones",
"numpy.argpartition",
"numpy.max",
"numpy.dot"
] | [((13207, 13250), 'numpy.argpartition', 'numpy.argpartition', (['(-delta[j, :])', 'beam_size'], {}), '(-delta[j, :], beam_size)\n', (13225, 13250), False, 'import numpy\n'), ((4210, 4237), 'numpy.dot', 'numpy.dot', (['w[w_indx]', 'f_val'], {}), '(w[w_indx], f_val)\n', (4219, 4237), False, 'import numpy\n'), ((14999, 15045), 'numpy.ones', 'numpy.ones', (['(T + 1, len_P)'], {'dtype': '"""longdouble"""'}), "((T + 1, len_P), dtype='longdouble')\n", (15009, 15045), False, 'import numpy\n'), ((18461, 18486), 'numpy.argmax', 'numpy.argmax', (['delta[T, :]'], {}), '(delta[T, :])\n', (18473, 18486), False, 'import numpy\n'), ((6700, 6746), 'numpy.ones', 'numpy.ones', (['(len_P, len_P)'], {'dtype': '"""longdouble"""'}), "((len_P, len_P), dtype='longdouble')\n", (6710, 6746), False, 'import numpy\n'), ((16040, 16054), 'numpy.max', 'numpy.max', (['vec'], {}), '(vec)\n', (16049, 16054), False, 'import numpy\n'), ((16157, 16174), 'numpy.argmax', 'numpy.argmax', (['vec'], {}), '(vec)\n', (16169, 16174), False, 'import numpy\n'), ((17324, 17338), 'numpy.max', 'numpy.max', (['vec'], {}), '(vec)\n', (17333, 17338), False, 'import numpy\n'), ((17441, 17458), 'numpy.argmax', 'numpy.argmax', (['vec'], {}), '(vec)\n', (17453, 17458), False, 'import numpy\n')] |
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
from bin.tts.text.symbols import symbols
from bin.tts.utils import get_sinusoid_encoding_table
from src.transformer.attention import MultiHeadedAttention
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(q @ k^T / temperature) @ v."""

    def __init__(self, temperature):
        super().__init__()
        # Scaling constant, typically sqrt(d_k).
        self.temperature = temperature
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, mask=None):
        # Raw attention scores, shape (batch, len_q, len_k).
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        if mask is not None:
            # Masked positions get -inf so softmax assigns them zero weight.
            scores = scores.masked_fill(mask, -np.inf)
        weights = self.softmax(scores)
        return torch.bmm(weights, v), weights
class MultiHeadAttention_tts(nn.Module):
    ''' Multi-Head Attention module '''

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        """
        :param n_head: number of attention heads
        :param d_model: model (input/output) feature dimension
        :param d_k: per-head key/query dimension
        :param d_v: per-head value dimension
        :param dropout: dropout probability on the output projection
        """
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # One joint projection per role; heads are split out in forward().
        self.w_qs = nn.Linear(d_model, n_head * d_k)
        self.w_ks = nn.Linear(d_model, n_head * d_k)
        self.w_vs = nn.Linear(d_model, n_head * d_v)
        self.attention = ScaledDotProductAttention(
            temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_head * d_v, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        """Multi-head attention with residual connection and layer norm.

        :param q: (batch, len_q, d_model); also used as the residual
        :param k: (batch, len_k, d_model)
        :param v: (batch, len_v, d_model)
        :param mask: optional attention mask tiled across heads, or None
        :return: (output, attn) with output of shape (batch, len_q, d_model)
        """
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        residual = q
        # Project and split into heads: (batch, len, n_head, d)
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # Fold heads into the batch dimension for the attention op.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1,
                                                    len_q, d_k)  # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1,
                                                    len_k, d_k)  # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1,
                                                    len_v, d_v)  # (n*b) x lv x dv
        # BUG FIX: the original called mask.repeat(...) unconditionally, which
        # raised AttributeError whenever the documented default mask=None was
        # used. Only tile the mask across heads when one is supplied.
        if mask is not None:
            mask = mask.repeat(n_head, 1, 1)  # (n*b) x .. x ..
        output, attn = self.attention(q, k, v, mask=mask)
        # Un-fold heads and concatenate them along the feature axis.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(
            sz_b, len_q, -1)  # b x lq x (n*dv)
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)
        return output, attn
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward block built from two 1-D convolutions.

    Applies conv -> ReLU -> conv along the sequence axis, then dropout,
    a residual connection, and layer normalisation.
    """

    def __init__(self, d_in, d_hid, kernel_size, dropout=0.1):
        super().__init__()
        # "Same" padding keeps the sequence length unchanged.
        self.w_1 = nn.Conv1d(d_in, d_hid,
            kernel_size=kernel_size[0], padding=(kernel_size[0]-1)//2)
        self.w_2 = nn.Conv1d(d_hid, d_in,
            kernel_size=kernel_size[1], padding=(kernel_size[1]-1)//2)
        self.layer_norm = nn.LayerNorm(d_in)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        residual = x
        # Conv1d expects (batch, channels, length), hence the transposes.
        hidden = self.w_1(x.transpose(1, 2))
        hidden = self.w_2(F.relu(hidden))
        out = self.dropout(hidden.transpose(1, 2))
        return self.layer_norm(out + residual)
class FFTBlock(torch.nn.Module):
    """FFT block: multi-head self-attention plus a position-wise
    feed-forward net, zeroing padded positions after each sub-layer.

    MultiHeadAttention_tts is used here with key/value dims derived as
    att_d_feat // att_n_head, while PositionwiseFeedForward follows the
    original project's implementation.
    """

    def __init__(self, att_n_head, att_d_feat, fft_filter, fft_kernel, dropout=0.1):
        super(FFTBlock, self).__init__()
        head_dim = att_d_feat // att_n_head
        self.slf_attn = MultiHeadAttention_tts(
            att_n_head, att_d_feat, head_dim, head_dim, dropout=dropout)
        self.pos_ffn = PositionwiseFeedForward(
            att_d_feat, fft_filter, fft_kernel, dropout=dropout)

    def forward(self, enc_input, mask=None, slf_attn_mask=None):
        # Attention weights are discarded; only the attended features flow on.
        attended, _ = self.slf_attn(
            enc_input, enc_input, enc_input, mask=slf_attn_mask)
        attended = attended.masked_fill(mask.unsqueeze(-1), 0)
        out = self.pos_ffn(attended)
        return out.masked_fill(mask.unsqueeze(-1), 0)
class TextEncoder(nn.Module):
    ''' Encoder: token embedding + sinusoidal positions + a stack of FFT blocks. '''
    """
    def __init__(self,
                 n_src_vocab=len(symbols)+1,
                 len_max_seq=hp.max_seq_len, --->max_seq_len
                 d_word_vec=hp.encoder_hidden, --->word_dim
                 n_layers=hp.encoder_layer, --->n_layers
                 n_head=hp.encoder_head, --->att_n_head
                 d_k=hp.encoder_hidden // hp.encoder_head, --->att_k
                 d_v=hp.encoder_hidden // hp.encoder_head, --->att_v
                 d_model=hp.encoder_hidden, --->att_d_feat
                 d_inner=hp.fft_conv1d_filter_size, --->fft_filter
                 dropout=hp.encoder_dropout):
    """
    def __init__(self, max_seq_len, word_dim, n_layer,
                    att_n_head, att_d_feat,
                    fft_filter, fft_kernel, dropout, n_src_vocab=len(symbols)+1):
        super(TextEncoder, self).__init__()
        self.max_seq_len = max_seq_len
        self.word_dim = word_dim
        # Index 0 is reserved for padding in both the vocab and positions.
        PAD = 0
        n_position = max_seq_len + 1
        self.src_word_emb = nn.Embedding(n_src_vocab, word_dim, padding_idx=PAD)
        # Fixed (non-trainable) sinusoidal position table, shape (1, n_position, word_dim).
        self.position_enc = nn.Parameter(get_sinusoid_encoding_table(n_position, word_dim).unsqueeze(0), requires_grad=False)
        self.layer_stack = nn.ModuleList([FFTBlock(att_n_head, att_d_feat, fft_filter, fft_kernel, dropout=dropout) for _ in range(n_layer)])
    def forward(self, src_seq, mask, return_attns=False):
        # src_seq: (batch, seq) token ids; mask: padding mask passed to FFT
        # blocks.  return_attns is currently unused.
        batch_size, max_len = src_seq.shape[0], src_seq.shape[1]
        # -- Prepare masks
        slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1)
        # slf_attn_mask = ~slf_attn_mask
        # print(slf_attn_mask.long())
        # -- Forward
        # At inference time, sequences longer than the precomputed table get
        # a freshly built sinusoid table on the fly.
        if not self.training and src_seq.shape[1] > self.max_seq_len:
            enc_output = self.src_word_emb(src_seq) + get_sinusoid_encoding_table(src_seq.shape[1], self.word_dim)[:src_seq.shape[1], :].unsqueeze(0).expand(batch_size, -1, -1).to(src_seq.device)
        else:
            enc_output = self.src_word_emb(src_seq) + self.position_enc[:, :max_len, :].expand(batch_size, -1, -1)
        for enc_layer in self.layer_stack:
            enc_output = enc_layer(enc_output, mask=mask, slf_attn_mask=slf_attn_mask)
return enc_output | [
"torch.nn.Dropout",
"bin.tts.utils.get_sinusoid_encoding_table",
"torch.bmm",
"numpy.power",
"torch.nn.Embedding",
"torch.nn.Conv1d",
"torch.nn.LayerNorm",
"torch.nn.Softmax",
"torch.nn.Linear"
] | [((459, 476), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (469, 476), True, 'import torch.nn as nn\n'), ((741, 759), 'torch.bmm', 'torch.bmm', (['attn', 'v'], {}), '(attn, v)\n', (750, 759), False, 'import torch\n'), ((1062, 1094), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_k)'], {}), '(d_model, n_head * d_k)\n', (1071, 1094), True, 'import torch.nn as nn\n'), ((1115, 1147), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_k)'], {}), '(d_model, n_head * d_k)\n', (1124, 1147), True, 'import torch.nn as nn\n'), ((1168, 1200), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_v)'], {}), '(d_model, n_head * d_v)\n', (1177, 1200), True, 'import torch.nn as nn\n'), ((1332, 1353), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (1344, 1353), True, 'import torch.nn as nn\n'), ((1373, 1405), 'torch.nn.Linear', 'nn.Linear', (['(n_head * d_v)', 'd_model'], {}), '(n_head * d_v, d_model)\n', (1382, 1405), True, 'import torch.nn as nn\n'), ((1430, 1449), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1440, 1449), True, 'import torch.nn as nn\n'), ((2925, 3014), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_in', 'd_hid'], {'kernel_size': 'kernel_size[0]', 'padding': '((kernel_size[0] - 1) // 2)'}), '(d_in, d_hid, kernel_size=kernel_size[0], padding=(kernel_size[0] -\n 1) // 2)\n', (2934, 3014), True, 'import torch.nn as nn\n'), ((3080, 3169), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_hid', 'd_in'], {'kernel_size': 'kernel_size[1]', 'padding': '((kernel_size[1] - 1) // 2)'}), '(d_hid, d_in, kernel_size=kernel_size[1], padding=(kernel_size[1] -\n 1) // 2)\n', (3089, 3169), True, 'import torch.nn as nn\n'), ((3218, 3236), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_in'], {}), '(d_in)\n', (3230, 3236), True, 'import torch.nn as nn\n'), ((3260, 3279), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3270, 3279), True, 'import torch.nn as nn\n'), ((5815, 5867), 
'torch.nn.Embedding', 'nn.Embedding', (['n_src_vocab', 'word_dim'], {'padding_idx': 'PAD'}), '(n_src_vocab, word_dim, padding_idx=PAD)\n', (5827, 5867), True, 'import torch.nn as nn\n'), ((1286, 1304), 'numpy.power', 'np.power', (['d_k', '(0.5)'], {}), '(d_k, 0.5)\n', (1294, 1304), True, 'import numpy as np\n'), ((5909, 5958), 'bin.tts.utils.get_sinusoid_encoding_table', 'get_sinusoid_encoding_table', (['n_position', 'word_dim'], {}), '(n_position, word_dim)\n', (5936, 5958), False, 'from bin.tts.utils import get_sinusoid_encoding_table\n'), ((6587, 6647), 'bin.tts.utils.get_sinusoid_encoding_table', 'get_sinusoid_encoding_table', (['src_seq.shape[1]', 'self.word_dim'], {}), '(src_seq.shape[1], self.word_dim)\n', (6614, 6647), False, 'from bin.tts.utils import get_sinusoid_encoding_table\n')] |
import os
import pandas as pd
import argparse
import sqlite3
import numpy as np
def get_args():
    """Build the CLI parser, parse sys.argv, and validate the combination.

    :return: argparse.Namespace with gtf, db, ref_gtf, mode, outprefix
    :raises Exception: if both --gtf and --db are supplied
    """
    desc = ('Extracts the locations, novelty, and transcript assignments of'
            ' exons/introns in a TALON database or GTF file. All positions '
            'are 1-based.')
    ap = argparse.ArgumentParser(description=desc)
    ap.add_argument('--gtf', dest='gtf', default=None,
                    help='TALON GTF file from which to extract exons/introns')
    ap.add_argument('--db', dest='db', default=None,
                    help='TALON database from which to extract exons/introns')
    ap.add_argument('--ref', dest='ref_gtf',
                    help=('GTF reference file (ie GENCODE). Will be used to '
                          'label novelty.'))
    ap.add_argument('--mode', dest='mode', default='intron',
                    help=("Choices are 'intron' or 'exon' (default is 'intron'). "
                          "Determines whether to include introns or exons in the "
                          "output"))
    ap.add_argument('--outprefix', dest='outprefix',
                    help='Prefix for output file')
    parsed = ap.parse_args()
    # The two input sources are mutually exclusive.
    if parsed.gtf and parsed.db:
        raise Exception('only input gtf or db')
    return parsed
# Build a {key: value} dict from the attribute (last) column of a GTF line.
# adapted from <NAME>
def get_fields(tab_fields):
    """Parse the semicolon-delimited attribute field of a GTF record.

    :param tab_fields: list of tab-separated columns of one GTF line
    :return: dict mapping attribute keys to unquoted values; a placeholder
        gene_id of "NULL" is inserted when none is present
    """
    attributes = {}
    # Strip the trailing newline and walk each "key "value"" chunk.
    for chunk in tab_fields[-1].strip('\n').split(';'):
        if chunk in ("", " "):
            continue
        tokens = chunk.split()
        if tokens[0] == '':
            tokens = tokens[1:]
        key = tokens[0].replace('"', '')
        attributes[key] = ' '.join(tokens[1:]).replace('"', '')
    # Guarantee gene_id exists for downstream code.
    attributes.setdefault("gene_id", "NULL")
    return attributes
# create loc_df (for nodes), edge_df (for edges), and t_df (for paths)
def create_dfs_db(db):
    """Load vertex, edge and transcript-path tables from a TALON SQLite db.

    :param db: path to a TALON database file
    :return: (loc_df, edge_df, t_df) where
        loc_df  -- vertices indexed by vertex_id (chrom, coord)
        edge_df -- edges indexed by (v1, v2) tuples (edge_type, strand)
        t_df    -- transcripts indexed by tid, with 'path' = vertex-id list
    :raises Exception: if the database file does not exist
    """
    # make sure file exists
    if not os.path.exists(db):
        raise Exception('TALON db file not found. Check path.')
    # open db connection
    # NOTE(review): the connection is never closed; consider
    # contextlib.closing in a follow-up.
    conn = sqlite3.connect(db)
    c = conn.cursor()
    # loc_df
    q = 'SELECT loc.* FROM location loc'
    c.execute(q)
    locs = c.fetchall()
    loc_df = pd.DataFrame(locs,
        columns=['location_ID', 'genome_build',
                 'chrom', 'position'])
    # do some df reformatting, add strand
    loc_df.drop('genome_build', axis=1, inplace=True)
    loc_df.rename({'location_ID': 'vertex_id',
                   'position': 'coord'},
                   inplace=True, axis=1)
    loc_df.vertex_id = loc_df.vertex_id.map(int)
    # edge_df
    q = """SELECT * FROM edge """
    c.execute(q)
    edges = c.fetchall()
    edge_df = pd.DataFrame(edges,
        columns=['edge_id', 'v1', 'v2',
                 'edge_type', 'strand'])
    edge_df.v1 = edge_df.v1.map(int)
    edge_df.v2 = edge_df.v2.map(int)
    # Keep the raw DB id as talon_edge_id (needed to translate edge paths
    # into vertex paths below) and replace edge_id with a (v1, v2) tuple.
    edge_df['talon_edge_id'] = edge_df.edge_id
    edge_df['edge_id'] = edge_df.apply(lambda x: (int(x.v1), int(x.v2)), axis=1)
    # t_df
    t_df = pd.DataFrame()
    # get tid, gid, gname, and paths
    q = """SELECT ga.value, ta.value,
            t.start_exon, t.jn_path, t.end_exon,
            t.start_vertex, t.end_vertex
        FROM gene_annotations ga
        JOIN transcripts t ON ga.ID=t.gene_ID
        JOIN transcript_annotations ta ON t.transcript_ID=ta.ID
        WHERE ta.attribute='transcript_id'
        AND (ga.attribute='gene_name'
        OR ga.attribute='gene_id')
        """
    c.execute(q)
    data = c.fetchall()
    # get fields from each transcript and add to dataframe
    # NOTE(review): the [::2]/[1::2] slicing assumes the query returns the
    # gene_id and gene_name rows for each transcript in strict alternation
    # -- confirm against the TALON schema / result ordering.
    gids, tids, paths = zip(*[(i[0], i[1], i[2:]) for i in data[::2]])
    gnames = [i[0] for i in data[1::2]]
    paths = get_db_edge_paths(paths)
    t_df['tid'] = np.asarray(tids)
    t_df['path'] = np.asarray(paths)
    t_df = create_dupe_index(t_df, 'tid')
    t_df = set_dupe_index(t_df, 'tid')
    # furnish the last bit of info in each df
    t_df['path'] = [[int(n) for n in path]
                    for path in get_db_vertex_paths(paths, edge_df)]
    loc_df = create_dupe_index(loc_df, 'vertex_id')
    loc_df = set_dupe_index(loc_df, 'vertex_id')
    edge_df.drop('talon_edge_id', axis=1, inplace=True)
    edge_df = create_dupe_index(edge_df, 'edge_id')
    edge_df = set_dupe_index(edge_df, 'edge_id')
    return loc_df, edge_df, t_df
# create loc_df (nodes), edge_df (edges), and t_df (transcripts) from gtf
# adapted from <NAME> and TALON
def create_dfs_gtf(gtf_file):
    """Parse a GTF file into vertex, edge and transcript-path DataFrames.

    :param gtf_file: path to a GTF annotation file
    :return: (loc_df, edge_df, t_df) where
        loc_df  -- unique (chrom, coord) vertices, indexed by vertex_id
        edge_df -- exon/intron edges, indexed by (v1, v2) vertex-id tuples
        t_df    -- transcripts indexed by tid, with 'path' = vertex-id list
    :raises Exception: if the GTF file does not exist
    """
    # make sure file exists
    if not os.path.exists(gtf_file):
        raise Exception('GTF file not found. Check path.')
    # depending on the strand, determine the start and stop
    # coords of an intron or exon
    def find_edge_start_stop(v1, v2, strand):
        # NOTE(review): a strand other than '+'/'-' leaves start/stop unbound
        # and raises UnboundLocalError -- confirm inputs are always stranded.
        if strand == '-':
            start = max([v1, v2])
            stop = min([v1, v2])
        elif strand == '+':
            start = min([v1, v2])
            stop = max([v1, v2])
        return start, stop
    # dictionaries to hold unique edges and transcripts
    transcripts = {}
    exons = {}
    with open(gtf_file) as gtf:
        for line in gtf:
            # ignore header lines
            if line.startswith('#'):
                continue
            # split each entry
            line = line.strip().split('\t')
            # get some fields from gtf that we care about
            chrom = line[0]
            entry_type = line[2]
            start = int(line[3])
            stop = int(line[4])
            strand = line[6]
            fields = line[-1]
            # transcript entry
            if entry_type == "transcript":
                attributes = get_fields(line)
                tid = attributes['transcript_id']
                gid = attributes['gene_id']
                # add transcript to dictionary
                transcript = {tid: {'gid': gid,
                                    'tid': tid,
                                    'strand': strand,
                                    'exons': []}}
                transcripts.update(transcript)
            # exon entry
            elif entry_type == "exon":
                attributes = get_fields(line)
                # Orient start/stop by strand so the exon key is canonical.
                start, stop = find_edge_start_stop(start, stop, strand)
                eid = '{}_{}_{}_{}_exon'.format(chrom, start, stop, strand)
                tid = attributes['transcript_id']
                # add novel exon to dictionary
                if eid not in exons:
                    edge = {eid: {'eid': eid,
                                  'chrom': chrom,
                                  'v1': start,
                                  'v2': stop,
                                  'strand': strand}}
                    exons.update(edge)
                # add this exon to the transcript's list of exons
                if tid in transcripts:
                    transcripts[tid]['exons'].append(eid)
    # once we have all transcripts, make loc_df
    # Each unique (chrom, coord) pair gets a sequential vertex_id.
    locs = {}
    vertex_id = 0
    for edge_id, edge in exons.items():
        chrom = edge['chrom']
        strand = edge['strand']
        v1 = edge['v1']
        v2 = edge['v2']
        # exon start
        key = (chrom, v1)
        if key not in locs:
            locs[key] = vertex_id
            vertex_id += 1
        # exon end
        key = (chrom, v2)
        if key not in locs:
            locs[key] = vertex_id
            vertex_id += 1
    # add locs-indexed path to transcripts, and populate edges
    edges = {}
    for _,t in transcripts.items():
        t['path'] = []
        strand = t['strand']
        t_exons = t['exons']
        for i, exon_id in enumerate(t_exons):
            # pull some information from exon dict
            exon = exons[exon_id]
            chrom = exon['chrom']
            v1 = exon['v1']
            v2 = exon['v2']
            strand = exon['strand']
            # add current exon and subsequent intron
            # (if not the last exon) for each exon to edges
            key = (chrom, v1, v2, strand)
            v1_key = (chrom, v1)
            v2_key = (chrom, v2)
            edge_id = (locs[v1_key], locs[v2_key])
            if key not in edges:
                edges[key] = {'edge_id': edge_id, 'edge_type': 'exon'}
            # add exon locs to path
            t['path'] += list(edge_id)
            # if this isn't the last exon, we also needa add an intron
            # this consists of v2 of the prev exon and v1 of the next exon
            if i < len(t_exons)-1:
                next_exon = exons[t_exons[i+1]]
                v1 = next_exon['v1']
                key = (chrom, v2, v1, strand)
                v1_key = (chrom, v1)
                edge_id = (locs[v2_key], locs[v1_key])
                if key not in edges:
                    edges[key] = {'edge_id': edge_id, 'edge_type': 'intron'}
    # turn transcripts, edges, and locs into dataframes
    locs = [{'chrom': key[0],
             'coord': key[1],
             'vertex_id': vertex_id} for key, vertex_id in locs.items()]
    loc_df = pd.DataFrame(locs)
    edges = [{'v1': item['edge_id'][0],
              'v2': item['edge_id'][1],
              'strand': key[3],
              'edge_id': item['edge_id'],
              'edge_type': item['edge_type']} for key, item in edges.items()]
    edge_df = pd.DataFrame(edges)
    transcripts = [{'tid': key,
                    'gid': item['gid'],
                    'path': item['path']} for key, item in transcripts.items()]
    t_df = pd.DataFrame(transcripts)
    # final df formatting: index each frame by its id while keeping a copy
    # of the id as a regular column.
    loc_df = create_dupe_index(loc_df, 'vertex_id')
    loc_df = set_dupe_index(loc_df, 'vertex_id')
    edge_df = create_dupe_index(edge_df, 'edge_id')
    edge_df = set_dupe_index(edge_df, 'edge_id')
    t_df = create_dupe_index(t_df, 'tid')
    t_df = set_dupe_index(t_df, 'tid')
    return loc_df, edge_df, t_df
# convert talon query into edge path
def get_db_edge_paths(paths):
    """Expand TALON (start_exon, jn_path, end_exon, ...) tuples into lists
    of edge IDs.

    jn_path is a comma-separated string of junction edge IDs, or None for
    single-exon transcripts.
    """
    edge_paths = []
    for p in paths:
        start_edge, jn_path, end_edge = p[0], p[1], p[2]
        if jn_path is None:
            # Single-exon transcript: the path is just the one exon edge.
            edge_paths.append([start_edge])
        else:
            middle = [int(e) for e in jn_path.split(',')]
            edge_paths.append([start_edge] + middle + [end_edge])
    return edge_paths
# convert edge path to vertex path
def get_db_vertex_paths(paths, edge_df):
    """Translate lists of TALON edge IDs into lists of vertex IDs.

    The first edge of each path contributes both endpoints (v1, v2); every
    subsequent edge contributes only its end vertex v2.
    """
    vertex_paths = []
    for edge_path in paths:
        vertices = []
        for position, edge_id in enumerate(edge_path):
            row = edge_df.loc[edge_df.talon_edge_id == edge_id]
            if position == 0:
                vertices.append(row.v1.values[0])
            vertices.append(row.v2.values[0])
        vertex_paths.append(vertices)
    return vertex_paths
# creates the duplicate index
def create_dupe_index(df, ind_name):
    """Copy column ``ind_name`` into ``ind_name + '_back'`` so the values
    survive a later set_index(). Mutates and returns df."""
    backup_col = ind_name + '_back'
    df[backup_col] = df[ind_name]
    return df
def add_coord_info(edge_df, loc_df):
    """Attach chrom/start/stop to each edge by looking up its vertex IDs.

    loc_df must be indexed by vertex_id and carry 'chrom' and 'coord'
    columns. Mutates and returns edge_df.
    """
    def _lookup(vertex_col, field):
        # Per-row lookup of a loc_df field keyed on the given vertex column.
        return edge_df.apply(lambda e: loc_df.loc[e[vertex_col], field], axis=1)
    edge_df['chrom'] = _lookup('v1', 'chrom')
    edge_df['start'] = _lookup('v1', 'coord')
    edge_df['stop'] = _lookup('v2', 'coord')
    return edge_df
def subset_edges(edge_df, mode='intron'):
    """Return only the rows whose edge_type equals ``mode`` ('intron' or
    'exon')."""
    keep = edge_df.apply(lambda row: row.edge_type == mode, axis=1)
    return edge_df[keep]
def determine_sj_novelty(ref_edge_df, edge_df):
    """Flag each query edge's start, stop, and full (start, stop) combination
    as known (present in the reference) or novel.

    Adds boolean columns start_known, stop_known and combination_known to a
    copy of edge_df and returns it. ref_edge_df gains indicator helper
    columns as a side effect (as in the original implementation).
    """
    # Merge known starts from ref_edge_df with the query edges
    ref_edge_df['start_known'] = True
    edge_df = edge_df.merge(ref_edge_df[['chrom', 'start', 'strand', 'start_known']],
                            how='left',
                            on=['chrom', 'strand', 'start'])
    # BUG FIX: the original called edge_df.fillna(False) on the WHOLE frame,
    # silently overwriting unrelated NaN values in other columns. Only the
    # freshly merged indicator column should be filled.
    edge_df['start_known'] = edge_df['start_known'].fillna(False)
    # Merge known ends from ref_edge_df with the query edges
    ref_edge_df['stop_known'] = True
    edge_df = edge_df.merge(ref_edge_df[['chrom', 'stop', 'strand', 'stop_known']],
                            how='left',
                            on=['chrom', 'strand', 'stop'])
    edge_df['stop_known'] = edge_df['stop_known'].fillna(False)
    # Now determine whether the edge as a whole has been seen before
    ref_edge_df['combination_known'] = True
    edge_df = edge_df.merge(ref_edge_df[['chrom', 'start', 'stop', 'strand',
                                         'combination_known']],
                            how='left', on=['chrom', 'strand', 'start', 'stop'])
    edge_df['combination_known'] = edge_df['combination_known'].fillna(False)
    return edge_df
# renames old index dupe column in df and resets the index
def reset_dupe_index(df, ind_name):
    """Undo set_dupe_index(): rename the in-frame copy of ``ind_name`` to
    ``ind_name + '_back'`` and restore the index as a regular column.
    Mutates and returns df."""
    backup = ind_name + '_back'
    df.rename({ind_name: backup}, inplace=True, axis=1)
    df.reset_index(inplace=True)
    return df
# set index, rename dupe index in df
def set_dupe_index(df, ind_name):
    """Promote ``ind_name`` to the index, then rename the backup column
    (created by create_dupe_index) back to ``ind_name`` so the values stay
    addressable as a column. Mutates and returns df."""
    df.set_index(ind_name, inplace=True)
    df.rename(columns={ind_name + '_back': ind_name}, inplace=True)
    return df
def format_edge_df(edge_df):
    """Renumber rows 0..n-1 and drop bookkeeping columns (edge_type, v1,
    v2). Mutates and returns edge_df."""
    edge_df.reset_index(drop=True, inplace=True)
    edge_df.drop(columns=['edge_type', 'v1', 'v2'], inplace=True)
    return edge_df
def find_tids_from_sj(edge_df, t_df, mode='intron'):
    """Annotate each edge with the comma-separated transcript IDs using it.

    A transcript path alternates exon/intron vertex pairs: even-indexed
    consecutive pairs are exons, odd-indexed pairs are introns.
    Mutates both frames; drops the edge_id column from edge_df.
    """
    def _vertex_pairs(path):
        # All consecutive (v_i, v_{i+1}) pairs along the path.
        return [(path[i], path[i+1]) for i in range(len(path[:-1]))]
    if mode == 'exon':
        t_df['edges'] = t_df.apply(lambda t: _vertex_pairs(t.path)[::2], axis=1)
    elif mode == 'intron':
        t_df['edges'] = t_df.apply(lambda t: _vertex_pairs(t.path)[1::2], axis=1)
    edge_df['tids'] = edge_df.apply(lambda e: add_tids_to_sj(e, t_df), axis=1)
    edge_df.reset_index(drop=True, inplace=True)
    edge_df.drop('edge_id', inplace=True, axis=1)
    return edge_df
def add_tids_to_sj(x, t_df):
    """Return a comma-joined string of the transcript IDs whose edge list
    contains this edge's edge_id."""
    matches = [tid for tid, edges in zip(t_df.tid, t_df.edges) if x.edge_id in edges]
    return ','.join(matches)
def main():
    """CLI entry point: label exons/introns in a query GTF/TALON db as
    known or novel relative to a reference GTF and write a per-edge TSV."""
    args = get_args()
    # Reference tables: only the edge table is needed for novelty labels.
    ref_loc_df, ref_edge_df, ref_t_df = create_dfs_gtf(args.ref_gtf)
    ref_edge_df = add_coord_info(ref_edge_df, ref_loc_df)
    ref_edge_df = subset_edges(ref_edge_df, mode=args.mode)
    ref_edge_df = format_edge_df(ref_edge_df)
    # Query tables: from a TALON db or a GTF (get_args forbids giving both).
    if args.db:
        loc_df, edge_df, t_df = create_dfs_db(args.db)
    elif args.gtf:
        loc_df, edge_df, t_df = create_dfs_gtf(args.gtf)
    edge_df = add_coord_info(edge_df, loc_df)
    edge_df = subset_edges(edge_df, mode=args.mode)
    edge_df = format_edge_df(edge_df)
    edge_df = determine_sj_novelty(ref_edge_df, edge_df)
    edge_df = find_tids_from_sj(edge_df, t_df, mode=args.mode)
    edge_df = edge_df.rename(columns={'tids': 'transcript_ids'})
    # Output: <outprefix>_introns.tsv or <outprefix>_exons.tsv
    edge_df.to_csv('{}_{}s.tsv'.format(args.outprefix, args.mode),
        sep='\t', index=False, columns=["chrom","start","stop",
                                        "strand", "start_known",
                                        "stop_known",
                                        "combination_known",
                                        "transcript_ids"])
if __name__ == '__main__':
    main()
| [
"pandas.DataFrame",
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.exists",
"sqlite3.connect"
] | [((296, 337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (319, 337), False, 'import argparse\n'), ((2141, 2160), 'sqlite3.connect', 'sqlite3.connect', (['db'], {}), '(db)\n', (2156, 2160), False, 'import sqlite3\n'), ((2276, 2361), 'pandas.DataFrame', 'pd.DataFrame', (['locs'], {'columns': "['location_ID', 'genome_build', 'chrom', 'position']"}), "(locs, columns=['location_ID', 'genome_build', 'chrom', 'position']\n )\n", (2288, 2361), True, 'import pandas as pd\n'), ((2695, 2770), 'pandas.DataFrame', 'pd.DataFrame', (['edges'], {'columns': "['edge_id', 'v1', 'v2', 'edge_type', 'strand']"}), "(edges, columns=['edge_id', 'v1', 'v2', 'edge_type', 'strand'])\n", (2707, 2770), True, 'import pandas as pd\n'), ((2986, 3000), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2998, 3000), True, 'import pandas as pd\n'), ((3634, 3650), 'numpy.asarray', 'np.asarray', (['tids'], {}), '(tids)\n', (3644, 3650), True, 'import numpy as np\n'), ((3667, 3684), 'numpy.asarray', 'np.asarray', (['paths'], {}), '(paths)\n', (3677, 3684), True, 'import numpy as np\n'), ((7803, 7821), 'pandas.DataFrame', 'pd.DataFrame', (['locs'], {}), '(locs)\n', (7815, 7821), True, 'import pandas as pd\n'), ((8028, 8047), 'pandas.DataFrame', 'pd.DataFrame', (['edges'], {}), '(edges)\n', (8040, 8047), True, 'import pandas as pd\n'), ((8176, 8201), 'pandas.DataFrame', 'pd.DataFrame', (['transcripts'], {}), '(transcripts)\n', (8188, 8201), True, 'import pandas as pd\n'), ((2032, 2050), 'os.path.exists', 'os.path.exists', (['db'], {}), '(db)\n', (2046, 2050), False, 'import os\n'), ((4348, 4372), 'os.path.exists', 'os.path.exists', (['gtf_file'], {}), '(gtf_file)\n', (4362, 4372), False, 'import os\n')] |
#coding:utf-8
import scipy.io as scio
import numpy as np
from PIL import Image
from scipy import misc as smisc
import argparse
import os
import imageio
def gaussian_filter(shape=(3, 3), sigma=1.0):
    """
    2D gaussian mask - should give the same result as MATLAB's
    fspecial('gaussian',[shape],[sigma])

    :param shape: (rows, cols) of the kernel; odd sizes keep it centred
    :param sigma: standard deviation of the Gaussian
    :return: float ndarray of the given shape, normalised to sum to 1
        (unless every entry was clipped to zero)
    """
    # BUG FIX: the default was a mutable list ([3, 3]); a tuple avoids the
    # shared-mutable-default pitfall while accepting the same calls.
    m, n = [(ss - 1.) / 2. for ss in shape]
    # Open grids of y/x offsets centred on the kernel middle.
    y, x = np.ogrid[-m:m + 1, -n:n + 1]
    h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))
    # Zero numerically negligible tails (mirrors MATLAB's fspecial).
    h[h < np.finfo(h.dtype).eps * h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h /= sumh
    return h
def density_map(k_size, sigma, im_file, points, imgid):
    """Build a crowd-density map by stamping a Gaussian kernel at every
    annotated point.

    :param k_size: Gaussian kernel side length (pixels)
    :param sigma: standard deviation of the Gaussian kernel
    :param im_file: image file (read only to obtain height/width)
    :param points: array, N x 2, of (x, y) point annotations
    :param imgid: image identifier (unused here; kept for the caller)
    :return: float ndarray of shape (h, w)
    """
    image = imageio.imread(im_file)
    im_sz = np.shape(image)
    # assert im_sz == 3 # RGB
    h = im_sz[0]
    w = im_sz[1]
    im_density = np.zeros(shape=[h, w])
    H = gaussian_filter(shape=[k_size, k_size], sigma=sigma)
    hk_size = int(k_size / 2)
    for j in range(points.shape[0]):
        x, y = map(int, points[j])
        # Clamp points sitting exactly on the right/bottom border.
        if x == w: x -= 1
        if y == h: y -= 1
        if x < 0 or y < 0 or x > w or y > h:
            continue
        # Window of the image that the kernel overlaps...
        min_img_x = max(0, x - hk_size)
        min_img_y = max(0, y - hk_size)
        # NOTE(review): clamping to w-1/h-1 excludes the last row/column, so
        # kernels near the right/bottom edge lose one line of mass -- confirm
        # whether w/h was intended.
        max_img_x = min(x + hk_size + 1, w - 1)
        max_img_y = min(y + hk_size + 1, h - 1)
        # ...and the matching sub-window of the kernel itself.
        kernel_x_min = (hk_size - x if x <= hk_size else 0)
        kernel_y_min = (hk_size - y if y <= hk_size else 0)
        kernel_x_max = kernel_x_min + max_img_x - min_img_x
        kernel_y_max = kernel_y_min + max_img_y - min_img_y
        im_density[min_img_y:max_img_y, min_img_x:max_img_x] += H[kernel_y_min:kernel_y_max, kernel_x_min:kernel_x_max]
    return im_density
def show_example(args, image_id, density):
    """Write the density map as a JPEG preview into args.example_dir."""
    out_path = os.path.join(args.example_dir, str(image_id) + '.jpg')
    imageio.imsave(out_path, density)
def main(args):
    """Convert point annotations into Gaussian density maps (.mat files),
    randomly split into train/test, and save JPEG previews."""
    label_dir = args.label_dir
    image_dir = args.image_dir
    save_dir = args.save_dir
    label_files = os.listdir(label_dir)
    print('Number of labeled images: %d' % len(label_files))
    for label_file in label_files:
        print(label_file)
        # Skip macOS resource-fork artifacts ("._" files).
        if '._' in label_file:
            continue
        # Label file names look like "<prefix>_<imageid>.txt".
        image_id = label_file.split('.')[0].split('_')[1]
        image_file = os.path.join(image_dir, image_id + '.jpg')
        with open(os.path.join(label_dir, label_file), 'r') as f:
            raw_labels = f.read()
        raw_labels = raw_labels.strip().split('\n')
        labels = np.zeros([len(raw_labels), 2])
        for i in range(len(raw_labels)):
            xy = raw_labels[i]
            # print(xy)
            try:
                # Each line: "<x> <y>"; malformed lines leave a (0, 0) row.
                x, y = xy.strip().split(' ')
            except:
                continue
            labels[i, 0] = int(x)
            labels[i, 1] = int(y)
        im_density = density_map(k_size=args.k_size, sigma=args.sigma, im_file=image_file, points=labels, imgid=image_id)
        # Random train/test split: train with probability prob_train.
        prob = np.random.uniform()
        if prob < args.prob_train:
            scio.savemat(os.path.join(save_dir, 'train', image_id + '.mat'), {'map': im_density})
        else:
            scio.savemat(os.path.join(save_dir, 'test', image_id + '.mat'), {'map': im_density})
        # assert abs(im_density.sum() - len(raw_labels)) < 1
        # NOTE(review): the min-max rescale divides by (max - min); a
        # constant density map would produce inf/nan pixels -- confirm
        # inputs always contain at least one point.
        show_example(args, image_id, 255 / (im_density.max() - im_density.min()) * (im_density - im_density.min()))
        print('%s Ok, NO. of people: %d, density map: %.5f' % (image_id, len(raw_labels), im_density.sum()))
if __name__ == '__main__':
    # Standalone smoke-test kept for reference (disabled):
    # im_file = 'imgs_0025.jpg'
    # with open(os.path.join('160labels', 'new_0025.txt'), 'r') as f:
    #     raw_labels = f.read()
    # raw_labels = raw_labels.strip().split('\n')
    # labels = np.zeros([len(raw_labels), 2])
    # for i in range(len(raw_labels)):
    #     xy = raw_labels[i]
    #     x, y = xy.strip().split(' ')
    #     labels[i, 0] = int(x)
    #     labels[i, 1] = int(y)
    # den = density_map(3.0, 1.0, im_file, labels)
    # import matplotlib.pyplot as plt
    # plt.imsave('show.png', den)
    # plt.show()
    # CLI: directories for labels/images/outputs plus kernel parameters.
    args = argparse.ArgumentParser()
    args.add_argument('--label_dir', type=str, default=r'D:\daily-info\temp\baidu\160labels', help='dir of label files')
    args.add_argument('--image_dir', type=str, default=r'\\169.255.73.1\gjy\人群计数原版视频数据\最终版整理数据集\processedDataStage1\imgs', help='dir of images')
    args.add_argument('--save_dir', type=str, default='.\save', help='dir to save mat files')
    args.add_argument('--example_dir', type=str, default='audioCountingData/stage1/example')
    args.add_argument('--prob_train', type=float, default=0.8)
    args.add_argument('--sigma', type=float, default=4.0)
    args.add_argument('--k_size', type=float, default=15.0)
    opt = args.parse_args()
    main(opt)
| [
"numpy.random.uniform",
"argparse.ArgumentParser",
"imageio.imread",
"numpy.zeros",
"numpy.shape",
"numpy.finfo",
"numpy.exp",
"os.path.join",
"os.listdir"
] | [((420, 468), 'numpy.exp', 'np.exp', (['(-(x * x + y * y) / (2.0 * sigma * sigma))'], {}), '(-(x * x + y * y) / (2.0 * sigma * sigma))\n', (426, 468), True, 'import numpy as np\n'), ((804, 827), 'imageio.imread', 'imageio.imread', (['im_file'], {}), '(im_file)\n', (818, 827), False, 'import imageio\n'), ((840, 855), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (848, 855), True, 'import numpy as np\n'), ((940, 962), 'numpy.zeros', 'np.zeros', ([], {'shape': '[h, w]'}), '(shape=[h, w])\n', (948, 962), True, 'import numpy as np\n'), ((2058, 2079), 'os.listdir', 'os.listdir', (['label_dir'], {}), '(label_dir)\n', (2068, 2079), False, 'import os\n'), ((4123, 4148), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4146, 4148), False, 'import argparse\n'), ((2333, 2375), 'os.path.join', 'os.path.join', (['image_dir', "(image_id + '.jpg')"], {}), "(image_dir, image_id + '.jpg')\n", (2345, 2375), False, 'import os\n'), ((2984, 3003), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3001, 3003), True, 'import numpy as np\n'), ((2394, 2429), 'os.path.join', 'os.path.join', (['label_dir', 'label_file'], {}), '(label_dir, label_file)\n', (2406, 2429), False, 'import os\n'), ((3064, 3114), 'os.path.join', 'os.path.join', (['save_dir', '"""train"""', "(image_id + '.mat')"], {}), "(save_dir, 'train', image_id + '.mat')\n", (3076, 3114), False, 'import os\n'), ((3176, 3225), 'os.path.join', 'os.path.join', (['save_dir', '"""test"""', "(image_id + '.mat')"], {}), "(save_dir, 'test', image_id + '.mat')\n", (3188, 3225), False, 'import os\n'), ((478, 495), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (486, 495), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from numpyviz import VisualArray
# Demo script: build a random (5, 7, 3) integer array and display it with
# numpyviz, printing a few coordinate grids along the way.
start = np.random.randint(99, size=(5,7,3))
height, width = start.shape[:2]
# Pixel-index grids: v is the row index, u the column index.
v, u = np.indices((height, width))
print(u)
print(v)
# Normalised coordinates in [-0.5, 0.5] (x) and aspect-corrected (y).
print(np.around(-0.5 + u / (width-1), 2))
print(np.around((-0.5 + v / (height-1)) * height / width, 2))
va = VisualArray(start)
cells = va.get_indices()
# Highlight every cell in yellow on a light-blue base colour.
va.set_colors(cells.T, color='yellow', basecolor='lightblue')
va.vizualize(fixview=True)
va.ax.set_title('array of shape (5,7,3)')
plt.show()
"matplotlib.pyplot.show",
"numpy.indices",
"numpy.around",
"numpy.random.randint",
"numpyviz.VisualArray"
] | [((93, 130), 'numpy.random.randint', 'np.random.randint', (['(99)'], {'size': '(5, 7, 3)'}), '(99, size=(5, 7, 3))\n', (110, 130), True, 'import numpy as np\n'), ((168, 195), 'numpy.indices', 'np.indices', (['(height, width)'], {}), '((height, width))\n', (178, 195), True, 'import numpy as np\n'), ((323, 341), 'numpyviz.VisualArray', 'VisualArray', (['start'], {}), '(start)\n', (334, 341), False, 'from numpyviz import VisualArray\n'), ((498, 508), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (506, 508), True, 'import matplotlib.pyplot as plt\n'), ((220, 256), 'numpy.around', 'np.around', (['(-0.5 + u / (width - 1))', '(2)'], {}), '(-0.5 + u / (width - 1), 2)\n', (229, 256), True, 'import numpy as np\n'), ((262, 318), 'numpy.around', 'np.around', (['((-0.5 + v / (height - 1)) * height / width)', '(2)'], {}), '((-0.5 + v / (height - 1)) * height / width, 2)\n', (271, 318), True, 'import numpy as np\n')] |
import itertools
from numbers import Number
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import numpy as np
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import Batch, BatchRequest, RuntimeBatchRequest
from great_expectations.rule_based_profiler.helpers.util import (
NP_EPSILON,
compute_bootstrap_quantiles,
compute_quantiles,
get_parameter_value_and_validate_return_type,
)
from great_expectations.rule_based_profiler.types import Domain, ParameterContainer
from great_expectations.util import is_numeric
from great_expectations.rule_based_profiler.parameter_builder.parameter_builder import ( # isort:skip
MetricComputationResult,
MetricValues,
MetricComputationDetails,
ParameterBuilder,
)
# Presumably caps the decimal precision used when rounding computed range
# bounds (usage is outside this view -- TODO confirm).
MAX_DECIMALS: int = 9
# Default number of bootstrap resamples for the "bootstrap" sampling_method.
DEFAULT_BOOTSTRAP_NUM_RESAMPLES: int = 9999
class NumericMetricRangeMultiBatchParameterBuilder(ParameterBuilder):
"""
A Multi-Batch implementation for obtaining the range estimation bounds for a resolved (evaluated) numeric metric,
using domain_kwargs, value_kwargs, metric_name, and false_positive_rate (tolerance) as arguments.
This Multi-Batch ParameterBuilder is general in the sense that any metric that computes numbers can be accommodated.
On the other hand, it is specific in the sense that the parameter names will always have the semantics of numeric
ranges, which will incorporate the requirements, imposed by the configured false_positive_rate tolerances.
The implementation supports two methods of estimating parameter values from data:
* bootstrapped (default) -- a statistical technique (see "https://en.wikipedia.org/wiki/Bootstrapping_(statistics)")
* one-shot -- assumes that metric values, computed on batch data, are normally distributed and computes the mean
and the standard error using the queried batches as the single sample of the distribution (fast, but inaccurate).
"""
RECOGNIZED_SAMPLING_METHOD_NAMES: set = {
"oneshot",
"bootstrap",
}
RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS: set = {
"lower_bound",
"upper_bound",
}
def __init__(
self,
name: str,
metric_name: str,
metric_domain_kwargs: Optional[Union[str, dict]] = None,
metric_value_kwargs: Optional[Union[str, dict]] = None,
sampling_method: str = "bootstrap",
enforce_numeric_metric: Union[str, bool] = True,
replace_nan_with_zero: Union[str, bool] = True,
reduce_scalar_metric: Union[str, bool] = True,
false_positive_rate: Union[str, float] = 5.0e-2,
num_bootstrap_samples: Optional[Union[str, int]] = None,
round_decimals: Optional[Union[str, int]] = None,
truncate_values: Optional[
Union[str, Dict[str, Union[Optional[int], Optional[float]]]]
] = None,
batch_list: Optional[List[Batch]] = None,
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None,
data_context: Optional["DataContext"] = None, # noqa: F821
):
"""
Args:
name: the name of this parameter -- this is user-specified parameter name (from configuration);
it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter."
and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>").
metric_name: the name of a metric used in MetricConfiguration (must be a supported and registered metric)
metric_domain_kwargs: used in MetricConfiguration
metric_value_kwargs: used in MetricConfiguration
sampling_method: choice of the sampling algorithm: "oneshot" (one observation) or "bootstrap" (default)
enforce_numeric_metric: used in MetricConfiguration to insure that metric computations return numeric values
replace_nan_with_zero: if False, then if the computed metric gives NaN, then exception is raised; otherwise,
if True (default), then if the computed metric gives NaN, then it is converted to the 0.0 (float) value.
reduce_scalar_metric: if True (default), then reduces computation of 1-dimensional metric to scalar value.
false_positive_rate: user-configured fraction between 0 and 1 expressing desired false positive rate for
identifying unexpected values as judged by the upper- and lower- quantiles of the observed metric data.
num_bootstrap_samples: Applicable only for the "bootstrap" sampling method -- if omitted (default), then
9999 is used (default in "https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html").
round_decimals: user-configured non-negative integer indicating the number of decimals of the
rounding precision of the computed parameter values (i.e., min_value, max_value) prior to packaging them on
output. If omitted, then no rounding is performed, unless the computed value is already an integer.
truncate_values: user-configured directive for whether or not to allow the computed parameter values
(i.e., lower_bound, upper_bound) to take on values outside the specified bounds when packaged on output.
batch_list: explicitly passed Batch objects for parameter computation (take precedence over batch_request).
batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
data_context: DataContext
"""
super().__init__(
name=name,
batch_list=batch_list,
batch_request=batch_request,
data_context=data_context,
)
self._metric_name = metric_name
self._metric_domain_kwargs = metric_domain_kwargs
self._metric_value_kwargs = metric_value_kwargs
self._sampling_method = sampling_method
self._enforce_numeric_metric = enforce_numeric_metric
self._replace_nan_with_zero = replace_nan_with_zero
self._reduce_scalar_metric = reduce_scalar_metric
self._false_positive_rate = false_positive_rate
self._num_bootstrap_samples = num_bootstrap_samples
self._round_decimals = round_decimals
if not truncate_values:
truncate_values = {
"lower_bound": None,
"upper_bound": None,
}
else:
if not isinstance(truncate_values, str):
truncate_values_keys: set = set(truncate_values.keys())
if (
not truncate_values_keys
<= NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS
):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Unrecognized truncate_values key(s) in {self.__class__.__name__}:
"{str(truncate_values_keys - NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS)}" \
detected.
"""
)
self._truncate_values = truncate_values
@property
def fully_qualified_parameter_name(self) -> str:
return f"$parameter.{self.name}"
"""
Full getter/setter accessors for needed properties are for configuring MetricMultiBatchParameterBuilder dynamically.
"""
@property
def metric_name(self) -> str:
return self._metric_name
@property
def metric_domain_kwargs(self) -> Optional[Union[str, dict]]:
return self._metric_domain_kwargs
@property
def metric_value_kwargs(self) -> Optional[Union[str, dict]]:
return self._metric_value_kwargs
@metric_value_kwargs.setter
def metric_value_kwargs(self, value: Optional[Union[str, dict]]) -> None:
self._metric_value_kwargs = value
@property
def sampling_method(self) -> str:
return self._sampling_method
@property
def enforce_numeric_metric(self) -> Union[str, bool]:
return self._enforce_numeric_metric
@property
def replace_nan_with_zero(self) -> Union[str, bool]:
return self._replace_nan_with_zero
@property
def reduce_scalar_metric(self) -> Union[str, bool]:
return self._reduce_scalar_metric
@property
def false_positive_rate(self) -> Union[str, float]:
return self._false_positive_rate
@property
def num_bootstrap_samples(self) -> Optional[Union[str, int]]:
return self._num_bootstrap_samples
@property
def round_decimals(self) -> Optional[Union[str, int]]:
return self._round_decimals
@property
def truncate_values(
self,
) -> Optional[Union[str, Dict[str, Union[Optional[int], Optional[float]]]]]:
return self._truncate_values
def _build_parameters(
self,
parameter_container: ParameterContainer,
domain: Domain,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Tuple[Any, dict]:
"""
Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional
details.
return: Tuple containing computed_parameter_value and parameter_computation_details metadata.
The algorithm operates according to the following steps:
1. Obtain batch IDs of interest using DataContext and BatchRequest (unless passed explicitly as argument). Note
that this specific BatchRequest was specified as part of configuration for the present ParameterBuilder class.
2. Set up metric_domain_kwargs and metric_value_kwargs (using configuration and/or variables and parameters).
3. Instantiate the Validator object corresponding to BatchRequest (with a temporary expectation_suite_name) in
order to have access to all Batch objects, on each of which the specified metric_name will be computed.
4. Perform metric computations and obtain the result in the array-like form (one metric value per each Batch).
5. Using the configured directives and heuristics, determine whether or not the ranges should be clipped.
6. Using the configured directives and heuristics, determine if return values should be rounded to an integer.
7. Convert the multi-dimensional metric computation results to a numpy array (for further computations).
Steps 8 -- 10 are for the "oneshot" sampling method only (the "bootstrap" method achieves same automatically):
8. Compute the mean and the standard deviation of the metric (aggregated over all the gathered Batch objects).
9. Compute number of standard deviations (as floating point) needed (around the mean) to achieve the specified
false_positive_rate (note that false_positive_rate of 0.0 would result in infinite number of standard deviations,
hence it is "nudged" by small quantity "epsilon" above 0.0 if false_positive_rate of 0.0 appears as argument).
(Please refer to "https://en.wikipedia.org/wiki/Normal_distribution" and references therein for background.)
10. Compute the "band" around the mean as the min_value and max_value (to be used in ExpectationConfiguration).
11. Return [low, high] for the desired metric as estimated by the specified sampling method.
12. Set up the arguments and call build_parameter_container() to store the parameter as part of "rule state".
"""
metric_computation_result: MetricComputationResult = self.get_metrics(
metric_name=self.metric_name,
metric_domain_kwargs=self.metric_domain_kwargs,
metric_value_kwargs=self.metric_value_kwargs,
enforce_numeric_metric=self.enforce_numeric_metric,
replace_nan_with_zero=self.replace_nan_with_zero,
domain=domain,
variables=variables,
parameters=parameters,
)
metric_values: MetricValues = metric_computation_result.metric_values
details: MetricComputationDetails = metric_computation_result.details
# Obtain sampling_method directive from "rule state" (i.e., variables and parameters); from instance variable otherwise.
sampling_method: str = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=self.sampling_method,
expected_return_type=str,
variables=variables,
parameters=parameters,
)
if (
sampling_method
not in NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_SAMPLING_METHOD_NAMES
):
raise ge_exceptions.ProfilerExecutionError(
message=f"""The directive "sampling_method" for {self.__class__.__name__} can be only one of
{NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_SAMPLING_METHOD_NAMES} ("{sampling_method}" was detected).
"""
)
estimator: Callable
etimator_kwargs: dict
if sampling_method == "bootstrap":
estimator = self._get_bootstrap_estimate
estimator_kwargs = {
"false_positive_rate": self.false_positive_rate,
"num_bootstrap_samples": self.num_bootstrap_samples,
}
else:
estimator = self._get_deterministic_estimate
estimator_kwargs = {
"false_positive_rate": self.false_positive_rate,
}
metric_value_range: np.ndarray = self._estimate_metric_value_range(
metric_values=cast(np.ndarray, metric_values),
estimator=estimator,
domain=domain,
variables=variables,
parameters=parameters,
**estimator_kwargs,
)
return (
{
"value_range": metric_value_range,
},
details,
)
def _estimate_metric_value_range(
self,
metric_values: np.ndarray,
estimator: Callable,
domain: Optional[Domain] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
**kwargs,
) -> np.ndarray:
"""
This method accepts an estimator Callable and data samples in the format "N x R^m", where "N" (most significant
dimension) is the number of measurements (e.g., one per Batch of data), while "R^m" is the multi-dimensional
metric, whose values are being estimated. Thus, for each element in the "R^m" hypercube, an "N"-dimensional
vector of sample measurements is constructed and given to the estimator to apply its specific algorithm for
computing the range of values in this vector. Estimator algorithms differ based on their use of data samples.
"""
truncate_values: Dict[str, Number] = self._get_truncate_values_using_heuristics(
metric_values=metric_values,
domain=domain,
variables=variables,
parameters=parameters,
)
lower_bound: Optional[float] = truncate_values.get("lower_bound")
upper_bound: Optional[float] = truncate_values.get("upper_bound")
round_decimals: int = self._get_round_decimals_using_heuristics(
metric_values=metric_values,
domain=domain,
variables=variables,
parameters=parameters,
)
min_value: Number
max_value: Number
lower_quantile: Number
upper_quantile: Number
# Outer-most dimension is data samples (e.g., one per Batch); the rest are dimensions of the actual metric.
metric_value_shape: tuple = metric_values.shape[1:]
# Generate all permutations of indexes for accessing every element of the multi-dimensional metric.
metric_value_shape_idx: int
axes: List[np.ndarray] = [
np.indices(dimensions=(metric_value_shape_idx,))[0]
for metric_value_shape_idx in metric_value_shape
]
metric_value_indices: List[tuple] = list(itertools.product(*tuple(axes)))
# Generate all permutations of indexes for accessing estimates of every element of the multi-dimensional metric.
# Prefixing multi-dimensional index with "(slice(None, None, None),)" is equivalent to "[:,]" access.
metric_value_idx: tuple
metric_value_vector_indices: List[tuple] = [
(slice(None, None, None),) + metric_value_idx
for metric_value_idx in metric_value_indices
]
# Since range includes min and max values, value range estimate contains 2-element least-significant dimension.
metric_value_range_shape: tuple = metric_value_shape + (2,)
# Initialize value range estimate for multi-dimensional metric to all trivial values (to be updated in situ).
metric_value_range: np.ndarray = np.zeros(shape=metric_value_range_shape)
metric_value_vector: np.ndarray
metric_value_range_min_idx: tuple
metric_value_range_max_idx: tuple
# Traverse indices of sample vectors corresponding to every element of multi-dimensional metric.
for metric_value_idx in metric_value_vector_indices:
# Obtain "N"-element-long vector of samples for each element of multi-dimensional metric.
metric_value_vector = metric_values[metric_value_idx]
if np.all(np.isclose(metric_value_vector, metric_value_vector[0])):
# Computation is unnecessary if distribution is degenerate.
lower_quantile = upper_quantile = metric_value_vector[0]
else:
# Compute low and high estimates for vector of samples for given element of multi-dimensional metric.
lower_quantile, upper_quantile = estimator(
metric_values=metric_value_vector,
domain=domain,
variables=variables,
parameters=parameters,
**kwargs,
)
if round_decimals == 0:
min_value = round(float(cast(float, lower_quantile)))
max_value = round(float(cast(float, upper_quantile)))
else:
min_value = round(float(cast(float, lower_quantile)), round_decimals)
max_value = round(float(cast(float, upper_quantile)), round_decimals)
if lower_bound is not None:
min_value = max(cast(float, min_value), lower_bound)
if upper_bound is not None:
max_value = min(cast(float, max_value), upper_bound)
# Obtain index of metric element (by discarding "N"-element samples dimension).
metric_value_idx = metric_value_idx[1:]
# Compute indices for min and max value range estimates.
metric_value_range_min_idx = metric_value_idx + (
slice(0, 1, None),
) # appends "[0]" element
metric_value_range_max_idx = metric_value_idx + (
slice(1, 2, None),
) # appends "[0]" element
# Store computed min and max value estimates into allocated range estimate for multi-dimensional metric.
metric_value_range[metric_value_range_min_idx] = min_value
metric_value_range[metric_value_range_max_idx] = max_value
# As a simplification, apply reduction to scalar in case of one-dimensional metric (for convenience).
if metric_value_range.shape[0] == 1:
metric_value_range = metric_value_range[0]
return metric_value_range
def _get_truncate_values_using_heuristics(
self,
metric_values: np.ndarray,
domain: Domain,
*,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Dict[str, Union[Optional[int], Optional[float]]]:
# Obtain truncate_values directive from "rule state" (i.e., variables and parameters); from instance variable otherwise.
truncate_values: Dict[
str, Optional[Number]
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=self.truncate_values,
expected_return_type=dict,
variables=variables,
parameters=parameters,
)
distribution_boundary: Optional[Union[int, float]]
if not all(
[
(
distribution_boundary is None
or is_numeric(value=distribution_boundary)
)
for distribution_boundary in truncate_values.values()
]
):
raise ge_exceptions.ProfilerExecutionError(
message=f"""The directive "truncate_values" for {self.__class__.__name__} must specify the
[lower_bound, upper_bound] closed interval, where either boundary is a numeric value (or None).
"""
)
lower_bound: Optional[Number] = truncate_values.get("lower_bound")
upper_bound: Optional[Number] = truncate_values.get("upper_bound")
if lower_bound is None and np.all(np.greater(metric_values, NP_EPSILON)):
lower_bound = 0.0
if upper_bound is None and np.all(np.less(metric_values, (-NP_EPSILON))):
upper_bound = 0.0
return {
"lower_bound": lower_bound,
"upper_bound": upper_bound,
}
def _get_round_decimals_using_heuristics(
self,
metric_values: np.ndarray,
domain: Domain,
*,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> int:
# Obtain round_decimals directive from "rule state" (i.e., variables and parameters); from instance variable otherwise.
round_decimals: Optional[
Union[Any]
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=self.round_decimals,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
if round_decimals is None:
round_decimals = MAX_DECIMALS
else:
if not isinstance(round_decimals, int) or (round_decimals < 0):
raise ge_exceptions.ProfilerExecutionError(
message=f"""The directive "round_decimals" for {self.__class__.__name__} can be 0 or a
positive integer, or must be omitted (or set to None).
"""
)
if np.issubdtype(metric_values.dtype, np.integer):
round_decimals = 0
return round_decimals
def _get_bootstrap_estimate(
self,
metric_values: np.ndarray,
domain: Domain,
*,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
**kwargs,
) -> Tuple[Number, Number]:
# Obtain false_positive_rate from "rule state" (i.e., variables and parameters); from instance variable otherwise.
false_positive_rate: np.float64 = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=kwargs.get("false_positive_rate", 5.0e-2),
expected_return_type=(float, np.float64),
variables=variables,
parameters=parameters,
)
if not (0.0 <= false_positive_rate <= 1.0):
raise ge_exceptions.ProfilerExecutionError(
message=f"The confidence level for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval."
)
# Obtain num_bootstrap_samples override from "rule state" (i.e., variables and parameters); from instance variable otherwise.
num_bootstrap_samples: Optional[
int
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=kwargs.get("num_bootstrap_samples"),
expected_return_type=None,
variables=variables,
parameters=parameters,
)
n_resamples: int
if num_bootstrap_samples is None:
n_resamples = DEFAULT_BOOTSTRAP_NUM_RESAMPLES
else:
n_resamples = num_bootstrap_samples
return compute_bootstrap_quantiles(
metric_values=metric_values,
false_positive_rate=false_positive_rate,
n_resamples=n_resamples,
)
def _get_deterministic_estimate(
self,
metric_values: np.ndarray,
domain: Domain,
*,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
**kwargs,
) -> Tuple[Number, Number]:
# Obtain false_positive_rate from "rule state" (i.e., variables and parameters); from instance variable otherwise.
false_positive_rate: np.float64 = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=kwargs.get("false_positive_rate", 5.0e-2),
expected_return_type=(float, np.float64),
variables=variables,
parameters=parameters,
)
if not (0.0 <= false_positive_rate <= 1.0):
raise ge_exceptions.ProfilerExecutionError(
message=f"The confidence level for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval."
)
return compute_quantiles(
metric_values=metric_values,
false_positive_rate=false_positive_rate,
)
| [
"typing.cast",
"great_expectations.util.is_numeric",
"numpy.zeros",
"numpy.greater",
"numpy.indices",
"great_expectations.exceptions.ProfilerExecutionError",
"great_expectations.rule_based_profiler.helpers.util.get_parameter_value_and_validate_return_type",
"numpy.isclose",
"great_expectations.rule_... | [((12420, 12599), 'great_expectations.rule_based_profiler.helpers.util.get_parameter_value_and_validate_return_type', 'get_parameter_value_and_validate_return_type', ([], {'domain': 'domain', 'parameter_reference': 'self.sampling_method', 'expected_return_type': 'str', 'variables': 'variables', 'parameters': 'parameters'}), '(domain=domain,\n parameter_reference=self.sampling_method, expected_return_type=str,\n variables=variables, parameters=parameters)\n', (12464, 12599), False, 'from great_expectations.rule_based_profiler.helpers.util import NP_EPSILON, compute_bootstrap_quantiles, compute_quantiles, get_parameter_value_and_validate_return_type\n'), ((17082, 17122), 'numpy.zeros', 'np.zeros', ([], {'shape': 'metric_value_range_shape'}), '(shape=metric_value_range_shape)\n', (17090, 17122), True, 'import numpy as np\n'), ((20330, 20510), 'great_expectations.rule_based_profiler.helpers.util.get_parameter_value_and_validate_return_type', 'get_parameter_value_and_validate_return_type', ([], {'domain': 'domain', 'parameter_reference': 'self.truncate_values', 'expected_return_type': 'dict', 'variables': 'variables', 'parameters': 'parameters'}), '(domain=domain,\n parameter_reference=self.truncate_values, expected_return_type=dict,\n variables=variables, parameters=parameters)\n', (20374, 20510), False, 'from great_expectations.rule_based_profiler.helpers.util import NP_EPSILON, compute_bootstrap_quantiles, compute_quantiles, get_parameter_value_and_validate_return_type\n'), ((22140, 22319), 'great_expectations.rule_based_profiler.helpers.util.get_parameter_value_and_validate_return_type', 'get_parameter_value_and_validate_return_type', ([], {'domain': 'domain', 'parameter_reference': 'self.round_decimals', 'expected_return_type': 'None', 'variables': 'variables', 'parameters': 'parameters'}), '(domain=domain,\n parameter_reference=self.round_decimals, expected_return_type=None,\n variables=variables, parameters=parameters)\n', (22184, 
22319), False, 'from great_expectations.rule_based_profiler.helpers.util import NP_EPSILON, compute_bootstrap_quantiles, compute_quantiles, get_parameter_value_and_validate_return_type\n'), ((22806, 22852), 'numpy.issubdtype', 'np.issubdtype', (['metric_values.dtype', 'np.integer'], {}), '(metric_values.dtype, np.integer)\n', (22819, 22852), True, 'import numpy as np\n'), ((24562, 24688), 'great_expectations.rule_based_profiler.helpers.util.compute_bootstrap_quantiles', 'compute_bootstrap_quantiles', ([], {'metric_values': 'metric_values', 'false_positive_rate': 'false_positive_rate', 'n_resamples': 'n_resamples'}), '(metric_values=metric_values,\n false_positive_rate=false_positive_rate, n_resamples=n_resamples)\n', (24589, 24688), False, 'from great_expectations.rule_based_profiler.helpers.util import NP_EPSILON, compute_bootstrap_quantiles, compute_quantiles, get_parameter_value_and_validate_return_type\n'), ((25731, 25823), 'great_expectations.rule_based_profiler.helpers.util.compute_quantiles', 'compute_quantiles', ([], {'metric_values': 'metric_values', 'false_positive_rate': 'false_positive_rate'}), '(metric_values=metric_values, false_positive_rate=\n false_positive_rate)\n', (25748, 25823), False, 'from great_expectations.rule_based_profiler.helpers.util import NP_EPSILON, compute_bootstrap_quantiles, compute_quantiles, get_parameter_value_and_validate_return_type\n'), ((12830, 13090), 'great_expectations.exceptions.ProfilerExecutionError', 'ge_exceptions.ProfilerExecutionError', ([], {'message': 'f"""The directive "sampling_method" for {self.__class__.__name__} can be only one of\n{NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_SAMPLING_METHOD_NAMES} ("{sampling_method}" was detected).\n"""'}), '(message=\n f"""The directive "sampling_method" for {self.__class__.__name__} can be only one of\n{NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_SAMPLING_METHOD_NAMES} ("{sampling_method}" was detected).\n"""\n )\n', (12866, 13090), True, 
'import great_expectations.exceptions as ge_exceptions\n'), ((20930, 21168), 'great_expectations.exceptions.ProfilerExecutionError', 'ge_exceptions.ProfilerExecutionError', ([], {'message': 'f"""The directive "truncate_values" for {self.__class__.__name__} must specify the\n[lower_bound, upper_bound] closed interval, where either boundary is a numeric value (or None).\n"""'}), '(message=\n f"""The directive "truncate_values" for {self.__class__.__name__} must specify the\n[lower_bound, upper_bound] closed interval, where either boundary is a numeric value (or None).\n"""\n )\n', (20966, 21168), True, 'import great_expectations.exceptions as ge_exceptions\n'), ((23723, 23874), 'great_expectations.exceptions.ProfilerExecutionError', 'ge_exceptions.ProfilerExecutionError', ([], {'message': 'f"""The confidence level for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval."""'}), "(message=\n f'The confidence level for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval.'\n )\n", (23759, 23874), True, 'import great_expectations.exceptions as ge_exceptions\n'), ((25543, 25694), 'great_expectations.exceptions.ProfilerExecutionError', 'ge_exceptions.ProfilerExecutionError', ([], {'message': 'f"""The confidence level for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval."""'}), "(message=\n f'The confidence level for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval.'\n )\n", (25579, 25694), True, 'import great_expectations.exceptions as ge_exceptions\n'), ((13733, 13764), 'typing.cast', 'cast', (['np.ndarray', 'metric_values'], {}), '(np.ndarray, metric_values)\n', (13737, 13764), False, 'from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n'), ((16087, 16135), 'numpy.indices', 'np.indices', ([], {'dimensions': '(metric_value_shape_idx,)'}), '(dimensions=(metric_value_shape_idx,))\n', (16097, 16135), True, 'import numpy as np\n'), ((17604, 17659), 'numpy.isclose', 'np.isclose', 
(['metric_value_vector', 'metric_value_vector[0]'], {}), '(metric_value_vector, metric_value_vector[0])\n', (17614, 17659), True, 'import numpy as np\n'), ((21383, 21420), 'numpy.greater', 'np.greater', (['metric_values', 'NP_EPSILON'], {}), '(metric_values, NP_EPSILON)\n', (21393, 21420), True, 'import numpy as np\n'), ((21496, 21531), 'numpy.less', 'np.less', (['metric_values', '(-NP_EPSILON)'], {}), '(metric_values, -NP_EPSILON)\n', (21503, 21531), True, 'import numpy as np\n'), ((22572, 22765), 'great_expectations.exceptions.ProfilerExecutionError', 'ge_exceptions.ProfilerExecutionError', ([], {'message': 'f"""The directive "round_decimals" for {self.__class__.__name__} can be 0 or a\npositive integer, or must be omitted (or set to None).\n"""'}), '(message=\n f"""The directive "round_decimals" for {self.__class__.__name__} can be 0 or a\npositive integer, or must be omitted (or set to None).\n"""\n )\n', (22608, 22765), True, 'import great_expectations.exceptions as ge_exceptions\n'), ((18669, 18691), 'typing.cast', 'cast', (['float', 'min_value'], {}), '(float, min_value)\n', (18673, 18691), False, 'from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n'), ((18779, 18801), 'typing.cast', 'cast', (['float', 'max_value'], {}), '(float, max_value)\n', (18783, 18801), False, 'from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n'), ((18306, 18333), 'typing.cast', 'cast', (['float', 'lower_quantile'], {}), '(float, lower_quantile)\n', (18310, 18333), False, 'from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n'), ((18376, 18403), 'typing.cast', 'cast', (['float', 'upper_quantile'], {}), '(float, upper_quantile)\n', (18380, 18403), False, 'from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n'), ((18464, 18491), 'typing.cast', 'cast', (['float', 'lower_quantile'], {}), '(float, lower_quantile)\n', (18468, 18491), False, 'from typing import Any, Callable, Dict, List, 
Optional, Tuple, Union, cast\n'), ((18550, 18577), 'typing.cast', 'cast', (['float', 'upper_quantile'], {}), '(float, upper_quantile)\n', (18554, 18577), False, 'from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n'), ((20759, 20798), 'great_expectations.util.is_numeric', 'is_numeric', ([], {'value': 'distribution_boundary'}), '(value=distribution_boundary)\n', (20769, 20798), False, 'from great_expectations.util import is_numeric\n')] |
import tensorflow as tf
import numpy as np
import copy
import lib.layer as layer
import lib.config as C
import param as P
class Policy_net:
def __init__(self, name: str, sess, ob_space, act_space_array, activation=tf.nn.relu):
"""
:param name: string
"""
self.sess = sess
self.map_width = 64
self.map_channels = C.MAP_CHANNELS
self.use_norm = False
self.sl_training = False
with tf.variable_scope(name):
self.obs = tf.placeholder(dtype=tf.float32, shape=[None, ob_space], name='obs')
self.map_data = tf.placeholder(dtype=tf.float32,
shape=[None, self.map_channels, self.map_width, self.map_width],
name="map_data")
with tf.variable_scope('policy_net'):
with tf.variable_scope('controller'):
layer_1 = layer.dense_layer(self.obs, 256, "DenseLayer1", func=activation)
layer_2 = layer.dense_layer(layer_1, 256, "DenseLayer2", func=activation)
self.controller_info = layer.dense_layer(layer_2, 64, "Info", func=None)
with tf.variable_scope('battle'):
self.minimap_info = self.cnn_map(self.map_data)
self.battle_info = tf.concat([self.controller_info, self.minimap_info], axis=1)
layer_5 = layer.dense_layer(self.battle_info, 256, "DenseLayer1", func=activation)
self.battle_probs = layer.dense_layer(layer_5, act_space_array[0], "battle_output",
func=tf.nn.softmax)
self.battle_act = tf.multinomial(tf.log(self.battle_probs), num_samples=1)
self.battle_act = tf.reshape(self.battle_act, shape=[-1])
layer_6 = layer.dense_layer(self.battle_info, 512, "PosLayer1", func=activation)
layer_7 = layer.dense_layer(layer_6, 256, "PosLayer2", func=activation)
self.battle_pos_probs = layer.dense_layer(layer_7, act_space_array[1], "battle_pos",
func=tf.nn.softmax)
self.battle_pos = tf.multinomial(tf.log(self.battle_pos_probs), num_samples=1)
self.battle_pos = tf.reshape(self.battle_pos, shape=[-1])
with tf.variable_scope('value_net'):
layer_1 = layer.dense_layer(self.obs, 256, "DenseLayer1", func=activation)
minimap_info = self.cnn_map(self.map_data)
layer_1 = tf.concat([layer_1, minimap_info], axis=1)
layer_2 = layer.dense_layer(layer_1, 128, "DenseLayer2", func=activation)
layer_3 = layer.dense_layer(layer_2, 128, "DenseLayer3", func=activation)
self.v_preds = layer.dense_layer(layer_3, 1, "DenseLayer4", func=None)
self.scope = tf.get_variable_scope().name
def get_action(self, obs, map, verbose=True):
battle_act_probs, battle_act, battle_pos_probs, battle_pos, v_preds \
= self.sess.run([self.battle_probs, self.battle_act, self.battle_pos_probs, self.battle_pos, self.v_preds],
feed_dict={self.obs: obs.reshape([1, -1]), self.map_data: map.reshape(
[1, self.map_channels, self.map_width, self.map_width])})
if verbose:
print("Battle:", 'act_probs:', battle_act_probs, 'act:', battle_act)
print("Battle:", 'pos_probs:', battle_pos_probs, 'pos:', battle_pos)
print("Value:", v_preds)
print("\n")
return battle_act[0], battle_pos[0], v_preds[0]
def get_values(self, obs, map):
v_preds = self.sess.run(self.v_preds, feed_dict={self.obs: obs.reshape([1, -1]),
self.map_data: map.reshape(
[1, self.map_channels, self.map_width, self.map_width])})
v_preds = np.asscalar(v_preds)
return v_preds
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def cnn_map(self, map_data, trainable=True):
map_data = tf.transpose(map_data, [0, 2, 3, 1])
with tf.variable_scope("cnn"):
if self.use_norm:
map_data = layer.batch_norm(map_data, self.sl_training, "BN", trainable=trainable)
c1 = layer.conv2d_layer(map_data, 3, 32, "Conv1", trainable=trainable)
c1 = layer.max_pool(c1)
if self.use_norm:
c1 = layer.batch_norm(c1, self.sl_training, "Norm1", trainable=trainable)
c2 = layer.conv2d_layer(c1, 3, 64, "Conv2", trainable=trainable)
c2 = layer.max_pool(c2)
if self.use_norm:
c2 = layer.batch_norm(c2, self.sl_training, "Norm2", trainable=trainable)
c3 = layer.conv2d_layer(c2, 3, 64, "Conv3", trainable=trainable)
c3 = layer.max_pool(c3)
if self.use_norm:
c3 = layer.batch_norm(c3, self.sl_training, "Norm3", trainable=trainable)
c4 = layer.conv2d_layer(c3, 3, 3, "Conv4", trainable=trainable)
if self.use_norm:
c4 = layer.batch_norm(c4, self.sl_training, "Norm4", trainable=trainable)
c4 = tf.reshape(c4, [-1, self.map_width * 3])
return c4
class PPOTrain:
    """Builds the PPO update graph over a current `Policy` and an `Old_Policy`
    network pair: clipped surrogate objective, squared-error value loss and an
    entropy bonus, plus per-worker summary accumulators for distributed runs.
    """

    def __init__(self, name, sess, Policy, Old_Policy, gamma=0.995, clip_value=0.2, c_1=0.01, c_2=1e-6, epoch_num=20):
        """
        :param name: variable scope wrapping every training op built here
        :param sess: session used by all run helpers of this class
        :param Policy: network being optimized
        :param Old_Policy: frozen copy used for the importance-sampling ratio
        :param gamma: discount factor (overridden below by P.gamma)
        :param clip_value: PPO clip range (overridden below by P.clip_value)
        :param c_1: parameter for value difference (overridden by P.c_1)
        :param c_2: parameter for entropy bonus (overridden by P.c_2)
        :param epoch_num: num for update
        """
        self.Policy = Policy
        self.Old_Policy = Old_Policy
        self.sess = sess
        self.epoch_num = epoch_num
        # NOTE(review): the gamma/clip_value/c_1/c_2 constructor arguments are
        # ignored -- every hyperparameter below is read from the global config
        # object `P` (defined elsewhere in this module).
        self.gamma = P.gamma
        self.lamda = P.lamda
        self.batch_size = P.batch_size
        self.clip_value = P.clip_value
        self.c_1 = P.c_1
        self.c_2 = P.c_2
        self.adam_lr = P.lr
        self.restore_model = P.restore_model
        self.adam_epsilon = 1e-5
        self.update_count = 0
        with tf.variable_scope(name):
            pi_trainable = self.Policy.get_trainable_variables()
            old_pi_trainable = self.Old_Policy.get_trainable_variables()
            # assign_operations for policy parameter values to old policy parameters
            with tf.variable_scope('assign_op'):
                self.assign_ops = []
                for v_old, v in zip(old_pi_trainable, pi_trainable):
                    self.assign_ops.append(tf.assign(v_old, v))
            # inputs for train_op
            with tf.variable_scope('train_inp'):
                self.battle_actions = tf.placeholder(dtype=tf.int32, shape=[None], name='battle_actions')
                self.battle_pos = tf.placeholder(dtype=tf.int32, shape=[None], name='battle_pos')
                self.rewards = tf.placeholder(dtype=tf.float32, shape=[None], name='rewards')
                self.v_preds_next = tf.placeholder(dtype=tf.float32, shape=[None], name='v_preds_next')
                self.gaes = tf.placeholder(dtype=tf.float32, shape=[None], name='gaes')
                self.returns = tf.placeholder(dtype=tf.float32, shape=[None], name='returns')
                # define distribute variable
                # (scalar accumulators summed across workers; the summaries
                # below divide them by proc_num to report per-worker means)
                self.returns_sum = tf.get_variable(name="returns_sum", shape=[], initializer=tf.zeros_initializer)
                self.loss_p_sum = tf.get_variable(name="loss_p_sum", shape=[], initializer=tf.zeros_initializer)
                self.loss_v_sum = tf.get_variable(name="loss_v_sum", shape=[], initializer=tf.zeros_initializer)
                self.loss_e_sum = tf.get_variable(name="loss_e_sum", shape=[], initializer=tf.zeros_initializer)
                self.loss_all_sum = tf.get_variable(name="loss_all_sum", shape=[], initializer=tf.zeros_initializer)
                self.proc_num = tf.get_variable(name="proc_num", shape=[], initializer=tf.zeros_initializer)
            battle_act_probs = self.Policy.battle_probs
            battle_act_probs_old = self.Old_Policy.battle_probs
            battle_pos_probs = self.Policy.battle_pos_probs
            battle_pos_probs_old = self.Old_Policy.battle_pos_probs
            # probabilities of actions which agent took with policy
            battle_act_probs = battle_act_probs * tf.one_hot(indices=self.battle_actions,
                                                             depth=battle_act_probs.shape[1])
            battle_act_probs = tf.reduce_sum(battle_act_probs, axis=1)
            battle_pos_probs = battle_pos_probs * tf.one_hot(indices=self.battle_pos, depth=battle_pos_probs.shape[1])
            battle_pos_probs = tf.reduce_sum(battle_pos_probs, axis=1)
            # joint probability of the (action, position) pair actually taken
            act_probs = battle_act_probs * battle_pos_probs
            # probabilities of actions which agent took with old policy
            battle_act_probs_old = battle_act_probs_old * tf.one_hot(indices=self.battle_actions,
                                                                     depth=battle_act_probs_old.shape[1])
            battle_act_probs_old = tf.reduce_sum(battle_act_probs_old, axis=1)
            battle_pos_probs_old = battle_pos_probs_old * tf.one_hot(indices=self.battle_pos,
                                                                     depth=battle_pos_probs_old.shape[1])
            battle_pos_probs_old = tf.reduce_sum(battle_pos_probs_old, axis=1)
            act_probs_old = battle_act_probs_old * battle_pos_probs_old
            with tf.variable_scope('loss'):
                # construct computation graph for loss_clip
                # ratios = tf.divide(act_probs, act_probs_old)
                # ratio computed in log space with clipping to avoid division
                # by (near-)zero old-policy probabilities
                ratios = tf.exp(tf.log(tf.clip_by_value(act_probs, 1e-10, 1.0)) - tf.log(
                    tf.clip_by_value(act_probs_old, 1e-10, 1.0)))
                clipped_ratios = tf.clip_by_value(ratios, clip_value_min=1 - self.clip_value,
                                                  clip_value_max=1 + self.clip_value)
                loss_clip = tf.minimum(tf.multiply(self.gaes, ratios), tf.multiply(self.gaes, clipped_ratios))
                self.loss_clip = -tf.reduce_mean(loss_clip)
                # construct computation graph for loss of entropy bonus
                battle_act_entropy = -tf.reduce_sum(
                    self.Policy.battle_probs * tf.log(tf.clip_by_value(self.Policy.battle_probs, 1e-10, 1.0)), axis=1)
                battle_pos_entropy = -tf.reduce_sum(
                    self.Policy.battle_pos_probs * tf.log(tf.clip_by_value(self.Policy.battle_pos_probs, 1e-10, 1.0)),
                    axis=1)
                entropy = battle_act_entropy + battle_pos_entropy
                self.entropy = tf.reduce_mean(entropy, axis=0)  # mean of entropy of pi(obs)
                # construct computation graph for loss of value function
                # (TD-style target: r + gamma * V(s'))
                v_preds = self.Policy.v_preds
                loss_vf = tf.squared_difference(self.rewards + self.gamma * self.v_preds_next, v_preds)
                self.loss_vf = tf.reduce_mean(loss_vf)
                # construct computation graph for loss
                self.total_loss = self.loss_clip + self.c_1 * self.loss_vf - self.c_2 * self.entropy
                # distributed summaries: each accumulator divided by the worker
                # count (epsilon guards against proc_num == 0)
                self.sum_mean_returns = tf.summary.scalar('mean_return_dis',
                                                          self.returns_sum / (self.proc_num + 0.0001))
                self.sum_p_loss = tf.summary.scalar('policy_loss_dis', self.loss_p_sum / (self.proc_num + 0.0001))
                self.sum_v_loss = tf.summary.scalar('value_loss_dis', self.loss_v_sum / (self.proc_num + 0.0001))
                self.sum_e_loss = tf.summary.scalar('entropy_loss_dis', self.loss_e_sum / (self.proc_num + 0.0001))
                self.sum_total_loss = tf.summary.scalar('total_loss_dis', self.loss_all_sum / (self.proc_num + 0.0001))
                self.merged_dis = tf.summary.merge(
                    [self.sum_mean_returns, self.sum_p_loss, self.sum_v_loss, self.sum_e_loss,
                     self.sum_total_loss])
                optimizer = tf.train.AdamOptimizer(learning_rate=self.adam_lr, epsilon=self.adam_epsilon)
                self.gradients = optimizer.compute_gradients(self.total_loss, var_list=pi_trainable)
                self.train_op = optimizer.minimize(self.total_loss, var_list=pi_trainable)
                # value-only update used during warm-up (see ppo_train_dis)
                self.train_value_op = optimizer.minimize(self.loss_vf, var_list=pi_trainable)

    def get_loss(self, obs, map_data, battle_actions, battle_pos, gaes, rewards, v_preds_next):
        """Evaluate (policy, value, entropy, total) losses on a batch without updating."""
        loss_p, loss_v, loss_e, loss_all = self.sess.run([self.loss_clip, self.loss_vf, self.entropy, self.total_loss],
                                                         feed_dict={self.Policy.obs: obs,
                                                                    self.Policy.map_data: map_data,
                                                                    self.Old_Policy.obs: obs,
                                                                    self.Old_Policy.map_data: map_data,
                                                                    self.battle_actions: battle_actions,
                                                                    self.battle_pos: battle_pos,
                                                                    self.rewards: rewards,
                                                                    self.v_preds_next: v_preds_next,
                                                                    self.gaes: gaes}
                                                         )
        return loss_p, loss_v, loss_e, loss_all

    def train(self, obs, map_data, battle_actions, battle_pos, gaes, rewards, v_preds_next):
        """Run one gradient step on the full PPO objective; returns the total loss."""
        _, total_loss = self.sess.run([self.train_op, self.total_loss], feed_dict={self.Policy.obs: obs,
                                                                                   self.Policy.map_data: map_data,
                                                                                   self.Old_Policy.obs: obs,
                                                                                   self.Old_Policy.map_data: map_data,
                                                                                   self.battle_actions: battle_actions,
                                                                                   self.battle_pos: battle_pos,
                                                                                   self.rewards: rewards,
                                                                                   self.v_preds_next: v_preds_next,
                                                                                   self.gaes: gaes})
        return total_loss

    def train_value(self, obs, map_data, gaes, rewards, v_preds_next):
        """Run one gradient step on the value loss only; returns the value loss."""
        _, value_loss = self.sess.run([self.train_value_op, self.loss_vf], feed_dict={self.Policy.obs: obs,
                                                                                      self.Policy.map_data: map_data,
                                                                                      self.Old_Policy.obs: obs,
                                                                                      self.Old_Policy.map_data: map_data,
                                                                                      self.rewards: rewards,
                                                                                      self.v_preds_next: v_preds_next,
                                                                                      self.gaes: gaes})
        return value_loss

    def get_summary_dis(self):
        """Evaluate and return the merged distributed-summary protobuf."""
        return self.sess.run(self.merged_dis)

    def assign_policy_parameters(self):
        """Copy the current policy's parameters into the old policy."""
        # assign policy parameter values to old policy parameters
        return self.sess.run(self.assign_ops)

    def reset_mean_returns(self):
        """Zero every distributed accumulator (returns, losses, worker count)."""
        self.sess.run(self.returns_sum.assign(0))
        self.sess.run(self.loss_p_sum.assign(0))
        self.sess.run(self.loss_v_sum.assign(0))
        self.sess.run(self.loss_e_sum.assign(0))
        self.sess.run(self.loss_all_sum.assign(0))
        self.sess.run(self.proc_num.assign(0))

    def get_gaes(self, rewards, v_preds, v_preds_next):
        """Compute generalized advantage estimates for one trajectory.

        :param rewards: per-step rewards
        :param v_preds: value predictions V(s_t)
        :param v_preds_next: value predictions V(s_{t+1})
        :return: list of GAE advantages, same length as rewards
        """
        deltas = [r_t + self.gamma * v_next - v for r_t, v_next, v in zip(rewards, v_preds_next, v_preds)]
        # calculate generative advantage estimator(lambda = 1), see ppo paper eq(11)
        gaes = copy.deepcopy(deltas)
        for t in reversed(range(len(gaes) - 1)):  # is T-1, where T is time step which run policy
            gaes[t] = gaes[t] + self.gamma * self.lamda * gaes[t + 1]
        return gaes

    def ppo_train_dis(self, observations, map_data, battle_actions, battle_pos, rewards, v_preds, v_preds_next,
                      gaes, returns, verbose=False):
        """Run one PPO training round on a rollout and update the distributed
        summary accumulators.

        Performs epoch_num + 10 minibatch gradient steps (value-only steps
        during warm-up when a restored model is used), then evaluates losses
        on a fresh 64-sample minibatch and adds them -- plus the mean return
        and a worker-count increment -- to the shared accumulators.
        """
        if verbose:
            print('PPO train now..........')
        # convert list to numpy array for feeding tf.placeholder
        # ob_space = C._SIZE_HIGH_NET_INPUT + C._SIZE_HIGH_NET_OUT + C._SIZE_POP_NET_INPUT
        # observations = np.reshape(observations, newshape=[-1] + list(ob_space))
        observations = np.array(observations).astype(dtype=np.float32)
        map_data = np.array(map_data).astype(dtype=np.float32)
        battle_actions = np.array(battle_actions).astype(dtype=np.int32)
        battle_pos = np.array(battle_pos).astype(dtype=np.int32)
        gaes = np.array(gaes).astype(dtype=np.float32).reshape(-1)
        # NOTE(review): normalization divides by gaes.std(); a constant
        # advantage batch would divide by zero -- assumed nonzero here.
        gaes = (gaes - gaes.mean()) / gaes.std()
        rewards = np.array(rewards).astype(dtype=np.float32).reshape(-1)
        v_preds_next = np.array(v_preds_next).astype(dtype=np.float32).reshape(-1)
        inp = [observations, map_data, battle_actions, battle_pos, gaes, rewards, v_preds_next]
        if observations.shape[0] <= 0:
            return
        # self.assign_policy_parameters()
        # train
        # batch_size = max(observations.shape[0] // 10, self.batch_size)
        batch_size = self.batch_size
        # print('batch_size is:', batch_size)
        for epoch in range(self.epoch_num + 10):
            # sample indices from [low, high)
            sample_indices = np.random.randint(low=0, high=observations.shape[0], size=batch_size)
            sampled_inp = [np.take(a=a, indices=sample_indices, axis=0) for a in inp]  # sample training data
            if self.restore_model and self.update_count < -1:
                value_loss = self.train_value(obs=sampled_inp[0],
                                              map_data=sampled_inp[1],
                                              gaes=sampled_inp[4],
                                              rewards=sampled_inp[5],
                                              v_preds_next=sampled_inp[6])
            else:
                total_loss = self.train(obs=sampled_inp[0],
                                        map_data=sampled_inp[1],
                                        battle_actions=sampled_inp[2],
                                        battle_pos=sampled_inp[3],
                                        gaes=sampled_inp[4],
                                        rewards=sampled_inp[5],
                                        v_preds_next=sampled_inp[6])
                if verbose:
                    print('total_loss:', total_loss)
        self.update_count += 1
        # evaluate the post-update losses on a fresh minibatch for reporting
        sample_indices = np.random.randint(low=0, high=observations.shape[0], size=64)
        sampled_inp = [np.take(a=a, indices=sample_indices, axis=0) for a in inp]  # sample training data
        loss_p, loss_v, loss_e, loss_all = self.get_loss(obs=sampled_inp[0],
                                                         map_data=sampled_inp[1],
                                                         battle_actions=sampled_inp[2],
                                                         battle_pos=sampled_inp[3],
                                                         gaes=sampled_inp[4],
                                                         rewards=sampled_inp[5],
                                                         v_preds_next=sampled_inp[6])
        self.sess.run(self.loss_p_sum.assign_add(loss_p))
        self.sess.run(self.loss_v_sum.assign_add(loss_v))
        self.sess.run(self.loss_e_sum.assign_add(loss_e))
        self.sess.run(self.loss_all_sum.assign_add(loss_all))
        print('returns:', returns)
        print('np.mean(returns):', np.mean(returns))
        self.sess.run(self.returns_sum.assign_add(np.mean(returns)))
        self.sess.run(self.proc_num.assign_add(1))
        if verbose:
            print('PPO train end..........')
        return
| [
"lib.layer.max_pool",
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"tensorflow.get_collection",
"tensorflow.reshape",
"tensorflow.get_variable_scope",
"tensorflow.multiply",
"tensorflow.assign",
"numpy.random.randint",
"numpy.mean",
"lib.layer.batch_norm",
"tensorflow.summary.merge",
... | [((4108, 4128), 'numpy.asscalar', 'np.asscalar', (['v_preds'], {}), '(v_preds)\n', (4119, 4128), True, 'import numpy as np\n'), ((4197, 4257), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES', 'self.scope'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)\n', (4214, 4257), True, 'import tensorflow as tf\n'), ((4313, 4376), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', 'self.scope'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)\n', (4330, 4376), True, 'import tensorflow as tf\n'), ((4447, 4483), 'tensorflow.transpose', 'tf.transpose', (['map_data', '[0, 2, 3, 1]'], {}), '(map_data, [0, 2, 3, 1])\n', (4459, 4483), True, 'import tensorflow as tf\n'), ((16846, 16867), 'copy.deepcopy', 'copy.deepcopy', (['deltas'], {}), '(deltas)\n', (16859, 16867), False, 'import copy\n'), ((19772, 19833), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'observations.shape[0]', 'size': '(64)'}), '(low=0, high=observations.shape[0], size=64)\n', (19789, 19833), True, 'import numpy as np\n'), ((460, 483), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (477, 483), True, 'import tensorflow as tf\n'), ((508, 576), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, ob_space]', 'name': '"""obs"""'}), "(dtype=tf.float32, shape=[None, ob_space], name='obs')\n", (522, 576), True, 'import tensorflow as tf\n'), ((605, 724), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, self.map_channels, self.map_width, self.map_width]', 'name': '"""map_data"""'}), "(dtype=tf.float32, shape=[None, self.map_channels, self.\n map_width, self.map_width], name='map_data')\n", (619, 724), True, 'import tensorflow as tf\n'), ((4497, 4521), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cnn"""'], {}), "('cnn')\n", (4514, 4521), True, 'import tensorflow as tf\n'), ((4669, 4734), 
'lib.layer.conv2d_layer', 'layer.conv2d_layer', (['map_data', '(3)', '(32)', '"""Conv1"""'], {'trainable': 'trainable'}), "(map_data, 3, 32, 'Conv1', trainable=trainable)\n", (4687, 4734), True, 'import lib.layer as layer\n'), ((4752, 4770), 'lib.layer.max_pool', 'layer.max_pool', (['c1'], {}), '(c1)\n', (4766, 4770), True, 'import lib.layer as layer\n'), ((4909, 4968), 'lib.layer.conv2d_layer', 'layer.conv2d_layer', (['c1', '(3)', '(64)', '"""Conv2"""'], {'trainable': 'trainable'}), "(c1, 3, 64, 'Conv2', trainable=trainable)\n", (4927, 4968), True, 'import lib.layer as layer\n'), ((4986, 5004), 'lib.layer.max_pool', 'layer.max_pool', (['c2'], {}), '(c2)\n', (5000, 5004), True, 'import lib.layer as layer\n'), ((5144, 5203), 'lib.layer.conv2d_layer', 'layer.conv2d_layer', (['c2', '(3)', '(64)', '"""Conv3"""'], {'trainable': 'trainable'}), "(c2, 3, 64, 'Conv3', trainable=trainable)\n", (5162, 5203), True, 'import lib.layer as layer\n'), ((5221, 5239), 'lib.layer.max_pool', 'layer.max_pool', (['c3'], {}), '(c3)\n', (5235, 5239), True, 'import lib.layer as layer\n'), ((5379, 5437), 'lib.layer.conv2d_layer', 'layer.conv2d_layer', (['c3', '(3)', '(3)', '"""Conv4"""'], {'trainable': 'trainable'}), "(c3, 3, 3, 'Conv4', trainable=trainable)\n", (5397, 5437), True, 'import lib.layer as layer\n'), ((5576, 5616), 'tensorflow.reshape', 'tf.reshape', (['c4', '[-1, self.map_width * 3]'], {}), '(c4, [-1, self.map_width * 3])\n', (5586, 5616), True, 'import tensorflow as tf\n'), ((6501, 6524), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (6518, 6524), True, 'import tensorflow as tf\n'), ((8898, 8937), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['battle_act_probs'], {'axis': '(1)'}), '(battle_act_probs, axis=1)\n', (8911, 8937), True, 'import tensorflow as tf\n'), ((9088, 9127), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['battle_pos_probs'], {'axis': '(1)'}), '(battle_pos_probs, axis=1)\n', (9101, 9127), True, 'import tensorflow as tf\n'), ((9501, 
9544), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['battle_act_probs_old'], {'axis': '(1)'}), '(battle_act_probs_old, axis=1)\n', (9514, 9544), True, 'import tensorflow as tf\n'), ((9780, 9823), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['battle_pos_probs_old'], {'axis': '(1)'}), '(battle_pos_probs_old, axis=1)\n', (9793, 9823), True, 'import tensorflow as tf\n'), ((12289, 12406), 'tensorflow.summary.merge', 'tf.summary.merge', (['[self.sum_mean_returns, self.sum_p_loss, self.sum_v_loss, self.sum_e_loss,\n self.sum_total_loss]'], {}), '([self.sum_mean_returns, self.sum_p_loss, self.sum_v_loss,\n self.sum_e_loss, self.sum_total_loss])\n', (12305, 12406), True, 'import tensorflow as tf\n'), ((12462, 12539), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.adam_lr', 'epsilon': 'self.adam_epsilon'}), '(learning_rate=self.adam_lr, epsilon=self.adam_epsilon)\n', (12484, 12539), True, 'import tensorflow as tf\n'), ((18566, 18635), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'observations.shape[0]', 'size': 'batch_size'}), '(low=0, high=observations.shape[0], size=batch_size)\n', (18583, 18635), True, 'import numpy as np\n'), ((19857, 19901), 'numpy.take', 'np.take', ([], {'a': 'a', 'indices': 'sample_indices', 'axis': '(0)'}), '(a=a, indices=sample_indices, axis=0)\n', (19864, 19901), True, 'import numpy as np\n'), ((20823, 20839), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (20830, 20839), True, 'import numpy as np\n'), ((824, 855), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""policy_net"""'], {}), "('policy_net')\n", (841, 855), True, 'import tensorflow as tf\n'), ((2447, 2477), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""value_net"""'], {}), "('value_net')\n", (2464, 2477), True, 'import tensorflow as tf\n'), ((2505, 2569), 'lib.layer.dense_layer', 'layer.dense_layer', (['self.obs', '(256)', '"""DenseLayer1"""'], {'func': 'activation'}), "(self.obs, 256, 
'DenseLayer1', func=activation)\n", (2522, 2569), True, 'import lib.layer as layer\n'), ((2656, 2698), 'tensorflow.concat', 'tf.concat', (['[layer_1, minimap_info]'], {'axis': '(1)'}), '([layer_1, minimap_info], axis=1)\n', (2665, 2698), True, 'import tensorflow as tf\n'), ((2726, 2789), 'lib.layer.dense_layer', 'layer.dense_layer', (['layer_1', '(128)', '"""DenseLayer2"""'], {'func': 'activation'}), "(layer_1, 128, 'DenseLayer2', func=activation)\n", (2743, 2789), True, 'import lib.layer as layer\n'), ((2816, 2879), 'lib.layer.dense_layer', 'layer.dense_layer', (['layer_2', '(128)', '"""DenseLayer3"""'], {'func': 'activation'}), "(layer_2, 128, 'DenseLayer3', func=activation)\n", (2833, 2879), True, 'import lib.layer as layer\n'), ((2911, 2966), 'lib.layer.dense_layer', 'layer.dense_layer', (['layer_3', '(1)', '"""DenseLayer4"""'], {'func': 'None'}), "(layer_3, 1, 'DenseLayer4', func=None)\n", (2928, 2966), True, 'import lib.layer as layer\n'), ((2993, 3016), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (3014, 3016), True, 'import tensorflow as tf\n'), ((4580, 4651), 'lib.layer.batch_norm', 'layer.batch_norm', (['map_data', 'self.sl_training', '"""BN"""'], {'trainable': 'trainable'}), "(map_data, self.sl_training, 'BN', trainable=trainable)\n", (4596, 4651), True, 'import lib.layer as layer\n'), ((4822, 4890), 'lib.layer.batch_norm', 'layer.batch_norm', (['c1', 'self.sl_training', '"""Norm1"""'], {'trainable': 'trainable'}), "(c1, self.sl_training, 'Norm1', trainable=trainable)\n", (4838, 4890), True, 'import lib.layer as layer\n'), ((5057, 5125), 'lib.layer.batch_norm', 'layer.batch_norm', (['c2', 'self.sl_training', '"""Norm2"""'], {'trainable': 'trainable'}), "(c2, self.sl_training, 'Norm2', trainable=trainable)\n", (5073, 5125), True, 'import lib.layer as layer\n'), ((5292, 5360), 'lib.layer.batch_norm', 'layer.batch_norm', (['c3', 'self.sl_training', '"""Norm3"""'], {'trainable': 'trainable'}), "(c3, self.sl_training, 'Norm3', 
trainable=trainable)\n", (5308, 5360), True, 'import lib.layer as layer\n'), ((5489, 5557), 'lib.layer.batch_norm', 'layer.batch_norm', (['c4', 'self.sl_training', '"""Norm4"""'], {'trainable': 'trainable'}), "(c4, self.sl_training, 'Norm4', trainable=trainable)\n", (5505, 5557), True, 'import lib.layer as layer\n'), ((6767, 6797), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""assign_op"""'], {}), "('assign_op')\n", (6784, 6797), True, 'import tensorflow as tf\n'), ((7021, 7051), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""train_inp"""'], {}), "('train_inp')\n", (7038, 7051), True, 'import tensorflow as tf\n'), ((7091, 7158), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None]', 'name': '"""battle_actions"""'}), "(dtype=tf.int32, shape=[None], name='battle_actions')\n", (7105, 7158), True, 'import tensorflow as tf\n'), ((7193, 7256), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None]', 'name': '"""battle_pos"""'}), "(dtype=tf.int32, shape=[None], name='battle_pos')\n", (7207, 7256), True, 'import tensorflow as tf\n'), ((7289, 7351), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""rewards"""'}), "(dtype=tf.float32, shape=[None], name='rewards')\n", (7303, 7351), True, 'import tensorflow as tf\n'), ((7388, 7455), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""v_preds_next"""'}), "(dtype=tf.float32, shape=[None], name='v_preds_next')\n", (7402, 7455), True, 'import tensorflow as tf\n'), ((7484, 7543), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""gaes"""'}), "(dtype=tf.float32, shape=[None], name='gaes')\n", (7498, 7543), True, 'import tensorflow as tf\n'), ((7575, 7637), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""returns"""'}), 
"(dtype=tf.float32, shape=[None], name='returns')\n", (7589, 7637), True, 'import tensorflow as tf\n'), ((7719, 7798), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""returns_sum"""', 'shape': '[]', 'initializer': 'tf.zeros_initializer'}), "(name='returns_sum', shape=[], initializer=tf.zeros_initializer)\n", (7734, 7798), True, 'import tensorflow as tf\n'), ((7833, 7911), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""loss_p_sum"""', 'shape': '[]', 'initializer': 'tf.zeros_initializer'}), "(name='loss_p_sum', shape=[], initializer=tf.zeros_initializer)\n", (7848, 7911), True, 'import tensorflow as tf\n'), ((7946, 8024), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""loss_v_sum"""', 'shape': '[]', 'initializer': 'tf.zeros_initializer'}), "(name='loss_v_sum', shape=[], initializer=tf.zeros_initializer)\n", (7961, 8024), True, 'import tensorflow as tf\n'), ((8059, 8137), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""loss_e_sum"""', 'shape': '[]', 'initializer': 'tf.zeros_initializer'}), "(name='loss_e_sum', shape=[], initializer=tf.zeros_initializer)\n", (8074, 8137), True, 'import tensorflow as tf\n'), ((8174, 8259), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""loss_all_sum"""', 'shape': '[]', 'initializer': 'tf.zeros_initializer'}), "(name='loss_all_sum', shape=[], initializer=tf.zeros_initializer\n )\n", (8189, 8259), True, 'import tensorflow as tf\n'), ((8288, 8364), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""proc_num"""', 'shape': '[]', 'initializer': 'tf.zeros_initializer'}), "(name='proc_num', shape=[], initializer=tf.zeros_initializer)\n", (8303, 8364), True, 'import tensorflow as tf\n'), ((8733, 8805), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'self.battle_actions', 'depth': 'battle_act_probs.shape[1]'}), '(indices=self.battle_actions, depth=battle_act_probs.shape[1])\n', (8743, 8805), True, 'import tensorflow as tf\n'), ((8988, 9056), 
'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'self.battle_pos', 'depth': 'battle_pos_probs.shape[1]'}), '(indices=self.battle_pos, depth=battle_pos_probs.shape[1])\n', (8998, 9056), True, 'import tensorflow as tf\n'), ((9320, 9396), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'self.battle_actions', 'depth': 'battle_act_probs_old.shape[1]'}), '(indices=self.battle_actions, depth=battle_act_probs_old.shape[1])\n', (9330, 9396), True, 'import tensorflow as tf\n'), ((9603, 9675), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'self.battle_pos', 'depth': 'battle_pos_probs_old.shape[1]'}), '(indices=self.battle_pos, depth=battle_pos_probs_old.shape[1])\n', (9613, 9675), True, 'import tensorflow as tf\n'), ((9915, 9940), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (9932, 9940), True, 'import tensorflow as tf\n'), ((10254, 10355), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratios'], {'clip_value_min': '(1 - self.clip_value)', 'clip_value_max': '(1 + self.clip_value)'}), '(ratios, clip_value_min=1 - self.clip_value, clip_value_max\n =1 + self.clip_value)\n', (10270, 10355), True, 'import tensorflow as tf\n'), ((11114, 11145), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['entropy'], {'axis': '(0)'}), '(entropy, axis=0)\n', (11128, 11145), True, 'import tensorflow as tf\n'), ((11322, 11399), 'tensorflow.squared_difference', 'tf.squared_difference', (['(self.rewards + self.gamma * self.v_preds_next)', 'v_preds'], {}), '(self.rewards + self.gamma * self.v_preds_next, v_preds)\n', (11343, 11399), True, 'import tensorflow as tf\n'), ((11431, 11454), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_vf'], {}), '(loss_vf)\n', (11445, 11454), True, 'import tensorflow as tf\n'), ((11653, 11739), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean_return_dis"""', '(self.returns_sum / (self.proc_num + 0.0001))'], {}), "('mean_return_dis', self.returns_sum / (self.proc_num + \n 0.0001))\n", (11670, 
11739), True, 'import tensorflow as tf\n'), ((11827, 11912), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""policy_loss_dis"""', '(self.loss_p_sum / (self.proc_num + 0.0001))'], {}), "('policy_loss_dis', self.loss_p_sum / (self.proc_num + 0.0001)\n )\n", (11844, 11912), True, 'import tensorflow as tf\n'), ((11942, 12021), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""value_loss_dis"""', '(self.loss_v_sum / (self.proc_num + 0.0001))'], {}), "('value_loss_dis', self.loss_v_sum / (self.proc_num + 0.0001))\n", (11959, 12021), True, 'import tensorflow as tf\n'), ((12056, 12142), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""entropy_loss_dis"""', '(self.loss_e_sum / (self.proc_num + 0.0001))'], {}), "('entropy_loss_dis', self.loss_e_sum / (self.proc_num + \n 0.0001))\n", (12073, 12142), True, 'import tensorflow as tf\n'), ((12176, 12262), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss_dis"""', '(self.loss_all_sum / (self.proc_num + 0.0001))'], {}), "('total_loss_dis', self.loss_all_sum / (self.proc_num + \n 0.0001))\n", (12193, 12262), True, 'import tensorflow as tf\n'), ((17549, 17571), 'numpy.array', 'np.array', (['observations'], {}), '(observations)\n', (17557, 17571), True, 'import numpy as np\n'), ((17616, 17634), 'numpy.array', 'np.array', (['map_data'], {}), '(map_data)\n', (17624, 17634), True, 'import numpy as np\n'), ((17686, 17710), 'numpy.array', 'np.array', (['battle_actions'], {}), '(battle_actions)\n', (17694, 17710), True, 'import numpy as np\n'), ((17755, 17775), 'numpy.array', 'np.array', (['battle_pos'], {}), '(battle_pos)\n', (17763, 17775), True, 'import numpy as np\n'), ((18663, 18707), 'numpy.take', 'np.take', ([], {'a': 'a', 'indices': 'sample_indices', 'axis': '(0)'}), '(a=a, indices=sample_indices, axis=0)\n', (18670, 18707), True, 'import numpy as np\n'), ((20891, 20907), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (20898, 20907), True, 'import numpy as np\n'), ((878, 909), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""controller"""'], {}), "('controller')\n", (895, 909), True, 'import tensorflow as tf\n'), ((941, 1005), 'lib.layer.dense_layer', 'layer.dense_layer', (['self.obs', '(256)', '"""DenseLayer1"""'], {'func': 'activation'}), "(self.obs, 256, 'DenseLayer1', func=activation)\n", (958, 1005), True, 'import lib.layer as layer\n'), ((1036, 1099), 'lib.layer.dense_layer', 'layer.dense_layer', (['layer_1', '(256)', '"""DenseLayer2"""'], {'func': 'activation'}), "(layer_1, 256, 'DenseLayer2', func=activation)\n", (1053, 1099), True, 'import lib.layer as layer\n'), ((1143, 1192), 'lib.layer.dense_layer', 'layer.dense_layer', (['layer_2', '(64)', '"""Info"""'], {'func': 'None'}), "(layer_2, 64, 'Info', func=None)\n", (1160, 1192), True, 'import lib.layer as layer\n'), ((1215, 1242), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""battle"""'], {}), "('battle')\n", (1232, 1242), True, 'import tensorflow as tf\n'), ((1351, 1411), 'tensorflow.concat', 'tf.concat', (['[self.controller_info, self.minimap_info]'], {'axis': '(1)'}), '([self.controller_info, self.minimap_info], axis=1)\n', (1360, 1411), True, 'import tensorflow as tf\n'), ((1443, 1515), 'lib.layer.dense_layer', 'layer.dense_layer', (['self.battle_info', '(256)', '"""DenseLayer1"""'], {'func': 'activation'}), "(self.battle_info, 256, 'DenseLayer1', func=activation)\n", (1460, 1515), True, 'import lib.layer as layer\n'), ((1556, 1644), 'lib.layer.dense_layer', 'layer.dense_layer', (['layer_5', 'act_space_array[0]', '"""battle_output"""'], {'func': 'tf.nn.softmax'}), "(layer_5, act_space_array[0], 'battle_output', func=tf.nn.\n softmax)\n", (1573, 1644), True, 'import lib.layer as layer\n'), ((1831, 1870), 'tensorflow.reshape', 'tf.reshape', (['self.battle_act'], {'shape': '[-1]'}), '(self.battle_act, shape=[-1])\n', (1841, 1870), True, 'import tensorflow as tf\n'), ((1902, 1972), 'lib.layer.dense_layer', 'layer.dense_layer', (['self.battle_info', '(512)', 
'"""PosLayer1"""'], {'func': 'activation'}), "(self.battle_info, 512, 'PosLayer1', func=activation)\n", (1919, 1972), True, 'import lib.layer as layer\n'), ((2003, 2064), 'lib.layer.dense_layer', 'layer.dense_layer', (['layer_6', '(256)', '"""PosLayer2"""'], {'func': 'activation'}), "(layer_6, 256, 'PosLayer2', func=activation)\n", (2020, 2064), True, 'import lib.layer as layer\n'), ((2109, 2194), 'lib.layer.dense_layer', 'layer.dense_layer', (['layer_7', 'act_space_array[1]', '"""battle_pos"""'], {'func': 'tf.nn.softmax'}), "(layer_7, act_space_array[1], 'battle_pos', func=tf.nn.softmax\n )\n", (2126, 2194), True, 'import lib.layer as layer\n'), ((2389, 2428), 'tensorflow.reshape', 'tf.reshape', (['self.battle_pos'], {'shape': '[-1]'}), '(self.battle_pos, shape=[-1])\n', (2399, 2428), True, 'import tensorflow as tf\n'), ((10440, 10470), 'tensorflow.multiply', 'tf.multiply', (['self.gaes', 'ratios'], {}), '(self.gaes, ratios)\n', (10451, 10470), True, 'import tensorflow as tf\n'), ((10472, 10510), 'tensorflow.multiply', 'tf.multiply', (['self.gaes', 'clipped_ratios'], {}), '(self.gaes, clipped_ratios)\n', (10483, 10510), True, 'import tensorflow as tf\n'), ((10546, 10571), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_clip'], {}), '(loss_clip)\n', (10560, 10571), True, 'import tensorflow as tf\n'), ((1751, 1776), 'tensorflow.log', 'tf.log', (['self.battle_probs'], {}), '(self.battle_probs)\n', (1757, 1776), True, 'import tensorflow as tf\n'), ((2305, 2334), 'tensorflow.log', 'tf.log', (['self.battle_pos_probs'], {}), '(self.battle_pos_probs)\n', (2311, 2334), True, 'import tensorflow as tf\n'), ((6948, 6967), 'tensorflow.assign', 'tf.assign', (['v_old', 'v'], {}), '(v_old, v)\n', (6957, 6967), True, 'import tensorflow as tf\n'), ((17815, 17829), 'numpy.array', 'np.array', (['gaes'], {}), '(gaes)\n', (17823, 17829), True, 'import numpy as np\n'), ((17934, 17951), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (17942, 17951), True, 'import numpy 
as np\n'), ((18012, 18034), 'numpy.array', 'np.array', (['v_preds_next'], {}), '(v_preds_next)\n', (18020, 18034), True, 'import numpy as np\n'), ((10104, 10143), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['act_probs', '(1e-10)', '(1.0)'], {}), '(act_probs, 1e-10, 1.0)\n', (10120, 10143), True, 'import tensorflow as tf\n'), ((10175, 10218), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['act_probs_old', '(1e-10)', '(1.0)'], {}), '(act_probs_old, 1e-10, 1.0)\n', (10191, 10218), True, 'import tensorflow as tf\n'), ((10752, 10806), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.Policy.battle_probs', '(1e-10)', '(1.0)'], {}), '(self.Policy.battle_probs, 1e-10, 1.0)\n', (10768, 10806), True, 'import tensorflow as tf\n'), ((10928, 10986), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.Policy.battle_pos_probs', '(1e-10)', '(1.0)'], {}), '(self.Policy.battle_pos_probs, 1e-10, 1.0)\n', (10944, 10986), True, 'import tensorflow as tf\n')] |
import numpy as np
from scipy.sparse import coo_matrix
import math
import tensorflow as tf
from model import Model
class MessageGraph():
    """Holds a graph's edge list as tensors, split into sender / relation-type /
    receiver index vectors for message passing."""

    # Populated by process(); remain None until the constructor runs.
    sender_indices = None
    receiver_indices = None
    message_types = None

    def __init__(self, edges, vertex_count, label_count):
        """Store graph metadata and split `edges` ((sender, type, receiver)
        triplets) into per-column index tensors."""
        self.vertex_count = vertex_count
        self.label_count = label_count
        self.edges = edges
        self.process(self.edges)

    def process(self, triplets):
        """Transpose the [n, 3] triplet tensor into its three index columns
        and record the edge count."""
        columns = tf.transpose(triplets)
        self.sender_indices = columns[0]
        self.message_types = columns[1]
        self.receiver_indices = columns[2]
        self.edge_count = tf.shape(self.sender_indices)[0]

    def get_sender_indices(self):
        """Edge source-vertex indices."""
        return self.sender_indices

    def get_type_indices(self):
        """Edge relation-type indices."""
        return self.message_types

    def get_receiver_indices(self):
        """Edge target-vertex indices."""
        return self.receiver_indices
'''
def compute_normalized_values(self, receiver_indices, message_types):
if self.normalization[0] == "global":
mrs = receiver_indices
else:
mrs = [tuple(x) for x in np.vstack((receiver_indices, message_types)).transpose()]
counts = {}
for mr in mrs:
if mr in counts:
counts[mr] += 1.0
else:
counts[mr] = 1.0
return np.array([1.0 / counts[mr] for mr in mrs]).astype(np.float32)
def compute_sparse_mtr(self):
if self.normalization[0] != "none":
mtr_values = self.compute_normalized_mtr(self.receiver_indices, self.message_types)
else:
mtr_values = np.ones_like(self.message_types).astype(np.int32)
message_indices = np.arange(self.edge_count).astype(np.int32)
mtr_indices = np.vstack((self.receiver_indices, message_indices)).transpose()
mtr_shape = [self.vertex_count, self.edge_count]
return mtr_indices, mtr_values, mtr_shape
'''
def forward_incidence_matrix(self, normalization):
if normalization[0] == "none":
mtr_values = tf.to_float(tf.ones_like(self.receiver_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.receiver_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))
tensor = tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape)
return tensor
elif normalization[0] == "global":
mtr_values = tf.to_float(tf.ones_like(self.receiver_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.receiver_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))
tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape))
return tensor
elif normalization[0] == "local":
mtr_values = tf.to_float(tf.ones_like(self.receiver_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.message_types, self.receiver_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.label_count*2, self.vertex_count, self.edge_count]))
tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape))
tensor = tf.sparse_reduce_sum_sparse(tensor, 0)
return tensor
def backward_incidence_matrix(self, normalization):
if normalization[0] == "none":
mtr_values = tf.to_float(tf.ones_like(self.sender_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.sender_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))
tensor = tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape)
return tensor
elif normalization[0] == "global":
mtr_values = tf.to_float(tf.ones_like(self.sender_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.sender_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))
tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape))
return tensor
elif normalization[0] == "local":
mtr_values = tf.to_float(tf.ones_like(self.sender_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.message_types, self.sender_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.label_count*2, self.vertex_count, self.edge_count]))
tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape))
tensor = tf.sparse_reduce_sum_sparse(tensor, 0)
return tensor
class Representation(Model):
    """Graph-representation component of the model.

    Stores the training triples, exposes the run-time edge placeholder
    (``X``) as the train/test input variable, and lazily constructs a
    :class:`MessageGraph` over that placeholder.
    """

    normalization = "global"
    graph = None
    X = None

    def __init__(self, triples, settings, bipartite=False):
        """
        :param triples: iterable of (subject, relation, object) index triples
        :param settings: dict providing 'EntityCount' and 'RelationCount'
        :param bipartite: unused here; kept for interface compatibility
        """
        self.triples = np.array(triples)
        self.entity_count = settings['EntityCount']
        self.relation_count = settings['RelationCount']
        # Every triple contributes a forward and a backward (inverse) edge.
        self.edge_count = self.triples.shape[0]*2

    def get_graph(self):
        """Build the MessageGraph on first use and cache it on the instance."""
        if self.graph is None:
            self.graph = MessageGraph(self.X, self.entity_count, self.relation_count)
        return self.graph

    def local_initialize_train(self):
        """Create the [n_edges, 3] int32 placeholder fed with the edge list."""
        self.X = tf.placeholder(tf.int32, shape=[None, 3], name='graph_edges')

    def local_get_train_input_variables(self):
        return [self.X]

    def local_get_test_input_variables(self):
        return [self.X]
| [
"tensorflow.range",
"tensorflow.transpose",
"tensorflow.ones_like",
"tensorflow.placeholder",
"tensorflow.shape",
"tensorflow.stack",
"numpy.array",
"tensorflow.sparse_reduce_sum_sparse",
"tensorflow.SparseTensor"
] | [((472, 494), 'tensorflow.transpose', 'tf.transpose', (['triplets'], {}), '(triplets)\n', (484, 494), True, 'import tensorflow as tf\n'), ((5911, 5928), 'numpy.array', 'np.array', (['triples'], {}), '(triples)\n', (5919, 5928), True, 'import numpy as np\n'), ((6441, 6502), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, 3]', 'name': '"""graph_edges"""'}), "(tf.int32, shape=[None, 3], name='graph_edges')\n", (6455, 6502), True, 'import tensorflow as tf\n'), ((649, 678), 'tensorflow.shape', 'tf.shape', (['self.sender_indices'], {}), '(self.sender_indices)\n', (657, 678), True, 'import tensorflow as tf\n'), ((2140, 2165), 'tensorflow.range', 'tf.range', (['self.edge_count'], {}), '(self.edge_count)\n', (2148, 2165), True, 'import tensorflow as tf\n'), ((2377, 2455), 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 'mtr_indices', 'values': 'mtr_values', 'dense_shape': 'mtr_shape'}), '(indices=mtr_indices, values=mtr_values, dense_shape=mtr_shape)\n', (2392, 2455), True, 'import tensorflow as tf\n'), ((4053, 4078), 'tensorflow.range', 'tf.range', (['self.edge_count'], {}), '(self.edge_count)\n', (4061, 4078), True, 'import tensorflow as tf\n'), ((4288, 4366), 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 'mtr_indices', 'values': 'mtr_values', 'dense_shape': 'mtr_shape'}), '(indices=mtr_indices, values=mtr_values, dense_shape=mtr_shape)\n', (4303, 4366), True, 'import tensorflow as tf\n'), ((2073, 2108), 'tensorflow.ones_like', 'tf.ones_like', (['self.receiver_indices'], {}), '(self.receiver_indices)\n', (2085, 2108), True, 'import tensorflow as tf\n'), ((2307, 2353), 'tensorflow.stack', 'tf.stack', (['[self.vertex_count, self.edge_count]'], {}), '([self.vertex_count, self.edge_count])\n', (2315, 2353), True, 'import tensorflow as tf\n'), ((2704, 2729), 'tensorflow.range', 'tf.range', (['self.edge_count'], {}), '(self.edge_count)\n', (2712, 2729), True, 'import tensorflow as tf\n'), ((3988, 4021), 
'tensorflow.ones_like', 'tf.ones_like', (['self.sender_indices'], {}), '(self.sender_indices)\n', (4000, 4021), True, 'import tensorflow as tf\n'), ((4218, 4264), 'tensorflow.stack', 'tf.stack', (['[self.vertex_count, self.edge_count]'], {}), '([self.vertex_count, self.edge_count])\n', (4226, 4264), True, 'import tensorflow as tf\n'), ((4613, 4638), 'tensorflow.range', 'tf.range', (['self.edge_count'], {}), '(self.edge_count)\n', (4621, 4638), True, 'import tensorflow as tf\n'), ((2218, 2268), 'tensorflow.stack', 'tf.stack', (['[self.receiver_indices, message_indices]'], {}), '([self.receiver_indices, message_indices])\n', (2226, 2268), True, 'import tensorflow as tf\n'), ((2637, 2672), 'tensorflow.ones_like', 'tf.ones_like', (['self.receiver_indices'], {}), '(self.receiver_indices)\n', (2649, 2672), True, 'import tensorflow as tf\n'), ((2871, 2917), 'tensorflow.stack', 'tf.stack', (['[self.vertex_count, self.edge_count]'], {}), '([self.vertex_count, self.edge_count])\n', (2879, 2917), True, 'import tensorflow as tf\n'), ((2959, 3037), 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 'mtr_indices', 'values': 'mtr_values', 'dense_shape': 'mtr_shape'}), '(indices=mtr_indices, values=mtr_values, dense_shape=mtr_shape)\n', (2974, 3037), True, 'import tensorflow as tf\n'), ((3302, 3327), 'tensorflow.range', 'tf.range', (['self.edge_count'], {}), '(self.edge_count)\n', (3310, 3327), True, 'import tensorflow as tf\n'), ((3789, 3827), 'tensorflow.sparse_reduce_sum_sparse', 'tf.sparse_reduce_sum_sparse', (['tensor', '(0)'], {}), '(tensor, 0)\n', (3816, 3827), True, 'import tensorflow as tf\n'), ((4131, 4179), 'tensorflow.stack', 'tf.stack', (['[self.sender_indices, message_indices]'], {}), '([self.sender_indices, message_indices])\n', (4139, 4179), True, 'import tensorflow as tf\n'), ((4548, 4581), 'tensorflow.ones_like', 'tf.ones_like', (['self.sender_indices'], {}), '(self.sender_indices)\n', (4560, 4581), True, 'import tensorflow as tf\n'), ((4778, 4824), 
'tensorflow.stack', 'tf.stack', (['[self.vertex_count, self.edge_count]'], {}), '([self.vertex_count, self.edge_count])\n', (4786, 4824), True, 'import tensorflow as tf\n'), ((4866, 4944), 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 'mtr_indices', 'values': 'mtr_values', 'dense_shape': 'mtr_shape'}), '(indices=mtr_indices, values=mtr_values, dense_shape=mtr_shape)\n', (4881, 4944), True, 'import tensorflow as tf\n'), ((5207, 5232), 'tensorflow.range', 'tf.range', (['self.edge_count'], {}), '(self.edge_count)\n', (5215, 5232), True, 'import tensorflow as tf\n'), ((5672, 5710), 'tensorflow.sparse_reduce_sum_sparse', 'tf.sparse_reduce_sum_sparse', (['tensor', '(0)'], {}), '(tensor, 0)\n', (5699, 5710), True, 'import tensorflow as tf\n'), ((2782, 2832), 'tensorflow.stack', 'tf.stack', (['[self.receiver_indices, message_indices]'], {}), '([self.receiver_indices, message_indices])\n', (2790, 2832), True, 'import tensorflow as tf\n'), ((3235, 3270), 'tensorflow.ones_like', 'tf.ones_like', (['self.receiver_indices'], {}), '(self.receiver_indices)\n', (3247, 3270), True, 'import tensorflow as tf\n'), ((3489, 3557), 'tensorflow.stack', 'tf.stack', (['[self.label_count * 2, self.vertex_count, self.edge_count]'], {}), '([self.label_count * 2, self.vertex_count, self.edge_count])\n', (3497, 3557), True, 'import tensorflow as tf\n'), ((3597, 3675), 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 'mtr_indices', 'values': 'mtr_values', 'dense_shape': 'mtr_shape'}), '(indices=mtr_indices, values=mtr_values, dense_shape=mtr_shape)\n', (3612, 3675), True, 'import tensorflow as tf\n'), ((4691, 4739), 'tensorflow.stack', 'tf.stack', (['[self.sender_indices, message_indices]'], {}), '([self.sender_indices, message_indices])\n', (4699, 4739), True, 'import tensorflow as tf\n'), ((5142, 5175), 'tensorflow.ones_like', 'tf.ones_like', (['self.sender_indices'], {}), '(self.sender_indices)\n', (5154, 5175), True, 'import tensorflow as tf\n'), ((5392, 5460), 
'tensorflow.stack', 'tf.stack', (['[self.label_count * 2, self.vertex_count, self.edge_count]'], {}), '([self.label_count * 2, self.vertex_count, self.edge_count])\n', (5400, 5460), True, 'import tensorflow as tf\n'), ((5500, 5578), 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 'mtr_indices', 'values': 'mtr_values', 'dense_shape': 'mtr_shape'}), '(indices=mtr_indices, values=mtr_values, dense_shape=mtr_shape)\n', (5515, 5578), True, 'import tensorflow as tf\n'), ((3380, 3450), 'tensorflow.stack', 'tf.stack', (['[self.message_types, self.receiver_indices, message_indices]'], {}), '([self.message_types, self.receiver_indices, message_indices])\n', (3388, 3450), True, 'import tensorflow as tf\n'), ((5285, 5353), 'tensorflow.stack', 'tf.stack', (['[self.message_types, self.sender_indices, message_indices]'], {}), '([self.message_types, self.sender_indices, message_indices])\n', (5293, 5353), True, 'import tensorflow as tf\n')] |
import strax
import straxen
from straxen.get_corrections import get_correction_from_cmt
import numpy as np
import numba
from straxen.numbafied_scipy import numba_gammaln, numba_betainc
from scipy.special import loggamma
import tarfile
import tempfile
# strax idiom: ``export`` is a decorator that appends the decorated class to
# ``__all__``, so only @export-ed names are pulled in by ``import *``.
export, __all__ = strax.exporter()
@export
@strax.takes_config(
    strax.Option('s1_optical_map', help='S1 (x, y, z) optical/pattern map.', infer_type=False,
                 default='XENONnT_s1_xyz_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl'),
    strax.Option('s2_optical_map', help='S2 (x, y) optical/pattern map.', infer_type=False,
                 default='XENONnT_s2_xy_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl'),
    strax.Option('s2_tf_model', help='S2 (x, y) optical data-driven model', infer_type=False,
                 default='XENONnT_s2_optical_map_data_driven_ML_v0_2021_11_25.tar.gz'),
    strax.Option('s1_aft_map', help='Date drive S1 area fraction top map.', infer_type=False,
                 default='s1_aft_dd_xyz_XENONnT_Kr83m_41500eV_31Oct2021.json'),
    strax.Option('mean_pe_per_photon', help='Mean of full VUV single photon response',
                 default=1.2, infer_type=False,),
    strax.Option('gain_model', infer_type=False,
                 help='PMT gain model. Specify as (model_type, model_config)'),
    strax.Option('n_tpc_pmts', type=int,
                 help='Number of TPC PMTs'),
    strax.Option('n_top_pmts', type=int,
                 help='Number of top TPC PMTs'),
    strax.Option('s1_min_area_pattern_fit', infer_type=False,
                 help='Skip EventPatternFit reconstruction if S1 area (PE) is less than this',
                 default=2),
    strax.Option('s2_min_area_pattern_fit', infer_type=False,
                 help='Skip EventPatternFit reconstruction if S2 area (PE) is less than this',
                 default=10),
    strax.Option('store_per_channel', default=False, type=bool,
                 help='Store normalized LLH per channel for each peak'),
    strax.Option('max_r_pattern_fit', default=straxen.tpc_r, type=float,
                 help='Maximal radius of the peaks where llh calculation will be performed'),
    strax.Option(name='electron_drift_velocity', infer_type=False,
                 help='Vertical electron drift velocity in cm/ns (1e4 m/ms)',
                 default=("electron_drift_velocity", "ONLINE", True)),
    strax.Option(name='electron_drift_time_gate', infer_type=False,
                 help='Electron drift time from the gate in ns',
                 default=("electron_drift_time_gate", "ONLINE", True)),
)
class EventPatternFit(strax.Plugin):
    '''
    Plugin that provides pattern information for events.

    For the main and alternative S1/S2 of each event it computes:
      - modified-Poisson pattern 2LLH values (S1: total/top/bottom; S2: top),
      - a data-driven neural-network likelihood for S2s,
      - binomial tests of the S1 area fraction top (continuous and discrete,
        both in PE area and in estimated photon counts).
    '''
    depends_on = ('event_area_per_channel', 'event_basics', 'event_positions')
    provides = 'event_pattern_fit'
    __version__ = '0.1.1'

    def infer_dtype(self):
        """Assemble the output dtype.

        Always contains the scalar likelihood / binomial-test fields; the
        per-channel pattern and per-channel 2LLH arrays are added only when
        ``store_per_channel`` is set.
        """
        dtype = [('s2_2llh', np.float32,
                  'Modified Poisson likelihood value for main S2 in the event'),
                 ('s2_neural_2llh', np.float32,
                  'Data-driven based likelihood value for main S2 in the event'),
                 ('alt_s2_2llh', np.float32,
                  'Modified Poisson likelihood value for alternative S2'),
                 ('alt_s2_neural_2llh', np.float32,
                  'Data-driven based likelihood value for alternative S2 in the event'),
                 ('s1_2llh', np.float32,
                  'Modified Poisson likelihood value for main S1'),
                 ('s1_top_2llh', np.float32,
                  'Modified Poisson likelihood value for main S1, calculated from top array'),
                 ('s1_bottom_2llh', np.float32,
                  'Modified Poisson likelihood value for main S1, calculated from bottom array'),
                 ('s1_area_fraction_top_continuous_probability', np.float32,
                  'Continuous binomial test for S1 area fraction top'),
                 ('s1_area_fraction_top_discrete_probability', np.float32,
                  'Discrete binomial test for S1 area fraction top'),
                 ('s1_photon_fraction_top_continuous_probability', np.float32,
                  'Continuous binomial test for S1 photon fraction top'),
                 ('s1_photon_fraction_top_discrete_probability', np.float32,
                  'Discrete binomial test for S1 photon fraction top'),
                 ('alt_s1_area_fraction_top_continuous_probability', np.float32,
                  'Continuous binomial test for alternative S1 area fraction top'),
                 ('alt_s1_area_fraction_top_discrete_probability', np.float32,
                  'Discrete binomial test for alternative S1 area fraction top'),
                 ('alt_s1_photon_fraction_top_continuous_probability', np.float32,
                  'Continuous binomial test for alternative S1 photon fraction top'),
                 ('alt_s1_photon_fraction_top_discrete_probability', np.float32,
                  'Discrete binomial test for alternative S1 photon fraction top')]

        if self.config['store_per_channel']:
            dtype += [
                (('2LLH per channel for main S2', 's2_2llh_per_channel'),
                 np.float32, (self.config['n_top_pmts'], )),
                (('2LLH per channel for alternative S2', 'alt_s2_2llh_per_channel'),
                 np.float32, (self.config['n_top_pmts'], )),
                (('Pattern main S2', 's2_pattern'),
                 np.float32, (self.config['n_top_pmts'], )),
                (('Pattern alt S2', 'alt_s2_pattern'),
                 np.float32, (self.config['n_top_pmts'], )),
                (('Pattern for main S1', 's1_pattern'),
                 np.float32, (self.config['n_tpc_pmts'], )),
                (('2LLH per channel for main S1', 's1_2llh_per_channel'),
                 np.float32, (self.config['n_tpc_pmts'], )),
            ]
        dtype += strax.time_fields
        return dtype

    def setup(self):
        """Load all run-time inputs.

        CMT drift corrections, the S1 AFT map, the S1/S2 optical maps, the
        S2 data-driven TensorFlow model (shipped as a tar.gz SavedModel),
        and the gain model — the latter only to identify dead PMTs, which
        are excluded from all pattern likelihoods.
        """
        self.electron_drift_velocity = get_correction_from_cmt(self.run_id, self.config['electron_drift_velocity'])
        self.electron_drift_time_gate = get_correction_from_cmt(self.run_id, self.config['electron_drift_time_gate'])
        self.mean_pe_photon = self.config['mean_pe_per_photon']

        # Getting S1 AFT maps
        self.s1_aft_map = straxen.InterpolatingMap(
            straxen.get_resource(
                self.config['s1_aft_map'],
                fmt=self._infer_map_format(self.config['s1_aft_map'])))

        # Getting optical maps
        self.s1_pattern_map = straxen.InterpolatingMap(
            straxen.get_resource(
                self.config['s1_optical_map'],
                fmt=self._infer_map_format(self.config['s1_optical_map'])))
        self.s2_pattern_map = straxen.InterpolatingMap(
            straxen.get_resource(
                self.config['s2_optical_map'],
                fmt=self._infer_map_format(self.config['s2_optical_map'])))

        # Getting S2 data-driven tensorflow models
        downloader = straxen.MongoDownloader()
        self.model_file = downloader.download_single(self.config['s2_tf_model'])
        with tempfile.TemporaryDirectory() as tmpdirname:
            tar = tarfile.open(self.model_file, mode="r:gz")
            tar.extractall(path=tmpdirname)

            import tensorflow as tf

            # Custom loss the SavedModel was trained with; needed for deserialization.
            def _logl_loss(patterns_true, likelihood):
                return likelihood / 10.

            self.model = tf.keras.models.load_model(tmpdirname,
                                                     custom_objects={"_logl_loss": _logl_loss})
            # Secondary model exposing the internal 'Likelihood' layer output.
            self.model_chi2 = tf.keras.Model(self.model.inputs,
                                            self.model.get_layer('Likelihood').output)

        # Getting gain model to get dead PMTs
        self.to_pe = straxen.get_correction_from_cmt(self.run_id, self.config['gain_model'])
        self.dead_PMTs = np.where(self.to_pe == 0)[0]
        # Boolean masks of *live* PMTs for the full array, top, and bottom.
        self.pmtbool = ~np.in1d(np.arange(0, self.config['n_tpc_pmts']), self.dead_PMTs)
        self.pmtbool_top = self.pmtbool[:self.config['n_top_pmts']]
        self.pmtbool_bottom = self.pmtbool[self.config['n_top_pmts']:self.config['n_tpc_pmts']]

    def compute(self, events):
        """Fill all pattern-fit fields for each event.

        Fields default to NaN and are only overwritten for events that pass
        the respective validity requirements.
        """
        result = np.zeros(len(events), dtype=self.dtype)
        result['time'] = events['time']
        result['endtime'] = strax.endtime(events)

        # Computing LLH values for S1s
        self.compute_s1_llhvalue(events, result)

        # Computing LLH values for S2s
        self.compute_s2_llhvalue(events, result)

        # Computing chi2 values for S2s
        self.compute_s2_neural_llhvalue(events, result)

        # Computing binomial test for s1 area fraction top
        positions = np.vstack([events['x'], events['y'], events['z']]).T
        aft_prob = self.s1_aft_map(positions)

        # Alt-S1 z is recomputed from the drift time between the main S2 and the alt S1.
        alt_s1_interaction_drift_time = events['s2_center_time']-events['alt_s1_center_time']
        alt_s1_interaction_z = -self.electron_drift_velocity*(alt_s1_interaction_drift_time-self.electron_drift_time_gate)
        alt_positions = np.vstack([events['x'], events['y'], alt_s1_interaction_z]).T
        alt_aft_prob = self.s1_aft_map(alt_positions)

        # main s1 events
        mask_s1 = ~np.isnan(aft_prob)
        mask_s1 &= ~np.isnan(events['s1_area'])
        mask_s1 &= ~np.isnan(events['s1_area_fraction_top'])

        # default value is nan, it will be overwritten if the event satisfies the requirements
        result['s1_area_fraction_top_continuous_probability'][:] = np.nan
        result['s1_area_fraction_top_discrete_probability'][:] = np.nan
        result['s1_photon_fraction_top_continuous_probability'][:] = np.nan
        result['s1_photon_fraction_top_discrete_probability'][:] = np.nan

        # compute binomial test only if we have events that have valid aft prob, s1 area and s1 aft
        if np.sum(mask_s1):
            arg = aft_prob[mask_s1], events['s1_area'][mask_s1], events['s1_area_fraction_top'][mask_s1]
            result['s1_area_fraction_top_continuous_probability'][mask_s1] = s1_area_fraction_top_probability(*arg)
            result['s1_area_fraction_top_discrete_probability'][mask_s1] = s1_area_fraction_top_probability(*arg, 'discrete')
            # Photon-fraction variant: convert PE area to photon counts via mean_pe_per_photon.
            arg = aft_prob[mask_s1], events['s1_area'][mask_s1]/self.config['mean_pe_per_photon'], events['s1_area_fraction_top'][mask_s1]
            result['s1_photon_fraction_top_continuous_probability'][mask_s1] = s1_area_fraction_top_probability(*arg)
            result['s1_photon_fraction_top_discrete_probability'][mask_s1] = s1_area_fraction_top_probability(*arg, 'discrete')

        # alternative s1 events
        mask_alt_s1 = ~np.isnan(alt_aft_prob)
        mask_alt_s1 &= ~np.isnan(events['alt_s1_area'])
        mask_alt_s1 &= ~np.isnan(events['alt_s1_area_fraction_top'])

        # default value is nan, it will be overwritten if the event satisfies the requirements
        result['alt_s1_area_fraction_top_continuous_probability'][:] = np.nan
        result['alt_s1_area_fraction_top_discrete_probability'][:] = np.nan
        result['alt_s1_photon_fraction_top_continuous_probability'][:] = np.nan
        result['alt_s1_photon_fraction_top_discrete_probability'][:] = np.nan

        # compute binomial test only if we have events that have valid aft prob, alt s1 area and alt s1 aft
        if np.sum(mask_alt_s1):
            arg = aft_prob[mask_alt_s1], events['alt_s1_area'][mask_alt_s1], events['alt_s1_area_fraction_top'][mask_alt_s1]
            result['alt_s1_area_fraction_top_continuous_probability'][mask_alt_s1] = s1_area_fraction_top_probability(*arg)
            result['alt_s1_area_fraction_top_discrete_probability'][mask_alt_s1] = s1_area_fraction_top_probability(*arg, 'discrete')
            arg = aft_prob[mask_alt_s1], events['alt_s1_area'][mask_alt_s1]/self.config['mean_pe_per_photon'], events['alt_s1_area_fraction_top'][mask_alt_s1]
            result['alt_s1_photon_fraction_top_continuous_probability'][mask_alt_s1] = s1_area_fraction_top_probability(*arg)
            result['alt_s1_photon_fraction_top_discrete_probability'][mask_alt_s1] = s1_area_fraction_top_probability(*arg, 'discrete')

        return result

    def compute_s1_llhvalue(self, events, result):
        """Compute modified-Poisson 2LLH for the main S1 (total, top, bottom).

        The expectation pattern is taken from the S1 optical map at the
        reconstructed (x, y, z); top/bottom patterns are rescaled with the
        observed area fraction top. Dead PMTs are excluded throughout.
        """
        # Selecting S1s for pattern fit calculation
        # - must exist (index != -1)
        # - must have total area larger minimal one
        # - must have positive AFT
        x, y, z = events['x'], events['y'], events['z']
        cur_s1_bool = events['s1_area']>self.config['s1_min_area_pattern_fit']
        cur_s1_bool &= events['s1_index']!=-1
        cur_s1_bool &= events['s1_area_fraction_top']>=0
        cur_s1_bool &= np.isfinite(x)
        cur_s1_bool &= np.isfinite(y)
        cur_s1_bool &= np.isfinite(z)
        cur_s1_bool &= (x**2 + y**2) < self.config['max_r_pattern_fit']**2

        # default value is nan, it will be overwritten if the event satisfies the requirements
        result['s1_2llh'][:] = np.nan
        result['s1_top_2llh'][:] = np.nan
        result['s1_bottom_2llh'][:] = np.nan

        # Making expectation patterns [ in PE ]
        if np.sum(cur_s1_bool):
            s1_map_effs = self.s1_pattern_map(np.array([x, y, z]).T)[cur_s1_bool, :]
            s1_area = events['s1_area'][cur_s1_bool]
            s1_pattern = s1_area[:, None]*(s1_map_effs[:, self.pmtbool])/np.sum(s1_map_effs[:, self.pmtbool], axis=1)[:, None]

            s1_pattern_top = (events['s1_area_fraction_top'][cur_s1_bool]*s1_area)
            s1_pattern_top = s1_pattern_top[:, None]*((s1_map_effs[:, :self.config['n_top_pmts']])[:, self.pmtbool_top])
            s1_pattern_top /= np.sum((s1_map_effs[:, :self.config['n_top_pmts']])[:, self.pmtbool_top], axis=1)[:, None]
            s1_pattern_bottom = ((1-events['s1_area_fraction_top'][cur_s1_bool])*s1_area)
            s1_pattern_bottom = s1_pattern_bottom[:, None]*((s1_map_effs[:, self.config['n_top_pmts']:])[:, self.pmtbool_bottom])
            s1_pattern_bottom /= np.sum((s1_map_effs[:, self.config['n_top_pmts']:])[:, self.pmtbool_bottom], axis=1)[:, None]

            # Getting pattern from data
            s1_area_per_channel_ = events['s1_area_per_channel'][cur_s1_bool,:]
            s1_area_per_channel = s1_area_per_channel_[:, self.pmtbool]
            s1_area_per_channel_top = (s1_area_per_channel_[:, :self.config['n_top_pmts']])[:, self.pmtbool_top]
            s1_area_per_channel_bottom = (s1_area_per_channel_[:, self.config['n_top_pmts']:])[:, self.pmtbool_bottom]

            # Top and bottom
            # 2LLH is normalized by subtracting the saturated model (mu = observed).
            arg1 = s1_pattern/self.mean_pe_photon, s1_area_per_channel, self.mean_pe_photon
            arg2 = s1_area_per_channel/self.mean_pe_photon, s1_area_per_channel, self.mean_pe_photon
            norm_llh_val = (neg2llh_modpoisson(*arg1) - neg2llh_modpoisson(*arg2))
            result['s1_2llh'][cur_s1_bool] = np.sum(norm_llh_val, axis=1)

            # If needed to store - store only top and bottom array, but not together
            if self.config['store_per_channel']:
                # Storing pattern information
                store_patterns = np.zeros((s1_pattern.shape[0], self.config['n_tpc_pmts']) )
                store_patterns[:, self.pmtbool] = s1_pattern
                result['s1_pattern'][cur_s1_bool] = store_patterns

                # Storing actual LLH values
                store_2LLH_ch = np.zeros((norm_llh_val.shape[0], self.config['n_tpc_pmts']) )
                store_2LLH_ch[:, self.pmtbool] = norm_llh_val
                result['s1_2llh_per_channel'][cur_s1_bool] = store_2LLH_ch

            # Top
            arg1 = s1_pattern_top/self.mean_pe_photon, s1_area_per_channel_top, self.mean_pe_photon
            arg2 = s1_area_per_channel_top/self.mean_pe_photon, s1_area_per_channel_top, self.mean_pe_photon
            norm_llh_val = (neg2llh_modpoisson(*arg1) - neg2llh_modpoisson(*arg2))
            result['s1_top_2llh'][cur_s1_bool] = np.sum(norm_llh_val, axis=1)

            # Bottom
            arg1 = s1_pattern_bottom/self.mean_pe_photon, s1_area_per_channel_bottom, self.mean_pe_photon
            arg2 = s1_area_per_channel_bottom/self.mean_pe_photon, s1_area_per_channel_bottom, self.mean_pe_photon
            norm_llh_val = (neg2llh_modpoisson(*arg1) - neg2llh_modpoisson(*arg2))
            result['s1_bottom_2llh'][cur_s1_bool] = np.sum(norm_llh_val, axis=1)

    def compute_s2_llhvalue(self, events, result):
        """Compute modified-Poisson 2LLH for main and alternative S2s.

        Only the top array is used (S2 light is dominantly seen on top);
        the expectation comes from the S2 optical map at (x, y).
        """
        for t_ in ['s2', 'alt_s2']:
            # Selecting S2s for pattern fit calculation
            # - must exist (index != -1)
            # - must have total area larger minimal one
            # - must have positive AFT
            x, y = events[t_+'_x'], events[t_+'_y']
            s2_mask = (events[t_+'_area']>self.config['s2_min_area_pattern_fit'])
            s2_mask &= (events[t_+'_area_fraction_top']>0)
            s2_mask &= (x**2 + y**2) < self.config['max_r_pattern_fit']**2

            # default value is nan, it will be overwritten if the event satisfies the requirements
            result[t_+'_2llh'][:] = np.nan

            # Making expectation patterns [ in PE ]
            if np.sum(s2_mask):
                s2_map_effs = self.s2_pattern_map(np.array([x, y]).T)[s2_mask, 0:self.config['n_top_pmts']]
                s2_map_effs = s2_map_effs[:, self.pmtbool_top]
                s2_top_area = (events[t_+'_area_fraction_top']*events[t_+'_area'])[s2_mask]
                s2_pattern = s2_top_area[:, None]*s2_map_effs/np.sum(s2_map_effs, axis=1)[:,None]

                # Getting pattern from data
                s2_top_area_per_channel = events[t_+'_area_per_channel'][s2_mask, 0:self.config['n_top_pmts']]
                s2_top_area_per_channel = s2_top_area_per_channel[:, self.pmtbool_top]

                # Calculating LLH, this is shifted Poisson
                # we get area expectation and we need to scale them to get
                # photon expectation
                norm_llh_val = (neg2llh_modpoisson(
                                mu = s2_pattern/self.mean_pe_photon,
                                areas = s2_top_area_per_channel,
                                mean_pe_photon=self.mean_pe_photon)
                                -
                                neg2llh_modpoisson(
                                mu = s2_top_area_per_channel/self.mean_pe_photon,
                                areas = s2_top_area_per_channel,
                                mean_pe_photon=self.mean_pe_photon)
                               )
                result[t_+'_2llh'][s2_mask] = np.sum(norm_llh_val, axis=1)

                if self.config['store_per_channel']:
                    store_patterns = np.zeros((s2_pattern.shape[0], self.config['n_top_pmts']) )
                    store_patterns[:, self.pmtbool_top] = s2_pattern
                    result[t_+'_pattern'][s2_mask] = store_patterns

                    store_2LLH_ch = np.zeros((norm_llh_val.shape[0], self.config['n_top_pmts']) )
                    store_2LLH_ch[:, self.pmtbool_top] = norm_llh_val
                    result[t_+'_2llh_per_channel'][s2_mask] = store_2LLH_ch

    def compute_s2_neural_llhvalue(self, events, result):
        """Evaluate the data-driven neural-net likelihood for main/alt S2s.

        Feeds (x, y) and the top-array pattern to the model loaded in
        :meth:`setup` and stores its chi2-like output.
        """
        for t_ in ['s2', 'alt_s2']:
            x, y = events[t_ + '_x'], events[t_ + '_y']
            s2_mask = (events[t_ + '_area'] > self.config['s2_min_area_pattern_fit'])
            s2_mask &= (events[t_ + '_area_fraction_top'] > 0)

            # default value is nan, it will be overwritten if the event satisfies the requirements
            result[t_ + '_neural_2llh'][:] = np.nan

            # Produce position and top pattern to feed tensorflow model, return chi2/N
            if np.sum(s2_mask):
                s2_pos = np.stack((x, y)).T[s2_mask]
                s2_pat = events[t_ + '_area_per_channel'][s2_mask, 0:self.config['n_top_pmts']]
                # Output[0]: loss function, -2*log-likelihood, Output[1]: chi2
                result[t_ + '_neural_2llh'][s2_mask] = self.model_chi2.predict({'xx': s2_pos, 'yy': s2_pat})[1]

    @staticmethod
    def _infer_map_format(map_name, known_formats=('pkl', 'json', 'json.gz')):
        """Guess a resource's format from its filename extension.

        :raises ValueError: if the extension is not one of ``known_formats``
        """
        for fmt in known_formats:
            if map_name.endswith(fmt):
                return fmt
        raise ValueError(f'Extension of {map_name} not in {known_formats}')
def neg2llh_modpoisson(mu=None, areas=None, mean_pe_photon=1.0):
    """
    Modified Poisson -2*log-likelihood with proper normalization for a
    shifted Poisson.

    :param mu: expected number of photons per channel (array)
    :param areas: observed areas per channel (array, in PE)
    :param mean_pe_photon: mean of the single-photon area response
    :return: array of -2*log-likelihood values, same shape as ``mu``
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        fraction = areas/mean_pe_photon
        res = 2.*(mu -
                  (fraction)*np.log(mu) +
                  loggamma((fraction)+1) +
                  np.log(mean_pe_photon)
                  )
    is_zero = areas <= 0  # If area equals or smaller than 0 - assume 0
    res[is_zero] = 2.*mu[is_zero]
    # if a zero channel has negative expectation, assume LLH to be 0 there;
    # this happens in the normalization factor calculation when mu is received from area.
    # BUGFIX: was `is_zero | neg_mu`, which zeroed *every* zero-area channel
    # and made the `res[is_zero] = 2.*mu[is_zero]` assignment above dead code.
    neg_mu = mu < 0.0
    res[is_zero & neg_mu] = 0.0
    return res
# continuous and discrete binomial test
@numba.njit
def lbinom_pmf(k, n, p):
    """Log of the binomial pmf, binomial coefficient approximated via log-gamma."""
    log_coef = numba_gammaln(n + 1) - numba_gammaln(n - k + 1) - numba_gammaln(k + 1)
    log_success = k * np.log(p)
    log_failure = (n - k) * np.log(1 - p)
    return log_coef + log_success + log_failure
@numba.njit
def binom_pmf(k, n, p):
    """Binomial pmf: exponentiated log-gamma approximation."""
    log_pmf = lbinom_pmf(k, n, p)
    return np.exp(log_pmf)
@numba.njit
def binom_cdf(k, n, p):
    """Binomial CDF via the regularized incomplete beta function."""
    if k < n:
        return numba_betainc(n - k, k + 1, 1.0 - p)
    # k at or beyond n: the full mass is covered.
    return 1.0
@numba.njit
def binom_sf(k, n, p):
    """Binomial survival function (1 - CDF)."""
    cdf = binom_cdf(k, n, p)
    return 1 - cdf
@numba.njit
def lbinom_pmf_diriv(k, n, p, dk=1e-7):
    """Numerical derivative (w.r.t. k) of the log-binomial pmf.

    Uses a forward difference; falls back to a backward difference at the
    upper boundary k + dk >= n.
    """
    if k + dk < n:
        forward = lbinom_pmf(k + dk, n, p)
        return (forward - lbinom_pmf(k, n, p)) / dk
    backward = lbinom_pmf(k - dk, n, p)
    return (backward - lbinom_pmf(k, n, p)) / -dk
@numba.njit(cache=True)
def _numeric_derivative(y0, y1, err, target, x_min, x_max, x0, x1):
    """One secant-method step toward <target>.

    Returns (dx, new_x0, new_x1); dx == 0 signals convergence (flat secant).
    """
    if abs(y1 - y0) < err:
        # Secant is flat — tell the caller to stop by returning dx == 0.
        return 0., x1, x1
    x_new = (target - y0) / (y1 - y0) * (x1 - x0) + x0
    # Clamp the step into the allowed bracket.
    x_new = max(x_min, min(x_new, x_max))
    return abs(x_new - x1), x1, x_new
@numba.njit
def lbinom_pmf_mode(x_min, x_max, target, args, err=1e-7, max_iter=50):
    """Locate the pmf mode: root of the derivative of the log-binomial pmf,
    found with the secant method."""
    lo, hi = x_min, x_max
    step = abs(hi - lo)
    remaining = max_iter
    while (step > err) and (remaining > 0):
        d_lo = lbinom_pmf_diriv(lo, *args)
        d_hi = lbinom_pmf_diriv(hi, *args)
        step, lo, hi = _numeric_derivative(d_lo, d_hi, err, target, x_min, x_max, lo, hi)
        remaining -= 1
    return hi
@numba.njit
def lbinom_pmf_inverse(x_min, x_max, target, args, err=1e-7, max_iter=50):
    """Find where the log binomial PMF crosses ``target`` (secant method).

    Searches k in [x_min, x_max] for lbinom_pmf(k, *args) == target;
    ``args`` is (n, p).
    """
    x0, x1 = x_min, x_max
    step = abs(x1 - x0)
    while (step > err) and (max_iter > 0):
        f0 = lbinom_pmf(x0, *args)
        f1 = lbinom_pmf(x1, *args)
        step, x0, x1 = _numeric_derivative(f0, f1, err, target, x_min, x_max, x0, x1)
        max_iter -= 1
    return x1
@numba.njit
def binom_test(k, n, p):
    """Two-sided binomial test (continuous analogue).

    Locates the point j on the opposite side of the distribution's mode
    whose log-probability matches that of k, then sums the two tail
    probabilities outward from k and j. A tail at zero is not integrated.
    """
    mode = lbinom_pmf_mode(0, n, 0, (n, p))
    # Bracket the search for j on the far side of the mode from k.
    if k <= mode:
        bracket_lo, bracket_hi = mode, n
    else:
        bracket_lo, bracket_hi = 0, mode
    log_pk = lbinom_pmf(k, n, p)
    j = lbinom_pmf_inverse(bracket_lo, bracket_hi, log_pk, (n, p))
    lower = min(k, j)
    upper = max(k, j)
    pval = 0
    if lower > 0:
        pval += binom_cdf(lower, n, p)
    if upper > 0:
        pval += binom_sf(upper, n, p)
    return min(1.0, pval)
@np.vectorize
@numba.njit
def s1_area_fraction_top_probability(aft_prob, area_tot, area_fraction_top, mode='continuous'):
    """Compute the S1 area-fraction-top probability via a binomial test.

    Treats area_top = area_tot * area_fraction_top as k "successes" out of
    n = area_tot trials with success probability p = aft_prob.

    :param aft_prob: expected area fraction top (p); must lie in [0, 1]
    :param area_tot: total S1 area (n)
    :param area_fraction_top: observed fraction of area seen by the top array
    :param mode: 'discrete' evaluates the PMF directly; any other value
        (default 'continuous') runs the two-sided binomial test
    :return: probability, or NaN for unphysical inputs
    """
    area_top = area_tot * area_fraction_top
    # Unphysical inputs (k > n, p outside [0, 1], or k < 0) yield NaN
    # rather than a test result.
    # k: area_top, n: area_tot, p: aft_prob
    if area_tot < area_top:
        return np.nan
    if (aft_prob > 1.0) or (aft_prob < 0.0):
        return np.nan
    if area_top < 0:
        return np.nan
    if mode == 'discrete':
        return binom_pmf(area_top, area_tot, aft_prob)
    return binom_test(area_top, area_tot, aft_prob)
| [
"numpy.sum",
"strax.endtime",
"numba.njit",
"numpy.isnan",
"straxen.numbafied_scipy.numba_gammaln",
"numpy.arange",
"scipy.special.loggamma",
"tempfile.TemporaryDirectory",
"numpy.isfinite",
"straxen.numbafied_scipy.numba_betainc",
"tarfile.open",
"strax.exporter",
"straxen.get_corrections.g... | [((270, 286), 'strax.exporter', 'strax.exporter', ([], {}), '()\n', (284, 286), False, 'import strax\n'), ((22440, 22462), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (22450, 22462), False, 'import numba\n'), ((321, 494), 'strax.Option', 'strax.Option', (['"""s1_optical_map"""'], {'help': '"""S1 (x, y, z) optical/pattern map."""', 'infer_type': '(False)', 'default': '"""XENONnT_s1_xyz_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl"""'}), "('s1_optical_map', help='S1 (x, y, z) optical/pattern map.',\n infer_type=False, default=\n 'XENONnT_s1_xyz_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl')\n", (333, 494), False, 'import strax\n'), ((508, 677), 'strax.Option', 'strax.Option', (['"""s2_optical_map"""'], {'help': '"""S2 (x, y) optical/pattern map."""', 'infer_type': '(False)', 'default': '"""XENONnT_s2_xy_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl"""'}), "('s2_optical_map', help='S2 (x, y) optical/pattern map.',\n infer_type=False, default=\n 'XENONnT_s2_xy_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl')\n", (520, 677), False, 'import strax\n'), ((691, 859), 'strax.Option', 'strax.Option', (['"""s2_tf_model"""'], {'help': '"""S2 (x, y) optical data-driven model"""', 'infer_type': '(False)', 'default': '"""XENONnT_s2_optical_map_data_driven_ML_v0_2021_11_25.tar.gz"""'}), "('s2_tf_model', help='S2 (x, y) optical data-driven model',\n infer_type=False, default=\n 'XENONnT_s2_optical_map_data_driven_ML_v0_2021_11_25.tar.gz')\n", (703, 859), False, 'import strax\n'), ((873, 1033), 'strax.Option', 'strax.Option', (['"""s1_aft_map"""'], {'help': '"""Date drive S1 area fraction top map."""', 'infer_type': '(False)', 'default': '"""s1_aft_dd_xyz_XENONnT_Kr83m_41500eV_31Oct2021.json"""'}), "('s1_aft_map', help='Date drive S1 area fraction top map.',\n infer_type=False, default=\n 's1_aft_dd_xyz_XENONnT_Kr83m_41500eV_31Oct2021.json')\n", (885, 1033), False, 'import strax\n'), ((1047, 1165), 'strax.Option', 
'strax.Option', (['"""mean_pe_per_photon"""'], {'help': '"""Mean of full VUV single photon response"""', 'default': '(1.2)', 'infer_type': '(False)'}), "('mean_pe_per_photon', help=\n 'Mean of full VUV single photon response', default=1.2, infer_type=False)\n", (1059, 1165), False, 'import strax\n'), ((1184, 1295), 'strax.Option', 'strax.Option', (['"""gain_model"""'], {'infer_type': '(False)', 'help': '"""PMT gain model. Specify as (model_type, model_config)"""'}), "('gain_model', infer_type=False, help=\n 'PMT gain model. Specify as (model_type, model_config)')\n", (1196, 1295), False, 'import strax\n'), ((1313, 1376), 'strax.Option', 'strax.Option', (['"""n_tpc_pmts"""'], {'type': 'int', 'help': '"""Number of TPC PMTs"""'}), "('n_tpc_pmts', type=int, help='Number of TPC PMTs')\n", (1325, 1376), False, 'import strax\n'), ((1399, 1466), 'strax.Option', 'strax.Option', (['"""n_top_pmts"""'], {'type': 'int', 'help': '"""Number of top TPC PMTs"""'}), "('n_top_pmts', type=int, help='Number of top TPC PMTs')\n", (1411, 1466), False, 'import strax\n'), ((1489, 1644), 'strax.Option', 'strax.Option', (['"""s1_min_area_pattern_fit"""'], {'infer_type': '(False)', 'help': '"""Skip EventPatternFit reconstruction if S1 area (PE) is less than this"""', 'default': '(2)'}), "('s1_min_area_pattern_fit', infer_type=False, help=\n 'Skip EventPatternFit reconstruction if S1 area (PE) is less than this',\n default=2)\n", (1501, 1644), False, 'import strax\n'), ((1675, 1831), 'strax.Option', 'strax.Option', (['"""s2_min_area_pattern_fit"""'], {'infer_type': '(False)', 'help': '"""Skip EventPatternFit reconstruction if S2 area (PE) is less than this"""', 'default': '(10)'}), "('s2_min_area_pattern_fit', infer_type=False, help=\n 'Skip EventPatternFit reconstruction if S2 area (PE) is less than this',\n default=10)\n", (1687, 1831), False, 'import strax\n'), ((1862, 1981), 'strax.Option', 'strax.Option', (['"""store_per_channel"""'], {'default': '(False)', 'type': 'bool', 'help': 
'"""Store normalized LLH per channel for each peak"""'}), "('store_per_channel', default=False, type=bool, help=\n 'Store normalized LLH per channel for each peak')\n", (1874, 1981), False, 'import strax\n'), ((1999, 2148), 'strax.Option', 'strax.Option', (['"""max_r_pattern_fit"""'], {'default': 'straxen.tpc_r', 'type': 'float', 'help': '"""Maximal radius of the peaks where llh calculation will be performed"""'}), "('max_r_pattern_fit', default=straxen.tpc_r, type=float, help=\n 'Maximal radius of the peaks where llh calculation will be performed')\n", (2011, 2148), False, 'import strax\n'), ((2166, 2352), 'strax.Option', 'strax.Option', ([], {'name': '"""electron_drift_velocity"""', 'infer_type': '(False)', 'help': '"""Vertical electron drift velocity in cm/ns (1e4 m/ms)"""', 'default': "('electron_drift_velocity', 'ONLINE', True)"}), "(name='electron_drift_velocity', infer_type=False, help=\n 'Vertical electron drift velocity in cm/ns (1e4 m/ms)', default=(\n 'electron_drift_velocity', 'ONLINE', True))\n", (2178, 2352), False, 'import strax\n'), ((2382, 2557), 'strax.Option', 'strax.Option', ([], {'name': '"""electron_drift_time_gate"""', 'infer_type': '(False)', 'help': '"""Electron drift time from the gate in ns"""', 'default': "('electron_drift_time_gate', 'ONLINE', True)"}), "(name='electron_drift_time_gate', infer_type=False, help=\n 'Electron drift time from the gate in ns', default=(\n 'electron_drift_time_gate', 'ONLINE', True))\n", (2394, 2557), False, 'import strax\n'), ((22026, 22062), 'straxen.numbafied_scipy.numba_betainc', 'numba_betainc', (['(n - k)', '(k + 1)', '(1.0 - p)'], {}), '(n - k, k + 1, 1.0 - p)\n', (22039, 22062), False, 'from straxen.numbafied_scipy import numba_gammaln, numba_betainc\n'), ((6005, 6081), 'straxen.get_corrections.get_correction_from_cmt', 'get_correction_from_cmt', (['self.run_id', "self.config['electron_drift_velocity']"], {}), "(self.run_id, self.config['electron_drift_velocity'])\n", (6028, 6081), False, 'from 
straxen.get_corrections import get_correction_from_cmt\n'), ((6122, 6199), 'straxen.get_corrections.get_correction_from_cmt', 'get_correction_from_cmt', (['self.run_id', "self.config['electron_drift_time_gate']"], {}), "(self.run_id, self.config['electron_drift_time_gate'])\n", (6145, 6199), False, 'from straxen.get_corrections import get_correction_from_cmt\n'), ((7055, 7080), 'straxen.MongoDownloader', 'straxen.MongoDownloader', ([], {}), '()\n', (7078, 7080), False, 'import straxen\n'), ((7844, 7915), 'straxen.get_correction_from_cmt', 'straxen.get_correction_from_cmt', (['self.run_id', "self.config['gain_model']"], {}), "(self.run_id, self.config['gain_model'])\n", (7875, 7915), False, 'import straxen\n'), ((8397, 8418), 'strax.endtime', 'strax.endtime', (['events'], {}), '(events)\n', (8410, 8418), False, 'import strax\n'), ((9949, 9964), 'numpy.sum', 'np.sum', (['mask_s1'], {}), '(mask_s1)\n', (9955, 9964), True, 'import numpy as np\n'), ((11448, 11467), 'numpy.sum', 'np.sum', (['mask_alt_s1'], {}), '(mask_alt_s1)\n', (11454, 11467), True, 'import numpy as np\n'), ((12801, 12815), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (12812, 12815), True, 'import numpy as np\n'), ((12839, 12853), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (12850, 12853), True, 'import numpy as np\n'), ((12877, 12891), 'numpy.isfinite', 'np.isfinite', (['z'], {}), '(z)\n', (12888, 12891), True, 'import numpy as np\n'), ((13258, 13277), 'numpy.sum', 'np.sum', (['cur_s1_bool'], {}), '(cur_s1_bool)\n', (13264, 13277), True, 'import numpy as np\n'), ((20856, 20902), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (20867, 20902), True, 'import numpy as np\n'), ((21682, 21702), 'straxen.numbafied_scipy.numba_gammaln', 'numba_gammaln', (['(k + 1)'], {}), '(k + 1)\n', (21695, 21702), False, 'from straxen.numbafied_scipy import numba_gammaln, numba_betainc\n'), ((7175, 7204), 
'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7202, 7204), False, 'import tempfile\n'), ((7238, 7280), 'tarfile.open', 'tarfile.open', (['self.model_file'], {'mode': '"""r:gz"""'}), "(self.model_file, mode='r:gz')\n", (7250, 7280), False, 'import tarfile\n'), ((7482, 7567), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['tmpdirname'], {'custom_objects': "{'_logl_loss': _logl_loss}"}), "(tmpdirname, custom_objects={'_logl_loss':\n _logl_loss})\n", (7508, 7567), True, 'import tensorflow as tf\n'), ((7941, 7966), 'numpy.where', 'np.where', (['(self.to_pe == 0)'], {}), '(self.to_pe == 0)\n', (7949, 7966), True, 'import numpy as np\n'), ((8782, 8832), 'numpy.vstack', 'np.vstack', (["[events['x'], events['y'], events['z']]"], {}), "([events['x'], events['y'], events['z']])\n", (8791, 8832), True, 'import numpy as np\n'), ((9131, 9190), 'numpy.vstack', 'np.vstack', (["[events['x'], events['y'], alt_s1_interaction_z]"], {}), "([events['x'], events['y'], alt_s1_interaction_z])\n", (9140, 9190), True, 'import numpy as np\n'), ((9305, 9323), 'numpy.isnan', 'np.isnan', (['aft_prob'], {}), '(aft_prob)\n', (9313, 9323), True, 'import numpy as np\n'), ((9344, 9371), 'numpy.isnan', 'np.isnan', (["events['s1_area']"], {}), "(events['s1_area'])\n", (9352, 9371), True, 'import numpy as np\n'), ((9392, 9432), 'numpy.isnan', 'np.isnan', (["events['s1_area_fraction_top']"], {}), "(events['s1_area_fraction_top'])\n", (9400, 9432), True, 'import numpy as np\n'), ((10762, 10784), 'numpy.isnan', 'np.isnan', (['alt_aft_prob'], {}), '(alt_aft_prob)\n', (10770, 10784), True, 'import numpy as np\n'), ((10809, 10840), 'numpy.isnan', 'np.isnan', (["events['alt_s1_area']"], {}), "(events['alt_s1_area'])\n", (10817, 10840), True, 'import numpy as np\n'), ((10865, 10909), 'numpy.isnan', 'np.isnan', (["events['alt_s1_area_fraction_top']"], {}), "(events['alt_s1_area_fraction_top'])\n", (10873, 10909), True, 'import numpy as np\n'), ((14996, 
15024), 'numpy.sum', 'np.sum', (['norm_llh_val'], {'axis': '(1)'}), '(norm_llh_val, axis=1)\n', (15002, 15024), True, 'import numpy as np\n'), ((16065, 16093), 'numpy.sum', 'np.sum', (['norm_llh_val'], {'axis': '(1)'}), '(norm_llh_val, axis=1)\n', (16071, 16093), True, 'import numpy as np\n'), ((16472, 16500), 'numpy.sum', 'np.sum', (['norm_llh_val'], {'axis': '(1)'}), '(norm_llh_val, axis=1)\n', (16478, 16500), True, 'import numpy as np\n'), ((17290, 17305), 'numpy.sum', 'np.sum', (['s2_mask'], {}), '(s2_mask)\n', (17296, 17305), True, 'import numpy as np\n'), ((19900, 19915), 'numpy.sum', 'np.sum', (['s2_mask'], {}), '(s2_mask)\n', (19906, 19915), True, 'import numpy as np\n'), ((21632, 21652), 'straxen.numbafied_scipy.numba_gammaln', 'numba_gammaln', (['(n + 1)'], {}), '(n + 1)\n', (21645, 21652), False, 'from straxen.numbafied_scipy import numba_gammaln, numba_betainc\n'), ((21655, 21679), 'straxen.numbafied_scipy.numba_gammaln', 'numba_gammaln', (['(n - k + 1)'], {}), '(n - k + 1)\n', (21668, 21679), False, 'from straxen.numbafied_scipy import numba_gammaln, numba_betainc\n'), ((21755, 21768), 'numpy.log', 'np.log', (['(1 - p)'], {}), '(1 - p)\n', (21761, 21768), True, 'import numpy as np\n'), ((8002, 8041), 'numpy.arange', 'np.arange', (['(0)', "self.config['n_tpc_pmts']"], {}), "(0, self.config['n_tpc_pmts'])\n", (8011, 8041), True, 'import numpy as np\n'), ((13780, 13859), 'numpy.sum', 'np.sum', (["s1_map_effs[:, :self.config['n_top_pmts']][:, self.pmtbool_top]"], {'axis': '(1)'}), "(s1_map_effs[:, :self.config['n_top_pmts']][:, self.pmtbool_top], axis=1)\n", (13786, 13859), True, 'import numpy as np\n'), ((14125, 14211), 'numpy.sum', 'np.sum', (["s1_map_effs[:, self.config['n_top_pmts']:][:, self.pmtbool_bottom]"], {'axis': '(1)'}), "(s1_map_effs[:, self.config['n_top_pmts']:][:, self.pmtbool_bottom],\n axis=1)\n", (14131, 14211), True, 'import numpy as np\n'), ((15240, 15298), 'numpy.zeros', 'np.zeros', (["(s1_pattern.shape[0], 
self.config['n_tpc_pmts'])"], {}), "((s1_pattern.shape[0], self.config['n_tpc_pmts']))\n", (15248, 15298), True, 'import numpy as np\n'), ((15506, 15566), 'numpy.zeros', 'np.zeros', (["(norm_llh_val.shape[0], self.config['n_tpc_pmts'])"], {}), "((norm_llh_val.shape[0], self.config['n_tpc_pmts']))\n", (15514, 15566), True, 'import numpy as np\n'), ((18767, 18795), 'numpy.sum', 'np.sum', (['norm_llh_val'], {'axis': '(1)'}), '(norm_llh_val, axis=1)\n', (18773, 18795), True, 'import numpy as np\n'), ((21070, 21092), 'numpy.log', 'np.log', (['mean_pe_photon'], {}), '(mean_pe_photon)\n', (21076, 21092), True, 'import numpy as np\n'), ((21733, 21742), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (21739, 21742), True, 'import numpy as np\n'), ((13490, 13534), 'numpy.sum', 'np.sum', (['s1_map_effs[:, self.pmtbool]'], {'axis': '(1)'}), '(s1_map_effs[:, self.pmtbool], axis=1)\n', (13496, 13534), True, 'import numpy as np\n'), ((18887, 18945), 'numpy.zeros', 'np.zeros', (["(s2_pattern.shape[0], self.config['n_top_pmts'])"], {}), "((s2_pattern.shape[0], self.config['n_top_pmts']))\n", (18895, 18945), True, 'import numpy as np\n'), ((19142, 19202), 'numpy.zeros', 'np.zeros', (["(norm_llh_val.shape[0], self.config['n_top_pmts'])"], {}), "((norm_llh_val.shape[0], self.config['n_top_pmts']))\n", (19150, 19202), True, 'import numpy as np\n'), ((21027, 21049), 'scipy.special.loggamma', 'loggamma', (['(fraction + 1)'], {}), '(fraction + 1)\n', (21035, 21049), False, 'from scipy.special import loggamma\n'), ((13325, 13344), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (13333, 13344), True, 'import numpy as np\n'), ((17633, 17660), 'numpy.sum', 'np.sum', (['s2_map_effs'], {'axis': '(1)'}), '(s2_map_effs, axis=1)\n', (17639, 17660), True, 'import numpy as np\n'), ((19942, 19958), 'numpy.stack', 'np.stack', (['(x, y)'], {}), '((x, y))\n', (19950, 19958), True, 'import numpy as np\n'), ((17357, 17373), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (17365, 
17373), True, 'import numpy as np\n'), ((20996, 21006), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (21002, 21006), True, 'import numpy as np\n')] |
#########################################
# Time Series Figures
#########################################
#### Import Libraries and Functions
from pyhydroqc import anomaly_utilities, rules_detect, calibration
from pyhydroqc.parameters import site_params
import matplotlib.pyplot as plt
import datetime
import pandas as pd
import numpy as np
import os
# Shared four-color palette used by all figures below.
colors = ['#0C7BDC', '#F3870D', '#24026A', '#AF3C31']
# FIGURES 3 (gap values and drift correction), 4 (threshold), C1 (detection example), C2 (long labeled event),
# C3 (model detection for calibration events)
# These figures all use data from Main Street.
#### Retrieve data
#########################################
site = 'MainStreet'
sensors = ['temp', 'cond', 'ph', 'do']
years = [2014, 2015, 2016, 2017, 2018, 2019]
# Load observations for each sensor into a dict keyed by sensor name.
sensor_array = anomaly_utilities.get_data(sensors=sensors, site=site, years=years, path="./LRO_data/")
#### Rules Based Anomaly Detection
#########################################
range_count = dict()
persist_count = dict()
# NOTE(review): rules_metrics is initialized here but not populated in this script.
rules_metrics = dict()
for snsr in sensor_array:
    # Flag values outside the site/sensor-specific plausible range.
    sensor_array[snsr], range_count[snsr] = rules_detect.range_check(df=sensor_array[snsr],
                                                                    maximum=site_params[site][snsr]['max_range'],
                                                                    minimum=site_params[site][snsr]['min_range'])
    # Flag runs of repeated values longer than the persistence threshold.
    sensor_array[snsr], persist_count[snsr] = rules_detect.persistence(df=sensor_array[snsr],
                                                                      length=site_params[site][snsr]['persist'],
                                                                      output_grp=True)
    # Fill rule-flagged points by interpolation before further processing.
    sensor_array[snsr] = rules_detect.interpolate(df=sensor_array[snsr])
print('Rules based detection complete.\n')
### Find Gap Values
#########################################
# Subset of sensors that are calibrated
calib_sensors = sensors[1:4]
# Initialize data structures
calib_dates = dict()
gaps = dict()
# NOTE(review): shifts is initialized per sensor below but never appended to,
# and tech_shifts is never used in this script.
shifts = dict()
tech_shifts = dict()
for cal_snsr in calib_sensors:
    # Import calibration dates
    calib_dates[cal_snsr] = pd.read_csv('./LRO_data/' + site + '_' + cal_snsr + '_calib_dates.csv',
                                        header=1, parse_dates=True, infer_datetime_format=True)
    calib_dates[cal_snsr]['start'] = pd.to_datetime(calib_dates[cal_snsr]['start'])
    calib_dates[cal_snsr]['end'] = pd.to_datetime(calib_dates[cal_snsr]['end'])
    # Ensure date range of calibrations correspond to imported data
    calib_dates[cal_snsr] = calib_dates[cal_snsr].loc[(calib_dates[cal_snsr]['start'] > min(sensor_array[cal_snsr].index)) &
                                                      (calib_dates[cal_snsr]['end'] < max(sensor_array[cal_snsr].index))]
    # Initialize dataframe to store determined gap values and associated dates
    gaps[cal_snsr] = pd.DataFrame(columns=['end', 'gap'],
                                  index=range(min(calib_dates[cal_snsr].index), max(calib_dates[cal_snsr].index)+1))
    if len(calib_dates[cal_snsr]) > 0:
        # Initialize data structures
        shifts[cal_snsr] = []
        # Loop through each calibration event date.
        for i in range(min(calib_dates[cal_snsr].index), max(calib_dates[cal_snsr].index)+1):
            # Apply find_gap routine, add to dataframe, add output of shifts to list.
            gap, end = calibration.find_gap(observed=sensor_array[cal_snsr]['observed'],
                                            calib_date=calib_dates[cal_snsr]['end'][i],
                                            hours=2,
                                            show_shift=False)
            # NOTE(review): .loc[i]['end'] is chained indexing; .loc[i, 'end']
            # is the safer form in modern pandas.
            gaps[cal_snsr].loc[i]['end'] = end
            gaps[cal_snsr].loc[i]['gap'] = gap
print('Gap value determination complete.\n')
# Review gaps and make adjustments as needed before performing drift correction
# Manual overrides of automatically determined gap values (and one end date),
# applied after review of the automated results above.
gaps['cond'].loc[3, 'gap'] = 4
gaps['cond'].loc[4, 'gap'] = 10
gaps['cond'].loc[21, 'gap'] = 0
gaps['cond'].loc[39, 'gap'] = -5
gaps['cond'].loc[41, 'gap'] = 4
gaps['ph'].loc[33, 'gap'] = -0.04
gaps['ph'].loc[43, 'gap'] = 0.12
gaps['ph'].loc[43, 'end'] = '2019-08-15 15:00'
#### Perform Linear Drift Correction
#########################################
calib_sensors = sensors[1:4]
for cal_snsr in calib_sensors:
    # Set start dates for drift correction at the previously identified calibration
    # (one month back for the first calibration).
    gaps[cal_snsr]['start'] = gaps[cal_snsr]['end'].shift(1)
    # Use .loc for the assignment: the chained form df['start'][0] = ... may
    # silently write to a copy and is deprecated in modern pandas.
    gaps[cal_snsr].loc[0, 'start'] = gaps[cal_snsr].loc[0, 'end'] - pd.Timedelta(days=30)
    if len(gaps[cal_snsr]) > 0:
        # Apply the correction segment by segment between calibrations.
        for i in range(min(gaps[cal_snsr].index), max(gaps[cal_snsr].index) + 1):
            result, sensor_array[cal_snsr]['observed'] = calibration.lin_drift_cor(observed=sensor_array[cal_snsr]['observed'],
                                                                                  start=gaps[cal_snsr]['start'][i],
                                                                                  end=gaps[cal_snsr]['end'][i],
                                                                                  gap=gaps[cal_snsr]['gap'][i],
                                                                                  replace=True)
print('Linear drift correction complete.\n')
## FIGURE 3 ##
#########################################
# Compare calibration and drift correction to Observed data and to technician corrected.
cal_snsr = 'ph'
df = sensor_array[cal_snsr]
plt.figure(figsize=(10, 4))
plt.plot(df['raw'], colors[0], label='Observed data')
plt.plot(df['cor'], colors[1], label='Technician corrected')
plt.plot(df['observed'], colors[3], label='Algorithm corrected')
plt.xlim(datetime.datetime(2014, 7, 24), datetime.datetime(2014, 8, 1))  # Specify date range of plot
plt.ylim(7.6, 8.4)
plt.legend()
plt.ylabel('pH')
plt.xlabel('Date')
# Save before show(): calling savefig after show() can write an empty image
# because show() may close the figure.
plt.savefig('Figures/Figure3.png', bbox_inches='tight')
plt.show()
## FIGURE 4 ##
#########################################
# Examine thresholds and model residuals
# set working directory for importing model results.
os.chdir('Examples/Plotting')
ARIMA_detections = pd.read_csv('ARIMA_detections_MainStreet_cond.csv',
                               header=0,
                               index_col=0,
                               parse_dates=True,
                               infer_datetime_format=True)
ARIMA_threshold = pd.read_csv('ARIMA_threshold_MainStreet_cond.csv',
                              header=0,
                              index_col=0,
                              parse_dates=True,
                              infer_datetime_format=True)
plt.figure(figsize=(10, 4))
plt.plot(ARIMA_detections['residual'], 'b', label='Model residuals')
# Legend labels fixed: the 'low' series is the lower threshold and 'high'
# the upper (they were previously swapped).
plt.plot(ARIMA_threshold['low'], 'c', label='Lower threshold')
plt.plot(ARIMA_threshold['high'], 'm', mfc='none', label='Upper threshold')
plt.xlim(datetime.datetime(2015, 7, 8), datetime.datetime(2015, 8, 14))  # Specify date range of plot
plt.ylim(-200, 150)
plt.xticks(pd.date_range(start='7/9/2015', end='8/14/2015', freq='5D'))  # Specify xticks at 5-day intervals
plt.legend()
plt.ylabel('Specific conductance, μS/cm')
plt.xlabel('Date')
# Save before show() so the saved image is not blank.
plt.savefig('Figures/Figure4.png', bbox_inches='tight')
plt.show()
## FIGURE C1 ##
#########################################
# Detection example
# Load bidirectional multivariate LSTM results for MainStreet conductance.
LSTM_multivar_bidir_detections = pd.read_csv('LSTM_multivar_bidir_detections_MainStreet_cond.csv',
                                             header=0,
                                             index_col=0,
                                             parse_dates=True,
                                             infer_datetime_format=True)
LSTM_multivar_bidir_df = pd.read_csv('LSTM_multivar_bidir_df_MainStreet_cond.csv',
                                     header=0,
                                     index_col=0,
                                     parse_dates=True,
                                     infer_datetime_format=True)
raw = LSTM_multivar_bidir_df['raw']
predictions = LSTM_multivar_bidir_detections['prediction']
labels = LSTM_multivar_bidir_df['labeled_event']
detections = LSTM_multivar_bidir_df['detected_event']
plt.figure(figsize=(10, 4))
plt.plot(raw, color=colors[0], label='Observed data')
plt.plot(predictions, color=colors[2], label='Model prediction')
plt.plot(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5, markeredgewidth=1, label='Technician labeled anomalies')
plt.plot(predictions[detections > 0], 'x', color=colors[3], markersize=6, markeredgewidth=2, label='Algorithm detected anomalies')
plt.xlim(datetime.datetime(2017, 12, 18), datetime.datetime(2017, 12, 27))  # Specify date range of plot
plt.ylim(-20, 1220)
plt.yticks(range(0, 1200, 200))
plt.xticks(pd.date_range(start='12/18/2017', end='12/27/2017', freq='2D'))  # Specify xticks at 2-day intervals
plt.ylabel('Specific conductance, μS/cm')
plt.xlabel('Date')
plt.legend(labelspacing=0.2, loc='upper left', ncol=2, fontsize=9, handletextpad=0.2, columnspacing=0.25)
# Callouts pointing at true-positive and false-positive detection events;
# coordinates are hand-tuned to this date range.
plt.annotate('true\npositive\nevent', xy=(datetime.datetime(2017, 12, 18, 16, 0), 415), xycoords='data',
             xytext=(datetime.datetime(2017, 12, 19, 8, 0), 800), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
plt.annotate('true\npositive\nevent', xy=(datetime.datetime(2017, 12, 26, 12, 0), 450), xycoords='data',
             xytext=(datetime.datetime(2017, 12, 25, 20, 0), 850), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
plt.annotate('false\npositive\nevents', xy=(datetime.datetime(2017, 12, 20, 18, 0), 365), xycoords='data',
             xytext=(datetime.datetime(2017, 12, 22, 20, 0), 175), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
# The following empty-label annotates add extra arrows from the same
# 'false positive events' text to additional points.
plt.annotate('', xy=(datetime.datetime(2017, 12, 22, 15, 0), 350), xycoords='data',
             xytext=(datetime.datetime(2017, 12, 22, 18, 0), 190), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
plt.annotate(' ', xy=(datetime.datetime(2017, 12, 23, 10, 0), 700), xycoords='data',
             xytext=(datetime.datetime(2017, 12, 22, 20, 0), 190), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
plt.annotate(' ', xy=(datetime.datetime(2017, 12, 25, 12, 0), 460), xycoords='data',
             xytext=(datetime.datetime(2017, 12, 22, 20, 0), 170), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
plt.savefig('Figures/FigureC1.png', bbox_inches='tight')
## FIGURE C2 ##
#########################################
# Compare technician and algorithm detections
LSTM_multivar_bidir_detections = pd.read_csv('LSTM_multivar_bidir_detections_MainStreet_ph.csv',
                                             header=0,
                                             index_col=0,
                                             parse_dates=True,
                                             infer_datetime_format=True)
LSTM_multivar_bidir_df = pd.read_csv('LSTM_multivar_bidir_df_MainStreet_ph.csv',
                                     header=0,
                                     index_col=0,
                                     parse_dates=True,
                                     infer_datetime_format=True)
raw = LSTM_multivar_bidir_df['raw']
predictions = LSTM_multivar_bidir_detections['prediction']
labels = LSTM_multivar_bidir_df['labeled_event']
detections = LSTM_multivar_bidir_df['detected_event']
plt.figure(figsize=(12, 4))
plt.plot(raw, color=colors[0], label='Observed data')
plt.plot(predictions, color=colors[2], label='Model prediction')
plt.plot(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5, markeredgewidth=1, label='Technician labeled anomalies')
plt.plot(predictions[detections > 0], 'x', color=colors[3], markersize=6, markeredgewidth=2, label='Algorithm detected anomalies')
plt.xlim(datetime.datetime(2018, 6, 1), datetime.datetime(2018, 10, 30))  # Specify date range of plot
plt.ylim(8.25, 10.5)
plt.xticks(pd.date_range(start='6/1/2018', end='10/30/2018', freq='15D'))  # Specify xticks at 15-day intervals
plt.legend()
plt.ylabel('pH')
plt.xlabel('Date')
# Save before show(): calling savefig after show() can write an empty image
# because show() may close the figure.
plt.savefig('Figures/FigureC2.png', bbox_inches='tight')
plt.show()
## FIGURE C3 ##
#########################################
# Examine calibration events
# Two stacked panels; reuses raw/predictions/labels/detections loaded above
# for Figure C2 (MainStreet pH).
figC3 = plt.figure(figsize=(10, 6))
ax = figC3.add_subplot(2, 1, 1)
ax.plot(raw, color=colors[0], label='Observed data')
ax.plot(predictions, color=colors[2], label='Model prediction')
ax.plot(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5, markeredgewidth=1, label='Technician labeled anomalies')
ax.plot(predictions[detections > 0], 'x', color=colors[3], markersize=6, markeredgewidth=2, label='Algorithm detected anomalies')
ax.set_xlim(datetime.datetime(2017, 10, 2), datetime.datetime(2017, 10, 5))  # Specify date range of plot
ax.set_ylim(8.35, 8.7)
ax.set_xticks(pd.date_range(start='10/2/2017', end='10/5/2017', freq='1D'))  # Specify xticks at 1-day intervals
ax.set_yticks(np.arange(8.4, 8.75, 0.1))
ax.legend()
ax.set_ylabel('pH')
ax.annotate('a', xy=(0.015, 0.9), xycoords='axes fraction', fontsize=15)  # panel label
ax = figC3.add_subplot(2, 1, 2)
ax.plot(raw, color=colors[0], label='Observed data')
ax.plot(predictions, color=colors[2], label='Model prediction')
ax.plot(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5, markeredgewidth=1, label='Technician labeled anomalies')
ax.plot(predictions[detections > 0], 'x', color=colors[3], markersize=6, markeredgewidth=2, label='Algorithm detected anomalies')
ax.set_xlim(datetime.datetime(2019, 8, 27), datetime.datetime(2019, 8, 30))  # Specify date range of plot
ax.set_ylim(8.1, 9.0)
ax.set_xticks(pd.date_range(start='8/27/2019', end='8/30/2019', freq='1D'))  # Specify xticks at 1-day intervals
ax.set_ylabel('pH')
ax.annotate('b', xy=(0.015, 0.9), xycoords='axes fraction', fontsize=15)  # panel label
plt.xlabel('Date')
plt.savefig('Figures/FigureC3.png', bbox_inches='tight')
# FIGURES 5 (detection example), C4 (model comparison examples)
# These figures all use data from Tony Grove.
#### Retrieve data
#########################################
# Step back up to the project root (the script chdir'd into
# Examples/Plotting earlier).
os.chdir('..')
os.chdir('..')
site = 'TonyGrove'
sensors = ['temp', 'cond', 'ph', 'do']
years = [2014, 2015, 2016, 2017, 2018, 2019]
# Load observations for each sensor into a dict keyed by sensor name.
sensor_array = anomaly_utilities.get_data(sensors=sensors, site=site, years=years, path="./LRO_data/")
os.chdir('Examples/Plotting/')


def _load_results(filename):
    """Read a saved model-results CSV with a parsed datetime index."""
    return pd.read_csv(filename,
                       header=0,
                       index_col=0,
                       parse_dates=True,
                       infer_datetime_format=True)


# Load detection and anomaly dataframes for each model variant.
ARIMA_detections = _load_results('ARIMA_detections_TonyGrove_cond.csv')
ARIMA_df = _load_results('ARIMA_df_TonyGrove_cond.csv')
LSTM_univar_detections = _load_results('LSTM_univar_detections_TonyGrove_cond.csv')
LSTM_univar_df = _load_results('LSTM_univar_df_anomalies_TonyGrove_cond.csv')
LSTM_univar_bidir_detections = _load_results('LSTM_univar_bidir_detections_TonyGrove_cond.csv')
LSTM_univar_bidir_df = _load_results('LSTM_univar_bidir_df_anomalies_TonyGrove_cond.csv')
LSTM_multivar_detections = _load_results('LSTM_multivar_detections_TonyGrove_cond.csv')
LSTM_multivar_df = _load_results('LSTM_multivar_df_TonyGrove_cond.csv')
LSTM_multivar_bidir_detections = _load_results('LSTM_multivar_bidir_detections_TonyGrove_cond.csv')
LSTM_multivar_bidir_df = _load_results('LSTM_multivar_bidir_df_TonyGrove_cond.csv')
## FIGURE 5 ##
#########################################
# Detection example
raw = sensor_array['cond']['raw']
labels = ARIMA_df['labeled_event']
predictions = ARIMA_detections['prediction']
detections = ARIMA_df['detected_event']
plt.figure(figsize=(10, 4))
plt.plot(raw, color=colors[0], label='Observed data')
plt.plot(predictions, color=colors[2], label='Model prediction')
plt.plot(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5, markeredgewidth=1, label='Technician labeled anomalies')
plt.plot(predictions[detections > 0], 'x', color=colors[3], markersize=6, markeredgewidth=2, label='Algorithm detected anomalies')
plt.xlim(datetime.datetime(2018, 11, 9), datetime.datetime(2018, 11, 16))  # Specify date range of plot
plt.ylim(330, 425)
plt.xticks(pd.date_range(start='11/9/2018', end='11/16/2018', freq='1D'))  # Specify xticks at 1-day intervals
plt.legend()
plt.ylabel('Specific conductance, μS/cm')
plt.xlabel('Date')
# Callouts for example events; coordinates are hand-tuned to this date range.
plt.annotate('false\npositive\nevent', xy=(datetime.datetime(2018, 11, 11, 11, 0), 359), xycoords='data',
             xytext=(datetime.datetime(2018, 11, 11, 0, 0), 345), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
plt.annotate('true\npositive\nevents', xy=(datetime.datetime(2018, 11, 12, 15, 0), 356), xycoords='data',
             xytext=(datetime.datetime(2018, 11, 13, 2, 0), 345), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
plt.annotate(' ', xy=(datetime.datetime(2018, 11, 13, 10, 0), 380), xycoords='data',
             xytext=(datetime.datetime(2018, 11, 13, 2, 0), 345), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
plt.annotate('false\nnegative\nevent', xy=(datetime.datetime(2018, 11, 14, 17, 0), 357), xycoords='data',
             xytext=(datetime.datetime(2018, 11, 15, 5, 0), 345), textcoords='data',
             arrowprops=dict(facecolor='black', width=1.5, headwidth=7),
             horizontalalignment='center', verticalalignment='top')
# Save before show(): calling savefig after show() can write an empty image
# because show() may close the figure.
plt.savefig('Figures/Figure5.png', bbox_inches='tight')
plt.show()
## FIGURE C4 ##
#########################################
# Model comparison: a 5x3 grid of panels — one row per model type, one
# column per event window — each overlaying observed data, model
# predictions, labeled anomalies, and detected anomalies.
raw = sensor_array['cond']['raw']
labels = ARIMA_df['labeled_event']
# Collect each model's prediction series and detected-event flags, keyed by
# model name.  (The original code assigned the 'lstm_univar_bidir' pair
# twice; the redundant duplicate assignments were removed.)
predictions = dict()
detections = dict()
predictions['ARIMA'] = ARIMA_detections['prediction']
detections['ARIMA'] = ARIMA_df['detected_event']
predictions['lstm_univar'] = LSTM_univar_detections['prediction']
detections['lstm_univar'] = LSTM_univar_df['detected_event']
predictions['lstm_univar_bidir'] = LSTM_univar_bidir_detections['prediction']
detections['lstm_univar_bidir'] = LSTM_univar_bidir_df['detected_event']
predictions['lstm_multivar'] = LSTM_multivar_detections['prediction']
detections['lstm_multivar'] = LSTM_multivar_df['detected_event']
predictions['lstm_multivar_bidir'] = LSTM_multivar_bidir_detections['prediction']
detections['lstm_multivar_bidir'] = LSTM_multivar_bidir_df['detected_event']
model_type = ['ARIMA', 'lstm_univar', 'lstm_univar_bidir', 'lstm_multivar', 'lstm_multivar_bidir']
model_text = ['ARIMA', 'LSTM univariate', 'LSTM univariate bidirectional', 'LSTM multivariate', 'LSTM multivariate bidirectional']
fig, ax = plt.subplots(nrows=5, ncols=3, figsize=(15, 8), sharex='col', sharey='col',
                       gridspec_kw={'width_ratios': [3, 1, 1], 'height_ratios': [1, 1, 1, 1, 1], 'hspace': 0, 'wspace': 0.11})
fig.text(0.08, 0.5, 'Specific conductance, μS/cm', va='center', rotation='vertical', fontsize=14)
fig.text(0.5, 0.05, 'Date', va='center', fontsize=14)
for i, mdl in enumerate(model_type):
    # Column 0: September 2014 event window.
    ax[i][0].plot(raw, color=colors[0], label='Observed data')
    ax[i][0].plot(predictions[mdl], color=colors[2], label='Model prediction')
    ax[i][0].plot(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5, markeredgewidth=1, label='Technician labeled anomalies')
    ax[i][0].plot(predictions[mdl][detections[mdl] > 0], 'x', color=colors[3], markersize=6, markeredgewidth=2, label='Algorithm detected anomalies')
    ax[i][0].set_xlim(datetime.datetime(2014, 9, 18, 18), datetime.datetime(2014, 9, 20, 6))
    ax[i][0].set_ylim(295, 375)
    ax[i][0].set_yticks(ticks=[300, 320, 340, 360])
    ax[i][0].set_xticks(pd.date_range(start='9/18/2014 18:00', end='9/20/2014 6:00', freq='8H'))
    # Column 1: February 2015 event window.
    ax[i][1].plot(raw, color=colors[0], label='Observed data')
    ax[i][1].plot(predictions[mdl], color=colors[2], label='Model prediction')
    ax[i][1].plot(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5, markeredgewidth=1, label='Technician labeled anomalies')
    ax[i][1].plot(predictions[mdl][detections[mdl] > 0], 'x', color=colors[3], markersize=6, markeredgewidth=2, label='Algorithm detected anomalies')
    ax[i][1].set_xlim(datetime.datetime(2015, 2, 5, 6, 0), datetime.datetime(2015, 2, 5, 18))
    ax[i][1].set_ylim(350, 440)
    ax[i][1].set_yticks(ticks=[350, 375, 400, 425])
    ax[i][1].set_xticks(pd.date_range(start='2/5/2015 8:00', end='2/5/2015 18:00', freq='6H'))
    # Column 2: July 2015 event window.
    ax[i][2].plot(raw, color=colors[0], label='Observed data')
    ax[i][2].plot(predictions[mdl], color=colors[2], label='Model prediction')
    ax[i][2].plot(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5, markeredgewidth=1, label='Technician labeled anomalies')
    ax[i][2].plot(predictions[mdl][detections[mdl] > 0], 'x', color=colors[3], markersize=6, markeredgewidth=2, label='Algorithm detected anomalies')
    ax[i][2].set_xlim(datetime.datetime(2015, 7, 6), datetime.datetime(2015, 7, 30))
    ax[i][2].set_ylim(310, 385)
    ax[i][2].set_yticks(ticks=[325, 350, 375])
    ax[i][2].set_xticks(pd.date_range(start='7/8/2015', end='7/30/2015', freq='5D'))
    # Row label: model name placed inside the left panel.
    ax[i][0].annotate(model_text[i],
                      xy=(datetime.datetime(2014, 9, 18, 20), 365),
                      xytext=(datetime.datetime(2014, 9, 18, 20), 365),
                      annotation_clip=False, rotation=0,
                      ha='left', va='center', fontname='Arial Narrow',
                      horizontalalignment='right', verticalalignment='top')
# Single shared legend in the bottom-left panel.
ax[4][0].legend(ncol=1, labelspacing=0.2, fontsize=9, handletextpad=0.2, columnspacing=0.25, loc='lower right')
plt.savefig('Figures/FigureC4.png', bbox_inches='tight')
###################################################
| [
"matplotlib.pyplot.savefig",
"pyhydroqc.anomaly_utilities.get_data",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"numpy.arange",
"os.chdir",
"pyhydroqc.calibration.lin_drift_cor",
"pandas.Timedelta",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"pandas.date_range",
"pyhydroqc.rul... | [((793, 885), 'pyhydroqc.anomaly_utilities.get_data', 'anomaly_utilities.get_data', ([], {'sensors': 'sensors', 'site': 'site', 'years': 'years', 'path': '"""./LRO_data/"""'}), "(sensors=sensors, site=site, years=years, path=\n './LRO_data/')\n", (819, 885), False, 'from pyhydroqc import anomaly_utilities, rules_detect, calibration\n'), ((5496, 5523), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (5506, 5523), True, 'import matplotlib.pyplot as plt\n'), ((5524, 5577), 'matplotlib.pyplot.plot', 'plt.plot', (["df['raw']", 'colors[0]'], {'label': '"""Observed data"""'}), "(df['raw'], colors[0], label='Observed data')\n", (5532, 5577), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5638), 'matplotlib.pyplot.plot', 'plt.plot', (["df['cor']", 'colors[1]'], {'label': '"""Technician corrected"""'}), "(df['cor'], colors[1], label='Technician corrected')\n", (5586, 5638), True, 'import matplotlib.pyplot as plt\n'), ((5639, 5703), 'matplotlib.pyplot.plot', 'plt.plot', (["df['observed']", 'colors[3]'], {'label': '"""Algorithm corrected"""'}), "(df['observed'], colors[3], label='Algorithm corrected')\n", (5647, 5703), True, 'import matplotlib.pyplot as plt\n'), ((5806, 5824), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(7.6)', '(8.4)'], {}), '(7.6, 8.4)\n', (5814, 5824), True, 'import matplotlib.pyplot as plt\n'), ((5825, 5837), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5835, 5837), True, 'import matplotlib.pyplot as plt\n'), ((5838, 5854), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pH"""'], {}), "('pH')\n", (5848, 5854), True, 'import matplotlib.pyplot as plt\n'), ((5855, 5873), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (5865, 5873), True, 'import matplotlib.pyplot as plt\n'), ((5874, 5884), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5882, 5884), True, 'import matplotlib.pyplot as plt\n'), ((5885, 5940), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['"""Figures/Figure3.png"""'], {'bbox_inches': '"""tight"""'}), "('Figures/Figure3.png', bbox_inches='tight')\n", (5896, 5940), True, 'import matplotlib.pyplot as plt\n'), ((6094, 6123), 'os.chdir', 'os.chdir', (['"""Examples/Plotting"""'], {}), "('Examples/Plotting')\n", (6102, 6123), False, 'import os\n'), ((6144, 6268), 'pandas.read_csv', 'pd.read_csv', (['"""ARIMA_detections_MainStreet_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('ARIMA_detections_MainStreet_cond.csv', header=0, index_col=0,\n parse_dates=True, infer_datetime_format=True)\n", (6155, 6268), True, 'import pandas as pd\n'), ((6407, 6530), 'pandas.read_csv', 'pd.read_csv', (['"""ARIMA_threshold_MainStreet_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('ARIMA_threshold_MainStreet_cond.csv', header=0, index_col=0,\n parse_dates=True, infer_datetime_format=True)\n", (6418, 6530), True, 'import pandas as pd\n'), ((6647, 6674), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (6657, 6674), True, 'import matplotlib.pyplot as plt\n'), ((6675, 6743), 'matplotlib.pyplot.plot', 'plt.plot', (["ARIMA_detections['residual']", '"""b"""'], {'label': '"""Model residuals"""'}), "(ARIMA_detections['residual'], 'b', label='Model residuals')\n", (6683, 6743), True, 'import matplotlib.pyplot as plt\n'), ((6744, 6806), 'matplotlib.pyplot.plot', 'plt.plot', (["ARIMA_threshold['low']", '"""c"""'], {'label': '"""Upper threshold"""'}), "(ARIMA_threshold['low'], 'c', label='Upper threshold')\n", (6752, 6806), True, 'import matplotlib.pyplot as plt\n'), ((6807, 6882), 'matplotlib.pyplot.plot', 'plt.plot', (["ARIMA_threshold['high']", '"""m"""'], {'mfc': '"""none"""', 'label': '"""Lower threshold"""'}), "(ARIMA_threshold['high'], 'm', mfc='none', label='Lower threshold')\n", (6815, 6882), True, 'import matplotlib.pyplot as 
plt\n'), ((6985, 7004), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-200)', '(150)'], {}), '(-200, 150)\n', (6993, 7004), True, 'import matplotlib.pyplot as plt\n'), ((7114, 7126), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7124, 7126), True, 'import matplotlib.pyplot as plt\n'), ((7127, 7168), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Specific conductance, μS/cm"""'], {}), "('Specific conductance, μS/cm')\n", (7137, 7168), True, 'import matplotlib.pyplot as plt\n'), ((7169, 7187), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (7179, 7187), True, 'import matplotlib.pyplot as plt\n'), ((7188, 7198), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7196, 7198), True, 'import matplotlib.pyplot as plt\n'), ((7199, 7254), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figures/Figure4.png"""'], {'bbox_inches': '"""tight"""'}), "('Figures/Figure4.png', bbox_inches='tight')\n", (7210, 7254), True, 'import matplotlib.pyplot as plt\n'), ((7368, 7506), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_multivar_bidir_detections_MainStreet_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_multivar_bidir_detections_MainStreet_cond.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (7379, 7506), True, 'import pandas as pd\n'), ((7708, 7838), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_multivar_bidir_df_MainStreet_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_multivar_bidir_df_MainStreet_cond.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (7719, 7838), True, 'import pandas as pd\n'), ((8207, 8234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (8217, 8234), True, 'import matplotlib.pyplot as plt\n'), ((8235, 8288), 'matplotlib.pyplot.plot', 'plt.plot', (['raw'], 
{'color': 'colors[0]', 'label': '"""Observed data"""'}), "(raw, color=colors[0], label='Observed data')\n", (8243, 8288), True, 'import matplotlib.pyplot as plt\n'), ((8289, 8353), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions'], {'color': 'colors[2]', 'label': '"""Model prediction"""'}), "(predictions, color=colors[2], label='Model prediction')\n", (8297, 8353), True, 'import matplotlib.pyplot as plt\n'), ((8354, 8488), 'matplotlib.pyplot.plot', 'plt.plot', (['raw[labels > 0]', '"""o"""'], {'color': 'colors[1]', 'mfc': '"""none"""', 'markersize': '(5)', 'markeredgewidth': '(1)', 'label': '"""Technician labeled anomalies"""'}), "(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5,\n markeredgewidth=1, label='Technician labeled anomalies')\n", (8362, 8488), True, 'import matplotlib.pyplot as plt\n'), ((8485, 8619), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions[detections > 0]', '"""x"""'], {'color': 'colors[3]', 'markersize': '(6)', 'markeredgewidth': '(2)', 'label': '"""Algorithm detected anomalies"""'}), "(predictions[detections > 0], 'x', color=colors[3], markersize=6,\n markeredgewidth=2, label='Algorithm detected anomalies')\n", (8493, 8619), True, 'import matplotlib.pyplot as plt\n'), ((8721, 8740), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-20)', '(1220)'], {}), '(-20, 1220)\n', (8729, 8740), True, 'import matplotlib.pyplot as plt\n'), ((8885, 8926), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Specific conductance, μS/cm"""'], {}), "('Specific conductance, μS/cm')\n", (8895, 8926), True, 'import matplotlib.pyplot as plt\n'), ((8927, 8945), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (8937, 8945), True, 'import matplotlib.pyplot as plt\n'), ((8946, 9055), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labelspacing': '(0.2)', 'loc': '"""upper left"""', 'ncol': '(2)', 'fontsize': '(9)', 'handletextpad': '(0.2)', 'columnspacing': '(0.25)'}), "(labelspacing=0.2, loc='upper left', ncol=2, 
fontsize=9,\n handletextpad=0.2, columnspacing=0.25)\n", (8956, 9055), True, 'import matplotlib.pyplot as plt\n'), ((10997, 11053), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figures/FigureC1.png"""'], {'bbox_inches': '"""tight"""'}), "('Figures/FigureC1.png', bbox_inches='tight')\n", (11008, 11053), True, 'import matplotlib.pyplot as plt\n'), ((11193, 11329), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_multivar_bidir_detections_MainStreet_ph.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_multivar_bidir_detections_MainStreet_ph.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (11204, 11329), True, 'import pandas as pd\n'), ((11531, 11660), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_multivar_bidir_df_MainStreet_ph.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_multivar_bidir_df_MainStreet_ph.csv', header=0, index_col\n =0, parse_dates=True, infer_datetime_format=True)\n", (11542, 11660), True, 'import pandas as pd\n'), ((12036, 12063), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (12046, 12063), True, 'import matplotlib.pyplot as plt\n'), ((12064, 12117), 'matplotlib.pyplot.plot', 'plt.plot', (['raw'], {'color': 'colors[0]', 'label': '"""Observed data"""'}), "(raw, color=colors[0], label='Observed data')\n", (12072, 12117), True, 'import matplotlib.pyplot as plt\n'), ((12118, 12182), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions'], {'color': 'colors[2]', 'label': '"""Model prediction"""'}), "(predictions, color=colors[2], label='Model prediction')\n", (12126, 12182), True, 'import matplotlib.pyplot as plt\n'), ((12183, 12317), 'matplotlib.pyplot.plot', 'plt.plot', (['raw[labels > 0]', '"""o"""'], {'color': 'colors[1]', 'mfc': '"""none"""', 'markersize': '(5)', 'markeredgewidth': '(1)', 'label': '"""Technician labeled 
anomalies"""'}), "(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5,\n markeredgewidth=1, label='Technician labeled anomalies')\n", (12191, 12317), True, 'import matplotlib.pyplot as plt\n'), ((12314, 12448), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions[detections > 0]', '"""x"""'], {'color': 'colors[3]', 'markersize': '(6)', 'markeredgewidth': '(2)', 'label': '"""Algorithm detected anomalies"""'}), "(predictions[detections > 0], 'x', color=colors[3], markersize=6,\n markeredgewidth=2, label='Algorithm detected anomalies')\n", (12322, 12448), True, 'import matplotlib.pyplot as plt\n'), ((12548, 12568), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(8.25)', '(10.5)'], {}), '(8.25, 10.5)\n', (12556, 12568), True, 'import matplotlib.pyplot as plt\n'), ((12681, 12693), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12691, 12693), True, 'import matplotlib.pyplot as plt\n'), ((12694, 12710), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pH"""'], {}), "('pH')\n", (12704, 12710), True, 'import matplotlib.pyplot as plt\n'), ((12711, 12729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (12721, 12729), True, 'import matplotlib.pyplot as plt\n'), ((12730, 12740), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12738, 12740), True, 'import matplotlib.pyplot as plt\n'), ((12741, 12797), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figures/FigureC2.png"""'], {'bbox_inches': '"""tight"""'}), "('Figures/FigureC2.png', bbox_inches='tight')\n", (12752, 12797), True, 'import matplotlib.pyplot as plt\n'), ((12895, 12922), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (12905, 12922), True, 'import matplotlib.pyplot as plt\n'), ((14463, 14481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (14473, 14481), True, 'import matplotlib.pyplot as plt\n'), ((14483, 14539), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""Figures/FigureC3.png"""'], {'bbox_inches': '"""tight"""'}), "('Figures/FigureC3.png', bbox_inches='tight')\n", (14494, 14539), True, 'import matplotlib.pyplot as plt\n'), ((14714, 14728), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (14722, 14728), False, 'import os\n'), ((14729, 14743), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (14737, 14743), False, 'import os\n'), ((14862, 14954), 'pyhydroqc.anomaly_utilities.get_data', 'anomaly_utilities.get_data', ([], {'sensors': 'sensors', 'site': 'site', 'years': 'years', 'path': '"""./LRO_data/"""'}), "(sensors=sensors, site=site, years=years, path=\n './LRO_data/')\n", (14888, 14954), False, 'from pyhydroqc import anomaly_utilities, rules_detect, calibration\n'), ((14951, 14981), 'os.chdir', 'os.chdir', (['"""Examples/Plotting/"""'], {}), "('Examples/Plotting/')\n", (14959, 14981), False, 'import os\n'), ((15002, 15125), 'pandas.read_csv', 'pd.read_csv', (['"""ARIMA_detections_TonyGrove_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('ARIMA_detections_TonyGrove_cond.csv', header=0, index_col=0,\n parse_dates=True, infer_datetime_format=True)\n", (15013, 15125), True, 'import pandas as pd\n'), ((15257, 15372), 'pandas.read_csv', 'pd.read_csv', (['"""ARIMA_df_TonyGrove_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('ARIMA_df_TonyGrove_cond.csv', header=0, index_col=0,\n parse_dates=True, infer_datetime_format=True)\n", (15268, 15372), True, 'import pandas as pd\n'), ((15518, 15647), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_univar_detections_TonyGrove_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_univar_detections_TonyGrove_cond.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (15529, 15647), True, 'import pandas as pd\n'), ((15785, 15916), 
'pandas.read_csv', 'pd.read_csv', (['"""LSTM_univar_df_anomalies_TonyGrove_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_univar_df_anomalies_TonyGrove_cond.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (15796, 15916), True, 'import pandas as pd\n'), ((16068, 16203), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_univar_bidir_detections_TonyGrove_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_univar_bidir_detections_TonyGrove_cond.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (16079, 16203), True, 'import pandas as pd\n'), ((16347, 16484), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_univar_bidir_df_anomalies_TonyGrove_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_univar_bidir_df_anomalies_TonyGrove_cond.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (16358, 16484), True, 'import pandas as pd\n'), ((16632, 16763), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_multivar_detections_TonyGrove_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_multivar_detections_TonyGrove_cond.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (16643, 16763), True, 'import pandas as pd\n'), ((16903, 17026), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_multivar_df_TonyGrove_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_multivar_df_TonyGrove_cond.csv', header=0, index_col=0,\n parse_dates=True, infer_datetime_format=True)\n", (16914, 17026), True, 'import pandas as pd\n'), ((17180, 17317), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_multivar_bidir_detections_TonyGrove_cond.csv"""'], 
{'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_multivar_bidir_detections_TonyGrove_cond.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (17191, 17317), True, 'import pandas as pd\n'), ((17463, 17592), 'pandas.read_csv', 'pd.read_csv', (['"""LSTM_multivar_bidir_df_TonyGrove_cond.csv"""'], {'header': '(0)', 'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('LSTM_multivar_bidir_df_TonyGrove_cond.csv', header=0,\n index_col=0, parse_dates=True, infer_datetime_format=True)\n", (17474, 17592), True, 'import pandas as pd\n'), ((17947, 17974), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (17957, 17974), True, 'import matplotlib.pyplot as plt\n'), ((17975, 18028), 'matplotlib.pyplot.plot', 'plt.plot', (['raw'], {'color': 'colors[0]', 'label': '"""Observed data"""'}), "(raw, color=colors[0], label='Observed data')\n", (17983, 18028), True, 'import matplotlib.pyplot as plt\n'), ((18029, 18093), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions'], {'color': 'colors[2]', 'label': '"""Model prediction"""'}), "(predictions, color=colors[2], label='Model prediction')\n", (18037, 18093), True, 'import matplotlib.pyplot as plt\n'), ((18094, 18228), 'matplotlib.pyplot.plot', 'plt.plot', (['raw[labels > 0]', '"""o"""'], {'color': 'colors[1]', 'mfc': '"""none"""', 'markersize': '(5)', 'markeredgewidth': '(1)', 'label': '"""Technician labeled anomalies"""'}), "(raw[labels > 0], 'o', color=colors[1], mfc='none', markersize=5,\n markeredgewidth=1, label='Technician labeled anomalies')\n", (18102, 18228), True, 'import matplotlib.pyplot as plt\n'), ((18225, 18359), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions[detections > 0]', '"""x"""'], {'color': 'colors[3]', 'markersize': '(6)', 'markeredgewidth': '(2)', 'label': '"""Algorithm detected anomalies"""'}), "(predictions[detections > 0], 'x', 
color=colors[3], markersize=6,\n markeredgewidth=2, label='Algorithm detected anomalies')\n", (18233, 18359), True, 'import matplotlib.pyplot as plt\n'), ((18460, 18478), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(330)', '(425)'], {}), '(330, 425)\n', (18468, 18478), True, 'import matplotlib.pyplot as plt\n'), ((18590, 18602), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18600, 18602), True, 'import matplotlib.pyplot as plt\n'), ((18603, 18644), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Specific conductance, μS/cm"""'], {}), "('Specific conductance, μS/cm')\n", (18613, 18644), True, 'import matplotlib.pyplot as plt\n'), ((18645, 18663), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (18655, 18663), True, 'import matplotlib.pyplot as plt\n'), ((19983, 19993), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19991, 19993), True, 'import matplotlib.pyplot as plt\n'), ((19994, 20049), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figures/Figure5.png"""'], {'bbox_inches': '"""tight"""'}), "('Figures/Figure5.png', bbox_inches='tight')\n", (20005, 20049), True, 'import matplotlib.pyplot as plt\n'), ((21309, 21497), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(5)', 'ncols': '(3)', 'figsize': '(15, 8)', 'sharex': '"""col"""', 'sharey': '"""col"""', 'gridspec_kw': "{'width_ratios': [3, 1, 1], 'height_ratios': [1, 1, 1, 1, 1], 'hspace': 0,\n 'wspace': 0.11}"}), "(nrows=5, ncols=3, figsize=(15, 8), sharex='col', sharey='col',\n gridspec_kw={'width_ratios': [3, 1, 1], 'height_ratios': [1, 1, 1, 1, 1\n ], 'hspace': 0, 'wspace': 0.11})\n", (21321, 21497), True, 'import matplotlib.pyplot as plt\n'), ((24288, 24344), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figures/FigureC4.png"""'], {'bbox_inches': '"""tight"""'}), "('Figures/FigureC4.png', bbox_inches='tight')\n", (24299, 24344), True, 'import matplotlib.pyplot as plt\n'), ((1096, 1240), 'pyhydroqc.rules_detect.range_check', 
'rules_detect.range_check', ([], {'df': 'sensor_array[snsr]', 'maximum': "site_params[site][snsr]['max_range']", 'minimum': "site_params[site][snsr]['min_range']"}), "(df=sensor_array[snsr], maximum=site_params[site][\n snsr]['max_range'], minimum=site_params[site][snsr]['min_range'])\n", (1120, 1240), False, 'from pyhydroqc import anomaly_utilities, rules_detect, calibration\n'), ((1420, 1532), 'pyhydroqc.rules_detect.persistence', 'rules_detect.persistence', ([], {'df': 'sensor_array[snsr]', 'length': "site_params[site][snsr]['persist']", 'output_grp': '(True)'}), "(df=sensor_array[snsr], length=site_params[site][\n snsr]['persist'], output_grp=True)\n", (1444, 1532), False, 'from pyhydroqc import anomaly_utilities, rules_detect, calibration\n'), ((1695, 1742), 'pyhydroqc.rules_detect.interpolate', 'rules_detect.interpolate', ([], {'df': 'sensor_array[snsr]'}), '(df=sensor_array[snsr])\n', (1719, 1742), False, 'from pyhydroqc import anomaly_utilities, rules_detect, calibration\n'), ((2111, 2242), 'pandas.read_csv', 'pd.read_csv', (["('./LRO_data/' + site + '_' + cal_snsr + '_calib_dates.csv')"], {'header': '(1)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('./LRO_data/' + site + '_' + cal_snsr + '_calib_dates.csv',\n header=1, parse_dates=True, infer_datetime_format=True)\n", (2122, 2242), True, 'import pandas as pd\n'), ((2316, 2362), 'pandas.to_datetime', 'pd.to_datetime', (["calib_dates[cal_snsr]['start']"], {}), "(calib_dates[cal_snsr]['start'])\n", (2330, 2362), True, 'import pandas as pd\n'), ((2398, 2442), 'pandas.to_datetime', 'pd.to_datetime', (["calib_dates[cal_snsr]['end']"], {}), "(calib_dates[cal_snsr]['end'])\n", (2412, 2442), True, 'import pandas as pd\n'), ((5713, 5743), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(7)', '(24)'], {}), '(2014, 7, 24)\n', (5730, 5743), False, 'import datetime\n'), ((5745, 5774), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(8)', '(1)'], {}), '(2014, 8, 1)\n', (5762, 5774), 
False, 'import datetime\n'), ((6892, 6921), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(7)', '(8)'], {}), '(2015, 7, 8)\n', (6909, 6921), False, 'import datetime\n'), ((6923, 6953), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(8)', '(14)'], {}), '(2015, 8, 14)\n', (6940, 6953), False, 'import datetime\n'), ((7016, 7075), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""7/9/2015"""', 'end': '"""8/14/2015"""', 'freq': '"""5D"""'}), "(start='7/9/2015', end='8/14/2015', freq='5D')\n", (7029, 7075), True, 'import pandas as pd\n'), ((8625, 8656), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(18)'], {}), '(2017, 12, 18)\n', (8642, 8656), False, 'import datetime\n'), ((8658, 8689), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(27)'], {}), '(2017, 12, 27)\n', (8675, 8689), False, 'import datetime\n'), ((8784, 8846), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""12/18/2017"""', 'end': '"""12/27/2017"""', 'freq': '"""2D"""'}), "(start='12/18/2017', end='12/27/2017', freq='2D')\n", (8797, 8846), True, 'import pandas as pd\n'), ((12454, 12483), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(6)', '(1)'], {}), '(2018, 6, 1)\n', (12471, 12483), False, 'import datetime\n'), ((12485, 12516), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(10)', '(30)'], {}), '(2018, 10, 30)\n', (12502, 12516), False, 'import datetime\n'), ((12580, 12641), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""6/1/2018"""', 'end': '"""10/30/2018"""', 'freq': '"""15D"""'}), "(start='6/1/2018', end='10/30/2018', freq='15D')\n", (12593, 12641), True, 'import pandas as pd\n'), ((13344, 13374), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(10)', '(2)'], {}), '(2017, 10, 2)\n', (13361, 13374), False, 'import datetime\n'), ((13376, 13406), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(10)', '(5)'], {}), '(2017, 10, 5)\n', (13393, 13406), False, 'import datetime\n'), ((13475, 13535), 
'pandas.date_range', 'pd.date_range', ([], {'start': '"""10/2/2017"""', 'end': '"""10/5/2017"""', 'freq': '"""1D"""'}), "(start='10/2/2017', end='10/5/2017', freq='1D')\n", (13488, 13535), True, 'import pandas as pd\n'), ((13588, 13613), 'numpy.arange', 'np.arange', (['(8.4)', '(8.75)', '(0.1)'], {}), '(8.4, 8.75, 0.1)\n', (13597, 13613), True, 'import numpy as np\n'), ((14141, 14171), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(8)', '(27)'], {}), '(2019, 8, 27)\n', (14158, 14171), False, 'import datetime\n'), ((14173, 14203), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(8)', '(30)'], {}), '(2019, 8, 30)\n', (14190, 14203), False, 'import datetime\n'), ((14271, 14331), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""8/27/2019"""', 'end': '"""8/30/2019"""', 'freq': '"""1D"""'}), "(start='8/27/2019', end='8/30/2019', freq='1D')\n", (14284, 14331), True, 'import pandas as pd\n'), ((18365, 18395), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(9)'], {}), '(2018, 11, 9)\n', (18382, 18395), False, 'import datetime\n'), ((18397, 18428), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(16)'], {}), '(2018, 11, 16)\n', (18414, 18428), False, 'import datetime\n'), ((18490, 18551), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""11/9/2018"""', 'end': '"""11/16/2018"""', 'freq': '"""1D"""'}), "(start='11/9/2018', end='11/16/2018', freq='1D')\n", (18503, 18551), True, 'import pandas as pd\n'), ((4555, 4576), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(30)'}), '(days=30)\n', (4567, 4576), True, 'import pandas as pd\n'), ((22152, 22186), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(9)', '(18)', '(18)'], {}), '(2014, 9, 18, 18)\n', (22169, 22186), False, 'import datetime\n'), ((22188, 22221), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(9)', '(20)', '(6)'], {}), '(2014, 9, 20, 6)\n', (22205, 22221), False, 'import datetime\n'), ((22331, 22402), 'pandas.date_range', 
'pd.date_range', ([], {'start': '"""9/18/2014 18:00"""', 'end': '"""9/20/2014 6:00"""', 'freq': '"""8H"""'}), "(start='9/18/2014 18:00', end='9/20/2014 6:00', freq='8H')\n", (22344, 22402), True, 'import pandas as pd\n'), ((22858, 22893), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(2)', '(5)', '(6)', '(0)'], {}), '(2015, 2, 5, 6, 0)\n', (22875, 22893), False, 'import datetime\n'), ((22895, 22928), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(2)', '(5)', '(18)'], {}), '(2015, 2, 5, 18)\n', (22912, 22928), False, 'import datetime\n'), ((23038, 23107), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2/5/2015 8:00"""', 'end': '"""2/5/2015 18:00"""', 'freq': '"""6H"""'}), "(start='2/5/2015 8:00', end='2/5/2015 18:00', freq='6H')\n", (23051, 23107), True, 'import pandas as pd\n'), ((23563, 23592), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(7)', '(6)'], {}), '(2015, 7, 6)\n', (23580, 23592), False, 'import datetime\n'), ((23594, 23624), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(7)', '(30)'], {}), '(2015, 7, 30)\n', (23611, 23624), False, 'import datetime\n'), ((23729, 23788), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""7/8/2015"""', 'end': '"""7/30/2015"""', 'freq': '"""5D"""'}), "(start='7/8/2015', end='7/30/2015', freq='5D')\n", (23742, 23788), True, 'import pandas as pd\n'), ((3373, 3513), 'pyhydroqc.calibration.find_gap', 'calibration.find_gap', ([], {'observed': "sensor_array[cal_snsr]['observed']", 'calib_date': "calib_dates[cal_snsr]['end'][i]", 'hours': '(2)', 'show_shift': '(False)'}), "(observed=sensor_array[cal_snsr]['observed'],\n calib_date=calib_dates[cal_snsr]['end'][i], hours=2, show_shift=False)\n", (3393, 3513), False, 'from pyhydroqc import anomaly_utilities, rules_detect, calibration\n'), ((4748, 4935), 'pyhydroqc.calibration.lin_drift_cor', 'calibration.lin_drift_cor', ([], {'observed': "sensor_array[cal_snsr]['observed']", 'start': "gaps[cal_snsr]['start'][i]", 'end': 
"gaps[cal_snsr]['end'][i]", 'gap': "gaps[cal_snsr]['gap'][i]", 'replace': '(True)'}), "(observed=sensor_array[cal_snsr]['observed'],\n start=gaps[cal_snsr]['start'][i], end=gaps[cal_snsr]['end'][i], gap=\n gaps[cal_snsr]['gap'][i], replace=True)\n", (4773, 4935), False, 'from pyhydroqc import anomaly_utilities, rules_detect, calibration\n'), ((9095, 9133), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(18)', '(16)', '(0)'], {}), '(2017, 12, 18, 16, 0)\n', (9112, 9133), False, 'import datetime\n'), ((9179, 9216), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(19)', '(8)', '(0)'], {}), '(2017, 12, 19, 8, 0)\n', (9196, 9216), False, 'import datetime\n'), ((9426, 9464), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(26)', '(12)', '(0)'], {}), '(2017, 12, 26, 12, 0)\n', (9443, 9464), False, 'import datetime\n'), ((9510, 9548), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(25)', '(20)', '(0)'], {}), '(2017, 12, 25, 20, 0)\n', (9527, 9548), False, 'import datetime\n'), ((9761, 9799), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(20)', '(18)', '(0)'], {}), '(2017, 12, 20, 18, 0)\n', (9778, 9799), False, 'import datetime\n'), ((9845, 9883), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(22)', '(20)', '(0)'], {}), '(2017, 12, 22, 20, 0)\n', (9862, 9883), False, 'import datetime\n'), ((10072, 10110), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(22)', '(15)', '(0)'], {}), '(2017, 12, 22, 15, 0)\n', (10089, 10110), False, 'import datetime\n'), ((10156, 10194), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(22)', '(18)', '(0)'], {}), '(2017, 12, 22, 18, 0)\n', (10173, 10194), False, 'import datetime\n'), ((10387, 10425), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(23)', '(10)', '(0)'], {}), '(2017, 12, 23, 10, 0)\n', (10404, 10425), False, 'import datetime\n'), ((10471, 10509), 'datetime.datetime', 'datetime.datetime', 
(['(2017)', '(12)', '(22)', '(20)', '(0)'], {}), '(2017, 12, 22, 20, 0)\n', (10488, 10509), False, 'import datetime\n'), ((10706, 10744), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(25)', '(12)', '(0)'], {}), '(2017, 12, 25, 12, 0)\n', (10723, 10744), False, 'import datetime\n'), ((10790, 10828), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(12)', '(22)', '(20)', '(0)'], {}), '(2017, 12, 22, 20, 0)\n', (10807, 10828), False, 'import datetime\n'), ((18707, 18745), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(11)', '(11)', '(0)'], {}), '(2018, 11, 11, 11, 0)\n', (18724, 18745), False, 'import datetime\n'), ((18792, 18829), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(11)', '(0)', '(0)'], {}), '(2018, 11, 11, 0, 0)\n', (18809, 18829), False, 'import datetime\n'), ((19040, 19078), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(12)', '(15)', '(0)'], {}), '(2018, 11, 12, 15, 0)\n', (19057, 19078), False, 'import datetime\n'), ((19125, 19162), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(13)', '(2)', '(0)'], {}), '(2018, 11, 13, 2, 0)\n', (19142, 19162), False, 'import datetime\n'), ((19360, 19398), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(13)', '(10)', '(0)'], {}), '(2018, 11, 13, 10, 0)\n', (19377, 19398), False, 'import datetime\n'), ((19445, 19482), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(13)', '(2)', '(0)'], {}), '(2018, 11, 13, 2, 0)\n', (19462, 19482), False, 'import datetime\n'), ((19693, 19731), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(14)', '(17)', '(0)'], {}), '(2018, 11, 14, 17, 0)\n', (19710, 19731), False, 'import datetime\n'), ((19778, 19815), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(11)', '(15)', '(5)', '(0)'], {}), '(2018, 11, 15, 5, 0)\n', (19795, 19815), False, 'import datetime\n'), ((23854, 23888), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(9)', '(18)', 
'(20)'], {}), '(2014, 9, 18, 20)\n', (23871, 23888), False, 'import datetime\n'), ((23926, 23960), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(9)', '(18)', '(20)'], {}), '(2014, 9, 18, 20)\n', (23943, 23960), False, 'import datetime\n')] |
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
from utils import ReplayBuffer
# set up device
device = torch.device("cpu")
class Actor(nn.Module):
    """Deterministic policy network: maps a state to a bounded action.

    Two ReLU hidden layers (400 -> 300) followed by a tanh output scaled by
    ``max_action`` so every action component lies in [-max_action, max_action].
    """

    def __init__(self, state_dim, action_dim, max_action):
        super().__init__()
        # Layer names l1/l2/l3 are part of the checkpoint format (state_dict
        # keys) and must stay stable for save()/load() compatibility.
        self.l1 = nn.Linear(state_dim, 400)
        self.l2 = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, action_dim)
        self.max_action = max_action

    def forward(self, state):
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        raw_action = self.l3(hidden)
        return self.max_action * torch.tanh(raw_action)
class Critic(nn.Module):
    """Q-value network: maps a (state, action) pair to a scalar value.

    The state and action are concatenated and passed through two ReLU hidden
    layers (400 -> 300) with a single linear output.
    """

    def __init__(self, state_dim, action_dim):
        super().__init__()
        # l1/l2/l3 names are serialized in checkpoints; do not rename.
        self.l1 = nn.Linear(state_dim + action_dim, 400)
        self.l2 = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, 1)

    def forward(self, state, action):
        joint = torch.cat([state, action], 1)
        hidden = F.relu(self.l1(joint))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class DDPG(object):
    """Deep Deterministic Policy Gradient agent.

    Holds an actor (policy) and a critic (Q-function), each paired with a
    slowly tracking target copy, plus training, evaluation and checkpoint
    helpers built around them.
    """
    def __init__(self, state_dim, action_dim, max_action, discount=0.99, tau=0.005):
        # Actor/critic and their target copies; targets are updated by Polyak
        # averaging inside run_episode() to stabilise the TD targets.
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters())
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters())
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.max_action = max_action
        self.discount = discount  # reward discount factor
        self.tau = tau  # soft-update coefficient for the target networks
    def select_action(self, state):
        """Return the deterministic policy action for one state as a flat numpy array."""
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()
    def run_episode(self, replay_buffer, batch_size=100):
        """Perform one DDPG gradient step on a minibatch sampled from the buffer."""
        # random sample from replay buffer
        state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
        # compute target Q value
        target_Q = self.critic_target(next_state, self.actor_target(next_state))
        # split the target_Q from network, so that it becomes a constant which has no gradient
        target_Q = reward + (not_done * self.discount * target_Q).detach()
        # compute current Q value
        current_Q = self.critic(state, action)
        # compute critic loss
        critic_loss = F.mse_loss(current_Q, target_Q)
        # optimize the critic network
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # compute actor loss (maximise Q of the actor's action => minimise -Q)
        actor_loss = -self.critic(state, self.actor(state)).mean()
        # optimize the actor
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # update the target network by alternative target network method
        # need to be optimized
        # Polyak averaging: target <- tau * online + (1 - tau) * target
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
        for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
    def save(self, filename):
        """Save network and optimizer state_dicts under the given filename prefix."""
        torch.save(self.critic.state_dict(), filename + "_critic")
        torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
        torch.save(self.actor.state_dict(), filename + "_actor")
        torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
    def load(self, filename):
        """Load state saved by save(); target networks are reset to fresh copies."""
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.critic_target = deepcopy(self.critic)
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.actor_target = deepcopy(self.actor)
    # Runs policy for X episodes and returns average reward
    # A fixed seed is used for the eval environment
    def eval_policy(self, env_name, seed, eval_episodes=10):
        """Run the greedy policy for eval_episodes on a freshly made env; return mean reward."""
        eval_env = gym.make(env_name)
        eval_env.seed(seed + 100)
        avg_reward = 0.
        for _ in range(eval_episodes):
            state, done = eval_env.reset(), False
            while not done:
                action = self.select_action(np.array(state))
                state, reward, done, _ = eval_env.step(action)
                avg_reward += reward
        avg_reward /= eval_episodes
        print("---------------------------------------")
        print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
        print("---------------------------------------")
        return avg_reward
    def run(self, env, file_name, args):
        """Main training loop.

        Uses random actions for the first args.start_timesteps steps, then the
        policy plus Gaussian exploration noise; trains every step after warm-up
        and evaluates/saves every args.eval_freq steps.

        Returns (x_timesteps, y_rewards): episode end times and episode rewards,
        intended for plotting.
        """
        if args.load_model != "":
            policy_file = file_name if args.load_model == "default" else args.load_model
            self.load(f"./models/{policy_file}")
        replay_buffer = ReplayBuffer(self.state_dim, self.action_dim)
        # Evaluate untrained policy
        evaluations = [self.eval_policy(args.env, args.seed)]
        state, done = env.reset(), False
        episode_reward = 0
        episode_timesteps = 0
        episode_num = 0
        # Initialize data list for plot
        x_timesteps = []
        y_rewards = []
        for t in range(int(args.max_timesteps)):
            episode_timesteps += 1
            # Select action randomly or according to policy
            if t < args.start_timesteps:
                action = env.action_space.sample()
            else:
                action = (
                        self.select_action(np.array(state))
                        + np.random.normal(0, self.max_action * args.expl_noise, size=self.action_dim)
                ).clip(-self.max_action, self.max_action)
            # Perform action
            next_state, reward, done, _ = env.step(action)
            # Time-limit handling: a timeout termination is not a true "done".
            done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0
            # Store data in replay buffer
            replay_buffer.add(state, action, next_state, reward, done_bool)
            state = next_state
            episode_reward += reward
            # Train agent after collecting sufficient data
            if t >= args.start_timesteps:
                self.run_episode(replay_buffer, args.batch_size)
            if done:
                # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
                print(
                    f"Total T: {t + 1}/{args.max_timesteps} Episode Num: {episode_num + 1} Reward: {episode_reward:.3f}")
                # Compute and reset plot info
                y_rewards.append(episode_reward)
                x_timesteps.append(t+1)
                # Reset environment
                state, done = env.reset(), False
                episode_reward = 0
                episode_timesteps = 0
                episode_num += 1
            # Evaluate episode
            if (t + 1) % args.eval_freq == 0:
                evaluations.append(self.eval_policy(args.env, args.seed))
                np.save(f"./results/{file_name}", evaluations)
                if args.save_model: self.save(f"./models/{file_name}")
        return x_timesteps, y_rewards
| [
"copy.deepcopy",
"numpy.save",
"gym.make",
"torch.load",
"torch.nn.functional.mse_loss",
"torch.cat",
"numpy.array",
"numpy.random.normal",
"torch.nn.Linear",
"torch.device",
"utils.ReplayBuffer"
] | [((181, 200), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (193, 200), False, 'import torch\n'), ((381, 406), 'torch.nn.Linear', 'nn.Linear', (['state_dim', '(400)'], {}), '(state_dim, 400)\n', (390, 406), True, 'import torch.nn as nn\n'), ((425, 444), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(300)'], {}), '(400, 300)\n', (434, 444), True, 'import torch.nn as nn\n'), ((463, 489), 'torch.nn.Linear', 'nn.Linear', (['(300)', 'action_dim'], {}), '(300, action_dim)\n', (472, 489), True, 'import torch.nn as nn\n'), ((775, 813), 'torch.nn.Linear', 'nn.Linear', (['(state_dim + action_dim)', '(400)'], {}), '(state_dim + action_dim, 400)\n', (784, 813), True, 'import torch.nn as nn\n'), ((832, 851), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(300)'], {}), '(400, 300)\n', (841, 851), True, 'import torch.nn as nn\n'), ((870, 887), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(1)'], {}), '(300, 1)\n', (879, 887), True, 'import torch.nn as nn\n'), ((1251, 1271), 'copy.deepcopy', 'deepcopy', (['self.actor'], {}), '(self.actor)\n', (1259, 1271), False, 'from copy import deepcopy\n'), ((1438, 1459), 'copy.deepcopy', 'deepcopy', (['self.critic'], {}), '(self.critic)\n', (1446, 1459), False, 'from copy import deepcopy\n'), ((2476, 2507), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['current_Q', 'target_Q'], {}), '(current_Q, target_Q)\n', (2486, 2507), True, 'import torch.nn.functional as F\n'), ((3940, 3961), 'copy.deepcopy', 'deepcopy', (['self.critic'], {}), '(self.critic)\n', (3948, 3961), False, 'from copy import deepcopy\n'), ((4147, 4167), 'copy.deepcopy', 'deepcopy', (['self.actor'], {}), '(self.actor)\n', (4155, 4167), False, 'from copy import deepcopy\n'), ((4361, 4379), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (4369, 4379), False, 'import gym\n'), ((5211, 5256), 'utils.ReplayBuffer', 'ReplayBuffer', (['self.state_dim', 'self.action_dim'], {}), '(self.state_dim, self.action_dim)\n', (5223, 5256), False, 'from utils import 
ReplayBuffer\n'), ((3787, 3819), 'torch.load', 'torch.load', (["(filename + '_critic')"], {}), "(filename + '_critic')\n", (3797, 3819), False, 'import torch\n'), ((3867, 3909), 'torch.load', 'torch.load', (["(filename + '_critic_optimizer')"], {}), "(filename + '_critic_optimizer')\n", (3877, 3909), False, 'import torch\n'), ((3998, 4029), 'torch.load', 'torch.load', (["(filename + '_actor')"], {}), "(filename + '_actor')\n", (4008, 4029), False, 'import torch\n'), ((4076, 4117), 'torch.load', 'torch.load', (["(filename + '_actor_optimizer')"], {}), "(filename + '_actor_optimizer')\n", (4086, 4117), False, 'import torch\n'), ((954, 983), 'torch.cat', 'torch.cat', (['[state, action]', '(1)'], {}), '([state, action], 1)\n', (963, 983), False, 'import torch\n'), ((7378, 7424), 'numpy.save', 'np.save', (['f"""./results/{file_name}"""', 'evaluations'], {}), "(f'./results/{file_name}', evaluations)\n", (7385, 7424), True, 'import numpy as np\n'), ((4600, 4615), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (4608, 4615), True, 'import numpy as np\n'), ((5937, 6013), 'numpy.random.normal', 'np.random.normal', (['(0)', '(self.max_action * args.expl_noise)'], {'size': 'self.action_dim'}), '(0, self.max_action * args.expl_noise, size=self.action_dim)\n', (5953, 6013), True, 'import numpy as np\n'), ((5894, 5909), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (5902, 5909), True, 'import numpy as np\n')] |
# import numpy as np
# import matplotlib.pyplot as plt
# import visualization.panda.world as world
# import robot_math as rm
# import modeling.geometric_model as gm
#
# sp2d = rm.gen_2d_spiral_points(max_radius=.2, radial_granularity=.001, tangential_granularity=.01)
# plt.plot(sp2d[:,0], sp2d[:,1])
# plt.show()
#
# base = world.World(cam_pos=np.array([1, 1, 1]), lookat_pos=np.array([0, 0, 0.25]))
# sp = rm.gen_3d_spiral_points(pos=np.array([0, 0, .25]),
# rotmat=rm.rotmat_from_axangle(np.array([1, 0, 0]), np.pi / 6),
# max_radius=.20,
# radial_granularity=.001,
# tangential_granularity=.01,)
# for id in range(len(sp) - 1):
# pnt0 = sp[id, :]
# pnt1 = sp[id + 1, :]
# gm.gen_stick(spos=pnt0, epos=pnt1, type="round").attach_to(base)
# base.run()
import time
import numpy as np
import math
import matplotlib.pyplot as plot
def concentric_circle_hex_polar(layer, radians, start_rot_angle=0.):
    """Generate 2D points on concentric hexagonal rings using polar coordinates.

    :param layer: number of rings to generate
    :param radians: radial spacing between consecutive rings
    :param start_rot_angle: rotation offset applied to the whole pattern
    :return: (x_list, y_list) flat arrays of point coordinates

    NOTE(review): 0.866025 ~ sqrt(3)/2 and 1.732051 ~ sqrt(3); the helper
    appears to compute, per ring, the angular increments and radii of the
    points along one 60-degree sector, which are then tiled six-fold --
    confirm the exact geometry against a plot.
    """
    def get_param(layer_id):
        # Apothem-like base radius for this ring.
        radians_base = 0.866025 * radians * (layer_id + 1)
        n_list = np.linspace(layer_id - 1, 0, int((layer_id + 1) / 2))
        # Angular offsets of the points along one sector edge.
        angle_diff = np.append(np.array([math.pi / 6]), np.arctan(n_list / ((layer_id + 1) * 1.732051)))
        # print("angle_diff:", angle_diff)
        angle_minus = np.zeros(len(angle_diff))
        angle_minus[:-1] = angle_diff[1:]
        # Successive differences of the offsets, mirrored to cover the sector.
        angle_half = angle_diff - angle_minus
        angle_list = np.append(angle_half, np.zeros(int(layer_id / 2)))
        angle_list = angle_list + angle_list[::-1]
        # print("angle_list:", len(angle_list), angle_list)
        angle_diff_total = np.append(angle_diff[1:], angle_diff[1:][::-1][(layer_id % 2):])
        # Radius of each point: base radius projected through its offset angle.
        radians_list = np.append(radians_base / np.cos(angle_diff_total), radians * (layer_id + 1))
        # print("radiasn_list:", len(radians_list), radians_list)
        return angle_list, radians_list
    def get_pose_from_angle(angle_list, radians_list):
        # angle_list[0] += start_rot_angle
        # Accumulate the per-point increments, tiled over the six sectors,
        # then shift by the global rotation and convert polar -> cartesian.
        angle_list_total = np.cumsum(np.tile(angle_list, 6))
        angle_list_total = angle_list_total + np.array([start_rot_angle]).repeat(len(angle_list_total))
        radians_list_total = np.tile(radians_list, 6)
        x_list = radians_list_total * np.sin(angle_list_total)
        y_list = radians_list_total * np.cos(angle_list_total)
        return x_list, y_list
    x_list = np.array([])
    y_list = np.array([])
    for layer_id in range(layer):
        # print("layer_id", layer_id)
        angle_list, radians_list = get_param(layer_id)
        x_layer, y_layer = get_pose_from_angle(angle_list, radians_list)
        x_list = np.append(x_list, x_layer)
        y_list = np.append(y_list, y_layer)
    return x_list, y_list
def concentric_circle_hex_equipartition(layer, radians, start_rot_angle=0.):
    """Generate points on concentric hexagons by subdividing the hexagon edges.

    Ring ``i`` (0-based) has circumradius ``radians * (i + 1)`` and each of its
    six edges carries ``i + 1`` equally spaced points (edge endpoints are not
    duplicated).  ``start_rot_angle`` rotates the whole pattern.

    :return: (x_list, y_list) flat float arrays of point coordinates
    """
    def _hex_vertices(ring):
        # Seven angles: the six vertex directions (every 60 degrees) plus the
        # first one repeated, so consecutive pairs describe the six edges.
        angle_list = np.arange(start_rot_angle, (math.pi * 2 + start_rot_angle), math.pi / 3)
        angle_list = np.append(angle_list, start_rot_angle)
        vx = np.sin(angle_list) * radians * (ring + 1)
        vy = np.cos(angle_list) * radians * (ring + 1)
        return vx, vy

    xs, ys = [], []
    for ring in range(layer):
        vx, vy = _hex_vertices(ring)
        for side in range(6):
            xs.append(np.linspace(vx[side], vx[side + 1], num=ring + 1, endpoint=False))
            ys.append(np.linspace(vy[side], vy[side + 1], num=ring + 1, endpoint=False))
    if not xs:  # layer == 0: no rings requested
        return np.array([]), np.array([])
    return np.concatenate(xs), np.concatenate(ys)
def gen_regpoly(radius, nedges=12):
    """Vertices of a regular ``nedges``-gon with the given circumradius.

    Returns an ``(nedges + 1, 2)`` array; the first vertex (at angle 0 --
    the top of the circle, since x = sin and y = cos) is repeated as the
    last row so the polygon is closed.
    """
    theta = np.linspace(0, np.pi * 2, nedges + 1, endpoint=True)
    return np.column_stack((np.sin(theta) * radius, np.cos(theta) * radius))
def gen_2d_isosceles_verts(nlevel, edge_length, nedges=12):
    """2D lattice: the origin plus ``nlevel`` concentric regular polygons.

    Ring ``level`` (0-based) has circumradius ``edge_length * (level + 1)``
    and each of its ``nedges`` edges is subdivided into ``level + 1`` points
    (endpoints not duplicated).  Returns an (npoints, 2) array starting with
    the center point [0, 0].
    """
    verts = np.asarray([[0, 0]])  # center of the pattern
    for level in range(nlevel):
        ring = gen_regpoly(radius=edge_length * (level + 1), nedges=nedges)
        for edge in range(nedges):
            segment = np.linspace(ring[edge, :], ring[edge + 1, :], num=level + 1, endpoint=False)
            verts = np.append(verts, segment, axis=0)
    return verts
def gen_2d_equilateral_verts(nlevel, edge_length):
    """Equilateral-triangle lattice: the hexagonal (6-edge) special case."""
    return gen_2d_isosceles_verts(nlevel, edge_length, nedges=6)
def gen_3d_isosceles_verts(pos, rotmat, nlevel=5, edge_length=0.001, nedges=12):
    """Generate the 2D isosceles lattice, lift it to z=0 and place it in 3D.

    NOTE(review): the offset ``pos`` is added *before* applying ``rotmat``
    (``rotmat.dot((xyz_array + pos).T)``), i.e. the translation happens in the
    pre-rotation frame -- confirm this ordering is intended, since
    rotate-then-translate is the more common placement convention.
    """
    xy_array = gen_2d_isosceles_verts(nlevel=nlevel, edge_length=edge_length, nedges=nedges)
    # Append a zero z-column to every 2D vertex.
    xyz_array = np.pad(xy_array, ((0,0), (0,1)), mode='constant', constant_values=0)
    return rotmat.dot((xyz_array+pos).T).T
def gen_3d_equilateral_verts(pos, rotmat, nlevel=5, edge_length=0.001):
    """3D equilateral-triangle lattice: the hexagonal (6-edge) special case."""
    return gen_3d_isosceles_verts(pos, rotmat, nlevel=nlevel, edge_length=edge_length, nedges=6)
def gen_2d_equilaterial_verts(nlevel, edge_length):
    """Alternative lattice builder that precomputes all ring vertices at once.

    Unlike gen_2d_isosceles_verts it does not include the center point.
    NOTE(review): the misspelled name ("equilaterial") is kept for
    compatibility; this function is only referenced from commented-out
    benchmark code in the __main__ block below.
    """
    nangle = 12
    # Circumradius of each ring: 1..nlevel times the edge length.
    levels = np.arange(1, nlevel + 1, 1) * edge_length
    # nangle + 1 angles so each ring's first vertex is repeated at its end.
    angles = np.linspace(0, np.pi * 2, nangle+1, endpoint=True)
    x_verts = np.outer(levels, np.sin(angles)).flatten()
    y_verts = np.outer(levels, np.cos(angles)).flatten()
    # NOTE(review): np.row_stack is deprecated in NumPy 2.0; np.vstack is the
    # drop-in replacement.
    xy_vertex = np.row_stack((x_verts, y_verts)).T
    xy_list = np.empty((0, 2))
    for level in range(nlevel):
        for i in range(nangle):
            # Ring `level` occupies rows [level*(nangle+1), ...); subdivide
            # each edge into level+1 points (endpoint excluded to avoid
            # duplicating shared vertices).
            xy_list = np.append(xy_list,
                                np.linspace(xy_vertex[level*(nangle+1)+i, :], xy_vertex[level*(nangle+1)+i + 1, :], num=level + 1, endpoint=False),
                                axis=0)
    return xy_list
if __name__ == "__main__":
tic = time.time()
for i in range(200):
x_list, y_list = concentric_circle_hex_polar(5, 1, math.pi / 8)
toc1 = time.time()
print(toc1 - tic)
tic = time.time()
for i in range(200):
x_list, y_list = concentric_circle_hex_equipartition(5, 1, math.pi / 8)
toc1 = time.time()
print(toc1 - tic)
tic = time.time()
for i in range(200):
xy_list = gen_2d_equilateral_verts(5, 1)
toc1 = time.time()
print(toc1 - tic)
# for i in range(200):
# xy_list = gen_2d_equilaterial_verts(5, 1)
# toc1 = time.time()
# print(toc1 - tic)
plot.plot(xy_list[:,0], xy_list[:,1], "o-")
# plot.plot(x_list[:], y_list[:], "o-")
plot.show()
| [
"numpy.pad",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.empty",
"numpy.asarray",
"time.time",
"numpy.append",
"numpy.sin",
"numpy.array",
"numpy.tile",
"numpy.linspace",
"numpy.column_stack",
"numpy.arange",
"numpy.cos",
"numpy.row_stack",
"numpy.arctan"
] | [((2443, 2455), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2451, 2455), True, 'import numpy as np\n'), ((2469, 2481), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2477, 2481), True, 'import numpy as np\n'), ((3235, 3247), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3243, 3247), True, 'import numpy as np\n'), ((3261, 3273), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3269, 3273), True, 'import numpy as np\n'), ((3695, 3747), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(nedges + 1)'], {'endpoint': '(True)'}), '(0, np.pi * 2, nedges + 1, endpoint=True)\n', (3706, 3747), True, 'import numpy as np\n'), ((3843, 3880), 'numpy.column_stack', 'np.column_stack', (['(x_vertex, y_vertex)'], {}), '((x_vertex, y_vertex))\n', (3858, 3880), True, 'import numpy as np\n'), ((3957, 3977), 'numpy.asarray', 'np.asarray', (['[[0, 0]]'], {}), '([[0, 0]])\n', (3967, 3977), True, 'import numpy as np\n'), ((4662, 4732), 'numpy.pad', 'np.pad', (['xy_array', '((0, 0), (0, 1))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(xy_array, ((0, 0), (0, 1)), mode='constant', constant_values=0)\n", (4668, 4732), True, 'import numpy as np\n'), ((5092, 5144), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(nangle + 1)'], {'endpoint': '(True)'}), '(0, np.pi * 2, nangle + 1, endpoint=True)\n', (5103, 5144), True, 'import numpy as np\n'), ((5322, 5338), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (5330, 5338), True, 'import numpy as np\n'), ((5689, 5700), 'time.time', 'time.time', ([], {}), '()\n', (5698, 5700), False, 'import time\n'), ((5809, 5820), 'time.time', 'time.time', ([], {}), '()\n', (5818, 5820), False, 'import time\n'), ((5853, 5864), 'time.time', 'time.time', ([], {}), '()\n', (5862, 5864), False, 'import time\n'), ((5981, 5992), 'time.time', 'time.time', ([], {}), '()\n', (5990, 5992), False, 'import time\n'), ((6025, 6036), 'time.time', 'time.time', ([], {}), '()\n', (6034, 6036), False, 'import 
time\n'), ((6122, 6133), 'time.time', 'time.time', ([], {}), '()\n', (6131, 6133), False, 'import time\n'), ((6290, 6335), 'matplotlib.pyplot.plot', 'plot.plot', (['xy_list[:, 0]', 'xy_list[:, 1]', '"""o-"""'], {}), "(xy_list[:, 0], xy_list[:, 1], 'o-')\n", (6299, 6335), True, 'import matplotlib.pyplot as plot\n'), ((6382, 6393), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (6391, 6393), True, 'import matplotlib.pyplot as plot\n'), ((1683, 1745), 'numpy.append', 'np.append', (['angle_diff[1:]', 'angle_diff[1:][::-1][layer_id % 2:]'], {}), '(angle_diff[1:], angle_diff[1:][::-1][layer_id % 2:])\n', (1692, 1745), True, 'import numpy as np\n'), ((2248, 2272), 'numpy.tile', 'np.tile', (['radians_list', '(6)'], {}), '(radians_list, 6)\n', (2255, 2272), True, 'import numpy as np\n'), ((2699, 2725), 'numpy.append', 'np.append', (['x_list', 'x_layer'], {}), '(x_list, x_layer)\n', (2708, 2725), True, 'import numpy as np\n'), ((2743, 2769), 'numpy.append', 'np.append', (['y_list', 'y_layer'], {}), '(y_list, y_layer)\n', (2752, 2769), True, 'import numpy as np\n'), ((2924, 2994), 'numpy.arange', 'np.arange', (['start_rot_angle', '(math.pi * 2 + start_rot_angle)', '(math.pi / 3)'], {}), '(start_rot_angle, math.pi * 2 + start_rot_angle, math.pi / 3)\n', (2933, 2994), True, 'import numpy as np\n'), ((3018, 3056), 'numpy.append', 'np.append', (['angle_list', 'start_rot_angle'], {}), '(angle_list, start_rot_angle)\n', (3027, 3056), True, 'import numpy as np\n'), ((3761, 3779), 'numpy.sin', 'np.sin', (['angle_list'], {}), '(angle_list)\n', (3767, 3779), True, 'import numpy as np\n'), ((3804, 3822), 'numpy.cos', 'np.cos', (['angle_list'], {}), '(angle_list)\n', (3810, 3822), True, 'import numpy as np\n'), ((5037, 5064), 'numpy.arange', 'np.arange', (['(1)', '(nlevel + 1)', '(1)'], {}), '(1, nlevel + 1, 1)\n', (5046, 5064), True, 'import numpy as np\n'), ((5273, 5305), 'numpy.row_stack', 'np.row_stack', (['(x_verts, y_verts)'], {}), '((x_verts, y_verts))\n', (5285, 5305), 
True, 'import numpy as np\n'), ((1220, 1243), 'numpy.array', 'np.array', (['[math.pi / 6]'], {}), '([math.pi / 6])\n', (1228, 1243), True, 'import numpy as np\n'), ((1245, 1292), 'numpy.arctan', 'np.arctan', (['(n_list / ((layer_id + 1) * 1.732051))'], {}), '(n_list / ((layer_id + 1) * 1.732051))\n', (1254, 1292), True, 'import numpy as np\n'), ((2091, 2113), 'numpy.tile', 'np.tile', (['angle_list', '(6)'], {}), '(angle_list, 6)\n', (2098, 2113), True, 'import numpy as np\n'), ((2311, 2335), 'numpy.sin', 'np.sin', (['angle_list_total'], {}), '(angle_list_total)\n', (2317, 2335), True, 'import numpy as np\n'), ((2374, 2398), 'numpy.cos', 'np.cos', (['angle_list_total'], {}), '(angle_list_total)\n', (2380, 2398), True, 'import numpy as np\n'), ((1796, 1820), 'numpy.cos', 'np.cos', (['angle_diff_total'], {}), '(angle_diff_total)\n', (1802, 1820), True, 'import numpy as np\n'), ((3076, 3094), 'numpy.sin', 'np.sin', (['angle_list'], {}), '(angle_list)\n', (3082, 3094), True, 'import numpy as np\n'), ((3141, 3159), 'numpy.cos', 'np.cos', (['angle_list'], {}), '(angle_list)\n', (3147, 3159), True, 'import numpy as np\n'), ((3421, 3496), 'numpy.linspace', 'np.linspace', (['x_vertex[i]', 'x_vertex[i + 1]'], {'num': '(layer_id + 1)', 'endpoint': '(False)'}), '(x_vertex[i], x_vertex[i + 1], num=layer_id + 1, endpoint=False)\n', (3432, 3496), True, 'import numpy as np\n'), ((3537, 3612), 'numpy.linspace', 'np.linspace', (['y_vertex[i]', 'y_vertex[i + 1]'], {'num': '(layer_id + 1)', 'endpoint': '(False)'}), '(y_vertex[i], y_vertex[i + 1], num=layer_id + 1, endpoint=False)\n', (3548, 3612), True, 'import numpy as np\n'), ((4193, 4278), 'numpy.linspace', 'np.linspace', (['xy_vertex[i, :]', 'xy_vertex[i + 1, :]'], {'num': '(level + 1)', 'endpoint': '(False)'}), '(xy_vertex[i, :], xy_vertex[i + 1, :], num=level + 1, endpoint=False\n )\n', (4204, 4278), True, 'import numpy as np\n'), ((5174, 5188), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (5180, 5188), True, 'import 
numpy as np\n'), ((5231, 5245), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (5237, 5245), True, 'import numpy as np\n'), ((5476, 5607), 'numpy.linspace', 'np.linspace', (['xy_vertex[level * (nangle + 1) + i, :]', 'xy_vertex[level * (nangle + 1) + i + 1, :]'], {'num': '(level + 1)', 'endpoint': '(False)'}), '(xy_vertex[level * (nangle + 1) + i, :], xy_vertex[level * (\n nangle + 1) + i + 1, :], num=level + 1, endpoint=False)\n', (5487, 5607), True, 'import numpy as np\n'), ((2161, 2188), 'numpy.array', 'np.array', (['[start_rot_angle]'], {}), '([start_rot_angle])\n', (2169, 2188), True, 'import numpy as np\n')] |
import pandas as pd
import timemachines
import numpy as np
TEMPLATE = 'https://raw.githubusercontent.com/microprediction/precisedata/main/returns/fathom_data_N.csv'
col = 'fathom_xx'
from timemachines.skaters.simple.movingaverage import EMA_SKATERS
from timemachines.skaters.simple.thinking import THINKING_SKATERS
from timemachines.skaters.simple.hypocraticensemble import HYPOCRATIC_ENSEMBLE_SKATERS
from timemachines.skating import prior
from timemachines.skatertools.utilities.conventions import targets
SKATERS = EMA_SKATERS + THINKING_SKATERS + HYPOCRATIC_ENSEMBLE_SKATERS
n_skaters = len(SKATERS)
n_burn = 100
print(n_skaters)
N=1
def residuals(f, y, k=1, e=100, n_burn=50):
    """Run skater ``f`` over the whole series ``y`` and return its errors.

    The skater is fed every observation with a constant time allowance ``e``;
    the first ``n_burn`` steps are discarded as warm-up.  Returns the array of
    (prediction - realized target) at horizon ``k``.
    """
    assert n_burn>k
    allowance = [e] * len(y)
    x, _ = prior(f=f, y=y, k=k, e=allowance, x0=y[0])
    realized = targets(y)
    point_preds = [pred[-1] for pred in x]
    return np.array(point_preds[n_burn:]) - np.array(realized[n_burn:])
if __name__=='__main__':
    # Sweep the numbered return-series files and write one residual CSV per
    # series, with one column of post-burn-in residuals per skater.
    for N in range(350):
        try:
            # Some indices may not exist remotely; failures are skipped below.
            df = pd.read_csv(TEMPLATE.replace('N', str(N)))
            y = df['fathom_xx'].values
            y = [yt for yt in y if not np.isnan(yt)]  # drop missing observations
            cols = [f.__name__ for f in SKATERS]
            df_out = pd.DataFrame(columns=cols)
            for f in SKATERS:
                df_out[f.__name__] = residuals(f, y=y, k=1, e=100, n_burn=400)
            name = 'skater_residuals_' + str(N) + '.csv'
            df_out.to_csv(name)
            print(name)
        except Exception as exc:
            # Best effort: a missing file or a failing skater should not stop
            # the sweep, but say why this index was skipped instead of passing
            # silently (the previous bare except also trapped KeyboardInterrupt).
            print('Skipping ' + str(N) + ': ' + repr(exc))
| [
"pandas.DataFrame",
"timemachines.skatertools.utilities.conventions.targets",
"numpy.isnan",
"timemachines.skating.prior",
"numpy.array"
] | [((810, 845), 'timemachines.skating.prior', 'prior', ([], {'f': 'f', 'y': 'y', 'k': 'k', 'e': 'es', 'x0': 'y[0]'}), '(f=f, y=y, k=k, e=es, x0=y[0])\n', (815, 845), False, 'from timemachines.skating import prior\n'), ((855, 865), 'timemachines.skatertools.utilities.conventions.targets', 'targets', (['y'], {}), '(y)\n', (862, 865), False, 'from timemachines.skatertools.utilities.conventions import targets\n'), ((908, 929), 'numpy.array', 'np.array', (['xk[n_burn:]'], {}), '(xk[n_burn:])\n', (916, 929), True, 'import numpy as np\n'), ((930, 951), 'numpy.array', 'np.array', (['yt[n_burn:]'], {}), '(yt[n_burn:])\n', (938, 951), True, 'import numpy as np\n'), ((1258, 1284), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'cols'}), '(columns=cols)\n', (1270, 1284), True, 'import pandas as pd\n'), ((1151, 1163), 'numpy.isnan', 'np.isnan', (['yt'], {}), '(yt)\n', (1159, 1163), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
Module that prepare a list of documents to be processed with LDA.
'''
import re
import numpy as np
import nltk
#import spellChecker as sc
from collections import Counter
from nltk.corpus import stopwords
'''
Words to be masked
'''
set_conectores_aditivos = ["más aún", "todavía más", "incluso", "así mismo",
"de igual modo","igualmente", "por otro lado",
"también", "tampoco", "además"]
set_conectores_adversativos = ["no obstante", "por el contrario", "aun asi",
"aun así","ahora bien", "de todas formas",
"despues de todo", "en contraste",
"por otra parte", "en cambio", "tampoco", "pero",
"más que","sin embargo"]
set_conectores_consecutivos = ["porque","ya que", "debido", "dado que",
"pues bien","pues","puesto que","entonces",
"asi pues","por ello", "a causa", "por ende",
"en consecuencia", "por consiguiente",
"de modo que","por lo tanto"]
set_conectores_condicionales = ["en vista","por supuesto", "aun que","aunque",
"aun cuando", "a pesar"]
set_conectores_explicativos = ["es decir","osea", "o sea","en otras palabras",
"en otra palabras"]
set_conectores_conclusion = ["en resumen", "en suma", "dicho de otro modo",
"en síntesis", "finalmente", "concluyendo",
"en conclusión", "por último", "sintetizando"]
set_conectores_ejemplificacion = ["por ejemplo", "ejemplo","asi", "así como",
"asi como", "para ilustrar", "es decir"]
set_conectores_temporales_posterioridad = ["más tarde", "luego", "después",
"posteriormente"]
set_conectores_espaciales = ["al lado", "alado", "abajo","izquierda",
"derecha", "medio", "fondo", "junto a","junto",
"debajo", "aquí", "allá","allí", "acá"]
set_comparacion = ["es como", "es similar", "análogo","es semejante",
"es parecido"]
set_emocional_positiva = ["bien", "buena", "bueno", "bonito", "bonita",
"increíble", "excelente","fabuloso", "emocionante",
"impresionante","maravilloso","espectacular",
"bacan", "bakan", "bkn","perfecto"]
set_emocional_negativa = ["mala","malo","mal", "maldad", "lata", "fome",
"feo","fea", "horrible", "malvada", "malvado",
"desagradable", "incómodo", "nefasto", "funesto",
"tragedia","trágico", "desdicha", "desgracia",
"miedo", "tenebroso", "paupérrimo"]
preg_pal_porque = ["por qué","por que"]#,"por que","porque","pq"]
preg_pal_explica = ["explica","expliquen","explique"]
set_direccion = ['tienes que','tienen que','vamos a','van a','hay que','hagan','hagamos','haga','tiene que','tenemos que']
set_administracion = ['señorita','señor','por favor','advertencia','puedo avanzar','usted']
set_to_be_replaced = {}
set_to_be_replaced['TAMBIEN_TAMPOCO'] = set_conectores_aditivos
set_to_be_replaced['EN_CAMBIO'] = set_conectores_adversativos
set_to_be_replaced['YA_QUE'] = set_conectores_consecutivos
set_to_be_replaced['AUNQUE'] = set_conectores_condicionales
set_to_be_replaced['ES_DECIR'] = set_conectores_explicativos
set_to_be_replaced['POR_ULTIMO'] = set_conectores_conclusion
set_to_be_replaced['POR_EJEMPLO'] = set_conectores_ejemplificacion
set_to_be_replaced['LUEGO'] = set_conectores_temporales_posterioridad
set_to_be_replaced['AQUI_ALLA'] = set_conectores_espaciales
set_to_be_replaced['ES_COMO'] = set_comparacion
set_to_be_replaced['BIEN'] = set_emocional_positiva
set_to_be_replaced['MAL'] = set_emocional_negativa
set_to_be_replaced['POR_QUE'] = preg_pal_porque
set_to_be_replaced['EXPLICA'] = preg_pal_explica
set_to_be_replaced['TIENES_QUE'] = set_direccion
set_to_be_replaced['ADMINISTRACION'] = set_administracion
set_to_be_replaced['A_NAME'] = ['a_name']
# read stop words
STOPWORDS = stopwords.words('spanish') + ['está','va','si']
# replace words in a string according to the sets defined above
def replace_words(x):
    """Collapse every phrase from set_to_be_replaced into its category token.

    :param x: input sentence (string)
    :return: sentence with each known phrase replaced by ' TOKEN '
    """
    for marker, phrases in set_to_be_replaced.items():
        token = ' ' + marker + ' '
        for phrase in phrases:
            pattern = r'\s?' + re.escape(phrase) + r'[\s.,;\-!:]?'
            x = re.sub(pattern, token, x)
    return x
# STEMMER
# NOTE(review): SnowballStemmer instance appears unused by stemming() below,
# which truncates instead — confirm whether it is used elsewhere in the file.
STEMMER = nltk.SnowballStemmer('spanish')
def stemming(word):
    """Crude stem: truncate fully lower-case tokens to 7 characters.

    Tokens containing any upper-case character (category tags such as
    NUMBER or A_NAME) are returned unchanged.

    :param word: token as ``str`` or UTF-8 encoded ``bytes``
    :return: stemmed token as ``str``

    Bug fix: the original unconditionally called ``word.decode('utf-8')``,
    which raises AttributeError for ``str`` input under Python 3.  Decoding
    is now applied only when the token really is ``bytes``.
    """
    if isinstance(word, bytes):
        word = word.decode('utf-8')
    if word == word.lower():
        return word[:7]
    return word
# Load the list of known proper names (one per line, read as bytes);
# reversed so that later entries in the file take replacement precedence.
with open('names.txt','rb') as f:
    NAMES = [x.rstrip() for x in reversed(f.readlines())]
def detect_names(x):
    """Replace every occurrence of a known proper name with the ' A_NAME ' tag.

    :param x: input sentence (string)
    :return: sentence with names anonymised
    """
    placeholder = ' A_NAME '
    for name in NAMES:
        pattern = r'\s' + re.escape(name) + r'[\s.,;\-!:]?'
        x = re.sub(pattern, placeholder, x)
    return x
# Preprocessing Function
def preprocessing(x, isexample=False, withStopWords=False, withStemming=False):
    """Normalise a raw sentence into an ordered list of tokens.

    :param x: input sentence (string)
    :param isexample: when True, print every intermediate stage
    :param withStopWords: when True, keep Spanish stop words
    :param withStemming: when True, apply stemming() to every token
    :return: list of processed tokens, in original order
    """
    def _trace(label, value):
        # Print one pipeline stage when running in example mode.
        if isexample:
            print(label)
            print('\t' + value)

    _trace('Original text:', x)
    # Anonymise proper names before lowercasing (NAMES matching is case sensitive).
    x = detect_names(x)
    _trace('Nombres:', x)
    x = x.lower()
    _trace('lowercase:', x)
    # Collapse connector / sentiment / question phrases into category tokens.
    x = replace_words(x)
    # Map any (possibly signed, comma-grouped) number to a NUMBER tag.
    x = re.sub('[-+]?\d*,?\d+', ' NUMBER ', x)
    _trace('Numbers to NUMBER:', x)
    # Strip punctuation by mapping each symbol to a space in one pass.
    x = x.translate(str.maketrans({sym: ' ' for sym in ',*:;.-'}))
    _trace('Remove Symbols:', x)
    tokens = x.split()
    if not withStopWords:
        tokens = [tok for tok in tokens if tok not in STOPWORDS]
    if isexample:
        print("Remove Stop Words:")
        print(tokens)
    if withStemming:
        tokens = [stemming(tok) for tok in tokens]
        if isexample:
            print("Stemming:")
            print(tokens)
    return tokens
def preprocessList(X):
    """Preprocess and spell-normalise a batch of sentences.

    :param X: iterable of raw sentence strings
    :return: list of token lists where every token is replaced by the most
             frequent original spelling of its corrected/stemmed form

    Bug fix: the original called ``preprocessing(x, withSpellChecker=False,
    withStemming=False)``, but ``preprocessing`` has no ``withSpellChecker``
    parameter, so every call raised TypeError.  Spell correction is done
    below via ``sc.correction``.
    """
    # NOTE(review): relies on a module-level spell checker `sc` — confirm it
    # is initialised before this function is called.
    wordSentenceList = [preprocessing(x, withStemming=False) for x in X]
    wordList = [x for S in wordSentenceList for x in S]
    dictFreq = Counter(wordList)
    dictionary = list(dictFreq.keys())
    # Spell-correct then stem every dictionary entry.
    dictCorrected = [stemming(sc.correction(x)) for x in dictionary]
    dictMatch = dict(zip(dictionary, dictCorrected))
    dictRepresent = dict()
    for t in set(dictCorrected):
        # Represent each corrected form by its most frequent original spelling.
        candidates = [x for x in dictionary if dictMatch[x] == t]
        maxFreq = max(dictFreq[x] for x in candidates)
        dictRepresent[t] = [x for x in candidates if dictFreq[x] == maxFreq][0]
    dictRepresent['WHY'] = 'por qué'
    return [[dictRepresent[dictMatch[x]] for x in S] for S in wordSentenceList]
def createDictionary(wordSentenceList):
    """Return the sorted vocabulary of all distinct tokens in the sentences.

    :param wordSentenceList: list of token lists, one per sentence
    :return: sorted list of unique tokens
    """
    vocab = set()
    for sentence in wordSentenceList:
        vocab.update(sentence)
    return sorted(vocab)
def tdmtx(wordSentenceList, dictionary):
    """Binary term-document matrix: entry [i, j] is 1 iff dictionary[j]
    occurs in sentence i."""
    rows = []
    for sentence in wordSentenceList:
        present = set(sentence)
        rows.append(np.array([int(term in present) for term in dictionary]))
    return np.array(rows)
def tf_idf_mtx(wordSentenceList):
    """Term-frequency / inverse-document-frequency matrix of the sentences.

    :param wordSentenceList: list of token lists, one per sentence
    :return: (n_sentences, vocab_size) array of tf-idf weights

    Bug fix: the original used ``np.matlib.repmat``, which requires a
    separate ``import numpy.matlib`` (absent here, and the module is removed
    in NumPy 2.0).  Plain broadcasting produces the identical result.
    """
    dictionary = createDictionary(wordSentenceList)
    # Raw term counts per sentence.
    tfmtx = np.array([np.array([x.count(d) for d in dictionary])
                      for x in wordSentenceList])
    N = len(wordSentenceList)
    # idf: log(N / document frequency); broadcasting replaces the repmat call.
    idf = np.log(N / np.sum(tfmtx > 0, axis=0))
    return tfmtx * idf
def minfo_pairs(wordSentenceList, dictionary, th=10):
    """Rank co-occurring word pairs by pointwise mutual information.

    :param wordSentenceList: list of token lists, one per sentence
    :param dictionary: ordered vocabulary used to build the term-document matrix
    :param th: minimum raw co-occurrence count for a pair to be kept
    :return: [words_a, words_b, scores] sorted by decreasing mutual information
    """
    presence = tdmtx(wordSentenceList, dictionary)
    n_docs = presence.shape[0]
    cooc = presence.T @ presence                 # pairwise co-occurrence counts
    p_single = np.diag(cooc) / n_docs           # marginal word probabilities
    # Upper triangle only (each pair once), zeroing pairs at or below threshold.
    upper = np.triu(cooc * (cooc > th), 1)
    idx_a, idx_b = np.nonzero(upper)
    p_pair = upper[idx_a, idx_b] / n_docs
    pmi = np.log2(p_pair / (p_single[idx_a] * p_single[idx_b]))
    order = np.argsort(-pmi)
    words_a = [dictionary[i] for i in idx_a[order]]
    words_b = [dictionary[i] for i in idx_b[order]]
    return [words_a, words_b, pmi[order]]
'''
for y in set_conectores_adversativos:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'CONECTOR_ADVERSATIVO',
x = re.sub(my_regex,text,x)
for y in set_conectores_consecutivos:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'CONECTOR_CONSECUTIVO'
x = re.sub(my_regex,text,x)
for y in set_conectores_condicionales:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'CONECTOR_CONDICIONAL'
x = re.sub(my_regex,text,x)
for y in set_conectores_explicativos:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'CONECTOR_EXPLICATIVO'
x = re.sub(my_regex,text,x)
for y in set_conectores_ejemplificacion:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'CONECTOR_EJEMPLO'
x = re.sub(my_regex,text,x)
for y in set_conectores_temporales_posterioridad:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'CONECTOR_TEMPORAL_POST'
x = re.sub(my_regex,text,x)
for y in set_conectores_espaciales:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'CONECTOR_ESPACIAL'
x = re.sub(my_regex,text,x)
for y in set_comparacion:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'CONECTOR_COMPARACION'
x = re.sub(my_regex,text,x)
for y in set_emocional_positiva:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'EMOCION_POSIT'
x = re.sub(my_regex,text,x)
for y in set_emocional_negativa:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'EMOCION_NEG'
x = re.sub(my_regex,text,x)
for y in preg_pal_porque:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'POR_QUE'
x = re.sub(my_regex,text,x)
for y in preg_pal_explica:
my_regex = r"\b" + re.escape(y) + r"\b"
text = 'EXPLICA'
x = re.sub(my_regex,text,x)
''' | [
"numpy.diag",
"numpy.triu",
"numpy.sum",
"numpy.log2",
"numpy.transpose",
"re.escape",
"numpy.nonzero",
"numpy.argsort",
"numpy.array",
"nltk.corpus.stopwords.words",
"collections.Counter",
"re.sub",
"nltk.SnowballStemmer"
] | [((4675, 4706), 'nltk.SnowballStemmer', 'nltk.SnowballStemmer', (['"""spanish"""'], {}), "('spanish')\n", (4695, 4706), False, 'import nltk\n'), ((4307, 4333), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""spanish"""'], {}), "('spanish')\n", (4322, 4333), False, 'from nltk.corpus import stopwords\n'), ((5656, 5696), 're.sub', 're.sub', (['"""[-+]?\\\\d*,?\\\\d+"""', '""" NUMBER """', 'x'], {}), "('[-+]?\\\\d*,?\\\\d+', ' NUMBER ', x)\n", (5662, 5696), False, 'import re\n'), ((6704, 6721), 'collections.Counter', 'Counter', (['wordList'], {}), '(wordList)\n', (6711, 6721), False, 'from collections import Counter\n'), ((8311, 8343), 'numpy.triu', 'np.triu', (['(comtx * (comtx > th))', '(1)'], {}), '(comtx * (comtx > th), 1)\n', (8318, 8343), True, 'import numpy as np\n'), ((8352, 8369), 'numpy.nonzero', 'np.nonzero', (['triup'], {}), '(triup)\n', (8362, 8369), True, 'import numpy as np\n'), ((8446, 8490), 'numpy.log2', 'np.log2', (['(p2words / (p1word[wa] * p1word[wb]))'], {}), '(p2words / (p1word[wa] * p1word[wb]))\n', (8453, 8490), True, 'import numpy as np\n'), ((8502, 8520), 'numpy.argsort', 'np.argsort', (['(-minfo)'], {}), '(-minfo)\n', (8512, 8520), True, 'import numpy as np\n'), ((5066, 5091), 're.sub', 're.sub', (['my_regex', 'text', 'x'], {}), '(my_regex, text, x)\n', (5072, 5091), False, 'import re\n'), ((8233, 8250), 'numpy.transpose', 'np.transpose', (['mtx'], {}), '(mtx)\n', (8245, 8250), True, 'import numpy as np\n'), ((8269, 8283), 'numpy.diag', 'np.diag', (['comtx'], {}), '(comtx)\n', (8276, 8283), True, 'import numpy as np\n'), ((4617, 4642), 're.sub', 're.sub', (['my_regex', 'text', 'x'], {}), '(my_regex, text, x)\n', (4623, 4642), False, 'import re\n'), ((5025, 5037), 're.escape', 're.escape', (['y'], {}), '(y)\n', (5034, 5037), False, 'import re\n'), ((7709, 7749), 'numpy.array', 'np.array', (['[(d in s) for d in dictionary]'], {}), '([(d in s) for d in dictionary])\n', (7717, 7749), True, 'import numpy as np\n'), ((8028, 8053), 
'numpy.sum', 'np.sum', (['(tfmtx > 0)'], {'axis': '(0)'}), '(tfmtx > 0, axis=0)\n', (8034, 8053), True, 'import numpy as np\n'), ((4572, 4584), 're.escape', 're.escape', (['y'], {}), '(y)\n', (4581, 4584), False, 'import re\n')] |
#!/usr/bin/env python
# encoding: utf-8
'''
@author: <NAME>
@contact: <EMAIL>
@file: logistic regression.py
@time: 7/21/20 3:30 PM
@desc:
'''
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
def show_data(frame_data):
    """Swarm-plot sepal vs petal length for both class labels and show it.

    :param frame_data: DataFrame with 'sepal_length', 'petal_length' and a
                       binary 'species' column (1 = positive, 0 = negative)
    """
    for label, palette in ((1, "Set2"), (0, "Set1")):
        subset = frame_data[frame_data["species"] == label]
        sns.swarmplot(x="sepal_length", y="petal_length", hue="species",
                      data=subset, palette=palette)
    plt.show()
def data_process(data_name=None, is_showdata=None):
    """Load the iris dataset and build a binary-classification design matrix.

    :param data_name: seaborn dataset name (expects 'iris')
    :param is_showdata: when truthy, plot the two classes first
    :return: (x, y) where x is (n, 3) [bias, sepal_length, petal_length]
             and y is (n, 1) with setosa -> 0, virginica -> 1
    """
    df = sns.load_dataset(data_name)
    # Encode the two kept classes as 0/1 and drop 'versicolor' entirely.
    iris_data = df[["sepal_length", "petal_length", "species"]].replace(
        {"species": "setosa"}, 0).replace({"species": "virginica"}, 1)
    iris_data = iris_data[~iris_data["species"].isin(["versicolor"])]
    iris_data.insert(0, "ones", 1)  # bias column
    if is_showdata:
        show_data(iris_data)
    # Bug fix: DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy().
    all_data = np.array(iris_data.to_numpy(), dtype=np.float64)
    np.random.shuffle(all_data)  # in-place row shuffle (no fixed seed)
    x = all_data[:, :-1].reshape(-1, 3)
    y = all_data[:, -1].reshape(-1, 1)
    return x, y
def sigmoid(z):
    """Logistic function 1 / (1 + e^-z); accepts scalars or numpy arrays."""
    neg_exp = np.exp(-z)
    return 1 / (1 + neg_exp)
def model(x, theta):
    """Logistic-regression hypothesis h(x) = sigmoid(x . theta^T).

    :param x: design matrix including the bias column (features 1, x1, x2)
    :param theta: row vector of parameters (theta0..theta2)
    :return: predicted probabilities
    """
    z = np.dot(x, theta.T)
    return sigmoid(z)
def gradient(x, y, theta):
    """Mean gradient of the logistic loss over the batch."""
    residual = model(x, theta) - y
    return np.sum(residual * x, axis=0) / len(x)
def cost(x, y, theta):
    """Average cross-entropy loss of the predictions against labels y."""
    prediction = model(x, theta)
    pos_term = -y * np.log(prediction)
    neg_term = (1 - y) * np.log(1 - prediction)
    return np.sum(pos_term - neg_term) / len(x)
def main(data_name, is_showdata=True):
    """Train logistic regression by batch gradient descent and plot the loss.

    :param data_name: seaborn dataset name passed through to data_process
    :param is_showdata: when truthy, plot the raw data before training
    """
    x, y = data_process(data_name, is_showdata)
    weights = np.ones([1, 3])
    learning_rate = 0.01
    history = []
    for _ in range(200):
        # Record the loss at the current weights, then take one descent step.
        history.append(cost(x, y, weights))
        weights = weights - learning_rate * gradient(x, y, weights)
    sns.set_style("white")
    plt.plot(history)
    plt.title("loss")
    plt.show()
if __name__ == '__main__':
main(data_name='iris', is_showdata=False)
| [
"matplotlib.pyplot.title",
"seaborn.set_style",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.multiply",
"seaborn.load_dataset",
"numpy.ones",
"seaborn.swarmplot",
"numpy.exp",
"numpy.dot",
"numpy.random.shuffle"
] | [((356, 456), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""sepal_length"""', 'y': '"""petal_length"""', 'hue': '"""species"""', 'data': 'positive', 'palette': '"""Set2"""'}), "(x='sepal_length', y='petal_length', hue='species', data=\n positive, palette='Set2')\n", (369, 456), True, 'import seaborn as sns\n'), ((456, 556), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""sepal_length"""', 'y': '"""petal_length"""', 'hue': '"""species"""', 'data': 'negative', 'palette': '"""Set1"""'}), "(x='sepal_length', y='petal_length', hue='species', data=\n negative, palette='Set1')\n", (469, 556), True, 'import seaborn as sns\n'), ((556, 566), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (564, 566), True, 'import matplotlib.pyplot as plt\n'), ((629, 656), 'seaborn.load_dataset', 'sns.load_dataset', (['data_name'], {}), '(data_name)\n', (645, 656), True, 'import seaborn as sns\n'), ((1071, 1098), 'numpy.random.shuffle', 'np.random.shuffle', (['all_data'], {}), '(all_data)\n', (1088, 1098), True, 'import numpy as np\n'), ((1833, 1848), 'numpy.ones', 'np.ones', (['[1, 3]'], {}), '([1, 3])\n', (1840, 1848), True, 'import numpy as np\n'), ((2037, 2059), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (2050, 2059), True, 'import seaborn as sns\n'), ((2064, 2079), 'matplotlib.pyplot.plot', 'plt.plot', (['costs'], {}), '(costs)\n', (2072, 2079), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2101), 'matplotlib.pyplot.title', 'plt.title', (['"""loss"""'], {}), "('loss')\n", (2093, 2101), True, 'import matplotlib.pyplot as plt\n'), ((2106, 2116), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2114, 2116), True, 'import matplotlib.pyplot as plt\n'), ((1405, 1423), 'numpy.dot', 'np.dot', (['x', 'theta.T'], {}), '(x, theta.T)\n', (1411, 1423), True, 'import numpy as np\n'), ((1706, 1723), 'numpy.sum', 'np.sum', (['(pos - neg)'], {}), '(pos - neg)\n', (1712, 1723), True, 'import numpy as np\n'), ((1227, 1237), 
'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (1233, 1237), True, 'import numpy as np\n'), ((1504, 1525), 'numpy.multiply', 'np.multiply', (['error', 'x'], {}), '(error, x)\n', (1515, 1525), True, 'import numpy as np\n')] |
import os
import cv2
import torch
import numpy as np
from lib.utils.net_tools import load_ckpt
from lib.utils.logging import setup_logging
import torchvision.transforms as transforms
from tools.parse_arg_test import TestOptions
from data.load_dataset import CustomerDataLoader
from lib.models.metric_depth_model import MetricDepthModel
from lib.core.config import cfg, merge_cfg_from_file
from lib.models.image_transfer import bins_to_depth
logger = setup_logging(__name__)
def scale_torch(img, scale):
    """Convert an HWC image array to a normalised CHW torch tensor.

    :param img: input image, shape [H, W, C]
    :param scale: divisor applied to pixel values (float)
    :return: normalised tensor, shape [C, H, W]
    """
    chw = np.transpose(img, (2, 0, 1))   # HWC -> CHW
    chw = chw[::-1, :, :]                # reverse channel order
    chw = chw.astype(np.float32)
    chw /= scale
    # copy() gives a contiguous buffer (the channel flip made strides negative).
    tensor = torch.from_numpy(chw.copy())
    normalize = transforms.Normalize(cfg.DATASET.RGB_PIXEL_MEANS, cfg.DATASET.RGB_PIXEL_VARS)
    return normalize(tensor)
if __name__ == '__main__':
    # Parse test-time options; force single worker and single-image batches.
    test_args = TestOptions().parse()
    test_args.thread = 1
    test_args.batchsize = 1
    merge_cfg_from_file(test_args)
    data_loader = CustomerDataLoader(test_args)
    test_datasize = len(data_loader)
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))
    # load model (inference mode)
    model = MetricDepthModel()
    model.eval()
    # load checkpoint, if one was supplied on the command line
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
    model = torch.nn.DataParallel(model)
    path = os.path.join(cfg.ROOT_DIR, './test_any_imgs_examples') # the dir of imgs
    imgs_list = os.listdir(path)
    for i in imgs_list:
        print(i)
        with torch.no_grad():
            img = cv2.imread(os.path.join(path, i))
            # Resize to an integer size (identity scale here), then to a CHW tensor.
            img_resize = cv2.resize(img, (int(img.shape[1]), int(img.shape[0])), interpolation=cv2.INTER_LINEAR)
            img_torch = scale_torch(img_resize, 255)
            img_torch = img_torch[None, :, :, :].cuda()
            # Forward pass: softmax over depth bins -> metric depth map.
            _, pred_depth_softmax= model.module.depth_model(img_torch)
            pred_depth = bins_to_depth(pred_depth_softmax)
            pred_depth = pred_depth.cpu().numpy().squeeze()
            pred_depth_scale = (pred_depth / pred_depth.max() * 60000).astype(np.uint16)  # scale 60000 for visualization
cv2.imwrite(os.path.join(path, i.split('.')[0] + '-raw.png'), pred_depth_scale) | [
"torch.no_grad",
"os.path.join",
"lib.models.metric_depth_model.MetricDepthModel",
"torchvision.transforms.Normalize",
"numpy.transpose",
"tools.parse_arg_test.TestOptions",
"lib.models.image_transfer.bins_to_depth",
"data.load_dataset.CustomerDataLoader",
"torch.nn.DataParallel",
"lib.utils.net_t... | [((451, 474), 'lib.utils.logging.setup_logging', 'setup_logging', (['__name__'], {}), '(__name__)\n', (464, 474), False, 'from lib.utils.logging import setup_logging\n'), ((692, 720), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (704, 720), True, 'import numpy as np\n'), ((1068, 1098), 'lib.core.config.merge_cfg_from_file', 'merge_cfg_from_file', (['test_args'], {}), '(test_args)\n', (1087, 1098), False, 'from lib.core.config import cfg, merge_cfg_from_file\n'), ((1118, 1147), 'data.load_dataset.CustomerDataLoader', 'CustomerDataLoader', (['test_args'], {}), '(test_args)\n', (1136, 1147), False, 'from data.load_dataset import CustomerDataLoader\n'), ((1288, 1306), 'lib.models.metric_depth_model.MetricDepthModel', 'MetricDepthModel', ([], {}), '()\n', (1304, 1306), False, 'from lib.models.metric_depth_model import MetricDepthModel\n'), ((1441, 1469), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (1462, 1469), False, 'import torch\n'), ((1482, 1536), 'os.path.join', 'os.path.join', (['cfg.ROOT_DIR', '"""./test_any_imgs_examples"""'], {}), "(cfg.ROOT_DIR, './test_any_imgs_examples')\n", (1494, 1536), False, 'import os\n'), ((1571, 1587), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1581, 1587), False, 'import os\n'), ((846, 923), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['cfg.DATASET.RGB_PIXEL_MEANS', 'cfg.DATASET.RGB_PIXEL_VARS'], {}), '(cfg.DATASET.RGB_PIXEL_MEANS, cfg.DATASET.RGB_PIXEL_VARS)\n', (866, 923), True, 'import torchvision.transforms as transforms\n'), ((1384, 1411), 'lib.utils.net_tools.load_ckpt', 'load_ckpt', (['test_args', 'model'], {}), '(test_args, model)\n', (1393, 1411), False, 'from lib.utils.net_tools import load_ckpt\n'), ((989, 1002), 'tools.parse_arg_test.TestOptions', 'TestOptions', ([], {}), '()\n', (1000, 1002), False, 'from tools.parse_arg_test import TestOptions\n'), ((1642, 1657), 'torch.no_grad', 
'torch.no_grad', ([], {}), '()\n', (1655, 1657), False, 'import torch\n'), ((2030, 2063), 'lib.models.image_transfer.bins_to_depth', 'bins_to_depth', (['pred_depth_softmax'], {}), '(pred_depth_softmax)\n', (2043, 2063), False, 'from lib.models.image_transfer import bins_to_depth\n'), ((1688, 1709), 'os.path.join', 'os.path.join', (['path', 'i'], {}), '(path, i)\n', (1700, 1709), False, 'import os\n')] |
# =============================================================================
# caching.py - Supervised caching of function results.
# Copyright (C) 1999, 2000, 2001, 2002 <NAME>
# Australian National University (1999-2003)
# Geoscience Australia (2003-present)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License (http://www.gnu.org/copyleft/gpl.html)
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
#
#
# Contact address: <EMAIL>
#
# Version 1.5.6 February 2002
# =============================================================================
"""Module caching.py - Supervised caching of function results.
Public functions:
cache(my_F,args) -- Cache values returned from callable object my_F given args.
cachestat() -- Reports statistics about cache hits and time saved.
test() -- Conducts a basic test of the caching functionality.
See doc strings of individual functions for detailed documentation.
"""
from __future__ import division
# -----------------------------------------------------------------------------
# Initialisation code
# Determine platform
#
from builtins import zip
from builtins import input
from builtins import str
from builtins import range
from past.builtins import basestring
from past.utils import old_div
from os import getenv
import collections
import inspect
import types
import time
import sys
import os
if os.name in ['nt', 'dos', 'win32', 'what else?']:
unix = False
else:
unix = True
import anuga.utilities.log as log
from anuga.utilities import system_tools
import numpy as num
#from future
cache_dir = '.python_cache'
# Make default caching directory name.
# The 'data directory' environment variable changed from INUNDATIONHOME to
# ANUGADATA - check the new name first to give a smooth changeover.
if unix:
    homedir = getenv('ANUGADATA')
    if not homedir:
        homedir = getenv('INUNDATIONHOME')
    if not homedir:
        homedir = '~'
    else:
        # homedir is a shared group area, so label each user's cache
        # directory with their login name.
        # Bug fix: the original tested 'if not user:', which skipped the
        # label when LOGNAME was set and crashed ('_' + None) when it
        # was not.
        user = getenv('LOGNAME')
        if user:
            cache_dir += '_' + user
    CR = '\n'
else:
    homedir = 'c:'
    CR = '\r\n'  # FIXME: Not tested under windows
cachedir = os.path.join(homedir, cache_dir)
# It turns out hashes are no longer stable under Python3 (grr).
# https://stackoverflow.com/questions/27522626/hash-function-in-python-3-3-returns-different-results-between-sessions
# https://stackoverflow.com/questions/30585108/disable-hash-randomization-from-within-python-program
#
# The fix is to use another hashing library.
# Builtin hash() is randomised per interpreter session in Python 3
# (PYTHONHASHSEED), so cache filenames derived from it would change between
# runs.  Shadow it with a stable sha256-based digest.
if system_tools.major_version == 3:
    import hashlib
    def hash(x):
        # NOTE: returns a hex string, not an int like the builtin hash().
        res = hashlib.sha256(str(x).encode()).hexdigest()
        #print('MY:', x, res)
        return res
# -----------------------------------------------------------------------------
# Options directory with default values - to be set by user
#
# Default runtime options; change them via set_option(key, value).
options = {
    'cachedir': cachedir,  # Default cache directory
    'maxfiles': 1000000,   # Maximum number of cached files
    'savestat': True,      # Log caching info to stats file
    'verbose': True,       # Write messages to standard output
    'bin': True,           # Use binary format (more efficient)
    'compression': True,   # Use zlib compression
    'bytecode': True,      # Recompute if bytecode has changed
    'expire': False        # Automatically remove files that have been
                           # accessed least recently
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_option(key, value):
    """Assign a new value to an entry of the module-level options dictionary.

    USAGE:
      set_option(key, value)

    ARGUMENTS:
      key -- Existing key in the options dictionary. (Required)
      value -- New value for key. (Required)

    Raises KeyError if key is not already present in options.
    """
    if key not in options:
        raise KeyError(key)
    options[key] = value
# -----------------------------------------------------------------------------
# Function cache - the main routine
def cache(my_F,
          args=(),
          kwargs={},
          dependencies=None,
          cachedir=None,
          verbose=None,
          compression=None,
          evaluate=False,
          test=False,
          clear=False,
          return_filename=False):
  # NOTE(review): kwargs={} is a mutable default argument; the function never
  # mutates it here, but callers should not rely on that.
  """Supervised caching of function results. Also known as memoization.

  USAGE:
    result = cache(my_F, args, kwargs, dependencies, cachedir, verbose,
                   compression, evaluate, test, return_filename)

  ARGUMENTS:
    my_F -- Callable object (Required)
    args -- Arguments to my_F (Default: ())
    kwargs -- Keyword arguments to my_F (Default: {})
    dependencies -- Filenames that my_F depends on (Default: None)
    cachedir -- Directory for cache files (Default: options['cachedir'])
    verbose -- Flag verbose output to stdout
               (Default: options['verbose'])
    compression -- Flag zlib compression (Default: options['compression'])
    evaluate -- Flag forced evaluation of my_F (Default: False)
    test -- Flag test for cached results (Default: False)
    clear -- Flag delete cached results (Default: False)
    return_filename -- Flag return of cache filename (Default: False)

  DESCRIPTION:
    A Python function call of the form

      result = my_F(arg1,...,argn)

    can be replaced by

      from caching import cache
      result = cache(my_F,(arg1,...,argn))

    The latter form returns the same output as the former but reuses cached
    results if the function has been computed previously in the same context.
    'result' and the arguments can be simple types, tuples, list, dictionaries
    or objects, but not unhashable types such as functions or open file
    objects.  The function 'my_F' may be a member function of an object or a
    module.

    This type of caching is particularly useful for computationally intensive
    functions with few frequently used combinations of input arguments.  Note
    that if the inputs or output are very large caching might not save time
    because disc access may dominate the execution time.

    If the function definition changes after a result has been cached it will
    be detected by examining the functions bytecode (co_code, co_consts,
    func_defaults, co_argcount) and it will be recomputed.

  LIMITATIONS:
    1 Caching uses function(*args, **kwargs) to evaluate and will work
      with anything that can be pickled, so any limitation in function(,)
      or pickle extends to caching.
    2 A function to be cached should not depend on global variables
      as wrong results may occur if globals are changed after a result has
      been cached.

  -----------------------------------------------------------------------------
  Additional functionality:

  Keyword args
    Keyword arguments (kwargs) can be added as a dictionary of keyword: value
    pairs, following Python's 'extended call syntax'.

    A Python function call of the form

      result = my_F(arg1,...,argn, kwarg1=val1,...,kwargm=valm)

    is then cached as follows

      from caching import cache
      result = cache(my_F,(arg1,...,argn), {kwarg1:val1,...,kwargm:valm})

    The default value of kwargs is {}

  Explicit dependencies:
    The call
      cache(my_F,(arg1,...,argn), dependencies = <list of filenames>)
    Checks the size, creation time and modification time of each listed file.
    If any file has changed the function is recomputed and the results stored
    again.

  Specify caching directory:
    The call
      cache(my_F,(arg1,...,argn), cachedir = <cachedir>)
    designates <cachedir> where cached data are stored. Use ~ to indicate
    users home directory - not $HOME. The default is ~/.python_cache on a UNIX
    platform and c:/.python_cache on a Win platform.

  Silent operation:
    The call
      cache(my_F,(arg1,...,argn), verbose=False)
    suppresses messages to standard output.

  Compression:
    The call
      cache(my_F,(arg1,...,argn), compression=False)
    disables compression. (Default: compression=True). If the requested
    compressed or uncompressed file is not there, it'll try the other version.

  Forced evaluation:
    The call
      cache(my_F,(arg1,...,argn), evaluate=True)
    forces the function to evaluate even though cached data may exist.

  Testing for presence of cached result:
    The call
      cache(my_F,(arg1,...,argn), test=True)
    retrieves cached result if it exists, otherwise None. The function will
    not be evaluated. If both evaluate and test are switched on, evaluate
    takes precedence.
    ??NOTE: In case of hash collisions, this may return the wrong result as
    ??it only checks if *a* cached result is present.
    # I think this was due to the bytecode option being False for some
    # reason. (23/1/2009).

  Obtain cache filenames:
    The call
      cache(my_F,(arg1,...,argn), return_filename=True)
    returns the hashed base filename under which this function and its
    arguments would be cached

  Clearing cached results:
    The call
      cache(my_F,'clear')
    clears all cached data for 'my_F' and
      cache('clear')
    clears all cached data.

    NOTE: The string 'clear' can be passed an *argument* to my_F using
      cache(my_F,('clear',)) or cache(my_F,tuple(['clear'])).

    New form of clear:
      cache(my_F,(arg1,...,argn), clear=True)
    clears cached data for particular combination my_F and args
  """
  # Imports and input checks
  #
  import time, string
  if not cachedir:
    cachedir = options['cachedir']
  if verbose is None:  # Do NOT write 'if not verbose:', it could be zero.
    verbose = options['verbose']
  if compression is None:  # Do NOT write 'if not compression:',
                           # it could be zero.
    compression = options['compression']
  # Create cache directory if needed
  CD = checkdir(cachedir,verbose)
  # Handle the case cache('clear')
  if isinstance(my_F, basestring):
    if my_F.lower() == 'clear':
      clear_cache(CD,verbose=verbose)
      return
  # Handle the case cache(my_F, 'clear')
  if isinstance(args, basestring):
    if args.lower() == 'clear':
      clear_cache(CD,my_F,verbose=verbose)
      return
  # Force singleton arg into a tuple
  if not isinstance(args, tuple):
    args = tuple([args])
  # Check that kwargs is a dictionary
  if not isinstance(kwargs, dict):
    raise TypeError
  # Hash arguments (and keyword args) to integer
  arghash = myhash((args, kwargs))
  # Get sizes and timestamps for files listed in dependencies.
  # Force singletons into a tuple.
  if dependencies and not isinstance(dependencies, (tuple, list)):
    dependencies = tuple([dependencies])
  deps = get_depstats(dependencies)
  # Extract function name from my_F object
  funcname = get_funcname(my_F)
  # Create cache filename
  FN = funcname+'_'+str(arghash)
  #print()
  #print('FN', FN)
  #print('repr(arghash)', repr(arghash))
  #print('arghash', arghash)
  #print()
  if return_filename:
    return(FN)
  if clear:
    # Delete every cached artefact (all file types, compressed or not)
    # for this function/argument combination.
    for file_type in file_types:
      file_name = CD+FN+'_'+file_type
      for fn in [file_name, file_name + '.z']:
        if os.access(fn, os.F_OK):
          if unix:
            os.remove(fn)
          else:
            # FIXME: os.remove doesn't work under windows
            os.system('del '+fn)
          # NOTE(review): 'verbose is True' is only satisfied by the bool
          # True, not by truthy values such as 1 — confirm that is intended.
          if verbose is True:
            log.critical('MESSAGE (caching): File %s deleted' % fn)
        ##else:
        ##  log.critical('%s was not accessed' % fn)
    return None
  #-------------------------------------------------------------------
  # Check if previous computation has been cached
  if evaluate is True:
    Retrieved = None  # Force evaluation of my_F regardless of caching status.
    reason = 5
  else:
    T, FN, Retrieved, reason, comptime, loadtime, compressed = \
        CacheLookup(CD, FN, my_F,
                    args, kwargs,
                    deps,
                    verbose,
                    compression,
                    dependencies)
  if not Retrieved:
    if test:  # Do not attempt to evaluate function
      T = None
    else:  # Evaluate function and save to cache
      if verbose is True:
        msg1(funcname, args, kwargs,reason)
      # Remove expired files automatically
      if options['expire']:
        DeleteOldFiles(CD,verbose)
      # Save args before function is evaluated in case
      # they are modified by function
      save_args_to_cache(CD,FN,args,kwargs,compression)
      # Execute and time function with supplied arguments
      t0 = time.time()
      T = my_F(*args, **kwargs)  # Built-in 'apply' deprecated in Py3K
      #comptime = round(time.time()-t0)
      comptime = time.time()-t0
      if verbose is True:
        msg2(funcname,args,kwargs,comptime,reason)
      # Save results and estimated loading time to cache
      loadtime = save_results_to_cache(T, CD, FN, my_F, deps, comptime, \
                                       funcname, dependencies, compression)
      if verbose is True:
        msg3(loadtime, CD, FN, deps, compression)
      compressed = compression
  if options['savestat'] and (not test or Retrieved):
    ##if options['savestat']:
    addstatsline(CD,funcname,FN,Retrieved,reason,comptime,loadtime,compressed)
  return(T)  # Return results in all cases
# -----------------------------------------------------------------------------
def cachestat(sortidx=4, period=-1, showuser=None, cachedir=None):
    """Print summary statistics of caching efficiency.

    USAGE:
      cachestat(sortidx, period, showuser, cachedir)

    ARGUMENTS:
      sortidx  -- Index of the field lists are sorted by (default: 4).
                  0: 'Name', 1: 'Hits', 2: 'CPU', 3: 'Time Saved',
                  4: 'Gain(%)', 5: 'Size'
      period   -- -1 uses all available caching history; 0 uses only the
                  current month (default: -1).
      showuser -- When set, an additional per-user table is shown
                  (default: None).
      cachedir -- Directory holding the cache files
                  (default: options['cachedir']).

    DESCRIPTION:
      Logged caching statistics are condensed into summaries of the form

      --------------------------------------------------------------------
      Function Name  Hits  Exec(s)  Cache(s)  Saved(s)  Gain(%)  Size
      --------------------------------------------------------------------
    """
    # All the work is delegated to the private implementation.
    __cachestat(sortidx, period, showuser, cachedir)
# -----------------------------------------------------------------------------
# Has mostly been moved to proper unit test.
# What remains here includes example of the
# cache statistics form.
def test(cachedir=None, verbose=False, compression=None):
    """Test the functionality of caching.

    Runs a sequence of self-checks covering evaluation, retrieval,
    compression, cache-file presence, clearing, dependency tracking and a
    rough performance comparison.  Results are reported via logtestOK /
    logtesterror rather than raised as exceptions.

    USAGE:
        test(verbose)
    ARGUMENTS:
        verbose -- Flag whether caching will output its statistics (default=False)
        cachedir -- Directory for cache files (Default: options['cachedir'])
        compression -- Flag zlib compression (Default: options['compression'])
    """
    import string, time

    # Initialise
    #
    if not cachedir:
        cachedir = options['cachedir']

    if verbose is None:  # Do NOT write 'if not verbose:', it could be zero.
        verbose = options['verbose']

    if compression is None:  # Do NOT write 'if not compression:',
                             # it could be zero.
        compression = options['compression']
    else:
        try:
            set_option('compression', compression)
        except:
            logtesterror('Set option failed')

    # Fall back to uncompressed caching if zlib is unavailable
    try:
        import zlib
    except:
        log.critical()
        log.critical('*** Could not find zlib, default to no-compression ***')
        log.critical('*** Installing zlib will improve performance of caching ***')
        log.critical()
        compression = 0
        set_option('compression', compression)

    log.critical('\nTesting caching module - please stand by\n')

    # Define a test function to be cached
    #
    def f(a, b, c, N, x=0, y='abcdefg'):
        """f(a,b,c,N)
        Do something time consuming and produce a complex result.
        """
        import string

        B = []
        for n in range(N):
            s = str(n+2.0/(n + 4.0))+'.a'*10
            B.append((a, b, c, s, n, x, y))
        return(B)

    # Check that default cachedir is OK
    #
    CD = checkdir(cachedir, verbose)

    # Make a dependency file
    #
    try:
        DepFN = CD + 'testfile.tmp'
        DepFN_wildcard = CD + 'test*.tmp'
        Depfile = open(DepFN, 'w')
        Depfile.write('We are the knights who say NI!')
        Depfile.close()
        logtestOK('Wrote file %s' % DepFN)
    except:
        logtesterror('Could not open file %s for writing - check your environment'
                     % DepFN)

    # Check set_option (and switch stats off)
    #
    try:
        set_option('savestat', 0)
        assert(options['savestat'] == 0)
        logtestOK('Set option')
    except:
        logtesterror('Set option failed')

    # Make some test input arguments
    #
    N = 5000  # Make N fairly small here

    a = [1, 2]
    b = ('Thou shalt count the number three', 4)
    c = {'Five is right out': 6, (7, 8): 9}
    x = 3
    y = 'holy hand granate'

    # Test caching, once without and (if available) once with compression
    #
    if compression:
        comprange = 2
    else:
        comprange = 1

    for comp in range(comprange):
        # Evaluate and store
        #
        try:
            T1 = cache(f, (a, b, c, N), {'x': x, 'y': y}, evaluate=1,
                       verbose=verbose, compression=comp)
            if comp:
                logtestOK('Caching evaluation with compression')
            else:
                logtestOK('Caching evaluation without compression')
        except:
            if comp:
                logtesterror('Caching evaluation with compression failed - try caching.test(compression=0)')
            else:
                logtesterror('Caching evaluation failed - try caching.test(verbose=1)')

        # Retrieve
        #
        try:
            T2 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose,
                       compression=comp)
            if comp:
                logtestOK('Caching retrieval with compression')
            else:
                logtestOK('Caching retrieval without compression')
        except:
            if comp:
                logtesterror('Caching retrieval with compression failed - try caching.test(compression=0)')
            else:
                logtesterror('Caching retrieval failed - try caching.test(verbose=1)')

        # Reference result
        #
        T3 = f(a, b, c, N, x=x, y=y)  # Compute without caching

        if T1 == T2 and T2 == T3:
            if comp:
                logtestOK('Basic caching functionality (with compression)')
            else:
                logtestOK('Basic caching functionality (without compression)')
        else:
            logtesterror('Cached result does not match computed result')

    # Test return_filename
    #
    try:
        FN = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose,
                   return_filename=1)
        assert(FN[:2] == 'f[')
        logtestOK('Return of cache filename')
    except:
        logtesterror('Return of cache filename failed')

    # Test existence of cachefiles
    #
    try:
        (datafile, compressed0) = myopen(CD+FN+'_'+file_types[0], "rb", compression)
        (argsfile, compressed1) = myopen(CD+FN+'_'+file_types[1], "rb", compression)
        (admfile, compressed2) = myopen(CD+FN+'_'+file_types[2], "rb", compression)

        logtestOK('Presence of cache files')
        datafile.close()
        argsfile.close()
        admfile.close()
    except:
        logtesterror('Expected cache files did not exist')

    # Test 'test' function when cache is present
    #
    try:
        T4 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose, test=1)
        assert(T1 == T4)

        logtestOK("Option 'test' when cache file present")
    except:
        logtesterror("Option 'test' when cache file present failed")

    # Test that 'clear' works
    #
    try:
        cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose, clear=1)
        logtestOK('Clearing of cache files')
    except:
        logtesterror('Clear does not work')

    # Test 'test' function when cache is absent
    #
    try:
        T4 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose, test=1)
        assert(T4 is None)
        logtestOK("Option 'test' when cache absent")
    except:
        logtesterror("Option 'test' when cache absent failed")

    # Test dependencies
    #
    T1 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose,
               dependencies=DepFN)
    T2 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose,
               dependencies=DepFN)

    if T1 == T2:
        logtestOK('Basic dependencies functionality')
    else:
        logtesterror('Dependencies do not work')

    # Test basic wildcard dependency
    #
    T3 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose,
               dependencies=DepFN_wildcard)

    if T1 == T3:
        logtestOK('Basic dependencies with wildcard functionality')
    else:
        logtesterror('Dependencies with wildcards do not work')

    # Test that changed timestamp in dependencies triggers recomputation

    # Modify dependency file
    Depfile = open(DepFN, 'a')
    Depfile.write('You must cut down the mightiest tree in the forest with <NAME>')
    Depfile.close()

    T3 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose,
               dependencies=DepFN, test = 1)

    if T3 is None:
        logtestOK('Changed dependencies recognised')
    else:
        logtesterror('Changed dependencies not recognised')

    # Test recomputation when dependencies have changed
    #
    T3 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose,
               dependencies=DepFN)
    if T1 == T3:
        logtestOK('Recomputed value with changed dependencies')
    else:
        logtesterror('Recomputed value with changed dependencies failed')

    # Performance test (with statistics)
    # Don't really rely on this as it will depend on specific computer.
    #
    set_option('savestat', 1)
    N = 20*N  # Should be large on fast computers...

    tt = time.time()
    T1 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose)
    t1 = time.time() - tt

    tt = time.time()
    T2 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=verbose)
    t2 = time.time() - tt

    if T1 == T2:
        if t1 > t2:
            logtestOK('Performance test: relative time saved = %s pct'
                      % str(round(old_div((t1-t2)*100, t1), 2)))
    else:
        logtesterror('Basic caching failed for new problem')

    # Test presence of statistics file
    #
    try:
        DIRLIST = os.listdir(CD)
        SF = []
        for FN in DIRLIST:
            if string.find(FN, statsfile) >= 0:
                fid = open(CD+FN, 'r')
                fid.close()
        logtestOK('Statistics files present')
    except:
        logtestOK('Statistics files cannot be opened')

    print_header_box('Show sample output of the caching function:')

    T2 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=0)
    T2 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=0)
    T2 = cache(f, (a, b, c, N), {'x': x, 'y': y}, verbose=1)

    print_header_box('Show sample output of cachestat():')
    if unix:
        cachestat()
    else:
        try:
            import time
            t = time.strptime('2030', '%Y')
            cachestat()
        except:
            # NOTE(review): time.strptime is available on modern Windows
            # Pythons; this guard looks historical — confirm before removing.
            log.critical('cachestat() does not work here, because it relies on '
                         'time.strptime() which is unavailable in Windows')

    logtestOK('Caching self test completed')

    # Test setoption (not yet implemented)
    #
# Test setoption (not yet implemented)
#
#==============================================================================
# Auxiliary functions
#==============================================================================
# Import pickler
# cPickle is used by functions mysave, myload, and compare
#
#import cPickle # 10 to 100 times faster than pickle
#import pickle as pickler
import dill as pickler
#pickler = cPickle
# Local immutable constants
#
# Local immutable constants used throughout the caching module.
comp_level = 1   # Compression level for zlib.
                 # comp_level = 1 works well.
textwidth1 = 16  # Text width of key fields in report forms.
#textwidth2 = 132 # Maximal width of textual representation of
textwidth2 = 300  # Maximal width of textual representation of
                  # arguments.
textwidth3 = 16  # Initial width of separation lines. Is modified.
textwidth4 = 50  # Text width in logtestOK()
statsfile = '.cache_stat'  # Basefilename for cached statistics.
                           # It will reside in the chosen cache directory.
# Each cached entry is stored as three files; these are the name suffixes.
file_types = ['Result',  # File name extension for cached function results.
              'Args',    # File name extension for stored function args.
              'Admin']   # File name extension for administrative info.
# Human-readable reasons, indexed by the 'reason' code returned by CacheLookup.
Reason_msg = ['OK',      # Verbose reasons for recomputation
              'No cached result',
              'Dependencies have changed',
              'Arguments have changed',
              'Bytecode has changed',
              'Recomputation was requested by caller',
              'Cached file was unreadable']
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def CacheLookup(CD, FN, my_F, args, kwargs, deps, verbose, compression,
                dependencies):
    """Determine whether cached result exists and return info.
    USAGE:
      (T, FN, Retrieved, reason, comptime, loadtime, compressed) = \
      CacheLookup(CD, FN, my_F, args, kwargs, deps, verbose, compression, \
                  dependencies)
    INPUT ARGUMENTS:
      CD              -- Cache Directory
      FN              -- Suggested cache file name
      my_F            -- Callable object
      args            -- Tuple of arguments
      kwargs          -- Dictionary of keyword arguments
      deps            -- Dependencies time stamps
      verbose         -- Flag text output
      compression     -- Flag zlib compression
      dependencies    -- Given list of dependencies
    OUTPUT ARGUMENTS:
      T               -- Cached result if present otherwise None
      FN              -- File name under which new results must be saved
      Retrieved       -- True if a valid cached result was found
      reason          -- 0: OK (if Retrieved),
                         1: No cached result,
                         2: Dependencies have changed,
                         3: Arguments have changed
                         4: Bytecode has changed
                         5: Recomputation was forced
                         6: Unreadable file
      comptime        -- Number of seconds it took to compute cached result
      loadtime        -- Number of seconds it took to load cached result
      compressed      -- Flag (0,1) if cached results were compressed or not
    DESCRIPTION:
      Determine if cached result exists as follows:
      Load in saved arguments and bytecode stored under hashed filename.
      If they are identical to current arguments and bytecode and if dependencies
      have not changed their time stamp, then return cached result.
      Otherwise return filename under which new results should be cached.
      Hash collisions are handled recursively by calling CacheLookup again with a
      modified filename.
    """
    import time, string

    # Assess whether cached result exists - compressed or not.
    #
    if verbose:
        log.critical('Caching: looking for cached files %s_{%s,%s,%s}.z'
                     % (CD+FN, file_types[0], file_types[1], file_types[2]))
    (datafile, compressed0) = myopen(CD+FN+'_'+file_types[0], "rb", compression)
    (argsfile, compressed1) = myopen(CD+FN+'_'+file_types[1], "rb", compression)
    (admfile, compressed2) = myopen(CD+FN+'_'+file_types[2], "rb", compression)

    if verbose is True and deps is not None:
        log.critical('Caching: Dependencies are %s' % list(deps.keys()))

    if not (argsfile and datafile and admfile) or \
       not (compressed0 == compressed1 and compressed0 == compressed2):
        # Cached result does not exist or files were compressed differently
        #
        # This will ensure that evaluation will take place unless all files are
        # present.
        reason = 1
        return(None, FN, None, reason, None, None, None)  # Recompute using same filename

    compressed = compressed0  # Remember if compressed files were actually used
    datafile.close()

    # Retrieve arguments and adm. info
    #
    R, reason = myload(argsfile, compressed)  # The original arguments
    argsfile.close()

    if reason > 0:
        # Recompute using same filename
        return(None, FN, None, reason, None, None, None)
    else:
        (argsref, kwargsref) = R

    R, reason = myload(admfile, compressed)
    admfile.close()

    if reason > 0:
        return(None, FN, None, reason, None, None, None)  # Recompute using same filename

    depsref = R[0]   # Dependency statistics
    comptime = R[1]  # The computation time
    coderef = R[2]   # The byte code
    funcname = R[3]  # The function name

    # Check if dependencies have changed
    #
    if dependencies and not compare(depsref, deps):
        if verbose:
            log.critical('Dependencies %s have changed - recomputing' % dependencies)

        # Don't use cached file - recompute
        reason = 2
        return(None, FN, None, reason, None, None, None)

    # Get bytecode from my_F
    #
    bytecode = get_bytecode(my_F)

    # Check if arguments or bytecode have changed
    if compare(argsref, args) and compare(kwargsref, kwargs) and \
       (not options['bytecode'] or compare(bytecode, coderef)):

        # Arguments and dependencies match. Get cached results
        T, loadtime, compressed, reason = load_from_cache(CD, FN, compressed)
        if reason > 0:
            # Recompute using same FN
            return(None, FN, None, reason, None, None, None)

        Retrieved = 1
        reason = 0

        if verbose:
            msg4(funcname, args, kwargs, deps, comptime, loadtime, CD, FN, compressed)

            if loadtime >= comptime:
                log.critical('Caching did not yield any gain.')
                log.critical('Consider executing function %s without caching.'
                             % funcname)
    else:
        # Non matching arguments or bytecodes signify a hash-collision.
        # This is resolved by recursive search of cache filenames
        # until either a matching or an unused filename is found.
        #
        (T, FN, Retrieved, reason, comptime, loadtime, compressed) = \
            CacheLookup(CD, FN+'x', my_F, args, kwargs, deps,
                        verbose, compression, dependencies)

        # The real reason is that args or bytecodes have changed.
        # Not that the recursive search has found an unused filename
        if not Retrieved:
            if not compare(bytecode, coderef):
                reason = 4  # Bytecode has changed
            else:
                reason = 3  # Arguments have changed

    # PADARN NOTE 17/12/12: Adding a special case to handle the existence of a
    # FitInterpolate object. C Structures are serialised so they can be pickled.
    #---------------------------------------------------------------------------
    from anuga.fit_interpolate.general_fit_interpolate import FitInterpolate

    import anuga.utilities.sparse_matrix_ext as sparse_matrix_ext
    import anuga.utilities.quad_tree_ext as quad_tree_ext
    from anuga.geometry.aabb import AABB

    if isinstance(T, FitInterpolate):
        # Deserialise the sparse-matrix structures and rebuild the quad tree
        # (they were serialised / dropped by save_results_to_cache).
        if hasattr(T, "D"):
            T.D = sparse_matrix_ext.deserialise_dok(T.D)
        if hasattr(T, "AtA"):
            T.AtA = sparse_matrix_ext.deserialise_dok(T.AtA)
        if hasattr(T, "root"):
            T.build_quad_tree(verbose=verbose)
    #---------------------------------------------------------------------------

    return((T, FN, Retrieved, reason, comptime, loadtime, compressed))
# -----------------------------------------------------------------------------
def clear_cache(CD, my_F=None, verbose=None):
    """Clear cache for my_F.
    USAGE:
        clear(CD, my_F, verbose)
    ARGUMENTS:
        CD      -- Caching directory (required)
        my_F    -- Function object (default: None)
        verbose -- Flag verbose output (default: None)
    DESCRIPTION:
        If my_F is None, clear everything (after interactive confirmation
        when verbose), otherwise clear only files pertaining to my_F.
    """
    import os, re

    if CD[-1] != os.sep:
        CD = CD+os.sep

    if verbose is None:
        verbose = options['verbose']

    # FIXME: Windows version needs to be tested

    if my_F:
        funcname = get_funcname(my_F)
        if verbose:
            # BUG FIX: the old "'Clearing %s' % CD+funcname+'*'" bound the %
            # operator to CD only (operator precedence), so funcname and '*'
            # were concatenated AFTER formatting. Parenthesise the operand.
            log.critical('Clearing %s' % (CD+funcname+'*'))

        file_names = os.listdir(CD)
        for file_name in file_names:
            # Prefix match on the function name selects its cache files
            if file_name[:len(funcname)] == funcname:
                if unix:
                    os.remove(CD+file_name)
                else:
                    os.system('del '+CD+file_name)
                    # FIXME: os.remove doesn't work under windows
    else:
        file_names = os.listdir(CD)
        if len(file_names) > 0:
            if verbose:
                log.critical('Remove the following files:')
                for file_name in file_names:
                    log.critical(' ' + file_name)

                A = input('Delete (Y/N)[N] ?')
            else:
                A = 'Y'

            if A == 'Y' or A == 'y':
                for file_name in file_names:
                    if unix:
                        os.remove(CD+file_name)
                    else:
                        os.system('del '+CD+file_name)
                        # FIXME: os.remove doesn't work under windows
# -----------------------------------------------------------------------------
def DeleteOldFiles(CD, verbose=None):
    """Remove expired files

    When the cache directory holds more than options['maxfiles'] entries,
    delete a block of the least-recently-used files (Unix only).

    USAGE:
        DeleteOldFiles(CD,verbose=None)
    """
    import os

    if verbose is None:
        verbose = options['verbose']

    limit = options['maxfiles']

    # FIXME: Windows version
    block = 1000  # How many files to delete per invokation
    entries = os.listdir(CD)
    count = len(entries)

    if not unix:
        return  # FIXME: Windows case ?

    if count > limit:
        surplus = count - limit + block
        if verbose:
            log.critical('Deleting %d expired files:' % surplus)
            os.system('ls -lur '+CD+'* | head -' + repr(surplus))  # List them
        os.system('ls -ur '+CD+'* | head -' + repr(surplus) + ' | xargs /bin/rm')
        # Delete them
        # FIXME: Replace this with os.listdir and os.remove
def save_args_to_cache(CD, FN, args, kwargs, compression):
    """Save arguments to cache

    Pickles (args, kwargs) into the 'Args' cache file for entry FN.

    USAGE:
        save_args_to_cache(CD,FN,args,kwargs,compression)
    """
    import time, os, sys

    argsfile, compressed = myopen(CD+FN+'_'+file_types[1], 'wb', compression)

    if argsfile is None:
        raise IOError('ERROR (caching): Could not open argsfile for writing: %s' % FN)

    # Save args and kwargs to cache
    mysave((args, kwargs), argsfile, compression)
    argsfile.close()

    # Change access rights if possible
    #
    #if unix:
    #  try:
    #    exitcode=os.system('chmod 666 '+argsfile.name)
    #  except:
    #    pass
    #else:
    #  pass  # FIXME: Take care of access rights under Windows

    return
# -----------------------------------------------------------------------------
def save_results_to_cache(T, CD, FN, my_F, deps, comptime, funcname,
                          dependencies, compression):
    """Save computed results T and admin info to cache
    USAGE:
        save_results_to_cache(T, CD, FN, my_F, deps, comptime, funcname,
                              dependencies, compression)
    RETURNS:
        savetime -- number of seconds spent writing the result file
    RAISES:
        IOError if either the result or the admin cache file cannot be opened.
    """
    import time, os, sys

    verbose = False

    # PADARN NOTE 17/12/12: Adding a special case to handle the existence of a
    # FitInterpolate object. C Structures are serialised so they can be pickled.
    #---------------------------------------------------------------------------
    from anuga.fit_interpolate.general_fit_interpolate import FitInterpolate
    import anuga.utilities.quad_tree_ext as quad_tree_ext
    import anuga.utilities.sparse_matrix_ext as sparse_matrix_ext
    from anuga.geometry.aabb import AABB

    if isinstance(T, FitInterpolate):
        # Serialise the C sparse-matrix structures so they survive pickling;
        # CacheLookup deserialises them again on retrieval.
        if hasattr(T, "D"):
            T.D = sparse_matrix_ext.serialise_dok(T.D)
        if hasattr(T, "AtA"):
            T.AtA = sparse_matrix_ext.serialise_dok(T.AtA)
        if hasattr(T, "root"):
            # The quad tree itself is not picklable; it is rebuilt on load
            # (see CacheLookup's build_quad_tree call).
            T.root.root = None
    #---------------------------------------------------------------------------

    (datafile, compressed1) = myopen(CD+FN+'_'+file_types[0], 'wb', compression)
    (admfile, compressed2) = myopen(CD+FN+'_'+file_types[2], 'wb', compression)

    if not datafile:
        if verbose:
            log.critical('ERROR: Could not open %s' % datafile.name)
        raise IOError

    if not admfile:
        if verbose:
            log.critical('ERROR: Could not open %s' % admfile.name)
        raise IOError

    t0 = time.time()

    mysave(T, datafile, compression)  # Save data to cache
    datafile.close()
    #savetime = round(time.time()-t0,2)
    savetime = time.time()-t0

    bytecode = get_bytecode(my_F)  # Get bytecode from function object
    admtup = (deps, comptime, bytecode, funcname)  # Gather admin info

    mysave(admtup, admfile, compression)  # Save admin info to cache
    admfile.close()

    # Change access rights if possible
    #
    #if unix:
    #  try:
    #    exitcode=os.system('chmod 666 '+datafile.name)
    #    exitcode=os.system('chmod 666 '+admfile.name)
    #  except:
    #    pass
    #else:
    #  pass  # FIXME: Take care of access rights under Windows

    return(savetime)
# -----------------------------------------------------------------------------
def load_from_cache(CD, FN, compression):
    """Load previously cached data from file FN

    Returns (T, loadtime, compressed, reason) where reason is non-zero
    when the file could not be read (see Reason_msg).

    USAGE:
        load_from_cache(CD,FN,compression)
    """
    import time

    datafile, compressed = myopen(CD+FN+'_'+file_types[0], "rb", compression)

    start = time.time()
    T, reason = myload(datafile, compressed)
    loadtime = time.time() - start

    datafile.close()

    return T, loadtime, compressed, reason
# -----------------------------------------------------------------------------
def myopen(FN, mode, compression=True):
    """Open file FN using given mode
    USAGE:
        myopen(FN, mode, compression=True)
    ARGUMENTS:
        FN -- File name to be opened
        mode -- Open mode (as in open)
        compression -- Flag zlib compression
    DESCRIPTION:
        if compression
            Attempt first to open FN + '.z'
            If this fails try to open FN
        else do the opposite
        Return file handle plus info about whether it was compressed or not.
    """
    import string

    # Determine if file exists already (if writing was requested)
    # This info is only used to determine if access modes should be set
    #
    if 'w' in mode or 'a' in mode:
        new_file = 1
        for candidate in (FN+'.z', FN):
            try:
                probe = open(candidate, 'r')
                probe.close()
                new_file = 0
                break
            except:
                pass
    else:
        new_file = 0  # Assume it exists if mode was not 'w'

    # Try the preferred name first, then the alternative spelling.
    compressed = 0
    if compression:
        first, second = FN+'.z', FN
    else:
        first, second = FN, FN+'.z'

    try:
        fobj = open(first, mode)
        if compression:
            compressed = 1  # Opened the '.z' variant
    except:
        try:
            fobj = open(second, mode)
            if not compression:
                compressed = 1  # Fallback was the '.z' variant
        except:
            fobj = None

    # Now set access rights if it is a new file
    #
    if fobj and new_file:
        if unix:
            exitcode = os.system('chmod 666 '+fobj.name)
        else:
            pass  # FIXME: Take care of access rights under Windows

    return(fobj, compressed)
# -----------------------------------------------------------------------------
def myload(file, compressed):
    """Load data from file

    Returns (R, reason) where reason is 6 ('Cached file was unreadable')
    when the stream cannot be decompressed or unpickled.

    USAGE:
        myload(file, compressed)
    """
    reason = 0
    try:
        if compressed:
            import zlib

            raw = file.read()
            try:
                pickled = zlib.decompress(raw)
            except:
                # Truncated or corrupt compressed stream
                # (e.g. zlib.error: Error -5 while decompressing data)
                reason = 6  # Unreadable file
                return None, reason
            del raw  # Free up some space
            R = pickler.loads(pickled)
        else:
            try:
                R = pickler.load(file)
            except:
                # Catch e.g., file with 0 length or corrupted
                reason = 6  # Unreadable file
                return None, reason
    except MemoryError:
        if options['verbose']:
            log.critical('ERROR: Out of memory while loading %s, aborting'
                         % file.name)

        # Raise the error again for now
        #
        raise MemoryError

    return R, reason
# -----------------------------------------------------------------------------
def mysave(T, file, compression):
    """Save data T to file

    Pickles T (optionally zlib-compressed at comp_level) into the given
    open file object.

    USAGE:
        mysave(T, file, compression)
    """
    bin = options['bin']

    if not compression:
        # Uncompressed pickling
        pickler.dump(T, file, bin)
        return

    try:
        import zlib
    except:
        log.critical()
        log.critical('*** Could not find zlib ***')
        log.critical('*** Try to run caching with compression off ***')
        log.critical("*** caching.set_option('compression', 0) ***")
        raise Exception

    try:
        Ts = pickler.dumps(T, bin)
    except MemoryError:
        msg = '****WARNING (caching.py): Could not pickle data for compression.'
        msg += ' Try using compression = False'
        raise MemoryError(msg)
    else:
        # Compressed pickling
        file.write(zlib.compress(Ts, comp_level))
# FIXME: This may not work on Windoze network drives.
# The error msg is IOError: [Errno 22] Invalid argument
# Testing with small files was OK, though.
# I think this is an OS problem.
# Excerpt from http://www.ultraseek.com/support/faqs/4173.html
# The error is caused when there is a problem with server disk access (I/0). This happens at the OS level, and there is no controlling these errors through the Ultraseek application.
#
#Ultraseek contains an embedded Python interpreter. The exception "exceptions.IOError: [Errno 22] Invalid argument" is generated by the Python interpreter. The exception is thrown when a disk access operation fails due to an I/O-related reason.
#
#The following extract is taken from the site http://www.python.org:
#
#---------------------------------------------------------------------------------------------
#exception IOError
#Raised when an I/O operation (such as a print statement, the built-in open() function or a method of a file object) fails for an I/O-related reason, e.g., ``file not found'' or ``disk full''.
#This class is derived from EnvironmentError. See the discussion above for more information on exception instance attributes.
#---------------------------------------------------------------------------------------------
#
#The error code(s) that accompany exceptions are described at:
#http://www.python.org/dev/doc/devel//lib/module-errno.html
#
#You can view several postings on this error message by going to http://www.python.org, and typing the below into the search box:
#
#exceptions.IOError invalid argument Errno 22
#try:
# pickler.dump(T,file,bin)
#except IOError, e:
# print e
# msg = 'Could not store to %s, bin=%s' %(file, bin)
# raise Exception(msg)
# -----------------------------------------------------------------------------
def myhash(T, ids=None):
    """Compute hashed integer from a range of inputs.
    If T is not hashable being e.g. a tuple T, myhash will recursively
    hash the values individually
    USAGE:
        myhash(T)
    ARGUMENTS:
        T -- Anything
    """
    # Containers and classes are traversed recursively, so guard against
    # self-referential structures by remembering object id's.
    # Replacing Python2: if type(T) in [TupleType, ListType, DictType, InstanceType]:
    if isinstance(T, (tuple, list, dict)) or type(T) is type:
        if ids is None:
            ids = []
        obj_id = id(T)
        if obj_id in ids:
            return 0  # T has been hashed already
        ids.append(obj_id)

    # On some architectures None, False and True gets different hash values,
    # so map them to fixed integers first.
    if T is None:
        return -1
    if T is False:
        return 0
    if T is True:
        return 1

    # Get hash values for hashable entries
    if isinstance(T, (tuple, list)):
        # Concatenate element hashes and hash the resulting string
        digest = ''
        for element in T:
            digest += str(myhash(element, ids))
        return hash(digest)

    if isinstance(T, dict):
        items = list(T.items())
        if system_tools.major_version == 2:
            # Make dictionary ordering unique
            items.sort()
        # As of Python 3.7 dicts are insertion ordered:
        # https://mail.python.org/pipermail/python-dev/2017-December/151283.html
        return myhash(items, ids)

    if isinstance(T, num.ndarray):
        T = num.array(T)  # Ensure array is contiguous
        # Use mean value for efficiency
        return hash(num.average(T.flat))

    if callable(T):
        # Hash the attribute dictionary, then hash that value again
        return myhash(myhash(T.__dict__, ids), ids)

    if type(T) is type:
        # Class object: use the attribute values
        return myhash(T.__dict__, ids)

    # This must be a simple Python type that should hash without problems
    return hash(str(T))
def compare(A, B, ids=None):
    """Safe comparison of general objects
    USAGE:
        compare(A,B)
    DESCRIPTION:
        Return True if A and B are identical, False otherwise
    """
    # Keep track of unique id's to protect against infinite recursion
    if ids is None:
        ids = {}

    # Check if this pair has already been encountered
    iA = id(A)
    iB = id(B)
    if (iA, iB) in ids:
        # A and B have been compared already
        return ids[(iA, iB)]
    else:
        # Provisional value; only serves to break reference cycles
        ids[(iA, iB)] = True

    # Check if arguments are of same type
    if type(A) != type(B):
        return False

    # Compare recursively based on argument type
    if isinstance(A, (tuple, list)):
        N = len(A)
        if len(B) != N:
            identical = False
        else:
            identical = True
            for i in range(N):
                if not compare(A[i], B[i], ids):
                    identical = False
                    break
    elif isinstance(A, dict):
        # BUG FIX: the previous code called compare(A, B, ids) on the dicts
        # themselves, which immediately hit the recursion guard above and
        # returned True for ANY two same-length dicts.  Compare keys and
        # values explicitly instead (dict keys are always hashable).
        if len(A) != len(B) or set(A.keys()) != set(B.keys()):
            identical = False
        else:
            identical = True
            for key in A:
                if not compare(A[key], B[key], ids):
                    identical = False
                    break
    elif isinstance(A, num.ndarray):
        # Use element by element comparison
        identical = num.alltrue(A == B)
    elif type(A) is type:
        # Elements are classes: base comparison on their attributes
        identical = compare(A.__dict__,
                            B.__dict__,
                            ids)
    else:
        # Fall back to general code
        try:
            identical = (A == B)
        except:
            import pickle
            # Use pickle to compare data
            # The native pickler must be used
            # since the faster cPickle does not
            # guarantee a unique translation
            # FIXME (Ole): Try to fall back on the dill pickler
            try:
                identical = (pickle.dumps(A, 0) == pickle.dumps(B, 0))
            except:
                identical = False

    # Record result of comparison and return
    ids[(iA, iB)] = identical
    return(identical)
# -----------------------------------------------------------------------------
def nospace(s):
    """Replace spaces in string s with underscores
    USAGE:
        nospace(s)
    ARGUMENTS:
        s -- string
    """
    # str.replace performs the substitution in a single C-level pass,
    # replacing the previous quadratic character-by-character concatenation.
    return s.replace(' ', '_')
# -----------------------------------------------------------------------------
def get_funcname(my_F):
    """Retrieve name of function object func (depending on its type)
    USAGE:
        get_funcname(my_F)
    """
    import string

    if type(my_F) == types.FunctionType:
        funcname = my_F.__name__
    elif type(my_F) == types.BuiltinFunctionType:
        funcname = my_F.__name__
    else:
        # Derive a name from repr(), stripping '<', '>' and quotes.
        if system_tools.major_version == 3:
            # BUG FIX: str.maketrans requires its two string arguments to be
            # of equal length; map each of the three characters to a space.
            tab = str.maketrans("<>'", "   ")
            tmp = str.translate(repr(my_F), tab)
            tmp = str.split(tmp)
        elif system_tools.major_version == 2:
            tab = string.maketrans("<>'", "   ")
            tmp = string.translate(repr(my_F), tab)
            tmp = string.split(tmp)
        else:
            # BUG FIX: '%' alone is an incomplete format spec and raises
            # ValueError; use '%s'.
            raise Exception('Unsupported version: %s' % system_tools.version)

        funcname = ' '.join(tmp)

        # Truncate memory address as in
        # class __main__.Dummy at 0x00A915D0
        index = funcname.find('at 0x')
        if index >= 0:
            funcname = funcname[:index+5]  # Keep info that there is an address

    funcname = nospace(funcname)
    return(funcname)
# -----------------------------------------------------------------------------
def get_bytecode(my_F):
    """ Get bytecode and associated values from function object.
    It is assumed that my_F is callable and is either
        a function
        a class
        a method
        a callable object
        or a builtin function
    USAGE:
        get_bytecode(my_F)
    """
    kind = type(my_F)

    if kind == types.FunctionType:
        # Plain function
        return get_func_code_details(my_F)

    if kind == types.MethodType:
        # Bound method: inspect the underlying function
        return get_func_code_details(my_F.__func__)

    if kind in [types.BuiltinFunctionType, types.BuiltinMethodType]:
        # Built-in functions are assumed not to change
        return None, 0, 0, 0

    if inspect.isclass(my_F):
        return get_func_code_details(my_F.__init__)

    if hasattr(my_F, '__call__'):
        bytecode = get_func_code_details(my_F.__call__.__func__)
        # Add hash value of object to detect attribute changes
        return bytecode + (myhash(my_F),)

    raise Exception('Unknown function type: %s' % type(my_F))
def get_func_code_details(my_F):
    """Extract co_code, co_consts, co_argcount, func_defaults
    """
    code = my_F.__code__
    return code.co_code, code.co_consts, code.co_argcount, my_F.__defaults__
# -----------------------------------------------------------------------------
def get_depstats(dependencies):
    """ Build dictionary of dependency files and their size, mod. time and ctime.
    USAGE:
        get_depstats(dependencies):
    RETURNS:
        Dictionary mapping each (glob-expanded) dependency filename to a
        (size, mtime) tuple.  Empty dict when dependencies is falsy.
    RAISES:
        Exception if a dependency is not a string or does not exist.
    """
    d = {}
    if dependencies:
        # Expand any wildcards
        import glob
        expanded_dependencies = []
        for FN in dependencies:
            expanded_FN = glob.glob(FN)

            if expanded_FN == []:
                errmsg = 'ERROR (caching.py): Dependency '+FN+' does not exist.'
                raise Exception(errmsg)

            expanded_dependencies += expanded_FN

        for FN in expanded_dependencies:
            # BUG FIX: 'basestring' does not exist in Python 3; use str.
            if not isinstance(FN, str):
                errmsg = 'ERROR (caching.py): Dependency must be a string.\n'
                errmsg += ' Dependency given: %s' % FN
                raise Exception(errmsg)
            if not os.access(FN, os.F_OK):
                errmsg = 'ERROR (caching.py): Dependency '+FN+' does not exist.'
                raise Exception(errmsg)
            (size, atime, mtime, ctime) = filestat(FN)

            # We don't use atime because that would cause recomputation every time.
            # We don't use ctime because that is irrelevant and confusing for users.
            d.update({FN: (size, mtime)})

    return(d)
# -----------------------------------------------------------------------------
def filestat(FN):
    """A safe wrapper using os.stat to get basic file statistics
    The built-in os.stat breaks down if file sizes are too large (> 2GB ?)
    USAGE:
        filestat(FN)
    RETURNS:
        (size, atime, mtime, ctime) with size converted to int.
    DESCRIPTION:
        Must compile Python with
        CFLAGS="`getconf LFS_CFLAGS`" OPT="-g -O2 $CFLAGS" \
        configure
        as given in section 8.1.1 Large File Support in the Libray Reference
    """
    import os, time

    try:
        stats = os.stat(FN)
        size = stats[6]    # st_size
        atime = stats[7]   # st_atime
        mtime = stats[8]   # st_mtime
        ctime = stats[9]   # st_ctime
    except:
        # Hack to get the results anyway (works only on Unix at the moment)
        # by shelling out to 'ls --full-time' and parsing its output.
        #
        log.critical('Hack to get os.stat when files are too large')

        if unix:
            tmp = '/tmp/cach.tmp.'+repr(time.time())+repr(os.getpid())
            # Unique filename, FIXME: Use random number

            # Get size and access time (atime)
            #
            exitcode = os.system('ls -l --full-time --time=atime '+FN+' > '+tmp)
            (size, atime) = get_lsline(tmp)

            # Get size and modification time (mtime)
            #
            exitcode = os.system('ls -l --full-time '+FN+' > '+tmp)
            (size, mtime) = get_lsline(tmp)

            # Get size and ctime
            #
            exitcode = os.system('ls -l --full-time --time=ctime '+FN+' > '+tmp)
            (size, ctime) = get_lsline(tmp)

            try:
                exitcode = os.system('rm '+tmp)
                # FIXME: Gives error if file doesn't exist
            except:
                pass
        else:
            pass
            raise Exception  # FIXME: Windows case

    return(int(size), atime, mtime, ctime)
# -----------------------------------------------------------------------------
def get_lsline(FN):
    """get size and time for filename

    USAGE:
      get_lsline(file_name)

    DESCRIPTION:
      Read in one line 'ls -la' item from file (generated by filestat) and
      convert time to seconds since epoch. Return file size and time.

      Expects fields in 'ls -l --full-time' order, i.e. field 4 is the
      size and fields 5-9 are weekday, month, day, time and year.

    RETURNS:
      (size, t) where size is the size field as a string and t is the
      timestamp in seconds since the epoch (float).
    """
    import time

    # BUGFIX: string.split() was removed in Python 3 -- use the str method.
    # The file is now closed deterministically via a context manager, and the
    # intermediate variable no longer shadows the builtin 'str'.
    with open(FN, 'r') as f:
        fields = f.read().split()

    size = fields[4]
    # Reassemble "weekday month day HH:MM:SS year" for time.strptime's
    # default format '%a %b %d %H:%M:%S %Y'.
    timestr = ' '.join(fields[5:10])
    t = time.mktime(time.strptime(timestr))
    return (size, t)
# -----------------------------------------------------------------------------
def checkdir(CD, verbose=None, warn=False):
    """Check or create caching directory

    USAGE:
      checkdir(CD,verbose):

    ARGUMENTS:
      CD -- Directory
      verbose -- Flag verbose output (default: None)
      warn -- Log a warning if CD cannot be created (default: False)

    RETURNS:
      The usable cache directory name, always terminated by os.sep.
      If CD could not be created, a platform fallback ('/tmp/' on Unix,
      'C:' otherwise) is returned instead.

    DESCRIPTION:
      If CD does not exist it will be created if possible
    """
    import os
    import os.path

    if CD[-1] != os.sep:
        CD = CD + os.sep  # Add separator for directories

    CD = os.path.expanduser(CD)  # Expand ~ or ~user in pathname
    # BUGFIX: the original tested os.access(CD, os.R_OK and os.W_OK); the
    # boolean 'and' evaluates to os.W_OK only, so readability was never
    # checked.  Permission flags must be combined bitwise.
    if not (os.access(CD, os.R_OK | os.W_OK) or CD == ''):
        try:
            os.mkdir(CD)

            # Change access rights if possible
            # NOTE(review): 'unix' and 'log' are module-level globals.
            if unix:
                os.system('chmod 777 ' + CD)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: log.critical('MESSAGE: Directory %s created.' % CD)
        except:
            # Best-effort: fall back to a temp location rather than failing.
            if warn is True:
                log.critical('WARNING: Directory %s could not be created.' % CD)
            if unix:
                CD = '/tmp/'
            else:
                CD = 'C:'
            if warn is True:
                log.critical('Using directory %s instead' % CD)

    return CD
# Ensure the module-level cache directory exists at import time; checkdir
# falls back to a platform default ('/tmp/' or 'C:') if creation fails.
checkdir(cachedir, warn=True)
#==============================================================================
# Statistics
#==============================================================================
def addstatsline(CD, funcname, FN, Retrieved, reason, comptime, loadtime,
                 compression):
    """Add stats entry

    USAGE:
      addstatsline(CD,funcname,FN,Retrieved,reason,comptime,loadtime,compression)

    DESCRIPTION:
      Make one entry in the stats file about one cache hit recording time saved
      and other statistics. The data are used by the function cachestat.
      Writing is strictly best-effort: all failures are swallowed with a
      warning so that statistics can never break the caching machinery.
    """
    import os, time

    try:
        TimeTuple = time.localtime(time.time())
        extension = time.strftime('%b%Y',TimeTuple)
        SFN = CD+statsfile+'.'+extension  # 'statsfile' is a module-level name
        #statfile = open(SFN,'a')
        (statfile, dummy) = myopen(SFN,'a',compression=0)

        # Change access rights if possible
        #
        #if unix:
        #  try:
        #    exitcode=os.system('chmod 666 '+SFN)
        #  except:
        #    pass
    except:
        # NOTE(review): if this branch is taken, TimeTuple and statfile stay
        # undefined; the second try below then aborts through its own except.
        log.critical('Warning: Stat file could not be opened')

    try:
        if 'USER' in os.environ:
            user = os.environ['USER']
        else:
            user = 'Nobody'

        date = time.asctime(TimeTuple)

        if Retrieved:
            hit = '1'
        else:
            hit = '0'

        # Get size of result file
        #
        if compression:
            stats = os.stat(CD+FN+'_'+file_types[0]+'.z')
        else:
            stats = os.stat(CD+FN+'_'+file_types[0])

        if stats:
            size = stats[6]
        else:
            size = -1  # Error condition, but don't crash. This is just statistics

        # Build entry
        #
        entry = date + ',' +\
                user + ',' +\
                FN + ',' +\
                str(int(size)) + ',' +\
                str(compression) + ',' +\
                hit + ',' +\
                str(reason) + ',' +\
                str(round(comptime,4)) + ',' +\
                str(round(loadtime,4)) +\
                CR

        statfile.write(entry)
        statfile.close()
    except:
        log.critical('Warning: Writing of stat file failed')
# -----------------------------------------------------------------------------
# FIXME: should take cachedir as an optional arg
#
def __cachestat(sortidx=4, period=-1, showuser=None, cachedir=None):
    """ List caching statistics.

    USAGE:
      __cachestat(sortidx=4,period=-1,showuser=None,cachedir=None):

      Generate statistics of caching efficiency.
      The parameter sortidx determines by what field lists are sorted.
      If the optional keyword period is set to -1,
      all available caching history is used.
      If it is 0 only the current month is used.
      Future versions will include more than one month....
      OMN 20/8/2000
    """
    import os
    import os.path
    # BUGFIX: 'from string import split, rstrip, find' is gone -- those
    # functions were removed from the string module in Python 3.  The
    # equivalent str methods are used below.  'time' itself is now imported
    # so the period != -1 branch no longer raises a NameError.
    from time import strptime, localtime, strftime, mktime, ctime, time

    # sortidx = 4  # Index into Fields[1:]. What to sort by.

    Fields = ['Name', 'Hits', 'Exec(s)',
              'Cache(s)', 'Saved(s)', 'Gain(%)', 'Size']
    Widths = [25, 7, 9, 9, 9, 9, 13]
    Types = ['s', 'd', '.2f', '.2f', '.2f', '.2f', 'd']
    Dictnames = ['Function', 'User']

    if not cachedir:
        cachedir = checkdir(options['cachedir'])

    SD = os.path.expanduser(cachedir)  # Expand ~ or ~user in pathname

    if period == -1:  # Take all available stats
        SFILENAME = statsfile
    else:  # Only stats from current month
        # MAKE THIS MORE GENERAL SO period > 0 counts several months backwards!
        TimeTuple = localtime(time())
        extension = strftime('%b%Y', TimeTuple)
        SFILENAME = statsfile + '.' + extension

    DIRLIST = os.listdir(SD)
    SF = []
    for FN in DIRLIST:
        if FN.find(SFILENAME) >= 0:
            SF.append(FN)

    blocksize = 15000000

    total_read = 0
    total_hits = 0
    total_discarded = 0
    firstday = mktime(strptime('2030', '%Y'))
    # FIXME: strptime don't exist in WINDOWS ?
    lastday = 0

    FuncDict = {}
    UserDict = {}
    for FN in SF:
        # Renamed from 'input' to avoid shadowing the builtin.
        statsfid = open(SD + FN, 'r')
        # BUGFIX: '%' binds tighter than '+'; group the path explicitly so
        # the whole filename is formatted into the message.
        log.critical('Reading file %s' % (SD + FN))

        while True:
            A = statsfid.readlines(blocksize)
            if len(A) == 0: break
            total_read = total_read + len(A)
            for record in A:
                record = tuple(record.rstrip().split(','))

                if len(record) == 9:
                    timestamp = record[0]

                    try:
                        t = mktime(strptime(timestamp))
                    except:
                        total_discarded = total_discarded + 1
                        continue

                    if t > lastday:
                        lastday = t
                    if t < firstday:
                        firstday = t

                    user = record[1]
                    my_F = record[2]

                    # Strip hash-stamp off
                    #
                    i = my_F.find('[')
                    my_F = my_F[:i]

                    size = float(record[3])

                    # Compression keyword can be Boolean
                    if record[4] in ['True', '1']:
                        compression = 1
                    elif record[4] in ['False', '0']:
                        compression = 0
                    else:
                        log.critical('Unknown value of compression %s' % str(record[4]))
                        log.critical(str(record))
                        total_discarded = total_discarded + 1
                        continue

                    hit = int(record[5])
                    reason = int(record[6])  # Not used here
                    cputime = float(record[7])
                    loadtime = float(record[8])

                    if hit:
                        total_hits = total_hits + 1
                        saving = cputime - loadtime

                        if cputime != 0:
                            rel_saving = round(old_div(100.0 * saving, cputime), 2)
                        else:
                            rel_saving = 100.0 - round(1.0 * saving, 2)  # A bit of a hack

                        info = [1, cputime, loadtime, saving, rel_saving, size]

                        UpdateDict(UserDict, user, info)
                        UpdateDict(FuncDict, my_F, info)
                    else:
                        pass  # Stats on recomputations and their reasons could go in here
                else:
                    total_discarded = total_discarded + 1

        statsfid.close()

    # Compute averages of all sums and write list
    #
    if total_read == 0:
        printline(Widths, '=')
        log.critical('CACHING STATISTICS: No valid records read')
        printline(Widths, '=')
        return

    log.critical()
    printline(Widths, '=')
    log.critical('CACHING STATISTICS: ' + ctime(firstday) + ' to ' + ctime(lastday))
    printline(Widths, '=')
    log.critical(' Total number of valid records %d' % total_read)
    log.critical(' Total number of discarded records %d' % total_discarded)
    log.critical(' Total number of hits %d' % total_hits)
    log.critical()

    log.critical(' Fields %s are averaged over number of hits' % Fields[2:])
    log.critical(' Time is measured in seconds and size in bytes')
    log.critical(' Tables are sorted by %s' % Fields[1:][sortidx])

    if showuser:
        Dictionaries = [FuncDict, UserDict]
    else:
        Dictionaries = [FuncDict]

    i = 0
    for Dict in Dictionaries:
        # Convert sums (fields 1..) into per-hit averages; field 0 is the count.
        for key in list(Dict.keys()):
            rec = Dict[key]
            for n in range(len(rec)):
                if n > 0:
                    rec[n] = round(old_div(1.0 * rec[n], rec[0]), 2)
            Dict[key] = rec

        # Sort and output
        #
        keylist = SortDict(Dict, sortidx)

        # Write Header
        #
        log.critical()
        printline(Widths, '-')
        n = 0
        for s in Fields:
            if s == Fields[0]:  # Left justify
                s = Dictnames[i] + ' ' + s; i = i + 1
                log.critical('%-*s' % (Widths[n], s))
                n += 1
            else:
                log.critical('%*s' % (Widths[n], s))
                n += 1
        log.critical()
        printline(Widths, '-')

        # Output Values
        #
        for key in keylist:
            rec = Dict[key]
            n = 0
            if len(key) > Widths[n]: key = key[:Widths[n] - 3] + '...'
            log.critical('%-*s' % (Widths[n], str(key)))
            n += 1
            for val in rec:
                # BUGFIX: the original printed str(key) here, repeating the
                # row name instead of its statistics (the commented-out exec
                # template used %val); print the value.
                log.critical('%*s' % (Widths[n], str(val)))
                n += 1
            log.critical()
    log.critical()
#==============================================================================
# Auxiliary stats functions
#==============================================================================
def UpdateDict(Dict, key, info):
    """Accumulate *info* into *Dict* under *key*.

    USAGE:
      UpdateDict(Dict,key,info)

    If the key is already present, each field of the stored list is
    incremented in place by the corresponding field of info; otherwise a
    copy of info is stored.  Returns Dict (also mutated in place).
    """
    try:
        totals = Dict[key]
    except KeyError:
        Dict[key] = list(info)  # Store a copy, never the caller's list
    else:
        for idx in range(len(totals)):
            totals[idx] = info[idx] + totals[idx]
        Dict[key] = totals
    return Dict
# -----------------------------------------------------------------------------
def SortDict(Dict, sortidx=0):
    """Sort dictionary

    USAGE:
      SortDict(Dict,sortidx):

    DESCRIPTION:
      Sort dictionary of lists according field number 'sortidx'.
      Scalar values are treated as one-element lists.  Returns the list
      of keys ordered by that field.

    RAISES:
      IndexError -- if sortidx exceeds the length of any record.
    """
    decorated = []
    for key in list(Dict.keys()):
        rec = Dict[key]
        if not isinstance(rec, (list, tuple)):
            rec = [rec]
        if sortidx > len(rec) - 1:
            raise IndexError(
                'ERROR: Sorting index too large, sortidx = %s' % str(sortidx))
        decorated.append((rec[sortidx], key))
    decorated.sort()
    return [key for _, key in decorated]
# -----------------------------------------------------------------------------
def printline(Widths, char):
    """Print textline in fixed field.

    USAGE:
      printline(Widths,char)

    Emits one log line of repeated *char*: each field is Widths[n] wide,
    with a single extra separator char after every field but the first.
    """
    pieces = []
    for idx, width in enumerate(Widths):
        pieces.append(width * char)
        if idx > 0:
            pieces.append(char)
    log.critical(''.join(pieces))
#==============================================================================
# Messages
#==============================================================================
def msg1(funcname, args, kwargs, reason):
    """Message 1

    USAGE:
      msg1(funcname, args, kwargs, reason):

    DESCRIPTION:
      Log a header box announcing evaluation of funcname, then its
      arguments (msg7) and the recomputation reason (msg8).
    """
    # Removed dead 'import string' -- nothing from that module was used.
    print_header_box('Evaluating function %s' % funcname)
    msg7(args, kwargs)
    msg8(reason)
    print_footer()
# -----------------------------------------------------------------------------
def msg2(funcname, args, kwargs, comptime, reason):
    """Message 2

    USAGE:
      msg2(funcname, args, kwargs, comptime, reason)

    DESCRIPTION:
      Log the 'storing' statistics header followed by the function name,
      its arguments, the reason, and the CPU time spent computing.
    """
    # Removed dead 'import string' and the commented-out Reason_msg lookup.
    print_header_box('Caching statistics (storing)')

    msg6(funcname, args, kwargs)
    msg8(reason)

    log.critical(str.ljust('| CPU time:', textwidth1) +
                 str(round(comptime, 2)) + ' seconds')
# -----------------------------------------------------------------------------
def msg3(savetime, CD, FN, deps, compression):
    """Message 3

    USAGE:
      msg3(savetime, CD, FN, deps, compression)

    DESCRIPTION:
      Log the estimated loading time and the dependency stats (msg5).
    """
    # Removed dead 'import string' -- nothing from that module was used.
    log.critical(str.ljust('| Loading time:', textwidth1) +
                 str(round(savetime, 2)) + ' seconds (estimated)')
    msg5(CD, FN, deps, compression)
# -----------------------------------------------------------------------------
def msg4(funcname, args, kwargs, deps, comptime, loadtime, CD, FN, compression):
    """Message 4

    USAGE:
      msg4(funcname, args, kwargs, deps, comptime, loadtime, CD, FN, compression)

    DESCRIPTION:
      Log the 'retrieving' statistics header, the function and arguments,
      then the CPU time, loading time, the net time saved and the
      dependency stats (msg5).
    """
    # Removed dead 'import string' -- nothing from that module was used.
    print_header_box('Caching statistics (retrieving)')

    msg6(funcname, args, kwargs)
    log.critical(str.ljust('| CPU time:', textwidth1) +
                 str(round(comptime, 2)) + ' seconds')
    log.critical(str.ljust('| Loading time:', textwidth1) +
                 str(round(loadtime, 2)) + ' seconds')
    log.critical(str.ljust('| Time saved:', textwidth1) +
                 str(round(comptime - loadtime, 2)) + ' seconds')
    msg5(CD, FN, deps, compression)
# -----------------------------------------------------------------------------
def msg5(CD, FN, deps, compression):
    """Message 5

    USAGE:
      msg5(CD, FN, deps, compression)

    DESCRIPTION:
      Print dependency stats. Used by msg3 and msg4.
      Lists each cached result file with its size, then every dependency
      with its modification time and size, column-aligned.
    """
    import os, time  # 'string' dropped from the import: it was unused

    log.critical('|')
    log.critical(str.ljust('| Caching dir: ', textwidth1) + CD)

    if compression:
        suffix = '.z'
        bytetext = 'bytes, compressed'
    else:
        suffix = ''
        bytetext = 'bytes'

    for file_type in file_types:
        file_name = FN + '_' + file_type + suffix
        stats = os.stat(CD + file_name)
        log.critical(str.ljust('| ' + file_type + ' file: ', textwidth1) +
                     file_name + '(' + str(stats[6]) + ' ' + bytetext + ')')

    log.critical('|')
    if len(deps) > 0:
        log.critical('| Dependencies: ')
        dependencies = list(deps.keys())
        # First pass: collect strings and column widths for alignment.
        dlist = []; maxd = 0
        tlist = []; maxt = 0
        slist = []; maxs = 0
        for d in dependencies:
            stats = deps[d]
            t = time.ctime(stats[1])  # deps maps name -> (size, mtime)
            s = str(stats[0])
            if len(d) > maxd: maxd = len(d)
            if len(t) > maxt: maxt = len(t)
            if len(s) > maxs: maxs = len(s)
            dlist.append(d)
            tlist.append(t)
            slist.append(s)

        # Second pass: emit aligned rows.
        for n in range(len(dlist)):
            d = str.ljust(dlist[n] + ':', maxd + 1)
            t = str.ljust(tlist[n], maxt)
            s = str.rjust(slist[n], maxs)
            log.critical('| %s %s %s bytes' % (d, t, s))
    else:
        log.critical('| No dependencies')
    print_footer()
# -----------------------------------------------------------------------------
def msg6(funcname, args, kwargs):
    """Message 6

    USAGE:
      msg6(funcname, args, kwargs)

    DESCRIPTION:
      Log the function name followed by its arguments (msg7).
    """
    # Removed dead 'import string' -- nothing from that module was used.
    log.critical(str.ljust('| Function:', textwidth1) + funcname)
    msg7(args, kwargs)
# -----------------------------------------------------------------------------
def msg7(args, kwargs):
    """Message 7

    USAGE:
      msg7(args, kwargs):

    DESCRIPTION:
      Log the positional and keyword arguments, truncated via mkargstr;
      logs '| No arguments' if both are empty.
    """
    # Removed dead 'import string' -- nothing from that module was used.
    args_present = 0
    if args:
        if len(args) == 1:
            log.critical(str.ljust('| Argument:', textwidth1) +
                         mkargstr(args[0], textwidth2))
        else:
            log.critical(str.ljust('| Arguments:', textwidth1) +
                         mkargstr(args, textwidth2))
        args_present = 1

    if kwargs:
        if len(kwargs) == 1:
            log.critical(str.ljust('| Keyword Arg:', textwidth1) +
                         mkargstr(kwargs, textwidth2))
        else:
            log.critical(str.ljust('| Keyword Args:', textwidth1) +
                         mkargstr(kwargs, textwidth2))
        args_present = 1

    if not args_present:
        log.critical('| No arguments')  # Default if no args or kwargs present
# -----------------------------------------------------------------------------
def msg8(reason):
    """Message 8

    USAGE:
      msg8(reason):

    DESCRIPTION:
      Log the human-readable recomputation reason, looked up in the
      module-level Reason_msg mapping; 'Unknown' if the lookup fails.
    """
    # Removed dead 'import string'; narrowed the bare except so that
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        R = Reason_msg[reason]
    except Exception:
        R = 'Unknown'

    log.critical(str.ljust('| Reason:', textwidth1) + R)
# -----------------------------------------------------------------------------
def print_header_box(line):
    """Print line in a nice box.

    USAGE:
      print_header_box(line)

    Prefixes the line with a timestamp, draws a rule above and below it,
    and records the rule width in the module-level textwidth3 so that
    print_footer can match it.
    """
    global textwidth3
    import time

    stamped = time.ctime(time.time()) + '. ' + line
    width = len(stamped) + 1
    rule = '+' + '-' * width + CR
    log.critical(rule + '| ' + stamped + CR + rule)
    textwidth3 = width
# -----------------------------------------------------------------------------
def print_footer():
    """Print line same width as that of print_header_box.

    Uses the module-level textwidth3 set by the most recent
    print_header_box call.
    """
    log.critical('+' + '-' * textwidth3 + CR)
# -----------------------------------------------------------------------------
def mkargstr(args, textwidth, argstr = '', level=0):
    """ Generate a string containing first textwidth characters of arguments.

    USAGE:
      mkargstr(args, textwidth, argstr = '', level=0)

    DESCRIPTION:
      Exactly the same as str(args) possibly followed by truncation,
      but faster if args is huge.  Recurses into tuples, lists and dicts;
      'level' guards against circular structures, and any result longer
      than textwidth is cut and suffixed with '...'.
    """
    if level > 10:
        # Protect against circular structures
        return '...'

    WasTruncated = 0

    if not isinstance(args, (tuple, list, dict)):
        # NOTE(review): 'basestring' must come from a module-level compat
        # import (e.g. past.builtins) -- confirm, it is not a Py3 builtin.
        if isinstance(args, basestring):
            argstr = argstr + "'"+str(args)+"'"
        else:
            # Truncate large numeric arrays before using str()
            # NOTE(review): 'num' is presumably numpy imported at module
            # level under that alias -- confirm.
            if isinstance(args, num.ndarray):
                # if len(args.flat) > textwidth:
                # Changed by Duncan and Nick 21/2/07 .flat has problems with
                # non-contigous arrays and ravel is equal to .flat except it
                # can work with non-contiguous arrays
                if len(num.ravel(args)) > textwidth:
                    args = 'Array: ' + str(args.shape)

            argstr = argstr + str(args)
    else:
        if isinstance(args, dict):
            argstr = argstr + "{"
            for key in list(args.keys()):
                # Recurse into both key and value, bumping the depth guard.
                argstr = argstr + mkargstr(key, textwidth, level=level+1) + ": " + \
                         mkargstr(args[key], textwidth, level=level+1) + ", "

                if len(argstr) > textwidth:
                    WasTruncated = 1
                    break
            argstr = argstr[:-2]  # Strip off trailing comma
            argstr = argstr + "}"
        else:
            if isinstance(args, tuple):
                lc = '('
                rc = ')'
            else:
                lc = '['
                rc = ']'
            argstr = argstr + lc
            for arg in args:
                argstr = argstr + mkargstr(arg, textwidth, level=level+1) + ', '

                if len(argstr) > textwidth:
                    WasTruncated = 1
                    break

            # Strip off trailing comma and space unless singleton tuple
            #
            if isinstance(args, tuple) and len(args) == 1:
                argstr = argstr[:-1]
            else:
                argstr = argstr[:-2]
            argstr = argstr + rc

    if len(argstr) > textwidth:
        WasTruncated = 1

    if WasTruncated:
        argstr = argstr[:textwidth]+'...'
    return(argstr)
# -----------------------------------------------------------------------------
def logtestOK(msg):
    """Print OK msg if test is OK.

    USAGE
      logtestOK(message)

    Logs the message left-justified to textwidth4 followed by ' - OK'.
    """
    # Removed dead 'import string' -- nothing from that module was used.
    log.critical(str.ljust(msg, textwidth4) + ' - OK')
# -----------------------------------------------------------------------------
def logtesterror(msg):
    """Print error if test fails.

    USAGE
      logtesterror(message)

    Logs the failure details and then raises to abort the test run.
    """
    for text in ('ERROR (caching.test): %s' % msg,
                 'Please send this code example and output to ',
                 '<EMAIL>'):
        log.critical(text)
    log.critical()
    log.critical()

    raise Exception
#-------------------------------------------------------------
if __name__ == "__main__":
    # Library module: nothing to run when executed directly.
    pass
| [
"os.mkdir",
"string.split",
"os.remove",
"time",
"past.utils.old_div",
"numpy.ravel",
"string.maketrans",
"time.ctime",
"time.strftime",
"dill.loads",
"time.mktime",
"glob.glob",
"builtins.range",
"os.path.join",
"os.path.expanduser",
"time.asctime",
"builtins.input.readlines",
"in... | [((2814, 2846), 'os.path.join', 'os.path.join', (['homedir', 'cache_dir'], {}), '(homedir, cache_dir)\n', (2826, 2846), False, 'import os, time, string\n'), ((2404, 2423), 'os.getenv', 'getenv', (['"""ANUGADATA"""'], {}), "('ANUGADATA')\n", (2410, 2423), False, 'from os import getenv\n'), ((16818, 16880), 'anuga.utilities.log.critical', 'log.critical', (['"""\nTesting caching module - please stand by\n"""'], {}), '("""\nTesting caching module - please stand by\n""")\n', (16830, 16880), True, 'import anuga.utilities.log as log\n'), ((18176, 18192), 'builtins.range', 'range', (['comprange'], {}), '(comprange)\n', (18181, 18192), False, 'from builtins import range\n'), ((23231, 23242), 'time.time', 'time.time', ([], {}), '()\n', (23240, 23242), False, 'import time\n'), ((23336, 23347), 'time.time', 'time.time', ([], {}), '()\n', (23345, 23347), False, 'import time\n'), ((35357, 35371), 'os.listdir', 'os.listdir', (['CD'], {}), '(CD)\n', (35367, 35371), False, 'import os, time, string\n'), ((38296, 38307), 'time.time', 'time.time', ([], {}), '()\n', (38305, 38307), False, 'import time\n'), ((39284, 39295), 'time.time', 'time.time', ([], {}), '()\n', (39293, 39295), False, 'import time\n'), ((55825, 55843), 'string.split', 'string.split', (['info'], {}), '(info)\n', (55837, 55843), False, 'import string\n'), ((56007, 56025), 'time.strptime', 'time.strptime', (['str'], {}), '(str)\n', (56020, 56025), False, 'import time\n'), ((56032, 56052), 'time.mktime', 'time.mktime', (['timetup'], {}), '(timetup)\n', (56043, 56052), False, 'import time\n'), ((56552, 56574), 'os.path.expanduser', 'os.path.expanduser', (['CD'], {}), '(CD)\n', (56570, 56574), False, 'import os, time, string\n'), ((60473, 60501), 'os.path.expanduser', 'os.path.expanduser', (['cachedir'], {}), '(cachedir)\n', (60491, 60501), False, 'import os, time, string\n'), ((60861, 60875), 'os.listdir', 'os.listdir', (['SD'], {}), '(SD)\n', (60871, 60875), False, 'import os, time, string\n'), ((63544, 
63558), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (63556, 63558), True, 'import anuga.utilities.log as log\n'), ((63686, 63749), 'anuga.utilities.log.critical', 'log.critical', (["(' Total number of valid records %d' % total_read)"], {}), "(' Total number of valid records %d' % total_read)\n", (63698, 63749), True, 'import anuga.utilities.log as log\n'), ((63752, 63824), 'anuga.utilities.log.critical', 'log.critical', (["(' Total number of discarded records %d' % total_discarded)"], {}), "(' Total number of discarded records %d' % total_discarded)\n", (63764, 63824), True, 'import anuga.utilities.log as log\n'), ((63827, 63881), 'anuga.utilities.log.critical', 'log.critical', (["(' Total number of hits %d' % total_hits)"], {}), "(' Total number of hits %d' % total_hits)\n", (63839, 63881), True, 'import anuga.utilities.log as log\n'), ((63884, 63898), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (63896, 63898), True, 'import anuga.utilities.log as log\n'), ((63902, 63975), 'anuga.utilities.log.critical', 'log.critical', (["(' Fields %s are averaged over number of hits' % Fields[2:])"], {}), "(' Fields %s are averaged over number of hits' % Fields[2:])\n", (63914, 63975), True, 'import anuga.utilities.log as log\n'), ((63978, 64041), 'anuga.utilities.log.critical', 'log.critical', (['""" Time is measured in seconds and size in bytes"""'], {}), "(' Time is measured in seconds and size in bytes')\n", (63990, 64041), True, 'import anuga.utilities.log as log\n'), ((64044, 64107), 'anuga.utilities.log.critical', 'log.critical', (["(' Tables are sorted by %s' % Fields[1:][sortidx])"], {}), "(' Tables are sorted by %s' % Fields[1:][sortidx])\n", (64056, 64107), True, 'import anuga.utilities.log as log\n'), ((66974, 66989), 'anuga.utilities.log.critical', 'log.critical', (['s'], {}), '(s)\n', (66986, 66989), True, 'import anuga.utilities.log as log\n'), ((69393, 69410), 'anuga.utilities.log.critical', 'log.critical', 
(['"""|"""'], {}), "('|')\n", (69405, 69410), True, 'import anuga.utilities.log as log\n'), ((69850, 69867), 'anuga.utilities.log.critical', 'log.critical', (['"""|"""'], {}), "('|')\n", (69862, 69867), True, 'import anuga.utilities.log as log\n'), ((72585, 72623), 'anuga.utilities.log.critical', 'log.critical', (["(s + '| ' + line + CR + s)"], {}), "(s + '| ' + line + CR + s)\n", (72597, 72623), True, 'import anuga.utilities.log as log\n'), ((72866, 72881), 'anuga.utilities.log.critical', 'log.critical', (['s'], {}), '(s)\n', (72878, 72881), True, 'import anuga.utilities.log as log\n'), ((75536, 75582), 'anuga.utilities.log.critical', 'log.critical', (["('ERROR (caching.test): %s' % msg)"], {}), "('ERROR (caching.test): %s' % msg)\n", (75548, 75582), True, 'import anuga.utilities.log as log\n'), ((75585, 75645), 'anuga.utilities.log.critical', 'log.critical', (['"""Please send this code example and output to """'], {}), "('Please send this code example and output to ')\n", (75597, 75645), True, 'import anuga.utilities.log as log\n'), ((75648, 75671), 'anuga.utilities.log.critical', 'log.critical', (['"""<EMAIL>"""'], {}), "('<EMAIL>')\n", (75660, 75671), True, 'import anuga.utilities.log as log\n'), ((75674, 75688), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (75686, 75688), True, 'import anuga.utilities.log as log\n'), ((75691, 75705), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (75703, 75705), True, 'import anuga.utilities.log as log\n'), ((2462, 2486), 'os.getenv', 'getenv', (['"""INUNDATIONHOME"""'], {}), "('INUNDATIONHOME')\n", (2468, 2486), False, 'from os import getenv\n'), ((2631, 2648), 'os.getenv', 'getenv', (['"""LOGNAME"""'], {}), "('LOGNAME')\n", (2637, 2648), False, 'from os import getenv\n'), ((11647, 11659), 'builtins.str', 'str', (['arghash'], {}), '(arghash)\n', (11650, 11659), False, 'from builtins import str\n'), ((17093, 17101), 'builtins.range', 'range', (['N'], {}), '(N)\n', (17098, 17101), False, 
'from builtins import range\n'), ((23309, 23320), 'time.time', 'time.time', ([], {}), '()\n', (23318, 23320), False, 'import time\n'), ((23414, 23425), 'time.time', 'time.time', ([], {}), '()\n', (23423, 23425), False, 'import time\n'), ((23734, 23748), 'os.listdir', 'os.listdir', (['CD'], {}), '(CD)\n', (23744, 23748), False, 'import os, time, string\n'), ((28501, 28627), 'anuga.utilities.log.critical', 'log.critical', (["('Caching: looking for cached files %s_{%s,%s,%s}.z' % (CD + FN, file_types\n [0], file_types[1], file_types[2]))"], {}), "('Caching: looking for cached files %s_{%s,%s,%s}.z' % (CD + FN,\n file_types[0], file_types[1], file_types[2]))\n", (28513, 28627), True, 'import anuga.utilities.log as log\n'), ((34025, 34039), 'os.listdir', 'os.listdir', (['CD'], {}), '(CD)\n', (34035, 34039), False, 'import os, time, string\n'), ((34384, 34398), 'os.listdir', 'os.listdir', (['CD'], {}), '(CD)\n', (34394, 34398), False, 'import os, time, string\n'), ((38434, 38445), 'time.time', 'time.time', ([], {}), '()\n', (38443, 38445), False, 'import time\n'), ((39352, 39363), 'time.time', 'time.time', ([], {}), '()\n', (39361, 39363), False, 'import time\n'), ((43124, 43150), 'dill.dump', 'pickler.dump', (['T', 'file', 'bin'], {}), '(T, file, bin)\n', (43136, 43150), True, 'import dill as pickler\n'), ((54346, 54357), 'os.stat', 'os.stat', (['FN'], {}), '(FN)\n', (54353, 54357), False, 'import os, time, string\n'), ((57904, 57936), 'time.strftime', 'time.strftime', (['"""%b%Y"""', 'TimeTuple'], {}), "('%b%Y', TimeTuple)\n", (57917, 57936), False, 'import time\n'), ((58386, 58409), 'time.asctime', 'time.asctime', (['TimeTuple'], {}), '(TimeTuple)\n', (58398, 58409), False, 'import time\n'), ((60781, 60808), 'time.strftime', 'strftime', (['"""%b%Y"""', 'TimeTuple'], {}), "('%b%Y', TimeTuple)\n", (60789, 60808), False, 'from time import strptime, localtime, strftime, mktime, ctime\n'), ((61059, 61081), 'time.strptime', 'strptime', (['"""2030"""', '"""%Y"""'], {}), 
"('2030', '%Y')\n", (61067, 61081), False, 'from time import strptime, localtime, strftime, mktime, ctime\n'), ((61233, 61274), 'anuga.utilities.log.critical', 'log.critical', (["('Reading file %s' % SD + FN)"], {}), "('Reading file %s' % SD + FN)\n", (61245, 61274), True, 'import anuga.utilities.log as log\n'), ((63326, 63339), 'builtins.input.close', 'input.close', ([], {}), '()\n', (63337, 63339), False, 'from builtins import input\n'), ((63446, 63503), 'anuga.utilities.log.critical', 'log.critical', (['"""CACHING STATISTICS: No valid records read"""'], {}), "('CACHING STATISTICS: No valid records read')\n", (63458, 63503), True, 'import anuga.utilities.log as log\n'), ((64518, 64532), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (64530, 64532), True, 'import anuga.utilities.log as log\n'), ((64929, 64943), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (64941, 64943), True, 'import anuga.utilities.log as log\n'), ((65432, 65446), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (65444, 65446), True, 'import anuga.utilities.log as log\n'), ((66573, 66595), 'builtins.zip', 'zip', (['sortlist', 'keylist'], {}), '(sortlist, keylist)\n', (66576, 66595), False, 'from builtins import zip\n'), ((69682, 69705), 'os.stat', 'os.stat', (['(CD + file_name)'], {}), '(CD + file_name)\n', (69689, 69705), False, 'import os, time, string\n'), ((69892, 69925), 'anuga.utilities.log.critical', 'log.critical', (['"""| Dependencies: """'], {}), "('| Dependencies: ')\n", (69904, 69925), True, 'import anuga.utilities.log as log\n'), ((70692, 70725), 'anuga.utilities.log.critical', 'log.critical', (['"""| No dependencies"""'], {}), "('| No dependencies')\n", (70704, 70725), True, 'import anuga.utilities.log as log\n'), ((71864, 71894), 'anuga.utilities.log.critical', 'log.critical', (['"""| No arguments"""'], {}), "('| No arguments')\n", (71876, 71894), True, 'import anuga.utilities.log as log\n'), ((72487, 72498), 'time.time', 
'time.time', ([], {}), '()\n', (72496, 72498), False, 'import time\n'), ((13407, 13418), 'time.time', 'time.time', ([], {}), '()\n', (13416, 13418), False, 'import time\n'), ((16544, 16558), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (16556, 16558), True, 'import anuga.utilities.log as log\n'), ((16563, 16638), 'anuga.utilities.log.critical', 'log.critical', (['"""*** Could not find zlib, default to no-compression ***"""'], {}), "('*** Could not find zlib, default to no-compression ***')\n", (16575, 16638), True, 'import anuga.utilities.log as log\n'), ((16643, 16718), 'anuga.utilities.log.critical', 'log.critical', (['"""*** Installing zlib will improve performance of caching ***"""'], {}), "('*** Installing zlib will improve performance of caching ***')\n", (16655, 16718), True, 'import anuga.utilities.log as log\n'), ((16723, 16737), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (16735, 16737), True, 'import anuga.utilities.log as log\n'), ((24360, 24387), 'time.strptime', 'time.strptime', (['"""2030"""', '"""%Y"""'], {}), "('2030', '%Y')\n", (24373, 24387), False, 'import time\n'), ((30190, 30263), 'anuga.utilities.log.critical', 'log.critical', (["('Dependencies %s have changed - recomputing' % dependencies)"], {}), "('Dependencies %s have changed - recomputing' % dependencies)\n", (30202, 30263), True, 'import anuga.utilities.log as log\n'), ((32918, 32956), 'anuga.utilities.sparse_matrix_ext.deserialise_dok', 'sparse_matrix_ext.deserialise_dok', (['T.D'], {}), '(T.D)\n', (32951, 32956), True, 'import anuga.utilities.sparse_matrix_ext as sparse_matrix_ext\n'), ((32996, 33036), 'anuga.utilities.sparse_matrix_ext.deserialise_dok', 'sparse_matrix_ext.deserialise_dok', (['T.AtA'], {}), '(T.AtA)\n', (33029, 33036), True, 'import anuga.utilities.sparse_matrix_ext as sparse_matrix_ext\n'), ((33961, 34010), 'anuga.utilities.log.critical', 'log.critical', (["('Clearing %s' % CD + funcname + '*')"], {}), "('Clearing %s' % CD + 
funcname + '*')\n", (33973, 34010), True, 'import anuga.utilities.log as log\n'), ((35531, 35584), 'anuga.utilities.log.critical', 'log.critical', (["('Deleting %d expired files:' % delfiles)"], {}), "('Deleting %d expired files:' % delfiles)\n", (35543, 35584), True, 'import anuga.utilities.log as log\n'), ((37651, 37687), 'anuga.utilities.sparse_matrix_ext.serialise_dok', 'sparse_matrix_ext.serialise_dok', (['T.D'], {}), '(T.D)\n', (37682, 37687), True, 'import anuga.utilities.sparse_matrix_ext as sparse_matrix_ext\n'), ((37727, 37765), 'anuga.utilities.sparse_matrix_ext.serialise_dok', 'sparse_matrix_ext.serialise_dok', (['T.AtA'], {}), '(T.AtA)\n', (37758, 37765), True, 'import anuga.utilities.sparse_matrix_ext as sparse_matrix_ext\n'), ((38096, 38152), 'anuga.utilities.log.critical', 'log.critical', (["('ERROR: Could not open %s' % datafile.name)"], {}), "('ERROR: Could not open %s' % datafile.name)\n", (38108, 38152), True, 'import anuga.utilities.log as log\n'), ((38214, 38269), 'anuga.utilities.log.critical', 'log.critical', (["('ERROR: Could not open %s' % admfile.name)"], {}), "('ERROR: Could not open %s' % admfile.name)\n", (38226, 38269), True, 'import anuga.utilities.log as log\n'), ((40901, 40936), 'os.system', 'os.system', (["('chmod 666 ' + file.name)"], {}), "('chmod 666 ' + file.name)\n", (40910, 40936), False, 'import os, time, string\n'), ((41766, 41783), 'dill.loads', 'pickler.loads', (['Rs'], {}), '(Rs)\n', (41779, 41783), True, 'import dill as pickler\n'), ((42770, 42791), 'dill.dumps', 'pickler.dumps', (['T', 'bin'], {}), '(T, bin)\n', (42783, 42791), True, 'import dill as pickler\n'), ((43028, 43057), 'zlib.compress', 'zlib.compress', (['Ts', 'comp_level'], {}), '(Ts, comp_level)\n', (43041, 43057), False, 'import zlib\n'), ((46117, 46123), 'builtins.str', 'str', (['h'], {}), '(h)\n', (46120, 46123), False, 'from builtins import str\n'), ((48147, 48155), 'builtins.range', 'range', (['N'], {}), '(N)\n', (48152, 48155), False, 'from builtins 
import range\n'), ((52997, 53010), 'glob.glob', 'glob.glob', (['FN'], {}), '(FN)\n', (53006, 53010), False, 'import glob\n'), ((54535, 54595), 'anuga.utilities.log.critical', 'log.critical', (['"""Hack to get os.stat when files are too large"""'], {}), "('Hack to get os.stat when files are too large')\n", (54547, 54595), True, 'import anuga.utilities.log as log\n'), ((56617, 56651), 'os.access', 'os.access', (['CD', '(os.R_OK and os.W_OK)'], {}), '(CD, os.R_OK and os.W_OK)\n', (56626, 56651), False, 'import os, time, string\n'), ((56689, 56701), 'os.mkdir', 'os.mkdir', (['CD'], {}), '(CD)\n', (56697, 56701), False, 'import os, time, string\n'), ((57875, 57886), 'time.time', 'time.time', ([], {}), '()\n', (57884, 57886), False, 'import time\n'), ((58218, 58272), 'anuga.utilities.log.critical', 'log.critical', (['"""Warning: Stat file could not be opened"""'], {}), "('Warning: Stat file could not be opened')\n", (58230, 58272), True, 'import anuga.utilities.log as log\n'), ((58546, 58591), 'os.stat', 'os.stat', (["(CD + FN + '_' + file_types[0] + '.z')"], {}), "(CD + FN + '_' + file_types[0] + '.z')\n", (58553, 58591), False, 'import os, time, string\n'), ((58608, 58646), 'os.stat', 'os.stat', (["(CD + FN + '_' + file_types[0])"], {}), "(CD + FN + '_' + file_types[0])\n", (58615, 58646), False, 'import os, time, string\n'), ((59234, 59286), 'anuga.utilities.log.critical', 'log.critical', (['"""Warning: Writing of stat file failed"""'], {}), "('Warning: Writing of stat file failed')\n", (59246, 59286), True, 'import anuga.utilities.log as log\n'), ((60757, 60763), 'time', 'time', ([], {}), '()\n', (60761, 60763), False, 'import time\n'), ((60914, 60933), 'string.find', 'find', (['FN', 'SFILENAME'], {}), '(FN, SFILENAME)\n', (60918, 60933), False, 'from string import split, rstrip, find\n'), ((61300, 61326), 'builtins.input.readlines', 'input.readlines', (['blocksize'], {}), '(blocksize)\n', (61315, 61326), False, 'from builtins import input\n'), ((63644, 63658), 
'time.ctime', 'ctime', (['lastday'], {}), '(lastday)\n', (63649, 63658), False, 'from time import strptime, localtime, strftime, mktime, ctime\n'), ((65413, 65427), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (65425, 65427), True, 'import anuga.utilities.log as log\n'), ((69426, 69466), 'builtins.str.ljust', 'str.ljust', (['"""| Caching dir: """', 'textwidth1'], {}), "('| Caching dir: ', textwidth1)\n", (69435, 69466), False, 'from builtins import str\n'), ((70098, 70118), 'time.ctime', 'time.ctime', (['stats[1]'], {}), '(stats[1])\n', (70108, 70118), False, 'import time\n'), ((70129, 70142), 'builtins.str', 'str', (['stats[0]'], {}), '(stats[0])\n', (70132, 70142), False, 'from builtins import str\n'), ((70524, 70559), 'builtins.str.ljust', 'str.ljust', (["(dlist[n] + ':')", '(maxd + 1)'], {}), "(dlist[n] + ':', maxd + 1)\n", (70533, 70559), False, 'from builtins import str\n'), ((70566, 70591), 'builtins.str.ljust', 'str.ljust', (['tlist[n]', 'maxt'], {}), '(tlist[n], maxt)\n', (70575, 70591), False, 'from builtins import str\n'), ((70602, 70627), 'builtins.str.rjust', 'str.rjust', (['slist[n]', 'maxs'], {}), '(slist[n], maxs)\n', (70611, 70627), False, 'from builtins import str\n'), ((70635, 70679), 'anuga.utilities.log.critical', 'log.critical', (["('| %s %s %s bytes' % (d, t, s))"], {}), "('| %s %s %s bytes' % (d, t, s))\n", (70647, 70679), True, 'import anuga.utilities.log as log\n'), ((70955, 70991), 'builtins.str.ljust', 'str.ljust', (['"""| Function:"""', 'textwidth1'], {}), "('| Function:', textwidth1)\n", (70964, 70991), False, 'from builtins import str\n'), ((72194, 72228), 'builtins.str.ljust', 'str.ljust', (['"""| Reason:"""', 'textwidth1'], {}), "('| Reason:', textwidth1)\n", (72203, 72228), False, 'from builtins import str\n'), ((75284, 75310), 'builtins.str.ljust', 'str.ljust', (['msg', 'textwidth4'], {}), '(msg, textwidth4)\n', (75293, 75310), False, 'from builtins import str\n'), ((11957, 11979), 'os.access', 'os.access', 
(['fn', 'os.F_OK'], {}), '(fn, os.F_OK)\n', (11966, 11979), False, 'import os, time, string\n'), ((13558, 13569), 'time.time', 'time.time', ([], {}), '()\n', (13567, 13569), False, 'import time\n'), ((17113, 17137), 'builtins.str', 'str', (['(n + 2.0 / (n + 4.0))'], {}), '(n + 2.0 / (n + 4.0))\n', (17116, 17137), False, 'from builtins import str\n'), ((23793, 23819), 'string.find', 'string.find', (['FN', 'statsfile'], {}), '(FN, statsfile)\n', (23804, 23819), False, 'import string\n'), ((24425, 24551), 'anuga.utilities.log.critical', 'log.critical', (['"""cachestat() does not work here, because it relies on time.strptime() which is unavailable in Windows"""'], {}), "(\n 'cachestat() does not work here, because it relies on time.strptime() which is unavailable in Windows'\n )\n", (24437, 24551), True, 'import anuga.utilities.log as log\n'), ((31026, 31073), 'anuga.utilities.log.critical', 'log.critical', (['"""Caching did not yield any gain."""'], {}), "('Caching did not yield any gain.')\n", (31038, 31073), True, 'import anuga.utilities.log as log\n'), ((31082, 31156), 'anuga.utilities.log.critical', 'log.critical', (["('Consider executing function %s without caching.' % funcname)"], {}), "('Consider executing function %s without caching.' 
% funcname)\n", (31094, 31156), True, 'import anuga.utilities.log as log\n'), ((34453, 34496), 'anuga.utilities.log.critical', 'log.critical', (['"""Remove the following files:"""'], {}), "('Remove the following files:')\n", (34465, 34496), True, 'import anuga.utilities.log as log\n'), ((34593, 34619), 'builtins.input', 'input', (['"""Delete (Y/N)[N] ?"""'], {}), "('Delete (Y/N)[N] ?')\n", (34598, 34619), False, 'from builtins import input\n'), ((41325, 41345), 'zlib.decompress', 'zlib.decompress', (['RsC'], {}), '(RsC)\n', (41340, 41345), False, 'import zlib\n'), ((41817, 41835), 'dill.load', 'pickler.load', (['file'], {}), '(file)\n', (41829, 41835), True, 'import dill as pickler\n'), ((42058, 42133), 'anuga.utilities.log.critical', 'log.critical', (["('ERROR: Out of memory while loading %s, aborting' % file.name)"], {}), "('ERROR: Out of memory while loading %s, aborting' % file.name)\n", (42070, 42133), True, 'import anuga.utilities.log as log\n'), ((42517, 42531), 'anuga.utilities.log.critical', 'log.critical', ([], {}), '()\n', (42529, 42531), True, 'import anuga.utilities.log as log\n'), ((42538, 42581), 'anuga.utilities.log.critical', 'log.critical', (['"""*** Could not find zlib ***"""'], {}), "('*** Could not find zlib ***')\n", (42550, 42581), True, 'import anuga.utilities.log as log\n'), ((42588, 42651), 'anuga.utilities.log.critical', 'log.critical', (['"""*** Try to run caching with compression off ***"""'], {}), "('*** Try to run caching with compression off ***')\n", (42600, 42651), True, 'import anuga.utilities.log as log\n'), ((42658, 42718), 'anuga.utilities.log.critical', 'log.critical', (['"""*** caching.set_option(\'compression\', 0) ***"""'], {}), '("*** caching.set_option(\'compression\', 0) ***")\n', (42670, 42718), True, 'import anuga.utilities.log as log\n'), ((46592, 46604), 'numpy.array', 'num.array', (['T'], {}), '(T)\n', (46601, 46604), True, 'import numpy as num\n'), ((48743, 48762), 'numpy.alltrue', 'num.alltrue', (['(A == B)'], 
{}), '(A == B)\n', (48754, 48762), True, 'import numpy as num\n'), ((50509, 50536), 'builtins.str.maketrans', 'str.maketrans', (['"""<>\'"""', '""" """'], {}), '("<>\'", \' \')\n', (50522, 50536), False, 'from builtins import str\n'), ((50591, 50605), 'builtins.str.split', 'str.split', (['tmp'], {}), '(tmp)\n', (50600, 50605), False, 'from builtins import str\n'), ((51899, 51920), 'inspect.isclass', 'inspect.isclass', (['my_F'], {}), '(my_F)\n', (51914, 51920), False, 'import inspect\n'), ((53452, 53474), 'os.access', 'os.access', (['FN', 'os.F_OK'], {}), '(FN, os.F_OK)\n', (53461, 53474), False, 'import os, time, string\n'), ((54790, 54853), 'os.system', 'os.system', (["('ls -l --full-time --time=atime ' + FN + ' > ' + tmp)"], {}), "('ls -l --full-time --time=atime ' + FN + ' > ' + tmp)\n", (54799, 54853), False, 'import os, time, string\n'), ((54956, 55006), 'os.system', 'os.system', (["('ls -l --full-time ' + FN + ' > ' + tmp)"], {}), "('ls -l --full-time ' + FN + ' > ' + tmp)\n", (54965, 55006), False, 'import os, time, string\n'), ((55089, 55152), 'os.system', 'os.system', (["('ls -l --full-time --time=ctime ' + FN + ' > ' + tmp)"], {}), "('ls -l --full-time --time=ctime ' + FN + ' > ' + tmp)\n", (55098, 55152), False, 'import os, time, string\n'), ((56784, 56812), 'os.system', 'os.system', (["('chmod 777 ' + CD)"], {}), "('chmod 777 ' + CD)\n", (56793, 56812), False, 'import os, time, string\n'), ((56903, 56954), 'anuga.utilities.log.critical', 'log.critical', (["('MESSAGE: Directory %s created.' % CD)"], {}), "('MESSAGE: Directory %s created.' 
% CD)\n", (56915, 56954), True, 'import anuga.utilities.log as log\n'), ((64741, 64778), 'anuga.utilities.log.critical', 'log.critical', (["('%-*s' % (Widths[n], s))"], {}), "('%-*s' % (Widths[n], s))\n", (64753, 64778), True, 'import anuga.utilities.log as log\n'), ((64873, 64909), 'anuga.utilities.log.critical', 'log.critical', (["('%*s' % (Widths[n], s))"], {}), "('%*s' % (Widths[n], s))\n", (64885, 64909), True, 'import anuga.utilities.log as log\n'), ((66471, 66483), 'builtins.str', 'str', (['sortidx'], {}), '(sortidx)\n', (66474, 66483), False, 'from builtins import str\n'), ((67902, 67938), 'builtins.str.ljust', 'str.ljust', (['"""| CPU time:"""', 'textwidth1'], {}), "('| CPU time:', textwidth1)\n", (67911, 67938), False, 'from builtins import str\n'), ((68231, 68271), 'builtins.str.ljust', 'str.ljust', (['"""| Loading time:"""', 'textwidth1'], {}), "('| Loading time:', textwidth1)\n", (68240, 68271), False, 'from builtins import str\n'), ((68763, 68799), 'builtins.str.ljust', 'str.ljust', (['"""| CPU time:"""', 'textwidth1'], {}), "('| CPU time:', textwidth1)\n", (68772, 68799), False, 'from builtins import str\n'), ((68869, 68909), 'builtins.str.ljust', 'str.ljust', (['"""| Loading time:"""', 'textwidth1'], {}), "('| Loading time:', textwidth1)\n", (68878, 68909), False, 'from builtins import str\n'), ((68979, 69017), 'builtins.str.ljust', 'str.ljust', (['"""| Time saved:"""', 'textwidth1'], {}), "('| Time saved:', textwidth1)\n", (68988, 69017), False, 'from builtins import str\n'), ((73961, 73970), 'builtins.str', 'str', (['args'], {}), '(args)\n', (73964, 73970), False, 'from builtins import str\n'), ((34224, 34249), 'os.remove', 'os.remove', (['(CD + file_name)'], {}), '(CD + file_name)\n', (34233, 34249), False, 'import os, time, string\n'), ((34272, 34306), 'os.system', 'os.system', (["('del ' + CD + file_name)"], {}), "('del ' + CD + file_name)\n", (34281, 34306), False, 'import os, time, string\n'), ((34546, 34579), 'anuga.utilities.log.critical', 
'log.critical', (["(' ' + file_name)"], {}), "(' ' + file_name)\n", (34558, 34579), True, 'import anuga.utilities.log as log\n'), ((46690, 46709), 'numpy.average', 'num.average', (['T.flat'], {}), '(T.flat)\n', (46701, 46709), True, 'import numpy as num\n'), ((50660, 50690), 'string.maketrans', 'string.maketrans', (['"""<>\'"""', '""" """'], {}), '("<>\'", \' \')\n', (50676, 50690), False, 'import string\n'), ((50748, 50765), 'string.split', 'string.split', (['tmp'], {}), '(tmp)\n', (50760, 50765), False, 'import string\n'), ((55213, 55235), 'os.system', 'os.system', (["('rm ' + tmp)"], {}), "('rm ' + tmp)\n", (55222, 55235), False, 'import os, time, string\n'), ((56998, 57062), 'anuga.utilities.log.critical', 'log.critical', (["('WARNING: Directory %s could not be created.' % CD)"], {}), "('WARNING: Directory %s could not be created.' % CD)\n", (57010, 57062), True, 'import anuga.utilities.log as log\n'), ((57162, 57209), 'anuga.utilities.log.critical', 'log.critical', (["('Using directory %s instead' % CD)"], {}), "('Using directory %s instead' % CD)\n", (57174, 57209), True, 'import anuga.utilities.log as log\n'), ((61939, 61954), 'string.find', 'find', (['my_F', '"""["""'], {}), "(my_F, '[')\n", (61943, 61954), False, 'from string import split, rstrip, find\n'), ((63621, 63636), 'time.ctime', 'ctime', (['firstday'], {}), '(firstday)\n', (63626, 63636), False, 'from time import strptime, localtime, strftime, mktime, ctime\n'), ((71291, 71327), 'builtins.str.ljust', 'str.ljust', (['"""| Argument:"""', 'textwidth1'], {}), "('| Argument:', textwidth1)\n", (71300, 71327), False, 'from builtins import str\n'), ((71409, 71446), 'builtins.str.ljust', 'str.ljust', (['"""| Arguments:"""', 'textwidth1'], {}), "('| Arguments:', textwidth1)\n", (71418, 71446), False, 'from builtins import str\n'), ((71587, 71626), 'builtins.str.ljust', 'str.ljust', (['"""| Keyword Arg:"""', 'textwidth1'], {}), "('| Keyword Arg:', textwidth1)\n", (71596, 71626), False, 'from builtins import 
str\n'), ((71707, 71747), 'builtins.str.ljust', 'str.ljust', (['"""| Keyword Args:"""', 'textwidth1'], {}), "('| Keyword Args:', textwidth1)\n", (71716, 71747), False, 'from builtins import str\n'), ((73499, 73508), 'builtins.str', 'str', (['args'], {}), '(args)\n', (73502, 73508), False, 'from builtins import str\n'), ((12026, 12039), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (12035, 12039), False, 'import os, time, string\n'), ((12134, 12156), 'os.system', 'os.system', (["('del ' + fn)"], {}), "('del ' + fn)\n", (12143, 12156), False, 'import os, time, string\n'), ((12197, 12252), 'anuga.utilities.log.critical', 'log.critical', (["('MESSAGE (caching): File %s deleted' % fn)"], {}), "('MESSAGE (caching): File %s deleted' % fn)\n", (12209, 12252), True, 'import anuga.utilities.log as log\n'), ((34757, 34782), 'os.remove', 'os.remove', (['(CD + file_name)'], {}), '(CD + file_name)\n', (34766, 34782), False, 'import os, time, string\n'), ((34809, 34843), 'os.system', 'os.system', (["('del ' + CD + file_name)"], {}), "('del ' + CD + file_name)\n", (34818, 34843), False, 'import os, time, string\n'), ((54662, 54673), 'os.getpid', 'os.getpid', ([], {}), '()\n', (54671, 54673), False, 'import os, time, string\n'), ((61446, 61460), 'string.rstrip', 'rstrip', (['record'], {}), '(record)\n', (61452, 61460), False, 'from string import split, rstrip, find\n'), ((64370, 64399), 'past.utils.old_div', 'old_div', (['(1.0 * rec[n])', 'rec[0]'], {}), '(1.0 * rec[n], rec[0])\n', (64377, 64399), False, 'from past.utils import old_div\n'), ((65225, 65233), 'builtins.str', 'str', (['key'], {}), '(key)\n', (65228, 65233), False, 'from builtins import str\n'), ((73861, 73876), 'numpy.ravel', 'num.ravel', (['args'], {}), '(args)\n', (73870, 73876), True, 'import numpy as num\n'), ((73920, 73935), 'builtins.str', 'str', (['args.shape'], {}), '(args.shape)\n', (73923, 73935), False, 'from builtins import str\n'), ((3279, 3285), 'builtins.str', 'str', (['x'], {}), '(x)\n', (3282, 
3285), False, 'from builtins import str\n'), ((23557, 23585), 'past.utils.old_div', 'old_div', (['((t1 - t2) * 100)', 't1'], {}), '((t1 - t2) * 100, t1)\n', (23564, 23585), False, 'from past.utils import old_div\n'), ((47254, 47260), 'builtins.str', 'str', (['T'], {}), '(T)\n', (47257, 47260), False, 'from builtins import str\n'), ((54644, 54655), 'time.time', 'time.time', ([], {}), '()\n', (54653, 54655), False, 'import time\n'), ((61576, 61595), 'time.strptime', 'strptime', (['timestamp'], {}), '(timestamp)\n', (61584, 61595), False, 'from time import strptime, localtime, strftime, mktime, ctime\n'), ((65381, 65389), 'builtins.str', 'str', (['key'], {}), '(key)\n', (65384, 65389), False, 'from builtins import str\n'), ((69809, 69822), 'builtins.str', 'str', (['stats[6]'], {}), '(stats[6])\n', (69812, 69822), False, 'from builtins import str\n'), ((59037, 59048), 'builtins.str', 'str', (['reason'], {}), '(reason)\n', (59040, 59048), False, 'from builtins import str\n'), ((62331, 62342), 'builtins.str', 'str', (['record'], {}), '(record)\n', (62334, 62342), False, 'from builtins import str\n'), ((62827, 62859), 'past.utils.old_div', 'old_div', (['(100.0 * saving)', 'cputime'], {}), '(100.0 * saving, cputime)\n', (62834, 62859), False, 'from past.utils import old_div\n'), ((49493, 49511), 'pickle.dumps', 'pickle.dumps', (['A', '(0)'], {}), '(A, 0)\n', (49505, 49511), False, 'import pickle\n'), ((49514, 49532), 'pickle.dumps', 'pickle.dumps', (['B', '(0)'], {}), '(B, 0)\n', (49526, 49532), False, 'import pickle\n'), ((62290, 62304), 'builtins.str', 'str', (['record[4]'], {}), '(record[4])\n', (62293, 62304), False, 'from builtins import str\n'), ((69721, 69772), 'builtins.str.ljust', 'str.ljust', (["('| ' + file_type + ' file: ')", 'textwidth1'], {}), "('| ' + file_type + ' file: ', textwidth1)\n", (69730, 69772), False, 'from builtins import str\n'), ((58961, 58977), 'builtins.str', 'str', (['compression'], {}), '(compression)\n', (58964, 58977), False, 'from 
builtins import str\n')] |
#!../../../../virtualenv/bin/python3
# -*- coding: utf-8 -*-
# NB: The shebang line above assumes a python virtual environment has been installed alongside your working copy of
# the <4most-4gp-scripts> git repository, and it only works when this script is invoked from the directory where it
# lives. If either assumption does not hold (e.g. you're using Conda), run this script as
# <python synthesize_rect_grid.py> rather than <./synthesize_rect_grid.py>.
"""
Take a rectangular grid of [Teff, log_g, Fe/H] parameter values, and synthesize a spectrum at each
point in parameter space. Assume solar abundance ratios for all other elements.
"""
import itertools
import logging
import numpy as np
from lib.base_synthesizer import Synthesizer
# Configure logging so the synthesizer's progress can be followed
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
                    datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info("Synthesizing rectangular grid of spectra")
# Create the base synthesizer which will drive the spectrum generation
synthesizer = Synthesizer(library_name="rect_grid",
                          logger=logger,
                          docstring=__doc__)
# Axes of the rectangular grid: each entry gives one stellar label with the
# range and step size over which it is sampled
labels_to_vary = [
    {"name": "Teff", "min": 5600, "max": 6401, "step": 100},
    {"name": "logg", "min": 3.3, "max": 4.81, "step": 0.1},
    {"name": "[Fe/H]", "min": -1., "max": 0.21, "step": 0.1}
]
# Sample each axis, then take the Cartesian product to enumerate grid points
axis_values = [np.arange(axis['min'], axis['max'], axis['step']) for axis in labels_to_vary]
grid_points = itertools.product(*axis_values)
# Build a stellar-parameter dictionary (plus a unique name) for each grid point
star_list = []
for point in grid_points:
    name_parts = ["rect_grid"]
    star = {}
    for label, raw_value in zip(labels_to_vary, point):
        # Tiny offset nudges values off exact grid boundaries
        value = float(raw_value) + 1e-4
        star[label['name']] = value
        name_parts.append("{:.1f}".format(value))
    star["name"] = "_".join(name_parts)
    star_list.append(star)
# Hand the list of stars over to the synthesizer
synthesizer.set_star_list(star_list)
# Record the stellar parameters in a sqlite3 database
synthesizer.dump_stellar_parameters_to_sqlite()
# Open a fresh SpectrumLibrary to receive the output
synthesizer.create_spectrum_library()
# Synthesize every requested spectrum
synthesizer.do_synthesis()
# Shut down the TurboSpectrum synthesizer instance
synthesizer.clean_up()
| [
"lib.base_synthesizer.Synthesizer",
"logging.basicConfig",
"numpy.arange",
"itertools.product",
"logging.getLogger"
] | [((817, 958), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""[%(asctime)s] %(levelname)s:%(filename)s:%(message)s"""', 'datefmt': '"""%d/%m/%Y %H:%M:%S"""'}), "(level=logging.INFO, format=\n '[%(asctime)s] %(levelname)s:%(filename)s:%(message)s', datefmt=\n '%d/%m/%Y %H:%M:%S')\n", (836, 958), False, 'import logging\n'), ((978, 1005), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (995, 1005), False, 'import logging\n'), ((1108, 1179), 'lib.base_synthesizer.Synthesizer', 'Synthesizer', ([], {'library_name': '"""rect_grid"""', 'logger': 'logger', 'docstring': '__doc__'}), "(library_name='rect_grid', logger=logger, docstring=__doc__)\n", (1119, 1179), False, 'from lib.base_synthesizer import Synthesizer\n'), ((1652, 1684), 'itertools.product', 'itertools.product', (['*label_values'], {}), '(*label_values)\n', (1669, 1684), False, 'import itertools\n'), ((1553, 1602), 'numpy.arange', 'np.arange', (["item['min']", "item['max']", "item['step']"], {}), "(item['min'], item['max'], item['step'])\n", (1562, 1602), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
from ..context import *
from ..ops.cntk2 import Input
from ..sgd import *
from ..reader import *
def test_parse_shapes_1():
    """Shapes are parsed from CNTK validation output; the dynamic axis becomes NaN."""
    output = '''\
FormNestedNetwork: WARNING: Was called twice for v3 Plus operation
Validating network. 5 nodes to process in pass 1.
Validating --> dummy_node = InputValue() : -> [2 {1} x *]
Validating --> v0 = LearnableParameter() : -> [4 x 1 {1,4}]
Validating --> v1 = Reshape (v0) : [4 x 1 {1,4}] -> [2 x 2 {1,2}]
Validating --> v2 = LearnableParameter() : -> [1 x 1 {1,1}]
Validating --> v3 = Plus (v1, v2) : [2 x 2 {1,2}], [1 x 1 {1,1}] -> [2 x 2 {1,2}]
Validating network. 2 nodes to process in pass 2.
Validating network, final pass.
5 out of 5 nodes do not share the minibatch layout with the input data.
Post-processing network complete.
'''
    # np.nan replaces the np.NaN alias, which was removed in NumPy 2.0 (both
    # names referred to the same float object, so behavior is unchanged).
    # Tuple equality below still holds despite NaN != NaN, because both sides
    # hold the identical float object and tuple comparison short-circuits on
    # identity.
    expected = {
        'dummy_node': (2, np.nan),
        'v0': (4, 1),
        'v1': (2, 2),
        'v2': (1, 1),
        'v3': (2, 2)
    }
    assert LocalExecutionContext._parse_shapes_from_output(output) == expected
def test_parse_shapes_2():
    """Shapes are parsed for nodes whose output carries the dynamic axis ``*``."""
    output = '''\
Validating --> v1 = LearnableParameter() : -> [3 x 2 {1,3}]
Validating --> v2 = InputValue() : -> [2 {1} x *]
Validating --> v3 = Times (v1, v2) : [3 x 2 {1,3}], [2 {1} x *] -> [3 {1} x *]
Validating --> v4 = LearnableParameter() : -> [3 x 1 {1,3}]
Validating --> v5 = Plus (v3, v4) : [3 {1} x *], [3 x 1 {1,3}] -> [3 x 1 {1,3} x *]
'''
    # np.nan replaces the np.NaN alias, which was removed in NumPy 2.0 (both
    # names referred to the same float object, so behavior is unchanged).
    expected = {
        'v1': (3, 2),
        'v2': (2, np.nan),
        'v3': (3, np.nan),
        'v4': (3, 1),
        'v5': (3, 1, np.nan),
    }
    assert LocalExecutionContext._parse_shapes_from_output(output) == expected
def test_parse_eval_result_output_1():
    """Writer output with two sequences parses into the matching nested lists."""
    output = '''\
0 |w.shape 1 1
0 |w 60.000000
1 |w.shape 1 2
1 |w 22.000000
1 |w 24.000000'''
    tensors = LocalExecutionContext._parse_result_output(output)
    expected = [[[60]], [[22], [24]]]
    assert len(tensors) == len(expected)
    for actual, wanted in zip(tensors, expected):
        assert np.allclose(actual, np.asarray(wanted))
def test_parse_eval_result_output_2():
    """Both Windows and Linux spellings of NaN/Inf are parsed correctly."""
    output = '''\
0 |w.shape 8 1
0 |w 1.#IND -1.#IND 1.#INF00 -1.#INF nan -nan inf -inf
'''
    parsed = LocalExecutionContext._parse_result_output(output)
    values = parsed[0][0]  # first sequence of the first batch
    assert len(values) == 8
    # Positions 0-3 use the Windows spellings, positions 4-7 the Linux ones.
    for idx in (0, 1, 4, 5):
        assert np.isnan(values[idx])
    for idx in (2, 6):
        assert np.isinf(values[idx]) and values[idx] > 0
    for idx in (3, 7):
        assert np.isinf(values[idx]) and values[idx] < 0
def test_parse_test_result_output_1():
    """Final-results line with plain numeric metrics parses into a 3-entry dict."""
    output = '''\
Final Results: Minibatch[1-1]: eval_node = 2.77790430 * 500; crit_node = 0.44370050 * 500; perplexity = 1.55846366
'''
    result = LocalExecutionContext._parse_test_result(output)
    assert len(result) == 3
    assert result == {'eval_node': 2.77790430,
                      'crit_node': 0.44370050,
                      'perplexity': 1.55846366}
def test_parse_test_result_output_2():
    """A percentage-valued metric (47.3%) is converted to its fraction (0.473)."""
    output = '''\
Final Results: Minibatch[1-1]: loss = 47.3% * 500; crit_node = 0.44370050 * 500; perplexity = 1.55846366
'''
    result = LocalExecutionContext._parse_test_result(output)
    assert len(result) == 3
    assert result == {'loss': 0.473,
                      'crit_node': 0.44370050,
                      'perplexity': 1.55846366}
def test_export_deferred_context():
    # Single 2-dimensional input node serves as the only root of the network.
    X = Input(2)
    # Text-format reader over "Data.txt" plus default SGD training parameters.
    reader = CNTKTextFormatReader("Data.txt")
    my_sgd = SGDParams()
    # Record four actions (train/test/write/eval) on a deferred context; the
    # assert below checks that export() chains them in the recorded order.
    with DeferredExecutionContext() as ctx:
        input_map=reader.map(X, alias='I', dim=2)
        ctx.train(
            root_nodes=[X],
            training_params=my_sgd,
            input_map=input_map)
        ctx.test(
            root_nodes=[X],
            input_map=input_map)
        ctx.write(input_map=input_map)
        ctx.eval(X, input_map)
    # export() returns the path of the generated CNTK config file; its last
    # line must list all four commands in order.
    with open(ctx.export("name")) as config_file:
        assert config_file.readlines()[-1] == "command=Train:Test:Write:Eval"
| [
"numpy.asarray",
"numpy.isinf",
"numpy.isnan"
] | [((2591, 2608), 'numpy.isnan', 'np.isnan', (['data[0]'], {}), '(data[0])\n', (2599, 2608), True, 'import numpy as np\n'), ((2620, 2637), 'numpy.isnan', 'np.isnan', (['data[1]'], {}), '(data[1])\n', (2628, 2637), True, 'import numpy as np\n'), ((2751, 2768), 'numpy.isnan', 'np.isnan', (['data[4]'], {}), '(data[4])\n', (2759, 2768), True, 'import numpy as np\n'), ((2780, 2797), 'numpy.isnan', 'np.isnan', (['data[5]'], {}), '(data[5])\n', (2788, 2797), True, 'import numpy as np\n'), ((2649, 2666), 'numpy.isinf', 'np.isinf', (['data[2]'], {}), '(data[2])\n', (2657, 2666), True, 'import numpy as np\n'), ((2694, 2711), 'numpy.isinf', 'np.isinf', (['data[3]'], {}), '(data[3])\n', (2702, 2711), True, 'import numpy as np\n'), ((2809, 2826), 'numpy.isinf', 'np.isinf', (['data[6]'], {}), '(data[6])\n', (2817, 2826), True, 'import numpy as np\n'), ((2854, 2871), 'numpy.isinf', 'np.isinf', (['data[7]'], {}), '(data[7])\n', (2862, 2871), True, 'import numpy as np\n'), ((2272, 2287), 'numpy.asarray', 'np.asarray', (['exp'], {}), '(exp)\n', (2282, 2287), True, 'import numpy as np\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from pymor.core.config import config
if config.HAVE_PYMESS:
import numpy as np
import scipy.linalg as spla
import pymess
from pymor.algorithms.genericsolvers import _parse_options
from pymor.algorithms.lyapunov import (mat_eqn_sparse_min_size, _solve_lyap_lrcf_check_args,
_solve_lyap_dense_check_args, _chol)
from pymor.algorithms.to_matrix import to_matrix
from pymor.bindings.scipy import _solve_ricc_check_args
from pymor.core.defaults import defaults
from pymor.core.logger import getLogger
from pymor.operators.constructions import IdentityOperator
@defaults('adi_maxit', 'adi_memory_usage', 'adi_output', 'adi_rel_change_tol', 'adi_res2_tol', 'adi_res2c_tol',
'adi_shifts_arp_m', 'adi_shifts_arp_p', 'adi_shifts_b0', 'adi_shifts_l0', 'adi_shifts_p',
'adi_shifts_paratype')
def lradi_solver_options(adi_maxit=500,
adi_memory_usage=pymess.MESS_MEMORY_MID,
adi_output=1,
adi_rel_change_tol=1e-10,
adi_res2_tol=1e-10,
adi_res2c_tol=1e-11,
adi_shifts_arp_m=32,
adi_shifts_arp_p=48,
adi_shifts_b0=None,
adi_shifts_l0=16,
adi_shifts_p=None,
adi_shifts_paratype=pymess.MESS_LRCFADI_PARA_ADAPTIVE_V):
"""Return available adi solver options with default values for the pymess backend.
Parameters
----------
adi_maxit
See `pymess.OptionsAdi`.
adi_memory_usage
See `pymess.OptionsAdi`.
adi_output
See `pymess.OptionsAdi`.
adi_rel_change_tol
See `pymess.OptionsAdi`.
adi_res2_tol
See `pymess.OptionsAdi`.
adi_res2c_tol
See `pymess.OptionsAdi`.
adi_shifts_arp_m
See `pymess.OptionsAdiShifts`.
adi_shifts_arp_p
See `pymess.OptionsAdiShifts`.
adi_shifts_b0
See `pymess.OptionsAdiShifts`.
adi_shifts_l0
See `pymess.OptionsAdiShifts`.
adi_shifts_p
See `pymess.OptionsAdiShifts`.
adi_shifts_paratype
See `pymess.OptionsAdiShifts`.
Returns
-------
A dict of available solvers with default solver options.
"""
lradi_opts = pymess.Options()
lradi_opts.adi.maxit = adi_maxit
lradi_opts.adi.memory_usage = adi_memory_usage
lradi_opts.adi.output = adi_output
lradi_opts.adi.rel_change_tol = adi_rel_change_tol
lradi_opts.adi.res2_tol = adi_res2_tol
lradi_opts.adi.res2c_tol = adi_res2c_tol
lradi_opts.adi.shifts.arp_m = adi_shifts_arp_m
lradi_opts.adi.shifts.arp_p = adi_shifts_arp_p
lradi_opts.adi.shifts.b0 = adi_shifts_b0
lradi_opts.adi.shifts.l0 = adi_shifts_l0
lradi_opts.adi.shifts.p = adi_shifts_p
lradi_opts.adi.shifts.paratype = adi_shifts_paratype
return lradi_opts
def lyap_lrcf_solver_options():
"""Return available Lyapunov solvers with default options for the pymess backend.
Also see :func:`lradi_solver_options`.
Returns
-------
A dict of available solvers with default solver options.
"""
return {'pymess_glyap': {'type': 'pymess_glyap'},
'pymess_lradi': {'type': 'pymess_lradi',
'opts': lradi_solver_options()}}
    @defaults('default_solver')
    def solve_lyap_lrcf(A, E, B, trans=False, options=None, default_solver=None):
        """Compute an approximate low-rank solution of a Lyapunov equation.

        See :func:`pymor.algorithms.lyapunov.solve_lyap_lrcf` for a
        general description.

        This function uses `pymess.glyap` and `pymess.lradi`.
        For both methods,
        :meth:`~pymor.vectorarrays.interfaces.VectorArrayInterface.to_numpy`
        and
        :meth:`~pymor.vectorarrays.interfaces.VectorSpaceInterface.from_numpy`
        need to be implemented for `A.source`.
        Additionally, since `glyap` is a dense solver, it expects
        :func:`~pymor.algorithms.to_matrix.to_matrix` to work for A and
        E.

        If the solver is not specified using the options or
        default_solver arguments, `glyap` is used for small problems
        (smaller than defined with
        :func:`~pymor.algorithms.lyapunov.mat_eqn_sparse_min_size`) and
        `lradi` for large problems.

        Parameters
        ----------
        A
            The non-parametric |Operator| A.
        E
            The non-parametric |Operator| E or `None`.
        B
            The operator B as a |VectorArray| from `A.source`.
        trans
            Whether the first |Operator| in the Lyapunov equation is
            transposed.
        options
            The solver options to use (see
            :func:`lyap_lrcf_solver_options`).
        default_solver
            Default solver to use (pymess_lradi, pymess_glyap).
            If `None`, choose solver depending on the dimension of A.

        Returns
        -------
        Z
            Low-rank Cholesky factor of the Lyapunov equation solution,
            |VectorArray| from `A.source`.
        """
        _solve_lyap_lrcf_check_args(A, E, B, trans)
        # Choose by problem size when the caller did not pick a solver:
        # iterative lradi for large problems, dense glyap for small ones.
        if default_solver is None:
            default_solver = 'pymess_lradi' if A.source.dim >= mat_eqn_sparse_min_size() else 'pymess_glyap'
        options = _parse_options(options, lyap_lrcf_solver_options(), default_solver, None, False)
        if options['type'] == 'pymess_glyap':
            # Dense path: convert the operators to matrices, compute the full
            # solution X, then extract a Cholesky-like low-rank factor of X.
            X = solve_lyap_dense(to_matrix(A, format='dense'),
                                 to_matrix(E, format='dense') if E else None,
                                 B.to_numpy().T if not trans else B.to_numpy(),
                                 trans=trans, options=options)
            Z = _chol(X)
        elif options['type'] == 'pymess_lradi':
            opts = options['opts']
            # Select the (transposed) equation variant for pymess.
            opts.type = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE
            eqn = LyapunovEquation(opts, A, E, B)
            Z, status = pymess.lradi(eqn, opts)
            # Warn (but do not fail) when the ADI iteration stopped before
            # reaching the requested relative residual tolerance.
            relres = status.res2_norm / status.res2_0
            if relres > opts.adi.res2_tol:
                logger = getLogger('pymor.bindings.pymess.solve_lyap_lrcf')
                logger.warning(f'Desired relative residual tolerance was not achieved '
                               f'({relres:e} > {opts.adi.res2_tol:e}).')
        else:
            raise ValueError(f'Unexpected Lyapunov equation solver ({options["type"]}).')
        # Z holds the factor row-wise; transpose so columns map to vectors.
        return A.source.from_numpy(Z.T)
def lyap_dense_solver_options():
"""Return available Lyapunov solvers with default options for the pymess backend.
Returns
-------
A dict of available solvers with default solver options.
"""
return {'pymess_glyap': {'type': 'pymess_glyap'}}
def solve_lyap_dense(A, E, B, trans=False, options=None):
"""Compute the solution of a Lyapunov equation.
See :func:`pymor.algorithms.lyapunov.solve_lyap_dense` for a
general description.
This function uses `pymess.glyap`.
Parameters
----------
A
The operator A as a 2D |NumPy array|.
E
The operator E as a 2D |NumPy array| or `None`.
B
The operator B as a 2D |NumPy array|.
trans
Whether the first operator in the Lyapunov equation is
transposed.
options
The solver options to use (see
:func:`lyap_dense_solver_options`).
Returns
-------
X
Lyapunov equation solution as a |NumPy array|.
"""
_solve_lyap_dense_check_args(A, E, B, trans)
options = _parse_options(options, lyap_lrcf_solver_options(), 'pymess_glyap', None, False)
if options['type'] == 'pymess_glyap':
Y = B.dot(B.T) if not trans else B.T.dot(B)
op = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE
X = pymess.glyap(A, E, Y, op=op)[0]
else:
raise ValueError(f'Unexpected Lyapunov equation solver ({options["type"]}).')
return X
@defaults('linesearch', 'maxit', 'absres_tol', 'relres_tol', 'nrm')
def dense_nm_gmpcare_solver_options(linesearch=False,
                                    maxit=50,
                                    absres_tol=1e-11,
                                    relres_tol=1e-12,
                                    nrm=0):
    """Return default options for the pymess `dense_nm_gmpcare` Riccati solver.

    Also see :func:`lradi_solver_options`.

    Parameters
    ----------
    linesearch
        See `pymess.dense_nm_gmpcare`.
    maxit
        See `pymess.dense_nm_gmpcare`.
    absres_tol
        See `pymess.dense_nm_gmpcare`.
    relres_tol
        See `pymess.dense_nm_gmpcare`.
    nrm
        See `pymess.dense_nm_gmpcare`.

    Returns
    -------
    A dict of available solvers with default solver options.
    """
    # Pack the keyword defaults straight into the options dict.
    return dict(linesearch=linesearch,
                maxit=maxit,
                absres_tol=absres_tol,
                relres_tol=relres_tol,
                nrm=nrm)
@defaults('newton_gstep', 'newton_k0', 'newton_linesearch', 'newton_maxit', 'newton_output', 'newton_res2_tol',
          'newton_singleshifts')
def lrnm_solver_options(newton_gstep=0,
                        newton_k0=None,
                        newton_linesearch=0,
                        newton_maxit=30,
                        newton_output=1,
                        newton_res2_tol=1e-10,
                        newton_singleshifts=0):
    """Return pymess low-rank Newton (lrnm) options with default values.

    Starts from the ADI defaults of :func:`lradi_solver_options` and fills in
    the Newton-iteration settings on the `.nm` sub-structure.

    Parameters
    ----------
    newton_gstep
        See `pymess.OptionsNewton`.
    newton_k0
        See `pymess.OptionsNewton`.
    newton_linesearch
        See `pymess.OptionsNewton`.
    newton_maxit
        See `pymess.OptionsNewton`.
    newton_output
        See `pymess.OptionsNewton`.
    newton_res2_tol
        See `pymess.OptionsNewton`.
    newton_singleshifts
        See `pymess.OptionsNewton`.

    Returns
    -------
    A dict of available solvers with default solver options.
    """
    opts = lradi_solver_options()
    # Apply all Newton settings in one sweep instead of attribute-by-attribute.
    newton_settings = {
        'gstep': newton_gstep,
        'k0': newton_k0,
        'linesearch': newton_linesearch,
        'maxit': newton_maxit,
        'output': newton_output,
        'res2_tol': newton_res2_tol,
        'singleshifts': newton_singleshifts,
    }
    for attr, value in newton_settings.items():
        setattr(opts.nm, attr, value)
    return opts
def ricc_lrcf_solver_options():
    """Return available Riccati solvers with default options for the pymess backend.

    Also see :func:`dense_nm_gmpcare_solver_options` and
    :func:`lrnm_solver_options`.

    Returns
    -------
    A dict of available solvers with default solver options.
    """
    # Build the catalogue from (name, options-factory) pairs.
    solvers = {}
    for name, opts_factory in (('pymess_dense_nm_gmpcare', dense_nm_gmpcare_solver_options),
                               ('pymess_lrnm', lrnm_solver_options)):
        solvers[name] = {'type': name, 'opts': opts_factory()}
    return solvers
@defaults('default_solver')
def solve_ricc_lrcf(A, E, B, C, R=None, S=None, trans=False, options=None, default_solver=None):
    """Compute an approximate low-rank solution of a Riccati equation.

    See :func:`pymor.algorithms.riccati.solve_ricc_lrcf` for a
    general description.

    This function uses `pymess.dense_nm_gmpcare` and `pymess.lrnm`.
    For both methods,
    :meth:`~pymor.vectorarrays.interfaces.VectorArrayInterface.to_numpy`
    and
    :meth:`~pymor.vectorarrays.interfaces.VectorSpaceInterface.from_numpy`
    need to be implemented for `A.source`.
    Additionally, since `dense_nm_gmpcare` is a dense solver, it
    expects :func:`~pymor.algorithms.to_matrix.to_matrix` to work
    for A and E.

    If the solver is not specified using the options or
    default_solver arguments, `dense_nm_gmpcare` is used for small
    problems (smaller than defined with
    :func:`~pymor.algorithms.lyapunov.mat_eqn_sparse_min_size`) and
    `lrnm` for large problems.

    Parameters
    ----------
    A
        The non-parametric |Operator| A.
    E
        The non-parametric |Operator| E or `None`.
    B
        The operator B as a |VectorArray| from `A.source`.
    C
        The operator C as a |VectorArray| from `A.source`.
    R
        The operator R as a 2D |NumPy array| or `None`.
    S
        The operator S as a |VectorArray| from `A.source` or `None`.
    trans
        Whether the first |Operator| in the Riccati equation is
        transposed.
    options
        The solver options to use (see
        :func:`ricc_lrcf_solver_options`).
    default_solver
        Default solver to use (pymess_lrnm,
        pymess_dense_nm_gmpcare).
        If `None`, chose solver depending on dimension `A`.

    Returns
    -------
    Z
        Low-rank Cholesky factor of the Riccati equation solution,
        |VectorArray| from `A.source`.
    """
    _solve_ricc_check_args(A, E, B, C, R, S, trans)
    if default_solver is None:
        # Low-rank Newton for large problems, dense solver otherwise.
        default_solver = 'pymess_lrnm' if A.source.dim >= mat_eqn_sparse_min_size() else 'pymess_dense_nm_gmpcare'
    options = _parse_options(options, ricc_lrcf_solver_options(), default_solver, None, False)
    if options['type'] == 'pymess_dense_nm_gmpcare':
        # Dense path: solve for the full Gramian X, then factorize it.
        X = _call_pymess_dense_nm_gmpare(A, E, B, C, R, S, trans=trans, options=options['opts'], plus=False,
                                         method_name='solve_ricc_lrcf')
        Z = _chol(X)
    elif options['type'] == 'pymess_lrnm':
        # The low-rank Newton solver supports neither an S term nor a
        # general R; a non-identity R is absorbed into B or C instead.
        if S is not None:
            raise NotImplementedError
        if R is not None:
            import scipy.linalg as spla
            Rc = spla.cholesky(R)                                 # R = Rc^T * Rc
            Rci = spla.solve_triangular(Rc, np.eye(Rc.shape[0]))    # R^{-1} = Rci * Rci^T
            if not trans:
                C = C.lincomb(Rci.T)                             # C <- Rci^T * C = (C^T * Rci)^T
            else:
                B = B.lincomb(Rci.T)                             # B <- B * Rci
        opts = options['opts']
        opts.type = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE
        eqn = RiccatiEquation(opts, A, E, B, C)
        Z, status = pymess.lrnm(eqn, opts)
        # pymess does not raise on unmet tolerances; warn so callers notice.
        relres = status.res2_norm / status.res2_0
        if relres > opts.adi.res2_tol:
            logger = getLogger('pymor.bindings.pymess.solve_ricc_lrcf')
            logger.warning(f'Desired relative residual tolerance was not achieved '
                           f'({relres:e} > {opts.adi.res2_tol:e}).')
    else:
        raise ValueError(f'Unexpected Riccati equation solver ({options["type"]}).')
    return A.source.from_numpy(Z.T)
def pos_ricc_lrcf_solver_options():
    """Return available positive Riccati solvers with default options for the pymess backend.

    Returns
    -------
    A dict of available solvers with default solver options.
    """
    # Only the dense Newton solver handles the positive Riccati equation.
    solver = 'pymess_dense_nm_gmpcare'
    return {solver: {'type': solver,
                     'opts': dense_nm_gmpcare_solver_options()}}
def solve_pos_ricc_lrcf(A, E, B, C, R=None, S=None, trans=False, options=None):
    """Compute an approximate low-rank solution of a positive Riccati equation.

    See :func:`pymor.algorithms.riccati.solve_pos_ricc_lrcf` for a
    general description.

    This function uses `pymess.dense_nm_gmpcare`.

    Parameters
    ----------
    A
        The non-parametric |Operator| A.
    E
        The non-parametric |Operator| E or `None`.
    B
        The operator B as a |VectorArray| from `A.source`.
    C
        The operator C as a |VectorArray| from `A.source`.
    R
        The operator R as a 2D |NumPy array| or `None`.
    S
        The operator S as a |VectorArray| from `A.source` or `None`.
    trans
        Whether the first |Operator| in the Riccati equation is
        transposed.
    options
        The solver options to use (see
        :func:`pos_ricc_lrcf_solver_options`).

    Returns
    -------
    Z
        Low-rank Cholesky factor of the Riccati equation solution,
        |VectorArray| from `A.source`.
    """
    _solve_ricc_check_args(A, E, B, C, R, S, trans)
    options = _parse_options(options, pos_ricc_lrcf_solver_options(),
                             'pymess_dense_nm_gmpcare', None, False)
    # Guard clause: only one solver type is supported here.
    if options['type'] != 'pymess_dense_nm_gmpcare':
        raise ValueError(f'Unexpected positive Riccati equation solver ({options["type"]}).')
    # plus=True selects the positive Riccati equation variant in pymess.
    gramian = _call_pymess_dense_nm_gmpare(A, E, B, C, R, S, trans=trans,
                                           options=options['opts'], plus=True,
                                           method_name='solve_pos_ricc_lrcf')
    return A.source.from_numpy(_chol(gramian).T)
def _call_pymess_dense_nm_gmpare(A, E, B, C, R, S, trans=False, options=None, plus=False, method_name=''):
    """Return the solution from pymess.dense_nm_gmpare solver.

    Converts the pyMOR operators/arrays to dense NumPy matrices, folds the
    optional R and S terms into the coefficient matrices, and calls
    `pymess.dense_nm_gmpare`.  Used by `solve_ricc_lrcf` (plus=False) and
    `solve_pos_ricc_lrcf` (plus=True).

    Parameters
    ----------
    A, E
        |Operators| (E may be `None`).
    B, C
        |VectorArrays| from `A.source`.
    R
        2D |NumPy array| or `None` (`None` is treated as identity).
    S
        |VectorArray| from `A.source` or `None`.
    trans
        Whether the first operator in the equation is transposed.
    options
        Options dict for `pymess.dense_nm_gmpare`.
    plus
        Whether to solve the positive Riccati equation variant.
    method_name
        Caller name, used to label the warning logger.

    Returns
    -------
    X
        Riccati equation solution as a |NumPy array|.
    """
    # Convert to dense NumPy data; pymess operates on plain matrices.
    A = to_matrix(A, format='dense')
    E = to_matrix(E, format='dense') if E else None
    B = B.to_numpy().T
    C = C.to_numpy()
    S = S.to_numpy().T if S else None
    # Constant term: B B^T (forward) or C^T C (transposed).
    Q = B.dot(B.T) if not trans else C.T.dot(C)
    pymess_trans = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE
    if not trans:
        # Quadratic term G = C^T R^{-1} C (R is None means identity).
        RinvC = spla.solve(R, C) if R is not None else C
        G = C.T.dot(RinvC)
        if S is not None:
            # Absorb the cross term S into A and Q; sign depends on `plus`.
            RinvST = spla.solve(R, S.T) if R is not None else S.T
            if not plus:
                A -= S.dot(RinvC)
                Q -= S.dot(RinvST)
            else:
                A += S.dot(RinvC)
                Q += S.dot(RinvST)
    else:
        # Quadratic term G = B R^{-1} B^T.
        RinvBT = spla.solve(R, B.T) if R is not None else B.T
        G = B.dot(RinvBT)
        if S is not None:
            # Absorb the cross term S into A and Q; sign depends on `plus`.
            RinvST = spla.solve(R, S.T) if R is not None else S.T
            if not plus:
                A -= RinvBT.T.dot(S.T)
                Q -= S.dot(RinvST)
            else:
                A += RinvBT.T.dot(S.T)
                Q += S.dot(RinvST)
    X, absres, relres = pymess.dense_nm_gmpare(None,
                                               A, E, Q, G,
                                               plus=plus, trans=pymess_trans,
                                               linesearch=options['linesearch'],
                                               maxit=options['maxit'],
                                               absres_tol=options['absres_tol'],
                                               relres_tol=options['relres_tol'],
                                               nrm=options['nrm'])
    # pymess does not raise on unmet tolerances; warn so callers notice.
    if absres > options['absres_tol']:
        logger = getLogger('pymor.bindings.pymess.' + method_name)
        logger.warning(f'Desired absolute residual tolerance was not achieved '
                       f'({absres:e} > {options["absres_tol"]:e}).')
    if relres > options['relres_tol']:
        logger = getLogger('pymor.bindings.pymess.' + method_name)
        logger.warning(f'Desired relative residual tolerance was not achieved '
                       f'({relres:e} > {options["relres_tol"]:e}).')
    return X
class LyapunovEquation(pymess.Equation):
    """pymess wrapper for a (generalized) continuous-time algebraic Lyapunov equation.

    Depending on ``opt.type`` and on whether E is given, the represented
    equation is one of:

    - if opt.type is `pymess.MESS_OP_NONE` and E is `None`:

      .. math::
          A X + X A^T + B B^T = 0,

    - if opt.type is `pymess.MESS_OP_NONE` and E is not `None`:

      .. math::
          A X E^T + E X A^T + B B^T = 0,

    - if opt.type is `pymess.MESS_OP_TRANSPOSE` and E is `None`:

      .. math::
          A^T X + X A + B^T B = 0,

    - if opt.type is `pymess.MESS_OP_TRANSPOSE` and E is not `None`:

      .. math::
          A^T X E + E^T X A + B^T B = 0.

    Parameters
    ----------
    opt
        pymess Options structure.
    A
        The non-parametric |Operator| A.
    E
        The non-parametric |Operator| E or `None`.
    B
        The operator B as a |VectorArray| from `A.source`.
    """

    def __init__(self, opt, A, E, B):
        super().__init__(name='LyapunovEquation', opt=opt, dim=A.source.dim)
        self.a = A
        self.e = E
        self.rhs = B.to_numpy().T
        self.p = []

    def _to_va(self, y):
        # pymess hands over NumPy arrays whose columns are the vectors.
        return self.a.source.from_numpy(y.T)

    def ax_apply(self, op, y):
        v = self._to_va(y)
        mat_vec = self.a.apply if op == pymess.MESS_OP_NONE else self.a.apply_adjoint
        return mat_vec(v).to_numpy().T

    def ex_apply(self, op, y):
        if self.e is None:
            return y
        v = self._to_va(y)
        mat_vec = self.e.apply if op == pymess.MESS_OP_NONE else self.e.apply_adjoint
        return mat_vec(v).to_numpy().T

    def ainv_apply(self, op, y):
        v = self._to_va(y)
        solve = (self.a.apply_inverse if op == pymess.MESS_OP_NONE
                 else self.a.apply_inverse_adjoint)
        return solve(v).to_numpy().T

    def einv_apply(self, op, y):
        if self.e is None:
            return y
        v = self._to_va(y)
        solve = (self.e.apply_inverse if op == pymess.MESS_OP_NONE
                 else self.e.apply_inverse_adjoint)
        return solve(v).to_numpy().T

    def apex_apply(self, op, p, idx_p, y):
        # Apply (A + p E) or its adjoint; E defaults to identity when absent.
        v = self._to_va(y)
        if op == pymess.MESS_OP_NONE:
            x = self.a.apply(v)
            x += p * (v if self.e is None else self.e.apply(v))
        else:
            x = self.a.apply_adjoint(v)
            x += p.conjugate() * (v if self.e is None else self.e.apply_adjoint(v))
        return x.to_numpy().T

    def apeinv_apply(self, op, p, idx_p, y):
        v = self._to_va(y)
        e = IdentityOperator(self.a.source) if self.e is None else self.e
        # Use a real shift when p is real so the shifted operator stays real.
        ape = self.a + (p.real if p.imag == 0 else p) * e
        if op == pymess.MESS_OP_NONE:
            x = ape.apply_inverse(v)
        else:
            x = ape.apply_inverse_adjoint(v.conj()).conj()
        return x.to_numpy().T

    def parameter(self, arp_p, arp_m, B=None, K=None):
        # Let pymess compute ADI shift parameters itself.
        return None
class RiccatiEquation(pymess.Equation):
    """pymess wrapper for a (generalized) algebraic Riccati equation.

    Depending on ``opt.type`` and on whether E is given, the represented
    equation is one of:

    - if opt.type is `pymess.MESS_OP_NONE` and E is `None`:

      .. math::
          A X + X A^T - X C^T C X + B B^T = 0,

    - if opt.type is `pymess.MESS_OP_NONE` and E is not `None`:

      .. math::
          A X E^T + E X A^T - E X C^T C X E^T + B B^T = 0,

    - if opt.type is `pymess.MESS_OP_TRANSPOSE` and E is `None`:

      .. math::
          A^T X + X A - X B B^T X + C^T C = 0,

    - if opt.type is `pymess.MESS_OP_TRANSPOSE` and E is not `None`:

      .. math::
          A^T X E + E^T X A - E X B B^T X E^T + C^T C = 0.

    Parameters
    ----------
    opt
        pymess Options structure.
    A
        The non-parametric |Operator| A.
    E
        The non-parametric |Operator| E or `None`.
    B
        The operator B as a |VectorArray| from `A.source`.
    C
        The operator C as a |VectorArray| from `A.source`.
    """

    def __init__(self, opt, A, E, B, C):
        super().__init__(name='RiccatiEquation', opt=opt, dim=A.source.dim)
        self.a = A
        self.e = E
        self.b = B.to_numpy().T
        self.c = C.to_numpy()
        # The right-hand side factor depends on the equation variant.
        self.rhs = self.b if opt.type == pymess.MESS_OP_NONE else self.c.T
        self.p = []

    def _to_va(self, y):
        # pymess hands over NumPy arrays whose columns are the vectors.
        return self.a.source.from_numpy(y.T)

    def ax_apply(self, op, y):
        v = self._to_va(y)
        mat_vec = self.a.apply if op == pymess.MESS_OP_NONE else self.a.apply_adjoint
        return mat_vec(v).to_numpy().T

    def ex_apply(self, op, y):
        if self.e is None:
            return y
        v = self._to_va(y)
        mat_vec = self.e.apply if op == pymess.MESS_OP_NONE else self.e.apply_adjoint
        return mat_vec(v).to_numpy().T

    def ainv_apply(self, op, y):
        v = self._to_va(y)
        solve = (self.a.apply_inverse if op == pymess.MESS_OP_NONE
                 else self.a.apply_inverse_adjoint)
        return solve(v).to_numpy().T

    def einv_apply(self, op, y):
        if self.e is None:
            return y
        v = self._to_va(y)
        solve = (self.e.apply_inverse if op == pymess.MESS_OP_NONE
                 else self.e.apply_inverse_adjoint)
        return solve(v).to_numpy().T

    def apex_apply(self, op, p, idx_p, y):
        # Apply (A + p E) or its adjoint; E defaults to identity when absent.
        v = self._to_va(y)
        if op == pymess.MESS_OP_NONE:
            x = self.a.apply(v)
            x += p * (v if self.e is None else self.e.apply(v))
        else:
            x = self.a.apply_adjoint(v)
            x += p.conjugate() * (v if self.e is None else self.e.apply_adjoint(v))
        return x.to_numpy().T

    def apeinv_apply(self, op, p, idx_p, y):
        v = self._to_va(y)
        e = IdentityOperator(self.a.source) if self.e is None else self.e
        # Use a real shift when p is real so the shifted operator stays real.
        ape = self.a + (p.real if p.imag == 0 else p) * e
        if op == pymess.MESS_OP_NONE:
            x = ape.apply_inverse(v)
        else:
            x = ape.apply_inverse_adjoint(v.conj()).conj()
        return x.to_numpy().T

    def parameter(self, arp_p, arp_m, B=None, K=None):
        # Let pymess compute ADI shift parameters itself.
        return None
| [
"scipy.linalg.solve",
"numpy.eye",
"pymess.glyap",
"scipy.linalg.cholesky",
"pymor.algorithms.lyapunov.mat_eqn_sparse_min_size",
"pymor.algorithms.lyapunov._solve_lyap_dense_check_args",
"pymor.bindings.scipy._solve_ricc_check_args",
"pymess.dense_nm_gmpare",
"pymess.Options",
"pymess.lradi",
"p... | [((869, 1104), 'pymor.core.defaults.defaults', 'defaults', (['"""adi_maxit"""', '"""adi_memory_usage"""', '"""adi_output"""', '"""adi_rel_change_tol"""', '"""adi_res2_tol"""', '"""adi_res2c_tol"""', '"""adi_shifts_arp_m"""', '"""adi_shifts_arp_p"""', '"""adi_shifts_b0"""', '"""adi_shifts_l0"""', '"""adi_shifts_p"""', '"""adi_shifts_paratype"""'], {}), "('adi_maxit', 'adi_memory_usage', 'adi_output',\n 'adi_rel_change_tol', 'adi_res2_tol', 'adi_res2c_tol',\n 'adi_shifts_arp_m', 'adi_shifts_arp_p', 'adi_shifts_b0',\n 'adi_shifts_l0', 'adi_shifts_p', 'adi_shifts_paratype')\n", (877, 1104), False, 'from pymor.core.defaults import defaults\n'), ((3907, 3933), 'pymor.core.defaults.defaults', 'defaults', (['"""default_solver"""'], {}), "('default_solver')\n", (3915, 3933), False, 'from pymor.core.defaults import defaults\n'), ((8713, 8779), 'pymor.core.defaults.defaults', 'defaults', (['"""linesearch"""', '"""maxit"""', '"""absres_tol"""', '"""relres_tol"""', '"""nrm"""'], {}), "('linesearch', 'maxit', 'absres_tol', 'relres_tol', 'nrm')\n", (8721, 8779), False, 'from pymor.core.defaults import defaults\n'), ((9841, 9978), 'pymor.core.defaults.defaults', 'defaults', (['"""newton_gstep"""', '"""newton_k0"""', '"""newton_linesearch"""', '"""newton_maxit"""', '"""newton_output"""', '"""newton_res2_tol"""', '"""newton_singleshifts"""'], {}), "('newton_gstep', 'newton_k0', 'newton_linesearch', 'newton_maxit',\n 'newton_output', 'newton_res2_tol', 'newton_singleshifts')\n", (9849, 9978), False, 'from pymor.core.defaults import defaults\n'), ((12027, 12053), 'pymor.core.defaults.defaults', 'defaults', (['"""default_solver"""'], {}), "('default_solver')\n", (12035, 12053), False, 'from pymor.core.defaults import defaults\n'), ((2780, 2796), 'pymess.Options', 'pymess.Options', ([], {}), '()\n', (2794, 2796), False, 'import pymess\n'), ((5697, 5740), 'pymor.algorithms.lyapunov._solve_lyap_lrcf_check_args', '_solve_lyap_lrcf_check_args', (['A', 'E', 'B', 'trans'], {}), '(A, 
E, B, trans)\n', (5724, 5740), False, 'from pymor.algorithms.lyapunov import mat_eqn_sparse_min_size, _solve_lyap_lrcf_check_args, _solve_lyap_dense_check_args, _chol\n'), ((8210, 8254), 'pymor.algorithms.lyapunov._solve_lyap_dense_check_args', '_solve_lyap_dense_check_args', (['A', 'E', 'B', 'trans'], {}), '(A, E, B, trans)\n', (8238, 8254), False, 'from pymor.algorithms.lyapunov import mat_eqn_sparse_min_size, _solve_lyap_lrcf_check_args, _solve_lyap_dense_check_args, _chol\n'), ((14105, 14152), 'pymor.bindings.scipy._solve_ricc_check_args', '_solve_ricc_check_args', (['A', 'E', 'B', 'C', 'R', 'S', 'trans'], {}), '(A, E, B, C, R, S, trans)\n', (14127, 14152), False, 'from pymor.bindings.scipy import _solve_ricc_check_args\n'), ((17527, 17574), 'pymor.bindings.scipy._solve_ricc_check_args', '_solve_ricc_check_args', (['A', 'E', 'B', 'C', 'R', 'S', 'trans'], {}), '(A, E, B, C, R, S, trans)\n', (17549, 17574), False, 'from pymor.bindings.scipy import _solve_ricc_check_args\n'), ((18311, 18339), 'pymor.algorithms.to_matrix.to_matrix', 'to_matrix', (['A'], {'format': '"""dense"""'}), "(A, format='dense')\n", (18320, 18339), False, 'from pymor.algorithms.to_matrix import to_matrix\n'), ((19501, 19732), 'pymess.dense_nm_gmpare', 'pymess.dense_nm_gmpare', (['None', 'A', 'E', 'Q', 'G'], {'plus': 'plus', 'trans': 'pymess_trans', 'linesearch': "options['linesearch']", 'maxit': "options['maxit']", 'absres_tol': "options['absres_tol']", 'relres_tol': "options['relres_tol']", 'nrm': "options['nrm']"}), "(None, A, E, Q, G, plus=plus, trans=pymess_trans,\n linesearch=options['linesearch'], maxit=options['maxit'], absres_tol=\n options['absres_tol'], relres_tol=options['relres_tol'], nrm=options['nrm']\n )\n", (19523, 19732), False, 'import pymess\n'), ((6331, 6339), 'pymor.algorithms.lyapunov._chol', '_chol', (['X'], {}), '(X)\n', (6336, 6339), False, 'from pymor.algorithms.lyapunov import mat_eqn_sparse_min_size, _solve_lyap_lrcf_check_args, _solve_lyap_dense_check_args, 
_chol\n'), ((14669, 14677), 'pymor.algorithms.lyapunov._chol', '_chol', (['X'], {}), '(X)\n', (14674, 14677), False, 'from pymor.algorithms.lyapunov import mat_eqn_sparse_min_size, _solve_lyap_lrcf_check_args, _solve_lyap_dense_check_args, _chol\n'), ((17955, 17963), 'pymor.algorithms.lyapunov._chol', '_chol', (['X'], {}), '(X)\n', (17960, 17963), False, 'from pymor.algorithms.lyapunov import mat_eqn_sparse_min_size, _solve_lyap_lrcf_check_args, _solve_lyap_dense_check_args, _chol\n'), ((18352, 18380), 'pymor.algorithms.to_matrix.to_matrix', 'to_matrix', (['E'], {'format': '"""dense"""'}), "(E, format='dense')\n", (18361, 18380), False, 'from pymor.algorithms.to_matrix import to_matrix\n'), ((20140, 20189), 'pymor.core.logger.getLogger', 'getLogger', (["('pymor.bindings.pymess.' + method_name)"], {}), "('pymor.bindings.pymess.' + method_name)\n", (20149, 20189), False, 'from pymor.core.logger import getLogger\n'), ((20411, 20460), 'pymor.core.logger.getLogger', 'getLogger', (["('pymor.bindings.pymess.' + method_name)"], {}), "('pymor.bindings.pymess.' 
+ method_name)\n", (20420, 20460), False, 'from pymor.core.logger import getLogger\n'), ((6064, 6092), 'pymor.algorithms.to_matrix.to_matrix', 'to_matrix', (['A'], {'format': '"""dense"""'}), "(A, format='dense')\n", (6073, 6092), False, 'from pymor.algorithms.to_matrix import to_matrix\n'), ((6584, 6607), 'pymess.lradi', 'pymess.lradi', (['eqn', 'opts'], {}), '(eqn, opts)\n', (6596, 6607), False, 'import pymess\n'), ((8553, 8581), 'pymess.glyap', 'pymess.glyap', (['A', 'E', 'Y'], {'op': 'op'}), '(A, E, Y, op=op)\n', (8565, 8581), False, 'import pymess\n'), ((15432, 15454), 'pymess.lrnm', 'pymess.lrnm', (['eqn', 'opts'], {}), '(eqn, opts)\n', (15443, 15454), False, 'import pymess\n'), ((18671, 18687), 'scipy.linalg.solve', 'spla.solve', (['R', 'C'], {}), '(R, C)\n', (18681, 18687), True, 'import scipy.linalg as spla\n'), ((19083, 19101), 'scipy.linalg.solve', 'spla.solve', (['R', 'B.T'], {}), '(R, B.T)\n', (19093, 19101), True, 'import scipy.linalg as spla\n'), ((23724, 23755), 'pymor.operators.constructions.IdentityOperator', 'IdentityOperator', (['self.a.source'], {}), '(self.a.source)\n', (23740, 23755), False, 'from pymor.operators.constructions import IdentityOperator\n'), ((27477, 27508), 'pymor.operators.constructions.IdentityOperator', 'IdentityOperator', (['self.a.source'], {}), '(self.a.source)\n', (27493, 27508), False, 'from pymor.operators.constructions import IdentityOperator\n'), ((5839, 5864), 'pymor.algorithms.lyapunov.mat_eqn_sparse_min_size', 'mat_eqn_sparse_min_size', ([], {}), '()\n', (5862, 5864), False, 'from pymor.algorithms.lyapunov import mat_eqn_sparse_min_size, _solve_lyap_lrcf_check_args, _solve_lyap_dense_check_args, _chol\n'), ((6127, 6155), 'pymor.algorithms.to_matrix.to_matrix', 'to_matrix', (['E'], {'format': '"""dense"""'}), "(E, format='dense')\n", (6136, 6155), False, 'from pymor.algorithms.to_matrix import to_matrix\n'), ((6730, 6780), 'pymor.core.logger.getLogger', 'getLogger', (['"""pymor.bindings.pymess.solve_lyap_lrcf"""'], 
{}), "('pymor.bindings.pymess.solve_lyap_lrcf')\n", (6739, 6780), False, 'from pymor.core.logger import getLogger\n'), ((14250, 14275), 'pymor.algorithms.lyapunov.mat_eqn_sparse_min_size', 'mat_eqn_sparse_min_size', ([], {}), '()\n', (14273, 14275), False, 'from pymor.algorithms.lyapunov import mat_eqn_sparse_min_size, _solve_lyap_lrcf_check_args, _solve_lyap_dense_check_args, _chol\n'), ((14892, 14908), 'scipy.linalg.cholesky', 'spla.cholesky', (['R'], {}), '(R)\n', (14905, 14908), True, 'import scipy.linalg as spla\n'), ((15577, 15627), 'pymor.core.logger.getLogger', 'getLogger', (['"""pymor.bindings.pymess.solve_ricc_lrcf"""'], {}), "('pymor.bindings.pymess.solve_ricc_lrcf')\n", (15586, 15627), False, 'from pymor.core.logger import getLogger\n'), ((18798, 18816), 'scipy.linalg.solve', 'spla.solve', (['R', 'S.T'], {}), '(R, S.T)\n', (18808, 18816), True, 'import scipy.linalg as spla\n'), ((19213, 19231), 'scipy.linalg.solve', 'spla.solve', (['R', 'S.T'], {}), '(R, S.T)\n', (19223, 19231), True, 'import scipy.linalg as spla\n'), ((15005, 15024), 'numpy.eye', 'np.eye', (['Rc.shape[0]'], {}), '(Rc.shape[0])\n', (15011, 15024), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch.utils.data as td
import pandas as pd
import config
from csl_common.utils.nn import Batch
from csl_common.utils import geometry
from datasets import facedataset
def read_300W_detection(lmFilepath):
    """Parse a 300-W ``*.pts`` annotation file into a (68, 2) landmark array.

    The file format contains header lines (e.g. ``version:``, ``n_points:``)
    and curly braces around the coordinate list; those lines do not parse as
    two floats and are skipped.

    Parameters
    ----------
    lmFilepath
        Path to the ``.pts`` landmark file.

    Returns
    -------
    np.ndarray
        Array of shape (68, 2) with (x, y) landmark coordinates.
    """
    lms = []
    with open(lmFilepath) as f:
        for line in f:
            try:
                x, y = [float(e) for e in line.split()]
                lms.append((x, y))
            except ValueError:
                # Fix: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit.  Header/brace lines fail with
                # ValueError (bad float or wrong token count) -- skip only those.
                pass
    assert(len(lms) == 68)
    landmarks = np.vstack(lms)
    return landmarks
class W300(facedataset.FaceDataset):
    """300-W facial landmark dataset (afw/helen/lfpw/ibug splits, 68 landmarks)."""
    # Available sources for the face crop rectangle used by __getitem__.
    CROP_SOURCES = ['bb_detector', 'bb_ground_truth', 'lm_ground_truth']
    NUM_LANDMARKS = 68
    ALL_LANDMARKS = list(range(NUM_LANDMARKS))
    # Landmarks 0..16 form the face outline; 17..67 are the inner-face points.
    LANDMARKS_NO_OUTLINE = list(range(17,68))
    LANDMARKS_ONLY_OUTLINE = list(range(17))
    def __init__(self, root, cache_root=None, train=True, test_split='full',
                 crop_source='bb_detector', return_landmark_heatmaps=False,
                 return_modified_images=False, **kwargs):
        """Create a W300 dataset.

        :param root: dataset root; assumed to contain 'images' and
            'Bounding Boxes' subdirectories (see _load_annotations for the
            expected split layout).
        :param cache_root: directory for cached data, forwarded to the base class.
        :param train: whether to load the training split.
        :param test_split: one of 'train', 'common', 'challenging', '300w',
            'full' (only validated when train is False).
        :param crop_source: one of CROP_SOURCES.
        :param return_landmark_heatmaps: forwarded to the base class.
        :param return_modified_images: forwarded to the base class.
        :param kwargs: additional base-class options.
        """
        test_split = test_split.lower()
        if not train:
            assert(test_split in ['train', 'common', 'challenging', '300w', 'full'])
        assert(crop_source in W300.CROP_SOURCES)
        self.bounding_box_dir = os.path.join(root, 'Bounding Boxes')
        super().__init__(root=root,
                         cache_root=cache_root,
                         fullsize_img_dir=os.path.join(root, 'images'),
                         train=train,
                         test_split=test_split,
                         crop_source=crop_source,
                         return_landmark_heatmaps=return_landmark_heatmaps,
                         return_modified_images=return_modified_images,
                         **kwargs)
        if self.crop_type == 'fullsize':
            # No cropping requested: pass images through unchanged.
            self.transform = lambda x:x
    def _load_annotations(self, split):
        """Collect per-image annotations (GT landmarks and, where supplied,
        detector/GT bounding boxes) for the given split into a DataFrame.

        :param split: one of the keys of split_defs below.
        :return: pandas DataFrame with columns imgName, fname, landmarks and,
            when MAT bounding-box files exist, bb_detector / bb_ground_truth.
        """
        import scipy.io
        import glob
        # Each split maps to (image subdirectory, bounding-box MAT file suffix);
        # a suffix of None means no supplied bounding boxes (300W test set).
        split_defs = {
            'train': [
                ('train/afw', 'afw'),
                ('train/helen', 'helen_trainset'),
                ('train/lfpw', 'lfpw_trainset')
            ],
            'common': [
                ('test/common/helen', 'helen_testset'),
                ('test/common/lfpw', 'lfpw_testset')
            ],
            'challenging': [
                ('test/challenging/ibug', 'ibug')
            ],
            'full': [
                ('test/common/helen', 'helen_testset'),
                ('test/common/lfpw', 'lfpw_testset'),
                ('test/challenging/ibug', 'ibug')
            ],
            '300w': [
                ('test/300W/01_Indoor', None),
                ('test/300W/01_Outdoor', None)
            ]
        }
        ann = []
        bboxes = []
        for id, subset in enumerate(split_defs[split]):
            im_dir, bbox_file_suffix = subset
            # get image file paths and read GT landmarks
            ext = "*.jpg"
            if 'lfpw' in im_dir or '300W' in im_dir:
                ext = "*.png"
            for img_file in sorted(glob.glob(os.path.join(self.fullsize_img_dir, im_dir, ext))):
                path_abs_noext = os.path.splitext(img_file)[0]
                # NOTE(review): filename_noext is unused below -- presumably
                # kept for debugging.
                filename_noext = os.path.split(path_abs_noext)[1]
                filename = os.path.split(img_file)[1]
                path_rel = os.path.join(im_dir, filename)
                # load landmarks from *.pts files
                landmarks = read_300W_detection(path_abs_noext+'.pts')
                ann.append({'imgName': str(filename), 'fname': path_rel, 'landmarks': landmarks})
            # load supplied detected bounding boxes from MAT file
            if bbox_file_suffix is not None:
                mat_file = os.path.join(self.bounding_box_dir, 'bounding_boxes_{}.mat'.format(bbox_file_suffix))
                subset_bboxes = scipy.io.loadmat(mat_file)
                for item in subset_bboxes['bounding_boxes'][0]:
                    imgName, bb_detector, bb_ground_truth = item[0][0]
                    bboxes.append({'imgName': str(imgName[0]),
                                   'bb_detector': bb_detector[0],
                                   'bb_ground_truth': bb_ground_truth[0]})
        annotations = pd.DataFrame(ann)
        if len(bboxes) > 0:
            # Attach bounding boxes to their images by filename.
            df_bboxes = pd.DataFrame(bboxes)
            annotations = annotations.merge(df_bboxes, on='imgName', how='left')
        return annotations
    @property
    def labels(self):
        """This dataset has no class labels."""
        return None
    def __len__(self):
        """Number of annotated images in the selected split."""
        return len(self.annotations)
    def __getitem__(self, idx):
        """Return the sample at `idx`, cropped according to `crop_source`.

        The returned object is produced by the base class's get_sample and
        includes the ground-truth landmarks.
        """
        sample = self.annotations.iloc[idx]
        bb = sample.bb_detector if self.crop_source == 'bb_detector' else sample.bb_ground_truth
        # Enlarge the crop rectangle slightly (more at the top than bottom).
        bb = geometry.extend_bbox(bb, dt=0.2, db=0.12)
        landmarks = sample.landmarks.astype(np.float32)
        landmarks_for_crop = None
        if self.crop_source == 'lm_ground_truth':
            landmarks_for_crop = landmarks
        return self.get_sample(sample.fname, bb, landmarks_for_crop, landmarks_to_return=landmarks)
config.register_dataset(W300)
if __name__ == '__main__':
    # Manual smoke test: load the 'challenging' split deterministically and
    # display batches with their ground-truth landmarks drawn in green.
    from csl_common.vis import vis
    import torch
    import config
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)
    dirs = config.get_dataset_paths('w300')
    ds = W300(root=dirs[0], cache_root=dirs[1], train=False, deterministic=True, use_cache=False, image_size=256,
              test_split='challenging', daug=0, align_face_orientation=True, crop_source='lm_ground_truth')
    dl = td.DataLoader(ds, batch_size=10, shuffle=False, num_workers=0)
    for data in dl:
        batch = Batch(data, gpu=False)
        inputs = batch.images.clone()
        imgs = vis.to_disp_images(inputs, denorm=True)
        imgs = vis.add_landmarks_to_images(imgs, batch.landmarks, radius=3, color=(0,255,0))
        # imgs = vis.add_landmarks_to_images(imgs, data['landmarks_of'].numpy(), color=(1,0,0))
vis.vis_square(imgs, nCols=5, fx=1, fy=1, normalize=False) | [
"pandas.DataFrame",
"csl_common.utils.nn.Batch",
"csl_common.vis.vis.vis_square",
"torch.utils.data.DataLoader",
"torch.manual_seed",
"csl_common.vis.vis.to_disp_images",
"csl_common.vis.vis.add_landmarks_to_images",
"config.get_dataset_paths",
"config.register_dataset",
"torch.cuda.manual_seed_al... | [((5062, 5091), 'config.register_dataset', 'config.register_dataset', (['W300'], {}), '(W300)\n', (5085, 5091), False, 'import config\n'), ((501, 515), 'numpy.vstack', 'np.vstack', (['lms'], {}), '(lms)\n', (510, 515), True, 'import numpy as np\n'), ((5196, 5216), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (5213, 5216), False, 'import torch\n'), ((5221, 5250), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(0)'], {}), '(0)\n', (5247, 5250), False, 'import torch\n'), ((5263, 5295), 'config.get_dataset_paths', 'config.get_dataset_paths', (['"""w300"""'], {}), "('w300')\n", (5287, 5295), False, 'import config\n'), ((5527, 5589), 'torch.utils.data.DataLoader', 'td.DataLoader', (['ds'], {'batch_size': '(10)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(ds, batch_size=10, shuffle=False, num_workers=0)\n', (5540, 5589), True, 'import torch.utils.data as td\n'), ((1254, 1290), 'os.path.join', 'os.path.join', (['root', '"""Bounding Boxes"""'], {}), "(root, 'Bounding Boxes')\n", (1266, 1290), False, 'import os\n'), ((4229, 4246), 'pandas.DataFrame', 'pd.DataFrame', (['ann'], {}), '(ann)\n', (4241, 4246), True, 'import pandas as pd\n'), ((4734, 4775), 'csl_common.utils.geometry.extend_bbox', 'geometry.extend_bbox', (['bb'], {'dt': '(0.2)', 'db': '(0.12)'}), '(bb, dt=0.2, db=0.12)\n', (4754, 4775), False, 'from csl_common.utils import geometry\n'), ((5627, 5649), 'csl_common.utils.nn.Batch', 'Batch', (['data'], {'gpu': '(False)'}), '(data, gpu=False)\n', (5632, 5649), False, 'from csl_common.utils.nn import Batch\n'), ((5703, 5742), 'csl_common.vis.vis.to_disp_images', 'vis.to_disp_images', (['inputs'], {'denorm': '(True)'}), '(inputs, denorm=True)\n', (5721, 5742), False, 'from csl_common.vis import vis\n'), ((5758, 5837), 'csl_common.vis.vis.add_landmarks_to_images', 'vis.add_landmarks_to_images', (['imgs', 'batch.landmarks'], {'radius': '(3)', 'color': '(0, 255, 0)'}), '(imgs, batch.landmarks, 
radius=3, color=(0, 255, 0))\n', (5785, 5837), False, 'from csl_common.vis import vis\n'), ((5940, 5998), 'csl_common.vis.vis.vis_square', 'vis.vis_square', (['imgs'], {'nCols': '(5)', 'fx': '(1)', 'fy': '(1)', 'normalize': '(False)'}), '(imgs, nCols=5, fx=1, fy=1, normalize=False)\n', (5954, 5998), False, 'from csl_common.vis import vis\n'), ((4299, 4319), 'pandas.DataFrame', 'pd.DataFrame', (['bboxes'], {}), '(bboxes)\n', (4311, 4319), True, 'import pandas as pd\n'), ((1418, 1446), 'os.path.join', 'os.path.join', (['root', '"""images"""'], {}), "(root, 'images')\n", (1430, 1446), False, 'import os\n'), ((3332, 3362), 'os.path.join', 'os.path.join', (['im_dir', 'filename'], {}), '(im_dir, filename)\n', (3344, 3362), False, 'import os\n'), ((3068, 3116), 'os.path.join', 'os.path.join', (['self.fullsize_img_dir', 'im_dir', 'ext'], {}), '(self.fullsize_img_dir, im_dir, ext)\n', (3080, 3116), False, 'import os\n'), ((3154, 3180), 'os.path.splitext', 'os.path.splitext', (['img_file'], {}), '(img_file)\n', (3170, 3180), False, 'import os\n'), ((3218, 3247), 'os.path.split', 'os.path.split', (['path_abs_noext'], {}), '(path_abs_noext)\n', (3231, 3247), False, 'import os\n'), ((3278, 3301), 'os.path.split', 'os.path.split', (['img_file'], {}), '(img_file)\n', (3291, 3301), False, 'import os\n')] |
import pytest
import imageio
import numpy as np
from bentoml.yatai.client import YataiClient
from tests.bento_service_examples.example_bento_service import ExampleBentoService
def pytest_configure():
'''
global constants for tests
'''
# async request client
async def assert_request(
method,
url,
headers=None,
data=None,
timeout=None,
assert_status=None,
assert_data=None,
):
if assert_status is None:
assert_status = 200
import aiohttp
try:
async with aiohttp.ClientSession() as sess:
async with sess.request(
method, url, data=data, headers=headers, timeout=timeout
) as r:
data = await r.read()
except RuntimeError:
# the event loop has been closed due to previous task failed, ignore
return
if callable(assert_status):
assert assert_status(r.status)
else:
assert r.status == assert_status
if assert_data is not None:
if callable(assert_data):
assert assert_data(data)
else:
assert data == assert_data
pytest.assert_request = assert_request
# dataframe json orients
pytest.DF_ORIENTS = {
'split',
'records',
'index',
'columns',
'values',
# 'table', # TODO(bojiang)
}
pytest.DF_AUTO_ORIENTS = {
'records',
'columns',
}
def pytest_addoption(parser):
parser.addoption("--batch-request", action="store_false")
@pytest.fixture()
def is_batch_request(pytestconfig):
return pytestconfig.getoption("batch_request")
@pytest.fixture()
def img_file(tmpdir):
img_file_ = tmpdir.join("test_img.jpg")
imageio.imwrite(str(img_file_), np.zeros((10, 10)))
return str(img_file_)
@pytest.fixture()
def json_file(tmpdir):
json_file_ = tmpdir.join("test_json.json")
with open(json_file_, "w") as of:
of.write('{"name": "kaith", "game": "morrowind"}')
return str(json_file_)
@pytest.fixture()
def bin_file(tmpdir):
bin_file_ = tmpdir.join("bin_file")
with open(bin_file_, "wb") as of:
of.write("â".encode('gb18030'))
return str(bin_file_)
@pytest.fixture()
def bin_files(tmpdir):
for i in range(10):
bin_file_ = tmpdir.join(f"{i}")
with open(bin_file_, "wb") as of:
of.write(f"â{i}".encode('gb18030'))
return str(tmpdir.join("*"))
@pytest.fixture()
def img_files(tmpdir):
for i in range(10):
img_file_ = tmpdir.join(f"test_img_{i}.jpg")
imageio.imwrite(str(img_file_), np.zeros((10, 10)))
return str(tmpdir.join("*.jpg"))
class TestModel(object):
def predict_dataframe(self, df):
return df["col1"].sum()
def predict_image(self, input_datas):
for input_data in input_datas:
assert input_data is not None
return [input_data.shape for input_data in input_datas]
def predict_legacy_images(self, original, compared):
return (original == compared).all()
def predict_json(self, input_jsons):
assert input_jsons
return [{"ok": True}] * len(input_jsons)
def predict_legacy_json(self, input_json):
assert input_json is not None
return {"ok": True}
@pytest.fixture()
def example_bento_service_class():
# When the ExampleBentoService got saved and loaded again in the test, the two class
# attribute below got set to the loaded BentoService class. Resetting it here so it
# does not effect other tests
ExampleBentoService._bento_service_bundle_path = None
ExampleBentoService._bento_service_bundle_version = None
return ExampleBentoService
@pytest.fixture()
def bento_service(example_bento_service_class): # pylint:disable=redefined-outer-name
"""Create a new ExampleBentoService
"""
test_model = TestModel()
test_svc = example_bento_service_class()
test_svc.pack('model', test_model)
return test_svc
@pytest.fixture()
def bento_bundle_path(bento_service): # pylint:disable=redefined-outer-name
"""Create a new ExampleBentoService, saved it to tmpdir, and return full saved_path
"""
saved_path = bento_service.save()
yield saved_path
delete_saved_bento_service(bento_service.name, bento_service.version)
def delete_saved_bento_service(name, version):
yc = YataiClient()
yc.repository.dangerously_delete_bento(name, version)
| [
"bentoml.yatai.client.YataiClient",
"numpy.zeros",
"pytest.fixture",
"aiohttp.ClientSession"
] | [((1654, 1670), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1668, 1670), False, 'import pytest\n'), ((1761, 1777), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1775, 1777), False, 'import pytest\n'), ((1929, 1945), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1943, 1945), False, 'import pytest\n'), ((2143, 2159), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2157, 2159), False, 'import pytest\n'), ((2329, 2345), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2343, 2345), False, 'import pytest\n'), ((2559, 2575), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2573, 2575), False, 'import pytest\n'), ((3394, 3410), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (3408, 3410), False, 'import pytest\n'), ((3810, 3826), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (3824, 3826), False, 'import pytest\n'), ((4098, 4114), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (4112, 4114), False, 'import pytest\n'), ((4479, 4492), 'bentoml.yatai.client.YataiClient', 'YataiClient', ([], {}), '()\n', (4490, 4492), False, 'from bentoml.yatai.client import YataiClient\n'), ((1880, 1898), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (1888, 1898), True, 'import numpy as np\n'), ((2716, 2734), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (2724, 2734), True, 'import numpy as np\n'), ((588, 611), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (609, 611), False, 'import aiohttp\n')] |
"""
Tests the SynthData class which is used to generate synthetic stellar
kinematic data from simple, Gaussian distributions.
"""
from __future__ import print_function, division, unicode_literals
from astropy.table import Table, join
import numpy as np
import sys
sys.path.insert(0,'..')
from chronostar.synthdata import SynthData
from chronostar.component import SphereComponent, EllipComponent
from chronostar import tabletool
PARS = np.array([
[0., 0., 0., 0., 0., 0., 10., 5., 1e-5],
[5., 0.,-5., 0., 0., 0., 10., 5., 40.]
])
STARCOUNTS = [50, 30]
COMPONENTS = SphereComponent
def test_initialisation():
"""Basic sanity check to see if things start off ok"""
sd = SynthData(pars=PARS, starcounts=STARCOUNTS, Components=COMPONENTS)
assert np.allclose(PARS, sd.pars)
assert sd.ncomps == len(PARS)
assert np.allclose(PARS[0], sd.components[0].get_pars())
assert np.allclose(np.array(STARCOUNTS), sd.starcounts)
sd2 = SynthData(pars=PARS[0], starcounts=STARCOUNTS[0],
Components=COMPONENTS)
assert np.allclose(np.array([STARCOUNTS[0]]), sd2.starcounts)
starcounts = 50.
sd3 = SynthData(pars=PARS[0], starcounts=starcounts,
Components=COMPONENTS)
assert np.allclose(np.array([np.int(starcounts)]), sd3.starcounts)
def test_generateInitXYZUVW():
"""Check that the mean of initial xyzuvw of stars matches that of the
initialising component"""
starcounts = (int(1e6),)
sd = SynthData(pars=PARS[:1], starcounts=starcounts, Components=COMPONENTS)
sd.generate_all_init_cartesian()
comp = SphereComponent(PARS[0])
init_xyzuvw = sd.extract_data_as_array([dim + '0' for dim in 'xyzuvw'])
assert np.allclose(comp.get_mean(), np.mean(init_xyzuvw, axis=0),
atol=0.1)
def test_projectStars():
"""Check that the mean of stars after projection matches the mean
of the component after projection"""
starcounts = (int(1e3),)
sd = SynthData(pars=PARS[:1], starcounts=starcounts, Components=COMPONENTS)
sd.generate_all_init_cartesian()
sd.project_stars()
comp_mean_now, comp_covmatrix_now = \
sd.components[0].get_currentday_projection()
final_xyzuvw = sd.extract_data_as_array([dim + '_now' for dim in 'xzyuvw'])
assert np.allclose(comp_mean_now, final_xyzuvw.mean(axis=0), atol=1.)
def test_measureXYZUVW():
"""Check measurements of xyzuvw_now to astrometry occur properly.
Will use extremely dense component as case study as this ensures stars
all have more or less the same true values"""
compact_comp_pars = np.copy(PARS[0])
compact_comp_pars[6] = 1e-15
compact_comp_pars[7] = 1e-15
compact_comp_pars[8] = 1e-15
starcounts = [1000]
sd = SynthData(pars=np.array([compact_comp_pars]), starcounts=starcounts,
Components=COMPONENTS)
sd.generate_all_init_cartesian()
sd.project_stars()
sd.measure_astrometry()
for colname in SynthData.DEFAULT_ASTR_COLNAMES:
assert np.allclose(sd.GERROR[colname + '_error'],
sd.table[colname + '_error'])
# Check spread of data is similar to Gaia error, we use
# a large tolerance so a small number of stars can be used
assert np.isclose(sd.GERROR[colname + '_error'],
np.std(sd.table[colname]),
rtol=1e-1)
def test_storeTable():
"""Check storing table and loading works"""
filename = 'temp_data/test_storeTable_output.fits'
sd = SynthData(pars=PARS, starcounts=STARCOUNTS, Components=COMPONENTS)
sd.synthesise_everything()
sd.store_table(filename=filename, overwrite=True)
stored_table = Table.read(filename)
assert np.allclose(sd.table['parallax'], stored_table['parallax'])
def test_synthesiseEverything():
"""Check everything goes to plan with single call"""
sd = SynthData(pars=PARS, starcounts=STARCOUNTS, Components=COMPONENTS)
sd.synthesise_everything()
assert np.isclose(np.sum(STARCOUNTS), len(sd.table))
def test_storeAndLoad():
"""Check that storing and loading works as expected"""
filename = 'temp_data/test_synthesiseEverything_output.fits'
sd = SynthData(pars=PARS, starcounts=STARCOUNTS, Components=COMPONENTS)
sd.synthesise_everything(filename=filename, overwrite=True)
# Trying to store table at `filename` without overwrite throws error
try:
sd.synthesise_everything(filename=filename, overwrite=False)
except IOError:
pass
#TODO: implement some means of storing (and loading) entire object
def test_artificialMeasurement():
"""Ensure that scaling the measurement uncertainty scales the reported
uncertainties appropriately, and that offsets in data due to error scale
with input error"""
pars = PARS[:1]
starcounts = [100]
sd_dict = {}
names = ['perf', 'good', 'norm', 'bad']
m_err_dict = {
'perf':1e-10,
'good':1e-1,
'norm':1.0,
'bad':1e1,
}
for name in names:
np.random.seed(1)
sd = SynthData(pars=pars, starcounts=starcounts,
measurement_error=m_err_dict[name],
Components=COMPONENTS)
sd.synthesise_everything()
sd_dict[name] = sd
# Assert that measurement errors are stored correctly in columns
for name in names[1:]:
assert np.allclose(
sd_dict[name].table['radial_velocity_error'],
m_err_dict[name]*SynthData.GERROR['radial_velocity_error']
)
# Get reference for degree of offset expected
norm_offset = np.mean(
np.abs(sd_dict['perf'].table['radial_velocity']
- sd_dict['norm'].table['radial_velocity'])
)
bad_offset = np.mean(
np.abs(sd_dict['perf'].table['radial_velocity']
- sd_dict['bad'].table['radial_velocity'])
)
good_offset = np.mean(
np.abs(sd_dict['perf'].table['radial_velocity']
- sd_dict['good'].table['radial_velocity'])
)
# Check the average offset scales with incorporated measurement error
assert np.isclose(norm_offset*m_err_dict['bad'], bad_offset)
assert np.isclose(norm_offset*m_err_dict['good'], good_offset)
def test_multiple_synth_components():
"""Check initialising with multiple components works"""
age = 1e-10
dx = 5.
dv = 2.
ass_pars1 = np.array([10, 20, 30, 40, 50, 60, dx, dv, age])
comp1 = SphereComponent(ass_pars1)
ass_pars2 = np.array([0., 0., 0, 0, 0, 0, dx, dv, age])
comp2 = SphereComponent(ass_pars2)
starcounts = [100, 100]
try:
synth_data = SynthData(pars=[ass_pars1, ass_pars2],
starcounts=starcounts[0],
Components=SphereComponent)
raise UserWarning('AssertionError should have been thrown by synthdata')
except AssertionError:
pass
synth_data = SynthData(pars=[ass_pars1, ass_pars2],
starcounts=starcounts,
Components=SphereComponent)
synth_data.synthesise_everything()
assert len(synth_data.table) == np.sum(starcounts)
means = tabletool.build_data_dict_from_table(
synth_data.table,
main_colnames=[el+'0' for el in 'xyzuvw'],
only_means=True
)
assert np.allclose(comp2.get_mean(), means[starcounts[0]:].mean(axis=0),
atol=2.)
assert np.allclose(comp1.get_mean(), means[:starcounts[0]].mean(axis=0),
atol=2.)
def test_different_component_forms():
"""Check component forms can be different"""
tiny_age = 1e-10
mean1 = np.zeros(6)
covmatrix1 = np.eye(6) * 4
comp1 = SphereComponent(attributes={
'mean':mean1,
'covmatrix':covmatrix1,
'age':tiny_age,
})
mean2 = np.zeros(6) + 10.
covmatrix2 = np.eye(6) * 9
comp2 = EllipComponent(attributes={
'mean':mean2,
'covmatrix':covmatrix2,
'age':tiny_age,
})
starcounts = [100,100]
synth_data = SynthData(pars=[comp1.get_pars(), comp2.get_pars()],
starcounts=starcounts,
Components=[SphereComponent, EllipComponent])
synth_data.synthesise_everything()
assert len(synth_data.table) == np.sum(starcounts)
def test_background_component():
"""Create artificial association composed of two stars at opposite vertices
of unit 6D rectangle. Then base background density distribution on that."""
background_density = 100
# Since the background double the span of data, by setting the means as
# follows, the backbround should extend from 0 to 1 in each dimension,
# which greatly simplifies reasoning about densities and starcounts.
upper_mean = np.zeros(6) + 0.75
lower_mean = np.zeros(6) + 0.25
narrow_dx = 1e-10
narrow_dv = 1e-10
tiny_age = 1e-10
upper_pars = np.hstack((upper_mean, narrow_dx, narrow_dv, tiny_age))
lower_pars = np.hstack((lower_mean, narrow_dx, narrow_dv, tiny_age))
starcounts = [1,1]
synth_data = SynthData(pars=[upper_pars, lower_pars],
starcounts=starcounts,
background_density=background_density)
synth_data.generate_all_init_cartesian()
means = tabletool.build_data_dict_from_table(
synth_data.table[2:],
main_colnames=[el+'0' for el in 'xyzuvw'],
only_means=True,
)
assert np.allclose(0.5, np.mean(means, axis=0), atol=0.1)
assert np.allclose(1.0, np.max(means, axis=0), atol=0.1)
assert np.allclose(0.0, np.min(means, axis=0), atol=0.1)
assert len(synth_data.table) == background_density + 2
if __name__ == '__main__':
pass
| [
"numpy.sum",
"numpy.random.seed",
"numpy.abs",
"numpy.allclose",
"numpy.isclose",
"numpy.mean",
"numpy.copy",
"numpy.std",
"numpy.max",
"numpy.int",
"chronostar.component.EllipComponent",
"chronostar.synthdata.SynthData",
"numpy.hstack",
"numpy.min",
"chronostar.tabletool.build_data_dict... | [((266, 290), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (281, 290), False, 'import sys\n'), ((440, 554), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 5.0, 1e-05], [5.0, 0.0, -5.0, 0.0, \n 0.0, 0.0, 10.0, 5.0, 40.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 5.0, 1e-05], [5.0, 0.0, -5.0,\n 0.0, 0.0, 0.0, 10.0, 5.0, 40.0]])\n', (448, 554), True, 'import numpy as np\n'), ((689, 755), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': 'PARS', 'starcounts': 'STARCOUNTS', 'Components': 'COMPONENTS'}), '(pars=PARS, starcounts=STARCOUNTS, Components=COMPONENTS)\n', (698, 755), False, 'from chronostar.synthdata import SynthData\n'), ((768, 794), 'numpy.allclose', 'np.allclose', (['PARS', 'sd.pars'], {}), '(PARS, sd.pars)\n', (779, 794), True, 'import numpy as np\n'), ((962, 1034), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': 'PARS[0]', 'starcounts': 'STARCOUNTS[0]', 'Components': 'COMPONENTS'}), '(pars=PARS[0], starcounts=STARCOUNTS[0], Components=COMPONENTS)\n', (971, 1034), False, 'from chronostar.synthdata import SynthData\n'), ((1153, 1222), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': 'PARS[0]', 'starcounts': 'starcounts', 'Components': 'COMPONENTS'}), '(pars=PARS[0], starcounts=starcounts, Components=COMPONENTS)\n', (1162, 1222), False, 'from chronostar.synthdata import SynthData\n'), ((1489, 1559), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': 'PARS[:1]', 'starcounts': 'starcounts', 'Components': 'COMPONENTS'}), '(pars=PARS[:1], starcounts=starcounts, Components=COMPONENTS)\n', (1498, 1559), False, 'from chronostar.synthdata import SynthData\n'), ((1609, 1633), 'chronostar.component.SphereComponent', 'SphereComponent', (['PARS[0]'], {}), '(PARS[0])\n', (1624, 1633), False, 'from chronostar.component import SphereComponent, EllipComponent\n'), ((1989, 2059), 'chronostar.synthdata.SynthData', 'SynthData', 
([], {'pars': 'PARS[:1]', 'starcounts': 'starcounts', 'Components': 'COMPONENTS'}), '(pars=PARS[:1], starcounts=starcounts, Components=COMPONENTS)\n', (1998, 2059), False, 'from chronostar.synthdata import SynthData\n'), ((2617, 2633), 'numpy.copy', 'np.copy', (['PARS[0]'], {}), '(PARS[0])\n', (2624, 2633), True, 'import numpy as np\n'), ((3549, 3615), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': 'PARS', 'starcounts': 'STARCOUNTS', 'Components': 'COMPONENTS'}), '(pars=PARS, starcounts=STARCOUNTS, Components=COMPONENTS)\n', (3558, 3615), False, 'from chronostar.synthdata import SynthData\n'), ((3720, 3740), 'astropy.table.Table.read', 'Table.read', (['filename'], {}), '(filename)\n', (3730, 3740), False, 'from astropy.table import Table, join\n'), ((3753, 3812), 'numpy.allclose', 'np.allclose', (["sd.table['parallax']", "stored_table['parallax']"], {}), "(sd.table['parallax'], stored_table['parallax'])\n", (3764, 3812), True, 'import numpy as np\n'), ((3914, 3980), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': 'PARS', 'starcounts': 'STARCOUNTS', 'Components': 'COMPONENTS'}), '(pars=PARS, starcounts=STARCOUNTS, Components=COMPONENTS)\n', (3923, 3980), False, 'from chronostar.synthdata import SynthData\n'), ((4230, 4296), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': 'PARS', 'starcounts': 'STARCOUNTS', 'Components': 'COMPONENTS'}), '(pars=PARS, starcounts=STARCOUNTS, Components=COMPONENTS)\n', (4239, 4296), False, 'from chronostar.synthdata import SynthData\n'), ((6190, 6245), 'numpy.isclose', 'np.isclose', (["(norm_offset * m_err_dict['bad'])", 'bad_offset'], {}), "(norm_offset * m_err_dict['bad'], bad_offset)\n", (6200, 6245), True, 'import numpy as np\n'), ((6255, 6312), 'numpy.isclose', 'np.isclose', (["(norm_offset * m_err_dict['good'])", 'good_offset'], {}), "(norm_offset * m_err_dict['good'], good_offset)\n", (6265, 6312), True, 'import numpy as np\n'), ((6466, 6513), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 
50, 60, dx, dv, age]'], {}), '([10, 20, 30, 40, 50, 60, dx, dv, age])\n', (6474, 6513), True, 'import numpy as np\n'), ((6526, 6552), 'chronostar.component.SphereComponent', 'SphereComponent', (['ass_pars1'], {}), '(ass_pars1)\n', (6541, 6552), False, 'from chronostar.component import SphereComponent, EllipComponent\n'), ((6569, 6614), 'numpy.array', 'np.array', (['[0.0, 0.0, 0, 0, 0, 0, dx, dv, age]'], {}), '([0.0, 0.0, 0, 0, 0, 0, dx, dv, age])\n', (6577, 6614), True, 'import numpy as np\n'), ((6625, 6651), 'chronostar.component.SphereComponent', 'SphereComponent', (['ass_pars2'], {}), '(ass_pars2)\n', (6640, 6651), False, 'from chronostar.component import SphereComponent, EllipComponent\n'), ((7004, 7098), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': '[ass_pars1, ass_pars2]', 'starcounts': 'starcounts', 'Components': 'SphereComponent'}), '(pars=[ass_pars1, ass_pars2], starcounts=starcounts, Components=\n SphereComponent)\n', (7013, 7098), False, 'from chronostar.synthdata import SynthData\n'), ((7255, 7377), 'chronostar.tabletool.build_data_dict_from_table', 'tabletool.build_data_dict_from_table', (['synth_data.table'], {'main_colnames': "[(el + '0') for el in 'xyzuvw']", 'only_means': '(True)'}), "(synth_data.table, main_colnames=[(el +\n '0') for el in 'xyzuvw'], only_means=True)\n", (7291, 7377), False, 'from chronostar import tabletool\n'), ((7752, 7763), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (7760, 7763), True, 'import numpy as np\n'), ((7807, 7896), 'chronostar.component.SphereComponent', 'SphereComponent', ([], {'attributes': "{'mean': mean1, 'covmatrix': covmatrix1, 'age': tiny_age}"}), "(attributes={'mean': mean1, 'covmatrix': covmatrix1, 'age':\n tiny_age})\n", (7822, 7896), False, 'from chronostar.component import SphereComponent, EllipComponent\n'), ((7995, 8083), 'chronostar.component.EllipComponent', 'EllipComponent', ([], {'attributes': "{'mean': mean2, 'covmatrix': covmatrix2, 'age': tiny_age}"}), 
"(attributes={'mean': mean2, 'covmatrix': covmatrix2, 'age':\n tiny_age})\n", (8009, 8083), False, 'from chronostar.component import SphereComponent, EllipComponent\n'), ((9026, 9081), 'numpy.hstack', 'np.hstack', (['(upper_mean, narrow_dx, narrow_dv, tiny_age)'], {}), '((upper_mean, narrow_dx, narrow_dv, tiny_age))\n', (9035, 9081), True, 'import numpy as np\n'), ((9099, 9154), 'numpy.hstack', 'np.hstack', (['(lower_mean, narrow_dx, narrow_dv, tiny_age)'], {}), '((lower_mean, narrow_dx, narrow_dv, tiny_age))\n', (9108, 9154), True, 'import numpy as np\n'), ((9197, 9303), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': '[upper_pars, lower_pars]', 'starcounts': 'starcounts', 'background_density': 'background_density'}), '(pars=[upper_pars, lower_pars], starcounts=starcounts,\n background_density=background_density)\n', (9206, 9303), False, 'from chronostar.synthdata import SynthData\n'), ((9412, 9539), 'chronostar.tabletool.build_data_dict_from_table', 'tabletool.build_data_dict_from_table', (['synth_data.table[2:]'], {'main_colnames': "[(el + '0') for el in 'xyzuvw']", 'only_means': '(True)'}), "(synth_data.table[2:], main_colnames=[(\n el + '0') for el in 'xyzuvw'], only_means=True)\n", (9448, 9539), False, 'from chronostar import tabletool\n'), ((913, 933), 'numpy.array', 'np.array', (['STARCOUNTS'], {}), '(STARCOUNTS)\n', (921, 933), True, 'import numpy as np\n'), ((1078, 1103), 'numpy.array', 'np.array', (['[STARCOUNTS[0]]'], {}), '([STARCOUNTS[0]])\n', (1086, 1103), True, 'import numpy as np\n'), ((1750, 1778), 'numpy.mean', 'np.mean', (['init_xyzuvw'], {'axis': '(0)'}), '(init_xyzuvw, axis=0)\n', (1757, 1778), True, 'import numpy as np\n'), ((3034, 3106), 'numpy.allclose', 'np.allclose', (["sd.GERROR[colname + '_error']", "sd.table[colname + '_error']"], {}), "(sd.GERROR[colname + '_error'], sd.table[colname + '_error'])\n", (3045, 3106), True, 'import numpy as np\n'), ((4035, 4053), 'numpy.sum', 'np.sum', (['STARCOUNTS'], {}), '(STARCOUNTS)\n', 
(4041, 4053), True, 'import numpy as np\n'), ((5072, 5089), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (5086, 5089), True, 'import numpy as np\n'), ((5103, 5210), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': 'pars', 'starcounts': 'starcounts', 'measurement_error': 'm_err_dict[name]', 'Components': 'COMPONENTS'}), '(pars=pars, starcounts=starcounts, measurement_error=m_err_dict[\n name], Components=COMPONENTS)\n', (5112, 5210), False, 'from chronostar.synthdata import SynthData\n'), ((5426, 5549), 'numpy.allclose', 'np.allclose', (["sd_dict[name].table['radial_velocity_error']", "(m_err_dict[name] * SynthData.GERROR['radial_velocity_error'])"], {}), "(sd_dict[name].table['radial_velocity_error'], m_err_dict[name] *\n SynthData.GERROR['radial_velocity_error'])\n", (5437, 5549), True, 'import numpy as np\n'), ((5676, 5772), 'numpy.abs', 'np.abs', (["(sd_dict['perf'].table['radial_velocity'] - sd_dict['norm'].table[\n 'radial_velocity'])"], {}), "(sd_dict['perf'].table['radial_velocity'] - sd_dict['norm'].table[\n 'radial_velocity'])\n", (5682, 5772), True, 'import numpy as np\n'), ((5832, 5927), 'numpy.abs', 'np.abs', (["(sd_dict['perf'].table['radial_velocity'] - sd_dict['bad'].table[\n 'radial_velocity'])"], {}), "(sd_dict['perf'].table['radial_velocity'] - sd_dict['bad'].table[\n 'radial_velocity'])\n", (5838, 5927), True, 'import numpy as np\n'), ((5987, 6083), 'numpy.abs', 'np.abs', (["(sd_dict['perf'].table['radial_velocity'] - sd_dict['good'].table[\n 'radial_velocity'])"], {}), "(sd_dict['perf'].table['radial_velocity'] - sd_dict['good'].table[\n 'radial_velocity'])\n", (5993, 6083), True, 'import numpy as np\n'), ((6710, 6807), 'chronostar.synthdata.SynthData', 'SynthData', ([], {'pars': '[ass_pars1, ass_pars2]', 'starcounts': 'starcounts[0]', 'Components': 'SphereComponent'}), '(pars=[ass_pars1, ass_pars2], starcounts=starcounts[0], Components\n =SphereComponent)\n', (6719, 6807), False, 'from chronostar.synthdata import 
SynthData\n'), ((7224, 7242), 'numpy.sum', 'np.sum', (['starcounts'], {}), '(starcounts)\n', (7230, 7242), True, 'import numpy as np\n'), ((7781, 7790), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (7787, 7790), True, 'import numpy as np\n'), ((7934, 7945), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (7942, 7945), True, 'import numpy as np\n'), ((7969, 7978), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (7975, 7978), True, 'import numpy as np\n'), ((8404, 8422), 'numpy.sum', 'np.sum', (['starcounts'], {}), '(starcounts)\n', (8410, 8422), True, 'import numpy as np\n'), ((8889, 8900), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (8897, 8900), True, 'import numpy as np\n'), ((8925, 8936), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (8933, 8936), True, 'import numpy as np\n'), ((9602, 9624), 'numpy.mean', 'np.mean', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (9609, 9624), True, 'import numpy as np\n'), ((9664, 9685), 'numpy.max', 'np.max', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (9670, 9685), True, 'import numpy as np\n'), ((9725, 9746), 'numpy.min', 'np.min', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (9731, 9746), True, 'import numpy as np\n'), ((2782, 2811), 'numpy.array', 'np.array', (['[compact_comp_pars]'], {}), '([compact_comp_pars])\n', (2790, 2811), True, 'import numpy as np\n'), ((3348, 3373), 'numpy.std', 'np.std', (['sd.table[colname]'], {}), '(sd.table[colname])\n', (3354, 3373), True, 'import numpy as np\n'), ((1276, 1294), 'numpy.int', 'np.int', (['starcounts'], {}), '(starcounts)\n', (1282, 1294), True, 'import numpy as np\n')] |
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List
from typing import Tuple
import pytest
import numpy as np
from nncf.experimental.onnx.samplers import ONNXBatchSampler
from nncf.experimental.onnx.samplers import ONNXRandomBatchSampler
from nncf.experimental.post_training.api.dataloader import DataLoader
INPUT_SHAPE = [3, 10, 10]
DATASET_SAMPLES = [(np.zeros(INPUT_SHAPE), 0),
(np.ones(INPUT_SHAPE), 1),
(100 * np.ones(INPUT_SHAPE), 2)]
class TestDataloader(DataLoader):
def __init__(self, samples: List[Tuple[np.ndarray, int]]):
super().__init__(shuffle=False)
self.samples = samples
def __getitem__(self, item):
return self.samples[item]
def __len__(self):
return 3
@pytest.mark.parametrize("batch_size", (1, 2, 3))
def test_batch_sampler(batch_size):
dataloader = TestDataloader(DATASET_SAMPLES)
dataloader.batch_size = batch_size
sampler = ONNXBatchSampler(dataloader)
for i, sample in enumerate(sampler):
ref_sample = []
ref_target = []
for j in range(i * batch_size, i * batch_size + batch_size):
ref_sample.extend([DATASET_SAMPLES[j][0]])
ref_target.extend([DATASET_SAMPLES[j][1]])
ref_sample = np.stack(ref_sample)
ref_target = np.stack(ref_target)
assert np.array_equal(sample[0], ref_sample)
assert np.array_equal(sample[1], ref_target)
@pytest.mark.parametrize("batch_size", (1, 2, 3))
def test_random_batch_sampler(batch_size):
np.random.seed(0)
dataloader = TestDataloader(DATASET_SAMPLES)
dataloader.batch_size = batch_size
sampler = ONNXRandomBatchSampler(dataloader)
random_permuated_indices = [0, 2, 1]
for i, sample in enumerate(sampler):
ref_sample = []
ref_target = []
for j in range(i * batch_size, i * batch_size + batch_size):
ref_sample.extend([DATASET_SAMPLES[random_permuated_indices[j]][0]])
ref_target.extend([DATASET_SAMPLES[random_permuated_indices[j]][1]])
ref_sample = np.stack(ref_sample)
ref_target = np.stack(ref_target)
assert np.array_equal(sample[0], ref_sample)
assert np.array_equal(sample[1], ref_target)
| [
"numpy.stack",
"numpy.random.seed",
"nncf.experimental.onnx.samplers.ONNXBatchSampler",
"nncf.experimental.onnx.samplers.ONNXRandomBatchSampler",
"numpy.zeros",
"numpy.ones",
"numpy.array_equal",
"pytest.mark.parametrize"
] | [((1317, 1365), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '(1, 2, 3)'], {}), "('batch_size', (1, 2, 3))\n", (1340, 1365), False, 'import pytest\n'), ((1994, 2042), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '(1, 2, 3)'], {}), "('batch_size', (1, 2, 3))\n", (2017, 2042), False, 'import pytest\n'), ((1504, 1532), 'nncf.experimental.onnx.samplers.ONNXBatchSampler', 'ONNXBatchSampler', (['dataloader'], {}), '(dataloader)\n', (1520, 1532), False, 'from nncf.experimental.onnx.samplers import ONNXBatchSampler\n'), ((2090, 2107), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2104, 2107), True, 'import numpy as np\n'), ((2210, 2244), 'nncf.experimental.onnx.samplers.ONNXRandomBatchSampler', 'ONNXRandomBatchSampler', (['dataloader'], {}), '(dataloader)\n', (2232, 2244), False, 'from nncf.experimental.onnx.samplers import ONNXRandomBatchSampler\n'), ((910, 931), 'numpy.zeros', 'np.zeros', (['INPUT_SHAPE'], {}), '(INPUT_SHAPE)\n', (918, 931), True, 'import numpy as np\n'), ((957, 977), 'numpy.ones', 'np.ones', (['INPUT_SHAPE'], {}), '(INPUT_SHAPE)\n', (964, 977), True, 'import numpy as np\n'), ((1822, 1842), 'numpy.stack', 'np.stack', (['ref_sample'], {}), '(ref_sample)\n', (1830, 1842), True, 'import numpy as np\n'), ((1864, 1884), 'numpy.stack', 'np.stack', (['ref_target'], {}), '(ref_target)\n', (1872, 1884), True, 'import numpy as np\n'), ((1900, 1937), 'numpy.array_equal', 'np.array_equal', (['sample[0]', 'ref_sample'], {}), '(sample[0], ref_sample)\n', (1914, 1937), True, 'import numpy as np\n'), ((1953, 1990), 'numpy.array_equal', 'np.array_equal', (['sample[1]', 'ref_target'], {}), '(sample[1], ref_target)\n', (1967, 1990), True, 'import numpy as np\n'), ((2627, 2647), 'numpy.stack', 'np.stack', (['ref_sample'], {}), '(ref_sample)\n', (2635, 2647), True, 'import numpy as np\n'), ((2669, 2689), 'numpy.stack', 'np.stack', (['ref_target'], {}), '(ref_target)\n', (2677, 2689), True, 
'import numpy as np\n'), ((2705, 2742), 'numpy.array_equal', 'np.array_equal', (['sample[0]', 'ref_sample'], {}), '(sample[0], ref_sample)\n', (2719, 2742), True, 'import numpy as np\n'), ((2758, 2795), 'numpy.array_equal', 'np.array_equal', (['sample[1]', 'ref_target'], {}), '(sample[1], ref_target)\n', (2772, 2795), True, 'import numpy as np\n'), ((1009, 1029), 'numpy.ones', 'np.ones', (['INPUT_SHAPE'], {}), '(INPUT_SHAPE)\n', (1016, 1029), True, 'import numpy as np\n')] |
"""
****************************************
* @author: <NAME>
* Date: 8/26/21
****************************************
"""
"""
****************************************
* @author: <NAME>
* Date: 5/22/21
****************************************
"""
import time
import tensorflow.keras as keras
import pandas as pd
from tqdm import tqdm
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, SimpleRNN, LSTM, GRU, Bidirectional
from random import sample
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
import copy
activation_fcts = [
'relu', "sigmoid", "softmax", "softplus", "softsign", "tanh", "selu", "elu", "exponential"
]
optimizers = ["sgd", "rmsprop", "adam", "adadelta", "adagrad", "adamax", "nadam", "ftrl"]
losses = ["mae", "mape", "mse", "msle", "poisson", "categorical_crossentropy"]
rnn_layer_types = ['SimpleRNN', 'LSTM', 'GRU']
class TimeHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.train_start_time = time.time()
self.epoch_times = []
self.batch_times = []
self.epoch_times_detail = []
self.batch_times_detail = []
def on_train_end(self, logs={}):
self.train_end_time = time.time()
def on_epoch_begin(self, epoch, logs={}):
self.epoch_time_start = time.time()
def on_epoch_end(self, epoch, logs={}):
epoch_time_end = time.time()
self.epoch_times.append(epoch_time_end - self.epoch_time_start)
self.epoch_times_detail.append((self.epoch_time_start, epoch_time_end))
def on_train_batch_begin(self, batch, logs={}):
self.bacth_time_start = time.time()
def on_train_batch_end(self, batch, logs={}):
batch_time_end = time.time()
self.batch_times.append(batch_time_end - self.bacth_time_start)
self.batch_times_detail.append((self.bacth_time_start, batch_time_end))
def relative_by_train_start(self):
self.epoch_times_detail = np.array(self.epoch_times_detail) - self.train_start_time
self.batch_times_detail = np.array(self.batch_times_detail) - self.train_start_time
self.train_end_time = np.array(self.train_end_time) - self.train_start_time
class gen_bidirectional_rnn:
    """Random generator of bidirectional-RNN model configurations.

    Layer counts and sizes are sampled uniformly from the half-open
    ranges ``[lower, upper)``.  Activation, optimizer, loss and RNN cell
    type are either fixed to the given value or (``'random'``) sampled
    from the module-level candidate lists ``activation_fcts``,
    ``optimizers``, ``losses`` and ``rnn_layer_types``.
    """
    def __init__(
        self,
        rnn_layers_num_lower=1,
        rnn_layers_num_upper=10,
        rnn_layer_size_lower=1,
        rnn_layer_size_upper=101,
        dense_layers_num_lower=1,
        dense_layers_num_upper=3,
        dense_layer_size_lower=1,
        dense_layer_size_upper=6,
        activation='random',
        optimizer='random',
        loss='random',
        rnn_layer_type='random'
    ):
        self.rnn_layers_num_lower = rnn_layers_num_lower
        self.rnn_layers_num_upper = rnn_layers_num_upper
        self.rnn_layer_size_lower = rnn_layer_size_lower
        self.rnn_layer_size_upper = rnn_layer_size_upper
        # BUG FIX: these four assignments previously ended with a stray
        # trailing comma, which turned every bound into a 1-tuple instead
        # of a plain int.
        self.dense_layers_num_lower = dense_layers_num_lower
        self.dense_layers_num_upper = dense_layers_num_upper
        self.dense_layer_size_lower = dense_layer_size_lower
        self.dense_layer_size_upper = dense_layer_size_upper
        self.activation_pick = activation
        self.optimizer_pick = optimizer
        self.loss_pick = loss
        self.rnn_layer_type_pick = rnn_layer_type
        # Candidate pools are module-level globals of this file.
        self.activation_fcts = activation_fcts
        self.optimizers = optimizers
        self.losses = losses
        self.rnn_layer_types = rnn_layer_types
    @staticmethod
    def nothing(x):
        """Identity function; stands in for tqdm when progress is off."""
        return x
    @staticmethod
    def build_RNN_model(
        layer_type, rnn_layer_sizes, dense_layer_sizes, activations, optimizer, loss
    ):
        """Build and compile a Keras model of stacked bidirectional RNN
        layers followed by dense layers.

        :param layer_type: 'SimpleRNN', 'LSTM' or 'GRU'
        :param rnn_layer_sizes: list of units for each RNN layer
        :param dense_layer_sizes: list of units for each dense layer
        :param activations: activation name per layer (RNN + dense)
        :param optimizer: Keras optimizer name
        :param loss: Keras loss name
        :raises ValueError: if ``layer_type`` is not a known RNN type
        :return: compiled ``Sequential`` model
        """
        rnn_layer_map = {'SimpleRNN': SimpleRNN, 'LSTM': LSTM, 'GRU': GRU}
        try:
            rnn_layer = rnn_layer_map[layer_type]
        except KeyError:
            # Previously an unknown type caused an UnboundLocalError below
            raise ValueError('unknown rnn layer_type: %r' % (layer_type, ))
        model = Sequential()
        for index, size in enumerate(rnn_layer_sizes + dense_layer_sizes):
            if index < len(rnn_layer_sizes) - 1:
                # Inner RNN layers must return sequences so they can stack
                model.add(
                    Bidirectional(
                        rnn_layer(units=size, activation=activations[index], return_sequences=True)
                    )
                )
            elif index == len(rnn_layer_sizes) - 1:
                model.add(Bidirectional(rnn_layer(units=size, activation=activations[index])))
            else:
                model.add(Dense(units=size, activation=activations[index]))
        model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
        return model
    @staticmethod
    def get_RNN_model_features(keras_model):
        """Extract sizes, activations and layer types of all Bidirectional
        and Dense layers from a built Keras model.

        :return: tuple ``(layer_sizes, activations, layer_types)``
        """
        layers = [
            layer_info for layer_info in keras_model.get_config()['layers']
            if layer_info['class_name'] == 'Bidirectional' or layer_info['class_name'] == 'Dense'
        ]
        layer_sizes = []
        acts = []
        layer_Type = []
        for l in layers:
            if l['class_name'] == 'Dense':
                layer_sizes.append(l['config']['units'])
                acts.append(l['config']['activation'])
                layer_Type.append(l['class_name'])
            else:
                # Bidirectional wrappers nest the real layer one level deeper
                layer_sizes.append(l['config']['layer']['config']['units'])
                acts.append(l['config']['layer']['config']['activation'])
                layer_Type.append(l['config']['layer']['class_name'])
        return layer_sizes, acts, layer_Type
    def generate_model(self):
        """Sample one random model configuration.

        :return: dict with keys 'rnn_layer_sizes', 'dense_layer_sizes',
            'activations', 'optimizer', 'loss' and 'rnn_type'
        """
        rnn_layers_num = np.random.randint(self.rnn_layers_num_lower, self.rnn_layers_num_upper)
        rnn_layer_sizes = np.random.randint(
            self.rnn_layer_size_lower, self.rnn_layer_size_upper, rnn_layers_num
        )
        dense_layers_num = np.random.randint(
            self.dense_layers_num_lower, self.dense_layers_num_upper
        )
        dense_layer_sizes = np.random.randint(
            self.dense_layer_size_lower, self.dense_layer_size_upper, dense_layers_num
        )
        if self.activation_pick == 'random':
            activations = np.random.choice(self.activation_fcts, rnn_layers_num + dense_layers_num)
        else:
            activations = np.random.choice([self.activation_pick],
                                           rnn_layers_num + dense_layers_num)
        if self.optimizer_pick == 'random':
            optimizer = np.random.choice(self.optimizers)
        else:
            optimizer = self.optimizer_pick
        if self.loss_pick == 'random':
            loss = np.random.choice(self.losses)
        else:
            loss = self.loss_pick
        if self.rnn_layer_type_pick == 'random':
            rnn_layer = np.random.choice(self.rnn_layer_types)
        else:
            rnn_layer = self.rnn_layer_type_pick
        return {
            'rnn_layer_sizes': [int(i) for i in rnn_layer_sizes],
            'dense_layer_sizes': [int(i) for i in dense_layer_sizes],
            'activations': list(activations),
            'optimizer': optimizer,
            'loss': loss,
            'rnn_type': rnn_layer,
        }
    def generate_model_configs(self, num_model_data=1000, progress=True):
        """Generate ``num_model_data`` random configs (no Keras model is
        actually built here).

        :param num_model_data: number of configurations to sample
        :param progress: show a tqdm progress bar when True
        """
        model_configs = []
        loop_fun = tqdm if progress else gen_bidirectional_rnn.nothing
        for i in loop_fun(range(num_model_data)):
            model_configs.append(self.generate_model())
        return model_configs
class model_train_data:
    """Benchmark harness: trains each configured model on synthetic data
    and records per-batch / per-epoch wall-clock timings."""
    def __init__(
        self,
        model_configs,
        input_dims=None,
        batch_sizes=None,
        epochs=None,
        truncate_from=None,
        trials=None,
        input_dim_strategy='same'
    ):
        """
        @param model_configs: config dicts as produced by
            ``gen_bidirectional_rnn.generate_model()``
        @param input_dims: input data number of features (default 1..100)
        @param batch_sizes: candidate batch sizes (default powers of two 2..256)
        @param epochs: epochs per timing trial (default 10)
        @param truncate_from: number of warm-up measurements to drop (default 2)
        @param trials: repetitions per model (default 5)
        @param input_dim_strategy: 'random' or 'same', same will be same size
            as first layer size
        """
        # Deep-copy so later mutation of the configs does not leak back
        # into the caller's objects.
        self.model_configs = [copy.deepcopy(info_dict) for info_dict in model_configs]
        self.input_dims = input_dims if input_dims is not None else list(range(1, 101))
        self.batch_sizes = batch_sizes if batch_sizes is not None else [2**i for i in range(1, 9)]
        self.epochs = epochs if epochs is not None else 10
        self.truncate_from = truncate_from if truncate_from is not None else 2
        self.trials = trials if trials is not None else 5
        self.input_dim_strategy = input_dim_strategy
        self.activation_fcts = activation_fcts
        self.optimizers = optimizers
        self.losses = losses
        # 1-based categorical encodings of the candidate pools
        self.act_mapping = dict((act, index + 1) for index, act in enumerate(self.activation_fcts))
        self.opt_mapping = dict((opt, index + 1) for index, opt in enumerate(self.optimizers))
        self.loss_mapping = dict((loss, index + 1) for index, loss in enumerate(self.losses))
    def get_train_data(self, progress=True):
        """Train every configured model on all-ones dummy data and return
        the configs augmented with timing statistics.

        :param progress: show a tqdm progress bar when True
        :return: list of config dicts extended with 'batch_size',
            'batch_time', 'epoch_time', 'setup_time' and 'input_dim'
        """
        model_data = []
        loop_fun = tqdm if progress else gen_bidirectional_rnn.nothing
        # Work on copies so repeated calls start from pristine configs.
        model_configs = [copy.deepcopy(info_dict) for info_dict in self.model_configs]
        for model_config in loop_fun(model_configs):
            model = gen_bidirectional_rnn.build_RNN_model(
                layer_type=model_config['rnn_type'],
                rnn_layer_sizes=model_config['rnn_layer_sizes'],
                dense_layer_sizes=model_config['dense_layer_sizes'],
                activations=model_config['activations'],
                optimizer=model_config['optimizer'],
                loss=model_config['loss']
            )
            batch_sizes = sample(self.batch_sizes, 1)
            input_dim = sample(self.input_dims, 1)[0]
            for batch_size in batch_sizes:
                batch_size_data_batch = []
                batch_size_data_epoch = []
                if self.input_dim_strategy == 'same':
                    # Use the first layer's width; Bidirectional wrappers
                    # nest the units one level deeper than plain layers.
                    try:
                        input_shape = model.get_config(
                        )['layers'][0]['config']['layer']['config']['units']
                    except (KeyError, IndexError, TypeError):
                        # was a bare ``except:`` — narrowed to lookup errors
                        input_shape = model.get_config(
                        )['layers'][0]['config']['batch_input_shape'][2]
                else:
                    input_shape = input_dim
                out_shape = model.get_config()['layers'][-1]['config']['units']
                x = np.ones((batch_size, 1, input_shape), dtype=np.float32)
                y = np.ones((batch_size, out_shape), dtype=np.float32)
                for _ in range(self.trials):
                    time_callback = TimeHistory()
                    model.fit(
                        x,
                        y,
                        epochs=self.epochs,
                        batch_size=batch_size,
                        callbacks=[time_callback],
                        verbose=False
                    )
                    batch_size_data_batch.extend(np.array(time_callback.batch_times) * 1000)
                    batch_size_data_epoch.extend(np.array(time_callback.epoch_times) * 1000)
                # Drop warm-up measurements, then reconstruct the dropped
                # entries with the median so setup overhead can be isolated.
                batch_times_truncated = batch_size_data_batch[self.truncate_from:]
                epoch_times_truncated = batch_size_data_epoch[self.truncate_from:]
                recovered_time = [
                    np.median(batch_times_truncated)
                ] * self.truncate_from + batch_times_truncated
                model_config['batch_size'] = batch_size
                model_config['batch_time'] = np.median(batch_times_truncated)
                model_config['epoch_time'] = np.median(epoch_times_truncated)
                model_config['setup_time'] = np.sum(batch_size_data_batch) - sum(recovered_time)
                model_config['input_dim'] = input_shape
                model_data.append(model_config)
        return model_data
class convert_bidirectional_rnn_data:
    """Converts bidirectional-RNN benchmark configs into numeric feature
    rows (unit sums, batch size, one-hot optimizer / RNN type) suitable
    for training a timing-regression model."""
    def __init__(self):
        # Candidate pools are module-level globals of this file.
        self.optimizers = optimizers
        self.rnn_layer_types = rnn_layer_types
        # Fit one-hot encoders on the sorted, de-duplicated candidates so
        # the resulting column order is deterministic.
        unique_all_rnns = sorted(list(set(self.rnn_layer_types)))
        unique_all_optimizers = sorted(list(set(self.optimizers)))
        opt_enc = OneHotEncoder(handle_unknown='ignore')
        rnn_enc = OneHotEncoder(handle_unknown='ignore')
        rnn_enc.fit([[i] for i in unique_all_rnns])
        opt_enc.fit([[i] for i in unique_all_optimizers])
        self.rnn_enc = rnn_enc
        self.opt_enc = opt_enc
    @staticmethod
    def get_rnn_type(model):
        """Return the class name (e.g. 'LSTM') of the first Bidirectional
        layer's wrapped RNN cell."""
        return [
            i['config']['layer']['class_name'] for i in model.get_config()['layers']
            if i['class_name'] == 'Bidirectional'
        ][0]
    @staticmethod
    def get_units_sum_rnn_keras(model_obj):
        """Sum the units of all Bidirectional-wrapped RNN layers."""
        layer_sizes = [
            layer_info['config']['layer']['config']['units']
            for layer_info in model_obj.get_config()['layers']
            if layer_info['class_name'] == 'Bidirectional'
        ]
        return sum(layer_sizes)
    @staticmethod
    def get_units_sum_dense_keras(model_obj):
        """Sum the units of all Dense layers."""
        return sum([
            layer['config']['units'] for layer in model_obj.get_config()['layers']
            if layer['class_name'] == 'Dense'
        ])
    def convert_model_config(self, model_config_rnn, data_type='Units', min_max_scaler=True):
        """
        @param model_config_rnn: list of benchmark config dicts
        @param data_type: str "Units" or "FLOPs" (FLOPs is not implemented)
        @param min_max_scaler: scale the feature rows with MinMaxScaler
        @return: tuple of (feature rows, batch-time targets, fitted scaler
            or None)
        """
        if data_type.lower().startswith('f'):
            # Fixed typo in the message ("avaliable"); behaviour unchanged:
            # the function still falls through to the Units conversion.
            print('currently FLOPs is not available for RNN')
        all_batch_sizes = []
        all_optimizers = []
        all_rnn_types = []
        dense_units_data = []
        rnn_units_data = []
        times_data = []
        # (the unused enumerate index and the dead ``flops_data`` list
        # were removed)
        for model_config in tqdm(model_config_rnn):
            all_batch_sizes.append(model_config['batch_size'])
            all_optimizers.append(model_config['optimizer'])
            all_rnn_types.append(model_config['rnn_type'])
            dense_units_data.append(sum(model_config['dense_layer_sizes']))
            rnn_units_data.append(sum(model_config['rnn_layer_sizes']))
            times_data.append(model_config['batch_time'])
        rnn_data = []
        for rnn_size, dense_size, batch, opt, rnn_type in tqdm(list(zip(
                rnn_units_data, dense_units_data, all_batch_sizes,
                all_optimizers, all_rnn_types))):
            optimizer_onehot = list(self.opt_enc.transform([[opt]]).toarray()[0])
            rnn_type_onehot = list(self.rnn_enc.transform([[rnn_type]]).toarray()[0])
            rnn_data.append([rnn_size, dense_size, batch] + optimizer_onehot + rnn_type_onehot)
        if min_max_scaler:
            scaler = MinMaxScaler()
            scaler.fit(rnn_data)
            scaler_rnn_data = scaler.transform(rnn_data)
            return scaler_rnn_data, np.array(times_data), scaler
        else:
            return rnn_data, np.array(times_data), None
    def convert_model_keras(
        self, rnn_model_obj, optimizer, batch_size, data_type='Unit', scaler=None
    ):
        """Convert a single built Keras model into one feature row.

        @param rnn_model_obj: built Keras model with Bidirectional layers
        @param optimizer: optimizer name to one-hot encode
        @param batch_size: batch size feature
        @param data_type: unused; kept for API compatibility
        @param scaler: optional fitted MinMaxScaler to apply
        @return: scaled ndarray row if ``scaler`` is given, else plain list
        """
        rnn_type = convert_bidirectional_rnn_data.get_rnn_type(rnn_model_obj)
        dense_unit_sum = convert_bidirectional_rnn_data.get_units_sum_dense_keras(rnn_model_obj)
        rnn_unit_sum = convert_bidirectional_rnn_data.get_units_sum_rnn_keras(rnn_model_obj)
        optimizer_onehot = list(self.opt_enc.transform([[optimizer]]).toarray()[0])
        rnn_type_onehot = list(self.rnn_enc.transform([[rnn_type]]).toarray()[0])
        layer_data = [rnn_unit_sum, dense_unit_sum, batch_size] + optimizer_onehot + rnn_type_onehot
        if scaler is not None:
            return scaler.transform(np.array([layer_data]))
        else:
            return layer_data
| [
"copy.deepcopy",
"tqdm.tqdm",
"numpy.sum",
"tensorflow.keras.layers.Dense",
"random.sample",
"numpy.median",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.OneHotEncoder",
"numpy.ones",
"time.time",
"numpy.random.randint",
"numpy.array",
"tensorflow.keras.models.Sequential",
... | [((1069, 1080), 'time.time', 'time.time', ([], {}), '()\n', (1078, 1080), False, 'import time\n'), ((1283, 1294), 'time.time', 'time.time', ([], {}), '()\n', (1292, 1294), False, 'import time\n'), ((1374, 1385), 'time.time', 'time.time', ([], {}), '()\n', (1383, 1385), False, 'import time\n'), ((1456, 1467), 'time.time', 'time.time', ([], {}), '()\n', (1465, 1467), False, 'import time\n'), ((1705, 1716), 'time.time', 'time.time', ([], {}), '()\n', (1714, 1716), False, 'import time\n'), ((1793, 1804), 'time.time', 'time.time', ([], {}), '()\n', (1802, 1804), False, 'import time\n'), ((3915, 3927), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3925, 3927), False, 'from tensorflow.keras.models import Sequential\n'), ((5496, 5567), 'numpy.random.randint', 'np.random.randint', (['self.rnn_layers_num_lower', 'self.rnn_layers_num_upper'], {}), '(self.rnn_layers_num_lower, self.rnn_layers_num_upper)\n', (5513, 5567), True, 'import numpy as np\n'), ((5594, 5685), 'numpy.random.randint', 'np.random.randint', (['self.rnn_layer_size_lower', 'self.rnn_layer_size_upper', 'rnn_layers_num'], {}), '(self.rnn_layer_size_lower, self.rnn_layer_size_upper,\n rnn_layers_num)\n', (5611, 5685), True, 'import numpy as np\n'), ((5731, 5806), 'numpy.random.randint', 'np.random.randint', (['self.dense_layers_num_lower', 'self.dense_layers_num_upper'], {}), '(self.dense_layers_num_lower, self.dense_layers_num_upper)\n', (5748, 5806), True, 'import numpy as np\n'), ((5857, 5954), 'numpy.random.randint', 'np.random.randint', (['self.dense_layer_size_lower', 'self.dense_layer_size_upper', 'dense_layers_num'], {}), '(self.dense_layer_size_lower, self.dense_layer_size_upper,\n dense_layers_num)\n', (5874, 5954), True, 'import numpy as np\n'), ((12582, 12620), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (12595, 12620), False, 'from sklearn.preprocessing import OneHotEncoder\n'), 
((12639, 12677), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (12652, 12677), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((2031, 2064), 'numpy.array', 'np.array', (['self.epoch_times_detail'], {}), '(self.epoch_times_detail)\n', (2039, 2064), True, 'import numpy as np\n'), ((2123, 2156), 'numpy.array', 'np.array', (['self.batch_times_detail'], {}), '(self.batch_times_detail)\n', (2131, 2156), True, 'import numpy as np\n'), ((2211, 2240), 'numpy.array', 'np.array', (['self.train_end_time'], {}), '(self.train_end_time)\n', (2219, 2240), True, 'import numpy as np\n'), ((6045, 6118), 'numpy.random.choice', 'np.random.choice', (['self.activation_fcts', '(rnn_layers_num + dense_layers_num)'], {}), '(self.activation_fcts, rnn_layers_num + dense_layers_num)\n', (6061, 6118), True, 'import numpy as np\n'), ((6159, 6234), 'numpy.random.choice', 'np.random.choice', (['[self.activation_pick]', '(rnn_layers_num + dense_layers_num)'], {}), '([self.activation_pick], rnn_layers_num + dense_layers_num)\n', (6175, 6234), True, 'import numpy as np\n'), ((6346, 6379), 'numpy.random.choice', 'np.random.choice', (['self.optimizers'], {}), '(self.optimizers)\n', (6362, 6379), True, 'import numpy as np\n'), ((6496, 6525), 'numpy.random.choice', 'np.random.choice', (['self.losses'], {}), '(self.losses)\n', (6512, 6525), True, 'import numpy as np\n'), ((6647, 6685), 'numpy.random.choice', 'np.random.choice', (['self.rnn_layer_types'], {}), '(self.rnn_layer_types)\n', (6663, 6685), True, 'import numpy as np\n'), ((8261, 8285), 'copy.deepcopy', 'copy.deepcopy', (['info_dict'], {}), '(info_dict)\n', (8274, 8285), False, 'import copy\n'), ((9441, 9465), 'copy.deepcopy', 'copy.deepcopy', (['info_dict'], {}), '(info_dict)\n', (9454, 9465), False, 'import copy\n'), ((9994, 10021), 'random.sample', 'sample', (['self.batch_sizes', '(1)'], {}), '(self.batch_sizes, 1)\n', (10000, 10021), False, 'from 
random import sample\n'), ((14313, 14335), 'tqdm.tqdm', 'tqdm', (['model_config_rnn'], {}), '(model_config_rnn)\n', (14317, 14335), False, 'from tqdm import tqdm\n'), ((15544, 15558), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (15556, 15558), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((10046, 10072), 'random.sample', 'sample', (['self.input_dims', '(1)'], {}), '(self.input_dims, 1)\n', (10052, 10072), False, 'from random import sample\n'), ((10740, 10795), 'numpy.ones', 'np.ones', (['(batch_size, 1, input_shape)'], {'dtype': 'np.float32'}), '((batch_size, 1, input_shape), dtype=np.float32)\n', (10747, 10795), True, 'import numpy as np\n'), ((10816, 10866), 'numpy.ones', 'np.ones', (['(batch_size, out_shape)'], {'dtype': 'np.float32'}), '((batch_size, out_shape), dtype=np.float32)\n', (10823, 10866), True, 'import numpy as np\n'), ((11947, 11979), 'numpy.median', 'np.median', (['batch_times_truncated'], {}), '(batch_times_truncated)\n', (11956, 11979), True, 'import numpy as np\n'), ((12025, 12057), 'numpy.median', 'np.median', (['epoch_times_trancuted'], {}), '(epoch_times_trancuted)\n', (12034, 12057), True, 'import numpy as np\n'), ((15685, 15705), 'numpy.array', 'np.array', (['times_data'], {}), '(times_data)\n', (15693, 15705), True, 'import numpy as np\n'), ((15757, 15777), 'numpy.array', 'np.array', (['times_data'], {}), '(times_data)\n', (15765, 15777), True, 'import numpy as np\n'), ((16515, 16537), 'numpy.array', 'np.array', (['[layer_data]'], {}), '([layer_data])\n', (16523, 16537), True, 'import numpy as np\n'), ((12103, 12132), 'numpy.sum', 'np.sum', (['batch_size_data_batch'], {}), '(batch_size_data_batch)\n', (12109, 12132), True, 'import numpy as np\n'), ((4445, 4493), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'size', 'activation': 'activations[index]'}), '(units=size, activation=activations[index])\n', (4450, 4493), False, 'from tensorflow.keras.layers import Dense, SimpleRNN, LSTM, GRU, 
Bidirectional\n'), ((11283, 11318), 'numpy.array', 'np.array', (['time_callback.batch_times'], {}), '(time_callback.batch_times)\n', (11291, 11318), True, 'import numpy as np\n'), ((11360, 11395), 'numpy.array', 'np.array', (['time_callback.epoch_times'], {}), '(time_callback.epoch_times)\n', (11368, 11395), True, 'import numpy as np\n'), ((11749, 11781), 'numpy.median', 'np.median', (['batch_times_truncated'], {}), '(batch_times_truncated)\n', (11758, 11781), True, 'import numpy as np\n')] |
import pickle
import matplotlib.pyplot as plt
import numpy as np
# Series specification: name -> (slope of the synthetic ramp, length).
# Equivalent to the original list_a..list_d (list_a was range(1000),
# i.e. slope 1).
SERIES_SPECS = {
    'a': (1, 1000),
    'b': (2, 317),
    'c': (3, 256),
    'd': (4, 1530),
}
TARGET_LEN = 2000  # every series is padded to this many samples
MU = 0             # mean of the padding noise
SIGMA = 5          # std-dev of the padding noise

# Build each ramp series and round-trip it through a pickle file,
# exactly as the original four copy-pasted blocks did.
series = {}
for name, (slope, length) in SERIES_SPECS.items():
    values = [i * slope for i in range(length)]
    with open("Paint_%s.pickle" % name, "wb") as f:
        pickle.dump(values, f)
    with open("Paint_%s.pickle" % name, "rb") as f:
        series[name] = pickle.load(f)

np.random.seed(0)
# Pad every series to TARGET_LEN by a Gaussian random walk around its
# last ORIGINAL value.  Noise is drawn in a/b/c/d order, so the RNG
# stream is consumed exactly as in the original unrolled code.
noise = None
for name in ('a', 'b', 'c', 'd'):
    values = series[name]
    pad_count = TARGET_LEN - len(values)
    noise = np.random.normal(MU, SIGMA, pad_count)
    last_value = values[-1]  # original code kept indexing the pre-pad tail
    for idx in range(pad_count):
        values.append(last_value + noise[idx])
print(type(noise))

# Per-series plot styling, identical to the original four plt.plot calls.
PLOT_STYLES = {
    'a': dict(color='blue', linestyle='-', linewidth=3),
    'b': dict(color='red', linestyle='--', linewidth=3),
    'c': dict(color='green', marker='o', markersize=3, linestyle='-.', linewidth=3),
    'd': dict(color='magenta', marker='v', markersize=3, linestyle=':', linewidth=3),
}
for name in ('a', 'b', 'c', 'd'):
    plt.plot(range(len(series[name])), series[name], label=name, **PLOT_STYLES[name])
plt.ylabel('valid reward', size=20)
plt.xlabel('epochs', size=20)
plt.grid()  # grid with default parameters
plt.legend(loc='best')
plt.title('Valid Reward Plot')
plt.savefig('paint.png')
plt.close() | [
"matplotlib.pyplot.title",
"pickle.dump",
"numpy.random.seed",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"pickle.load",
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] | [((911, 928), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (925, 928), True, 'import numpy as np\n'), ((935, 974), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', 'sampleNo_a'], {}), '(mu, sigma, sampleNo_a)\n', (951, 974), True, 'import numpy as np\n'), ((982, 1021), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', 'sampleNo_b'], {}), '(mu, sigma, sampleNo_b)\n', (998, 1021), True, 'import numpy as np\n'), ((1029, 1068), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', 'sampleNo_c'], {}), '(mu, sigma, sampleNo_c)\n', (1045, 1068), True, 'import numpy as np\n'), ((1076, 1115), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', 'sampleNo_d'], {}), '(mu, sigma, sampleNo_d)\n', (1092, 1115), True, 'import numpy as np\n'), ((1871, 1906), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""valid reward"""'], {'size': '(20)'}), "('valid reward', size=20)\n", (1881, 1906), True, 'import matplotlib.pyplot as plt\n'), ((1906, 1935), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {'size': '(20)'}), "('epochs', size=20)\n", (1916, 1935), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1945), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1943, 1945), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2092), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2080, 2092), True, 'import matplotlib.pyplot as plt\n'), ((2093, 2123), 'matplotlib.pyplot.title', 'plt.title', (['"""Valid Reward Plot"""'], {}), "('Valid Reward Plot')\n", (2102, 2123), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2148), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""paint.png"""'], {}), "('paint.png')\n", (2135, 2148), True, 'import matplotlib.pyplot as plt\n'), ((2149, 2160), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2158, 2160), True, 'import matplotlib.pyplot as plt\n'), ((245, 267), 'pickle.dump', 'pickle.dump', (['list_a', 'f'], {}), '(list_a, 
f)\n', (256, 267), False, 'import pickle\n'), ((313, 335), 'pickle.dump', 'pickle.dump', (['list_b', 'f'], {}), '(list_b, f)\n', (324, 335), False, 'import pickle\n'), ((381, 403), 'pickle.dump', 'pickle.dump', (['list_c', 'f'], {}), '(list_c, f)\n', (392, 403), False, 'import pickle\n'), ((449, 471), 'pickle.dump', 'pickle.dump', (['list_d', 'f'], {}), '(list_d, f)\n', (460, 471), False, 'import pickle\n'), ((521, 535), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (532, 535), False, 'import pickle\n'), ((585, 599), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (596, 599), False, 'import pickle\n'), ((649, 663), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (660, 663), False, 'import pickle\n'), ((713, 727), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (724, 727), False, 'import pickle\n')] |
# -*- coding: utf-8 -*-
"""
Basic flowchart demonstration: a noisy signal is routed through a
user-reprogrammable filter and both the raw and the filtered signal are
displayed side by side.

Steps:
  - build a flowchart plus two plot widgets
  - feed noisy data into the flowchart's input terminal
  - wire the input straight to the first plot
  - run the data through a gaussian filter node and show the result in
    the second plot
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.flowchart import Flowchart
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import numpy as np
import pyqtgraph.metaarray as metaarray

qt_app = QtGui.QApplication([])

# Main window holding everything in a grid layout
main_win = QtGui.QMainWindow()
main_win.setWindowTitle('pyqtgraph example: Flowchart')
central = QtGui.QWidget()
main_win.setCentralWidget(central)
grid = QtGui.QGridLayout()
central.setLayout(grid)

# The flowchart itself, exposing one input and one output terminal
chart = Flowchart(terminals={
    'dataIn': {'io': 'in'},
    'dataOut': {'io': 'out'}
})
ctrl_widget = chart.widget()

# Flowchart control panel occupies the left column of the window
grid.addWidget(chart.widget(), 0, 0, 2, 1)

# Two stacked plots occupy the right column
plot_top = pg.PlotWidget()
plot_bottom = pg.PlotWidget()
grid.addWidget(plot_top, 0, 1)
grid.addWidget(plot_bottom, 1, 1)
main_win.show()

# Synthesise a test signal: gaussian noise plus a step plus a slow sine
signal = np.random.normal(size=1000)
signal[200:300] += 1
signal += np.sin(np.linspace(0, 100, 1000))
signal = metaarray.MetaArray(
    signal,
    info=[{'name': 'Time', 'values': np.linspace(0, 1.0, len(signal))}, {}]
)

# Push the signal into the flowchart's input terminal
chart.setInput(dataIn=signal)

# Pre-populate the flowchart with a basic node setup
# (normally the user would build this interactively).
plot_lookup = {'Top Plot': plot_top, 'Bottom Plot': plot_bottom}

node_top = chart.createNode('PlotWidget', pos=(0, -150))
node_top.setPlotList(plot_lookup)
node_top.setPlot(plot_top)

node_bottom = chart.createNode('PlotWidget', pos=(150, -150))
node_bottom.setPlot(plot_bottom)
node_bottom.setPlotList(plot_lookup)

filter_node = chart.createNode('GaussianFilter', pos=(0, 0))
filter_node.ctrls['sigma'].setValue(5)

chart.connectTerminals(chart['dataIn'], filter_node['In'])
chart.connectTerminals(chart['dataIn'], node_top['In'])
chart.connectTerminals(filter_node['Out'], node_bottom['In'])
chart.connectTerminals(filter_node['Out'], chart['dataOut'])

## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| [
"pyqtgraph.flowchart.Flowchart",
"pyqtgraph.Qt.QtGui.QMainWindow",
"pyqtgraph.Qt.QtGui.QWidget",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"pyqtgraph.Qt.QtGui.QGridLayout",
"numpy.random.normal",
"pyqtgraph.Qt.QtGui.QApplication",
"numpy.linspace",
"pyqtgraph.PlotWidget"
] | [((715, 737), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (733, 737), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((784, 803), 'pyqtgraph.Qt.QtGui.QMainWindow', 'QtGui.QMainWindow', ([], {}), '()\n', (801, 803), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((860, 875), 'pyqtgraph.Qt.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (873, 875), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((910, 929), 'pyqtgraph.Qt.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (927, 929), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1008, 1079), 'pyqtgraph.flowchart.Flowchart', 'Flowchart', ([], {'terminals': "{'dataIn': {'io': 'in'}, 'dataOut': {'io': 'out'}}"}), "(terminals={'dataIn': {'io': 'in'}, 'dataOut': {'io': 'out'}})\n", (1017, 1079), False, 'from pyqtgraph.flowchart import Flowchart\n'), ((1234, 1249), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (1247, 1249), True, 'import pyqtgraph as pg\n'), ((1256, 1271), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (1269, 1271), True, 'import pyqtgraph as pg\n'), ((1402, 1429), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000)'}), '(size=1000)\n', (1418, 1429), True, 'import numpy as np\n'), ((1464, 1489), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (1475, 1489), True, 'import numpy as np\n'), ((2523, 2552), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (2550, 2552), False, 'from pyqtgraph.Qt import QtGui, QtCore\n')] |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
import itertools
import glob
from os.path import join
import numpy as np
import pytest
from pytest import approx
import biotite
import biotite.structure as struc
import biotite.structure.io.pdbx as pdbx
import biotite.sequence as seq
from ..util import data_dir
def test_get_model_count():
    """The count reported by ``get_model_count()`` must equal the depth
    of the parsed :class:`AtomArrayStack`."""
    cif_file = pdbx.PDBxFile.read(join(data_dir("structure"), "1l2y.cif"))
    expected_count = pdbx.get_structure(cif_file).stack_depth()
    assert pdbx.get_model_count(cif_file) == expected_count
@pytest.mark.parametrize(
    "category, key, exp_value",
    [
        ("audit_author", "name", ["<NAME>.", "<NAME>.", "<NAME>."]),
        ("struct_ref_seq", "pdbx_PDB_id_code", "1L2Y"),
        (
            "pdbx_nmr_ensemble", "conformer_selection_criteria",
            "structures with acceptable covalent geometry, "
            "structures with the least restraint violations"
        ),
    ]
)
def test_parsing(category, key, exp_value):
    """Spot-check a few scalar and array values parsed from 1l2y.cif."""
    cif_file = pdbx.PDBxFile.read(join(data_dir("structure"), "1l2y.cif"))
    parsed = cif_file[category][key]
    # ndarray values are compared as plain Python lists
    if isinstance(parsed, np.ndarray):
        parsed = parsed.tolist()
    assert parsed == exp_value
@pytest.mark.parametrize(
    "string, use_array",
    itertools.product(["", " ", "\n", "\t"], [False, True])
)
def test_empty_values(string, use_array):
    """
    Empty or whitespace-only field values must be replaced by the PDBx
    placeholder ``'.'`` when the category is written.
    """
    LENGTH = 10
    if use_array:
        ref_value = np.full(LENGTH, string, dtype="U1")
        expected = ["."] * LENGTH
    else:
        ref_value = ""
        expected = "."
    pdbx_file = pdbx.PDBxFile()
    pdbx_file.set_category(
        category="test_category",
        block="test",
        category_dict={"test_field": ref_value}
    )
    test_value = pdbx_file["test_category"]["test_field"]
    if use_array:
        test_value = test_value.tolist()
    assert test_value == expected
@pytest.mark.parametrize(
    "path, model",
    itertools.product(
        glob.glob(join(data_dir("structure"), "*.cif")),
        [None, 1, -1]
    )
)
def test_conversion(path, model):
    """Writing a parsed structure back to PDBx must be a lossless round
    trip (coordinates, bonds, box and all annotations)."""
    source_file = pdbx.PDBxFile.read(path)
    try:
        original = pdbx.get_structure(source_file, model=model)
    except biotite.InvalidFileError:
        if model is not None:
            raise
        # Models with differing atom counts cannot form an
        # AtomArrayStack -> nothing to round-trip in this case
        return
    target_file = pdbx.PDBxFile()
    pdbx.set_structure(target_file, original, data_block="test")
    restored = pdbx.get_structure(target_file, model=model)
    if original.box is not None:
        assert np.allclose(original.box, restored.box)
    assert original.bonds == restored.bonds
    for category in original.get_annotation_categories():
        assert (original.get_annotation(category).tolist()
                == restored.get_annotation(category).tolist())
    assert original.coord.tolist() == restored.coord.tolist()
def test_extra_fields():
    """
    Structures read with extra fields must survive a PDBx round trip with
    all extra annotation arrays intact.

    The original test performed the identical read/write/read round trip
    twice in a row; the redundant first pass (whose only assertion is
    repeated at the end of the second pass) has been removed.
    """
    path = join(data_dir("structure"), "1l2y.cif")
    pdbx_file = pdbx.PDBxFile.read(path)
    stack1 = pdbx.get_structure(
        pdbx_file,
        extra_fields=[
            "atom_id", "b_factor", "occupancy", "charge"
        ]
    )
    pdbx_file = pdbx.PDBxFile()
    pdbx.set_structure(pdbx_file, stack1, data_block="test")
    stack2 = pdbx.get_structure(
        pdbx_file,
        extra_fields=[
            "atom_id", "b_factor", "occupancy", "charge"
        ]
    )
    # Field-by-field comparison; floats are compared approximately
    assert stack1.ins_code.tolist() == stack2.ins_code.tolist()
    assert stack1.atom_id.tolist() == stack2.atom_id.tolist()
    assert stack1.b_factor.tolist() == approx(stack2.b_factor.tolist())
    assert stack1.occupancy.tolist() == approx(stack2.occupancy.tolist())
    assert stack1.charge.tolist() == stack2.charge.tolist()
    assert stack1 == stack2
def test_unequal_lengths():
    """``set_category()`` must reject category dicts whose value arrays
    differ in length."""
    pdbx_file = pdbx.PDBxFile()
    # Equal-length columns are accepted
    pdbx_file.set_category(
        "test",
        {"foo1": ["1", "2", "3"], "foo2": ["1", "2", "3"]},
        block="test_block"
    )
    # A column with a deviating length must raise
    with pytest.raises(ValueError):
        pdbx_file.set_category(
            "test",
            {"foo1": ["1", "2", "3"], "foo2": ["1", "2", "3", "4"]},
            block="test_block"
        )
def test_list_assemblies():
    """
    Compare the output of :func:`list_assemblies()` for 1f2n.cif against
    the known assembly descriptions.
    """
    cif_file = pdbx.PDBxFile.read(join(data_dir("structure"), "1f2n.cif"))
    expected = {
        "1": "complete icosahedral assembly",
        "2": "icosahedral asymmetric unit",
        "3": "icosahedral pentamer",
        "4": "icosahedral 23 hexamer",
        "5": "icosahedral asymmetric unit, std point frame",
        "6": "crystal asymmetric unit, crystal frame",
    }
    assert pdbx.list_assemblies(cif_file) == expected
@pytest.mark.parametrize("model", [None, 1, -1])
def test_get_assembly(model):
"""
Test whether the :func:`get_assembly()` function produces the same
number of peptide chains as the
``_pdbx_struct_assembly.oligomeric_count`` field indicates.
Furthermore, check if the number of atoms in the entire assembly
is a multiple of the numbers of atoms in a monomer.
"""
path = join(data_dir("structure"), "1f2n.cif")
pdbx_file = pdbx.PDBxFile.read(path)
assembly_category = pdbx_file.get_category(
"pdbx_struct_assembly", expect_looped=True
)
# Test each available assembly
for id, ref_oligomer_count in zip(
assembly_category["id"],
assembly_category["oligomeric_count"]
):
print("Assembly ID:", id)
try:
assembly = pdbx.get_assembly(
pdbx_file, assembly_id=id, model=model
)
except biotite.InvalidFileError:
if model is None:
# The file cannot be parsed into an AtomArrayStack,
# as the models contain different numbers of atoms
# -> skip this test case
return
else:
raise
protein_assembly = assembly[..., struc.filter_amino_acids(assembly)]
test_oligomer_count = struc.get_chain_count(protein_assembly)
if model is None:
assert isinstance(assembly, struc.AtomArrayStack)
else:
assert isinstance(assembly, struc.AtomArray)
assert test_oligomer_count == int(ref_oligomer_count)
# The atom count of the entire assembly should be a multiple
# a monomer,
monomer_atom_count = pdbx.get_structure(pdbx_file).array_length()
assert assembly.array_length() % monomer_atom_count == 0
def test_get_sequence():
    """
    Check the sequences extracted from two known structure files
    against reference strings and sequence types.
    """
    pdbx_file = pdbx.PDBxFile.read(join(data_dir("structure"), "5ugo.cif"))
    sequences = pdbx.get_sequence(pdbx_file)
    pdbx_file = pdbx.PDBxFile.read(join(data_dir("structure"), "4gxy.cif"))
    sequences += pdbx.get_sequence(pdbx_file)
    expected = [
        ("CCGACGGCGCATCAGC", seq.NucleotideSequence),
        ("GCTGATGCGCC", seq.NucleotideSequence),
        ("GTCGG", seq.NucleotideSequence),
        ("MSKRKAPQETLNGGITDMLTELANFEKNVSQAIHKYN"
         "AYRKAASVIAKYPHKIKSGAEAKKLPGVGTKIAEKIDEFLATGKLRKLEKIRQD"
         "DTSSSINFLTRVSGIGPSAARKFVDEGIKTLEDLRKNEDKLNHHQRIGLKYFGD"
         "FEKRIPREEMLQMQDIVLNEVKKVDSEYIATVCGSFRRGAESSGDMDVLLTHPS"
         "FTSESTKQPKLLHQVVEQLQKVHFITDTLSKGETKFMGVCQLPSKNDEKEYPHR"
         "RIDIRLIPKDQYYCGVLYFTGSDIFNKNMRAHALEKGFTINEYTIRPLGVTGVA"
         "GEPLPVDSEKDIFDYIQWKYREPKDRSE",
         seq.ProteinSequence),
        ("GGCGGCAGGTGCTCCCGACCCTGCGGTCGGGAGTTAA"
         "AAGGGAAGCCGGTGCAAGTCCGGCACGGTCCCGCCACTGTGACGGGGAGTCGCC"
         "CCTCGGGATGTGCCACTGGCCCGAAGGCCGGGAAGGCGGAGGGGCGGCGAGGAT"
         "CCGGAGTCAGGAAACCTGCCTGCCGTC",
         seq.NucleotideSequence),
    ]
    for sequence, (ref_string, ref_type) in zip(sequences, expected):
        assert str(sequence) == ref_string
        assert type(sequence) is ref_type
| [
"biotite.structure.io.pdbx.set_structure",
"numpy.full",
"biotite.structure.io.pdbx.PDBxFile",
"biotite.structure.io.pdbx.get_structure",
"numpy.allclose",
"biotite.structure.io.pdbx.get_model_count",
"biotite.structure.io.pdbx.get_sequence",
"pytest.raises",
"biotite.structure.io.pdbx.PDBxFile.read... | [((695, 1029), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""category, key, exp_value"""', "[('audit_author', 'name', ['<NAME>.', '<NAME>.', '<NAME>.']), (\n 'struct_ref_seq', 'pdbx_PDB_id_code', '1L2Y'), ('pdbx_nmr_ensemble',\n 'conformer_selection_criteria',\n 'structures with acceptable covalent geometry, structures with the least restraint violations'\n )]"], {}), "('category, key, exp_value', [('audit_author',\n 'name', ['<NAME>.', '<NAME>.', '<NAME>.']), ('struct_ref_seq',\n 'pdbx_PDB_id_code', '1L2Y'), ('pdbx_nmr_ensemble',\n 'conformer_selection_criteria',\n 'structures with acceptable covalent geometry, structures with the least restraint violations'\n )])\n", (718, 1029), False, 'import pytest\n'), ((5812, 5859), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model"""', '[None, 1, -1]'], {}), "('model', [None, 1, -1])\n", (5835, 5859), False, 'import pytest\n'), ((547, 578), 'biotite.structure.io.pdbx.get_model_count', 'pdbx.get_model_count', (['pdbx_file'], {}), '(pdbx_file)\n', (567, 578), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((1849, 1864), 'biotite.structure.io.pdbx.PDBxFile', 'pdbx.PDBxFile', ([], {}), '()\n', (1862, 1864), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((1518, 1573), 'itertools.product', 'itertools.product', (["['', ' ', '\\n', '\\t']", '[False, True]'], {}), "(['', ' ', '\\n', '\\t'], [False, True])\n", (1535, 1573), False, 'import itertools\n'), ((2414, 2438), 'biotite.structure.io.pdbx.PDBxFile.read', 'pdbx.PDBxFile.read', (['path'], {}), '(path)\n', (2432, 2438), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((2812, 2827), 'biotite.structure.io.pdbx.PDBxFile', 'pdbx.PDBxFile', ([], {}), '()\n', (2825, 2827), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((2832, 2888), 'biotite.structure.io.pdbx.set_structure', 'pdbx.set_structure', (['pdbx_file', 'array1'], {'data_block': '"""test"""'}), "(pdbx_file, array1, 
data_block='test')\n", (2850, 2888), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((2907, 2949), 'biotite.structure.io.pdbx.get_structure', 'pdbx.get_structure', (['pdbx_file'], {'model': 'model'}), '(pdbx_file, model=model)\n', (2925, 2949), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((3402, 3426), 'biotite.structure.io.pdbx.PDBxFile.read', 'pdbx.PDBxFile.read', (['path'], {}), '(path)\n', (3420, 3426), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((3440, 3534), 'biotite.structure.io.pdbx.get_structure', 'pdbx.get_structure', (['pdbx_file'], {'extra_fields': "['atom_id', 'b_factor', 'occupancy', 'charge']"}), "(pdbx_file, extra_fields=['atom_id', 'b_factor',\n 'occupancy', 'charge'])\n", (3458, 3534), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((3577, 3592), 'biotite.structure.io.pdbx.PDBxFile', 'pdbx.PDBxFile', ([], {}), '()\n', (3590, 3592), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((3597, 3653), 'biotite.structure.io.pdbx.set_structure', 'pdbx.set_structure', (['pdbx_file', 'stack1'], {'data_block': '"""test"""'}), "(pdbx_file, stack1, data_block='test')\n", (3615, 3653), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((3667, 3761), 'biotite.structure.io.pdbx.get_structure', 'pdbx.get_structure', (['pdbx_file'], {'extra_fields': "['atom_id', 'b_factor', 'occupancy', 'charge']"}), "(pdbx_file, extra_fields=['atom_id', 'b_factor',\n 'occupancy', 'charge'])\n", (3685, 3761), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((3885, 3909), 'biotite.structure.io.pdbx.PDBxFile.read', 'pdbx.PDBxFile.read', (['path'], {}), '(path)\n', (3903, 3909), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((3923, 4017), 'biotite.structure.io.pdbx.get_structure', 'pdbx.get_structure', (['pdbx_file'], {'extra_fields': "['atom_id', 'b_factor', 'occupancy', 'charge']"}), "(pdbx_file, extra_fields=['atom_id', 'b_factor',\n 'occupancy', 'charge'])\n", (3941, 4017), True, 'import biotite.structure.io.pdbx as pdbx\n'), 
((4075, 4090), 'biotite.structure.io.pdbx.PDBxFile', 'pdbx.PDBxFile', ([], {}), '()\n', (4088, 4090), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((4095, 4151), 'biotite.structure.io.pdbx.set_structure', 'pdbx.set_structure', (['pdbx_file', 'stack1'], {'data_block': '"""test"""'}), "(pdbx_file, stack1, data_block='test')\n", (4113, 4151), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((4170, 4264), 'biotite.structure.io.pdbx.get_structure', 'pdbx.get_structure', (['pdbx_file'], {'extra_fields': "['atom_id', 'b_factor', 'occupancy', 'charge']"}), "(pdbx_file, extra_fields=['atom_id', 'b_factor',\n 'occupancy', 'charge'])\n", (4188, 4264), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((4970, 4985), 'biotite.structure.io.pdbx.PDBxFile', 'pdbx.PDBxFile', ([], {}), '()\n', (4983, 4985), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((5414, 5438), 'biotite.structure.io.pdbx.PDBxFile.read', 'pdbx.PDBxFile.read', (['path'], {}), '(path)\n', (5432, 5438), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((5460, 5491), 'biotite.structure.io.pdbx.list_assemblies', 'pdbx.list_assemblies', (['pdbx_file'], {}), '(pdbx_file)\n', (5480, 5491), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((6270, 6294), 'biotite.structure.io.pdbx.PDBxFile.read', 'pdbx.PDBxFile.read', (['path'], {}), '(path)\n', (6288, 6294), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((7746, 7769), 'biotite.structure.io.pdbx.get_sequence', 'pdbx.get_sequence', (['file'], {}), '(file)\n', (7763, 7769), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((7858, 7881), 'biotite.structure.io.pdbx.get_sequence', 'pdbx.get_sequence', (['file'], {}), '(file)\n', (7875, 7881), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((1775, 1810), 'numpy.full', 'np.full', (['LENGTH', 'string'], {'dtype': '"""U1"""'}), "(LENGTH, string, dtype='U1')\n", (1782, 1810), True, 'import numpy as np\n'), ((2470, 2512), 'biotite.structure.io.pdbx.get_structure', 
'pdbx.get_structure', (['pdbx_file'], {'model': 'model'}), '(pdbx_file, model=model)\n', (2488, 2512), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((3001, 3036), 'numpy.allclose', 'np.allclose', (['array1.box', 'array2.box'], {}), '(array1.box, array2.box)\n', (3012, 3036), True, 'import numpy as np\n'), ((5072, 5097), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5085, 5097), False, 'import pytest\n'), ((7140, 7179), 'biotite.structure.get_chain_count', 'struc.get_chain_count', (['protein_assembly'], {}), '(protein_assembly)\n', (7161, 7179), True, 'import biotite.structure as struc\n'), ((601, 630), 'biotite.structure.io.pdbx.get_structure', 'pdbx.get_structure', (['pdbx_file'], {}), '(pdbx_file)\n', (619, 630), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((6635, 6692), 'biotite.structure.io.pdbx.get_assembly', 'pdbx.get_assembly', (['pdbx_file'], {'assembly_id': 'id', 'model': 'model'}), '(pdbx_file, assembly_id=id, model=model)\n', (6652, 6692), True, 'import biotite.structure.io.pdbx as pdbx\n'), ((7074, 7108), 'biotite.structure.filter_amino_acids', 'struc.filter_amino_acids', (['assembly'], {}), '(assembly)\n', (7098, 7108), True, 'import biotite.structure as struc\n'), ((7522, 7551), 'biotite.structure.io.pdbx.get_structure', 'pdbx.get_structure', (['pdbx_file'], {}), '(pdbx_file)\n', (7540, 7551), True, 'import biotite.structure.io.pdbx as pdbx\n')] |
import numpy as np
import matplotlib.pyplot as plt
def normalize_cell(supercell):
    """Return the cell matrix with every lattice (row) vector scaled to unit length.

    :param supercell: sequence of lattice vectors (rows)
    :return: ndarray with each row divided by its Euclidean norm
    """
    rows = np.array(supercell)
    return np.array([row / np.linalg.norm(row) for row in rows])
class TrajectoryAnalysis:
    """Ensemble statistics over a set of exciton trajectories.

    Provides averaged diffusion coefficients, diffusion lengths, lifetimes
    and tensor versions of these quantities, where each trajectory is
    weighted by its number of (sub)trajectories, plus matplotlib-based
    plotting helpers.

    :param trajectories: list of trajectory objects (project API) exposing
        get_dimension(), get_states(), get_diffusion(), get_lifetime(),
        get_n_subtrajectories(), etc.
    """

    def __init__(self, trajectories):
        self.trajectories = trajectories
        self.n_dim = trajectories[0].get_dimension()
        self.n_traj = len(trajectories)
        # Union of the electronic states visited by any trajectory
        self.states = set()
        for traj in trajectories:
            self.states |= traj.get_states()
        # Per-state caches for ratio quantities
        self._points_ratio = {}
        self._segment_ratio = {}

    def __str__(self):
        """Human-readable summary of the trajectory ensemble."""
        txt_data = '\nTrajectory Analysis\n'
        txt_data += '------------------------------\n'
        txt_data += 'Number of trajectories: {}\n'.format(self.n_traj)
        txt_data += 'Dimension: {}\n'.format(self.n_dim)
        txt_data += 'Number of nodes: {}\n'.format(self.get_number_of_nodes())
        txt_data += 'States: {}\n'.format(self.states)
        return txt_data

    def get_number_of_nodes(self):
        """Total number of nodes summed over all trajectories.

        Fix: the previous implementation wrapped the per-trajectory counts
        in ``len()`` and therefore always returned the number of
        trajectories (identical to ``n_traj``) instead of the total.
        """
        return sum(traj.get_number_of_nodes() for traj in self.trajectories)

    def get_states(self):
        """Return the set of electronic states present in the ensemble."""
        return self.states

    def get_lifetime_ratio(self, state):
        """Average fraction of simulation time spent in `state`."""
        return np.average([traj.get_time_ratio(state) for traj in self.trajectories])

    def get_segment_ration(self, state=None):
        """Per-trajectory fraction of segments in `state` (cached).

        Returns None when no trajectory contains a segment in `state`.
        (Method name kept -- including the typo -- for backwards
        compatibility with existing callers.)
        """
        if state not in self._segment_ratio:
            sum_t = np.nansum([traj.get_n_segments(state) for traj in self.trajectories])
            if sum_t != 0:
                self._segment_ratio[state] = [traj.get_n_segments(state)/float(sum_t) for traj in self.trajectories]
            else:
                self._segment_ratio[state] = None
        return self._segment_ratio[state]

    def diffusion_coeff_tensor(self, state, unit_cell=None):
        """
        calculate the average diffusion tensor defined as:
            DiffTensor = 1/2 * <DiffLen^2> / <time>
        :param state: electronic state to analyze
        :param unit_cell: optional cell matrix; if given, the tensor is
            transformed into the (normalized) cell basis
        :return: averaged tensor, or None if no subtrajectory exists
        """
        total_traj = np.sum([traj.get_n_subtrajectories(state) for traj in self.trajectories])
        if total_traj == 0:
            return None
        # Weight each trajectory tensor by its number of subtrajectories
        tensor_list = [traj.get_diffusion_tensor(state)*traj.get_n_subtrajectories(state) for traj in
                       self.trajectories if traj.get_diffusion_tensor(state) is not None]
        tensor = np.sum(tensor_list, axis=0)/total_traj
        if unit_cell is not None:
            trans_mat = normalize_cell(unit_cell)
            mat_inv = np.linalg.inv(trans_mat)
            # similarity transform into the normalized cell basis
            tensor = np.dot(np.dot(mat_inv.T, tensor), mat_inv)
        return tensor

    def diffusion_length_square_tensor(self, state, unit_cell=None):
        """
        calculate the average diffusion length tensor defined as:
            DiffLenTen = 2 * DiffTensor * lifetime
        :param state: electronic state to analyze
        :param unit_cell: optional cell matrix; if given, the tensor is
            transformed into the (normalized) cell basis
        :return: averaged tensor, or None if no subtrajectory exists
        """
        total_traj = np.sum([traj.get_n_subtrajectories(state) for traj in self.trajectories])
        if total_traj == 0:
            return None
        tensor_list = [traj.get_diffusion_length_square_tensor(state)*traj.get_n_subtrajectories(state)
                       for traj in self.trajectories if traj.get_diffusion_length_square_tensor(state) is not None]
        tensor = np.sum(tensor_list, axis=0)/total_traj
        if unit_cell is not None:
            trans_mat = normalize_cell(unit_cell)
            mat_inv = np.linalg.inv(trans_mat)
            # similarity transform into the normalized cell basis
            tensor = np.dot(np.dot(mat_inv.T, tensor), mat_inv)
        return tensor

    def diffusion_coefficient_old(self, state=None):
        """
        Return the average diffusion coefficient defined as:
           DiffCoeff = 1/(2*z) * <DiffLen^2>/<time>
        Legacy segment-ratio weighted implementation, kept for comparison
        with :meth:`diffusion_coefficient`.
        :return: averaged diffusion coefficient, or None if no data
        """
        if state is None:
            sum_diff = 0
            sum_n_subtraj = 0
            for istate in self.get_states():
                n_subtraj = len(self.get_segment_ration(istate))
                diffusion_list = [traj.get_diffusion(istate)*s for traj, s in zip(self.trajectories, self.get_segment_ration(istate))
                                  if istate in traj.get_states()]
                if not np.isnan(diffusion_list).all():
                    sum_diff += np.nansum(diffusion_list) * n_subtraj
                sum_n_subtraj += n_subtraj
            return sum_diff/sum_n_subtraj
        if self.get_segment_ration(state) is None:
            return None
        return np.nansum([traj.get_diffusion(state)*s for traj, s in zip(self.trajectories, self.get_segment_ration(state))])

    def diffusion_coefficient(self, state=None):
        """
        Return the average diffusion coefficient defined as:
           DiffCoeff = 1/(2*z) * <DiffLen^2>/<time>
        Each trajectory is weighted by its number of subtrajectories.
        :return: averaged diffusion coefficient, or None if no data
        """
        total_traj = np.sum([traj.get_n_subtrajectories(state) for traj in self.trajectories])
        if total_traj == 0:
            return None
        return np.sum([traj.get_diffusion(state)*traj.get_n_subtrajectories(state) for traj in self.trajectories if
                traj.get_diffusion(state) is not None])/total_traj

    def lifetime_old(self, state=None):
        """Legacy segment-ratio weighted lifetime; see :meth:`lifetime`."""
        if state is None:
            sum_diff = 0
            sum_n_subtraj = 0
            for istate in self.get_states():
                n_subtraj = len(self.get_segment_ration(istate))
                lifetime_list = [traj.get_lifetime(istate)*s for traj, s in zip(self.trajectories, self.get_segment_ration(istate))
                                 if istate in traj.get_states()]
                if not np.isnan(lifetime_list).all():
                    sum_diff += np.nansum(lifetime_list) * n_subtraj
                sum_n_subtraj += n_subtraj
            return sum_diff/sum_n_subtraj
        if self.get_segment_ration(state) is None:
            return None
        return np.nansum([traj.get_lifetime(state)*s for traj, s in zip(self.trajectories, self.get_segment_ration(state))])

    def lifetime(self, state=None):
        """Subtrajectory-weighted average lifetime of `state`.

        :return: averaged lifetime, or None if no subtrajectory exists
        """
        total_traj = np.sum([traj.get_n_subtrajectories(state) for traj in self.trajectories])
        if total_traj == 0:
            return None
        return np.sum([traj.get_lifetime(state)*traj.get_n_subtrajectories(state) for traj in self.trajectories if
                traj.get_lifetime(state) is not None])/total_traj

    def diffusion_length(self, state=None):
        """
        Return the average diffusion length defined as:
           DiffLen = SQRT(2 * z * DiffCoeff * LifeTime)
        :return: averaged diffusion length, or None if no data
        """
        total_traj = np.sum([traj.get_n_subtrajectories(state) for traj in self.trajectories])
        if total_traj == 0:
            return None
        length2 = np.sum([traj.get_diffusion_length_square(state)*traj.get_n_subtrajectories(state) for traj in self.trajectories
                          if traj.get_diffusion_length_square(state) is not None])/total_traj
        return np.sqrt(length2)

    def plot_2d(self, state=None):
        """Overlay the 2-D plots of all trajectories; returns matplotlib.pyplot."""
        plt = None
        for traj in self.trajectories:
            plt = traj.plot_2d(state, show_warnings=False)
        return plt

    def plot_distances(self, state=None):
        """Overlay the distance plots of all trajectories; returns matplotlib.pyplot."""
        plt = None
        for traj in self.trajectories:
            plt = traj.plot_distances(state)
        return plt

    def plot_exciton_density(self, state=None):
        """Plot the ensemble-averaged exciton count versus time."""
        time_max = np.max([traj.get_simulation_times()[-1] for traj in self.trajectories]) * 1.1
        t_range = np.linspace(0, time_max, 100)
        ne_interp = []
        for traj in self.trajectories:
            ne = traj.get_number_of_excitons(state)
            t = traj.get_simulation_times()
            # resample every trajectory onto a common time grid
            # (zero beyond the end of each trajectory)
            ne_interp.append(np.interp(t_range, t, ne, right=0))
        plt.title('Averaged exciton number ({})'.format('' if state is None else state))
        plt.ylim(bottom=0, top=np.max(ne_interp))
        plt.xlim(left=0, right=time_max)
        plt.xlabel('time (ns)')
        plt.ylabel('# of excitons in supercell')
        plt.plot(t_range, np.average(ne_interp, axis=0), label='Total' if state is None else state)
        plt.legend()
        return plt

    def plot_histogram(self, state=None, normalized=False, bins=None):
        """Histogram of the maximal travel distances over all trajectories."""
        distances = []
        for traj in self.trajectories:
            d, _ = traj.get_max_distances_vs_times(state)
            distances += list(d)
        plt.title('Distances histogram ({})'.format('' if state is None else state))
        plt.xlabel('Distance (Angs)')
        if normalized:
            plt.ylabel('Probability density (Angs^-1)')
        else:
            plt.ylabel('# of occurrences')
        try:
            plt.hist(distances, density=normalized, bins=bins)
        except AttributeError:
            # older matplotlib versions use 'normed' instead of 'density'
            plt.hist(distances, normed=normalized, bins=bins)
        return plt
def _get_diffusion_helper(trajectory, state):
    # Module-level wrapper so the call can be submitted to the
    # ProcessPoolExecutor in TrajectoryAnalysisParallel (worker arguments
    # must be importable/picklable top-level callables).
    return trajectory.get_diffusion(state)
class TrajectoryAnalysisParallel(TrajectoryAnalysis):
    """Parallel variant of :class:`TrajectoryAnalysis`.

    Distributes the per-trajectory diffusion evaluation over a pool of
    worker processes.

    :param trajectories: list of trajectory objects (see base class)
    :param processors: number of worker processes in the pool
    """
    def __init__(self, trajectories, processors=2):
        super().__init__(trajectories)
        import concurrent.futures as futures
        self._executor = futures.ProcessPoolExecutor(max_workers=processors)
        # self._executor = futures.ThreadPoolExecutor(max_workers=processors)
    def diffusion_coefficient(self, state=None):
        """
        Return the average diffusion coefficient defined as:
           1/(2*z) * <DiffLen^2>/<time>
        *Parallel version*
        :param state: state label; if None, all states are combined,
            each weighted by its lifetime ratio
        :return: averaged diffusion coefficient (NaN entries are ignored)
        """
        import concurrent.futures as futures
        if state is None:
            sum_diff = 0
            for s in self.get_states():
                futures_list = []
                for trajectory in self.trajectories:
                    futures_list.append(self._executor.submit(_get_diffusion_helper, trajectory, s))
                diffusion_list = []
                # collect results as workers finish; completion order is
                # irrelevant because only the nan-mean of the list is used
                for f in futures.as_completed(futures_list):
                    diffusion_list.append(f.result())
                if not np.isnan(diffusion_list).all():
                    sum_diff += np.nanmean(diffusion_list) * self.get_lifetime_ratio(s)
            return sum_diff
        futures_list = []
        for trajectory in self.trajectories:
            futures_list.append(self._executor.submit(_get_diffusion_helper, trajectory, state))
        diffusion_list = []
        for f in futures.as_completed(futures_list):
            diffusion_list.append(f.result())
        return np.nanmean(diffusion_list)
| [
"numpy.sum",
"concurrent.futures.ProcessPoolExecutor",
"numpy.isnan",
"numpy.linalg.norm",
"numpy.interp",
"numpy.nanmean",
"numpy.max",
"numpy.linspace",
"numpy.nansum",
"numpy.average",
"matplotlib.pyplot.legend",
"numpy.linalg.inv",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"concurrent... | [((116, 135), 'numpy.array', 'np.array', (['supercell'], {}), '(supercell)\n', (124, 135), True, 'import numpy as np\n'), ((194, 213), 'numpy.array', 'np.array', (['normalize'], {}), '(normalize)\n', (202, 213), True, 'import numpy as np\n'), ((7477, 7493), 'numpy.sqrt', 'np.sqrt', (['length2'], {}), '(length2)\n', (7484, 7493), True, 'import numpy as np\n'), ((7996, 8025), 'numpy.linspace', 'np.linspace', (['(0)', 'time_max', '(100)'], {}), '(0, time_max, 100)\n', (8007, 8025), True, 'import numpy as np\n'), ((8398, 8430), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(0)', 'right': 'time_max'}), '(left=0, right=time_max)\n', (8406, 8430), True, 'import matplotlib.pyplot as plt\n'), ((8439, 8462), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (ns)"""'], {}), "('time (ns)')\n", (8449, 8462), True, 'import matplotlib.pyplot as plt\n'), ((8471, 8511), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# of excitons in supercell"""'], {}), "('# of excitons in supercell')\n", (8481, 8511), True, 'import matplotlib.pyplot as plt\n'), ((8620, 8632), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8630, 8632), True, 'import matplotlib.pyplot as plt\n'), ((8973, 9002), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distance (Angs)"""'], {}), "('Distance (Angs)')\n", (8983, 9002), True, 'import matplotlib.pyplot as plt\n'), ((9635, 9686), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {'max_workers': 'processors'}), '(max_workers=processors)\n', (9662, 9686), True, 'import concurrent.futures as futures\n'), ((10880, 10914), 'concurrent.futures.as_completed', 'futures.as_completed', (['futures_list'], {}), '(futures_list)\n', (10900, 10914), True, 'import concurrent.futures as futures\n'), ((10978, 11004), 'numpy.nanmean', 'np.nanmean', (['diffusion_list'], {}), '(diffusion_list)\n', (10988, 11004), True, 'import numpy as np\n'), ((2390, 2417), 'numpy.sum', 'np.sum', (['tensor_list'], {'axis': '(0)'}), 
'(tensor_list, axis=0)\n', (2396, 2417), True, 'import numpy as np\n'), ((2536, 2560), 'numpy.linalg.inv', 'np.linalg.inv', (['trans_mat'], {}), '(trans_mat)\n', (2549, 2560), True, 'import numpy as np\n'), ((3360, 3387), 'numpy.sum', 'np.sum', (['tensor_list'], {'axis': '(0)'}), '(tensor_list, axis=0)\n', (3366, 3387), True, 'import numpy as np\n'), ((3506, 3530), 'numpy.linalg.inv', 'np.linalg.inv', (['trans_mat'], {}), '(trans_mat)\n', (3519, 3530), True, 'import numpy as np\n'), ((8538, 8567), 'numpy.average', 'np.average', (['ne_interp'], {'axis': '(0)'}), '(ne_interp, axis=0)\n', (8548, 8567), True, 'import numpy as np\n'), ((9038, 9081), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability density (Angs^-1)"""'], {}), "('Probability density (Angs^-1)')\n", (9048, 9081), True, 'import matplotlib.pyplot as plt\n'), ((9108, 9138), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# of occurrences"""'], {}), "('# of occurrences')\n", (9118, 9138), True, 'import matplotlib.pyplot as plt\n'), ((9164, 9214), 'matplotlib.pyplot.hist', 'plt.hist', (['distances'], {'density': 'normalized', 'bins': 'bins'}), '(distances, density=normalized, bins=bins)\n', (9172, 9214), True, 'import matplotlib.pyplot as plt\n'), ((164, 181), 'numpy.linalg.norm', 'np.linalg.norm', (['r'], {}), '(r)\n', (178, 181), True, 'import numpy as np\n'), ((2638, 2663), 'numpy.dot', 'np.dot', (['mat_inv.T', 'tensor'], {}), '(mat_inv.T, tensor)\n', (2644, 2663), True, 'import numpy as np\n'), ((3608, 3633), 'numpy.dot', 'np.dot', (['mat_inv.T', 'tensor'], {}), '(mat_inv.T, tensor)\n', (3614, 3633), True, 'import numpy as np\n'), ((8214, 8248), 'numpy.interp', 'np.interp', (['t_range', 't', 'ne'], {'right': '(0)'}), '(t_range, t, ne, right=0)\n', (8223, 8248), True, 'import numpy as np\n'), ((8371, 8388), 'numpy.max', 'np.max', (['ne_interp'], {}), '(ne_interp)\n', (8377, 8388), True, 'import numpy as np\n'), ((9258, 9307), 'matplotlib.pyplot.hist', 'plt.hist', (['distances'], {'normed': 
'normalized', 'bins': 'bins'}), '(distances, normed=normalized, bins=bins)\n', (9266, 9307), True, 'import matplotlib.pyplot as plt\n'), ((10403, 10437), 'concurrent.futures.as_completed', 'futures.as_completed', (['futures_list'], {}), '(futures_list)\n', (10423, 10437), True, 'import concurrent.futures as futures\n'), ((4354, 4379), 'numpy.nansum', 'np.nansum', (['diffusion_list'], {}), '(diffusion_list)\n', (4363, 4379), True, 'import numpy as np\n'), ((6097, 6121), 'numpy.nansum', 'np.nansum', (['lifetime_list'], {}), '(lifetime_list)\n', (6106, 6121), True, 'import numpy as np\n'), ((10581, 10607), 'numpy.nanmean', 'np.nanmean', (['diffusion_list'], {}), '(diffusion_list)\n', (10591, 10607), True, 'import numpy as np\n'), ((4290, 4314), 'numpy.isnan', 'np.isnan', (['diffusion_list'], {}), '(diffusion_list)\n', (4298, 4314), True, 'import numpy as np\n'), ((5944, 5967), 'numpy.isnan', 'np.isnan', (['lifetime_list'], {}), '(lifetime_list)\n', (5952, 5967), True, 'import numpy as np\n'), ((10517, 10541), 'numpy.isnan', 'np.isnan', (['diffusion_list'], {}), '(diffusion_list)\n', (10525, 10541), True, 'import numpy as np\n')] |
from itertools import product
from scipy.signal.windows import blackman
from scipy.signal import convolve2d
from scipy.stats import gaussian_kde
import numpy as np
from PIL import Image
import time
import psutil
def smoothen(data, width):
    """Smooth a 1-D signal by convolving it with a normalized Blackman window.

    The output has the same length as the input (``mode="same"``).
    """
    window = blackman(width)
    window = window / np.sum(window)
    return np.convolve(data, window, mode="same")
def smoothen_image(image, width):
    """Smooth each of the three color channels of `image` with a 2-D
    (separable) Blackman kernel of size `width` x `width`.

    The result array keeps the dtype of the input image.
    """
    window = blackman(width)
    kernel = np.outer(window, window)
    kernel = kernel / np.sum(kernel)
    smoothed = np.empty_like(image)
    for channel in range(3):
        smoothed[:, :, channel] = convolve2d(image[:, :, channel], kernel, mode="same")
    return smoothed
def expand_mask(mask, width):
    """
    Expands `mask` (array of booleans) by setting all elements within a
    distance of `width` to a True element to True.
    """
    span = 2 * width + 1
    rows, cols = np.indices((span, span))
    # circular structuring element of radius `width`
    inside = np.hypot(rows - width, cols - width) <= width
    return convolve2d(mask, inside.astype(float), mode="same").astype(bool)
def color_sum(data):
    """Sum `data` over its color axis (the last axis)."""
    return np.asarray(data).sum(axis=-1)
def color_distance(data1, data2):
    """
    Determines the L1 norm between the two input arrays in color space.
    The color axis must be the last axis.

    The subtraction is done in float to avoid unsigned-integer wrap-around.
    """
    difference = np.subtract(data1, data2, dtype=float)
    return np.abs(difference).sum(axis=-1)
class TestColorDistance(object):
    """Unit tests for :func:`color_distance`."""

    def test_symmetry(self):
        """The distance must not depend on the argument order."""
        shape = (200, 100, 3)
        first = np.random.randint(0, 256, size=shape, dtype=np.uint8)
        second = np.random.randint(0, 256, size=shape, dtype=np.uint8)
        np.testing.assert_array_equal(
            color_distance(first, second),
            color_distance(second, first),
        )

    def test_shape(self):
        """The color axis must be reduced away by the distance."""
        shape = (200, 100, 3)
        first = np.random.randint(0, 256, size=shape, dtype=np.uint8)
        second = np.random.randint(0, 256, size=shape, dtype=np.uint8)
        np.testing.assert_array_equal(color_distance(first, second).shape, shape[:-1])
def show_image(array, mode="RGB", delay=3):
    """Display `array` as an image for `delay` seconds, then close the viewer.

    :param array: image data accepted by :func:`PIL.Image.fromarray`
    :param mode: PIL color mode of the data
    :param delay: seconds to keep the viewer open (previously hard-coded to 3)
    """
    img = Image.fromarray(array, mode)
    img.show()
    time.sleep(delay)
    # PIL's show() spawns an external "display" viewer process that does not
    # exit on its own, so terminate it explicitly.
    for proc in psutil.process_iter():
        if proc.name() == "display":
            proc.kill()
def gaussian_profile(abscissae, position, width):
    """Normalized Gaussian evaluated at `abscissae`, centred at `position`
    with standard deviation `width` (values sum to one)."""
    deviation = (abscissae - position) / width
    profile = np.exp(-deviation ** 2 / 2)
    return profile / sum(profile)
def radial_profile(data, centre, smooth_width=0.5, npoints=100):
    """Azimuthally averaged radial intensity profile of a 2-D array.

    Every pixel contributes a Gaussian of width `smooth_width` centred at
    its radius, weighted by its intensity; the profile is the weighted
    average evaluated on a grid of `npoints` radii.

    :return: tuple ``(abscissae, profile)``
    """
    offsets = np.indices(data.shape) - np.asarray(centre)[:, None, None]
    radii = np.hypot(*offsets)
    abscissae = np.linspace(0, np.max(radii), npoints)
    weighted = np.zeros_like(abscissae)
    normalisation = np.zeros_like(abscissae)
    # C-order iteration matches the original nested loops exactly
    for (i, j), _ in np.ndenumerate(radii):
        profile = gaussian_profile(abscissae, radii[i, j], smooth_width)
        normalisation += profile
        weighted += profile * data[i, j]
    return abscissae, weighted / normalisation
def excentricity(data, centre, smooth_width=2, npoints=100):
    """Deviation of `data` from its azimuthally averaged profile around `centre`.

    Fix: the `smooth_width` and `npoints` parameters were previously
    accepted but silently ignored (hard-coded values 2 and 100 were passed
    to :func:`radial_profile`). They are now forwarded; the default of
    `smooth_width` is set to 2 so default calls behave exactly as before.

    :param data: 2-D intensity array
    :param centre: (row, col) centre of the radial expansion
    :param smooth_width: Gaussian smoothing width for the radial profile
    :param npoints: number of radial sample points
    :return: sqrt of the summed squared deviation, divided by the pixel count
    """
    abscissae, values = radial_profile(
        data, centre, smooth_width=smooth_width, npoints=npoints
    )
    offsets = np.indices(data.shape) - np.asarray(centre)[:, None, None]
    radii = np.hypot(*offsets)
    sumsq = 0
    for i, line in enumerate(data):
        for j, intensity in enumerate(line):
            # expected intensity at this pixel's radius, from the profile
            expected_intensity = np.interp(radii[i, j], abscissae, values)
            sumsq += (intensity - expected_intensity) ** 2
    return np.sqrt(sumsq) / data.size
def new_excentricity(data, centre, width=0.1, cutoff=5, npoints=10):
    """KDE-based deviation of `data` from its radial average around `centre`.

    The radial average is estimated as the ratio of an intensity-weighted
    kernel density estimate of the pixel radii to an unweighted one,
    rescaled by the mean intensity. Radii closer than `cutoff` to either
    end of the radial range are excluded.
    """
    offsets = np.indices(data.shape) - np.asarray(centre)[:, None, None]
    radii = np.hypot(*offsets)
    lower, upper = cutoff, np.max(radii) - cutoff
    abscissae = np.linspace(lower, upper, npoints)
    mask = np.logical_and(lower < radii, radii < upper)
    flat_radii = radii.flatten()
    weighted_kde = gaussian_kde(flat_radii, bw_method=width, weights=data.flatten())
    plain_kde = gaussian_kde(flat_radii, bw_method=width)
    values = weighted_kde.evaluate(abscissae) / plain_kde.evaluate(abscissae) * np.average(data)
    expected_intensity = np.interp(radii[mask], abscissae, values)
    return np.sqrt(np.sum((data[mask] - expected_intensity) ** 2)) / data.size
def circle_mask(resolution, centre, width):
    """Boolean mask of shape `resolution` that is True on a disc of radius
    `width` around `centre`.

    Fix: the local variable previously shadowed the function's own name;
    it is renamed and the redundant reassignment removed.
    """
    mask = np.zeros(resolution, dtype=bool)
    mask[tuple(centre)] = True
    # grow the single seed pixel into a filled circle
    return expand_mask(mask, width)
| [
"numpy.sum",
"scipy.signal.windows.blackman",
"numpy.linalg.norm",
"numpy.exp",
"numpy.random.randint",
"numpy.interp",
"numpy.convolve",
"psutil.process_iter",
"numpy.zeros_like",
"scipy.signal.convolve2d",
"numpy.empty_like",
"numpy.max",
"numpy.linspace",
"numpy.average",
"numpy.asarr... | [((249, 264), 'scipy.signal.windows.blackman', 'blackman', (['width'], {}), '(width)\n', (257, 264), False, 'from scipy.signal.windows import blackman\n'), ((276, 290), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (282, 290), True, 'import numpy as np\n'), ((299, 337), 'numpy.convolve', 'np.convolve', (['data', 'kernel'], {'mode': '"""same"""'}), "(data, kernel, mode='same')\n", (310, 337), True, 'import numpy as np\n'), ((431, 445), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (437, 445), True, 'import numpy as np\n'), ((456, 476), 'numpy.empty_like', 'np.empty_like', (['image'], {}), '(image)\n', (469, 476), True, 'import numpy as np\n'), ((739, 763), 'numpy.array', 'np.array', (['[width, width]'], {}), '([width, width])\n', (747, 763), True, 'import numpy as np\n'), ((778, 820), 'numpy.indices', 'np.indices', (['(2 * width + 1, 2 * width + 1)'], {}), '((2 * width + 1, 2 * width + 1))\n', (788, 820), True, 'import numpy as np\n'), ((825, 884), 'numpy.linalg.norm', 'np.linalg.norm', (['(coordinates - centre[:, None, None])'], {'axis': '(0)'}), '(coordinates - centre[:, None, None], axis=0)\n', (839, 884), True, 'import numpy as np\n'), ((1012, 1033), 'numpy.sum', 'np.sum', (['data'], {'axis': '(-1)'}), '(data, axis=-1)\n', (1018, 1033), True, 'import numpy as np\n'), ((1803, 1831), 'PIL.Image.fromarray', 'Image.fromarray', (['array', 'mode'], {}), '(array, mode)\n', (1818, 1831), False, 'from PIL import Image\n'), ((1844, 1857), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1854, 1857), False, 'import time\n'), ((1871, 1892), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (1890, 1892), False, 'import psutil\n'), ((2000, 2050), 'numpy.exp', 'np.exp', (['(-((abscissae - position) / width) ** 2 / 2)'], {}), '(-((abscissae - position) / width) ** 2 / 2)\n', (2006, 2050), True, 'import numpy as np\n'), ((2225, 2238), 'numpy.max', 'np.max', (['radii'], {}), '(radii)\n', (2231, 2238), True, 'import numpy as 
np\n'), ((2252, 2282), 'numpy.linspace', 'np.linspace', (['(0)', 'r_max', 'npoints'], {}), '(0, r_max, npoints)\n', (2263, 2282), True, 'import numpy as np\n'), ((2289, 2313), 'numpy.zeros_like', 'np.zeros_like', (['abscissae'], {}), '(abscissae)\n', (2302, 2313), True, 'import numpy as np\n'), ((2331, 2355), 'numpy.zeros_like', 'np.zeros_like', (['abscissae'], {}), '(abscissae)\n', (2344, 2355), True, 'import numpy as np\n'), ((3191, 3204), 'numpy.max', 'np.max', (['radii'], {}), '(radii)\n', (3197, 3204), True, 'import numpy as np\n'), ((3255, 3286), 'numpy.linspace', 'np.linspace', (['*interval', 'npoints'], {}), '(*interval, npoints)\n', (3266, 3286), True, 'import numpy as np\n'), ((3294, 3350), 'numpy.logical_and', 'np.logical_and', (['(interval[0] < radii)', '(radii < interval[1])'], {}), '(interval[0] < radii, radii < interval[1])\n', (3308, 3350), True, 'import numpy as np\n'), ((3612, 3653), 'numpy.interp', 'np.interp', (['radii[mask]', 'abscissae', 'values'], {}), '(radii[mask], abscissae, values)\n', (3621, 3653), True, 'import numpy as np\n'), ((3785, 3817), 'numpy.zeros', 'np.zeros', (['resolution'], {'dtype': 'bool'}), '(resolution, dtype=bool)\n', (3793, 3817), True, 'import numpy as np\n'), ((404, 419), 'scipy.signal.windows.blackman', 'blackman', (['width'], {}), '(width)\n', (412, 419), False, 'from scipy.signal.windows import blackman\n'), ((515, 562), 'scipy.signal.convolve2d', 'convolve2d', (['image[:, :, i]', 'kernel'], {'mode': '"""same"""'}), "(image[:, :, i], kernel, mode='same')\n", (525, 562), False, 'from scipy.signal import convolve2d\n'), ((1341, 1393), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': 'size', 'dtype': 'np.uint8'}), '(0, 256, size=size, dtype=np.uint8)\n', (1358, 1393), True, 'import numpy as np\n'), ((1397, 1449), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': 'size', 'dtype': 'np.uint8'}), '(0, 256, size=size, dtype=np.uint8)\n', (1414, 1449), True, 'import numpy as 
np\n'), ((1586, 1638), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': 'size', 'dtype': 'np.uint8'}), '(0, 256, size=size, dtype=np.uint8)\n', (1603, 1638), True, 'import numpy as np\n'), ((1642, 1694), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': 'size', 'dtype': 'np.uint8'}), '(0, 256, size=size, dtype=np.uint8)\n', (1659, 1694), True, 'import numpy as np\n'), ((3010, 3024), 'numpy.sqrt', 'np.sqrt', (['sumsq'], {}), '(sumsq)\n', (3017, 3024), True, 'import numpy as np\n'), ((3571, 3587), 'numpy.average', 'np.average', (['data'], {}), '(data)\n', (3581, 3587), True, 'import numpy as np\n'), ((380, 395), 'scipy.signal.windows.blackman', 'blackman', (['width'], {}), '(width)\n', (388, 395), False, 'from scipy.signal.windows import blackman\n'), ((933, 970), 'scipy.signal.convolve2d', 'convolve2d', (['mask', 'kernel'], {'mode': '"""same"""'}), "(mask, kernel, mode='same')\n", (943, 970), False, 'from scipy.signal import convolve2d\n'), ((1207, 1245), 'numpy.subtract', 'np.subtract', (['data1', 'data2'], {'dtype': 'float'}), '(data1, data2, dtype=float)\n', (1218, 1245), True, 'import numpy as np\n'), ((2915, 2956), 'numpy.interp', 'np.interp', (['radii[i, j]', 'abscissae', 'values'], {}), '(radii[i, j], abscissae, values)\n', (2924, 2956), True, 'import numpy as np\n'), ((3668, 3714), 'numpy.sum', 'np.sum', (['((data[mask] - expected_intensity) ** 2)'], {}), '((data[mask] - expected_intensity) ** 2)\n', (3674, 3714), True, 'import numpy as np\n'), ((2157, 2179), 'numpy.indices', 'np.indices', (['data.shape'], {}), '(data.shape)\n', (2167, 2179), True, 'import numpy as np\n'), ((2749, 2771), 'numpy.indices', 'np.indices', (['data.shape'], {}), '(data.shape)\n', (2759, 2771), True, 'import numpy as np\n'), ((3123, 3145), 'numpy.indices', 'np.indices', (['data.shape'], {}), '(data.shape)\n', (3133, 3145), True, 'import numpy as np\n'), ((2180, 2198), 'numpy.asarray', 'np.asarray', (['centre'], {}), '(centre)\n', 
(2190, 2198), True, 'import numpy as np\n'), ((2772, 2790), 'numpy.asarray', 'np.asarray', (['centre'], {}), '(centre)\n', (2782, 2790), True, 'import numpy as np\n'), ((3146, 3164), 'numpy.asarray', 'np.asarray', (['centre'], {}), '(centre)\n', (3156, 3164), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
from collections import OrderedDict
import numpy as np
class Region(metaclass=abc.ABCMeta):
    """
    Abstract base class for 2D regions.

    Concrete regions must implement membership testing and mask scanning.

    Parameters
    ----------
    rid : int or str
        Region ID.
    coordinate_frame : `~gwcs.coordinate_frames.CoordinateFrame`
        Coordinate frame in which the region is defined.
    """

    def __init__(self, rid, coordinate_frame):
        # The frame and ID are stored verbatim; geometry is supplied by
        # subclasses.
        self._coordinate_system = coordinate_frame
        self._rid = rid

    @abc.abstractmethod
    def __contains__(self, x, y):
        """
        Determine whether a pixel lies inside the region.

        Parameters
        ----------
        x, y : float
            Pixel coordinates.

        Returns
        -------
        bool
            True when the pixel is inside; subclasses must implement this.
        """

    def scan(self, mask):
        """
        Set mask values to this region's ID for all pixels inside the region.

        Subclasses must override this method; the base implementation is a
        no-op placeholder.

        Parameters
        ----------
        mask : ndarray
            Array with the shape of the mask used in
            `~gwcs.selector.RegionsSelector`.

        Returns
        -------
        mask : ndarray
            Array whose elements carry region IDs; pixels belonging to no
            region are marked with 0 or "".
        """
class Polygon(Region):
    """
    Represents a 2D polygon region with multiple vertices.

    Parameters
    ----------
    rid : str
        Polygon id.
    vertices : list of (x,y) tuples or lists
        The list is ordered in such a way that when traversed in a
        counterclockwise direction, the enclosed area is the polygon.
        The last vertex must coincide with the first vertex, minimum
        4 vertices are needed to define a triangle.
    coord_frame : str or `~gwcs.coordinate_frames.CoordinateFrame`
        Coordinate frame in which the polygon is defined.
    """

    def __init__(self, rid, vertices, coord_frame="detector"):
        assert len(vertices) >= 4, ("Expected vertices to be "
                                    "a list of minimum 4 tuples (x,y)")
        super(Polygon, self).__init__(rid, coord_frame)
        self._vertices = np.asarray(vertices)
        self._bbox = self._get_bounding_box()
        # One scan line per integer row of the bounding box (ymin..ymax).
        self._scan_line_range = list(range(self._bbox[1], self._bbox[3] + self._bbox[1] + 1))
        # constructs a Global Edge Table (GET) in bbox coordinates
        self._GET = self._construct_ordered_GET()

    def _get_bounding_box(self):
        # Axis-aligned bounding box of the polygon as (xmin, ymin, width, height).
        x = self._vertices[:, 0].min()
        y = self._vertices[:, 1].min()
        w = self._vertices[:, 0].max() - x
        h = self._vertices[:, 1].max() - y
        return (x, y, w, h)

    def _construct_ordered_GET(self):
        """
        Construct a Global Edge Table (GET)

        The GET is an OrderedDict. Keys are scan line numbers,
        ordered from bbox.ymin to bbox.ymax, where bbox is the
        bounding box of the polygon.
        Values are lists of edges for which edge.ymin==scan_line_number.

        Returns
        -------
        GET: OrderedDict
            {scan_line: [edge1, edge2]}
        """
        # edges is a list of Edge objects which define a polygon
        # with these vertices
        edges = self.get_edges()
        GET = OrderedDict.fromkeys(self._scan_line_range)
        ymin = np.asarray([e._ymin for e in edges])
        for i in self._scan_line_range:
            # Collect every edge whose lower endpoint sits on this scan line.
            ymin_ind = (ymin == i).nonzero()[0]
            if ymin_ind.any():
                GET[i] = [edges[ymin_ind[0]]]
                for j in ymin_ind[1:]:
                    GET[i].append(edges[j])
        return GET

    def get_edges(self):
        """
        Create a list of Edge objects from consecutive vertex pairs.
        """
        return [Edge(name='E{}'.format(i - 1), start=self._vertices[i - 1], stop=self._vertices[i])
                for i in range(1, len(self._vertices))
                ]

    def scan(self, data):
        """
        This is the main function which scans the polygon and creates the mask

        Parameters
        ----------
        data : array
            the mask array
            it has all zeros initially, elements within a region are set to
            the region's ID

        Algorithm:
        - Set the Global Edge Table (GET)
        - Set y to be the smallest y coordinate that has an entry in GET
        - Initialize the Active Edge Table (AET) to be empty
        - For each scan line:
          1. Add edges from GET to AET for which ymin==y
          2. Remove edges from AET fro which ymax==y
          3. Compute the intersection of the current scan line with all edges in the AET
          4. Sort on X of intersection point
          5. Set elements between pairs of X in the AET to the Edge's ID
        """
        # TODO: 1.This algorithm does not mark pixels in the top row and left most column.
        # Pad the initial pixel description on top and left with 1 px to prevent this.
        # 2. Currently it uses intersection of the scan line with edges. If this is
        # too slow it should use the 1/m increment (replace 3 above) (or the increment
        # should be removed from the GET entry).
        y = np.min(list(self._GET.keys()))
        AET = []
        scline = self._scan_line_range[-1]
        while y <= scline:
            AET = self.update_AET(y, AET)
            scan_line = Edge('scan_line', start=[self._bbox[0], y],
                             stop=[self._bbox[0] + self._bbox[2], y])
            x = [np.ceil(e.compute_AET_entry(scan_line)[1]) for e in AET if e is not None]
            # BUG FIX: np.int is a removed alias of the builtin int
            # (NumPy >= 1.24); use int directly.
            xnew = np.asarray(np.sort(x), dtype=int)
            # Fill between successive pairs of edge crossings (even-odd rule).
            for i, j in zip(xnew[::2], xnew[1::2]):
                data[y][i:j + 1] = self._rid
            y = y + 1
        return data

    def update_AET(self, y, AET):
        """
        Update the Active Edge Table (AET)

        Add edges from GET to AET for which ymin of the edge is
        equal to the y of the scan line.
        Remove edges from AET for which ymax of the edge is
        equal to y of the scan line.
        """
        edge_cont = self._GET[y]
        if edge_cont is not None:
            for edge in edge_cont:
                # Horizontal edges never enter the AET.
                if edge._start[1] != edge._stop[1] and edge._ymin == y:
                    AET.append(edge)
        for edge in AET[::-1]:
            if edge is not None:
                if edge._ymax == y:
                    AET.remove(edge)
        return AET

    def __contains__(self, px):
        """Bounding-box test only; an even-odd algorithm would be exact."""
        return px[0] >= self._bbox[0] and px[0] <= self._bbox[0] + self._bbox[2] and \
            px[1] >= self._bbox[1] and px[1] <= self._bbox[1] + self._bbox[3]
class Edge:
    """
    Edge representation.

    An edge has a "start" and "stop" (x,y) vertices and an entry in the
    GET table of a polygon. The GET entry is a list of these values:
    [ymax, x_at_ymin, delta_x/delta_y]
    """

    def __init__(self, name=None, start=None, stop=None, next=None):
        self._start = None
        if start is not None:
            self._start = np.asarray(start)
        self._name = name
        self._stop = stop
        if stop is not None:
            self._stop = np.asarray(stop)
        self._next = next

        if self._stop is not None and self._start is not None:
            # Record which endpoint is lower (ymin) and its x coordinate.
            if self._start[1] < self._stop[1]:
                self._ymin = self._start[1]
                self._yminx = self._start[0]
            else:
                self._ymin = self._stop[1]
                self._yminx = self._stop[0]
            self._ymax = max(self._start[1], self._stop[1])
            self._xmin = min(self._start[0], self._stop[0])
            # BUG FIX: the x extent must compare the x coordinates of both
            # endpoints; the original used self._stop[1] (a y coordinate).
            self._xmax = max(self._start[0], self._stop[0])
        else:
            self._ymin = None
            self._yminx = None
            self._ymax = None
            self._xmin = None
            self._xmax = None
        self.GET_entry = self.compute_GET_entry()

    @property
    def ymin(self):
        return self._ymin

    @property
    def start(self):
        return self._start

    @property
    def stop(self):
        return self._stop

    @property
    def ymax(self):
        return self._ymax

    def compute_GET_entry(self):
        """
        Compute the entry in the Global Edge Table

        [ymax, x@ymin, 1/m]

        Returns None for horizontal edges (delta_y == 0), which never
        participate in scan-line filling.
        """
        if self._start is None:
            entry = None
        else:
            earr = np.asarray([self._start, self._stop])
            if np.diff(earr[:, 1]).item() == 0:
                return None
            else:
                entry = [self._ymax, self._yminx,
                         (np.diff(earr[:, 0]) / np.diff(earr[:, 1])).item(), None]
        return entry

    def compute_AET_entry(self, edge):
        """
        Compute the entry for an edge in the current Active Edge Table

        [ymax, x_intersect, 1/m]
        note: currently 1/m is not used
        """
        x = self.intersection(edge)[0]
        return [self._ymax, x, self.GET_entry[2]]

    def __repr__(self):
        # Chain of edge names following the `next` links, e.g. "E0-->E1".
        fmt = ""
        if self._name is not None:
            fmt += self._name
            next = self.next
            while next is not None:
                fmt += "-->"
                fmt += next._name
                next = next.next
        return fmt

    @property
    def next(self):
        return self._next

    @next.setter
    def next(self, edge):
        # An unnamed edge adopts the assigned edge wholesale; a named edge
        # simply links to it.
        if self._name is None:
            self._name = edge._name
            self._stop = edge._stop
            self._start = edge._start
            self._next = edge.next
        else:
            self._next = edge

    def intersection(self, edge):
        """Return the intersection point of this edge's line with another's."""
        u = self._stop - self._start
        v = edge._stop - edge._start
        w = self._start - edge._start
        D = np.cross(u, v)
        return np.cross(v, w) / D * u + self._start

    def is_parallel(self, edge):
        """True when the two edges have parallel direction vectors."""
        u = self._stop - self._start
        v = edge._stop - edge._start
        if np.cross(u, v):
            return False
        else:
            return True
| [
"numpy.asarray",
"numpy.cross",
"collections.OrderedDict.fromkeys",
"numpy.sort",
"numpy.diff"
] | [((2271, 2291), 'numpy.asarray', 'np.asarray', (['vertices'], {}), '(vertices)\n', (2281, 2291), True, 'import numpy as np\n'), ((3360, 3403), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['self._scan_line_range'], {}), '(self._scan_line_range)\n', (3380, 3403), False, 'from collections import OrderedDict\n'), ((3419, 3455), 'numpy.asarray', 'np.asarray', (['[e._ymin for e in edges]'], {}), '([e._ymin for e in edges])\n', (3429, 3455), True, 'import numpy as np\n'), ((9884, 9898), 'numpy.cross', 'np.cross', (['u', 'v'], {}), '(u, v)\n', (9892, 9898), True, 'import numpy as np\n'), ((10070, 10084), 'numpy.cross', 'np.cross', (['u', 'v'], {}), '(u, v)\n', (10078, 10084), True, 'import numpy as np\n'), ((7187, 7204), 'numpy.asarray', 'np.asarray', (['start'], {}), '(start)\n', (7197, 7204), True, 'import numpy as np\n'), ((7311, 7327), 'numpy.asarray', 'np.asarray', (['stop'], {}), '(stop)\n', (7321, 7327), True, 'import numpy as np\n'), ((8529, 8566), 'numpy.asarray', 'np.asarray', (['[self._start, self._stop]'], {}), '([self._start, self._stop])\n', (8539, 8566), True, 'import numpy as np\n'), ((5692, 5702), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (5699, 5702), True, 'import numpy as np\n'), ((9914, 9928), 'numpy.cross', 'np.cross', (['v', 'w'], {}), '(v, w)\n', (9922, 9928), True, 'import numpy as np\n'), ((8582, 8601), 'numpy.diff', 'np.diff', (['earr[:, 1]'], {}), '(earr[:, 1])\n', (8589, 8601), True, 'import numpy as np\n'), ((8737, 8756), 'numpy.diff', 'np.diff', (['earr[:, 0]'], {}), '(earr[:, 0])\n', (8744, 8756), True, 'import numpy as np\n'), ((8759, 8778), 'numpy.diff', 'np.diff', (['earr[:, 1]'], {}), '(earr[:, 1])\n', (8766, 8778), True, 'import numpy as np\n')] |
__author__ = 'marko'
import numpy as np
from random import randint
from skimage.feature import hessian_matrix
from skimage.morphology import disk
from skimage.filters.rank import entropy
from preprocess import Preprocess
import cv2
class ImageSample(object):
    '''Image wrapper class that is used for samples extraction from images'''

    def __init__(self, img=None, path=None, block_size=5):
        """Wrap an image and precompute per-pixel feature maps.

        :param img: image array (as returned by cv2.imread); takes precedence
        :param path: path to load the image from when img is not given
        :param block_size: side length of the square pixel neighbourhood
        """
        if path and not img:
            img = cv2.imread(path)
        self.block_size = block_size
        self.img_rgb = img.copy()
        self.img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        self.img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        self.height, self.width, _ = img.shape
        # Second-derivative (Hessian) response maps of the gray image.
        self.Hxx, self.Hxy, self.Hyy = hessian_matrix(self.img_gray)
        # Local entropy over a disk neighbourhood of radius 25.
        neighours = disk(25)
        self.entropy = entropy(self.img_gray, neighours)

    def to_samples(self, n='all'):
        """
        Extract samples from image.

        :param n: number of samples per image, or 'all' for every pixel
        :return: list of samples
        """
        samples = []
        # Pixels whose neighbourhood crosses the image border yield None
        # features and are skipped.
        add_sample = lambda fts: samples.append(fts) if fts else None
        if n == 'all':
            # BUG FIX: xrange is Python 2 only; range works in both.
            for i in range(self.height):
                for j in range(self.width):
                    add_sample(self._get_features(i, j))
        else:
            while len(samples) < n:
                i, j = randint(0, self.height - 1), randint(0, self.width - 1)
                add_sample(self._get_features(i, j))
        return samples

    def _get_features(self, i, j):
        """
        Calculates vector of features for pixel given with i,j coordinate.

        :param i: row index
        :param j: column index
        :return: list of features, or None when the neighbourhood would
            extend past the image border
        """
        features = []
        # BUG FIX: use integer division so the ranges below get ints on
        # Python 3 (true division yields a float there).
        margin = self.block_size // 2
        # NOTE(review): the half-open ranges make the window block_size-1
        # pixels wide and not centred on (i, j); kept as-is because trained
        # classifiers depend on the resulting feature count.
        for ii in range(i - margin, i + margin):
            for jj in range(j - margin, j + margin):
                if ii < 0 or jj < 0 or ii >= self.height or jj >= self.width:
                    return None
                else:
                    features.extend(self.img_rgb[ii, jj])
                    features.extend(self.img_hsv[ii, jj])
                    features.append(self.Hxx[ii, jj])
                    features.append(self.Hxy[ii, jj])
                    features.append(self.Hyy[ii, jj])
                    features.append(self.entropy[ii, jj])
        return features

    def find_vegetation(self, classifier):
        """
        Extract vegetation mask from image using given classifier.
        Pixels are classified in batches to bound memory usage.

        :param classifier: fitted classifier exposing predict()
        :return: 2D label array of shape (height, width)
        """
        result = np.zeros((self.height, self.width))
        samples = []
        ij = []
        limit = 20000  # maximum batch size passed to predict()
        for i in range(self.height):
            for j in range(self.width):
                fts = self._get_features(i, j)
                if fts:
                    samples.append(fts)
                    ij.append((i, j))
                if len(samples) > limit:
                    classified = classifier.predict(samples)
                    for point, label in zip(ij, classified):
                        result[point] = label
                    samples = []
                    ij = []
        # BUG FIX: guard the final batch -- most classifiers raise when
        # predict() is called with an empty sample list.
        if samples:
            classified = classifier.predict(samples)
            for point, label in zip(ij, classified):
                result[point] = label
        return result

    def find_vegetation_slow(self, classifier):
        """
        Extract vegetation mask from image using given classifier,
        one pixel at a time (slow reference implementation).

        :param classifier: fitted classifier exposing predict()
        :return: 2D label array of shape (height, width)
        """
        result = np.zeros((self.height, self.width))
        for i in range(self.height):
            for j in range(self.width):
                fts = self._get_features(i, j)
                if fts:
                    result[i, j] = classifier.predict(fts)
        return result
| [
"random.randint",
"cv2.cvtColor",
"numpy.zeros",
"skimage.morphology.disk",
"cv2.imread",
"skimage.feature.hessian_matrix",
"skimage.filters.rank.entropy"
] | [((602, 638), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HSV'], {}), '(img, cv2.COLOR_RGB2HSV)\n', (614, 638), False, 'import cv2\n'), ((728, 765), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (740, 765), False, 'import cv2\n'), ((852, 881), 'skimage.feature.hessian_matrix', 'hessian_matrix', (['self.img_gray'], {}), '(self.img_gray)\n', (866, 881), False, 'from skimage.feature import hessian_matrix\n'), ((1051, 1059), 'skimage.morphology.disk', 'disk', (['(25)'], {}), '(25)\n', (1055, 1059), False, 'from skimage.morphology import disk\n'), ((1083, 1116), 'skimage.filters.rank.entropy', 'entropy', (['self.img_gray', 'neighours'], {}), '(self.img_gray, neighours)\n', (1090, 1116), False, 'from skimage.filters.rank import entropy\n'), ((3631, 3666), 'numpy.zeros', 'np.zeros', (['(self.height, self.width)'], {}), '((self.height, self.width))\n', (3639, 3666), True, 'import numpy as np\n'), ((4620, 4655), 'numpy.zeros', 'np.zeros', (['(self.height, self.width)'], {}), '((self.height, self.width))\n', (4628, 4655), True, 'import numpy as np\n'), ((446, 462), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (456, 462), False, 'import cv2\n'), ((1644, 1671), 'random.randint', 'randint', (['(0)', '(self.height - 1)'], {}), '(0, self.height - 1)\n', (1651, 1671), False, 'from random import randint\n'), ((1671, 1697), 'random.randint', 'randint', (['(0)', '(self.width - 1)'], {}), '(0, self.width - 1)\n', (1678, 1697), False, 'from random import randint\n')] |
# coding: utf-8
import os
import cv2
import numpy as np
from tqdm import tqdm
import dlib
from config import IMG_SIZE
from models.mobile_net import MobileNetDeepEstimator
from preprocessor import preprocess_input
# Module-level dlib frontal face detector, shared by detect_faces().
detector = dlib.get_frontal_face_detector()
def preprocess(image_arr):
    """Run the project's preprocessing pipeline over an image batch."""
    return preprocess_input(image_arr)
def detect_faces(img):
    """Detect faces in `img` and return them cropped and resized.

    Draws a rectangle on `img` for every detection (side effect) and
    returns an array of shape (n_faces, IMG_SIZE, IMG_SIZE, 3) with each
    face cropped (with a 40% margin) from the original image.
    """
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    detections = detector(rgb, 1)
    crops = np.empty((len(detections), IMG_SIZE, IMG_SIZE, 3))
    img_h, img_w, _ = np.shape(rgb)
    for idx, det in tqdm(enumerate(detections)):
        x1, y1 = det.left(), det.top()
        x2, y2 = det.right() + 1, det.bottom() + 1
        w, h = det.width(), det.height()
        # Expand the detection box by a 40% margin, clipped to the image.
        xw1 = max(int(x1 - 0.4 * w), 0)
        yw1 = max(int(y1 - 0.4 * h), 0)
        xw2 = min(int(x2 + 0.4 * w), img_w - 1)
        yw2 = min(int(y2 + 0.4 * h), img_h - 1)
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
        crops[idx, :, :, :] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (IMG_SIZE, IMG_SIZE))
    return crops
model = MobileNetDeepEstimator(IMG_SIZE, 1, 21, weights=None)()
model.load_weights(os.path.join("checkpoints", "weights.62-1.97.hdf5"))

base_path = "test"

# Write the CSV header.
# BUG FIX: the header had no trailing newline, so the first data row was
# glued onto it.
with open('prediction.csv', 'w') as f:
    f.write('Age, Gender\n')

with open("prediction.csv", "a") as f:
    for _, _, imgs in os.walk(base_path):
        for im in tqdm(imgs):
            img = cv2.imread(os.path.join(base_path, im))
            img_data = detect_faces(img)
            img_data = preprocess(img_data)
            results = model.predict(img_data)
            predicted_gender = results[0]
            # Ages come as 21 class probabilities; the dot product with the
            # bin indices is the expected (mean) age.
            ages = np.arange(0, 21).reshape(21, 1)
            predicted_age = results[1].dot(ages).flatten()
            # BUG FIX: the original format string had two placeholders for
            # three arguments, silently dropping the gender column.
            res = '{},{},{}\n'.format(im,
                                      int(predicted_age[0] * 4.76),
                                      predicted_gender[0])
            print(res)
            f.write(res)
| [
"tqdm.tqdm",
"models.mobile_net.MobileNetDeepEstimator",
"cv2.cvtColor",
"os.walk",
"numpy.shape",
"numpy.arange",
"dlib.get_frontal_face_detector",
"cv2.rectangle",
"preprocessor.preprocess_input",
"os.path.join",
"cv2.resize"
] | [((229, 261), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (259, 261), False, 'import dlib\n'), ((302, 329), 'preprocessor.preprocess_input', 'preprocess_input', (['image_arr'], {}), '(image_arr)\n', (318, 329), False, 'from preprocessor import preprocess_input\n'), ((387, 423), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (399, 423), False, 'import cv2\n'), ((545, 564), 'numpy.shape', 'np.shape', (['input_img'], {}), '(input_img)\n', (553, 564), True, 'import numpy as np\n'), ((1074, 1127), 'models.mobile_net.MobileNetDeepEstimator', 'MobileNetDeepEstimator', (['IMG_SIZE', '(1)', '(21)'], {'weights': 'None'}), '(IMG_SIZE, 1, 21, weights=None)\n', (1096, 1127), False, 'from models.mobile_net import MobileNetDeepEstimator\n'), ((1149, 1200), 'os.path.join', 'os.path.join', (['"""checkpoints"""', '"""weights.62-1.97.hdf5"""'], {}), "('checkpoints', 'weights.62-1.97.hdf5')\n", (1161, 1200), False, 'import os\n'), ((1351, 1369), 'os.walk', 'os.walk', (['base_path'], {}), '(base_path)\n', (1358, 1369), False, 'import os\n'), ((896, 950), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', '(255, 0, 0)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (255, 0, 0), 2)\n', (909, 950), False, 'import cv2\n'), ((979, 1045), 'cv2.resize', 'cv2.resize', (['img[yw1:yw2 + 1, xw1:xw2 + 1, :]', '(IMG_SIZE, IMG_SIZE)'], {}), '(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (IMG_SIZE, IMG_SIZE))\n', (989, 1045), False, 'import cv2\n'), ((1389, 1399), 'tqdm.tqdm', 'tqdm', (['imgs'], {}), '(imgs)\n', (1393, 1399), False, 'from tqdm import tqdm\n'), ((1430, 1457), 'os.path.join', 'os.path.join', (['base_path', 'im'], {}), '(base_path, im)\n', (1442, 1457), False, 'import os\n'), ((1653, 1669), 'numpy.arange', 'np.arange', (['(0)', '(21)'], {}), '(0, 21)\n', (1662, 1669), True, 'import numpy as np\n')] |
import numpy as np
from envs.mpe.core import World, Agent, Landmark
from envs.mpe.scenario import BaseScenario
class Scenario(BaseScenario):
    """Speaker/listener MPE scenario: an immobile speaker must communicate
    the goal landmark to a mobile, silent listener."""

    def make_world(self, args, now_agent_num=None):
        """Build a world with one speaker, one listener and three landmarks.

        :param args: kept for interface compatibility (unused here)
        :param now_agent_num: kept for interface compatibility (unused here)
        :return: the initialised World
        """
        world = World()
        # set any world properties first
        world.dim_c = 3
        num_landmarks = 3
        world.collaborative = True
        # add agents
        num_agents = 2
        assert num_agents == 2, ("only 2 agents is supported, check the config.py.")
        world.agents = [Agent() for i in range(num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.collide = False
            agent.size = 0.075
        # speaker: can communicate but cannot move
        world.agents[0].movable = False
        # listener: can move but cannot communicate
        world.agents[1].silent = True
        # add landmarks
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0.04
        # make initial conditions
        self.reset_world(world)
        return world

    def reset_world(self, world):
        """Assign a random goal landmark and randomise positions/colors."""
        # assign goals to agents
        for agent in world.agents:
            agent.goal_a = None
            agent.goal_b = None
        # want listener to go to the goal landmark
        world.agents[0].goal_a = world.agents[1]
        world.agents[0].goal_b = np.random.choice(world.landmarks)
        # random properties for agents
        for i, agent in enumerate(world.agents):
            agent.color = np.array([0.25, 0.25, 0.25])
        # random properties for landmarks
        world.landmarks[0].color = np.array([0.65, 0.15, 0.15])
        world.landmarks[1].color = np.array([0.15, 0.65, 0.15])
        world.landmarks[2].color = np.array([0.15, 0.15, 0.65])
        # special colors for goals: brighten the goal landmark's color
        world.agents[0].goal_a.color = world.agents[0].goal_b.color + np.array([0.45, 0.45, 0.45])
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for i, landmark in enumerate(world.landmarks):
            landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)

    def benchmark_data(self, agent, world):
        # returns data for benchmarking purposes
        # BUG FIX: the original returned `reward(agent, reward)`, which is a
        # NameError (no global `reward`) and passed the wrong second argument.
        return self.reward(agent, world)

    def reward(self, agent, world):
        """Shared sparse reward: 0.1 when the listener covers the goal."""
        # squared distance from listener to goal landmark
        reward = 0
        a = world.agents[0]
        dist2 = np.sum(np.square(a.goal_a.state.p_pos - a.goal_b.state.p_pos))
        # sparse reward
        # NOTE(review): dist2 is a *squared* distance compared against an
        # unsquared radius sum -- confirm this threshold is intended.
        cover_num = 0
        if dist2 < world.agents[0].size + world.landmarks[0].size:
            cover_num += 1
        if cover_num == 1:
            reward += 1
        return 0.1 * reward

    def share_reward(self, world):
        """No additional shared reward in this scenario."""
        return 0.0

    def landmark_cover_state(self, world):
        """Landmark cover state is not tracked in this scenario."""
        return None

    def get_state(self, world):
        """Return the full state vector plus the true goal position."""
        # get all states
        agent_pos = []
        for agent in world.agents:
            agent_pos.append(agent.state.p_pos)
        agent_com = []
        for agent in world.agents:
            agent_com.append(agent.state.c)
        entity_pos = []
        for entity in world.landmarks:
            entity_pos.append(entity.state.p_pos)
        # get true goal states
        goal_states = world.agents[0].goal_b.state.p_pos
        return np.concatenate(agent_pos + agent_com + entity_pos), goal_states

    def get_info(self, world):
        """Episode info: cover rate, success flag and all entity positions."""
        # cover: is the listener on top of the goal landmark?
        a = world.agents[0]
        dist2 = np.sum(np.square(a.goal_a.state.p_pos - a.goal_b.state.p_pos))
        num = 0.0
        if dist2 < world.agents[0].size + world.landmarks[0].size:
            num += 1.0
        success = False
        if num == 1:
            success = True
        # position info
        pos_info = []
        for agent in world.agents:
            pos_info.append(agent.state.p_pos)
        for landmark in world.landmarks:
            pos_info.append(landmark.state.p_pos)
        info_list = {'cover_rate': num, 'success': success, 'pos_state': np.array(pos_info)}
        return info_list

    def observation(self, agent, world):
        """Observation: goal color for the speaker, velocities/positions and
        communication for the listener."""
        # goal color
        goal_color = np.zeros(world.dim_color)
        if agent.goal_b is not None:
            goal_color = agent.goal_b.color
        # get positions of all entities in this agent's reference frame
        entity_pos = []
        for entity in world.landmarks:
            entity_pos.append(entity.state.p_pos - agent.state.p_pos)
        # communication of all other agents
        comm = []
        for other in world.agents:
            if other is agent or (other.state.c is None): continue
            comm.append(other.state.c)
        # speaker
        if not agent.movable:
            return np.concatenate([goal_color])
        # listener
        # NOTE(review): a movable, non-silent agent falls through and returns
        # None; this scenario never creates such an agent.
        if agent.silent:
            return np.concatenate([agent.state.p_vel] + entity_pos + comm)
| [
"numpy.random.uniform",
"envs.mpe.core.Agent",
"numpy.square",
"numpy.zeros",
"envs.mpe.core.World",
"numpy.array",
"numpy.random.choice",
"numpy.concatenate",
"envs.mpe.core.Landmark"
] | [((210, 217), 'envs.mpe.core.World', 'World', ([], {}), '()\n', (215, 217), False, 'from envs.mpe.core import World, Agent, Landmark\n'), ((1488, 1521), 'numpy.random.choice', 'np.random.choice', (['world.landmarks'], {}), '(world.landmarks)\n', (1504, 1521), True, 'import numpy as np\n'), ((1755, 1783), 'numpy.array', 'np.array', (['[0.65, 0.15, 0.15]'], {}), '([0.65, 0.15, 0.15])\n', (1763, 1783), True, 'import numpy as np\n'), ((1817, 1845), 'numpy.array', 'np.array', (['[0.15, 0.65, 0.15]'], {}), '([0.15, 0.65, 0.15])\n', (1825, 1845), True, 'import numpy as np\n'), ((1879, 1907), 'numpy.array', 'np.array', (['[0.15, 0.15, 0.65]'], {}), '([0.15, 0.15, 0.65])\n', (1887, 1907), True, 'import numpy as np\n'), ((4621, 4646), 'numpy.zeros', 'np.zeros', (['world.dim_color'], {}), '(world.dim_color)\n', (4629, 4646), True, 'import numpy as np\n'), ((495, 502), 'envs.mpe.core.Agent', 'Agent', ([], {}), '()\n', (500, 502), False, 'from envs.mpe.core import World, Agent, Landmark\n'), ((851, 861), 'envs.mpe.core.Landmark', 'Landmark', ([], {}), '()\n', (859, 861), False, 'from envs.mpe.core import World, Agent, Landmark\n'), ((1636, 1664), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (1644, 1664), True, 'import numpy as np\n'), ((2011, 2039), 'numpy.array', 'np.array', (['[0.45, 0.45, 0.45]'], {}), '([0.45, 0.45, 0.45])\n', (2019, 2039), True, 'import numpy as np\n'), ((2143, 2181), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(+1)', 'world.dim_p'], {}), '(-1, +1, world.dim_p)\n', (2160, 2181), True, 'import numpy as np\n'), ((2213, 2234), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (2221, 2234), True, 'import numpy as np\n'), ((2263, 2284), 'numpy.zeros', 'np.zeros', (['world.dim_c'], {}), '(world.dim_c)\n', (2271, 2284), True, 'import numpy as np\n'), ((2375, 2413), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(+1)', 'world.dim_p'], {}), '(-1, +1, world.dim_p)\n', (2392, 2413), 
True, 'import numpy as np\n'), ((2448, 2469), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (2456, 2469), True, 'import numpy as np\n'), ((2761, 2815), 'numpy.square', 'np.square', (['(a.goal_a.state.p_pos - a.goal_b.state.p_pos)'], {}), '(a.goal_a.state.p_pos - a.goal_b.state.p_pos)\n', (2770, 2815), True, 'import numpy as np\n'), ((3692, 3742), 'numpy.concatenate', 'np.concatenate', (['(agent_pos + agent_com + entity_pos)'], {}), '(agent_pos + agent_com + entity_pos)\n', (3706, 3742), True, 'import numpy as np\n'), ((3955, 4009), 'numpy.square', 'np.square', (['(a.goal_a.state.p_pos - a.goal_b.state.p_pos)'], {}), '(a.goal_a.state.p_pos - a.goal_b.state.p_pos)\n', (3964, 4009), True, 'import numpy as np\n'), ((4492, 4510), 'numpy.array', 'np.array', (['pos_info'], {}), '(pos_info)\n', (4500, 4510), True, 'import numpy as np\n'), ((5214, 5242), 'numpy.concatenate', 'np.concatenate', (['[goal_color]'], {}), '([goal_color])\n', (5228, 5242), True, 'import numpy as np\n'), ((5306, 5361), 'numpy.concatenate', 'np.concatenate', (['([agent.state.p_vel] + entity_pos + comm)'], {}), '([agent.state.p_vel] + entity_pos + comm)\n', (5320, 5361), True, 'import numpy as np\n')] |
import pandas as pd
import seaborn as sn
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
##read in the file
file_name="train.csv"
titanic=pd.read_csv(file_name,header=0,sep=",")
###split data: 80% training, 20% test
X=titanic.drop("Survived",axis=1)
y=titanic["Survived"]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2, random_state=42) #80% of training, 20% test
###inspect data; recombine features and target for exploratory analysis
X_train.head()
# BUG FIX: the concat/head pair was duplicated verbatim; the second copy
# did nothing and has been removed.
titanic_edl=pd.concat([X_train, y_train], axis=1)
titanic_edl.head()
### inspect survival
titanic_edl[["Survived","PassengerId"]].groupby("Survived").nunique()
#### create 3 columns: one for titles (embedded in names), one for total
#### family members, and one for cabin deck level
titanic_edl['Title'] = titanic_edl['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
titanic_edl['Title'].unique()
titanic_edl["Family_size"]=titanic_edl["SibSp"] + titanic_edl["Parch"]
titanic_edl["Level_cabin"]=titanic_edl["Cabin"].str[0]
# BUG FIX: replaced the chained assignment df["c"][mask] = v (which raises
# SettingWithCopyWarning and may silently not write) with fillna.
titanic_edl["Level_cabin"] = titanic_edl["Level_cabin"].fillna("No Cabin")
###percentage of survived customers from 1st class
first_class=titanic_edl["Pclass"]==1
survived_1=titanic_edl[["Pclass","Survived","PassengerId"]].loc[first_class].groupby("Survived").nunique()["PassengerId"]
survived_1_total=titanic_edl[["Pclass","Survived","PassengerId"]].loc[first_class].nunique()["PassengerId"]
prop_survived_1=survived_1/survived_1_total*100
print(prop_survived_1)
#####bar plot showing proportion of males to females over all classes
titanic_edl[["Pclass","Sex","PassengerId"]].groupby(["Pclass","Sex"]).count().plot.bar()
male_female=titanic_edl[["Pclass","Sex","PassengerId"]].groupby(["Pclass","Sex"]).count()
male_female.reset_index(inplace=True)
###plot with seaborn males/females
plt.figure(figsize=(5,5))
sn.barplot(x="Pclass",y="PassengerId",hue="Sex",data=male_female)
plt.xlabel("Class")
plt.ylabel("Number of passengers")
plt.title("Gender distribution")
plt.show()
###females outnumber males in all classes, except the 3ed class, where males are more
#than double the number of females
####survival of females and males in all classes
grouped_=titanic_edl[["Pclass","Sex","PassengerId","Survived"]].groupby(["Pclass","Sex","Survived"]).count()
grouped_.reset_index(inplace=True)
###find distribution of total survived? total non survived over class and sex
# SV_NSV = total passengers per survival outcome; prop_surv = share of each
# (class, sex) cell within that outcome, in percent (sums to 100 per outcome).
grouped_["SV_NSV"]=grouped_[["Survived","PassengerId"]].groupby("Survived").transform("sum")
grouped_["prop_surv"]=grouped_["PassengerId"]/grouped_["SV_NSV"]*100
grouped_.head()
#### plot survival based on sex over all classes, sum of probability of survival is 1
plt.figure(figsize=(5,5))
sn.barplot(x="Pclass",y="prop_surv",hue="Sex",data=grouped_[grouped_["Survived"]==1])
plt.xlabel("Class")
plt.ylabel("Survival rate")
plt.title("Survival rate based on classe for all genders)")
plt.show()
###when it comes to survival rates: females again have a higher survival rate than men,
# even in the 3ed class (by 5%), where men outnumber women
#women survival is actually almost more tahn double that of men
#### calculate how many % of women actually survived vs men
# Surv_sex = each row's passenger count as a percentage of that gender's total.
grouped_["Total_gender"]=grouped_[["Sex","PassengerId"]].groupby("Sex").transform("sum")
grouped_["Surv_sex"]=grouped_["PassengerId"]/grouped_["Total_gender"]*100
###plot survival rates based on gender
plt.figure(figsize=(5,5))
sn.boxplot(x="Sex",y="Surv_sex",hue="Survived", data=grouped_)
plt.xlabel("Sex")
plt.ylabel("Survival rate")
plt.title("Survival distribution based on gender (over all classes)")
plt.show()
#### age and fare
titanic_edl[["Survived","Age"]].groupby(["Survived"]).mean()
### on average the lower the age the higher the survival chances were (28.4)
titanic_edl[["Survived","Age","Sex"]].groupby(["Survived","Sex"]).mean().unstack().plot.bar()
plt.title("Age distribution per gender and survival")
plt.show()
#### overall age mean for surviving women passangers was higher than that of surviving male passangers,
# but also higher than that of non surviving females (which is strange ).
# basically age for women is directly prop to survival rate . for men the distribution is
# as expected --> namly older men died whilst younger survived
titanic_edl[["Survived","Age","Pclass"]].groupby(["Survived","Pclass"]).mean().unstack().plot.bar()
plt.ylabel("Age")
plt.xlabel("Survived, Class")
plt.title("Survived per age and class")
plt.show()
### this looks a bit more "normal": the survival age increases by class and is usually lower than the age on non survival
####let"s look at the age distribution for each passenger class
#We can set dodge as True in the swarmplot to split the distributions
fig, ax = plt.subplots(figsize=(12,6), nrows=1, ncols=3)
plt.suptitle('Age Distribution of Survivor state by Gender in Each Class', fontsize=16)
for i in range(3):
ax[i].set_title('Class {}'.format(i+1))
ax[i].set_ylim(-5,85)
sn.boxplot(data=titanic_edl[titanic_edl['Pclass']==i+1],
x='Survived',
y='Age',
hue='Sex',
hue_order=['male','female'],
dodge=True,
ax=ax[i])
ax[1].set_ylabel(None)
ax[2].set_ylabel(None)
ax[0].legend_.remove()
ax[1].legend_.remove()
##lets look at prices
titanic_edl[["Survived","Fare"]].groupby(["Survived"]).mean()
titanic_edl[["Survived","Fare","Sex"]].groupby(["Survived","Sex"]).mean().unstack().plot.bar()
plt.ylabel("Fare Prices")
plt.title("Average Fare price per gender and survival")
plt.show()
### fare prices for females who survived, where higher than those of men who survived
### fare price seems to be correlated to more than just class,
# # since females are less in absolute numbers, but whit higher fare rates
# # men in fisrt class are more than double than women in fisrt class
titanic_edl[["Survived","Fare","Sex","Pclass"]].groupby(["Survived","Sex","Pclass"]).mean().unstack().plot.bar(legend=False)
plt.ylabel("Fare")
plt.title("Fare Price distributed across survival state, per gender and class")
## men the ones that survive consitantly outpay the ones that don"t
##Let"s look at the distribution of Fare prices accross classes and genders
# One subplot per passenger class; each shows Fare vs Survived, split by Sex.
fig, ax = plt.subplots(figsize=(12,6), nrows=1, ncols=3)
plt.suptitle('Fare Price Distribution of Survivor state by Gender in Each Class', fontsize=16)
for i in range(3):
    ax[i].set_title('Class {}'.format(i+1))
    ax[i].set_ylim(-5,260)
    sn.boxplot(data=titanic_edl[titanic_edl['Pclass']==i+1],
               x='Survived',
               y='Fare',
               hue='Sex',
               hue_order=['male','female'],
               dodge=True,
               ax=ax[i])
# Drop redundant y labels and legends on all but the last subplot.
ax[1].set_ylabel(None)
ax[2].set_ylabel(None)
ax[0].legend_.remove()
ax[1].legend_.remove()
##lets look at prices
# NOTE(review): the three statements below duplicate the fare-by-gender plot
# produced earlier in this script; probably a copy/paste leftover.
titanic_edl[["Survived","Fare"]].groupby(["Survived"]).mean()
titanic_edl[["Survived","Fare","Sex"]].groupby(["Survived","Sex"]).mean().unstack().plot.bar()
plt.ylabel("Fare Prices")
plt.title("Average Fare price per gender and survival")
plt.show()
### let"s see the connection amongst sex and sib/spouses and fare and sib/spuses
titanic_edl[["Survived","SibSp","PassengerId"]].groupby(["Survived","SibSp"]).count().unstack().plot.bar()
###survival with up to 4 siblings/spuses (small families)
#the most who survived where alone
###survival with up to 4 siblings/spuses (small families)
titanic_edl[["Survived","SibSp","Sex"]].groupby(["Survived","Sex"]).count()
titanic_edl[["Survived","SibSp","Sex","Fare"]].groupby(["Survived","SibSp","Sex"]).mean()
### the ones that survived have up to 4 siblings, and with 3 siblings you actually spend the highest amount of money
##only women with 3 siblings survived
titanic_edl[["Survived","SibSp","Pclass","Fare"]].groupby(["Survived","SibSp","Pclass"]).mean()
###fare price for 3 siblings is the same in survived and non survived--> it only matters if you are a female in oredr to survive
dist_sib_fare=titanic_edl[["Survived","SibSp","Pclass","Fare"]].groupby(["Survived","SibSp","Pclass"]).mean()
dist_sib_fare.reset_index(inplace=True)
plt.figure(figsize=(5,5))
sn.boxplot(x="SibSp",y="Fare",hue="Survived",data=dist_sib_fare)
plt.xlabel("Siblings")
plt.ylabel("Fare price")
plt.title("Fare prices based on #siblings")
plt.show()
###fare price and gender distribution
sex_sib=titanic_edl[["Survived","SibSp","Sex","Fare"]].groupby(["Survived","SibSp","Sex"]).mean()
sex_sib.reset_index(inplace=True)
plt.figure(figsize=(5,5))
sn.boxplot(x="Sex",y="Fare",hue="Survived",data=sex_sib)
plt.xlabel("Siblings")
plt.ylabel("Fare price")
plt.title("Fare prices based on #siblings")
plt.show()
#### let's look at the significance of name titles to survived class
titanic_edl.groupby("Title")["Survived"].aggregate(["mean","count"])
total_pop=titanic_edl["Survived"].count()
def weighted_survival(df, total=None):
    """Return the survival rate of group *df* in percent, weighted by group size.

    The weight is the share of the whole population this group represents,
    so survival rates of groups with very different sizes stay comparable.

    Args:
        df: DataFrame (one groupby group) with a "Survived" column of 0/1 values.
        total: population size used for the weight. Defaults to the
            module-level ``total_pop`` computed above, keeping the existing
            ``groupby("Title").apply(weighted_survival)`` call working unchanged.

    Returns:
        The weighted survival rate in percent: sum(Survived * weight * 100).
    """
    if total is None:
        total = total_pop  # fall back to the module-level passenger count
    weight = df["Survived"].count() / total
    surv = df["Survived"] * weight * 100
    return np.sum(surv)
# Weighted survival rate per title (weight = group's share of the population).
titanic_edl.groupby("Title").apply(weighted_survival).plot.bar()
plt.title("Avg. weighted Suvival rate by title (adj. by population size")
plt.ylabel("Survival rate in %")
# NOTE(review): the two titles above/below have a typo ("Suvival") and an
# unbalanced "(" -- left untouched here since they are runtime strings.
# Plain (unweighted) mean survival per title and class.
# NOTE(review): the next title says "weighted", but this is an unweighted mean.
titanic_edl.groupby(["Title","Pclass"])["Survived"].mean().unstack().plot.bar()
plt.title("Avg. weighted Suvival rate by title and class(adj. by population size")
plt.ylabel("Survival rate in %")
### Family size alone.
titanic_edl.groupby(["Family_size"])["Survived"].mean().plot.bar()
plt.title("Survival by family size ")
plt.ylabel("Survival rate in %")
### Family size interacted with other factors: class, then gender.
titanic_edl.groupby(["Family_size","Pclass"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival by family size and class")
plt.ylabel("Survival rate in %")
#### Is the survival rate dependent on family size and sex?
titanic_edl.groupby(["Family_size","Sex"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival by family size and class")
plt.ylabel("Survival rate in %")
### Underlying distribution of males/females per family size (raw counts).
titanic_edl.groupby(["Family_size","Sex"])["PassengerId"].count().unstack().plot.bar()
plt.title("Survival by family size and class")
plt.ylabel("Number of passengers")
plt.show()
### Parent/child dependency (Parch) alone.
titanic_edl.groupby(["Parch"])["Survived"].mean().plot.bar(legend=False)
plt.title("Survival by direct dependecy: parents")
plt.ylabel("Survval rate")
plt.show()
### Above a dependency count of 3 there are no survivors;
### Parch doesn't seem to add any other value.
### Parch split by gender.
titanic_edl.groupby(["Parch","Sex"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival by direct dependecy: parents")
plt.ylabel("Survval rate")
plt.show()
### Idea: add a dummy variable encoding "more than 4 siblings"
# (the more dependents you have, the less likely you are to survive).
titanic_edl.groupby(["SibSp"])["Survived"].mean().plot.bar()
plt.title("Survival rate by direct dependency: child or spouse")
plt.ylabel("Survval rate")
plt.show()
### SibSp split by gender.
# NOTE(review): the plot titles contain typos ("dependecy", "Survval") --
# left untouched since they are runtime strings.
titanic_edl.groupby(["SibSp","Sex"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival rate by gender and direct dependency: child or spouse")
plt.ylabel("Survval rate")
plt.show()
##### Is the cabin level relevant for the survival rate?
titanic_edl["Level_cabin"].unique()
titanic_edl.groupby("Level_cabin")["Survived"].mean().plot.bar()
plt.title("Survival rates by cabin levels")
plt.ylabel("Survival rate")
### Cabin levels split by class (mean survival).
titanic_edl.groupby(["Level_cabin","Pclass"])["Survived"].mean().unstack().plot.bar()
plt.title("Survival rates by cabin levels and class")
plt.ylabel("Survival rate")
#### Most of the upper-level cabins belong to 1st class -- cabin level and
# class are correlated. Same grouping as above, but raw passenger counts:
titanic_edl.groupby(["Level_cabin","Pclass"])["Survived"].count().unstack().plot.bar()
plt.title("Survival rates by cabin levels and class")
plt.ylabel("#Passengers")
### Bin the engineered features into coarse categories.
titanic_edl["fam_size"]=pd.cut(titanic_edl["Family_size"], bins=3, labels=["small_fam","medium_fam","large_fam"])
# NOTE(review): the labels suggest a <=4 / >4 split, but bins=2 cuts the SibSp
# range into two equal-width halves -- confirm the intended cut points.
titanic_edl["Sib_Sp_num"]=pd.cut(titanic_edl["SibSp"], bins=2, labels=["less_4","over_4"])
### One-hot encode the categorical features for the correlation heatmap.
one_hot_family=pd.get_dummies(titanic_edl["fam_size"])
one_hot_sibling=pd.get_dummies(titanic_edl["Sib_Sp_num"])
one_hot_sex=pd.get_dummies(titanic_edl["Sex"])
one_hot_title=pd.get_dummies(titanic_edl["Title"])  # unused in the active branch below
# With titles:
# corr1=pd.concat([titanic_edl,one_hot_family,one_hot_sibling,one_hot_sex,one_hot_title],axis=1)
# Without titles:
corr1=pd.concat([titanic_edl,one_hot_family,one_hot_sibling,one_hot_sex],axis=1)
# Drop identifiers, raw source columns, and one dummy per group (reference level).
corr1.drop(["PassengerId","Family_size","Parch","SibSp","over_4","male", "medium_fam"],axis=1,inplace=True)
plt.figure(figsize=(10,10))
# Correlation of every remaining feature with Survived, as a single-column heatmap.
sn.heatmap(corr1.corr()[['Survived']],cmap="RdBu_r",center=0.0, annot=True)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.sum",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.suptitle",
"pandas.get_dummies",
"seaborn.barplot",
"pandas.cut",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"matplotlib.pyplot.ylabel",
"ma... | [((196, 237), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'header': '(0)', 'sep': '""","""'}), "(file_name, header=0, sep=',')\n", (207, 237), True, 'import pandas as pd\n'), ((338, 392), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (354, 392), False, 'from sklearn.model_selection import train_test_split\n'), ((463, 500), 'pandas.concat', 'pd.concat', (['[X_train, y_train]'], {'axis': '(1)'}), '([X_train, y_train], axis=1)\n', (472, 500), True, 'import pandas as pd\n'), ((533, 570), 'pandas.concat', 'pd.concat', (['[X_train, y_train]'], {'axis': '(1)'}), '([X_train, y_train], axis=1)\n', (542, 570), True, 'import pandas as pd\n'), ((1826, 1852), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1836, 1852), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1920), 'seaborn.barplot', 'sn.barplot', ([], {'x': '"""Pclass"""', 'y': '"""PassengerId"""', 'hue': '"""Sex"""', 'data': 'male_female'}), "(x='Pclass', y='PassengerId', hue='Sex', data=male_female)\n", (1862, 1920), True, 'import seaborn as sn\n'), ((1918, 1937), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Class"""'], {}), "('Class')\n", (1928, 1937), True, 'import matplotlib.pyplot as plt\n'), ((1938, 1972), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of passengers"""'], {}), "('Number of passengers')\n", (1948, 1972), True, 'import matplotlib.pyplot as plt\n'), ((1973, 2005), 'matplotlib.pyplot.title', 'plt.title', (['"""Gender distribution"""'], {}), "('Gender distribution')\n", (1982, 2005), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2016), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2014, 2016), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2706), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2690, 2706), True, 'import 
matplotlib.pyplot as plt\n'), ((2706, 2801), 'seaborn.barplot', 'sn.barplot', ([], {'x': '"""Pclass"""', 'y': '"""prop_surv"""', 'hue': '"""Sex"""', 'data': "grouped_[grouped_['Survived'] == 1]"}), "(x='Pclass', y='prop_surv', hue='Sex', data=grouped_[grouped_[\n 'Survived'] == 1])\n", (2716, 2801), True, 'import seaborn as sn\n'), ((2792, 2811), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Class"""'], {}), "('Class')\n", (2802, 2811), True, 'import matplotlib.pyplot as plt\n'), ((2812, 2839), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival rate"""'], {}), "('Survival rate')\n", (2822, 2839), True, 'import matplotlib.pyplot as plt\n'), ((2840, 2899), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival rate based on classe for all genders)"""'], {}), "('Survival rate based on classe for all genders)')\n", (2849, 2899), True, 'import matplotlib.pyplot as plt\n'), ((2900, 2910), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2908, 2910), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3418), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (3402, 3418), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3482), 'seaborn.boxplot', 'sn.boxplot', ([], {'x': '"""Sex"""', 'y': '"""Surv_sex"""', 'hue': '"""Survived"""', 'data': 'grouped_'}), "(x='Sex', y='Surv_sex', hue='Survived', data=grouped_)\n", (3428, 3482), True, 'import seaborn as sn\n'), ((3481, 3498), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sex"""'], {}), "('Sex')\n", (3491, 3498), True, 'import matplotlib.pyplot as plt\n'), ((3499, 3526), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival rate"""'], {}), "('Survival rate')\n", (3509, 3526), True, 'import matplotlib.pyplot as plt\n'), ((3527, 3596), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival distribution based on gender (over all classes)"""'], {}), "('Survival distribution based on gender (over all classes)')\n", (3536, 3596), True, 'import matplotlib.pyplot as 
plt\n'), ((3597, 3607), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3605, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3861, 3914), 'matplotlib.pyplot.title', 'plt.title', (['"""Age distribution per gender and survival"""'], {}), "('Age distribution per gender and survival')\n", (3870, 3914), True, 'import matplotlib.pyplot as plt\n'), ((3915, 3925), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3923, 3925), True, 'import matplotlib.pyplot as plt\n'), ((4358, 4375), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Age"""'], {}), "('Age')\n", (4368, 4375), True, 'import matplotlib.pyplot as plt\n'), ((4376, 4405), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Survived, Class"""'], {}), "('Survived, Class')\n", (4386, 4405), True, 'import matplotlib.pyplot as plt\n'), ((4406, 4445), 'matplotlib.pyplot.title', 'plt.title', (['"""Survived per age and class"""'], {}), "('Survived per age and class')\n", (4415, 4445), True, 'import matplotlib.pyplot as plt\n'), ((4446, 4456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4454, 4456), True, 'import matplotlib.pyplot as plt\n'), ((4724, 4771), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 6)', 'nrows': '(1)', 'ncols': '(3)'}), '(figsize=(12, 6), nrows=1, ncols=3)\n', (4736, 4771), True, 'import matplotlib.pyplot as plt\n'), ((4771, 4862), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Age Distribution of Survivor state by Gender in Each Class"""'], {'fontsize': '(16)'}), "('Age Distribution of Survivor state by Gender in Each Class',\n fontsize=16)\n", (4783, 4862), True, 'import matplotlib.pyplot as plt\n'), ((5479, 5504), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fare Prices"""'], {}), "('Fare Prices')\n", (5489, 5504), True, 'import matplotlib.pyplot as plt\n'), ((5505, 5560), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Fare price per gender and survival"""'], {}), "('Average Fare price per gender and survival')\n", (5514, 5560), 
True, 'import matplotlib.pyplot as plt\n'), ((5561, 5571), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5569, 5571), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6014), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fare"""'], {}), "('Fare')\n", (6006, 6014), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6094), 'matplotlib.pyplot.title', 'plt.title', (['"""Fare Price distributed across survival state, per gender and class"""'], {}), "('Fare Price distributed across survival state, per gender and class')\n", (6024, 6094), True, 'import matplotlib.pyplot as plt\n'), ((6250, 6297), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 6)', 'nrows': '(1)', 'ncols': '(3)'}), '(figsize=(12, 6), nrows=1, ncols=3)\n', (6262, 6297), True, 'import matplotlib.pyplot as plt\n'), ((6297, 6400), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Fare Price Distribution of Survivor state by Gender in Each Class"""'], {'fontsize': '(16)'}), "(\n 'Fare Price Distribution of Survivor state by Gender in Each Class',\n fontsize=16)\n", (6309, 6400), True, 'import matplotlib.pyplot as plt\n'), ((7014, 7039), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fare Prices"""'], {}), "('Fare Prices')\n", (7024, 7039), True, 'import matplotlib.pyplot as plt\n'), ((7040, 7095), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Fare price per gender and survival"""'], {}), "('Average Fare price per gender and survival')\n", (7049, 7095), True, 'import matplotlib.pyplot as plt\n'), ((7096, 7106), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7104, 7106), True, 'import matplotlib.pyplot as plt\n'), ((8154, 8180), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (8164, 8180), True, 'import matplotlib.pyplot as plt\n'), ((8180, 8247), 'seaborn.boxplot', 'sn.boxplot', ([], {'x': '"""SibSp"""', 'y': '"""Fare"""', 'hue': '"""Survived"""', 'data': 'dist_sib_fare'}), "(x='SibSp', y='Fare', 
hue='Survived', data=dist_sib_fare)\n", (8190, 8247), True, 'import seaborn as sn\n'), ((8245, 8267), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Siblings"""'], {}), "('Siblings')\n", (8255, 8267), True, 'import matplotlib.pyplot as plt\n'), ((8268, 8292), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fare price"""'], {}), "('Fare price')\n", (8278, 8292), True, 'import matplotlib.pyplot as plt\n'), ((8293, 8336), 'matplotlib.pyplot.title', 'plt.title', (['"""Fare prices based on #siblings"""'], {}), "('Fare prices based on #siblings')\n", (8302, 8336), True, 'import matplotlib.pyplot as plt\n'), ((8337, 8347), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8345, 8347), True, 'import matplotlib.pyplot as plt\n'), ((8521, 8547), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (8531, 8547), True, 'import matplotlib.pyplot as plt\n'), ((8547, 8606), 'seaborn.boxplot', 'sn.boxplot', ([], {'x': '"""Sex"""', 'y': '"""Fare"""', 'hue': '"""Survived"""', 'data': 'sex_sib'}), "(x='Sex', y='Fare', hue='Survived', data=sex_sib)\n", (8557, 8606), True, 'import seaborn as sn\n'), ((8604, 8626), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Siblings"""'], {}), "('Siblings')\n", (8614, 8626), True, 'import matplotlib.pyplot as plt\n'), ((8627, 8651), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fare price"""'], {}), "('Fare price')\n", (8637, 8651), True, 'import matplotlib.pyplot as plt\n'), ((8652, 8695), 'matplotlib.pyplot.title', 'plt.title', (['"""Fare prices based on #siblings"""'], {}), "('Fare prices based on #siblings')\n", (8661, 8695), True, 'import matplotlib.pyplot as plt\n'), ((8696, 8706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8704, 8706), True, 'import matplotlib.pyplot as plt\n'), ((9090, 9163), 'matplotlib.pyplot.title', 'plt.title', (['"""Avg. weighted Suvival rate by title (adj. by population size"""'], {}), "('Avg. weighted Suvival rate by title (adj. 
by population size')\n", (9099, 9163), True, 'import matplotlib.pyplot as plt\n'), ((9164, 9196), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival rate in %"""'], {}), "('Survival rate in %')\n", (9174, 9196), True, 'import matplotlib.pyplot as plt\n'), ((9278, 9365), 'matplotlib.pyplot.title', 'plt.title', (['"""Avg. weighted Suvival rate by title and class(adj. by population size"""'], {}), "(\n 'Avg. weighted Suvival rate by title and class(adj. by population size')\n", (9287, 9365), True, 'import matplotlib.pyplot as plt\n'), ((9361, 9393), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival rate in %"""'], {}), "('Survival rate in %')\n", (9371, 9393), True, 'import matplotlib.pyplot as plt\n'), ((9502, 9539), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival by family size """'], {}), "('Survival by family size ')\n", (9511, 9539), True, 'import matplotlib.pyplot as plt\n'), ((9540, 9572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival rate in %"""'], {}), "('Survival rate in %')\n", (9550, 9572), True, 'import matplotlib.pyplot as plt\n'), ((9731, 9777), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival by family size and class"""'], {}), "('Survival by family size and class')\n", (9740, 9777), True, 'import matplotlib.pyplot as plt\n'), ((9778, 9810), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival rate in %"""'], {}), "('Survival rate in %')\n", (9788, 9810), True, 'import matplotlib.pyplot as plt\n'), ((9951, 9997), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival by family size and class"""'], {}), "('Survival by family size and class')\n", (9960, 9997), True, 'import matplotlib.pyplot as plt\n'), ((9998, 10030), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival rate in %"""'], {}), "('Survival rate in %')\n", (10008, 10030), True, 'import matplotlib.pyplot as plt\n'), ((10190, 10236), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival by family size and class"""'], {}), "('Survival by family 
size and class')\n", (10199, 10236), True, 'import matplotlib.pyplot as plt\n'), ((10237, 10271), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of passengers"""'], {}), "('Number of passengers')\n", (10247, 10271), True, 'import matplotlib.pyplot as plt\n'), ((10272, 10282), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10280, 10282), True, 'import matplotlib.pyplot as plt\n'), ((10388, 10438), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival by direct dependecy: parents"""'], {}), "('Survival by direct dependecy: parents')\n", (10397, 10438), True, 'import matplotlib.pyplot as plt\n'), ((10439, 10465), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survval rate"""'], {}), "('Survval rate')\n", (10449, 10465), True, 'import matplotlib.pyplot as plt\n'), ((10466, 10476), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10474, 10476), True, 'import matplotlib.pyplot as plt\n'), ((10680, 10730), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival by direct dependecy: parents"""'], {}), "('Survival by direct dependecy: parents')\n", (10689, 10730), True, 'import matplotlib.pyplot as plt\n'), ((10731, 10757), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survval rate"""'], {}), "('Survval rate')\n", (10741, 10757), True, 'import matplotlib.pyplot as plt\n'), ((10758, 10768), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10766, 10768), True, 'import matplotlib.pyplot as plt\n'), ((10963, 11027), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival rate by direct dependency: child or spouse"""'], {}), "('Survival rate by direct dependency: child or spouse')\n", (10972, 11027), True, 'import matplotlib.pyplot as plt\n'), ((11028, 11054), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survval rate"""'], {}), "('Survval rate')\n", (11038, 11054), True, 'import matplotlib.pyplot as plt\n'), ((11055, 11065), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11063, 11065), True, 'import matplotlib.pyplot as plt\n'), 
((11177, 11252), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival rate by gender and direct dependency: child or spouse"""'], {}), "('Survival rate by gender and direct dependency: child or spouse')\n", (11186, 11252), True, 'import matplotlib.pyplot as plt\n'), ((11253, 11279), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survval rate"""'], {}), "('Survval rate')\n", (11263, 11279), True, 'import matplotlib.pyplot as plt\n'), ((11280, 11290), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11288, 11290), True, 'import matplotlib.pyplot as plt\n'), ((11456, 11499), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival rates by cabin levels"""'], {}), "('Survival rates by cabin levels')\n", (11465, 11499), True, 'import matplotlib.pyplot as plt\n'), ((11500, 11527), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival rate"""'], {}), "('Survival rate')\n", (11510, 11527), True, 'import matplotlib.pyplot as plt\n'), ((11648, 11701), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival rates by cabin levels and class"""'], {}), "('Survival rates by cabin levels and class')\n", (11657, 11701), True, 'import matplotlib.pyplot as plt\n'), ((11702, 11729), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Survival rate"""'], {}), "('Survival rate')\n", (11712, 11729), True, 'import matplotlib.pyplot as plt\n'), ((11934, 11987), 'matplotlib.pyplot.title', 'plt.title', (['"""Survival rates by cabin levels and class"""'], {}), "('Survival rates by cabin levels and class')\n", (11943, 11987), True, 'import matplotlib.pyplot as plt\n'), ((11988, 12013), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""#Passengers"""'], {}), "('#Passengers')\n", (11998, 12013), True, 'import matplotlib.pyplot as plt\n'), ((12062, 12157), 'pandas.cut', 'pd.cut', (["titanic_edl['Family_size']"], {'bins': '(3)', 'labels': "['small_fam', 'medium_fam', 'large_fam']"}), "(titanic_edl['Family_size'], bins=3, labels=['small_fam',\n 'medium_fam', 'large_fam'])\n", (12068, 12157), 
True, 'import pandas as pd\n'), ((12178, 12243), 'pandas.cut', 'pd.cut', (["titanic_edl['SibSp']"], {'bins': '(2)', 'labels': "['less_4', 'over_4']"}), "(titanic_edl['SibSp'], bins=2, labels=['less_4', 'over_4'])\n", (12184, 12243), True, 'import pandas as pd\n'), ((12294, 12333), 'pandas.get_dummies', 'pd.get_dummies', (["titanic_edl['fam_size']"], {}), "(titanic_edl['fam_size'])\n", (12308, 12333), True, 'import pandas as pd\n'), ((12350, 12391), 'pandas.get_dummies', 'pd.get_dummies', (["titanic_edl['Sib_Sp_num']"], {}), "(titanic_edl['Sib_Sp_num'])\n", (12364, 12391), True, 'import pandas as pd\n'), ((12404, 12438), 'pandas.get_dummies', 'pd.get_dummies', (["titanic_edl['Sex']"], {}), "(titanic_edl['Sex'])\n", (12418, 12438), True, 'import pandas as pd\n'), ((12453, 12489), 'pandas.get_dummies', 'pd.get_dummies', (["titanic_edl['Title']"], {}), "(titanic_edl['Title'])\n", (12467, 12489), True, 'import pandas as pd\n'), ((12624, 12702), 'pandas.concat', 'pd.concat', (['[titanic_edl, one_hot_family, one_hot_sibling, one_hot_sex]'], {'axis': '(1)'}), '([titanic_edl, one_hot_family, one_hot_sibling, one_hot_sex], axis=1)\n', (12633, 12702), True, 'import pandas as pd\n'), ((12808, 12836), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (12818, 12836), True, 'import matplotlib.pyplot as plt\n'), ((4953, 5103), 'seaborn.boxplot', 'sn.boxplot', ([], {'data': "titanic_edl[titanic_edl['Pclass'] == i + 1]", 'x': '"""Survived"""', 'y': '"""Age"""', 'hue': '"""Sex"""', 'hue_order': "['male', 'female']", 'dodge': '(True)', 'ax': 'ax[i]'}), "(data=titanic_edl[titanic_edl['Pclass'] == i + 1], x='Survived',\n y='Age', hue='Sex', hue_order=['male', 'female'], dodge=True, ax=ax[i])\n", (4963, 5103), True, 'import seaborn as sn\n'), ((6487, 6638), 'seaborn.boxplot', 'sn.boxplot', ([], {'data': "titanic_edl[titanic_edl['Pclass'] == i + 1]", 'x': '"""Survived"""', 'y': '"""Fare"""', 'hue': '"""Sex"""', 'hue_order': "['male', 
'female']", 'dodge': '(True)', 'ax': 'ax[i]'}), "(data=titanic_edl[titanic_edl['Pclass'] == i + 1], x='Survived',\n y='Fare', hue='Sex', hue_order=['male', 'female'], dodge=True, ax=ax[i])\n", (6497, 6638), True, 'import seaborn as sn\n'), ((9011, 9023), 'numpy.sum', 'np.sum', (['surv'], {}), '(surv)\n', (9017, 9023), True, 'import numpy as np\n')] |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple, defaultdict
from distutils.util import strtobool
import itertools as it
import operator as op
import os
import numpy as onp
import six
from six.moves import xrange
from ..config import flags
from .. import core
from .. import ad_util
from .. import tree_util
from .. import linear_util as lu
from ..abstract_arrays import (ConcreteArray, ShapedArray, make_shaped_array,
array_types, scalar_types)
from ..core import AbstractTuple, JaxTuple, pack, valid_jaxtype, Literal
from ..util import partial, partialmethod, memoize, concatenate, safe_map, prod
from ..lib import xla_bridge as xb
from . import partial_eval as pe
from . import ad
FLAGS = flags.FLAGS
# Runtime flags; both defaults are read from environment variables so behavior
# can be changed without touching code.
flags.DEFINE_bool('jax_device_values',
                  strtobool(os.getenv('JAX_DEVICE_VALUES', "True")),
                  'Enable device-persistent values.')
flags.DEFINE_bool('jax_debug_nans',
                  strtobool(os.getenv('JAX_DEBUG_NANS', "False")),
                  'Add nan checks to every operation.')
def apply_primitive(prim, *args, **params):
  """Impl rule that applies `prim` to `args` via a compiled XLA computation."""
  avals = [abstractify(arg) for arg in args]
  return xla_primitive_callable(prim, *avals, **params)(*args)
@memoize
def xla_primitive_callable(prim, *abstract_args, **params):
  """Compile `prim` for the given abstract args and return an executor.

  Memoized, so each (primitive, avals, params) combination is compiled once.
  """
  arg_shapes = tuple(xla_shape(aval) for aval in abstract_args)
  computation = primitive_computation(prim, *arg_shapes, **params)
  handler = result_handler(
      xla_shape_to_result_shape(computation.GetReturnValueShape()))
  executable = computation.Compile(arg_shapes, xb.get_compile_options(),
                                   backend=xb.get_backend())
  return partial(execute_compiled_primitive, prim.name, executable, handler)
@memoize
def primitive_computation(prim, *shapes, **params):
  """Build (and cache) the XLA computation applying `prim` to args of `shapes`.

  Args:
    prim: the primitive to lower.
    *shapes: XLA shapes of the primitive's arguments.
    **params: primitive parameters, forwarded to its translation rule.

  Returns:
    The built XLA computation.

  Raises:
    Whatever `prim.abstract_eval` raises for invalid arguments, or the
    original RuntimeError from XLA if abstract_eval finds nothing wrong.
  """
  c = xb.make_computation_builder("primitive_computation")
  xla_args = map(c.ParameterWithShape, shapes)
  # The translation rule records the primitive's op into `c` as a side effect.
  xla_result = translation_rule(prim)(c, *xla_args, **params)
  try:
    return c.Build()
  except RuntimeError:
    # Try for a better error message by re-running the abstract_eval checks,
    # which may raise a more informative exception.
    prim.abstract_eval(*map(aval_from_xla_shape, shapes), **params)
    # abstract_eval found nothing wrong; re-raise the original error.
    # A bare `raise` preserves the original traceback, unlike `raise e`.
    raise
def aval_from_xla_shape(shape):
  """Convert an XLA shape into the corresponding abstract value."""
  if not shape.is_tuple():
    return ShapedArray(shape.dimensions(), shape.element_type())
  return AbstractTuple(aval_from_xla_shape(s) for s in shape.tuple_shapes())
def execute_compiled_primitive(name, compiled, result_handler, *args):
  """Transfer `args` to device, run the compiled primitive, wrap the result."""
  out_buf = compiled.Execute([device_put(arg) for arg in args])
  check_nans(name, out_buf)
  return result_handler(out_buf)
def check_nans(name, buf):
  """Check buffer `buf` for NaNs, but only when the jax_debug_nans flag is set.

  Rewritten from the short-circuit expression
  `FLAGS.jax_debug_nans and _check_nans(...)` to an explicit `if`: using `and`
  purely for control flow is unidiomatic, and no caller used the return value.
  """
  if FLAGS.jax_debug_nans:
    _check_nans(name, buf.shape(), buf)
def _check_nans(name, xla_shape, buf):
  """Raise FloatingPointError if any floating-point element of `buf` is NaN.

  Args:
    name: primitive name, used only in the error message.
    xla_shape: the XLA shape of `buf` (may be a tuple shape).
    buf: the device buffer to inspect; copied to the host for the check.
  """
  if xla_shape.is_tuple():
    # Recurse into each element of a tuple buffer.
    # NOTE(review): `_map` is not defined in this part of the file; if it
    # aliases the lazy builtin `map` (Python 3), these recursive checks would
    # never execute -- confirm it is an eager map (e.g. safe_map / list+map).
    _map(partial(_check_nans, name), xla_shape.tuple_shapes(), buf.destructure())
  else:
    if onp.issubdtype(xla_shape.element_type(), onp.floating):
      pyval = buf.to_py()  # device-to-host transfer
      if onp.any(onp.isnan(pyval)):
        msg = "invalid value (nan) encountered in {}"
        raise FloatingPointError(msg.format(name))
def device_put(x, device_num=0):
  """Place a Python value `x` on device number `device_num`.
  This is a wrapper around jax.lib.xla_bridge.device_put to handle
  additional Python types, namely
  1. the array-like types DeviceArray (which is already backed by device
  memory, though may be on the wrong device) and its subclass DeviceConstant
  (which represents a lazy value to be instantiated), and
  2. the tuple-like types DeviceTuple (which is already backed by device
  memory, though may be on the wrong device) and JaxTuple (which may have some
  elements that are backed by device memory on the correct device).
  In particular, this function avoids transferring data already placed on the
  correct device, and handles instantiating DeviceConstants.
  Args:
    x: a tuplelike-tree with arraylike leaves representing the value to be
      transferred to the device, where tuplelike means a JaxTuple or
      DeviceTuple, and arraylike includes DeviceArray, DeviceConstant, and
      anything that has an '__array__' attr.
    device_num: an int representing the target physical device number.
  Returns:
    A buffer representing the input `x` placed on the appropriate device.
  """
  x = canonicalize_pyval_dtype(x)
  t = type(x)
  # Exact type checks (`is`, not isinstance) matter here: DeviceConstant is a
  # subclass of DeviceArray but must fall through to the branch that
  # instantiates it, not the already-on-device fast path.
  if t is DeviceArray or t is DeviceTuple:
    if x.device_buffer.device() == device_num:
      # Already on the right device; reuse the existing buffer, no transfer.
      return x.device_buffer
    else:
      # TODO(phawkins): perform a direct device-to-device copy rather than
      # bouncing via the host.
      return device_put(x.device_buffer.to_py(), device_num)
  elif isinstance(x, DeviceConstant):
    # Lazy constant: materialize it directly on the target device.
    return instantiate_device_constant(x, device_num=device_num)
  elif isinstance(x, (DeviceArray, onp.ndarray)):
    return xb.device_put(x, device_num)  # handle arraylikes
  elif isinstance(x, JaxTuple):
    # Recursively place each element, then assemble a device tuple.
    element_bufs = tuple(map(partial(device_put, device_num=device_num), x))
    return xb.make_tuple(element_bufs, device_num)
  else:
    raise TypeError(t)
def device_put_many(xs_and_devices):
  """Place multiple Python values on multiple devices in parallel.
  This is a wrapper around jax.lib.xla_bridge.device_put_many to handle
  additional Python types. See the docstring for jax.interpreters.xla.device_put
  for more information.
  Args:
    xs_and_devices: a sequence of (pyval, device_num) pairs in which device_num
      is an int representing the target physical device number and pyval is a
      tuple-like tree with arraylike leaves (see the device_put docstring).
  Returns:
    A sequence of buffers representing the inputs placed on the corresponding
    device numbers.
  """
  # Values that need a host-to-device transfer are collected into `transfers`
  # and shipped in one batched xb.device_put_many call at the end;
  # `transfer_indices` remembers each one's slot in `outputs`.
  transfer_indices = []
  transfers = []
  outputs = [None] * len(xs_and_devices)
  for i, (x, device_num) in enumerate(xs_and_devices):
    x = canonicalize_pyval_dtype(x)
    t = type(x)
    # Exact type checks mirror device_put: DeviceConstant subclasses
    # DeviceArray but must reach the instantiation branch below.
    if t is DeviceArray or t is DeviceTuple:
      if x.device_buffer.device() == device_num:
        outputs[i] = x.device_buffer
      else:
        transfer_indices.append(i)
        # TODO(phawkins): perform a direct device-to-device copy rather than
        # bouncing via the host.
        transfers.append((x.device_buffer.to_py(), device_num))
    elif isinstance(x, DeviceConstant):
      outputs[i] = instantiate_device_constant(x, device_num=device_num)
    elif hasattr(t, '__array__'):
      # NOTE(review): this checks the *class* `t` for '__array__', unlike
      # device_put's isinstance check on the instance -- confirm the two
      # dispatch paths are meant to accept the same set of arraylikes.
      transfer_indices.append(i)
      transfers.append((x, device_num))  # handle arraylikes
    elif t is JaxTuple:
      # TODO(mattjj,phawkins): improve this to avoid device_put call
      element_bufs = tuple(map(partial(device_put, device_num=device_num), x))
      outputs[i] = xb.make_tuple(element_bufs, device_num)
    else:
      raise TypeError(t)
  # Batched transfer; results land back in their reserved output slots.
  transfer_results = xb.device_put_many(transfers)
  for i, result in zip(transfer_indices, transfer_results):
    outputs[i] = result
  return outputs
# When we execute an XLA computation, we get a raw device buffer back and need
# to package it into a suitable Python object to return to the user. To avoid
# unnecessary device-to-host transfers, we typically return a DeviceValue that
# acts just like a familiar Python type (e.g. an ndarray or JaxTuple) but is
# lazy in that it only copies data back to the host as required. Since the same
# DeviceValue type is formed on every execution of a compiled computation, at
# compile time we set up result handler functions and thus avoid redoing some of
# the Python bookkeeping work on every execution. Since XLA shapes are slower to
# manipulate than simple Python builtins, we store the metadata required for
# forming the DeviceValue result in special ResultArray / ResultTuple classes.
# Every JaxType needs to map to an XLA type. However this function's design is
# based on the assumption that XLA types can be mapped uniquely back to a
# JaxType, i.e. that the mapping is bijective. That assumption could be relaxed,
# but it would mean we need to do a bit more bookkeeping on the Python side to
# track abstract values of outputs.
def xla_shape_to_result_shape(xla_shape):
if xla_shape.is_tuple():
aval = aval_from_xla_shape(xla_shape)
result_shapes = tuple(map(xla_shape_to_result_shape, xla_shape.tuple_shapes()))
return ResultTuple((aval, result_shapes))
else:
shape, dtype = xla_shape.dimensions(), xla_shape.element_type()
ndim, size = len(shape), prod(shape)
return ResultArray((shape, dtype, ndim, size))
class ResultTuple(tuple): pass
class ResultArray(tuple): pass
def result_handler(result_shape):
if FLAGS.jax_device_values:
return device_persistent_result_handler(result_shape)
else:
return pyval_result_handler(result_shape)
def device_persistent_result_handler(result_shape):
t = type(result_shape)
if t is ResultArray:
return partial(DeviceArray, result_shape)
elif t is ResultTuple:
return partial(DeviceTuple, result_shape)
else:
raise TypeError(t)
def pyval_result_handler(result_shape):
del result_shape
def _tuple_to_jaxtuple(v):
if isinstance(v, tuple):
return JaxTuple(_tuple_to_jaxtuple(t) for t in v)
return v
def f(buf):
return _tuple_to_jaxtuple(buf.to_py())
return f
def compile_jaxpr(jaxpr, const_vals, *abstract_args):
arg_shapes = list(map(xla_shape, abstract_args))
built_c = jaxpr_computation(jaxpr, const_vals, (), *arg_shapes)
result_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape())
return built_c.Compile(arg_shapes, xb.get_compile_options(),
backend=xb.get_backend()), result_shape
def build_jaxpr(jaxpr, const_vals, *abstract_args):
arg_shapes = list(map(xla_shape, abstract_args))
built_c = jaxpr_computation(jaxpr, const_vals, (), *arg_shapes)
return built_c
def _prefetch_jaxpr_literals(jaxpr):
"""Prefetches any DeviceArray values inside a jaxpr to the host."""
for eqn in jaxpr.eqns:
for v in eqn.invars:
if type(v) is core.Literal and isinstance(v.val, DeviceArray):
v.val.copy_to_host_async()
if eqn.bound_subjaxprs:
for subjaxpr, _const_bindings, _freevar_bindings in eqn.bound_subjaxprs:
_prefetch_jaxpr_literals(subjaxpr)
def jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):
assert not any(type(invar) in (tuple, list) for invar in jaxpr.invars)
c = xb.make_computation_builder("jaxpr_computation")
def read(v):
if type(v) is Literal:
return c.Constant(canonicalize_pyval_dtype(v.val))
else:
return env[v]
def write(v, node):
assert node is not None
env[v] = node
env = {}
write(core.unitvar, c.Tuple())
if const_vals:
for val in const_vals:
if isinstance(val, DeviceArray):
val.copy_to_host_async()
_map(write, jaxpr.constvars, map(c.Constant, const_vals))
_map(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))
else:
all_freevars = it.chain(jaxpr.constvars, jaxpr.freevars)
_map(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))
_map(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))
_prefetch_jaxpr_literals(jaxpr)
for eqn in jaxpr.eqns:
if not eqn.restructure:
in_nodes = list(map(read, eqn.invars))
else:
in_nodes = [xla_pack(c, map(read, invars)) if type(invars) is tuple
else read(invars) for invars in eqn.invars]
in_shapes = _map(c.GetShape, in_nodes)
subcs = [
jaxpr_computation(
subjaxpr, (),
_map(c.GetShape, map(read, const_bindings + freevar_bindings)),
*in_shapes)
for subjaxpr, const_bindings, freevar_bindings in eqn.bound_subjaxprs]
subfuns = [(subc, _map(read, const_bindings + freevar_bindings))
for subc, (_, const_bindings, freevar_bindings)
in zip(subcs, eqn.bound_subjaxprs)]
ans = translation_rule(eqn.primitive)(c, *(subfuns + in_nodes), **eqn.params)
c.GetShape(ans) # force xla to do shape error checking
out_nodes = xla_destructure(c, ans) if eqn.destructure else [ans]
_map(write, eqn.outvars, out_nodes)
return c.Build(read(jaxpr.outvar))
def _map(f, *xs):
return tuple(map(f, *xs))
def xla_destructure(c, ans):
num_elements = len(c.GetShape(ans).tuple_shapes())
return [c.GetTupleElement(ans, i) for i in range(num_elements)]
def xla_pack(c, xs):
return c.Tuple(*xs)
def tuple_constant(c, val, canonicalize_types=True):
return c.Tuple(*map(c.Constant, val))
xb.register_constant_handler(JaxTuple, tuple_constant)
def translation_rule(p):
backend = xb.get_backend()
backend_specific_rule = backend_specific_translations[backend.platform].get(p)
try:
return backend_specific_rule or translations[p]
except KeyError:
raise NotImplementedError(
"XLA translation rule for '{}' not implemented".format(p))
def lower_fun(fun, c, *xla_args, **params):
xla_shapes = tuple(map(c.GetShape, xla_args))
avals = map(aval_from_xla_shape, xla_shapes)
pvals = [pe.PartialVal((a, core.unit)) for a in avals]
jaxpr, _, consts = pe.trace_unwrapped_to_jaxpr(fun, pvals, **params)
built_c = jaxpr_computation(jaxpr, consts, (), *xla_shapes)
return c.Call(built_c, xla_args)
translations = {}
backend_specific_translations = defaultdict(dict)
translations[core.pack_p] = lambda c, *xs: c.Tuple(*xs)
translations[core.call_p] = lambda c, subc_a1, *a2: c.Call(subc_a1[0],
subc_a1[1] + a2)
translations[core.identity_p] = lambda c, x: x
def zeros_like_translation_rule(c, x):
def _zeros_like(shape):
if shape.is_tuple():
return c.Tuple(*(_zeros_like(x) for x in shape.tuple_shapes()))
else:
return c.Broadcast(c.Constant(onp.array(0, shape.element_type())),
shape.dimensions())
return _zeros_like(c.GetShape(x))
def add_jaxvals_translation_rule(c, x, y):
x_shape, y_shape = map(c.GetShape, (x, y))
if x_shape.is_tuple() and y_shape.is_tuple():
xs = xla_destructure(c, x)
ys = xla_destructure(c, y)
return c.Tuple(*map(partial(add_jaxvals_translation_rule, c), xs, ys))
else:
return c.Add(x, y)
translations[ad_util.zeros_like_p] = zeros_like_translation_rule
translations[ad_util.add_jaxvals_p] = add_jaxvals_translation_rule
def canonicalize_pyval_dtype(x):
try:
return canonicalize_dtype_handlers[type(x)](x)
except KeyError:
msg = "No canonicalize handler registered for type: {}"
raise TypeError(msg.format(type(x)))
canonicalize_dtype_handlers = {}
def canonicalize_tuple_dtype(tup):
return JaxTuple(map(canonicalize_pyval_dtype, tup))
canonicalize_dtype_handlers[JaxTuple] = canonicalize_tuple_dtype
def canonicalize_ndarray_dtype(x):
return onp.asarray(x, xb.canonicalize_dtype(onp.result_type(x)))
for t in array_types:
canonicalize_dtype_handlers[t] = canonicalize_ndarray_dtype
def identity(x): return x
def abstractify(x):
try:
return pytype_aval_mappings[type(x)](x)
except KeyError:
raise TypeError("No abstraction handler for type: {}".format(type(x)))
pytype_aval_mappings = {}
def abstractify_tuple(tup):
return AbstractTuple(map(abstractify, tup))
pytype_aval_mappings[JaxTuple] = abstractify_tuple
pytype_aval_mappings[AbstractTuple] = abstractify_tuple
for t in array_types:
pytype_aval_mappings[t] = make_shaped_array
class DeviceValue(object):
"""A DeviceValue represents a value backed by device memory."""
__slots__ = ["device_buffer"]
def __init__(self, device_buffer):
self.device_buffer = device_buffer
def _check_if_deleted(self):
if self.device_buffer is None:
raise ValueError("DeviceValue has been deleted.")
def block_until_ready(self):
"""Blocks the caller until the buffer's value has been computed on device.
This method is mostly useful for timing microbenchmarks that wish to
time how long a computation takes, without transferring the result back
to the host.
"""
self._check_if_deleted()
self.device_buffer.block_host_until_ready()
class DeviceTuple(DeviceValue):
"""A DeviceTuple is a JaxTuple backed by a single device memory buffer."""
__slots__ = ["aval", "result_shapes"]
def __init__(self, result_shape, device_buffer):
self.device_buffer = device_buffer
self.aval, self.result_shapes = result_shape
def __iter__(self):
bufs = self.device_buffer.destructure()
handlers = map(device_persistent_result_handler, self.result_shapes)
elts = [handler(buf) for handler, buf in zip(handlers, bufs)]
return iter(elts)
def __len__(self):
return len(self.aval)
def __repr__(self):
return 'DeviceTuple(len={length})'.format(length=len(self))
def __eq__(self, other):
return tuple(self) == tuple(other)
# DeviceValues don't need to be dtype-canonicalized because we assume values on
# the device have already been canonicalized.
core.pytype_aval_mappings[DeviceTuple] = core.pytype_aval_mappings[JaxTuple]
pytype_aval_mappings[DeviceTuple] = op.attrgetter('aval')
canonicalize_dtype_handlers[DeviceTuple] = identity
def _device_tuple_constant_handler(c, val, canonicalize_types=True):
const = partial(c.Constant, canonicalize_types=canonicalize_types)
return c.Tuple(*map(const, val))
xb.register_constant_handler(DeviceTuple, _device_tuple_constant_handler)
# TODO(mattjj): could jit-compile a computation here
ad_util.jaxval_adders[DeviceTuple] = ad_util.add_jaxtuples
# TODO(phawkins): after Jaxlib 0.1.17 has been released, bump the minimum
# jaxlib version and change callers of this function to simply call
# the copy_to_host_async method directly.
def _copy_to_host_async(buffer):
if hasattr(buffer, "copy_to_host_async"):
buffer.copy_to_host_async()
def forward_method(attrname, self, fun, *args):
return fun(getattr(self, attrname), *args)
forward_to_value = partial(forward_method, "_value")
class DeviceArray(DeviceValue):
"""A DeviceArray is an ndarray backed by a single device memory buffer."""
# We don't subclass ndarray because that would open up a host of issues,
# but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.
__slots__ = ["shape", "dtype", "ndim", "size", "_npy_value"]
__array_priority__ = 100.
def __init__(self, result_shape, device_buffer):
self.device_buffer = device_buffer
self.shape, self.dtype, self.ndim, self.size = result_shape
self._npy_value = None
# TODO make device_buffer a property, make the _npy_value writeable, invalidate
@property
def _value(self):
self._check_if_deleted()
if self._npy_value is None:
self._npy_value = self.device_buffer.to_py()
self._npy_value.flags.writeable = False
return self._npy_value
def copy(self):
"""Returns an ndarray (backed by host memory, not device memory)."""
return onp.asarray(self)
def copy_to_host_async(self):
"""Requests a copy of the buffer to the host."""
self._check_if_deleted()
if self._npy_value is None:
_copy_to_host_async(self.device_buffer)
def delete(self):
"""Deletes the device array and any cached copy on the host.
It is an error to access the contents of a `DeviceArray` after it has
been deleted.
Use of this method is optional; device buffers will be reclaimed
automatically by Python when a DeviceArray object is garbage collected.
However, it is sometimes useful to have more explicit control over the
time of deletion.
"""
self.device_buffer.delete()
self.device_buffer = None
self._npy_value = None
def __repr__(self):
return onp.array_repr(self)
def item(self):
if onp.issubdtype(self.dtype, onp.complexfloating):
return complex(self)
elif onp.issubdtype(self.dtype, onp.floating):
return float(self)
elif onp.issubdtype(self.dtype, onp.integer):
return int(self)
elif onp.issubdtype(self.dtype, onp.bool_):
return bool(self)
else:
raise TypeError(self.dtype)
def __len__(self):
try:
return self.shape[0]
except IndexError:
raise TypeError("len() of unsized object") # same as numpy error
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
return (self[i] for i in xrange(self.shape[0]))
def __reversed__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array")
else:
return (self[i] for i in xrange(self.shape[0] - 1, -1, -1))
def __format__(self, format_spec):
# Simulates behavior of https://github.com/numpy/numpy/pull/9883
if self.ndim == 0:
return format(self._value[()], format_spec)
else:
return format(self._value, format_spec)
__array__ = partialmethod(forward_to_value, onp.asarray)
__str__ = partialmethod(forward_to_value, str)
__bool__ = __nonzero__ = partialmethod(forward_to_value, bool)
__float__ = partialmethod(forward_to_value, float)
__int__ = partialmethod(forward_to_value, int)
if six.PY2:
__long__ = partialmethod(forward_to_value, long) # noqa: F821
__complex__ = partialmethod(forward_to_value, complex)
__hex__ = partialmethod(forward_to_value, hex)
__oct__ = partialmethod(forward_to_value, oct)
# pickle saves and loads just like an ndarray
__reduce__ = partialmethod(forward_to_value, op.methodcaller("__reduce__"))
# clobbered when jax.numpy is imported, but useful in tests
def __eq__(self, other): return self._value == other
def __hash__(self):
# TODO(mattjj): this is not semantically correct because it is possible
# __eq__ is true for values with unequal __hash__ values. However, the
# main use case at the moment is memoization for which false negatives are
# fine.
return id(self)
scalar_types.add(DeviceArray)
# DeviceValues don't need to be canonicalized because we assume values on the
# device have already been canonicalized.
core.pytype_aval_mappings[DeviceArray] = ConcreteArray
pytype_aval_mappings[DeviceArray] = make_shaped_array
canonicalize_dtype_handlers[DeviceArray] = identity
def _device_array_constant_handler(c, val, canonicalize_types=True):
return c.Constant(onp.asarray(val), canonicalize_types=canonicalize_types)
xb.register_constant_handler(DeviceArray, _device_array_constant_handler)
pytype_aval_mappings[ConcreteArray] = make_shaped_array
pytype_aval_mappings[ShapedArray] = identity
class DeviceConstant(DeviceArray):
def copy_to_host_async(self): pass
@staticmethod
def constant_handler(c, constant_instance, canonicalize_types=True):
assert False
def instantiate_device_constant(const, cutoff=1e6, device_num=0):
# dispatch an XLA Computation to build the constant on the device if it's
# large, or alternatively build it on the host and transfer it if it's small
# TODO(mattjj): need a way to instantiate on a specific device
assert isinstance(const, DeviceConstant)
if const.size > cutoff and device_num == 0:
c = xb.make_computation_builder("constant_instantiating_computation")
xla_const = const.constant_handler(c, const)
compiled = c.Build(xla_const).Compile((), xb.get_compile_options(),
backend=xb.get_backend())
return compiled.Execute(())
else:
return xb.device_put(onp.asarray(const), device_num)
def xla_shape(x):
try:
return xb.Shape.array_shape(x.dtype, x.shape)
except AttributeError:
if type(x) in (core.AbstractTuple, core.JaxTuple):
return xb.Shape.tuple_shape(tuple(map(xla_shape, x)))
else:
raise TypeError(type(x))
def xla_call_impl(fun, *args, **params):
device_values = FLAGS.jax_device_values and params.pop('device_values')
compiled_fun = xla_callable(fun, device_values, *map(abstractify, args))
try:
return compiled_fun(*args)
except FloatingPointError:
print("Invalid value encountered in the output of a jit function. "
"Calling the de-optimized version.")
return fun.call_wrapped(*args) # probably won't return
@lu.memoize
def xla_callable(fun, device_values, *abstract_args):
pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]
with core.new_master(pe.JaxprTrace, True) as master:
jaxpr, (pval, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals)
assert not env # no subtraces here (though cond might eventually need them)
compiled, result_shape = compile_jaxpr(jaxpr, consts, *abstract_args)
del master, consts, jaxpr, env
if device_values:
handle_result = device_persistent_result_handler(result_shape)
else:
handle_result = pyval_result_handler(result_shape)
return partial(execute_compiled, compiled, pval, handle_result)
def execute_compiled(compiled, pval, handle_result, *args):
input_bufs = [device_put(x) for x in args]
out_buf = compiled.Execute(input_bufs)
check_nans("jit-compiled computation", out_buf)
return pe.merge_pvals(handle_result(out_buf), pval)
def xla_call_translation_rule(c, subc_a1, *a2, **params):
subc, a1 = subc_a1
return c.Call(subc, a1 + a2)
xla_call_p = core.Primitive('xla_call')
xla_call = partial(core.call_bind, xla_call_p)
xla_call_p.def_custom_bind(xla_call)
xla_call_p.def_impl(xla_call_impl)
translations[xla_call_p] = xla_call_translation_rule
ad.primitive_transposes[xla_call_p] = partial(ad.call_transpose, xla_call_p)
| [
"numpy.result_type",
"numpy.asarray",
"operator.methodcaller",
"numpy.isnan",
"collections.defaultdict",
"numpy.array_repr",
"operator.attrgetter",
"six.moves.xrange",
"itertools.chain",
"os.getenv",
"numpy.issubdtype"
] | [((13888, 13905), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (13899, 13905), False, 'from collections import namedtuple, defaultdict\n'), ((17633, 17654), 'operator.attrgetter', 'op.attrgetter', (['"""aval"""'], {}), "('aval')\n", (17646, 17654), True, 'import operator as op\n'), ((1478, 1516), 'os.getenv', 'os.getenv', (['"""JAX_DEVICE_VALUES"""', '"""True"""'], {}), "('JAX_DEVICE_VALUES', 'True')\n", (1487, 1516), False, 'import os\n'), ((1637, 1673), 'os.getenv', 'os.getenv', (['"""JAX_DEBUG_NANS"""', '"""False"""'], {}), "('JAX_DEBUG_NANS', 'False')\n", (1646, 1673), False, 'import os\n'), ((11548, 11589), 'itertools.chain', 'it.chain', (['jaxpr.constvars', 'jaxpr.freevars'], {}), '(jaxpr.constvars, jaxpr.freevars)\n', (11556, 11589), True, 'import itertools as it\n'), ((19452, 19469), 'numpy.asarray', 'onp.asarray', (['self'], {}), '(self)\n', (19463, 19469), True, 'import numpy as onp\n'), ((20216, 20236), 'numpy.array_repr', 'onp.array_repr', (['self'], {}), '(self)\n', (20230, 20236), True, 'import numpy as onp\n'), ((20263, 20310), 'numpy.issubdtype', 'onp.issubdtype', (['self.dtype', 'onp.complexfloating'], {}), '(self.dtype, onp.complexfloating)\n', (20277, 20310), True, 'import numpy as onp\n'), ((21964, 21993), 'operator.methodcaller', 'op.methodcaller', (['"""__reduce__"""'], {}), "('__reduce__')\n", (21979, 21993), True, 'import operator as op\n'), ((22802, 22818), 'numpy.asarray', 'onp.asarray', (['val'], {}), '(val)\n', (22813, 22818), True, 'import numpy as onp\n'), ((15404, 15422), 'numpy.result_type', 'onp.result_type', (['x'], {}), '(x)\n', (15419, 15422), True, 'import numpy as onp\n'), ((20348, 20388), 'numpy.issubdtype', 'onp.issubdtype', (['self.dtype', 'onp.floating'], {}), '(self.dtype, onp.floating)\n', (20362, 20388), True, 'import numpy as onp\n'), ((23918, 23936), 'numpy.asarray', 'onp.asarray', (['const'], {}), '(const)\n', (23929, 23936), True, 'import numpy as onp\n'), ((3628, 3644), 'numpy.isnan', 
'onp.isnan', (['pyval'], {}), '(pyval)\n', (3637, 3644), True, 'import numpy as onp\n'), ((20424, 20463), 'numpy.issubdtype', 'onp.issubdtype', (['self.dtype', 'onp.integer'], {}), '(self.dtype, onp.integer)\n', (20438, 20463), True, 'import numpy as onp\n'), ((20497, 20534), 'numpy.issubdtype', 'onp.issubdtype', (['self.dtype', 'onp.bool_'], {}), '(self.dtype, onp.bool_)\n', (20511, 20534), True, 'import numpy as onp\n'), ((20919, 20940), 'six.moves.xrange', 'xrange', (['self.shape[0]'], {}), '(self.shape[0])\n', (20925, 20940), False, 'from six.moves import xrange\n'), ((21085, 21118), 'six.moves.xrange', 'xrange', (['(self.shape[0] - 1)', '(-1)', '(-1)'], {}), '(self.shape[0] - 1, -1, -1)\n', (21091, 21118), False, 'from six.moves import xrange\n')] |
import os
from collections import OrderedDict
import torch
from torch.utils.data import Sampler
import numpy as np
from experiment_logger import get_logger
class FixedLengthBatchSampler(Sampler):
def __init__(self, data_source, batch_size, include_partial=False, rng=None):
self.data_source = data_source
self.active = False
if rng is None:
rng = np.random.RandomState(seed=11)
self.rng = rng
self.batch_size = batch_size
self.include_partial = include_partial
self._batch_size_cache = { 0: self.batch_size }
self.logger = get_logger()
def reset(self):
# Record the lengths of each example.
length_map = OrderedDict()
for i in range(len(self.data_source)):
x = self.data_source.dataset[i]
length_map.setdefault(len(x), []).append(i)
# Shuffle the order.
for length in length_map.keys():
self.rng.shuffle(length_map[length])
# Initialize state.
state = {}
for length, arr in length_map.items():
batch_size = self.batch_size
nbatches = len(arr) // batch_size
surplus = nbatches * batch_size < len(arr)
state[length] = dict(nbatches=nbatches, surplus=surplus, position=-1)
# Batch order, in terms of length.
order = []
for length, v in state.items():
order += [length] * v['nbatches']
## Optionally, add partial batches.
if self.include_partial:
for length, v in state.items():
if v['surplus']:
order += [length]
self.logger.info('# of batches = {}'.format(len(order)))
self.rng.shuffle(order)
self.length_map = length_map
self.state = state
self.order = order
self.index = -1
def get_next_batch(self, length):
batch_size = self.batch_size
position = self.state[length]['position'] + 1
start = position * batch_size
batch_index = self.length_map[length][start:start+batch_size]
self.state[length]['position'] = position
return batch_index
def __iter__(self):
self.reset()
for _ in range(len(self)):
index = self.index + 1
length = self.order[index]
self.index = index
yield self.get_next_batch(length)
def __len__(self):
return len(self.order)
class SimpleDataset(torch.utils.data.Dataset):
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, index):
item = self.dataset[index]
return index, item
def __len__(self):
return len(self.dataset)
| [
"collections.OrderedDict",
"experiment_logger.get_logger",
"numpy.random.RandomState"
] | [((609, 621), 'experiment_logger.get_logger', 'get_logger', ([], {}), '()\n', (619, 621), False, 'from experiment_logger import get_logger\n'), ((711, 724), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (722, 724), False, 'from collections import OrderedDict\n'), ((393, 423), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(11)'}), '(seed=11)\n', (414, 423), True, 'import numpy as np\n')] |
import numpy as np
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import nn
from torch.autograd import Variable
import torch
from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2
from crowd_nav.policy.multi_human_rl import MultiHumanRL
class AFAModule(nn.Module):
def __init__(self, mlp, use_softmax=False):
r"""
:param mlp: mlp for learning weight
mode: transformation or aggregation
"""
super().__init__()
self.mlp = mlp
self.use_softmax = use_softmax
def forward(self, feature: torch.Tensor) -> torch.Tensor:
r"""
Parameters
----------
features : torch.Tensor
(B, C, N, M) or (B, C, N)
Returns
-------
new_features : torch.Tensor
transformation: (B, C, N, M) or (B, C, N)
aggregation: (B, C, N) or (B, C)
"""
B, C, N = feature.size()
feature = feature.view(B, C, N, 1).repeat(1, 1, 1, N) # (BN, C, M, M)
if feature.device.type == "cpu":
feature = feature - feature.transpose(2, 3).contiguous() + torch.mul(feature, torch.eye(N).view(1, 1, N, N)) # (BN, C, M, M)
else:
feature = feature - feature.transpose(2, 3).contiguous() + torch.mul(feature, torch.eye(N).view(1, 1, N, N).cuda()) # (BN, C, M, M)
weight = self.mlp(feature)
if self.use_softmax:
weight = F.softmax(weight, -1)
# feature = (feature * weight).sum(-1).view(B, N, C).transpose(1, 2).contiguous() # (B, C, N)
feature = (feature * weight).sum(-1).view(B, N, C).contiguous() # (B, N, C)
return feature
class GRU(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(GRU, self).__init__()
self.convz = nn.Conv1d(hidden_dim+input_dim, hidden_dim, 1, bias=False)
self.convr = nn.Conv1d(hidden_dim+input_dim, hidden_dim, 1, bias=False)
self.convq = nn.Conv1d(hidden_dim+input_dim, hidden_dim, 1, bias=False)
def forward(self, h, x):
hx = torch.cat([h,x], dim=1)
z = torch.sigmoid(self.convz(hx))
r = torch.sigmoid(self.convr(hx))
q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
return h
class ValueNetwork(nn.Module):
def __init__(self, input_dim, self_state_dim, joint_state_dim, in_mlp_dims, ia_mlp_dims, sort_mlp_dims, sort_attention_dims, aggregation_dims, action_dims, gamma=0.9, time_step=0.2, v_pref=1.0, with_global_state=True, with_interaction=True, with_om=False):
super().__init__()
# self.device = torch.device("cpu")
self.gpu = True
self.gamma = gamma
self.time_step = time_step
self.v_pref = v_pref
self.device = torch.device("cuda:0" if torch.cuda.is_available() and self.gpu else "cpu")
self.input_dim = input_dim
self.self_state_dim = self_state_dim
self.joint_state_dim = joint_state_dim
self.gru_hidden_dim = ia_mlp_dims[-1]*2
self.with_global_state = with_global_state
self.with_om = with_om
self.with_interaction = with_interaction
self.input_mlp = mlp(self.input_dim - self.self_state_dim, in_mlp_dims, last_relu=True) # [B,C,N]
if self.with_interaction:
self.mlp = conv_mlp2(ia_mlp_dims[-1], ia_mlp_dims)
self.afa_mlp = AFAModule(self.mlp, use_softmax=True) #[B,C,N]
self.ia_mlp = mlp(ia_mlp_dims[-1], ia_mlp_dims)
# avg+mlp1
if self.with_global_state:
self.sort_mlp = mlp(in_mlp_dims[-1]*2, sort_mlp_dims) #[B,C*2+13,N]
else:
self.sort_mlp = mlp(in_mlp_dims[-1]*2, sort_mlp_dims) #[B,C*2,N]
# self.gru = nn.GRU(ia_mlp_dims[-1]*2, self.gru_hidden_dim, batch_first=True)
# avg+mlp2cd
# (avg+mlp2)
self.sort_mlp_attention = mlp(sort_mlp_dims[-1]*2, sort_attention_dims)
self.gru = GRU(ia_mlp_dims[-1]*2, self.gru_hidden_dim)
self.gru2 = GRU(ia_mlp_dims[-1]*2, self.gru_hidden_dim)
self.gru3 = GRU(ia_mlp_dims[-1]*2, self.gru_hidden_dim)
self.motion_filter = mlp(self.gru_hidden_dim*2, aggregation_dims, last_relu=True)
self.motion_filter2 = mlp(self.gru_hidden_dim*2, aggregation_dims, last_relu=True)
action_input_dim = self.gru_hidden_dim + self.self_state_dim # 64 + 6
self.action_mlp = mlp(action_input_dim, action_dims) #56,128,64,64,1
self.attention_weights = None
def forward(self, state):
in_size = state.shape
# state_t = state.transpose(2,1)
self_state = state[:, 0, :self.self_state_dim]
agents_state = state[:, :, self.self_state_dim:] #(batch_sz*num_agents)*num_features
# state_att = state.view(-1,size[2])
in_mlp_output = self.input_mlp(agents_state)
if self.with_interaction:
in_mlp_output = in_mlp_output + self.afa_mlp(in_mlp_output.transpose(2,1).contiguous())
in_mlp_output = self.ia_mlp(in_mlp_output)
# new_features = F.avg_pool1d(new_features, kernel_size=[1, new_features.size(3)]).squeeze(-1) # (B, mlp[-1], npoint)
# compute attention scores
global_state = torch.mean(in_mlp_output, 1, keepdim=True) #[B,C,N]
global_state = global_state.repeat((1, in_size[1], 1)).contiguous()
if self.with_global_state:
sort_mlp_input = torch.cat([in_mlp_output, global_state], dim=-1) #batch_sz*num_agents*(in_mlp_dims[-1]*2 + self_state_size)
else:
sort_mlp_input = in_mlp_output #batch_sz*num_agents*(in_mlp_dims[-1]*2)
sort_mlp_output = self.sort_mlp(sort_mlp_input)
sort_mlp_global_state = torch.mean(sort_mlp_output, 1, keepdim=True) #100,1,100
sort_mlp_global_state = sort_mlp_global_state.repeat((1, in_size[1], 1)).contiguous()
sort_mlp_input = torch.cat([sort_mlp_output, sort_mlp_global_state], dim=-1)
sort_mlp_attention_output = self.sort_mlp_attention(sort_mlp_input)
scores = sort_mlp_attention_output.squeeze(dim=-1) #100,5
# masked softmax
scores_exp = torch.exp(scores) * (scores != 0).float()
weights = (scores_exp / torch.sum(scores_exp, dim=1, keepdim=True)).unsqueeze(-1)
self.attention_weights = weights[0, :, 0].data.cpu().numpy()
# output feature is a linear combination of input features
# features = sort_mlp_input.view(in_size[0], -1, in_size[1],) #100,5,50、
weighted_feature = torch.mul(weights, sort_mlp_input)#(100,5,1),(100,5,50)
gru_input = weighted_feature.view(in_size[0], -1, in_size[1])
h0 = sort_mlp_input.transpose(2,1).contiguous()
h1 = self.gru(gru_input, h0)
h2 = self.gru2(gru_input, h1)
h3 = self.gru3(gru_input, h2)
h1 = torch.mean(h1, dim=2, keepdim=False)
h2 = torch.mean(h2, dim=2, keepdim=False)
h3 = torch.mean(h3, dim=2, keepdim=False)
new_h2 = torch.cat([h1, h2], dim=1)
# TO DO: Using mlp + max pooling to extract the new hiden state h2
new_h2 = self.motion_filter(new_h2)
new_h3 = torch.cat([new_h2, h3], dim=1)
# TO DO: Using mlp + max pooling to extract the new hiden state h1
new_h3 = self.motion_filter2(new_h3)
# concatenate agent's state with global weighted humans' state
joint_state3 = torch.cat([self_state, new_h3], dim=1)
value3 = self.action_mlp(joint_state3) #[B,1,N]
value3 = value3.view(in_size[0],-1)
joint_state2 = torch.cat([self_state, new_h2], dim=1)
value2 = self.action_mlp(joint_state2) #[B,1,N]
value2 = value2.view(in_size[0],-1)
joint_state1 = torch.cat([self_state, h1], dim=1)
value1 = self.action_mlp(joint_state1) #[B,1,N]
value1 = value1.view(in_size[0],-1)
#TODO: Summarize these values by some tricks
value = value1 + pow(self.gamma, self.time_step * self.v_pref) * value2 + pow(self.gamma, self.time_step * 2.0 * self.v_pref) * value3
return value
class GRUCARL(MultiHumanRL):
"""
Simple CommNet layer, similar to PointWeb
"""
def __init__(self):
super().__init__()
self.name = 'GRUCARL'
def configure(self, config):
self.set_common_parameters(config)
in_mlp_dims = [int(x) for x in config.get('grucarl', 'in_mlp_dims').split(', ')]
ia_mlp_dims = [int(x) for x in config.get('grucarl', 'ia_mlp_dims').split(', ')]
sort_mlp_dims = [int(x) for x in config.get('grucarl', 'sort_mlp_dims').split(', ')]
sort_attention_dims = [int(x) for x in config.get('grucarl', 'sort_attention_dims').split(', ')]
aggregation_dims = [int(x) for x in config.get('grucarl', 'aggregation_dims').split(', ')]
action_dims = [int(x) for x in config.get('grucarl', 'action_dims').split(', ')]
self.with_om = config.getboolean('grucarl', 'with_om')
with_global_state = config.getboolean('grucarl', 'with_global_state')
with_interaction = config.getboolean('grucarl', 'with_interaction')
self.model = ValueNetwork(self.input_dim(), self.self_state_dim, self.joint_state_dim, in_mlp_dims, ia_mlp_dims, sort_mlp_dims, sort_attention_dims, aggregation_dims, action_dims, self.gamma, 0.2, 1.0, with_global_state, with_interaction, self.with_om)
self.multiagent_training = config.getboolean('grucarl', 'multiagent_training')
logging.info('Policy: {} {} global state'.format(self.name, 'w/' if with_global_state else 'w/o'))
logging.info('Policy: {} {} interaction state'.format(self.name, 'w/' if with_interaction else 'w/o'))
def get_attention_weights(self):
return self.model.attention_weights
def predict(self, state, scales = [1.0, 2.0, 3.0]):
"""
A base class for all methods that takes pairwise joint state as input to value network.
The input to the value network is always of shape (batch_size, # humans, rotated joint state length)
"""
if self.phase is None or self.device is None:
raise AttributeError('Phase, device attributes have to be set!')
if self.phase == 'train' and self.epsilon is None:
raise AttributeError('Epsilon attribute has to be set in training phase')
if self.reach_destination(state):
return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
if self.action_space is None:
self.build_action_space(state.self_state.v_pref)
occupancy_maps = None
bInitialized = False
reward_list = []
probability = np.random.random()
if self.phase == 'train' and probability < self.epsilon:
max_action = self.action_space[np.random.choice(len(self.action_space))]
else:
self.action_values = list()
max_value = float('-inf')
max_action = None
for action in self.action_space:
for item in scales:
next_self_state = self.propagate(state.self_state, action, scale=item)
if self.query_env:
next_human_states, reward, done, info = self.env.multistep_lookahead(action, scale=item)
else:
next_human_states = [self.propagate(human_state, ActionXY(human_state.vx, human_state.vy), scale=item)
for human_state in state.human_states]
reward = self.compute_reward(next_self_state, next_human_states)
batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)
for next_human_state in next_human_states], dim=0)
rotated_batch_input = self.rotate(batch_next_states).unsqueeze(0)
if self.with_om:
if occupancy_maps is None:
occupancy_maps = self.build_occupancy_maps(next_human_states).unsqueeze(0)
rotated_batch_input = torch.cat([rotated_batch_input, occupancy_maps], dim=2)
if bInitialized:
rotated_batch_inputs = torch.cat([rotated_batch_inputs, rotated_batch_input], dim=1)
else:
rotated_batch_inputs = rotated_batch_input
bInitialized = True
reward_list += [reward]
# VALUE UPDATE
# next_state_value, next_state_value2, next_state_value3 = self.model(rotated_batch_inputs).data.item()
# value1 = reward_list[0] + pow(self.gamma, self.time_step * scales[0] * state.self_state.v_pref) * next_state_value
# value2 = reward_list[1] + pow(self.gamma, self.time_step * scales[1] * state.self_state.v_pref) * next_state_value2
# value3 = reward_list[2] + pow(self.gamma, self.time_step * scales[2] * state.self_state.v_pref) * next_state_value3
next_state_value = self.model(rotated_batch_inputs).data.item()
value = reward_list[0] + pow(self.gamma, self.time_step * scales[1] * state.self_state.v_pref) * reward_list[1] + pow(self.gamma, self.time_step * scales[2] * state.self_state.v_pref) * reward_list[2] + pow(self.gamma, self.time_step * scales[0] * state.self_state.v_pref) * next_state_value
reward_list = []
self.action_values.append(value)
if value > max_value:
max_value = value
max_action = action
if max_action is None:
raise ValueError('Value network is not well trained. ')
if self.phase == 'train':
self.last_state = self.transform(state)
# print("Action:V:%f,\tR:%f\t"%(max_action.v, max_action.r))
return max_action
| [
"torch.mean",
"crowd_nav.policy.cadrl.conv_mlp2",
"torch.eye",
"torch.nn.Conv1d",
"torch.cat",
"torch.mul",
"torch.nn.functional.softmax",
"torch.exp",
"numpy.random.random",
"torch.cuda.is_available",
"torch.Tensor",
"torch.sum",
"crowd_nav.policy.cadrl.mlp"
] | [((1830, 1890), 'torch.nn.Conv1d', 'nn.Conv1d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1)'], {'bias': '(False)'}), '(hidden_dim + input_dim, hidden_dim, 1, bias=False)\n', (1839, 1890), False, 'from torch import nn\n'), ((1910, 1970), 'torch.nn.Conv1d', 'nn.Conv1d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1)'], {'bias': '(False)'}), '(hidden_dim + input_dim, hidden_dim, 1, bias=False)\n', (1919, 1970), False, 'from torch import nn\n'), ((1990, 2050), 'torch.nn.Conv1d', 'nn.Conv1d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1)'], {'bias': '(False)'}), '(hidden_dim + input_dim, hidden_dim, 1, bias=False)\n', (1999, 2050), False, 'from torch import nn\n'), ((2093, 2117), 'torch.cat', 'torch.cat', (['[h, x]'], {'dim': '(1)'}), '([h, x], dim=1)\n', (2102, 2117), False, 'import torch\n'), ((3221, 3291), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['(self.input_dim - self.self_state_dim)', 'in_mlp_dims'], {'last_relu': '(True)'}), '(self.input_dim - self.self_state_dim, in_mlp_dims, last_relu=True)\n', (3224, 3291), False, 'from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2\n'), ((3922, 3969), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['(sort_mlp_dims[-1] * 2)', 'sort_attention_dims'], {}), '(sort_mlp_dims[-1] * 2, sort_attention_dims)\n', (3925, 3969), False, 'from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2\n'), ((4193, 4255), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['(self.gru_hidden_dim * 2)', 'aggregation_dims'], {'last_relu': '(True)'}), '(self.gru_hidden_dim * 2, aggregation_dims, last_relu=True)\n', (4196, 4255), False, 'from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2\n'), ((4284, 4346), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['(self.gru_hidden_dim * 2)', 'aggregation_dims'], {'last_relu': '(True)'}), '(self.gru_hidden_dim * 2, aggregation_dims, last_relu=True)\n', (4287, 4346), False, 'from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2\n'), ((4450, 4484), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['action_input_dim', 'action_dims'], 
{}), '(action_input_dim, action_dims)\n', (4453, 4484), False, 'from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2\n'), ((5277, 5319), 'torch.mean', 'torch.mean', (['in_mlp_output', '(1)'], {'keepdim': '(True)'}), '(in_mlp_output, 1, keepdim=True)\n', (5287, 5319), False, 'import torch\n'), ((5766, 5810), 'torch.mean', 'torch.mean', (['sort_mlp_output', '(1)'], {'keepdim': '(True)'}), '(sort_mlp_output, 1, keepdim=True)\n', (5776, 5810), False, 'import torch\n'), ((5941, 6000), 'torch.cat', 'torch.cat', (['[sort_mlp_output, sort_mlp_global_state]'], {'dim': '(-1)'}), '([sort_mlp_output, sort_mlp_global_state], dim=-1)\n', (5950, 6000), False, 'import torch\n'), ((6568, 6602), 'torch.mul', 'torch.mul', (['weights', 'sort_mlp_input'], {}), '(weights, sort_mlp_input)\n', (6577, 6602), False, 'import torch\n'), ((6886, 6922), 'torch.mean', 'torch.mean', (['h1'], {'dim': '(2)', 'keepdim': '(False)'}), '(h1, dim=2, keepdim=False)\n', (6896, 6922), False, 'import torch\n'), ((6937, 6973), 'torch.mean', 'torch.mean', (['h2'], {'dim': '(2)', 'keepdim': '(False)'}), '(h2, dim=2, keepdim=False)\n', (6947, 6973), False, 'import torch\n'), ((6988, 7024), 'torch.mean', 'torch.mean', (['h3'], {'dim': '(2)', 'keepdim': '(False)'}), '(h3, dim=2, keepdim=False)\n', (6998, 7024), False, 'import torch\n'), ((7044, 7070), 'torch.cat', 'torch.cat', (['[h1, h2]'], {'dim': '(1)'}), '([h1, h2], dim=1)\n', (7053, 7070), False, 'import torch\n'), ((7207, 7237), 'torch.cat', 'torch.cat', (['[new_h2, h3]'], {'dim': '(1)'}), '([new_h2, h3], dim=1)\n', (7216, 7237), False, 'import torch\n'), ((7453, 7491), 'torch.cat', 'torch.cat', (['[self_state, new_h3]'], {'dim': '(1)'}), '([self_state, new_h3], dim=1)\n', (7462, 7491), False, 'import torch\n'), ((7616, 7654), 'torch.cat', 'torch.cat', (['[self_state, new_h2]'], {'dim': '(1)'}), '([self_state, new_h2], dim=1)\n', (7625, 7654), False, 'import torch\n'), ((7779, 7813), 'torch.cat', 'torch.cat', (['[self_state, h1]'], {'dim': '(1)'}), 
'([self_state, h1], dim=1)\n', (7788, 7813), False, 'import torch\n'), ((10722, 10740), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10738, 10740), True, 'import numpy as np\n'), ((1470, 1491), 'torch.nn.functional.softmax', 'F.softmax', (['weight', '(-1)'], {}), '(weight, -1)\n', (1479, 1491), True, 'import torch.nn.functional as F\n'), ((3360, 3399), 'crowd_nav.policy.cadrl.conv_mlp2', 'conv_mlp2', (['ia_mlp_dims[-1]', 'ia_mlp_dims'], {}), '(ia_mlp_dims[-1], ia_mlp_dims)\n', (3369, 3399), False, 'from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2\n'), ((3500, 3533), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['ia_mlp_dims[-1]', 'ia_mlp_dims'], {}), '(ia_mlp_dims[-1], ia_mlp_dims)\n', (3503, 3533), False, 'from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2\n'), ((3617, 3656), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['(in_mlp_dims[-1] * 2)', 'sort_mlp_dims'], {}), '(in_mlp_dims[-1] * 2, sort_mlp_dims)\n', (3620, 3656), False, 'from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2\n'), ((3711, 3750), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['(in_mlp_dims[-1] * 2)', 'sort_mlp_dims'], {}), '(in_mlp_dims[-1] * 2, sort_mlp_dims)\n', (3714, 3750), False, 'from crowd_nav.policy.cadrl import mlp, mlp2, conv_mlp2\n'), ((5470, 5518), 'torch.cat', 'torch.cat', (['[in_mlp_output, global_state]'], {'dim': '(-1)'}), '([in_mlp_output, global_state], dim=-1)\n', (5479, 5518), False, 'import torch\n'), ((6191, 6208), 'torch.exp', 'torch.exp', (['scores'], {}), '(scores)\n', (6200, 6208), False, 'import torch\n'), ((2235, 2263), 'torch.cat', 'torch.cat', (['[r * h, x]'], {'dim': '(1)'}), '([r * h, x], dim=1)\n', (2244, 2263), False, 'import torch\n'), ((2838, 2863), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2861, 2863), False, 'import torch\n'), ((6265, 6307), 'torch.sum', 'torch.sum', (['scores_exp'], {'dim': '(1)', 'keepdim': '(True)'}), '(scores_exp, dim=1, keepdim=True)\n', (6274, 6307), False, 'import torch\n'), ((12198, 
12253), 'torch.cat', 'torch.cat', (['[rotated_batch_input, occupancy_maps]'], {'dim': '(2)'}), '([rotated_batch_input, occupancy_maps], dim=2)\n', (12207, 12253), False, 'import torch\n'), ((12338, 12399), 'torch.cat', 'torch.cat', (['[rotated_batch_inputs, rotated_batch_input]'], {'dim': '(1)'}), '([rotated_batch_inputs, rotated_batch_input], dim=1)\n', (12347, 12399), False, 'import torch\n'), ((1178, 1190), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (1187, 1190), False, 'import torch\n'), ((1330, 1342), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (1339, 1342), False, 'import torch\n'), ((11709, 11759), 'torch.Tensor', 'torch.Tensor', (['[next_self_state + next_human_state]'], {}), '([next_self_state + next_human_state])\n', (11721, 11759), False, 'import torch\n')] |
def calc_massive_common_envelope_evolution(primary_masses,
                                           black_hole_masses,
                                           companion_masses,
                                           semi_major_axes,
                                           eccentricities):
    """Common-envelope outcome applied when the companion is massive (q > 0.5).

    The orbit shrinks by a ratio built from the remnant mass fraction and the
    mass ratio, the orbit circularises, and the companion accretes half of the
    ejected envelope mass.

    All arguments are numpy arrays (or scalars) of matching shape:
    :param primary_masses: pre-CE primary masses.
    :param black_hole_masses: remnant (black hole) masses of the primaries.
    :param companion_masses: companion masses.
    :param semi_major_axes: pre-CE semi-major axes.
    :param eccentricities: pre-CE eccentricities (only the shape is used).
    :return: (terminal semi-major axes, terminal eccentricities [all zero],
              terminal companion masses)
    """
    import numpy
    # Hard-coded prescription coefficients; c1 is the exponent of the
    # semi-major-axis shrink factor. (The unused c2 = -a*(1-b)-2 was removed.)
    a = 1.0
    b = 0.5
    c1 = a * (1.0 - b) - 2.0
    q_list = companion_masses / primary_masses
    k_list = black_hole_masses / primary_masses
    sma_ratios = ((k_list + b * (1 - k_list) + q_list) / (1 + q_list)
                  * k_list ** c1
                  * (b * (1 - k_list) / q_list + 1) ** c1)
    terminal_semi_major_axes = semi_major_axes * sma_ratios
    # The CE phase circularises the orbit.
    terminal_eccentricities = numpy.zeros_like(eccentricities)
    # Companion gains half the mass lost by the primary in forming the remnant.
    terminal_companion_masses = companion_masses + 0.5 * (primary_masses - black_hole_masses)
    return terminal_semi_major_axes, terminal_eccentricities, terminal_companion_masses
def calc_wimpy_common_envelope_evolution(primary_masses,
                                         black_hole_masses,
                                         companion_masses,
                                         semi_major_axes,
                                         eccentricities,
                                         al=1.0):
    """Common-envelope outcome for low-mass companions.

    The orbit shrinks by a factor built from the remnant mass fraction and the
    (inverse) Eggleton Roche-lobe ratio, the orbit circularises, and the
    companion mass is left unchanged.

    :param al: common-envelope efficiency parameter.
    :return: (terminal semi-major axes, terminal eccentricities [all zero],
              terminal companion masses [unchanged])
    """
    from calc_roche_lobe_radius_ratio_eggleton import calc_roche_lobe_radius_ratio_eggleton
    import numpy
    mass_ratios = companion_masses / primary_masses
    remnant_fractions = black_hole_masses / primary_masses
    inverse_roche_ratios = 1.0 / calc_roche_lobe_radius_ratio_eggleton(mass_ratios)
    shrink_factors = remnant_fractions / (
        2.0 * (1.0 - remnant_fractions) / (al * inverse_roche_ratios * mass_ratios) + 1.0
    )
    return (semi_major_axes * shrink_factors,
            numpy.zeros_like(eccentricities),
            companion_masses)
def evolve_unrestricted_common_envelope(primary_masses,
                                        black_hole_masses,
                                        companion_masses,
                                        semi_major_axes,
                                        eccentricities,
                                        al=1.0):
    """Combine the wimpy and massive CE prescriptions elementwise.

    Systems whose mass ratio q = m2/m1 exceeds 0.5 follow the massive
    prescription; all others follow the wimpy one.

    :param al: efficiency parameter forwarded to the wimpy prescription.
    :return: (semi-major axes, eccentricities, companion masses) after CE.
    """
    import numpy
    wimpy_outcome = calc_wimpy_common_envelope_evolution(primary_masses,
                                                         black_hole_masses,
                                                         companion_masses,
                                                         semi_major_axes,
                                                         eccentricities,
                                                         al=al)
    massive_outcome = calc_massive_common_envelope_evolution(primary_masses,
                                                             black_hole_masses,
                                                             companion_masses,
                                                             semi_major_axes,
                                                             eccentricities)
    is_massive_companion = companion_masses / primary_masses > 0.5
    # Pick, per element, the prescription matching the mass ratio.
    return tuple(numpy.where(is_massive_companion, heavy, light)
                 for heavy, light in zip(massive_outcome, wimpy_outcome))
def evolve_common_envelope(primary_masses,
                           black_hole_masses,
                           companion_masses,
                           semi_major_axes,
                           eccentricities,
                           al=1.0):
    """Apply common-envelope evolution only where it can actually occur.

    A system undergoes CE only when its periapsis a*(1-e) lies inside a
    maximal AGB radius (3e-5 in the code's units); all other systems keep
    their original orbit and companion mass.

    :param al: efficiency parameter forwarded to the CE prescription.
    :return: (semi-major axes, eccentricities, companion masses) after the
             conditional CE phase.
    """
    import numpy
    ce_outcome = evolve_unrestricted_common_envelope(primary_masses,
                                                     black_hole_masses,
                                                     companion_masses,
                                                     semi_major_axes,
                                                     eccentricities,
                                                     al=al)
    maximum_agb_radius = 3e-5
    undergoes_ce = maximum_agb_radius > semi_major_axes * (1 - eccentricities)
    untouched = (semi_major_axes, eccentricities, companion_masses)
    return tuple(numpy.where(undergoes_ce, after_ce, before_ce)
                 for after_ce, before_ce in zip(ce_outcome, untouched))
| [
"calc_roche_lobe_radius_ratio_eggleton.calc_roche_lobe_radius_ratio_eggleton",
"numpy.zeros_like",
"numpy.where"
] | [((663, 695), 'numpy.zeros_like', 'numpy.zeros_like', (['eccentricities'], {}), '(eccentricities)\n', (679, 695), False, 'import numpy\n'), ((1644, 1676), 'numpy.zeros_like', 'numpy.zeros_like', (['eccentricities'], {}), '(eccentricities)\n', (1660, 1676), False, 'import numpy\n'), ((3270, 3311), 'numpy.where', 'numpy.where', (['mask', 'massive_sma', 'wimpy_sma'], {}), '(mask, massive_sma, wimpy_sma)\n', (3281, 3311), False, 'import numpy\n'), ((3373, 3410), 'numpy.where', 'numpy.where', (['mask', 'massive_e', 'wimpy_e'], {}), '(mask, massive_e, wimpy_e)\n', (3384, 3410), False, 'import numpy\n'), ((3469, 3508), 'numpy.where', 'numpy.where', (['mask', 'massive_m2', 'wimpy_m2'], {}), '(mask, massive_m2, wimpy_m2)\n', (3480, 3508), False, 'import numpy\n'), ((4501, 4552), 'numpy.where', 'numpy.where', (['is_within_rlo', 'ce_sma', 'semi_major_axes'], {}), '(is_within_rlo, ce_sma, semi_major_axes)\n', (4512, 4552), False, 'import numpy\n'), ((4623, 4671), 'numpy.where', 'numpy.where', (['is_within_rlo', 'ce_e', 'eccentricities'], {}), '(is_within_rlo, ce_e, eccentricities)\n', (4634, 4671), False, 'import numpy\n'), ((4739, 4790), 'numpy.where', 'numpy.where', (['is_within_rlo', 'ce_m2', 'companion_masses'], {}), '(is_within_rlo, ce_m2, companion_masses)\n', (4750, 4790), False, 'import numpy\n'), ((1443, 1488), 'calc_roche_lobe_radius_ratio_eggleton.calc_roche_lobe_radius_ratio_eggleton', 'calc_roche_lobe_radius_ratio_eggleton', (['q_list'], {}), '(q_list)\n', (1480, 1488), False, 'from calc_roche_lobe_radius_ratio_eggleton import calc_roche_lobe_radius_ratio_eggleton\n')] |
#
# MIT License
#
# Copyright (c) 2018 WillQ
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import os
import numpy
import pandas
from pytz import utc
def random_quote_frame(length: int, timestamp: datetime.datetime = pandas.Timestamp(2018, 1, 3)) -> pandas.DataFrame:
    """Build a frame of random quote rows indexed at one-second UTC intervals.

    :param length: number of rows to generate.
    :param timestamp: first index timestamp (default 2018-01-03).
    :return: DataFrame with bidSize/bidPrice/askPrice/askSize columns.
    """
    index = pandas.date_range(start=timestamp, periods=length, freq='S', tz=utc)
    values = numpy.random.uniform(1, 1000, size=(length, 4))
    return pandas.DataFrame(values,
                            columns=["bidSize", "bidPrice", "askPrice", "askSize"],
                            index=index)
def random_trade_frame(length: int, timestamp: datetime.datetime = pandas.Timestamp(2018, 1, 3)) -> pandas.DataFrame:
    """Build a frame of random trade rows indexed at one-second UTC intervals.

    :param length: number of rows to generate.
    :param timestamp: first index timestamp (default 2018-01-03).
    :return: DataFrame with size/price/grossValue/homeNotional/foreignNotional
             plus float64 ``side`` and ``tickDirection`` columns.
    """
    index = pandas.date_range(start=timestamp, periods=length, freq='S', tz=utc)
    frame = pandas.DataFrame(
        numpy.random.uniform(1, 1000, size=(length, 5)),
        columns=["size", "price", "grossValue", "homeNotional", "foreignNotional"],
        index=index,
    )
    # Scalar draws are broadcast over the whole index, then cast to float64
    # (randint(1, 2) is always 1; randint(1, 4) is one of 1/2/3).
    frame['side'] = pandas.Series(numpy.random.randint(1, 2), index=index).astype(numpy.float64)
    frame['tickDirection'] = pandas.Series(numpy.random.randint(1, 4), index=index).astype(numpy.float64)
    return frame
def random_kline_data(length: int, endtime: datetime.datetime, freq: str = 'min') -> pandas.DataFrame:
    """Build a frame of random OHLCV candles whose index ends at *endtime*.

    :param length: number of candles.
    :param endtime: timestamp of the last candle.
    :param freq: pandas frequency string for the candle spacing.
    :return: DataFrame with high/low/open/close/volume/turnover columns.
    """
    index = pandas.date_range(end=endtime, periods=length, freq=freq, tz=utc)
    values = numpy.random.uniform(1, 1000, size=(length, 6))
    return pandas.DataFrame(values,
                            columns=["high", "low", "open", "close", "volume", "turnover"],
                            index=index)
def random_kline_data_with_start_end(start: datetime.datetime, end: datetime.datetime,
                                     freq: str = 'min') -> pandas.DataFrame:
    """Build random OHLCV candles covering [start, end] inclusive.

    :param start: first candle timestamp.
    :param end: last candle timestamp.
    :param freq: pandas frequency string for the candle spacing.
    :return: DataFrame with high/low/open/close/volume/turnover columns.
    """
    index = pandas.date_range(start=start, end=end, freq=freq, tz=utc)
    values = numpy.random.uniform(1, 1000, size=(len(index), 6))
    return pandas.DataFrame(values,
                            columns=["high", "low", "open", "close", "volume", "turnover"],
                            index=index)
def random_quote_hdf(path: str, length: int = 3) -> None:
    """Append random quote frames to an HDF5 store under /XBTUSD and /ETHUSD.

    :param path: target HDF5 file path.
    :param length: number of rows per symbol.
    """
    # Generate both frames first (preserves the RNG draw order), then append
    # each under its own key with identical storage settings.
    frames = {'/XBTUSD': random_quote_frame(length=length),
              '/ETHUSD': random_quote_frame(length=length)}
    for key, frame in frames.items():
        frame.to_hdf(path, key,
                     data_columns=True, index=False, complib='blosc:blosclz',
                     complevel=9, append=True, format='table')
def random_trade_hdf(path: str, length: int = 3) -> None:
    """Append random trade frames to an HDF5 store under /XBTUSD and /ETHUSD.

    :param path: target HDF5 file path.
    :param length: number of rows per symbol.
    """
    # Generate both frames first (preserves the RNG draw order), then append
    # each under its own key with identical storage settings.
    frames = {'/XBTUSD': random_trade_frame(length=length),
              '/ETHUSD': random_trade_frame(length=length)}
    for key, frame in frames.items():
        frame.to_hdf(path, key,
                     data_columns=True, index=False, complib='blosc:blosclz',
                     complevel=9, append=True, format='table')
def get_resource_path(filename: str = "") -> str:
    """Return the absolute path of *filename* inside the test ``resource`` dir.

    With the default empty *filename*, the resource directory itself is
    returned (with a trailing separator, as produced by ``os.path.join``).
    """
    test_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(test_dir, 'resource', filename)
| [
"numpy.random.uniform",
"os.path.abspath",
"pandas.Timestamp",
"pandas.date_range",
"os.path.dirname",
"numpy.random.randint",
"os.path.join"
] | [((1248, 1276), 'pandas.Timestamp', 'pandas.Timestamp', (['(2018)', '(1)', '(3)'], {}), '(2018, 1, 3)\n', (1264, 1276), False, 'import pandas\n'), ((1641, 1669), 'pandas.Timestamp', 'pandas.Timestamp', (['(2018)', '(1)', '(3)'], {}), '(2018, 1, 3)\n', (1657, 1669), False, 'import pandas\n'), ((3021, 3079), 'pandas.date_range', 'pandas.date_range', ([], {'start': 'start', 'end': 'end', 'freq': 'freq', 'tz': 'utc'}), '(start=start, end=end, freq=freq, tz=utc)\n', (3038, 3079), False, 'import pandas\n'), ((4484, 4509), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4499, 4509), False, 'import os\n'), ((4529, 4553), 'os.path.dirname', 'os.path.dirname', (['current'], {}), '(current)\n', (4544, 4553), False, 'import os\n'), ((4573, 4611), 'os.path.join', 'os.path.join', (['current_path', '"""resource"""'], {}), "(current_path, 'resource')\n", (4585, 4611), False, 'import os\n'), ((4623, 4659), 'os.path.join', 'os.path.join', (['resource_dir', 'filename'], {}), '(resource_dir, filename)\n', (4635, 4659), False, 'import os\n'), ((1325, 1372), 'numpy.random.uniform', 'numpy.random.uniform', (['(1)', '(1000)'], {'size': '(length, 4)'}), '(1, 1000, size=(length, 4))\n', (1345, 1372), False, 'import numpy\n'), ((1718, 1765), 'numpy.random.uniform', 'numpy.random.uniform', (['(1)', '(1000)'], {'size': '(length, 5)'}), '(1, 1000, size=(length, 5))\n', (1738, 1765), False, 'import numpy\n'), ((2002, 2028), 'numpy.random.randint', 'numpy.random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (2022, 2028), False, 'import numpy\n'), ((2227, 2253), 'numpy.random.randint', 'numpy.random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (2247, 2253), False, 'import numpy\n'), ((2585, 2632), 'numpy.random.uniform', 'numpy.random.uniform', (['(1)', '(1000)'], {'size': '(length, 6)'}), '(1, 1000, size=(length, 6))\n', (2605, 2632), False, 'import numpy\n'), ((1488, 1556), 'pandas.date_range', 'pandas.date_range', ([], {'start': 'timestamp', 'periods': 'length', 
'freq': '"""S"""', 'tz': 'utc'}), "(start=timestamp, periods=length, freq='S', tz=utc)\n", (1505, 1556), False, 'import pandas\n'), ((1901, 1969), 'pandas.date_range', 'pandas.date_range', ([], {'start': 'timestamp', 'periods': 'length', 'freq': '"""S"""', 'tz': 'utc'}), "(start=timestamp, periods=length, freq='S', tz=utc)\n", (1918, 1969), False, 'import pandas\n'), ((2067, 2135), 'pandas.date_range', 'pandas.date_range', ([], {'start': 'timestamp', 'periods': 'length', 'freq': '"""S"""', 'tz': 'utc'}), "(start=timestamp, periods=length, freq='S', tz=utc)\n", (2084, 2135), False, 'import pandas\n'), ((2301, 2369), 'pandas.date_range', 'pandas.date_range', ([], {'start': 'timestamp', 'periods': 'length', 'freq': '"""S"""', 'tz': 'utc'}), "(start=timestamp, periods=length, freq='S', tz=utc)\n", (2318, 2369), False, 'import pandas\n'), ((2756, 2821), 'pandas.date_range', 'pandas.date_range', ([], {'end': 'endtime', 'periods': 'length', 'freq': 'freq', 'tz': 'utc'}), '(end=endtime, periods=length, freq=freq, tz=utc)\n', (2773, 2821), False, 'import pandas\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import re
from operator import attrgetter
from unittest import TestCase
import numpy as np
import pandas as pd
import statsmodels
from kats.consts import TimeSeriesData
from kats.detectors.cusum_detection import (
CUSUMDetector,
MultiCUSUMDetector,
VectorizedCUSUMDetector,
)
from parameterized.parameterized import parameterized
from scipy.stats import chi2 # @manual
from sklearn.datasets import make_spd_matrix
# Major.minor statsmodels version as a float (e.g. 0.13) — presumably used for
# version-gated test expectations elsewhere in this file; no use is visible here.
statsmodels_ver = float(
    re.findall("([0-9]+\\.[0-9]+)\\..*", statsmodels.__version__)[0]
)
class CUSUMDetectorTest(TestCase):
    """Tests for the univariate CUSUMDetector on seeded synthetic series.

    ``setUp`` builds several fixtures (step increase, step decrease, seasonal
    series with/without trend, no-variance step, flat series) and runs the
    detector on each; the test methods then assert on the stored change points
    and their metadata.
    """
    def setUp(self) -> None:
        """Build all synthetic fixtures and run the detector on each."""
        np.random.seed(10)
        # increasing with variance detection setup: mean steps 1.0 -> 1.5 at index 30
        df_increase = pd.DataFrame(
            {
                "increase": np.concatenate(
                    [np.random.normal(1, 0.2, 30), np.random.normal(1.5, 0.2, 30)]
                )
            }
        )
        df_increase["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
        inc_timeseries = TimeSeriesData(df_increase)
        self.inc_detector = CUSUMDetector(inc_timeseries)
        self.inc_change_points = self.inc_detector.detector()
        self.inc_metadata = self.inc_change_points[0]
        # decreasing detection setup: mean steps 1.0 -> 0.5 at index 50
        df_decrease = pd.DataFrame(
            {
                "decrease": np.concatenate(
                    [np.random.normal(1, 0.2, 50), np.random.normal(0.5, 0.2, 10)]
                )
            }
        )
        df_decrease["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
        dec_timeseries = TimeSeriesData(df_decrease)
        self.dec_detector = CUSUMDetector(dec_timeseries)
        self.dec_change_points = self.dec_detector.detector()
        self.dec_metadata = self.dec_change_points[0]
        # seasonality setup: simulated seasonal component plus unit-variance noise
        self.periodicity = 48
        self.total_cycles = 3
        harmonics = 2
        noise_std = 3
        seasonal_term = CUSUMDetectorTest.simulate_seasonal_term(
            self.periodicity,
            self.total_cycles,
            noise_std=noise_std,
            harmonics=harmonics,
        )
        seasonal_term = seasonal_term / seasonal_term.std() * 2
        residual = np.random.normal(0, 1, self.periodicity * self.total_cycles)
        self.seasonal_data = seasonal_term + residual
        # seasonality with increase trend setup: add a logspace trend, shift to >= 0
        trend_term = np.logspace(0, 1, self.periodicity * self.total_cycles)
        data = self.seasonal_data + trend_term
        data -= np.min(data)
        df_seasonality = pd.DataFrame(
            {
                "time": pd.date_range(
                    "2020-01-01",
                    periods=self.periodicity * self.total_cycles,
                    freq="30T",
                ),
                "seasonality": data,
            }
        )
        timeseries = TimeSeriesData(df_seasonality)
        self.season_inc_trend_detector = CUSUMDetector(timeseries)
        # restrict detection to the last full cycle
        self.season_inc_trend_change_points = self.season_inc_trend_detector.detector(
            interest_window=[
                self.periodicity * (self.total_cycles - 1),
                self.periodicity * self.total_cycles - 1,
            ],
            magnitude_quantile=1,
            change_directions=["increase", "decrease"],
            delta_std_ratio=0,
        )
        self.season_metadata = self.season_inc_trend_change_points[0]
        # test on step change with no variance: exact 1.0 -> 2.0 step at index 30
        df_increase_no_var = pd.DataFrame(
            {
                "increase": np.concatenate(
                    [np.random.normal(1, 0, 30), np.random.normal(2, 0, 30)]
                )
            }
        )
        df_increase_no_var["time"] = pd.Series(
            pd.date_range("2019-01-01", "2019-03-01")
        )
        no_var_timeseries = TimeSeriesData(df_increase_no_var)
        self.no_var_detector = CUSUMDetector(no_var_timeseries)
        self.no_var_change_points = self.no_var_detector.detector()
        # no seasonality setup: seasonal data without the trend term
        data = self.seasonal_data
        data -= np.min(data)
        df_seasonality = pd.DataFrame(
            {
                "time": pd.date_range(
                    "2020-01-01",
                    periods=self.periodicity * self.total_cycles,
                    freq="30T",
                ),
                "seasonality": data,
            }
        )
        timeseries = TimeSeriesData(df_seasonality)
        self.no_season_detector = CUSUMDetector(timeseries)
        self.no_season_change_points = self.no_season_detector.detector(
            interest_window=[
                self.periodicity * (self.total_cycles - 1),
                self.periodicity * self.total_cycles - 1,
            ],
            magnitude_quantile=1,
            change_directions=["increase"],
            delta_std_ratio=0,
        )
        # no regression setup: stationary noise, detection forced past start_point
        df_noregress = pd.DataFrame({"no_change": np.random.normal(1, 0.2, 60)})
        df_noregress["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
        timeseries = TimeSeriesData(df_noregress)
        self.no_reg_detector = CUSUMDetector(timeseries)
        self.no_reg_change_points = self.no_reg_detector.detector(start_point=20)
    @parameterized.expand(
        [
            ["inc_change_points", 1],
            ["dec_change_points", 1],
            ["season_inc_trend_change_points", 1],
            ["no_var_change_points", 1],
            ["no_reg_change_points", 0],
            ["no_season_change_points", 0],
        ]
    )
    def test_cp_len(self, cp_name, expected) -> None:
        """Each fixture yields the expected number of change points."""
        self.assertEqual(len(attrgetter(cp_name)(self)), expected)
    @parameterized.expand(
        [
            ["inc_metadata", 29],
            ["dec_metadata", 49],
        ]
    )
    def test_cp_index(self, metadata_name, expected) -> None:
        """Detected change-point index is within 1 of the simulated step."""
        self.assertLessEqual(
            abs(attrgetter(metadata_name)(self).cp_index - expected), 1
        )
    @parameterized.expand(
        [
            ["inc_metadata", "increase"],
            ["dec_metadata", "decrease"],
        ]
    )
    def test_direction(self, metadata_name, expected) -> None:
        """Detected direction matches the simulated step direction."""
        self.assertEqual(attrgetter(metadata_name)(self).direction, expected)
    def test_increasing_mu(self) -> None:
        self.assertLess(self.inc_metadata.mu0, self.inc_metadata.mu1)
    def test_increasing_correct_delta(self) -> None:
        self.assertEqual(
            self.inc_metadata.delta, self.inc_metadata.mu1 - self.inc_metadata.mu0
        )
    def test_increasing_regression(self) -> None:
        self.assertTrue(self.inc_metadata.regression_detected)
    @parameterized.expand(
        [
            ["season_metadata.p_value_int", "season_metadata.llr_int"],
            ["inc_metadata.p_value", "inc_metadata.llr"],
        ]
    )
    def test_p_val(self, pval_name, llr_name) -> None:
        """p-value equals the chi-square (df=2) survival value of the LLR."""
        self.assertEqual(
            attrgetter(pval_name)(self),
            1 - chi2.cdf(attrgetter(llr_name)(self), 2),
        )
    def test_increasing_p_val_nan(self) -> None:
        self.assertTrue(np.isnan(self.inc_metadata.p_value_int))
    def test_increasing_llr_int(self) -> None:
        self.assertEqual(self.inc_metadata.llr_int, np.inf)
    def test_increasing_stable_changepoint(self) -> None:
        self.assertTrue(self.inc_metadata.stable_changepoint)
    @parameterized.expand(
        [
            ["inc_detector", "inc_change_points"],
            ["dec_detector", "dec_change_points"],
            ["season_inc_trend_detector", "season_inc_trend_change_points"],
            ["no_var_detector", "no_var_change_points"],
            ["no_reg_detector", "no_reg_change_points"],
            ["no_season_detector", "no_season_change_points"],
        ]
    )
    def test_plot(self, detector_name, cp_name) -> None:
        """Plotting the detected change points must not raise."""
        attrgetter(detector_name)(self).plot(attrgetter(cp_name)(self))
    @staticmethod
    def simulate_seasonal_term(
        periodicity, total_cycles, noise_std=1.0, harmonics=None
    ):
        """Simulate a stochastic-harmonic seasonal component.

        Runs a trigonometric state-space recursion for 100x the requested
        duration and discards the burn-in, returning the final
        ``periodicity * total_cycles`` values.
        """
        duration = periodicity * total_cycles
        assert duration == int(duration)
        duration = int(duration)
        harmonics = harmonics if harmonics else int(np.floor(periodicity / 2))
        lambda_p = 2 * np.pi / float(periodicity)
        gamma_jt = noise_std * np.random.randn((harmonics))
        gamma_star_jt = noise_std * np.random.randn((harmonics))
        total_timesteps = 100 * duration  # Pad for burn in
        series = np.zeros(total_timesteps)
        for t in range(total_timesteps):
            gamma_jtp1 = np.zeros_like(gamma_jt)
            gamma_star_jtp1 = np.zeros_like(gamma_star_jt)
            for j in range(1, harmonics + 1):
                cos_j = np.cos(lambda_p * j)
                sin_j = np.sin(lambda_p * j)
                gamma_jtp1[j - 1] = (
                    gamma_jt[j - 1] * cos_j
                    + gamma_star_jt[j - 1] * sin_j
                    + noise_std * np.random.randn()
                )
                gamma_star_jtp1[j - 1] = (
                    -gamma_jt[j - 1] * sin_j
                    + gamma_star_jt[j - 1] * cos_j
                    + noise_std * np.random.randn()
                )
            series[t] = np.sum(gamma_jtp1)
            gamma_jt = gamma_jtp1
            gamma_star_jt = gamma_star_jtp1
        wanted_series = series[-duration:]  # Discard burn in
        return wanted_series
    def test_seasonality_with_increasing_trend_cp_index(self) -> None:
        """Change point of the trended seasonal series lies in the last cycle."""
        self.assertGreaterEqual(
            self.season_metadata.cp_index, self.periodicity * (self.total_cycles - 1)
        )
    def test_logging_multivariate_error(self) -> None:
        # test multivariate error: univariate CUSUMDetector must reject 2 columns
        np.random.seed(10)
        df_multi_var = pd.DataFrame(
            {
                "no_change": np.random.normal(1, 0.2, 60),
                "no_change2": np.random.normal(1, 0.2, 60),
            }
        )
        df_multi_var["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
        with self.assertRaises(ValueError):
            timeseries = TimeSeriesData(df_multi_var)
            CUSUMDetector(timeseries)
    @parameterized.expand(
        [
            ["WARNING", 0.9],
            ["DEBUG", None],
        ]
    )
    def test_logging_neg_magnitude(self, level, mag_q) -> None:
        # test logging setup - negative in magnitude series should emit a log record
        np.random.seed(10)
        df_neg = pd.DataFrame({"no_change": -np.random.normal(1, 0.2, 60)})
        df_neg["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
        timeseries = TimeSeriesData(df_neg)
        logging_detector = CUSUMDetector(timeseries)
        with self.assertLogs(level=level):
            logging_detector.detector(
                magnitude_quantile=mag_q, interest_window=[40, 60]
            )
    def test_ts_without_name(self) -> None:
        """Detection and plotting work for a series built without a name."""
        n = 10
        time = pd.Series(pd.date_range(start="2018-01-01", periods=n, freq="D"))
        value = pd.Series(np.arange(n))
        ts = TimeSeriesData(time=time, value=value)
        detector = CUSUMDetector(ts)
        change_points = detector.detector()
        detector.plot(change_points)
class MultiCUSUMDetectorTest(TestCase):
    """Tests for MultiCUSUMDetector on seeded multivariate Gaussian series.

    ``setUp`` builds two 10-dimensional series (mean step up at index 60, and
    the mirrored step down) and runs the detector; the tests assert on the
    resulting change-point metadata.
    """
    def setUp(self) -> None:
        """Build increasing/decreasing multivariate fixtures and detect."""
        # increasing setup: mean vector steps from 1s to 2s at index 60
        self.D = 10
        random_state = 10
        np.random.seed(random_state)
        mean1 = np.ones(self.D)
        mean2 = mean1 * 2
        sigma = make_spd_matrix(self.D, random_state=random_state)
        df_increase = pd.DataFrame(
            np.concatenate(
                [
                    np.random.multivariate_normal(mean1, sigma, 60),
                    np.random.multivariate_normal(mean2, sigma, 30),
                ]
            )
        )
        df_increase["time"] = pd.Series(pd.date_range("2019-01-01", "2019-04-01"))
        timeseries_increase = TimeSeriesData(df_increase)
        self.inc_change_points = MultiCUSUMDetector(timeseries_increase).detector()
        self.inc_metadata = self.inc_change_points[0]
        # decreasing setup: mirrored step, 2s down to 1s at index 60
        df_decrease = pd.DataFrame(
            np.concatenate(
                [
                    np.random.multivariate_normal(mean2, sigma, 60),
                    np.random.multivariate_normal(mean1, sigma, 30),
                ]
            )
        )
        df_decrease["time"] = pd.Series(pd.date_range("2019-01-01", "2019-04-01"))
        timeseries_decrease = TimeSeriesData(df_decrease)
        self.dec_change_points = MultiCUSUMDetector(timeseries_decrease).detector()
        self.dec_metadata = self.dec_change_points[0]
    @parameterized.expand(
        [
            ["inc_change_points"],
            ["dec_change_points"],
        ]
    )
    def test_cp_len(self, cp_name) -> None:
        """Exactly one change point is detected per fixture."""
        self.assertEqual(len(attrgetter(cp_name)(self)), 1)
    @parameterized.expand(
        [
            ["inc_metadata"],
            ["dec_metadata"],
        ]
    )
    def test_cp_index(self, cp_name) -> None:
        """Detected index is within 1 of the simulated step at 59."""
        self.assertLessEqual(abs(attrgetter(cp_name)(self).cp_index - 59), 1)
    @parameterized.expand(
        [
            ["inc_metadata.mu0", "inc_metadata.mu1"],
            ["dec_metadata.mu1", "dec_metadata.mu0"],
        ]
    )
    def test_mu(self, m1_name, m2_name) -> None:
        """Pre-change means are elementwise below post-change means."""
        for m1, m2 in zip(attrgetter(m1_name)(self), attrgetter(m2_name)(self)):
            self.assertLess(m1, m2)
    @parameterized.expand(
        [
            ["inc_metadata", "inc_metadata.mu0", "inc_metadata.mu1"],
            ["dec_metadata", "dec_metadata.mu0", "dec_metadata.mu1"],
        ]
    )
    def test_correct_delta(self, metadata_name, mu0_name, mu1_name) -> None:
        """delta equals mu1 - mu0 elementwise."""
        for d, diff in zip(
            attrgetter(metadata_name)(self).delta,
            attrgetter(mu1_name)(self) - attrgetter(mu0_name)(self),
        ):
            self.assertEqual(d, diff)
    @parameterized.expand(
        [
            ["inc_metadata"],
            ["dec_metadata"],
        ]
    )
    def test_regression(self, metadata_name) -> None:
        self.assertTrue(attrgetter(metadata_name)(self).regression_detected)
    @parameterized.expand(
        [
            ["inc_metadata"],
            ["dec_metadata"],
        ]
    )
    def test_p_val(self, metadata_name) -> None:
        """p-value equals the chi-square (df=D+1) survival value of the LLR."""
        self.assertEqual(
            attrgetter(metadata_name)(self).p_value,
            1 - chi2.cdf(attrgetter(metadata_name)(self).llr, self.D + 1),
        )
    @parameterized.expand(
        [
            ["inc_metadata"],
            ["dec_metadata"],
        ]
    )
    def test_gaussian_increase_p_val_nan(self, metadata_name) -> None:
        self.assertTrue(np.isnan(attrgetter(metadata_name)(self).p_value_int))
    @parameterized.expand(
        [
            ["inc_metadata"],
            ["dec_metadata"],
        ]
    )
    def test_gaussian_increase_llr_int(self, metadata_name) -> None:
        self.assertEqual(attrgetter(metadata_name)(self).llr_int, np.inf)
    @parameterized.expand(
        [
            ["inc_metadata"],
            ["dec_metadata"],
        ]
    )
    def test_gaussian_increase_stable_changepoint(self, metadata_name) -> None:
        self.assertTrue(attrgetter(metadata_name)(self).stable_changepoint)
    def test_no_changepoint(self) -> None:
        """A constant-mean multivariate series yields no change points."""
        D = 10
        random_state = 10
        np.random.seed(random_state)
        mean = np.ones(D)
        sigma = make_spd_matrix(D, random_state=random_state)
        # Use the same mean for the entire series and there should be no changepoint
        df_no_change = pd.DataFrame(np.random.multivariate_normal(mean, sigma, 90))
        df_no_change["time"] = pd.Series(pd.date_range("2019-01-01", "2019-04-01"))
        timeseries_no_change = TimeSeriesData(df_no_change)
        change_points = MultiCUSUMDetector(timeseries_no_change).detector()
        self.assertEqual(len(change_points), 0)
class VectorizedCUSUMDetectorTest(TestCase):
    """Compare VectorizedCUSUMDetector against the scalar CUSUMDetector column by column."""

    def setUp(self) -> None:
        np.random.seed(10)
        # Two synthetic series: one with an upward mean shift, one with a downward shift.
        # NOTE: the RNG draws must stay in this exact order to reproduce the fixtures.
        increase = np.concatenate(
            [np.random.normal(1, 0.2, 30), np.random.normal(1.5, 0.2, 30)]
        )
        decrease = np.concatenate(
            [np.random.normal(1, 0.2, 50), np.random.normal(0.5, 0.2, 10)]
        )
        df = pd.DataFrame({"increase": increase, "decrease": decrease})
        df["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
        # Baseline: run the scalar detector on each column independently.
        self.inc_change_points = CUSUMDetector(
            TimeSeriesData(df[["increase", "time"]])
        ).detector()
        self.dec_change_points = CUSUMDetector(
            TimeSeriesData(df[["decrease", "time"]])
        ).detector()
        # Vectorized run over the whole frame; keep only the change points whose
        # reported direction matches the column they were found in.
        raw = VectorizedCUSUMDetector(TimeSeriesData(df)).detector_()
        filtered = [[], []]
        for col_idx, candidates in enumerate(raw):
            for cp in candidates:
                if cp.direction == df.columns.values[col_idx]:
                    filtered[col_idx].append(cp)
        # change points for the first / second column of the matrix
        self.inc_change_points_vectorized = filtered[0]
        self.dec_change_points_vectorized = filtered[1]

    def test_vectorized_results(self) -> None:
        """Vectorized CUSUM must locate the same change-point start times as the original."""
        for scalar, vectorized in (
            (self.inc_change_points, self.inc_change_points_vectorized),
            (self.dec_change_points, self.dec_change_points_vectorized),
        ):
            self.assertEqual(scalar[0].start_time, vectorized[0].start_time)
| [
"numpy.random.seed",
"numpy.sum",
"kats.detectors.cusum_detection.CUSUMDetector",
"numpy.logspace",
"numpy.floor",
"numpy.ones",
"numpy.isnan",
"numpy.sin",
"numpy.arange",
"numpy.random.normal",
"numpy.zeros_like",
"numpy.random.randn",
"re.findall",
"sklearn.datasets.make_spd_matrix",
... | [((5409, 5622), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_change_points', 1], ['dec_change_points', 1], [\n 'season_inc_trend_change_points', 1], ['no_var_change_points', 1], [\n 'no_reg_change_points', 0], ['no_season_change_points', 0]]"], {}), "([['inc_change_points', 1], ['dec_change_points', 1], [\n 'season_inc_trend_change_points', 1], ['no_var_change_points', 1], [\n 'no_reg_change_points', 0], ['no_season_change_points', 0]])\n", (5429, 5622), False, 'from parameterized.parameterized import parameterized\n'), ((5837, 5903), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata', 29], ['dec_metadata', 49]]"], {}), "([['inc_metadata', 29], ['dec_metadata', 49]])\n", (5857, 5903), False, 'from parameterized.parameterized import parameterized\n'), ((6133, 6219), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata', 'increase'], ['dec_metadata', 'decrease']]"], {}), "([['inc_metadata', 'increase'], ['dec_metadata',\n 'decrease']])\n", (6153, 6219), False, 'from parameterized.parameterized import parameterized\n'), ((6812, 6944), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['season_metadata.p_value_int', 'season_metadata.llr_int'], [\n 'inc_metadata.p_value', 'inc_metadata.llr']]"], {}), "([['season_metadata.p_value_int',\n 'season_metadata.llr_int'], ['inc_metadata.p_value', 'inc_metadata.llr']])\n", (6832, 6944), False, 'from parameterized.parameterized import parameterized\n'), ((7529, 7853), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_detector', 'inc_change_points'], ['dec_detector',\n 'dec_change_points'], ['season_inc_trend_detector',\n 'season_inc_trend_change_points'], ['no_var_detector',\n 'no_var_change_points'], ['no_reg_detector', 'no_reg_change_points'], [\n 'no_season_detector', 'no_season_change_points']]"], {}), "([['inc_detector', 
'inc_change_points'], [\n 'dec_detector', 'dec_change_points'], ['season_inc_trend_detector',\n 'season_inc_trend_change_points'], ['no_var_detector',\n 'no_var_change_points'], ['no_reg_detector', 'no_reg_change_points'], [\n 'no_season_detector', 'no_season_change_points']])\n", (7549, 7853), False, 'from parameterized.parameterized import parameterized\n'), ((10315, 10372), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['WARNING', 0.9], ['DEBUG', None]]"], {}), "([['WARNING', 0.9], ['DEBUG', None]])\n", (10335, 10372), False, 'from parameterized.parameterized import parameterized\n'), ((12762, 12830), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_change_points'], ['dec_change_points']]"], {}), "([['inc_change_points'], ['dec_change_points']])\n", (12782, 12830), False, 'from parameterized.parameterized import parameterized\n'), ((12990, 13048), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata'], ['dec_metadata']]"], {}), "([['inc_metadata'], ['dec_metadata']])\n", (13010, 13048), False, 'from parameterized.parameterized import parameterized\n'), ((13228, 13339), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata.mu0', 'inc_metadata.mu1'], ['dec_metadata.mu1',\n 'dec_metadata.mu0']]"], {}), "([['inc_metadata.mu0', 'inc_metadata.mu1'], [\n 'dec_metadata.mu1', 'dec_metadata.mu0']])\n", (13248, 13339), False, 'from parameterized.parameterized import parameterized\n'), ((13556, 13702), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata', 'inc_metadata.mu0', 'inc_metadata.mu1'], ['dec_metadata',\n 'dec_metadata.mu0', 'dec_metadata.mu1']]"], {}), "([['inc_metadata', 'inc_metadata.mu0',\n 'inc_metadata.mu1'], ['dec_metadata', 'dec_metadata.mu0',\n 'dec_metadata.mu1']])\n", (13576, 13702), False, 'from parameterized.parameterized import parameterized\n'), ((14024, 
14082), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata'], ['dec_metadata']]"], {}), "([['inc_metadata'], ['dec_metadata']])\n", (14044, 14082), False, 'from parameterized.parameterized import parameterized\n'), ((14269, 14327), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata'], ['dec_metadata']]"], {}), "([['inc_metadata'], ['dec_metadata']])\n", (14289, 14327), False, 'from parameterized.parameterized import parameterized\n'), ((14596, 14654), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata'], ['dec_metadata']]"], {}), "([['inc_metadata'], ['dec_metadata']])\n", (14616, 14654), False, 'from parameterized.parameterized import parameterized\n'), ((14860, 14918), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata'], ['dec_metadata']]"], {}), "([['inc_metadata'], ['dec_metadata']])\n", (14880, 14918), False, 'from parameterized.parameterized import parameterized\n'), ((15117, 15175), 'parameterized.parameterized.parameterized.expand', 'parameterized.expand', (["[['inc_metadata'], ['dec_metadata']]"], {}), "([['inc_metadata'], ['dec_metadata']])\n", (15137, 15175), False, 'from parameterized.parameterized import parameterized\n'), ((649, 710), 're.findall', 're.findall', (['"""([0-9]+\\\\.[0-9]+)\\\\..*"""', 'statsmodels.__version__'], {}), "('([0-9]+\\\\.[0-9]+)\\\\..*', statsmodels.__version__)\n", (659, 710), False, 'import re\n'), ((790, 808), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (804, 808), True, 'import numpy as np\n'), ((1189, 1216), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_increase'], {}), '(df_increase)\n', (1203, 1216), False, 'from kats.consts import TimeSeriesData\n'), ((1245, 1274), 'kats.detectors.cusum_detection.CUSUMDetector', 'CUSUMDetector', (['inc_timeseries'], {}), '(inc_timeseries)\n', (1258, 1274), False, 'from 
kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((1758, 1785), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_decrease'], {}), '(df_decrease)\n', (1772, 1785), False, 'from kats.consts import TimeSeriesData\n'), ((1814, 1843), 'kats.detectors.cusum_detection.CUSUMDetector', 'CUSUMDetector', (['dec_timeseries'], {}), '(dec_timeseries)\n', (1827, 1843), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((2380, 2440), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(self.periodicity * self.total_cycles)'], {}), '(0, 1, self.periodicity * self.total_cycles)\n', (2396, 2440), True, 'import numpy as np\n'), ((2565, 2620), 'numpy.logspace', 'np.logspace', (['(0)', '(1)', '(self.periodicity * self.total_cycles)'], {}), '(0, 1, self.periodicity * self.total_cycles)\n', (2576, 2620), True, 'import numpy as np\n'), ((2684, 2696), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (2690, 2696), True, 'import numpy as np\n'), ((3023, 3053), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_seasonality'], {}), '(df_seasonality)\n', (3037, 3053), False, 'from kats.consts import TimeSeriesData\n'), ((3095, 3120), 'kats.detectors.cusum_detection.CUSUMDetector', 'CUSUMDetector', (['timeseries'], {}), '(timeseries)\n', (3108, 3120), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((3982, 4016), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_increase_no_var'], {}), '(df_increase_no_var)\n', (3996, 4016), False, 'from kats.consts import TimeSeriesData\n'), ((4048, 4080), 'kats.detectors.cusum_detection.CUSUMDetector', 'CUSUMDetector', (['no_var_timeseries'], {}), '(no_var_timeseries)\n', (4061, 4080), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((4231, 4243), 'numpy.min', 'np.min', (['data'], {}), 
'(data)\n', (4237, 4243), True, 'import numpy as np\n'), ((4570, 4600), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_seasonality'], {}), '(df_seasonality)\n', (4584, 4600), False, 'from kats.consts import TimeSeriesData\n'), ((4635, 4660), 'kats.detectors.cusum_detection.CUSUMDetector', 'CUSUMDetector', (['timeseries'], {}), '(timeseries)\n', (4648, 4660), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((5235, 5263), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_noregress'], {}), '(df_noregress)\n', (5249, 5263), False, 'from kats.consts import TimeSeriesData\n'), ((5295, 5320), 'kats.detectors.cusum_detection.CUSUMDetector', 'CUSUMDetector', (['timeseries'], {}), '(timeseries)\n', (5308, 5320), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((8639, 8664), 'numpy.zeros', 'np.zeros', (['total_timesteps'], {}), '(total_timesteps)\n', (8647, 8664), True, 'import numpy as np\n'), ((9874, 9892), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (9888, 9892), True, 'import numpy as np\n'), ((10547, 10565), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (10561, 10565), True, 'import numpy as np\n'), ((10743, 10765), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_neg'], {}), '(df_neg)\n', (10757, 10765), False, 'from kats.consts import TimeSeriesData\n'), ((10793, 10818), 'kats.detectors.cusum_detection.CUSUMDetector', 'CUSUMDetector', (['timeseries'], {}), '(timeseries)\n', (10806, 10818), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((11177, 11215), 'kats.consts.TimeSeriesData', 'TimeSeriesData', ([], {'time': 'time', 'value': 'value'}), '(time=time, value=value)\n', (11191, 11215), False, 'from kats.consts import TimeSeriesData\n'), ((11236, 11253), 'kats.detectors.cusum_detection.CUSUMDetector', 
'CUSUMDetector', (['ts'], {}), '(ts)\n', (11249, 11253), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((11487, 11515), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (11501, 11515), True, 'import numpy as np\n'), ((11532, 11547), 'numpy.ones', 'np.ones', (['self.D'], {}), '(self.D)\n', (11539, 11547), True, 'import numpy as np\n'), ((11590, 11640), 'sklearn.datasets.make_spd_matrix', 'make_spd_matrix', (['self.D'], {'random_state': 'random_state'}), '(self.D, random_state=random_state)\n', (11605, 11640), False, 'from sklearn.datasets import make_spd_matrix\n'), ((12019, 12046), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_increase'], {}), '(df_increase)\n', (12033, 12046), False, 'from kats.consts import TimeSeriesData\n'), ((12590, 12617), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_decrease'], {}), '(df_decrease)\n', (12604, 12617), False, 'from kats.consts import TimeSeriesData\n'), ((15474, 15502), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (15488, 15502), True, 'import numpy as np\n'), ((15518, 15528), 'numpy.ones', 'np.ones', (['D'], {}), '(D)\n', (15525, 15528), True, 'import numpy as np\n'), ((15545, 15590), 'sklearn.datasets.make_spd_matrix', 'make_spd_matrix', (['D'], {'random_state': 'random_state'}), '(D, random_state=random_state)\n', (15560, 15590), False, 'from sklearn.datasets import make_spd_matrix\n'), ((15876, 15904), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_no_change'], {}), '(df_no_change)\n', (15890, 15904), False, 'from kats.consts import TimeSeriesData\n'), ((16113, 16131), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (16127, 16131), True, 'import numpy as np\n'), ((16882, 16900), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df'], {}), '(df)\n', (16896, 16900), False, 'from kats.consts import TimeSeriesData\n'), ((1121, 1162), 
'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-03-01"""'], {}), "('2019-01-01', '2019-03-01')\n", (1134, 1162), True, 'import pandas as pd\n'), ((1689, 1730), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-03-01"""'], {}), "('2019-01-01', '2019-03-01')\n", (1702, 1730), True, 'import pandas as pd\n'), ((3901, 3942), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-03-01"""'], {}), "('2019-01-01', '2019-03-01')\n", (3914, 3942), True, 'import pandas as pd\n'), ((5170, 5211), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-03-01"""'], {}), "('2019-01-01', '2019-03-01')\n", (5183, 5211), True, 'import pandas as pd\n'), ((7253, 7292), 'numpy.isnan', 'np.isnan', (['self.inc_metadata.p_value_int'], {}), '(self.inc_metadata.p_value_int)\n', (7261, 7292), True, 'import numpy as np\n'), ((8467, 8493), 'numpy.random.randn', 'np.random.randn', (['harmonics'], {}), '(harmonics)\n', (8482, 8493), True, 'import numpy as np\n'), ((8532, 8558), 'numpy.random.randn', 'np.random.randn', (['harmonics'], {}), '(harmonics)\n', (8547, 8558), True, 'import numpy as np\n'), ((8731, 8754), 'numpy.zeros_like', 'np.zeros_like', (['gamma_jt'], {}), '(gamma_jt)\n', (8744, 8754), True, 'import numpy as np\n'), ((8785, 8813), 'numpy.zeros_like', 'np.zeros_like', (['gamma_star_jt'], {}), '(gamma_star_jt)\n', (8798, 8813), True, 'import numpy as np\n'), ((9386, 9404), 'numpy.sum', 'np.sum', (['gamma_jtp1'], {}), '(gamma_jtp1)\n', (9392, 9404), True, 'import numpy as np\n'), ((10129, 10170), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-03-01"""'], {}), "('2019-01-01', '2019-03-01')\n", (10142, 10170), True, 'import pandas as pd\n'), ((10242, 10270), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (['df_multi_var'], {}), '(df_multi_var)\n', (10256, 10270), False, 'from kats.consts import TimeSeriesData\n'), ((10283, 10308), 'kats.detectors.cusum_detection.CUSUMDetector', 'CUSUMDetector', 
(['timeseries'], {}), '(timeseries)\n', (10296, 10308), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((10678, 10719), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-03-01"""'], {}), "('2019-01-01', '2019-03-01')\n", (10691, 10719), True, 'import pandas as pd\n'), ((11068, 11122), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2018-01-01"""', 'periods': 'n', 'freq': '"""D"""'}), "(start='2018-01-01', periods=n, freq='D')\n", (11081, 11122), True, 'import pandas as pd\n'), ((11150, 11162), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (11159, 11162), True, 'import numpy as np\n'), ((11945, 11986), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-04-01"""'], {}), "('2019-01-01', '2019-04-01')\n", (11958, 11986), True, 'import pandas as pd\n'), ((12516, 12557), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-04-01"""'], {}), "('2019-01-01', '2019-04-01')\n", (12529, 12557), True, 'import pandas as pd\n'), ((15712, 15758), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'sigma', '(90)'], {}), '(mean, sigma, 90)\n', (15741, 15758), True, 'import numpy as np\n'), ((15801, 15842), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-04-01"""'], {}), "('2019-01-01', '2019-04-01')\n", (15814, 15842), True, 'import pandas as pd\n'), ((16572, 16613), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-03-01"""'], {}), "('2019-01-01', '2019-03-01')\n", (16585, 16613), True, 'import pandas as pd\n'), ((2775, 2864), 'pandas.date_range', 'pd.date_range', (['"""2020-01-01"""'], {'periods': '(self.periodicity * self.total_cycles)', 'freq': '"""30T"""'}), "('2020-01-01', periods=self.periodicity * self.total_cycles,\n freq='30T')\n", (2788, 2864), True, 'import pandas as pd\n'), ((4322, 4411), 'pandas.date_range', 'pd.date_range', (['"""2020-01-01"""'], {'periods': 
'(self.periodicity * self.total_cycles)', 'freq': '"""30T"""'}), "('2020-01-01', periods=self.periodicity * self.total_cycles,\n freq='30T')\n", (4335, 4411), True, 'import pandas as pd\n'), ((5097, 5125), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.2)', '(60)'], {}), '(1, 0.2, 60)\n', (5113, 5125), True, 'import numpy as np\n'), ((7083, 7104), 'operator.attrgetter', 'attrgetter', (['pval_name'], {}), '(pval_name)\n', (7093, 7104), False, 'from operator import attrgetter\n'), ((8035, 8054), 'operator.attrgetter', 'attrgetter', (['cp_name'], {}), '(cp_name)\n', (8045, 8054), False, 'from operator import attrgetter\n'), ((8357, 8382), 'numpy.floor', 'np.floor', (['(periodicity / 2)'], {}), '(periodicity / 2)\n', (8365, 8382), True, 'import numpy as np\n'), ((8884, 8904), 'numpy.cos', 'np.cos', (['(lambda_p * j)'], {}), '(lambda_p * j)\n', (8890, 8904), True, 'import numpy as np\n'), ((8929, 8949), 'numpy.sin', 'np.sin', (['(lambda_p * j)'], {}), '(lambda_p * j)\n', (8935, 8949), True, 'import numpy as np\n'), ((9973, 10001), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.2)', '(60)'], {}), '(1, 0.2, 60)\n', (9989, 10001), True, 'import numpy as np\n'), ((10033, 10061), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.2)', '(60)'], {}), '(1, 0.2, 60)\n', (10049, 10061), True, 'import numpy as np\n'), ((12080, 12119), 'kats.detectors.cusum_detection.MultiCUSUMDetector', 'MultiCUSUMDetector', (['timeseries_increase'], {}), '(timeseries_increase)\n', (12098, 12119), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((12651, 12690), 'kats.detectors.cusum_detection.MultiCUSUMDetector', 'MultiCUSUMDetector', (['timeseries_decrease'], {}), '(timeseries_decrease)\n', (12669, 12690), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((13459, 13478), 'operator.attrgetter', 'attrgetter', (['m1_name'], {}), '(m1_name)\n', 
(13469, 13478), False, 'from operator import attrgetter\n'), ((13486, 13505), 'operator.attrgetter', 'attrgetter', (['m2_name'], {}), '(m2_name)\n', (13496, 13505), False, 'from operator import attrgetter\n'), ((15929, 15969), 'kats.detectors.cusum_detection.MultiCUSUMDetector', 'MultiCUSUMDetector', (['timeseries_no_change'], {}), '(timeseries_no_change)\n', (15947, 15969), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((16937, 16972), 'kats.detectors.cusum_detection.VectorizedCUSUMDetector', 'VectorizedCUSUMDetector', (['timeseries'], {}), '(timeseries)\n', (16960, 16972), False, 'from kats.detectors.cusum_detection import CUSUMDetector, MultiCUSUMDetector, VectorizedCUSUMDetector\n'), ((5793, 5812), 'operator.attrgetter', 'attrgetter', (['cp_name'], {}), '(cp_name)\n', (5803, 5812), False, 'from operator import attrgetter\n'), ((6353, 6378), 'operator.attrgetter', 'attrgetter', (['metadata_name'], {}), '(metadata_name)\n', (6363, 6378), False, 'from operator import attrgetter\n'), ((7998, 8023), 'operator.attrgetter', 'attrgetter', (['detector_name'], {}), '(detector_name)\n', (8008, 8023), False, 'from operator import attrgetter\n'), ((10611, 10639), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.2)', '(60)'], {}), '(1, 0.2, 60)\n', (10627, 10639), True, 'import numpy as np\n'), ((11744, 11791), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean1', 'sigma', '(60)'], {}), '(mean1, sigma, 60)\n', (11773, 11791), True, 'import numpy as np\n'), ((11813, 11860), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean2', 'sigma', '(30)'], {}), '(mean2, sigma, 30)\n', (11842, 11860), True, 'import numpy as np\n'), ((12315, 12362), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean2', 'sigma', '(60)'], {}), '(mean2, sigma, 60)\n', (12344, 12362), True, 'import numpy as np\n'), ((12384, 12431), 
'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean1', 'sigma', '(30)'], {}), '(mean1, sigma, 30)\n', (12413, 12431), True, 'import numpy as np\n'), ((12953, 12972), 'operator.attrgetter', 'attrgetter', (['cp_name'], {}), '(cp_name)\n', (12963, 12972), False, 'from operator import attrgetter\n'), ((13861, 13886), 'operator.attrgetter', 'attrgetter', (['metadata_name'], {}), '(metadata_name)\n', (13871, 13886), False, 'from operator import attrgetter\n'), ((13912, 13932), 'operator.attrgetter', 'attrgetter', (['mu1_name'], {}), '(mu1_name)\n', (13922, 13932), False, 'from operator import attrgetter\n'), ((13941, 13961), 'operator.attrgetter', 'attrgetter', (['mu0_name'], {}), '(mu0_name)\n', (13951, 13961), False, 'from operator import attrgetter\n'), ((14210, 14235), 'operator.attrgetter', 'attrgetter', (['metadata_name'], {}), '(metadata_name)\n', (14220, 14235), False, 'from operator import attrgetter\n'), ((14464, 14489), 'operator.attrgetter', 'attrgetter', (['metadata_name'], {}), '(metadata_name)\n', (14474, 14489), False, 'from operator import attrgetter\n'), ((15062, 15087), 'operator.attrgetter', 'attrgetter', (['metadata_name'], {}), '(metadata_name)\n', (15072, 15087), False, 'from operator import attrgetter\n'), ((15329, 15354), 'operator.attrgetter', 'attrgetter', (['metadata_name'], {}), '(metadata_name)\n', (15339, 15354), False, 'from operator import attrgetter\n'), ((16676, 16716), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (["df[['increase', 'time']]"], {}), "(df[['increase', 'time']])\n", (16690, 16716), False, 'from kats.consts import TimeSeriesData\n'), ((16798, 16838), 'kats.consts.TimeSeriesData', 'TimeSeriesData', (["df[['decrease', 'time']]"], {}), "(df[['decrease', 'time']])\n", (16812, 16838), False, 'from kats.consts import TimeSeriesData\n'), ((976, 1004), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.2)', '(30)'], {}), '(1, 0.2, 30)\n', (992, 1004), True, 'import numpy as np\n'), ((1006, 1036), 
'numpy.random.normal', 'np.random.normal', (['(1.5)', '(0.2)', '(30)'], {}), '(1.5, 0.2, 30)\n', (1022, 1036), True, 'import numpy as np\n'), ((1544, 1572), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.2)', '(50)'], {}), '(1, 0.2, 50)\n', (1560, 1572), True, 'import numpy as np\n'), ((1574, 1604), 'numpy.random.normal', 'np.random.normal', (['(0.5)', '(0.2)', '(10)'], {}), '(0.5, 0.2, 10)\n', (1590, 1604), True, 'import numpy as np\n'), ((3742, 3768), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0)', '(30)'], {}), '(1, 0, 30)\n', (3758, 3768), True, 'import numpy as np\n'), ((3770, 3796), 'numpy.random.normal', 'np.random.normal', (['(2)', '(0)', '(30)'], {}), '(2, 0, 30)\n', (3786, 3796), True, 'import numpy as np\n'), ((7137, 7157), 'operator.attrgetter', 'attrgetter', (['llr_name'], {}), '(llr_name)\n', (7147, 7157), False, 'from operator import attrgetter\n'), ((9117, 9134), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (9132, 9134), True, 'import numpy as np\n'), ((9326, 9343), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (9341, 9343), True, 'import numpy as np\n'), ((14808, 14833), 'operator.attrgetter', 'attrgetter', (['metadata_name'], {}), '(metadata_name)\n', (14818, 14833), False, 'from operator import attrgetter\n'), ((16290, 16318), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.2)', '(30)'], {}), '(1, 0.2, 30)\n', (16306, 16318), True, 'import numpy as np\n'), ((16320, 16350), 'numpy.random.normal', 'np.random.normal', (['(1.5)', '(0.2)', '(30)'], {}), '(1.5, 0.2, 30)\n', (16336, 16350), True, 'import numpy as np\n'), ((16436, 16464), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.2)', '(50)'], {}), '(1, 0.2, 50)\n', (16452, 16464), True, 'import numpy as np\n'), ((16466, 16496), 'numpy.random.normal', 'np.random.normal', (['(0.5)', '(0.2)', '(10)'], {}), '(0.5, 0.2, 10)\n', (16482, 16496), True, 'import numpy as np\n'), ((6061, 6086), 'operator.attrgetter', 'attrgetter', 
(['metadata_name'], {}), '(metadata_name)\n', (6071, 6086), False, 'from operator import attrgetter\n'), ((13177, 13196), 'operator.attrgetter', 'attrgetter', (['cp_name'], {}), '(cp_name)\n', (13187, 13196), False, 'from operator import attrgetter\n'), ((14530, 14555), 'operator.attrgetter', 'attrgetter', (['metadata_name'], {}), '(metadata_name)\n', (14540, 14555), False, 'from operator import attrgetter\n')] |
__author__ = 'chenkovsky'
import pandas as pd
import numpy as np
from . import knn
from sklearn.neighbors import KDTree
class TestRecommender:
def setUp(self):
data = {1: {1: 3.0, 2: 4.0, 3: 3.5, 4: 5.0, 5: 3.0},
2: {1: 3.0, 2: 4.0, 3: 2.0, 4: 3.0, 5: 3.0, 6: 2.0},
3: {2: 3.5, 3: 2.5, 4: 4.0, 5: 4.5, 6: 3.0},
4: {1: 2.5, 2: 3.5, 3: 2.5, 4: 3.5, 5: 3.0, 6: 3.0},
5: {2: 4.5, 3: 1.0, 4: 4.0},
6: {1: 3.0, 2: 3.5, 3: 3.5, 4: 5.0, 5: 3.0, 6: 1.5},
7: {1: 2.5, 2: 3.0, 4: 3.5, 5: 4.0}}
df =pd.DataFrame(data)
m = np.matrix(df)
m = m.transpose()
self.matrix = np.nan_to_num(m)
def testUserBasedKNNRecommender(self):
rec = knn.UserBasedKNNRecommender(self.matrix)
assert(rec.recommend(4)[0] == [4,0,5])
def testUserBasedKNNRecommenderLazy(self):
rec = knn.UserBasedKNNRecommender(self.matrix,lazy = True)
assert(rec.recommend(4)[0] == [4,0,5])
def testUserBasedKNNRecommenderKDTree(self):
rec = knn.UserBasedKNNRecommender(self.matrix,lazy = True, kdt = KDTree(self.matrix, metric= 'euclidean'))
assert(rec.recommend(4)[0] == [4,0,5])
def testItemBasedKNNRecommender(self):
rec = knn.ItemBasedKNNRecommender(self.matrix)
assert(rec.recommend(4)[0] == [4,0,5])
def testItemBasedKNNRecommenderLazy(self):
rec = knn.ItemBasedKNNRecommender(self.matrix, lazy = True)
assert(rec.recommend(4)[0] == [4,0,5]) | [
"pandas.DataFrame",
"numpy.matrix",
"sklearn.neighbors.KDTree",
"numpy.nan_to_num"
] | [((562, 580), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (574, 580), True, 'import pandas as pd\n'), ((593, 606), 'numpy.matrix', 'np.matrix', (['df'], {}), '(df)\n', (602, 606), True, 'import numpy as np\n'), ((655, 671), 'numpy.nan_to_num', 'np.nan_to_num', (['m'], {}), '(m)\n', (668, 671), True, 'import numpy as np\n'), ((1100, 1139), 'sklearn.neighbors.KDTree', 'KDTree', (['self.matrix'], {'metric': '"""euclidean"""'}), "(self.matrix, metric='euclidean')\n", (1106, 1139), False, 'from sklearn.neighbors import KDTree\n')] |
import os
import numpy as np
import shutil
from vipy.globals import print
from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl
from vipy.image import Image
from vipy.show import savefig
from collections import defaultdict
import time
import PIL
import vipy.video
import webbrowser
import pathlib
import html
def montage(imlist, imgheight, imgwidth, gridrows=None, gridcols=None, aspectratio=1, crop=False, skip=True, border=1, border_bgr=(128,128,128), do_flush=False, verbose=False):
    """Create a montage image from the provided list of vipy.image.Image objects.

    Args:
        imlist: [list, tuple] iterable of vipy.image.Image objects which is used to montage rowwise
        imgheight: [int] The height of each individual image in the grid
        imgwidth: [int] the width of each individual image in the grid
        gridrows: [int] The number of images per column. Together with gridcols this defines the montage shape.
        gridcols: [int] The number of images per row. Together with gridrows this defines the montage shape.
        aspectratio: [float]. This is an optional parameter which defines the shape of the montage as (gridcols/gridrows) without specifying the gridrows, gridcols input
        crop: [bool] If true, the vipy.image.Image objects should call crop(), which will trigger a load
        skip: [bool] Whether images should be skipped on failure to load(), useful for lazy downloading
        border: [int] a border of size in pixels surrounding each image in the grid
        border_bgr [tuple (r,g,b)]: the border color in a bgr color tuple (b, g, r) in [0,255], uint8
        do_flush: [bool] flush the loaded images as garbage collection for large montages
        verbose: [bool] display optional verbose messages

    Returns:
        A vipy.image.Image montage which is of size (gridrows*(imgheight + 2*border), gridcols*(imgwidth+2*border))

    Raises:
        AssertionError: if imlist is empty
    """
    # Guard against an empty list up front (consistent with videomontage); without this
    # the imlist[0].colorspace() call at the end would raise an opaque IndexError.
    assert len(imlist) > 0, "Invalid input"

    (m, n) = (imgheight, imgwidth)
    (rows, cols) = (gridrows, gridcols)
    n_imgs = len(imlist)

    # Default grid: smallest square that fits all images, optionally reshaped by aspectratio.
    # Explicit gridrows/gridcols are honored only when aspectratio is 1 or None.
    M = int(np.ceil(np.sqrt(n_imgs)))
    N = M
    if aspectratio != 1 and aspectratio is not None:
        x = int(round((aspectratio * N - M) / (1 + aspectratio)))
        N = N - x
        M = M + x
    elif rows is not None and cols is not None:
        N = rows
        M = cols

    # Blank montage canvas filled with the border color
    size = (M * m + ((M + 1) * border), N * n + ((N + 1) * border))
    bc = border_bgr
    img_montage = np.array(PIL.Image.new(mode='RGB', size=size, color=bc))

    k = 0
    for j in range(N):
        for i in range(M):
            if k >= n_imgs:
                break
            sliceM, sliceN = i * (m + border) + border, j * (n + border) + border
            try:
                if crop:
                    if imlist[k].bbox.valid() is False:
                        print('[vipy.visualize.montage] invalid bounding box "%s" ' % str(imlist[k].bbox))
                        if skip is False:
                            print('[vipy.visualize.montage] using original image')
                            im = imlist[k].rgb().resize(n, m).array()
                        else:
                            # Route this image to the exception handler below so it is skipped.
                            # (Previously a bare `raise` here, which triggered an unrelated
                            # RuntimeError("No active exception to re-raise").)
                            raise ValueError('invalid bounding box "%s"' % str(imlist[k].bbox))
                    else:
                        im = imlist[k].rgb().crop().resize(n, m).array()
                else:
                    im = imlist[k].rgb().resize(n, m).array()
                img_montage[sliceN:sliceN + n, sliceM:sliceM + m] = im
            except KeyboardInterrupt:
                raise
            except Exception as exception:
                print('[vipy.visualize.montage][%d/%d]: skipping "%s"' % (k+1, len(imlist), str(imlist[k])))
                if skip:
                    print('[vipy.visualize.montage][%d/%d]: "%s"' % (k+1, len(imlist), str(exception)))
                else:
                    raise
            if do_flush:
                imlist[k].clone(flush=True)  # free pixel buffers for large montages
            if verbose and ((k % 100) == 0):
                print('[vipy.visualize.montage][%d/%d] processing...' % (k, n_imgs))
            k += 1

    if k == 0:
        print('[vipy.visualize.montage] Warning: No images were processed')
    return Image(array=img_montage, colorspace=imlist[0].colorspace())
def videomontage(vidlist, imgheight, imgwidth, gridrows=None, gridcols=None, aspectratio=1, crop=False, skip=True, border=1, border_bgr=(128,128,128), do_flush=False, verbose=True, framerate=30.0, max_duration=None):
    """Generate a video montage by building an image montage for every frame of the inputs.
    Args:
        `vipy.visualize.montage`: See the args
        framerate: [float] frame rate of the output montage; every input video is resampled to it
        max_duration: [float] if not None, the maximum duration (seconds) of any element before it cycles
    Returns:
        A `vipy.video.Video` with each input tiled into a montage. <Like https://www.youtube.com/watch?v=HjNa7_T-Xkc>
    .. warning::
        - Every video is loaded into memory, so be careful with large montages!
        - Without max_duration, one very long element makes the whole montage that long
    """
    assert len(vidlist) > 0, "Invalid input"
    assert max_duration is None or max_duration > 0
    assert framerate > 0
    if verbose:
        print('[vipy.visualize.videomontage]: Loading %d videos' % len(vidlist))
    # Resampling to the shared framerate must happen before load; then load into memory
    loaded = [v.framerate(framerate).load() for v in vidlist]
    if max_duration is None:
        num_frames = max(len(v) for v in loaded)
    else:
        num_frames = int(round(max_duration * framerate))
    if verbose:
        print('[vipy.visualize.videomontage]: Maximum video length (frames) = %d' % (num_frames))
    # FIXME: use stream here:
    #   with Video(outfile).stream(write=True) as s:
    #       s.write(montage(...))
    frames = []
    for k in range(0, num_frames):
        # Short clips cycle (k % len(v)); every tile is resized and center-cropped to the cell size
        tiles = [v[k % len(v)].mindim(max(imgheight, imgwidth)).centercrop(imgheight, imgwidth) for v in loaded]
        frames.append(montage(tiles, imgheight, imgwidth, gridrows, gridcols, aspectratio, crop, skip, border, border_bgr, do_flush, verbose=False))
    return vipy.video.Video(array=np.stack([im.array() for im in frames]), colorspace='rgb', framerate=framerate)
def urls(urllist, title='URL Visualization', imagewidth=1024, outfile=None, display=False):
    """Given a list of public image URLs, create a stand-alone HTML page to show them all.
    Args:
        urllist: [list] A list of urls to display
        title: [str] The title of the html file
        imagewidth: [int] The size of the images in the page
        outfile: [str] The path to the output html file
        display: [bool] open the html file in the default system viewer when complete
    Returns:
        The filename of the generated html page
    """
    # BUGFIX: validate the elements of urllist; the previous code iterated over the
    # function object `urls` itself ("for url in urls"), which raised TypeError.
    assert all([isurl(url) for url in urllist])
    # Create summary page to show precomputed images
    k_divid = 0
    filename = outfile if outfile is not None else temphtml()
    f = open(filename,'w')
    f.write('<!DOCTYPE html>\n')
    f.write('<html>\n')
    f.write('<body>\n')
    f.write('<div id="container" style="width:2400px">\n')
    f.write('<div id="header">\n')
    f.write('<h1 style="margin-bottom:0;">Title: %s</h1><br>\n' % title)
    localtime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
    f.write('Summary HTML generated on %s<br>\n' % localtime)
    f.write('Number of URLs: %d<br>\n' % len(urllist))
    f.write('</div>\n')
    f.write('<br>\n')
    f.write('<hr>\n')
    f.write('<div id="%04d" style="float:left;">\n' % k_divid)
    k_divid = k_divid + 1
    # Generate one lazily-loaded <img> entry per URL
    for url in urllist:
        f.write('<p>\n</p>\n')
        f.write('URL: <a href="%s">%s</a>\n' % (url, url))
        f.write('<br>\n')
        f.write('<img src="%s" alt="image" width=%d loading="lazy">\n' % (url, imagewidth))
        f.write('<p>\n</p>\n')
        f.write('<hr>\n')
        f.write('<p>\n</p>\n')
    f.write('</div>\n')
    f.write('</body>\n')
    f.write('</html>\n')
    f.close()
    # Optionally open the result in the default browser
    if display:
        url = pathlib.Path(filename).as_uri()
        print('[vipy.visualize.urls]: Opening "%s" in default browser' % url)
        webbrowser.open(url)
    return filename
def tohtml(imlist, imdict=None, title='Image Visualization', mindim=1024, outfile=None, display=False):
    """Given a list of vipy.image.Image objects, show the images along with the dictionary contents of imdict (one per image) in a single standalone HTML file
    Args:
        imlist: [list `vipy.image.Image`]
        imdict: [list of dict] An optional list of dictionaries, such that each dictionary is visualized per image
        title: [str] The title of the html file
        mindim: [int] The minimum dimension each image is resized to before embedding
        outfile: [str] The path to the output html file
        display: [bool] open the html file in the default system viewer when complete
    Returns:
        An html file in outfile that contains all the images as a standalone embedded file (no links or external files).
    """
    assert all([isinstance(im, vipy.image.Image) for im in imlist])
    assert imdict is None or (len(imdict) == len(imlist) and isinstance(imdict[0], dict)), "imdict must be one dictionary per image"
    # Create summary page to show precomputed images
    k_divid = 0
    filename = outfile if outfile is not None else temphtml()
    f = open(filename,'w')
    f.write('<!DOCTYPE html>\n')
    f.write('<html>\n')
    f.write('<body>\n')
    f.write('<div id="container" style="width:2400px">\n')
    f.write('<div id="header">\n')
    f.write('<h1 style="margin-bottom:0;">%s</h1><br>\n' % title)
    localtime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
    f.write('Summary HTML generated on %s<br>\n' % localtime)
    f.write('Number of Images: %d<br>\n' % len(imlist))
    f.write('</div>\n')
    f.write('<br>\n')
    f.write('<hr>\n')
    f.write('<div id="%04d" style="float:left;">\n' % k_divid)
    k_divid = k_divid + 1
    # Generate images and html
    for (k, im) in enumerate(imlist):
        # Write out associated dictionary (if provided)
        f.write('<p>\n</p>\n')
        if imdict is not None:
            # BUGFIX: the inner loop previously rebound `k` (the enumerate index),
            # shadowing it; use distinct names so the index stays intact.
            for (key, value) in imdict[k].items():
                f.write('<b>%s</b>: %s<br>\n' % (html.escape(str(key)), html.escape(str(value))))
            f.write('<br>\n')
        # Write image as base64 encoded string
        im = im.load().mindim(mindim)
        f.write(im.html())  # base-64 encoded image with img tag
        f.write('<p>\n</p>\n')
        f.write('<hr>\n')
        f.write('<p>\n</p>\n')
    f.write('</div>\n')
    f.write('</body>\n')
    f.write('</html>\n')
    f.close()
    # Optionally open the result in the default browser
    if display:
        url = pathlib.Path(filename).as_uri()
        print('[vipy.visualize.tohtml]: Opening "%s" in default browser' % url)
        webbrowser.open(url)
    return filename
def imagelist(list_of_image_files, outdir, title='Image Visualization', imagewidth=64):
    """Given a list of image filenames wth absolute paths, copy to outdir, and create an index.html file that visualizes each.
    Args:
        list_of_image_files: [list] absolute paths to existing image files
        outdir: [str] output directory (created if missing); receives copies and index.html
        title: [str] The title of the html file
        imagewidth: [int] width in pixels of each displayed image
    Returns:
        The path to the generated index.html
    """
    # FIXME: should this just call tohtml?
    # Validate input up front so we fail before creating a partial index.html
    # (previously this ran after the header had already been written). Also
    # renamed the comprehension variable: it used to shadow the file handle `f`.
    assert all([os.path.exists(p) and vipy.util.isimg(p) for p in list_of_image_files])
    k_divid = 0
    # Create summary page to show precomputed images
    outdir = remkdir(outdir)
    filename = os.path.join(remkdir(outdir), 'index.html')
    f = open(filename,'w')
    f.write('<!DOCTYPE html>\n')
    f.write('<html>\n')
    f.write('<body>\n')
    f.write('<div id="container" style="width:2400px">\n')
    f.write('<div id="header">\n')
    f.write('<h1 style="margin-bottom:0;">Title: %s</h1><br>\n' % title)
    localtime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
    f.write('Summary HTML generated on %s<br>\n' % localtime)
    f.write('Number of Images: %d<br>\n' % len(list_of_image_files))
    f.write('</div>\n')
    f.write('<br>\n')
    f.write('<hr>\n')
    f.write('<div id="%04d" style="float:left;">\n' % k_divid)
    k_divid = k_divid + 1
    # Copy each image next to index.html and reference it by its basename
    for (k, imsrc) in enumerate(list_of_image_files):
        shutil.copyfile(imsrc, os.path.join(outdir, filetail(imsrc)))
        imdst = filetail(imsrc)
        f.write('<p>\n</p>\n')
        f.write('<b>Filename: %s</b><br>\n' % imdst)
        f.write('<br>\n')
        f.write('<img src="%s" alt="image" width=%d loading="lazy">\n' % (imdst, imagewidth))
        f.write('<p>\n</p>\n')
        f.write('<hr>\n')
        f.write('<p>\n</p>\n')
    f.write('</div>\n')
    f.write('</body>\n')
    f.write('</html>\n')
    f.close()
    return filename
def imagetuplelist(list_of_tuples_of_image_files, outdir, title='Image Visualization', imagewidth=64):
    """Like `imagelist`, but renders each tuple of images on a single row of the page."""
    divid = 0
    # Summary page that shows the precomputed images
    outdir = remkdir(outdir)
    filename = os.path.join(remkdir(outdir), 'index.html')
    with open(filename, 'w') as f:
        f.writelines(['<!DOCTYPE html>\n', '<html>\n', '<body>\n'])
        f.write('<div id="container" style="width:2400px">\n')
        f.write('<div id="header">\n')
        f.write('<h1 style="margin-bottom:0;">Title: %s</h1><br>\n' % title)
        localtime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
        f.write('Summary HTML generated on %s<br>\n' % localtime)
        f.write('Number of Tuples: %d<br>\n' % len(list_of_tuples_of_image_files))
        f.writelines(['</div>\n', '<br>\n', '<hr>\n'])
        f.write('<div id="%04d" style="float:left;">\n' % divid)
        divid = divid + 1
        # One row per tuple: first the filenames, then the images side by side
        for imsrclist in list_of_tuples_of_image_files:
            f.write('<p>\n</p>\n')
            for imsrc in imsrclist:
                # Copy each image next to index.html and reference it by basename
                shutil.copyfile(imsrc, os.path.join(outdir, filetail(imsrc)))
                f.write('<b>Filename: %s</b><br>\n' % filetail(imsrc))
            f.write('<p>\n</p>\n')
            f.write('<br>\n')
            for imsrc in imsrclist:
                f.write('<img src="%s" alt="image" width=%d loading="lazy">' % (filetail(imsrc), imagewidth))
            f.write('\n<p>\n</p>\n')
            f.write('<hr>\n')
            f.write('<p>\n</p>\n')
        f.writelines(['</div>\n', '</body>\n', '</html>\n'])
    return filename
| [
"vipy.util.filetail",
"PIL.Image.new",
"webbrowser.open",
"os.path.exists",
"vipy.util.temphtml",
"vipy.util.isurl",
"time.time",
"pathlib.Path",
"vipy.util.remkdir",
"vipy.globals.print",
"numpy.sqrt"
] | [((11536, 11551), 'vipy.util.remkdir', 'remkdir', (['outdir'], {}), '(outdir)\n', (11543, 11551), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((13165, 13180), 'vipy.util.remkdir', 'remkdir', (['outdir'], {}), '(outdir)\n', (13172, 13180), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((2486, 2532), 'PIL.Image.new', 'PIL.Image.new', ([], {'mode': '"""RGB"""', 'size': 'size', 'color': 'bc'}), "(mode='RGB', size=size, color=bc)\n", (2499, 2532), False, 'import PIL\n'), ((4092, 4159), 'vipy.globals.print', 'print', (['"""[vipy.visualize.montage] Warning: No images were processed"""'], {}), "('[vipy.visualize.montage] Warning: No images were processed')\n", (4097, 4159), False, 'from vipy.globals import print\n'), ((5863, 5954), 'vipy.globals.print', 'print', (["('[vipy.visualize.videomontage]: Maximum video length (frames) = %d' %\n max_length)"], {}), "('[vipy.visualize.videomontage]: Maximum video length (frames) = %d' %\n max_length)\n", (5868, 5954), False, 'from vipy.globals import print\n'), ((7162, 7172), 'vipy.util.temphtml', 'temphtml', ([], {}), '()\n', (7170, 7172), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((8344, 8413), 'vipy.globals.print', 'print', (['(\'[vipy.visualize.urls]: Opening "%s" in default browser\' % url)'], {}), '(\'[vipy.visualize.urls]: Opening "%s" in default browser\' % url)\n', (8349, 8413), False, 'from vipy.globals import print\n'), ((8422, 8442), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (8437, 8442), False, 'import webbrowser\n'), ((9648, 9658), 'vipy.util.temphtml', 'temphtml', ([], {}), '()\n', (9656, 9658), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((11045, 11116), 'vipy.globals.print', 'print', (['(\'[vipy.visualize.tohtml]: 
Opening "%s" in default browser\' % url)'], {}), '(\'[vipy.visualize.tohtml]: Opening "%s" in default browser\' % url)\n', (11050, 11116), False, 'from vipy.globals import print\n'), ((11125, 11145), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (11140, 11145), False, 'import webbrowser\n'), ((11580, 11595), 'vipy.util.remkdir', 'remkdir', (['outdir'], {}), '(outdir)\n', (11587, 11595), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((12513, 12528), 'vipy.util.filetail', 'filetail', (['imsrc'], {}), '(imsrc)\n', (12521, 12528), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((13209, 13224), 'vipy.util.remkdir', 'remkdir', (['outdir'], {}), '(outdir)\n', (13216, 13224), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((2106, 2121), 'numpy.sqrt', 'np.sqrt', (['n_imgs'], {}), '(n_imgs)\n', (2113, 2121), True, 'import numpy as np\n'), ((7008, 7018), 'vipy.util.isurl', 'isurl', (['url'], {}), '(url)\n', (7013, 7018), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((7513, 7524), 'time.time', 'time.time', ([], {}), '()\n', (7522, 7524), False, 'import time\n'), ((9992, 10003), 'time.time', 'time.time', ([], {}), '()\n', (10001, 10003), False, 'import time\n'), ((11951, 11962), 'time.time', 'time.time', ([], {}), '()\n', (11960, 11962), False, 'import time\n'), ((13580, 13591), 'time.time', 'time.time', ([], {}), '()\n', (13589, 13591), False, 'import time\n'), ((14149, 14164), 'vipy.util.filetail', 'filetail', (['imsrc'], {}), '(imsrc)\n', (14157, 14164), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((14331, 14346), 'vipy.util.filetail', 'filetail', (['imsrc'], {}), '(imsrc)\n', (14339, 14346), False, 'from vipy.util import 
remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((3979, 4047), 'vipy.globals.print', 'print', (["('[vipy.visualize.montage][%d/%d] processing...' % (k, n_imgs))"], {}), "('[vipy.visualize.montage][%d/%d] processing...' % (k, n_imgs))\n", (3984, 4047), False, 'from vipy.globals import print\n'), ((8304, 8326), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (8316, 8326), False, 'import pathlib\n'), ((11005, 11027), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (11017, 11027), False, 'import pathlib\n'), ((12301, 12318), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (12315, 12318), False, 'import os\n'), ((12479, 12494), 'vipy.util.filetail', 'filetail', (['imsrc'], {}), '(imsrc)\n', (12487, 12494), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((14111, 14126), 'vipy.util.filetail', 'filetail', (['imsrc'], {}), '(imsrc)\n', (14119, 14126), False, 'from vipy.util import remkdir, imlist, filetail, istuple, islist, isnumpy, filebase, temphtml, isurl\n'), ((3001, 3055), 'vipy.globals.print', 'print', (['"""[vipy.visualize.montage] using original image"""'], {}), "('[vipy.visualize.montage] using original image')\n", (3006, 3055), False, 'from vipy.globals import print\n')] |
from KernelMatrix import kernelmatrix
import numpy as np
def regularizedkernlstrain(xtr, ytr, kernel, sigma, lambd):
    """
    Train a regularized kernel least-squares model.

    Input:
        xtr: training input
        ytr: training output
        kernel: type of kernel ('linear', 'polynomial', 'gaussian')
        sigma: kernel parameter (e.g. bandwidth of the gaussian kernel)
        lambd: regularization parameter
    Output:
        c: model weights

    Example of usage:
        from regularizationNetworks import regularizedKernLSTrain
        c = regularizedKernLSTrain.regularizedKernLSTrain(Xtr, Ytr, 'gaussian', 1, 1e-1);
    """
    n_samples = xtr.shape[0]
    # Gram matrix of the training data under the chosen kernel
    gram = kernelmatrix(xtr, xtr, sigma, kernel)
    # Solve (K + lambda*n*I) c = y via the pseudo-inverse
    regularized = gram + lambd * n_samples * np.identity(n_samples)
    return np.dot(np.linalg.pinv(regularized), ytr)
| [
"KernelMatrix.kernelmatrix",
"numpy.identity"
] | [((543, 580), 'KernelMatrix.kernelmatrix', 'kernelmatrix', (['xtr', 'xtr', 'sigma', 'kernel'], {}), '(xtr, xtr, sigma, kernel)\n', (555, 580), False, 'from KernelMatrix import kernelmatrix\n'), ((627, 641), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (638, 641), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
charm-cli.py: Simple command line interface for CHarm.
"""
import argparse
import logging
try:
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', **{'sans-serif': 'DejaVu Sans',
'serif': 'DejaVu Serif',
'family': 'sans-serif'})
import matplotlib.pyplot
except ImportError as e:
print('ERROR: {}'.format(e.msg))
exit(1)
try:
import numpy
except ImportError as e:
print('ERROR: {}'.format(e.msg))
exit(1)
try:
from LibCharm.Sequence import Sequence
from LibCharm import IO
except ImportError as e:
print('ERROR: {}'.format(e.msg))
exit(1)
def autolabel(rects, ax, labels, vertical=True):
    """
    Attach a small text label above each bar of a bar graph.
    :param rects: list of bars to be labelled (e.g. generated by ax.bar())
    :param ax: axis (axis object from matplotlib)
    :param labels: list of labels, one per bar; nothing is drawn on a length mismatch
    :param vertical: rotate the labels by 90 degrees if true
    """
    rotation = 'vertical' if vertical else 'horizontal'
    # Silently skip when labels and bars do not pair up one-to-one
    if len(labels) != len(rects):
        return
    # Tallest bar; used to place labels of non-positive bars just above the baseline
    max_height = max(rect.get_height() for rect in rects)
    for idx, rect in enumerate(rects):
        height = rect.get_height()
        y = 1.05 * height if height > 0 else 0.02 * max_height
        ax.text(rect.get_x() + rect.get_width() / 2., y, str(labels[idx]),
                ha='center', va='bottom', rotation=rotation, size='x-small')
def plot_codon_usage(sequence, ax):
    """
    Draw the per-codon usage of the origin and the target organism as paired bars.
    :param sequence: LibCharm.Sequence object
    :param ax: matplotlib axis object
    """
    codons = sequence.codons
    positions = numpy.arange(len(codons))
    bar_width = 0.5
    # Collect usage values and residue labels in one pass per series
    origin_f = numpy.array([c['origin_f'] for c in codons])
    target_f = numpy.array([c['target_f'] for c in codons])
    xlabels = [c['aa'] for c in codons]
    # Origin organism in blue, host organism in red (host bars shifted right)
    p1 = ax.bar(positions, origin_f, color='b', width=bar_width)
    p2 = ax.bar(positions + (0.5 * bar_width), target_f, color='r', width=bar_width)
    # Hide the top and right spines
    for side in ('right', 'top'):
        ax.spines[side].set_visible(False)
    # Ticks point outwards, bottom/left only
    ax.tick_params(axis='both', which='both', direction='out')
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    # Center each residue label underneath its pair of bars
    ax.set_xticks(positions + bar_width / 2)
    ax.set_xticklabels(xlabels, **{'family': 'monospace'})
    ax.set_xlabel('amino acid')
    ax.legend((p1, p2), ('Origin organism', 'Host organism'), loc=2, bbox_to_anchor=(1, 1))
    # Horizontal line marking the lower usage threshold
    ax.hlines(sequence.lower_threshold, 0, len(positions), colors='k', linestyles='solid', **{'linewidth': 1})
    # Tick spacing and y label depend on whether frequencies or fractions are plotted
    if sequence.use_frequency:
        ax.set_ylabel('codon usage [frequency/1000]')
        major_locator = matplotlib.ticker.MultipleLocator(10)
        minor_locator = matplotlib.ticker.MultipleLocator(1)
    else:
        ax.set_ylabel('codon usage [fraction]')
        major_locator = matplotlib.ticker.MultipleLocator(0.1)
        minor_locator = matplotlib.ticker.MultipleLocator(0.01)
    ax.yaxis.set_major_locator(major_locator)
    ax.yaxis.set_minor_locator(minor_locator)
def plot_codon_usage_differences(sequence, ax):
    """
    Plot the difference in codon usage for origin and target host as bar graph.

    Bars whose usage difference exceeds the threshold are drawn in red, the
    rest in blue; each bar is annotated with its codon substitution
    ('original -> new') via autolabel().

    :param sequence: LibCharm.Sequence object
    :param ax: matplotlib axis object
    """
    # Generate a range of residues out of the length of the sequence array
    x1 = numpy.arange(len(sequence.codons))
    # Set the threshold according to use_frequency
    # (5 per-1000 in frequency mode, 0.2 fractional otherwise)
    if sequence.use_frequency:
        threshold = 5
    else:
        threshold = 0.2
    # Set width of bars
    bar_width = 0.8
    # Initialize array of labels for the x axis
    xlabels = []
    # Initialize arrays of data and labels for the bars
    df = []
    bar_labels = []
    # walk over the codons in sequence
    for c in sequence.codons:
        # add final_df to data array
        df.append(c['final_df'])
        # add residue to xlabels
        xlabels.append(c['aa'])
        # generate bar label ('original -> new' codon) and add to list
        label = u'{} → {}'.format(c['original'], c['new'])
        bar_labels.append(label)
    # convert lists to numpy arrays
    bar_labels = numpy.array(bar_labels)
    df = numpy.array(df)
    # find bars that exceed the threshold; the index tuples returned here are
    # used for fancy indexing so positions, heights and labels stay aligned
    mask1 = numpy.ma.where(df > threshold)
    mask2 = numpy.ma.where(df <= threshold)
    # plot and color bars accordingly (red = above threshold, blue = below)
    p1 = ax.bar(x1[mask1], df[mask1], color='r', width=bar_width)
    autolabel(p1, ax, bar_labels[mask1], vertical=True)
    p2 = ax.bar(x1[mask2], df[mask2], color='b', width=bar_width)
    autolabel(p2, ax, bar_labels[mask2], vertical=True)
    # hide top and right axis
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.tick_params(axis='both', which='both', direction='out')
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    # set x axis labels to be centered and to use a monospaced font
    ax.set_xticks(x1 + bar_width / 2)
    ax.set_xticklabels(xlabels, **{'family': 'monospace'})
    ax.set_xlabel('amino acid')
    ax.set_ylabel(r'Differential codon usage $f_{origin} - f_{host}$')
    # tick spacing matches the scale of the plotted values
    if not sequence.use_frequency:
        major_locator = matplotlib.ticker.MultipleLocator(0.05)
        minor_locator = matplotlib.ticker.MultipleLocator(0.01)
    else:
        major_locator = matplotlib.ticker.MultipleLocator(10)
        minor_locator = matplotlib.ticker.MultipleLocator(1)
    ax.legend((p1, p2), (u'Δf > {}'.format(threshold), u'Δf ≤ {}'.format(threshold)), loc=2, bbox_to_anchor=(1, 1))
    ax.yaxis.set_major_locator(major_locator)
    ax.yaxis.set_minor_locator(minor_locator)
    # dotted horizontal line marking the threshold itself
    ax.hlines(threshold, 0, len(x1), colors='k', linestyles='dotted', **{'linewidth': 1})
def plot(sequence, prefix=None):
    """
    Render both codon-usage plots for `sequence` into one SVG file.
    :param sequence: LibCharm.Sequence object
    :param prefix: if given, output file is '<prefix>_charm_results.svg'
    """
    filename = '{}_charm_results.svg'.format(prefix) if prefix else 'charm_results.svg'
    # One figure, two stacked subplots: absolute usage on top, differences below
    fig, axarr = matplotlib.pyplot.subplots(2, figsize=(50, 20), dpi=300)
    plot_codon_usage(sequence, axarr[0])
    plot_codon_usage_differences(sequence, axarr[1])
    # Persist as SVG
    matplotlib.pyplot.savefig(filename, format='svg', orientation='landscape', papertype='a4')
def parse_arguments():
    """
    Define and parse the command line interface.
    :return: the parsed argparse namespace
    """
    cli = argparse.ArgumentParser()
    # Optional flags and settings
    cli.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
    cli.add_argument('-p', '--prefix', type=str, help='prefix for output files')
    cli.add_argument('-f', '--frequency', action='store_true', help='use frequency/1000 instead of fraction')
    cli.add_argument('-l', '--lower_frequency_alternative', action='store_true',
                     help='if two codons result in the same difference in codon usage '
                          'between origin and target host, use the lower frequency alternative')
    cli.add_argument('-t', '--threshold', type=float,
                     help='Lower threshold of codon usage. Defaults to 0.1 and 5 for fraction and '
                          'frequency respectively')
    cli.add_argument('-to', '--translation_table_origin', type=int,
                     help='id of translation table; Default is: standard genetic code = 1; '
                          'id corresponds to \'trans_table\' '
                          'on http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi')
    cli.add_argument('-th', '--translation_table_host', type=int,
                     help='id of translation table; Default is: standard genetic code = 1; '
                          'id corresponds to \'trans_table\' '
                          'on http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi')
    # Required positional arguments
    cli.add_argument('origin', type=int, help='species id of origin organism taken from '
                                              '\'http://www.kazusa.or.jp/codon\' (e.g. \'83333\' for E. coli K12)')
    cli.add_argument('host', type=int, help='species id of host organism taken from '
                                            '\'http://www.kazusa.or.jp/codon\' (e.g. \'83333\' for E. coli K12)')
    cli.add_argument('input', type=str, help='input file in FASTA format')
    return cli.parse_args()
def initialize_logger(prefix):
    """
    Initialization of logging subsystem. Two logging handlers are brought up:
    'fh' which logs to a log file and 'ch' which logs to standard output.
    :param prefix: prefix that is added to the log filename (may be None)
    :return logger: return a logger instance
    """
    logger = logging.getLogger('charm-cli')
    logger.setLevel(logging.INFO)
    # Console handler: always available
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    logger.addHandler(ch)
    # File handler: best effort -- a read-only working directory must not crash the CLI
    try:
        if prefix:
            log_filename = '{}_charm-cli.log'.format(prefix)
        else:
            log_filename = 'charm-cli.log'
        fh = logging.FileHandler(log_filename, 'w')
        fh.setLevel(logging.INFO)
        logger.addHandler(fh)
    except IOError as error:
        logger.warning('WARNING: Cannot create log file! Run charm-cli from a directory to '
                       'which you have write access.')
        # BUGFIX: IOError/OSError has no `.msg` attribute -- accessing it raised
        # AttributeError inside this handler. Log the exception text instead.
        logger.warning(str(error))
    return logger
def main():
    """
    Main entry point of charm-cli.py: parse the CLI arguments, harmonize the
    input sequence for the target host, log a human-readable summary and
    render the codon-usage plots.
    """
    # Parse command line arguments
    args = parse_arguments()
    # Initialize logging
    logger = initialize_logger(args.prefix)
    # Set translation tables according to user input. Defaults to standard genetic code (table 1)
    if args.translation_table_origin:
        translation_table_origin = args.translation_table_origin
    else:
        translation_table_origin = 1
    if args.translation_table_host:
        translation_table_host = args.translation_table_host
    else:
        translation_table_host = 1
    # set threshold if provided by the user and otherwise fall back to defaults
    if args.threshold:
        lower_threshold = args.threshold
    elif args.frequency:
        lower_threshold = 5
    else:
        lower_threshold = 0.1
    # initialize Sequence object with user provided input
    sequence = Sequence(IO.load_file(args.input), args.origin, args.host,
                        translation_table_origin=translation_table_origin,
                        translation_table_host=translation_table_host,
                        use_frequency=args.frequency,
                        lower_threshold=lower_threshold,
                        lower_alternative=args.lower_frequency_alternative)
    # harmonize the provided sequence
    harmonized_codons = sequence.get_harmonized_codons()
    # check if input and output sequence are identical
    verify_sequence = sequence.verify_harmonized_sequence()
    # log summary to standard output and log file
    logger.info('SUMMARY:\n')
    if verify_sequence:
        text = 'Success! Translation of harmonized and original sequence match:\n\n' \
               '{}\n'.format(sequence.harmonized_translated_sequence)
        logger.info(text)
    else:
        logger.error('ERROR: Translations of harmonized and original sequence DO NOT match!')
    logger.info('Harmonized codons: {}\n'.format(len(harmonized_codons)))
    # Count codons whose origin/host usage still differs by more than 20%
    # NOTE(review): 0.2 is the fraction-mode scale; in frequency mode the plots
    # use a threshold of 5 -- confirm whether this summary should do the same.
    df_above_thresh = 0
    for c in sequence.codons:
        if c['final_df'] > 0.2:
            df_above_thresh += 1
    if df_above_thresh > 0:
        logger.warning("WARNING: Difference in origin and target host codon usage of {} out of {} codons ({}%) exceeds 20%!\n".format(df_above_thresh,
                                                                                                                                       len(sequence.codons),
                                                                                                                                       round(df_above_thresh/len(sequence.codons)*100, 1)))
    else:
        logger.info("Differences of codon usage in origin and target host are within 20%.\n")
    table_header = '{:<10} {:^3} {:^4} {:^4} {:^7} {:>6} {:<7} {:>6}'.format('position', 'aa', 'orig', 'new',
                                                                             'initial', 'final', 'origin', 'target')
    logger.info(table_header)
    warnings = []
    # Iterate over all codons in the sequence and print some statistics and information
    for c in sequence.codons:
        if str(c['original']) != str(c['new']):
            # Codon was substituted: show old -> new codon and usage values
            line = '{:<10} {:^3} {:<4} -> {:<4} {:<5.2f} -> {:<3.2f} {:<5.2f} -> {:<3.2f}'.format(c['position'],
                                                                                                  c['aa'],
                                                                                                  c['original'],
                                                                                                  c['new'],
                                                                                                  c['initial_df'],
                                                                                                  c['final_df'],
                                                                                                  c['origin_f'],
                                                                                                  c['target_f'])
        else:
            # Codon unchanged
            line = '{:<10} {:^3} {:<12} {:<5.2f} {:<5.2f} -> {:<3.2f}'.format(c['position'],
                                                                              c['aa'],
                                                                              c['original'],
                                                                              c['initial_df'],
                                                                              c['origin_f'],
                                                                              c['target_f'])
        if c['ambiguous']:
            line += ' WARNING: Original codon is ambiguous!'
            warnings.append('Codon {} ({}) coding for {} is ambiguous! {} was chosen for the '
                            'harmonized sequence!'.format(c['position'],
                                                          c['original'],
                                                          c['aa'],
                                                          c['new']))
        logger.info(line)
    logger.info('\nCodon-harmonized sequence:\n\n{}'.format(sequence.harmonized_sequence))
    if warnings:
        # BUGFIX: Logger.warn is deprecated (Python 3.3+); use Logger.warning
        logger.warning('\nWARNINGS OCCURRED DURING HARMONIZATION:\n')
        for warning in warnings:
            logger.warning(warning)
    plot(sequence, args.prefix)
    # Exit gracefully
    exit(0)
if __name__ == "__main__":
main()
| [
"matplotlib.rc",
"LibCharm.IO.load_file",
"argparse.ArgumentParser",
"logging.FileHandler",
"numpy.ma.where",
"logging.StreamHandler",
"logging.getLogger",
"matplotlib.use",
"numpy.array",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((175, 196), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (189, 196), False, 'import matplotlib\n'), ((201, 308), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **{'sans-serif': 'DejaVu Sans', 'serif':\n 'DejaVu Serif', 'family': 'sans-serif'})\n", (214, 308), False, 'import matplotlib\n'), ((2322, 2343), 'numpy.array', 'numpy.array', (['origin_f'], {}), '(origin_f)\n', (2333, 2343), False, 'import numpy\n'), ((2359, 2380), 'numpy.array', 'numpy.array', (['target_f'], {}), '(target_f)\n', (2370, 2380), False, 'import numpy\n'), ((5094, 5117), 'numpy.array', 'numpy.array', (['bar_labels'], {}), '(bar_labels)\n', (5105, 5117), False, 'import numpy\n'), ((5127, 5142), 'numpy.array', 'numpy.array', (['df'], {}), '(df)\n', (5138, 5142), False, 'import numpy\n'), ((5197, 5227), 'numpy.ma.where', 'numpy.ma.where', (['(df > threshold)'], {}), '(df > threshold)\n', (5211, 5227), False, 'import numpy\n'), ((5240, 5271), 'numpy.ma.where', 'numpy.ma.where', (['(df <= threshold)'], {}), '(df <= threshold)\n', (5254, 5271), False, 'import numpy\n'), ((7081, 7137), 'matplotlib.pyplot.subplots', 'matplotlib.pyplot.subplots', (['(2)'], {'figsize': '(50, 20)', 'dpi': '(300)'}), '(2, figsize=(50, 20), dpi=300)\n', (7107, 7137), False, 'import matplotlib\n'), ((7286, 7380), 'matplotlib.pyplot.savefig', 'matplotlib.pyplot.savefig', (['filename'], {'format': '"""svg"""', 'orientation': '"""landscape"""', 'papertype': '"""a4"""'}), "(filename, format='svg', orientation='landscape',\n papertype='a4')\n", (7311, 7380), False, 'import matplotlib\n'), ((7493, 7518), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7516, 7518), False, 'import argparse\n'), ((9811, 9841), 'logging.getLogger', 'logging.getLogger', (['"""charm-cli"""'], {}), "('charm-cli')\n", (9828, 9841), False, 'import logging\n'), ((9886, 9909), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (9907, 9909), False, 'import logging\n'), 
((3430, 3468), 'matplotlib.ticker.MultipleLocator', 'matplotlib.ticker.MultipleLocator', (['(0.1)'], {}), '(0.1)\n', (3463, 3468), False, 'import matplotlib\n'), ((3493, 3532), 'matplotlib.ticker.MultipleLocator', 'matplotlib.ticker.MultipleLocator', (['(0.01)'], {}), '(0.01)\n', (3526, 3532), False, 'import matplotlib\n'), ((3757, 3794), 'matplotlib.ticker.MultipleLocator', 'matplotlib.ticker.MultipleLocator', (['(10)'], {}), '(10)\n', (3790, 3794), False, 'import matplotlib\n'), ((3819, 3855), 'matplotlib.ticker.MultipleLocator', 'matplotlib.ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (3852, 3855), False, 'import matplotlib\n'), ((6126, 6165), 'matplotlib.ticker.MultipleLocator', 'matplotlib.ticker.MultipleLocator', (['(0.05)'], {}), '(0.05)\n', (6159, 6165), False, 'import matplotlib\n'), ((6190, 6229), 'matplotlib.ticker.MultipleLocator', 'matplotlib.ticker.MultipleLocator', (['(0.01)'], {}), '(0.01)\n', (6223, 6229), False, 'import matplotlib\n'), ((6264, 6301), 'matplotlib.ticker.MultipleLocator', 'matplotlib.ticker.MultipleLocator', (['(10)'], {}), '(10)\n', (6297, 6301), False, 'import matplotlib\n'), ((6326, 6362), 'matplotlib.ticker.MultipleLocator', 'matplotlib.ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (6359, 6362), False, 'import matplotlib\n'), ((10126, 10164), 'logging.FileHandler', 'logging.FileHandler', (['log_filename', '"""w"""'], {}), "(log_filename, 'w')\n", (10145, 10164), False, 'import logging\n'), ((11384, 11408), 'LibCharm.IO.load_file', 'IO.load_file', (['args.input'], {}), '(args.input)\n', (11396, 11408), False, 'from LibCharm import IO\n')] |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import numpy as np
from .config import cfg
from ..nms.gpu_nms import gpu_nms
from ..nms.cpu_nms import cpu_nms
def nms(dets, thresh, force_cpu=False):
    """Run non-maximum suppression on ``dets``.

    Dispatches to the GPU implementation when ``cfg.USE_GPU_NMS`` is set and
    the caller has not forced CPU execution; otherwise uses the CPU version.

    :param dets: N x 5 array of [x1, y1, x2, y2, score] detections
    :param thresh: IoU overlap threshold for suppression
    :param force_cpu: when True, always use the CPU implementation
    :return: indices of the detections to keep (empty list for empty input)
    """
    # Nothing to suppress.
    if len(dets) == 0:
        return []
    use_gpu = cfg.USE_GPU_NMS and not force_cpu
    if use_gpu:
        return gpu_nms(dets, thresh, device_id=cfg.GPU_ID)
    return cpu_nms(dets, thresh)
def nms_wrapper(scores, boxes, threshold = 0.7, class_sets = None, nms_thresh = 0.3):
    """
    Post-process the results of im_detect: per-class NMS followed by score
    thresholding.

    :param scores: N * K numpy
    :param boxes: N * (K * 4) numpy
    :param threshold: minimum score a detection must exceed to be kept
    :param class_sets: e.g. CLASSES = ('__background__','person','bike','motorbike','car','bus')
    :param nms_thresh: IoU overlap threshold passed to NMS (was hard-coded to 0.3)
    :return: a list of K-1 dicts, no background, each is {'class': classname, 'dets': None | [[x1,y1,x2,y2,score],...]}
    """
    num_class = scores.shape[1] if class_sets is None else len(class_sets)
    assert num_class * 4 == boxes.shape[1],\
        'Detection scores and boxes dont match'
    class_sets = ['class_' + str(i) for i in range(0, num_class)] if class_sets is None else class_sets

    res = []
    for ind, cls in enumerate(class_sets[1:]):
        ind += 1  # skip background class at index 0
        cls_boxes = boxes[:, 4*ind : 4*(ind+1)]
        cls_scores = scores[:, ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, thresh=nms_thresh)
        dets = dets[keep, :]
        # Drop low-confidence detections; column 4 holds the score.
        dets = dets[dets[:, 4] > threshold]

        r = {}
        if dets.shape[0] > 0:
            r['class'], r['dets'] = cls, dets
        else:
            r['class'], r['dets'] = cls, None
        res.append(r)
    return res
return res | [
"numpy.where",
"numpy.hstack"
] | [((1683, 1715), 'numpy.where', 'np.where', (['(dets[:, 4] > threshold)'], {}), '(dets[:, 4] > threshold)\n', (1691, 1715), True, 'import numpy as np\n'), ((1528, 1577), 'numpy.hstack', 'np.hstack', (['(cls_boxes, cls_scores[:, np.newaxis])'], {}), '((cls_boxes, cls_scores[:, np.newaxis]))\n', (1537, 1577), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Test mapsequence functionality
"""
from __future__ import absolute_import
import numpy as np
import astropy.units as u
import sunpy
import sunpy.map
from sunpy.util.metadata import MetaDict
import pytest
import os
import sunpy.data.test
@pytest.fixture
def aia_map():
    """Load SunPy's test AIA image."""
    aia_file = os.path.join(sunpy.data.test.rootdir, "aia_171_level1.fits")
    return sunpy.map.Map(aia_file)
@pytest.fixture
def masked_aia_map(aia_map):
    """
    Put a simple mask in the test AIA image. A rectangular (not square) block
    of True values are included to test that operations on the mask respect how
    the mask is stored.
    """
    data = aia_map.data
    mask = np.zeros_like(data)
    # Mask a 2x3 (rows x columns) rectangular patch in the corner.
    mask[0:2, 0:3] = True
    masked_data = np.ma.masked_array(data, mask=mask)
    return sunpy.map.Map(masked_data, aia_map.meta)
@pytest.fixture
def mapsequence_all_the_same(aia_map):
    """Simple `sunpy.map.mapsequence` for testing."""
    maps = [aia_map, aia_map]
    return sunpy.map.Map(maps, sequence=True)
@pytest.fixture
def mapsequence_all_the_same_all_have_masks(masked_aia_map):
    """Simple `sunpy.map.mapsequence` for testing, in which all the maps have
    masks."""
    maps = [masked_aia_map, masked_aia_map]
    return sunpy.map.Map(maps, sequence=True)
@pytest.fixture
def mapsequence_all_the_same_some_have_masks(aia_map, masked_aia_map):
    """Simple `sunpy.map.mapsequence` for testing, in which at least some of the
    maps have masks."""
    maps = [masked_aia_map, masked_aia_map, aia_map]
    return sunpy.map.Map(maps, sequence=True)
@pytest.fixture
def mapsequence_different(aia_map):
    """ Mapsequence allows that the size of the image data in each map be
    different. This mapsequence contains such maps."""
    # Superpixel by (4, 4) pixels so the second map has a different shape
    # from the first.  Decorator made consistent with the other fixtures
    # in this module (bare ``@pytest.fixture`` instead of a call).
    return sunpy.map.Map([aia_map, aia_map.superpixel((4, 4)*u.pix)], sequence=True)
def test_all_maps_same_shape(mapsequence_all_the_same, mapsequence_different):
    """MapSequence should report whether every map shares the same shape."""
    uniform = mapsequence_all_the_same.all_maps_same_shape()
    mixed = mapsequence_different.all_maps_same_shape()
    assert uniform
    assert not mixed
def test_at_least_one_map_has_mask(mapsequence_all_the_same,
                                   mapsequence_all_the_same_all_have_masks,
                                   mapsequence_all_the_same_some_have_masks
                                   ):
    """Detect the presence of at least one masked map in a sequence."""
    # No masks anywhere -> no masked map detected.
    assert not mapsequence_all_the_same.at_least_one_map_has_mask()
    # Every map masked -> detected.
    assert mapsequence_all_the_same_all_have_masks.at_least_one_map_has_mask()
    # A single masked map among unmasked ones is enough -> detected.
    assert mapsequence_all_the_same_some_have_masks.at_least_one_map_has_mask()
def test_as_array(mapsequence_all_the_same,
                  mapsequence_different,
                  mapsequence_all_the_same_all_have_masks,
                  mapsequence_all_the_same_some_have_masks):
    """Make sure the data in the mapsequence returns correctly, when all the
    maps have the same shape. When they don't have the same shape, make
    sure an error is raised."""
    # Should raise a ValueError if the mapsequence has differently shaped maps in
    # it.
    with pytest.raises(ValueError):
        mapsequence_different.as_array()
    # Test the case when none of the maps have a mask
    returned_array = mapsequence_all_the_same.as_array()
    assert isinstance(returned_array, np.ndarray)
    assert returned_array.ndim == 3
    assert len(returned_array.shape) == 3
    assert returned_array.shape[0] == 128
    assert returned_array.shape[1] == 128
    assert returned_array.shape[2] == 2
    assert np.ma.getmask(returned_array) is np.ma.nomask
    # Test the case when all the maps have masks
    returned_array = mapsequence_all_the_same_all_have_masks.as_array()
    assert isinstance(returned_array, np.ma.masked_array)
    data = np.ma.getdata(returned_array)
    assert data.ndim == 3
    assert len(data.shape) == 3
    assert data.shape[0] == 128
    assert data.shape[1] == 128
    assert data.shape[2] == 2
    mask = np.ma.getmask(returned_array)
    assert mask.ndim == 3
    assert len(mask.shape) == 3
    assert mask.shape[0] == 128
    assert mask.shape[1] == 128
    assert mask.shape[2] == 2
    assert mask.dtype == bool
    # Test the case when some of the maps have masks
    returned_array = mapsequence_all_the_same_some_have_masks.as_array()
    assert isinstance(returned_array, np.ma.masked_array)
    data = np.ma.getdata(returned_array)
    assert data.ndim == 3
    assert len(data.shape) == 3
    assert data.shape[0] == 128
    assert data.shape[1] == 128
    assert data.shape[2] == 3
    # Reuse the already-computed array rather than calling as_array() a
    # second time (previously this recomputed the whole sequence).
    mask = np.ma.getmask(returned_array)
    assert mask.ndim == 3
    assert len(mask.shape) == 3
    assert mask.shape[0] == 128
    assert mask.shape[1] == 128
    assert mask.shape[2] == 3
    assert np.all(mask[0:2, 0:3, 0])
    assert np.all(mask[0:2, 0:3, 1])
    assert np.all(np.logical_not(mask[0:2, 0:3, 2]))
def test_all_meta(mapsequence_all_the_same):
    """Check that all_meta() returns one MetaDict per map and that each entry
    matches the corresponding map's own metadata."""
    meta = mapsequence_all_the_same.all_meta()
    assert len(meta) == 2
    assert all(isinstance(entry, MetaDict) for entry in meta)
    assert all(meta[i] == mapsequence_all_the_same[i].meta for i in range(len(meta)))
| [
"numpy.zeros_like",
"sunpy.map.Map",
"numpy.ma.getdata",
"numpy.logical_not",
"pytest.fixture",
"pytest.raises",
"numpy.ma.masked_array",
"numpy.ma.getmask",
"os.path.join",
"numpy.all"
] | [((1658, 1674), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1672, 1674), False, 'import pytest\n'), ((402, 447), 'os.path.join', 'os.path.join', (['testpath', '"""aia_171_level1.fits"""'], {}), "(testpath, 'aia_171_level1.fits')\n", (414, 447), False, 'import os\n'), ((459, 482), 'sunpy.map.Map', 'sunpy.map.Map', (['aia_file'], {}), '(aia_file)\n', (472, 482), False, 'import sunpy\n'), ((780, 807), 'numpy.zeros_like', 'np.zeros_like', (['aia_map_data'], {}), '(aia_map_data)\n', (793, 807), True, 'import numpy as np\n'), ((1082, 1130), 'sunpy.map.Map', 'sunpy.map.Map', (['[aia_map, aia_map]'], {'sequence': '(True)'}), '([aia_map, aia_map], sequence=True)\n', (1095, 1130), False, 'import sunpy\n'), ((1314, 1376), 'sunpy.map.Map', 'sunpy.map.Map', (['[masked_aia_map, masked_aia_map]'], {'sequence': '(True)'}), '([masked_aia_map, masked_aia_map], sequence=True)\n', (1327, 1376), False, 'import sunpy\n'), ((1583, 1654), 'sunpy.map.Map', 'sunpy.map.Map', (['[masked_aia_map, masked_aia_map, aia_map]'], {'sequence': '(True)'}), '([masked_aia_map, masked_aia_map, aia_map], sequence=True)\n', (1596, 1654), False, 'import sunpy\n'), ((3929, 3958), 'numpy.ma.getdata', 'np.ma.getdata', (['returned_array'], {}), '(returned_array)\n', (3942, 3958), True, 'import numpy as np\n'), ((4122, 4151), 'numpy.ma.getmask', 'np.ma.getmask', (['returned_array'], {}), '(returned_array)\n', (4135, 4151), True, 'import numpy as np\n'), ((4530, 4559), 'numpy.ma.getdata', 'np.ma.getdata', (['returned_array'], {}), '(returned_array)\n', (4543, 4559), True, 'import numpy as np\n'), ((4953, 4978), 'numpy.all', 'np.all', (['mask[0:2, 0:3, 0]'], {}), '(mask[0:2, 0:3, 0])\n', (4959, 4978), True, 'import numpy as np\n'), ((4990, 5015), 'numpy.all', 'np.all', (['mask[0:2, 0:3, 1]'], {}), '(mask[0:2, 0:3, 1])\n', (4996, 5015), True, 'import numpy as np\n'), ((867, 918), 'numpy.ma.masked_array', 'np.ma.masked_array', (['aia_map_data'], {'mask': 'aia_map_mask'}), '(aia_map_data, 
mask=aia_map_mask)\n', (885, 918), True, 'import numpy as np\n'), ((3249, 3274), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3262, 3274), False, 'import pytest\n'), ((3692, 3721), 'numpy.ma.getmask', 'np.ma.getmask', (['returned_array'], {}), '(returned_array)\n', (3705, 3721), True, 'import numpy as np\n'), ((5034, 5067), 'numpy.logical_not', 'np.logical_not', (['mask[0:2, 0:3, 2]'], {}), '(mask[0:2, 0:3, 2])\n', (5048, 5067), True, 'import numpy as np\n')] |
"""
From http://stackoverflow.com/a/13504757
"""
from scipy.interpolate import interp1d
from scipy.interpolate._fitpack import _bspleval
import numpy as np
class fast_interpolation:
    """Picklable linear interpolator that evaluates many parallel columns.

    Wraps :class:`scipy.interpolate.interp1d` (``kind='slinear'``, i.e. a
    degree-1 B-spline) and evaluates it by calling the low-level
    ``_bspleval`` routine directly, one column of ``y`` per element of the
    query array.

    NOTE(review): ``__call__`` reaches into private SciPy internals
    (``interp1d._spline.tck`` and ``scipy.interpolate._fitpack._bspleval``);
    these are not public API and may break across SciPy versions.
    """

    def __init__(self, x, y, axis=-1):
        # One y-sample per x-knot along the interpolation axis.
        assert len(x) == y.shape[axis]
        self.x = x
        self.y = y
        self.axis = axis
        # 'slinear' = first-order spline; copy=False avoids duplicating y.
        self._f = interp1d(x, y, axis=axis, kind='slinear', copy=False)

    def __getstate__(self):
        # The interp1d object is not pickled; only the raw inputs are.
        return dict(x=self.x, y=self.y, axis=self.axis)

    def __setstate__(self, state):
        # Rebuild the interpolator from the pickled inputs.
        self.x = state['x']
        self.y = state['y']
        self.axis = state['axis']
        self._f = interp1d(self.x, self.y, axis=self.axis,
                           kind='slinear', copy=False)

    def __call__(self, new_x):
        """Evaluate column ``i`` of the spline at ``new_x.flat[i]``.

        Assumes ``new_x`` has one element per column of spline coefficients —
        TODO confirm against callers (the original author left the shape
        assertion commented out below).
        """
        #assert new_x.shape == y.shape
        # tck = (knots, coefficients, spline degree) from the fitted spline.
        xj, cvals, k = self._f._spline.tck
        result = np.empty_like(new_x)
        for i, value in enumerate(new_x.flat):
            # Evaluate the i-th coefficient column at the i-th query point
            # (final 0 = no derivative, evaluate the function itself).
            result.flat[i] = _bspleval(value, self.x, cvals[:, i], k, 0)
        return result
| [
"scipy.interpolate._fitpack._bspleval",
"scipy.interpolate.interp1d",
"numpy.empty_like"
] | [((344, 397), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y'], {'axis': 'axis', 'kind': '"""slinear"""', 'copy': '(False)'}), "(x, y, axis=axis, kind='slinear', copy=False)\n", (352, 397), False, 'from scipy.interpolate import interp1d\n'), ((627, 695), 'scipy.interpolate.interp1d', 'interp1d', (['self.x', 'self.y'], {'axis': 'self.axis', 'kind': '"""slinear"""', 'copy': '(False)'}), "(self.x, self.y, axis=self.axis, kind='slinear', copy=False)\n", (635, 695), False, 'from scipy.interpolate import interp1d\n'), ((854, 874), 'numpy.empty_like', 'np.empty_like', (['new_x'], {}), '(new_x)\n', (867, 874), True, 'import numpy as np\n'), ((951, 994), 'scipy.interpolate._fitpack._bspleval', '_bspleval', (['value', 'self.x', 'cvals[:, i]', 'k', '(0)'], {}), '(value, self.x, cvals[:, i], k, 0)\n', (960, 994), False, 'from scipy.interpolate._fitpack import _bspleval\n')] |
"""Class definitions for Speakers, Files, Utterances and Jobs"""
from __future__ import annotations
import os
import re
import sys
import traceback
from collections import Counter
from typing import (
TYPE_CHECKING,
Any,
ClassVar,
Dict,
Generator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
import librosa
import numpy as np
from praatio import textgrid
from praatio.utilities.constants import Interval
from montreal_forced_aligner.abc import MfaCorpusClass
from montreal_forced_aligner.corpus.helper import get_wav_info, load_text
from montreal_forced_aligner.data import (
CtmInterval,
FileData,
SoundFileInformation,
SoundFileType,
TextFileType,
UtteranceData,
)
from montreal_forced_aligner.dictionary.multispeaker import MultispeakerSanitizationFunction
from montreal_forced_aligner.exceptions import CorpusError, TextGridParseError, TextParseError
if TYPE_CHECKING:
from montreal_forced_aligner.dictionary.pronunciation import PronunciationDictionaryMixin
__all__ = ["File", "Speaker", "Utterance"]
class Speaker(MfaCorpusClass):
    """
    Class representing information about a speaker

    Parameters
    ----------
    name: str
        Identifier of the speaker

    Attributes
    ----------
    utterances: :class:`~montreal_forced_aligner.corpus.classes.UtteranceCollection`
        Utterances that the speaker is associated with
    cmvn: str, optional
        String pointing to any CMVN that has been calculated for this speaker
    dictionary: :class:`~montreal_forced_aligner.dictionary.PronunciationDictionary`, optional
        Pronunciation dictionary that the speaker is associated with
    """

    def __init__(self, name):
        self._name = name
        self.utterances = UtteranceCollection()
        self.cmvn = None
        self.dictionary: Optional[PronunciationDictionaryMixin] = None
        self.dictionary_name: Optional[str] = None
        self.word_counts = Counter()

    @property
    def name(self) -> str:
        """Speaker name"""
        return self._name

    def __str__(self) -> str:
        """Return Speaker's name"""
        return self.name

    def __eq__(self, other: Union[Speaker, str]) -> bool:
        """Check if a Speaker is equal to another Speaker"""
        if isinstance(other, Speaker):
            return other.name == self.name
        if isinstance(other, str):
            return self.name == other
        raise TypeError("Speakers can only be compared to other speakers and strings.")

    def __lt__(self, other: Union[Speaker, str]) -> bool:
        """Check if a Speaker is less than another Speaker"""
        if isinstance(other, Speaker):
            # Fixed: previously compared ``other.name < self.name``, which
            # inverted the ordering relative to the string branch below.
            return self.name < other.name
        if isinstance(other, str):
            return self.name < other
        raise TypeError("Speakers can only be compared to other speakers and strings.")

    def __gt__(self, other: Union[Speaker, str]) -> bool:
        """Check if a Speaker is greater than another Speaker"""
        if isinstance(other, Speaker):
            # Fixed: previously compared ``other.name > self.name`` (inverted).
            return self.name > other.name
        if isinstance(other, str):
            return self.name > other
        raise TypeError("Speakers can only be compared to other speakers and strings.")

    def __hash__(self) -> int:
        """Get the hash of the speaker"""
        return hash(self.name)

    @property
    def num_utterances(self) -> int:
        """Get the number of utterances for the speaker"""
        return len(self.utterances)

    def add_utterance(self, utterance: Utterance) -> None:
        """
        Associate an utterance with a speaker

        Parameters
        ----------
        utterance: :class:`~montreal_forced_aligner.corpus.classes.Utterance`
            Utterance to be added
        """
        self.utterances.add_utterance(utterance)

    def delete_utterance(self, utterance: Utterance) -> None:
        """
        Delete an utterance associated with a speaker

        Parameters
        ----------
        utterance: :class:`~montreal_forced_aligner.corpus.classes.Utterance`
            Utterance to be deleted
        """
        identifier = utterance.name
        del self.utterances[identifier]

    def set_dictionary(self, dictionary: PronunciationDictionaryMixin) -> None:
        """
        Set the dictionary for the speaker

        Parameters
        ----------
        dictionary: :class:`~montreal_forced_aligner.dictionary.PronunciationDictionary`
            Pronunciation dictionary to associate with the speaker
        """
        self.dictionary = dictionary
        self.dictionary_name = dictionary.name

    @property
    def meta(self) -> Dict[str, str]:
        """Metadata for the speaker"""
        data = {
            "name": self.name,
            "cmvn": self.cmvn,
        }
        if self.dictionary is not None:
            data["dictionary"] = self.dictionary.name
        return data
class File(MfaCorpusClass):
    """
    File class for representing metadata and associations of Files

    Parameters
    ----------
    wav_path: str, optional
        Sound file path
    text_path: str, optional
        Transcription file path
    relative_path: str, optional
        Relative path to the corpus root

    Attributes
    ----------
    utterances: :class:`~montreal_forced_aligner.corpus.classes.UtteranceCollection`
        Utterances in the file
    speaker_ordering: list[Speaker]
        Ordering of speakers in the transcription file
    wav_info: :class:`~montreal_forced_aligner.data.SoundFileInformation`
        Information about sound file
    waveform: numpy.array
        Audio samples
    aligned: bool
        Flag for whether a file has alignments

    Raises
    ------
    :class:`~montreal_forced_aligner.exceptions.CorpusError`
        If both wav_path and text_path are None
    """

    textgrid_regex = re.compile(r"\.textgrid$", flags=re.IGNORECASE)
    wav_regex = re.compile(r"\.wav$", flags=re.IGNORECASE)

    def __init__(
        self,
        wav_path: Optional[str] = None,
        text_path: Optional[str] = None,
        relative_path: Optional[str] = None,
        name: Optional[str] = None,
    ):
        self.wav_path = wav_path
        self.text_path = text_path
        wav_check = self.wav_path is not None
        text_check = self.text_path is not None
        self._name = name
        if not self._name:
            # Derive the identifier from whichever path is available.
            if wav_check:
                self._name = os.path.splitext(os.path.basename(self.wav_path))[0]
            elif text_check:
                self._name = os.path.splitext(os.path.basename(self.text_path))[0]
            else:
                raise CorpusError("File objects must have either a wav_path or text_path")
        self.relative_path = relative_path
        self.wav_info: Optional[SoundFileInformation] = None
        self.waveform = None
        self.speaker_ordering: List[Speaker] = []
        self.utterances = UtteranceCollection()
        self.aligned = False
        if text_check:
            if self.text_path.lower().endswith(".textgrid"):
                self.text_type = TextFileType.TEXTGRID
            else:
                self.text_type = TextFileType.LAB
        else:
            self.text_type = TextFileType.NONE
        if wav_check:
            if self.wav_path.lower().endswith(".wav"):
                self.sound_type = SoundFileType.WAV
            else:
                # Non-wav sound files are handled via SoX conversion.
                self.sound_type = SoundFileType.SOX
        else:
            self.sound_type = SoundFileType.NONE

    @property
    def multiprocessing_data(self) -> FileData:
        """
        Data object for the file
        """
        return FileData(
            self.name,
            self.wav_path,
            self.text_path,
            self.relative_path,
            self.wav_info,
            [s.name for s in self.speaker_ordering],
            [u.multiprocessing_data for u in self.utterances],
        )

    @classmethod
    def load_from_mp_data(cls, file_data: FileData) -> File:
        """
        Construct a File from a multiprocessing file data class

        Parameters
        ----------
        file_data: :class:`~montreal_forced_aligner.data.FileData`
            Data for the loaded file

        Returns
        -------
        :class:`~montreal_forced_aligner.corpus.classes.File`
            Loaded file
        """
        file = File(
            file_data.wav_path,
            file_data.text_path,
            relative_path=file_data.relative_path,
            name=file_data.name,
        )
        file.wav_info = file_data.wav_info
        for s in file_data.speaker_ordering:
            file.add_speaker(Speaker(s))
        for u in file_data.utterances:
            u = Utterance.load_from_mp_data(u, file)
            file.utterances.add_utterance(u)
        return file

    @classmethod
    def parse_file(
        cls,
        file_name: str,
        wav_path: Optional[str],
        text_path: Optional[str],
        relative_path: str,
        speaker_characters: Union[int, str],
        sanitize_function: Optional[MultispeakerSanitizationFunction] = None,
    ):
        """
        Parse a collection of sound file and transcription file into a File

        Parameters
        ----------
        file_name: str
            File identifier
        wav_path: str
            Full sound file path
        text_path: str
            Full transcription path
        relative_path: str
            Relative path from the corpus directory root
        speaker_characters: int, optional
            Number of characters in the file name to specify the speaker
        sanitize_function: Callable, optional
            Function to sanitize words and strip punctuation

        Returns
        -------
        :class:`~montreal_forced_aligner.corpus.classes.File`
            Parsed file
        """
        file = File(wav_path, text_path, relative_path=relative_path, name=file_name)
        if file.has_sound_file:
            root = os.path.dirname(wav_path)
            file.wav_info = get_wav_info(wav_path)
        else:
            root = os.path.dirname(text_path)
        if not speaker_characters:
            # Default: the containing directory names the speaker.
            speaker_name = os.path.basename(root)
        elif isinstance(speaker_characters, int):
            speaker_name = file_name[:speaker_characters]
        elif speaker_characters == "prosodylab":
            # Prosodylab convention: speaker is the second underscore field.
            speaker_name = file_name.split("_")[1]
        else:
            speaker_name = file_name
        root_speaker = None
        if speaker_characters or file.text_type != TextFileType.TEXTGRID:
            root_speaker = Speaker(speaker_name)
        file.load_text(
            root_speaker=root_speaker,
            sanitize_function=sanitize_function,
        )
        return file

    def __eq__(self, other: Union[File, str]) -> bool:
        """Check if a File is equal to another File"""
        if isinstance(other, File):
            return other.name == self.name
        if isinstance(other, str):
            return self.name == other
        raise TypeError("Files can only be compared to other files and strings.")

    def __lt__(self, other: Union[File, str]) -> bool:
        """Check if a File is less than another File"""
        if isinstance(other, File):
            # Fixed: previously compared ``other.name < self.name``, which
            # inverted the ordering relative to the string branch below.
            return self.name < other.name
        if isinstance(other, str):
            return self.name < other
        raise TypeError("Files can only be compared to other files and strings.")

    def __gt__(self, other: Union[File, str]) -> bool:
        """Check if a File is greater than another File"""
        if isinstance(other, File):
            # Fixed: previously compared ``other.name > self.name`` (inverted).
            return self.name > other.name
        if isinstance(other, str):
            return self.name > other
        raise TypeError("Files can only be compared to other files and strings.")

    def __hash__(self) -> int:
        """Get the hash of the file"""
        return hash(self.name)

    @property
    def name(self) -> str:
        """Name of the file"""
        return self._name

    @property
    def is_fully_aligned(self) -> bool:
        """
        Check if all utterances have been aligned
        """
        for u in self.utterances:
            if u.ignored:
                continue
            if u.word_labels is None:
                return False
            if u.phone_labels is None:
                return False
        return True

    def __repr__(self) -> str:
        """Representation of File objects"""
        return f'<File {self.name} Sound path="{self.wav_path}" Text path="{self.text_path}">'

    def save(
        self,
        output_directory: Optional[str] = None,
        backup_output_directory: Optional[str] = None,
        text_type: Optional[TextFileType] = None,
    ) -> None:
        """
        Output File to TextGrid or lab.  If ``text_type`` is not specified, the original file type will be used,
        but if there was no text file for the file, it will guess lab format if there is only one utterance, otherwise
        it will output a TextGrid file.

        Parameters
        ----------
        output_directory: str, optional
            Directory to output file, if None, then it will overwrite the original file
        backup_output_directory: str, optional
            If specified, then it will check whether it would overwrite an existing file and
            instead use this directory
        text_type: TextFileType, optional
            Text type to save as, if not provided, it will use either the original file type or guess the file type
        """
        utterance_count = len(self.utterances)
        if text_type is None:
            text_type = self.text_type
            if text_type == TextFileType.NONE:
                # Guess: single-utterance files become labs, otherwise TextGrids.
                if utterance_count == 1:
                    text_type = TextFileType.LAB
                else:
                    text_type = TextFileType.TEXTGRID
        if text_type == TextFileType.LAB:
            if utterance_count == 0 and os.path.exists(self.text_path):
                # No utterances remain; remove the stale transcription file.
                os.remove(self.text_path)
                return
            utterance = next(iter(self.utterances))
            output_path = self.construct_output_path(
                output_directory, backup_output_directory, enforce_lab=True
            )
            with open(output_path, "w", encoding="utf8") as f:
                # Prefer the transcribed text over the reference text when present.
                if utterance.transcription_text is not None:
                    f.write(utterance.transcription_text)
                else:
                    f.write(utterance.text)
            return
        elif text_type == TextFileType.TEXTGRID:
            output_path = self.construct_output_path(output_directory, backup_output_directory)
            max_time = self.duration
            tiers = {}
            for speaker in self.speaker_ordering:
                if speaker is None:
                    tiers["speech"] = textgrid.IntervalTier("speech", [], minT=0, maxT=max_time)
                else:
                    tiers[speaker] = textgrid.IntervalTier(speaker.name, [], minT=0, maxT=max_time)
            tg = textgrid.Textgrid()
            tg.maxTimestamp = max_time
            for utterance in self.utterances:
                if utterance.speaker is None:
                    speaker = "speech"
                else:
                    speaker = utterance.speaker
                if not self.aligned:
                    if utterance.transcription_text is not None:
                        tiers[speaker].entryList.append(
                            Interval(
                                start=utterance.begin,
                                end=utterance.end,
                                label=utterance.transcription_text,
                            )
                        )
                    else:
                        tiers[speaker].entryList.append(
                            Interval(
                                start=utterance.begin, end=utterance.end, label=utterance.text
                            )
                        )
            for t in tiers.values():
                tg.addTier(t)
            tg.save(output_path, includeBlankSpaces=True, format="long_textgrid")

    @property
    def meta(self) -> Dict[str, Any]:
        """Metadata for the File"""
        return {
            "wav_path": self.wav_path,
            "text_path": self.text_path,
            "name": self.name,
            "relative_path": self.relative_path,
            "wav_info": self.wav_info.meta,
            "speaker_ordering": [x.name for x in self.speaker_ordering],
        }

    @property
    def has_sound_file(self) -> bool:
        """Flag for whether the File has a sound file"""
        return self.sound_type != SoundFileType.NONE

    @property
    def has_text_file(self) -> bool:
        """Flag for whether the File has a text file"""
        return self.text_type != TextFileType.NONE

    @property
    def aligned_data(self) -> Dict[str, Dict[str, List[CtmInterval]]]:
        """
        Word and phone alignments for the file

        Returns
        -------
        dict[str, dict[str, list[CtmInterval]]]
            Dictionary of word and phone intervals for each speaker in the file
        """
        data = {}
        for s in self.speaker_ordering:
            if s.name not in data:
                data[s.name] = {"words": [], "phones": []}
        for u in self.utterances:
            if u.word_labels is None:
                continue
            data[u.speaker_name]["words"].extend(u.word_labels)
            data[u.speaker_name]["phones"].extend(u.phone_labels)
        return data

    def clean_up(self) -> None:
        """
        Recombine words that were split up as part of initial text processing
        """
        for u in self.utterances:
            if not u.word_labels:
                continue
            cur_ind = 0
            actual_labels = []
            dictionary = u.speaker.dictionary
            for word in u.text.split():
                splits = dictionary.lookup(word)
                # Merge the aligned intervals of all split pieces back into a
                # single interval spanning the original word.
                b = 1000000
                e = -1
                for w in splits:
                    cur = u.word_labels[cur_ind]
                    if w == cur.label or cur.label == dictionary.oov_word:
                        if cur.begin < b:
                            b = cur.begin
                        if cur.end > e:
                            e = cur.end
                    cur_ind += 1
                lab = CtmInterval(b, e, word, u.name)
                actual_labels.append(lab)
            u.word_labels = actual_labels
            u.phone_labels = [
                x for x in u.phone_labels if x.label != dictionary.optional_silence_phone
            ]

    def construct_output_path(
        self,
        output_directory: Optional[str] = None,
        backup_output_directory: Optional[str] = None,
        enforce_lab: bool = False,
    ) -> str:
        """
        Construct the output path for the File

        Parameters
        ----------
        output_directory: str, optional
            Directory to output to, if None, it will overwrite the original file
        backup_output_directory: str, optional
            Backup directory to write to in order to avoid overwriting an existing file
        enforce_lab: bool
            Flag for whether to enforce generating a lab file over a TextGrid

        Returns
        -------
        str
            Output path
        """
        if enforce_lab:
            extension = ".lab"
        else:
            extension = ".TextGrid"
        if output_directory is None:
            if self.text_path is None:
                return os.path.splitext(self.wav_path)[0] + extension
            return self.text_path
        if self.relative_path:
            relative = os.path.join(output_directory, self.relative_path)
        else:
            relative = output_directory
        tg_path = os.path.join(relative, self.name + extension)
        if backup_output_directory is not None and os.path.exists(tg_path):
            # Avoid clobbering an existing file by rerouting to the backup dir.
            tg_path = tg_path.replace(output_directory, backup_output_directory)
        os.makedirs(os.path.dirname(tg_path), exist_ok=True)
        return tg_path

    def load_text(
        self,
        root_speaker: Optional[Speaker] = None,
        sanitize_function: Optional[MultispeakerSanitizationFunction] = None,
    ) -> None:
        """
        Load the transcription text from the text_file of the object

        Parameters
        ----------
        root_speaker: :class:`~montreal_forced_aligner.corpus.classes.Speaker`, optional
            Speaker derived from the root directory, ignored for TextGrids
        sanitize_function: :class:`~montreal_forced_aligner.dictionary.mixins.SanitizeFunction`, optional
            Function to sanitize words and strip punctuation
        """
        if self.text_type == TextFileType.LAB:
            try:
                text = load_text(self.text_path)
            except UnicodeDecodeError:
                raise TextParseError(self.text_path)
            utterance = Utterance(speaker=root_speaker, file=self, text=text)
            utterance.parse_transcription(sanitize_function)
            self.add_utterance(utterance)
        elif self.text_type == TextFileType.TEXTGRID:
            try:
                tg = textgrid.openTextgrid(self.text_path, includeEmptyIntervals=False)
            except Exception:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                raise TextGridParseError(
                    self.text_path,
                    "\n".join(traceback.format_exception(exc_type, exc_value, exc_traceback)),
                )
            num_tiers = len(tg.tierNameList)
            if num_tiers == 0:
                raise TextGridParseError(self.text_path, "Number of tiers parsed was zero")
            if self.num_channels > 2:
                raise (Exception("More than two channels"))
            for tier_name in tg.tierNameList:
                ti = tg.tierDict[tier_name]
                if tier_name.lower() == "notes":
                    continue
                if not isinstance(ti, textgrid.IntervalTier):
                    continue
                if not root_speaker:
                    # Each interval tier names a speaker.
                    speaker_name = tier_name.strip()
                    speaker = Speaker(speaker_name)
                    self.add_speaker(speaker)
                else:
                    speaker = root_speaker
                for begin, end, text in ti.entryList:
                    text = text.lower().strip()
                    if not text:
                        continue
                    begin, end = round(begin, 4), round(end, 4)
                    # Clamp interval ends to the actual sound file duration.
                    end = min(end, self.duration)
                    utt = Utterance(speaker=speaker, file=self, begin=begin, end=end, text=text)
                    utt.parse_transcription(sanitize_function)
                    if not utt.text:
                        continue
                    self.add_utterance(utt)
        else:
            # No transcription file: create a single empty utterance.
            utterance = Utterance(speaker=root_speaker, file=self)
            self.add_utterance(utterance)

    def add_speaker(self, speaker: Speaker) -> None:
        """
        Add a speaker to a file

        Parameters
        ----------
        speaker: :class:`~montreal_forced_aligner.corpus.classes.Speaker`
            Speaker to add
        """
        if speaker not in self.speaker_ordering:
            self.speaker_ordering.append(speaker)

    def add_utterance(self, utterance: Utterance) -> None:
        """
        Add an utterance to a file

        Parameters
        ----------
        utterance: :class:`~montreal_forced_aligner.corpus.classes.Utterance`
            Utterance to add
        """
        self.utterances.add_utterance(utterance)
        self.add_speaker(utterance.speaker)

    def delete_utterance(self, utterance: Utterance) -> None:
        """
        Delete an utterance from the file

        Parameters
        ----------
        utterance: :class:`~montreal_forced_aligner.corpus.classes.Utterance`
            Utterance to remove
        """
        identifier = utterance.name
        del self.utterances[identifier]

    def load_info(self) -> None:
        """
        Load sound file info if it hasn't been already
        """
        if self.wav_path is not None:
            self.wav_info = get_wav_info(self.wav_path)

    @property
    def duration(self) -> float:
        """Get the duration of the sound file"""
        if self.wav_path is None:
            return 0
        if not self.wav_info:
            self.load_info()
        return self.wav_info.duration

    @property
    def num_channels(self) -> int:
        """Get the number of channels of the sound file"""
        if self.wav_path is None:
            return 0
        if not self.wav_info:
            self.load_info()
        return self.wav_info.num_channels

    @property
    def num_utterances(self) -> int:
        """Get the number of utterances for the sound file"""
        return len(self.utterances)

    @property
    def num_speakers(self) -> int:
        """Get the number of speakers in the sound file"""
        return len(self.speaker_ordering)

    @property
    def sample_rate(self) -> int:
        """Get the sample rate of the sound file"""
        if self.wav_path is None:
            return 0
        if not self.wav_info:
            self.load_info()
        return self.wav_info.sample_rate

    @property
    def format(self) -> str:
        """Get the sound file format"""
        if not self.wav_info:
            self.load_info()
        return self.wav_info.format

    @property
    def sox_string(self) -> str:
        """String used for converting sound file via SoX within Kaldi"""
        if not self.wav_info:
            self.load_info()
        return self.wav_info.sox_string

    def load_wav_data(self) -> None:
        """
        Load the sound file into memory as a numpy array
        """
        self.waveform, _ = librosa.load(self.wav_path, sr=None, mono=False)

    def normalized_waveform(
        self, begin: float = 0, end: Optional[float] = None
    ) -> Tuple[np.array, np.array]:
        """
        Generate a normalized waveform for the given time span, with per-channel
        vertical offsets applied (presumably for plotting stereo channels on
        separate baselines — TODO confirm against callers).

        Parameters
        ----------
        begin: float
            Start time in seconds
        end: float, optional
            End time in seconds, defaults to the end of the sound file

        Returns
        -------
        numpy.array
            Time points in seconds for each sample
        numpy.array
            Normalized, offset amplitudes
        """
        if self.waveform is None:
            self.load_wav_data()
        if end is None or end > self.duration:
            end = self.duration
        begin_sample = int(begin * self.sample_rate)
        end_sample = int(end * self.sample_rate)
        if len(self.waveform.shape) > 1 and self.waveform.shape[0] == 2:
            y = self.waveform[:, begin_sample:end_sample] / np.max(
                np.abs(self.waveform[:, begin_sample:end_sample]), axis=0
            )
            y[np.isnan(y)] = 0
            # Offset each channel separately so they do not overlap.
            # Fixed: the second offset was previously applied to channel 0 as
            # well (``y[0, :] += 1``), leaving channel 1 with no offset.
            y[0, :] += 3
            y[1, :] += 1
        else:
            y = (
                self.waveform[begin_sample:end_sample]
                / np.max(np.abs(self.waveform[begin_sample:end_sample]), axis=0)
            ) + 1
        x = np.arange(start=begin_sample, stop=end_sample) / self.sample_rate
        return x, y

    def for_wav_scp(self) -> str:
        """
        Generate the string to use in feature generation

        Returns
        -------
        str
            SoX string if necessary, the sound file path otherwise
        """
        if self.sox_string:
            return self.sox_string
        return self.wav_path
class Utterance(MfaCorpusClass):
"""
Class for information about specific utterances
Parameters
----------
speaker: :class:`~montreal_forced_aligner.corpus.classes.Speaker`
Speaker of the utterance
file: :class:`~montreal_forced_aligner.corpus.classes.File`
File that the utterance belongs to
begin: float, optional
Start time of the utterance,
if None, then the utterance is assumed to start at 0
end: float, optional
End time of the utterance,
if None, then the utterance is assumed to end at the end of the File
channel: int, optional
Channel in the file, if None, then assumed to be the first/only channel
text: str, optional
Text transcription of the utterance
Attributes
----------
file_name: str
Saved File.name property for reconstructing objects following serialization
speaker_name: str
Saved Speaker.name property for reconstructing objects following serialization
transcription_text: str, optional
Output of transcription is saved here
ignored: bool
The ignored flag is set if feature generation does not work for this utterance, or it is too short to
be processed by Kaldi
features: str, optional
Feature string reference to the computed features archive
phone_labels: list[:class:`~montreal_forced_aligner.data.CtmInterval`], optional
Saved aligned phone labels
word_labels: list[:class:`~montreal_forced_aligner.data.CtmInterval`], optional
Saved aligned word labels
oovs: list[str]
Words not found in the dictionary for this utterance
"""
def __init__(
self,
speaker: Speaker,
file: File,
begin: Optional[float] = None,
end: Optional[float] = None,
channel: Optional[int] = 0,
text: Optional[str] = None,
):
self.speaker = speaker
self.file = file
self.file_name = file.name
self.speaker_name: str = speaker.name
if begin is None:
begin = 0
if end is None:
end = self.file.duration
self.begin = begin
self.end = end
self.channel = channel
self.text = text
self.transcription_text = None
self.ignored = False
self.features = None
self.phone_labels: Optional[List[CtmInterval]] = None
self.word_labels: Optional[List[CtmInterval]] = None
self.oovs = set()
self.normalized_text = []
self.text_int = []
self.word_error_rate = None
def parse_transcription(self, sanitize_function=Optional[MultispeakerSanitizationFunction]):
"""
Parse an orthographic transcription given punctuation and clitic markers
Parameters
----------
sanitize_function: :class:`~montreal_forced_aligner.dictionary.multispeaker.MultispeakerSanitizationFunction`, optional
Function to sanitize words and strip punctuation
"""
self.normalized_text = []
if sanitize_function is not None:
try:
sanitize, split = sanitize_function.get_functions_for_speaker(self.speaker_name)
except AttributeError:
sanitize = sanitize_function
split = None
words = [
sanitize(w)
for w in self.text.split()
if w not in sanitize.clitic_markers + sanitize.compound_markers
]
self.text = " ".join(words)
if split is not None:
for w in words:
for new_w in split(w):
if new_w not in split.word_set:
self.oovs.add(new_w)
self.normalized_text.append(new_w)
@property
def multiprocessing_data(self):
return UtteranceData(
self.speaker_name,
self.file_name,
self.begin,
self.end,
self.channel,
self.text,
self.normalized_text,
self.oovs,
)
@classmethod
def load_from_mp_data(cls, data: UtteranceData, file: File) -> Utterance:
utterance = Utterance(
speaker=Speaker(data.speaker_name),
file=file,
begin=data.begin,
end=data.end,
channel=data.channel,
text=data.text,
)
if data.normalized_text:
utterance.normalized_text = data.normalized_text
utterance.oovs = data.oovs
return utterance
def __str__(self) -> str:
"""String representation"""
return self.name
def __repr__(self) -> str:
"""Object representation"""
return f'<Utterance "{self.name}">'
def __eq__(self, other: Union[Utterance, str]) -> bool:
"""Check if an Utterance is equal to another Utterance"""
if isinstance(other, Utterance):
return other.name == self.name
if isinstance(other, str):
return self.name == other
raise TypeError("Utterances can only be compared to other utterances and strings.")
def __lt__(self, other: Union[Utterance, str]) -> bool:
"""Check if an Utterance is less than another Utterance"""
if isinstance(other, Utterance):
return other.name < self.name
if isinstance(other, str):
return self.name < other
raise TypeError("Utterances can only be compared to other utterances and strings.")
def __gt__(self, other: Union[Utterance, str]) -> bool:
"""Check if an Utterance is greater than another Utterance"""
if isinstance(other, Utterance):
return other.name > self.name
if isinstance(other, str):
return self.name > other
raise TypeError("Utterances can only be compared to other utterances and strings.")
def __hash__(self) -> hash:
"""Compute the hash of this function"""
return hash(self.name)
@property
def duration(self) -> float:
"""Duration of the utterance"""
if self.begin is not None and self.end is not None:
return self.end - self.begin
return self.file.duration
@property
def meta(self) -> Dict[str, Any]:
"""Metadata dictionary for the utterance"""
return {
"speaker": self.speaker.name,
"file": self.file.name,
"begin": self.begin,
"end": self.end,
"channel": self.channel,
"text": self.text,
"ignored": self.ignored,
"features": self.features,
"normalized_text": self.normalized_text,
"oovs": self.oovs,
"transcription_text": self.transcription_text,
"word_error_rate": self.word_error_rate,
}
def set_speaker(self, speaker: Speaker) -> None:
"""
Set the speaker of the utterance and updates other objects
Parameters
----------
speaker: :class:`~montreal_forced_aligner.corpus.classes.Speaker`
New speaker
"""
self.speaker = speaker
self.speaker.add_utterance(self)
self.file.add_utterance(self)
@property
def is_segment(self) -> bool:
"""Check if this utterance is a segment of a longer file"""
return self.begin is not None and self.end is not None
def add_word_intervals(self, intervals: Union[CtmInterval, List[CtmInterval]]) -> None:
"""
Add aligned word intervals for the utterance
Parameters
----------
intervals: Union[CtmInterval, list[CtmInterval]]
Intervals to add
"""
if not isinstance(intervals, list):
intervals = [intervals]
if self.word_labels is None:
self.word_labels = []
for interval in intervals:
if self.begin is not None:
interval.shift_times(self.begin)
self.word_labels.extend(intervals)
def add_phone_intervals(self, intervals: Union[CtmInterval, List[CtmInterval]]) -> None:
"""
Add aligned phone intervals for the utterance
Parameters
----------
intervals: Union[CtmInterval, list[CtmInterval]]
Intervals to add
"""
if not isinstance(intervals, list):
intervals = [intervals]
if self.phone_labels is None:
self.phone_labels = []
for interval in intervals:
if self.begin is not None:
interval.shift_times(self.begin)
self.phone_labels.extend(intervals)
def text_for_scp(self) -> List[str]:
"""
Generate the text for exporting to Kaldi's text scp
Returns
-------
list[str]
List of words
"""
return self.text.split()
def text_int_for_scp(self) -> Optional[List[int]]:
"""
Generate the text for exporting to Kaldi's text int scp
Returns
-------
list[int]
List of word IDs, or None if the utterance's speaker doesn't have an associated dictionary
"""
if self.speaker.dictionary is None:
return
if self.text_int:
return self.text_int
if self.normalized_text:
normalized = True
text = self.normalized_text
else:
normalized = False
text = self.text_for_scp()
self.text_int = []
for i, t in enumerate(text):
lookup = self.speaker.dictionary.to_int(t, normalized)
if self.speaker.dictionary.oov_int in lookup:
self.oovs.add(text[i])
self.text_int.extend(lookup)
return self.text_int
def segment_for_scp(self) -> List[Any]:
"""
Generate data for Kaldi's segments scp file
Returns
-------
list[Any]
Segment data
"""
return [self.file_name.replace(" ", "_MFASPACE_"), self.begin, self.end, self.channel]
@property
def name(self) -> str:
"""The name of the utterance"""
base = f"{self.file_name}"
base = base.replace(" ", "-space-").replace(".", "-").replace("_", "-")
if not base.startswith(f"{self.speaker_name}-"):
base = f"{self.speaker_name}-" + base
if self.is_segment:
base = f"{base}-{self.begin}-{self.end}"
return base.replace(" ", "-space-").replace(".", "-").replace("_", "-")
T = TypeVar("T", Speaker, File, Utterance)
class Collection:
"""
Utility class for storing collections of corpus objects, allowing iteration, sorting, and
look up via names.
"""
CLASS_TYPE = ClassVar[MfaCorpusClass]
def __init__(self):
self._data: Dict[str, T] = {}
def __getitem__(self, key: str) -> T:
"""Get an item by identifier"""
return self._data[key]
def __delitem__(self, key: str) -> None:
"""Delete an item by identifier"""
del self._data[key]
def __setitem__(self, key: str, item: T) -> None:
"""Set an item by identifier"""
self._data[key] = item
def __len__(self) -> int:
"""Number of items in the collection"""
return len(self._data)
def __bool__(self) -> bool:
"""Check for whether the collection contains any items"""
return bool(self._data)
def __contains__(self, item: Union[str, T]) -> bool:
"""Check for whether the collection contains a specific item"""
if not isinstance(item, str):
item = item.name
return item in self._data
def subset(self, subset_identifiers: Set[str]) -> Generator[T]:
for item in self:
if subset_identifiers and item.name not in subset_identifiers:
continue
yield item
def __iter__(self) -> Generator[T]:
"""Iterator over the collection"""
for v in self._data.values():
yield v
def update(self, other: Union[Collection, Set[T], List[T]]) -> None:
"""Update collection from another collection"""
if isinstance(other, Collection):
self._data.update(other._data)
else:
for item in other:
self._data[item.name] = item
def __str__(self) -> str:
"""String representation"""
return str(self._data)
def __repr__(self) -> str:
"""Object representation"""
return f"<Collection of {self._data}>"
class SpeakerCollection(Collection):
"""
Utility class for storing collections of speakers
"""
CLASS_TYPE = Speaker
def add_speaker(self, speaker: Speaker) -> None:
"""
Add speaker to the collection
Parameters
----------
speaker: :class:`~montreal_forced_aligner.corpus.classes.Speaker`
Speaker to be added
"""
self[speaker.name] = speaker
def __repr__(self) -> str:
"""Object representation"""
return f"<SpeakerCollection of {self._data}>"
class FileCollection(Collection):
"""
Utility class for storing collections of speakers
"""
CLASS_TYPE = File
def __init__(self):
super(FileCollection, self).__init__()
self.lab_count = 0
self.textgrid_count = 0
self.sound_file_count = 0
def add_file(self, file: File) -> None:
"""
Add file to the collection
Parameters
----------
speaker: :class:`~montreal_forced_aligner.corpus.classes.File`
File to be added
"""
self[file.name] = file
if file.text_type == TextFileType.TEXTGRID:
self.textgrid_count += 1
elif file.text_type == TextFileType.LAB:
self.lab_count += 1
if file.sound_type != SoundFileType.NONE:
self.sound_file_count += 1
def __repr__(self) -> str:
"""Object representation"""
return f"<FileCollection of {self._data}>"
class UtteranceCollection(Collection):
"""
Utility class for storing collections of speakers
"""
CLASS_TYPE = Utterance
def add_utterance(self, utterance: Utterance) -> None:
"""
Add utterance to the collection
Parameters
----------
speaker: :class:`~montreal_forced_aligner.corpus.classes.Utterance`
Utterance to be added
"""
self[utterance.name] = utterance
def __iter__(self) -> Generator[Utterance]:
"""Iterator over the collection"""
for v in self._data.values():
if v.ignored:
continue
yield v
def __repr__(self) -> str:
"""Object representation"""
return f"<UtteranceCollection of {self._data}>"
| [
"montreal_forced_aligner.data.UtteranceData",
"os.remove",
"numpy.abs",
"montreal_forced_aligner.data.CtmInterval",
"praatio.utilities.constants.Interval",
"montreal_forced_aligner.corpus.helper.get_wav_info",
"numpy.isnan",
"numpy.arange",
"sys.exc_info",
"os.path.join",
"montreal_forced_aligne... | [((37938, 37976), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'Speaker', 'File', 'Utterance'], {}), "('T', Speaker, File, Utterance)\n", (37945, 37976), False, 'from typing import TYPE_CHECKING, Any, ClassVar, Dict, Generator, List, Optional, Set, Tuple, TypeVar, Union\n'), ((5890, 5937), 're.compile', 're.compile', (['"""\\\\.textgrid$"""'], {'flags': 're.IGNORECASE'}), "('\\\\.textgrid$', flags=re.IGNORECASE)\n", (5900, 5937), False, 'import re\n'), ((5954, 5996), 're.compile', 're.compile', (['"""\\\\.wav$"""'], {'flags': 're.IGNORECASE'}), "('\\\\.wav$', flags=re.IGNORECASE)\n", (5964, 5996), False, 'import re\n'), ((1981, 1990), 'collections.Counter', 'Counter', ([], {}), '()\n', (1988, 1990), False, 'from collections import Counter\n'), ((7669, 7856), 'montreal_forced_aligner.data.FileData', 'FileData', (['self.name', 'self.wav_path', 'self.text_path', 'self.relative_path', 'self.wav_info', '[s.name for s in self.speaker_ordering]', '[u.multiprocessing_data for u in self.utterances]'], {}), '(self.name, self.wav_path, self.text_path, self.relative_path, self\n .wav_info, [s.name for s in self.speaker_ordering], [u.\n multiprocessing_data for u in self.utterances])\n', (7677, 7856), False, 'from montreal_forced_aligner.data import CtmInterval, FileData, SoundFileInformation, SoundFileType, TextFileType, UtteranceData\n'), ((19889, 19934), 'os.path.join', 'os.path.join', (['relative', '(self.name + extension)'], {}), '(relative, self.name + extension)\n', (19901, 19934), False, 'import os\n'), ((25990, 26038), 'librosa.load', 'librosa.load', (['self.wav_path'], {'sr': 'None', 'mono': '(False)'}), '(self.wav_path, sr=None, mono=False)\n', (26002, 26038), False, 'import librosa\n'), ((31233, 31366), 'montreal_forced_aligner.data.UtteranceData', 'UtteranceData', (['self.speaker_name', 'self.file_name', 'self.begin', 'self.end', 'self.channel', 'self.text', 'self.normalized_text', 'self.oovs'], {}), '(self.speaker_name, self.file_name, 
self.begin, self.end, self\n .channel, self.text, self.normalized_text, self.oovs)\n', (31246, 31366), False, 'from montreal_forced_aligner.data import CtmInterval, FileData, SoundFileInformation, SoundFileType, TextFileType, UtteranceData\n'), ((10003, 10028), 'os.path.dirname', 'os.path.dirname', (['wav_path'], {}), '(wav_path)\n', (10018, 10028), False, 'import os\n'), ((10057, 10079), 'montreal_forced_aligner.corpus.helper.get_wav_info', 'get_wav_info', (['wav_path'], {}), '(wav_path)\n', (10069, 10079), False, 'from montreal_forced_aligner.corpus.helper import get_wav_info, load_text\n'), ((10113, 10139), 'os.path.dirname', 'os.path.dirname', (['text_path'], {}), '(text_path)\n', (10128, 10139), False, 'import os\n'), ((10202, 10224), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (10218, 10224), False, 'import os\n'), ((19766, 19816), 'os.path.join', 'os.path.join', (['output_directory', 'self.relative_path'], {}), '(output_directory, self.relative_path)\n', (19778, 19816), False, 'import os\n'), ((19986, 20009), 'os.path.exists', 'os.path.exists', (['tg_path'], {}), '(tg_path)\n', (20000, 20009), False, 'import os\n'), ((20112, 20136), 'os.path.dirname', 'os.path.dirname', (['tg_path'], {}), '(tg_path)\n', (20127, 20136), False, 'import os\n'), ((24346, 24373), 'montreal_forced_aligner.corpus.helper.get_wav_info', 'get_wav_info', (['self.wav_path'], {}), '(self.wav_path)\n', (24358, 24373), False, 'from montreal_forced_aligner.corpus.helper import get_wav_info, load_text\n'), ((26922, 26968), 'numpy.arange', 'np.arange', ([], {'start': 'begin_sample', 'stop': 'end_sample'}), '(start=begin_sample, stop=end_sample)\n', (26931, 26968), True, 'import numpy as np\n'), ((13964, 13994), 'os.path.exists', 'os.path.exists', (['self.text_path'], {}), '(self.text_path)\n', (13978, 13994), False, 'import os\n'), ((14012, 14037), 'os.remove', 'os.remove', (['self.text_path'], {}), '(self.text_path)\n', (14021, 14037), False, 'import os\n'), ((15052, 
15071), 'praatio.textgrid.Textgrid', 'textgrid.Textgrid', ([], {}), '()\n', (15069, 15071), False, 'from praatio import textgrid\n'), ((18439, 18470), 'montreal_forced_aligner.data.CtmInterval', 'CtmInterval', (['b', 'e', 'word', 'u.name'], {}), '(b, e, word, u.name)\n', (18450, 18470), False, 'from montreal_forced_aligner.data import CtmInterval, FileData, SoundFileInformation, SoundFileType, TextFileType, UtteranceData\n'), ((20901, 20926), 'montreal_forced_aligner.corpus.helper.load_text', 'load_text', (['self.text_path'], {}), '(self.text_path)\n', (20910, 20926), False, 'from montreal_forced_aligner.corpus.helper import get_wav_info, load_text\n'), ((26657, 26668), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (26665, 26668), True, 'import numpy as np\n'), ((6674, 6742), 'montreal_forced_aligner.exceptions.CorpusError', 'CorpusError', (['"""File objects must have either a wav_path or text_path"""'], {}), "('File objects must have either a wav_path or text_path')\n", (6685, 6742), False, 'from montreal_forced_aligner.exceptions import CorpusError, TextGridParseError, TextParseError\n'), ((20988, 21018), 'montreal_forced_aligner.exceptions.TextParseError', 'TextParseError', (['self.text_path'], {}), '(self.text_path)\n', (21002, 21018), False, 'from montreal_forced_aligner.exceptions import CorpusError, TextGridParseError, TextParseError\n'), ((21292, 21358), 'praatio.textgrid.openTextgrid', 'textgrid.openTextgrid', (['self.text_path'], {'includeEmptyIntervals': '(False)'}), '(self.text_path, includeEmptyIntervals=False)\n', (21313, 21358), False, 'from praatio import textgrid\n'), ((21747, 21816), 'montreal_forced_aligner.exceptions.TextGridParseError', 'TextGridParseError', (['self.text_path', '"""Number of tiers parsed was zero"""'], {}), "(self.text_path, 'Number of tiers parsed was zero')\n", (21765, 21816), False, 'from montreal_forced_aligner.exceptions import CorpusError, TextGridParseError, TextParseError\n'), ((26571, 26620), 'numpy.abs', 'np.abs', 
(['self.waveform[:, begin_sample:end_sample]'], {}), '(self.waveform[:, begin_sample:end_sample])\n', (26577, 26620), True, 'import numpy as np\n'), ((6486, 6517), 'os.path.basename', 'os.path.basename', (['self.wav_path'], {}), '(self.wav_path)\n', (6502, 6517), False, 'import os\n'), ((14853, 14911), 'praatio.textgrid.IntervalTier', 'textgrid.IntervalTier', (['"""speech"""', '[]'], {'minT': '(0)', 'maxT': 'max_time'}), "('speech', [], minT=0, maxT=max_time)\n", (14874, 14911), False, 'from praatio import textgrid\n'), ((14971, 15033), 'praatio.textgrid.IntervalTier', 'textgrid.IntervalTier', (['speaker.name', '[]'], {'minT': '(0)', 'maxT': 'max_time'}), '(speaker.name, [], minT=0, maxT=max_time)\n', (14992, 15033), False, 'from praatio import textgrid\n'), ((19631, 19662), 'os.path.splitext', 'os.path.splitext', (['self.wav_path'], {}), '(self.wav_path)\n', (19647, 19662), False, 'import os\n'), ((21442, 21456), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (21454, 21456), False, 'import sys\n'), ((26836, 26882), 'numpy.abs', 'np.abs', (['self.waveform[begin_sample:end_sample]'], {}), '(self.waveform[begin_sample:end_sample])\n', (26842, 26882), True, 'import numpy as np\n'), ((6597, 6629), 'os.path.basename', 'os.path.basename', (['self.text_path'], {}), '(self.text_path)\n', (6613, 6629), False, 'import os\n'), ((15501, 15592), 'praatio.utilities.constants.Interval', 'Interval', ([], {'start': 'utterance.begin', 'end': 'utterance.end', 'label': 'utterance.transcription_text'}), '(start=utterance.begin, end=utterance.end, label=utterance.\n transcription_text)\n', (15509, 15592), False, 'from praatio.utilities.constants import Interval\n'), ((15852, 15924), 'praatio.utilities.constants.Interval', 'Interval', ([], {'start': 'utterance.begin', 'end': 'utterance.end', 'label': 'utterance.text'}), '(start=utterance.begin, end=utterance.end, label=utterance.text)\n', (15860, 15924), False, 'from praatio.utilities.constants import Interval\n'), ((21565, 21627), 
'traceback.format_exception', 'traceback.format_exception', (['exc_type', 'exc_value', 'exc_traceback'], {}), '(exc_type, exc_value, exc_traceback)\n', (21591, 21627), False, 'import traceback\n')] |
import numpy as np
import copy
import pickle
from functools import partial
from rootpy.vector import LorentzVector
from sklearn.preprocessing import RobustScaler
import multiprocessing as mp
# Data loading related
def multithreadmap(f,X,ncores=20, **kwargs):
"""
multithreading map of a function, default on 20 cpu cores.
"""
func = partial(f, **kwargs)
p=mp.Pool(ncores)
Xout = p.map(func,X)
p.terminate()
return(Xout)
def create_tf_transform(X):
"""loads training data and make a robustscaler transform"""
# Make training data
Xcontent=multithreadmap(extract_component,X,component="content")
tf = RobustScaler().fit(np.vstack(Xcontent))
return(tf)
def tftransform(jet,tf):
"""applies a robustscaler transform to one jet"""
jet["content"] = tf.transform(jet["content"])
return(jet)
def apply_tf_transform(X,tf):
"""applies a robustscaler transform to a jet array"""
return(multithreadmap(tftransform,X,tf=tf))
def extract_component(e,component):
return(e[component])
def cast(event):
"""
Converts an envent into a list of p4, usable by fastjet
"""
a = np.zeros((len(event), 5))
for i, p in enumerate(event):
a[i, 3] = p[0]
a[i, 0] = p[1]
a[i, 1] = p[2]
a[i, 2] = p[3]
a[i, 4] = p[4]
return(a)
def create_jet_dictionary(e,cluster=None,regression=False,R=1.0):
"""
create the Jet dictionary stucture from fastjet
"""
jet = {}
if regression:
ye=e[-1]
e=e[0]
jet["genpt"] = ye
t=cast(e)
tree, content, mass, pt = cluster(t, jet_algorithm=1,R=R)[0] # dump highest pt jet only
jet["root_id"] = 0
jet["tree"] = tree # tree structure, tree[i] constains [left son, right son] of subjet i
jet["content"] = content # list of every p4 of every subjet used to create the full jet
jet["mass"] = mass
jet["pt"] = pt
jet["energy"] = content[0, 3]
px = content[0, 0]
py = content[0, 1]
pz = content[0, 2]
p = (content[0, 0:3] ** 2).sum() ** 0.5
eta = 0.5 * (np.log(p + pz) - np.log(p - pz))
phi = np.arctan2(py, px)
jet["eta"] = eta
jet["phi"] = phi
return(jet)
def preprocess(jet, cluster, output="kt", regression=False,R_clustering=0.3):
"""
preprocesses the data to make it usable by the recnn
Preprocessing algorithm:
1. j = the highest pt anti-kt jet (R=1)
2. run kt (R=0.3) on the constituents c of j, resulting in subjets sj1, sj2, ..., sjN
3. phi = sj1.phi(); for all c, do c.rotate_z(-phi)
4. bv = sj1.boost_vector(); bv.set_perp(0); for all c, do c.boost(-bv)
5. deltaz = sj1.pz - sj2.pz; deltay = sj1.py - sj2.py; alpha = -atan2(deltaz, deltay); for all c, do c.rotate_x(alpha)
6. if sj3.pz < 0: for all c, do c.set_pz(-c.pz)
7. finally recluster all transformed constituents c into a single jet
"""
jet = copy.deepcopy(jet)
constituents = jet["content"][jet["tree"][:, 0] == -1]
if regression :
genpt=jet["genpt"]
### run kt (R=0.3) on the constituents c of j, resulting in subjets sj1, sj2, ..., sjN ###
subjets = cluster(constituents, R=R_clustering, jet_algorithm=0)
oldeta=jet["eta"]
oldpt=jet['pt']
### Rot phi ###
# phi = sj1.phi()
# for all c, do c.rotate_z(-phi)
v = subjets[0][1][0]
v = LorentzVector(v)
phi = v.phi()
for _, content, _, _ in subjets:
for i in range(len(content)):
v = LorentzVector(content[i][:4])
v.rotate_z(-phi)
content[i, 0] = v[0]
content[i, 1] = v[1]
content[i, 2] = v[2]
content[i, 3] = v[3]
### boost ###
# bv = sj1.boost_vector()
# bv.set_perp(0)
# for all c, do c.boost(-bv)
v = subjets[0][1][0]
v = LorentzVector(v)
bv = v.boost_vector()
bv.set_perp(0)
for _, content, _, _ in subjets:
for i in range(len(content)):
v = LorentzVector(content[i][:4])
v.boost(-bv)
content[i, 0] = v[0]
content[i, 1] = v[1]
content[i, 2] = v[2]
content[i, 3] = v[3]
### Rot alpha ###
# deltaz = sj1.pz - sj2.pz
# deltay = sj1.py - sj2.py
# alpha = -atan2(deltaz, deltay)
# for all c, do c.rotate_x(alpha)
if len(subjets) >= 2:
deltaz = subjets[0][1][0, 2] - subjets[1][1][0, 2]
deltay = subjets[0][1][0, 1] - subjets[1][1][0, 1]
alpha = -np.arctan2(deltaz, deltay)
for _, content, _, _ in subjets:
for i in range(len(content)):
v = LorentzVector(content[i][:4])
v.rotate_x(alpha)
content[i, 0] = v[0]
content[i, 1] = v[1]
content[i, 2] = v[2]
content[i, 3] = v[3]
### flip if necessary ###
# if sj3.pz < 0: for all c, do c.set_pz(-c.pz)
if len(subjets) >= 3 and subjets[2][1][0, 2] < 0:
for _, content, _, _ in subjets:
for i in range(len(content)):
content[i, 2] *= -1.0
### finally recluster all transformed constituents c into a single jet ###
constituents = []
for tree, content, _, _ in subjets:
constituents.append(content[tree[:, 0] == -1])
constituents = np.vstack(constituents)
if output == "anti-kt":
subjets = cluster(constituents, R=100., jet_algorithm=1)
elif output == "kt":
subjets = cluster(constituents, R=100., jet_algorithm=0)
elif output == "cambridge":
subjets = cluster(constituents, R=100., jet_algorithm=2)
else:
raise
jet["tree"] = subjets[0][0]
jet["content"] = subjets[0][1]
v = LorentzVector(jet["content"][0])
jet["phi"] = v.phi()
jet["eta"] = v.eta()
jet["energy"] = v.E()
jet["mass"] = v.m()
jet["pt"] = v.pt()
jet["root_id"] = 0
jet['oldeta'] = oldeta
jet['oldpt'] = oldpt
if regression:
jet["genpt"] = genpt
return(jet)
def load_from_pickle(filename, n_jets):
"""loads a pickle file"""
jets = []
fd = open(filename, "rb")
for i in range(n_jets):
jet = pickle.load(fd)
jets.append(jet)
fd.close()
return jets
# Jet related
def _pt(v):
"""computes the pt of a LorentzVector"""
pz = v[2]
p = (v[0:3] ** 2).sum() ** 0.5
eta = 0.5 * (np.log(p + pz) - np.log(p - pz))
pt = p / np.cosh(eta)
return pt
def permute_by_pt(jet, root_id=None):
"""Makes the hightest pt subjet the right subjet"""
# ensure that the left sub-jet has always a larger pt than the right
if root_id is None:
root_id = jet["root_id"]
if jet["tree"][root_id][0] != -1:
left = jet["tree"][root_id][0]
right = jet["tree"][root_id][1]
pt_left = _pt(jet["content"][left])
pt_right = _pt(jet["content"][right])
if pt_left < pt_right:
jet["tree"][root_id][0] = right
jet["tree"][root_id][1] = left
permute_by_pt(jet, left)
permute_by_pt(jet, right)
return jet
def rewrite_content(jet):
"""computes successive fusions and ids."""
jet = copy.deepcopy(jet)
# if jet["content"].shape[1] == 5:
# pflow = jet["content"][:, 4].copy()
content = np.zeros((len(jet["content"]),4+5))
content[:,:4] = jet["content"][:,:-1]
ids = np.abs(jet['content'][:,-1])
content[:,4:] = np.array([np.isclose(ids,211.),np.isclose(ids,130.),np.isclose(ids,11.),np.isclose(ids,13.),np.isclose(ids,22.)],dtype=float).T
tree = jet["tree"]
def _rec(i):
if tree[i, 0] == -1:
pass
else:
_rec(tree[i, 0])
_rec(tree[i, 1])
c = content[tree[i, 0]] + content[tree[i, 1]]
c[4:]=((content[tree[i, 0],3])*content[tree[i, 0],4:]+(content[tree[i, 1],3])*content[tree[i, 1],4:])/(content[tree[i, 0],3]*content[tree[i, 1],3])
content[i] = c
_rec(jet["root_id"])
# if jet["content"].shape[1] == 5:
# jet["content"][:, 4] = pflow
return jet
def extract(jet, pflow=False):
"""per node feature extraction"""
jet = copy.deepcopy(jet)
s = jet["content"].shape
# if not pflow:
content = np.zeros((s[0], 7+5+2))
# else:
# # pflow value will be one-hot encoded
# content = np.zeros((s[0], 7+4))
for i in range(len(jet["content"])):
px = jet["content"][i, 0]
py = jet["content"][i, 1]
pz = jet["content"][i, 2]
p = (jet["content"][i, 0:3] ** 2).sum() ** 0.5
eta = 0.5 * (np.log(p + pz) - np.log(p - pz))
theta = 2 * np.arctan(np.exp(-eta))
pt = p / np.cosh(eta)
phi = np.arctan2(py, px)
content[i, 0] = p
content[i, 1] = eta if np.isfinite(eta) else 0.0
content[i, 2] = phi
content[i, 3] = jet["content"][i, 3]
content[i, 4] = (jet["content"][i, 3] /
jet["content"][jet["root_id"], 3])
content[i, 5] = pt if np.isfinite(pt) else 0.0
content[i, 6] = theta if np.isfinite(theta) else 0.0
content[i, 7] = jet["oldeta"]
content[i, 8] = jet["oldpt"]
content[i,9:] = jet["content"][i, -5:]
# if pflow:
# if jet["content"][i, 4] >= 0:
# content[i, 7+int(jet["content"][i, 4])] = 1.0
jet["content"] = content
return jet
def randomize(jet):
"""build a random tree"""
jet = copy.deepcopy(jet)
leaves = np.where(jet["tree"][:, 0] == -1)[0]
nodes = [n for n in leaves]
content = [jet["content"][n] for n in nodes]
nodes = range(len(nodes))
tree = [[-1, -1] for n in nodes]
pool = [n for n in nodes]
next_id = len(nodes)
while len(pool) >= 2:
i = np.random.randint(len(pool))
left = pool[i]
del pool[i]
j = np.random.randint(len(pool))
right = pool[j]
del pool[j]
nodes.append(next_id)
c = (content[left] + content[right])
# if len(c) == 5:
# c[-1] = -1
content.append(c)
tree.append([left, right])
pool.append(next_id)
next_id += 1
jet["content"] = np.array(content)
jet["tree"] = np.array(tree).astype(int)
jet["root_id"] = len(jet["tree"]) - 1
return jet
def sequentialize_by_pt(jet, reverse=False):
"""transform the tree into a sequence ordered by pt"""
jet = copy.deepcopy(jet)
leaves = np.where(jet["tree"][:, 0] == -1)[0]
nodes = [n for n in leaves]
content = [jet["content"][n] for n in nodes]
nodes = [i for i in range(len(nodes))]
tree = [[-1, -1] for n in nodes]
pool = sorted([n for n in nodes],
key = lambda n: _pt(content[n]),
reverse = reverse)
next_id = len(pool)
while len(pool) >= 2:
right = pool[-1]
left = pool[-2]
del pool[-1]
del pool[-1]
nodes.append(next_id)
c = (content[left] + content[right])
if len(c) == 5:
c[-1] = -1
content.append(c)
tree.append([left, right])
pool.append(next_id)
next_id += 1
jet["content"] = np.array(content)
jet["tree"] = np.array(tree).astype(int)
jet["root_id"] = len(jet["tree"]) - 1
return jet
| [
"functools.partial",
"copy.deepcopy",
"numpy.arctan2",
"numpy.abs",
"numpy.log",
"sklearn.preprocessing.RobustScaler",
"numpy.zeros",
"numpy.isfinite",
"rootpy.vector.LorentzVector",
"numpy.isclose",
"pickle.load",
"numpy.array",
"numpy.where",
"numpy.exp",
"multiprocessing.Pool",
"num... | [((339, 359), 'functools.partial', 'partial', (['f'], {}), '(f, **kwargs)\n', (346, 359), False, 'from functools import partial\n'), ((363, 378), 'multiprocessing.Pool', 'mp.Pool', (['ncores'], {}), '(ncores)\n', (370, 378), True, 'import multiprocessing as mp\n'), ((2131, 2149), 'numpy.arctan2', 'np.arctan2', (['py', 'px'], {}), '(py, px)\n', (2141, 2149), True, 'import numpy as np\n'), ((2930, 2948), 'copy.deepcopy', 'copy.deepcopy', (['jet'], {}), '(jet)\n', (2943, 2948), False, 'import copy\n'), ((3374, 3390), 'rootpy.vector.LorentzVector', 'LorentzVector', (['v'], {}), '(v)\n', (3387, 3390), False, 'from rootpy.vector import LorentzVector\n'), ((3833, 3849), 'rootpy.vector.LorentzVector', 'LorentzVector', (['v'], {}), '(v)\n', (3846, 3849), False, 'from rootpy.vector import LorentzVector\n'), ((5347, 5370), 'numpy.vstack', 'np.vstack', (['constituents'], {}), '(constituents)\n', (5356, 5370), True, 'import numpy as np\n'), ((5759, 5791), 'rootpy.vector.LorentzVector', 'LorentzVector', (["jet['content'][0]"], {}), "(jet['content'][0])\n", (5772, 5791), False, 'from rootpy.vector import LorentzVector\n'), ((7246, 7264), 'copy.deepcopy', 'copy.deepcopy', (['jet'], {}), '(jet)\n', (7259, 7264), False, 'import copy\n'), ((7451, 7480), 'numpy.abs', 'np.abs', (["jet['content'][:, -1]"], {}), "(jet['content'][:, -1])\n", (7457, 7480), True, 'import numpy as np\n'), ((8233, 8251), 'copy.deepcopy', 'copy.deepcopy', (['jet'], {}), '(jet)\n', (8246, 8251), False, 'import copy\n'), ((8316, 8343), 'numpy.zeros', 'np.zeros', (['(s[0], 7 + 5 + 2)'], {}), '((s[0], 7 + 5 + 2))\n', (8324, 8343), True, 'import numpy as np\n'), ((9537, 9555), 'copy.deepcopy', 'copy.deepcopy', (['jet'], {}), '(jet)\n', (9550, 9555), False, 'import copy\n'), ((10266, 10283), 'numpy.array', 'np.array', (['content'], {}), '(content)\n', (10274, 10283), True, 'import numpy as np\n'), ((10504, 10522), 'copy.deepcopy', 'copy.deepcopy', (['jet'], {}), '(jet)\n', (10517, 10522), False, 'import 
copy\n'), ((11261, 11278), 'numpy.array', 'np.array', (['content'], {}), '(content)\n', (11269, 11278), True, 'import numpy as np\n'), ((646, 665), 'numpy.vstack', 'np.vstack', (['Xcontent'], {}), '(Xcontent)\n', (655, 665), True, 'import numpy as np\n'), ((6234, 6249), 'pickle.load', 'pickle.load', (['fd'], {}), '(fd)\n', (6245, 6249), False, 'import pickle\n'), ((6495, 6507), 'numpy.cosh', 'np.cosh', (['eta'], {}), '(eta)\n', (6502, 6507), True, 'import numpy as np\n'), ((8781, 8799), 'numpy.arctan2', 'np.arctan2', (['py', 'px'], {}), '(py, px)\n', (8791, 8799), True, 'import numpy as np\n'), ((9570, 9603), 'numpy.where', 'np.where', (["(jet['tree'][:, 0] == -1)"], {}), "(jet['tree'][:, 0] == -1)\n", (9578, 9603), True, 'import numpy as np\n'), ((10537, 10570), 'numpy.where', 'np.where', (["(jet['tree'][:, 0] == -1)"], {}), "(jet['tree'][:, 0] == -1)\n", (10545, 10570), True, 'import numpy as np\n'), ((627, 641), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (639, 641), False, 'from sklearn.preprocessing import RobustScaler\n'), ((2088, 2102), 'numpy.log', 'np.log', (['(p + pz)'], {}), '(p + pz)\n', (2094, 2102), True, 'import numpy as np\n'), ((2105, 2119), 'numpy.log', 'np.log', (['(p - pz)'], {}), '(p - pz)\n', (2111, 2119), True, 'import numpy as np\n'), ((3506, 3535), 'rootpy.vector.LorentzVector', 'LorentzVector', (['content[i][:4]'], {}), '(content[i][:4])\n', (3519, 3535), False, 'from rootpy.vector import LorentzVector\n'), ((3986, 4015), 'rootpy.vector.LorentzVector', 'LorentzVector', (['content[i][:4]'], {}), '(content[i][:4])\n', (3999, 4015), False, 'from rootpy.vector import LorentzVector\n'), ((4498, 4524), 'numpy.arctan2', 'np.arctan2', (['deltaz', 'deltay'], {}), '(deltaz, deltay)\n', (4508, 4524), True, 'import numpy as np\n'), ((6449, 6463), 'numpy.log', 'np.log', (['(p + pz)'], {}), '(p + pz)\n', (6455, 6463), True, 'import numpy as np\n'), ((6466, 6480), 'numpy.log', 'np.log', (['(p - pz)'], {}), '(p - pz)\n', (6472, 
6480), True, 'import numpy as np\n'), ((8754, 8766), 'numpy.cosh', 'np.cosh', (['eta'], {}), '(eta)\n', (8761, 8766), True, 'import numpy as np\n'), ((8858, 8874), 'numpy.isfinite', 'np.isfinite', (['eta'], {}), '(eta)\n', (8869, 8874), True, 'import numpy as np\n'), ((9095, 9110), 'numpy.isfinite', 'np.isfinite', (['pt'], {}), '(pt)\n', (9106, 9110), True, 'import numpy as np\n'), ((9153, 9171), 'numpy.isfinite', 'np.isfinite', (['theta'], {}), '(theta)\n', (9164, 9171), True, 'import numpy as np\n'), ((10302, 10316), 'numpy.array', 'np.array', (['tree'], {}), '(tree)\n', (10310, 10316), True, 'import numpy as np\n'), ((11297, 11311), 'numpy.array', 'np.array', (['tree'], {}), '(tree)\n', (11305, 11311), True, 'import numpy as np\n'), ((4628, 4657), 'rootpy.vector.LorentzVector', 'LorentzVector', (['content[i][:4]'], {}), '(content[i][:4])\n', (4641, 4657), False, 'from rootpy.vector import LorentzVector\n'), ((7510, 7532), 'numpy.isclose', 'np.isclose', (['ids', '(211.0)'], {}), '(ids, 211.0)\n', (7520, 7532), True, 'import numpy as np\n'), ((7531, 7553), 'numpy.isclose', 'np.isclose', (['ids', '(130.0)'], {}), '(ids, 130.0)\n', (7541, 7553), True, 'import numpy as np\n'), ((7552, 7573), 'numpy.isclose', 'np.isclose', (['ids', '(11.0)'], {}), '(ids, 11.0)\n', (7562, 7573), True, 'import numpy as np\n'), ((7572, 7593), 'numpy.isclose', 'np.isclose', (['ids', '(13.0)'], {}), '(ids, 13.0)\n', (7582, 7593), True, 'import numpy as np\n'), ((7592, 7613), 'numpy.isclose', 'np.isclose', (['ids', '(22.0)'], {}), '(ids, 22.0)\n', (7602, 7613), True, 'import numpy as np\n'), ((8660, 8674), 'numpy.log', 'np.log', (['(p + pz)'], {}), '(p + pz)\n', (8666, 8674), True, 'import numpy as np\n'), ((8677, 8691), 'numpy.log', 'np.log', (['(p - pz)'], {}), '(p - pz)\n', (8683, 8691), True, 'import numpy as np\n'), ((8723, 8735), 'numpy.exp', 'np.exp', (['(-eta)'], {}), '(-eta)\n', (8729, 8735), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Utilities to find the relative position of the sun."""
import datetime as dt
import cf_units as unit
import numpy as np
from improver import BasePlugin
from improver.utilities.spatial import lat_lon_determine, transform_grid_to_lat_lon
from improver.utilities.temporal import iris_time_to_datetime
def calc_solar_declination(day_of_year):
    """
    Calculate the solar declination for a given day of the year.

    Equivalent to the low accuracy equations from the NOAA Earth System
    Research Lab:
    https://www.esrl.noaa.gov/gmd/grad/solcalc/sollinks.html

    Args:
        day_of_year (int):
            Day of the year 0 to 365, 0 = 1st January
    Returns:
        float:
            Declination in degrees.North-South
    Raises:
        ValueError: if day_of_year lies outside [0, 365].
    """
    if not 0 <= day_of_year <= 365:
        raise ValueError("Day of the year must be between 0 and 365")
    # Declination = -(axial tilt) * cos(360/orbital_year * day - solstice offset)
    orbit_angle = np.radians(0.9856 * day_of_year + 9.3)
    return -23.5 * np.cos(orbit_angle)
def calc_solar_hour_angle(longitudes, day_of_year, utc_hour):
    """
    Calculate the solar hour angle for each element of an array of longitudes.

    Equivalent to the low accuracy equations from the NOAA Earth System
    Research Lab:
    https://www.esrl.noaa.gov/gmd/grad/solcalc/sollinks.html

    Args:
        longitudes (float or numpy.ndarray):
            A single longitude or array of longitudes, between
            180.0 and -180.0 degrees
        day_of_year (int):
            Day of the year 0 to 365, 0 = 1st January
        utc_hour (float):
            Hour of the day in UTC
    Returns:
        float or numpy.ndarray:
            Hour angles in degrees East-West
    Raises:
        ValueError: if day_of_year or utc_hour is out of range.
    """
    if not 0 <= day_of_year <= 365:
        raise ValueError("Day of the year must be between 0 and 365")
    if not 0.0 <= utc_hour <= 24.0:
        raise ValueError("Hour must be between 0 and 24.0")
    thetao = 2 * np.pi * day_of_year / 365.0
    # Equation of time (hours): periodic correction for the eccentricity
    # of the orbit and the axial tilt.
    eqt = (
        0.000075
        + 0.001868 * np.cos(thetao)
        - 0.032077 * np.sin(thetao)
        - 0.014615 * np.cos(2 * thetao)
        - 0.040849 * np.sin(2 * thetao)
    )
    # Local solar time: UTC corrected for longitude (24 h per 360 degrees)
    # and for the equation of time expressed in hours.
    solar_time = utc_hour + 24.0 * longitudes / 360.0 + eqt * 12 / np.pi
    # Hour angle: 15 degrees per hour from solar noon.
    return (solar_time - 12.0) * 15.0
def calc_solar_elevation(
    latitudes, longitudes, day_of_year, utc_hour, return_sine=False
):
    """
    Calculate the solar elevation angle.

    Args:
        latitudes (float or numpy.ndarray):
            A single latitude or array of latitudes,
            between -90.0 and 90.0
        longitudes (float or numpy.ndarray):
            A single longitude or array of longitudes,
            between 180.0 and -180.0
        day_of_year (int):
            Day of the year 0 to 365, 0 = 1st January
        utc_hour (float):
            Hour of the day in UTC in hours
        return_sine (bool):
            If True return the sine of the solar elevation instead of
            the angle in degrees. Default False.
    Returns:
        float or numpy.ndarray:
            Solar elevation in degrees (or its sine) for each location.
    Raises:
        ValueError: if latitudes, day_of_year or utc_hour is out of range.
    """
    if np.min(latitudes) < -90.0 or np.max(latitudes) > 90.0:
        raise ValueError("Latitudes must be between -90.0 and 90.0")
    if not 0 <= day_of_year <= 365:
        raise ValueError("Day of the year must be between 0 and 365")
    if not 0.0 <= utc_hour <= 24.0:
        raise ValueError("Hour must be between 0 and 24.0")
    decl = np.radians(calc_solar_declination(day_of_year))
    rad_hours = np.radians(calc_solar_hour_angle(longitudes, day_of_year, utc_hour))
    rad_lats = np.radians(latitudes)
    # Standard spherical-astronomy formula for sin(elevation).
    sine_elevation = np.sin(decl) * np.sin(rad_lats) + np.cos(decl) * np.cos(
        rad_lats
    ) * np.cos(rad_hours)
    if return_sine:
        return sine_elevation
    return np.degrees(np.arcsin(sine_elevation))
def daynight_terminator(longitudes, day_of_year, utc_hour):
    """
    Calculate the latitude of the day/night terminator at the
    given longitudes.

    Args:
        longitudes (numpy.ndarray):
            Array of longitudes, between 180.0 and -180.0 degrees
        day_of_year (int):
            Day of the year 0 to 365, 0 = 1st January
        utc_hour (float):
            Hour of the day in UTC
    Returns:
        numpy.ndarray:
            Latitudes of the daynight terminator
    Raises:
        ValueError: if day_of_year or utc_hour is out of range.
    """
    if not 0 <= day_of_year <= 365:
        raise ValueError("Day of the year must be between 0 and 365")
    if not 0.0 <= utc_hour <= 24.0:
        raise ValueError("Hour must be between 0 and 24.0")
    decl = np.radians(calc_solar_declination(day_of_year))
    rad_hour = np.radians(calc_solar_hour_angle(longitudes, day_of_year, utc_hour))
    # On the terminator sin(elevation) = 0, which rearranges to
    # tan(lat) = -cos(hour_angle) / tan(declination).
    return np.degrees(np.arctan(-np.cos(rad_hour) / np.tan(decl)))
class DayNightMask(BasePlugin):
    """
    Plugin Class to generate a daynight mask for the provided cube
    """

    def __init__(self):
        """Initialise the plugin with the integer codes written to the mask."""
        self.night = 0
        self.day = 1

    def __repr__(self):
        """Represent the configured plugin instance as a string."""
        return "<DayNightMask : Day = {}, Night = {}>".format(self.day, self.night)

    def _create_daynight_mask(self, cube):
        """
        Create a blank daynight mask cube, initialised to night everywhere.

        Args:
            cube (iris.cube.Cube):
                cube with the times and coordinates required for mask
        Returns:
            iris.cube.Cube:
                Blank daynight mask cube. The resulting cube will be the
                same shape as the time, y, and x coordinate; other
                coordinates will be ignored although they might appear as
                attributes on the cube as it is extracted from the first
                slice.
        """
        template = next(
            cube.slices(
                [cube.coord("time"), cube.coord(axis="y"), cube.coord(axis="x")]
            )
        )
        daynight_mask = template.copy()
        daynight_mask.rename("day_night_mask")
        daynight_mask.units = unit.Unit("1")
        # Start with every point flagged as night; day points are set later.
        daynight_mask.data = np.ones(daynight_mask.data.shape, dtype="int") * self.night
        return daynight_mask

    def _daynight_lat_lon_cube(self, mask_cube, day_of_year, utc_hour):
        """
        Calculate the daynight mask for the provided lat-lon cube.

        Args:
            mask_cube (iris.cube.Cube):
                daynight mask cube - data initially set to self.night
            day_of_year (int):
                day of the year 0 to 365, 0 = 1st January
            utc_hour (float):
                Hour in UTC
        Returns:
            iris.cube.Cube:
                daynight mask cube - daytime set to self.day
        """
        lons = mask_cube.coord("longitude").points
        lats = mask_cube.coord("latitude").points
        terminator_lats = daynight_terminator(lons, day_of_year, utc_hour)
        # Broadcast latitudes and terminator latitudes to (lat, lon) grids.
        lats_grid = lats.reshape(len(lats), 1) + np.zeros_like(lons)
        terminator_grid = np.zeros_like(lats).reshape(len(lats), 1) + terminator_lats
        # Which side of the terminator is in daylight depends on the sign
        # of the solar declination (northern vs southern hemisphere summer).
        if calc_solar_declination(day_of_year) > 0.0:
            daylight = lats_grid >= terminator_grid
        else:
            daylight = lats_grid < terminator_grid
        mask_cube.data[daylight] = self.day
        return mask_cube

    def process(self, cube):
        """
        Calculate the daynight mask for the provided cube. Note that only the
        hours and minutes of the dtval variable are used. To ensure consistent
        behaviour with changes of second or subsecond precision, the second
        component is added to the time object. This means that when the hours
        and minutes are used, we have correctly rounded to the nearest minute,
        e.g.::

           dt(2017, 1, 1, 11, 59, 59) -- +59 --> dt(2017, 1, 1, 12, 0, 58)
           dt(2017, 1, 1, 12, 0, 1)   -- +1  --> dt(2017, 1, 1, 12, 0, 2)
           dt(2017, 1, 1, 12, 0, 30)  -- +30 --> dt(2017, 1, 1, 12, 1, 0)

        Args:
            cube (iris.cube.Cube):
                input cube
        Returns:
            iris.cube.Cube:
                daynight mask cube, daytime set to self.day,
                nighttime set to self.night.
                The resulting cube will be the same shape as
                the time, y, and x coordinate; other coordinates
                will be ignored although they might appear as attributes
                on the cube as it is extracted from the first slice.
        """
        daynight_mask = self._create_daynight_mask(cube)
        times = iris_time_to_datetime(daynight_mask.coord("time"))
        for index, dtval in enumerate(times):
            mask_cube = daynight_mask[index]
            day_of_year = (dtval - dt.datetime(dtval.year, 1, 1)).days
            # Add the seconds once more so hours/minutes round to the
            # nearest minute (see the docstring examples).
            dtval = dtval + dt.timedelta(seconds=dtval.second)
            utc_hour = (dtval.hour * 60.0 + dtval.minute) / 60.0
            if lat_lon_determine(mask_cube) is not None:
                # Grid that is not lat-lon: use per-point solar elevation.
                lats, lons = transform_grid_to_lat_lon(mask_cube)
                solar_el = calc_solar_elevation(lats, lons, day_of_year, utc_hour)
                mask_cube.data[solar_el > 0.0] = self.day
            else:
                mask_cube = self._daynight_lat_lon_cube(
                    mask_cube, day_of_year, utc_hour
                )
            daynight_mask.data[index, ::] = mask_cube.data
        return daynight_mask
| [
"numpy.radians",
"improver.utilities.spatial.lat_lon_determine",
"numpy.zeros_like",
"numpy.degrees",
"numpy.arcsin",
"numpy.ones",
"datetime.datetime",
"improver.utilities.spatial.transform_grid_to_lat_lon",
"numpy.min",
"numpy.sin",
"numpy.max",
"numpy.tan",
"numpy.cos",
"numpy.where",
... | [((5625, 5648), 'numpy.radians', 'np.radians', (['declination'], {}), '(declination)\n', (5635, 5648), True, 'import numpy as np\n'), ((5739, 5761), 'numpy.radians', 'np.radians', (['hour_angle'], {}), '(hour_angle)\n', (5749, 5761), True, 'import numpy as np\n'), ((5773, 5794), 'numpy.radians', 'np.radians', (['latitudes'], {}), '(latitudes)\n', (5783, 5794), True, 'import numpy as np\n'), ((6922, 6945), 'numpy.radians', 'np.radians', (['declination'], {}), '(declination)\n', (6932, 6945), True, 'import numpy as np\n'), ((7035, 7057), 'numpy.radians', 'np.radians', (['hour_angle'], {}), '(hour_angle)\n', (7045, 7057), True, 'import numpy as np\n'), ((7124, 7140), 'numpy.degrees', 'np.degrees', (['lats'], {}), '(lats)\n', (7134, 7140), True, 'import numpy as np\n'), ((8407, 8421), 'cf_units.Unit', 'unit.Unit', (['"""1"""'], {}), "('1')\n", (8416, 8421), True, 'import cf_units as unit\n'), ((9279, 9298), 'numpy.zeros_like', 'np.zeros_like', (['lons'], {}), '(lons)\n', (9292, 9298), True, 'import numpy as np\n'), ((2704, 2742), 'numpy.radians', 'np.radians', (['(0.9856 * day_of_year + 9.3)'], {}), '(0.9856 * day_of_year + 9.3)\n', (2714, 2742), True, 'import numpy as np\n'), ((3998, 4016), 'numpy.sin', 'np.sin', (['(2 * thetao)'], {}), '(2 * thetao)\n', (4004, 4016), True, 'import numpy as np\n'), ((5165, 5182), 'numpy.min', 'np.min', (['latitudes'], {}), '(latitudes)\n', (5171, 5182), True, 'import numpy as np\n'), ((5194, 5211), 'numpy.max', 'np.max', (['latitudes'], {}), '(latitudes)\n', (5200, 5211), True, 'import numpy as np\n'), ((5850, 5862), 'numpy.sin', 'np.sin', (['decl'], {}), '(decl)\n', (5856, 5862), True, 'import numpy as np\n'), ((5865, 5877), 'numpy.sin', 'np.sin', (['lats'], {}), '(lats)\n', (5871, 5877), True, 'import numpy as np\n'), ((5924, 5941), 'numpy.cos', 'np.cos', (['rad_hours'], {}), '(rad_hours)\n', (5930, 5941), True, 'import numpy as np\n'), ((6003, 6029), 'numpy.arcsin', 'np.arcsin', (['solar_elevation'], {}), 
'(solar_elevation)\n', (6012, 6029), True, 'import numpy as np\n'), ((7099, 7111), 'numpy.tan', 'np.tan', (['decl'], {}), '(decl)\n', (7105, 7111), True, 'import numpy as np\n'), ((8451, 8497), 'numpy.ones', 'np.ones', (['daynight_mask.data.shape'], {'dtype': '"""int"""'}), "(daynight_mask.data.shape, dtype='int')\n", (8458, 8497), True, 'import numpy as np\n'), ((9573, 9615), 'numpy.where', 'np.where', (['(lats_on_lon >= terminator_on_lon)'], {}), '(lats_on_lon >= terminator_on_lon)\n', (9581, 9615), True, 'import numpy as np\n'), ((9650, 9691), 'numpy.where', 'np.where', (['(lats_on_lon < terminator_on_lon)'], {}), '(lats_on_lon < terminator_on_lon)\n', (9658, 9691), True, 'import numpy as np\n'), ((11371, 11399), 'improver.utilities.spatial.lat_lon_determine', 'lat_lon_determine', (['mask_cube'], {}), '(mask_cube)\n', (11388, 11399), False, 'from improver.utilities.spatial import lat_lon_determine, transform_grid_to_lat_lon\n'), ((3958, 3976), 'numpy.cos', 'np.cos', (['(2 * thetao)'], {}), '(2 * thetao)\n', (3964, 3976), True, 'import numpy as np\n'), ((5880, 5892), 'numpy.cos', 'np.cos', (['decl'], {}), '(decl)\n', (5886, 5892), True, 'import numpy as np\n'), ((5895, 5907), 'numpy.cos', 'np.cos', (['lats'], {}), '(lats)\n', (5901, 5907), True, 'import numpy as np\n'), ((7080, 7096), 'numpy.cos', 'np.cos', (['rad_hour'], {}), '(rad_hour)\n', (7086, 7096), True, 'import numpy as np\n'), ((9320, 9339), 'numpy.zeros_like', 'np.zeros_like', (['lats'], {}), '(lats)\n', (9333, 9339), True, 'import numpy as np\n'), ((11249, 11283), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': 'dtval.second'}), '(seconds=dtval.second)\n', (11261, 11283), True, 'import datetime as dt\n'), ((11506, 11542), 'improver.utilities.spatial.transform_grid_to_lat_lon', 'transform_grid_to_lat_lon', (['mask_cube'], {}), '(mask_cube)\n', (11531, 11542), False, 'from improver.utilities.spatial import lat_lon_determine, transform_grid_to_lat_lon\n'), ((3922, 3936), 'numpy.sin', 'np.sin', 
(['thetao'], {}), '(thetao)\n', (3928, 3936), True, 'import numpy as np\n'), ((11185, 11214), 'datetime.datetime', 'dt.datetime', (['dtval.year', '(1)', '(1)'], {}), '(dtval.year, 1, 1)\n', (11196, 11214), True, 'import datetime as dt\n'), ((11657, 11681), 'numpy.where', 'np.where', (['(solar_el > 0.0)'], {}), '(solar_el > 0.0)\n', (11665, 11681), True, 'import numpy as np\n'), ((3886, 3900), 'numpy.cos', 'np.cos', (['thetao'], {}), '(thetao)\n', (3892, 3900), True, 'import numpy as np\n')] |
'''Various extensions to distributions
* skew normal and skew t distribution by Azzalini, A. & Capitanio, A.
* Gram-Charlier expansion distribution (using 4 moments),
* distributions based on non-linear transformation
- Transf_gen
- ExpTransf_gen, LogTransf_gen
- TransfTwo_gen
(defines as examples: square, negative square and abs transformations)
- this versions are without __new__
* mnvormcdf, mvstdnormcdf : cdf, rectangular integral for multivariate normal
distribution
TODO:
* Where is Transf_gen for general monotonic transformation ? found and added it
* write some docstrings, some parts I do not remember
* add Box-Cox transformation, parametrized ?
this is only partially cleaned, still includes test examples as functions
main changes
* add transf_gen (2010-05-09)
* added separate example and tests (2010-05-09)
* collect transformation function into classes
Example
-------
>>> logtg = Transf_gen(stats.t, np.exp, np.log,
numargs = 1, a=0, name = 'lnnorm',
longname = 'Exp transformed normal',
extradoc = '\ndistribution of y = exp(x), with x standard normal'
'precision for moment andstats is not very high, 2-3 decimals')
>>> logtg.cdf(5, 6)
0.92067704211191848
>>> stats.t.cdf(np.log(5), 6)
0.92067704211191848
>>> logtg.pdf(5, 6)
0.021798547904239293
>>> stats.t.pdf(np.log(5), 6)
0.10899273954837908
>>> stats.t.pdf(np.log(5), 6)/5. #derivative
0.021798547909675815
Author: josef-pktd
License: BSD
'''
import numpy as np
from numpy import poly1d,sqrt, exp
import scipy
from scipy import stats, special
from scipy.stats import distributions
from statsmodels.stats.moment_helpers import mvsk2mc, mc2mvsk
try:
from scipy.stats._mvn import mvndst
except ImportError:
# Must be using SciPy <1.8.0 where this function was moved (it's not a
# public SciPy function, but we need it here)
from scipy.stats.mvn import mvndst
#note copied from distr_skewnorm_0.py
class SkewNorm_gen(distributions.rv_continuous):
    '''univariate Skew-Normal distribution of Azzalini

    pdf(x) = 2 * normpdf(x) * normcdf(alpha * x)

    class follows scipy.stats.distributions pattern
    but with __init__
    '''
    def __init__(self):
        # NOTE(review): ``extradoc`` and the ``self._size`` rvs protocol
        # used below are legacy scipy APIs -- confirm the pinned scipy
        # version still supports them.
        #super(SkewNorm_gen,self).__init__(
        distributions.rv_continuous.__init__(self,
            name = 'Skew Normal distribution', shapes = 'alpha',
            extradoc = ''' ''' )

    def _argcheck(self, alpha):
        # any real alpha is a valid shape parameter (its sign flips the skew)
        return 1 #(alpha >= 0)

    def _rvs(self, alpha):
        # sample via the two-normal representation of the skew normal,
        # see http://azzalini.stat.unipd.it/SN/faq.html
        delta = alpha/np.sqrt(1+alpha**2)
        u0 = stats.norm.rvs(size=self._size)
        u1 = delta*u0 + np.sqrt(1-delta**2)*stats.norm.rvs(size=self._size)
        # reflecting u1 where u0 <= 0 produces the skewed sample
        return np.where(u0>0, u1, -u1)

    def _munp(self, n, alpha):
        # use pdf integration with _mom0_sc if only _pdf is defined.
        # default stats calculation uses ppf, which is much slower
        return self._mom0_sc(n, alpha)

    def _pdf(self,x,alpha):
        # 2*normpdf(x)*normcdf(alpha*x)
        return 2.0/np.sqrt(2*np.pi)*np.exp(-x**2/2.0) * special.ndtr(alpha*x)

    def _stats_skip(self,x,alpha,moments='mvsk'):
        #skip for now to force moment integration as check
        pass

# module-level ready-to-use instance
skewnorm = SkewNorm_gen()
# generated the same way as distributions in stats.distributions
class SkewNorm2_gen(distributions.rv_continuous):
    '''univariate Skew-Normal distribution of Azzalini

    pdf(x) = 2 * normpdf(x) * normcdf(alpha * x)

    class follows scipy.stats.distributions pattern
    '''
    def _argcheck(self, alpha):
        # any real alpha is accepted as shape parameter
        return 1 #where(alpha>=0, 1, 0)

    def _pdf(self,x,alpha):
        # 2*normpdf(x)*normcdf(alpha*x)
        return 2.0/np.sqrt(2*np.pi)*np.exp(-x**2/2.0) * special.ndtr(alpha*x)

# instantiated the same way as the distributions in stats.distributions
# NOTE(review): ``extradoc`` was removed from rv_continuous in newer scipy
# releases -- confirm the pinned version accepts it.
skewnorm2 = SkewNorm2_gen(name = 'Skew Normal distribution', shapes = 'alpha',
                        extradoc = ''' -inf < alpha < inf''')
class ACSkewT_gen(distributions.rv_continuous):
    '''univariate Skew-T distribution of Azzalini

    class follows scipy.stats.distributions pattern
    but with __init__
    '''
    def __init__(self):
        # NOTE(review): ``extradoc`` and ``self._size`` below are legacy
        # scipy APIs -- confirm the pinned scipy version supports them.
        #super(SkewT_gen,self).__init__(
        distributions.rv_continuous.__init__(self,
            name = 'Skew T distribution', shapes = 'df, alpha',
            extradoc = '''
Skewed T distribution by <NAME>. & Capitanio, A. (2003)_

the pdf is given by:
 pdf(x) = 2.0 * t.pdf(x, df) * t.cdf(df+1, alpha*x*np.sqrt((1+df)/(x**2+df)))

with alpha >=0

Note: different from skewed t distribution by Hansen 1999
.._

<NAME>. & <NAME>. (2003), Distributions generated by perturbation of
symmetry with emphasis on a multivariate skew-t distribution,
appears in J.Roy.Statist.Soc, series B, vol.65, pp.367-389
''' )

    def _argcheck(self, df, alpha):
        # alpha == alpha filters out NaN; df must be strictly positive
        return (alpha == alpha)*(df>0)

##    def _arg_check(self, alpha):
##        return np.where(alpha>=0, 0, 1)
##    def _argcheck(self, alpha):
##        return np.where(alpha>=0, 1, 0)

    def _rvs(self, df, alpha):
        # normal-mixture construction of the t distribution: a skew-normal
        # draw scaled by an independent chi2 mixing variable,
        # see http://azzalini.stat.unipd.it/SN/faq.html
        #delta = alpha/np.sqrt(1+alpha**2)
        V = stats.chi2.rvs(df, size=self._size)
        z = skewnorm.rvs(alpha, size=self._size)
        return z/np.sqrt(V/df)

    def _munp(self, n, df, alpha):
        # use pdf integration with _mom0_sc if only _pdf is defined.
        # default stats calculation uses ppf
        return self._mom0_sc(n, df, alpha)

    def _pdf(self, x, df, alpha):
        # 2 * t.pdf(x, df) * T(df+1, alpha * x * sqrt((1+df)/(x**2+df)))
        return 2.0*distributions.t._pdf(x, df) * special.stdtr(df+1, alpha*x*np.sqrt((1+df)/(x**2+df)))
##
##def mvsk2cm(*args):
## mu,sig,sk,kur = args
## # Get central moments
## cnt = [None]*4
## cnt[0] = mu
## cnt[1] = sig #*sig
## cnt[2] = sk * sig**1.5
## cnt[3] = (kur+3.0) * sig**2.0
## return cnt
##
##
##def mvsk2m(args):
## mc, mc2, skew, kurt = args#= self._stats(*args,**mdict)
## mnc = mc
## mnc2 = mc2 + mc*mc
## mc3 = skew*(mc2**1.5) # 3rd central moment
## mnc3 = mc3+3*mc*mc2+mc**3 # 3rd non-central moment
## mc4 = (kurt+3.0)*(mc2**2.0) # 4th central moment
## mnc4 = mc4+4*mc*mc3+6*mc*mc*mc2+mc**4
## return (mc, mc2, mc3, mc4), (mnc, mnc2, mnc3, mnc4)
##
##def mc2mvsk(args):
## mc, mc2, mc3, mc4 = args
## skew = mc3 / mc2**1.5
## kurt = mc4 / mc2**2.0 - 3.0
## return (mc, mc2, skew, kurt)
##
##def m2mc(args):
## mnc, mnc2, mnc3, mnc4 = args
## mc = mnc
## mc2 = mnc2 - mnc*mnc
## #mc3 = skew*(mc2**1.5) # 3rd central moment
## mc3 = mnc3 - (3*mc*mc2+mc**3) # 3rd central moment
## #mc4 = (kurt+3.0)*(mc2**2.0) # 4th central moment
## mc4 = mnc4 - (4*mc*mc3+6*mc*mc*mc2+mc**4)
## return (mc, mc2, mc3, mc4)
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None]*N
plist[0] = poly1d(1)
for n in range(1,N):
plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1]
return plist
def pdf_moments_st(cnt):
    """Return the Gram-Charlier expanded pdf function given the list of
    central moments (first one is mean).

    Parameters
    ----------
    cnt : list
        central moments; cnt[0] is the mean, cnt[1] the variance, and
        further entries are higher central moments.

    Returns
    -------
    thisfunc : function
        evaluates the expanded pdf at the non-standardized x
    totp : numpy.poly1d
        the correction polynomial applied to the normal pdf

    Notes
    -----
    Port of the scipy.stats version; the scipy.stats version had a bug
    and returned the plain normal distribution. Fixed here:
    - removed an unconditional ``raise SystemError`` and debug prints
      that made every call with more than two moments fail,
    - ``range((k-3)//2)`` uses integer division (``/`` is a TypeError
      under Python 3),
    - ``scipy.factorial2`` no longer exists; use ``special.factorial2``.
    """
    N = len(cnt)
    if N < 2:
        raise ValueError("At least two moments must be given to "
                         "approximate the pdf.")
    totp = poly1d(1)
    sig = sqrt(cnt[1])
    mu = cnt[0]
    if N > 2:
        Dvals = _hermnorm(N + 1)
        for k in range(3, N + 1):
            # accumulate the coefficient C_k of the k-th Hermite term
            Ck = 0.0
            for n in range((k - 3) // 2):
                m = k - 2 * n
                if m % 2:  # m is odd
                    momdiff = cnt[m - 1]
                else:
                    momdiff = cnt[m - 1] - sig * sig * special.factorial2(m - 1)
                Ck += Dvals[k][m] / sig ** m * momdiff
            totp = totp + Ck * Dvals[k]

    def thisfunc(x):
        xn = (x - mu) / sig
        return totp(xn) * exp(-xn * xn / 2.0) / sqrt(2 * np.pi) / sig
    return thisfunc, totp
def pdf_mvsk(mvsk):
    """Return the Gaussian expanded pdf function given the list of 1st, 2nd
    moment and skew and Fisher (excess) kurtosis.

    Parameters
    ----------
    mvsk : list of mu, mc2, skew, kurt
        distribution is matched to these four moments

    Returns
    -------
    pdffunc : function
        function that evaluates the pdf(x), where x is the non-standardized
        random variable.

    Notes
    -----
    This implements a Gram-Charlier expansion of the normal distribution
    where the first 2 moments coincide with those of the normal distribution
    but skew and kurtosis can deviate from it.

    In the Gram-Charlier distribution it is possible that the density
    becomes negative. This is the case when the deviation from the
    normal distribution is too large.

    References
    ----------
    https://en.wikipedia.org/wiki/Edgeworth_series
    Continuous Univariate Distributions, Volume 1, 2nd ed., p.30
    """
    if len(mvsk) < 4:
        raise ValueError("Four moments must be given to "
                         "approximate the pdf.")
    mu, mc2, skew, kurt = mvsk
    sig = sqrt(mc2)
    Dvals = _hermnorm(5)
    C3 = skew / 6.0
    C4 = kurt / 24.0
    # The order-3 Hermite polynomial from _hermnorm has the opposite sign
    # of the usual convention, hence the subtraction of the skew term.
    totp = poly1d(1) - C3 * Dvals[3] + C4 * Dvals[4]

    def pdffunc(x):
        xn = (x - mu) / sig
        return totp(xn) * np.exp(-xn * xn / 2.0) / np.sqrt(2 * np.pi) / sig
    return pdffunc
def pdf_moments(cnt):
    """Return the Gaussian expanded pdf function given the list of central
    moments (first one is mean).

    Parameters
    ----------
    cnt : list of mc, mc2, mc3, mc4
        mean and the 2nd to 4th central moments of the distribution
        to be matched

    Returns
    -------
    thisfunc : function
        function that evaluates the pdf(x), where x is the
        non-standardized random variable.

    Notes
    -----
    This implements a Gram-Charlier expansion of the normal distribution
    where the first 2 moments coincide with those of the normal distribution
    but skew and kurtosis can deviate from it.

    In the Gram-Charlier distribution it is possible that the density
    becomes negative. This is the case when the deviation from the
    normal distribution is too large.

    References
    ----------
    https://en.wikipedia.org/wiki/Edgeworth_series
    Continuous Univariate Distributions, Volume 1, 2nd ed., p.30
    """
    # BUG FIX: the original guard only required two moments, but the
    # unpacking below needs exactly four central moments, so len(cnt) of
    # 2 or 3 failed with an opaque unpacking error. Require four up front
    # (consistent with pdf_mvsk).
    if len(cnt) < 4:
        raise ValueError("Four moments must be given to "
                         "approximate the pdf.")
    mc, mc2, mc3, mc4 = cnt
    skew = mc3 / mc2**1.5
    kurt = mc4 / mc2**2.0 - 3.0  # Fisher (excess) kurtosis
    totp = poly1d(1)
    sig = sqrt(cnt[1])
    mu = cnt[0]
    Dvals = _hermnorm(5)
    C3 = skew/6.0
    C4 = kurt/24.0
    # The order-3 Hermite polynomial from _hermnorm has the opposite sign
    # of the usual convention, hence the subtraction of the skew term.
    totp = totp - C3*Dvals[3] + C4*Dvals[4]

    def thisfunc(x):
        xn = (x-mu)/sig
        return totp(xn)*np.exp(-xn*xn/2.0)/np.sqrt(2*np.pi)/sig
    return thisfunc
class NormExpan_gen(distributions.rv_continuous):
    '''Gram-Charlier Expansion of Normal distribution

    class follows scipy.stats.distributions pattern
    but with __init__
    '''
    def __init__(self,args, **kwds):
        #todo: replace with super call
        distributions.rv_continuous.__init__(self,
            name = 'Normal Expansion distribution', shapes = ' ',
            extradoc = '''
        The distribution is defined as the Gram-Charlier expansion of
        the normal distribution using the first four moments. The pdf
        is given by

        pdf(x) = (1+ skew/6.0 * H(xc,3) + kurt/24.0 * H(xc,4))*normpdf(xc)

        where xc = (x-mu)/sig is the standardized value of the random variable
        and H(xc,3) and H(xc,4) are Hermite polynomials

        Note: This distribution has to be parametrized during
        initialization and instantiation, and does not have a shape
        parameter after instantiation (similar to frozen distribution
        except for location and scale.) Location and scale can be used
        as with other distributions, however note, that they are relative
        to the initialized distribution.
        ''' )
        #print args, kwds
        # three parametrization modes:
        #   'sample'  -> args is a data sample; moments are estimated
        #   'mvsk'    -> args are mean, variance, skew, kurtosis
        #   'centmom' -> args are the first four central moments
        mode = kwds.get('mode', 'sample')
        if mode == 'sample':
            # NOTE(review): stats.describe returns (nobs, minmax, mean,
            # variance, skew, kurtosis), so ``sig`` here receives the
            # variance, not the standard deviation -- confirm mvsk2mc
            # expects the variance in this slot.
            mu,sig,sk,kur = stats.describe(args)[2:]
            self.mvsk = (mu,sig,sk,kur)
            cnt = mvsk2mc((mu,sig,sk,kur))
        elif mode == 'mvsk':
            cnt = mvsk2mc(args)
            self.mvsk = args
        elif mode == 'centmom':
            cnt = args
            self.mvsk = mc2mvsk(cnt)
        else:
            raise ValueError("mode must be 'mvsk' or centmom")
        self.cnt = cnt
        #self.mvsk = (mu,sig,sk,kur)
        #self._pdf = pdf_moments(cnt)
        # the pdf is fixed at construction time from the four moments
        self._pdf = pdf_mvsk(self.mvsk)

    def _munp(self,n):
        # use pdf integration with _mom0_sc if only _pdf is defined.
        # default stats calculation uses ppf
        return self._mom0_sc(n)

    def _stats_skip(self):
        # skip for now to force numerical integration of pdf for testing
        return self.mvsk
## copied from nonlinear_transform_gen.py
''' A class for the distribution of a non-linear monotonic transformation of a continuous random variable
simplest usage:
example: create log-gamma distribution, i.e. y = log(x),
where x is gamma distributed (also available in scipy.stats)
loggammaexpg = Transf_gen(stats.gamma, np.log, np.exp)
example: what is the distribution of the discount factor y=1/(1+x)
where interest rate x is normally distributed with N(mux,stdx**2)')?
(just to come up with a story that implies a nice transformation)
invnormalg = Transf_gen(stats.norm, inversew, inversew_inv, decr=True, a=-np.inf)
This class does not work well for distributions with difficult shapes,
e.g. 1/x where x is standard normal, because of the singularity and jump at zero.
Note: I'm working from my version of scipy.stats.distribution.
But this script runs under scipy 0.6.0 (checked with numpy: 1.2.0rc2 and python 2.4)
This is not yet thoroughly tested, polished or optimized
TODO:
* numargs handling is not yet working properly, numargs needs to be specified (default = 0 or 1)
* feeding args and kwargs to underlying distribution is untested and incomplete
* distinguish args and kwargs for the transformed and the underlying distribution
- currently all args and no kwargs are transmitted to underlying distribution
- loc and scale only work for transformed, but not for underlying distribution
- possible to separate args for transformation and underlying distribution parameters
* add _rvs as method, will be faster in many cases
Created on Tuesday, October 28, 2008, 12:40:37 PM
Author: josef-pktd
License: BSD
'''
def get_u_argskwargs(**kwargs):
    """Split off keyword arguments intended for the underlying distribution.

    Keywords prefixed with ``u_`` are collected with the prefix stripped;
    the special entry ``u_args`` (if present after stripping) is popped
    out separately.

    Returns
    -------
    u_args : tuple or None
        positional arguments for the underlying distribution
    u_kwargs : dict
        keyword arguments for the underlying distribution
    """
    u_kwargs = {key[2:]: value for key, value in kwargs.items()
                if key.startswith('u_')}
    u_args = u_kwargs.pop('u_args', None)
    return u_args, u_kwargs
class Transf_gen(distributions.rv_continuous):
    '''a class for non-linear monotonic transformation of a continuous random variable
    '''
    def __init__(self, kls, func, funcinv, *args, **kwargs):
        #print args
        #print kwargs
        # func: forward transformation y = func(x)
        # funcinv: inverse transformation x = funcinv(y)
        self.func = func
        self.funcinv = funcinv
        #explicit for self.__dict__.update(kwargs)
        #need to set numargs because inspection does not work
        self.numargs = kwargs.pop('numargs', 0)
        #print self.numargs
        name = kwargs.pop('name','transfdist')
        longname = kwargs.pop('longname','Non-linear transformed distribution')
        extradoc = kwargs.pop('extradoc',None)
        a = kwargs.pop('a', -np.inf)
        b = kwargs.pop('b', np.inf)
        self.decr = kwargs.pop('decr', False)
            #defines whether it is a decreasing (True)
            # or increasing (False) monotonic transformation
        # remaining u_-prefixed keywords are meant for the underlying
        # distribution (see get_u_argskwargs)
        self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
        self.kls = kls   #(self.u_args, self.u_kwargs)
                         # possible to freeze the underlying distribution
        # NOTE(review): ``extradoc`` is a legacy rv_continuous keyword --
        # confirm the pinned scipy version still accepts it.
        super(Transf_gen,self).__init__(a=a, b=b, name = name,
                                longname = longname, extradoc = extradoc)

    def _rvs(self, *args, **kwargs):
        # NOTE(review): relies on the legacy scipy ``_size`` protocol
        self.kls._size = self._size
        return self.funcinv(self.kls._rvs(*args))

    def _cdf(self,x,*args, **kwargs):
        #print args
        if not self.decr:
            return self.kls._cdf(self.funcinv(x),*args, **kwargs)
            #note scipy _cdf only take *args not *kwargs
        else:
            # a decreasing transformation flips the cdf
            return 1.0 - self.kls._cdf(self.funcinv(x),*args, **kwargs)

    def _ppf(self, q, *args, **kwargs):
        if not self.decr:
            return self.func(self.kls._ppf(q,*args, **kwargs))
        else:
            # a decreasing transformation maps quantile q to 1-q
            return self.func(self.kls._ppf(1-q,*args, **kwargs))
def inverse(x):
    """Elementwise reciprocal 1/x (numpy ufunc semantics)."""
    return np.divide(1.0, x)


# Mean and standard deviation of the interest rate used by the
# discount-factor transformation below. The second assignment
# deliberately overrides the first (kept from the original example).
mux, stdx = 0.05, 0.1
mux, stdx = 9.0, 1.0


def inversew(x):
    """Discount factor 1/(1 + mux + x*stdx) for a standardized rate x."""
    return 1.0 / (1 + mux + x * stdx)


def inversew_inv(x):
    """Inverse of ``inversew``: recover the standardized rate from y=1/(1+r)."""
    recip = 1.0 / x
    return (recip - 1.0 - mux) / stdx


def identit(x):
    """Identity transformation."""
    return x
# discount-factor distribution: y = 1/(1 + mux + stdx*x) with x standard
# normal; decr=True because the transform is decreasing in x
invdnormalg = Transf_gen(stats.norm, inversew, inversew_inv, decr=True, #a=-np.inf,
                numargs = 0, name = 'discf', longname = 'normal-based discount factor',
                extradoc = '\ndistribution of discount factor y=1/(1+x)) with x N(0.05,0.1**2)')
# log-normal distribution as exp-transform of the standard normal
# NOTE(review): numargs=2 looks like a leftover (the normal has no shape
# parameters) — confirm before changing
lognormalg = Transf_gen(stats.norm, np.exp, np.log,
                numargs = 2, a=0, name = 'lnnorm',
                longname = 'Exp transformed normal',
                extradoc = '\ndistribution of y = exp(x), with x standard normal'
                'precision for moment andstats is not very high, 2-3 decimals')
# log-gamma: log-transform of the gamma distribution (one shape parameter)
loggammaexpg = Transf_gen(stats.gamma, np.log, np.exp, numargs=1)
## copied form nonlinear_transform_short.py
'''univariate distribution of a non-linear monotonic transformation of a
random variable
'''
class ExpTransf_gen(distributions.rv_continuous):
    '''Distribution of the exponential transform y = exp(x) of a random
    variable with distribution class `kls`.

    The constructor takes the distribution class of the untransformed
    variable; cdf/ppf are obtained by mapping through log/exp.
    '''

    def __init__(self, kls, *args, **kwargs):
        # number of shape parameters cannot be found by inspection,
        # so it has to be given explicitly (default 1)
        self.numargs = kwargs.get('numargs', 1)
        name = kwargs.get('name', 'Log transformed distribution')
        # lower support bound; exp(x) > 0, hence default 0
        a = kwargs.get('a', 0)
        # BUG FIX: `a` was previously computed from kwargs but then ignored
        # (0 was hard-coded here), unlike the parallel LogTransf_gen;
        # also removed a stray `pass` statement from _cdf below.
        super(ExpTransf_gen, self).__init__(a=a, name=name)
        self.kls = kls

    def _cdf(self, x, *args):
        # P(exp(X) <= x) = P(X <= log(x))
        return self.kls.cdf(np.log(x), *args)

    def _ppf(self, q, *args):
        return np.exp(self.kls.ppf(q, *args))
class LogTransf_gen(distributions.rv_continuous):
    '''Distribution of the log transform y = log(x) of a random variable
    with distribution class `kls`.

    cdf/ppf are obtained by mapping through exp/log.
    '''

    def __init__(self, kls, *args, **kwargs):
        # shape-parameter count must be given explicitly (default 1);
        # inspection does not work for transformed distributions
        self.numargs = kwargs.get('numargs', 1)
        name = kwargs.get('name', 'Log transformed distribution')
        a = kwargs.get('a', 0)
        super(LogTransf_gen, self).__init__(a=a, name=name)
        self.kls = kls

    def _cdf(self, x, *args):
        # P(log(X) <= x) = P(X <= exp(x))
        return self.kls._cdf(np.exp(x), *args)

    def _ppf(self, q, *args):
        return np.log(self.kls._ppf(q, *args))
## copied from transformtwo.py
'''
Created on Apr 28, 2009
@author: <NAME>
'''
''' A class for the distribution of a non-linear u-shaped or hump shaped transformation of a
continuous random variable
This is a companion to the distributions of non-linear monotonic transformation to the case
when the inverse mapping is a 2-valued correspondence, for example for absolute value or square
simplest usage:
example: create squared distribution, i.e. y = x**2,
where x is normal or t distributed
This class does not work well for distributions with difficult shapes,
e.g. 1/x where x is standard normal, because of the singularity and jump at zero.
This verifies for normal - chi2, normal - halfnorm, foldnorm, and t - F
TODO:
* numargs handling is not yet working properly,
numargs needs to be specified (default = 0 or 1)
* feeding args and kwargs to underlying distribution works in t distribution example
* distinguish args and kwargs for the transformed and the underlying distribution
- currently all args and no kwargs are transmitted to underlying distribution
- loc and scale only work for transformed, but not for underlying distribution
- possible to separate args for transformation and underlying distribution parameters
* add _rvs as method, will be faster in many cases
'''
class TransfTwo_gen(distributions.rv_continuous):
    '''Distribution based on a non-monotonic (u- or hump-shaped) transformation.

    The constructor is called with a distribution class and functions
    that define the non-linear transformation, and generates the
    distribution of the transformed random variable.

    Note: the transformation, its inverse and derivatives need to be fully
    specified: func, funcinvplus, funcinvminus, derivplus, derivminus.
    Currently no numerical derivatives or inverse are calculated.

    This can be used to generate distribution instances similar to the
    distributions in scipy.stats.
    '''
    #a class for non-linear non-monotonic transformation of a continuous random variable

    def __init__(self, kls, func, funcinvplus, funcinvminus, derivplus,
                 derivminus, *args, **kwargs):
        # transformation, its two inverse branches, and their derivatives
        self.func = func
        self.funcinvplus = funcinvplus
        self.funcinvminus = funcinvminus
        self.derivplus = derivplus
        self.derivminus = derivminus
        #explicit for self.__dict__.update(kwargs)
        #need to set numargs because inspection does not work
        self.numargs = kwargs.pop('numargs', 0)
        name = kwargs.pop('name', 'transfdist')
        longname = kwargs.pop('longname', 'Non-linear transformed distribution')
        extradoc = kwargs.pop('extradoc', None)
        a = kwargs.pop('a', -np.inf)  # attached to self in super
        b = kwargs.pop('b', np.inf)   # self.a, self.b would be overwritten
        # 'u' for a u-shaped, 'hump' for a hump-shaped transformation
        self.shape = kwargs.pop('shape', False)
        # remaining u_-prefixed keywords belong to the underlying distribution
        self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
        self.kls = kls  #(self.u_args, self.u_kwargs)
        # possible to freeze the underlying distribution
        super(TransfTwo_gen, self).__init__(a=a, b=b, name=name,
                                            shapes=kls.shapes,
                                            longname=longname,
                                            extradoc=extradoc)
        # add enough info for self.freeze() to be able to reconstruct the instance
        self._ctor_param.update(
            dict(kls=kls, func=func, funcinvplus=funcinvplus,
                 funcinvminus=funcinvminus, derivplus=derivplus,
                 derivminus=derivminus, shape=self.shape)
        )

    def _rvs(self, *args):
        self.kls._size = self._size  #size attached to self, not function argument
        return self.func(self.kls._rvs(*args))

    def _pdf(self, x, *args, **kwargs):
        # density sums the contributions of both inverse branches; the sign
        # compensates for the orientation of the branches
        if self.shape == 'u':
            signpdf = 1
        elif self.shape == 'hump':
            signpdf = -1
        else:
            raise ValueError('shape can only be `u` or `hump`')
        return signpdf * (self.derivplus(x)*self.kls._pdf(self.funcinvplus(x), *args, **kwargs) -
                          self.derivminus(x)*self.kls._pdf(self.funcinvminus(x), *args, **kwargs))
        #note scipy _cdf only take *args not *kwargs

    def _cdf(self, x, *args, **kwargs):
        # for the hump shape, the cdf is defined via the survival function
        if self.shape == 'u':
            return self.kls._cdf(self.funcinvplus(x), *args, **kwargs) - \
                   self.kls._cdf(self.funcinvminus(x), *args, **kwargs)
            #note scipy _cdf only take *args not *kwargs
        else:
            return 1.0 - self._sf(x, *args, **kwargs)

    def _sf(self, x, *args, **kwargs):
        # mirror image of _cdf: direct formula for the hump shape
        if self.shape == 'hump':
            return self.kls._cdf(self.funcinvplus(x), *args, **kwargs) - \
                   self.kls._cdf(self.funcinvminus(x), *args, **kwargs)
            #note scipy _cdf only take *args not *kwargs
        else:
            return 1.0 - self._cdf(x, *args, **kwargs)

    def _munp(self, n, *args, **kwargs):
        # non-central moments via the generic numeric-integration helper
        return self._mom0_sc(n, *args)

    # ppf might not be possible in general case?
    # should be possible in symmetric case
    # def _ppf(self, q, *args, **kwargs):
    #     if self.shape == 'u':
    #         return self.func(self.kls._ppf(q,*args, **kwargs))
    #     elif self.shape == 'hump':
    #         return self.func(self.kls._ppf(1-q,*args, **kwargs))
#TODO: rename these functions to have unique names
class SquareFunc:
    '''Quadratic transform y = x**2 together with its two inverse branches
    and their derivatives.

    Instance methods (rather than class methods) are used so that a later
    extension to parametrized functions stays possible.
    '''

    def inverseplus(self, x):
        # positive inverse branch of y = x**2
        return np.sqrt(x)

    def inverseminus(self, x):
        # negative inverse branch of y = x**2
        return 0.0 - np.sqrt(x)

    def derivplus(self, x):
        # derivative of the positive inverse branch
        return 0.5/np.sqrt(x)

    def derivminus(self, x):
        # derivative of the negative inverse branch
        return 0.0 - 0.5/np.sqrt(x)

    def squarefunc(self, x):
        # the forward transform itself
        return np.power(x, 2)
# shared instance holding the square transform and its inverse branches
sqfunc = SquareFunc()

# distribution of y = x**2 with x standard normal (chi-square-like),
# u-shaped transform, support [0, inf)
squarenormalg = TransfTwo_gen(stats.norm, sqfunc.squarefunc, sqfunc.inverseplus,
                sqfunc.inverseminus, sqfunc.derivplus, sqfunc.derivminus,
                shape='u', a=0.0, b=np.inf,
                numargs = 0, name = 'squarenorm', longname = 'squared normal distribution',
                extradoc = '\ndistribution of the square of a normal random variable' +\
                ' y=x**2 with x N(0.0,1)')
#u_loc=l, u_scale=s)
# distribution of y = x**2 with x t-distributed (one shape argument: dof)
# NOTE(review): name='squarenorm' here looks like a copy-paste leftover
# (longname says squared *t* distribution) — confirm before renaming.
squaretg = TransfTwo_gen(stats.t, sqfunc.squarefunc, sqfunc.inverseplus,
                sqfunc.inverseminus, sqfunc.derivplus, sqfunc.derivminus,
                shape='u', a=0.0, b=np.inf,
                numargs = 1, name = 'squarenorm', longname = 'squared t distribution',
                extradoc = '\ndistribution of the square of a t random variable' +\
                ' y=x**2 with x t(dof,0.0,1)')
def inverseplus(x):
    """Positive inverse branch of y = -x**2 (defined for x <= 0)."""
    return np.sqrt(-x)

def inverseminus(x):
    """Negative inverse branch of y = -x**2 (defined for x <= 0)."""
    return 0.0 - np.sqrt(-x)

def derivplus(x):
    """Derivative of the positive inverse branch."""
    return 0.0 - 0.5/np.sqrt(-x)

def derivminus(x):
    """Derivative of the negative inverse branch."""
    return 0.5/np.sqrt(-x)

def negsquarefunc(x):
    """Hump-shaped forward transform y = -x**2."""
    return -np.power(x, 2)
# distribution of y = -x**2 with x standard normal (hump-shaped transform),
# support (-inf, 0]
negsquarenormalg = TransfTwo_gen(stats.norm, negsquarefunc, inverseplus, inverseminus,
                derivplus, derivminus, shape='hump', a=-np.inf, b=0.0,
                numargs = 0, name = 'negsquarenorm', longname = 'negative squared normal distribution',
                extradoc = '\ndistribution of the negative square of a normal random variable' +\
                ' y=-x**2 with x N(0.0,1)')
#u_loc=l, u_scale=s)
def inverseplus(x):
    """Positive inverse branch of y = |x|."""
    return x

def inverseminus(x):
    """Negative inverse branch of y = |x|."""
    return 0.0 - x

def derivplus(x):
    """Derivative of the positive inverse branch (constant 1)."""
    return 1.0

def derivminus(x):
    """Derivative of the negative inverse branch (constant -1)."""
    return 0.0 - 1.0

def absfunc(x):
    """U-shaped forward transform y = |x|."""
    return np.abs(x)
# distribution of y = |x| with x standard normal (half-normal),
# u-shaped transform, support [0, inf)
absnormalg = TransfTwo_gen(stats.norm, np.abs, inverseplus, inverseminus,
                derivplus, derivminus, shape='u', a=0.0, b=np.inf,
                numargs = 0, name = 'absnorm', longname = 'absolute of normal distribution',
                extradoc = '\ndistribution of the absolute value of a normal random variable' +\
                ' y=abs(x) with x N(0,1)')
#copied from mvncdf.py
'''multivariate normal probabilities and cumulative distribution function
a wrapper for scipy.stats._mvn.mvndst
SUBROUTINE MVNDST( N, LOWER, UPPER, INFIN, CORREL, MAXPTS,
& ABSEPS, RELEPS, ERROR, VALUE, INFORM )
*
* A subroutine for computing multivariate normal probabilities.
* This subroutine uses an algorithm given in the paper
* "Numerical Computation of Multivariate Normal Probabilities", in
* J. of Computational and Graphical Stat., 1(1992), pp. 141-149, by
* <NAME>
* Department of Mathematics
* Washington State University
* Pullman, WA 99164-3113
* Email : <EMAIL>
*
* Parameters
*
* N INTEGER, the number of variables.
* LOWER REAL, array of lower integration limits.
* UPPER REAL, array of upper integration limits.
* INFIN INTEGER, array of integration limits flags:
* if INFIN(I) < 0, Ith limits are (-infinity, infinity);
* if INFIN(I) = 0, Ith limits are (-infinity, UPPER(I)];
* if INFIN(I) = 1, Ith limits are [LOWER(I), infinity);
* if INFIN(I) = 2, Ith limits are [LOWER(I), UPPER(I)].
* CORREL REAL, array of correlation coefficients; the correlation
* coefficient in row I column J of the correlation matrix
* should be stored in CORREL( J + ((I-2)*(I-1))/2 ), for J < I.
* The correlation matrix must be positive semidefinite.
* MAXPTS INTEGER, maximum number of function values allowed. This
* parameter can be used to limit the time. A sensible
* strategy is to start with MAXPTS = 1000*N, and then
* increase MAXPTS if ERROR is too large.
* ABSEPS REAL absolute error tolerance.
* RELEPS REAL relative error tolerance.
* ERROR REAL estimated absolute error, with 99% confidence level.
* VALUE REAL estimated value for the integral
* INFORM INTEGER, termination status parameter:
* if INFORM = 0, normal completion with ERROR < EPS;
* if INFORM = 1, completion with ERROR > EPS and MAXPTS
* function values used; increase MAXPTS to
* decrease ERROR;
* if INFORM = 2, N > 500 or N < 1.
*
>>> mvndst([0.0,0.0],[10.0,10.0],[0,0],[0.5])
(2e-016, 1.0, 0)
>>> mvndst([0.0,0.0],[100.0,100.0],[0,0],[0.0])
(2e-016, 1.0, 0)
>>> mvndst([0.0,0.0],[1.0,1.0],[0,0],[0.0])
(2e-016, 0.70786098173714096, 0)
>>> mvndst([0.0,0.0],[0.001,1.0],[0,0],[0.0])
(2e-016, 0.42100802096993045, 0)
>>> mvndst([0.0,0.0],[0.001,10.0],[0,0],[0.0])
(2e-016, 0.50039894221391101, 0)
>>> mvndst([0.0,0.0],[0.001,100.0],[0,0],[0.0])
(2e-016, 0.50039894221391101, 0)
>>> mvndst([0.0,0.0],[0.01,100.0],[0,0],[0.0])
(2e-016, 0.5039893563146316, 0)
>>> mvndst([0.0,0.0],[0.1,100.0],[0,0],[0.0])
(2e-016, 0.53982783727702899, 0)
>>> mvndst([0.0,0.0],[0.1,100.0],[2,2],[0.0])
(2e-016, 0.019913918638514494, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[0,0],[0.0])
(2e-016, 0.25, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[-1,0],[0.0])
(2e-016, 0.5, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[-1,0],[0.5])
(2e-016, 0.5, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[0,0],[0.5])
(2e-016, 0.33333333333333337, 0)
>>> mvndst([0.0,0.0],[0.0,0.0],[0,0],[0.99])
(2e-016, 0.47747329317779391, 0)
'''
informcode = {0: 'normal completion with ERROR < EPS',
1: '''completion with ERROR > EPS and MAXPTS function values used;
increase MAXPTS to decrease ERROR;''',
2: 'N > 500 or N < 1'}
def mvstdnormcdf(lower, upper, corrcoef, **kwds):
    '''standardized multivariate normal cumulative distribution function

    Wrapper around scipy.stats.mvn.mvndst that computes a rectangular
    integral over a multivariate normal with unit variance in every
    dimension; the correlation structure is arbitrary.

    Parameters
    ----------
    lower, upper : array_like, 1d
        integration limits, one entry per dimension; -np.inf / np.inf
        give open integration intervals
    corrcoef : float or array_like
        correlation structure, given either as a scalar correlation
        coefficient (bivariate case only), as a flat 1d array of the
        triangular correlation coefficients stacked by rows, or as the
        full square correlation matrix
    **kwds
        forwarded to mvndst: maxpts (function-value budget), abseps
        (absolute tolerance), releps (relative tolerance)

    Returns
    -------
    cdfvalue : float
        value of the integral

    See Also
    --------
    mvnormcdf : cdf of a multivariate normal without standardization
    '''
    n = len(lower)
    # convert to arrays so the ndim checks below work
    lower = np.array(lower)
    upper = np.array(upper)
    corrcoef = np.array(corrcoef)
    correl = np.zeros(int(n*(n-1)/2.0))

    if lower.ndim != 1 or upper.ndim != 1:
        raise ValueError('can handle only 1D bounds')
    if len(upper) != n:
        raise ValueError('bounds have different lengths')

    if n == 2 and corrcoef.size == 1:
        # scalar correlation coefficient, bivariate case
        correl = corrcoef
    elif corrcoef.ndim == 1 and len(corrcoef) == n*(n-1)/2.0:
        # already flattened in the layout mvndst expects
        correl = corrcoef
    elif corrcoef.shape == (n, n):
        # full square matrix: take the strictly-lower triangle, row by row,
        # which matches CORREL(J + ((I-2)*(I-1))/2) for J < I
        correl = corrcoef[np.tril_indices(n, -1)]
    else:
        raise ValueError('corrcoef has incorrect dimension')

    # give mvndst a larger default budget for higher dimensions
    if 'maxpts' not in kwds and n > 2:
        kwds['maxpts'] = 10000*n

    # encode the integration-limit flags expected by mvndst:
    # 2 = two-sided, 0 = (-inf, upper], 1 = [lower, inf), -1 = unbounded
    lowinf = np.isneginf(lower)
    uppinf = np.isposinf(upper)
    infin = 2.0*np.ones(n)
    infin[lowinf] = 0
    infin[uppinf] = 1
    infin[lowinf & uppinf] = -1  # must come last: overrides the two above

    error, cdfvalue, inform = mvndst(lower, upper, infin, correl, **kwds)
    if inform:
        # non-zero status: report but still return the (approximate) value
        print('something wrong', informcode[inform], error)
    return cdfvalue
def mvnormcdf(upper, mu, cov, lower=None, **kwds):
    '''multivariate normal cumulative distribution function

    Normalizes location and scale of a general multivariate normal and
    delegates the rectangular integral to `mvstdnormcdf` (and hence to
    scipy.stats.mvn.mvndst).

    Parameters
    ----------
    upper : array_like, 1d
        upper integration limits; np.inf gives an open interval
    mu : array_like, 1d
        list or array of means
    cov : array_like, 2d
        covariance matrix
    lower : array_like, 1d, optional
        lower integration limits; defaults to -inf in every dimension
    **kwds
        forwarded to mvndst: maxpts, abseps, releps

    Returns
    -------
    cdfvalue : float
        value of the integral

    See Also
    --------
    mvstdnormcdf : location/scale standardized multivariate normal cdf
    '''
    upper = np.array(upper)
    lower = np.full(upper.shape, -np.inf) if lower is None else np.array(lower)
    cov = np.array(cov)

    # standard deviation per dimension, used to standardize limits and
    # to turn the covariance into a correlation matrix
    sd = np.sqrt(np.diag(cov))
    # NOTE: assumes sd is float; limits are shifted/scaled relative to mu
    std_lower = (lower - mu)/sd
    std_upper = (upper - mu)/sd
    sd_row = np.atleast_2d(sd)
    corr = cov/sd_row/sd_row.T
    return mvstdnormcdf(std_lower, std_upper, corr, **kwds)
| [
"numpy.abs",
"scipy.stats.norm.rvs",
"numpy.ones",
"scipy.stats.chi2.rvs",
"numpy.exp",
"numpy.diag",
"scipy.stats.describe",
"numpy.isposinf",
"numpy.atleast_2d",
"scipy.factorial2",
"numpy.power",
"statsmodels.stats.moment_helpers.mc2mvsk",
"scipy.stats.mvn.mvndst",
"numpy.putmask",
"s... | [((6955, 6964), 'numpy.poly1d', 'poly1d', (['(1)'], {}), '(1)\n', (6961, 6964), False, 'from numpy import poly1d, sqrt, exp\n'), ((7486, 7495), 'numpy.poly1d', 'poly1d', (['(1)'], {}), '(1)\n', (7492, 7495), False, 'from numpy import poly1d, sqrt, exp\n'), ((7506, 7518), 'numpy.sqrt', 'sqrt', (['cnt[1]'], {}), '(cnt[1])\n', (7510, 7518), False, 'from numpy import poly1d, sqrt, exp\n'), ((9434, 9443), 'numpy.poly1d', 'poly1d', (['(1)'], {}), '(1)\n', (9440, 9443), False, 'from numpy import poly1d, sqrt, exp\n'), ((9454, 9463), 'numpy.sqrt', 'sqrt', (['mc2'], {}), '(mc2)\n', (9458, 9463), False, 'from numpy import poly1d, sqrt, exp\n'), ((10925, 10934), 'numpy.poly1d', 'poly1d', (['(1)'], {}), '(1)\n', (10931, 10934), False, 'from numpy import poly1d, sqrt, exp\n'), ((10945, 10957), 'numpy.sqrt', 'sqrt', (['cnt[1]'], {}), '(cnt[1])\n', (10949, 10957), False, 'from numpy import poly1d, sqrt, exp\n'), ((17707, 17724), 'numpy.divide', 'np.divide', (['(1.0)', 'x'], {}), '(1.0, x)\n', (17716, 17724), True, 'import numpy as np\n'), ((27697, 27708), 'numpy.sqrt', 'np.sqrt', (['(-x)'], {}), '(-x)\n', (27704, 27708), True, 'import numpy as np\n'), ((28549, 28558), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (28555, 28558), True, 'import numpy as np\n'), ((35135, 35150), 'numpy.array', 'np.array', (['lower'], {}), '(lower)\n', (35143, 35150), True, 'import numpy as np\n'), ((35163, 35178), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (35171, 35178), True, 'import numpy as np\n'), ((35194, 35212), 'numpy.array', 'np.array', (['corrcoef'], {}), '(corrcoef)\n', (35202, 35212), True, 'import numpy as np\n'), ((36122, 36140), 'numpy.isneginf', 'np.isneginf', (['lower'], {}), '(lower)\n', (36133, 36140), True, 'import numpy as np\n'), ((36154, 36172), 'numpy.isposinf', 'np.isposinf', (['upper'], {}), '(upper)\n', (36165, 36172), True, 'import numpy as np\n'), ((36205, 36233), 'numpy.putmask', 'np.putmask', (['infin', 'lowinf', '(0)'], {}), '(infin, lowinf, 
0)\n', (36215, 36233), True, 'import numpy as np\n'), ((36261, 36289), 'numpy.putmask', 'np.putmask', (['infin', 'uppinf', '(1)'], {}), '(infin, uppinf, 1)\n', (36271, 36289), True, 'import numpy as np\n'), ((36342, 36380), 'numpy.putmask', 'np.putmask', (['infin', '(lowinf * uppinf)', '(-1)'], {}), '(infin, lowinf * uppinf, -1)\n', (36352, 36380), True, 'import numpy as np\n'), ((36645, 36688), 'scipy.stats.mvn.mvndst', 'mvndst', (['lower', 'upper', 'infin', 'correl'], {}), '(lower, upper, infin, correl, **kwds)\n', (36651, 36688), False, 'from scipy.stats.mvn import mvndst\n'), ((38203, 38218), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (38211, 38218), True, 'import numpy as np\n'), ((38340, 38353), 'numpy.array', 'np.array', (['cov'], {}), '(cov)\n', (38348, 38353), True, 'import numpy as np\n'), ((38590, 38610), 'numpy.atleast_2d', 'np.atleast_2d', (['stdev'], {}), '(stdev)\n', (38603, 38610), True, 'import numpy as np\n'), ((2252, 2361), 'scipy.stats.distributions.rv_continuous.__init__', 'distributions.rv_continuous.__init__', (['self'], {'name': '"""Skew Normal distribution"""', 'shapes': '"""alpha"""', 'extradoc': '""" """'}), "(self, name='Skew Normal distribution',\n shapes='alpha', extradoc=' ')\n", (2288, 2361), False, 'from scipy.stats import distributions\n'), ((2628, 2659), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'size': 'self._size'}), '(size=self._size)\n', (2642, 2659), False, 'from scipy import stats, special\n'), ((2751, 2776), 'numpy.where', 'np.where', (['(u0 > 0)', 'u1', '(-u1)'], {}), '(u0 > 0, u1, -u1)\n', (2759, 2776), True, 'import numpy as np\n'), ((4133, 4682), 'scipy.stats.distributions.rv_continuous.__init__', 'distributions.rv_continuous.__init__', (['self'], {'name': '"""Skew T distribution"""', 'shapes': '"""df, alpha"""', 'extradoc': '"""\nSkewed T distribution by <NAME>. & Capitanio, A. 
(2003)_\n\nthe pdf is given by:\n pdf(x) = 2.0 * t.pdf(x, df) * t.cdf(df+1, alpha*x*np.sqrt((1+df)/(x**2+df)))\n\nwith alpha >=0\n\nNote: different from skewed t distribution by Hansen 1999\n.._\n<NAME>. & <NAME>. (2003), Distributions generated by perturbation of\nsymmetry with emphasis on a multivariate skew-t distribution,\nappears in J.Roy.Statist.Soc, series B, vol.65, pp.367-389\n\n"""'}), '(self, name=\'Skew T distribution\',\n shapes=\'df, alpha\', extradoc=\n """\nSkewed T distribution by <NAME>. & Capitanio, A. (2003)_\n\nthe pdf is given by:\n pdf(x) = 2.0 * t.pdf(x, df) * t.cdf(df+1, alpha*x*np.sqrt((1+df)/(x**2+df)))\n\nwith alpha >=0\n\nNote: different from skewed t distribution by Hansen 1999\n.._\n<NAME>. & <NAME>. (2003), Distributions generated by perturbation of\nsymmetry with emphasis on a multivariate skew-t distribution,\nappears in J.Roy.Statist.Soc, series B, vol.65, pp.367-389\n\n"""\n )\n', (4169, 4682), False, 'from scipy.stats import distributions\n'), ((5103, 5138), 'scipy.stats.chi2.rvs', 'stats.chi2.rvs', (['df'], {'size': 'self._size'}), '(df, size=self._size)\n', (5117, 5138), False, 'from scipy import stats, special\n'), ((11974, 12866), 'scipy.stats.distributions.rv_continuous.__init__', 'distributions.rv_continuous.__init__', (['self'], {'name': '"""Normal Expansion distribution"""', 'shapes': '""" """', 'extradoc': '"""\n The distribution is defined as the Gram-Charlier expansion of\n the normal distribution using the first four moments. The pdf\n is given by\n\n pdf(x) = (1+ skew/6.0 * H(xc,3) + kurt/24.0 * H(xc,4))*normpdf(xc)\n\n where xc = (x-mu)/sig is the standardized value of the random variable\n and H(xc,3) and H(xc,4) are Hermite polynomials\n\n Note: This distribution has to be parametrized during\n initialization and instantiation, and does not have a shape\n parameter after instantiation (similar to frozen distribution\n except for location and scale.) 
Location and scale can be used\n as with other distributions, however note, that they are relative\n to the initialized distribution.\n """'}), '(self, name=\n \'Normal Expansion distribution\', shapes=\' \', extradoc=\n """\n The distribution is defined as the Gram-Charlier expansion of\n the normal distribution using the first four moments. The pdf\n is given by\n\n pdf(x) = (1+ skew/6.0 * H(xc,3) + kurt/24.0 * H(xc,4))*normpdf(xc)\n\n where xc = (x-mu)/sig is the standardized value of the random variable\n and H(xc,3) and H(xc,4) are Hermite polynomials\n\n Note: This distribution has to be parametrized during\n initialization and instantiation, and does not have a shape\n parameter after instantiation (similar to frozen distribution\n except for location and scale.) Location and scale can be used\n as with other distributions, however note, that they are relative\n to the initialized distribution.\n """\n )\n', (12010, 12866), False, 'from scipy.stats import distributions\n'), ((26483, 26493), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (26490, 26493), True, 'import numpy as np\n'), ((26728, 26742), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (26736, 26742), True, 'import numpy as np\n'), ((27748, 27759), 'numpy.sqrt', 'np.sqrt', (['(-x)'], {}), '(-x)\n', (27755, 27759), True, 'import numpy as np\n'), ((27847, 27858), 'numpy.sqrt', 'np.sqrt', (['(-x)'], {}), '(-x)\n', (27854, 27858), True, 'import numpy as np\n'), ((27894, 27908), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (27902, 27908), True, 'import numpy as np\n'), ((36189, 36199), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (36196, 36199), True, 'import numpy as np\n'), ((38314, 38329), 'numpy.array', 'np.array', (['lower'], {}), '(lower)\n', (38322, 38329), True, 'import numpy as np\n'), ((38374, 38386), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (38381, 38386), True, 'import numpy as np\n'), ((2595, 2618), 'numpy.sqrt', 'np.sqrt', (['(1 + alpha ** 2)'], {}), 
'(1 + alpha ** 2)\n', (2602, 2618), True, 'import numpy as np\n'), ((3107, 3130), 'scipy.special.ndtr', 'special.ndtr', (['(alpha * x)'], {}), '(alpha * x)\n', (3119, 3130), False, 'from scipy import stats, special\n'), ((3709, 3732), 'scipy.special.ndtr', 'special.ndtr', (['(alpha * x)'], {}), '(alpha * x)\n', (3721, 3732), False, 'from scipy import stats, special\n'), ((5205, 5220), 'numpy.sqrt', 'np.sqrt', (['(V / df)'], {}), '(V / df)\n', (5212, 5220), True, 'import numpy as np\n'), ((13093, 13120), 'statsmodels.stats.moment_helpers.mvsk2mc', 'mvsk2mc', (['(mu, sig, sk, kur)'], {}), '((mu, sig, sk, kur))\n', (13100, 13120), False, 'from statsmodels.stats.moment_helpers import mvsk2mc, mc2mvsk\n'), ((19609, 19618), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (19615, 19618), True, 'import numpy as np\n'), ((20538, 20547), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (20544, 20547), True, 'import numpy as np\n'), ((26547, 26557), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (26554, 26557), True, 'import numpy as np\n'), ((26606, 26616), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (26613, 26616), True, 'import numpy as np\n'), ((27800, 27811), 'numpy.sqrt', 'np.sqrt', (['(-x)'], {}), '(-x)\n', (27807, 27811), True, 'import numpy as np\n'), ((2684, 2707), 'numpy.sqrt', 'np.sqrt', (['(1 - delta ** 2)'], {}), '(1 - delta ** 2)\n', (2691, 2707), True, 'import numpy as np\n'), ((2704, 2735), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'size': 'self._size'}), '(size=self._size)\n', (2718, 2735), False, 'from scipy import stats, special\n'), ((3087, 3108), 'numpy.exp', 'np.exp', (['(-x ** 2 / 2.0)'], {}), '(-x ** 2 / 2.0)\n', (3093, 3108), True, 'import numpy as np\n'), ((3689, 3710), 'numpy.exp', 'np.exp', (['(-x ** 2 / 2.0)'], {}), '(-x ** 2 / 2.0)\n', (3695, 3710), True, 'import numpy as np\n'), ((5506, 5533), 'scipy.stats.distributions.t._pdf', 'distributions.t._pdf', (['x', 'df'], {}), '(x, df)\n', (5526, 5533), False, 'from scipy.stats import 
distributions\n'), ((7030, 7044), 'numpy.poly1d', 'poly1d', (['[1, 0]'], {}), '([1, 0])\n', (7036, 7044), False, 'from numpy import poly1d, sqrt, exp\n'), ((8107, 8122), 'numpy.sqrt', 'sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (8111, 8122), False, 'from numpy import poly1d, sqrt, exp\n'), ((9794, 9812), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (9801, 9812), True, 'import numpy as np\n'), ((11660, 11678), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (11667, 11678), True, 'import numpy as np\n'), ((13010, 13030), 'scipy.stats.describe', 'stats.describe', (['args'], {}), '(args)\n', (13024, 13030), False, 'from scipy import stats, special\n'), ((13165, 13178), 'statsmodels.stats.moment_helpers.mvsk2mc', 'mvsk2mc', (['args'], {}), '(args)\n', (13172, 13178), False, 'from statsmodels.stats.moment_helpers import mvsk2mc, mc2mvsk\n'), ((26672, 26682), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (26679, 26682), True, 'import numpy as np\n'), ((38258, 38278), 'numpy.ones', 'np.ones', (['upper.shape'], {}), '(upper.shape)\n', (38265, 38278), True, 'import numpy as np\n'), ((3070, 3088), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3077, 3088), True, 'import numpy as np\n'), ((3672, 3690), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3679, 3690), True, 'import numpy as np\n'), ((5564, 5597), 'numpy.sqrt', 'np.sqrt', (['((1 + df) / (x ** 2 + df))'], {}), '((1 + df) / (x ** 2 + df))\n', (5571, 5597), True, 'import numpy as np\n'), ((8091, 8110), 'numpy.exp', 'exp', (['(-xn * xn / 2.0)'], {}), '(-xn * xn / 2.0)\n', (8094, 8110), False, 'from numpy import poly1d, sqrt, exp\n'), ((9775, 9797), 'numpy.exp', 'np.exp', (['(-xn * xn / 2.0)'], {}), '(-xn * xn / 2.0)\n', (9781, 9797), True, 'import numpy as np\n'), ((11641, 11663), 'numpy.exp', 'np.exp', (['(-xn * xn / 2.0)'], {}), '(-xn * xn / 2.0)\n', (11647, 11663), True, 'import numpy as np\n'), ((13287, 13299), 
'statsmodels.stats.moment_helpers.mc2mvsk', 'mc2mvsk', (['cnt'], {}), '(cnt)\n', (13294, 13299), False, 'from statsmodels.stats.moment_helpers import mvsk2mc, mc2mvsk\n'), ((35799, 35821), 'numpy.tril_indices', 'np.tril_indices', (['n', '(-1)'], {}), '(n, -1)\n', (35814, 35821), True, 'import numpy as np\n'), ((7828, 7851), 'scipy.factorial2', 'scipy.factorial2', (['(m - 1)'], {}), '(m - 1)\n', (7844, 7851), False, 'import scipy\n')] |
import numpy as np
import chainer
from sklearn.decomposition import PCA
from sklearn.datasets import load_svmlight_file
def make_data(datatype='mnist', seed=2018, pca_dim=100):
    """Load the dataset named `datatype` and return (x, t).

    Prints the dataset name and the feature/label shapes as a side effect.

    NOTE(review): `seed` and `pca_dim` are currently unused — the PCA
    projection step below is commented out; confirm before relying on them.
    """
    print("data_name", datatype)
    x, t, doPCA = get_data(datatype)
    print("x_shape", x.shape)
    print("t_shape", t.shape)
    # PCA reduction is disabled; doPCA from get_data is ignored:
    #if doPCA is True:
    #    pca = PCA(n_components=pca_dim)
    #    pca.fit(x.T)
    #    x = pca.components_.T
    return x, t
def get_data(datatype):
    """Load a benchmark dataset by name and binarize its labels.

    Each branch reads a dataset from the local ``dataset/`` directory (or
    downloads MNIST via chainer for ``"digits"``) and remaps the original
    labels to {0, 1}.

    :param datatype: dataset identifier, e.g. ``"mushroom"``, ``"waveform"``,
        ``"shuttle"``, ``"pageblocks"``, ``"digits"``, ``"spambase"``,
        ``"usps"``, ``"connect-4"``, ``"protein"``, ``"ijcnn1"``, ``"w1a"``.
    :returns: tuple ``(x, t, doPCA)`` — dense feature matrix, binary label
        vector, and a flag suggesting whether PCA should be applied.
    :raises ValueError: for an unrecognised ``datatype``.
    """
    doPCA = False
    if datatype == "mushroom":
        # libsvm format; labels come in as {1, 2} -> remap to {0, 1}
        x, t = load_svmlight_file("dataset/mushrooms.txt")
        x = x.toarray()
        t[t == 1] = 0
        t[t == 2] = 1
        doPCA = True
    elif datatype == "waveform":
        # CSV with the label in the last column; class 2 becomes negative
        data = np.loadtxt('dataset/waveform.txt', delimiter=',')
        x, t = data[:, :-1], data[:, -1]
        t[t == 2] = 0
    elif datatype == "shuttle":
        # merge official train/test splits; class 1 vs. rest
        x_train, t_train = load_svmlight_file('dataset/shuttle.scale.txt')
        x_train = x_train.toarray()
        x_test, t_test = load_svmlight_file('dataset/shuttle.scale.t.txt')
        x_test = x_test.toarray()
        x = np.concatenate([x_train, x_test])
        t = np.concatenate([t_train, t_test])
        t[ ~(t == 1)] = 0
    elif datatype == "pageblocks":
        # whitespace-separated; class 1 (text blocks) vs. rest
        data = np.loadtxt('dataset/page-blocks.txt')
        x, t = data[:, :-1], data[:, -1]
        t[~(t == 1)] = 0
    elif datatype == "digits":
        # MNIST via chainer; task is even digits (0) vs. odd digits (1)
        train, test = chainer.datasets.get_mnist()
        x_train, t_train = train._datasets
        x_test, t_test = test._datasets
        x = np.concatenate([x_train, x_test])
        t = np.concatenate([t_train, t_test])
        t[t%2==0] = 0
        t[t%2==1] = 1
        doPCA = True
    elif datatype == "spambase":
        # labels are already {0, 1}; no remapping needed
        data = np.loadtxt('dataset/spambase.data.txt', delimiter=',')
        x, t = data[:, :-1], data[:, -1]
    elif datatype == "usps":
        # merge splits; even digits (0) vs. odd digits (1)
        x_train, t_train = load_svmlight_file('dataset/usps')
        x_train = x_train.toarray()
        x_test, t_test = load_svmlight_file('dataset/usps.t')
        x_test = x_test.toarray()
        x = np.concatenate([x_train, x_test])
        t = np.concatenate([t_train, t_test])
        t[t%2==0] = 0
        t[t%2==1] = 1
        print(np.mean(t))  # class prior of the positive class
        doPCA = True
    elif datatype == "connect-4":
        # labels {-1, 1} -> {0, 1}
        x, t = load_svmlight_file('dataset/connect-4.txt')
        x = x.toarray()
        t[t == -1] = 0
        print(np.mean(t))
        doPCA = True
    elif datatype == "protein":
        # merge splits; class 1 vs. rest
        x_train, t_train = load_svmlight_file('dataset/protein.txt')
        x_train = x_train.toarray()
        x_test, t_test = load_svmlight_file('dataset/protein.t.txt')
        x_test = x_test.toarray()
        x = np.concatenate([x_train, x_test])
        t = np.concatenate([t_train, t_test])
        t[ ~(t == 1)] = 0
        print(np.mean(t))
        doPCA = True
    elif datatype == "ijcnn1":
        # class 1 vs. rest
        x, t = load_svmlight_file('./dataset/ijcnn1')
        x = x.toarray()
        t[ ~(t == 1)] = 0
        print(np.mean(t))
        doPCA = False
    elif datatype == "w1a":
        # merge splits; class 1 vs. rest
        x_train, t_train = load_svmlight_file('./dataset/w1a')
        x_train = x_train.toarray()
        x_test, t_test = load_svmlight_file('./dataset/w1a.t')
        x_test = x_test.toarray()
        x = np.concatenate([x_train, x_test])
        t = np.concatenate([t_train, t_test])
        t[ ~(t == 1)] = 0
        print(np.mean(t))
        doPCA = False
    else:
        raise ValueError
    return x, t, doPCA
| [
"numpy.concatenate",
"numpy.mean",
"numpy.loadtxt",
"sklearn.datasets.load_svmlight_file",
"chainer.datasets.get_mnist"
] | [((531, 574), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""dataset/mushrooms.txt"""'], {}), "('dataset/mushrooms.txt')\n", (549, 574), False, 'from sklearn.datasets import load_svmlight_file\n'), ((713, 762), 'numpy.loadtxt', 'np.loadtxt', (['"""dataset/waveform.txt"""'], {'delimiter': '""","""'}), "('dataset/waveform.txt', delimiter=',')\n", (723, 762), True, 'import numpy as np\n'), ((886, 933), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""dataset/shuttle.scale.txt"""'], {}), "('dataset/shuttle.scale.txt')\n", (904, 933), False, 'from sklearn.datasets import load_svmlight_file\n'), ((995, 1044), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""dataset/shuttle.scale.t.txt"""'], {}), "('dataset/shuttle.scale.t.txt')\n", (1013, 1044), False, 'from sklearn.datasets import load_svmlight_file\n'), ((1091, 1124), 'numpy.concatenate', 'np.concatenate', (['[x_train, x_test]'], {}), '([x_train, x_test])\n', (1105, 1124), True, 'import numpy as np\n'), ((1137, 1170), 'numpy.concatenate', 'np.concatenate', (['[t_train, t_test]'], {}), '([t_train, t_test])\n', (1151, 1170), True, 'import numpy as np\n'), ((1256, 1293), 'numpy.loadtxt', 'np.loadtxt', (['"""dataset/page-blocks.txt"""'], {}), "('dataset/page-blocks.txt')\n", (1266, 1293), True, 'import numpy as np\n'), ((1414, 1442), 'chainer.datasets.get_mnist', 'chainer.datasets.get_mnist', ([], {}), '()\n', (1440, 1442), False, 'import chainer\n'), ((1538, 1571), 'numpy.concatenate', 'np.concatenate', (['[x_train, x_test]'], {}), '([x_train, x_test])\n', (1552, 1571), True, 'import numpy as np\n'), ((1584, 1617), 'numpy.concatenate', 'np.concatenate', (['[t_train, t_test]'], {}), '([t_train, t_test])\n', (1598, 1617), True, 'import numpy as np\n'), ((1732, 1786), 'numpy.loadtxt', 'np.loadtxt', (['"""dataset/spambase.data.txt"""'], {'delimiter': '""","""'}), "('dataset/spambase.data.txt', delimiter=',')\n", (1742, 1786), True, 'import numpy as np\n'), ((1893, 
1927), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""dataset/usps"""'], {}), "('dataset/usps')\n", (1911, 1927), False, 'from sklearn.datasets import load_svmlight_file\n'), ((1989, 2025), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""dataset/usps.t"""'], {}), "('dataset/usps.t')\n", (2007, 2025), False, 'from sklearn.datasets import load_svmlight_file\n'), ((2072, 2105), 'numpy.concatenate', 'np.concatenate', (['[x_train, x_test]'], {}), '([x_train, x_test])\n', (2086, 2105), True, 'import numpy as np\n'), ((2118, 2151), 'numpy.concatenate', 'np.concatenate', (['[t_train, t_test]'], {}), '([t_train, t_test])\n', (2132, 2151), True, 'import numpy as np\n'), ((2210, 2220), 'numpy.mean', 'np.mean', (['t'], {}), '(t)\n', (2217, 2220), True, 'import numpy as np\n'), ((2301, 2344), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""dataset/connect-4.txt"""'], {}), "('dataset/connect-4.txt')\n", (2319, 2344), False, 'from sklearn.datasets import load_svmlight_file\n'), ((2406, 2416), 'numpy.mean', 'np.mean', (['t'], {}), '(t)\n', (2413, 2416), True, 'import numpy as np\n'), ((2507, 2548), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""dataset/protein.txt"""'], {}), "('dataset/protein.txt')\n", (2525, 2548), False, 'from sklearn.datasets import load_svmlight_file\n'), ((2610, 2653), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""dataset/protein.t.txt"""'], {}), "('dataset/protein.t.txt')\n", (2628, 2653), False, 'from sklearn.datasets import load_svmlight_file\n'), ((2700, 2733), 'numpy.concatenate', 'np.concatenate', (['[x_train, x_test]'], {}), '([x_train, x_test])\n', (2714, 2733), True, 'import numpy as np\n'), ((2746, 2779), 'numpy.concatenate', 'np.concatenate', (['[t_train, t_test]'], {}), '([t_train, t_test])\n', (2760, 2779), True, 'import numpy as np\n'), ((2820, 2830), 'numpy.mean', 'np.mean', (['t'], {}), '(t)\n', (2827, 2830), True, 'import numpy as np\n'), 
((2908, 2946), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""./dataset/ijcnn1"""'], {}), "('./dataset/ijcnn1')\n", (2926, 2946), False, 'from sklearn.datasets import load_svmlight_file\n'), ((3011, 3021), 'numpy.mean', 'np.mean', (['t'], {}), '(t)\n', (3018, 3021), True, 'import numpy as np\n'), ((3101, 3136), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""./dataset/w1a"""'], {}), "('./dataset/w1a')\n", (3119, 3136), False, 'from sklearn.datasets import load_svmlight_file\n'), ((3198, 3235), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['"""./dataset/w1a.t"""'], {}), "('./dataset/w1a.t')\n", (3216, 3235), False, 'from sklearn.datasets import load_svmlight_file\n'), ((3282, 3315), 'numpy.concatenate', 'np.concatenate', (['[x_train, x_test]'], {}), '([x_train, x_test])\n', (3296, 3315), True, 'import numpy as np\n'), ((3328, 3361), 'numpy.concatenate', 'np.concatenate', (['[t_train, t_test]'], {}), '([t_train, t_test])\n', (3342, 3361), True, 'import numpy as np\n'), ((3402, 3412), 'numpy.mean', 'np.mean', (['t'], {}), '(t)\n', (3409, 3412), True, 'import numpy as np\n')] |
# reference implementation of MNIST training
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
# for reproducibility inits
import random
import numpy as np
# give this to each dataloader
def dataloader_seed_worker(worker_id):
    """Seed numpy and Python's random module inside a DataLoader worker.

    Derives a 32-bit seed from torch's per-worker initial seed so that
    augmentations using numpy/random are reproducible across workers.
    """
    derived_seed = torch.initial_seed() % 2 ** 32
    np.random.seed(derived_seed)
    random.seed(derived_seed)
# networks
class MLPNet(nn.Module):
    """Three-layer fully connected classifier for flattened 28x28 inputs.

    Architecture: 784 -> 300 -> 100 -> 10, with ReLU between layers and
    raw logits as output.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, 300)
        self.fc2 = nn.Linear(300, 100)
        self.fc3 = nn.Linear(100, 10)

    def forward(self, x):
        # Flatten each image to a 784-vector before the dense stack.
        flat = x.view(-1, 28 * 28)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
class LeNet5(nn.Module):
    """LeNet-5 style CNN: two conv + max-pool stages, then two dense layers.

    Expects 1x28x28 inputs; each 5x5 conv followed by 2x2 pooling leaves a
    4x4x50 feature map, which is flattened and classified into 10 logits.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1)
        self.conv2 = nn.Conv2d(20, 50, kernel_size=5, stride=1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # conv -> relu -> 2x2 max-pool, twice
        feat = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        feat = F.max_pool2d(F.relu(self.conv2(feat)), 2, 2)
        # flatten the 4x4x50 feature map and run the dense classifier
        feat = feat.view(-1, 4 * 4 * 50)
        return self.fc2(F.relu(self.fc1(feat)))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train a DNN on MNIST and check accuracy.')
    parser.add_argument('--batch_size', default=128, type=int, help="Batch size for training")
    parser.add_argument('--epochs', default=10, type=int, help='Number of epochs to train.')
    parser.add_argument('--seed', default=7, type=int, help='Seed for reproducibility.')
    args = parser.parse_args()

    # Reproducibility: seed every RNG source once (the original seeded torch
    # twice) and force deterministic cuDNN kernels.
    seed = args.seed
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
    np.random.seed(seed)  # Numpy module.
    random.seed(seed)  # Python random module.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    batch_size = args.batch_size

    # Load MNIST (downloaded on first run). We exclude normalization and any
    # data augmentation that would generate more training data.
    use_cuda = torch.cuda.is_available()
    transform = transforms.Compose([transforms.ToTensor()])  #, transforms.Normalize((0.1307,), (0.3081,))
    train_set = datasets.MNIST(root='./mnist_data', train=True, transform=transform, download=True)
    test_set = datasets.MNIST(root='./mnist_data', train=False, transform=transform, download=True)

    # Hold out 10k training images for validation, split deterministically.
    val_size = 10000
    train_size = len(train_set) - val_size
    train_set, val_set = random_split(train_set, [train_size, val_size], generator=torch.Generator().manual_seed(seed))

    num_workers = 0
    train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True,
                              num_workers=num_workers, pin_memory=True, worker_init_fn=dataloader_seed_worker)
    val_loader = DataLoader(dataset=val_set, batch_size=batch_size, shuffle=False,
                            num_workers=num_workers, pin_memory=True, worker_init_fn=dataloader_seed_worker)
    test_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False,
                             num_workers=num_workers, pin_memory=True, worker_init_fn=dataloader_seed_worker)

    print(f'Total training batches: {len(train_loader)}')
    print(f'Total validation batches: {len(val_loader)}')
    print(f'Total testing batches: {len(test_loader)}')

    # Model / optimizer / loss.
    model = LeNet5()
    # model = MLPNet()
    if use_cuda:
        model = model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=0.01)  #, momentum=0.9)
    criterion = nn.CrossEntropyLoss()

    val_accuracies = []
    for epoch in tqdm(range(args.epochs), desc="Epochs"):
        # ---- training ----
        correct_cnt, summed_loss = 0, 0
        total_cnt = 0
        model.train()
        for batch_idx, (x, target) in enumerate(tqdm(train_loader, desc="Batches")):
            optimizer.zero_grad()
            if use_cuda:
                x, target = x.cuda(), target.cuda()
            out = model(x)
            loss = criterion(out, target)
            _, pred_label = torch.max(out, 1)
            total_cnt += x.shape[0]
            correct_cnt += (pred_label == target).sum()
            summed_loss += loss.detach().cpu().numpy()
            loss.backward()
            optimizer.step()
            if (batch_idx + 1) % 100 == 0 or (batch_idx + 1) == len(train_loader):
                print(f'Epoch: {epoch}, batch index: {batch_idx + 1}, train loss: {summed_loss / total_cnt:.6f}, train acc: {correct_cnt * 1.0 / total_cnt:.3f}')

        # ---- validation (no gradients needed) ----
        correct_cnt, summed_loss = 0, 0
        total_cnt = 0
        model.eval()
        with torch.no_grad():
            for batch_idx, (x, target) in enumerate(val_loader):
                if use_cuda:
                    x, target = x.cuda(), target.cuda()
                out = model(x)
                loss = criterion(out, target)
                _, pred_label = torch.max(out, 1)
                total_cnt += x.shape[0]
                correct_cnt += (pred_label == target).sum()
                summed_loss = summed_loss + loss.detach().cpu().numpy()
                if (batch_idx + 1) % 100 == 0 or (batch_idx + 1) == len(val_loader):
                    print(f'Epoch: {epoch}, batch index: {batch_idx + 1}, val loss: {summed_loss / total_cnt:.6f}, val acc: {correct_cnt / total_cnt:.3f}')
        val_accuracies.append((correct_cnt / total_cnt).detach().cpu().numpy())

    # ---- testing ----
    correct_cnt, summed_loss = 0, 0
    total_cnt = 0
    model.eval()
    with torch.no_grad():
        for batch_idx, (x, target) in enumerate(test_loader):
            if use_cuda:
                x, target = x.cuda(), target.cuda()
            out = model(x)
            loss = criterion(out, target)
            _, pred_label = torch.max(out, 1)
            total_cnt += x.shape[0]
            correct_cnt += (pred_label == target).sum()
            summed_loss += loss.detach().cpu().numpy()

    np.savez_compressed(f"output/mnist_bp_lenet5_{seed}.npz", val_accuracies=np.array(val_accuracies), test_accuracy=np.array([(correct_cnt / total_cnt).detach().cpu().numpy()]))
    print(f'\nTest loss: {summed_loss / total_cnt:.6f}, test acc: {correct_cnt / total_cnt:.3f}')
    if args.seed == 7 and args.epochs == 10:
        print(f"The final test loss should be {0.000186} and the test accuracy should be {0.992}")
        # Compare at the printed precision: exact float equality on training
        # metrics is brittle and fails even for numerically reproducible runs.
        assert round(float(summed_loss / total_cnt), 6) == 0.000186
        assert round(float(correct_cnt / total_cnt), 3) == 0.992
    # torch.save(model.state_dict(), f"{model.__class__.__name__}.pt")
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"random.seed",
"torch.initial_seed",
"torch.nn.Linear",
"torch.nn.functional.max_pool2d",
"tqdm.tqdm",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.cuda.manual_seed",
"torch.cuda.is_available",
"torch.max",
"t... | [((503, 530), 'numpy.random.seed', 'np.random.seed', (['worker_seed'], {}), '(worker_seed)\n', (517, 530), True, 'import numpy as np\n'), ((535, 559), 'random.seed', 'random.seed', (['worker_seed'], {}), '(worker_seed)\n', (546, 559), False, 'import random\n'), ((1748, 1827), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a DNN on MNIST and check accuracy."""'}), "(description='Train a DNN on MNIST and check accuracy.')\n", (1771, 1827), False, 'import argparse\n'), ((2185, 2208), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2202, 2208), False, 'import torch\n'), ((2213, 2241), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (2235, 2241), False, 'import torch\n'), ((2246, 2278), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (2272, 2278), False, 'import torch\n'), ((2314, 2334), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2328, 2334), True, 'import numpy as np\n'), ((2356, 2373), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2367, 2373), False, 'import random\n'), ((2403, 2426), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2420, 2426), False, 'import torch\n'), ((2591, 2616), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2614, 2616), False, 'import torch\n'), ((2863, 2950), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""./mnist_data"""', 'train': '(True)', 'transform': 'transform', 'download': '(True)'}), "(root='./mnist_data', train=True, transform=transform,\n download=True)\n", (2877, 2950), True, 'import torchvision.datasets as datasets\n'), ((2962, 3050), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""./mnist_data"""', 'train': '(False)', 'transform': 'transform', 'download': '(True)'}), "(root='./mnist_data', train=False, transform=transform,\n download=True)\n", (2976, 3050), True, 'import 
torchvision.datasets as datasets\n'), ((3273, 3429), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers', 'pin_memory': '(True)', 'worker_init_fn': 'dataloader_seed_worker'}), '(dataset=train_set, batch_size=batch_size, shuffle=True,\n num_workers=num_workers, pin_memory=True, worker_init_fn=\n dataloader_seed_worker)\n', (3283, 3429), False, 'from torch.utils.data import DataLoader, random_split\n'), ((3468, 3623), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'val_set', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers', 'pin_memory': '(True)', 'worker_init_fn': 'dataloader_seed_worker'}), '(dataset=val_set, batch_size=batch_size, shuffle=False,\n num_workers=num_workers, pin_memory=True, worker_init_fn=\n dataloader_seed_worker)\n', (3478, 3623), False, 'from torch.utils.data import DataLoader, random_split\n'), ((3661, 3817), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers', 'pin_memory': '(True)', 'worker_init_fn': 'dataloader_seed_worker'}), '(dataset=test_set, batch_size=batch_size, shuffle=False,\n num_workers=num_workers, pin_memory=True, worker_init_fn=\n dataloader_seed_worker)\n', (3671, 3817), False, 'from torch.utils.data import DataLoader, random_split\n'), ((4209, 4230), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4228, 4230), True, 'import torch.nn as nn\n'), ((470, 490), 'torch.initial_seed', 'torch.initial_seed', ([], {}), '()\n', (488, 490), False, 'import torch\n'), ((733, 756), 'torch.nn.Linear', 'nn.Linear', (['(28 * 28)', '(300)'], {}), '(28 * 28, 300)\n', (742, 756), True, 'import torch.nn as nn\n'), ((776, 795), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(100)'], {}), '(300, 100)\n', (785, 795), True, 'import torch.nn as nn\n'), ((815, 833), 'torch.nn.Linear', 
'nn.Linear', (['(100)', '(10)'], {}), '(100, 10)\n', (824, 833), True, 'import torch.nn as nn\n'), ((1243, 1284), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(20)'], {'kernel_size': '(5)', 'stride': '(1)'}), '(1, 20, kernel_size=5, stride=1)\n', (1252, 1284), True, 'import torch.nn as nn\n'), ((1306, 1348), 'torch.nn.Conv2d', 'nn.Conv2d', (['(20)', '(50)'], {'kernel_size': '(5)', 'stride': '(1)'}), '(20, 50, kernel_size=5, stride=1)\n', (1315, 1348), True, 'import torch.nn as nn\n'), ((1368, 1394), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 50)', '(500)'], {}), '(4 * 4 * 50, 500)\n', (1377, 1394), True, 'import torch.nn as nn\n'), ((1414, 1432), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(10)'], {}), '(500, 10)\n', (1423, 1432), True, 'import torch.nn as nn\n'), ((1506, 1527), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (1518, 1527), True, 'import torch.nn.functional as F\n'), ((1574, 1595), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (1586, 1595), True, 'import torch.nn.functional as F\n'), ((6334, 6351), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (6343, 6351), False, 'import torch\n'), ((2654, 2675), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2673, 2675), True, 'import torchvision.transforms as transforms\n'), ((4489, 4523), 'tqdm.tqdm', 'tqdm', (['train_loader'], {'desc': '"""Batches"""'}), "(train_loader, desc='Batches')\n", (4493, 4523), False, 'from tqdm import tqdm\n'), ((4735, 4752), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (4744, 4752), False, 'import torch\n'), ((5543, 5560), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (5552, 5560), False, 'import torch\n'), ((6565, 6589), 'numpy.array', 'np.array', (['val_accuracies'], {}), '(val_accuracies)\n', (6573, 6589), True, 'import numpy as np\n'), ((3196, 3213), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (3211, 
3213), False, 'import torch\n')] |
import io
import logging
import os
from logging.handlers import RotatingFileHandler
from pathlib import Path
from time import sleep
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
import shopify
from dotenv import load_dotenv
from PIL import Image
from pydantic import BaseSettings
from myshopify import dto
from myshopify.dto.types import (
ShopifyFulfillmentService,
ShopifyInventoryManagement,
ShopifyInventoryPolicy,
ShopifyProductStatus,
)
from myshopify.shopify.inventory import (
add_images_to_product,
add_metafields,
create_product,
create_variant,
delete_product,
delete_variant,
generate_product_metafields,
get_all_shopify_resources,
update_inventory,
update_product,
update_product_metafield,
update_variant,
)
# Load the project's .env file (one level above this script) so the
# QUILTEFRYD_SHOPIFY_* variables are available before Settings is built.
load_dotenv(dotenv_path=(Path(__file__).parent.parent / ".env").as_posix())
# Silence chatty third-party loggers; keep our own output at DEBUG.
logging.getLogger("pyactiveresource").setLevel("WARNING")
logging.getLogger("PIL").setLevel("WARNING")
logging_format = "%(asctime)s | %(levelname)-8s | %(name)s | %(message)s"
# Log both to a small rotating file next to this script and to stderr.
file_handler = RotatingFileHandler(Path(__file__).parent / ".log", maxBytes=1000, backupCount=0)
stream_handler = logging.StreamHandler()
logging.basicConfig(level=logging.DEBUG, handlers=[file_handler, stream_handler], format=logging_format)
logger = logging.getLogger(__name__)
class Settings(BaseSettings):
    """Runtime configuration for the Shopify product-sync job.

    Credentials are read from the environment; the ``QUILTEFRYD_SHOPIFY_*``
    variables are populated from the project ``.env`` file at import time.
    """

    # Behaviour toggles for the sync run.
    delete_all: bool = False
    delete_old_products: bool = False
    delete_old_metadata: bool = True
    add_metadata: bool = True
    update_metadata: bool = True
    # Shopify credentials. Fix: the password previously read the literal
    # placeholder env var "<PASSWORD>"; use the shop-specific variable,
    # consistent with the key/name variables above.
    shopify_key: str = os.getenv("QUILTEFRYD_SHOPIFY_KEY")
    shopify_password: str = os.getenv("QUILTEFRYD_SHOPIFY_PASSWORD")
    shopify_shop_name: str = os.getenv("QUILTEFRYD_SHOPIFY_NAME")
    new_product_status: ShopifyProductStatus = ShopifyProductStatus.ACTIVE
    # Optional filters: when set, only products in these categories/groups
    # are synced. (Annotation normalized to typing.List for consistency.)
    allowed_product_categories: Optional[List[str]] = None
    allowed_product_group1: Optional[List[str]] = None
def _get_metafield_data(row: pd.Series) -> Dict[str, Union[str, int]]:
data = {
"price_unit": row["price_unit"],
"minimum_order_quantity": 3 if row["price_unit"] == "desimeter" else 0,
"product_category": None if not row["product_category"] else row["product_category"],
"product_group1": None if not row["product_group1"] else row["product_group1"],
"product_group2": None if not row["product_group2"] else row["product_group2"],
"product_group3": None if not row["product_group3"] else row["product_group3"],
"product_color": None if not row["product_color"] else row["product_color"],
"fabric_material": None if not row["fabric_material"] else row["fabric_material"],
"fabric_type": None if not row["fabric_type"] else row["fabric_type"],
"pattern_type": None if not row["pattern_type"] else row["pattern_type"], # Fixme: Add to filtering
"vendor": None if not row["vendor"] else row["vendor"],
"designer": None if not row["designer"] else row["designer"],
}
return {k: v for (k, v) in data.items() if v is not None}
def main(settings: Settings, input_path: Path) -> None:
    """Synchronise the POS product export with the Shopify store.

    Reads the pickled product dataframe, optionally purges the store,
    updates products/variants whose SKUs match existing Shopify items,
    and creates Shopify products for SKUs not yet in the store.

    :param settings: run configuration (credentials, toggles, filters).
    :param input_path: path to the pickled product export dataframe.
    """
    # Keep only the latest row per source_id, then apply category/group filters.
    df = pd.read_pickle(input_path)
    df = df.groupby("source_id").last()
    if settings.allowed_product_categories is not None:
        df = df.loc[df["product_category"].map(lambda x: x in settings.allowed_product_categories)]
    if settings.allowed_product_group1 is not None:
        df = df.loc[df["product_group1"].map(lambda x: x in settings.allowed_product_group1)]
    # Authenticate against the shop via basic-auth credentials in the URL.
    shop_url = (
        f"https://{settings.shopify_key}:{settings.shopify_password}"
        f"@{settings.shopify_shop_name}.myshopify.com/admin"
    )
    shopify.ShopifyResource.set_site(value=shop_url)  # noqa
    # Optional full purge: delete every variant and product in the store.
    if settings.delete_all:
        products = get_all_shopify_resources(shopify.Product)
        variants = get_all_shopify_resources(shopify.Variant)
        for variant in variants:
            logger.info(f"Deleting Variant: {variant.title} - sku: {variant.sku}")
            delete_variant(variant=variant)
            # sleep calls throughout throttle us under Shopify's API rate limit
            sleep(0.25)
        for product in products:
            logger.info(f"Deleting Product: {product.title}")
            delete_product(product=product)
            sleep(0.25)
    # shop = shopify.Shop.current
    products = get_all_shopify_resources(shopify.Product)
    location = shopify.Location.find_first()
    # Clean up old products
    if settings.delete_old_products:
        logger.info("Updating products")
        for i, product in enumerate(products):
            # A product is "old" when its first variant's SKU no longer
            # appears in the POS export.
            if product.variants[0].sku not in df["sku"].values:
                logger.warning(f"Deleting old product: {product.title} - sku: {product.variants[0].sku}")
                for variant in product.variants:
                    delete_variant(variant=variant)
                delete_product(product=product)
    # Update pass: refresh matched products and collect all seen SKUs.
    skus = []
    logger.info("Updating products")
    for i, product in enumerate(products):
        if product.variants[0].sku in df["sku"].values:
            product_row = df.loc[df["sku"] == product.variants[0].sku].iloc[0]
            product_dto = dto.shopify.Product(
                id=product.id,
                product_type=product_row["product_category"],
                tags=product_row["tags"].strip(","),
                vendor=product_row["vendor"],
            )
            update_product(product_update_dto=product_dto, shopify_product=product)
            # Only attach images when the product has none, so manually
            # curated images are preserved.
            if product.images is None or product.images == []:
                _ = add_images_to_product(
                    product=product,
                    image_list=[Image.open(io.BytesIO(product_row.images))] if product_row.images else [],
                )
            if settings.update_metadata:
                metafields_data = _get_metafield_data(row=product_row)
                update_product_metafield(
                    product=product, data=metafields_data, delete_missing=settings.delete_old_metadata
                )
            sleep(0.5)
        # NOTE(review): product_row is assigned only inside the branch above;
        # if the first variant's SKU is unmatched but a later variant's SKU is,
        # the code below reads a stale or unbound product_row — verify intent.
        for variant in product.variants:
            skus.append(variant.sku)
            if variant.sku in df["sku"].values:
                # We are only updating a few fields since we want to keep description text, images, etc.
                inventory_policy = (
                    ShopifyInventoryPolicy.DENY
                    if bool(product_row["hide_when_empty"])
                    else ShopifyInventoryPolicy.CONTINUE
                )
                # A positive discounted_price becomes the sale price, with the
                # regular price shown as compare_at_price.
                price = (
                    product_row["discounted_price"] if product_row["discounted_price"] > 0 else product_row["price"]
                )
                compare_at_price = product_row["price"] if product_row["discounted_price"] > 0 else None
                variant_dto = dto.shopify.ProductVariant(
                    id=variant.id,
                    product_id=product.id,
                    sku=variant.sku,
                    price=price,
                    compare_at_price=compare_at_price,
                    inventory_policy=inventory_policy,
                )
                update_variant(variant_update_dto=variant_dto, shopify_variant=variant)
                inventory_level_update = dto.shopify.InventoryLevel(
                    inventory_item_id=variant.inventory_item_id,
                    location_id=location.id,
                    # NaN stock counts are treated as zero on hand
                    available=int(np.nan_to_num(product_row["available"], nan=0)),
                )
                update_inventory(inventory_level_dto=inventory_level_update)
            else:
                logger.warning(f"Not matched with POS: {product.title} - sku: {variant.sku}")
            sleep(0.5)
    # Import pass: create Shopify products for SKUs not seen above.
    logger.info("Importing products")
    for _, product_row in df.iterrows():
        if product_row.sku not in skus:
            # Skip out-of-stock items that are configured to be hidden.
            if product_row["available"] < 1 and product_row["hide_when_empty"]:
                continue
            new_product = dto.shopify.Product(
                title=product_row["title"],
                body_html=" ".join(["<p>" + x.strip() + "</p>\n" for x in product_row["body_html"].split("\n")]),
                product_type=product_row["product_category"],
                status=settings.new_product_status,
                tags=product_row["tags"].strip(","),
                vendor=product_row["vendor"],
            )
            product = create_product(product_dto=new_product)
            price = product_row["discounted_price"] if product_row["discounted_price"] > 0 else product_row["price"]
            compare_at_price = product_row["price"] if product_row["discounted_price"] > 0 else None
            inventory_policy = (
                ShopifyInventoryPolicy.DENY if bool(product_row["hide_when_empty"]) else ShopifyInventoryPolicy.CONTINUE
            )
            # Attach the single default variant Shopify created with the product.
            new_variant = dto.shopify.ProductVariant(
                id=product.variants[0].id,
                product_id=product.id,
                sku=product_row["sku"],
                price=price,
                compare_at_price=compare_at_price,
                inventory_policy=inventory_policy,
                inventory_management=ShopifyInventoryManagement.SHOPIFY,
                fulfillment_service=ShopifyFulfillmentService.MANUAL,
                position=1,
            )
            variant = create_variant(variant_dto=new_variant)
            inventory_level_update = dto.shopify.InventoryLevel(
                inventory_item_id=variant.inventory_item_id,
                location_id=location.id,
                available=int(np.nan_to_num(product_row["available"], nan=0)),
            )
            _ = add_images_to_product(
                product=product, image_list=[Image.open(io.BytesIO(product_row.images))] if product_row.images else []
            )
            if settings.add_metadata:
                metafields_data = _get_metafield_data(row=product_row)
                metafields = generate_product_metafields(data=metafields_data, product_id=product.id)
                add_metafields(metafields_dto=metafields, product=product)
            update_inventory(inventory_level_dto=inventory_level_update)
            sleep(0.5)
    logger.info("Done!")
if __name__ == "__main__":
    # Build settings from the environment and sync the pickled POS export.
    settings = Settings()
    INPUT_PATH = Path(__file__).parent / "data" / "shopify_products_export.pickle"
    main(settings=settings, input_path=INPUT_PATH)
| [
"myshopify.shopify.inventory.delete_variant",
"numpy.nan_to_num",
"shopify.Location.find_first",
"myshopify.shopify.inventory.update_inventory",
"pathlib.Path",
"myshopify.shopify.inventory.update_product_metafield",
"myshopify.shopify.inventory.update_product",
"myshopify.shopify.inventory.add_metafi... | [((1196, 1219), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1217, 1219), False, 'import logging\n'), ((1220, 1328), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'handlers': '[file_handler, stream_handler]', 'format': 'logging_format'}), '(level=logging.DEBUG, handlers=[file_handler,\n stream_handler], format=logging_format)\n', (1239, 1328), False, 'import logging\n'), ((1335, 1362), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1352, 1362), False, 'import logging\n'), ((1585, 1620), 'os.getenv', 'os.getenv', (['"""QUILTEFRYD_SHOPIFY_KEY"""'], {}), "('QUILTEFRYD_SHOPIFY_KEY')\n", (1594, 1620), False, 'import os\n'), ((1649, 1672), 'os.getenv', 'os.getenv', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (1658, 1672), False, 'import os\n'), ((1702, 1738), 'os.getenv', 'os.getenv', (['"""QUILTEFRYD_SHOPIFY_NAME"""'], {}), "('QUILTEFRYD_SHOPIFY_NAME')\n", (1711, 1738), False, 'import os\n'), ((3127, 3153), 'pandas.read_pickle', 'pd.read_pickle', (['input_path'], {}), '(input_path)\n', (3141, 3153), True, 'import pandas as pd\n'), ((3657, 3705), 'shopify.ShopifyResource.set_site', 'shopify.ShopifyResource.set_site', ([], {'value': 'shop_url'}), '(value=shop_url)\n', (3689, 3705), False, 'import shopify\n'), ((4264, 4306), 'myshopify.shopify.inventory.get_all_shopify_resources', 'get_all_shopify_resources', (['shopify.Product'], {}), '(shopify.Product)\n', (4289, 4306), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((4322, 4351), 'shopify.Location.find_first', 'shopify.Location.find_first', ([], {}), '()\n', (4349, 4351), False, 'import shopify\n'), ((905, 942), 'logging.getLogger', 
'logging.getLogger', (['"""pyactiveresource"""'], {}), "('pyactiveresource')\n", (922, 942), False, 'import logging\n'), ((963, 987), 'logging.getLogger', 'logging.getLogger', (['"""PIL"""'], {}), "('PIL')\n", (980, 987), False, 'import logging\n'), ((3762, 3804), 'myshopify.shopify.inventory.get_all_shopify_resources', 'get_all_shopify_resources', (['shopify.Product'], {}), '(shopify.Product)\n', (3787, 3804), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((3824, 3866), 'myshopify.shopify.inventory.get_all_shopify_resources', 'get_all_shopify_resources', (['shopify.Variant'], {}), '(shopify.Variant)\n', (3849, 3866), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((1117, 1131), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1121, 1131), False, 'from pathlib import Path\n'), ((3995, 4026), 'myshopify.shopify.inventory.delete_variant', 'delete_variant', ([], {'variant': 'variant'}), '(variant=variant)\n', (4009, 4026), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((4039, 4050), 'time.sleep', 'sleep', (['(0.25)'], {}), '(0.25)\n', (4044, 4050), False, 'from time import sleep\n'), ((4158, 4189), 'myshopify.shopify.inventory.delete_product', 'delete_product', ([], {'product': 'product'}), '(product=product)\n', (4172, 4189), False, 'from 
myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((4202, 4213), 'time.sleep', 'sleep', (['(0.25)'], {}), '(0.25)\n', (4207, 4213), False, 'from time import sleep\n'), ((5321, 5392), 'myshopify.shopify.inventory.update_product', 'update_product', ([], {'product_update_dto': 'product_dto', 'shopify_product': 'product'}), '(product_update_dto=product_dto, shopify_product=product)\n', (5335, 5392), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((7710, 7720), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (7715, 7720), False, 'from time import sleep\n'), ((8402, 8441), 'myshopify.shopify.inventory.create_product', 'create_product', ([], {'product_dto': 'new_product'}), '(product_dto=new_product)\n', (8416, 8441), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((8855, 9179), 'myshopify.dto.shopify.ProductVariant', 'dto.shopify.ProductVariant', ([], {'id': 'product.variants[0].id', 'product_id': 'product.id', 'sku': "product_row['sku']", 'price': 'price', 'compare_at_price': 'compare_at_price', 'inventory_policy': 'inventory_policy', 'inventory_management': 'ShopifyInventoryManagement.SHOPIFY', 'fulfillment_service': 'ShopifyFulfillmentService.MANUAL', 'position': '(1)'}), "(id=product.variants[0].id, product_id=product.id,\n sku=product_row['sku'], price=price, 
compare_at_price=compare_at_price,\n inventory_policy=inventory_policy, inventory_management=\n ShopifyInventoryManagement.SHOPIFY, fulfillment_service=\n ShopifyFulfillmentService.MANUAL, position=1)\n", (8881, 9179), False, 'from myshopify import dto\n'), ((9343, 9382), 'myshopify.shopify.inventory.create_variant', 'create_variant', ([], {'variant_dto': 'new_variant'}), '(variant_dto=new_variant)\n', (9357, 9382), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((10117, 10177), 'myshopify.shopify.inventory.update_inventory', 'update_inventory', ([], {'inventory_level_dto': 'inventory_level_update'}), '(inventory_level_dto=inventory_level_update)\n', (10133, 10177), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((10190, 10200), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (10195, 10200), False, 'from time import sleep\n'), ((4793, 4824), 'myshopify.shopify.inventory.delete_product', 'delete_product', ([], {'product': 'product'}), '(product=product)\n', (4807, 4824), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((5791, 5903), 'myshopify.shopify.inventory.update_product_metafield', 'update_product_metafield', ([], {'product': 'product', 'data': 'metafields_data', 'delete_missing': 'settings.delete_old_metadata'}), '(product=product, 
data=metafields_data,\n delete_missing=settings.delete_old_metadata)\n', (5815, 5903), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((5954, 5964), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (5959, 5964), False, 'from time import sleep\n'), ((9956, 10028), 'myshopify.shopify.inventory.generate_product_metafields', 'generate_product_metafields', ([], {'data': 'metafields_data', 'product_id': 'product.id'}), '(data=metafields_data, product_id=product.id)\n', (9983, 10028), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((10045, 10103), 'myshopify.shopify.inventory.add_metafields', 'add_metafields', ([], {'metafields_dto': 'metafields', 'product': 'product'}), '(metafields_dto=metafields, product=product)\n', (10059, 10103), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((10299, 10313), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (10303, 10313), False, 'from pathlib import Path\n'), ((4745, 4776), 'myshopify.shopify.inventory.delete_variant', 'delete_variant', ([], {'variant': 'variant'}), '(variant=variant)\n', (4759, 4776), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, 
update_inventory, update_product, update_product_metafield, update_variant\n'), ((6771, 6944), 'myshopify.dto.shopify.ProductVariant', 'dto.shopify.ProductVariant', ([], {'id': 'variant.id', 'product_id': 'product.id', 'sku': 'variant.sku', 'price': 'price', 'compare_at_price': 'compare_at_price', 'inventory_policy': 'inventory_policy'}), '(id=variant.id, product_id=product.id, sku=\n variant.sku, price=price, compare_at_price=compare_at_price,\n inventory_policy=inventory_policy)\n', (6797, 6944), False, 'from myshopify import dto\n'), ((7123, 7194), 'myshopify.shopify.inventory.update_variant', 'update_variant', ([], {'variant_update_dto': 'variant_dto', 'shopify_variant': 'variant'}), '(variant_update_dto=variant_dto, shopify_variant=variant)\n', (7137, 7194), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((7517, 7577), 'myshopify.shopify.inventory.update_inventory', 'update_inventory', ([], {'inventory_level_dto': 'inventory_level_update'}), '(inventory_level_dto=inventory_level_update)\n', (7533, 7577), False, 'from myshopify.shopify.inventory import add_images_to_product, add_metafields, create_product, create_variant, delete_product, delete_variant, generate_product_metafields, get_all_shopify_resources, update_inventory, update_product, update_product_metafield, update_variant\n'), ((9581, 9627), 'numpy.nan_to_num', 'np.nan_to_num', (["product_row['available']"], {'nan': '(0)'}), "(product_row['available'], nan=0)\n", (9594, 9627), True, 'import numpy as np\n'), ((854, 868), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (858, 868), False, 'from pathlib import Path\n'), ((7425, 7471), 'numpy.nan_to_num', 'np.nan_to_num', (["product_row['available']"], {'nan': '(0)'}), "(product_row['available'], nan=0)\n", 
(7438, 7471), True, 'import numpy as np\n'), ((9740, 9770), 'io.BytesIO', 'io.BytesIO', (['product_row.images'], {}), '(product_row.images)\n', (9750, 9770), False, 'import io\n'), ((5580, 5610), 'io.BytesIO', 'io.BytesIO', (['product_row.images'], {}), '(product_row.images)\n', (5590, 5610), False, 'import io\n')] |
#%% Reproduce MovieLens Experiment of the paper
import sys
sys.path.append('../code')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from cot import cot_numpy
from scipy.stats import mode
#%%
def cot_clustering(X, ns, nv, niter_cluster, niter, algo1='emd', algo2 = 'emd', reg1 = 0, reg2 = 0, verbose = False):
    """Co-cluster the rows/columns of X with alternating CO-Optimal-Transport steps.

    Starts from a random ns x nv summary matrix Xc and repeatedly
    (a) solves the COT problem between X and Xc, then
    (b) rebuilds Xc as the transport-weighted reduction of X,
    stopping early once the COT cost stops changing.

    Returns the final sample coupling Ts, feature coupling Tv and summary Xc.
    """
    Xc = np.random.randn(ns, nv)
    prev_cost = 0
    for _ in range(niter_cluster):
        Ts, Tv, cost = cot_numpy(X, Xc, niter=niter, algo=algo1, reg=reg1,
                                  algo2=algo2, reg2=reg2, verbose=False)
        # Re-estimate the summary matrix from the couplings (rescaled reduction of X).
        Xc = (Ts.T @ X @ Tv) * ns * nv
        if verbose:
            print(cost)
        if prev_cost == cost:
            # Cost unchanged from the previous sweep: converged.
            break
        prev_cost = cost
    if verbose:
        print("\n\n")
    return Ts, Tv, Xc
# ---------------------------------------------------------------------------
# Load MovieLens-100k and build the dense user x item rating matrix
# (missing ratings encoded as 0).
# ---------------------------------------------------------------------------
df = pd.read_csv("../data/ml-100k/u.data", delimiter='\t', header=None,
                 names=["user", "item", "rating", "timestamp"])
R_df = df.pivot(index='user', columns='item', values='rating').fillna(0).values
# Movie titles are column 1 of u.item.
# BUGFIX: DataFrame.get_values() was deprecated in pandas 0.25 and removed in
# 1.0 -- .values returns the same ndarray on every pandas version.
movies = pd.read_csv('../data/ml-100k/u.item', sep='|', header=None,
                     encoding='latin-1').values[:, 1]
# Mean rating per movie, averaged only over the users that actually rated it.
mean_ratings = np.true_divide(R_df.sum(0), (R_df != 0).sum(0))
idx_best = np.argsort(mean_ratings)[::-1].tolist()
n_users, n_items = len(df.user.unique()), len(df.item.unique())

viz_orig = False  # show the raw ratings matrix
viz_cot = True    # show the co-clustered summary matrix

if viz_orig:
    plt.figure(figsize=(9, 6))
    plt.imshow(R_df, cmap='Blues')
    plt.xlabel("Users", fontsize=15)
    plt.ylabel("Movies", fontsize=15)
    plt.title('Original MovieLens matrix', fontsize=20)
    plt.xticks([])
    plt.yticks([])
    plt.show()

# Co-cluster into ns user clusters and nv movie clusters via CO-Optimal Transport.
ns = 10
nv = 20
algo1 = 'emd'
algo2 = 'emd'
Ts, Tv, Xc = cot_clustering(R_df, ns=ns, nv=nv,
                            niter_cluster=10, niter=300, algo1=algo1,
                            algo2=algo2, reg1=0, reg2=0, verbose=False)
yc = Tv.argmax(1)  # hard cluster assignment for each item (argmax over coupling)
# Reorder clusters by total rating mass so the summary reads top-left to bottom-right.
sum_ratings_cot = np.sum(Xc, axis=0)
idx_mov = np.argsort(sum_ratings_cot)[::-1]
idx_user = np.argsort(np.sum(Xc[:, idx_mov], axis=1))
Xc = Xc[:, idx_mov]
Xc = Xc[idx_user, :]

if viz_cot:
    plt.figure(figsize=(9, 6))
    plt.imshow(Xc, cmap='Blues')
    plt.xlabel("Users clusters", fontsize=15)
    plt.ylabel("Movies clusters", fontsize=15)
    plt.title('Summarized MovieLens matrix', fontsize=20)
    plt.xticks([])
    plt.yticks([])
    plt.show()

print("Movies in the most rated cluster")
idx_best_cluster = np.where(yc == idx_mov[0])
print(movies[idx_best_cluster])
print("\nMovies in the least rated cluster")
idx_worst_cluster = np.where(yc == idx_mov[-1])
print(movies[idx_worst_cluster])
| [
"sys.path.append",
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.random.randn",
"pandas.read_csv",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"cot.cot_numpy",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.where",
"matplotlib.pyplot.xticks",
"ma... | [((60, 86), 'sys.path.append', 'sys.path.append', (['"""../code"""'], {}), "('../code')\n", (75, 86), False, 'import sys\n'), ((835, 953), 'pandas.read_csv', 'pd.read_csv', (['"""../data/ml-100k/u.data"""'], {'delimiter': '"""\t"""', 'header': 'None', 'names': "['user', 'item', 'rating', 'timestamp']"}), "('../data/ml-100k/u.data', delimiter='\\t', header=None, names=[\n 'user', 'item', 'rating', 'timestamp'])\n", (846, 953), True, 'import pandas as pd\n'), ((1863, 1881), 'numpy.sum', 'np.sum', (['Xc'], {'axis': '(0)'}), '(Xc, axis=0)\n', (1869, 1881), True, 'import numpy as np\n'), ((2372, 2398), 'numpy.where', 'np.where', (['(yc == idx_mov[0])'], {}), '(yc == idx_mov[0])\n', (2380, 2398), True, 'import numpy as np\n'), ((2497, 2524), 'numpy.where', 'np.where', (['(yc == idx_mov[-1])'], {}), '(yc == idx_mov[-1])\n', (2505, 2524), True, 'import numpy as np\n'), ((354, 377), 'numpy.random.randn', 'np.random.randn', (['ns', 'nv'], {}), '(ns, nv)\n', (369, 377), True, 'import numpy as np\n'), ((1370, 1396), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (1380, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1432), 'matplotlib.pyplot.imshow', 'plt.imshow', (['R_df'], {'cmap': '"""Blues"""'}), "(R_df, cmap='Blues')\n", (1412, 1432), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1469), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Users"""'], {'fontsize': '(15)'}), "('Users', fontsize=15)\n", (1447, 1469), True, 'import matplotlib.pyplot as plt\n'), ((1476, 1509), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Movies"""'], {'fontsize': '(15)'}), "('Movies', fontsize=15)\n", (1486, 1509), True, 'import matplotlib.pyplot as plt\n'), ((1516, 1567), 'matplotlib.pyplot.title', 'plt.title', (['"""Original MovieLens matrix"""'], {'fontsize': '(20)'}), "('Original MovieLens matrix', fontsize=20)\n", (1525, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1574, 1588), 'matplotlib.pyplot.xticks', 
'plt.xticks', (['[]'], {}), '([])\n', (1584, 1588), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1607), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1603, 1607), True, 'import matplotlib.pyplot as plt\n'), ((1612, 1622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1620, 1622), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1921), 'numpy.argsort', 'np.argsort', (['sum_ratings_cot'], {}), '(sum_ratings_cot)\n', (1904, 1921), True, 'import numpy as np\n'), ((1950, 1980), 'numpy.sum', 'np.sum', (['Xc[:, idx_mov]'], {'axis': '(1)'}), '(Xc[:, idx_mov], axis=1)\n', (1956, 1980), True, 'import numpy as np\n'), ((2039, 2065), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (2049, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2071, 2099), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Xc'], {'cmap': '"""Blues"""'}), "(Xc, cmap='Blues')\n", (2081, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2104, 2145), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Users clusters"""'], {'fontsize': '(15)'}), "('Users clusters', fontsize=15)\n", (2114, 2145), True, 'import matplotlib.pyplot as plt\n'), ((2152, 2194), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Movies clusters"""'], {'fontsize': '(15)'}), "('Movies clusters', fontsize=15)\n", (2162, 2194), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2254), 'matplotlib.pyplot.title', 'plt.title', (['"""Summarized MovieLens matrix"""'], {'fontsize': '(20)'}), "('Summarized MovieLens matrix', fontsize=20)\n", (2210, 2254), True, 'import matplotlib.pyplot as plt\n'), ((2261, 2275), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2271, 2275), True, 'import matplotlib.pyplot as plt\n'), ((2280, 2294), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2290, 2294), True, 'import matplotlib.pyplot as plt\n'), ((2299, 2309), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2307, 2309), True, 
'import matplotlib.pyplot as plt\n'), ((454, 548), 'cot.cot_numpy', 'cot_numpy', (['X', 'Xc'], {'niter': 'niter', 'algo': 'algo1', 'reg': 'reg1', 'algo2': 'algo2', 'reg2': 'reg2', 'verbose': '(False)'}), '(X, Xc, niter=niter, algo=algo1, reg=reg1, algo2=algo2, reg2=reg2,\n verbose=False)\n', (463, 548), False, 'from cot import cot_numpy\n'), ((1043, 1122), 'pandas.read_csv', 'pd.read_csv', (['"""../data/ml-100k/u.item"""'], {'sep': '"""|"""', 'header': 'None', 'encoding': '"""latin-1"""'}), "('../data/ml-100k/u.item', sep='|', header=None, encoding='latin-1')\n", (1054, 1122), True, 'import pandas as pd\n'), ((1214, 1238), 'numpy.argsort', 'np.argsort', (['mean_ratings'], {}), '(mean_ratings)\n', (1224, 1238), True, 'import numpy as np\n')] |
# import the necessary packages
import time
from datetime import datetime
import cv2
import numpy
import RPi.GPIO as GPIO
import Iothub_client_functions as iot
import picamera
import io, sys
import threading
import cropdata1440
from picamera.array import PiRGBArray
import picamera.array
from PIL import Image
from imutils.video import VideoStream
import imutils
# BCM pin numbers used to publish the 10-bit standing-pin code (one pin per bit).
pinsGPIO = [15,14,3,2,21,20,16,5,26,6]
# Crop windows (row_start, row_stop, col_start, col_stop) calibrated for a
# 1440-wide frame, loaded from the cropdata1440 module.
pin_crop_ranges = cropdata1440.pin_crop_ranges
resetArmCrops = cropdata1440.resetArmCrops
pinSetterCrops = cropdata1440.pinSetterCrops
# Optional CLI argument selects the camera mode; the ball-lane crop window
# must match the chosen resolution.
if len(sys.argv) > 1:
    mode = sys.argv[1]
    if mode == 'H':
        ballCrops = [460,885,10,1330] #1440, 912
    if mode == 'M':
        ballCrops = [360,720,10,1275] # 1280,720
    if mode == 'L':
        ballCrops = [240,478,10,638] # 640, 480
# ballCrops = [460,885,10,1330] #1440, 912
# ballCrops = [360,720,10,1275] # 1280,720
# NOTE(review): the line below unconditionally overrides whatever the CLI
# mode selected above, pinning the low-resolution crop -- confirm whether
# this debug override is still intended.
ballCrops = [240,478,10,638] # 640, 480
def setResolution():
    """Return the (width, height) camera resolution for the global *mode*.

    Also publishes the chosen width in the global ``resX`` (used elsewhere
    for debug-image file naming).  Defaults to low resolution when no mode
    was supplied on the command line.
    """
    global resX, mode
    # BUGFIX: without a CLI argument `mode` was never defined and this
    # function raised NameError; default to low resolution instead.
    if 'mode' not in globals():
        mode = 'L'
    resolutions = {
        'H': (1440, 912),
        'M': (1280, 720),
        # BUGFIX: 'L' previously returned (640, 320), but the low-res crop
        # windows (ballCrops = [240,478,10,638]) assume a 480-pixel-high
        # frame, so rows 320..478 fell outside the captured image.
        'L': (640, 480),
    }
    res = resolutions.get(mode, (640, 480))
    print('Resolution', res, mode)
    resX = res[0]
    return res
def getCroppedImage(image, crop_array):
    """Return the sub-image delimited by crop_array = [row_start, row_stop, col_start, col_stop].

    Plain slicing, so for numpy inputs the result is a view (no copy).
    """
    rows = slice(crop_array[0], crop_array[1])
    cols = slice(crop_array[2], crop_array[3])
    return image[rows, cols]
def setupGPIO(pins):
    """Configure every BCM-numbered pin in *pins* as a GPIO output."""
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    for out_pin in pins:
        GPIO.setup(out_pin, GPIO.OUT)
    print ("setup Completed")
def bit_GPIO(pins, pinCount):
    """Mirror the 10-bit binary representation of *pinCount* onto the GPIO pins.

    pins[0] receives the most significant bit.  Counts >= 2**len(pins) are
    truncated to the available pins instead of raising IndexError.
    """
    # format(..., '010b') zero-pads to 10 digits in one step, replacing the
    # manual while-loop that prepended "0" characters.
    bits = format(pinCount, '010b')
    # zip() stops at the shorter sequence, so an over-long bit string can no
    # longer index past the end of *pins*.
    for pin, bit in zip(pins, bits):
        GPIO.output(pin, GPIO.HIGH if bit == "1" else GPIO.LOW)
def getMaskFrame():
    """Load the stored background image and cache its grayscale reference crops.

    Publishes two globals used by the motion detectors:
      img_gray1arm -- grayscale reset-arm region of the background
      mask_gray    -- grayscale ball-lane region of the background
    """
    global mask_gray, resetArmCrops, ballCrops, img_gray1arm
    background = cv2.imread('/home/pi/Shared/histImage/BallMask.jpg', 1)
    arm_region = getCroppedImage(background, resetArmCrops)
    (h, w, d) = arm_region.shape  # kept for parity with original (unused)
    img_gray1arm = cv2.cvtColor(arm_region, cv2.COLOR_BGR2GRAY)
    lane_region = getCroppedImage(background, ballCrops)
    mask_gray = cv2.cvtColor(lane_region, cv2.COLOR_BGR2GRAY)
def writeImageSeries(frameNoStart, numberOfFrames, img_rgb):
    """Dump *img_rgb* to disk while the global frame counter lies inside
    [frameNoStart, frameNoStart + numberOfFrames], then refresh the
    crop-overlay debug images."""
    if not (frameNoStart <= frameNo <= frameNoStart + numberOfFrames):
        return
    print ('Saving ../home/pi/Shared/videos/videoCCEFrame'+ str(resX)+ str(frameNo) +'.jpg')
    cv2.imwrite('/home/pi/Shared/videos/videoCCEFrame' + str(resX) + str(frameNo) + '.jpg', img_rgb)
    drawPinRectangles()
def write_video(stream,result):
    """Flush the camera's circular H.264 buffer to /dp/log/firstFile.h264
    and hand the file to iotSend() for upload.

    Rate-limited: does nothing unless at least 120 frames have passed since
    the last save (tracked in the global videoReadyFrameNo).
    """
    # Write the entire content of the circular buffer to disk. No need to
    # lock the stream here as we're definitely not writing to it
    # simultaneously
    global frameNo, videoReadyFrameNo
    if frameNo < videoReadyFrameNo + 120:
        return
    videoReadyFrameNo = frameNo
    print("writng dp ", result)
    #setup ram dsk
    # /dp is expected to be a RAM disk to avoid SD-card wear.
    with io.open('/dp/log/firstFile.h264', 'wb') as output:
        # Skip ahead to the first SPS header so the saved clip is decodable.
        for frame in stream.frames:
            if frame.frame_type == picamera.PiVideoFrameType.sps_header:
                stream.seek(frame.position)
                break
        while True:
            buf = stream.read1()
            if not buf:
                break
            output.write(buf)
    iotSend('/dp/log/firstFile.h264',result)
    # Wipe the circular stream once we're done
    stream.seek(0)
    stream.truncate()
def isPinSetter():
    """Detect the green pin-setter inside its crop window.

    Sets the global setterPresent flag and, when the setter is seen,
    appends a "<priorPinCount>,-2," marker to the global activity log.
    """
    global setterPresent
    global frameNo
    global img_rgb
    global firstSetterFrame
    global activity
    region = getCroppedImage(img_rgb, pinSetterCrops)
    hsv = cv2.cvtColor(region, cv2.COLOR_BGR2HSV)
    # Keep only pixels whose hue falls inside the setter's green band.
    green_lo = numpy.array([65, 60, 60])
    green_hi = numpy.array([80, 255, 255])
    green_mask = cv2.inRange(hsv, green_lo, green_hi)
    green_only = cv2.bitwise_and(region, region, mask=green_mask)
    _, binary = cv2.threshold(cv2.cvtColor(green_only, cv2.COLOR_BGR2GRAY), 3, 255, cv2.THRESH_BINARY)
    _, contours, _ = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # The setter is "present" when the green blobs cover a non-trivial area.
    area = sum(cv2.contourArea(cnt) for cnt in contours)
    setterPresent = area > 1000
    if setterPresent:
        activity = activity + str(priorPinCount)+ ',-2,'
        print("Green", area, frameNo, activity)
    return
def isResetArm():
    """Detect motion of the pin-reset arm in its crop window.

    Compares the current arm region against the stored background crop
    (img_gray1arm); any contour surviving threshold + blur counts as the
    arm.  On detection, sets the global armPresent flag and zeroes the
    global ballCounter.
    """
    global firstArmFrame, armPresent, ballCounter
    global frameNo
    global img_rgb
    global img_gray1arm
    global threshArm
    global resetArmCrops
    global priorPinCount
    arm_now = getCroppedImage(img_rgb, resetArmCrops)
    arm_now_gray = cv2.cvtColor(arm_now, cv2.COLOR_BGR2GRAY)
    # Background subtraction: anything that changed versus the reference crop.
    delta = cv2.absdiff(img_gray1arm, arm_now_gray)
    # Threshold at 120 to reduce noise (values above ~150 miss some colors).
    ret, threshArm = cv2.threshold(delta, 120, 255, cv2.THRESH_BINARY)
    # Median blur removes speckle noise; the kernel size must be odd.
    threshArm = cv2.medianBlur(threshArm, 15)
    contours = cv2.findContours(threshArm.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    if contours:
        print('Reset Arm', frameNo, len(contours), ballCounter, " ", priorPinCount)
        armPresent = True
        ballCounter = 0
    return
def findPins():
    """Count standing pins from the red pixels in each pin crop window.

    Encodes the standing pins as a 10-bit integer (pin i -> bit 2**(9-i)),
    publishes it on the GPIO pins, and runs a small state machine: when the
    count drops, a 2-second timer starts so the falling pins can settle
    before the lower count is accepted; on a frame where all 1023 pins had
    been up, the buffered throw video is saved via write_video().
    """
    global x,x1,y,y1
    global priorPinCount, frameNo
    global img_rgb
    global frame2
    global pinsFalling, timesup # initial values False, True
    def timeout():
        # Timer callback: allow the new (lower) pin count to be accepted.
        global timesup
        timesup = True
        print ('Timer is finished', timesup)
    pinCount = 0
    crop = []
    sumHist = [0,0,0,0,0,0,0,0,0,0]
    # Red-pixel mask bounds in BGR order; commented values are earlier tunings.
    lower_red = numpy.array([0,0,70]) # lower_red = np.array([0,100,0])
    upper_red = numpy.array([110, 110, 255]) # upper_red = np.array([180,255,255])
    mask = cv2.inRange(img_rgb,lower_red,upper_red)
    output = cv2.bitwise_and(img_rgb, img_rgb, mask=mask)
    threshold1 = 10
    for i in range(0,10):
        # Crop each pin window; x/x1/y/y1 are global calibration offsets.
        crop.append(output[pin_crop_ranges[i][0]+y:pin_crop_ranges[i][1]+y1,pin_crop_ranges[i][2]+x:pin_crop_ranges[i][3]+x1])
        # 4-bin histogram of channel 1 over [10,50): enough red-masked pixels
        # in that band means the pin is standing.
        hist = cv2.calcHist([crop[i]],[1],None,[4], [10,50])
        sumHist[i] = hist[0]+hist[1]+hist[2]+hist[3]
        # print (i, sumHist[i])
        if threshold1 < sumHist[i]:
            pinCount = pinCount + 2**(9-i)
    bit_GPIO(pinsGPIO,pinCount)
    if pinsFalling == True:
        if timesup == False:
            # Pins are still settling; keep the previous count for now.
            return
        else:
            # Settle timer expired: commit the new count.
            result = " _"+ str(priorPinCount)+"_" + str(pinCount) + "_"
            print("FrameNo ", frameNo, "PinCount ", priorPinCount, "_",pinCount )
            if priorPinCount == 1023:
                # A full rack just fell -- save the buffered throw video.
                write_video(stream, result)
            priorPinCount = pinCount
            pinsFalling = False
            return
    if priorPinCount <= pinCount:
        priorPinCount = pinCount
        return
    else:
        # Count dropped: start the 2-second settle timer.
        pinsFalling = True
        t = threading.Timer(2.0, timeout)
        timesup = False
        t.start() # after 2.0 seconds, stream will be saved
        print ('timer is running', priorPinCount, pinCount)
        return
def iotSend(buf, result):
    """Upload the H.264 clip at path *buf* to the IoT hub blob store.

    *result* is embedded in the remote filename ("dp<result>.h264").
    IoTHub errors are logged and swallowed so the capture loop keeps running.
    """
    global frameNo
    try:
        client = iot.iothub_client_init()
        print ( "IoTHubClient is reporting state" )
        reported_state = "{\"newState\":\"standBy\"}"
        client.send_reported_state(reported_state, len(reported_state),
                                   iot.send_reported_state_callback,
                                   iot.SEND_REPORTED_STATE_CONTEXT)
        filename = "dp" + result + ".h264"
        # BUGFIX: the file handle was previously never closed (leaked one
        # descriptor per upload); the with-block guarantees release even on
        # error.
        with open(buf, "rb+") as f:
            content = f.read()
        print("CONTENT LEN", len(content), type(content))
        client.upload_blob_async(filename, content, len(content),
                                 iot.blob_upload_conf_callback, 1001)
    except iot.IoTHubError as iothub_error:
        print ( "Unexpected error %s from IoTHub" % iothub_error )
        return
    except KeyboardInterrupt:
        print ( "IoTHubClient sample stopped" )
    iot.print_last_message_time(client)
def drawPinRectangles():
    """Save debug images with the calibration rectangles drawn on the frame.

    Overlays the pin crop windows, the ball-lane window and the reset-arm
    window (shifted by the global x/y calibration offsets) on the current
    frame and writes them under /home/pi/Shared/videos/.

    NOTE(review): the loop runs range(0,9) and therefore draws only pin
    windows 0..8, while findPins() reads 10 windows (range(0,10)) --
    confirm whether the 10th rectangle is intentionally skipped.
    """
    global ball_image,img_rgb,x,y
    global pin_crop_ranges,resX
    mx=x
    my=y
    ball_image = img_rgb
    # NOTE: crop is img[y: y + h, x: x + w]
    # cv2.rectangle is a = (x,y) , b=(x1,y1)
    for i in range(0,9):
        a =(pin_crop_ranges[i][2]+mx,pin_crop_ranges[i][0]+my)
        b = (pin_crop_ranges[i][3]+mx, pin_crop_ranges[i][1]+my)
        cv2.rectangle(ball_image, b, a, 255, 2)
        if i == 6:
            # Annotate one rectangle with its corner coordinates for calibration.
            cv2.putText(ball_image,str(a),a,cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
            cv2.putText(ball_image,str(b),(b[0]-250,b[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
        cv2.imwrite('/home/pi/Shared/videos/CCEPinMask'+str(i) +'.jpg',ball_image)
    # Ball-lane window.
    a = (ballCrops[2]+mx,ballCrops[0]+my)
    b = (ballCrops[3]+mx, ballCrops[1]+my)
    cv2.rectangle(ball_image, b, a, 255, 2)
    cv2.putText(ball_image,str(a),a,cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
    cv2.putText(ball_image,str(b),(b[0]-250,b[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
    cv2.imwrite('/home/pi/Shared/videos/CCEBBallMask'+str(resX)+str(i) +'.jpg',ball_image)
    # Reset-arm window.
    a = (resetArmCrops[2]+mx, resetArmCrops[0]+my)
    b = (resetArmCrops[3]+mx, resetArmCrops[1]+my)
    cv2.rectangle(ball_image, b, a, 255, 2)
    cv2.putText(ball_image,str(a),a,cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
    cv2.putText(ball_image,str(b),(b[0]-250,b[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
    cv2.imwrite('/home/pi/Shared/videos/CCEBBallMask'+str(resX)+str(i+1) +'.jpg',ball_image)
# --- Detection state -------------------------------------------------------
setupGPIO(pinsGPIO)
# getMaskFrame()
setterPresent = False    # green pin-setter currently visible (set by isPinSetter)
armPresent = False       # reset arm currently visible (set by isResetArm)
ballPresent = False      # a ball is currently inside the lane crop
priorPinCount = 0        # last accepted 10-bit standing-pin code (findPins)
pinsFalling = False      # pins dropped; waiting for the settle timer (findPins)
timesup = True           # settle timer has expired (findPins)
activity = "\r\n"        # running log of pin-count events (isPinSetter)
# Calibration offsets applied to every crop window (x/x1 horizontal, y/y1 vertical).
x=0
x1=0 +x
y=0
y1=0 + y
frameNo = 0              # frames processed so far
ballCounter = 0          # balls seen since the last reset-arm pass
videoReadyFrameNo = 0    # frame number at the last video save (write_video)
video_preseconds = 3     # only used by the commented-out circular-buffer setup below
# Main capture loop: open the camera, grab one background frame of the lane,
# then detect balls by frame-differencing every new frame against it.
while True:
    # (Legacy picamera capture path, kept for reference; replaced by the
    # imutils VideoStream path below.)
    # with picamera.PiCamera() as camera:
    # camera.resolution = setResolution()
    # camera.video_stabilization = True
    # camera.annotate_background = True
    # camera.rotation = 180
    # rawCapture = PiRGBArray(camera, size=camera.resolution)
    # # setup a circular buffer
    # # stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)
    # stream = picamera.PiCameraCircularIO(camera, size = 3000000)
    # # video recording into circular buffer from splitter port 1
    # camera.start_recording(stream, format='h264', splitter_port=1)
    # #camera.start_recording('test.h264', splitter_port=1)
    # # wait 2 seconds for stable video data
    # camera.wait_recording(2, splitter_port=1)
    # # motion_detected = False
    # print(camera.resolution)
    vs = VideoStream(src=0, usePiCamera=True, resolution=setResolution()).start()
    # Allow the camera to warm up.
    time.sleep(2.0)
    # Capture the background reference frame for the ball lane.
    frame1 = vs.read()
    frame1= getCroppedImage(frame1, ballCrops)
    # img_gray1 = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    mask_gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    # for frame in camera.capture_continuous(rawCapture,format="bgr", use_video_port=True):
    # # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    # rawCapture.truncate()
    # rawCapture.seek(0)
    # Per-frame processing loop.
    while True:
        frame = vs.read()
        frame2 = frame
        frameNo = frameNo +1
        img_rgb = frame2
        # frame2arm = getCroppedImage(frame2, resetArmCrops)
        # img_gray2arm = cv2.cvtColor(frame2arm, cv2.COLOR_BGR2GRAY)
        print('Frame No ', frameNo)
        # (Pin-setter and reset-arm detection currently disabled.)
        # isPinSetter() #Deadwood
        # if setterPresent:
        # print('SetterPresent', frameNo, ballCounter)
        # time.sleep(9)
        # setterPresent = False
        # ballPresent = False
        # continue
        # # isResetArm() #Reset
        # if armPresent:
        # print ('ArmPresent', frameNo, ballCounter)
        # time.sleep(9)
        # armPresent = False
        # ballPresent = False
        # ballCounter = 0
        # continue
        # Difference the current lane crop against the background frame.
        frame2= getCroppedImage(frame2, ballCrops)
        # img_gray1 = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        img_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        diff = cv2.absdiff(mask_gray,img_gray)
        # First value reduces noise. Values above 150 seem to miss certain ball colors
        ret, thresh = cv2.threshold(diff, 120,255,cv2.THRESH_BINARY)
        frame = thresh
        # Blur eliminates noise by averaging surrounding pixels. Value is array size of blur and MUST BE ODD
        thresh = cv2.medianBlur(thresh,13)
        # print(type(thresh), type(diff),type(img_gray1), type(img_gray2))
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)[-2]
        # center = None
        # radius = 0
        if len(cnts) == 0:
            # No motion in the lane: if a ball was being tracked, it has left.
            if ballPresent == True:
                ballPresent = False
                ballCounter = ballCounter + 1
                print("BALLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL", ballCounter, 'at frame ', frameNo-1)
        else:
            ballPresent = True
        # camera.annotate_text = "Date "+ str(time.process_time()) + " Frame " + str(frameNo) + " Prior " + str(priorPinCount)
        writeImageSeries(30, 3, img_rgb)
        # findPins()
        # key = cv2.waitKey() & 0xFF
        # # if the `q` key was pressed, break from the loop
        # if key == ord("q"):
        # break
| [
"threading.Timer",
"cv2.bitwise_and",
"cv2.medianBlur",
"cv2.rectangle",
"cv2.absdiff",
"RPi.GPIO.output",
"cv2.inRange",
"cv2.contourArea",
"cv2.cvtColor",
"RPi.GPIO.setup",
"Iothub_client_functions.iothub_client_init",
"Iothub_client_functions.print_last_message_time",
"io.open",
"RPi.GP... | [((1514, 1536), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (1526, 1536), True, 'import RPi.GPIO as GPIO\n'), ((1541, 1564), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (1557, 1564), True, 'import RPi.GPIO as GPIO\n'), ((2027, 2082), 'cv2.imread', 'cv2.imread', (['"""/home/pi/Shared/histImage/BallMask.jpg"""', '(1)'], {}), "('/home/pi/Shared/histImage/BallMask.jpg', 1)\n", (2037, 2082), False, 'import cv2\n'), ((2182, 2223), 'cv2.cvtColor', 'cv2.cvtColor', (['img_arm', 'cv2.COLOR_BGR2GRAY'], {}), '(img_arm, cv2.COLOR_BGR2GRAY)\n', (2194, 2223), False, 'import cv2\n'), ((2291, 2334), 'cv2.cvtColor', 'cv2.cvtColor', (['maskFrame', 'cv2.COLOR_BGR2GRAY'], {}), '(maskFrame, cv2.COLOR_BGR2GRAY)\n', (2303, 2334), False, 'import cv2\n'), ((3873, 3911), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (3885, 3911), False, 'import cv2\n'), ((3971, 3996), 'numpy.array', 'numpy.array', (['[65, 60, 60]'], {}), '([65, 60, 60])\n', (3982, 3996), False, 'import numpy\n'), ((4013, 4040), 'numpy.array', 'numpy.array', (['[80, 255, 255]'], {}), '([80, 255, 255])\n', (4024, 4040), False, 'import numpy\n'), ((4105, 4147), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_green', 'upper_green'], {}), '(hsv, lower_green, upper_green)\n', (4116, 4147), False, 'import cv2\n'), ((4158, 4198), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (4173, 4198), False, 'import cv2\n'), ((4309, 4374), 'cv2.findContours', 'cv2.findContours', (['thrshed', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thrshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (4325, 4374), False, 'import cv2\n'), ((4981, 5024), 'cv2.cvtColor', 'cv2.cvtColor', (['frame2arm', 'cv2.COLOR_BGR2GRAY'], {}), '(frame2arm, cv2.COLOR_BGR2GRAY)\n', (4993, 5024), False, 'import cv2\n'), ((5172, 5211), 'cv2.absdiff', 'cv2.absdiff', (['img_gray1arm', 
'img_gray2arm'], {}), '(img_gray1arm, img_gray2arm)\n', (5183, 5211), False, 'import cv2\n'), ((5316, 5364), 'cv2.threshold', 'cv2.threshold', (['diff', '(120)', '(255)', 'cv2.THRESH_BINARY'], {}), '(diff, 120, 255, cv2.THRESH_BINARY)\n', (5329, 5364), False, 'import cv2\n'), ((5509, 5538), 'cv2.medianBlur', 'cv2.medianBlur', (['threshArm', '(15)'], {}), '(threshArm, 15)\n', (5523, 5538), False, 'import cv2\n'), ((6465, 6488), 'numpy.array', 'numpy.array', (['[0, 0, 70]'], {}), '([0, 0, 70])\n', (6476, 6488), False, 'import numpy\n'), ((6541, 6569), 'numpy.array', 'numpy.array', (['[110, 110, 255]'], {}), '([110, 110, 255])\n', (6552, 6569), False, 'import numpy\n'), ((6625, 6667), 'cv2.inRange', 'cv2.inRange', (['img_rgb', 'lower_red', 'upper_red'], {}), '(img_rgb, lower_red, upper_red)\n', (6636, 6667), False, 'import cv2\n'), ((6683, 6727), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img_rgb', 'img_rgb'], {'mask': 'mask'}), '(img_rgb, img_rgb, mask=mask)\n', (6698, 6727), False, 'import cv2\n'), ((8989, 9024), 'Iothub_client_functions.print_last_message_time', 'iot.print_last_message_time', (['client'], {}), '(client)\n', (9016, 9024), True, 'import Iothub_client_functions as iot\n'), ((9829, 9868), 'cv2.rectangle', 'cv2.rectangle', (['ball_image', 'b', 'a', '(255)', '(2)'], {}), '(ball_image, b, a, 255, 2)\n', (9842, 9868), False, 'import cv2\n'), ((10240, 10279), 'cv2.rectangle', 'cv2.rectangle', (['ball_image', 'b', 'a', '(255)', '(2)'], {}), '(ball_image, b, a, 255, 2)\n', (10253, 10279), False, 'import cv2\n'), ((11723, 11738), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (11733, 11738), False, 'import time\n'), ((11887, 11927), 'cv2.cvtColor', 'cv2.cvtColor', (['frame1', 'cv2.COLOR_BGR2GRAY'], {}), '(frame1, cv2.COLOR_BGR2GRAY)\n', (11899, 11927), False, 'import cv2\n'), ((1594, 1619), 'RPi.GPIO.setup', 'GPIO.setup', (['pin', 'GPIO.OUT'], {}), '(pin, GPIO.OUT)\n', (1604, 1619), True, 'import RPi.GPIO as GPIO\n'), ((3147, 3186), 'io.open', 
'io.open', (['"""/dp/log/firstFile.h264"""', '"""wb"""'], {}), "('/dp/log/firstFile.h264', 'wb')\n", (3154, 3186), False, 'import io, sys\n'), ((4228, 4265), 'cv2.cvtColor', 'cv2.cvtColor', (['res', 'cv2.COLOR_BGR2GRAY'], {}), '(res, cv2.COLOR_BGR2GRAY)\n', (4240, 4265), False, 'import cv2\n'), ((6940, 6989), 'cv2.calcHist', 'cv2.calcHist', (['[crop[i]]', '[1]', 'None', '[4]', '[10, 50]'], {}), '([crop[i]], [1], None, [4], [10, 50])\n', (6952, 6989), False, 'import cv2\n'), ((7873, 7902), 'threading.Timer', 'threading.Timer', (['(2.0)', 'timeout'], {}), '(2.0, timeout)\n', (7888, 7902), False, 'import threading\n'), ((8150, 8174), 'Iothub_client_functions.iothub_client_init', 'iot.iothub_client_init', ([], {}), '()\n', (8172, 8174), True, 'import Iothub_client_functions as iot\n'), ((9412, 9451), 'cv2.rectangle', 'cv2.rectangle', (['ball_image', 'b', 'a', '(255)', '(2)'], {}), '(ball_image, b, a, 255, 2)\n', (9425, 9451), False, 'import cv2\n'), ((13196, 13236), 'cv2.cvtColor', 'cv2.cvtColor', (['frame2', 'cv2.COLOR_BGR2GRAY'], {}), '(frame2, cv2.COLOR_BGR2GRAY)\n', (13208, 13236), False, 'import cv2\n'), ((13252, 13284), 'cv2.absdiff', 'cv2.absdiff', (['mask_gray', 'img_gray'], {}), '(mask_gray, img_gray)\n', (13263, 13284), False, 'import cv2\n'), ((13394, 13442), 'cv2.threshold', 'cv2.threshold', (['diff', '(120)', '(255)', 'cv2.THRESH_BINARY'], {}), '(diff, 120, 255, cv2.THRESH_BINARY)\n', (13407, 13442), False, 'import cv2\n'), ((13591, 13617), 'cv2.medianBlur', 'cv2.medianBlur', (['thresh', '(13)'], {}), '(thresh, 13)\n', (13605, 13617), False, 'import cv2\n'), ((1839, 1872), 'RPi.GPIO.output', 'GPIO.output', (['pins[idx]', 'GPIO.HIGH'], {}), '(pins[idx], GPIO.HIGH)\n', (1850, 1872), True, 'import RPi.GPIO as GPIO\n'), ((1899, 1931), 'RPi.GPIO.output', 'GPIO.output', (['pins[idx]', 'GPIO.LOW'], {}), '(pins[idx], GPIO.LOW)\n', (1910, 1931), True, 'import RPi.GPIO as GPIO\n'), ((4486, 4506), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (4501, 
4506), False, 'import cv2\n')] |
import numpy as np
import Box2D
from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, distanceJointDef,
contactListener)
import gym
from gym import spaces
from gym.utils import seeding
"""
The objective of this environment is to land a rocket on a ship.
STATE VARIABLES
The state consists of the following variables:
- x position
- y position
- angle
- first leg ground contact indicator
- second leg ground contact indicator
- throttle
- engine gimbal
If VEL_STATE is set to true, the velocities are included:
- x velocity
- y velocity
- angular velocity
all state variables are roughly in the range [-1, 1]
CONTROL INPUTS
Discrete control inputs are:
- gimbal left
- gimbal right
- throttle up
- throttle down
- use first control thruster
- use second control thruster
- no action
Continuous control inputs are:
- gimbal (left/right)
- throttle (up/down)
- control thruster (left/right)
"""
CONTINUOUS = False  # if True, use the 3-dim Box action space instead of Discrete(7)
VEL_STATE = True  # Add velocity info to state
FPS = 60  # physics and rendering rate (Box2D is stepped at 1/FPS)
SCALE_S = 0.35  # Temporal Scaling, lower is faster - adjust forces appropriately
INITIAL_RANDOM = 0.4  # Random scaling of initial velocity, higher is more difficult
START_HEIGHT = 1000.0
START_SPEED = 80.0
# ROCKET
MIN_THROTTLE = 0.4  # lowest non-zero engine power fraction (see power calc in step())
GIMBAL_THRESHOLD = 0.4  # gimbal value is clipped to +/- this; used as an angle in sin/cos
MAIN_ENGINE_POWER = 1600 * SCALE_S
SIDE_ENGINE_POWER = 100 / FPS * SCALE_S
ROCKET_WIDTH = 3.66 * SCALE_S
ROCKET_HEIGHT = ROCKET_WIDTH / 3.7 * 47.9
ENGINE_HEIGHT = ROCKET_WIDTH * 0.5
ENGINE_WIDTH = ENGINE_HEIGHT * 0.7
THRUSTER_HEIGHT = ROCKET_HEIGHT * 0.86  # attachment height of the side control thrusters
# LEGS
LEG_LENGTH = ROCKET_WIDTH * 2.2
BASE_ANGLE = -0.27
SPRING_ANGLE = 0.27
LEG_AWAY = ROCKET_WIDTH / 2
# SHIP
SHIP_HEIGHT = ROCKET_WIDTH
SHIP_WIDTH = SHIP_HEIGHT * 40
# VIEWPORT
VIEWPORT_H = 1440
VIEWPORT_W = 1008
H = 1.1 * START_HEIGHT * SCALE_S  # world height in Box2D units
W = float(VIEWPORT_W) / VIEWPORT_H * H  # world width, keeping the viewport aspect ratio
# SMOKE FOR VISUALS
MAX_SMOKE_LIFETIME = 2 * FPS
# Empirical per-component normalisation of the state vector returned by
# step().  NOTE: despite its name, VAR holds standard deviations (the sqrt
# of the variances), since states are divided by it directly.
MEAN = np.array([-0.034, -0.15, -0.016, 0.0024, 0.0024, 0.137,
                 - 0.02, -0.01, -0.8, 0.002])
VAR = np.sqrt(np.array([0.08, 0.33, 0.0073, 0.0023, 0.0023, 0.8,
                    0.085, 0.0088, 0.063, 0.076]))
class ContactDetector(contactListener):
    """Box2D contact listener for the rocket environment.

    Any contact involving the water, the rocket body, or either container
    ends the episode (``env.game_over``); contacts involving a landing leg
    toggle that leg's ``ground_contact`` flag instead.
    """

    def __init__(self, env):
        contactListener.__init__(self)
        self.env = env

    def BeginContact(self, contact):
        touching = (contact.fixtureA.body, contact.fixtureB.body)
        env = self.env
        # Touching anything with the hull, water or containers is fatal.
        fatal = (env.water, env.lander, env.containers[0], env.containers[1])
        if any(body in touching for body in fatal):
            env.game_over = True
        else:
            for leg in env.legs[:2]:
                if leg in touching:
                    leg.ground_contact = True

    def EndContact(self, contact):
        touching = (contact.fixtureA.body, contact.fixtureB.body)
        for leg in self.env.legs[:2]:
            if leg in touching:
                leg.ground_contact = False
class RocketLander(gym.Env):
    """Gym environment: land a booster rocket on a floating ship.

    The observation is the state vector described in the module docstring
    (normalised with the module-level MEAN/VAR constants); the action space
    is Discrete(7) or a 3-dim Box depending on the CONTINUOUS flag.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': FPS
    }

    def __init__(self):
        self._seed()
        self.viewer = None
        self.episode_number = 0
        self.world = Box2D.b2World()
        self.water = None
        self.lander = None
        self.engine = None
        self.ship = None
        self.legs = []
        # First 7 state entries are bounded; the optional trailing velocity
        # entries are unbounded.
        high = np.array([1, 1, 1, 1, 1, 1, 1, np.inf, np.inf, np.inf], dtype=np.float32)
        low = -high
        if not VEL_STATE:
            high = high[0:7]
            low = low[0:7]
        self.observation_space = spaces.Box(low, high, dtype=np.float32)
        if CONTINUOUS:
            self.action_space = spaces.Box(-1.0, +1.0, (3,), dtype=np.float32)
        else:
            self.action_space = spaces.Discrete(7)
        self.reset()

    def _seed(self, seed=None):
        """Seed the environment RNG; returns the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _destroy(self):
        """Destroy all Box2D bodies created by reset(); no-op before first reset."""
        if not self.water:
            return
        self.world.contactListener = None
        self.world.DestroyBody(self.water)
        self.water = None
        self.world.DestroyBody(self.lander)
        self.lander = None
        self.world.DestroyBody(self.ship)
        self.ship = None
        self.world.DestroyBody(self.legs[0])
        self.world.DestroyBody(self.legs[1])
        self.legs = []
        self.world.DestroyBody(self.containers[0])
        self.world.DestroyBody(self.containers[1])
        self.containers = []

    def reset(self):
        """Rebuild the world (water, ship, containers, rocket, legs) and
        return the initial observation (obtained from a no-op step)."""
        self._destroy()
        self.world.contactListener_keepref = ContactDetector(self)
        self.world.contactListener = self.world.contactListener_keepref
        self.game_over = False
        self.prev_shaping = None
        self.throttle = 0
        self.gimbal = 0.0
        self.landed_ticks = 0
        self.stepnumber = 0
        self.smoke = []
        # self.terrainheigth = self.np_random.uniform(H / 20, H / 10)
        self.terrainheigth = H / 20
        self.shipheight = self.terrainheigth + SHIP_HEIGHT
        # ship_pos = self.np_random.uniform(0, SHIP_WIDTH / SCALE) + SHIP_WIDTH / SCALE
        ship_pos = W / 2
        self.helipad_x1 = ship_pos - SHIP_WIDTH / 2
        self.helipad_x2 = self.helipad_x1 + SHIP_WIDTH
        self.helipad_y = self.terrainheigth + SHIP_HEIGHT
        self.water = self.world.CreateStaticBody(
            fixtures=fixtureDef(
                shape=polygonShape(vertices=((0, 0), (W, 0), (W, self.terrainheigth), (0, self.terrainheigth))),
                friction=0.1,
                restitution=0.0)
        )
        self.water.color1 = rgb(70, 96, 176)
        self.ship = self.world.CreateStaticBody(
            fixtures=fixtureDef(
                shape=polygonShape(
                    vertices=((self.helipad_x1, self.terrainheigth),
                              (self.helipad_x2, self.terrainheigth),
                              (self.helipad_x2, self.terrainheigth + SHIP_HEIGHT),
                              (self.helipad_x1, self.terrainheigth + SHIP_HEIGHT))),
                friction=0.5,
                restitution=0.0)
        )
        self.containers = []
        for side in [-1, 1]:
            self.containers.append(self.world.CreateStaticBody(
                fixtures=fixtureDef(
                    shape=polygonShape(
                        vertices=((ship_pos + side * 0.95 * SHIP_WIDTH / 2, self.helipad_y),
                                  (ship_pos + side * 0.95 * SHIP_WIDTH / 2, self.helipad_y + SHIP_HEIGHT),
                                  (ship_pos + side * 0.95 * SHIP_WIDTH / 2 - side * SHIP_HEIGHT,
                                   self.helipad_y + SHIP_HEIGHT),
                                  (ship_pos + side * 0.95 * SHIP_WIDTH / 2 - side * SHIP_HEIGHT, self.helipad_y)
                                  )),
                    friction=0.2,
                    restitution=0.0)
            ))
            self.containers[-1].color1 = rgb(206, 206, 2)
        self.ship.color1 = (0.2, 0.2, 0.2)
        initial_x = W / 2 + W * np.random.uniform(-0.3, 0.3)
        initial_y = H * 0.95
        self.lander = self.world.CreateDynamicBody(
            position=(initial_x, initial_y),
            angle=0.0,
            fixtures=fixtureDef(
                shape=polygonShape(vertices=((-ROCKET_WIDTH / 2, 0),
                                       (+ROCKET_WIDTH / 2, 0),
                                       (ROCKET_WIDTH / 2, +ROCKET_HEIGHT),
                                       (-ROCKET_WIDTH / 2, +ROCKET_HEIGHT))),
                density=1.0,
                friction=0.5,
                categoryBits=0x0010,
                maskBits=0x001,
                restitution=0.0)
        )
        self.lander.color1 = rgb(230, 230, 230)
        for i in [-1, +1]:
            leg = self.world.CreateDynamicBody(
                position=(initial_x - i * LEG_AWAY, initial_y + ROCKET_WIDTH * 0.2),
                angle=(i * BASE_ANGLE),
                fixtures=fixtureDef(
                    shape=polygonShape(
                        vertices=((0, 0), (0, LEG_LENGTH / 25), (i * LEG_LENGTH, 0), (i * LEG_LENGTH, -LEG_LENGTH / 20),
                                  (i * LEG_LENGTH / 3, -LEG_LENGTH / 7))),
                    density=1,
                    restitution=0.0,
                    friction=0.2,
                    categoryBits=0x0020,
                    maskBits=0x001)
            )
            leg.ground_contact = False
            leg.color1 = (0.25, 0.25, 0.25)
            # Hinge with a weak motor folds the leg against its angle limit.
            rjd = revoluteJointDef(
                bodyA=self.lander,
                bodyB=leg,
                localAnchorA=(i * LEG_AWAY, ROCKET_WIDTH * 0.2),
                localAnchorB=(0, 0),
                enableLimit=True,
                maxMotorTorque=2500.0,
                motorSpeed=-0.05 * i,
                enableMotor=True
            )
            # Soft distance joint acts as a suspension spring for touchdown.
            djd = distanceJointDef(bodyA=self.lander,
                             bodyB=leg,
                             anchorA=(i * LEG_AWAY, ROCKET_HEIGHT / 8),
                             anchorB=leg.fixtures[0].body.transform * (i * LEG_LENGTH, 0),
                             collideConnected=False,
                             frequencyHz=0.01,
                             dampingRatio=0.9
                             )
            if i == 1:
                rjd.lowerAngle = -SPRING_ANGLE
                rjd.upperAngle = 0
            else:
                rjd.lowerAngle = 0
                rjd.upperAngle = + SPRING_ANGLE
            leg.joint = self.world.CreateJoint(rjd)
            leg.joint2 = self.world.CreateJoint(djd)
            self.legs.append(leg)
        self.lander.linearVelocity = (
            -self.np_random.uniform(0, INITIAL_RANDOM) * START_SPEED * (initial_x - W / 2) / W,
            -START_SPEED)
        self.lander.angularVelocity = (1 + INITIAL_RANDOM) * np.random.uniform(-1, 1)
        self.drawlist = self.legs + [self.water] + [self.ship] + self.containers + [self.lander]
        if CONTINUOUS:
            return self.step([0, 0, 0])[0]
        else:
            return self.step(6)[0]

    def step(self, action):
        """Apply one action, advance the physics by 1/FPS and return
        (state, reward, done, info)."""
        self.force_dir = 0
        if CONTINUOUS:
            # FIX: np.clip returns a new array; the original discarded the
            # result, so out-of-range actions were never actually clipped.
            action = np.clip(action, -1, 1)
            self.gimbal += action[0] * 0.15 / FPS
            self.throttle += action[1] * 0.5 / FPS
            if action[2] > 0.5:
                self.force_dir = 1
            elif action[2] < -0.5:
                self.force_dir = -1
        else:
            if action == 0:
                self.gimbal += 0.1
            elif action == 1:
                self.gimbal -= 0.1
            elif action == 2:
                self.throttle += 0.1
            elif action == 3:
                self.throttle -= 0.1
            elif action == 4:  # left
                self.force_dir = -1
            elif action == 5:  # right
                self.force_dir = 1
        self.gimbal = np.clip(self.gimbal, -GIMBAL_THRESHOLD, GIMBAL_THRESHOLD)
        self.throttle = np.clip(self.throttle, 0.0, 1.0)
        # Non-zero throttle maps to [MIN_THROTTLE, 1] - the engine cannot idle.
        self.power = 0 if self.throttle == 0.0 else MIN_THROTTLE + self.throttle * (1 - MIN_THROTTLE)
        # main engine force
        force_pos = (self.lander.position[0], self.lander.position[1])
        force = (-np.sin(self.lander.angle + self.gimbal) * MAIN_ENGINE_POWER * self.power,
                 np.cos(self.lander.angle + self.gimbal) * MAIN_ENGINE_POWER * self.power)
        self.lander.ApplyForce(force=force, point=force_pos, wake=False)
        # control thruster force
        force_pos_c = self.lander.position + THRUSTER_HEIGHT * np.array(
            (np.sin(self.lander.angle), np.cos(self.lander.angle)))
        force_c = (-self.force_dir * np.cos(self.lander.angle) * SIDE_ENGINE_POWER,
                   self.force_dir * np.sin(self.lander.angle) * SIDE_ENGINE_POWER)
        self.lander.ApplyLinearImpulse(impulse=force_c, point=force_pos_c, wake=False)
        self.world.Step(1.0 / FPS, 60, 60)
        pos = self.lander.position
        vel_l = np.array(self.lander.linearVelocity) / START_SPEED
        vel_a = self.lander.angularVelocity
        x_distance = (pos.x - W / 2) / W
        y_distance = (pos.y - self.shipheight) / (H - self.shipheight)
        # Wrap the body angle into (-1, 1] (units of pi radians).
        angle = (self.lander.angle / np.pi) % 2
        if angle > 1:
            angle -= 2
        state = [
            2 * x_distance,
            2 * (y_distance - 0.5),
            angle,
            1.0 if self.legs[0].ground_contact else 0.0,
            1.0 if self.legs[1].ground_contact else 0.0,
            2 * (self.throttle - 0.5),
            (self.gimbal / GIMBAL_THRESHOLD)
        ]
        if VEL_STATE:
            state.extend([vel_l[0],
                          vel_l[1],
                          vel_a])
        # # print state
        # if self.viewer is not None:
        #     print('\t'.join(["{:7.3}".format(s) for s in state]))
        # REWARD -------------------------------------------------------------------------------------------------------
        # state variables for reward
        distance = np.linalg.norm((3 * x_distance, 3 * y_distance))  # weight x position more
        speed = np.linalg.norm(vel_l)
        groundcontact = self.legs[0].ground_contact or self.legs[1].ground_contact
        brokenleg = (self.legs[0].joint.angle < 0 or self.legs[1].joint.angle > -0) and groundcontact
        outside = abs(pos.x - W / 2) > W / 2 or pos.y > H
        fuelcost = 0.1 * (0 * self.power + abs(self.force_dir)) / FPS
        landed = self.legs[0].ground_contact and self.legs[1].ground_contact and speed < 0.1
        done = False
        reward = -fuelcost
        if outside or brokenleg:
            self.game_over = True
        if self.game_over:
            done = True
        else:
            # reward shaping
            shaping = -0.5 * (distance + speed + abs(angle) ** 2)
            shaping += 0.1 * (self.legs[0].ground_contact + self.legs[1].ground_contact)
            if self.prev_shaping is not None:
                reward += shaping - self.prev_shaping
            self.prev_shaping = shaping
            # A successful landing requires both legs down and low speed for
            # a full second (FPS consecutive steps).
            if landed:
                self.landed_ticks += 1
            else:
                self.landed_ticks = 0
            if self.landed_ticks == FPS:
                reward = 1.0
                done = True
                print("LANDED!!!!!!!!!")
        if done:
            reward += max(-1, 0 - 2 * (speed + distance + abs(angle) + abs(vel_a)))
        elif not groundcontact:
            reward -= 0.25 / FPS
        reward = np.clip(reward, -1, 1)
        # REWARD -------------------------------------------------------------------------------------------------------
        self.stepnumber += 1
        # Normalise with the empirical module-level statistics.
        state = (state - MEAN[:len(state)]) / VAR[:len(state)]
        return np.array(state), reward, done, {}

    def render(self, mode='human', close=False):
        """Render the scene; `close` is the legacy gym shutdown flag."""
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        from gym.envs.classic_control import rendering
        if self.viewer is None:
            # One-time construction of all static render geometry.
            self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)
            self.viewer.set_bounds(0, W, 0, H)
            sky = rendering.FilledPolygon(((0, 0), (0, H), (W, H), (W, 0)))
            self.sky_color = rgb(126, 150, 233)
            sky.set_color(*self.sky_color)
            self.sky_color_half_transparent = np.array((np.array(self.sky_color) + rgb(255, 255, 255))) / 2
            self.viewer.add_geom(sky)
            self.rockettrans = rendering.Transform()
            engine = rendering.FilledPolygon(((0, 0),
                                              (ENGINE_WIDTH / 2, -ENGINE_HEIGHT),
                                              (-ENGINE_WIDTH / 2, -ENGINE_HEIGHT)))
            self.enginetrans = rendering.Transform()
            engine.add_attr(self.enginetrans)
            engine.add_attr(self.rockettrans)
            engine.set_color(.4, .4, .4)
            self.viewer.add_geom(engine)
            self.fire = rendering.FilledPolygon(((ENGINE_WIDTH * 0.4, 0), (-ENGINE_WIDTH * 0.4, 0),
                                                 (-ENGINE_WIDTH * 1.2, -ENGINE_HEIGHT * 5),
                                                 (0, -ENGINE_HEIGHT * 8), (ENGINE_WIDTH * 1.2, -ENGINE_HEIGHT * 5)))
            self.fire.set_color(*rgb(255, 230, 107))
            self.firescale = rendering.Transform(scale=(1, 1))
            self.firetrans = rendering.Transform(translation=(0, -ENGINE_HEIGHT))
            self.fire.add_attr(self.firescale)
            self.fire.add_attr(self.firetrans)
            self.fire.add_attr(self.enginetrans)
            self.fire.add_attr(self.rockettrans)
            smoke = rendering.FilledPolygon(((ROCKET_WIDTH / 2, THRUSTER_HEIGHT * 1),
                                             (ROCKET_WIDTH * 3, THRUSTER_HEIGHT * 1.03),
                                             (ROCKET_WIDTH * 4, THRUSTER_HEIGHT * 1),
                                             (ROCKET_WIDTH * 3, THRUSTER_HEIGHT * 0.97)))
            smoke.set_color(*self.sky_color_half_transparent)
            self.smokescale = rendering.Transform(scale=(1, 1))
            smoke.add_attr(self.smokescale)
            smoke.add_attr(self.rockettrans)
            self.viewer.add_geom(smoke)
            self.gridfins = []
            for i in (-1, 1):
                finpoly = (
                    (i * ROCKET_WIDTH * 1.1, THRUSTER_HEIGHT * 1.01),
                    (i * ROCKET_WIDTH * 0.4, THRUSTER_HEIGHT * 1.01),
                    (i * ROCKET_WIDTH * 0.4, THRUSTER_HEIGHT * 0.99),
                    (i * ROCKET_WIDTH * 1.1, THRUSTER_HEIGHT * 0.99)
                )
                gridfin = rendering.FilledPolygon(finpoly)
                gridfin.add_attr(self.rockettrans)
                gridfin.set_color(0.25, 0.25, 0.25)
                self.gridfins.append(gridfin)
        # Spawn a smoke particle roughly 10 times per second while thrusting.
        if self.stepnumber % round(FPS / 10) == 0 and self.power > 0:
            s = [MAX_SMOKE_LIFETIME * self.power,  # total lifetime
                 0,  # current lifetime
                 self.power * (1 + 0.2 * np.random.random()),  # size
                 np.array(self.lander.position)
                 + self.power * ROCKET_WIDTH * 10 * np.array((np.sin(self.lander.angle + self.gimbal),
                                                            -np.cos(self.lander.angle + self.gimbal)))
                 + self.power * 5 * (np.random.random(2) - 0.5)]  # position
            self.smoke.append(s)
        # FIX: iterate over a copy so that remove() during iteration does not
        # skip the particle following each expired one.
        for s in list(self.smoke):
            s[1] += 1
            if s[1] > s[0]:
                self.smoke.remove(s)
                continue
            t = rendering.Transform(translation=(s[3][0], s[3][1] + H * s[1] / 2000))
            self.viewer.draw_circle(radius=0.05 * s[1] + s[2],
                                    color=self.sky_color + (1 - (2 * s[1] / s[0] - 1) ** 2) / 3 * (
                                        self.sky_color_half_transparent - self.sky_color)).add_attr(t)
        self.viewer.add_onetime(self.fire)
        for g in self.gridfins:
            self.viewer.add_onetime(g)
        for obj in self.drawlist:
            for f in obj.fixtures:
                trans = f.body.transform
                path = [trans * v for v in f.shape.vertices]
                self.viewer.draw_polygon(path, color=obj.color1)
        for l in zip(self.legs, [-1, 1]):
            path = [self.lander.fixtures[0].body.transform * (l[1] * ROCKET_WIDTH / 2, ROCKET_HEIGHT / 8),
                    l[0].fixtures[0].body.transform * (l[1] * LEG_LENGTH * 0.8, 0)]
            self.viewer.draw_polyline(path, color=self.ship.color1, linewidth=1 if START_HEIGHT > 500 else 2)
        self.viewer.draw_polyline(((self.helipad_x2, self.terrainheigth + SHIP_HEIGHT),
                                   (self.helipad_x1, self.terrainheigth + SHIP_HEIGHT)),
                                  color=rgb(206, 206, 2),
                                  linewidth=1)
        self.rockettrans.set_translation(*self.lander.position)
        self.rockettrans.set_rotation(self.lander.angle)
        self.enginetrans.set_rotation(self.gimbal)
        self.firescale.set_scale(newx=1, newy=self.power * np.random.uniform(1, 1.3))
        self.smokescale.set_scale(newx=self.force_dir, newy=1)
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def rgb(r, g, b):
    """Convert 8-bit RGB components to a tuple of floats in [0, 1]."""
    return tuple(component / 255.0 for component in (r, g, b))
| [
"numpy.random.uniform",
"gym.envs.classic_control.rendering.Transform",
"Box2D.b2.polygonShape",
"Box2D.b2.revoluteJointDef",
"gym.spaces.Discrete",
"Box2D.b2.contactListener.__init__",
"numpy.clip",
"Box2D.b2World",
"gym.envs.classic_control.rendering.FilledPolygon",
"Box2D.b2.distanceJointDef",
... | [((1989, 2076), 'numpy.array', 'np.array', (['[-0.034, -0.15, -0.016, 0.0024, 0.0024, 0.137, -0.02, -0.01, -0.8, 0.002]'], {}), '([-0.034, -0.15, -0.016, 0.0024, 0.0024, 0.137, -0.02, -0.01, -0.8,\n 0.002])\n', (1997, 2076), True, 'import numpy as np\n'), ((2105, 2190), 'numpy.array', 'np.array', (['[0.08, 0.33, 0.0073, 0.0023, 0.0023, 0.8, 0.085, 0.0088, 0.063, 0.076]'], {}), '([0.08, 0.33, 0.0073, 0.0023, 0.0023, 0.8, 0.085, 0.0088, 0.063, 0.076]\n )\n', (2113, 2190), True, 'import numpy as np\n'), ((2290, 2320), 'Box2D.b2.contactListener.__init__', 'contactListener.__init__', (['self'], {}), '(self)\n', (2314, 2320), False, 'from Box2D.b2 import edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, distanceJointDef, contactListener\n'), ((3433, 3448), 'Box2D.b2World', 'Box2D.b2World', ([], {}), '()\n', (3446, 3448), False, 'import Box2D\n'), ((3593, 3666), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, np.inf, np.inf, np.inf]'], {'dtype': 'np.float32'}), '([1, 1, 1, 1, 1, 1, 1, np.inf, np.inf, np.inf], dtype=np.float32)\n', (3601, 3666), True, 'import numpy as np\n'), ((3803, 3842), 'gym.spaces.Box', 'spaces.Box', (['low', 'high'], {'dtype': 'np.float32'}), '(low, high, dtype=np.float32)\n', (3813, 3842), False, 'from gym import spaces\n'), ((4097, 4120), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (4114, 4120), False, 'from gym.utils import seeding\n'), ((11211, 11268), 'numpy.clip', 'np.clip', (['self.gimbal', '(-GIMBAL_THRESHOLD)', 'GIMBAL_THRESHOLD'], {}), '(self.gimbal, -GIMBAL_THRESHOLD, GIMBAL_THRESHOLD)\n', (11218, 11268), True, 'import numpy as np\n'), ((11293, 11325), 'numpy.clip', 'np.clip', (['self.throttle', '(0.0)', '(1.0)'], {}), '(self.throttle, 0.0, 1.0)\n', (11300, 11325), True, 'import numpy as np\n'), ((13357, 13405), 'numpy.linalg.norm', 'np.linalg.norm', (['(3 * x_distance, 3 * y_distance)'], {}), '((3 * x_distance, 3 * y_distance))\n', (13371, 13405), True, 'import numpy as 
np\n'), ((13448, 13469), 'numpy.linalg.norm', 'np.linalg.norm', (['vel_l'], {}), '(vel_l)\n', (13462, 13469), True, 'import numpy as np\n'), ((14834, 14856), 'numpy.clip', 'np.clip', (['reward', '(-1)', '(1)'], {}), '(reward, -1, 1)\n', (14841, 14856), True, 'import numpy as np\n'), ((3899, 3945), 'gym.spaces.Box', 'spaces.Box', (['(-1.0)', '(+1.0)', '(3,)'], {'dtype': 'np.float32'}), '(-1.0, +1.0, (3,), dtype=np.float32)\n', (3909, 3945), False, 'from gym import spaces\n'), ((3992, 4010), 'gym.spaces.Discrete', 'spaces.Discrete', (['(7)'], {}), '(7)\n', (4007, 4010), False, 'from gym import spaces\n'), ((8784, 8990), 'Box2D.b2.revoluteJointDef', 'revoluteJointDef', ([], {'bodyA': 'self.lander', 'bodyB': 'leg', 'localAnchorA': '(i * LEG_AWAY, ROCKET_WIDTH * 0.2)', 'localAnchorB': '(0, 0)', 'enableLimit': '(True)', 'maxMotorTorque': '(2500.0)', 'motorSpeed': '(-0.05 * i)', 'enableMotor': '(True)'}), '(bodyA=self.lander, bodyB=leg, localAnchorA=(i * LEG_AWAY, \n ROCKET_WIDTH * 0.2), localAnchorB=(0, 0), enableLimit=True,\n maxMotorTorque=2500.0, motorSpeed=-0.05 * i, enableMotor=True)\n', (8800, 8990), False, 'from Box2D.b2 import edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, distanceJointDef, contactListener\n'), ((9142, 9362), 'Box2D.b2.distanceJointDef', 'distanceJointDef', ([], {'bodyA': 'self.lander', 'bodyB': 'leg', 'anchorA': '(i * LEG_AWAY, ROCKET_HEIGHT / 8)', 'anchorB': '(leg.fixtures[0].body.transform * (i * LEG_LENGTH, 0))', 'collideConnected': '(False)', 'frequencyHz': '(0.01)', 'dampingRatio': '(0.9)'}), '(bodyA=self.lander, bodyB=leg, anchorA=(i * LEG_AWAY, \n ROCKET_HEIGHT / 8), anchorB=leg.fixtures[0].body.transform * (i *\n LEG_LENGTH, 0), collideConnected=False, frequencyHz=0.01, dampingRatio=0.9)\n', (9158, 9362), False, 'from Box2D.b2 import edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, distanceJointDef, contactListener\n'), ((10170, 10194), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], 
{}), '(-1, 1)\n', (10187, 10194), True, 'import numpy as np\n'), ((10502, 10524), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (10509, 10524), True, 'import numpy as np\n'), ((12308, 12344), 'numpy.array', 'np.array', (['self.lander.linearVelocity'], {}), '(self.lander.linearVelocity)\n', (12316, 12344), True, 'import numpy as np\n'), ((15088, 15103), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (15096, 15103), True, 'import numpy as np\n'), ((15436, 15476), 'gym.envs.classic_control.rendering.Viewer', 'rendering.Viewer', (['VIEWPORT_W', 'VIEWPORT_H'], {}), '(VIEWPORT_W, VIEWPORT_H)\n', (15452, 15476), False, 'from gym.envs.classic_control import rendering\n'), ((15543, 15600), 'gym.envs.classic_control.rendering.FilledPolygon', 'rendering.FilledPolygon', (['((0, 0), (0, H), (W, H), (W, 0))'], {}), '(((0, 0), (0, H), (W, H), (W, 0)))\n', (15566, 15600), False, 'from gym.envs.classic_control import rendering\n'), ((15870, 15891), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {}), '()\n', (15889, 15891), False, 'from gym.envs.classic_control import rendering\n'), ((15914, 16025), 'gym.envs.classic_control.rendering.FilledPolygon', 'rendering.FilledPolygon', (['((0, 0), (ENGINE_WIDTH / 2, -ENGINE_HEIGHT), (-ENGINE_WIDTH / 2, -\n ENGINE_HEIGHT))'], {}), '(((0, 0), (ENGINE_WIDTH / 2, -ENGINE_HEIGHT), (-\n ENGINE_WIDTH / 2, -ENGINE_HEIGHT)))\n', (15937, 16025), False, 'from gym.envs.classic_control import rendering\n'), ((16144, 16165), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {}), '()\n', (16163, 16165), False, 'from gym.envs.classic_control import rendering\n'), ((16365, 16560), 'gym.envs.classic_control.rendering.FilledPolygon', 'rendering.FilledPolygon', (['((ENGINE_WIDTH * 0.4, 0), (-ENGINE_WIDTH * 0.4, 0), (-ENGINE_WIDTH * 1.2, -\n ENGINE_HEIGHT * 5), (0, -ENGINE_HEIGHT * 8), (ENGINE_WIDTH * 1.2, -\n ENGINE_HEIGHT * 5))'], {}), '(((ENGINE_WIDTH * 0.4, 0), 
(-ENGINE_WIDTH * 0.4, 0),\n (-ENGINE_WIDTH * 1.2, -ENGINE_HEIGHT * 5), (0, -ENGINE_HEIGHT * 8), (\n ENGINE_WIDTH * 1.2, -ENGINE_HEIGHT * 5)))\n', (16388, 16560), False, 'from gym.envs.classic_control import rendering\n'), ((16732, 16765), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'scale': '(1, 1)'}), '(scale=(1, 1))\n', (16751, 16765), False, 'from gym.envs.classic_control import rendering\n'), ((16795, 16847), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'translation': '(0, -ENGINE_HEIGHT)'}), '(translation=(0, -ENGINE_HEIGHT))\n', (16814, 16847), False, 'from gym.envs.classic_control import rendering\n'), ((17061, 17266), 'gym.envs.classic_control.rendering.FilledPolygon', 'rendering.FilledPolygon', (['((ROCKET_WIDTH / 2, THRUSTER_HEIGHT * 1), (ROCKET_WIDTH * 3, \n THRUSTER_HEIGHT * 1.03), (ROCKET_WIDTH * 4, THRUSTER_HEIGHT * 1), (\n ROCKET_WIDTH * 3, THRUSTER_HEIGHT * 0.97))'], {}), '(((ROCKET_WIDTH / 2, THRUSTER_HEIGHT * 1), (\n ROCKET_WIDTH * 3, THRUSTER_HEIGHT * 1.03), (ROCKET_WIDTH * 4, \n THRUSTER_HEIGHT * 1), (ROCKET_WIDTH * 3, THRUSTER_HEIGHT * 0.97)))\n', (17084, 17266), False, 'from gym.envs.classic_control import rendering\n'), ((17484, 17517), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'scale': '(1, 1)'}), '(scale=(1, 1))\n', (17503, 17517), False, 'from gym.envs.classic_control import rendering\n'), ((19015, 19084), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'translation': '(s[3][0], s[3][1] + H * s[1] / 2000)'}), '(translation=(s[3][0], s[3][1] + H * s[1] / 2000))\n', (19034, 19084), False, 'from gym.envs.classic_control import rendering\n'), ((7282, 7310), 'numpy.random.uniform', 'np.random.uniform', (['(-0.3)', '(0.3)'], {}), '(-0.3, 0.3)\n', (7299, 7310), True, 'import numpy as np\n'), ((18060, 18092), 'gym.envs.classic_control.rendering.FilledPolygon', 'rendering.FilledPolygon', (['finpoly'], {}), '(finpoly)\n', 
(18083, 18092), False, 'from gym.envs.classic_control import rendering\n'), ((11636, 11675), 'numpy.cos', 'np.cos', (['(self.lander.angle + self.gimbal)'], {}), '(self.lander.angle + self.gimbal)\n', (11642, 11675), True, 'import numpy as np\n'), ((11995, 12020), 'numpy.cos', 'np.cos', (['self.lander.angle'], {}), '(self.lander.angle)\n', (12001, 12020), True, 'import numpy as np\n'), ((12078, 12103), 'numpy.sin', 'np.sin', (['self.lander.angle'], {}), '(self.lander.angle)\n', (12084, 12103), True, 'import numpy as np\n'), ((20566, 20591), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(1.3)'], {}), '(1, 1.3)\n', (20583, 20591), True, 'import numpy as np\n'), ((5640, 5734), 'Box2D.b2.polygonShape', 'polygonShape', ([], {'vertices': '((0, 0), (W, 0), (W, self.terrainheigth), (0, self.terrainheigth))'}), '(vertices=((0, 0), (W, 0), (W, self.terrainheigth), (0, self.\n terrainheigth)))\n', (5652, 5734), False, 'from Box2D.b2 import edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, distanceJointDef, contactListener\n'), ((5954, 6170), 'Box2D.b2.polygonShape', 'polygonShape', ([], {'vertices': '((self.helipad_x1, self.terrainheigth), (self.helipad_x2, self.\n terrainheigth), (self.helipad_x2, self.terrainheigth + SHIP_HEIGHT), (\n self.helipad_x1, self.terrainheigth + SHIP_HEIGHT))'}), '(vertices=((self.helipad_x1, self.terrainheigth), (self.\n helipad_x2, self.terrainheigth), (self.helipad_x2, self.terrainheigth +\n SHIP_HEIGHT), (self.helipad_x1, self.terrainheigth + SHIP_HEIGHT)))\n', (5966, 6170), False, 'from Box2D.b2 import edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, distanceJointDef, contactListener\n'), ((7515, 7664), 'Box2D.b2.polygonShape', 'polygonShape', ([], {'vertices': '((-ROCKET_WIDTH / 2, 0), (+ROCKET_WIDTH / 2, 0), (ROCKET_WIDTH / 2, +\n ROCKET_HEIGHT), (-ROCKET_WIDTH / 2, +ROCKET_HEIGHT))'}), '(vertices=((-ROCKET_WIDTH / 2, 0), (+ROCKET_WIDTH / 2, 0), (\n ROCKET_WIDTH / 2, +ROCKET_HEIGHT), 
(-ROCKET_WIDTH / 2, +ROCKET_HEIGHT)))\n', (7527, 7664), False, 'from Box2D.b2 import edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, distanceJointDef, contactListener\n'), ((11545, 11584), 'numpy.sin', 'np.sin', (['(self.lander.angle + self.gimbal)'], {}), '(self.lander.angle + self.gimbal)\n', (11551, 11584), True, 'import numpy as np\n'), ((11903, 11928), 'numpy.sin', 'np.sin', (['self.lander.angle'], {}), '(self.lander.angle)\n', (11909, 11928), True, 'import numpy as np\n'), ((11930, 11955), 'numpy.cos', 'np.cos', (['self.lander.angle'], {}), '(self.lander.angle)\n', (11936, 11955), True, 'import numpy as np\n'), ((15748, 15772), 'numpy.array', 'np.array', (['self.sky_color'], {}), '(self.sky_color)\n', (15756, 15772), True, 'import numpy as np\n'), ((18508, 18538), 'numpy.array', 'np.array', (['self.lander.position'], {}), '(self.lander.position)\n', (18516, 18538), True, 'import numpy as np\n'), ((8280, 8434), 'Box2D.b2.polygonShape', 'polygonShape', ([], {'vertices': '((0, 0), (0, LEG_LENGTH / 25), (i * LEG_LENGTH, 0), (i * LEG_LENGTH, -\n LEG_LENGTH / 20), (i * LEG_LENGTH / 3, -LEG_LENGTH / 7))'}), '(vertices=((0, 0), (0, LEG_LENGTH / 25), (i * LEG_LENGTH, 0), (\n i * LEG_LENGTH, -LEG_LENGTH / 20), (i * LEG_LENGTH / 3, -LEG_LENGTH / 7)))\n', (8292, 8434), False, 'from Box2D.b2 import edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, distanceJointDef, contactListener\n'), ((18462, 18480), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (18478, 18480), True, 'import numpy as np\n'), ((18784, 18803), 'numpy.random.random', 'np.random.random', (['(2)'], {}), '(2)\n', (18800, 18803), True, 'import numpy as np\n'), ((6533, 6879), 'Box2D.b2.polygonShape', 'polygonShape', ([], {'vertices': '((ship_pos + side * 0.95 * SHIP_WIDTH / 2, self.helipad_y), (ship_pos + \n side * 0.95 * SHIP_WIDTH / 2, self.helipad_y + SHIP_HEIGHT), (ship_pos +\n side * 0.95 * SHIP_WIDTH / 2 - side * SHIP_HEIGHT, self.helipad_y +\n 
SHIP_HEIGHT), (ship_pos + side * 0.95 * SHIP_WIDTH / 2 - side *\n SHIP_HEIGHT, self.helipad_y))'}), '(vertices=((ship_pos + side * 0.95 * SHIP_WIDTH / 2, self.\n helipad_y), (ship_pos + side * 0.95 * SHIP_WIDTH / 2, self.helipad_y +\n SHIP_HEIGHT), (ship_pos + side * 0.95 * SHIP_WIDTH / 2 - side *\n SHIP_HEIGHT, self.helipad_y + SHIP_HEIGHT), (ship_pos + side * 0.95 *\n SHIP_WIDTH / 2 - side * SHIP_HEIGHT, self.helipad_y)))\n', (6545, 6879), False, 'from Box2D.b2 import edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, distanceJointDef, contactListener\n'), ((18601, 18640), 'numpy.sin', 'np.sin', (['(self.lander.angle + self.gimbal)'], {}), '(self.lander.angle + self.gimbal)\n', (18607, 18640), True, 'import numpy as np\n'), ((18705, 18744), 'numpy.cos', 'np.cos', (['(self.lander.angle + self.gimbal)'], {}), '(self.lander.angle + self.gimbal)\n', (18711, 18744), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import scipy.stats as st
import os
from functools import reduce
def mvlognorm(data, n=1000, seed=None, correlated=True):
    """Returns joint lognormal random variables.

    Inputs:
        data: (m, p) array of 'observations' where m is the number
            of observations and p is the number of parameters to
            estimate.

    Keywords:
        n: number of samples to generate. Default 1000.
        seed: random seed for variable generation
        correlated: logical. If True, assume random variables are
            correlated, and calculate the correlation coefficient.

    Outputs:
        (n, p) array of simulated joint lognormal random variables.

    Method: This function takes the input data (untransformed) and
    calculates the mean, standard deviation and correlation coefficient
    of the data, transforms it to a lognormal, and returns simulated
    data based on the input 'observations'.

    It is based on the MethylCapSig R package by <NAME> et al.,
    https://CRAN.R-project.org/package=MethylCapSig
    """
    p = data.shape[1]
    mu = np.mean(data, axis=0)
    Sigma = np.var(data, axis=0)
    if correlated:
        # Correlation is estimated on the log scale, consistent with the
        # lognormal model below.
        corr = np.corrcoef(np.log(data), rowvar=False)
    else:
        corr = np.eye(p)
    # Method-of-moments parameters of the underlying normal distribution:
    # exp(N(alpha, delta)) then has mean mu and variance Sigma.
    log_term = np.log(1.0 + Sigma/mu**2)
    alpha = np.log(mu) - 0.5*log_term
    beta = np.diag(log_term)
    delta = reduce(np.matmul, [np.sqrt(beta), corr, np.sqrt(beta)])
    # delta is symmetric, so use eigh (guaranteed real spectrum) rather
    # than eig; clip tiny negative eigenvalues from round-off before
    # taking the square root.
    delta_eigenvalues, delta_eigenvectors = np.linalg.eigh(delta)
    root_delta = reduce(np.matmul,
                        [delta_eigenvectors,
                         np.diag(np.sqrt(np.clip(delta_eigenvalues, 0.0, None))),
                         delta_eigenvectors.T])
    # Draw iid standard normals and map them through the model in a single
    # vectorised step (root_delta is symmetric, so the transpose is benign).
    normals = st.norm.rvs(size=(n, p), random_state=seed)
    return np.exp(alpha + np.matmul(normals, root_delta.T))
def tcrecs_generate(tcrecs_in='cmip5', dist='lognorm', n=1000, correlated=True,
                    strip_ecs_lt_tcr=True,
                    seed=None):
    """Generates a joint distribution of TCR and ECS.

    Inputs:
        tcrecs_in: either 'cmip5' for pre-shipped CMIP5 TCR (transient climate
            response) and ECS (equilibrium climate sensitivity) values, or a
            2-column array of TCR and ECS values to sample from.

    Keywords:
        dist: string. Distribution to use when constructing the
            joint distribution. Accepted values are norm and
            lognorm (default).
        n: number of samples to generate. Default 1000.
        correlated: logical. If True (default), assume ECS and TCR
            inputs are correlated. The function calculates the
            correlation coefficient automatically.
        strip_ecs_lt_tcr: logical. If True (default), remove values
            where ECS < TCR in place (but still return n samples.)
        seed: random seed for generating variables.

    Output:
        (n, 2) array of sampled pairs.  The filtering below removes rows
        where column 0 exceeds column 1, consistent with a (TCR, ECS)
        column ordering -- verify against the packaged CSV."""
    if type(tcrecs_in) is str and tcrecs_in=='cmip5':
        # Load the packaged CMIP5 TCR/ECS table shipped next to this module.
        filepath = os.path.join(os.path.dirname(__file__),
                                'tcrecs/cmip5tcrecs.csv')
        tcrecs_in = np.loadtxt(filepath, delimiter=',', skiprows=3)
    try:
        assert(type(tcrecs_in) is np.ndarray)
        assert(tcrecs_in.ndim == 2)
        assert(tcrecs_in.shape[1] == 2)
    except AssertionError:
        raise ValueError('tcrecs_in should "cmip5" or an array of shape (n, 2)')
    dist = dist.lower()
    def _genvar(tcrecs, dist, n, seed, correlated):
        # Draw n joint samples from the requested distribution family,
        # fitted to the rows of `tcrecs`.
        if dist=='lognorm':
            out = mvlognorm(tcrecs, n=n, seed=seed, correlated=correlated)
        elif dist=='norm':
            mu = np.mean(tcrecs, axis=0)
            if correlated:
                cov = np.cov(tcrecs, rowvar=False)
            else:
                cov = np.diag(np.var(tcrecs, axis=0))
            out = st.multivariate_normal.rvs(mu, cov, size=n, random_state=seed)
        else:
            raise ValueError('dist should be "norm" or "lognorm"')
        return out
    tcrecs_out = _genvar(tcrecs_in, dist, n, seed, correlated)
    if strip_ecs_lt_tcr:
        # Drop rows where column 0 > column 1 (ECS < TCR for (TCR, ECS) order).
        tcrecs_out = np.delete(tcrecs_out,
                                np.where(tcrecs_out[:,0] > tcrecs_out[:,1]), axis=0)
        nreq = n - len(tcrecs_out[:,0])
        while nreq>0:
            # required otherwise we are repeating values
            # this is still deterministic for constant ensemble size
            if seed is not None:
                seed = seed + nreq
            # NOTE(review): the top-up draws are fitted to the already
            # filtered output rather than the original observations, which
            # slightly shifts the sampled distribution -- confirm intended.
            new = _genvar(tcrecs_out, dist, n-len(tcrecs_out[:,0]),
                          seed, correlated)
            tcrecs_out = np.append(tcrecs_out, new, axis=0)
            tcrecs_out = np.delete(tcrecs_out, np.where(tcrecs_out[:,0] >
                                tcrecs_out[:,1]), axis=0)
            nreq = n - len(tcrecs_out[:,0])
    return tcrecs_out
| [
"numpy.log",
"scipy.stats.multivariate_normal.rvs",
"scipy.stats.norm.rvs",
"os.path.dirname",
"numpy.linalg.eig",
"numpy.append",
"numpy.mean",
"numpy.where",
"numpy.loadtxt",
"numpy.matmul",
"numpy.eye",
"numpy.cov",
"numpy.var",
"numpy.sqrt"
] | [((1139, 1160), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1146, 1160), True, 'import numpy as np\n'), ((1173, 1193), 'numpy.var', 'np.var', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1179, 1193), True, 'import numpy as np\n'), ((1530, 1550), 'numpy.linalg.eig', 'np.linalg.eig', (['delta'], {}), '(delta)\n', (1543, 1550), True, 'import numpy as np\n'), ((1685, 1728), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(n, p)', 'random_state': 'seed'}), '(size=(n, p), random_state=seed)\n', (1696, 1728), True, 'import scipy.stats as st\n'), ((1293, 1302), 'numpy.eye', 'np.eye', (['p'], {}), '(p)\n', (1299, 1302), True, 'import numpy as np\n'), ((1322, 1332), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (1328, 1332), True, 'import numpy as np\n'), ((1384, 1413), 'numpy.log', 'np.log', (['(1.0 + Sigma / mu ** 2)'], {}), '(1.0 + Sigma / mu ** 2)\n', (1390, 1413), True, 'import numpy as np\n'), ((3115, 3162), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {'delimiter': '""","""', 'skiprows': '(3)'}), "(filepath, delimiter=',', skiprows=3)\n", (3125, 3162), True, 'import numpy as np\n'), ((1240, 1252), 'numpy.log', 'np.log', (['data'], {}), '(data)\n', (1246, 1252), True, 'import numpy as np\n'), ((1339, 1368), 'numpy.log', 'np.log', (['(1.0 + Sigma / mu ** 2)'], {}), '(1.0 + Sigma / mu ** 2)\n', (1345, 1368), True, 'import numpy as np\n'), ((1449, 1462), 'numpy.sqrt', 'np.sqrt', (['beta'], {}), '(beta)\n', (1456, 1462), True, 'import numpy as np\n'), ((1470, 1483), 'numpy.sqrt', 'np.sqrt', (['beta'], {}), '(beta)\n', (1477, 1483), True, 'import numpy as np\n'), ((3032, 3057), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3047, 3057), False, 'import os\n'), ((4138, 4183), 'numpy.where', 'np.where', (['(tcrecs_out[:, 0] > tcrecs_out[:, 1])'], {}), '(tcrecs_out[:, 0] > tcrecs_out[:, 1])\n', (4146, 4183), True, 'import numpy as np\n'), ((4572, 4606), 'numpy.append', 'np.append', (['tcrecs_out', 
'new'], {'axis': '(0)'}), '(tcrecs_out, new, axis=0)\n', (4581, 4606), True, 'import numpy as np\n'), ((1622, 1648), 'numpy.sqrt', 'np.sqrt', (['delta_eigenvalues'], {}), '(delta_eigenvalues)\n', (1629, 1648), True, 'import numpy as np\n'), ((1788, 1820), 'numpy.matmul', 'np.matmul', (['root_delta', 'out[i, :]'], {}), '(root_delta, out[i, :])\n', (1797, 1820), True, 'import numpy as np\n'), ((3635, 3658), 'numpy.mean', 'np.mean', (['tcrecs'], {'axis': '(0)'}), '(tcrecs, axis=0)\n', (3642, 3658), True, 'import numpy as np\n'), ((3827, 3889), 'scipy.stats.multivariate_normal.rvs', 'st.multivariate_normal.rvs', (['mu', 'cov'], {'size': 'n', 'random_state': 'seed'}), '(mu, cov, size=n, random_state=seed)\n', (3853, 3889), True, 'import scipy.stats as st\n'), ((4654, 4699), 'numpy.where', 'np.where', (['(tcrecs_out[:, 0] > tcrecs_out[:, 1])'], {}), '(tcrecs_out[:, 0] > tcrecs_out[:, 1])\n', (4662, 4699), True, 'import numpy as np\n'), ((3708, 3736), 'numpy.cov', 'np.cov', (['tcrecs'], {'rowvar': '(False)'}), '(tcrecs, rowvar=False)\n', (3714, 3736), True, 'import numpy as np\n'), ((3785, 3807), 'numpy.var', 'np.var', (['tcrecs'], {'axis': '(0)'}), '(tcrecs, axis=0)\n', (3791, 3807), True, 'import numpy as np\n')] |
"""
Tests for L{eliot._output}.
"""
from sys import stdout
from unittest import TestCase, skipUnless
# Make sure to use StringIO that only accepts unicode:
from io import BytesIO, StringIO
import json as pyjson
from tempfile import mktemp
from time import time
from uuid import UUID
from threading import Thread
try:
import numpy as np
except ImportError:
np = None
from zope.interface.verify import verifyClass
from .._output import (
MemoryLogger,
ILogger,
Destinations,
Logger,
bytesjson as json,
to_file,
FileDestination,
_safe_unicode_dictionary,
)
from .._action import start_action
from .._validation import ValidationError, Field, _MessageSerializer
from .._traceback import write_traceback
from ..testing import assertContainsFields
from .common import CustomObject, CustomJSONEncoder
class MemoryLoggerTests(TestCase):
"""
Tests for L{MemoryLogger}.
"""
def test_interface(self):
"""
L{MemoryLogger} implements L{ILogger}.
"""
verifyClass(ILogger, MemoryLogger)
def test_write(self):
"""
Dictionaries written with L{MemoryLogger.write} are stored on a list.
"""
logger = MemoryLogger()
logger.write({"a": "b"})
logger.write({"c": 1})
self.assertEqual(logger.messages, [{"a": "b"}, {"c": 1}])
logger.validate()
def test_notStringFieldKeys(self):
"""
Field keys must be unicode or bytes; if not L{MemoryLogger.validate}
raises a C{TypeError}.
"""
logger = MemoryLogger()
logger.write({123: "b"})
self.assertRaises(TypeError, logger.validate)
def test_bytesMustBeUTF8(self):
"""
Field keys can be bytes, but only if they're UTF-8 encoded Unicode.
"""
logger = MemoryLogger()
logger.write({"\u1234".encode("utf-16"): "b"})
self.assertRaises(UnicodeDecodeError, logger.validate)
def test_serializer(self):
"""
L{MemoryLogger.validate} calls the given serializer's C{validate()}
method with the message, as does L{MemoryLogger.write}.
"""
class FakeValidator(list):
def validate(self, message):
self.append(message)
def serialize(self, obj):
return obj
validator = FakeValidator()
logger = MemoryLogger()
message = {"message_type": "mymessage", "X": 1}
logger.write(message, validator)
self.assertEqual(validator, [message])
logger.validate()
self.assertEqual(validator, [message, message])
def test_failedValidation(self):
"""
L{MemoryLogger.validate} will allow exceptions raised by the serializer
to pass through.
"""
serializer = _MessageSerializer(
[Field.forValue("message_type", "mymessage", "The type")]
)
logger = MemoryLogger()
logger.write({"message_type": "wrongtype"}, serializer)
self.assertRaises(ValidationError, logger.validate)
def test_JSON(self):
"""
L{MemoryLogger.validate} will encode the output of serialization to
JSON.
"""
serializer = _MessageSerializer(
[
Field.forValue("message_type", "type", "The type"),
Field("foo", lambda value: object(), "The type"),
]
)
logger = MemoryLogger()
logger.write(
{"message_type": "type", "foo": "will become object()"}, serializer
)
self.assertRaises(TypeError, logger.validate)
@skipUnless(np, "NumPy is not installed.")
def test_EliotJSONEncoder(self):
"""
L{MemoryLogger.validate} uses the EliotJSONEncoder by default to do
encoding testing.
"""
logger = MemoryLogger()
logger.write({"message_type": "type", "foo": np.uint64(12)}, None)
logger.validate()
def test_JSON_custom_encoder(self):
"""
L{MemoryLogger.validate} will use a custom JSON encoder if one was given.
"""
logger = MemoryLogger(encoder=CustomJSONEncoder)
logger.write(
{"message_type": "type", "custom": CustomObject()},
None,
)
logger.validate()
def test_serialize(self):
"""
L{MemoryLogger.serialize} returns a list of serialized versions of the
logged messages.
"""
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("length", len, "The length"),
]
)
messages = [
{"message_type": "mymessage", "length": "abc"},
{"message_type": "mymessage", "length": "abcd"},
]
logger = MemoryLogger()
for message in messages:
logger.write(message, serializer)
self.assertEqual(
logger.serialize(),
[
{"message_type": "mymessage", "length": 3},
{"message_type": "mymessage", "length": 4},
],
)
def test_serializeCopies(self):
"""
L{MemoryLogger.serialize} does not mutate the original logged messages.
"""
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("length", len, "The length"),
]
)
message = {"message_type": "mymessage", "length": "abc"}
logger = MemoryLogger()
logger.write(message, serializer)
logger.serialize()
self.assertEqual(logger.messages[0]["length"], "abc")
def write_traceback(self, logger, exception):
"""
Write an exception as a traceback to the logger.
"""
try:
raise exception
except:
write_traceback(logger)
def test_tracebacksCauseTestFailure(self):
"""
Logging a traceback to L{MemoryLogger} will add its exception to
L{MemoryLogger.tracebackMessages}.
"""
logger = MemoryLogger()
exception = Exception()
self.write_traceback(logger, exception)
self.assertEqual(logger.tracebackMessages[0]["reason"], exception)
def test_flushTracebacksNoTestFailure(self):
"""
Any tracebacks cleared by L{MemoryLogger.flushTracebacks} (as specified
by exception type) are removed from
L{MemoryLogger.tracebackMessages}.
"""
logger = MemoryLogger()
exception = RuntimeError()
self.write_traceback(logger, exception)
logger.flushTracebacks(RuntimeError)
self.assertEqual(logger.tracebackMessages, [])
def test_flushTracebacksReturnsExceptions(self):
"""
L{MemoryLogger.flushTracebacks} returns the traceback messages.
"""
exceptions = [ZeroDivisionError(), ZeroDivisionError()]
logger = MemoryLogger()
logger.write({"x": 1})
for exc in exceptions:
self.write_traceback(logger, exc)
logger.write({"x": 1})
flushed = logger.flushTracebacks(ZeroDivisionError)
self.assertEqual(flushed, logger.messages[1:3])
def test_flushTracebacksUnflushedTestFailure(self):
"""
Any tracebacks uncleared by L{MemoryLogger.flushTracebacks} (because
they are of a different type) are still listed in
L{MemoryLogger.tracebackMessages}.
"""
logger = MemoryLogger()
exception = RuntimeError()
self.write_traceback(logger, exception)
logger.flushTracebacks(KeyError)
self.assertEqual(logger.tracebackMessages[0]["reason"], exception)
def test_flushTracebacksUnflushedUnreturned(self):
"""
Any tracebacks uncleared by L{MemoryLogger.flushTracebacks} (because
they are of a different type) are not returned.
"""
logger = MemoryLogger()
exception = RuntimeError()
self.write_traceback(logger, exception)
self.assertEqual(logger.flushTracebacks(KeyError), [])
def test_reset(self):
"""
L{MemoryLogger.reset} clears all logged messages and tracebacks.
"""
logger = MemoryLogger()
logger.write({"key": "value"}, None)
logger.reset()
self.assertEqual(
(logger.messages, logger.serializers, logger.tracebackMessages),
([], [], []),
)
def test_threadSafeWrite(self):
"""
L{MemoryLogger.write} can be called from multiple threads concurrently.
"""
# Some threads will log some messages
thread_count = 10
# A lot of messages. This will keep the threads running long enough
# to give them a chance to (try to) interfere with each other.
write_count = 10000
# They'll all use the same MemoryLogger instance.
logger = MemoryLogger()
# Each thread will have its own message and serializer that it writes
# to the log over and over again.
def write(msg, serializer):
for i in range(write_count):
logger.write(msg, serializer)
# Generate a single distinct message for each thread to log.
msgs = list({"i": i} for i in range(thread_count))
# Generate a single distinct serializer for each thread to log.
serializers = list(object() for i in range(thread_count))
# Pair them all up. This gives us a simple invariant we can check
# later on.
write_args = zip(msgs, serializers)
# Create the threads.
threads = list(Thread(target=write, args=args) for args in write_args)
# Run them all. Note threads early in this list will start writing to
# the log before later threads in the list even get a chance to start.
# That's part of why we have each thread write so many messages.
for t in threads:
t.start()
# Wait for them all to finish.
for t in threads:
t.join()
# Check that we got the correct number of messages in the log.
expected_count = thread_count * write_count
self.assertEqual(len(logger.messages), expected_count)
self.assertEqual(len(logger.serializers), expected_count)
# Check the simple invariant we created above. Every logged message
# must be paired with the correct serializer, where "correct" is
# defined by ``write_args`` above.
for position, (msg, serializer) in enumerate(
zip(logger.messages, logger.serializers)
):
# The indexes must match because the objects are paired using
# zip() above.
msg_index = msgs.index(msg)
serializer_index = serializers.index(serializer)
self.assertEqual(
msg_index,
serializer_index,
"Found message #{} with serializer #{} at position {}".format(
msg_index, serializer_index, position
),
)
class MyException(Exception):
"""
Custom exception.
"""
class BadDestination(list):
"""
A destination that throws an exception the first time it is called.
"""
called = 0
def __call__(self, msg):
if not self.called:
self.called = True
raise MyException("ono")
self.append(msg)
class DestinationsTests(TestCase):
"""
Tests for L{Destinations}.
"""
def test_send(self):
"""
L{Destinations.send} calls all destinations added with
L{Destinations.add} with the given dictionary.
"""
destinations = Destinations()
message = {"hoorj": "blargh"}
dest = []
dest2 = []
dest3 = []
destinations.add(dest.append, dest2.append)
destinations.add(dest3.append)
destinations.send(message)
self.assertEqual(dest, [message])
self.assertEqual(dest2, [message])
self.assertEqual(dest3, [message])
def test_destination_exception_multiple_destinations(self):
"""
If one destination throws an exception, other destinations still
get the message.
"""
destinations = Destinations()
dest = []
dest2 = BadDestination()
dest3 = []
destinations.add(dest.append)
destinations.add(dest2)
destinations.add(dest3.append)
message = {"hello": 123}
destinations.send(message)
self.assertIn(message, dest)
self.assertIn(message, dest3)
def test_destination_exception_continue(self):
"""
If a destination throws an exception, future messages are still
sent to it.
"""
destinations = Destinations()
dest = BadDestination()
destinations.add(dest)
msg1 = {"hello": 123}
msg2 = {"world": 456}
destinations.send(msg1)
self.assertNotIn(msg1, dest)
destinations.send(msg2)
self.assertIn(msg2, dest)
def test_remove(self):
"""
A destination removed with L{Destinations.remove} will no longer
receive messages from L{Destionations.add} calls.
"""
destinations = Destinations()
message = {"hello": 123}
dest = []
destinations.add(dest.append)
destinations.remove(dest.append)
destinations.send(message)
self.assertEqual(dest, [])
def test_removeNonExistent(self):
"""
Removing a destination that has not previously been added with result
in a C{ValueError} being thrown.
"""
destinations = Destinations()
self.assertRaises(ValueError, destinations.remove, [].append)
def test_addGlobalFields(self):
"""
L{Destinations.addGlobalFields} adds the given fields and values to
the messages being passed in.
"""
destinations = Destinations()
dest = []
destinations.add(dest.append)
destinations.addGlobalFields(x=123, y="hello")
destinations.send({"z": 456})
self.assertEqual(dest, [{"x": 123, "y": "hello", "z": 456}])
def test_addGlobalFieldsCumulative(self):
"""
L{Destinations.addGlobalFields} adds the given fields to those set by
previous calls.
"""
destinations = Destinations()
dest = []
destinations.add(dest.append)
destinations.addGlobalFields(x=123, y="hello")
destinations.addGlobalFields(x=456, z=456)
destinations.send({"msg": "X"})
self.assertEqual(dest, [{"x": 456, "y": "hello", "z": 456, "msg": "X"}])
def test_buffering(self):
"""
Before any destinations are set up to 1000 messages are buffered, and
then delivered to the first registered destinations.
"""
destinations = Destinations()
messages = [{"k": i} for i in range(1050)]
for m in messages:
destinations.send(m)
dest, dest2 = [], []
destinations.add(dest.append, dest2.append)
self.assertEqual((dest, dest2), (messages[-1000:], messages[-1000:]))
def test_buffering_second_batch(self):
"""
The second batch of added destination don't get the buffered messages.
"""
destinations = Destinations()
message = {"m": 1}
message2 = {"m": 2}
destinations.send(message)
dest = []
dest2 = []
destinations.add(dest.append)
destinations.add(dest2.append)
destinations.send(message2)
self.assertEqual((dest, dest2), ([message, message2], [message2]))
def test_global_fields_buffering(self):
"""
Global fields are added to buffered messages, when possible.
"""
destinations = Destinations()
message = {"m": 1}
destinations.send(message)
destinations.addGlobalFields(k=123)
dest = []
destinations.add(dest.append)
self.assertEqual(dest, [{"m": 1, "k": 123}])
def makeLogger():
"""
Return a tuple (L{Logger} instance, C{list} of written messages).
"""
logger = Logger()
logger._destinations = Destinations()
written = []
logger._destinations.add(written.append)
return logger, written
class LoggerTests(TestCase):
"""
Tests for L{Logger}.
"""
def test_interface(self):
"""
L{Logger} implements L{ILogger}.
"""
verifyClass(ILogger, Logger)
def test_global(self):
"""
A global L{Destinations} is used by the L{Logger} class.
"""
self.assertIsInstance(Logger._destinations, Destinations)
def test_write(self):
"""
L{Logger.write} sends the given dictionary L{Destinations} object.
"""
logger, written = makeLogger()
d = {"hello": 1}
logger.write(d)
self.assertEqual(written, [d])
def test_serializer(self):
"""
If a L{_MessageSerializer} is passed to L{Logger.write}, it is used to
serialize the message before it is passed to the destination.
"""
logger, written = makeLogger()
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("length", len, "The length of a thing"),
]
)
logger.write({"message_type": "mymessage", "length": "thething"}, serializer)
self.assertEqual(written, [{"message_type": "mymessage", "length": 8}])
def test_passedInDictionaryUnmodified(self):
"""
The dictionary passed in to L{Logger.write} is not modified.
"""
logger, written = makeLogger()
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("length", len, "The length of a thing"),
]
)
d = {"message_type": "mymessage", "length": "thething"}
original = d.copy()
logger.write(d, serializer)
self.assertEqual(d, original)
def test_safe_unicode_dictionary(self):
"""
L{_safe_unicode_dictionary} converts the given dictionary's
values and keys to unicode using C{safeunicode}.
"""
class badobject(object):
def __repr__(self):
raise TypeError()
dictionary = {badobject(): 123, 123: badobject()}
badMessage = "eliot: unknown, unicode() raised exception"
self.assertEqual(
eval(_safe_unicode_dictionary(dictionary)),
{badMessage: "123", "123": badMessage},
)
def test_safe_unicode_dictionary_fallback(self):
"""
If converting the dictionary failed for some reason,
L{_safe_unicode_dictionary} runs C{repr} on the object.
"""
self.assertEqual(_safe_unicode_dictionary(None), "None")
def test_safe_unicode_dictionary_fallback_failure(self):
"""
If all else fails, L{_safe_unicode_dictionary} just gives up.
"""
class badobject(object):
def __repr__(self):
raise TypeError()
self.assertEqual(
_safe_unicode_dictionary(badobject()),
"eliot: unknown, unicode() raised exception",
)
def test_serializationErrorTraceback(self):
"""
If serialization fails in L{Logger.write}, a traceback is logged,
along with a C{eliot:serialization_failure} message for debugging
purposes.
"""
logger, written = makeLogger()
def raiser(i):
raise RuntimeError("oops")
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("fail", raiser, "Serialization fail"),
]
)
message = {"message_type": "mymessage", "fail": "will"}
logger.write(message, serializer)
self.assertEqual(len(written), 2)
tracebackMessage = written[0]
assertContainsFields(
self,
tracebackMessage,
{
"exception": "%s.RuntimeError" % (RuntimeError.__module__,),
"message_type": "eliot:traceback",
},
)
self.assertIn("RuntimeError: oops", tracebackMessage["traceback"])
# Calling _safe_unicode_dictionary multiple times leads to
# inconsistent results due to hash ordering, so compare contents:
assertContainsFields(
self, written[1], {"message_type": "eliot:serialization_failure"}
)
self.assertEqual(
eval(written[1]["message"]),
dict((repr(key), repr(value)) for (key, value) in message.items()),
)
def test_destination_exception_caught(self):
"""
If a destination throws an exception, an appropriate error is
logged.
"""
logger = Logger()
logger._destinations = Destinations()
dest = BadDestination()
logger._destinations.add(dest)
message = {"hello": 123}
logger.write({"hello": 123})
assertContainsFields(
self,
dest[0],
{
"message_type": "eliot:destination_failure",
"message": _safe_unicode_dictionary(message),
"reason": "ono",
"exception": "eliot.tests.test_output.MyException",
},
)
def test_destination_multiple_exceptions_caught(self):
"""
If multiple destinations throw an exception, an appropriate error is
logged for each.
"""
logger = Logger()
logger._destinations = Destinations()
logger._destinations.add(BadDestination())
logger._destinations.add(lambda msg: 1 / 0)
messages = []
logger._destinations.add(messages.append)
try:
1 / 0
except ZeroDivisionError as e:
zero_divide = str(e)
zero_type = ZeroDivisionError.__module__ + ".ZeroDivisionError"
message = {"hello": 123}
logger.write({"hello": 123})
def remove(key):
return [message.pop(key) for message in messages[1:]]
# Make sure we have task_level & task_uuid in exception messages.
task_levels = remove("task_level")
task_uuids = remove("task_uuid")
timestamps = remove("timestamp")
self.assertEqual(
(
abs(timestamps[0] + timestamps[1] - 2 * time()) < 1,
task_levels == [[1], [1]],
len([UUID(uuid) for uuid in task_uuids]) == 2,
messages,
),
(
True,
True,
True,
[
message,
{
"message_type": "eliot:destination_failure",
"message": _safe_unicode_dictionary(message),
"reason": "ono",
"exception": "eliot.tests.test_output.MyException",
},
{
"message_type": "eliot:destination_failure",
"message": _safe_unicode_dictionary(message),
"reason": zero_divide,
"exception": zero_type,
},
],
),
)
def test_destination_exception_caught_twice(self):
"""
If a destination throws an exception, and the logged error about
it also causes an exception, then just drop that exception on the
floor, since there's nothing we can do with it.
"""
logger = Logger()
logger._destinations = Destinations()
def always_raise(message):
raise ZeroDivisionError()
logger._destinations.add(always_raise)
# Just a message. No exception raised; since everything is dropped no
# other assertions to be made.
logger.write({"hello": 123})
# With an action. No exception raised; since everything is dropped no
# other assertions to be made.
with start_action(logger, "sys:do"):
logger.write({"hello": 123})
class PEP8Tests(TestCase):
"""
Tests for PEP 8 method compatibility.
"""
def test_flush_tracebacks(self):
"""
L{MemoryLogger.flush_tracebacks} is the same as
L{MemoryLogger.flushTracebacks}
"""
self.assertEqual(MemoryLogger.flush_tracebacks, MemoryLogger.flushTracebacks)
class ToFileTests(TestCase):
"""
Tests for L{to_file}.
"""
def test_to_file_adds_destination(self):
"""
L{to_file} adds a L{FileDestination} destination with the given file.
"""
f = stdout
to_file(f)
expected = FileDestination(file=f)
self.addCleanup(Logger._destinations.remove, expected)
self.assertIn(expected, Logger._destinations._destinations)
def test_to_file_custom_encoder(self):
"""
L{to_file} accepts a custom encoder, and sets it on the resulting
L{FileDestination}.
"""
f = stdout
encoder = object()
to_file(f, encoder=encoder)
expected = FileDestination(file=f, encoder=encoder)
self.addCleanup(Logger._destinations.remove, expected)
self.assertIn(expected, Logger._destinations._destinations)
def test_bytes_values(self):
"""
DEPRECATED: On Python 3L{FileDestination} will encode bytes as if they were
UTF-8 encoded strings when writing to BytesIO only.
"""
message = {"x": b"abc"}
bytes_f = BytesIO()
destination = FileDestination(file=bytes_f)
destination(message)
self.assertEqual(
[json.loads(line) for line in bytes_f.getvalue().splitlines()],
[{"x": "abc"}],
)
@skipUnless(np, "NumPy is not installed.")
def test_default_encoder_is_EliotJSONEncoder(self):
"""The default encoder if none are specified is EliotJSONEncoder."""
message = {"x": np.int64(3)}
f = StringIO()
destination = FileDestination(file=f)
destination(message)
self.assertEqual(
[json.loads(line) for line in f.getvalue().splitlines()], [{"x": 3}]
)
def test_filedestination_writes_json_bytes(self):
"""
L{FileDestination} writes JSON-encoded messages to a file that accepts
bytes.
"""
message1 = {"x": 123}
message2 = {"y": None, "x": "abc"}
bytes_f = BytesIO()
destination = FileDestination(file=bytes_f)
destination(message1)
destination(message2)
self.assertEqual(
[json.loads(line) for line in bytes_f.getvalue().splitlines()],
[message1, message2],
)
def test_filedestination_custom_encoder(self):
"""
L{FileDestionation} can use a custom encoder.
"""
custom = object()
class CustomEncoder(pyjson.JSONEncoder):
def default(self, o):
if o is custom:
return "CUSTOM!"
else:
return pyjson.JSONEncoder.default(self, o)
message = {"x": 123, "z": custom}
f = BytesIO()
destination = FileDestination(file=f, encoder=CustomEncoder)
destination(message)
self.assertEqual(
json.loads(f.getvalue().splitlines()[0]), {"x": 123, "z": "CUSTOM!"}
)
def test_filedestination_flushes(self):
"""
L{FileDestination} flushes after every write, to ensure logs get
written out even if the local buffer hasn't filled up.
"""
path = mktemp()
# File with large buffer:
f = open(path, "wb", 1024 * 1024 * 10)
# and a small message that won't fill the buffer:
message1 = {"x": 123}
destination = FileDestination(file=f)
destination(message1)
# Message got written even though buffer wasn't filled:
self.assertEqual(
[json.loads(line) for line in open(path, "rb").read().splitlines()],
[message1],
)
def test_filedestination_writes_json_unicode(self):
"""
L{FileDestination} writes JSON-encoded messages to file that only
accepts Unicode.
"""
message = {"x": "\u1234"}
unicode_f = StringIO()
destination = FileDestination(file=unicode_f)
destination(message)
self.assertEqual(pyjson.loads(unicode_f.getvalue()), message)
def test_filedestination_unwriteable_file(self):
"""
L{FileDestination} raises a runtime error if the given file isn't writeable.
"""
path = mktemp()
open(path, "w").close()
f = open(path, "r")
with self.assertRaises(RuntimeError):
FileDestination(f)
| [
"zope.interface.verify.verifyClass",
"threading.Thread",
"io.BytesIO",
"io.StringIO",
"numpy.uint64",
"time.time",
"unittest.skipUnless",
"uuid.UUID",
"numpy.int64",
"tempfile.mktemp",
"json.JSONEncoder.default"
] | [((3634, 3675), 'unittest.skipUnless', 'skipUnless', (['np', '"""NumPy is not installed."""'], {}), "(np, 'NumPy is not installed.')\n", (3644, 3675), False, 'from unittest import TestCase, skipUnless\n'), ((26224, 26265), 'unittest.skipUnless', 'skipUnless', (['np', '"""NumPy is not installed."""'], {}), "(np, 'NumPy is not installed.')\n", (26234, 26265), False, 'from unittest import TestCase, skipUnless\n'), ((1032, 1066), 'zope.interface.verify.verifyClass', 'verifyClass', (['ILogger', 'MemoryLogger'], {}), '(ILogger, MemoryLogger)\n', (1043, 1066), False, 'from zope.interface.verify import verifyClass\n'), ((16640, 16668), 'zope.interface.verify.verifyClass', 'verifyClass', (['ILogger', 'Logger'], {}), '(ILogger, Logger)\n', (16651, 16668), False, 'from zope.interface.verify import verifyClass\n'), ((25987, 25996), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (25994, 25996), False, 'from io import BytesIO, StringIO\n'), ((26448, 26458), 'io.StringIO', 'StringIO', ([], {}), '()\n', (26456, 26458), False, 'from io import BytesIO, StringIO\n'), ((26915, 26924), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (26922, 26924), False, 'from io import BytesIO, StringIO\n'), ((27632, 27641), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (27639, 27641), False, 'from io import BytesIO, StringIO\n'), ((28077, 28085), 'tempfile.mktemp', 'mktemp', ([], {}), '()\n', (28083, 28085), False, 'from tempfile import mktemp\n'), ((28772, 28782), 'io.StringIO', 'StringIO', ([], {}), '()\n', (28780, 28782), False, 'from io import BytesIO, StringIO\n'), ((29114, 29122), 'tempfile.mktemp', 'mktemp', ([], {}), '()\n', (29120, 29122), False, 'from tempfile import mktemp\n'), ((26423, 26434), 'numpy.int64', 'np.int64', (['(3)'], {}), '(3)\n', (26431, 26434), True, 'import numpy as np\n'), ((3924, 3937), 'numpy.uint64', 'np.uint64', (['(12)'], {}), '(12)\n', (3933, 3937), True, 'import numpy as np\n'), ((9720, 9751), 'threading.Thread', 'Thread', ([], {'target': 'write', 'args': 'args'}), 
'(target=write, args=args)\n', (9726, 9751), False, 'from threading import Thread\n'), ((27541, 27576), 'json.JSONEncoder.default', 'pyjson.JSONEncoder.default', (['self', 'o'], {}), '(self, o)\n', (27567, 27576), True, 'import json as pyjson\n'), ((22858, 22868), 'uuid.UUID', 'UUID', (['uuid'], {}), '(uuid)\n', (22862, 22868), False, 'from uuid import UUID\n'), ((22781, 22787), 'time.time', 'time', ([], {}), '()\n', (22785, 22787), False, 'from time import time\n')] |
import pandas as pd
import numpy as np
import math
from scipy.stats import nct
from copy import deepcopy
import matplotlib.pyplot as plt
from ..estimators.stan_estimator import StanEstimatorMAP
from ..exceptions import IllegalArgument, ModelException
from ..utils.kernels import sandwich_kernel
from ..utils.features import make_fourier_series_df
from .template import BaseTemplate, MAPTemplate
from ..constants.constants import PredictionKeys, PredictMethod
from ..constants import ktrlite as constants
from orbit.constants.palette import OrbitPalette
from ..initializer.ktrlite import KTRLiteInitializer
class BaseKTRLite(BaseTemplate):
"""Base KTRLite model object with shared functionality for MAP method
Parameters
----------
seasonality : int, or list of int
multiple seasonality
seasonality_fs_order : int, or list of int
fourier series order for seasonality
level_knot_scale : float
sigma for level; default to be .5
seasonal_initial_knot_scale : float
scale parameter for seasonal regressors initial coefficient knots; default to be 1
seasonal_knot_scale : float
scale parameter for seasonal regressors drift of coefficient knots; default to be 0.1.
span_level : float between (0, 1)
window width to decide the number of windows for the level (trend) term.
e.g., span 0.1 will produce 10 windows.
span_coefficients : float between (0, 1)
window width to decide the number of windows for the regression term
degree of freedom : int
degree of freedom for error t-distribution
level_knot_dates : array like
list of pre-specified dates for the level knots
level_knot_length : int
the distance between every two knots for level
coefficients_knot_length : int
the distance between every two knots for coefficients
knot_location : {'mid_point', 'end_point'}; default 'mid_point'
knot locations. When level_knot_dates is specified, this is ignored for level knots.
date_freq : str
date frequency; if not supplied, pd.infer_freq will be used to imply the date frequency.
kwargs
To specify `estimator_type` or additional args for the specified `estimator_type`
"""
# data labels for sampler
_data_input_mapper = constants.DataInputMapper
# used to match name of `*.stan` or `*.pyro` file to look for the model
_model_name = 'ktrlite'
def __init__(self,
seasonality=None,
seasonality_fs_order=None,
level_knot_scale=0.5,
seasonal_initial_knot_scale=1.0,
seasonal_knot_scale=0.1,
span_level=0.1,
span_coefficients=0.3,
degree_of_freedom=30,
# knot customization
level_knot_dates=None,
level_knot_length=None,
coefficients_knot_length=None,
knot_location='mid_point',
date_freq=None,
**kwargs):
super().__init__(**kwargs) # create estimator in base class
self.span_level = span_level
self.level_knot_scale = level_knot_scale
# customize knot dates for levels
self.level_knot_dates = level_knot_dates
self.level_knot_length = level_knot_length
self.coefficients_knot_length = coefficients_knot_length
self.knot_location = knot_location
self.seasonality = seasonality
self.seasonality_fs_order = seasonality_fs_order
self.seasonal_initial_knot_scale = seasonal_initial_knot_scale
self.seasonal_knot_scale = seasonal_knot_scale
# set private var to arg value
# if None set default in _set_default_args()
# use public one if knots length is not available
self._seasonality = self.seasonality
self._seasonality_fs_order = self.seasonality_fs_order
self._seasonal_knot_scale = self.seasonal_knot_scale
self._seasonal_initial_knot_scale = None
self._seasonal_knot_scale = None
self._level_knot_dates = self.level_knot_dates
self._degree_of_freedom = degree_of_freedom
self.span_coefficients = span_coefficients
# self.rho_coefficients = rho_coefficients
self.date_freq = date_freq
# regression attributes -- now is ONLY used for fourier series as seasonality
self.num_of_regressors = 0
self.regressor_col = list()
self.regressor_col_gp = list()
self.coefficients_initial_knot_scale = list()
self.coefficients_knot_scale = list()
# set static data attributes
# self._set_static_attributes()
# set model param names
# this only depends on static attributes, but should these params depend
# on dynamic data (e.g actual data matrix, number of responses, etc) this should be
# called after fit instead
# self._set_model_param_names()
# basic response fields
# mainly set by ._set_dynamic_attributes()
self.response_offset = 0
self.is_valid_response = None
self.which_valid_response = None
self.num_of_valid_response = 0
self.num_knots_level = None
self.knots_tp_level = None
self.num_knots_coefficients = None
self.knots_tp_coefficients = None
self.regressor_matrix = None
# self.coefficients_knot_dates = None
    def _set_init_values(self):
        """Override function from Base Template.

        Installs a callable that produces sampler initial values, but only when
        there are multiple seasonalities AND at least one seasonal regressor.
        """
        # init_values_partial = partial(init_values_callable, seasonality=seasonality)
        # partialfunc does not work when passed to PyStan because PyStan uses
        # inspect.getargspec(func) which seems to raise an exception with keyword-only args
        # caused by using partialfunc
        # lambda as an alternative workaround
        if len(self._seasonality) > 1 and self.num_of_regressors > 0:
            init_values_callable = KTRLiteInitializer(self.num_of_regressors, self.num_knots_coefficients)
            self._init_values = init_values_callable
# initialization related modules
    def _set_default_args(self):
        """Set default attributes for None.

        Normalizes the public seasonality/scale arguments into the private
        list-valued attributes (`_seasonality`, `_seasonality_fs_order`,
        `_seasonal_initial_knot_scale`, `_seasonal_knot_scale`), raising
        IllegalArgument on inconsistent inputs.
        """
        if self.seasonality is None:
            self._seasonality = list()
            self._seasonality_fs_order = list()
        # `x * 1.0` coerces an int to float, so this branch catches any scalar number
        elif not isinstance(self._seasonality, list) and isinstance(self._seasonality * 1.0, float):
            self._seasonality = [self.seasonality]

        # fs_order defaults to 2 per seasonality when unspecified
        if self._seasonality and self._seasonality_fs_order is None:
            self._seasonality_fs_order = [2] * len(self._seasonality)
        elif not isinstance(self._seasonality_fs_order, list) and isinstance(self._seasonality_fs_order * 1.0, float):
            self._seasonality_fs_order = [self.seasonality_fs_order]

        if len(self._seasonality_fs_order) != len(self._seasonality):
            raise IllegalArgument('length of seasonality and fs_order not matching')

        # a fourier series of order k needs 2k + 1 points per cycle to be identifiable
        for k, order in enumerate(self._seasonality_fs_order):
            if 2 * order > self._seasonality[k] - 1:
                raise IllegalArgument('reduce seasonality_fs_order to avoid over-fitting')

        # broadcast scalar knot scales to one entry per seasonality
        if not isinstance(self.seasonal_initial_knot_scale, list) and \
                isinstance(self.seasonal_initial_knot_scale * 1.0, float):
            self._seasonal_initial_knot_scale = [self.seasonal_initial_knot_scale] * len(self._seasonality)
        else:
            self._seasonal_initial_knot_scale = self.seasonal_initial_knot_scale

        if not isinstance(self.seasonal_knot_scale, list) and isinstance(self.seasonal_knot_scale * 1.0, float):
            self._seasonal_knot_scale = [self.seasonal_knot_scale] * len(self._seasonality)
        else:
            self._seasonal_knot_scale = self.seasonal_knot_scale
def _set_seasonality_attributes(self):
"""given list of seasonalities and their order, create list of seasonal_regressors_columns"""
self.regressor_col_gp = list()
self.regressor_col = list()
self.coefficients_initial_knot_scale = list()
self.coefficients_knot_scale = list()
if len(self._seasonality) > 0:
for idx, s in enumerate(self._seasonality):
fs_cols = []
order = self._seasonality_fs_order[idx]
self.coefficients_initial_knot_scale += [self._seasonal_initial_knot_scale[idx]] * order * 2
self.coefficients_knot_scale += [self._seasonal_knot_scale[idx]] * order * 2
for i in range(1, order + 1):
fs_cols.append('seas{}_fs_cos{}'.format(s, i))
fs_cols.append('seas{}_fs_sin{}'.format(s, i))
# flatten version of regressor columns
self.regressor_col += fs_cols
# list of group of regressor columns bundled with seasonality
self.regressor_col_gp.append(fs_cols)
self.num_of_regressors = len(self.regressor_col)
    def _set_static_attributes(self):
        """Over-ride function from Base Template.

        Order matters: defaults must be resolved before the seasonal
        regressor columns (which read the resolved lists) are derived.
        """
        self._set_default_args()
        self._set_seasonality_attributes()
# fit and predict related modules
def _set_validate_ktr_params(self, df):
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
if self.num_of_observations < max_seasonality:
raise ModelException(
"Number of observations {} is less than max seasonality {}".format(
self.num_of_observations, max_seasonality))
# get some reasonable offset to regularize response to make default priors scale-insensitive
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
self.response_offset = np.nanmean(self.response[:max_seasonality])
else:
self.response_offset = np.nanmean(self.response)
self.is_valid_response = ~np.isnan(self.response)
# [0] to convert tuple back to array
self.which_valid_response = np.where(self.is_valid_response)[0]
self.num_of_valid_response = len(self.which_valid_response)
def _make_seasonal_regressors(self, df, shift):
"""
Parameters
----------
df : pd.DataFrame
shift: int
use 0 for fitting; use delta of prediction start and train start for prediction
Returns
-------
pd.DataFrame
data with computed fourier series attached
"""
if len(self._seasonality) > 0:
for idx, s in enumerate(self._seasonality):
order = self._seasonality_fs_order[idx]
df, _ = make_fourier_series_df(df, s, order=order, prefix='seas{}_'.format(s), shift=shift)
return df
def _set_regressor_matrix(self, df):
# init of regression matrix depends on length of response vector
self.regressor_matrix = np.zeros((self.num_of_observations, 0), dtype=np.double)
if self.num_of_regressors > 0:
self.regressor_matrix = df.filter(items=self.regressor_col, ).values
@staticmethod
def get_gap_between_dates(start_date, end_date, freq):
diff = end_date - start_date
gap = np.array(diff / np.timedelta64(1, freq))
return gap
@staticmethod
def _set_knots_tp(knots_distance, cutoff, knot_location):
if knot_location == 'mid_point':
# knot in the middle
knots_idx_start = round(knots_distance / 2)
knots_idx = np.arange(knots_idx_start, cutoff, knots_distance)
elif knot_location == 'end_point':
# knot in the end
knots_idx = np.sort(np.arange(cutoff - 1, 0, -knots_distance))
return knots_idx
    def _set_kernel_matrix(self, df):
        """Derive level and coefficient knot positions and their kernel matrices.

        Sets `kernel_level` / `kernel_coefficients` (sandwich kernels between
        observation time points and knot time points, both scaled into (0, 1])
        plus the related knot bookkeeping attributes.

        :param df: training data frame; only the date column is read here
        """
        # Note that our tp starts by 1; to convert back to index of array, reduce it by 1
        tp = np.arange(1, self.num_of_observations + 1) / self.num_of_observations

        # this approach put knots in full range
        self._cutoff = self.num_of_observations

        # kernel of level calculations
        if self._level_knot_dates is None:
            if self.level_knot_length is not None:
                knots_distance = self.level_knot_length
            else:
                number_of_knots = round(1 / self.span_level)
                # FIXME: is it the best way to calculate knots_distance?
                knots_distance = math.ceil(self._cutoff / number_of_knots)

            knots_idx_level = self._set_knots_tp(knots_distance, self._cutoff, self.knot_location)
            self._knots_idx_level = knots_idx_level
            self.knots_tp_level = (1 + knots_idx_level) / self.num_of_observations
            self._level_knot_dates = df[self.date_col].values[knots_idx_level]
        else:
            # to exclude dates which are not within training period
            self._level_knot_dates = pd.to_datetime([
                x for x in self._level_knot_dates if
                (x <= df[self.date_col].values[-1]) and (x >= df[self.date_col].values[0])
            ])
            # since we allow _level_knot_dates to be continuous, we calculate distance between knots
            # in continuous value as well (instead of index)
            if self.date_freq is None:
                # NOTE(review): [0] keeps only the leading character of the inferred
                # frequency string (e.g. 'W-SUN' -> 'W') -- confirm this is intended
                self.date_freq = pd.infer_freq(df[self.date_col])[0]
            start_date = self.training_start
            self.knots_tp_level = np.array(
                (self.get_gap_between_dates(start_date, self._level_knot_dates, self.date_freq) + 1) /
                (self.get_gap_between_dates(start_date, self.training_end, self.date_freq) + 1)
            )

        self.kernel_level = sandwich_kernel(tp, self.knots_tp_level)
        self.num_knots_level = len(self.knots_tp_level)

        # coefficient kernel defaults to an empty (n, 0) matrix when no regressors
        self.kernel_coefficients = np.zeros((self.num_of_observations, 0), dtype=np.double)
        self.num_knots_coefficients = 0

        # kernel of coefficients calculations
        if self.num_of_regressors > 0:
            if self.coefficients_knot_length is not None:
                knots_distance = self.coefficients_knot_length
            else:
                number_of_knots = round(1 / self.span_coefficients)
                knots_distance = math.ceil(self._cutoff / number_of_knots)

            knots_idx_coef = self._set_knots_tp(knots_distance, self._cutoff, self.knot_location)
            self._knots_idx_coef = knots_idx_coef
            self.knots_tp_coefficients = (1 + knots_idx_coef) / self.num_of_observations
            self._coef_knot_dates = df[self.date_col].values[knots_idx_coef]
            self.kernel_coefficients = sandwich_kernel(tp, self.knots_tp_coefficients)
            self.num_knots_coefficients = len(self.knots_tp_coefficients)
    def _set_dynamic_attributes(self, df):
        """Overriding: func: `~orbit.models.BaseETS._set_dynamic_attributes

        Call order is significant: validation first, then fourier regressors
        are attached to *df*, and finally the regressor matrix and kernels are
        derived from the augmented frame.
        """
        # extra settings and validation for KTRLite
        self._set_validate_ktr_params(df)
        # attach fourier series as regressors
        df = self._make_seasonal_regressors(df, shift=0)
        # set regressors as input matrix and derive kernels
        self._set_regressor_matrix(df)
        self._set_kernel_matrix(df)
def _set_model_param_names(self):
"""Model parameters to extract"""
self._model_param_names += [param.value for param in constants.BaseSamplingParameters]
if len(self._seasonality) > 0 or self.num_of_regressors > 0:
self._model_param_names += [param.value for param in constants.RegressionSamplingParameters]
    def _predict(self, posterior_estimates, df, include_error=False, **kwargs):
        """Vectorized version of prediction math.

        :param posterior_estimates: dict of posterior sample arrays; every value's
            first dimension is the number of samples
        :param df: prediction data frame (seasonal regressors are attached here)
        :param include_error: when True, simulate level knots beyond the training
            range and add t-distributed observation noise
        :return: dict with prediction, trend, and per-seasonality decomposition
        """
        ################################################################
        # Prediction Attributes
        ################################################################
        start = self.prediction_input_meta['start']
        trained_len = self.num_of_observations
        output_len = self.prediction_input_meta['df_length']

        ################################################################
        # Model Attributes
        ################################################################
        # deep-copied so knot simulation below never mutates the caller's posteriors
        model = deepcopy(posterior_estimates)
        # TODO: adopt torch ?
        # for k, v in model.items():
        #     model[k] = torch.from_numpy(v)

        # We can pull any arbitrary value from the dictionary because we hold the
        # safe assumption: the length of the first dimension is always the number of samples
        # thus can be safely used to determine `num_sample`. If predict_method is anything
        # other than full, the value here should be 1
        arbitrary_posterior_value = list(model.values())[0]
        num_sample = arbitrary_posterior_value.shape[0]

        ################################################################
        # Trend Component
        ################################################################
        # prediction time points on the training-scaled axis; values > 1 are out-of-sample
        new_tp = np.arange(start + 1, start + output_len + 1) / trained_len
        if include_error:
            # in-sample knots
            lev_knot_in = model.get(constants.BaseSamplingParameters.LEVEL_KNOT.value)
            # TODO: hacky way; let's just assume last two knot distance is knots distance for all knots
            lev_knot_width = self.knots_tp_level[-1] - self.knots_tp_level[-2]
            # check whether we need to put new knots for simulation
            if new_tp[-1] > 1:
                # derive knots tp
                if self.knots_tp_level[-1] + lev_knot_width >= new_tp[-1]:
                    knots_tp_level_out = np.array([new_tp[-1]])
                else:
                    knots_tp_level_out = np.arange(self.knots_tp_level[-1] + lev_knot_width, new_tp[-1], lev_knot_width)
                new_knots_tp_level = np.concatenate([self.knots_tp_level, knots_tp_level_out])
                # simulate future knots as a laplace random walk from the last in-sample knot
                lev_knot_out = np.random.laplace(0, self.level_knot_scale,
                                                  size=(lev_knot_in.shape[0], len(knots_tp_level_out)))
                lev_knot_out = np.cumsum(np.concatenate([lev_knot_in[:, -1].reshape(-1, 1), lev_knot_out],
                                                        axis=1), axis=1)[:, 1:]
                lev_knot = np.concatenate([lev_knot_in, lev_knot_out], axis=1)
            else:
                new_knots_tp_level = self.knots_tp_level
                lev_knot = lev_knot_in
            kernel_level = sandwich_kernel(new_tp, new_knots_tp_level)
        else:
            lev_knot = model.get(constants.BaseSamplingParameters.LEVEL_KNOT.value)
            kernel_level = sandwich_kernel(new_tp, self.knots_tp_level)
        obs_scale = model.get(constants.BaseSamplingParameters.OBS_SCALE.value)
        obs_scale = obs_scale.reshape(-1, 1)

        # (num_sample, num_knots) x (num_knots, output_len) -> (num_sample, output_len)
        trend = np.matmul(lev_knot, kernel_level.transpose(1, 0))

        ################################################################
        # Seasonality Component
        ################################################################
        # init of regression matrix depends on length of response vector
        total_seas_regression = np.zeros(trend.shape, dtype=np.double)
        seas_decomp = {}
        # update seasonal regression matrices
        if self._seasonality and self.regressor_col:
            df = self._make_seasonal_regressors(df, shift=start)
            coef_knot = model.get(constants.RegressionSamplingParameters.COEFFICIENTS_KNOT.value)
            kernel_coefficients = sandwich_kernel(new_tp, self.knots_tp_coefficients)
            coef = np.matmul(coef_knot, kernel_coefficients.transpose(1, 0))
            pos = 0
            # decompose the regression term per seasonality group
            for idx, cols in enumerate(self.regressor_col_gp):
                seasonal_regressor_matrix = df[cols].values
                seas_coef = coef[..., pos:(pos + len(cols)), :]
                seas_regression = np.sum(seas_coef * seasonal_regressor_matrix.transpose(1, 0), axis=-2)
                seas_decomp['seasonality_{}'.format(self._seasonality[idx])] = seas_regression
                pos += len(cols)
                total_seas_regression += seas_regression
        if include_error:
            # NOTE(review): `nct` is presumably scipy.stats.nct imported at file top -- confirm
            epsilon = nct.rvs(self._degree_of_freedom, nc=0, loc=0,
                              scale=obs_scale, size=(num_sample, len(new_tp)))
            pred_array = trend + total_seas_regression + epsilon
        else:
            pred_array = trend + total_seas_regression

        out = {
            PredictionKeys.PREDICTION.value: pred_array,
            PredictionKeys.TREND.value: trend,
        }
        out.update(seas_decomp)

        return out
class KTRLiteMAP(MAPTemplate, BaseKTRLite):
    """Concrete KTRLite model for MAP (Maximum a Posteriori) prediction

    This model only supports MAP estimator type
    """
    # NOTE(review): StanEstimatorMAP is expected to be imported at file top -- confirm
    _supported_estimator_types = [StanEstimatorMAP]

    def __init__(self, estimator_type=StanEstimatorMAP, **kwargs):
        super().__init__(estimator_type=estimator_type, **kwargs)

    # FIXME: need a unit test of this function
    def get_level_knots(self):
        """Return a DataFrame of level-knot dates and fitted level-knot values."""
        out = {
            self.date_col:
                self._level_knot_dates,
            constants.BaseSamplingParameters.LEVEL_KNOT.value:
                # TODO: this is hacky, investigate why we have an extra dimension here?
                np.squeeze(self._aggregate_posteriors[PredictMethod.MAP.value][
                    constants.BaseSamplingParameters.LEVEL_KNOT.value], 0),
        }
        return pd.DataFrame(out)

    def get_levels(self):
        """Return a DataFrame of training dates and fitted level values."""
        out = {
            self.date_col:
                self.date_array,
            constants.BaseSamplingParameters.LEVEL.value:
                # TODO: this is hacky, investigate why we have an extra dimension here?
                np.squeeze(self._aggregate_posteriors[PredictMethod.MAP.value][
                    constants.BaseSamplingParameters.LEVEL.value], 0),
        }
        return pd.DataFrame(out)

    def plot_lev_knots(self, path=None, is_visible=True, title="",
                       fontsize=16, markersize=250, figsize=(16, 8)):
        """ Plot the fitted level knots along with the actual time series.
        Parameters
        ----------
        path : str; optional
            path to save the figure
        is_visible : boolean
            whether we want to show the plot. If called from unittest, is_visible might = False.
        title : str; optional
            title of the plot
        fontsize : int; optional
            fontsize of the title
        markersize : int; optional
            knot marker size
        figsize : tuple; optional
             figsize pass through to `matplotlib.pyplot.figure()`
        Returns
        -------
            matplotlib axes object
        """
        levels_df = self.get_levels()
        knots_df = self.get_level_knots()
        fig, ax = plt.subplots(1, 1, figsize=figsize)
        # actuals in blue, fitted level line in black, knots as green triangles
        ax.plot(self.date_array, self.response, color=OrbitPalette.blue.value, lw=1, alpha=0.7, label='actual')
        ax.plot(levels_df[self.date_col], levels_df[constants.BaseSamplingParameters.LEVEL.value],
                color=OrbitPalette.black.value, lw=1, alpha=0.8,
                label=constants.BaseSamplingParameters.LEVEL.value)
        ax.scatter(knots_df[self.date_col], knots_df[constants.BaseSamplingParameters.LEVEL_KNOT.value],
                   color=OrbitPalette.green.value, lw=1, s=markersize, marker='^', alpha=0.8,
                   label=constants.BaseSamplingParameters.LEVEL_KNOT.value)
        ax.legend()
        ax.grid(True, which='major', c='grey', ls='-', lw=1, alpha=0.5)
        ax.set_title(title, fontsize=fontsize)
        if path:
            fig.savefig(path)
        if is_visible:
            plt.show()
        else:
            plt.close()
        return ax
| [
"pandas.DataFrame",
"copy.deepcopy",
"matplotlib.pyplot.show",
"math.ceil",
"matplotlib.pyplot.close",
"pandas.infer_freq",
"numpy.zeros",
"numpy.isnan",
"numpy.max",
"numpy.where",
"numpy.arange",
"pandas.to_datetime",
"numpy.timedelta64",
"numpy.array",
"numpy.squeeze",
"matplotlib.p... | [((11047, 11103), 'numpy.zeros', 'np.zeros', (['(self.num_of_observations, 0)'], {'dtype': 'np.double'}), '((self.num_of_observations, 0), dtype=np.double)\n', (11055, 11103), True, 'import numpy as np\n'), ((13943, 13999), 'numpy.zeros', 'np.zeros', (['(self.num_of_observations, 0)'], {'dtype': 'np.double'}), '((self.num_of_observations, 0), dtype=np.double)\n', (13951, 13999), True, 'import numpy as np\n'), ((16350, 16379), 'copy.deepcopy', 'deepcopy', (['posterior_estimates'], {}), '(posterior_estimates)\n', (16358, 16379), False, 'from copy import deepcopy\n'), ((19290, 19328), 'numpy.zeros', 'np.zeros', (['trend.shape'], {'dtype': 'np.double'}), '(trend.shape, dtype=np.double)\n', (19298, 19328), True, 'import numpy as np\n'), ((21628, 21645), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {}), '(out)\n', (21640, 21645), True, 'import pandas as pd\n'), ((22078, 22095), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {}), '(out)\n', (22090, 22095), True, 'import pandas as pd\n'), ((23005, 23040), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (23017, 23040), True, 'import matplotlib.pyplot as plt\n'), ((9897, 9940), 'numpy.nanmean', 'np.nanmean', (['self.response[:max_seasonality]'], {}), '(self.response[:max_seasonality])\n', (9907, 9940), True, 'import numpy as np\n'), ((9990, 10015), 'numpy.nanmean', 'np.nanmean', (['self.response'], {}), '(self.response)\n', (10000, 10015), True, 'import numpy as np\n'), ((10051, 10074), 'numpy.isnan', 'np.isnan', (['self.response'], {}), '(self.response)\n', (10059, 10074), True, 'import numpy as np\n'), ((10156, 10188), 'numpy.where', 'np.where', (['self.is_valid_response'], {}), '(self.is_valid_response)\n', (10164, 10188), True, 'import numpy as np\n'), ((11649, 11699), 'numpy.arange', 'np.arange', (['knots_idx_start', 'cutoff', 'knots_distance'], {}), '(knots_idx_start, cutoff, knots_distance)\n', (11658, 11699), True, 'import numpy 
as np\n'), ((12016, 12058), 'numpy.arange', 'np.arange', (['(1)', '(self.num_of_observations + 1)'], {}), '(1, self.num_of_observations + 1)\n', (12025, 12058), True, 'import numpy as np\n'), ((13033, 13163), 'pandas.to_datetime', 'pd.to_datetime', (['[x for x in self._level_knot_dates if x <= df[self.date_col].values[-1] and\n x >= df[self.date_col].values[0]]'], {}), '([x for x in self._level_knot_dates if x <= df[self.date_col]\n .values[-1] and x >= df[self.date_col].values[0]])\n', (13047, 13163), True, 'import pandas as pd\n'), ((17119, 17163), 'numpy.arange', 'np.arange', (['(start + 1)', '(start + output_len + 1)'], {}), '(start + 1, start + output_len + 1)\n', (17128, 17163), True, 'import numpy as np\n'), ((21452, 21574), 'numpy.squeeze', 'np.squeeze', (['self._aggregate_posteriors[PredictMethod.MAP.value][constants.\n BaseSamplingParameters.LEVEL_KNOT.value]', '(0)'], {}), '(self._aggregate_posteriors[PredictMethod.MAP.value][constants.\n BaseSamplingParameters.LEVEL_KNOT.value], 0)\n', (21462, 21574), True, 'import numpy as np\n'), ((21907, 22024), 'numpy.squeeze', 'np.squeeze', (['self._aggregate_posteriors[PredictMethod.MAP.value][constants.\n BaseSamplingParameters.LEVEL.value]', '(0)'], {}), '(self._aggregate_posteriors[PredictMethod.MAP.value][constants.\n BaseSamplingParameters.LEVEL.value], 0)\n', (21917, 22024), True, 'import numpy as np\n'), ((23882, 23892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23890, 23892), True, 'import matplotlib.pyplot as plt\n'), ((23919, 23930), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (23928, 23930), True, 'import matplotlib.pyplot as plt\n'), ((11369, 11392), 'numpy.timedelta64', 'np.timedelta64', (['(1)', 'freq'], {}), '(1, freq)\n', (11383, 11392), True, 'import numpy as np\n'), ((12558, 12599), 'math.ceil', 'math.ceil', (['(self._cutoff / number_of_knots)'], {}), '(self._cutoff / number_of_knots)\n', (12567, 12599), False, 'import math\n'), ((14366, 14407), 'math.ceil', 
'math.ceil', (['(self._cutoff / number_of_knots)'], {}), '(self._cutoff / number_of_knots)\n', (14375, 14407), False, 'import math\n'), ((17956, 18013), 'numpy.concatenate', 'np.concatenate', (['[self.knots_tp_level, knots_tp_level_out]'], {}), '([self.knots_tp_level, knots_tp_level_out])\n', (17970, 18013), True, 'import numpy as np\n'), ((18406, 18457), 'numpy.concatenate', 'np.concatenate', (['[lev_knot_in, lev_knot_out]'], {'axis': '(1)'}), '([lev_knot_in, lev_knot_out], axis=1)\n', (18420, 18457), True, 'import numpy as np\n'), ((11805, 11846), 'numpy.arange', 'np.arange', (['(cutoff - 1)', '(0)', '(-knots_distance)'], {}), '(cutoff - 1, 0, -knots_distance)\n', (11814, 11846), True, 'import numpy as np\n'), ((13443, 13475), 'pandas.infer_freq', 'pd.infer_freq', (['df[self.date_col]'], {}), '(df[self.date_col])\n', (13456, 13475), True, 'import pandas as pd\n'), ((17753, 17775), 'numpy.array', 'np.array', (['[new_tp[-1]]'], {}), '([new_tp[-1]])\n', (17761, 17775), True, 'import numpy as np\n'), ((17839, 17918), 'numpy.arange', 'np.arange', (['(self.knots_tp_level[-1] + lev_knot_width)', 'new_tp[-1]', 'lev_knot_width'], {}), '(self.knots_tp_level[-1] + lev_knot_width, new_tp[-1], lev_knot_width)\n', (17848, 17918), True, 'import numpy as np\n'), ((9361, 9386), 'numpy.max', 'np.max', (['self._seasonality'], {}), '(self._seasonality)\n', (9367, 9386), True, 'import numpy as np\n'), ((9823, 9848), 'numpy.max', 'np.max', (['self._seasonality'], {}), '(self._seasonality)\n', (9829, 9848), True, 'import numpy as np\n')] |
# Graph convolution layer test
# requires Tensorflow 1.14.0, Keras 2.2.4
#
import numpy as np
import pandas as pd
import keras.backend as K
from keras.layers import Layer, Dense, Activation, LSTM
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
class gconv_lstm(Layer):
    """Custom Keras layer applying a graph convolution over an adjacency matrix.

    Each of the `units` filters masks the weight row by the adjacency matrix,
    projects the input through it, and the outputs are averaged over the units.
    Written against Keras 2.2.4 / TF 1.14 (uses `K.tf` and assigns
    `trainable_weights` directly, both of which break on newer Keras).
    """
    def __init__(self, dim1, dim2, units, Adj, batch_size,**kwargs):
        # dim1/dim2: expected output (timesteps, features); Adj: adjacency matrix
        super(gconv_lstm, self).__init__(**kwargs)
        self.dim1 = dim1
        self.dim2 = dim2
        self.units = units
        self.batch_size = batch_size
        self.Adj = K.variable(Adj)

    def build(self, input_shape):
        # one weight row of length dim2 per unit, plus a per-unit scalar bias
        self.W = self.add_weight(shape=(self.units,self.dim2),initializer='uniform', trainable=True, name='weight')
        self.b = self.add_weight(shape=(self.units,), initializer='zeros', trainable=True, name='bias')
        # NOTE(review): add_weight(trainable=True) already registers the weights;
        # this assignment is redundant in Keras 2.2.4 and raises on newer Keras
        self.trainable_weights = [self.W,self.b]
        super(gconv_lstm, self).build(input_shape)

    def compute_output_shape(self,input_shape):
        # NOTE(review): hard-codes batch_size instead of input_shape[0] -- confirm
        return (self.batch_size,self.dim1,self.dim2)

    def call(self, x):
        tensor1=[]
        for i in range(self.units):
            # mask the i-th filter row by the adjacency matrix, then project x through it
            w0 = K.tf.multiply(self.Adj,self.W[i])
            w0 = K.transpose(w0)
            tensor1.append(K.dot(x,w0)+self.b[i])
        # stack per-unit outputs on a new axis and average over the units
        tensor1=K.stack(tensor1,axis=3)
        tensor1 = K.tf.reduce_mean(tensor1, axis=3)
        return tensor1
# load input data (numpy arrays saved to disk; shapes assumed compatible with
# the (16 timesteps, 35 sources) model below -- confirm against data files)
X_train=np.load('inp_test.npy')
y_train=np.load('out_test.npy')
# load neighbourhood (adjacency) matrix
matA0 = pd.read_csv('neighbourhood.csv', header=None)
matAdj = matA0.values
# build model: batch-norm -> graph convolution -> LSTM -> dense head
model_GL=Sequential()
model_GL.add(BatchNormalization())
model_GL.add(gconv_lstm(16,35,20,matAdj,10)) # timesteps,input sources,units,adjoint matrix, batch size
model_GL.add(Activation("relu"))
model_GL.add(LSTM(5, activation='relu', input_shape=(16, 35)))
model_GL.add(Dense(16))
model_GL.add(Activation("relu"))
model_GL.add(Dense(3))
model_GL.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])
# training:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
EPOCHS = 100
BS = 10
# NOTE(review): sample 500 is excluded from both the train ([0:500]) and
# validation ([501:]) splits -- confirm whether that is intentional
H = model_GL.fit(X_train[0:500], y_train[0:500], validation_data=(X_train[501:], y_train[501:]), batch_size=BS, epochs=EPOCHS, verbose=1)
print("Minimum Validation Loss:",min(H.history['val_loss']))
# release TF graph/session resources
K.clear_session()
| [
"keras.backend.stack",
"keras.backend.dot",
"numpy.load",
"keras.layers.Activation",
"pandas.read_csv",
"keras.layers.LSTM",
"keras.layers.Dense",
"keras.backend.transpose",
"keras.backend.tf.multiply",
"keras.models.Sequential",
"keras.backend.tf.reduce_mean",
"keras.backend.variable",
"ker... | [((1443, 1466), 'numpy.load', 'np.load', (['"""inp_test.npy"""'], {}), "('inp_test.npy')\n", (1450, 1466), True, 'import numpy as np\n'), ((1476, 1499), 'numpy.load', 'np.load', (['"""out_test.npy"""'], {}), "('out_test.npy')\n", (1483, 1499), True, 'import numpy as np\n'), ((1540, 1585), 'pandas.read_csv', 'pd.read_csv', (['"""neighbourhood.csv"""'], {'header': 'None'}), "('neighbourhood.csv', header=None)\n", (1551, 1585), True, 'import pandas as pd\n'), ((1635, 1647), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1645, 1647), False, 'from keras.models import Sequential\n'), ((2359, 2376), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2374, 2376), True, 'import keras.backend as K\n'), ((1662, 1682), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1680, 1682), False, 'from keras.layers.normalization import BatchNormalization\n'), ((1804, 1822), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1814, 1822), False, 'from keras.layers import Layer, Dense, Activation, LSTM\n'), ((1838, 1886), 'keras.layers.LSTM', 'LSTM', (['(5)'], {'activation': '"""relu"""', 'input_shape': '(16, 35)'}), "(5, activation='relu', input_shape=(16, 35))\n", (1842, 1886), False, 'from keras.layers import Layer, Dense, Activation, LSTM\n'), ((1902, 1911), 'keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (1907, 1911), False, 'from keras.layers import Layer, Dense, Activation, LSTM\n'), ((1927, 1945), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1937, 1945), False, 'from keras.layers import Layer, Dense, Activation, LSTM\n'), ((1961, 1969), 'keras.layers.Dense', 'Dense', (['(3)'], {}), '(3)\n', (1966, 1969), False, 'from keras.layers import Layer, Dense, Activation, LSTM\n'), ((586, 601), 'keras.backend.variable', 'K.variable', (['Adj'], {}), '(Adj)\n', (596, 601), True, 'import keras.backend as K\n'), ((1309, 1333), 
'keras.backend.stack', 'K.stack', (['tensor1'], {'axis': '(3)'}), '(tensor1, axis=3)\n', (1316, 1333), True, 'import keras.backend as K\n'), ((1352, 1385), 'keras.backend.tf.reduce_mean', 'K.tf.reduce_mean', (['tensor1'], {'axis': '(3)'}), '(tensor1, axis=3)\n', (1368, 1385), True, 'import keras.backend as K\n'), ((1173, 1207), 'keras.backend.tf.multiply', 'K.tf.multiply', (['self.Adj', 'self.W[i]'], {}), '(self.Adj, self.W[i])\n', (1186, 1207), True, 'import keras.backend as K\n'), ((1225, 1240), 'keras.backend.transpose', 'K.transpose', (['w0'], {}), '(w0)\n', (1236, 1240), True, 'import keras.backend as K\n'), ((1269, 1281), 'keras.backend.dot', 'K.dot', (['x', 'w0'], {}), '(x, w0)\n', (1274, 1281), True, 'import keras.backend as K\n')] |
"""
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
DOF control methods example
---------------------------
An example that demonstrates various DOF control methods:
- Load CubeBot asset from an urdf
- Get/set DOF properties
- Set DOF position and velocity targets
- Get DOF positions
- Apply DOF efforts
"""
import math
from isaacgym import gymapi
from isaacgym import gymutil
from isaacgym import gymtorch
import yaml
import numpy as np
# For Generating Observation Like RL script
import torch
from isaacgym.torch_utils import *
from isaacgymenvs.utils.torch_jit_utils import *
import time
import matplotlib.pyplot as plt
import numpy as np
def lerp(val1, val2, ratio):
    """Linearly interpolate from val1 to val2; ratio is clamped to [0, 1]."""
    t = min(max(ratio, 0), 1)
    return (1 - t) * val1 + t * val2
# load configuration data
# NOTE: the file handle name `cfg` is rebound to the parsed YAML dict; the
# rest of the script reads gains/limits from it (cfg["env"][...]).
with open("../../training/cfg/task/CubeBot.yaml", "r") as cfg:
    try:
        cfg = yaml.safe_load(cfg)
    except yaml.YAMLError as exc:
        # Parse errors are only printed; later cfg[...] lookups would then
        # fail on the unparsed handle -- acceptable for a demo script.
        print(exc)
# initialize gym
gym = gymapi.acquire_gym()
# parse arguments
args = gymutil.parse_arguments(description="Joint control Methods Example")
# create a simulator
sim_params = gymapi.SimParams()
sim_params.substeps = 2
sim_params.dt = 1.0 / 60.0  # 60 Hz physics step
sim_params.physx.solver_type = 1
sim_params.physx.num_position_iterations = 4
sim_params.physx.num_velocity_iterations = 1
sim_params.physx.num_threads = args.num_threads
sim_params.physx.use_gpu = args.use_gpu
# This example always runs the CPU pipeline, even if the GPU was requested.
sim_params.use_gpu_pipeline = False
if args.use_gpu_pipeline:
    print("WARNING: Forcing CPU pipeline.")
# Z-up world with standard gravity.
sim_params.up_axis = gymapi.UP_AXIS_Z
sim_params.gravity.x = 0
sim_params.gravity.y = 0
sim_params.gravity.z = -9.81
sim = gym.create_sim(args.compute_device_id, args.graphics_device_id, args.physics_engine, sim_params)
if sim is None:
    print("*** Failed to create sim")
    quit()
# create viewer using the default camera properties
viewer = gym.create_viewer(sim, gymapi.CameraProperties())
if viewer is None:
    raise ValueError('*** Failed to create viewer')
# add ground plane
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
gym.add_ground(sim, plane_params)
# set up the env grid (single environment)
num_envs = 1
spacing = 1.5
env_lower = gymapi.Vec3(-spacing, 0.0, -spacing)
env_upper = gymapi.Vec3(spacing, 0.0, spacing)
# add cartpole urdf asset
asset_root = "../../assets"
asset_file = "urdf/CubeBot.urdf"
# Load asset with default control type of position for all joints
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = False
asset_options.angular_damping = cfg["env"]["angularDamping"]
asset_options.max_angular_velocity = cfg["env"]["angularVelocity"]
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_VEL
print("Loading asset '%s' from '%s'" % (asset_file, asset_root))
cubebot_asset = gym.load_asset(sim, asset_root, asset_file, asset_options)
# initial root pose for cartpole actors
initial_pose = gymapi.Transform()
initial_pose.p = gymapi.Vec3(0.0, 0.0, 1.0)
initial_pose.r = gymapi.Quat(0, 0.0, 0.0, 1.0)  # identity orientation
# Kept as a torch quaternion for the RL-style observation math below.
start_rotation = torch.tensor([initial_pose.r.x, initial_pose.r.y, initial_pose.r.z, initial_pose.r.w])
# Create environment 0
# (comments below are leftovers from the cartpole example this was based on)
# Cart held steady using position target mode.
# Pole held at a 45 degree angle using position target mode.
env0 = gym.create_env(sim, env_lower, env_upper, 2)
cubebot0 = gym.create_actor(env0, cubebot_asset, initial_pose, 'CubeBot', 0, 1)
# Configure DOF properties (velocity drive, gains from the task config)
props = gym.get_actor_dof_properties(env0, cubebot0)
props["driveMode"][:] = gymapi.DOF_MODE_VEL
props["stiffness"] = cfg["env"]["stiffness"]
props['damping'][:] = cfg["env"]["damping"]
props['velocity'][:] = cfg["env"]["maxSpeed"]
props['effort'][:] = cfg["env"]["maxTorque"]
props['friction'][:] = cfg["env"]["friction"]
gym.set_actor_dof_properties(env0, cubebot0, props)
# Set DOF drive targets
dof_dict = gym.get_actor_dof_dict(env0, cubebot0)
dof_keys = list(dof_dict.keys())
dof_handles = []
for key in dof_keys:
    dof_handles.append(gym.find_actor_dof_handle(env0, cubebot0, key))
# targets = torch.tensor([1000, 0, 0, 0, 0, 0])
# gym.set_dof_velocity_target_tensor(env0, gymtorch.unwrap_tensor(targets))
# Look at the first env
cam_pos = gymapi.Vec3(8, 4, 1.5)
cam_target = gymapi.Vec3(0, 2, 1.5)
gym.viewer_camera_look_at(viewer, None, cam_pos, cam_target)
# Some Variables to control whats happening
loop_count = 1
control_idx = 0
target_speed = 0
pair_idx = 0
update_period = 100  # frames between control changes
obs_buf = np.zeros(19)
# Views into the simulator's root-state tensor: (pos, ori, Lvel, Avel) = 13
actor_root_state = gym.acquire_actor_root_state_tensor(sim)
root_states = gymtorch.wrap_tensor(actor_root_state)
print(root_states)
root_pos = root_states.view(1, 13)[0, 0:3]  # num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
root_ori = root_states.view(1, 13)[0, 3:7]  # num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
root_linvel = root_states.view(1, 13)[0, 7:10]  # num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
root_angvel = root_states.view(1, 13)[0, 10:13]  # num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
dof_state_tensor = gym.acquire_dof_state_tensor(sim)
num_dof = 6
dof_state = gymtorch.wrap_tensor(dof_state_tensor)
print(dof_state)
dof_pos = dof_state.view(num_dof, 2)[:, 0]
dof_vel = dof_state.view(num_dof, 2)[:, 1]
# Measurements for rewards (mirrors the CubeBot RL task's observation setup)
potentials = torch.zeros((3), device='cpu')
up_axis_idx = 2  # Set z to up so this should be 2
up_vec = to_torch(get_axis_params(1., up_axis_idx), device='cpu').repeat((num_envs, 1))
heading_vec = to_torch([0, 1, 0], device='cpu').repeat((num_envs, 1))
inv_start_rot = quat_conjugate(start_rotation).repeat((num_envs, 1))
basis_vec0 = heading_vec.clone()
basis_vec1 = up_vec.clone()
# Simulate; task selects the demo mode: 'force' pushes the base, 'step' drives DOFs
task='force'
plot_linvel = []
plot_angvel = []
plot_linvel_loc = []
plot_angvel_loc = []
while not gym.query_viewer_has_closed(viewer):
    # step the physics
    gym.simulate(sim)
    gym.fetch_results(sim, True)
    # update the viewer
    gym.step_graphics(sim)
    gym.draw_viewer(viewer, sim, True)
    if(task == 'force'):
        # Every update_period frames, push the base in a new direction,
        # cycling +x, +y, -x, -y; after a full cycle, plot the recorded
        # velocities and reset the logs.
        if(loop_count % update_period == 0):
            forces = torch.zeros((21, 3), device='cpu', dtype=torch.float)
            torques = torch.zeros((21, 3), device='cpu', dtype=torch.float)
            forces[0, 2] = 250   # constant upward push on the base body
            torques[0, 2] = 10   # constant yaw torque on the base body
            if(control_idx == 0):
                forces[0, 0] = 250
            if(control_idx == 1):
                forces[0, 1] = 250
            if(control_idx == 2):
                forces[0, 0] = -250
            if(control_idx == 3):
                forces[0, 1] = -250
            control_idx += 1
            if(control_idx > 3):
                control_idx = 0
                # Some Plotting: world- and body-frame velocity traces
                f, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1)
                plt_array = np.array(plot_linvel_loc)
                ax1.plot(plt_array[:,0], "-r", label="loc_vel x")
                ax1.plot(plt_array[:,1], "-g", label="loc_vel y")
                ax1.plot(plt_array[:,2], "-b", label="loc_vel z")
                ax1.legend(loc="upper right")
                ax1.set_title('plot_linvel_loc')
                plt_array = np.array(plot_angvel_loc)
                ax2.plot(plt_array[:,0], "-r", label="loc_vel x")
                ax2.plot(plt_array[:,1], "-g", label="loc_vel y")
                ax2.plot(plt_array[:,2], "-b", label="loc_vel z")
                ax2.legend(loc="upper right")
                ax2.set_title('plot_angvel_loc')
                plt_array = np.array(plot_linvel)
                ax3.plot(plt_array[:,0], "-r", label="loc_vel x")
                ax3.plot(plt_array[:,1], "-g", label="loc_vel y")
                ax3.plot(plt_array[:,2], "-b", label="loc_vel z")
                ax3.legend(loc="upper right")
                ax3.set_title('plot_linvel')
                plt_array = np.array(plot_angvel)
                ax4.plot(plt_array[:,0], "-r", label="loc_vel x")
                ax4.plot(plt_array[:,1], "-g", label="loc_vel y")
                ax4.plot(plt_array[:,2], "-b", label="loc_vel z")
                ax4.legend(loc="upper right")
                ax4.set_title('plot_angvel')
                plt.show()
                time.sleep(10)
                plot_linvel_loc = []
                plot_angvel_loc = []
                plot_linvel = []
                plot_angvel = []
            gym.apply_rigid_body_force_tensors(sim, gymtorch.unwrap_tensor(forces), gymtorch.unwrap_tensor(torques), gymapi.ENV_SPACE)
        loop_count += 1
    if(task == 'step'):
        # Every 100 steps, incriment the control_idx variable
        if(loop_count % update_period == 0):
            control_idx += 1
            if(control_idx>1):
                control_idx = 0
        # Alternate between ramping a wheel pair to -maxSpeed and stopping it.
        if(control_idx == 0):
            target_speed = lerp(0, -cfg["env"]["maxSpeed"], (loop_count % update_period)/update_period)
        if(control_idx == 1):
            target_speed = 0
        # Set the DOF target velocities
        gym.set_dof_target_velocity(env0, dof_handles[2*pair_idx], target_speed)
        gym.set_dof_target_velocity(env0, dof_handles[2*pair_idx+1], target_speed)
        loop_count += 1
    # Refresh state tensors, then rebuild the RL-style observation exactly
    # as the CubeBot task does (heading/up projections, local-frame velocities).
    gym.refresh_actor_root_state_tensor(sim)
    gym.refresh_dof_state_tensor(sim)
    goal_pos = torch.zeros((num_envs, 3))
    goal_pos[:, 1] = 100  # far-away goal along +y
    to_target = goal_pos - root_pos
    to_target[:, 2] = 0.0
    prev_potentials= potentials.clone()
    potentials = -torch.norm(to_target, p=2, dim=-1) / sim_params.dt
    torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
        root_ori.repeat((num_envs, 1)), inv_start_rot, to_target, basis_vec0, basis_vec1, 2)
    vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
        torso_quat, root_linvel.repeat((num_envs, 1)), root_angvel.repeat((num_envs, 1)), goal_pos, root_pos.repeat((num_envs, 1)))
    dof_vel_scaled = dof_vel[0:6]/cfg["env"]["maxSpeed"]
    actions = torch.zeros((6))
    # Log local- and world-frame velocities for the plots above.
    plot_linvel_loc.append([vel_loc[0,0], vel_loc[0,1], vel_loc[0,2]])
    plot_angvel_loc.append([angvel_loc[0,0], angvel_loc[0,1], angvel_loc[0,2]])
    plot_linvel.append([root_linvel[0].clone(), root_linvel[1].clone(), root_linvel[2].clone()])
    plot_angvel.append([root_angvel[0].clone(), root_angvel[1].clone(), root_angvel[2].clone()])
    obs = torch.cat((root_pos, root_ori, vel_loc, angvel_loc,
                     angle_to_target.unsqueeze(-1), up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1),
                     dof_vel_scaled.repeat((num_envs, 1)), actions.repeat((num_envs, 1))), dim=-1)
    # Wait for dt to elapse in real time.
    # This synchronizes the physics simulation with the rendering rate.
    gym.sync_frame_time(sim)
print('Done')
gym.destroy_viewer(viewer)
gym.destroy_sim(sim)
| [
"isaacgym.gymapi.CameraProperties",
"yaml.safe_load",
"isaacgym.gymapi.Quat",
"torch.zeros",
"matplotlib.pyplot.subplots",
"isaacgym.gymapi.AssetOptions",
"isaacgym.gymapi.Vec3",
"isaacgym.gymapi.SimParams",
"matplotlib.pyplot.show",
"isaacgym.gymapi.acquire_gym",
"torch.norm",
"time.sleep",
... | [((1363, 1383), 'isaacgym.gymapi.acquire_gym', 'gymapi.acquire_gym', ([], {}), '()\n', (1381, 1383), False, 'from isaacgym import gymapi\n'), ((1410, 1478), 'isaacgym.gymutil.parse_arguments', 'gymutil.parse_arguments', ([], {'description': '"""Joint control Methods Example"""'}), "(description='Joint control Methods Example')\n", (1433, 1478), False, 'from isaacgym import gymutil\n'), ((1514, 1532), 'isaacgym.gymapi.SimParams', 'gymapi.SimParams', ([], {}), '()\n', (1530, 1532), False, 'from isaacgym import gymapi\n'), ((2410, 2430), 'isaacgym.gymapi.PlaneParams', 'gymapi.PlaneParams', ([], {}), '()\n', (2428, 2430), False, 'from isaacgym import gymapi\n'), ((2453, 2479), 'isaacgym.gymapi.Vec3', 'gymapi.Vec3', (['(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 1.0)\n', (2464, 2479), False, 'from isaacgym import gymapi\n'), ((2576, 2612), 'isaacgym.gymapi.Vec3', 'gymapi.Vec3', (['(-spacing)', '(0.0)', '(-spacing)'], {}), '(-spacing, 0.0, -spacing)\n', (2587, 2612), False, 'from isaacgym import gymapi\n'), ((2625, 2659), 'isaacgym.gymapi.Vec3', 'gymapi.Vec3', (['spacing', '(0.0)', 'spacing'], {}), '(spacing, 0.0, spacing)\n', (2636, 2659), False, 'from isaacgym import gymapi\n'), ((2831, 2852), 'isaacgym.gymapi.AssetOptions', 'gymapi.AssetOptions', ([], {}), '()\n', (2850, 2852), False, 'from isaacgym import gymapi\n'), ((3272, 3290), 'isaacgym.gymapi.Transform', 'gymapi.Transform', ([], {}), '()\n', (3288, 3290), False, 'from isaacgym import gymapi\n'), ((3308, 3334), 'isaacgym.gymapi.Vec3', 'gymapi.Vec3', (['(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 1.0)\n', (3319, 3334), False, 'from isaacgym import gymapi\n'), ((3352, 3381), 'isaacgym.gymapi.Quat', 'gymapi.Quat', (['(0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0, 0.0, 0.0, 1.0)\n', (3363, 3381), False, 'from isaacgym import gymapi\n'), ((3399, 3489), 'torch.tensor', 'torch.tensor', (['[initial_pose.r.x, initial_pose.r.y, initial_pose.r.z, initial_pose.r.w]'], {}), '([initial_pose.r.x, initial_pose.r.y, 
initial_pose.r.z,\n initial_pose.r.w])\n', (3411, 3489), False, 'import torch\n'), ((4529, 4551), 'isaacgym.gymapi.Vec3', 'gymapi.Vec3', (['(8)', '(4)', '(1.5)'], {}), '(8, 4, 1.5)\n', (4540, 4551), False, 'from isaacgym import gymapi\n'), ((4565, 4587), 'isaacgym.gymapi.Vec3', 'gymapi.Vec3', (['(0)', '(2)', '(1.5)'], {}), '(0, 2, 1.5)\n', (4576, 4587), False, 'from isaacgym import gymapi\n'), ((4786, 4798), 'numpy.zeros', 'np.zeros', (['(19)'], {}), '(19)\n', (4794, 4798), True, 'import numpy as np\n'), ((4873, 4911), 'isaacgym.gymtorch.wrap_tensor', 'gymtorch.wrap_tensor', (['actor_root_state'], {}), '(actor_root_state)\n', (4893, 4911), False, 'from isaacgym import gymtorch\n'), ((5374, 5412), 'isaacgym.gymtorch.wrap_tensor', 'gymtorch.wrap_tensor', (['dof_state_tensor'], {}), '(dof_state_tensor)\n', (5394, 5412), False, 'from isaacgym import gymtorch\n'), ((5557, 5585), 'torch.zeros', 'torch.zeros', (['(3)'], {'device': '"""cpu"""'}), "(3, device='cpu')\n", (5568, 5585), False, 'import torch\n'), ((2277, 2302), 'isaacgym.gymapi.CameraProperties', 'gymapi.CameraProperties', ([], {}), '()\n', (2300, 2302), False, 'from isaacgym import gymapi\n'), ((9509, 9535), 'torch.zeros', 'torch.zeros', (['(num_envs, 3)'], {}), '((num_envs, 3))\n', (9520, 9535), False, 'import torch\n'), ((10606, 10620), 'torch.zeros', 'torch.zeros', (['(6)'], {}), '(6)\n', (10617, 10620), False, 'import torch\n'), ((1266, 1285), 'yaml.safe_load', 'yaml.safe_load', (['cfg'], {}), '(cfg)\n', (1280, 1285), False, 'import yaml\n'), ((6339, 6392), 'torch.zeros', 'torch.zeros', (['(21, 3)'], {'device': '"""cpu"""', 'dtype': 'torch.float'}), "((21, 3), device='cpu', dtype=torch.float)\n", (6350, 6392), False, 'import torch\n'), ((6415, 6468), 'torch.zeros', 'torch.zeros', (['(21, 3)'], {'device': '"""cpu"""', 'dtype': 'torch.float'}), "((21, 3), device='cpu', dtype=torch.float)\n", (6426, 6468), False, 'import torch\n'), ((9682, 9716), 'torch.norm', 'torch.norm', (['to_target'], {'p': '(2)', 'dim': 
'(-1)'}), '(to_target, p=2, dim=-1)\n', (9692, 9716), False, 'import torch\n'), ((6990, 7008), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {}), '(4, 1)\n', (7002, 7008), True, 'import matplotlib.pyplot as plt\n'), ((7037, 7062), 'numpy.array', 'np.array', (['plot_linvel_loc'], {}), '(plot_linvel_loc)\n', (7045, 7062), True, 'import numpy as np\n'), ((7385, 7410), 'numpy.array', 'np.array', (['plot_angvel_loc'], {}), '(plot_angvel_loc)\n', (7393, 7410), True, 'import numpy as np\n'), ((7733, 7754), 'numpy.array', 'np.array', (['plot_linvel'], {}), '(plot_linvel)\n', (7741, 7754), True, 'import numpy as np\n'), ((8073, 8094), 'numpy.array', 'np.array', (['plot_angvel'], {}), '(plot_angvel)\n', (8081, 8094), True, 'import numpy as np\n'), ((8400, 8410), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8408, 8410), True, 'import matplotlib.pyplot as plt\n'), ((8427, 8441), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (8437, 8441), False, 'import time\n'), ((8635, 8665), 'isaacgym.gymtorch.unwrap_tensor', 'gymtorch.unwrap_tensor', (['forces'], {}), '(forces)\n', (8657, 8665), False, 'from isaacgym import gymtorch\n'), ((8667, 8698), 'isaacgym.gymtorch.unwrap_tensor', 'gymtorch.unwrap_tensor', (['torques'], {}), '(torques)\n', (8689, 8698), False, 'from isaacgym import gymtorch\n')] |
import warnings
from copy import copy
import numpy as np
import pandas as pd
import scipy
from pandas.core.common import SettingWithCopyWarning
from scipy.sparse import csr_matrix
from scipy.stats import hmean, fisher_exact, rankdata, norm
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifierCV, LassoCV
from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory
from scattertext.Common import DEFAULT_BETA, DEFAULT_SCALER_ALGO, DEFAULT_BACKGROUND_SCALER_ALGO, \
DEFAULT_BACKGROUND_BETA
from scattertext.TermDocMatrixWithoutCategories import TermDocMatrixWithoutCategories
from scattertext.indexstore import IndexStore, IndexStoreFromList
from scattertext.termscoring.CornerScore import CornerScore
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
from scattertext.termscoring.ScaledFScore import InvalidScalerException, ScaledFScore
class CannotCreateATermDocMatrixWithASignleCategoryException(Exception):
    """Raised when every document carries the same category label; a
    TermDocMatrix requires at least two distinct categories."""
    pass
class TermDocMatrix(TermDocMatrixWithoutCategories):
'''
!!! to do: refactor score functions into classes
'''
    def __init__(self,
                 X,
                 mX,
                 y,
                 term_idx_store,
                 category_idx_store,
                 metadata_idx_store,
                 unigram_frequency_path=None):
        '''
        Parameters
        ----------
        X : csr_matrix
            term document matrix
        mX : csr_matrix
            metadata-document matrix
        y : np.array
            category index array (one category index per document/row)
        term_idx_store : IndexStore
            Term indices
        category_idx_store : IndexStore
            Catgory indices
        metadata_idx_store : IndexStore
            Document metadata indices
        unigram_frequency_path : str or None
            Path to term frequency file.

        Raises
        ------
        CannotCreateATermDocMatrixWithASignleCategoryException
            If every document shares the same category index.
        '''
        # Category-dependent scoring is meaningless with a single category,
        # so fail fast with the offending label in the message.
        if all(y == y[0]):
            raise CannotCreateATermDocMatrixWithASignleCategoryException(
                'Documents must be labeled with more than one category. All documents were labeled '
                'with category: "' + str(category_idx_store.getval(y[0])) + '"')
        TermDocMatrixWithoutCategories.__init__(self, X=X, mX=mX, term_idx_store=term_idx_store,
                                                metadata_idx_store=metadata_idx_store,
                                                unigram_frequency_path=unigram_frequency_path)
        self._y = y
        self._category_idx_store = category_idx_store
def get_categories(self):
'''
Returns
-------
list
Category names
'''
return self._category_idx_store.values()
def old_get_term_freq_df(self):
d = {'term': self._term_idx_store._i2val}
for i, category in self._category_idx_store.items():
d[category + ' freq'] = self._X[self._y == i].sum(axis=0).A1
return pd.DataFrame(d).set_index('term')
def get_term_freq_df(self, label_append=' freq'):
'''
Parameters
-------
label_append : str
Returns
-------
pd.DataFrame indexed on terms, with columns giving frequencies for each
'''
'''
row = self._row_category_ids()
newX = csr_matrix((self._X.data, (row, self._X.indices)))
return self._term_freq_df_from_matrix(newX)
'''
mat = self.get_term_freq_mat()
return pd.DataFrame(mat,
index=pd.Series(self.get_terms(), name='term'),
columns=[str(c) + label_append for c in self.get_categories()])
def get_term_freq_mat(self):
'''
Returns
-------
np.array with columns as categories and rows as terms
'''
freq_mat = np.zeros(shape=(self.get_num_terms(), self.get_num_categories()),
dtype=self.get_term_doc_mat().dtype)
for cat_i in range(self.get_num_categories()):
freq_mat[:, cat_i] = self._X[self._y == cat_i, :].sum(axis=0)
return freq_mat
def get_term_count_mat(self):
'''
Returns
-------
np.array with columns as categories and rows as terms
'''
freq_mat = np.zeros(shape=(self.get_num_terms(), self.get_num_categories()),
dtype=self.get_term_doc_mat().dtype)
for cat_i in range(self.get_num_categories()):
X = (self._X[self._y == cat_i, :] > 0).astype(int)
freq_mat[:, cat_i] = X.sum(axis=0)
return freq_mat
def get_metadata_count_mat(self):
'''
Returns
-------
np.array with columns as categories and rows as terms
'''
freq_mat = np.zeros(shape=(self.get_num_metadata(), self.get_num_categories()),
dtype=self.get_metadata_doc_mat().dtype)
for cat_i in range(self.get_num_categories()):
mX = (self._mX[self._y == cat_i, :] > 0).astype(int)
freq_mat[:, cat_i] = mX.sum(axis=0)
return freq_mat
def get_term_doc_count_df(self, label_append=' freq'):
'''
Returns
-------
pd.DataFrame indexed on terms, with columns the number of documents each term appeared in
each category
'''
# row = self._row_category_ids()
# newX = csr_matrix(((self._X.data > 0).astype(int), (row, self._X.indices)))
# return self._ term_freq_df_from_matrix(newX)
mat = self.get_term_count_mat()
return pd.DataFrame(mat,
index=self.get_terms(),
columns=[str(c) + label_append for c in self.get_categories()])
def get_metadata_doc_count_df(self, label_append=' freq'):
'''
Returns
-------
pd.DataFrame indexed on metadata, with columns the number of documents
each metadata appeared in each category
'''
mat = self.get_metadata_count_mat()
return pd.DataFrame(mat,
index=self.get_metadata(),
columns=[str(c) + label_append for c in self.get_categories()])
def _term_freq_df_from_matrix(self, catX, label_append=' freq'):
return self._get_freq_df_using_idx_store(catX, self._term_idx_store, label_append=label_append)
    def _get_freq_df_using_idx_store(self, catX, idx_store, label_append=' freq'):
        # Build a frequency DataFrame (indexed by idx_store's values) from a
        # category-by-feature matrix catX: one column per category.
        d = {'term': idx_store._i2val}
        for idx, cat in self._category_idx_store.items():
            try:
                d[str(cat) + label_append] = catX[idx, :].A[0]
            except IndexError:
                # catX can be missing its last row(s) when the final
                # category has no features; fill that column with zeros.
                self._fix_problem_when_final_category_index_has_no_terms(cat, catX, d, label_append)
        return pd.DataFrame(d).set_index('term')
def _fix_problem_when_final_category_index_has_no_terms(self, cat, catX, d, label_append=' freq'):
d[str(cat) + label_append] = np.zeros(catX.shape[1])
def get_metadata_freq_df(self, label_append=' freq'):
'''
Parameters
-------
label_append : str
Returns
-------
pd.DataFrame indexed on metadata, with columns giving frequencies for each category
'''
'''
row = self._row_category_ids_for_meta()
newX = csr_matrix((self._mX.data, (row, self._mX.indices)))
return self._metadata_freq_df_from_matrix(newX, label_append)
'''
freq_mat = np.zeros(shape=(self.get_num_metadata(), self.get_num_categories()),
dtype=self.get_metadata_doc_mat().dtype)
for cat_i in range(self.get_num_categories()):
freq_mat[:, cat_i] = self._mX[self._y == cat_i, :].sum(axis=0)
return pd.DataFrame(freq_mat,
index=pd.Series(self.get_metadata(), name='term'),
columns=[str(c) + label_append for c in self.get_categories()])
def _row_category_ids(self):
row = self._X.tocoo().row
for i, cat in enumerate(self._y):
row[row == i] = cat
return row
def _row_category_ids_for_meta(self):
row = self._mX.tocoo().row
for i, cat in enumerate(self._y):
row[row == i] = cat
return row
def _metadata_freq_df_from_matrix(self, catX, label_append=' freq'):
return self._get_freq_df_using_idx_store(catX, self._metadata_idx_store, label_append)
def get_category_names_by_row(self):
'''
Returns
-------
np.array of the category name for each row
'''
return np.array(self.get_categories())[self._y]
def _change_document_type_in_matrix(self, X, new_doc_ids):
new_data = self._make_all_positive_data_ones(X.data)
newX = csr_matrix((new_data, (new_doc_ids, X.indices)))
return newX
def keep_only_these_categories(self, categories, ignore_absences=False):
'''
Non destructive category removal.
Parameters
----------
categories : list
list of categories to keep
ignore_absences : bool, False by default
if categories does not appear, don't raise an error, just move on.
Returns
-------
TermDocMatrix, new object with categories removed.
'''
if not ignore_absences:
assert set(self.get_categories()) & set(categories) == set(categories)
categories_to_remove = [c for c in self.get_categories() if c not in categories]
return self.remove_categories(categories_to_remove)
    def remove_categories(self, categories, ignore_absences=False):
        '''
        Non destructive category removal.

        Parameters
        ----------
        categories : list
            list of categories to remove
        ignore_absences : bool, False by default
            if categories does not appear, don't raise an error, just move on.

        Returns
        -------
        TermDocMatrix, new object with categories removed.

        Raises
        ------
        KeyError
            If a category is absent and ignore_absences is False.
        '''
        # Resolve the category indices being dropped.
        idx_to_delete_list = []
        existing_categories = set(self.get_categories())
        for category in categories:
            if category not in existing_categories:
                if not ignore_absences:
                    raise KeyError('Category %s not found' % (category))
                continue
            idx_to_delete_list.append(self._category_idx_store.getidx(category))
        new_category_idx_store = self._category_idx_store.batch_delete_idx(idx_to_delete_list)
        # Drop every document belonging to a removed category.  The matrices
        # are transposed so documents become columns for delete_columns.
        columns_to_delete = np.nonzero(np.isin(self._y, idx_to_delete_list))
        new_X = delete_columns(self._X.T, columns_to_delete).T
        new_mX = delete_columns(self._mX.T, columns_to_delete).T
        intermediate_y = self._y[~np.isin(self._y, idx_to_delete_list)]
        # Remap surviving category indices onto the compacted index store.
        old_y_to_new_y = [self._category_idx_store.getidx(x)
                          for x in new_category_idx_store._i2val]
        new_y = np.array([old_y_to_new_y.index(i) if i in old_y_to_new_y else None
                          for i in range(intermediate_y.max() + 1)])[intermediate_y]
        # Purge metadata and terms that no longer occur in any document.
        new_metadata_idx_store = self._metadata_idx_store
        if self.metadata_in_use():
            meta_idx_to_delete = np.nonzero(new_mX.sum(axis=0).A1 == 0)[0]
            new_metadata_idx_store = self._metadata_idx_store.batch_delete_idx(meta_idx_to_delete)
        term_idx_to_delete = np.nonzero(new_X.sum(axis=0).A1 == 0)[0]
        new_term_idx_store = self._term_idx_store.batch_delete_idx(term_idx_to_delete)
        new_X = delete_columns(new_X, term_idx_to_delete)
        term_doc_mat_to_ret = self._make_new_term_doc_matrix(new_X, new_mX,
                                                             new_y.astype(int),
                                                             new_term_idx_store,
                                                             new_category_idx_store, new_metadata_idx_store,
                                                             ~np.isin(self._y, idx_to_delete_list))
        return term_doc_mat_to_ret
def remove_terms_by_indices(self, idx_to_delete_list, non_text=False):
'''
Parameters
----------
idx_to_delete_list, list
non_text, bool
Returns
-------
TermDocMatrix
'''
new_X, new_idx_store = self._get_X_after_delete_terms(idx_to_delete_list, non_text)
return self._make_new_term_doc_matrix(
new_X=self._X if non_text else new_X,
new_mX=new_X if non_text else self._mX,
new_y=self._y,
new_term_idx_store=self._term_idx_store if non_text else new_idx_store,
new_category_idx_store=self._category_idx_store,
new_metadata_idx_store=new_idx_store if non_text else self._metadata_idx_store,
new_y_mask=self._y == self._y
)
def change_category_names(self, new_category_names):
if len(new_category_names) != self.get_num_categories():
raise Exception("The number of category names passed (%s) needs to equal "
"the number of categories in the corpus (%s)." %
(len(new_category_names), self.get_num_categories()))
return self._make_new_term_doc_matrix(
new_category_idx_store=IndexStoreFromList.build(new_category_names)
)
    def _make_new_term_doc_matrix(self,
                                  new_X=None,
                                  new_mX=None,
                                  new_y=None,
                                  new_term_idx_store=None,
                                  new_category_idx_store=None,
                                  new_metadata_idx_store=None,
                                  new_y_mask=None):
        # Copy-constructor helper: each None argument falls back to the
        # current object's corresponding field.
        # NOTE(review): new_y_mask is accepted but unused here -- presumably a
        # hook for subclasses that track per-document data; confirm before
        # removing it.
        return TermDocMatrix(X=new_X if new_X is not None else self._X,
                             mX=new_mX if new_mX is not None else self._mX,
                             y=new_y if new_y is not None else self._y,
                             term_idx_store=new_term_idx_store if new_term_idx_store is not None else self._term_idx_store,
                             category_idx_store=new_category_idx_store if new_category_idx_store is not None else self._category_idx_store,
                             metadata_idx_store=new_metadata_idx_store if new_metadata_idx_store is not None else self._metadata_idx_store,
                             unigram_frequency_path=self._unigram_frequency_path)
def get_posterior_mean_ratio_scores(self, category):
''' Computes posterior mean score.
Parameters
----------
category : str
category name to score
Returns
-------
np.array
'''
return self._get_posterior_mean_ratio_from_category(category)
def get_corner_scores(self, category):
''' Computes corner score, which is inversely correlated
to the Rudder score to the nearest upper-left or lower-right corner.
Parameters
----------
category : str
category name to score
Returns
-------
np.array
'''
return CornerScore.get_scores(
*self._get_catetgory_and_non_category_word_counts(category)
)
def get_rudder_scores(self, category):
''' Computes Rudder score.
Parameters
----------
category : str
category name to score
Returns
-------
np.array
'''
category_percentiles = self._get_term_percentiles_in_category(category)
not_category_percentiles = self._get_term_percentiles_not_in_category(category)
rudder_scores = self._get_rudder_scores_for_percentile_pair(category_percentiles,
not_category_percentiles)
return rudder_scores
def _get_posterior_mean_ratio_from_category(self, category):
cat_word_counts, not_cat_word_counts = self._get_catetgory_and_non_category_word_counts(category)
return self._get_posterior_mean_ratio_from_counts(cat_word_counts, not_cat_word_counts)
def _get_posterior_mean_ratio_from_counts(self, cat_word_counts, not_cat_word_counts):
cat_posterior_mean = self._get_posterior_mean_from_counts(cat_word_counts, not_cat_word_counts)
not_cat_posterior_mean = self._get_posterior_mean_from_counts(not_cat_word_counts, cat_word_counts)
return np.log(cat_posterior_mean / not_cat_posterior_mean) / np.log(2)
def _get_posterior_mean_from_counts(self, cat_word_counts, not_cat_word_counts):
a = cat_word_counts
b = cat_word_counts.sum() - cat_word_counts
beta = ((cat_word_counts.sum() + not_cat_word_counts.sum())
/ (cat_word_counts + not_cat_word_counts) - 1)
posterior_mean = (1. + a) / (1. + a + b + beta)
return posterior_mean
def get_logistic_regression_coefs_l2(self, category,
clf=RidgeClassifierCV()):
''' Computes l2-penalized logistic regression score.
Parameters
----------
category : str
category name to score
category : str
category name to score
Returns
-------
(coefficient array, accuracy, majority class baseline accuracy)
'''
try:
from sklearn.cross_validation import cross_val_predict
except:
from sklearn.model_selection import cross_val_predict
y = self._get_mask_from_category(category)
X = TfidfTransformer().fit_transform(self._X)
clf.fit(X, y)
y_hat = cross_val_predict(clf, X, y)
acc, baseline = self._get_accuracy_and_baseline_accuracy(y, y_hat)
return clf.coef_[0], acc, baseline
def _get_accuracy_and_baseline_accuracy(self, y, y_hat):
acc = sum(y_hat == y) * 1. / len(y)
baseline = max([sum(y), len(y) - sum(y)]) * 1. / len(y)
return acc, baseline
def get_logistic_regression_coefs_l1(self, category,
clf=LassoCV(alphas=[0.1, 0.001],
max_iter=10000,
n_jobs=-1)):
''' Computes l1-penalized logistic regression score.
Parameters
----------
category : str
category name to score
Returns
-------
(coefficient array, accuracy, majority class baseline accuracy)
'''
try:
from sklearn.cross_validation import cross_val_predict
except:
from sklearn.model_selection import cross_val_predict
y = self._get_mask_from_category(category)
y_continuous = self._get_continuous_version_boolean_y(y)
# X = TfidfTransformer().fit_transform(self._X)
X = self._X
clf.fit(X, y_continuous)
y_hat = (cross_val_predict(clf, X, y_continuous) > 0)
acc, baseline = self._get_accuracy_and_baseline_accuracy(y, y_hat)
clf.fit(X, y_continuous)
return clf.coef_, acc, baseline
def get_regression_coefs(self, category, clf=ElasticNet()):
''' Computes regression score of tdfidf transformed features
Parameters
----------
category : str
category name to score
clf : sklearn regressor
Returns
-------
coefficient array
'''
self._fit_tfidf_model(category, clf)
return clf.coef_
def get_logreg_coefs(self, category, clf=LogisticRegression()):
''' Computes regression score of tdfidf transformed features
Parameters
----------
category : str
category name to score
clf : sklearn regressor
Returns
-------
coefficient array
'''
self._fit_tfidf_model(category, clf)
return clf.coef_[0]
def _fit_tfidf_model(self, category, clf):
y = self._get_mask_from_category(category)
y_continuous = self._get_continuous_version_boolean_y(y)
X = TfidfTransformer().fit_transform(self._X)
clf.fit(X, y_continuous)
def _get_continuous_version_boolean_y(self, y_bool):
return 1000 * (y_bool * 2. - 1)
def get_scaled_f_scores(self,
category,
scaler_algo=DEFAULT_SCALER_ALGO,
beta=DEFAULT_BETA):
''' Computes scaled-fscores
Parameters
----------
category : str
category name to score
scaler_algo : str
Function that scales an array to a range \in [0 and 1]. Use 'percentile', 'normcdf'. Default.
beta : float
Beta in (1+B^2) * (Scale(P(w|c)) * Scale(P(c|w)))/(B^2*Scale(P(w|c)) + Scale(P(c|w))). Default.
Returns
-------
np.array of harmonic means of scaled P(word|category) and scaled P(category|word)
'''
assert beta > 0
cat_word_counts, not_cat_word_counts = self._get_catetgory_and_non_category_word_counts(category)
scores = self._get_scaled_f_score_from_counts(cat_word_counts, not_cat_word_counts, scaler_algo, beta)
return np.array(scores)
def _get_scaled_f_score_from_counts(self, cat_word_counts, not_cat_word_counts, scaler_algo,
beta=DEFAULT_BETA):
'''
scaler = self._get_scaler_function(scaler_algo)
p_word_given_category = cat_word_counts.astype(np.float64) / cat_word_counts.sum()
p_category_given_word = cat_word_counts.astype(np.float64) / (cat_word_counts + not_cat_word_counts)
scores \
= self._computer_harmoic_mean_of_probabilities_over_non_zero_in_category_count_terms(
cat_word_counts, p_category_given_word, p_word_given_category, scaler
)
'''
return ScaledFScore.get_scores(cat_word_counts, not_cat_word_counts, scaler_algo, beta=beta)
def _computer_harmoic_mean_of_probabilities_over_non_zero_in_category_count_terms(self,
cat_word_counts,
p_category_given_word,
p_word_given_category,
scaler):
df = pd.DataFrame({
'cat_word_counts': cat_word_counts,
'p_word_given_category': p_word_given_category,
'p_category_given_word': p_category_given_word
})
df_with_count = df[df['cat_word_counts'] > 0]
df_with_count['scale p_word_given_category'] = scaler(df_with_count['p_word_given_category'])
df_with_count['scale p_category_given_word'] = scaler(df_with_count['p_category_given_word'])
df['scale p_word_given_category'] = 0
df.loc[df_with_count.index, 'scale p_word_given_category'] = df_with_count['scale p_word_given_category']
df['scale p_category_given_word'] = 0
df.loc[df_with_count.index, 'scale p_category_given_word'] \
= df_with_count['scale p_category_given_word']
score = hmean([df_with_count['scale p_category_given_word'],
df_with_count['scale p_word_given_category']])
df['score'] = 0
df.loc[df_with_count.index, 'score'] = score
return df['score']
def _get_scaler_function(self, scaler_algo):
scaler = None
if scaler_algo == 'percentile':
scaler = lambda x: rankdata(x).astype(np.float64) / len(x)
elif scaler_algo == 'normcdf':
# scaler = lambda x: ECDF(x[cat_word_counts != 0])(x)
scaler = lambda x: norm.cdf(x, x.mean(), x.std())
elif scaler_algo == 'none':
scaler = lambda x: x
else:
raise InvalidScalerException("Invalid scaler alogrithm. Must be either percentile or normcdf.")
return scaler
    def get_fisher_scores(self, category):
        # One-sided Fisher's exact test of each term's association with
        # ``category``; returns (odds ratios, p-values) as parallel arrays.
        cat_word_counts, not_cat_word_counts = self._get_catetgory_and_non_category_word_counts(category)
        return self._get_fisher_scores_from_counts(cat_word_counts, not_cat_word_counts)
def get_fisher_scores_vs_background(self):
'''
Returns
-------
pd.DataFrame of fisher scores vs background
'''
df = self.get_term_and_background_counts()
odds_ratio, p_values = self._get_fisher_scores_from_counts(
df['corpus'], df['background'])
df['Odds ratio'] = odds_ratio
df['Bonferroni-corrected p-values'] = p_values * len(df)
df.sort_values(by=['Bonferroni-corrected p-values', 'Odds ratio'],
ascending=[True, False])
return df
def get_posterior_mean_ratio_scores_vs_background(self):
'''
Returns
-------
pd.DataFrame of posterior mean scores vs background
'''
df = self.get_term_and_background_counts()
df['Log Posterior Mean Ratio'] = self._get_posterior_mean_ratio_from_counts(df['corpus'],
df['background'])
return df.sort_values('Log Posterior Mean Ratio', ascending=False)
    def _get_catetgory_and_non_category_word_counts(self, category):
        # NOTE: the "catetgory" typo is preserved in the name because other
        # methods in this class call it by this spelling.
        # Returns two dense per-term count vectors (``.A1`` flattens the
        # sparse row-sum): counts inside ``category`` and counts everywhere else.
        self._validate_category(category)
        cat_word_counts = self._X[self._get_mask_from_category(category)].sum(axis=0).A1
        not_cat_word_counts = self._X[self._y != self._category_idx_store.getidx(category)].sum(axis=0).A1
        return cat_word_counts, not_cat_word_counts
def _validate_category(self, category):
if category not in self.get_categories():
raise Exception("Invalid category: %s, valid: %s" % (category, self.get_categories()))
def _get_fisher_scores_from_counts(self, cat_word_counts, not_cat_word_counts):
cat_not_word_counts = cat_word_counts.sum() - cat_word_counts
not_cat_not_word_counts = not_cat_word_counts.sum() - not_cat_word_counts
def do_fisher_exact(x):
return fisher_exact([[x[0], x[1]], [x[2], x[3]]], alternative='greater')
odds_ratio, p_values = np.apply_along_axis(
do_fisher_exact,
0,
np.array([cat_word_counts, cat_not_word_counts, not_cat_word_counts, not_cat_not_word_counts]))
return odds_ratio, p_values
def get_rudder_scores_vs_background(self):
'''
Returns
-------
pd.DataFrame of rudder scores vs background
'''
df = self.get_term_and_background_counts()
corpus_percentiles = self._get_percentiles_from_freqs(df['corpus'])
background_percentiles = self._get_percentiles_from_freqs(df['background'])
df['Rudder'] = (self._get_rudder_scores_for_percentile_pair(corpus_percentiles,
background_percentiles))
df = df.sort_values(by='Rudder', ascending=True)
return df
def _rescale_labels_to_neg_one_pos_one(self, category):
return (self._get_mask_from_category(category)) * 2 - 1
def _get_rudder_scores_for_percentile_pair(self, category_percentiles, not_category_percentiles):
return np.linalg.norm(np.array([1, 0])
- np.array(list(zip(category_percentiles, not_category_percentiles))),
axis=1)
    def _get_term_percentiles_in_category(self, category):
        # Percentile rank of each term's frequency, counted over the
        # documents belonging to ``category``.
        mask = self._get_mask_from_category(category)
        return self._get_frequency_percentiles(mask)
    def _get_mask_from_category(self, category):
        # Boolean vector over documents: True where the document's category id
        # equals the id of ``category``.
        return self._y == self._category_idx_store.getidx(category)
    def _get_term_percentiles_not_in_category(self, category):
        # Percentile rank of each term's frequency, counted over the
        # documents NOT belonging to ``category``.
        mask = self._y != self._category_idx_store.getidx(category)
        return self._get_frequency_percentiles(mask)
def _get_frequency_percentiles(self, mask):
freqs = self._X[mask].sum(axis=0).A1
percentiles = self._get_percentiles_from_freqs(freqs)
return percentiles
def _get_percentiles_from_freqs(self, freqs):
return rankdata(freqs) / len(freqs)
def get_term_category_frequencies(self, scatterchartdata):
'''
Applies the ranker in scatterchartdata to term-category frequencies.
Parameters
----------
scatterchartdata : ScatterChartData
Returns
-------
pd.DataFrame
'''
term_ranker = scatterchartdata.term_ranker(self)
if scatterchartdata.use_non_text_features:
term_ranker.use_non_text_features()
return term_ranker.get_ranks()
    def get_category_ids(self):
        '''
        Returns array of category ids
        Returns
        -------
        np.array
            one integer category id per document, aligned with the rows of
            the term-document matrix
        '''
        return self._y
    def get_category_index_store(self):
        '''
        Returns IndexStore object mapping categories to ids
        Returns
        -------
        IndexStore
            the live store used by this matrix (not a copy)
        '''
        return self._category_idx_store
    def recategorize(self, new_categories):
        '''
        Parameters
        ----------
        new_categories : array like
            String names of new categories. Length should be equal to number of documents
        Returns
        -------
        TermDocMatrix
        '''
        assert len(new_categories) == self.get_num_docs()
        # NOTE(review): building the index store from a ``set`` makes the
        # category-id assignment order-nondeterministic across runs; confirm
        # downstream consumers do not rely on stable ids.
        new_category_idx_store = IndexStoreFromList.build(set(new_categories))
        new_y = np.array(new_category_idx_store.getidxstrictbatch(new_categories))
        # ``new_y == new_y`` is an all-True mask: keep every document.
        new_tdm = self._make_new_term_doc_matrix(self._X, self._mX, new_y, self._term_idx_store, new_category_idx_store,
                                                 self._metadata_idx_store, new_y == new_y)
        return new_tdm
    def use_doc_labeled_terms_as_metadata(self, doc_labels, separator='_'):
        '''
        Makes the metadata of a new TermDocMatrix a copy of the term-document matrix, except each term is prefixed
        by its document's label followed by the separator.
        :param doc_labels: list[str], should be the same size as the number of documents in the TermDocMatrix.
        :param separator: str, default is '_'
        :return: TermDocMatrix (a new matrix; ``self`` is not modified)
        '''
        assert len(doc_labels) == self.get_num_docs()
        doc_labels = np.array(doc_labels)
        terms_in_corpus = np.array(self._term_idx_store.values())
        new_metadata_list = []
        new_meta_X = None
        # Process labels in sorted order so metadata columns are grouped
        # contiguously by label.
        ordered_doc_labels = list(sorted(set(doc_labels)))
        for doc_label in ordered_doc_labels:
            label_doc_mask = doc_labels == doc_label
            label_X = self._X[label_doc_mask, :]
            # Keep only terms that actually occur in this label's documents.
            label_term_mask = (label_X.sum(axis=0) > 0).A1
            label_X = label_X[:, label_term_mask]
            # Number of columns already emitted for earlier labels; this
            # label's columns start right after them.
            cols_to_pad = len(new_metadata_list)
            new_metadata_list += [doc_label + separator + term
                                  for term in terms_in_corpus[label_term_mask]]
            if new_meta_X is None:
                new_meta_X = label_X
            else:
                # Left-pad this label's block with empty columns so its terms
                # line up after all previously emitted metadata columns.
                label_X_pad = (CSRMatrixFactory()
                               .set_last_col_idx(cols_to_pad - 1)
                               .set_last_row_idx(sum(label_doc_mask) - 1)
                               .get_csr_matrix())
                padded_label_X = scipy.sparse.hstack([label_X_pad, label_X])
                new_meta_X.resize(new_meta_X.shape[0], padded_label_X.shape[1])
                new_meta_X = scipy.sparse.vstack([new_meta_X,
                                                  padded_label_X])
        new_metadata_idx_store = IndexStoreFromList.build(new_metadata_list)
        new_meta_X = new_meta_X.tocsr()
        # ``new_meta_X`` rows are grouped by label; re-scatter them back into
        # the original document order via the boolean masks.
        new_mX = (CSRMatrixFactory()
                  .set_last_col_idx(new_meta_X.shape[1] - 1)
                  .set_last_row_idx(new_meta_X.shape[0] - 1)
                  .get_csr_matrix().tolil())
        start_row = 0
        for doc_label in ordered_doc_labels:
            label_doc_mask = doc_labels == doc_label
            num_rows = sum(label_doc_mask)
            new_mX[label_doc_mask, :] = new_meta_X[start_row:start_row + num_rows, :]
            start_row += num_rows
        new_mX = new_mX.tocsr()
        # ``self._y == self._y`` is an all-True mask: keep every document.
        new_tdm = self._make_new_term_doc_matrix(self._X,
                                                 new_mX,
                                                 self._y,
                                                 self._term_idx_store,
                                                 self._category_idx_store,
                                                 new_metadata_idx_store,
                                                 self._y == self._y)
        return new_tdm
    def use_categories_as_metadata(self):
        '''
        Returns a TermDocMatrix which is identical to self except the metadata values are now identical to the
        categories present.
        :return: TermDocMatrix
        '''
        # Build a one-hot metadata matrix: document i gets a 1 in the column
        # of its category id.
        new_metadata_factory = CSRMatrixFactory()
        for i, category_idx in enumerate(self.get_category_ids()):
            new_metadata_factory[i, category_idx] = 1
        new_metadata = new_metadata_factory.get_csr_matrix()
        # ``self._y == self._y`` is an all-True mask: keep every document.
        new_tdm = self._make_new_term_doc_matrix(self._X,
                                                 new_metadata,
                                                 self._y,
                                                 self._term_idx_store,
                                                 self._category_idx_store,
                                                 copy(self._category_idx_store),
                                                 self._y == self._y)
        return new_tdm
    def use_categories_as_metadata_and_replace_terms(self):
        '''
        Returns a TermDocMatrix which is identical to self except the metadata values are now identical to the
        categories present and term-doc-matrix is now the metadata matrix.
        :return: TermDocMatrix
        '''
        # Same one-hot category metadata as use_categories_as_metadata, but
        # the old metadata matrix (self._mX) replaces the term matrix.
        new_metadata_factory = CSRMatrixFactory()
        for i, category_idx in enumerate(self.get_category_ids()):
            new_metadata_factory[i, category_idx] = 1
        new_metadata = new_metadata_factory.get_csr_matrix()
        # ``self._y == self._y`` is an all-True mask: keep every document.
        new_tdm = self._make_new_term_doc_matrix(self._mX,
                                                 new_metadata,
                                                 self._y,
                                                 self._metadata_idx_store,
                                                 self._category_idx_store,
                                                 copy(self._category_idx_store),
                                                 self._y == self._y)
        return new_tdm
def get_num_categories(self):
'''
Returns the number of categories in the term document matrix
:return: int
'''
return len(self.get_categories())
| [
"numpy.isin",
"scattertext.CSRMatrixTools.CSRMatrixFactory",
"scattertext.TermDocMatrixWithoutCategories.TermDocMatrixWithoutCategories.__init__",
"pandas.DataFrame",
"warnings.simplefilter",
"sklearn.linear_model.ElasticNet",
"scipy.stats.rankdata",
"scattertext.termscoring.ScaledFScore.ScaledFScore.... | [((872, 943), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'SettingWithCopyWarning'}), "(action='ignore', category=SettingWithCopyWarning)\n", (893, 943), False, 'import warnings\n'), ((2288, 2471), 'scattertext.TermDocMatrixWithoutCategories.TermDocMatrixWithoutCategories.__init__', 'TermDocMatrixWithoutCategories.__init__', (['self'], {'X': 'X', 'mX': 'mX', 'term_idx_store': 'term_idx_store', 'metadata_idx_store': 'metadata_idx_store', 'unigram_frequency_path': 'unigram_frequency_path'}), '(self, X=X, mX=mX, term_idx_store=\n term_idx_store, metadata_idx_store=metadata_idx_store,\n unigram_frequency_path=unigram_frequency_path)\n', (2327, 2471), False, 'from scattertext.TermDocMatrixWithoutCategories import TermDocMatrixWithoutCategories\n'), ((7069, 7092), 'numpy.zeros', 'np.zeros', (['catX.shape[1]'], {}), '(catX.shape[1])\n', (7077, 7092), True, 'import numpy as np\n'), ((8915, 8963), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(new_data, (new_doc_ids, X.indices))'], {}), '((new_data, (new_doc_ids, X.indices)))\n', (8925, 8963), False, 'from scipy.sparse import csr_matrix\n'), ((11684, 11725), 'scattertext.CSRMatrixTools.delete_columns', 'delete_columns', (['new_X', 'term_idx_to_delete'], {}), '(new_X, term_idx_to_delete)\n', (11698, 11725), False, 'from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory\n'), ((17204, 17223), 'sklearn.linear_model.RidgeClassifierCV', 'RidgeClassifierCV', ([], {}), '()\n', (17221, 17223), False, 'from sklearn.linear_model import RidgeClassifierCV, LassoCV\n'), ((17867, 17895), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['clf', 'X', 'y'], {}), '(clf, X, y)\n', (17884, 17895), False, 'from sklearn.model_selection import cross_val_predict\n'), ((18316, 18371), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {'alphas': '[0.1, 0.001]', 'max_iter': '(10000)', 'n_jobs': '(-1)'}), 
'(alphas=[0.1, 0.001], max_iter=10000, n_jobs=-1)\n', (18323, 18371), False, 'from sklearn.linear_model import RidgeClassifierCV, LassoCV\n'), ((19406, 19418), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {}), '()\n', (19416, 19418), False, 'from sklearn.linear_model import ElasticNet\n'), ((19805, 19825), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (19823, 19825), False, 'from sklearn.linear_model import LogisticRegression\n'), ((21486, 21502), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (21494, 21502), True, 'import numpy as np\n'), ((22163, 22252), 'scattertext.termscoring.ScaledFScore.ScaledFScore.get_scores', 'ScaledFScore.get_scores', (['cat_word_counts', 'not_cat_word_counts', 'scaler_algo'], {'beta': 'beta'}), '(cat_word_counts, not_cat_word_counts, scaler_algo,\n beta=beta)\n', (22186, 22252), False, 'from scattertext.termscoring.ScaledFScore import InvalidScalerException, ScaledFScore\n'), ((22771, 22921), 'pandas.DataFrame', 'pd.DataFrame', (["{'cat_word_counts': cat_word_counts, 'p_word_given_category':\n p_word_given_category, 'p_category_given_word': p_category_given_word}"], {}), "({'cat_word_counts': cat_word_counts, 'p_word_given_category':\n p_word_given_category, 'p_category_given_word': p_category_given_word})\n", (22783, 22921), True, 'import pandas as pd\n'), ((23572, 23676), 'scipy.stats.hmean', 'hmean', (["[df_with_count['scale p_category_given_word'], df_with_count[\n 'scale p_word_given_category']]"], {}), "([df_with_count['scale p_category_given_word'], df_with_count[\n 'scale p_word_given_category']])\n", (23577, 23676), False, 'from scipy.stats import hmean, fisher_exact, rankdata, norm\n'), ((30768, 30788), 'numpy.array', 'np.array', (['doc_labels'], {}), '(doc_labels)\n', (30776, 30788), True, 'import numpy as np\n'), ((32072, 32115), 'scattertext.indexstore.IndexStoreFromList.build', 'IndexStoreFromList.build', (['new_metadata_list'], {}), '(new_metadata_list)\n', (32096, 
32115), False, 'from scattertext.indexstore import IndexStore, IndexStoreFromList\n'), ((33430, 33448), 'scattertext.CSRMatrixTools.CSRMatrixFactory', 'CSRMatrixFactory', ([], {}), '()\n', (33446, 33448), False, 'from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory\n'), ((34464, 34482), 'scattertext.CSRMatrixTools.CSRMatrixFactory', 'CSRMatrixFactory', ([], {}), '()\n', (34480, 34482), False, 'from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory\n'), ((10708, 10744), 'numpy.isin', 'np.isin', (['self._y', 'idx_to_delete_list'], {}), '(self._y, idx_to_delete_list)\n', (10715, 10744), True, 'import numpy as np\n'), ((10762, 10806), 'scattertext.CSRMatrixTools.delete_columns', 'delete_columns', (['self._X.T', 'columns_to_delete'], {}), '(self._X.T, columns_to_delete)\n', (10776, 10806), False, 'from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory\n'), ((10826, 10871), 'scattertext.CSRMatrixTools.delete_columns', 'delete_columns', (['self._mX.T', 'columns_to_delete'], {}), '(self._mX.T, columns_to_delete)\n', (10840, 10871), False, 'from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory\n'), ((16654, 16705), 'numpy.log', 'np.log', (['(cat_posterior_mean / not_cat_posterior_mean)'], {}), '(cat_posterior_mean / not_cat_posterior_mean)\n', (16660, 16705), True, 'import numpy as np\n'), ((16708, 16717), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (16714, 16717), True, 'import numpy as np\n'), ((19163, 19202), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['clf', 'X', 'y_continuous'], {}), '(clf, X, y_continuous)\n', (19180, 19202), False, 'from sklearn.model_selection import cross_val_predict\n'), ((26521, 26586), 'scipy.stats.fisher_exact', 'fisher_exact', (['[[x[0], x[1]], [x[2], x[3]]]'], {'alternative': '"""greater"""'}), "([[x[0], x[1]], [x[2], x[3]]], alternative='greater')\n", (26533, 26586), False, 'from scipy.stats import hmean, fisher_exact, rankdata, norm\n'), 
((26696, 26794), 'numpy.array', 'np.array', (['[cat_word_counts, cat_not_word_counts, not_cat_word_counts,\n not_cat_not_word_counts]'], {}), '([cat_word_counts, cat_not_word_counts, not_cat_word_counts,\n not_cat_not_word_counts])\n', (26704, 26794), True, 'import numpy as np\n'), ((28584, 28599), 'scipy.stats.rankdata', 'rankdata', (['freqs'], {}), '(freqs)\n', (28592, 28599), False, 'from scipy.stats import hmean, fisher_exact, rankdata, norm\n'), ((34005, 34035), 'copy.copy', 'copy', (['self._category_idx_store'], {}), '(self._category_idx_store)\n', (34009, 34035), False, 'from copy import copy\n'), ((35044, 35074), 'copy.copy', 'copy', (['self._category_idx_store'], {}), '(self._category_idx_store)\n', (35048, 35074), False, 'from copy import copy\n'), ((3041, 3056), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (3053, 3056), True, 'import pandas as pd\n'), ((6894, 6909), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (6906, 6909), True, 'import pandas as pd\n'), ((10908, 10944), 'numpy.isin', 'np.isin', (['self._y', 'idx_to_delete_list'], {}), '(self._y, idx_to_delete_list)\n', (10915, 10944), True, 'import numpy as np\n'), ((12135, 12171), 'numpy.isin', 'np.isin', (['self._y', 'idx_to_delete_list'], {}), '(self._y, idx_to_delete_list)\n', (12142, 12171), True, 'import numpy as np\n'), ((13465, 13509), 'scattertext.indexstore.IndexStoreFromList.build', 'IndexStoreFromList.build', (['new_category_names'], {}), '(new_category_names)\n', (13489, 13509), False, 'from scattertext.indexstore import IndexStore, IndexStoreFromList\n'), ((17787, 17805), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (17803, 17805), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((20345, 20363), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (20361, 20363), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((27709, 27725), 
'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (27717, 27725), True, 'import numpy as np\n'), ((31785, 31828), 'scipy.sparse.hstack', 'scipy.sparse.hstack', (['[label_X_pad, label_X]'], {}), '([label_X_pad, label_X])\n', (31804, 31828), False, 'import scipy\n'), ((31938, 31987), 'scipy.sparse.vstack', 'scipy.sparse.vstack', (['[new_meta_X, padded_label_X]'], {}), '([new_meta_X, padded_label_X])\n', (31957, 31987), False, 'import scipy\n'), ((24250, 24345), 'scattertext.termscoring.ScaledFScore.InvalidScalerException', 'InvalidScalerException', (['"""Invalid scaler alogrithm. Must be either percentile or normcdf."""'], {}), "(\n 'Invalid scaler alogrithm. Must be either percentile or normcdf.')\n", (24272, 24345), False, 'from scattertext.termscoring.ScaledFScore import InvalidScalerException, ScaledFScore\n'), ((23942, 23953), 'scipy.stats.rankdata', 'rankdata', (['x'], {}), '(x)\n', (23950, 23953), False, 'from scipy.stats import hmean, fisher_exact, rankdata, norm\n'), ((31543, 31561), 'scattertext.CSRMatrixTools.CSRMatrixFactory', 'CSRMatrixFactory', ([], {}), '()\n', (31559, 31561), False, 'from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory\n'), ((32174, 32192), 'scattertext.CSRMatrixTools.CSRMatrixFactory', 'CSRMatrixFactory', ([], {}), '()\n', (32190, 32192), False, 'from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory\n')] |
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow, Executor, Client, requests
exposed_port = 12345
class ShardsExecutor(Executor):
    """Executor that stamps every document with shard-identifying data.

    Each shard appends ``n_docs`` matches and chunks whose ids encode the
    shard id, then sets shard-specific scalar attributes so the tests can
    verify how results from multiple shards are reduced.
    """

    def __init__(self, n_docs: int = 5, **kwargs):
        super().__init__(**kwargs)
        self.n_docs = n_docs

    @requests(on='/search')
    def search(self, docs: DocumentArray, **kwargs):
        shard_id = self.runtime_args.shard_id
        for doc in docs:
            match_docs = [Document(id=f'm-{shard_id}-{i}') for i in range(self.n_docs)]
            doc.matches.extend(match_docs)
            chunk_docs = [Document(id=f'c-{shard_id}-{i}') for i in range(self.n_docs)]
            doc.chunks.extend(chunk_docs)
            doc.text = self.runtime_args.name
            if shard_id == 0:
                doc.scores['cosine'].value = 0
                doc.modality = 'text'
            elif shard_id == 1:
                doc.modality = 'image'
                doc.tags = {'c': 'd'}
            elif shard_id == 2:
                doc.tags = {'a': 'b'}
class DummyExecutor(Executor):
    # Used as uses_after/uses to replace the default reduction step: whatever
    # the shards returned, this emits a single sentinel document instead.
    @requests
    def fake_reduce(self, **kwargs):
        return DocumentArray([Document(id='fake_document')])
@pytest.mark.parametrize('n_docs', [3, 5])
def test_reduce_shards(n_docs):
    """Matches/chunks from all shards are merged; scalar attributes are
    reduced with priority given to lower-numbered shards."""
    n_shards = 3
    search_flow = Flow(port_expose=exposed_port).add(
        uses=ShardsExecutor,
        shards=n_shards,
        polling='all',
        uses_with={'n_docs': n_docs},
    )
    with search_flow as f:
        da = DocumentArray([Document() for _ in range(5)])
        resp = Client(port=exposed_port).post('/search', inputs=da, return_results=True)
    assert len(resp[0].docs) == 5
    for doc in resp[0].docs:
        # assert matches and chunks are combined
        matches = set([doc.id for doc in doc.matches])
        chunks = set([doc.id for doc in doc.chunks])
        assert len(matches) == n_docs * n_shards
        assert len(chunks) == n_docs * n_shards
        for shard in range(n_shards):
            for match in range(n_docs):
                assert f'm-{shard}-{match}' in matches
            for chunk in range(n_docs):
                assert f'c-{shard}-{chunk}' in chunks
        # assert data properties are reduced with priority to the first shards
        assert doc.text == 'executor0/shard-0/rep-0'
        assert doc.scores['cosine'].value == 0
        assert doc.modality == 'text'
        assert doc.tags == {'c': 'd'}
@pytest.mark.parametrize('n_shards', [3, 5])
@pytest.mark.parametrize('n_docs', [3, 5])
def test_uses_after_no_reduce(n_shards, n_docs):
    """A non-trivial uses_after executor replaces the default reduction."""
    search_flow = Flow(port_expose=exposed_port).add(
        uses=ShardsExecutor,
        shards=n_shards,
        uses_after=DummyExecutor,
        polling='all',
        uses_with={'n_docs': n_docs},
    )
    with search_flow as f:
        da = DocumentArray([Document() for _ in range(5)])
        resp = Client(port=exposed_port).post('/search', inputs=da, return_results=True)
    # assert no reduce happened
    assert len(resp[0].docs) == 1
    assert resp[0].docs[0].id == 'fake_document'
class Executor1(Executor):
    """Sets a fixed text on every document."""

    @requests
    def endpoint(self, docs: DocumentArray, **kwargs):
        for document in docs:
            document.text = 'exec1'
class Executor2(Executor):
    """Sets tags and an 'image' modality on every document."""

    @requests
    def endpoint(self, docs: DocumentArray, **kwargs):
        for document in docs:
            document.tags = {'a': 'b'}
            document.modality = 'image'
class Executor3(Executor):
    """Attaches a length-3 zero embedding to every document."""

    @requests
    def endpoint(self, docs: DocumentArray, **kwargs):
        for document in docs:
            document.embedding = np.zeros(3)
class ExecutorStatus(Executor):
    """Stamps every document and reports per-shard status parameters."""

    @requests
    def endpoint(self, docs: DocumentArray, **kwargs):
        for document in docs:
            document.text = 'exec-status'
        # Returned dict is surfaced to the client via response parameters.
        return {
            'shard_id': self.runtime_args.shard_id,
            'happy_status': 'Hey there! Have a nice day :)',
        }
def test_reduce_needs():
    """Documents from three parallel branches are reduced at the joining pod,
    merging each branch's attributes."""
    flow = (
        Flow(port_expose=exposed_port)
        .add(uses=Executor1, name='pod0')
        .add(uses=Executor2, needs='gateway', name='pod1')
        .add(uses=Executor3, needs='gateway', name='pod2')
        .add(needs=['pod0', 'pod1', 'pod2'], name='pod3')
    )
    with flow as f:
        da = DocumentArray([Document() for _ in range(5)])
        resp = Client(port=exposed_port).post('/', inputs=da, return_results=True)
    assert len(resp[0].docs) == 5
    for doc in resp[0].docs:
        # Each attribute comes from a different branch's executor.
        assert doc.text == 'exec1'
        assert doc.tags == {'a': 'b'}
        assert doc.modality == 'image'
        assert (doc.embedding == np.zeros(3)).all()
def test_uses_before_reduce():
    """A plain BaseExecutor as uses_before does not suppress reduction."""
    flow = (
        Flow(port_expose=exposed_port)
        .add(uses=Executor1, name='pod0')
        .add(uses=Executor2, needs='gateway', name='pod1')
        .add(uses=Executor3, needs='gateway', name='pod2')
        .add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses_before='BaseExecutor')
    )
    with flow as f:
        da = DocumentArray([Document() for _ in range(5)])
        resp = Client(port=exposed_port).post('/', inputs=da, return_results=True)
    # assert reduce happened because there is only BaseExecutor as uses_before
    assert len(resp[0].docs) == 5
def test_uses_before_no_reduce_real_executor():
    """A real executor as uses_before takes over and suppresses reduction."""
    flow = (
        Flow(port_expose=exposed_port)
        .add(uses=Executor1, name='pod0')
        .add(uses=Executor2, needs='gateway', name='pod1')
        .add(uses=Executor3, needs='gateway', name='pod2')
        .add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses_before=DummyExecutor)
    )
    with flow as f:
        da = DocumentArray([Document() for _ in range(5)])
        resp = Client(port=exposed_port).post('/', inputs=da, return_results=True)
    # assert no reduce happened
    assert len(resp[0].docs) == 1
    assert resp[0].docs[0].id == 'fake_document'
def test_uses_before_no_reduce_real_executor_uses():
    """A real executor as the joining pod's ``uses`` suppresses reduction."""
    flow = (
        Flow(port_expose=exposed_port)
        .add(uses=Executor1, name='pod0')
        .add(uses=Executor2, needs='gateway', name='pod1')
        .add(uses=Executor3, needs='gateway', name='pod2')
        .add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses=DummyExecutor)
    )
    with flow as f:
        da = DocumentArray([Document() for _ in range(5)])
        resp = Client(port=exposed_port).post('/', inputs=da, return_results=True)
    # assert no reduce happened
    assert len(resp[0].docs) == 1
    assert resp[0].docs[0].id == 'fake_document'
def test_reduce_status():
    """Per-shard returned parameters are collected under '__results__' while
    client-supplied parameters pass through unchanged."""
    n_shards = 2
    flow = Flow(port_expose=exposed_port).add(
        uses=ExecutorStatus, name='pod0', shards=n_shards, polling='all'
    )
    with flow as f:
        da = DocumentArray([Document() for _ in range(5)])
        resp = Client(port=exposed_port).post(
            '/status', parameters={'foo': 'bar'}, inputs=da, return_results=True
        )
    assert resp[0].parameters['foo'] == 'bar'
    assert len(resp[0].parameters['__results__']) == n_shards
    for _, param in resp[0].parameters['__results__'].items():
        assert 'shard_id' in param.keys()
        assert 'happy_status' in param.keys()
    for doc in resp[0].docs:
        assert doc.text == 'exec-status'
| [
"jina.Client",
"jina.requests",
"numpy.zeros",
"jina.Flow",
"jina.Document",
"pytest.mark.parametrize"
] | [((1332, 1373), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_docs"""', '[3, 5]'], {}), "('n_docs', [3, 5])\n", (1355, 1373), False, 'import pytest\n'), ((2580, 2623), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_shards"""', '[3, 5]'], {}), "('n_shards', [3, 5])\n", (2603, 2623), False, 'import pytest\n'), ((2625, 2666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_docs"""', '[3, 5]'], {}), "('n_docs', [3, 5])\n", (2648, 2666), False, 'import pytest\n'), ((286, 308), 'jina.requests', 'requests', ([], {'on': '"""/search"""'}), "(on='/search')\n", (294, 308), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((1441, 1471), 'jina.Flow', 'Flow', ([], {'port_expose': 'exposed_port'}), '(port_expose=exposed_port)\n', (1445, 1471), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((2734, 2764), 'jina.Flow', 'Flow', ([], {'port_expose': 'exposed_port'}), '(port_expose=exposed_port)\n', (2738, 2764), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((3714, 3725), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3722, 3725), True, 'import numpy as np\n'), ((6682, 6712), 'jina.Flow', 'Flow', ([], {'port_expose': 'exposed_port'}), '(port_expose=exposed_port)\n', (6686, 6712), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((1298, 1326), 'jina.Document', 'Document', ([], {'id': '"""fake_document"""'}), "(id='fake_document')\n", (1306, 1326), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((1654, 1664), 'jina.Document', 'Document', ([], {}), '()\n', (1662, 1664), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((1700, 1725), 'jina.Client', 'Client', ([], {'port': 'exposed_port'}), '(port=exposed_port)\n', (1706, 1725), False, 'from jina import Document, DocumentArray, Flow, Executor, 
Client, requests\n'), ((2981, 2991), 'jina.Document', 'Document', ([], {}), '()\n', (2989, 2991), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((3027, 3052), 'jina.Client', 'Client', ([], {'port': 'exposed_port'}), '(port=exposed_port)\n', (3033, 3052), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((4408, 4418), 'jina.Document', 'Document', ([], {}), '()\n', (4416, 4418), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((4454, 4479), 'jina.Client', 'Client', ([], {'port': 'exposed_port'}), '(port=exposed_port)\n', (4460, 4479), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((5136, 5146), 'jina.Document', 'Document', ([], {}), '()\n', (5144, 5146), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((5182, 5207), 'jina.Client', 'Client', ([], {'port': 'exposed_port'}), '(port=exposed_port)\n', (5188, 5207), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((5766, 5776), 'jina.Document', 'Document', ([], {}), '()\n', (5774, 5776), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((5812, 5837), 'jina.Client', 'Client', ([], {'port': 'exposed_port'}), '(port=exposed_port)\n', (5818, 5837), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((6396, 6406), 'jina.Document', 'Document', ([], {}), '()\n', (6404, 6406), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((6442, 6467), 'jina.Client', 'Client', ([], {'port': 'exposed_port'}), '(port=exposed_port)\n', (6448, 6467), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((6846, 6856), 'jina.Document', 'Document', ([], {}), '()\n', (6854, 6856), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, 
requests\n'), ((6892, 6917), 'jina.Client', 'Client', ([], {'port': 'exposed_port'}), '(port=exposed_port)\n', (6898, 6917), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((457, 507), 'jina.Document', 'Document', ([], {'id': 'f"""m-{self.runtime_args.shard_id}-{i}"""'}), "(id=f'm-{self.runtime_args.shard_id}-{i}')\n", (465, 507), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((657, 707), 'jina.Document', 'Document', ([], {'id': 'f"""c-{self.runtime_args.shard_id}-{i}"""'}), "(id=f'c-{self.runtime_args.shard_id}-{i}')\n", (665, 707), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((4731, 4742), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4739, 4742), True, 'import numpy as np\n'), ((4104, 4134), 'jina.Flow', 'Flow', ([], {'port_expose': 'exposed_port'}), '(port_expose=exposed_port)\n', (4108, 4134), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((4804, 4834), 'jina.Flow', 'Flow', ([], {'port_expose': 'exposed_port'}), '(port_expose=exposed_port)\n', (4808, 4834), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((5435, 5465), 'jina.Flow', 'Flow', ([], {'port_expose': 'exposed_port'}), '(port_expose=exposed_port)\n', (5439, 5465), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n'), ((6072, 6102), 'jina.Flow', 'Flow', ([], {'port_expose': 'exposed_port'}), '(port_expose=exposed_port)\n', (6076, 6102), False, 'from jina import Document, DocumentArray, Flow, Executor, Client, requests\n')] |
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by <NAME> and <NAME>
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from matplotlib import pylab
import numpy as np
import scipy
from scipy.stats import norm, pearsonr
from utils import CHART_DIR
def _plot_correlation_func(x, y):
    """Scatter-plot y against x on the current axes, annotate the title with
    Pearson's r, and overlay a dashed least-squares line fit."""
    r, p = pearsonr(x, y)
    title = "Cor($X_1$, $X_2$) = %.3f" % r
    pylab.scatter(x, y)
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")
    # Use numpy directly: scipy.polyfit / scipy.poly1d were deprecated
    # numpy re-exports and have been removed from modern SciPy.
    f1 = np.poly1d(np.polyfit(x, y, 1))
    pylab.plot(x, f1(x), "r--", linewidth=2)
def plot_correlation_demo():
    """Render two 2x2 grids of scatter plots illustrating Pearson correlation
    for increasingly noisy linear and quadratic relationships, and save them
    as PNG files under ``CHART_DIR``.
    """
    np.random.seed(0)  # fixed seed so the generated charts are reproducible

    # First grid: linear relationship with growing noise, plus pure noise.
    pylab.clf()
    pylab.figure(num=None, figsize=(8, 8))
    x = np.arange(0, 10, 0.2)
    for subplot_id, noise_scale in ((221, .01), (222, .1), (223, 1)):
        pylab.subplot(subplot_id)
        y = 0.5 * x + norm.rvs(1, scale=noise_scale, size=len(x))
        _plot_correlation_func(x, y)
    pylab.subplot(224)
    y = norm.rvs(1, scale=10, size=len(x))  # no dependence on x at all
    _plot_correlation_func(x, y)
    pylab.autoscale(tight=True)
    pylab.grid(True)
    filename = "corr_demo_1.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")

    # Second grid: quadratic relationship with growing noise.
    pylab.clf()
    pylab.figure(num=None, figsize=(8, 8))
    x = np.arange(-5, 5, 0.2)
    for subplot_id, noise_scale in ((221, .01), (222, .1), (223, 1), (224, 10)):
        pylab.subplot(subplot_id)
        y = 0.5 * x ** 2 + norm.rvs(1, scale=noise_scale, size=len(x))
        _plot_correlation_func(x, y)
    pylab.autoscale(tight=True)
    pylab.grid(True)
    filename = "corr_demo_2.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_correlation_demo()
| [
"matplotlib.pylab.autoscale",
"scipy.polyfit",
"matplotlib.pylab.scatter",
"numpy.random.seed",
"matplotlib.pylab.subplot",
"os.path.join",
"matplotlib.pylab.title",
"matplotlib.pylab.clf",
"scipy.stats.pearsonr",
"numpy.arange",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.ylabel",
"matplot... | [((387, 401), 'scipy.stats.pearsonr', 'pearsonr', (['x', 'y'], {}), '(x, y)\n', (395, 401), False, 'from scipy.stats import norm, pearsonr\n'), ((449, 468), 'matplotlib.pylab.scatter', 'pylab.scatter', (['x', 'y'], {}), '(x, y)\n', (462, 468), False, 'from matplotlib import pylab\n'), ((473, 491), 'matplotlib.pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (484, 491), False, 'from matplotlib import pylab\n'), ((496, 517), 'matplotlib.pylab.xlabel', 'pylab.xlabel', (['"""$X_1$"""'], {}), "('$X_1$')\n", (508, 517), False, 'from matplotlib import pylab\n'), ((522, 543), 'matplotlib.pylab.ylabel', 'pylab.ylabel', (['"""$X_2$"""'], {}), "('$X_2$')\n", (534, 543), False, 'from matplotlib import pylab\n'), ((767, 784), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (781, 784), True, 'import numpy as np\n'), ((823, 834), 'matplotlib.pylab.clf', 'pylab.clf', ([], {}), '()\n', (832, 834), False, 'from matplotlib import pylab\n'), ((839, 877), 'matplotlib.pylab.figure', 'pylab.figure', ([], {'num': 'None', 'figsize': '(8, 8)'}), '(num=None, figsize=(8, 8))\n', (851, 877), False, 'from matplotlib import pylab\n'), ((887, 908), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.2)'], {}), '(0, 10, 0.2)\n', (896, 908), True, 'import numpy as np\n'), ((914, 932), 'matplotlib.pylab.subplot', 'pylab.subplot', (['(221)'], {}), '(221)\n', (927, 932), False, 'from matplotlib import pylab\n'), ((1025, 1043), 'matplotlib.pylab.subplot', 'pylab.subplot', (['(222)'], {}), '(222)\n', (1038, 1043), False, 'from matplotlib import pylab\n'), ((1135, 1153), 'matplotlib.pylab.subplot', 'pylab.subplot', (['(223)'], {}), '(223)\n', (1148, 1153), False, 'from matplotlib import pylab\n'), ((1244, 1262), 'matplotlib.pylab.subplot', 'pylab.subplot', (['(224)'], {}), '(224)\n', (1257, 1262), False, 'from matplotlib import pylab\n'), ((1344, 1371), 'matplotlib.pylab.autoscale', 'pylab.autoscale', ([], {'tight': '(True)'}), '(tight=True)\n', (1359, 1371), False, 
'from matplotlib import pylab\n'), ((1376, 1392), 'matplotlib.pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (1386, 1392), False, 'from matplotlib import pylab\n'), ((1506, 1517), 'matplotlib.pylab.clf', 'pylab.clf', ([], {}), '()\n', (1515, 1517), False, 'from matplotlib import pylab\n'), ((1522, 1560), 'matplotlib.pylab.figure', 'pylab.figure', ([], {'num': 'None', 'figsize': '(8, 8)'}), '(num=None, figsize=(8, 8))\n', (1534, 1560), False, 'from matplotlib import pylab\n'), ((1570, 1591), 'numpy.arange', 'np.arange', (['(-5)', '(5)', '(0.2)'], {}), '(-5, 5, 0.2)\n', (1579, 1591), True, 'import numpy as np\n'), ((1597, 1615), 'matplotlib.pylab.subplot', 'pylab.subplot', (['(221)'], {}), '(221)\n', (1610, 1615), False, 'from matplotlib import pylab\n'), ((1713, 1731), 'matplotlib.pylab.subplot', 'pylab.subplot', (['(222)'], {}), '(222)\n', (1726, 1731), False, 'from matplotlib import pylab\n'), ((1828, 1846), 'matplotlib.pylab.subplot', 'pylab.subplot', (['(223)'], {}), '(223)\n', (1841, 1846), False, 'from matplotlib import pylab\n'), ((1942, 1960), 'matplotlib.pylab.subplot', 'pylab.subplot', (['(224)'], {}), '(224)\n', (1955, 1960), False, 'from matplotlib import pylab\n'), ((2057, 2084), 'matplotlib.pylab.autoscale', 'pylab.autoscale', ([], {'tight': '(True)'}), '(tight=True)\n', (2072, 2084), False, 'from matplotlib import pylab\n'), ((2089, 2105), 'matplotlib.pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (2099, 2105), False, 'from matplotlib import pylab\n'), ((567, 589), 'scipy.polyfit', 'scipy.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (580, 589), False, 'import scipy\n'), ((1445, 1478), 'os.path.join', 'os.path.join', (['CHART_DIR', 'filename'], {}), '(CHART_DIR, filename)\n', (1457, 1478), False, 'import os\n'), ((2158, 2191), 'os.path.join', 'os.path.join', (['CHART_DIR', 'filename'], {}), '(CHART_DIR, filename)\n', (2170, 2191), False, 'import os\n')] |
"""
Showcases *Prismatic* colourspace computations.
"""
import numpy as np
import colour
from colour.utilities import message_box
message_box('"Prismatic" Colourspace Computations')
RGB = np.array([0.25, 0.50, 0.75])
message_box(
f'Converting from the "RGB" colourspace to the "Prismatic" colourspace '
f'given "RGB" values:\n\n\t{RGB}'
)
print(colour.RGB_to_Prismatic(RGB))
print("\n")
Lrgb = np.array([0.7500000, 0.1666667, 0.3333333, 0.5000000])
message_box(
f'Converting from the "Prismatic" colourspace to the "RGB" colourspace '
f'given "Lrgb" values:\n\n\t{Lrgb}'
)
print(colour.Prismatic_to_RGB(Lrgb))
print("\n")
message_box(
f'Applying 50% desaturation in the "Prismatic" colourspace to the given '
f'"RGB" values:\n\n\t{RGB}'
)
saturation = 0.5
Lrgb = colour.RGB_to_Prismatic(RGB)
Lrgb[..., 1:] = 1.0 / 3.0 + saturation * (Lrgb[..., 1:] - 1.0 / 3.0)
print(colour.Prismatic_to_RGB(Lrgb))
| [
"colour.Prismatic_to_RGB",
"colour.utilities.message_box",
"numpy.array",
"colour.RGB_to_Prismatic"
] | [((133, 184), 'colour.utilities.message_box', 'message_box', (['""""Prismatic" Colourspace Computations"""'], {}), '(\'"Prismatic" Colourspace Computations\')\n', (144, 184), False, 'from colour.utilities import message_box\n'), ((192, 219), 'numpy.array', 'np.array', (['[0.25, 0.5, 0.75]'], {}), '([0.25, 0.5, 0.75])\n', (200, 219), True, 'import numpy as np\n'), ((221, 347), 'colour.utilities.message_box', 'message_box', (['f"""Converting from the "RGB" colourspace to the "Prismatic" colourspace given "RGB" values:\n\n\t{RGB}"""'], {}), '(\n f"""Converting from the "RGB" colourspace to the "Prismatic" colourspace given "RGB" values:\n\n\t{RGB}"""\n )\n', (232, 347), False, 'from colour.utilities import message_box\n'), ((408, 451), 'numpy.array', 'np.array', (['[0.75, 0.1666667, 0.3333333, 0.5]'], {}), '([0.75, 0.1666667, 0.3333333, 0.5])\n', (416, 451), True, 'import numpy as np\n'), ((463, 591), 'colour.utilities.message_box', 'message_box', (['f"""Converting from the "Prismatic" colourspace to the "RGB" colourspace given "Lrgb" values:\n\n\t{Lrgb}"""'], {}), '(\n f"""Converting from the "Prismatic" colourspace to the "RGB" colourspace given "Lrgb" values:\n\n\t{Lrgb}"""\n )\n', (474, 591), False, 'from colour.utilities import message_box\n'), ((646, 767), 'colour.utilities.message_box', 'message_box', (['f"""Applying 50% desaturation in the "Prismatic" colourspace to the given "RGB" values:\n\n\t{RGB}"""'], {}), '(\n f"""Applying 50% desaturation in the "Prismatic" colourspace to the given "RGB" values:\n\n\t{RGB}"""\n )\n', (657, 767), False, 'from colour.utilities import message_box\n'), ((795, 823), 'colour.RGB_to_Prismatic', 'colour.RGB_to_Prismatic', (['RGB'], {}), '(RGB)\n', (818, 823), False, 'import colour\n'), ((357, 385), 'colour.RGB_to_Prismatic', 'colour.RGB_to_Prismatic', (['RGB'], {}), '(RGB)\n', (380, 385), False, 'import colour\n'), ((601, 630), 'colour.Prismatic_to_RGB', 'colour.Prismatic_to_RGB', (['Lrgb'], {}), '(Lrgb)\n', (624, 630), 
False, 'import colour\n'), ((899, 928), 'colour.Prismatic_to_RGB', 'colour.Prismatic_to_RGB', (['Lrgb'], {}), '(Lrgb)\n', (922, 928), False, 'import colour\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split as tts
from matplotlib import pyplot as plt
from zipfile import ZipFile
import gensim
from gensim.models import FastText, Word2Vec
from sklearn import svm
from sklearn.manifold import TSNE
import sys
# VECTOR_LENGTH = 100
# #see results variable in tenFiles.py on Aman's laptop
# tenFileData = np.load("tenFiles.npy", allow_pickle=True)
# def text_preprocessing(phrase):
# phrase = phrase.lower()
# phrase = phrase.replace('&', ' ')
# phrase = phrase.replace('-', '')
# phrase = phrase.replace(',',' ')
# phrase = phrase.replace('.',' ')
# phrase = phrase.replace('/',' ')
# phrase = phrase.replace('(',' ')
# phrase = phrase.replace(')',' ')
# phrase = phrase.replace('[',' ')
# phrase = phrase.replace(']',' ')
# phrase = phrase.replace(':',' ')
# phrase = phrase.replace(';',' ')
# sentence = phrase.split(' ')
# return sentence
# #patient sentence arrays holds each of the sentences, seperated by patiend
# #master sentence arrays holds all of the patients and loses information
# # about which patient the sentence corresponds to
# #master sentence used only for training the word2vec model
# #patientConcatenatedSentences contains joined versions of each of the sentences in patient sentence array
# #same for masterConcatenatedSentences
# patientSentenceArrays = []
# masterSentences = []
# masterConcatenatedSentences = []
# patientContatenatedSentences = []
# labelArray = []
# for sentenceArray, label in tenFileData:
# curPat = []
# curPatConc = []
# for sentence in sentenceArray:
# concatSentence = ""
# for word in sentence:
# concatSentence += word + " "
# concatSentence = text_preprocessing(concatSentence)
# if not sentence == []:
# # concatSentence = ""
# masterConcatenatedSentences.append(concatSentence)
# masterSentences.append(sentence)
# curPatConc.append(concatSentence)
# curPat.append(sentence)
# patientSentenceArrays.append(curPat)
# patientContatenatedSentences.append(curPatConc)
# labelArray.append(label)
# # concat_word2Vec = Word2Vec(masterConcatenatedSentences, size= VECTOR_LENGTH, window=2, min_count=1, workers=2, sg=1)
# non_concat_word2vec = Word2Vec(masterSentences, size= VECTOR_LENGTH, window=2, min_count=1, workers=2, sg=1)
# def tsne_plot(model):
# "Creates and TSNE model and plots it"
# labels = []
# tokens = []
# j = 0
# for word in model.wv.vocab:
# j += 1
# # if j < 400:
# # continue
# tokens.append(model[word])
# labels.append(word)
# tsne_model = TSNE(perplexity= 100, n_components=2, init='pca', n_iter=2500)
# new_values = tsne_model.fit_transform(tokens)
# x = []
# y = []
# for value in new_values:
# x.append(value[0])
# y.append(value[1])
# plt.figure(figsize=(16, 16))
# for i in range(len(x)):
# plt.scatter(x[i],y[i])
# plt.annotate(labels[i][0:10],
# xy=(x[i], y[i]),
# xytext=(5, 2),
# textcoords='offset points',
# ha='right',
# va='bottom')
# plt.show()
# #takes each patient's sentences and turns them into one of two things: a matrix and a long vector (flattened matrix)
# def patientToMatrix(patientSentences, w2vmodel):
# matrix = []
# flattenedVector = []
# for s in patientSentences:
# matrixInsert = torch.zeros(VECTOR_LENGTH)
# if len(s) == 0:
# matrix.append(matrixInsert)
# for el in matrixInsert:
# flattenedVector.append(el)
# else:
# # print(s)
# for word in s:
# # print(word)
# matrixInsert += torch.Tensor(w2vmodel[word])
# matrixInsert /= len(s)
# for el in matrixInsert:
# flattenedVector.append(el)
# matrix.append(matrixInsert)
# return (torch.stack(matrix), torch.tensor(flattenedVector))
# #takes a dataset of patients and returns a 3D matrix with numerical values, and a reduced dimension 2D matrix with the same numerical values
# def modelReadyPatientData(patientSentences, w2vmodel):
# # w2vmodel.wv.vocab
# patientMatrices = []
# flattenedPatients = []
# for patient in patientSentences:
# patMat, patVec = patientToMatrix(patient, w2vmodel)
# patientMatrices.append(patMat)
# flattenedPatients.append(patVec)
# return(torch.stack(patientMatrices), torch.stack(flattenedPatients))
# print("started matrix conversion")
# nonConcatPatientMatrices, nonConcatPatientVectors = modelReadyPatientData(patientSentenceArrays, non_concat_word2vec)
# trainX, testX, trainY, testY = tts(nonConcatPatientVectors, labelArray)
# supportVectorMachine = svm.LinearSVC()
# supportVectorMachine.fit(trainX, trainY)
# print("SVM Score: ", supportVectorMachine.score(testX, testY))
# trainX, testX, trainY, testY = tts(nonConcatPatientMatrices, labelArray)
# --- Data loading -----------------------------------------------------------
# argv[1]: directory containing one CSV per patient; argv[2]: label column.
os.chdir(sys.argv[1])
files = os.listdir()
data = []
for f in files:
    try:
        data.append(pd.read_csv(f))
    except:
        # NOTE(review): bare except silently skips any unreadable file.
        continue
os.chdir("../")
# Two non-feature columns are dropped below ('Unnamed: 0' and the label).
numColumns = len(data[0].columns) - 2
train_array = []
svmArray = []
label_array = []
for df in data:
    # The label is read from the first row of the label column.
    label = df[sys.argv[2]][0]
    df = df.drop(columns = ['Unnamed: 0', sys.argv[2]])
    # print(df.values)
    matrix = torch.Tensor(df.values)
    train_array.append(matrix)
    # Flattened copy of the same features for the SVM baseline.
    svmArray.append(df.values.reshape(-1))
    label_array.append(label)
# Stratified 70/30 splits. NOTE(review): the tensor split and the SVM split
# are drawn by two independent calls, so they shuffle patients differently.
trainX, testX, trainY, testY = tts(train_array, label_array, test_size = 0.3, stratify = label_array)
trainXSvm, testXSvm, trainYSvm, testYSvm = tts(svmArray, label_array, test_size = 0.3, stratify = label_array)
train_set = []
for i in range(len(trainX)):
    # print(trainX[i], trainY[i])
    # Reshape each patient to (channels, numColumns, 12) for the networks.
    train_set.append((trainX[i].reshape(-1, numColumns, 12), trainY[i]))
    # train_set.append((train_array[i], label_array[i]))
test_set = []
for i in range(len(testX)):
    # print(trainX[i], trainY[i])
    # NOTE(review): only the first 24 rows of each test patient are kept —
    # presumably to force a fixed-size input; confirm against the data.
    test_set.append((testX[i][0:24].reshape(-1, numColumns, 12), testY[i]))
    # train_set.append((train_array[i], label_array[i]))
BATCH_SIZE = 10
train_set = torch.utils.data.DataLoader(train_set, batch_size = BATCH_SIZE, shuffle = True)
test_set = torch.utils.data.DataLoader(test_set, batch_size = BATCH_SIZE, shuffle = True)
# Linear-SVM baseline on the flattened features.
baseline = svm.LinearSVC()
baseline.fit(trainXSvm, trainYSvm)
print("SVM score:", baseline.score(testXSvm, testYSvm))
class FeedForward(nn.Module):
    """Three-layer fully-connected classifier.

    The input is flattened to (batch, input_size), passed through two
    ReLU-activated hidden layers, and projected to ``num_classes`` logits.
    """

    def __init__(self, input_size, hidden1_size, hidden2_size, num_classes):
        super(FeedForward, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden1_size)
        self.ReLU1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden1_size, hidden2_size)
        self.ReLU2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden2_size, num_classes)

    def forward(self, x):
        """Flatten ``x`` per sample and return raw class logits."""
        flat = x.view(-1, self.num_flat_features(x))
        hidden = self.ReLU1(self.fc1(flat))
        hidden = self.ReLU2(self.fc2(hidden))
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Number of elements per sample, i.e. the product of all
        dimensions except the batch dimension."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
# --- Feed-forward network training ------------------------------------------
FFN = FeedForward(numColumns * 12, 128, 64, 2)
optimizer = optim.SGD(FFN.parameters(), lr=0.1, momentum=0.5)
loss_f = nn.CrossEntropyLoss()
lossX = np.arange(100)
lossY = []
testingY = []
FFN.train()
numEpoch = 1
for i in range(100):
    # NOTE(review): cum_loss is never updated (the increment below is
    # commented out), so lossY only ever records zeros.
    cum_loss = 0
    epoch_loss = []
    loss_per_batch = []
    for inputs, labels in train_set:
        # print(inputs.shape)
        # print(labels)
        # break
        optimizer.zero_grad()
        outputs = FFN(inputs)
        # print(outputs[0])
        # print(outputs)
        # print(outputs.squeeze(2))
        loss = loss_f(outputs, labels.long())
        # loss.requires_grad = True
        loss.backward()
        optimizer.step()
        # cum_loss += loss.data.item()
        loss_per_batch.append(loss.data.item())
    #add min, max, avg, std of loss
    # print("batch loss avg")
    loss_per_batch = torch.Tensor(loss_per_batch)
    # Per-epoch loss statistics, scaled by BATCH_SIZE.
    print("Epoch", numEpoch, "Loss avg: ", torch.mean(loss_per_batch) / BATCH_SIZE)
    print("Epoch", numEpoch, "Loss std: ", (torch.std(loss_per_batch) / BATCH_SIZE) ** 0.5)
    print("Epoch", numEpoch, "Loss min: ", torch.min(loss_per_batch) / BATCH_SIZE)
    print("Epoch", numEpoch, "Loss max: ", torch.max(loss_per_batch) / BATCH_SIZE)
    with torch.no_grad():
        total = 0
        correct = 0
        # NOTE(review): the inner loop variable i shadows the epoch index.
        for inputs, labels in test_set:
            out = FFN(inputs)
            for i in range(len(out)):
                prediction = out[i].argmax().item()
                # print(prediction)
                total += 1
                if prediction == labels[i]:
                    # print("Correct: ", prediction)
                    correct += 1
        accuracy = correct / total
        print("Epoch", numEpoch, "Test Accuracy: ", accuracy)
        testingY.append(accuracy)
        # NOTE(review): total/correct are not reset before this pass, so the
        # "Train Accuracy" below is computed over combined test+train counts.
        for inputs, labels in train_set:
            out = FFN(inputs)
            for i in range(len(out)):
                prediction = out[i].argmax().item()
                # print(prediction)
                total += 1
                if prediction == labels[i]:
                    # print("Correct: ", prediction)
                    correct += 1
        accuracy = correct / total
        print("Epoch", numEpoch, "Train Accuracy: ", accuracy)
    lossY.append(cum_loss)
    numEpoch += 1
class CNN(nn.Module):
    """Small convolutional classifier for single-channel feature matrices.

    Two convolution layers (the first collapses most of the feature axis),
    average pooling, then three fully-connected layers ending in a
    two-class sigmoid output.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Kernel heights span the feature axis; widths slide over columns.
        self.conv1 = nn.Conv2d(1, 6, (numColumns - 1, 4))
        self.conv2 = nn.Conv2d(6, 16, (1, 7))
        self.fc1 = nn.Linear(240, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 2)

    def forward(self, x):
        """Run the conv/pool stack, flatten, and classify."""
        x = self.conv1(x)
        x = self.conv2(torch.relu(x))
        x = F.avg_pool2d(torch.relu(x), (2, 1))
        flat = x.view(-1, self.num_flat_features(x))
        flat = torch.relu(self.fc1(flat))
        flat = torch.relu(self.fc2(flat))
        return torch.sigmoid(self.fc3(flat))

    def num_flat_features(self, x):
        """Number of elements per sample (all dimensions except batch)."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
# --- CNN training ------------------------------------------------------------
CoNN = CNN()
optimizer = optim.SGD(CoNN.parameters(), lr=0.1, momentum=0.5)
loss_f = nn.CrossEntropyLoss()
lossX = np.arange(100)
lossY = []
testingY = []
CoNN.train()
numEpoch = 1
for i in range(100):
    # cum_loss = 0
    loss_per_batch = []
    for inputs, labels in train_set:
        # print(inputs.shape)
        # print(labels)
        # break
        optimizer.zero_grad()
        outputs = CoNN(inputs)
        # print(outputs[0])
        # print(outputs)
        # print(outputs.squeeze(2))
        loss = loss_f(outputs, labels.long())
        # loss.requires_grad = True
        loss.backward()
        optimizer.step()
        loss_per_batch.append(loss.data.item())
    loss_per_batch = torch.tensor(loss_per_batch)
    # Per-epoch loss statistics, scaled by BATCH_SIZE.
    print("Epoch", numEpoch, "Loss avg: ", torch.mean(loss_per_batch) / BATCH_SIZE)
    print("Epoch", numEpoch, "Loss std: ", (torch.std(loss_per_batch) / BATCH_SIZE) ** 0.5)
    print("Epoch", numEpoch, "Loss min: ", torch.min(loss_per_batch) / BATCH_SIZE)
    print("Epoch", numEpoch, "Loss max: ", torch.max(loss_per_batch) / BATCH_SIZE)
    with torch.no_grad():
        total = 0
        correct = 0
        for inputs, labels in test_set:
            out = CoNN(inputs)
            for i in range(len(out)):
                prediction = out[i].argmax().item()
                # print(prediction)
                total += 1
                if prediction == labels[i]:
                    # print("Correct: ", prediction)
                    correct += 1
        accuracy = correct / total
        print("Epoch", numEpoch, "Test Accuracy: ", accuracy)
        testingY.append(accuracy)
        # NOTE(review): total/correct are not reset before this pass, so the
        # "Train Accuracy" below is computed over combined test+train counts.
        for inputs, labels in train_set:
            out = CoNN(inputs)
            for i in range(len(out)):
                prediction = out[i].argmax().item()
                # print(prediction)
                total += 1
                if prediction == labels[i]:
                    # print("Correct: ", prediction)
                    correct += 1
        accuracy = correct / total
        print("Epoch", numEpoch, "Train Accuracy: ", accuracy)
    # testingY.append(accuracy)
    # NOTE(review): cum_loss is stale here — it is left over from the FFN
    # loop above (its increment there is also commented out) and is never
    # updated inside this loop.
    lossY.append(cum_loss)
    numEpoch += 1
    # break
"torch.relu",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"torch.std",
"numpy.arange",
"torch.no_grad",
"os.chdir",
"torch.utils.data.DataLoader",
"torch.Tensor",
"torch.nn.Linear",
"sklearn.svm.LinearSVC",
"torch.mean",
"torch.nn.Conv2d",
"torch.max",
"torch.min",
"... | [((5302, 5323), 'os.chdir', 'os.chdir', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (5310, 5323), False, 'import os\n'), ((5335, 5347), 'os.listdir', 'os.listdir', ([], {}), '()\n', (5345, 5347), False, 'import os\n'), ((5445, 5460), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (5453, 5460), False, 'import os\n'), ((5850, 5916), 'sklearn.model_selection.train_test_split', 'tts', (['train_array', 'label_array'], {'test_size': '(0.3)', 'stratify': 'label_array'}), '(train_array, label_array, test_size=0.3, stratify=label_array)\n', (5853, 5916), True, 'from sklearn.model_selection import train_test_split as tts\n'), ((5965, 6028), 'sklearn.model_selection.train_test_split', 'tts', (['svmArray', 'label_array'], {'test_size': '(0.3)', 'stratify': 'label_array'}), '(svmArray, label_array, test_size=0.3, stratify=label_array)\n', (5968, 6028), True, 'from sklearn.model_selection import train_test_split as tts\n'), ((6482, 6557), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(train_set, batch_size=BATCH_SIZE, shuffle=True)\n', (6509, 6557), False, 'import torch\n'), ((6574, 6648), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_set'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(test_set, batch_size=BATCH_SIZE, shuffle=True)\n', (6601, 6648), False, 'import torch\n'), ((6667, 6682), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {}), '()\n', (6680, 6682), False, 'from sklearn import svm\n'), ((8048, 8069), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8067, 8069), True, 'import torch.nn as nn\n'), ((8081, 8095), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (8090, 8095), True, 'import numpy as np\n'), ((11187, 11208), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (11206, 11208), True, 'import torch.nn as nn\n'), ((11224, 11238), 'numpy.arange', 'np.arange', (['(100)'], {}), 
'(100)\n', (11233, 11238), True, 'import numpy as np\n'), ((5691, 5714), 'torch.Tensor', 'torch.Tensor', (['df.values'], {}), '(df.values)\n', (5703, 5714), False, 'import torch\n'), ((8749, 8777), 'torch.Tensor', 'torch.Tensor', (['loss_per_batch'], {}), '(loss_per_batch)\n', (8761, 8777), False, 'import torch\n'), ((11773, 11801), 'torch.tensor', 'torch.tensor', (['loss_per_batch'], {}), '(loss_per_batch)\n', (11785, 11801), False, 'import torch\n'), ((6951, 6986), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden1_size'], {}), '(input_size, hidden1_size)\n', (6960, 6986), True, 'import torch.nn as nn\n'), ((7009, 7018), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7016, 7018), True, 'import torch.nn as nn\n'), ((7039, 7076), 'torch.nn.Linear', 'nn.Linear', (['hidden1_size', 'hidden2_size'], {}), '(hidden1_size, hidden2_size)\n', (7048, 7076), True, 'import torch.nn as nn\n'), ((7099, 7108), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7106, 7108), True, 'import torch.nn as nn\n'), ((7129, 7165), 'torch.nn.Linear', 'nn.Linear', (['hidden2_size', 'num_classes'], {}), '(hidden2_size, num_classes)\n', (7138, 7165), True, 'import torch.nn as nn\n'), ((9124, 9139), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9137, 9139), False, 'import torch\n'), ((10122, 10158), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(6)', '(numColumns - 1, 4)'], {}), '(1, 6, (numColumns - 1, 4))\n', (10131, 10158), True, 'import torch.nn as nn\n'), ((10176, 10200), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(1, 7)'], {}), '(6, 16, (1, 7))\n', (10185, 10200), True, 'import torch.nn as nn\n'), ((10299, 10318), 'torch.nn.Linear', 'nn.Linear', (['(240)', '(120)'], {}), '(240, 120)\n', (10308, 10318), True, 'import torch.nn as nn\n'), ((10335, 10353), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (10344, 10353), True, 'import torch.nn as nn\n'), ((10370, 10386), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(2)'], {}), '(84, 2)\n', (10379, 10386), True, 
'import torch.nn as nn\n'), ((12148, 12163), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12161, 12163), False, 'import torch\n'), ((5403, 5417), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (5414, 5417), True, 'import pandas as pd\n'), ((8820, 8846), 'torch.mean', 'torch.mean', (['loss_per_batch'], {}), '(loss_per_batch)\n', (8830, 8846), False, 'import torch\n'), ((8994, 9019), 'torch.min', 'torch.min', (['loss_per_batch'], {}), '(loss_per_batch)\n', (9003, 9019), False, 'import torch\n'), ((9076, 9101), 'torch.max', 'torch.max', (['loss_per_batch'], {}), '(loss_per_batch)\n', (9085, 9101), False, 'import torch\n'), ((10535, 10548), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (10545, 10548), False, 'import torch\n'), ((10572, 10585), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (10582, 10585), False, 'import torch\n'), ((11844, 11870), 'torch.mean', 'torch.mean', (['loss_per_batch'], {}), '(loss_per_batch)\n', (11854, 11870), False, 'import torch\n'), ((12018, 12043), 'torch.min', 'torch.min', (['loss_per_batch'], {}), '(loss_per_batch)\n', (12027, 12043), False, 'import torch\n'), ((12100, 12125), 'torch.max', 'torch.max', (['loss_per_batch'], {}), '(loss_per_batch)\n', (12109, 12125), False, 'import torch\n'), ((8904, 8929), 'torch.std', 'torch.std', (['loss_per_batch'], {}), '(loss_per_batch)\n', (8913, 8929), False, 'import torch\n'), ((11928, 11953), 'torch.std', 'torch.std', (['loss_per_batch'], {}), '(loss_per_batch)\n', (11937, 11953), False, 'import torch\n')] |
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Function
from torch.nn import BCELoss, MSELoss, CrossEntropyLoss
import numpy as np
from torch.autograd import Variable
def make_one_hot(labels, classes):
    """Convert an integer label map to a one-hot float tensor.

    :param labels: LongTensor of class indices shaped (N, 1, H, W) or
        (N, 1, D, H, W).
    :param classes: number of classes C in the encoding.
    :return: float tensor shaped (N, C, H, W) or (N, C, D, H, W) with a 1
        at each labelled class position and 0 elsewhere.
    :raises ValueError: if ``labels`` is not 4-D or 5-D (the original code
        raised an opaque NameError in that case).
    """
    if labels.dim() not in (4, 5):
        raise ValueError(
            "labels must be 4D (N,1,H,W) or 5D (N,1,D,H,W), got {}D".format(labels.dim())
        )
    # Allocate on the same device as `labels` instead of hard-coding CUDA,
    # so the function also works on CPU tensors (and on the correct GPU).
    shape = (labels.size(0), classes) + tuple(labels.size()[2:])
    one_hot = torch.zeros(shape, dtype=torch.float, device=labels.device)
    target = one_hot.scatter_(1, labels.data, 1)
    return target
class BCELoss(nn.Module):
    """Binary cross-entropy over flattened predictions and targets.

    Note: this deliberately shadows the ``BCELoss`` imported from
    ``torch.nn`` at the top of the file; it is kept under the same name
    for existing call sites.
    """

    def __init__(self):
        super().__init__()
        self.loss_fn = nn.BCELoss()

    def forward(self, model_output, data_input):
        predictions = model_output.view(-1)
        targets = data_input.float().view(-1)
        return self.loss_fn(predictions, targets)
class SoftDiceLoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(SoftDiceLoss, self).__init__()
def forward(self, logits, targets):
num = targets.size(0)
probs = F.sigmoid(logits)
m1 = probs.view(num, -1)
m2 = targets.view(num, -1)
intersection = (m1 * m2)
score = 2. * (intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)
score = 1 - score.sum() / num
return score
class WeightedBCEWithLogitsLoss(nn.Module):
    """Numerically stable BCE-with-logits whose per-element loss can be
    re-weighted as ``alpha * loss + beta * loss * weight``."""

    def __init__(self, size_average=True):
        super(WeightedBCEWithLogitsLoss, self).__init__()
        self.size_average = size_average

    def weighted(self, input, target, weight, alpha, beta):
        """Compute the (optionally re-weighted) stable BCE-with-logits."""
        if target.size() != input.size():
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
        # log-sum-exp formulation of BCE-with-logits, stable for large |input|.
        max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
        if weight is not None:
            loss = alpha * loss + beta * loss * weight
        return loss.mean() if self.size_average else loss.sum()

    def forward(self, input, target, weight, alpha, beta):
        # `weighted` already handles weight=None, so a single call suffices.
        return self.weighted(input, target, weight, alpha, beta)
class CrossEntropy2d(nn.Module):
    """Focal-style 2-D cross-entropy over per-pixel class predictions.

    Despite the name, the forward pass applies the focal modulating factor
    ``(1 - p)**gamma`` to the negative log-probabilities of the target
    classes, ignoring pixels labelled ``ignore_label`` or negative.
    NOTE(review): ``self.alpha`` is stored but never used in ``forward``.
    """
    def __init__(self, class_num, alpha=None, gamma=2, size_average=True, ignore_label=255):
        super(CrossEntropy2d, self).__init__()
        # Per-class weights; defaults to all-ones. Wrapped in Variable for
        # legacy (pre-0.4) PyTorch compatibility.
        if alpha is None:
            self.alpha = Variable(torch.ones(class_num, 1))
        else:
            if isinstance(alpha, Variable):
                self.alpha = alpha
            else:
                self.alpha = Variable(alpha)
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average
        self.ignore_label = ignore_label
    def forward(self, predict, target):
        # predict: (N, C, H, W) raw scores; target: (N, H, W) class indices.
        N, C, H, W = predict.size()
        sm = nn.Softmax2d()
        P = sm(predict)
        # Clamp to avoid log(0) below.
        P = torch.clamp(P, min=1e-9, max=1 - (1e-9))
        # Keep only valid pixels (non-negative and not the ignore label).
        target_mask = (target >= 0) * (target != self.ignore_label)
        target = target[target_mask].view(1, -1)
        # Gather the C probabilities of every kept pixel into a (C, M) matrix,
        # then pick each pixel's target-class probability along dim 0.
        predict = P[target_mask.view(N, 1, H, W).repeat(1, C, 1, 1)].view(C, -1)
        probs = torch.gather(predict, dim=0, index=target)
        log_p = probs.log()
        # Focal modulation: down-weight well-classified (high-prob) pixels.
        batch_loss = -(torch.pow((1 - probs), self.gamma)) * log_p
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss
class EntropyLoss(nn.Module):
    """Mean normalised Shannon entropy of a per-pixel probability map."""

    def __init__(self):
        super(EntropyLoss, self).__init__()

    def forward(self, v):
        """
        Entropy loss for probabilistic prediction vectors
        input: batch_size x channels x h x w
        output: scalar mean entropy, normalised by log2(channels)
        """
        assert v.dim() == 4
        n, c, h, w = v.size()
        # 1e-30 guards log2(0); dividing by log2(c) scales per-pixel
        # entropy into [0, 1].
        entropy_sum = torch.sum(v * torch.log2(v + 1e-30))
        return -entropy_sum / (n * h * w * np.log2(c))
class EMLoss(nn.Module):
    """Negated critic gap used for Wasserstein (earth-mover) training."""

    def __init__(self):
        super(EMLoss, self).__init__()

    def forward(self, d_real, d_fake):
        """
        Wasserstein distance estimate from critic outputs.
        input: batch_size x channels x h x w
        output: scalar  E[d_fake] - E[d_real]
        """
        assert d_real.dim() == 4
        # The critic maximises E[d_real] - E[d_fake]; the loss is its negation.
        return torch.mean(d_fake) - torch.mean(d_real)
class DiceLoss(nn.Module):
    """Multi-class Dice loss: softmax the logits, one-hot the targets
    (via the module-level ``make_one_hot``), and compute 1 - Dice over the
    flattened tensors with additive smoothing."""
    def __init__(self, smooth=1., ignore_index=255):
        super(DiceLoss, self).__init__()
        self.ignore_index = ignore_index  # label value excluded from scoring
        self.smooth = smooth  # additive smoothing to avoid division by zero
    def forward(self, output, target):
        # Remap ignored pixels to the minimum label so one-hot encoding
        # succeeds. NOTE(review): this mutates `target` in place, and
        # range(min, max) excludes max — verify the intended semantics.
        if self.ignore_index not in range(target.min(), target.max()):
            if (target == self.ignore_index).sum() > 0:
                target[target == self.ignore_index] = target.min()
        target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])
        output = F.softmax(output, dim=1)
        output_flat = output.contiguous().view(-1)
        target_flat = target.contiguous().view(-1)
        intersection = (output_flat * target_flat).sum()
        loss = 1 - ((2. * intersection + self.smooth) /
                    (output_flat.sum() + target_flat.sum() + self.smooth))
        return loss
class FocalLoss(nn.Module):
    """Focal loss layered on top of cross-entropy: easy examples are
    down-weighted by the modulating factor ``(1 - pt) ** gamma``."""

    def __init__(self, gamma=2, alpha=None, ignore_index=255, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.size_average = size_average
        self.CE_loss = nn.CrossEntropyLoss(ignore_index=ignore_index, weight=alpha)

    def forward(self, output, target):
        ce = self.CE_loss(output, target)
        pt = torch.exp(-ce)  # probability proxy recovered from the CE value
        focal = (1 - pt) ** self.gamma * ce
        return focal.mean() if self.size_average else focal.sum()
class KLDivLossWithLogits(nn.Module):
    """KL divergence between two logit tensors.

    ``softmax(logits2)`` is treated as the target distribution and
    ``log_softmax(logits1)`` as the prediction, matching the input
    contract of ``nn.KLDivLoss``.

    :param reduction: passed through to ``nn.KLDivLoss``
        ('none' | 'mean' | 'sum' | 'batchmean').
    """

    def __init__(self, reduction='mean'):
        # Bug fix: nn.Module.__init__ takes no arguments, so the original
        # ``super().__init__(reduction)`` raised a TypeError on
        # instantiation. The reduction belongs to nn.KLDivLoss instead.
        super().__init__()
        self.kl_div_loss = nn.KLDivLoss(reduction=reduction)

    def forward(self, logits1, logits2):
        return self.kl_div_loss(F.log_softmax(logits1, dim=1), F.softmax(logits2, dim=1))
class Multi_scale_DC_FC_loss(nn.Module):
    """Sum of (Dice + focal) losses over every scale of a multi-scale
    network output, all scored against the same target."""

    def __init__(self):
        super(Multi_scale_DC_FC_loss, self).__init__()
        self.ce = FocalLoss()
        self.dc = DiceLoss()

    def forward(self, net_output, target):
        total = 0
        for scale_output in net_output:
            total += self.dc(scale_output, target) + self.ce(scale_output, target)
        return total
class DC_and_FC_loss(nn.Module):
    """Compound loss: Dice loss plus focal loss on a single output."""

    def __init__(self):
        super(DC_and_FC_loss, self).__init__()
        self.ce = FocalLoss()
        self.dc = DiceLoss()

    def forward(self, net_output, target):
        focal_term = self.ce(net_output, target)
        dice_term = self.dc(net_output, target)
        return focal_term + dice_term
class BinaryFocalLoss(nn.Module):
    """
    This is a implementation of Focal Loss with smooth label cross entropy supported which is proposed in
    'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)'
        Focal_Loss= -1*alpha*(1-pt)*log(pt)
    :param alpha: per-class weights -- a 2-element sequence [alpha_pos, alpha_neg],
        a scalar a (expanded to [a, 1 - a]), or None (defaults to [0.25, 0.75])
    :param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
                  focus on hard misclassified example
    :param ignore_index: kept for API compatibility (not used in forward)
    :param reduction: `none`|`mean`|`sum`
    """
    def __init__(self, alpha=(1.0, 1.0), gamma=2, ignore_index=None, reduction='mean'):
        super(BinaryFocalLoss, self).__init__()
        # BUG FIX: the default used to be a mutable list shared across calls;
        # a tuple default keeps the same values without the aliasing hazard.
        if alpha is None:
            alpha = [0.25, 0.75]
        self.alpha = alpha
        self.gamma = gamma
        self.smooth = 1e-6  # clamp margin keeping log() finite in forward
        self.ignore_index = ignore_index
        self.reduction = reduction
        assert self.reduction in ['none', 'mean', 'sum']
        if isinstance(self.alpha, (list, tuple, np.ndarray)):
            self.alpha = np.reshape(np.asarray(self.alpha, dtype=float), (2,))
            assert self.alpha.shape[0] == 2, \
                'the `alpha` shape is not match the number of class'
        elif isinstance(self.alpha, (float, int)):
            # BUG FIX: `np.float` was removed from NumPy and `ndarray.view(2)`
            # takes a dtype, not a shape -- use plain float and reshape.
            self.alpha = np.asarray([self.alpha, 1.0 - self.alpha], dtype=float).reshape(2)
        else:
            raise TypeError('{} not supported'.format(type(self.alpha)))

    def forward(self, output, target):
        """Focal loss for probability map `output` against binary `target`.

        `output` is expected to already contain probabilities in [0, 1];
        positives are normalised by the positive count, negatives by the
        negative count.
        """
        prob = torch.clamp(output, self.smooth, 1.0 - self.smooth)
        pos_mask = (target == 1).float()
        neg_mask = (target == 0).float()
        pos_loss = -self.alpha[0] * torch.pow(torch.sub(1.0, prob), self.gamma) * torch.log(prob) * pos_mask
        neg_loss = -self.alpha[1] * torch.pow(prob, self.gamma) * \
                   torch.log(torch.sub(1.0, prob)) * neg_mask
        neg_loss = neg_loss.sum()
        pos_loss = pos_loss.sum()
        num_pos = pos_mask.view(pos_mask.size(0), -1).sum()
        num_neg = neg_mask.view(neg_mask.size(0), -1).sum()
        if num_pos == 0:
            loss = neg_loss
        elif num_neg == 0:
            # BUG FIX: all-positive targets used to produce 0/0 = NaN.
            loss = pos_loss / num_pos
        else:
            loss = pos_loss / num_pos + neg_loss / num_neg
        return loss
class DC_and_Focal_loss(nn.Module):
    """Combined soft-dice + binary focal loss."""
    def __init__(self, ):
        super(DC_and_Focal_loss, self).__init__()
        self.flb = BinaryFocalLoss()
        self.dc = SoftDiceLoss()

    def forward(self, net_output, target):
        dice_term = self.dc(net_output, target)
        focal_term = self.flb(net_output, target)
        return focal_term + dice_term
class DC_and_CE_loss(nn.Module):
    """Combined dice + cross-entropy loss."""
    def __init__(self, ):
        super(DC_and_CE_loss, self).__init__()
        self.ce = CrossEntropyLoss()
        self.dc = DiceLoss()

    def forward(self, net_output, target):
        dice_term = self.dc(net_output, target)
        ce_term = self.ce(net_output, target)
        return ce_term + dice_term
#
# # https://github.com/pytorch/pytorch/issues/1249
# def dice_coeff(pred, target):
# smooth = 1.
# num = pred.size(0)
# m1 = pred.view(num, -1) # Flatten
# m2 = target.view(num, -1) # Flatten
# intersection = (m1 * m2).sum()
#
# return (2. * intersection + smooth) / (m1.sum() + m2.sum() + smooth)
class DiceCoeff(Function):
    """Dice coeff for individual examples"""
    # NOTE(review): this uses the legacy autograd.Function API (instance
    # `forward`/`backward` with state stored on `self`, `saved_variables`);
    # modern PyTorch expects static methods taking a `ctx` argument --
    # confirm the installed torch version still supports this style.
    def forward(self, input, target):
        # Keep tensors for the backward pass and cache the intersection /
        # union terms of the Dice coefficient 2*I / U.
        self.save_for_backward(input, target)
        eps = 0.0001  # avoids division by zero when both inputs are empty
        self.inter = torch.dot(input.view(-1), target.view(-1))
        self.union = torch.sum(input) + torch.sum(target) + eps
        t = (2 * self.inter.float() + eps) / self.union.float()
        return t
    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        input, target = self.saved_variables
        grad_input = grad_target = None
        if self.needs_input_grad[0]:
            # Quotient rule on 2*I/U: d/dinput = 2*(target*U - I) / U^2
            # (the eps terms from forward are ignored here).
            grad_input = grad_output * 2 * (target * self.union - self.inter) \
                         / (self.union * self.union)
        if self.needs_input_grad[1]:
            # Gradient w.r.t. the target is intentionally not computed.
            grad_target = None
        return grad_input, grad_target
def dice_coeff(input, target):
    """Dice coeff for batches.

    Averages the per-sample Dice coefficient over the batch dimension.

    Args:
        input: batch of predictions, iterated along dim 0.
        target: batch of ground-truth masks, paired element-wise with `input`.

    Returns:
        1-element tensor with the mean Dice coefficient.

    Raises:
        ValueError: if the batch is empty (the original code hit an
            UnboundLocalError on `i` in that case).
    """
    if input.is_cuda:
        s = torch.FloatTensor(1).cuda().zero_()
    else:
        s = torch.FloatTensor(1).zero_()
    n_samples = 0
    for pred, gt in zip(input, target):
        s = s + DiceCoeff().forward(pred, gt)
        n_samples += 1
    if n_samples == 0:
        raise ValueError("dice_coeff received an empty batch")
    return s / n_samples
device = 0
class ReconLoss(nn.Module):
    """L1 reconstruction loss, optionally restricted to unmasked regions.

    Args:
        reduction: reduction mode forwarded to nn.L1Loss.
        masked: if True, compare only where ``data_input['masks']`` is 0.
    """
    def __init__(self, reduction='mean', masked=False):
        super().__init__()
        self.loss_fn = nn.L1Loss(reduction=reduction)
        self.masked = masked

    def forward(self, data_input, model_output):
        outputs = model_output['outputs']
        targets = data_input['targets']
        if not self.masked:
            return self.loss_fn(outputs, targets)
        # Compare only the known (mask == 0) regions.
        keep = 1 - data_input['masks']
        return self.loss_fn(outputs * keep, targets * keep)
class VGGLoss(nn.Module):
    """Perceptual loss: L1 distance between VGG feature maps, averaged over
    the trailing (frame) dimension of the inputs."""
    def __init__(self, vgg):
        super().__init__()
        self.vgg = vgg
        self.l1_loss = nn.L1Loss()

    def vgg_loss(self, output, target):
        # Replicate the single channel to 3 so the VGG backbone accepts it.
        out_feats = self.vgg(output.repeat(1, 3, 1, 1))
        tgt_feats = self.vgg(target.repeat(1, 3, 1, 1))
        layers = ('relu2_2', 'relu3_3', 'relu4_3')
        return sum(self.l1_loss(getattr(out_feats, name), getattr(tgt_feats, name))
                   for name in layers)

    def forward(self, targets, outputs):
        # Note: It can be batch-lized
        per_frame = [
            self.vgg_loss(outputs[..., idx], targets[..., idx])
            for idx in range(targets.size(-1))
        ]
        return torch.stack(per_frame, dim=0).mean(dim=0)
class StyleLoss(nn.Module):
    """Style (gram-matrix) loss between VGG features, averaged over frames.

    Implements the style term of "Image Inpainting for Irregular Holes Using
    Partial Convolutions", Liu et al., 2018.
    """
    def __init__(self, vgg, original_channel_norm=True):
        super().__init__()
        self.vgg = vgg
        self.l1_loss = nn.L1Loss()
        self.original_channel_norm = original_channel_norm

    # From https://github.com/pytorch/tutorials/blob/master/advanced_source/neural_style_tutorial.py
    def gram_matrix(self, input):
        a, b, c, d = input.size()  # a=batch size(=1), b=feature maps, (c,d)=map dims
        flat = input.view(a * b, c * d)  # resise F_XL into \hat F_XL
        gram = torch.mm(flat, flat.t())  # compute the gram product
        # 'Normalize' by the number of elements in each feature map.
        return gram.div(a * b * c * d)

    def style_loss(self, output, target):
        out_feats = self.vgg(output.repeat(1, 3, 1, 1))
        tgt_feats = self.vgg(target.repeat(1, 3, 1, 1))
        total = 0
        # n_channel: 128 (=2 ** 7), 256 (=2 ** 8), 512 (=2 ** 9)
        for idx, name in enumerate(['relu2_2', 'relu3_3', 'relu4_3']):
            out_feat = getattr(out_feats, name)
            tgt_feat = getattr(tgt_feats, name)
            _, n_ch, _, _ = out_feat.shape
            assert n_ch == 128 * 2 ** idx
            if self.original_channel_norm:
                divider = 2 ** (idx + 1)  # original design (avoid too small loss)
            else:
                divider = n_ch ** 2
            total += self.l1_loss(self.gram_matrix(out_feat),
                                  self.gram_matrix(tgt_feat)) / divider
        return total

    def forward(self, targets, outputs):
        # Note: It can be batch-lized
        per_frame = [
            self.style_loss(outputs[..., idx], targets[..., idx])
            for idx in range(targets.size(-1))
        ]
        return torch.stack(per_frame, dim=0).mean(dim=0)
class L1LossMaskedMean(nn.Module):
    """L1 loss summed over masked-in elements, normalised by the mask mass."""
    def __init__(self):
        super().__init__()
        self.l1 = nn.L1Loss(reduction='sum')

    def forward(self, x, y, masked):
        abs_sum = self.l1(x * masked, y * masked)
        return abs_sum / torch.sum(masked)
class L2LossMaskedMean(nn.Module):
    """MSE loss summed over masked-in elements, normalised by the mask mass."""
    def __init__(self, reduction='sum'):
        super().__init__()
        self.l2 = nn.MSELoss(reduction=reduction)

    def forward(self, x, y, mask):
        sq_sum = self.l2(x * mask, y * mask)
        return sq_sum / torch.sum(mask)
# From https://github.com/jxgu1016/Total_Variation_Loss.pytorch
class TVLoss(nn.Module):
    """Total-variation loss over a 5D (B, L, C, H, W) tensor, normalised per
    sample by the corresponding mask area."""
    def __init__(self):
        super(TVLoss, self).__init__()

    def forward(self, data_input, model_output):
        outputs = model_output['outputs']
        B, L, C, H, W = outputs.shape
        # Collapse batch and temporal dims so frames are treated independently.
        frames = outputs.view([B * L, C, H, W])
        mask_areas = data_input['masks'].view([B * L, -1]).sum(dim=1)
        dh = frames[:, :, 1:, :] - frames[:, :, :-1, :]
        dw = frames[:, :, :, 1:] - frames[:, :, :, :-1]
        h_tv = torch.pow(dh, 2).sum(1).sum(1).sum(1)
        w_tv = torch.pow(dw, 2).sum(1).sum(1).sum(1)
        return ((h_tv + w_tv) / mask_areas).mean()
# Based on https://github.com/knazeri/edge-connect/blob/master/src/loss.py
class AdversarialLoss(nn.Module):
    r"""
    Adversarial loss
    https://arxiv.org/abs/1711.10337
    """
    def __init__(self, type='lsgan', target_real_label=1.0, target_fake_label=0.0):
        r"""
        type = nsgan | lsgan | hinge | l1
        """
        super(AdversarialLoss, self).__init__()
        self.type = type
        # BUG FIX: the labels were eagerly moved with .to(device) onto a
        # module-global CUDA index, which crashes on CPU-only machines.
        # Registered buffers already follow the module on .to()/.cuda(),
        # so no explicit device placement is needed here.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        if type == 'nsgan':
            self.criterion = nn.BCELoss()
        elif type == 'lsgan':
            self.criterion = nn.MSELoss()
        elif type == 'hinge':
            self.criterion = nn.ReLU()
        elif type == 'l1':
            self.criterion = nn.L1Loss()

    def __call__(self, outputs, is_real, is_disc=None):
        """Return the adversarial loss for `outputs`.

        For 'hinge', `is_disc` selects the discriminator (hinge on +/-outputs)
        vs. generator (-mean) form; otherwise `is_real` picks which label
        buffer the criterion compares against.
        """
        if self.type == 'hinge':
            if is_disc:
                if is_real:
                    outputs = -outputs
                return self.criterion(1 + outputs).mean()
            else:
                return (-outputs).mean()
        else:
            labels = (self.real_label if is_real else self.fake_label).expand_as(outputs)
            loss = self.criterion(outputs, labels)
            return loss
class KLMSELoss(nn.Module):
    """VAE objective: sum-reduced MSE reconstruction term plus KL divergence.

    Args (forward):
        recon_x: reconstructed sample.
        x: original sample.
        mu: latent mean.
        logvar: latent log-variance.
    """
    def __init__(self):
        super(KLMSELoss, self).__init__()
        # FIX: `size_average=False` is deprecated; reduction='sum' is the
        # equivalent modern spelling.
        self.mse_loss = nn.MSELoss(reduction='sum')

    def forward(self, recon_x, x, mu, logvar):
        MSE = self.mse_loss(recon_x, x)
        # see Appendix B from VAE paper: https://arxiv.org/abs/1312.6114
        # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        return MSE + KLD
| [
"torch.nn.functional.sigmoid",
"torch.ones",
"torch.nn.MSELoss",
"torch.gather",
"torch.nn.BCELoss",
"torch.FloatTensor",
"torch.exp",
"numpy.reshape",
"torch.nn.functional.log_softmax",
"torch.log",
"torch.mean",
"torch.autograd.Variable",
"torch.nn.KLDivLoss",
"numpy.log2",
"numpy.asar... | [((771, 783), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (781, 783), True, 'import torch.nn as nn\n'), ((1241, 1258), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['logits'], {}), '(logits)\n', (1250, 1258), True, 'import torch.nn.functional as F\n'), ((3145, 3159), 'torch.nn.Softmax2d', 'nn.Softmax2d', ([], {}), '()\n', (3157, 3159), True, 'import torch.nn as nn\n'), ((3197, 3237), 'torch.clamp', 'torch.clamp', (['P'], {'min': '(1e-09)', 'max': '(1 - 1e-09)'}), '(P, min=1e-09, max=1 - 1e-09)\n', (3208, 3237), False, 'import torch\n'), ((3453, 3495), 'torch.gather', 'torch.gather', (['predict'], {'dim': '(0)', 'index': 'target'}), '(predict, dim=0, index=target)\n', (3465, 3495), False, 'import torch\n'), ((5080, 5104), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (5089, 5104), True, 'import torch.nn.functional as F\n'), ((5660, 5720), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'ignore_index', 'weight': 'alpha'}), '(ignore_index=ignore_index, weight=alpha)\n', (5679, 5720), True, 'import torch.nn as nn\n'), ((5819, 5836), 'torch.exp', 'torch.exp', (['(-logpt)'], {}), '(-logpt)\n', (5828, 5836), False, 'import torch\n'), ((6117, 6150), 'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {'reduction': 'reduction'}), '(reduction=reduction)\n', (6129, 6150), True, 'import torch.nn as nn\n'), ((9093, 9142), 'torch.clamp', 'torch.clamp', (['prob', 'self.smooth', '(1.0 - self.smooth)'], {}), '(prob, self.smooth, 1.0 - self.smooth)\n', (9104, 9142), False, 'import torch\n'), ((10564, 10582), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (10580, 10582), False, 'from torch.nn import BCELoss, MSELoss, CrossEntropyLoss\n'), ((12712, 12742), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {'reduction': 'reduction'}), '(reduction=reduction)\n', (12721, 12742), True, 'import torch.nn as nn\n'), ((13240, 13251), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (13249, 
13251), True, 'import torch.nn as nn\n'), ((14252, 14263), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (14261, 14263), True, 'import torch.nn as nn\n'), ((16502, 16528), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (16511, 16528), True, 'import torch.nn as nn\n'), ((16781, 16812), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': 'reduction'}), '(reduction=reduction)\n', (16791, 16812), True, 'import torch.nn as nn\n'), ((19156, 19186), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (19166, 19186), True, 'import torch.nn as nn\n'), ((4536, 4554), 'torch.mean', 'torch.mean', (['d_fake'], {}), '(d_fake)\n', (4546, 4554), False, 'import torch\n'), ((6225, 6254), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits1'], {'dim': '(1)'}), '(logits1, dim=1)\n', (6238, 6254), True, 'import torch.nn.functional as F\n'), ((6256, 6281), 'torch.nn.functional.softmax', 'F.softmax', (['logits2'], {'dim': '(1)'}), '(logits2, dim=1)\n', (6265, 6281), True, 'import torch.nn.functional as F\n'), ((8497, 8510), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (8507, 8510), False, 'import torch\n'), ((16640, 16657), 'torch.sum', 'torch.sum', (['masked'], {}), '(masked)\n', (16649, 16657), False, 'import torch\n'), ((16918, 16933), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (16927, 16933), False, 'import torch\n'), ((18333, 18345), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (18343, 18345), True, 'import torch.nn as nn\n'), ((2729, 2753), 'torch.ones', 'torch.ones', (['class_num', '(1)'], {}), '(class_num, 1)\n', (2739, 2753), False, 'import torch\n'), ((2895, 2910), 'torch.autograd.Variable', 'Variable', (['alpha'], {}), '(alpha)\n', (2903, 2910), False, 'from torch.autograd import Variable\n'), ((3547, 3579), 'torch.pow', 'torch.pow', (['(1 - probs)', 'self.gamma'], {}), '(1 - probs, self.gamma)\n', (3556, 3579), False, 'import torch\n'), ((4173, 4183), 
'numpy.log2', 'np.log2', (['c'], {}), '(c)\n', (4180, 4183), True, 'import numpy as np\n'), ((4515, 4533), 'torch.mean', 'torch.mean', (['d_real'], {}), '(d_real)\n', (4525, 4533), False, 'import torch\n'), ((8593, 8615), 'numpy.asarray', 'np.asarray', (['self.alpha'], {}), '(self.alpha)\n', (8603, 8615), True, 'import numpy as np\n'), ((8641, 8666), 'numpy.reshape', 'np.reshape', (['self.alpha', '(2)'], {}), '(self.alpha, 2)\n', (8651, 8666), True, 'import numpy as np\n'), ((9309, 9324), 'torch.log', 'torch.log', (['prob'], {}), '(prob)\n', (9318, 9324), False, 'import torch\n'), ((11653, 11669), 'torch.sum', 'torch.sum', (['input'], {}), '(input)\n', (11662, 11669), False, 'import torch\n'), ((11672, 11689), 'torch.sum', 'torch.sum', (['target'], {}), '(target)\n', (11681, 11689), False, 'import torch\n'), ((12414, 12434), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)'], {}), '(1)\n', (12431, 12434), False, 'import torch\n'), ((14013, 14048), 'torch.stack', 'torch.stack', (['mean_image_loss'], {'dim': '(0)'}), '(mean_image_loss, dim=0)\n', (14024, 14048), False, 'import torch\n'), ((16317, 16352), 'torch.stack', 'torch.stack', (['mean_image_loss'], {'dim': '(0)'}), '(mean_image_loss, dim=0)\n', (16328, 16352), False, 'import torch\n'), ((18406, 18418), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (18416, 18418), True, 'import torch.nn as nn\n'), ((9372, 9399), 'torch.pow', 'torch.pow', (['prob', 'self.gamma'], {}), '(prob, self.gamma)\n', (9381, 9399), False, 'import torch\n'), ((9433, 9453), 'torch.sub', 'torch.sub', (['(1.0)', 'prob'], {}), '(1.0, prob)\n', (9442, 9453), False, 'import torch\n'), ((18144, 18175), 'torch.tensor', 'torch.tensor', (['target_real_label'], {}), '(target_real_label)\n', (18156, 18175), False, 'import torch\n'), ((18231, 18262), 'torch.tensor', 'torch.tensor', (['target_fake_label'], {}), '(target_fake_label)\n', (18243, 18262), False, 'import torch\n'), ((18479, 18488), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (18486, 
18488), True, 'import torch.nn as nn\n'), ((4134, 4155), 'torch.log2', 'torch.log2', (['(v + 1e-30)'], {}), '(v + 1e-30)\n', (4144, 4155), False, 'import torch\n'), ((9273, 9293), 'torch.sub', 'torch.sub', (['(1.0)', 'prob'], {}), '(1.0, prob)\n', (9282, 9293), False, 'import torch\n'), ((12356, 12376), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)'], {}), '(1)\n', (12373, 12376), False, 'import torch\n'), ((18546, 18557), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (18555, 18557), True, 'import torch.nn as nn\n'), ((8861, 8919), 'numpy.asarray', 'np.asarray', (['[self.alpha, 1.0 - self.alpha]'], {'dtype': 'np.float'}), '([self.alpha, 1.0 - self.alpha], dtype=np.float)\n', (8871, 8919), True, 'import numpy as np\n'), ((17473, 17524), 'torch.pow', 'torch.pow', (['(x[:, :, 1:, :] - x[:, :, :h_x - 1, :])', '(2)'], {}), '(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2)\n', (17482, 17524), False, 'import torch\n'), ((17563, 17614), 'torch.pow', 'torch.pow', (['(x[:, :, :, 1:] - x[:, :, :, :w_x - 1])', '(2)'], {}), '(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2)\n', (17572, 17614), False, 'import torch\n')] |
import cv2
import numpy as np
import os, csv
def get_label_info(csv_path):
    """
    Retrieve the class names and label values for the selected dataset.
    Must be in CSV format!
    # Arguments
        csv_path: The file path of the class dictionairy
    # Returns
        Two lists: one for the class names and the other for the label values
    # Raises
        ValueError: if csv_path does not end in .csv
    """
    _, file_extension = os.path.splitext(csv_path)
    if not file_extension == ".csv":
        # BUG FIX: the error object was returned instead of raised, so
        # callers silently received a ValueError instance as a "result".
        raise ValueError("File is not a CSV!")
    class_names = []
    label_values = []
    with open(csv_path, 'r') as csvfile:
        file_reader = csv.reader(csvfile, delimiter=',')
        next(file_reader)  # skip the header row
        for row in file_reader:
            class_names.append(row[0])
            label_values.append([int(row[1]), int(row[2]), int(row[3])])
    return class_names, label_values
def one_hot_it(label, label_values):
    """
    Convert a segmentation image label array to one-hot format
    by replacing each pixel value with a vector of length num_classes
    # Arguments
        label: The 2D array segmentation image label
        label_values
    # Returns
        A 2D array with the same width and hieght as the input, but
        with a depth size of num_classes
    """
    # https://stackoverflow.com/questions/46903885/map-rgb-semantic-maps-to-one-hot-encodings-and-vice-versa-in-tensorflow
    # https://stackoverflow.com/questions/14859458/how-to-check-if-all-values-in-the-columns-of-a-numpy-matrix-are-the-same
    class_maps = [np.all(np.equal(label, colour), axis=-1) for colour in label_values]
    return np.stack(class_maps, axis=-1)
def reverse_one_hot(image):
    """
    Transform a 2D array in one-hot format (depth is num_classes),
    to a 2D array with only 1 channel, where each pixel value is
    the classified class key.
    # Arguments
        image: The one-hot format image
    # Returns
        A 2D array with the same width and hieght as the input, but
        with a depth size of 1, where each pixel value is the classified
        class key.
    """
    return np.argmax(image, axis=-1)
def colour_code_segmentation(image, label_values):
    """
    Given a 1-channel array of class keys, colour code the segmentation results.
    # Arguments
        image: single channel array where each value represents the class key.
        label_values
    # Returns
        Colour coded image for segmentation visualization
    """
    palette = np.array(label_values)
    return palette[image.astype(int)]
| [
"numpy.stack",
"csv.reader",
"numpy.argmax",
"numpy.equal",
"numpy.array",
"os.path.splitext",
"numpy.all"
] | [((397, 423), 'os.path.splitext', 'os.path.splitext', (['csv_path'], {}), '(csv_path)\n', (413, 423), False, 'import os, csv\n'), ((1757, 1788), 'numpy.stack', 'np.stack', (['semantic_map'], {'axis': '(-1)'}), '(semantic_map, axis=-1)\n', (1765, 1788), True, 'import numpy as np\n'), ((2276, 2301), 'numpy.argmax', 'np.argmax', (['image'], {'axis': '(-1)'}), '(image, axis=-1)\n', (2285, 2301), True, 'import numpy as np\n'), ((2685, 2707), 'numpy.array', 'np.array', (['label_values'], {}), '(label_values)\n', (2693, 2707), True, 'import numpy as np\n'), ((616, 650), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (626, 650), False, 'import os, csv\n'), ((1627, 1650), 'numpy.equal', 'np.equal', (['label', 'colour'], {}), '(label, colour)\n', (1635, 1650), True, 'import numpy as np\n'), ((1671, 1696), 'numpy.all', 'np.all', (['equality'], {'axis': '(-1)'}), '(equality, axis=-1)\n', (1677, 1696), True, 'import numpy as np\n')] |
from DataSpace import DataSpace
import numpy as np
from scipy.spatial.distance import euclidean, cosine
class Distance_Datas(DataSpace):
    """
    Measures the internal distance space of the reference data (by chosen
    type) and checks whether distances to external data fall inside it.

    data: the reference data (2D array, samples in rows)
    checked_data: external data to compare (passed to distance_spaces)
    dist_type: distance type, 'cos_dis' (cosine) or 'euclid_dis' (Euclidean)
    """
    def __init__(self, data, dist_type) -> object:
        super().__init__(data)
        self.type_dist = dist_type

    def own_euclid_space(self):
        """Pairwise Euclidean distances within the reference data."""
        space = []
        for i in range(self.data.shape[0]):
            for j in range(i, self.data.shape[0]):
                space.append(euclidean(self.data[i], self.data[j]))
        return space

    def owncosin_space(self):
        """Pairwise cosine distances within the reference data.

        BUG FIX: this method was called by `distance_spaces` and `distance`
        but never defined, so the 'cos_dis' branch always raised
        AttributeError.
        """
        space = []
        for i in range(self.data.shape[0]):
            for j in range(i, self.data.shape[0]):
                space.append(cosine(self.data[i], self.data[j]))
        return space

    @property
    def distance(self):
        """Distances within the reference data for the configured type.

        BUG FIX: the property previously referenced an undefined global
        `checked_data` and passed `self` explicitly to non-existent methods,
        so any access raised NameError/TypeError.
        """
        if self.type_dist == 'cos_dis':
            return self.owncosin_space()
        elif self.type_dist == 'euclid_dis':
            return self.own_euclid_space()
        else:
            print(f"Введите 'cos_dis' или 'euclid_dis'")

    def __cosin_distance(self, checked_data):
        """Cosine distances between every reference/checked sample pair."""
        distance = []
        for i in range(self.data.shape[0]):
            for j in range(checked_data.shape[0]):
                distance.append(cosine(self.data[i], checked_data[j]))
        return distance

    def __euclid_distance(self, checked_data):
        """Euclidean distances between every reference/checked sample pair."""
        distance = []
        for i in range(self.data.shape[0]):
            for j in range(checked_data.shape[0]):
                distance.append(euclidean(self.data[i], checked_data[j]))
        return distance

    def distance_spaces(self, checked_data):
        """Report whether checked-data distances lie inside the own-data range."""
        if self.type_dist == 'cos_dis':
            own_space = self.owncosin_space()
            distance = self.__cosin_distance(checked_data)
        elif self.type_dist == 'euclid_dis':
            own_space = self.own_euclid_space()
            distance = self.__euclid_distance(checked_data)
        else:
            # BUG FIX: an unknown type previously fell through to an
            # UnboundLocalError below; fail fast with a clear message.
            raise ValueError("dist_type must be 'cos_dis' or 'euclid_dis'")
        own_max = max(own_space)
        own_min = min(own_space)
        distance_max = max(distance)
        distance_min = min(distance)
        if distance_min > own_min and distance_max < own_max:
            return print(f'Данные {(distance_min, distance_max)} из контрольной области {(own_min, own_max)} на основе {self.type_dist}')
        else:
            return print(f'Данные {(distance_min, distance_max)} вне контрольной области {(own_min, own_max)} на основе {self.type_dist}')
# NOTE(review): the four statements below duplicate the start of the
# __main__ block and run unconditionally at import time; consider removing
# them or moving them under the guard -- TODO confirm they are not relied on.
control_data = np.random.random(size=(10, 7))
checked_data = np.random.binomial(2, 0.7, size=(20, 7))
own_data_cos = Distance_Datas(control_data, 'cos_dis')
print(own_data_cos)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Reference data: 10 samples of 7 uniform features.
    control_data = np.random.random(size=(10, 7))
    # External data to check: 20 samples from a binomial distribution.
    checked_data = np.random.binomial(2, 0.7, size=(20, 7))
    # Demonstrate the cosine-distance check...
    own_data_cos = Distance_Datas(control_data, 'cos_dis')
    print(own_data_cos)
    print()
    own_data_cos.distance_spaces(checked_data)
    print()
    # ...and the Euclidean-distance check.
    own_data_euc = Distance_Datas(control_data, 'euclid_dis')
    print(own_data_euc)
    print()
    own_data_euc.distance_spaces(checked_data)
| [
"scipy.spatial.distance.cosine",
"numpy.random.random",
"numpy.random.binomial",
"scipy.spatial.distance.euclidean"
] | [((2523, 2553), 'numpy.random.random', 'np.random.random', ([], {'size': '(10, 7)'}), '(size=(10, 7))\n', (2539, 2553), True, 'import numpy as np\n'), ((2569, 2609), 'numpy.random.binomial', 'np.random.binomial', (['(2)', '(0.7)'], {'size': '(20, 7)'}), '(2, 0.7, size=(20, 7))\n', (2587, 2609), True, 'import numpy as np\n'), ((2791, 2821), 'numpy.random.random', 'np.random.random', ([], {'size': '(10, 7)'}), '(size=(10, 7))\n', (2807, 2821), True, 'import numpy as np\n'), ((2841, 2881), 'numpy.random.binomial', 'np.random.binomial', (['(2)', '(0.7)'], {'size': '(20, 7)'}), '(2, 0.7, size=(20, 7))\n', (2859, 2881), True, 'import numpy as np\n'), ((678, 715), 'scipy.spatial.distance.euclidean', 'euclidean', (['self.data[i]', 'self.data[j]'], {}), '(self.data[i], self.data[j])\n', (687, 715), False, 'from scipy.spatial.distance import euclidean, cosine\n'), ((1275, 1312), 'scipy.spatial.distance.cosine', 'cosine', (['self.data[i]', 'checked_data[j]'], {}), '(self.data[i], checked_data[j])\n', (1281, 1312), False, 'from scipy.spatial.distance import euclidean, cosine\n'), ((1564, 1604), 'scipy.spatial.distance.euclidean', 'euclidean', (['self.data[i]', 'checked_data[j]'], {}), '(self.data[i], checked_data[j])\n', (1573, 1604), False, 'from scipy.spatial.distance import euclidean, cosine\n')] |
# sys.path.append("../src/")
import sys
sys.path.append("../src/")
# from post_processing import compute_sig, local_project
import site
import sys
import pandas as pd
import sys
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import mshr
import dolfin
from dolfin import MPI
import os
import sympy
import numpy as np
# import post_processing as pp
import petsc4py
from functools import reduce
import ufl
petsc4py.init(sys.argv)
from petsc4py import PETSc
# from hashlib import md5
from pathlib import Path
import json
import hashlib
from copy import deepcopy
import mpi4py
comm = mpi4py.MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
from dolfin.cpp.log import log, LogLevel, set_log_level
dolfin.parameters["std_out_all_processes"] = False
from solvers import EquilibriumAM
from solver_stability import StabilitySolver
from linsearch import LineSearch
# from dolfin import NonlinearProblem, derivative, \
# TrialFunction, TestFunction, inner, assemble, sqrt, \
# Constant, interpolate, RectangleMesh, Point
from dolfin import *
import yaml
from utils import get_versions
code_parameters = get_versions()
set_log_level(LogLevel.INFO)
def getDefaultParameters():
with open('../parameters/form_compiler.yml') as f:
form_compiler_parameters = yaml.load(f, Loader=yaml.FullLoader)
with open('../parameters/solvers_default.yml') as f:
equilibrium_parameters = yaml.load(f, Loader=yaml.FullLoader)['equilibrium']
with open('../parameters/solvers_default.yml') as f:
damage_parameters = yaml.load(f, Loader=yaml.FullLoader)['damage']
with open('../parameters/solvers_default.yml') as f:
elasticity_parameters = yaml.load(f, Loader=yaml.FullLoader)['elasticity']
with open('../parameters/model1d.yaml') as f:
material_parameters = yaml.load(f, Loader=yaml.FullLoader)['material']
with open('../parameters/loading.yaml') as f:
loading_parameters = yaml.load(f, Loader=yaml.FullLoader)['loading']
with open('../parameters/stability.yaml') as f:
stability_parameters = yaml.load(f, Loader=yaml.FullLoader)['stability']
with open('../parameters/stability.yaml') as f:
inertia_parameters = yaml.load(f, Loader=yaml.FullLoader)['inertia']
with open('../parameters/stability.yaml') as f:
eigen_parameters = yaml.load(f, Loader=yaml.FullLoader)['eigen']
default_parameters = {
'code': {**code_parameters},
'compiler': {**form_compiler_parameters},
'eigen': {**eigen_parameters},
# 'geometry': {**geometry_parameters},
'inertia': {**inertia_parameters},
'loading': {**loading_parameters},
'material': {**material_parameters},
# 'newton': {**newton_parameters},
'equilibrium':{**equilibrium_parameters},
'damage':{**damage_parameters},
'elasticity':{**elasticity_parameters},
'stability': {**stability_parameters},
}
return default_parameters
def numerical_test(
user_parameters,
ell=0.05,
nu=0.,
):
time_data = []
time_data_pd = []
spacetime = []
lmbda_min_prev = 1e-6
bifurcated = False
bifurcation_loads = []
save_current_bifurcation = False
bifurc_i = 0
bifurcation_loads = []
# Create mesh and define function space
geometry_parameters = {'Lx': 1., 'Ly': .1, 'n': 5}
# Define Dirichlet boundaries
outdir = '../test/output/test_secondorderevo'
Path(outdir).mkdir(parents=True, exist_ok=True)
with open('../parameters/form_compiler.yml') as f:
form_compiler_parameters = yaml.load(f, Loader=yaml.FullLoader)
with open('../parameters/solvers_default.yml') as f:
solver_parameters = yaml.load(f, Loader=yaml.FullLoader)
with open('../parameters/model1d.yaml') as f:
material_parameters = yaml.load(f, Loader=yaml.FullLoader)['material']
with open('../parameters/loading.yaml') as f:
loading_parameters = yaml.load(f, Loader=yaml.FullLoader)['loading']
with open('../parameters/stability.yaml') as f:
stability_parameters = yaml.load(f, Loader=yaml.FullLoader)['stability']
Path(outdir).mkdir(parents=True, exist_ok=True)
print('Outdir is: '+outdir)
default_parameters = {
'code': {**code_parameters},
'compiler': {**form_compiler_parameters},
'geometry': {**geometry_parameters},
'loading': {**loading_parameters},
'material': {**material_parameters},
'solver':{**solver_parameters},
'stability': {**stability_parameters},
}
default_parameters.update(user_parameters)
# FIXME: Not nice
parameters = default_parameters
with open(os.path.join(outdir, 'parameters.yaml'), "w") as f:
yaml.dump(parameters, f, default_flow_style=False)
Lx = parameters['geometry']['Lx']; Ly = parameters['geometry']['Ly']
ell = parameters['material']['ell']
comm = MPI.comm_world
geom = mshr.Rectangle(dolfin.Point(-Lx/2., -Ly/2.), dolfin.Point(Lx/2., Ly/2.))
# import pdb; pdb.set_trace()
# resolution = max(geometry_parameters['n'] * Lx / ell, 1/(Ly*10))
resolution = max(geometry_parameters['n'] * Lx / ell, 5/(Ly*10))
resolution = 50
mesh = mshr.generate_mesh(geom, resolution)
meshf = dolfin.File(os.path.join(outdir, "mesh.xml"))
meshf << mesh
plot(mesh)
plt.savefig(os.path.join(outdir, "mesh.pdf"), bbox_inches='tight')
savelag = 1
left = dolfin.CompiledSubDomain("near(x[0], -Lx/2.)", Lx=Lx)
right = dolfin.CompiledSubDomain("near(x[0], Lx/2.)", Lx=Lx)
left_bottom_pt = dolfin.CompiledSubDomain("near(x[0],-Lx/2.) && near(x[1],-Ly/2.)", Lx=Lx, Ly=Ly)
mf = dolfin.MeshFunction("size_t", mesh, 1, 0)
right.mark(mf, 1)
left.mark(mf, 2)
ds = dolfin.Measure("ds", subdomain_data=mf)
dx = dolfin.Measure("dx", metadata=form_compiler_parameters, domain=mesh)
# Function Spaces
V_u = dolfin.VectorFunctionSpace(mesh, "CG", 1)
V_alpha = dolfin.FunctionSpace(mesh, "CG", 1)
u = dolfin.Function(V_u, name="Total displacement")
alpha = Function(V_alpha)
dalpha = TrialFunction(V_alpha)
alpha_bif = dolfin.Function(V_alpha)
alpha_bif_old = dolfin.Function(V_alpha)
state = {'u': u, 'alpha': alpha}
Z = dolfin.FunctionSpace(mesh,
dolfin.MixedElement([u.ufl_element(),alpha.ufl_element()]))
z = dolfin.Function(Z)
v, beta = dolfin.split(z)
ut = dolfin.Expression("t", t=0.0, degree=0)
bcs_u = [dolfin.DirichletBC(V_u.sub(0), dolfin.Constant(0), left),
dolfin.DirichletBC(V_u.sub(0), ut, right),
dolfin.DirichletBC(V_u, (0, 0), left_bottom_pt, method="pointwise")]
bcs_alpha_l = DirichletBC(V_alpha, Constant(0.0), left)
bcs_alpha_r = DirichletBC(V_alpha, Constant(0.0), right)
# bcs_alpha =[bcs_alpha_l, bcs_alpha_r]
bcs_alpha = []
bcs = {"damage": bcs_alpha, "elastic": bcs_u}
# import pdb; pdb.set_trace()
ell = parameters['material']['ell']
# Problem definition
# Problem definition
k_res = parameters['material']['k_res']
a = (1 - alpha) ** 2. + k_res
w_1 = parameters['material']['sigma_D0'] ** 2 / parameters['material']['E']
w = w_1 * alpha
eps = sym(grad(u))
lmbda0 = parameters['material']['E'] * parameters['material']['nu'] /(1. - parameters['material']['nu'])**2.
mu0 = parameters['material']['E']/ 2. / (1.0 + parameters['material']['nu'])
Wu = 1./2.* lmbda0 * tr(eps)**2. + mu0 * inner(eps, eps)
energy = a * Wu * dx + w_1 *( alpha + \
parameters['material']['ell']** 2.*inner(grad(alpha), grad(alpha)))*dx
eps_ = variable(eps)
sigma = diff(a * Wu, eps_)
e1 = dolfin.Constant([1, 0])
file_out = dolfin.XDMFFile(os.path.join(outdir, "output.xdmf"))
file_out.parameters["functions_share_mesh"] = True
file_out.parameters["flush_output"] = True
file_postproc = dolfin.XDMFFile(os.path.join(outdir, "output_postproc.xdmf"))
file_postproc.parameters["functions_share_mesh"] = True
file_postproc.parameters["flush_output"] = True
file_eig = dolfin.XDMFFile(os.path.join(outdir, "modes.xdmf"))
file_eig.parameters["functions_share_mesh"] = True
file_eig.parameters["flush_output"] = True
file_bif = dolfin.XDMFFile(os.path.join(outdir, "bifurcation.xdmf"))
file_bif.parameters["functions_share_mesh"] = True
file_bif.parameters["flush_output"] = True
file_bif_postproc = dolfin.XDMFFile(os.path.join(outdir, "bifurcation_postproc.xdmf"))
file_bif_postproc.parameters["functions_share_mesh"] = True
file_bif_postproc.parameters["flush_output"] = True
solver = EquilibriumAM(energy, state, bcs, parameters=parameters['solver'])
stability = StabilitySolver(energy, state, bcs, parameters = parameters['stability'])
linesearch = LineSearch(energy, state)
xs = np.linspace(-parameters['geometry']['Lx']/2., parameters['geometry']['Lx']/2, 50)
load_steps = np.linspace(parameters['loading']['load_min'],
parameters['loading']['load_max'],
parameters['loading']['n_steps'])
log(LogLevel.INFO, '====================== EVO ==========================')
log(LogLevel.INFO, '{}'.format(parameters))
for it, load in enumerate(load_steps):
log(LogLevel.CRITICAL, '====================== STEPPING ==========================')
log(LogLevel.CRITICAL, 'CRITICAL: Solving load t = {:.2f}'.format(load))
ut.t = load
(time_data_i, am_iter) = solver.solve()
# Second order stability conditions
(stable, negev) = stability.solve(solver.damage.problem.lb)
log(LogLevel.CRITICAL, 'Current state is{}stable'.format(' ' if stable else ' un'))
# we postpone the update after the stability check
solver.update()
mineig = stability.mineig if hasattr(stability, 'mineig') else 0.0
log(LogLevel.INFO, 'INFO: lmbda min {}'.format(lmbda_min_prev))
log(LogLevel.INFO, 'INFO: mineig {}'.format(mineig))
Deltav = (mineig-lmbda_min_prev) if hasattr(stability, 'eigs') else 0
if (mineig + Deltav)*(lmbda_min_prev+dolfin.DOLFIN_EPS) < 0 and not bifurcated:
bifurcated = True
# save 3 bif modes
print('About to bifurcate load ', load, 'step', it)
bifurcation_loads.append(load)
modes = np.where(stability.eigs < 0)[0]
with dolfin.XDMFFile(os.path.join(outdir, "postproc.xdmf")) as file:
leneigs = len(modes)
maxmodes = min(3, leneigs)
for n in range(maxmodes):
mode = dolfin.project(stability.linsearch[n]['beta_n'], V_alpha)
modename = 'beta-%d'%n
print(modename)
file.write_checkpoint(mode, modename, 0, append=True)
bifurc_i += 1
lmbda_min_prev = mineig if hasattr(stability, 'mineig') else 0.
time_data_i["load"] = load
time_data_i["alpha_max"] = max(alpha.vector()[:])
time_data_i["elastic_energy"] = dolfin.assemble(
1./2.* material_parameters['E']*a*eps**2. *dx)
time_data_i["dissipated_energy"] = dolfin.assemble(
(w + w_1 * material_parameters['ell'] ** 2. * inner(grad(alpha), grad(alpha)))*dx)
time_data_i["stable"] = stability.stable
time_data_i["# neg ev"] = stability.negev
time_data_i["eigs"] = stability.eigs if hasattr(stability, 'eigs') else np.inf
snn = dolfin.dot(dolfin.dot(sigma, e1), e1)
time_data_i["sigma"] = 1/parameters['geometry']['Ly'] * dolfin.assemble(snn*ds(1))
log(LogLevel.INFO,
"Load/time step {:.4g}: iteration: {:3d}, err_alpha={:.4g}".format(
time_data_i["load"],
time_data_i["iterations"][0],
time_data_i["alpha_error"][0]))
time_data.append(time_data_i)
time_data_pd = pd.DataFrame(time_data)
if np.mod(it, savelag) == 0:
with file_out as f:
f.write(alpha, load)
f.write(u, load)
with dolfin.XDMFFile(os.path.join(outdir, "output_postproc.xdmf")) as f:
f.write_checkpoint(alpha, "alpha-{}".format(it), 0, append = True)
log(LogLevel.PROGRESS, 'PROGRESS: written step {}'.format(it))
time_data_pd.to_json(os.path.join(outdir, "time_data.json"))
spacetime.append(get_trace(alpha))
if save_current_bifurcation:
# modes = np.where(stability.eigs < 0)[0]
time_data_i['h_opt'] = h_opt
time_data_i['max_h'] = hmax
time_data_i['min_h'] = hmin
with file_bif_postproc as file:
# leneigs = len(modes)
# maxmodes = min(3, leneigs)
beta0v = dolfin.project(stability.perturbation_beta, V_alpha)
log(LogLevel.DEBUG, 'DEBUG: irrev {}'.format(alpha.vector()-alpha_old.vector()))
file.write_checkpoint(beta0v, 'beta0', 0, append = True)
file.write_checkpoint(alpha_bif_old, 'alpha-old', 0, append=True)
file.write_checkpoint(alpha_bif, 'alpha-bif', 0, append=True)
file.write_checkpoint(alpha, 'alpha', 0, append=True)
np.save(os.path.join(outdir, 'energy_perturbations'), energy_perturbations, allow_pickle=True, fix_imports=True)
with file_eig as file:
_v = dolfin.project(dolfin.Constant(h_opt)*perturbation_v, V_u)
_beta = dolfin.project(dolfin.Constant(h_opt)*perturbation_beta, V_alpha)
_v.rename('perturbation displacement', 'perturbation displacement')
_beta.rename('perturbation damage', 'perturbation damage')
# import pdb; pdb.set_trace()
f.write(_v, load)
f.write(_beta, load)
_spacetime = pd.DataFrame(spacetime)
spacetime = _spacetime.fillna(0)
mat = np.matrix(spacetime)
plt.imshow(mat, cmap = 'Greys', vmin = 0., vmax = 1., aspect=.1)
plt.colorbar()
def format_space(x, pos, xresol = 100):
return '$%1.1f$'%((-x+xresol/2)/xresol)
def format_time(t, pos, xresol = 100):
return '$%1.1f$'%((t-parameters['loading']['load_min'])/parameters['loading']['n_steps']*parameters['loading']['load_max'])
from matplotlib.ticker import FuncFormatter, MaxNLocator
ax = plt.gca()
ax.yaxis.set_major_formatter(FuncFormatter(format_space))
ax.xaxis.set_major_formatter(FuncFormatter(format_time))
plt.xlabel('$x$')
plt.ylabel('$t$')
plt.savefig(os.path.join(outdir, "spacetime.pdf".format(load)), bbox_inches="tight")
spacetime.to_json(os.path.join(outdir + "/spacetime.json"))
from matplotlib.ticker import FuncFormatter, MaxNLocator
plot(alpha)
plt.savefig(os.path.join(outdir, 'alpha.pdf'))
log(LogLevel.INFO, "Saved figure: {}".format(os.path.join(outdir, 'alpha.pdf')))
xs = np.linspace(-Lx/2., Lx/2., 100)
profile = np.array([alpha(x, 0) for x in xs])
plt.figure()
plt.plot(xs, profile, marker='o')
plt.plot(xs, np.array([u(x, 0) for x in xs]))
# plt.ylim(0., 1.)
plt.savefig(os.path.join(outdir, 'profile.pdf'))
return time_data_pd, outdir
from test_firstorderevo import get_trace
# def get_trace(alpha, xresol = 100):
# X =alpha.function_space().tabulate_dof_coordinates()
# xs = np.linspace(min(X[:, 0]),max(X[:, 0]), xresol)
# alpha0 = [alpha(x, 0) for x in xs]
# return alpha0
if __name__ == "__main__":
    # Entry point: run the traction-bar evolution test and post-process results.
    # Load the base parameter set from the repository's YAML config.
    with open('../parameters/tractionbar.yml') as f:
        parameters = yaml.load(f, Loader=yaml.FullLoader)
    # numerical_test is defined earlier in this file; it returns the collected
    # time-series data and the experiment output directory.
    data, experiment = numerical_test(user_parameters = parameters)
    print(data)
    log(LogLevel.INFO, "Postprocess")
    import postprocess as pp
    # Re-read the parameters actually written by the run (they may have been
    # merged/overridden relative to the base config loaded above).
    with open(os.path.join(experiment, 'parameters.yaml')) as f:
        parameters = yaml.load(f, Loader=yaml.FullLoader)
    # LaTeX label summarizing the material parameters, used in the plot title.
    lab = '\\ell={}, E={}, \\sigma_D = {}'.format(
        parameters['material']['ell'],
        parameters['material']['E'],
        parameters['material']['sigma_D0'])
    # Critical load estimate: tc = sqrt(sigma_D0 / E).
    tc = (parameters['material']['sigma_D0']/parameters['material']['E'])**(.5)
    ell = parameters['material']['ell']
    # import pdb; pdb.set_trace()
    fig1, ax1 =pp.plot_energy(parameters, data, tc)
    # visuals.setspines2()
    print(data['elastic_energy'])
    mu = parameters['material']['E']/2.
    # elast_en = [1./2.*2.*mu*eps**2 for eps in data['load']]
    # Lx = 1.
    # Ly = .1
    # Omega = Lx*Ly
    # Analytic elastic energy 1/2 * E * eps^2 per load step, for comparison
    # against the computed curve.
    elast_en = [1./2.*parameters['material']['E']*eps**2 for eps in data['load']]
    plt.plot(data['load'], elast_en, c='k', label='analytic')
    plt.axhline(parameters['geometry']['Ly'], c='k')
    plt.legend()
    plt.ylim(0, 1.)
    plt.title('${}$'.format(lab))
    fig1.savefig(os.path.join(experiment, "energy.pdf"), bbox_inches='tight')
    # Spectrum plot of the stability eigenvalues along the load path.
    (fig2, ax1, ax2) =pp.plot_spectrum(parameters, data, tc)
    plt.legend(loc='lower left')
    ax2.set_ylim(-1e-7, 2e-4)
    fig2.savefig(os.path.join(experiment, "spectrum.pdf"), bbox_inches='tight')
| [
"yaml.load",
"dolfin.MeshFunction",
"dolfin.CompiledSubDomain",
"yaml.dump",
"dolfin.cpp.log.log",
"dolfin.DirichletBC",
"dolfin.Constant",
"mshr.generate_mesh",
"matplotlib.pyplot.figure",
"pathlib.Path",
"solver_stability.StabilitySolver",
"matplotlib.pyplot.gca",
"os.path.join",
"dolfin... | [((40, 66), 'sys.path.append', 'sys.path.append', (['"""../src/"""'], {}), "('../src/')\n", (55, 66), False, 'import sys\n'), ((230, 251), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (244, 251), False, 'import matplotlib\n'), ((432, 455), 'petsc4py.init', 'petsc4py.init', (['sys.argv'], {}), '(sys.argv)\n', (445, 455), False, 'import petsc4py\n'), ((1157, 1171), 'utils.get_versions', 'get_versions', ([], {}), '()\n', (1169, 1171), False, 'from utils import get_versions\n'), ((1173, 1201), 'dolfin.cpp.log.set_log_level', 'set_log_level', (['LogLevel.INFO'], {}), '(LogLevel.INFO)\n', (1186, 1201), False, 'from dolfin.cpp.log import log, LogLevel, set_log_level\n'), ((5284, 5320), 'mshr.generate_mesh', 'mshr.generate_mesh', (['geom', 'resolution'], {}), '(geom, resolution)\n', (5302, 5320), False, 'import mshr\n'), ((5512, 5565), 'dolfin.CompiledSubDomain', 'dolfin.CompiledSubDomain', (['"""near(x[0], -Lx/2.)"""'], {'Lx': 'Lx'}), "('near(x[0], -Lx/2.)', Lx=Lx)\n", (5536, 5565), False, 'import dolfin\n'), ((5578, 5630), 'dolfin.CompiledSubDomain', 'dolfin.CompiledSubDomain', (['"""near(x[0], Lx/2.)"""'], {'Lx': 'Lx'}), "('near(x[0], Lx/2.)', Lx=Lx)\n", (5602, 5630), False, 'import dolfin\n'), ((5652, 5737), 'dolfin.CompiledSubDomain', 'dolfin.CompiledSubDomain', (['"""near(x[0],-Lx/2.) && near(x[1],-Ly/2.)"""'], {'Lx': 'Lx', 'Ly': 'Ly'}), "('near(x[0],-Lx/2.) 
&& near(x[1],-Ly/2.)', Lx=Lx, Ly=Ly\n )\n", (5676, 5737), False, 'import dolfin\n'), ((5743, 5784), 'dolfin.MeshFunction', 'dolfin.MeshFunction', (['"""size_t"""', 'mesh', '(1)', '(0)'], {}), "('size_t', mesh, 1, 0)\n", (5762, 5784), False, 'import dolfin\n'), ((5838, 5877), 'dolfin.Measure', 'dolfin.Measure', (['"""ds"""'], {'subdomain_data': 'mf'}), "('ds', subdomain_data=mf)\n", (5852, 5877), False, 'import dolfin\n'), ((5887, 5955), 'dolfin.Measure', 'dolfin.Measure', (['"""dx"""'], {'metadata': 'form_compiler_parameters', 'domain': 'mesh'}), "('dx', metadata=form_compiler_parameters, domain=mesh)\n", (5901, 5955), False, 'import dolfin\n'), ((5989, 6030), 'dolfin.VectorFunctionSpace', 'dolfin.VectorFunctionSpace', (['mesh', '"""CG"""', '(1)'], {}), "(mesh, 'CG', 1)\n", (6015, 6030), False, 'import dolfin\n'), ((6045, 6080), 'dolfin.FunctionSpace', 'dolfin.FunctionSpace', (['mesh', '"""CG"""', '(1)'], {}), "(mesh, 'CG', 1)\n", (6065, 6080), False, 'import dolfin\n'), ((6089, 6136), 'dolfin.Function', 'dolfin.Function', (['V_u'], {'name': '"""Total displacement"""'}), "(V_u, name='Total displacement')\n", (6104, 6136), False, 'import dolfin\n'), ((6219, 6243), 'dolfin.Function', 'dolfin.Function', (['V_alpha'], {}), '(V_alpha)\n', (6234, 6243), False, 'import dolfin\n'), ((6264, 6288), 'dolfin.Function', 'dolfin.Function', (['V_alpha'], {}), '(V_alpha)\n', (6279, 6288), False, 'import dolfin\n'), ((6444, 6462), 'dolfin.Function', 'dolfin.Function', (['Z'], {}), '(Z)\n', (6459, 6462), False, 'import dolfin\n'), ((6477, 6492), 'dolfin.split', 'dolfin.split', (['z'], {}), '(z)\n', (6489, 6492), False, 'import dolfin\n'), ((6503, 6542), 'dolfin.Expression', 'dolfin.Expression', (['"""t"""'], {'t': '(0.0)', 'degree': '(0)'}), "('t', t=0.0, degree=0)\n", (6520, 6542), False, 'import dolfin\n'), ((7766, 7789), 'dolfin.Constant', 'dolfin.Constant', (['[1, 0]'], {}), '([1, 0])\n', (7781, 7789), False, 'import dolfin\n'), ((8725, 8791), 'solvers.EquilibriumAM', 
'EquilibriumAM', (['energy', 'state', 'bcs'], {'parameters': "parameters['solver']"}), "(energy, state, bcs, parameters=parameters['solver'])\n", (8738, 8791), False, 'from solvers import EquilibriumAM\n'), ((8808, 8879), 'solver_stability.StabilitySolver', 'StabilitySolver', (['energy', 'state', 'bcs'], {'parameters': "parameters['stability']"}), "(energy, state, bcs, parameters=parameters['stability'])\n", (8823, 8879), False, 'from solver_stability import StabilitySolver\n'), ((8899, 8924), 'linsearch.LineSearch', 'LineSearch', (['energy', 'state'], {}), '(energy, state)\n', (8909, 8924), False, 'from linsearch import LineSearch\n'), ((8935, 9026), 'numpy.linspace', 'np.linspace', (["(-parameters['geometry']['Lx'] / 2.0)", "(parameters['geometry']['Lx'] / 2)", '(50)'], {}), "(-parameters['geometry']['Lx'] / 2.0, parameters['geometry'][\n 'Lx'] / 2, 50)\n", (8946, 9026), True, 'import numpy as np\n'), ((9035, 9155), 'numpy.linspace', 'np.linspace', (["parameters['loading']['load_min']", "parameters['loading']['load_max']", "parameters['loading']['n_steps']"], {}), "(parameters['loading']['load_min'], parameters['loading'][\n 'load_max'], parameters['loading']['n_steps'])\n", (9046, 9155), True, 'import numpy as np\n'), ((9171, 9246), 'dolfin.cpp.log.log', 'log', (['LogLevel.INFO', '"""====================== EVO =========================="""'], {}), "(LogLevel.INFO, '====================== EVO ==========================')\n", (9174, 9246), False, 'from dolfin.cpp.log import log, LogLevel, set_log_level\n'), ((13987, 14010), 'pandas.DataFrame', 'pd.DataFrame', (['spacetime'], {}), '(spacetime)\n', (13999, 14010), True, 'import pandas as pd\n'), ((14058, 14078), 'numpy.matrix', 'np.matrix', (['spacetime'], {}), '(spacetime)\n', (14067, 14078), True, 'import numpy as np\n'), ((14083, 14144), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mat'], {'cmap': '"""Greys"""', 'vmin': '(0.0)', 'vmax': '(1.0)', 'aspect': '(0.1)'}), "(mat, cmap='Greys', vmin=0.0, vmax=1.0, 
aspect=0.1)\n", (14093, 14144), True, 'import matplotlib.pyplot as plt\n'), ((14152, 14166), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (14164, 14166), True, 'import matplotlib.pyplot as plt\n'), ((14508, 14517), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14515, 14517), True, 'import matplotlib.pyplot as plt\n'), ((14647, 14664), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (14657, 14664), True, 'import matplotlib.pyplot as plt\n'), ((14669, 14686), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$t$"""'], {}), "('$t$')\n", (14679, 14686), True, 'import matplotlib.pyplot as plt\n'), ((15066, 15103), 'numpy.linspace', 'np.linspace', (['(-Lx / 2.0)', '(Lx / 2.0)', '(100)'], {}), '(-Lx / 2.0, Lx / 2.0, 100)\n', (15077, 15103), True, 'import numpy as np\n'), ((15152, 15164), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15162, 15164), True, 'import matplotlib.pyplot as plt\n'), ((15169, 15202), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'profile'], {'marker': '"""o"""'}), "(xs, profile, marker='o')\n", (15177, 15202), True, 'import matplotlib.pyplot as plt\n'), ((15869, 15902), 'dolfin.cpp.log.log', 'log', (['LogLevel.INFO', '"""Postprocess"""'], {}), "(LogLevel.INFO, 'Postprocess')\n", (15872, 15902), False, 'from dolfin.cpp.log import log, LogLevel, set_log_level\n'), ((16397, 16433), 'postprocess.plot_energy', 'pp.plot_energy', (['parameters', 'data', 'tc'], {}), '(parameters, data, tc)\n', (16411, 16433), True, 'import postprocess as pp\n'), ((16731, 16788), 'matplotlib.pyplot.plot', 'plt.plot', (["data['load']", 'elast_en'], {'c': '"""k"""', 'label': '"""analytic"""'}), "(data['load'], elast_en, c='k', label='analytic')\n", (16739, 16788), True, 'import matplotlib.pyplot as plt\n'), ((16793, 16841), 'matplotlib.pyplot.axhline', 'plt.axhline', (["parameters['geometry']['Ly']"], {'c': '"""k"""'}), "(parameters['geometry']['Ly'], c='k')\n", (16804, 16841), True, 'import 
matplotlib.pyplot as plt\n'), ((16846, 16858), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16856, 16858), True, 'import matplotlib.pyplot as plt\n'), ((16864, 16880), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (16872, 16880), True, 'import matplotlib.pyplot as plt\n'), ((17016, 17054), 'postprocess.plot_spectrum', 'pp.plot_spectrum', (['parameters', 'data', 'tc'], {}), '(parameters, data, tc)\n', (17032, 17054), True, 'import postprocess as pp\n'), ((17059, 17087), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (17069, 17087), True, 'import matplotlib.pyplot as plt\n'), ((1324, 1360), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1333, 1360), False, 'import yaml\n'), ((3639, 3675), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (3648, 3675), False, 'import yaml\n'), ((3762, 3798), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (3771, 3798), False, 'import yaml\n'), ((4803, 4853), 'yaml.dump', 'yaml.dump', (['parameters', 'f'], {'default_flow_style': '(False)'}), '(parameters, f, default_flow_style=False)\n', (4812, 4853), False, 'import yaml\n'), ((5021, 5055), 'dolfin.Point', 'dolfin.Point', (['(-Lx / 2.0)', '(-Ly / 2.0)'], {}), '(-Lx / 2.0, -Ly / 2.0)\n', (5033, 5055), False, 'import dolfin\n'), ((5051, 5083), 'dolfin.Point', 'dolfin.Point', (['(Lx / 2.0)', '(Ly / 2.0)'], {}), '(Lx / 2.0, Ly / 2.0)\n', (5063, 5083), False, 'import dolfin\n'), ((5346, 5378), 'os.path.join', 'os.path.join', (['outdir', '"""mesh.xml"""'], {}), "(outdir, 'mesh.xml')\n", (5358, 5378), False, 'import os\n'), ((5429, 5461), 'os.path.join', 'os.path.join', (['outdir', '"""mesh.pdf"""'], {}), "(outdir, 'mesh.pdf')\n", (5441, 5461), False, 'import os\n'), ((6683, 6750), 'dolfin.DirichletBC', 'dolfin.DirichletBC', (['V_u', '(0, 0)', 
'left_bottom_pt'], {'method': '"""pointwise"""'}), "(V_u, (0, 0), left_bottom_pt, method='pointwise')\n", (6701, 6750), False, 'import dolfin\n'), ((7822, 7857), 'os.path.join', 'os.path.join', (['outdir', '"""output.xdmf"""'], {}), "(outdir, 'output.xdmf')\n", (7834, 7857), False, 'import os\n'), ((7997, 8041), 'os.path.join', 'os.path.join', (['outdir', '"""output_postproc.xdmf"""'], {}), "(outdir, 'output_postproc.xdmf')\n", (8009, 8041), False, 'import os\n'), ((8186, 8220), 'os.path.join', 'os.path.join', (['outdir', '"""modes.xdmf"""'], {}), "(outdir, 'modes.xdmf')\n", (8198, 8220), False, 'import os\n'), ((8355, 8395), 'os.path.join', 'os.path.join', (['outdir', '"""bifurcation.xdmf"""'], {}), "(outdir, 'bifurcation.xdmf')\n", (8367, 8395), False, 'import os\n'), ((8539, 8588), 'os.path.join', 'os.path.join', (['outdir', '"""bifurcation_postproc.xdmf"""'], {}), "(outdir, 'bifurcation_postproc.xdmf')\n", (8551, 8588), False, 'import os\n'), ((9347, 9435), 'dolfin.cpp.log.log', 'log', (['LogLevel.CRITICAL', '"""====================== STEPPING =========================="""'], {}), "(LogLevel.CRITICAL,\n '====================== STEPPING ==========================')\n", (9350, 9435), False, 'from dolfin.cpp.log import log, LogLevel, set_log_level\n'), ((11144, 11219), 'dolfin.assemble', 'dolfin.assemble', (["(1.0 / 2.0 * material_parameters['E'] * a * eps ** 2.0 * dx)"], {}), "(1.0 / 2.0 * material_parameters['E'] * a * eps ** 2.0 * dx)\n", (11159, 11219), False, 'import dolfin\n'), ((12006, 12029), 'pandas.DataFrame', 'pd.DataFrame', (['time_data'], {}), '(time_data)\n', (12018, 12029), True, 'import pandas as pd\n'), ((14552, 14579), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['format_space'], {}), '(format_space)\n', (14565, 14579), False, 'from matplotlib.ticker import FuncFormatter, MaxNLocator\n'), ((14614, 14640), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['format_time'], {}), '(format_time)\n', (14627, 14640), False, 'from 
matplotlib.ticker import FuncFormatter, MaxNLocator\n'), ((14799, 14839), 'os.path.join', 'os.path.join', (["(outdir + '/spacetime.json')"], {}), "(outdir + '/spacetime.json')\n", (14811, 14839), False, 'import os\n'), ((14935, 14968), 'os.path.join', 'os.path.join', (['outdir', '"""alpha.pdf"""'], {}), "(outdir, 'alpha.pdf')\n", (14947, 14968), False, 'import os\n'), ((15292, 15327), 'os.path.join', 'os.path.join', (['outdir', '"""profile.pdf"""'], {}), "(outdir, 'profile.pdf')\n", (15304, 15327), False, 'import os\n'), ((15742, 15778), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (15751, 15778), False, 'import yaml\n'), ((16019, 16055), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (16028, 16055), False, 'import yaml\n'), ((16932, 16970), 'os.path.join', 'os.path.join', (['experiment', '"""energy.pdf"""'], {}), "(experiment, 'energy.pdf')\n", (16944, 16970), False, 'import os\n'), ((17135, 17175), 'os.path.join', 'os.path.join', (['experiment', '"""spectrum.pdf"""'], {}), "(experiment, 'spectrum.pdf')\n", (17147, 17175), False, 'import os\n'), ((1451, 1487), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1460, 1487), False, 'import yaml\n'), ((1588, 1624), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1597, 1624), False, 'import yaml\n'), ((1724, 1760), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1733, 1760), False, 'import yaml\n'), ((1855, 1891), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1864, 1891), False, 'import yaml\n'), ((1983, 2019), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1992, 2019), False, 'import yaml\n'), ((2114, 2150), 'yaml.load', 'yaml.load', (['f'], {'Loader': 
'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (2123, 2150), False, 'import yaml\n'), ((2245, 2281), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (2254, 2281), False, 'import yaml\n'), ((2372, 2408), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (2381, 2408), False, 'import yaml\n'), ((3499, 3511), 'pathlib.Path', 'Path', (['outdir'], {}), '(outdir)\n', (3503, 3511), False, 'from pathlib import Path\n'), ((3880, 3916), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (3889, 3916), False, 'import yaml\n'), ((4009, 4045), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (4018, 4045), False, 'import yaml\n'), ((4141, 4177), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (4150, 4177), False, 'import yaml\n'), ((4196, 4208), 'pathlib.Path', 'Path', (['outdir'], {}), '(outdir)\n', (4200, 4208), False, 'from pathlib import Path\n'), ((4743, 4782), 'os.path.join', 'os.path.join', (['outdir', '"""parameters.yaml"""'], {}), "(outdir, 'parameters.yaml')\n", (4755, 4782), False, 'import os\n'), ((6587, 6605), 'dolfin.Constant', 'dolfin.Constant', (['(0)'], {}), '(0)\n', (6602, 6605), False, 'import dolfin\n'), ((11587, 11608), 'dolfin.dot', 'dolfin.dot', (['sigma', 'e1'], {}), '(sigma, e1)\n', (11597, 11608), False, 'import dolfin\n'), ((12042, 12061), 'numpy.mod', 'np.mod', (['it', 'savelag'], {}), '(it, savelag)\n', (12048, 12061), True, 'import numpy as np\n'), ((12517, 12533), 'test_firstorderevo.get_trace', 'get_trace', (['alpha'], {}), '(alpha)\n', (12526, 12533), False, 'from test_firstorderevo import get_trace\n'), ((15019, 15052), 'os.path.join', 'os.path.join', (['outdir', '"""alpha.pdf"""'], {}), "(outdir, 'alpha.pdf')\n", (15031, 15052), False, 'import os\n'), ((15947, 15990), 'os.path.join', 
'os.path.join', (['experiment', '"""parameters.yaml"""'], {}), "(experiment, 'parameters.yaml')\n", (15959, 15990), False, 'import os\n'), ((10436, 10464), 'numpy.where', 'np.where', (['(stability.eigs < 0)'], {}), '(stability.eigs < 0)\n', (10444, 10464), True, 'import numpy as np\n'), ((12451, 12489), 'os.path.join', 'os.path.join', (['outdir', '"""time_data.json"""'], {}), "(outdir, 'time_data.json')\n", (12463, 12489), False, 'import os\n'), ((12904, 12956), 'dolfin.project', 'dolfin.project', (['stability.perturbation_beta', 'V_alpha'], {}), '(stability.perturbation_beta, V_alpha)\n', (12918, 12956), False, 'import dolfin\n'), ((10502, 10539), 'os.path.join', 'os.path.join', (['outdir', '"""postproc.xdmf"""'], {}), "(outdir, 'postproc.xdmf')\n", (10514, 10539), False, 'import os\n'), ((10699, 10756), 'dolfin.project', 'dolfin.project', (["stability.linsearch[n]['beta_n']", 'V_alpha'], {}), "(stability.linsearch[n]['beta_n'], V_alpha)\n", (10713, 10756), False, 'import dolfin\n'), ((12203, 12247), 'os.path.join', 'os.path.join', (['outdir', '"""output_postproc.xdmf"""'], {}), "(outdir, 'output_postproc.xdmf')\n", (12215, 12247), False, 'import os\n'), ((13382, 13426), 'os.path.join', 'os.path.join', (['outdir', '"""energy_perturbations"""'], {}), "(outdir, 'energy_perturbations')\n", (13394, 13426), False, 'import os\n'), ((13559, 13581), 'dolfin.Constant', 'dolfin.Constant', (['h_opt'], {}), '(h_opt)\n', (13574, 13581), False, 'import dolfin\n'), ((13642, 13664), 'dolfin.Constant', 'dolfin.Constant', (['h_opt'], {}), '(h_opt)\n', (13657, 13664), False, 'import dolfin\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
def ecdf(data):
    """Return the empirical cumulative distribution function of *data*.

    Produces two arrays of equal length: the measurements sorted in
    ascending order, and the cumulative fraction of observations at or
    below each sorted value (ending at exactly 1.0).
    """
    sorted_vals = np.sort(data)
    count = len(sorted_vals)
    # One step of height 1/count per observation: 1/count, 2/count, ..., 1.
    fractions = (np.arange(count) + 1) / count
    return sorted_vals, fractions
# Plot the empirical CDF of body temperature against a fitted normal CDF.
# NOTE(review): `df` and `periscope` are not defined in this snippet —
# presumably injected by the hosting platform (Periscope Data); confirm.
x, y = ecdf(df["temperature"])
plt.figure(figsize=(8,7))
sns.set()
plt.plot(x, y, marker=".", linestyle="none")
plt.xlabel("Body Temperature (F)")
plt.ylabel("Cumulative Distribution Function")
# Theoretical comparison: 10,000 samples from a normal distribution with
# the sample mean and standard deviation of the observed temperatures.
samples = np.random.normal(np.mean(df["temperature"]), np.std(df["temperature"]), size=10000)
x_theor, y_theor = ecdf(samples)
plt.plot(x_theor, y_theor)
plt.legend(('Normal Distribution', 'Empirical Data'), loc='lower right')
# D'Agostino-Pearson normality test on the raw temperatures.
print(stats.normaltest(df["temperature"]))
periscope.output(plt)
| [
"matplotlib.pyplot.plot",
"scipy.stats.normaltest",
"numpy.std",
"matplotlib.pyplot.legend",
"numpy.sort",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"seaborn.set"
] | [((414, 440), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 7)'}), '(figsize=(8, 7))\n', (424, 440), True, 'import matplotlib.pyplot as plt\n'), ((440, 449), 'seaborn.set', 'sns.set', ([], {}), '()\n', (447, 449), True, 'import seaborn as sns\n'), ((450, 494), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'marker': '"""."""', 'linestyle': '"""none"""'}), "(x, y, marker='.', linestyle='none')\n", (458, 494), True, 'import matplotlib.pyplot as plt\n'), ((495, 529), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Body Temperature (F)"""'], {}), "('Body Temperature (F)')\n", (505, 529), True, 'import matplotlib.pyplot as plt\n'), ((530, 576), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Distribution Function"""'], {}), "('Cumulative Distribution Function')\n", (540, 576), True, 'import matplotlib.pyplot as plt\n'), ((708, 734), 'matplotlib.pyplot.plot', 'plt.plot', (['x_theor', 'y_theor'], {}), '(x_theor, y_theor)\n', (716, 734), True, 'import matplotlib.pyplot as plt\n'), ((735, 807), 'matplotlib.pyplot.legend', 'plt.legend', (["('Normal Distribution', 'Empirical Data')"], {'loc': '"""lower right"""'}), "(('Normal Distribution', 'Empirical Data'), loc='lower right')\n", (745, 807), True, 'import matplotlib.pyplot as plt\n'), ((290, 303), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (297, 303), True, 'import numpy as np\n'), ((606, 632), 'numpy.mean', 'np.mean', (["df['temperature']"], {}), "(df['temperature'])\n", (613, 632), True, 'import numpy as np\n'), ((634, 659), 'numpy.std', 'np.std', (["df['temperature']"], {}), "(df['temperature'])\n", (640, 659), True, 'import numpy as np\n'), ((815, 850), 'scipy.stats.normaltest', 'stats.normaltest', (["df['temperature']"], {}), "(df['temperature'])\n", (831, 850), False, 'from scipy import stats\n'), ((342, 361), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (351, 361), True, 'import numpy as np\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This module contains the definition of a base class for a chemistry operator.
Such an operator takes a QMolecule and produces an input for
a quantum algorithm
"""
from abc import ABC, abstractmethod
import warnings
import logging
from typing import Dict, Union, List, Tuple, Optional, cast
import numpy as np
from qiskit.aqua.algorithms import MinimumEigensolverResult, EigensolverResult, AlgorithmResult
from qiskit.chemistry import QMolecule
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Type alias for a dipole moment's X, Y and Z components.
# A dipole moment, when present as X, Y and Z components will normally have float values for all
# the components. However when using Z2Symmetries, if the dipole component operator does not
# commute with the symmetry then no evaluation is done and None will be used as the 'value'
# indicating no measurement of the observable took place
DipoleTuple = Tuple[Optional[float], Optional[float], Optional[float]]
class ChemistryOperator(ABC):
    """Abstract base for operators that turn a ``QMolecule`` into an input
    for a quantum algorithm.

    Deprecated: superseded by ``FermionicTransformation``.
    """

    # Keys under which transformation metadata is recorded in molecule_info.
    INFO_NUM_PARTICLES = 'num_particles'
    INFO_NUM_ORBITALS = 'num_orbitals'
    INFO_TWO_QUBIT_REDUCTION = 'two_qubit_reduction'
    INFO_Z2SYMMETRIES = 'z2symmetries'

    @abstractmethod
    def __init__(self):
        _deprecation_msg = ('The ChemistryOperator is deprecated as of Qiskit Aqua 0.8.0 and will be '
                            'removed no earlier than 3 months after the release date. Instead, the '
                            'FermionicTransformation can be used to transform QMolecules and construct '
                            'ground state result objects.')
        warnings.warn(_deprecation_msg, DeprecationWarning, stacklevel=2)
        self._molecule_info = {}

    @abstractmethod
    def run(self, qmolecule):
        """Convert *qmolecule* into an operator consumable by a quantum algorithm.

        Args:
            qmolecule (QMolecule): molecule data produced by a chemistry driver

        Returns:
            Tuple: (qubit_op, aux_ops)
        """
        raise NotImplementedError

    def process_algorithm_result(
            self, algo_result: Union[dict,
                                     MinimumEigensolverResult,
                                     EigensolverResult]) -> Union[Tuple[List[str], dict],
                                                                  'MolecularGroundStateResult',
                                                                  'MolecularExcitedStatesResult']:
        """Post-process an algorithm result into the final chemistry result.

        Combines any classically computed parts with the algorithm output,
        delegating the work to the subclass's ``_process_algorithm_result``.

        Args:
            algo_result: raw result produced by the algorithm

        Returns:
            Final chemistry result computed from the algorithm result
        """
        # Result objects pass straight through; legacy dict results also
        # record the raw algorithm return values under 'algorithm_retvals'.
        if isinstance(algo_result, (MinimumEigensolverResult, EigensolverResult)):
            return self._process_algorithm_result(algo_result)
        lines, result = self._process_algorithm_result(algo_result)
        result['algorithm_retvals'] = algo_result
        return lines, result

    @abstractmethod
    def _process_algorithm_result(self, algo_result):
        raise NotImplementedError

    @property
    def molecule_info(self):
        """Metadata gathered while transforming the molecule."""
        return self._molecule_info

    def _add_molecule_info(self, key, value):
        # Record one piece of transformation metadata for later retrieval.
        self._molecule_info[key] = value
class MolecularChemistryResult(AlgorithmResult):
    """Deprecated container for molecular chemistry results.

    Unless stated otherwise, energies are expressed in Hartree and dipole
    moments in atomic units (A.U.).
    """

    def __init__(self, a_dict: Optional[Dict] = None) -> None:
        super().__init__(a_dict)
        _deprecation_msg = ('The qiskit.chemistry.chemistry_operator.MolecularChemistryResult object is '
                            'deprecated as of 0.8.0 and will be removed no sooner than 3 months after the'
                            ' release. You should use qiskit.chemistry.algorithms.ground_state_solvers.'
                            'FermionicGroundStateResult instead.')
        warnings.warn(_deprecation_msg, DeprecationWarning, stacklevel=2)

    @property
    def algorithm_result(self) -> AlgorithmResult:
        """The raw result object produced by the underlying algorithm."""
        return self.get('algorithm_result')

    @algorithm_result.setter
    def algorithm_result(self, value: AlgorithmResult) -> None:
        """Store the raw algorithm result."""
        self.data['algorithm_result'] = value

    @property
    def hartree_fock_energy(self) -> float:
        """The Hartree-Fock reference energy (Hartree)."""
        return self.get('hartree_fock_energy')

    @hartree_fock_energy.setter
    def hartree_fock_energy(self, value: float) -> None:
        """Store the Hartree-Fock reference energy."""
        self.data['hartree_fock_energy'] = value

    @property
    def nuclear_repulsion_energy(self) -> Optional[float]:
        """Nuclear repulsion energy, or None when the driver did not supply it."""
        return self.get('nuclear_repulsion_energy')

    @nuclear_repulsion_energy.setter
    def nuclear_repulsion_energy(self, value: float) -> None:
        """Store the nuclear repulsion energy."""
        self.data['nuclear_repulsion_energy'] = value

    @property
    def nuclear_dipole_moment(self) -> Optional[DipoleTuple]:
        """Nuclear dipole moment X, Y, Z components in A.U., when available
        from the driver."""
        return self.get('nuclear_dipole_moment')

    @nuclear_dipole_moment.setter
    def nuclear_dipole_moment(self, value: DipoleTuple) -> None:
        """Store the nuclear dipole moment components (A.U.)."""
        self.data['nuclear_dipole_moment'] = value
class MolecularGroundStateResult(MolecularChemistryResult):
    """
    Molecular Ground State Energy Result.
    Energies are in Hartree and dipole moments in A.U unless otherwise stated.
    """
    def __init__(self, a_dict: Optional[Dict] = None) -> None:
        super().__init__(a_dict)
        warnings.warn('The qiskit.chemistry.chemistry_operator.MolecularGroundStateResult object '
                      'is deprecated as of 0.8.0 and will be removed no sooner than 3 months after '
                      'the release. You should use qiskit.chemistry.algorithms.'
                      'ground_state_solvers.FermionicGroundStateResult instead.',
                      DeprecationWarning, stacklevel=2)
    @property
    def energy(self) -> Optional[float]:
        """ Returns ground state energy if nuclear_repulsion_energy is available from driver """
        nre = self.nuclear_repulsion_energy
        # Total energy only makes sense when the nuclear part is known
        return self.electronic_energy + nre if nre is not None else None
    @property
    def electronic_energy(self) -> float:
        """ Returns electronic part of ground state energy """
        # Sum of the computed part plus the classically extracted corrections
        return (self.computed_electronic_energy
                + self.ph_extracted_energy
                + self.frozen_extracted_energy)
    @property
    def computed_electronic_energy(self) -> float:
        """ Returns computed electronic part of ground state energy """
        return self.get('computed_electronic_energy')
    @computed_electronic_energy.setter
    def computed_electronic_energy(self, value: float) -> None:
        """ Sets computed electronic part of ground state energy """
        self.data['computed_electronic_energy'] = value
    @property
    def ph_extracted_energy(self) -> float:
        """ Returns particle hole extracted part of ground state energy """
        return self.get('ph_extracted_energy')
    @ph_extracted_energy.setter
    def ph_extracted_energy(self, value: float) -> None:
        """ Sets particle hole extracted part of ground state energy """
        self.data['ph_extracted_energy'] = value
    @property
    def frozen_extracted_energy(self) -> float:
        """ Returns frozen extracted part of ground state energy """
        return self.get('frozen_extracted_energy')
    @frozen_extracted_energy.setter
    def frozen_extracted_energy(self, value: float) -> None:
        """ Sets frozen extracted part of ground state energy """
        self.data['frozen_extracted_energy'] = value
    # Dipole moment results. Dipole moments are tuples of X, Y and Z components.
    # Chemistry drivers either support dipole integrals or not; when not
    # available the dipole properties below simply return None.
    # NOTE(review): has_dipole is deliberately a plain method (not a property) —
    # it is invoked as self.has_dipole() in formatted() below.
    def has_dipole(self) -> bool:
        """ Returns whether dipole moment is present in result or not """
        return self.nuclear_dipole_moment is not None and self.electronic_dipole_moment is not None
    @property
    def reverse_dipole_sign(self) -> bool:
        """ Returns if electronic dipole moment sign should be reversed when adding to nuclear """
        return self.get('reverse_dipole_sign')
    @reverse_dipole_sign.setter
    def reverse_dipole_sign(self, value: bool) -> None:
        """ Sets if electronic dipole moment sign should be reversed when adding to nuclear """
        self.data['reverse_dipole_sign'] = value
    @property
    def total_dipole_moment(self) -> Optional[float]:
        """ Returns total dipole of moment """
        if self.dipole_moment is None:
            return None  # No dipole at all
        if np.any(np.equal(list(self.dipole_moment), None)):
            return None  # One or more components in the dipole is None
        # Euclidean norm of the X, Y, Z components
        return np.sqrt(np.sum(np.power(list(self.dipole_moment), 2)))
    @property
    def total_dipole_moment_in_debye(self) -> Optional[float]:
        """ Returns total dipole of moment in Debye """
        tdm = self.total_dipole_moment
        return tdm / QMolecule.DEBYE if tdm is not None else None
    @property
    def dipole_moment(self) -> Optional[DipoleTuple]:
        """ Returns dipole moment """
        edm = self.electronic_dipole_moment
        if self.reverse_dipole_sign:
            # Flip the sign of every non-None electronic component before
            # combining with the nuclear contribution
            edm = cast(DipoleTuple, tuple(-1 * x if x is not None else None for x in edm))
        return _dipole_tuple_add(edm, self.nuclear_dipole_moment)
    @property
    def dipole_moment_in_debye(self) -> Optional[DipoleTuple]:
        """ Returns dipole moment in Debye """
        dipm = self.dipole_moment
        if dipm is None:
            return None
        # Convert each component separately; None components stay None
        dipmd0 = dipm[0]/QMolecule.DEBYE if dipm[0] is not None else None
        dipmd1 = dipm[1]/QMolecule.DEBYE if dipm[1] is not None else None
        dipmd2 = dipm[2]/QMolecule.DEBYE if dipm[2] is not None else None
        return dipmd0, dipmd1, dipmd2
    @property
    def electronic_dipole_moment(self) -> Optional[DipoleTuple]:
        """ Returns electronic dipole moment """
        # Element-wise sum of the computed and extracted parts; None if any
        # contributing tuple is missing (see _dipole_tuple_add)
        return _dipole_tuple_add(self.computed_dipole_moment,
                                 _dipole_tuple_add(self.ph_extracted_dipole_moment,
                                                   self.frozen_extracted_dipole_moment))
    @property
    def computed_dipole_moment(self) -> Optional[DipoleTuple]:
        """ Returns computed electronic part of dipole moment """
        return self.get('computed_dipole_moment')
    @computed_dipole_moment.setter
    def computed_dipole_moment(self, value: DipoleTuple) -> None:
        """ Sets computed electronic part of dipole moment """
        self.data['computed_dipole_moment'] = value
    @property
    def ph_extracted_dipole_moment(self) -> Optional[DipoleTuple]:
        """ Returns particle hole extracted part of dipole moment """
        return self.get('ph_extracted_dipole_moment')
    @ph_extracted_dipole_moment.setter
    def ph_extracted_dipole_moment(self, value: DipoleTuple) -> None:
        """ Sets particle hole extracted part of dipole moment """
        self.data['ph_extracted_dipole_moment'] = value
    @property
    def frozen_extracted_dipole_moment(self) -> Optional[DipoleTuple]:
        """ Returns frozen extracted part of dipole moment """
        return self.get('frozen_extracted_dipole_moment')
    @frozen_extracted_dipole_moment.setter
    def frozen_extracted_dipole_moment(self, value: DipoleTuple) -> None:
        """ Sets frozen extracted part of dipole moment """
        self.data['frozen_extracted_dipole_moment'] = value
    # Other measured operators. If these are not evaluated then None will be returned
    # instead of any measured value.
    def has_observables(self):
        """ Returns whether result has aux op observables such as spin, num particles """
        return self.total_angular_momentum is not None \
            or self.num_particles is not None \
            or self.magnetization is not None
    @property
    def total_angular_momentum(self) -> Optional[float]:
        """ Returns total angular momentum (S^2) """
        return self.get('total_angular_momentum')
    @total_angular_momentum.setter
    def total_angular_momentum(self, value: float) -> None:
        """ Sets total angular momentum """
        self.data['total_angular_momentum'] = value
    @property
    def spin(self) -> Optional[float]:
        """ Returns computed spin """
        if self.total_angular_momentum is None:
            return None
        # Invert S^2 = S(S+1): S = (-1 + sqrt(1 + 4*S^2)) / 2
        return (-1.0 + np.sqrt(1 + 4 * self.total_angular_momentum)) / 2
    @property
    def num_particles(self) -> Optional[float]:
        """ Returns measured number of particles """
        return self.get('num_particles')
    @num_particles.setter
    def num_particles(self, value: float) -> None:
        """ Sets measured number of particles """
        self.data['num_particles'] = value
    @property
    def magnetization(self) -> Optional[float]:
        """ Returns measured magnetization """
        return self.get('magnetization')
    @magnetization.setter
    def magnetization(self, value: float) -> None:
        """ Sets measured magnetization """
        self.data['magnetization'] = value
    def __str__(self) -> str:
        """ Printable formatted result """
        return '\n'.join(self.formatted)
    @property
    def formatted(self) -> List[str]:
        """ Formatted result as a list of strings """
        lines = []
        lines.append('=== GROUND STATE ENERGY ===')
        lines.append(' ')
        lines.append('* Electronic ground state energy (Hartree): {}'.
                     format(round(self.electronic_energy, 12)))
        lines.append('  - computed part:      {}'.
                     format(round(self.computed_electronic_energy, 12)))
        lines.append('  - frozen energy part: {}'.
                     format(round(self.frozen_extracted_energy, 12)))
        lines.append('  - particle hole part: {}'
                     .format(round(self.ph_extracted_energy, 12)))
        # Total energy is only printable when the nuclear part is known
        if self.nuclear_repulsion_energy is not None:
            lines.append('~ Nuclear repulsion energy (Hartree): {}'.
                         format(round(self.nuclear_repulsion_energy, 12)))
            lines.append('> Total ground state energy (Hartree): {}'.
                         format(round(self.energy, 12)))
        if self.has_observables():
            line = ' Measured::'
            if self.num_particles is not None:
                line += ' # Particles: {:.3f}'.format(self.num_particles)
            if self.spin is not None:
                line += ' S: {:.3f}'.format(self.spin)
            if self.total_angular_momentum is not None:
                line += ' S^2: {:.3f}'.format(self.total_angular_momentum)
            if self.magnetization is not None:
                line += ' M: {:.5f}'.format(self.magnetization)
            lines.append(line)
        if self.has_dipole():
            lines.append(' ')
            lines.append('=== DIPOLE MOMENT ===')
            lines.append(' ')
            lines.append('* Electronic dipole moment (a.u.): {}'
                         .format(_dipole_to_string(self.electronic_dipole_moment)))
            lines.append('  - computed part:      {}'
                         .format(_dipole_to_string(self.computed_dipole_moment)))
            lines.append('  - frozen energy part: {}'
                         .format(_dipole_to_string(self.frozen_extracted_dipole_moment)))
            lines.append('  - particle hole part: {}'
                         .format(_dipole_to_string(self.ph_extracted_dipole_moment)))
            if self.nuclear_dipole_moment is not None:
                lines.append('~ Nuclear dipole moment (a.u.): {}'
                             .format(_dipole_to_string(self.nuclear_dipole_moment)))
                lines.append('> Dipole moment (a.u.): {} Total: {}'
                             .format(_dipole_to_string(self.dipole_moment),
                                     _float_to_string(self.total_dipole_moment)))
                lines.append('               (debye): {} Total: {}'
                             .format(_dipole_to_string(self.dipole_moment_in_debye),
                                     _float_to_string(self.total_dipole_moment_in_debye)))
        return lines
class MolecularExcitedStatesResult(MolecularChemistryResult):
    """
    Molecular Excited States Result

    Energies are in Hartree and dipole moments in A.U unless otherwise stated.
    """
    # TODO This needs completing once EigenSolver interface/result is final
    @property
    def energies(self) -> Tuple:
        """ Tuple of computed state energies """
        return self.get('energies')
    @energies.setter
    def energies(self, value: Tuple) -> None:
        """ Stores the tuple of state energies """
        self.data['energies'] = value
def _dipole_tuple_add(x: Optional[DipoleTuple],
                      y: Optional[DipoleTuple]) -> Optional[DipoleTuple]:
    """ Element-wise sum of two dipole tuples; None if either tuple is missing """
    if x is None or y is None:
        return None
    return tuple(_element_add(a, b) for a, b in zip(x, y))
def _element_add(x: Optional[float], y: Optional[float]):
""" Add dipole elements where a value may be None then None is returned """
return x + y if x is not None and y is not None else None
def _dipole_to_string(dipole: DipoleTuple):
    """ Render a dipole tuple as '[x y z]', components rounded to 8 decimals;
    None components print as 'None' """
    rounded = [None if comp is None else round(comp, 8) for comp in dipole]
    parts = []
    for comp in rounded:
        parts.append('None' if comp is None else _float_to_string(comp))
    return '[' + ' '.join(parts) + ']'
def _float_to_string(value: Optional[float], precision: int = 8) -> str:
if value is None:
return 'None'
else:
return '0.0' if value == 0 else ('{:.' + str(precision) + 'f}').format(value).rstrip('0')
| [
"warnings.warn",
"logging.getLogger",
"numpy.sqrt"
] | [((942, 969), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (959, 969), False, 'import logging\n'), ((1693, 1998), 'warnings.warn', 'warnings.warn', (['"""The ChemistryOperator is deprecated as of Qiskit Aqua 0.8.0 and will be removed no earlier than 3 months after the release date. Instead, the FermionicTransformation can be used to transform QMolecules and construct ground state result objects."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'The ChemistryOperator is deprecated as of Qiskit Aqua 0.8.0 and will be removed no earlier than 3 months after the release date. Instead, the FermionicTransformation can be used to transform QMolecules and construct ground state result objects.'\n , DeprecationWarning, stacklevel=2)\n", (1706, 1998), False, 'import warnings\n'), ((4252, 4573), 'warnings.warn', 'warnings.warn', (['"""The qiskit.chemistry.chemistry_operator.MolecularChemistryResult object is deprecated as of 0.8.0 and will be removed no sooner than 3 months after the release. You should use qiskit.chemistry.algorithms.ground_state_solvers.FermionicGroundStateResult instead."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'The qiskit.chemistry.chemistry_operator.MolecularChemistryResult object is deprecated as of 0.8.0 and will be removed no sooner than 3 months after the release. You should use qiskit.chemistry.algorithms.ground_state_solvers.FermionicGroundStateResult instead.'\n , DeprecationWarning, stacklevel=2)\n", (4265, 4573), False, 'import warnings\n'), ((6437, 6760), 'warnings.warn', 'warnings.warn', (['"""The qiskit.chemistry.chemistry_operator.MolecularGroundStateResult object is deprecated as of 0.8.0 and will be removed no sooner than 3 months after the release. 
You should use qiskit.chemistry.algorithms.ground_state_solvers.FermionicGroundStateResult instead."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'The qiskit.chemistry.chemistry_operator.MolecularGroundStateResult object is deprecated as of 0.8.0 and will be removed no sooner than 3 months after the release. You should use qiskit.chemistry.algorithms.ground_state_solvers.FermionicGroundStateResult instead.'\n , DeprecationWarning, stacklevel=2)\n", (6450, 6760), False, 'import warnings\n'), ((13491, 13535), 'numpy.sqrt', 'np.sqrt', (['(1 + 4 * self.total_angular_momentum)'], {}), '(1 + 4 * self.total_angular_momentum)\n', (13498, 13535), True, 'import numpy as np\n')] |
import numpy as np
from pandas.compat import reduce
from pandas.core.dtypes.common import is_list_like
from pandas.core import common as com
def cartesian_product(X):
    """
    Numpy version of itertools.product or pandas.compat.product.
    Sometimes faster (for large inputs)...

    Parameters
    ----------
    X : list-like of list-likes

    Returns
    -------
    product : list of ndarrays

    Examples
    --------
    >>> cartesian_product([list('ABC'), [1, 2]])
    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
    array([1, 2, 1, 2, 1, 2])]

    See also
    --------
    itertools.product : Cartesian product of input iterables.  Equivalent to
        nested for-loops.
    pandas.compat.product : An alias for itertools.product.
    """
    msg = "Input must be a list-like of list-likes"
    if not is_list_like(X):
        raise TypeError(msg)
    for x in X:
        if not is_list_like(x):
            raise TypeError(msg)
    if len(X) == 0:
        return []
    lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
    # np.cumprod/np.prod: the `cumproduct`/`product` aliases are deprecated
    # and were removed in NumPy 2.0.
    cumprodX = np.cumprod(lenX)
    a = np.roll(cumprodX, 1)
    a[0] = 1
    if cumprodX[-1] != 0:
        # Floor division keeps the repeat counts integral (the division is
        # exact: cumprodX[i] always divides the total product). True division
        # would yield floats, which np.repeat rejects in modern NumPy.
        b = cumprodX[-1] // cumprodX
    else:
        # if any factor is empty, the cartesian product is empty
        b = np.zeros_like(cumprodX)
    # Each input is repeated b[i] times element-wise, then the whole pattern
    # is tiled a[i] times so all outputs share the same total length.
    return [np.tile(np.repeat(np.asarray(com.values_from_object(x)), b[i]),
                    np.prod(a[i]))
            for i, x in enumerate(X)]
def _compose2(f, g):
"""Compose 2 callables"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
def compose(*funcs):
    """Compose 2 or more callables"""
    assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
    # Explicit left fold: equivalent to reduce(_compose2, funcs)
    composed = funcs[0]
    for func in funcs[1:]:
        composed = _compose2(composed, func)
    return composed
| [
"numpy.zeros_like",
"pandas.core.common.values_from_object",
"numpy.roll",
"pandas.core.dtypes.common.is_list_like",
"numpy.cumproduct",
"numpy.product",
"pandas.compat.reduce"
] | [((1073, 1092), 'numpy.cumproduct', 'np.cumproduct', (['lenX'], {}), '(lenX)\n', (1086, 1092), True, 'import numpy as np\n'), ((1102, 1122), 'numpy.roll', 'np.roll', (['cumprodX', '(1)'], {}), '(cumprodX, 1)\n', (1109, 1122), True, 'import numpy as np\n'), ((1721, 1745), 'pandas.compat.reduce', 'reduce', (['_compose2', 'funcs'], {}), '(_compose2, funcs)\n', (1727, 1745), False, 'from pandas.compat import reduce\n'), ((832, 847), 'pandas.core.dtypes.common.is_list_like', 'is_list_like', (['X'], {}), '(X)\n', (844, 847), False, 'from pandas.core.dtypes.common import is_list_like\n'), ((1286, 1309), 'numpy.zeros_like', 'np.zeros_like', (['cumprodX'], {}), '(cumprodX)\n', (1299, 1309), True, 'import numpy as np\n'), ((909, 924), 'pandas.core.dtypes.common.is_list_like', 'is_list_like', (['x'], {}), '(x)\n', (921, 924), False, 'from pandas.core.dtypes.common import is_list_like\n'), ((1407, 1423), 'numpy.product', 'np.product', (['a[i]'], {}), '(a[i])\n', (1417, 1423), True, 'import numpy as np\n'), ((1352, 1377), 'pandas.core.common.values_from_object', 'com.values_from_object', (['x'], {}), '(x)\n', (1374, 1377), True, 'from pandas.core import common as com\n')] |
import numpy as np
from tensorflow import keras
def is_numpy(obj):
    """
    Tell whether an object is a numpy array or numpy scalar.
    :param obj: object to check
    :return: True if the object is numpy-type array.
    """
    return isinstance(obj, np.ndarray) or isinstance(obj, np.generic)
def ensure_numpy_type(obj):
    """
    Pass the object through unchanged if it is numpy-typed, else raise.
    :param obj: object to check
    :return: numpy object
    """
    if not isinstance(obj, (np.ndarray, np.generic)):
        raise AttributeError('Not a numpy type.')
    return obj
def ensure_tf_type(obj, fake_input_layer=None, name=None):
    """
    Convert to Keras Constant if needed
    :param obj: numpy / tf type
    :param fake_input_layer: fake input layer to add constant
    :param name: optional name for the generated Lambda layer
    :return: tf type
    """
    if is_numpy(obj):
        if obj.dtype == np.int64:
            # Downcast int64 to int32 — presumably for downstream TF op
            # compatibility; TODO confirm
            obj = np.int32(obj)
        # NOTE: the constant is captured via default arguments (inp, dtype)
        # so the Lambda layer remains self-contained/serializable; imports
        # are local to the closure for the same reason.
        def target_layer(_, inp=obj, dtype=obj.dtype.name):
            import numpy as np
            import tensorflow as tf
            if not isinstance(inp, (np.ndarray, np.generic)):
                inp = np.array(inp, dtype=dtype)
            return tf.constant(inp, dtype=inp.dtype)
        lambda_layer = keras.layers.Lambda(target_layer, name=name)
        # The layer ignores its input; fake_input_layer just anchors it in the graph
        return lambda_layer(fake_input_layer)
    else:
        return obj
def check_torch_keras_error(model, k_model, input_np, epsilon=1e-5, change_ordering=False):
    """
    Check difference between Torch and Keras models
    :param model: torch model
    :param k_model: keras model
    :param input_np: input data as numpy array or list of numpy array
    :param epsilon: allowed difference
    :param change_ordering: change ordering for keras input
    :return: actual difference
    :raises AssertionError: if the max element-wise difference exceeds epsilon
    """
    from torch.autograd import Variable
    import torch
    # Normalize the single-array case to a list of inputs
    if isinstance(input_np, np.ndarray):
        input_np = [input_np]
    input_var = [Variable(torch.FloatTensor(i)) for i in input_np]
    pytorch_output = model(*input_var)
    # Normalize to a list of numpy outputs regardless of tuple/single return
    if not isinstance(pytorch_output, tuple):
        pytorch_output = [pytorch_output.data.numpy()]
    else:
        pytorch_output = [p.data.numpy() for p in pytorch_output]
    if change_ordering:
        # change image data format: move axis 1 to the last position
        # (channels-first -> channels-last, assuming axis 1 is channels —
        # TODO confirm against callers)
        _input_np = []
        for i in input_np:
            axes = list(range(len(i.shape)))
            axes = axes[0:1] + axes[2:] + axes[1:2]
            _input_np.append(np.transpose(i, axes))
        input_np = _input_np
        # run keras model
        keras_output = k_model.predict(input_np)
        if not isinstance(keras_output, list):
            keras_output = [keras_output]
        # change image data format back: move the last axis to position 1
        _koutput = []
        for k in keras_output:
            axes = list(range(len(k.shape)))
            axes = axes[0:1] + axes[-1:] + axes[1:-1]
            _koutput.append(np.transpose(k, axes))
        keras_output = _koutput
    else:
        keras_output = k_model.predict(input_np)
        if not isinstance(keras_output, list):
            keras_output = [keras_output]
    # Track the largest element-wise deviation across all paired outputs
    max_error = 0
    for p, k in zip(pytorch_output, keras_output):
        error = np.max(np.abs(p - k))
        if error > max_error:
            max_error = error
    assert max_error < epsilon
    return max_error
| [
"numpy.abs",
"torch.FloatTensor",
"numpy.transpose",
"tensorflow.constant",
"numpy.array",
"numpy.int32",
"tensorflow.keras.layers.Lambda"
] | [((1153, 1197), 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['target_layer'], {'name': 'name'}), '(target_layer, name=name)\n', (1172, 1197), False, 'from tensorflow import keras\n'), ((823, 836), 'numpy.int32', 'np.int32', (['obj'], {}), '(obj)\n', (831, 836), True, 'import numpy as np\n'), ((1095, 1128), 'tensorflow.constant', 'tf.constant', (['inp'], {'dtype': 'inp.dtype'}), '(inp, dtype=inp.dtype)\n', (1106, 1128), True, 'import tensorflow as tf\n'), ((1853, 1873), 'torch.FloatTensor', 'torch.FloatTensor', (['i'], {}), '(i)\n', (1870, 1873), False, 'import torch\n'), ((3075, 3088), 'numpy.abs', 'np.abs', (['(p - k)'], {}), '(p - k)\n', (3081, 3088), True, 'import numpy as np\n'), ((1049, 1075), 'numpy.array', 'np.array', (['inp'], {'dtype': 'dtype'}), '(inp, dtype=dtype)\n', (1057, 1075), True, 'import numpy as np\n'), ((2346, 2367), 'numpy.transpose', 'np.transpose', (['i', 'axes'], {}), '(i, axes)\n', (2358, 2367), True, 'import numpy as np\n'), ((2779, 2800), 'numpy.transpose', 'np.transpose', (['k', 'axes'], {}), '(k, axes)\n', (2791, 2800), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import re
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import LabelBinarizer
import tensorflow as tf
from .embeddings import CharacterSequenceEmbedding, WordSequenceEmbedding
from .taggers import Tagger, extract_sequence_features
DEFAULT_ENTITY_TOKEN_SPAN_INDEX = 2
# Matches serialized in-gaz feature names, capturing gazetteer type and position
GAZ_PATTERN_MATCH = r"in-gaz\|type:(\w+)\|pos:(\w+)\|"
REGEX_TYPE_POSITIONAL_INDEX = 1
# Label assigned to padded tokens during training (see _pad_labels usage)
DEFAULT_LABEL = "B|UNK"
# Gazetteer class for tokens that belong to no gazetteer
DEFAULT_GAZ_LABEL = "O"
RANDOM_SEED = 1
ZERO_INITIALIZER_VALUE = 0
logger = logging.getLogger(__name__)
class LstmModel(Tagger): # pylint: disable=too-many-instance-attributes
"""This class encapsulates the bi-directional LSTM model and provides
the correct interface for use by the tagger model"""
def fit(self, X, y):
examples_arr = np.asarray(X, dtype="float32")
labels_arr = np.asarray(y, dtype="int32")
self._fit(examples_arr, labels_arr)
return self
def predict(self, X, dynamic_resource=None):
encoded_examples_arr = np.asarray(X, dtype="float32")
tags_by_example_arr = self._predict(encoded_examples_arr)
resized_predicted_tags = []
for query, seq_len in zip(tags_by_example_arr, self.sequence_lengths):
resized_predicted_tags.append(query[:seq_len])
return resized_predicted_tags
def set_params(self, **parameters):
"""
Initialize params for the LSTM. The keys in the parameters dictionary
are as follows:
Args:
parameters (dict): The keys in the parameters dictionary are as follows:
number_of_epochs: The number of epochs to run (int)
batch_size: The batch size for mini-batch training (int)
token_lstm_hidden_state_dimension: The hidden state
dimension of the LSTM cell (int)
learning_rate: The learning rate of the optimizer (int)
optimizer: The optimizer used to train the network
is the number of entities in the dataset (str)
display_epoch: The number of epochs after which the
network displays common stats like accuracy (int)
padding_length: The length of each query, which is
fixed, so some queries will be cut short in length
representing the word embedding, the row index
is the word's index (int)
token_embedding_dimension: The embedding dimension of the word (int)
token_pretrained_embedding_filepath: The pretrained embedding file-path (str)
dense_keep_prob: The dropout rate of the dense layers (float)
lstm_input_keep_prob: The dropout rate of the inputs to the LSTM cell (float)
lstm_output_keep_prob: The dropout rate of the outputs of the LSTM cell (float)
gaz_encoding_dimension: The gazetteer encoding dimension (int)
"""
self.number_of_epochs = parameters.get("number_of_epochs", 20)
self.batch_size = parameters.get("batch_size", 20)
self.token_lstm_hidden_state_dimension = parameters.get(
"token_lstm_hidden_state_dimension", 300
)
self.learning_rate = parameters.get("learning_rate", 0.005)
self.optimizer_tf = parameters.get("optimizer", "adam")
self.padding_length = parameters.get("padding_length", 20)
self.display_epoch = parameters.get("display_epoch", 20)
self.token_embedding_dimension = parameters.get(
"token_embedding_dimension", 300
)
self.token_pretrained_embedding_filepath = parameters.get(
"token_pretrained_embedding_filepath"
)
self.dense_keep_probability = parameters.get("dense_keep_prob", 0.5)
self.lstm_input_keep_prob = parameters.get("lstm_input_keep_prob", 0.5)
self.lstm_output_keep_prob = parameters.get("lstm_output_keep_prob", 0.5)
self.gaz_encoding_dimension = parameters.get("gaz_encoding_dimension", 100)
self.use_crf_layer = parameters.get("use_crf_layer", True)
self.use_char_embeddings = parameters.get("use_character_embeddings", False)
self.char_window_sizes = parameters.get("char_window_sizes", [5])
self.max_char_per_word = parameters.get("maximum_characters_per_word", 20)
self.character_embedding_dimension = parameters.get(
"character_embedding_dimension", 10
)
self.word_level_character_embedding_size = parameters.get(
"word_level_character_embedding_size", 40
)
def get_params(self, deep=True):
return self.__dict__
    def construct_tf_variables(self):
        """
        Constructs the variables and operations in the TensorFlow session graph
        """
        with self.graph.as_default():
            # Dropout keep-probabilities are placeholders so that training and
            # inference can feed different values at run time
            self.dense_keep_prob_tf = tf.placeholder(
                tf.float32, name="dense_keep_prob_tf"
            )
            self.lstm_input_keep_prob_tf = tf.placeholder(
                tf.float32, name="lstm_input_keep_prob_tf"
            )
            self.lstm_output_keep_prob_tf = tf.placeholder(
                tf.float32, name="lstm_output_keep_prob_tf"
            )
            # Word embeddings for each padded query: (batch, padding, embed_dim)
            self.query_input_tf = tf.placeholder(
                tf.float32,
                [None, self.padding_length, self.token_embedding_dimension],
                name="query_input_tf",
            )
            # One-hot gazetteer features per token
            self.gaz_input_tf = tf.placeholder(
                tf.float32,
                [None, self.padding_length, self.gaz_dimension],
                name="gaz_input_tf",
            )
            # One-hot encoded tag labels per token
            self.label_tf = tf.placeholder(
                tf.int32,
                [None, int(self.padding_length), self.output_dimension],
                name="label_tf",
            )
            # True (unpadded) length of each query in the batch
            self.batch_sequence_lengths_tf = tf.placeholder(
                tf.int32, shape=[None], name="batch_sequence_lengths_tf"
            )
            self.batch_sequence_mask_tf = tf.placeholder(
                tf.bool, shape=[None], name="batch_sequence_mask_tf"
            )
            if self.use_char_embeddings:
                # Per-character embeddings for each token of each query
                self.char_input_tf = tf.placeholder(
                    tf.float32,
                    [
                        None,
                        self.padding_length,
                        self.max_char_per_word,
                        self.character_embedding_dimension,
                    ],
                    name="char_input_tf",
                )
            # Wire the embedding concatenation into the bi-LSTM, then expose
            # a softmax over the LSTM outputs for prediction
            combined_embedding_tf = self._construct_embedding_network()
            self.lstm_output_tf = self._construct_lstm_network(combined_embedding_tf)
            self.lstm_output_softmax_tf = tf.nn.softmax(
                self.lstm_output_tf, name="output_softmax_tensor"
            )
            self.optimizer_tf, self.cost_tf = self._define_optimizer_and_cost()
        self.global_init = tf.global_variables_initializer()
        self.local_init = tf.local_variables_initializer()
        self.saver = tf.train.Saver()
    def extract_features(self, examples, config, resources, y=None, fit=True):
        """Transforms a list of examples into features that are then used by the
        deep learning model.
        Args:
            examples (list of mindmeld.core.Query): a list of queries
            config (ModelConfig): The ModelConfig which may contain information used for feature
                extraction
            resources (dict): Resources which may be used for this model's feature extraction
            y (list): A list of label sequences
        Returns:
            (sequence_embeddings, encoded_labels, groups): features for the LSTM network
        """
        del fit  # unused -- we use the value of y to determine whether to encode labels
        if y:
            # Train time
            self.resources = resources
            padded_y = self._pad_labels(y, DEFAULT_LABEL)
            # Flatten all label sequences so the encoder can be fit in one pass
            y_flat = [item for sublist in padded_y for item in sublist]
            encoded_labels_flat = self.label_encoder.fit_transform(y_flat)
            # Re-split the flat one-hot labels back into per-query sequences
            encoded_labels = []
            start_index = 0
            for label_sequence in padded_y:
                encoded_labels.append(
                    encoded_labels_flat[start_index : start_index + len(label_sequence)]
                )
                start_index += len(label_sequence)
            gaz_entities = list(self.resources.get("gazetteers", {}).keys())
            gaz_entities.append(DEFAULT_GAZ_LABEL)
            self.gaz_encoder.fit(gaz_entities)
            # The gaz dimension are the sum total of the gazetteer entities and
            # the 'other' gaz entity, which is the entity for all non-gazetteer tokens
            self.gaz_dimension = len(gaz_entities)
            self.output_dimension = len(self.label_encoder.classes_)
        else:
            # Predict time
            encoded_labels = None
        # Extract features and classes
        (
            x_sequence_embeddings_arr,
            self.gaz_features_arr,
            self.char_features_arr,
        ) = self._get_features(examples)
        self.sequence_lengths = self._extract_seq_length(examples)
        # There are no groups in this model
        groups = None
        return x_sequence_embeddings_arr, encoded_labels, groups
def setup_model(self, config):
self.set_params(**config.params)
self.label_encoder = LabelBinarizer()
self.gaz_encoder = LabelBinarizer()
self.graph = tf.Graph()
self.saver = None
self.example_type = config.example_type
self.features = config.features
self.query_encoder = WordSequenceEmbedding(
self.padding_length,
self.token_embedding_dimension,
self.token_pretrained_embedding_filepath,
)
if self.use_char_embeddings:
self.char_encoder = CharacterSequenceEmbedding(
self.padding_length,
self.character_embedding_dimension,
self.max_char_per_word,
)
def construct_feed_dictionary(
self, batch_examples, batch_char, batch_gaz, batch_seq_len, batch_labels=None
):
"""Constructs the feed dictionary that is used to feed data into the tensors
Args:
batch_examples (ndarray): A batch of examples
batch_char (ndarray): A batch of character features
batch_gaz (ndarray): A batch of gazetteer features
batch_seq_len (ndarray): A batch of sequence length of each query
batch_labels (ndarray): A batch of labels
Returns:
The feed dictionary
"""
if batch_labels is None:
batch_labels = []
return_dict = {
self.query_input_tf: batch_examples,
self.batch_sequence_lengths_tf: batch_seq_len,
self.gaz_input_tf: batch_gaz,
self.dense_keep_prob_tf: self.dense_keep_probability,
self.lstm_input_keep_prob_tf: self.lstm_input_keep_prob,
self.lstm_output_keep_prob_tf: self.lstm_output_keep_prob,
self.batch_sequence_mask_tf: self._generate_boolean_mask(batch_seq_len),
}
if len(batch_labels) > 0:
return_dict[self.label_tf] = batch_labels
if len(batch_char) > 0:
return_dict[self.char_input_tf] = batch_char
return return_dict
    def _construct_embedding_network(self):
        """ Constructs a network based on the word embedding and gazetteer
        inputs and concatenates them together
        Returns:
            Combined embeddings of the word and gazetteer embeddings
        """
        initializer = tf.contrib.layers.xavier_initializer(seed=RANDOM_SEED)
        # Project the one-hot gazetteer features down to a dense encoding
        dense_gaz_embedding_tf = tf.contrib.layers.fully_connected(
            inputs=self.gaz_input_tf,
            num_outputs=self.gaz_encoding_dimension,
            weights_initializer=initializer,
        )
        # Dynamic batch size taken from the query input tensor
        batch_size_dim = tf.shape(self.query_input_tf)[0]
        if self.use_char_embeddings:
            # One convolution per configured character window size, then
            # concatenate the per-window word-level character embeddings
            word_level_char_embeddings_list = []
            for window_size in self.char_window_sizes:
                word_level_char_embeddings_list.append(
                    self.apply_convolution(
                        self.char_input_tf, batch_size_dim, window_size
                    )
                )
            word_level_char_embedding = tf.concat(word_level_char_embeddings_list, 2)
            # Combined the two embeddings
            combined_embedding_tf = tf.concat(
                [self.query_input_tf, word_level_char_embedding], axis=2
            )
        else:
            combined_embedding_tf = self.query_input_tf
        # Finally append the dense gazetteer encoding along the feature axis
        combined_embedding_tf = tf.concat(
            [combined_embedding_tf, dense_gaz_embedding_tf], axis=2
        )
        return combined_embedding_tf
def apply_convolution(self, input_tensor, batch_size, char_window_size):
    """ Constructs a convolution network of a specific window size
    Args:
        input_tensor (tensor): The input tensor to the network
        batch_size (int): The batch size of the training data
        char_window_size (int): The character window size of each stride
    Returns:
        (Tensor): Convolved output tensor
    """
    # Reshape the flat char features into a 5-D tensor so that the 2-D
    # convolution can slide over (chars x embedding-dim) within each word.
    convolution_reshaped_char_embedding = tf.reshape(
        input_tensor,
        [
            -1,
            self.padding_length,
            self.max_char_per_word,
            self.character_embedding_dimension,
            1,
        ],
    )
    # Index 0 dimension is 1 because we want to apply this to every word. Index 1 dimension is
    # char_window_size since this is the convolution window size. Index 3 dimension is
    # 1 since the input channel is 1 dimensional (the sequence string). Index 4 dimension is
    # the output dimension which is a hyper-parameter.
    char_convolution_filter = tf.Variable(
        tf.random_normal(
            [
                1,
                char_window_size,
                self.character_embedding_dimension,
                1,
                self.word_level_character_embedding_size,
            ],
            dtype=tf.float32,
        )
    )
    # Strides is None because we want to advance one character at a time and one word at a time
    conv_output = tf.nn.convolution(
        convolution_reshaped_char_embedding, char_convolution_filter, padding="SAME"
    )
    # Max pool over each word, captured by the size of the filter corresponding to an entire
    # single word
    max_pool = tf.nn.pool(
        conv_output,
        window_shape=[
            1,
            self.max_char_per_word,
            self.character_embedding_dimension,
        ],
        pooling_type="MAX",
        padding="VALID",
    )
    # Transpose because shape before is batch_size BY query_padding_length BY 1 BY 1
    # BY num_filters. This transform rearranges the dimension of each rank such that
    # the num_filters dimension comes after the query_padding_length, so the last index
    # 4 is brought after the index 1.
    max_pool = tf.transpose(max_pool, [0, 1, 4, 2, 3])
    max_pool = tf.reshape(
        max_pool,
        [batch_size, self.padding_length, self.word_level_character_embedding_size],
    )
    # A single bias vector is learned per output filter, then tiled across
    # every word of every example so it can be added element-wise below.
    char_convolution_bias = tf.Variable(
        tf.random_normal([self.word_level_character_embedding_size,])
    )
    char_convolution_bias = tf.tile(char_convolution_bias, [self.padding_length])
    char_convolution_bias = tf.reshape(
        char_convolution_bias,
        [self.padding_length, self.word_level_character_embedding_size],
    )
    char_convolution_bias = tf.tile(char_convolution_bias, [batch_size, 1])
    char_convolution_bias = tf.reshape(
        char_convolution_bias,
        [batch_size, self.padding_length, self.word_level_character_embedding_size],
    )
    # ReLU non-linearity over the biased, pooled convolution output.
    word_level_char_embedding = tf.nn.relu(max_pool + char_convolution_bias)
    return word_level_char_embedding
def _define_optimizer_and_cost(self):
    """Defines the training cost tensor and its Adam optimizer.

    When the CRF layer is enabled the cost is the mean negative CRF
    log-likelihood over the batch; otherwise it is the mean softmax
    cross-entropy over the real (mask-selected) tokens only.

    Returns:
        (Operation, Tensor): the Adam minimize op and the scalar cost tensor
    """
    if self.use_crf_layer:
        # The CRF expects integer tag ids, so collapse the one-hot labels.
        tag_ids = tf.cast(tf.argmax(self.label_tf, axis=2), tf.int32)
        log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
            self.lstm_output_tf, tag_ids, self.batch_sequence_lengths_tf
        )
        cost_tf = tf.reduce_mean(-log_likelihood, name="cost_tf")
    else:
        # Flatten, then keep only real tokens via the boolean padding mask.
        flat_logits = tf.reshape(self.lstm_output_tf, [-1, self.output_dimension])
        flat_labels = tf.reshape(self.label_tf, [-1, self.output_dimension])
        masked_logits = tf.boolean_mask(flat_logits, self.batch_sequence_mask_tf)
        masked_labels = tf.boolean_mask(flat_labels, self.batch_sequence_mask_tf)
        softmax_loss_tf = tf.nn.softmax_cross_entropy_with_logits(
            logits=masked_logits, labels=masked_labels, name="softmax_loss_tf"
        )
        cost_tf = tf.reduce_mean(softmax_loss_tf, name="cost_tf")
    optimizer_tf = tf.train.AdamOptimizer(
        learning_rate=float(self.learning_rate)
    ).minimize(cost_tf)
    return optimizer_tf, cost_tf
def _calculate_score(self, output_arr, label_arr, seq_lengths_arr):
""" This function calculates the sequence score of all the queries,
that is, the total number of queries where all the tags are predicted
correctly.
Args:
output_arr (ndarray): Output array of the LSTM network
label_arr (ndarray): Label array of the true labels of the data
seq_lengths_arr (ndarray): A real sequence lengths of each example
Returns:
int: The number of queries where all the tags are correct
"""
reshaped_output_arr = np.reshape(
output_arr, [-1, int(self.padding_length), self.output_dimension]
)
reshaped_output_arr = np.argmax(reshaped_output_arr, 2)
reshaped_labels_arr = np.argmax(label_arr, 2)
score = 0
for idx, _ in enumerate(reshaped_output_arr):
seq_len = seq_lengths_arr[idx]
predicted_tags = reshaped_output_arr[idx][:seq_len]
actual_tags = reshaped_labels_arr[idx][:seq_len]
if np.array_equal(predicted_tags, actual_tags):
score += 1
return score
def _pad_labels(self, list_of_sequences, default_token):
"""
Pads the label sequence
Args:
list_of_sequences (list): A list of label sequences
default_token (str): The default label token for padding purposes
Returns:
list: padded output
"""
padded_output = []
for sequence in list_of_sequences:
padded_seq = [default_token] * self.padding_length
for idx, _ in enumerate(sequence):
if idx < self.padding_length:
padded_seq[idx] = sequence[idx]
padded_output.append(padded_seq)
return padded_output
def _generate_boolean_mask(self, seq_lengths):
"""
Generates boolean masks for each query in a query list
Args:
seq_lengths (list): A list of sequence lengths
Return:
list: A list of boolean masking values
"""
mask = [False] * (len(seq_lengths) * self.padding_length)
for idx, seq_len in enumerate(seq_lengths):
start_index = idx * self.padding_length
for i in range(start_index, start_index + seq_len):
mask[i] = True
return mask
@staticmethod
def _construct_lstm_state(initializer, hidden_dimension, batch_size, name):
    """Creates a learnable initial LSTM state, tiled across the batch.

    Args:
        initializer (tf.contrib.layers.xavier_initializer): initializer used
        hidden_dimension: num dimensions of the hidden state variable
        batch_size: the batch size of the data
        name: suffix of the variable going to be used

    Returns:
        (LSTMStateTuple): LSTM state information
    """
    def _state_variable(prefix):
        # One learnable (1 x hidden) row, shared by every example in the batch.
        return tf.get_variable(
            "{}_{}".format(prefix, name),
            shape=[1, hidden_dimension],
            dtype=tf.float32,
            initializer=initializer,
        )

    tile_shape = tf.stack([batch_size, 1])
    c_states = tf.tile(_state_variable("initial_cell_state"), tile_shape)
    h_states = tf.tile(_state_variable("initial_output_state"), tile_shape)
    return tf.contrib.rnn.LSTMStateTuple(c_states, h_states)
def _construct_regularized_lstm_cell(self, hidden_dimensions, initializer):
    """Builds an LSTM cell wrapped with input/output dropout.

    Args:
        hidden_dimensions: num dimensions of the hidden state variable
        initializer (tf.contrib.layers.xavier_initializer): initializer used

    Returns:
        (DropoutWrapper): regularized LSTM cell
    """
    base_cell = tf.contrib.rnn.CoupledInputForgetGateLSTMCell(
        hidden_dimensions,
        forget_bias=1.0,
        initializer=initializer,
        state_is_tuple=True,
    )
    # Keep probabilities are placeholders, so dropout can be turned off
    # (set to 1.0) at prediction time without rebuilding the graph.
    return tf.contrib.rnn.DropoutWrapper(
        base_cell,
        input_keep_prob=self.lstm_input_keep_prob_tf,
        output_keep_prob=self.lstm_output_keep_prob_tf,
    )
def _construct_lstm_network(self, input_tensor):
    """ This function constructs the Bi-Directional LSTM network
    Args:
        input_tensor (Tensor): Input tensor to the LSTM network
    Returns:
        output_tensor (Tensor): The output layer of the LSTM network
    """
    n_hidden = int(self.token_lstm_hidden_state_dimension)
    # We cannot use the static batch size variable since for the last batch set
    # of data, the data size could be less than the batch size
    batch_size_dim = tf.shape(input_tensor)[0]
    # We use the xavier initializer for some of it's gradient control properties
    initializer = tf.contrib.layers.xavier_initializer(seed=RANDOM_SEED)
    # Forward LSTM construction
    lstm_cell_forward_tf = self._construct_regularized_lstm_cell(
        n_hidden, initializer
    )
    initial_state_forward_tf = self._construct_lstm_state(
        initializer, n_hidden, batch_size_dim, "lstm_cell_forward_tf"
    )
    # Backward LSTM construction
    lstm_cell_backward_tf = self._construct_regularized_lstm_cell(
        n_hidden, initializer
    )
    initial_state_backward_tf = self._construct_lstm_state(
        initializer, n_hidden, batch_size_dim, "lstm_cell_backward_tf"
    )
    # Combined the forward and backward LSTM networks
    (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=lstm_cell_forward_tf,
        cell_bw=lstm_cell_backward_tf,
        inputs=input_tensor,
        sequence_length=self.batch_sequence_lengths_tf,
        dtype=tf.float32,
        initial_state_fw=initial_state_forward_tf,
        initial_state_bw=initial_state_backward_tf,
    )
    # Construct the output later
    output_tf = tf.concat([output_fw, output_bw], axis=-1)
    output_tf = tf.nn.dropout(output_tf, self.dense_keep_prob_tf)
    # Dense projection from the 2*n_hidden BiLSTM features down to the
    # per-tag output dimension.
    output_weights_tf = tf.get_variable(
        name="output_weights_tf",
        shape=[2 * n_hidden, self.output_dimension],
        dtype="float32",
        initializer=initializer,
    )
    # The weight matrix is tiled per batch example so a single batched
    # matmul can be used against the 3-D LSTM output below.
    output_weights_tf = tf.tile(output_weights_tf, [batch_size_dim, 1])
    output_weights_tf = tf.reshape(
        output_weights_tf, [batch_size_dim, 2 * n_hidden, self.output_dimension]
    )
    zero_initializer = tf.constant_initializer(ZERO_INITIALIZER_VALUE)
    output_bias_tf = tf.get_variable(
        name="output_bias_tf",
        shape=[self.output_dimension],
        dtype="float32",
        initializer=zero_initializer,
    )
    # Named "output_tensor" so load() can re-bind it by name after restore.
    output_tf = tf.add(
        tf.matmul(output_tf, output_weights_tf),
        output_bias_tf,
        name="output_tensor",
    )
    return output_tf
def _get_model_constructor(self):
    """Returns the model constructor; this class constructs itself."""
    return self
def _extract_seq_length(self, examples):
"""Extract sequence lengths from the input examples
Args:
examples (list of Query objects): List of input queries
Returns:
(list): List of seq lengths for each query
"""
seq_lengths = []
for example in examples:
if len(example.normalized_tokens) > self.padding_length:
seq_lengths.append(self.padding_length)
else:
seq_lengths.append(len(example.normalized_tokens))
return seq_lengths
def _get_features(self, examples):
    """Extracts word, gazetteer, and character features for every example.

    Args:
        examples (list of mindmeld.core.Query): a list of queries

    Returns:
        (tuple): word embeddings, gazetteer one-hot encodings, and char
        encodings (an empty list when char embeddings are disabled)
    """
    word_feats = []
    gaz_feats = []
    char_feats = []
    for example in examples:
        word_feat, gaz_feat, char_feat = self._extract_features(example)
        word_feats.append(word_feat)
        gaz_feats.append(gaz_feat)
        char_feats.append(char_feat)
    # Persist the encoders' embeddings so the model can be saved later.
    self.query_encoder.save_embeddings()
    if self.use_char_embeddings:
        self.char_encoder.save_embeddings()
        char_array = np.asarray(char_feats)
    else:
        char_array = []
    return np.asarray(word_feats), np.asarray(gaz_feats), char_array
def _gaz_transform(self, list_of_tokens_to_transform):
"""This function is used to handle special logic around SKLearn's LabelBinarizer
class which behaves in a non-standard way for 2 classes. In a 2 class system,
it encodes the classes as [0] and [1]. However, in a 3 class system, it encodes
the classes as [0,0,1], [0,1,0], [1,0,0] and sustains this behavior for num_class > 2.
We want to encode 2 class systems as [0,1] and [1,0]. This function does that.
Args:
list_of_tokens_to_transform (list): A sequence of class labels
Returns:
(array): corrected encoding from the binarizer
"""
output = self.gaz_encoder.transform(list_of_tokens_to_transform)
if len(self.gaz_encoder.classes_) == 2:
output = np.hstack((1 - output, output))
return output
def _extract_features(self, example):
    """Extracts word, gazetteer, and character encodings for one query.

    Args:
        example (mindmeld.core.Query): a query

    Returns:
        (tuple): encoded word token sequence, per-token gazetteer one-hot
        encodings, and encoded character sequence (None when char
        embeddings are disabled)
    """
    # Every token starts with the default ("no gazetteer") encoding.
    default_gaz_one_hot = self._gaz_transform([DEFAULT_GAZ_LABEL]).tolist()[0]
    extracted_gaz_tokens = [default_gaz_one_hot] * self.padding_length
    extracted_sequence_features = extract_sequence_features(
        example, self.example_type, self.features, self.resources
    )
    for index, extracted_gaz in enumerate(extracted_sequence_features):
        if index >= self.padding_length:
            break
        if extracted_gaz == {}:
            continue
        combined_gaz_features = set()
        for key in extracted_gaz.keys():
            regex_match = re.match(GAZ_PATTERN_MATCH, key)
            if regex_match:
                # Examples of gaz features here are:
                # in-gaz|type:city|pos:start|p_fe,
                # in-gaz|type:city|pos:end|pct-char-len
                # There were many gaz features of the same type that had
                # both start and end position tags for a given token.
                # Due to this, we did not implement functionality to
                # extract the positional information due to the noise
                # associated with it.
                combined_gaz_features.add(
                    regex_match.group(REGEX_TYPE_POSITIONAL_INDEX)
                )
        if len(combined_gaz_features) != 0:
            # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int is the documented drop-in replacement.
            total_encoding = np.zeros(self.gaz_dimension, dtype=int)
            # A token may belong to several gazetteers; sum their one-hots.
            for encoding in self._gaz_transform(list(combined_gaz_features)):
                total_encoding = np.add(total_encoding, encoding)
            extracted_gaz_tokens[index] = total_encoding.tolist()
    padded_query = self.query_encoder.encode_sequence_of_tokens(
        example.normalized_tokens
    )
    if self.use_char_embeddings:
        padded_char = self.char_encoder.encode_sequence_of_tokens(
            example.normalized_tokens
        )
    else:
        padded_char = None
    return padded_query, extracted_gaz_tokens, padded_char
def _fit(self, X, y):
    """Trains a classifier without cross-validation. It iterates through
    the data, feeds batches to the tensorflow session graph and fits the
    model based on the feed forward and back propagation steps.
    Args:
        X (list of list of list of str): a list of queries to train on
        y (list of list of str): a list of expected labels
    """
    self.construct_tf_variables()
    self.session = tf.Session(graph=self.graph)
    self.session.run([self.global_init, self.local_init])
    for epochs in range(int(self.number_of_epochs)):
        logger.info("Training epoch : %s", epochs)
        # Reshuffle the example order every epoch. The fancy indexing below
        # implies X, y and the feature arrays are numpy arrays — TODO confirm
        # against the caller.
        indices = list(range(len(X)))
        np.random.shuffle(indices)
        gaz = self.gaz_features_arr[indices]
        char = self.char_features_arr[indices] if self.use_char_embeddings else []
        examples = X[indices]
        labels = y[indices]
        batch_size = int(self.batch_size)
        num_batches = int(math.ceil(len(examples) / batch_size))
        seq_len = np.array(self.sequence_lengths)[indices]
        for batch in range(num_batches):
            # Slicing past the end is safe: the final batch is simply shorter.
            batch_start_index = batch * batch_size
            batch_end_index = (batch * batch_size) + batch_size
            batch_info = {
                "batch_examples": examples[batch_start_index:batch_end_index],
                "batch_labels": labels[batch_start_index:batch_end_index],
                "batch_gaz": gaz[batch_start_index:batch_end_index],
                "batch_seq_len": seq_len[batch_start_index:batch_end_index],
                "batch_char": char[batch_start_index:batch_end_index],
            }
            # Every display_epoch-th batch additionally fetches the output
            # and loss so progress can be scored and logged; the optimizer
            # runs (and the model trains) in both branches.
            if batch % int(self.display_epoch) == 0:
                output, loss, _ = self.session.run(
                    [self.lstm_output_tf, self.cost_tf, self.optimizer_tf],
                    feed_dict=self.construct_feed_dictionary(**batch_info),
                )
                score = self._calculate_score(
                    output, batch_info["batch_labels"], batch_info["batch_seq_len"]
                )
                accuracy = score / (len(batch_info["batch_examples"]) * 1.0)
                logger.info(
                    "Trained batch from index {} to {}, "
                    "Mini-batch loss: {:.5f}, "
                    "Training sequence accuracy: {:.5f}".format(
                        batch * batch_size,
                        (batch * batch_size) + batch_size,
                        loss,
                        accuracy,
                    )
                )
            else:
                self.session.run(
                    self.optimizer_tf,
                    feed_dict=self.construct_feed_dictionary(**batch_info),
                )
    return self
def _predict(self, X):
    """Predicts a tag sequence for each query.

    Args:
        X (list of list of list of str): a list of input representations

    Returns:
        (list): A list of decoded labels predicted by the model
    """
    seq_len_arr = np.array(self.sequence_lengths)
    # Disable every form of dropout at prediction time.
    self.dense_keep_probability = 1.0
    self.lstm_input_keep_prob = 1.0
    self.lstm_output_keep_prob = 1.0
    raw_output = self.session.run(
        [self.lstm_output_softmax_tf],
        feed_dict=self.construct_feed_dictionary(
            X, self.char_features_arr, self.gaz_features_arr, seq_len_arr
        ),
    )
    tag_ids = np.argmax(
        np.reshape(
            raw_output, [-1, int(self.padding_length), self.output_dimension]
        ),
        2,
    )
    # Map predicted tag ids back to label strings, dropping padding tokens.
    return [
        [
            self.label_encoder.classes_[tag]
            for tag in row[: self.sequence_lengths[idx]]
        ]
        for idx, row in enumerate(tag_ids)
    ]
def _predict_proba(self, X):
    """Predicts a tag sequence with a confidence score per tag.

    Args:
        X (list of list of list of str): a list of input representations

    Returns:
        (list): per query, a list of [label, confidence] pairs
    """
    seq_len_arr = np.array(self.sequence_lengths)
    # Disable every form of dropout at prediction time.
    self.dense_keep_probability = 1.0
    self.lstm_input_keep_prob = 1.0
    self.lstm_output_keep_prob = 1.0
    raw_output = self.session.run(
        [self.lstm_output_softmax_tf],
        feed_dict=self.construct_feed_dictionary(
            X, self.char_features_arr, self.gaz_features_arr, seq_len_arr
        ),
    )
    probs = np.reshape(
        raw_output, [-1, int(self.padding_length), self.output_dimension]
    )
    tag_ids = np.argmax(probs, 2)
    decoded_queries = []
    for idx, row in enumerate(tag_ids):
        # Pair each decoded label with its softmax probability.
        decoded_queries.append(
            [
                [self.label_encoder.classes_[tag], probs[idx][token_idx][tag]]
                for token_idx, tag in enumerate(row[: self.sequence_lengths[idx]])
            ]
        )
    return decoded_queries
def dump(self, path, config):
    """Saves the TensorFlow model and its feature-extraction state.

    Args:
        path (str): the folder path for the entity model folder
        config (dict): the model config, updated in place
    """
    path = path.split(".pkl")[0] + "_model_files"
    config["model"] = path
    # The TF session graph cannot be pickled, so mark it non-serializable.
    config["serializable"] = False
    if not os.path.isdir(path):
        os.makedirs(path)
    if not self.saver:
        # No graph was ever built: the model had no entities to learn.
        return
    self.saver.save(self.session, os.path.join(path, "lstm_model"))
    # Persist everything needed to rebuild feature extraction in load().
    joblib.dump(
        {
            "resources": self.resources,
            "gaz_dimension": self.gaz_dimension,
            "output_dimension": self.output_dimension,
            "gaz_features": self.gaz_features_arr,
            "sequence_lengths": self.sequence_lengths,
            "gaz_encoder": self.gaz_encoder,
            "label_encoder": self.label_encoder,
        },
        os.path.join(path, ".feature_extraction_vars"),
    )
def load(self, path):
    """Loads the TensorFlow model and its feature-extraction state.

    Args:
        path (str): the folder path for the entity model folder
    """
    path = path.split(".pkl")[0] + "_model_files"
    if not os.path.exists(os.path.join(path, "lstm_model.meta")):
        # No graph was ever saved: the model had no labels to train on.
        return
    self.graph = tf.Graph()
    self.session = tf.Session(graph=self.graph)
    with self.graph.as_default():
        saver = tf.train.import_meta_graph(os.path.join(path, "lstm_model.meta"))
        saver.restore(self.session, os.path.join(path, "lstm_model"))
        # Re-bind, by name, every graph tensor this class manipulates.
        tensors = {
            "dense_keep_prob_tf": "dense_keep_prob_tf:0",
            "lstm_input_keep_prob_tf": "lstm_input_keep_prob_tf:0",
            "lstm_output_keep_prob_tf": "lstm_output_keep_prob_tf:0",
            "query_input_tf": "query_input_tf:0",
            "gaz_input_tf": "gaz_input_tf:0",
            "label_tf": "label_tf:0",
            "batch_sequence_lengths_tf": "batch_sequence_lengths_tf:0",
            "batch_sequence_mask_tf": "batch_sequence_mask_tf:0",
            "lstm_output_tf": "output_tensor:0",
            "lstm_output_softmax_tf": "output_softmax_tensor:0",
        }
        if self.use_char_embeddings:
            tensors["char_input_tf"] = "char_input_tf:0"
        for attr, tensor_name in tensors.items():
            setattr(self, attr, self.session.graph.get_tensor_by_name(tensor_name))
    # Restore the pickled feature-extraction variables saved by dump().
    variables_to_load = joblib.load(os.path.join(path, ".feature_extraction_vars"))
    self.resources = variables_to_load["resources"]
    self.gaz_dimension = variables_to_load["gaz_dimension"]
    self.output_dimension = variables_to_load["output_dimension"]
    # NOTE(review): dump() stores self.gaz_features_arr under "gaz_features"
    # but it is restored here as self.gaz_features — confirm which attribute
    # downstream code reads after a load.
    self.gaz_features = variables_to_load["gaz_features"]
    self.sequence_lengths = variables_to_load["sequence_lengths"]
    self.gaz_encoder = variables_to_load["gaz_encoder"]
    self.label_encoder = variables_to_load["label_encoder"]
| [
"tensorflow.contrib.layers.xavier_initializer",
"sklearn.preprocessing.LabelBinarizer",
"numpy.argmax",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.local_variables_initializer",
"tensorflow.nn.pool",
"tensorflow.matmul",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorfl... | [((1173, 1200), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1190, 1200), False, 'import logging\n'), ((1456, 1486), 'numpy.asarray', 'np.asarray', (['X'], {'dtype': '"""float32"""'}), "(X, dtype='float32')\n", (1466, 1486), True, 'import numpy as np\n'), ((1508, 1536), 'numpy.asarray', 'np.asarray', (['y'], {'dtype': '"""int32"""'}), "(y, dtype='int32')\n", (1518, 1536), True, 'import numpy as np\n'), ((1682, 1712), 'numpy.asarray', 'np.asarray', (['X'], {'dtype': '"""float32"""'}), "(X, dtype='float32')\n", (1692, 1712), True, 'import numpy as np\n'), ((10109, 10125), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (10123, 10125), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((10153, 10169), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (10167, 10169), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((10192, 10202), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10200, 10202), True, 'import tensorflow as tf\n'), ((12394, 12448), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': 'RANDOM_SEED'}), '(seed=RANDOM_SEED)\n', (12430, 12448), True, 'import tensorflow as tf\n'), ((12483, 12621), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', ([], {'inputs': 'self.gaz_input_tf', 'num_outputs': 'self.gaz_encoding_dimension', 'weights_initializer': 'initializer'}), '(inputs=self.gaz_input_tf, num_outputs=\n self.gaz_encoding_dimension, weights_initializer=initializer)\n', (12516, 12621), True, 'import tensorflow as tf\n'), ((13445, 13511), 'tensorflow.concat', 'tf.concat', (['[combined_embedding_tf, dense_gaz_embedding_tf]'], {'axis': '(2)'}), '([combined_embedding_tf, dense_gaz_embedding_tf], axis=2)\n', (13454, 13511), True, 'import tensorflow as tf\n'), ((14068, 14186), 'tensorflow.reshape', 'tf.reshape', (['input_tensor', '[-1, 
self.padding_length, self.max_char_per_word, self.\n character_embedding_dimension, 1]'], {}), '(input_tensor, [-1, self.padding_length, self.max_char_per_word,\n self.character_embedding_dimension, 1])\n', (14078, 14186), True, 'import tensorflow as tf\n'), ((15157, 15256), 'tensorflow.nn.convolution', 'tf.nn.convolution', (['convolution_reshaped_char_embedding', 'char_convolution_filter'], {'padding': '"""SAME"""'}), "(convolution_reshaped_char_embedding,\n char_convolution_filter, padding='SAME')\n", (15174, 15256), True, 'import tensorflow as tf\n'), ((15414, 15557), 'tensorflow.nn.pool', 'tf.nn.pool', (['conv_output'], {'window_shape': '[1, self.max_char_per_word, self.character_embedding_dimension]', 'pooling_type': '"""MAX"""', 'padding': '"""VALID"""'}), "(conv_output, window_shape=[1, self.max_char_per_word, self.\n character_embedding_dimension], pooling_type='MAX', padding='VALID')\n", (15424, 15557), True, 'import tensorflow as tf\n'), ((16008, 16047), 'tensorflow.transpose', 'tf.transpose', (['max_pool', '[0, 1, 4, 2, 3]'], {}), '(max_pool, [0, 1, 4, 2, 3])\n', (16020, 16047), True, 'import tensorflow as tf\n'), ((16067, 16169), 'tensorflow.reshape', 'tf.reshape', (['max_pool', '[batch_size, self.padding_length, self.word_level_character_embedding_size]'], {}), '(max_pool, [batch_size, self.padding_length, self.\n word_level_character_embedding_size])\n', (16077, 16169), True, 'import tensorflow as tf\n'), ((16363, 16416), 'tensorflow.tile', 'tf.tile', (['char_convolution_bias', '[self.padding_length]'], {}), '(char_convolution_bias, [self.padding_length])\n', (16370, 16416), True, 'import tensorflow as tf\n'), ((16449, 16552), 'tensorflow.reshape', 'tf.reshape', (['char_convolution_bias', '[self.padding_length, self.word_level_character_embedding_size]'], {}), '(char_convolution_bias, [self.padding_length, self.\n word_level_character_embedding_size])\n', (16459, 16552), True, 'import tensorflow as tf\n'), ((16616, 16663), 'tensorflow.tile', 
'tf.tile', (['char_convolution_bias', '[batch_size, 1]'], {}), '(char_convolution_bias, [batch_size, 1])\n', (16623, 16663), True, 'import tensorflow as tf\n'), ((16696, 16811), 'tensorflow.reshape', 'tf.reshape', (['char_convolution_bias', '[batch_size, self.padding_length, self.word_level_character_embedding_size]'], {}), '(char_convolution_bias, [batch_size, self.padding_length, self.\n word_level_character_embedding_size])\n', (16706, 16811), True, 'import tensorflow as tf\n'), ((16879, 16923), 'tensorflow.nn.relu', 'tf.nn.relu', (['(max_pool + char_convolution_bias)'], {}), '(max_pool + char_convolution_bias)\n', (16889, 16923), True, 'import tensorflow as tf\n'), ((19088, 19121), 'numpy.argmax', 'np.argmax', (['reshaped_output_arr', '(2)'], {}), '(reshaped_output_arr, 2)\n', (19097, 19121), True, 'import numpy as np\n'), ((19152, 19175), 'numpy.argmax', 'np.argmax', (['label_arr', '(2)'], {}), '(label_arr, 2)\n', (19161, 19175), True, 'import numpy as np\n'), ((21875, 21924), 'tensorflow.contrib.rnn.LSTMStateTuple', 'tf.contrib.rnn.LSTMStateTuple', (['c_states', 'h_states'], {}), '(c_states, h_states)\n', (21904, 21924), True, 'import tensorflow as tf\n'), ((22350, 22481), 'tensorflow.contrib.rnn.CoupledInputForgetGateLSTMCell', 'tf.contrib.rnn.CoupledInputForgetGateLSTMCell', (['hidden_dimensions'], {'forget_bias': '(1.0)', 'initializer': 'initializer', 'state_is_tuple': '(True)'}), '(hidden_dimensions,\n forget_bias=1.0, initializer=initializer, state_is_tuple=True)\n', (22395, 22481), True, 'import tensorflow as tf\n'), ((22558, 22697), 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', (['lstm_cell'], {'input_keep_prob': 'self.lstm_input_keep_prob_tf', 'output_keep_prob': 'self.lstm_output_keep_prob_tf'}), '(lstm_cell, input_keep_prob=self.\n lstm_input_keep_prob_tf, output_keep_prob=self.lstm_output_keep_prob_tf)\n', (22587, 22697), True, 'import tensorflow as tf\n'), ((23449, 23503), 'tensorflow.contrib.layers.xavier_initializer', 
'tf.contrib.layers.xavier_initializer', ([], {'seed': 'RANDOM_SEED'}), '(seed=RANDOM_SEED)\n', (23485, 23503), True, 'import tensorflow as tf\n'), ((24199, 24480), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'lstm_cell_forward_tf', 'cell_bw': 'lstm_cell_backward_tf', 'inputs': 'input_tensor', 'sequence_length': 'self.batch_sequence_lengths_tf', 'dtype': 'tf.float32', 'initial_state_fw': 'initial_state_forward_tf', 'initial_state_bw': 'initial_state_backward_tf'}), '(cell_fw=lstm_cell_forward_tf, cell_bw=\n lstm_cell_backward_tf, inputs=input_tensor, sequence_length=self.\n batch_sequence_lengths_tf, dtype=tf.float32, initial_state_fw=\n initial_state_forward_tf, initial_state_bw=initial_state_backward_tf)\n', (24230, 24480), True, 'import tensorflow as tf\n'), ((24619, 24661), 'tensorflow.concat', 'tf.concat', (['[output_fw, output_bw]'], {'axis': '(-1)'}), '([output_fw, output_bw], axis=-1)\n', (24628, 24661), True, 'import tensorflow as tf\n'), ((24682, 24731), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_tf', 'self.dense_keep_prob_tf'], {}), '(output_tf, self.dense_keep_prob_tf)\n', (24695, 24731), True, 'import tensorflow as tf\n'), ((24761, 24894), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""output_weights_tf"""', 'shape': '[2 * n_hidden, self.output_dimension]', 'dtype': '"""float32"""', 'initializer': 'initializer'}), "(name='output_weights_tf', shape=[2 * n_hidden, self.\n output_dimension], dtype='float32', initializer=initializer)\n", (24776, 24894), True, 'import tensorflow as tf\n'), ((24977, 25024), 'tensorflow.tile', 'tf.tile', (['output_weights_tf', '[batch_size_dim, 1]'], {}), '(output_weights_tf, [batch_size_dim, 1])\n', (24984, 25024), True, 'import tensorflow as tf\n'), ((25053, 25142), 'tensorflow.reshape', 'tf.reshape', (['output_weights_tf', '[batch_size_dim, 2 * n_hidden, self.output_dimension]'], {}), '(output_weights_tf, [batch_size_dim, 2 * n_hidden, self.\n 
output_dimension])\n', (25063, 25142), True, 'import tensorflow as tf\n'), ((25188, 25235), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['ZERO_INITIALIZER_VALUE'], {}), '(ZERO_INITIALIZER_VALUE)\n', (25211, 25235), True, 'import tensorflow as tf\n'), ((25261, 25382), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""output_bias_tf"""', 'shape': '[self.output_dimension]', 'dtype': '"""float32"""', 'initializer': 'zero_initializer'}), "(name='output_bias_tf', shape=[self.output_dimension], dtype\n ='float32', initializer=zero_initializer)\n", (25276, 25382), True, 'import tensorflow as tf\n'), ((27094, 27119), 'numpy.asarray', 'np.asarray', (['x_feats_array'], {}), '(x_feats_array)\n', (27104, 27119), True, 'import numpy as np\n'), ((27146, 27173), 'numpy.asarray', 'np.asarray', (['gaz_feats_array'], {}), '(gaz_feats_array)\n', (27156, 27173), True, 'import numpy as np\n'), ((31034, 31062), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (31044, 31062), True, 'import tensorflow as tf\n'), ((33811, 33842), 'numpy.array', 'np.array', (['self.sequence_lengths'], {}), '(self.sequence_lengths)\n', (33819, 33842), True, 'import numpy as np\n'), ((34403, 34423), 'numpy.argmax', 'np.argmax', (['output', '(2)'], {}), '(output, 2)\n', (34412, 34423), True, 'import numpy as np\n'), ((35107, 35138), 'numpy.array', 'np.array', (['self.sequence_lengths'], {}), '(self.sequence_lengths)\n', (35115, 35138), True, 'import numpy as np\n'), ((35705, 35725), 'numpy.argmax', 'np.argmax', (['output', '(2)'], {}), '(output, 2)\n', (35714, 35725), True, 'import numpy as np\n'), ((37795, 37805), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (37803, 37805), True, 'import tensorflow as tf\n'), ((37829, 37857), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (37839, 37857), True, 'import tensorflow as tf\n'), ((5513, 5566), 'tensorflow.placeholder', 'tf.placeholder', 
(['tf.float32'], {'name': '"""dense_keep_prob_tf"""'}), "(tf.float32, name='dense_keep_prob_tf')\n", (5527, 5566), True, 'import tensorflow as tf\n'), ((5640, 5698), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""lstm_input_keep_prob_tf"""'}), "(tf.float32, name='lstm_input_keep_prob_tf')\n", (5654, 5698), True, 'import tensorflow as tf\n'), ((5773, 5832), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""lstm_output_keep_prob_tf"""'}), "(tf.float32, name='lstm_output_keep_prob_tf')\n", (5787, 5832), True, 'import tensorflow as tf\n'), ((5898, 6013), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.padding_length, self.token_embedding_dimension]'], {'name': '"""query_input_tf"""'}), "(tf.float32, [None, self.padding_length, self.\n token_embedding_dimension], name='query_input_tf')\n", (5912, 6013), True, 'import tensorflow as tf\n'), ((6105, 6205), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.padding_length, self.gaz_dimension]'], {'name': '"""gaz_input_tf"""'}), "(tf.float32, [None, self.padding_length, self.gaz_dimension],\n name='gaz_input_tf')\n", (6119, 6205), True, 'import tensorflow as tf\n'), ((6502, 6574), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""batch_sequence_lengths_tf"""'}), "(tf.int32, shape=[None], name='batch_sequence_lengths_tf')\n", (6516, 6574), True, 'import tensorflow as tf\n'), ((6648, 6716), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '[None]', 'name': '"""batch_sequence_mask_tf"""'}), "(tf.bool, shape=[None], name='batch_sequence_mask_tf')\n", (6662, 6716), True, 'import tensorflow as tf\n'), ((7363, 7427), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.lstm_output_tf'], {'name': '"""output_softmax_tensor"""'}), "(self.lstm_output_tf, name='output_softmax_tensor')\n", (7376, 7427), True, 'import tensorflow as tf\n'), ((7570, 7603), 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7601, 7603), True, 'import tensorflow as tf\n'), ((7634, 7666), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (7664, 7666), True, 'import tensorflow as tf\n'), ((7693, 7709), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7707, 7709), True, 'import tensorflow as tf\n'), ((12690, 12719), 'tensorflow.shape', 'tf.shape', (['self.query_input_tf'], {}), '(self.query_input_tf)\n', (12698, 12719), True, 'import tensorflow as tf\n'), ((13119, 13164), 'tensorflow.concat', 'tf.concat', (['word_level_char_embeddings_list', '(2)'], {}), '(word_level_char_embeddings_list, 2)\n', (13128, 13164), True, 'import tensorflow as tf\n'), ((13244, 13311), 'tensorflow.concat', 'tf.concat', (['[self.query_input_tf, word_level_char_embedding]'], {'axis': '(2)'}), '([self.query_input_tf, word_level_char_embedding], axis=2)\n', (13253, 13311), True, 'import tensorflow as tf\n'), ((14719, 14862), 'tensorflow.random_normal', 'tf.random_normal', (['[1, char_window_size, self.character_embedding_dimension, 1, self.\n word_level_character_embedding_size]'], {'dtype': 'tf.float32'}), '([1, char_window_size, self.character_embedding_dimension, \n 1, self.word_level_character_embedding_size], dtype=tf.float32)\n', (14735, 14862), True, 'import tensorflow as tf\n'), ((16258, 16318), 'tensorflow.random_normal', 'tf.random_normal', (['[self.word_level_character_embedding_size]'], {}), '([self.word_level_character_embedding_size])\n', (16274, 16318), True, 'import tensorflow as tf\n'), ((17361, 17469), 'tensorflow.contrib.crf.crf_log_likelihood', 'tf.contrib.crf.crf_log_likelihood', (['self.lstm_output_tf', 'flattened_labels', 'self.batch_sequence_lengths_tf'], {}), '(self.lstm_output_tf, flattened_labels,\n self.batch_sequence_lengths_tf)\n', (17394, 17469), True, 'import tensorflow as tf\n'), ((17518, 17565), 'tensorflow.reduce_mean', 
'tf.reduce_mean', (['(-log_likelihood)'], {'name': '"""cost_tf"""'}), "(-log_likelihood, name='cost_tf')\n", (17532, 17565), True, 'import tensorflow as tf\n'), ((17970, 18082), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'masked_logits', 'labels': 'masked_labels', 'name': '"""softmax_loss_tf"""'}), "(logits=masked_logits, labels=\n masked_labels, name='softmax_loss_tf')\n", (18009, 18082), True, 'import tensorflow as tf\n'), ((18131, 18178), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['softmax_loss_tf'], {'name': '"""cost_tf"""'}), "(softmax_loss_tf, name='cost_tf')\n", (18145, 18178), True, 'import tensorflow as tf\n'), ((19432, 19475), 'numpy.array_equal', 'np.array_equal', (['predicted_tags', 'actual_tags'], {}), '(predicted_tags, actual_tags)\n', (19446, 19475), True, 'import numpy as np\n'), ((21756, 21781), 'tensorflow.stack', 'tf.stack', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (21764, 21781), True, 'import tensorflow as tf\n'), ((21832, 21857), 'tensorflow.stack', 'tf.stack', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (21840, 21857), True, 'import tensorflow as tf\n'), ((23315, 23337), 'tensorflow.shape', 'tf.shape', (['input_tensor'], {}), '(input_tensor)\n', (23323, 23337), True, 'import tensorflow as tf\n'), ((25478, 25517), 'tensorflow.matmul', 'tf.matmul', (['output_tf', 'output_weights_tf'], {}), '(output_tf, output_weights_tf)\n', (25487, 25517), True, 'import tensorflow as tf\n'), ((27215, 27243), 'numpy.asarray', 'np.asarray', (['char_feats_array'], {}), '(char_feats_array)\n', (27225, 27243), True, 'import numpy as np\n'), ((28182, 28213), 'numpy.hstack', 'np.hstack', (['(1 - output, output)'], {}), '((1 - output, output))\n', (28191, 28213), True, 'import numpy as np\n'), ((31294, 31320), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (31311, 31320), True, 'import numpy as np\n'), ((36553, 36572), 'os.path.isdir', 'os.path.isdir', 
(['path'], {}), '(path)\n', (36566, 36572), False, 'import os\n'), ((36586, 36603), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (36597, 36603), False, 'import os\n'), ((36796, 36828), 'os.path.join', 'os.path.join', (['path', '"""lstm_model"""'], {}), "(path, 'lstm_model')\n", (36808, 36828), False, 'import os\n'), ((37300, 37346), 'os.path.join', 'os.path.join', (['path', '""".feature_extraction_vars"""'], {}), "(path, '.feature_extraction_vars')\n", (37312, 37346), False, 'import os\n'), ((39617, 39663), 'os.path.join', 'os.path.join', (['path', '""".feature_extraction_vars"""'], {}), "(path, '.feature_extraction_vars')\n", (39629, 39663), False, 'import os\n'), ((6826, 6973), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.padding_length, self.max_char_per_word, self.\n character_embedding_dimension]'], {'name': '"""char_input_tf"""'}), "(tf.float32, [None, self.padding_length, self.\n max_char_per_word, self.character_embedding_dimension], name=\n 'char_input_tf')\n", (6840, 6973), True, 'import tensorflow as tf\n'), ((17285, 17317), 'tensorflow.argmax', 'tf.argmax', (['self.label_tf'], {'axis': '(2)'}), '(self.label_tf, axis=2)\n', (17294, 17317), True, 'import tensorflow as tf\n'), ((17641, 17701), 'tensorflow.reshape', 'tf.reshape', (['self.lstm_output_tf', '[-1, self.output_dimension]'], {}), '(self.lstm_output_tf, [-1, self.output_dimension])\n', (17651, 17701), True, 'import tensorflow as tf\n'), ((17824, 17878), 'tensorflow.reshape', 'tf.reshape', (['self.label_tf', '[-1, self.output_dimension]'], {}), '(self.label_tf, [-1, self.output_dimension])\n', (17834, 17878), True, 'import tensorflow as tf\n'), ((29104, 29136), 're.match', 're.match', (['GAZ_PATTERN_MATCH', 'key'], {}), '(GAZ_PATTERN_MATCH, key)\n', (29112, 29136), False, 'import re\n'), ((29902, 29944), 'numpy.zeros', 'np.zeros', (['self.gaz_dimension'], {'dtype': 'np.int'}), '(self.gaz_dimension, dtype=np.int)\n', (29910, 29944), True, 'import numpy as 
np\n'), ((31662, 31693), 'numpy.array', 'np.array', (['self.sequence_lengths'], {}), '(self.sequence_lengths)\n', (31670, 31693), True, 'import numpy as np\n'), ((37602, 37639), 'os.path.join', 'os.path.join', (['path', '"""lstm_model.meta"""'], {}), "(path, 'lstm_model.meta')\n", (37614, 37639), False, 'import os\n'), ((37944, 37981), 'os.path.join', 'os.path.join', (['path', '"""lstm_model.meta"""'], {}), "(path, 'lstm_model.meta')\n", (37956, 37981), False, 'import os\n'), ((38023, 38055), 'os.path.join', 'os.path.join', (['path', '"""lstm_model"""'], {}), "(path, 'lstm_model')\n", (38035, 38055), False, 'import os\n'), ((30064, 30096), 'numpy.add', 'np.add', (['total_encoding', 'encoding'], {}), '(total_encoding, encoding)\n', (30070, 30096), True, 'import numpy as np\n')] |
"""This holds a routine for restricting the current process memory on Windows."""
import multiprocessing
import ctypes
def set_memory_limit(memory_limit: int) -> None:
    """Cap the memory of the *current* process using a Windows job object.

    Creates a new unnamed job object with the given memory limit in bytes
    and assigns the current process to it: this process together with its
    descendant processes will not be allowed to exceed the limit.

    Because ``purge_pid_on_exit`` is hard-coded to True below, the job
    handle is deliberately leaked so that when this process exits, the
    process and all its descendants are killed (KILL_ON_JOB_CLOSE).

    Windows-only: relies on ``ctypes.windll.kernel32``.
    """
    import os
    pid = os.getpid()
    purge_pid_on_exit = True
    # Windows API constants, used for OpenProcess and SetInformationJobObject.
    PROCESS_TERMINATE = 0x1
    PROCESS_SET_QUOTA = 0x100
    JobObjectExtendedLimitInformation = 9
    # NOTE(review): JOB_OBJECT_LIMIT_PROCESS_MEMORY is defined but never used;
    # only the JOB (whole-tree) memory limit is applied below.
    JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x100
    JOB_OBJECT_LIMIT_JOB_MEMORY = 0x200
    JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x2000
    class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
        """Windows API structure, used as input to SetInformationJobObject."""
        class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
            # Field layout must match the Win32 struct exactly (order and size).
            _fields_ = [("PerProcessUserTimeLimit", ctypes.c_int64),
                        ("PerJobUserTimeLimit", ctypes.c_int64),
                        ("LimitFlags", ctypes.c_uint32),
                        ("MinimumWorkingSetSize", ctypes.c_void_p),
                        ("MaximumWorkingSetSize", ctypes.c_void_p),
                        ("ActiveProcessLimit", ctypes.c_uint32),
                        ("Affinity", ctypes.c_void_p),
                        ("PriorityClass", ctypes.c_uint32),
                        ("SchedulingClass", ctypes.c_uint32)]
        class IO_COUNTERS(ctypes.Structure):
            _fields_ = [("ReadOperationCount", ctypes.c_uint64),
                        ("WriteOperationCount", ctypes.c_uint64),
                        ("OtherOperationCount", ctypes.c_uint64),
                        ("ReadTransferCount", ctypes.c_uint64),
                        ("WriteTransferCount", ctypes.c_uint64),
                        ("OtherTransferCount", ctypes.c_uint64)]
        _fields_ = [("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
                    ("IoInfo", IO_COUNTERS),
                    ("ProcessMemoryLimit", ctypes.c_void_p),
                    ("JobMemoryLimit", ctypes.c_void_p),
                    ("PeakProcessMemoryUsed", ctypes.c_void_p),
                    ("PeakJobMemoryUsed", ctypes.c_void_p)]
    job_info = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
    # Limit the memory of the whole job (process tree), not per-process.
    job_info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_JOB_MEMORY
    if purge_pid_on_exit:
        # Kill every process in the job when the last job handle is closed.
        job_info.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    job_info.JobMemoryLimit = memory_limit
    kernel = ctypes.windll.kernel32
    job = kernel.CreateJobObjectA(None, None)
    if job == 0:
        raise RuntimeError("CreateJobObjectA failed")
    keep_job_handle = False
    try:
        if not kernel.SetInformationJobObject(
            job,
            JobObjectExtendedLimitInformation,
            ctypes.POINTER(JOBOBJECT_EXTENDED_LIMIT_INFORMATION)(job_info),
            ctypes.sizeof(JOBOBJECT_EXTENDED_LIMIT_INFORMATION)):
            raise RuntimeError("SetInformationJobObject failed")
        # Need SET_QUOTA to assign the process to a job, TERMINATE so the
        # kill-on-close flag can later terminate it.
        process = kernel.OpenProcess(PROCESS_SET_QUOTA | PROCESS_TERMINATE,False, pid)
        if process == 0:
            raise RuntimeError("OpenProcess failed")
        try:
            if not kernel.AssignProcessToJobObject(job, process):
                raise RuntimeError("AssignProcessToJobObject failed")
            # If purge_pid_on_exit is true, we kill process pid and all its
            # descendants when the job handle is closed. So, we keep the handle
            # dangling, and it will be closed when *this* process terminates.
            keep_job_handle = purge_pid_on_exit
        finally:
            if not kernel.CloseHandle(process):
                raise RuntimeError("CloseHandle failed")
    finally:
        # Close the job handle only when we do NOT want kill-on-close
        # semantics tied to this process's lifetime.
        if not (keep_job_handle or kernel.CloseHandle(job)):
            raise RuntimeError("CloseHandle failed")
def allocate(bytes):
    """Attempt to allocate a zero-filled numpy array of ``bytes`` one-byte elements.

    Prints a confirmation on success; on failure prints the exception and
    re-raises it.  Used as the workload for the memory-limit demo.
    """
    import numpy
    try:
        buf = numpy.zeros(shape=(bytes,), dtype='i1')
        print("allocation done:", bytes)
    except Exception as ex:
        print("Failed to allocate:", ex)
        raise
def runner(thunk, memory_limit, *args):
    """Child-process entry point: install the memory cap, then invoke ``thunk``.

    ``set_memory_limit`` is called here (inside the child) because it caps
    whichever process calls it, via ``os.getpid()``.
    """
    set_memory_limit(memory_limit)
    thunk(*args)
def run_in_process_with_memory_limit(thunk, memory_limit, test_bytes):
    """Run ``thunk(test_bytes)`` in a child process capped at ``memory_limit`` bytes.

    Blocks until the child terminates.
    """
    child = multiprocessing.Process(
        target=runner, args=(thunk, memory_limit, test_bytes))
    child.start()
    child.join()
def main():
    """Demo: spawn a child capped at ~100 MB and have it attempt a 100 MB allocation."""
    limit = 100 * 1000 * 1000
    run_in_process_with_memory_limit(allocate, memory_limit=limit, test_bytes=limit)
# Allow running this module directly as a demo of the job-object memory cap.
if __name__ == "__main__":
    main()
| [
"os.getpid",
"ctypes.sizeof",
"numpy.zeros",
"multiprocessing.Process",
"ctypes.POINTER"
] | [((645, 656), 'os.getpid', 'os.getpid', ([], {}), '()\n', (654, 656), False, 'import os\n'), ((4667, 4745), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'runner', 'args': '(thunk, memory_limit, test_bytes)'}), '(target=runner, args=(thunk, memory_limit, test_bytes))\n', (4690, 4745), False, 'import multiprocessing\n'), ((4328, 4367), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(bytes,)', 'dtype': '"""i1"""'}), "(shape=(bytes,), dtype='i1')\n", (4339, 4367), False, 'import numpy\n'), ((3296, 3347), 'ctypes.sizeof', 'ctypes.sizeof', (['JOBOBJECT_EXTENDED_LIMIT_INFORMATION'], {}), '(JOBOBJECT_EXTENDED_LIMIT_INFORMATION)\n', (3309, 3347), False, 'import ctypes\n'), ((3213, 3265), 'ctypes.POINTER', 'ctypes.POINTER', (['JOBOBJECT_EXTENDED_LIMIT_INFORMATION'], {}), '(JOBOBJECT_EXTENDED_LIMIT_INFORMATION)\n', (3227, 3265), False, 'import ctypes\n')] |
'''
.. module:: skrf.media.distributedCircuit
============================================================
distributedCircuit (:mod:`skrf.media.distributedCircuit`)
============================================================
A transmission line mode defined in terms of distributed impedance and admittance values.
.. autosummary::
:toctree: generated/
DistributedCircuit
'''
from copy import deepcopy
from scipy.constants import epsilon_0, mu_0, c,pi, mil
import numpy as npy
from numpy import sqrt, exp, array,tan,sin,cos,inf, log, real,imag,\
interp, linspace, shape,zeros, reshape
from ..tlineFunctions import electrical_length
from .media import Media, DefinedGammaZ0
from ..constants import INF, ONE, ZERO
class DistributedCircuit(Media):
    '''
    A transmission line mode defined in terms of distributed impedance and admittance values.

    Parameters
    ------------
    frequency : :class:`~skrf.frequency.Frequency` object
        frequency band of the media
    z0 : number, array-like, or None
        the port impedance for media. Only needed if its different
        from the characteristic impedance of the transmission
        line. if z0 is None then will default to Z0
    C : number, or array-like
        distributed capacitance, in F/m
    L : number, or array-like
        distributed inductance, in H/m
    R : number, or array-like
        distributed resistance, in Ohm/m
    G : number, or array-like
        distributed conductance, in S/m

    Notes
    ----------
    if C, L, R, G are vectors they should be the same length

    :class:`DistributedCircuit` is a `Media` object representing a
    transmission line mode defined in terms of distributed impedance
    and admittance values.

    A Distributed Circuit may be defined in terms
    of the following attributes,

    ================================  ================  ================
    Quantity                          Symbol            Property
    ================================  ================  ================
    Distributed Capacitance           :math:`C^{'}`     :attr:`C`
    Distributed Inductance            :math:`L^{'}`     :attr:`L`
    Distributed Resistance            :math:`R^{'}`     :attr:`R`
    Distributed Conductance           :math:`G^{'}`     :attr:`G`
    ================================  ================  ================

    The following quantities may be calculated, which are functions of
    angular frequency (:math:`\\omega`):

    ===================  ========================================  ==========
    Quantity             Symbol                                    Property
    ===================  ========================================  ==========
    Distributed
    Impedance            :math:`Z^{'} = R^{'} + j \\omega L^{'}`   :attr:`Z`
    Distributed
    Admittance           :math:`Y^{'} = G^{'} + j \\omega C^{'}`   :attr:`Y`
    ===================  ========================================  ==========

    The properties which define their wave behavior:

    ========================  ==========================================  ==============
    Quantity                  Symbol                                      Method
    ========================  ==========================================  ==============
    Characteristic Impedance  :math:`Z_0 = \\sqrt{ \\frac{Z^{'}}{Y^{'}}}`  :func:`Z0`
    Propagation Constant      :math:`\\gamma = \\sqrt{ Z^{'}  Y^{'}}`      :func:`gamma`
    ========================  ==========================================  ==============

    Given the following definitions, the components of propagation
    constant are interpreted as follows:

    .. math::
        +\\Re e\\{\\gamma\\} = \\text{attenuation}

        -\\Im m\\{\\gamma\\} = \\text{forward propagation}

    See Also
    --------
    from_media
    '''
    def __init__(self, frequency=None, z0=None, C=90e-12, L=280e-9, R=0, G=0,
                 *args, **kwargs):
        super(DistributedCircuit, self).__init__(frequency=frequency,
                                                 z0=z0)
        # NOTE: extra *args/**kwargs are accepted for signature compatibility
        # with callers such as from_media(), but are intentionally ignored.
        self.C, self.L, self.R, self.G = C, L, R, G

    def __str__(self):
        f = self.frequency
        try:
            # Scalar C/L/R/G: the %-formatting succeeds directly.
            output = \
                'Distributed Circuit Media.  %i-%i %s.  %i points'%\
                (f.f_scaled[0],f.f_scaled[-1],f.unit, f.npoints) + \
                '\nL\'= %.2f, C\'= %.2f,R\'= %.2f, G\'= %.2f, '% \
                (self.L, self.C,self.R, self.G)
        except TypeError:
            # Vector C/L/R/G: %-formatting an array raises TypeError, so
            # show only the first element of each quantity.
            output = \
                'Distributed Circuit Media.  %i-%i %s.  %i points'%\
                (f.f_scaled[0],f.f_scaled[-1],f.unit, f.npoints) + \
                '\nL\'= %.2f.., C\'= %.2f..,R\'= %.2f.., G\'= %.2f.., '% \
                (self.L[0], self.C[0],self.R[0], self.G[0])
        return output

    def __repr__(self):
        return self.__str__()

    @classmethod
    def from_media(cls, my_media, *args, **kwargs):
        '''
        Initializes a DistributedCircuit from an existing
        :class:`~skrf.media.media.Media` instance.

        Inverts gamma = sqrt(Z'Y') and Z0 = sqrt(Z'/Y') to recover the
        distributed R, L, G, C of the given media.

        Parameters
        ------------
        my_media : :class:`~skrf.media.media.Media` instance.
            the media object
        '''
        w = my_media.frequency.w
        gamma = my_media.gamma
        Z0 = my_media.Z0
        z0 = my_media.z0

        # Z' = gamma*Z0 and Y' = gamma/Z0; then split each into its
        # real (R', G') and scaled-imaginary (L', C') parts.
        Y = gamma/Z0
        Z = gamma*Z0
        G, C = real(Y), imag(Y)/w
        R, L = real(Z), imag(Z)/w
        return cls(frequency = my_media.frequency,
                   z0 = z0, C=C, L=L, R=R, G=G, *args, **kwargs)

    @classmethod
    def from_csv(cls, *args, **kw):
        '''
        Create a DistributedCircuit from gamma/Z0 data stored in a csv file.

        All arguments are forwarded to :func:`DefinedGammaZ0.from_csv`.
        '''
        d = DefinedGammaZ0.from_csv(*args, **kw)
        return cls.from_media(d)

    @property
    def Z(self):
        '''
        Distributed Impedance, :math:`Z^{'}`

        Defined as

        .. math::
            Z^{'} = R^{'} + j \\omega L^{'}

        Returns
        --------
        Z : numpy.ndarray
            Distributed impedance in units of ohm/m
        '''
        w = self.frequency.w
        return self.R + 1j*w*self.L

    @property
    def Y(self):
        '''
        Distributed Admittance, :math:`Y^{'}`

        Defined as

        .. math::
            Y^{'} = G^{'} + j \\omega C^{'}

        Returns
        --------
        Y : numpy.ndarray
            Distributed Admittance in units of S/m
        '''
        w = self.frequency.w
        return self.G + 1j*w*self.C

    @property
    def Z0(self):
        '''
        Characteristic Impedance, :math:`Z0`

        .. math::
            Z_0 = \\sqrt{ \\frac{Z^{'}}{Y^{'}}}

        Returns
        --------
        Z0 : numpy.ndarray
            Characteristic Impedance in units of ohms
        '''
        return sqrt(self.Z/self.Y)

    @property
    def gamma(self):
        '''
        Propagation Constant, :math:`\\gamma`

        Defined as,

        .. math::
            \\gamma =  \\sqrt{ Z^{'}  Y^{'}}

        Returns
        --------
        gamma : numpy.ndarray
            Propagation Constant,

        Notes
        ---------
        The components of propagation constant are interpreted as follows:

        positive real(gamma) = attenuation
        positive imag(gamma) = forward propagation
        '''
        return sqrt(self.Z*self.Y)
| [
"numpy.imag",
"numpy.real",
"numpy.sqrt"
] | [((7107, 7128), 'numpy.sqrt', 'sqrt', (['(self.Z / self.Y)'], {}), '(self.Z / self.Y)\n', (7111, 7128), False, 'from numpy import sqrt, exp, array, tan, sin, cos, inf, log, real, imag, interp, linspace, shape, zeros, reshape\n'), ((7642, 7663), 'numpy.sqrt', 'sqrt', (['(self.Z * self.Y)'], {}), '(self.Z * self.Y)\n', (7646, 7663), False, 'from numpy import sqrt, exp, array, tan, sin, cos, inf, log, real, imag, interp, linspace, shape, zeros, reshape\n'), ((5747, 5754), 'numpy.real', 'real', (['Y'], {}), '(Y)\n', (5751, 5754), False, 'from numpy import sqrt, exp, array, tan, sin, cos, inf, log, real, imag, interp, linspace, shape, zeros, reshape\n'), ((5780, 5787), 'numpy.real', 'real', (['Z'], {}), '(Z)\n', (5784, 5787), False, 'from numpy import sqrt, exp, array, tan, sin, cos, inf, log, real, imag, interp, linspace, shape, zeros, reshape\n'), ((5756, 5763), 'numpy.imag', 'imag', (['Y'], {}), '(Y)\n', (5760, 5763), False, 'from numpy import sqrt, exp, array, tan, sin, cos, inf, log, real, imag, interp, linspace, shape, zeros, reshape\n'), ((5789, 5796), 'numpy.imag', 'imag', (['Z'], {}), '(Z)\n', (5793, 5796), False, 'from numpy import sqrt, exp, array, tan, sin, cos, inf, log, real, imag, interp, linspace, shape, zeros, reshape\n')] |
import numpy as np
from sklearn.metrics import roc_auc_score as roc_auc
from cases.data.data_utils import get_scoring_case_data_paths
from fedot.core.data.data import InputData
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.pipelines.tuning.unified import PipelineTuner
def get_case_train_test_data():
    """Load the scoring-case datasets used for fitting and for validation."""
    train_path, test_path = get_scoring_case_data_paths()
    return InputData.from_csv(train_path), InputData.from_csv(test_path)
def get_simple_pipeline():
    """Build a fixed two-level pipeline: xgboost and knn feeding a logit node."""
    xgboost_node = PrimaryNode(operation_type='xgboost')
    knn_node = PrimaryNode(operation_type='knn')
    logit_node = SecondaryNode(operation_type='logit',
                                nodes_from=[xgboost_node, knn_node])
    return Pipeline(logit_node)
return pipeline
def pipeline_tuning(pipeline: Pipeline, train_data: InputData,
                    test_data: InputData, local_iter: int,
                    tuner_iter_num: int = 30) -> (float, list):
    """ Tune the pipeline ``local_iter`` times with PipelineTuner and score each run.

    :param pipeline: pipeline to tune
    :param train_data: InputData for train
    :param test_data: InputData for validation
    :param local_iter: amount of tuner launches
    :param tuner_iter_num: amount of iterations, which tuner will perform
    :return mean_metric: mean value of ROC AUC metric
    :return several_iter_scores_test: list with metrics
    """
    several_iter_scores_test = []
    for launch in range(local_iter):
        print(f'current local iteration {launch}')

        # One independent tuning run over the same starting pipeline.
        tuner = PipelineTuner(pipeline=pipeline,
                              task=train_data.task,
                              iterations=tuner_iter_num)
        tuned_pipeline = tuner.tune_pipeline(input_data=train_data,
                                             loss_function=roc_auc)

        # Refit the tuned pipeline and score it on the hold-out data.
        tuned_pipeline.fit(train_data)
        predicted = tuned_pipeline.predict(test_data)
        several_iter_scores_test.append(roc_auc(y_true=test_data.target,
                                                y_score=predicted.predict))

    return float(np.mean(several_iter_scores_test)), several_iter_scores_test
if __name__ == '__main__':
    train_data, test_data = get_case_train_test_data()
    # Pipeline composition
    pipeline = get_simple_pipeline()
    # Before tuning prediction: fit the untouched pipeline to get a baseline
    # ROC AUC to compare the tuned score against.
    pipeline.fit(train_data, use_fitted=False)
    before_tuning_predicted = pipeline.predict(test_data)
    bfr_tun_roc_auc = roc_auc(y_true=test_data.target,
                              y_score=before_tuning_predicted.predict)
    local_iter = 5
    # Pipeline tuning: repeat the tuning local_iter times and average the score.
    after_tune_roc_auc, several_iter_scores_test = pipeline_tuning(pipeline=pipeline,
                                                                   train_data=train_data,
                                                                   test_data=test_data,
                                                                   local_iter=local_iter)
    print(f'Several test scores {several_iter_scores_test}')
    print(f'Mean test score over {local_iter} iterations: {after_tune_roc_auc}')
    print(round(bfr_tun_roc_auc, 3))
    print(round(after_tune_roc_auc, 3))
| [
"fedot.core.data.data.InputData.from_csv",
"fedot.core.pipelines.pipeline.Pipeline",
"cases.data.data_utils.get_scoring_case_data_paths",
"sklearn.metrics.roc_auc_score",
"numpy.mean",
"fedot.core.pipelines.node.SecondaryNode",
"fedot.core.pipelines.tuning.unified.PipelineTuner",
"fedot.core.pipelines... | [((491, 520), 'cases.data.data_utils.get_scoring_case_data_paths', 'get_scoring_case_data_paths', ([], {}), '()\n', (518, 520), False, 'from cases.data.data_utils import get_scoring_case_data_paths\n'), ((539, 574), 'fedot.core.data.data.InputData.from_csv', 'InputData.from_csv', (['train_file_path'], {}), '(train_file_path)\n', (557, 574), False, 'from fedot.core.data.data import InputData\n'), ((591, 625), 'fedot.core.data.data.InputData.from_csv', 'InputData.from_csv', (['test_file_path'], {}), '(test_file_path)\n', (609, 625), False, 'from fedot.core.data.data import InputData\n'), ((828, 865), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', ([], {'operation_type': '"""xgboost"""'}), "(operation_type='xgboost')\n", (839, 865), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((879, 912), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', ([], {'operation_type': '"""knn"""'}), "(operation_type='knn')\n", (890, 912), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((925, 990), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', ([], {'operation_type': '"""logit"""', 'nodes_from': '[first, second]'}), "(operation_type='logit', nodes_from=[first, second])\n", (938, 990), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((1033, 1048), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', (['final'], {}), '(final)\n', (1041, 1048), False, 'from fedot.core.pipelines.pipeline import Pipeline\n'), ((2918, 2991), 'sklearn.metrics.roc_auc_score', 'roc_auc', ([], {'y_true': 'test_data.target', 'y_score': 'before_tuning_predicted.predict'}), '(y_true=test_data.target, y_score=before_tuning_predicted.predict)\n', (2925, 2991), True, 'from sklearn.metrics import roc_auc_score as roc_auc\n'), ((1858, 1944), 'fedot.core.pipelines.tuning.unified.PipelineTuner', 'PipelineTuner', ([], {'pipeline': 'pipeline', 'task': 'train_data.task', 
'iterations': 'tuner_iter_num'}), '(pipeline=pipeline, task=train_data.task, iterations=\n tuner_iter_num)\n', (1871, 1944), False, 'from fedot.core.pipelines.tuning.unified import PipelineTuner\n'), ((2337, 2409), 'sklearn.metrics.roc_auc_score', 'roc_auc', ([], {'y_true': 'test_data.target', 'y_score': 'after_tuning_predicted.predict'}), '(y_true=test_data.target, y_score=after_tuning_predicted.predict)\n', (2344, 2409), True, 'from sklearn.metrics import roc_auc_score as roc_auc\n'), ((2526, 2559), 'numpy.mean', 'np.mean', (['several_iter_scores_test'], {}), '(several_iter_scores_test)\n', (2533, 2559), True, 'import numpy as np\n')] |
import collections
import numpy as np
import sys
import grammar
def format_table(table, sep=' '):
    """Right-align the cells of ``table`` (a list of rows of strings) into columns.

    :param table: list of rows; every row must have the same number of cells
    :param sep: string inserted between adjacent columns
    :return: list of formatted line strings (empty for an empty table)
    :raises RuntimeError: if the rows have differing numbers of cells
    """
    # Guard: the original crashed with IndexError on an empty table.
    if not table:
        return []
    num_cols = len(table[0])
    if any(len(row) != num_cols for row in table):
        raise RuntimeError('Number of columns must match.')

    # Width of each column is the widest cell it contains.
    widths = [max(len(row[i]) for row in table)
              for i in range(num_cols)]
    format_string = sep.join('%' + str(w) + 's' for w in widths)
    return [format_string % tuple(row) for row in table]
def format_table_latex(table):
    """Format ``table`` as LaTeX tabular source: columns joined by '&', rows terminated."""
    return [line + ' \\\\' for line in format_table(table, ' & ')]
class Failure:
    """Record of an inference failure for one candidate structure.

    Attributes mirror the constructor arguments: the structure that failed,
    the search level it was tried at, whether every job for it failed, and
    an optional experiment name.
    """

    def __init__(self, structure, level, all_failed, name=None):
        self.structure, self.level = structure, level
        self.all_failed, self.name = all_failed, name
def print_failed_structures(failures, outfile=sys.stdout):
    # Write a table of Failure records to outfile; silently does nothing when
    # the list is empty.  (Python 2 ``print >> file`` chevron syntax.)
    if failures:
        print >> outfile, 'The inference algorithms failed for the following structures:'
        print >> outfile
        print >> outfile, '%30s%8s        %s' % \
              ('structure', 'level', 'notes')
        print >> outfile
        for f in failures:
            # One line per failure, with optional annotations appended.
            line = '%30s%8d        ' % (grammar.pretty_print(f.structure), f.level)
            if f.name:
                line += '(for %s) ' % f.name
            if not f.all_failed:
                line += '(only some jobs failed) '
            print >> outfile, line
        print >> outfile
        print >> outfile
class ModelScore:
    """Predictive-score summary for one candidate structure.

    Holds the row/column predictive scores, their total, the improvements
    over the previous level, and the corresponding z-scores.
    """

    def __init__(self, structure, row_score, col_score, total, row_improvement, col_improvement,
                 z_score_row, z_score_col):
        self.structure = structure
        self.row_score, self.col_score, self.total = row_score, col_score, total
        self.row_improvement = row_improvement
        self.col_improvement = col_improvement
        self.z_score_row, self.z_score_col = z_score_row, z_score_col
def print_scores(level, model_scores, outfile=sys.stdout):
    # Write the per-structure score table for one search level.
    # (Python 2 ``print >> file`` chevron syntax.)
    print >> outfile, 'The following are the top-scoring structures for level %d:' % level
    print >> outfile
    print >> outfile, '%30s%10s%10s%13s%13s%13s%10s%10s' % \
          ('structure', 'row', 'col', 'total', 'row impvt.', 'col impvt.', 'z (row)', 'z (col)')
    print >> outfile
    for ms in model_scores:
        # One line per ModelScore, columns aligned with the header above.
        print >> outfile, '%30s%10.2f%10.2f%13.2f%13.2f%13.2f%10.2f%10.2f' % \
              (grammar.pretty_print(ms.structure), ms.row_score, ms.col_score, ms.total,
               ms.row_improvement, ms.col_improvement, ms.z_score_row, ms.z_score_col)
    print >> outfile
    print >> outfile
def print_model_sequence(model_scores, outfile=sys.stdout):
    # Write the best structure found at each level, in level order.
    # (Python 2 ``print >> file`` chevron syntax.)
    print >> outfile, "Here are the best-performing structures in each level of the search:"
    print >> outfile
    print >> outfile, '%10s%25s%13s%13s%10s%10s' % \
          ('level', 'structure', 'row impvt.', 'col impvt.', 'z (row)', 'z (col)')
    print >> outfile
    for i, ms in enumerate(model_scores):
        # Levels are reported 1-based.
        print >> outfile, '%10d%25s%13.2f%13.2f%10.2f%10.2f' % \
              (i+1, grammar.pretty_print(ms.structure), ms.row_improvement, ms.col_improvement,
               ms.z_score_row, ms.z_score_col)
    print >> outfile
    print >> outfile
class RunningTime:
    """CPU-time bookkeeping for the jobs run on one structure at one level."""

    def __init__(self, level, structure, num_samples, total_time):
        self.level, self.structure = level, structure
        self.num_samples, self.total_time = num_samples, total_time
def format_time(t):
    """Render a duration ``t`` (in seconds) as a human-readable string.

    Chooses seconds, minutes, or hours depending on magnitude, always with
    one decimal place.
    """
    if t < 60.:
        return '%1.1f seconds' % t
    if t < 3600.:
        return '%1.1f minutes' % (t / 60.)
    return '%1.1f hours' % (t / 3600.)
def print_running_times(running_times, outfile=sys.stdout):
    # Write a per-structure CPU-time breakdown, most expensive first.
    # (Python 2 ``print >> file`` chevron syntax.)
    total = sum([rt.total_time for rt in running_times])
    print >> outfile, 'Total CPU time was %s. Here is the breakdown:' % format_time(total)
    print >> outfile
    print >> outfile, '%30s%8s        %s' % \
          ('structure', 'level', 'time')
    print >> outfile
    # Sort descending by total time so the most expensive structure is first.
    running_times = sorted(running_times, key=lambda rt: rt.total_time, reverse=True)
    for rt in running_times:
        # Show the time as "count x per-sample time".
        time_str = '%d x %s' % (rt.num_samples, format_time(rt.total_time / rt.num_samples))
        print >> outfile, '%30s%8d        %s' % (grammar.pretty_print(rt.structure), rt.level, time_str)
    print >> outfile
    print >> outfile
class FinalResult:
    """The structure ultimately selected for one named experiment."""

    def __init__(self, expt_name, structure):
        self.expt_name, self.structure = expt_name, structure
def print_learned_structures(results, outfile=sys.stdout):
    # Write the final learned structure for each experiment, ordered by the
    # last underscore-separated token of the experiment name.
    # (Python 2 ``print >> file`` chevron syntax.)
    def sortkey(result):
        return result.expt_name.split('_')[-1]
    results = sorted(results, key=sortkey)
    print >> outfile, 'The learned structures:'
    print >> outfile
    print >> outfile, '%25s%25s' % ('experiment', 'structure')
    print >> outfile
    for r in results:
        print >> outfile, '%25s%25s' % (r.expt_name, grammar.pretty_print(r.structure))
    print >> outfile
    print >> outfile
class LatentVariables:
    """An entity's label paired with its latent representation ``z``."""

    def __init__(self, label, z):
        self.label, self.z = label, z
def print_components(model, structure, row_or_col, items, outfile=sys.stdout):
    # List the members of each inferred component (cluster or binary feature)
    # for one structure, largest component first.
    # (Python 2 ``print >> file`` chevron syntax.)
    cluster_members = collections.defaultdict(list)
    if model == 'clustering':
        for item in items:
            # z may be a hard assignment (scalar) or a soft assignment
            # (vector), in which case take the argmax as the cluster id.
            z = item.z if np.isscalar(item.z) else item.z.argmax()
            cluster_members[z].append(item.label)
        component_type, component_type_pl = 'Cluster', 'clusters'
    elif model == 'binary':
        for item in items:
            # Each nonzero entry of the binary vector z activates a component.
            for i, zi in enumerate(item.z):
                if zi:
                    cluster_members[i].append(item.label)
        component_type, component_type_pl = 'Component', 'components'
    # Largest components first.
    cluster_ids = sorted(cluster_members.keys(), key=lambda k: len(cluster_members[k]), reverse=True)
    row_col_str = {'row': 'row', 'col': 'column'}[row_or_col]
    print >> outfile, 'For structure %s, the following %s %s were found:' % \
          (grammar.pretty_print(structure), row_col_str, component_type_pl)
    print >> outfile
    for i, cid in enumerate(cluster_ids):
        print >> outfile, '    %s %d:' % (component_type, i+1)
        print >> outfile
        for label in cluster_members[cid]:
            print >> outfile, '        %s' % label
        print >> outfile
    print >> outfile
print >> outfile
| [
"collections.defaultdict",
"numpy.isscalar",
"grammar.pretty_print"
] | [((5127, 5156), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (5150, 5156), False, 'import collections\n'), ((5240, 5259), 'numpy.isscalar', 'np.isscalar', (['item.z'], {}), '(item.z)\n', (5251, 5259), True, 'import numpy as np\n'), ((5915, 5946), 'grammar.pretty_print', 'grammar.pretty_print', (['structure'], {}), '(structure)\n', (5935, 5946), False, 'import grammar\n'), ((1139, 1172), 'grammar.pretty_print', 'grammar.pretty_print', (['f.structure'], {}), '(f.structure)\n', (1159, 1172), False, 'import grammar\n'), ((2360, 2394), 'grammar.pretty_print', 'grammar.pretty_print', (['ms.structure'], {}), '(ms.structure)\n', (2380, 2394), False, 'import grammar\n'), ((3027, 3061), 'grammar.pretty_print', 'grammar.pretty_print', (['ms.structure'], {}), '(ms.structure)\n', (3047, 3061), False, 'import grammar\n'), ((4203, 4237), 'grammar.pretty_print', 'grammar.pretty_print', (['rt.structure'], {}), '(rt.structure)\n', (4223, 4237), False, 'import grammar\n'), ((4842, 4875), 'grammar.pretty_print', 'grammar.pretty_print', (['r.structure'], {}), '(r.structure)\n', (4862, 4875), False, 'import grammar\n')] |
import pandas as pd
import numpy as np
#time: 2020-07-30
#author: ZhangChang
#function description:
#Process the CMU keystroke dataset and produce per-event (single keystroke session) data.
#`user` is the subject name, `line_start` is the starting row index.
#`fileName` is the path of the data file.
#Example usage:
'''
fileName = "./data/DSL-StrongPasswordData.xls"
keystroke_data,end_time = make_estimate_data('s002',0,fileName)
print (keystroke_data,end_time)
'''
def make_estimate_data_for_up(user, line_start, line_end, fileName):
    """Extract sorted key-hold times for one subject from the CMU keystroke data.

    Reads the Excel file, keeps only the rows of ``user``, and for each row in
    ``[line_start, line_end)`` collects the 11 hold-time values (they sit in
    every third column starting at column index 3), sorted within the row.

    :param user: subject identifier matched against the "subject" column
    :param line_start: first row index (within the user's rows) to use
    :param line_end: one past the last row index to use
    :param fileName: path of the Excel data file
    :return: 1-D numpy array concatenating the per-row sorted hold times
    """
    df_all = pd.read_excel(fileName, header=0)
    df = df_all[df_all["subject"] == user]

    per_row_holds = []
    for i in range(line_start, line_end):
        # The 11 hold (H.) durations are located at columns 3, 6, ..., 33.
        holds = sorted(df.iloc[i, 3 + j * 3] for j in range(11))
        per_row_holds.append(holds)
    return np.concatenate(per_row_holds)
def make_estimate_data_for_up_all(fileName, user, num_rows=20400,
                                  rows_per_user=400, keep_per_user=50):
    """Build hold-time samples for every subject EXCEPT ``user``.

    Reads the whole CMU sheet and, for each non-target subject, keeps only
    the first ``keep_per_user`` of that subject's ``rows_per_user`` rows.
    Each kept row contributes its 11 sorted hold times.

    :param fileName: path of the Excel file (DSL-StrongPasswordData.xls)
    :param user: subject id to exclude (held-out target user)
    :param num_rows: total number of data rows in the sheet
    :param rows_per_user: number of consecutive rows per subject
    :param keep_per_user: samples kept from the start of each subject block
    :return: 1-D numpy array of sorted hold times, 11 per kept row
    """
    df = pd.read_excel(fileName, header=0)
    samples = []
    for i in range(num_rows):
        # Skip the target user entirely; their data is held out.
        if df.iloc[i, 0] == user:
            continue
        # Keep only the first `keep_per_user` rows of each subject's block.
        if i % rows_per_user >= keep_per_user:
            continue
        hold_times = [df.iloc[i, 3 + j * 3] for j in range(11)]
        samples.append(sorted(hold_times))
    return np.concatenate(samples)
if __name__ == "__main__":
    # Smoke test: build the background (all-users-except-s002) sample set.
    fileName = "./data/DSL-StrongPasswordData.xls"
    # keystroke_data = make_estimate_data_for_up('s002', 0, 2, fileName)
    keystroke_data = make_estimate_data_for_up_all(fileName, 's002')
    print(keystroke_data)
    print(len(keystroke_data))
| [
"pandas.read_excel",
"numpy.concatenate"
] | [((418, 451), 'pandas.read_excel', 'pd.read_excel', (['fileName'], {'header': '(0)'}), '(fileName, header=0)\n', (431, 451), True, 'import pandas as pd\n'), ((956, 989), 'numpy.concatenate', 'np.concatenate', (['data_instance_all'], {}), '(data_instance_all)\n', (970, 989), True, 'import numpy as np\n'), ((1053, 1086), 'pandas.read_excel', 'pd.read_excel', (['fileName'], {'header': '(0)'}), '(fileName, header=0)\n', (1066, 1086), True, 'import pandas as pd\n'), ((1725, 1758), 'numpy.concatenate', 'np.concatenate', (['data_instance_all'], {}), '(data_instance_all)\n', (1739, 1758), True, 'import numpy as np\n')] |
import glob
import numpy as np
import config
import os
# Find the raw capture files and clear any previous output.
files = glob.glob("raw/raw*")
# NOTE(review): shells out to POSIX `rm`; wipes everything under output/.
os.system("rm output/*")
# number of whitespace-separated fields expected per data line
kargs = 14
# number of sensors; a timestamp is complete once all of them reported
ksensors = 12
# offset added after each timestamp reset to keep pseudo-timestamps unique
buf_value = 100000
# take raw data files and convert to a dictionary of timestamps
# each timestamp has an array of packets.
def parse_data(files, kargs=14, ksensors=12, buf_value=100000):
    """Parse raw sensor log files into ``{timestamp: [packet, ...]}``.

    A valid data line starts with ``"I"`` and holds exactly ``kargs``
    whitespace-separated fields; a line starting with ``"t"`` marks a
    timestamp reset, compensated by adding ``buf_value`` to all subsequent
    pseudo-timestamps. Timestamps that did not collect exactly ``ksensors``
    packets are dropped so only complete readings remain.

    :param files: iterable of raw file paths to read
    :param kargs: expected number of fields per data line
    :param ksensors: packets required for a timestamp to be complete
    :param buf_value: offset applied per reset to keep times monotonic
    :return: dict mapping pseudo-timestamp -> list of packet dicts with
        keys "id", "led_state", "r", "g", "b", "c" (all strings)
    """
    data = {}
    for path in files:
        with open(path, 'r') as fh:
            raw = fh.readlines()
        buf = 0  # cumulative offset accumulated from timestamp resets
        for line in raw:
            # A reset marker bumps the offset for everything that follows.
            if line.startswith("t"):
                buf += buf_value
            # Only lines beginning with "I" carry sensor data.
            if not line.startswith("I"):
                continue
            fields = line.split()
            if len(fields) != kargs:
                continue  # malformed / truncated line
            time = buf + int(fields[3])
            packet = {
                "id": fields[1],
                "led_state": fields[5],
                "r": fields[7],
                "g": fields[9],
                "b": fields[11],
                "c": fields[13],
            }
            data.setdefault(time, []).append(packet)
    # Keep only timestamps for which every sensor reported a packet.
    return {t: pkts for t, pkts in data.items() if len(pkts) == ksensors}
# take sensor-wise state values and determine true state id
# from the configured lighting pattern
def determine_state(vec, mem):
    """Map a per-sensor LED state vector to a row index of the configured
    state matrix, using recent state history to break ties.

    :param vec: 1-D array of per-sensor LED states for the current timestamp
    :param mem: list of previous state vectors (caller appends newest last
        and pops from the front, so mem is ordered oldest -> newest)
    :return: matched row index; -1 if no row matches at all; or None if
        history could not disambiguate and no candidates remain
    """
    mat = config.CFG["state_matrix"]
    krow, kcol = mat.shape
    # array used to find rows which match the state
    match = np.array([0]*krow)
    i = 0
    for row in mat:
        if (row == vec).all():
            match[i] = 1
        i += 1
    match_indices = np.array(match.nonzero()).flatten()
    # Unique match: done. No match: report and bail out with -1.
    if len(match_indices) == 1:
        return match_indices[0]
    elif len(match_indices) == 0:
        print("======= ERROR: state not found")
        return -1
    inc = 0
    # Multiple rows matched; walk backwards through memory until the
    # candidates narrow to one, or memory is exhausted.
    # continue looking until we have gone through all memory
    while (inc < len(mem)):
        mem_match_indices = [] # populate with current step match
        # NOTE(review): when inc == 0 this reads mem[0] (the OLDEST entry),
        # while inc == 1 reads mem[-1] (the newest) -- the intent looks like
        # "inc steps back", i.e. mem[-1 - inc]; confirm before relying on it.
        prev_mem_val = mem[-inc] # get mem row corresponding to step
        for match in match_indices:
            prev_mem_loc = match - inc
            row = mat[prev_mem_loc,:]
            if (row == prev_mem_val).all():
                # if this previous value is consistent
                # add to new match list
                mem_match_indices.append(prev_mem_loc)
        if len(mem_match_indices) == 1:
            return mem_match_indices[0]
        # NOTE(review): candidates carried into the next iteration are the
        # already-shifted indices (match - inc), so shifts accumulate across
        # iterations -- verify this is the intended indexing.
        match_indices = mem_match_indices
        # go back another step
        inc += 1
    print("warning, skipping additional matches")
    if len(match_indices) > 0:
        return match_indices[0]
    else:
        print("Could not accurately find state id")
        return None
# send parsed to one master file
def save_to_file(data, filename):
    """Write parsed sensor data to numbered text files.

    Iterates all timestamps in ``data`` (as produced by parse_data), derives
    the true state id for each, and writes one block per timestamp. A new
    output file ``<filename>_<n>.txt`` is started every time the lighting
    cycle returns to state 0.

    :param data: dict mapping pseudo-timestamp -> list of ksensors packets
    :param filename: output file path prefix (e.g. "output/output")
    """
    # iterate through all timestamps and generate strings to print
    inc = 0  # NOTE(review): never used below
    mem = []  # rolling history of state vectors fed to determine_state
    file_num = 0
    this_file = filename + "_" + str(file_num) + ".txt"
    print("created file: ", this_file)
    f = open(this_file, "w")
    for time in data.keys():
        this_time = data[time]
        output = {}
        # initialize state vector to populate with sensor-wise state info.
        # this will be compared against the expected pattern in the config
        # file to get the state that the system is in at this timestamp
        state_vec = np.array([-1]*ksensors)
        # get data from each sensor at the given timestamp.
        for i in range(0, ksensors):
            sensor = this_time[i]
            this_id = sensor["id"]
            # get specific sensor state and update state_vec
            state = sensor["led_state"]
            state_vec[int(this_id)] = int(state)
            # generate output string
            output[int(this_id)] = "\tID: " + this_id + "\tState: " + state + " R: " + sensor["r"] + " G: " + sensor["g"] + " B: " + sensor["b"] + " C: " + sensor["c"]+"\n"
        # get true state id from the configured lighting pattern
        state_id = determine_state(state_vec, mem)
        if state_id == 0:
            # NOTE(review): on the FIRST state 0, file_num is still 0, so the
            # file opened before the loop is re-opened with "w" and its
            # contents are discarded -- confirm this is intended.
            this_file = filename + "_" + str(file_num) + ".txt"
            print("created file: ", this_file)
            f.close()
            f = open(this_file, "w")
            file_num += 1
        mem.append(state_vec)
        # Bound the history to the last 2*ksensors state vectors.
        if len(mem) > 2*ksensors:
            mem.pop(0)
        # write data
        # get rid of excess buffer created
        time = time % buf_value
        line = "Timestamp: " + str(time) + " State ID: " + str(state_id)+"\n"
        f.write(line)
        for i in range(0,ksensors):
            f.write(output[i])
    f.close()
if __name__ == "__main__":
    # Entry point: parse every raw capture into a timestamp-keyed
    # dictionary of packets, then render the formatted output files.
    parsed_packets = parse_data(files)
    save_to_file(parsed_packets, "output/output")
| [
"numpy.array",
"os.system",
"glob.glob"
] | [((98, 119), 'glob.glob', 'glob.glob', (['"""raw/raw*"""'], {}), "('raw/raw*')\n", (107, 119), False, 'import glob\n'), ((121, 145), 'os.system', 'os.system', (['"""rm output/*"""'], {}), "('rm output/*')\n", (130, 145), False, 'import os\n'), ((2705, 2725), 'numpy.array', 'np.array', (['([0] * krow)'], {}), '([0] * krow)\n', (2713, 2725), True, 'import numpy as np\n'), ((4611, 4636), 'numpy.array', 'np.array', (['([-1] * ksensors)'], {}), '([-1] * ksensors)\n', (4619, 4636), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert an osc file to multiple csv files.
Accepts file.osc with contents:
____________________________________________________________________________________________________
osc_time          |path      |types  |packets
e17775e1.21044f19 |/muse/eeg |ffffff |825.201477 825.201477 825.201477 825.201477 nan 825.201477
e17775e1.21173fb7 |/muse/acc |fff    |0.000000 0.000000 0.000000
----------------------------------------------------------------------------------------------------
Parses and saves to multiple `path`.csv files.
The first column is the corresponding timestamp.
osc_time is in a weird format:
    x.y where x and y are hex numbers.
    x = seconds since 1 Jan 1900
    y = 2**-32 fraction of a second
Subtract 2,208,988,800 seconds to get unix timestamp
"""
import os
from argparse import ArgumentParser
from collections import defaultdict

import numpy as np

EXAMPLE_USAGE = 'Usage: python osc_to_csv.py file.osc -f folder_path'


def _parse_args():
    """Build and evaluate the command-line interface."""
    parser = ArgumentParser(
        description='Convert an osc file to multiple csv files.',
        epilog=EXAMPLE_USAGE)
    parser.add_argument(
        'file_name', type=str,
        help='osc file captured via: ```oscdump [port] > file.osc```')
    parser.add_argument(
        '-f', '--folder', type=str, dest='folder_path',
        help='A folder to place files in.')
    return parser.parse_args()


def _read_osc(file_name):
    """Parse an oscdump capture into {osc_path: [[time, v1, v2, ...], ...]}.

    The hex "x.y" timestamp is converted to a float number of seconds:
    seconds since 1 Jan 1900 plus a 2**-32-resolution fractional part.
    """
    path_dict = defaultdict(list)
    with open(file_name, 'r') as osc_file:
        for line in osc_file:
            line = line.strip('\n')
            if not line:
                continue  # tolerate blank lines in the capture
            osc_time, path, types, packets = line.split(' ', 3)
            x, y = osc_time.split('.')
            float_time = int(x, 16) + int(y, 16) / 2 ** 32
            row = [float_time] + [float(pack) for pack in packets.split(' ')]
            path_dict[path].append(row)
    return path_dict


def _save_csvs(path_dict, folder_path):
    """Write one CSV file per OSC path, optionally inside folder_path."""
    for path, rows in path_dict.items():
        # Drop a single leading slash so the file name doesn't start with '_'.
        path = path[1:] if path.startswith('/') else path
        file_name = path.replace('/', '_') + '.csv'
        if folder_path:
            os.makedirs(folder_path, exist_ok=True)  # race-free "mkdir -p"
            file_name = os.path.join(folder_path, file_name)
        np.savetxt(file_name, np.array(rows), delimiter=',')
        print(f'Saved: {file_name}')


def main():
    """Entry point: parse arguments, read the capture, write the CSVs."""
    args = _parse_args()
    _save_csvs(_read_osc(args.file_name), args.folder_path)
    print('Done!')


if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"os.makedirs",
"os.path.isdir",
"collections.defaultdict",
"numpy.array",
"os.path.join"
] | [((1067, 1165), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Convert an osc file to multiple csv files."""', 'epilog': 'eaxmple_usage'}), "(description='Convert an osc file to multiple csv files.',\n epilog=eaxmple_usage)\n", (1081, 1165), False, 'from argparse import ArgumentParser\n'), ((1498, 1526), 'collections.defaultdict', 'defaultdict', (['(lambda : [])', '{}'], {}), '(lambda : [], {})\n', (1509, 1526), False, 'from collections import defaultdict\n'), ((2305, 2341), 'os.path.join', 'os.path.join', (['folder_path', 'file_name'], {}), '(folder_path, file_name)\n', (2317, 2341), False, 'import os\n'), ((2368, 2387), 'numpy.array', 'np.array', (['path_list'], {}), '(path_list)\n', (2376, 2387), True, 'import numpy as np\n'), ((2220, 2246), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (2233, 2246), False, 'import os\n'), ((2260, 2284), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (2271, 2284), False, 'import os\n')] |
#!/usr/bin/env python
import pyflann_ibeis
import numpy as np
from numpy import ones
from numpy.random import rand
import pytest
import unittest
class Test_PyFLANN_nn(unittest.TestCase):
    """Fixture base: provides a fresh FLANN index object per test."""
    def setUp(self):
        # Build a new FLANN instance before every test method runs.
        self.nn = pyflann_ibeis.FLANN()
class Test_PyFLANN_nn_index(unittest.TestCase):
    """Exercises FLANN index construction, querying, and deletion."""
    def testnn_index(self):
        """Self-query: each point's nearest neighbor must be itself."""
        dim = 10
        N = 100
        x = rand(N, dim)
        nn = pyflann_ibeis.FLANN()
        nn.build_index(x)
        nnidx, nndist = nn.nn_index(x)
        # Querying the indexed data with itself should return indices 0..N-1.
        correct = all(nnidx == np.arange(N, dtype=pyflann_ibeis.index_type))
        nn.delete_index()
        self.assertTrue(correct)
    def testnn_index_random_permute(self):
        """Build/query/delete many indexes in random order to stress state."""
        numtests = 500
        dim = 10
        N = 100
        nns = [None] * numtests
        x = [rand(N, dim) for i in range(numtests)]
        correct = ones(numtests, dtype=np.bool_)
        # Build the indexes in a random order, with random extra calls mixed in.
        for i in np.random.permutation(numtests):
            nns[i] = pyflann_ibeis.FLANN()
            nns[i].build_index(x[i])
            # For kicks
            if rand() < 0.5:
                nns[i].kmeans(x[i], 5)
            if rand() < 0.5:
                nns[i].nn(x[i], x[i])
        # Query each index (again in random order); self-query must be exact.
        for i in np.random.permutation(numtests):
            nnidx, nndist = nns[i].nn_index(x[i])
            correct[i] = all(nnidx == np.arange(N, dtype=pyflann_ibeis.index_type))
        # Tear down: explicitly delete roughly half, garbage-collect the rest.
        for i in reversed(range(numtests)):
            if rand() < 0.5:
                nns[i].delete_index()
            else:
                del nns[i]
        self.assertTrue(all(correct))
    @pytest.mark.skip('not debugging')
    def testnn_index_bad_index_call_noindex(self):
        """Querying before build_index should raise FLANNException."""
        nn = pyflann_ibeis.FLANN()
        # self.assertRaises(FLANNException, lambda: nn.nn_index(rand(5, 5)))
        import pytest
        with pytest.raises(pyflann_ibeis.FLANNException):
            nn.nn_index(rand(5, 5))
    @pytest.mark.skip('not debugging')
    def testnn_index_bad_index_call_delindex(self):
        """Querying after delete_index should raise FLANNException."""
        nn = pyflann_ibeis.FLANN()
        nn.build_index(rand(5, 5))
        nn.delete_index()
        with pytest.raises(pyflann_ibeis.FLANNException):
            nn.nn_index(rand(5, 5))
        # self.assertRaises(FLANNException, lambda: nn.nn_index(rand(5, 5)))
if __name__ == '__main__':
    # CommandLine:
    #     pytest ~/code/flann/test/test_nn_index.py
    #     xdoctest ~/code/flann/test/test_nn_index.py zero
    unittest.main()
| [
"unittest.main",
"pyflann_ibeis.FLANN",
"numpy.ones",
"pytest.raises",
"numpy.arange",
"numpy.random.permutation",
"numpy.random.rand",
"pytest.mark.skip"
] | [((1541, 1574), 'pytest.mark.skip', 'pytest.mark.skip', (['"""not debugging"""'], {}), "('not debugging')\n", (1557, 1574), False, 'import pytest\n'), ((1860, 1893), 'pytest.mark.skip', 'pytest.mark.skip', (['"""not debugging"""'], {}), "('not debugging')\n", (1876, 1893), False, 'import pytest\n'), ((2362, 2377), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2375, 2377), False, 'import unittest\n'), ((229, 250), 'pyflann_ibeis.FLANN', 'pyflann_ibeis.FLANN', ([], {}), '()\n', (248, 250), False, 'import pyflann_ibeis\n'), ((377, 389), 'numpy.random.rand', 'rand', (['N', 'dim'], {}), '(N, dim)\n', (381, 389), False, 'from numpy.random import rand\n'), ((403, 424), 'pyflann_ibeis.FLANN', 'pyflann_ibeis.FLANN', ([], {}), '()\n', (422, 424), False, 'import pyflann_ibeis\n'), ((832, 862), 'numpy.ones', 'ones', (['numtests'], {'dtype': 'np.bool_'}), '(numtests, dtype=np.bool_)\n', (836, 862), False, 'from numpy import ones\n'), ((881, 912), 'numpy.random.permutation', 'np.random.permutation', (['numtests'], {}), '(numtests)\n', (902, 912), True, 'import numpy as np\n'), ((1172, 1203), 'numpy.random.permutation', 'np.random.permutation', (['numtests'], {}), '(numtests)\n', (1193, 1203), True, 'import numpy as np\n'), ((1639, 1660), 'pyflann_ibeis.FLANN', 'pyflann_ibeis.FLANN', ([], {}), '()\n', (1658, 1660), False, 'import pyflann_ibeis\n'), ((1959, 1980), 'pyflann_ibeis.FLANN', 'pyflann_ibeis.FLANN', ([], {}), '()\n', (1978, 1980), False, 'import pyflann_ibeis\n'), ((775, 787), 'numpy.random.rand', 'rand', (['N', 'dim'], {}), '(N, dim)\n', (779, 787), False, 'from numpy.random import rand\n'), ((935, 956), 'pyflann_ibeis.FLANN', 'pyflann_ibeis.FLANN', ([], {}), '()\n', (954, 956), False, 'import pyflann_ibeis\n'), ((1773, 1816), 'pytest.raises', 'pytest.raises', (['pyflann_ibeis.FLANNException'], {}), '(pyflann_ibeis.FLANNException)\n', (1786, 1816), False, 'import pytest\n'), ((2004, 2014), 'numpy.random.rand', 'rand', (['(5)', '(5)'], {}), '(5, 5)\n', (2008, 
2014), False, 'from numpy.random import rand\n'), ((2056, 2099), 'pytest.raises', 'pytest.raises', (['pyflann_ibeis.FLANNException'], {}), '(pyflann_ibeis.FLANNException)\n', (2069, 2099), False, 'import pytest\n'), ((522, 566), 'numpy.arange', 'np.arange', (['N'], {'dtype': 'pyflann_ibeis.index_type'}), '(N, dtype=pyflann_ibeis.index_type)\n', (531, 566), True, 'import numpy as np\n'), ((1034, 1040), 'numpy.random.rand', 'rand', ([], {}), '()\n', (1038, 1040), False, 'from numpy.random import rand\n'), ((1102, 1108), 'numpy.random.rand', 'rand', ([], {}), '()\n', (1106, 1108), False, 'from numpy.random import rand\n'), ((1399, 1405), 'numpy.random.rand', 'rand', ([], {}), '()\n', (1403, 1405), False, 'from numpy.random import rand\n'), ((1842, 1852), 'numpy.random.rand', 'rand', (['(5)', '(5)'], {}), '(5, 5)\n', (1846, 1852), False, 'from numpy.random import rand\n'), ((2125, 2135), 'numpy.random.rand', 'rand', (['(5)', '(5)'], {}), '(5, 5)\n', (2129, 2135), False, 'from numpy.random import rand\n'), ((1293, 1337), 'numpy.arange', 'np.arange', (['N'], {'dtype': 'pyflann_ibeis.index_type'}), '(N, dtype=pyflann_ibeis.index_type)\n', (1302, 1337), True, 'import numpy as np\n')] |
"""
Created on Jan 29, 2018
@author: Brian
The purpose of this code is to learn basic plotting using MatPlotLib and Numpy.
This code also addresses related topics, like making 1D Numpy arrays, saving
plots as image files, deleting files, and making a polynomial regression.
"""
import matplotlib.pyplot as plt # imports MatPlotLib module
import numpy as np # import Numpy
import os # imports OS module - used for saving/reading files
# Standard 2D plots require data with two coordinates
# Assume we have the following data:
xData = np.array([12.5, 25, 37.5, 50, 62.5, 75]) # x data
yData = np.array([20, 59, 118, 197, 299, 420]) # y data
# The data doesn't have to be numpy arrays - lists work too for plotting
# Making your own lists/arrays is useful for plotting
list(range(10)) # make a list from 0 to 9 in increments of 1
np.array(range(10)) # make a 1D array from 0 to 9 in increments of 1
# Useful functions to create linear or logarithmic 1D arrays
# linspace inputs are (Start, Stop, num=NumberOfElements)
np.linspace(0, 10, num=21) # create linear array from 0 to 10 with 51 elements
np.linspace(10, -10, num=51) # Start can be greater than Stop
np.linspace(0, 1e5, num=100) # create linear array from 0 to 100,000 with 100
# elements
# Similarly, logspace can be used to make an array with log spacing
xLog = np.logspace(0, 10, num=21)
# create log array from 0 to 10 with 21 elements
print(xLog)
# If you are using an IDE with the IPython console (e.g. Spyder), figures can
# be displayed in line in the console. If you'd rather have your figures as
# separate popups a la Matlab, check your IDE's settings.
# In Spyder, this can be configured through:
# Tools --> Preferences --> IPython console --> Graphics --> Backend: Automatic
# Set Backend to Inline (default) if you want graphics in the Console instead
# Making a basic plot:
# (Note, you must run all lines together in order to make one plot with all
# of the specified elements)
fig, ax = plt.subplots()  # define figure object and axis object
plt.plot(xData, yData)  # plots the x-y data on the current figure
# Sometimes you need to define figure and axis objects to edit other settings
# For basic usage, you don't need to define them:
plt.plot(xData, yData)  # plots the x-y data on the current figure
# Save your plot with the plt.savefig function
plt.plot(xData, yData)  # plots the x-y data on the current figure
fname = 'MyPlotImage.png'  # choose filename & extension (e.g. png, jpg, pdf)
plt.savefig(fname)  # save current figure as filename to working directory
# The file will save in the working directory if the path is not specified
# Check the current working directory with getcwd
os.getcwd()  # check current working directory
# Run/open file outside of Spyder/Python
# NOTE: os.startfile is only available on Windows (see the os module docs)
os.startfile(fname)  # open file with the OS's default program
os.remove(fname)  # delete file permanently (Does NOT go to Recycle Bin!)
# It's gone forever. Just like with "rm" in bash shell
# Save as higher-quality image by increasing the dpi (default dpi = 80)
plt.plot(xData, yData)  # plot the x-y data on the current figure
myDir = 'C:\\Users\\Brian\\Desktop\\'  # example folder to save figure
filepath = myDir + fname  # combine directory and file name into one string
print(filepath)
# Specify the DPI (dots per inch of a plot) to set size of plot
plt.savefig(filepath, dpi=320)  # Save file with high DPI
plt.close()  # Once the file has been saved, you can close the figure so it
# does not appear on the console
# open file with the OS's default program
os.startfile(filepath)
# delete file permanently (Does NOT go to Recycle Bin!)
os.remove(filepath)
# Now we have made a basic plot. Notice the default format is a thin,
# continuous, blue line.
# What if you wanted black circles instead?
# The line/marker format can be specified with a color and/or marker style,
# entered immediately after the (x, y) data in the plot.plot() function
# Colors:
# k = black
# b = blue
# r = red
# m = magenta
# c = cyan
# y = yellow
# g = green
# For more colors go to: https://matplotlib.org/api/colors_api.html
# Markers:
# o = circles
# x = x
# . = small dot
# + = plus
# s = square
# d = diamond
# p = pentagon
# ^ = triangle
# For more markers go to: https://matplotlib.org/api/markers_api.html
# Line styles
# - = solid line
# -. = dash-dot line
# -- = dashed line
# : = dotted line
# For more lines (and general options for 2D lines) go to:
# https://matplotlib.org/api/_as_gen/matplotlib.lines.Line2D.html
# The color, marker, and line format can be specified in any order
# e.g. 'ko-' or 'o-k'
plt.plot(xData, yData, 'ko')  # plots with black (k) circles (o) and no line
# Good plots need axis labels. Axis labels are added with xlabel and ylabel
plt.plot(xData, yData, 'ko')  # plots the x-y data with black (k) circles (o)
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
# Multiple data sets can be plotted together
# We need another data set
xData2 = [11.25, 22.5, 33.75, 45.0, 60, 72]
yData2 = [30., 60, 100., 150, 270, 410]
# Elements can be plotted together either on the same line of code:
# The first data set is in black circles with no line
# The second data set is plotted with blue triangles and a dotted line
plt.plot(xData, yData, 'ko', xData2, yData2, 'b:^')
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
# Or on separate lines of code:
plt.plot(xData, yData, 'rx')  # red X markers
plt.plot(xData2, yData2, 'bs')  # blue square markers
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
# You can make multiple plots by using the plt.figure() function
plt.figure()  # make new figure
plt.plot(xData, yData, 'rx')  # red X markers
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
plt.figure()  # make new figure
plt.plot(xData2, yData2, 'bs')  # blue square markers
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
# You can add a legend with the legend function; input is an array of strings
plt.plot(xData, yData, 'md')  # magenta diamond markers
plt.plot(xData2, yData2, 'y.')  # yellow dot markers
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
plt.legend(['Test 1', 'Test 2'])
# More on legends here: https://matplotlib.org/users/legend_guide.html
# Set the axis limits using xlim(xmin, xmax) and ylim(ymin, ymax)
plt.plot(xData, yData, 'ko')
plt.plot(xData2, yData2, 'bs')
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
plt.legend(['Test 1', 'Test 2'])
plt.xlim(0, 80)
plt.ylim(0, 500)
# Plot on log-log axes using the "loglog" function in place of "plot"
plt.loglog(xData, yData, 'ko')
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
# Or plot with a logarithmic x-axis and linear y axis with "semilogx"
plt.semilogx(xData, yData, 'ko')
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
# Or vise versa with "semilogy"
plt.semilogy(xData, yData, 'ko')
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
# Turn on the grid with the grid function
plt.plot(xData, yData, 'ko')
plt.grid(True)  # Turns on major gridlines
# Simple OLS polynomial regression for (xData, yData)
degree = 2  # Choose order of polynomial
# create fit object with inputs (x, y, degree)
fit = np.polyfit(xData, yData, degree)  # y = b0 + b1*x + b2*x^2
yFit = np.polyval(fit, xData)  # Calculate y_fit for each x data point
# NOTE(review): np.corrcoef gives the *linear* Pearson correlation between x
# and y; for a degree-2 fit, R^2 is normally computed from the fit residuals
# instead - confirm this is the intended statistic.
R = np.corrcoef(xData, yData)[0, 1]  # Get correlation coefficient (R)
print(R)  # Print correlation coefficient
Rsq = R**2  # Calculate R^2 value
print('The R-squared value is {0:0.6}'.format(Rsq))  # Print R^2 value
plt.plot(xData, yData, 'ko', xData, yFit, 'r')  # Plot data with regression
plt.xlabel("Velocity [mph]")  # add x-label to plot
plt.ylabel("Stopping Distance [ft]")  # add y-label to plot
# Small note on data types:
# Most (if not all) Numpy functions use 64-bit floating-point numbers (aka
# float64) by default. This means calculations are more precise compared to
# default Python numbers (which are 32-bit) at the cost of speed. If speed is
# of high importance, consider using 32-bit numbers with Numpy. These can be
# specified in most functions with "dtype"; e.g. linspace:
x1 = np.linspace(4, 100, 25)  # this creates an array of float64s (default)
type(x1[0])  # data type is float64 for individual elements
x2 = np.linspace(4, 100, 25, dtype='float32')  # array of float32s
type(x2[0])  # data type is float32 for individual elements
# Read more on data types here:
# https://docs.scipy.org/doc/numpy/user/basics.types.html
| [
"matplotlib.pyplot.loglog",
"os.remove",
"numpy.polyfit",
"numpy.logspace",
"matplotlib.pyplot.figure",
"numpy.polyval",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.subplots",
"os.startfile",
"matplotlib.pyplot.ylim",
"numpy.corrcoef",
"mat... | [((541, 581), 'numpy.array', 'np.array', (['[12.5, 25, 37.5, 50, 62.5, 75]'], {}), '([12.5, 25, 37.5, 50, 62.5, 75])\n', (549, 581), True, 'import numpy as np\n'), ((600, 638), 'numpy.array', 'np.array', (['[20, 59, 118, 197, 299, 420]'], {}), '([20, 59, 118, 197, 299, 420])\n', (608, 638), True, 'import numpy as np\n'), ((1030, 1056), 'numpy.linspace', 'np.linspace', (['(0)', '(10)'], {'num': '(21)'}), '(0, 10, num=21)\n', (1041, 1056), True, 'import numpy as np\n'), ((1110, 1138), 'numpy.linspace', 'np.linspace', (['(10)', '(-10)'], {'num': '(51)'}), '(10, -10, num=51)\n', (1121, 1138), True, 'import numpy as np\n'), ((1173, 1206), 'numpy.linspace', 'np.linspace', (['(0)', '(100000.0)'], {'num': '(100)'}), '(0, 100000.0, num=100)\n', (1184, 1206), True, 'import numpy as np\n'), ((1369, 1395), 'numpy.logspace', 'np.logspace', (['(0)', '(10)'], {'num': '(21)'}), '(0, 10, num=21)\n', (1380, 1395), True, 'import numpy as np\n'), ((2013, 2027), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2025, 2027), True, 'import matplotlib.pyplot as plt\n'), ((2068, 2090), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData'], {}), '(xData, yData)\n', (2076, 2090), True, 'import matplotlib.pyplot as plt\n'), ((2264, 2286), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData'], {}), '(xData, yData)\n', (2272, 2286), True, 'import matplotlib.pyplot as plt\n'), ((2379, 2401), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData'], {}), '(xData, yData)\n', (2387, 2401), True, 'import matplotlib.pyplot as plt\n'), ((2524, 2542), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (2535, 2542), True, 'import matplotlib.pyplot as plt\n'), ((2725, 2736), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2734, 2736), False, 'import os\n'), ((2813, 2832), 'os.startfile', 'os.startfile', (['fname'], {}), '(fname)\n', (2825, 2832), False, 'import os\n'), ((2876, 2892), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (2885, 
2892), False, 'import os\n'), ((3097, 3119), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData'], {}), '(xData, yData)\n', (3105, 3119), True, 'import matplotlib.pyplot as plt\n'), ((3390, 3420), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {'dpi': '(320)'}), '(filepath, dpi=320)\n', (3401, 3420), True, 'import matplotlib.pyplot as plt\n'), ((3448, 3459), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3457, 3459), True, 'import matplotlib.pyplot as plt\n'), ((3613, 3635), 'os.startfile', 'os.startfile', (['filepath'], {}), '(filepath)\n', (3625, 3635), False, 'import os\n'), ((3693, 3712), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (3702, 3712), False, 'import os\n'), ((4658, 4686), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData', '"""ko"""'], {}), "(xData, yData, 'ko')\n", (4666, 4686), True, 'import matplotlib.pyplot as plt\n'), ((4813, 4841), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData', '"""ko"""'], {}), "(xData, yData, 'ko')\n", (4821, 4841), True, 'import matplotlib.pyplot as plt\n'), ((4891, 4919), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (4901, 4919), True, 'import matplotlib.pyplot as plt\n'), ((4943, 4979), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (4953, 4979), True, 'import matplotlib.pyplot as plt\n'), ((5354, 5405), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData', '"""ko"""', 'xData2', 'yData2', '"""b:^"""'], {}), "(xData, yData, 'ko', xData2, yData2, 'b:^')\n", (5362, 5405), True, 'import matplotlib.pyplot as plt\n'), ((5406, 5434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (5416, 5434), True, 'import matplotlib.pyplot as plt\n'), ((5458, 5494), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (5468, 5494), True, 'import 
matplotlib.pyplot as plt\n'), ((5551, 5579), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData', '"""rx"""'], {}), "(xData, yData, 'rx')\n", (5559, 5579), True, 'import matplotlib.pyplot as plt\n'), ((5597, 5627), 'matplotlib.pyplot.plot', 'plt.plot', (['xData2', 'yData2', '"""bs"""'], {}), "(xData2, yData2, 'bs')\n", (5605, 5627), True, 'import matplotlib.pyplot as plt\n'), ((5651, 5679), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (5661, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5739), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (5713, 5739), True, 'import matplotlib.pyplot as plt\n'), ((5829, 5841), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5839, 5841), True, 'import matplotlib.pyplot as plt\n'), ((5861, 5889), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData', '"""rx"""'], {}), "(xData, yData, 'rx')\n", (5869, 5889), True, 'import matplotlib.pyplot as plt\n'), ((5907, 5935), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (5917, 5935), True, 'import matplotlib.pyplot as plt\n'), ((5959, 5995), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (5969, 5995), True, 'import matplotlib.pyplot as plt\n'), ((6019, 6031), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6029, 6031), True, 'import matplotlib.pyplot as plt\n'), ((6051, 6081), 'matplotlib.pyplot.plot', 'plt.plot', (['xData2', 'yData2', '"""bs"""'], {}), "(xData2, yData2, 'bs')\n", (6059, 6081), True, 'import matplotlib.pyplot as plt\n'), ((6105, 6133), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (6115, 6133), True, 'import matplotlib.pyplot as plt\n'), ((6157, 6193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping 
Distance [ft]')\n", (6167, 6193), True, 'import matplotlib.pyplot as plt\n'), ((6296, 6324), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData', '"""md"""'], {}), "(xData, yData, 'md')\n", (6304, 6324), True, 'import matplotlib.pyplot as plt\n'), ((6352, 6382), 'matplotlib.pyplot.plot', 'plt.plot', (['xData2', 'yData2', '"""y."""'], {}), "(xData2, yData2, 'y.')\n", (6360, 6382), True, 'import matplotlib.pyplot as plt\n'), ((6405, 6433), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (6415, 6433), True, 'import matplotlib.pyplot as plt\n'), ((6457, 6493), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (6467, 6493), True, 'import matplotlib.pyplot as plt\n'), ((6517, 6549), 'matplotlib.pyplot.legend', 'plt.legend', (["['Test 1', 'Test 2']"], {}), "(['Test 1', 'Test 2'])\n", (6527, 6549), True, 'import matplotlib.pyplot as plt\n'), ((6689, 6717), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData', '"""ko"""'], {}), "(xData, yData, 'ko')\n", (6697, 6717), True, 'import matplotlib.pyplot as plt\n'), ((6718, 6748), 'matplotlib.pyplot.plot', 'plt.plot', (['xData2', 'yData2', '"""bs"""'], {}), "(xData2, yData2, 'bs')\n", (6726, 6748), True, 'import matplotlib.pyplot as plt\n'), ((6749, 6777), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (6759, 6777), True, 'import matplotlib.pyplot as plt\n'), ((6801, 6837), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (6811, 6837), True, 'import matplotlib.pyplot as plt\n'), ((6861, 6893), 'matplotlib.pyplot.legend', 'plt.legend', (["['Test 1', 'Test 2']"], {}), "(['Test 1', 'Test 2'])\n", (6871, 6893), True, 'import matplotlib.pyplot as plt\n'), ((6894, 6909), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(80)'], {}), '(0, 80)\n', (6902, 6909), True, 'import matplotlib.pyplot as plt\n'), 
((6910, 6926), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(500)'], {}), '(0, 500)\n', (6918, 6926), True, 'import matplotlib.pyplot as plt\n'), ((6998, 7028), 'matplotlib.pyplot.loglog', 'plt.loglog', (['xData', 'yData', '"""ko"""'], {}), "(xData, yData, 'ko')\n", (7008, 7028), True, 'import matplotlib.pyplot as plt\n'), ((7029, 7057), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (7039, 7057), True, 'import matplotlib.pyplot as plt\n'), ((7081, 7117), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (7091, 7117), True, 'import matplotlib.pyplot as plt\n'), ((7212, 7244), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['xData', 'yData', '"""ko"""'], {}), "(xData, yData, 'ko')\n", (7224, 7244), True, 'import matplotlib.pyplot as plt\n'), ((7245, 7273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (7255, 7273), True, 'import matplotlib.pyplot as plt\n'), ((7297, 7333), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (7307, 7333), True, 'import matplotlib.pyplot as plt\n'), ((7390, 7422), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['xData', 'yData', '"""ko"""'], {}), "(xData, yData, 'ko')\n", (7402, 7422), True, 'import matplotlib.pyplot as plt\n'), ((7423, 7451), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (7433, 7451), True, 'import matplotlib.pyplot as plt\n'), ((7475, 7511), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (7485, 7511), True, 'import matplotlib.pyplot as plt\n'), ((7578, 7606), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData', '"""ko"""'], {}), "(xData, yData, 'ko')\n", (7586, 7606), True, 'import matplotlib.pyplot as plt\n'), ((7607, 7621), 'matplotlib.pyplot.grid', 'plt.grid', 
(['(True)'], {}), '(True)\n', (7615, 7621), True, 'import matplotlib.pyplot as plt\n'), ((7800, 7832), 'numpy.polyfit', 'np.polyfit', (['xData', 'yData', 'degree'], {}), '(xData, yData, degree)\n', (7810, 7832), True, 'import numpy as np\n'), ((7866, 7888), 'numpy.polyval', 'np.polyval', (['fit', 'xData'], {}), '(fit, xData)\n', (7876, 7888), True, 'import numpy as np\n'), ((8149, 8195), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData', '"""ko"""', 'xData', 'yFit', '"""r"""'], {}), "(xData, yData, 'ko', xData, yFit, 'r')\n", (8157, 8195), True, 'import matplotlib.pyplot as plt\n'), ((8225, 8253), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity [mph]"""'], {}), "('Velocity [mph]')\n", (8235, 8253), True, 'import matplotlib.pyplot as plt\n'), ((8277, 8313), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stopping Distance [ft]"""'], {}), "('Stopping Distance [ft]')\n", (8287, 8313), True, 'import matplotlib.pyplot as plt\n'), ((8738, 8761), 'numpy.linspace', 'np.linspace', (['(4)', '(100)', '(25)'], {}), '(4, 100, 25)\n', (8749, 8761), True, 'import numpy as np\n'), ((8875, 8915), 'numpy.linspace', 'np.linspace', (['(4)', '(100)', '(25)'], {'dtype': '"""float32"""'}), "(4, 100, 25, dtype='float32')\n", (8886, 8915), True, 'import numpy as np\n'), ((7934, 7959), 'numpy.corrcoef', 'np.corrcoef', (['xData', 'yData'], {}), '(xData, yData)\n', (7945, 7959), True, 'import numpy as np\n')] |
import numpy as np
# Biomass measurements (presumably g/L; TODO confirm units) for three
# E. coli culture setups (rows) at nine sampling points (columns).
ecoli_m_b = np.array([[0.1, 0.15, 0.19, 0.5, # 250 mL flask
                   0.9, 1.4, 1.8, 2.1, 2.3],
                  [0.1, 0.17, 0.2, 0.53, # 50 L bioreactor
                   0.97, 1.43, 1.8, 2.1, 2.8],
                  [0.1, 0.17, 0.2, 0.52, # 50 L fed-batch bioreactor
                   0.95, 1.41, 1.8, 2.2, 2.8]
                  ])
# Scale every measurement by the 0.39 conversion factor (units/meaning
# not documented here -- TODO confirm).
# NOTE(review): the bare expressions below are notebook-style echoes;
# they have no effect when this file is run as a plain script.
ecoli_m_b * 0.39
produccion = np.array([ [5,3], [11, 7], [4, 9], [2, 6]])
produccion
costos = np.array([3.5, 5, 7, 4.3])
costos
costos / produccion.T
| [
"numpy.array"
] | [((31, 201), 'numpy.array', 'np.array', (['[[0.1, 0.15, 0.19, 0.5, 0.9, 1.4, 1.8, 2.1, 2.3], [0.1, 0.17, 0.2, 0.53, \n 0.97, 1.43, 1.8, 2.1, 2.8], [0.1, 0.17, 0.2, 0.52, 0.95, 1.41, 1.8, 2.2,\n 2.8]]'], {}), '([[0.1, 0.15, 0.19, 0.5, 0.9, 1.4, 1.8, 2.1, 2.3], [0.1, 0.17, 0.2,\n 0.53, 0.97, 1.43, 1.8, 2.1, 2.8], [0.1, 0.17, 0.2, 0.52, 0.95, 1.41, \n 1.8, 2.2, 2.8]])\n', (39, 201), True, 'import numpy as np\n'), ((425, 468), 'numpy.array', 'np.array', (['[[5, 3], [11, 7], [4, 9], [2, 6]]'], {}), '([[5, 3], [11, 7], [4, 9], [2, 6]])\n', (433, 468), True, 'import numpy as np\n'), ((490, 516), 'numpy.array', 'np.array', (['[3.5, 5, 7, 4.3]'], {}), '([3.5, 5, 7, 4.3])\n', (498, 516), True, 'import numpy as np\n')] |
import numpy as np
from rasterio import (
ubyte, uint8, uint16, uint32, int16, int32, float32, float64)
from rasterio.dtypes import (
_gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype,
validate_dtype
)
def test_is_ndarray():
    """is_ndarray() accepts numpy arrays and rejects plain sequences."""
    assert is_ndarray(np.zeros((1,)))
    # Use `not` rather than `== False` (PEP 8 / flake8 E712).
    assert not is_ndarray([0])
    assert not is_ndarray((0,))
def test_np_dt_uint8():
    """check_dtype() recognises the numpy uint8 scalar type."""
    assert check_dtype(np.uint8)


def test_dt_ubyte():
    """check_dtype() recognises rasterio's ubyte alias."""
    assert check_dtype(ubyte)


def test_check_dtype_invalid():
    """check_dtype() rejects names that are not dtypes."""
    # Use `not` rather than `== False` (PEP 8 / flake8 E712).
    assert not check_dtype('foo')
def test_gdal_name():
    """_gdal_typename() maps numpy/rasterio dtypes to GDAL type names."""
    assert _gdal_typename(ubyte) == 'Byte'
    assert _gdal_typename(np.uint8) == 'Byte'
    assert _gdal_typename(np.uint16) == 'UInt16'
def test_get_minimum_dtype():
    """get_minimum_dtype() picks the smallest dtype that can hold the values."""
    assert get_minimum_dtype([0, 1]) == uint8
    assert get_minimum_dtype([0, 1000]) == uint16
    assert get_minimum_dtype([0, 100000]) == uint32
    assert get_minimum_dtype([-1, 0, 1]) == int16
    assert get_minimum_dtype([-1, 0, 100000]) == int32
    assert get_minimum_dtype([-1.5, 0, 1.5]) == float32
    assert get_minimum_dtype([-1.5e+100, 0, 1.5e+100]) == float64
def test_can_cast_dtype():
    """can_cast_dtype() checks lossless castability of values to a dtype."""
    # Plain `assert expr` / `assert not expr` instead of comparing the
    # result with True/False (PEP 8 / flake8 E712).
    assert can_cast_dtype((1, 2, 3), np.uint8)
    assert can_cast_dtype(np.array([1, 2, 3]), np.uint8)
    assert can_cast_dtype(np.array([1, 2, 3], dtype=np.uint8), np.uint8)
    assert can_cast_dtype(np.array([1, 2, 3]), np.float32)
    assert can_cast_dtype(np.array([1.4, 2.1, 3.65]), np.float32)
    # Fractional values cannot be cast to uint8 without loss.
    assert not can_cast_dtype(np.array([1.4, 2.1, 3.65]), np.uint8)
def test_validate_dtype():
    """validate_dtype() checks that values fit one of the allowed dtype names."""
    # Plain `assert expr` / `assert not expr` instead of `== True/False`.
    assert validate_dtype([1, 2, 3], ('uint8', 'uint16'))
    assert validate_dtype(np.array([1, 2, 3]), ('uint8', 'uint16'))
    assert validate_dtype(np.array([1.4, 2.1, 3.65]), ('float32',))
    # Fractional values cannot be represented as uint8.
    assert not validate_dtype(np.array([1.4, 2.1, 3.65]), ('uint8',))
| [
"rasterio.dtypes.can_cast_dtype",
"rasterio.dtypes._gdal_typename",
"rasterio.dtypes.check_dtype",
"numpy.zeros",
"rasterio.dtypes.is_ndarray",
"numpy.array",
"rasterio.dtypes.get_minimum_dtype",
"rasterio.dtypes.validate_dtype"
] | [((413, 434), 'rasterio.dtypes.check_dtype', 'check_dtype', (['np.uint8'], {}), '(np.uint8)\n', (424, 434), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((469, 487), 'rasterio.dtypes.check_dtype', 'check_dtype', (['ubyte'], {}), '(ubyte)\n', (480, 487), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((287, 301), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (295, 301), True, 'import numpy as np\n'), ((314, 329), 'rasterio.dtypes.is_ndarray', 'is_ndarray', (['[0]'], {}), '([0])\n', (324, 329), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((350, 366), 'rasterio.dtypes.is_ndarray', 'is_ndarray', (['(0,)'], {}), '((0,))\n', (360, 366), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((533, 551), 'rasterio.dtypes.check_dtype', 'check_dtype', (['"""foo"""'], {}), "('foo')\n", (544, 551), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((596, 617), 'rasterio.dtypes._gdal_typename', '_gdal_typename', (['ubyte'], {}), '(ubyte)\n', (610, 617), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((639, 663), 'rasterio.dtypes._gdal_typename', '_gdal_typename', (['np.uint8'], {}), '(np.uint8)\n', (653, 663), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((685, 710), 'rasterio.dtypes._gdal_typename', '_gdal_typename', (['np.uint16'], {}), '(np.uint16)\n', (699, 710), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, 
validate_dtype\n'), ((766, 791), 'rasterio.dtypes.get_minimum_dtype', 'get_minimum_dtype', (['[0, 1]'], {}), '([0, 1])\n', (783, 791), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((812, 840), 'rasterio.dtypes.get_minimum_dtype', 'get_minimum_dtype', (['[0, 1000]'], {}), '([0, 1000])\n', (829, 840), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((862, 892), 'rasterio.dtypes.get_minimum_dtype', 'get_minimum_dtype', (['[0, 100000]'], {}), '([0, 100000])\n', (879, 892), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((914, 943), 'rasterio.dtypes.get_minimum_dtype', 'get_minimum_dtype', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (931, 943), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((964, 998), 'rasterio.dtypes.get_minimum_dtype', 'get_minimum_dtype', (['[-1, 0, 100000]'], {}), '([-1, 0, 100000])\n', (981, 998), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((1019, 1052), 'rasterio.dtypes.get_minimum_dtype', 'get_minimum_dtype', (['[-1.5, 0, 1.5]'], {}), '([-1.5, 0, 1.5])\n', (1036, 1052), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((1075, 1118), 'rasterio.dtypes.get_minimum_dtype', 'get_minimum_dtype', (['[-1.5e+100, 0, 1.5e+100]'], {}), '([-1.5e+100, 0, 1.5e+100])\n', (1092, 1118), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((1170, 1205), 'rasterio.dtypes.can_cast_dtype', 'can_cast_dtype', (['(1, 2, 3)', 'np.uint8'], {}), '((1, 2, 3), np.uint8)\n', (1184, 1205), False, 
'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((1614, 1660), 'rasterio.dtypes.validate_dtype', 'validate_dtype', (['[1, 2, 3]', "('uint8', 'uint16')"], {}), "([1, 2, 3], ('uint8', 'uint16'))\n", (1628, 1660), False, 'from rasterio.dtypes import _gdal_typename, is_ndarray, check_dtype, get_minimum_dtype, can_cast_dtype, validate_dtype\n'), ((1240, 1259), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1248, 1259), True, 'import numpy as np\n'), ((1305, 1340), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.uint8'}), '([1, 2, 3], dtype=np.uint8)\n', (1313, 1340), True, 'import numpy as np\n'), ((1386, 1405), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1394, 1405), True, 'import numpy as np\n'), ((1453, 1479), 'numpy.array', 'np.array', (['[1.4, 2.1, 3.65]'], {}), '([1.4, 2.1, 3.65])\n', (1461, 1479), True, 'import numpy as np\n'), ((1527, 1553), 'numpy.array', 'np.array', (['[1.4, 2.1, 3.65]'], {}), '([1.4, 2.1, 3.65])\n', (1535, 1553), True, 'import numpy as np\n'), ((1695, 1714), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1703, 1714), True, 'import numpy as np\n'), ((1771, 1797), 'numpy.array', 'np.array', (['[1.4, 2.1, 3.65]'], {}), '([1.4, 2.1, 3.65])\n', (1779, 1797), True, 'import numpy as np\n'), ((1847, 1873), 'numpy.array', 'np.array', (['[1.4, 2.1, 3.65]'], {}), '([1.4, 2.1, 3.65])\n', (1855, 1873), True, 'import numpy as np\n')] |
# -*- coding:utf8 -*-
# ==============================================================================
# Copyright 2017 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module implements the Vocab class for converting string to id and back
"""
import numpy as np
class Vocab(object):
    """
    Implements a vocabulary to store the tokens in the data, with their
    corresponding embeddings.
    """
    def __init__(self, filename=None, initial_tokens=None, lower=False):
        """Build the vocabulary.

        Args:
            filename: optional path of a file with one token per line to preload.
            initial_tokens: optional list of tokens to register before any data.
            lower: whether tokens are lowercased before insertion and lookup.
        """
        self.id2token = {}   # id -> token
        self.token2id = {}   # token -> id
        self.token_cnt = {}  # token -> frequency count
        self.lower = lower

        self.embed_dim = None
        self.embeddings = None

        #self.pad_token = '<blank>'
        #self.unk_token = '<unk>'
        # keep consistence with allennlp vocabulary
        self.pad_token = '@@P<PASSWORD>@@'.lower()
        self.unk_token = '@@UNKNOWN@@'.lower()

        # BUG FIX: copy the incoming list -- extend() below used to mutate
        # the caller's `initial_tokens` argument in place.
        self.initial_tokens = list(initial_tokens) if initial_tokens is not None else []
        self.initial_tokens.extend([self.pad_token, self.unk_token])
        for token in self.initial_tokens:
            self.add(token)

        if filename is not None:
            self.load_from_file(filename)

    def size(self):
        """
        get the size of vocabulary
        Returns:
            an integer indicating the size
        """
        return len(self.id2token)

    def load_from_file(self, file_path):
        """
        loads the vocab from file_path
        Args:
            file_path: a file with a word in each line
        """
        # context manager ensures the file handle is closed (was leaked before)
        with open(file_path, 'r') as fin:
            for line in fin:
                token = line.rstrip('\n')
                self.add(token)

    def get_id(self, token):
        """
        gets the id of a token, returns the id of unk token if token is not in vocab
        Args:
            key: a string indicating the word
        Returns:
            an integer
        """
        token = token.lower() if self.lower else token
        try:
            return self.token2id[token]
        except KeyError:
            return self.token2id[self.unk_token]

    def get_token(self, idx):
        """
        gets the token corresponding to idx, returns unk token if idx is not in vocab
        Args:
            idx: an integer
        returns:
            a token string
        """
        try:
            return self.id2token[idx]
        except KeyError:
            return self.unk_token

    def add(self, token, cnt=1):
        """
        adds the token to vocab
        Args:
            token: a string
            cnt: a num indicating the count of the token to add, default is 1
        Returns:
            the integer id assigned to the token
        """
        token = token.lower() if self.lower else token
        if token in self.token2id:
            idx = self.token2id[token]
        else:
            idx = len(self.id2token)
            self.id2token[idx] = token
            self.token2id[token] = idx
        if cnt > 0:
            if token in self.token_cnt:
                self.token_cnt[token] += cnt
            else:
                self.token_cnt[token] = cnt
        return idx

    def filter_tokens_by_cnt(self, min_cnt):
        """
        filter the tokens in vocab by their count
        Args:
            min_cnt: tokens with frequency less than min_cnt is filtered
        """
        filtered_tokens = [token for token in self.token2id if self.token_cnt[token] >= min_cnt]
        # rebuild the token x id map; initial tokens are always kept
        self.token2id = {}
        self.id2token = {}
        for token in self.initial_tokens:
            self.add(token, cnt=0)
        for token in filtered_tokens:
            self.add(token, cnt=0)

    def randomly_init_embeddings(self, embed_dim):
        """
        randomly initializes the embeddings for each token
        Args:
            embed_dim: the size of the embedding for each token
        """
        self.embed_dim = embed_dim
        self.embeddings = np.random.rand(self.size(), embed_dim)
        # pad/unk rows are zeroed so they contribute nothing downstream
        for token in [self.pad_token, self.unk_token]:
            self.embeddings[self.get_id(token)] = np.zeros([self.embed_dim])

    def load_pretrained_embeddings(self, embedding_path):
        """
        loads the pretrained embeddings from embedding_path,
        tokens not in pretrained embeddings will be filtered
        Args:
            embedding_path: the path of the pretrained embedding file
        """
        trained_embeddings = {}
        with open(embedding_path, 'r') as fin:
            for line in fin:
                contents = line.strip().split()
                # BUG FIX: the file is opened in text mode, so contents[0]
                # is already a str on Python 3 and str has no .decode();
                # only decode when the token is actually raw bytes.
                token = contents[0]
                if isinstance(token, bytes):
                    token = token.decode('utf8')
                if token not in self.token2id:
                    continue
                trained_embeddings[token] = list(map(float, contents[1:]))
                if self.embed_dim is None:
                    self.embed_dim = len(contents) - 1
        filtered_tokens = trained_embeddings.keys()
        # rebuild the token x id map
        self.token2id = {}
        self.id2token = {}
        for token in self.initial_tokens:
            self.add(token, cnt=0)
        for token in filtered_tokens:
            self.add(token, cnt=0)
        # load embeddings
        self.embeddings = np.zeros([self.size(), self.embed_dim])
        for token in self.token2id.keys():
            if token in trained_embeddings:
                self.embeddings[self.get_id(token)] = trained_embeddings[token]

    def convert_to_ids(self, tokens):
        """
        Convert a list of tokens to ids, use unk_token if the token is not in vocab.
        Args:
            tokens: a list of token
        Returns:
            a list of ids
        """
        vec = [self.get_id(label) for label in tokens]
        return vec

    def recover_from_ids(self, ids, stop_id=None):
        """
        Convert a list of ids to tokens, stop converting if the stop_id is encountered
        Args:
            ids: a list of ids to convert
            stop_id: the stop id, default is None
        Returns:
            a list of tokens
        """
        tokens = []
        for i in ids:
            tokens += [self.get_token(i)]
            if stop_id is not None and i == stop_id:
                break
        return tokens
| [
"numpy.zeros"
] | [((4618, 4644), 'numpy.zeros', 'np.zeros', (['[self.embed_dim]'], {}), '([self.embed_dim])\n', (4626, 4644), True, 'import numpy as np\n')] |
"""Main game script for real-life fruit ninja."""
from ninja import Ninja
import numpy as np
import cv2
def main():
    """Run the real-life fruit-ninja game loop off the default webcam.

    Motion is detected with dense Farneback optical flow; the flow field is
    rendered as an HSV overlay (hue = direction, value = magnitude) and the
    magnitude/angle arrays drive the game's hit detection.
    """
    game = Ninja(420, 640)
    cap = cv2.VideoCapture(0)

    # setup optical flow: the first frame seeds the "previous" gray image
    _, frame1 = cap.read()
    frame1 = cv2.resize(frame1, (640, 360))
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value encode flow angle/magnitude

    try:
        while True:
            _, frame = cap.read()
            frame = cv2.resize(frame, (640, 360))

            # compute optical flow between consecutive frames
            # (renamed from `next`, which shadowed the builtin)
            next_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            flow = cv2.calcOpticalFlowFarneback(
                prvs, next_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
            # BUG FIX: advance the reference frame; previously `prvs` was
            # never updated, so flow was always measured against the very
            # first frame instead of frame-to-frame motion.
            prvs = next_gray
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            frame = frame + bgr

            game.update()
            game.check(mag, ang, frame)
            game.draw(frame)
            cv2.imshow(game.frame_name, frame)
            if cv2.waitKey(1) & 0xff == ord('q'):
                break
    finally:
        # release the camera and close any windows even on error/quit
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
| [
"numpy.zeros_like",
"cv2.cartToPolar",
"ninja.Ninja",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.calcOpticalFlowFarneback",
"cv2.normalize",
"cv2.imshow",
"cv2.resize"
] | [((130, 145), 'ninja.Ninja', 'Ninja', (['(420)', '(640)'], {}), '(420, 640)\n', (135, 145), False, 'from ninja import Ninja\n'), ((156, 175), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (172, 175), False, 'import cv2\n'), ((242, 272), 'cv2.resize', 'cv2.resize', (['frame1', '(640, 360)'], {}), '(frame1, (640, 360))\n', (252, 272), False, 'import cv2\n'), ((284, 324), 'cv2.cvtColor', 'cv2.cvtColor', (['frame1', 'cv2.COLOR_BGR2GRAY'], {}), '(frame1, cv2.COLOR_BGR2GRAY)\n', (296, 324), False, 'import cv2\n'), ((335, 356), 'numpy.zeros_like', 'np.zeros_like', (['frame1'], {}), '(frame1)\n', (348, 356), True, 'import numpy as np\n'), ((442, 471), 'cv2.resize', 'cv2.resize', (['frame', '(640, 360)'], {}), '(frame, (640, 360))\n', (452, 471), False, 'import cv2\n'), ((519, 558), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (531, 558), False, 'import cv2\n'), ((574, 646), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prvs', 'next', 'None', '(0.5)', '(3)', '(15)', '(3)', '(5)', '(1.2)', '(0)'], {}), '(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n', (602, 646), False, 'import cv2\n'), ((679, 722), 'cv2.cartToPolar', 'cv2.cartToPolar', (['flow[..., 0]', 'flow[..., 1]'], {}), '(flow[..., 0], flow[..., 1])\n', (694, 722), False, 'import cv2\n'), ((789, 838), 'cv2.normalize', 'cv2.normalize', (['mag', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(mag, None, 0, 255, cv2.NORM_MINMAX)\n', (802, 838), False, 'import cv2\n'), ((853, 889), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (865, 889), False, 'import cv2\n'), ((1010, 1044), 'cv2.imshow', 'cv2.imshow', (['game.frame_name', 'frame'], {}), '(game.frame_name, frame)\n', (1020, 1044), False, 'import cv2\n'), ((1056, 1070), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1067, 1070), False, 'import cv2\n')] |
import logging
# from random import randint, seed, getstate, setstate
import random
import numpy as np
from scipy import optimize
from copulas import EPSILON
from copulas.bivariate.base import Bivariate, CopulaTypes
from copulas.multivariate.base import Multivariate
from copulas.multivariate.tree import Tree
from copulas.univariate.kde import KDEUnivariate
LOGGER = logging.getLogger(__name__)
class VineCopula(Multivariate):
    def __init__(self, vine_type):
        """Instantiate a vine copula class.

        Args:
            :param vine_type: type of the vine copula, could be 'cvine','dvine','rvine'
            :type vine_type: string
        """
        super().__init__()
        self.type = vine_type
        self.u_matrix = None

        # marginal model used to map each column to uniform scores
        self.model = KDEUnivariate

    def fit(self, X, truncated=3):
        """Fit a vine model to the data.

        Args:
            X: `np.ndarray`: data to be fitted.
               NOTE(review): the `X.corr(...)` / `X[col]` usage below implies
               a pandas DataFrame is actually expected -- confirm with callers.
            truncated: `int` max level to build the vine.
        """
        self.n_sample, self.n_var = X.shape
        self.tau_mat = X.corr(method='kendall').values
        self.u_matrix = np.empty([self.n_sample, self.n_var])

        self.truncated = truncated
        self.depth = self.n_var - 1
        self.trees = []

        # fit one univariate marginal per column and record its CDF/PPF
        self.unis, self.ppfs = [], []
        for i, col in enumerate(X):
            uni = self.model()
            uni.fit(X[col])
            self.u_matrix[:, i] = [uni.cumulative_distribution(x) for x in X[col]]
            self.unis.append(uni)
            self.ppfs.append(uni.percent_point)

        self.train_vine(self.type)

    def train_vine(self, tree_type):
        """Build the sequence of trees that make up the (possibly truncated) vine."""
        LOGGER.debug('start building tree : 0')
        tree_1 = Tree(tree_type)
        tree_1.fit(0, self.n_var, self.tau_mat, self.u_matrix)
        self.trees.append(tree_1)
        LOGGER.debug('finish building tree : 0')

        for k in range(1, min(self.n_var - 1, self.truncated)):
            # get constraints from previous tree
            self.trees[k - 1]._get_constraints()
            tau = self.trees[k - 1].get_tau_matrix()
            LOGGER.debug('start building tree: {0}'.format(k))
            tree_k = Tree(tree_type)
            tree_k.fit(k, self.n_var - k, tau, self.trees[k - 1])
            self.trees.append(tree_k)
            LOGGER.debug('finish building tree: {0}'.format(k))

    def get_likelihood(self, uni_matrix):
        """Compute likelihood of the vine as the sum of per-tree likelihoods."""
        num_tree = len(self.trees)
        values = np.empty([1, num_tree])

        for i in range(num_tree):
            value, new_uni_matrix = self.trees[i].get_likelihood(uni_matrix)
            uni_matrix = new_uni_matrix
            values[0, i] = value

        return np.sum(values)

    def sample(self, num_rows=1, seed=None):
        """Generating samples from vine model.

        NOTE(review): `num_rows` is currently unused -- a single sample of
        length n_var is returned; kept for interface compatibility.
        """
        # Save both global RNG states so sampling leaves them untouched.
        s1 = np.random.get_state()
        s2 = random.getstate()
        np.random.seed(seed)
        # BUG FIX: was random.setstate(seed); setstate() expects the state
        # tuple produced by getstate(), not an integer seed.
        random.seed(seed)

        unis = np.random.uniform(0, 1, self.n_var)
        # randomly select a node to start with
        first_ind = random.randint(0, self.n_var - 1)
        # BUG FIX: was np.random.seed(s1); restoring a saved state requires
        # set_state()/setstate() -- seeding with a state tuple raises.
        np.random.set_state(s1)
        random.setstate(s2)

        adj = self.trees[0].get_adjacent_matrix()
        visited = []
        explore = [first_ind]
        sampled = np.zeros(self.n_var)
        itr = 0

        while explore:
            current = explore.pop(0)
            neighbors = np.where(adj[current, :] == 1)[0].tolist()
            if itr == 0:
                new_x = self.ppfs[current](unis[current])

            else:
                for i in range(itr - 1, -1, -1):
                    current_ind = -1

                    if i >= self.truncated:
                        continue

                    current_tree = self.trees[i].edges
                    # get index of edge to retrieve
                    for edge in current_tree:
                        if i == 0:
                            if (edge.L == current and edge.R == visited[0]) or\
                               (edge.R == current and edge.L == visited[0]):
                                current_ind = edge.index
                                break
                        else:
                            if edge.L == current or edge.R == current:
                                condition = set(edge.D)
                                condition.add(edge.L)
                                condition.add(edge.R)

                                # BUG FIX: set.add() returns None, so the old
                                # `set(visited).add(current)` made visit_set
                                # None and issubset(None) raised TypeError.
                                visit_set = set(visited)
                                visit_set.add(current)

                                if condition.issubset(visit_set):
                                    current_ind = edge.index
                                    break

                    if current_ind != -1:
                        # the node is not independent conditional on visited nodes
                        copula_type = current_tree[current_ind].name
                        copula_para = current_tree[current_ind].param
                        cop = Bivariate(CopulaTypes(copula_type))
                        derivative = cop.get_h_function()

                        # start with last level
                        if i == itr - 1:
                            tmp = optimize.fminbound(
                                derivative, EPSILON, 1.0,
                                args=(unis[visited[0]], copula_para, unis[current])
                            )
                        else:
                            tmp = optimize.fminbound(
                                derivative, EPSILON, 1.0,
                                args=(unis[visited[0]], copula_para, tmp)
                            )

                        # clamp to the open unit interval before inverting
                        tmp = min(max(tmp, EPSILON), 0.99)

                new_x = self.ppfs[current](tmp)

            sampled[current] = new_x

            for s in neighbors:
                if s not in visited:
                    explore.insert(0, s)

            itr += 1
            visited.insert(0, current)

        return sampled
| [
"numpy.random.uniform",
"copulas.bivariate.base.CopulaTypes",
"numpy.sum",
"numpy.random.seed",
"random.randint",
"numpy.random.get_state",
"numpy.empty",
"numpy.zeros",
"logging.getLogger",
"numpy.where",
"scipy.optimize.fminbound",
"random.setstate",
"random.getstate",
"copulas.multivari... | [((371, 398), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (388, 398), False, 'import logging\n'), ((1118, 1155), 'numpy.empty', 'np.empty', (['[self.n_sample, self.n_var]'], {}), '([self.n_sample, self.n_var])\n', (1126, 1155), True, 'import numpy as np\n'), ((1690, 1705), 'copulas.multivariate.tree.Tree', 'Tree', (['tree_type'], {}), '(tree_type)\n', (1694, 1705), False, 'from copulas.multivariate.tree import Tree\n'), ((2477, 2500), 'numpy.empty', 'np.empty', (['[1, num_tree]'], {}), '([1, num_tree])\n', (2485, 2500), True, 'import numpy as np\n'), ((2702, 2716), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (2708, 2716), True, 'import numpy as np\n'), ((2826, 2847), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (2845, 2847), True, 'import numpy as np\n'), ((2870, 2887), 'random.getstate', 'random.getstate', ([], {}), '()\n', (2885, 2887), False, 'import random\n'), ((2905, 2925), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2919, 2925), True, 'import numpy as np\n'), ((2943, 2964), 'random.setstate', 'random.setstate', (['seed'], {}), '(seed)\n', (2958, 2964), False, 'import random\n'), ((2989, 3024), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.n_var'], {}), '(0, 1, self.n_var)\n', (3006, 3024), True, 'import numpy as np\n'), ((3101, 3134), 'random.randint', 'random.randint', (['(0)', '(self.n_var - 1)'], {}), '(0, self.n_var - 1)\n', (3115, 3134), False, 'import random\n'), ((3152, 3170), 'numpy.random.seed', 'np.random.seed', (['s1'], {}), '(s1)\n', (3166, 3170), True, 'import numpy as np\n'), ((3188, 3207), 'random.setstate', 'random.setstate', (['s2'], {}), '(s2)\n', (3203, 3207), False, 'import random\n'), ((3337, 3357), 'numpy.zeros', 'np.zeros', (['self.n_var'], {}), '(self.n_var)\n', (3345, 3357), True, 'import numpy as np\n'), ((2152, 2167), 'copulas.multivariate.tree.Tree', 'Tree', (['tree_type'], {}), '(tree_type)\n', 
(2156, 2167), False, 'from copulas.multivariate.tree import Tree\n'), ((3458, 3488), 'numpy.where', 'np.where', (['(adj[current, :] == 1)'], {}), '(adj[current, :] == 1)\n', (3466, 3488), True, 'import numpy as np\n'), ((5010, 5034), 'copulas.bivariate.base.CopulaTypes', 'CopulaTypes', (['copula_type'], {}), '(copula_type)\n', (5021, 5034), False, 'from copulas.bivariate.base import Bivariate, CopulaTypes\n'), ((5217, 5318), 'scipy.optimize.fminbound', 'optimize.fminbound', (['derivative', 'EPSILON', '(1.0)'], {'args': '(unis[visited[0]], copula_para, unis[current])'}), '(derivative, EPSILON, 1.0, args=(unis[visited[0]],\n copula_para, unis[current]))\n', (5235, 5318), False, 'from scipy import optimize\n'), ((5473, 5564), 'scipy.optimize.fminbound', 'optimize.fminbound', (['derivative', 'EPSILON', '(1.0)'], {'args': '(unis[visited[0]], copula_para, tmp)'}), '(derivative, EPSILON, 1.0, args=(unis[visited[0]],\n copula_para, tmp))\n', (5491, 5564), False, 'from scipy import optimize\n')] |
# import external modules
import numpy, os

# Add Exasim to Python search path
# NOTE(review): exec() of a file located via the working directory runs
# arbitrary code; acceptable for a local toolkit bootstrap, but this
# breaks if the script is launched outside an "Exasim" directory tree
# (cdir.find returns -1).
cdir = os.getcwd(); ii = cdir.find("Exasim");
exec(open(cdir[0:(ii+6)] + "/Installation/setpath.py").read());

# import internal modules
import Preprocessing, Postprocessing, Gencode, Mesh

# Create pde object and mesh object
pde,mesh = Preprocessing.initializeexasim();

# Define a PDE model: governing equations and boundary conditions
pde['model'] = "ModelD";       # ModelC, ModelD, ModelW
pde['modelfile'] = "pdemodel"; # name of a file defining the PDE model

# Choose computing platform and set number of processors
#pde['platform'] = "gpu";   # choose this option if NVIDIA GPUs are available
pde['mpiprocs'] = 1;        # number of MPI processors

# Set discretization parameters, physical parameters, and solver parameters
pde['porder'] = 3;         # polynomial degree
pde['physicsparam'] = numpy.array([1.0]);   # unit thermal conductivity
pde['tau'] = numpy.array([1.0]);            # DG stabilization parameter

# create a mesh of 8 by 8 quads on a square domain
mesh['p'], mesh['t'] = Mesh.SquareMesh(8,8,1)[0:2];
# expressions for domain boundaries; the 1e-3 slack tolerates floating-point
# coordinates that are not exactly on the unit-square edges
mesh['boundaryexpr'] = [lambda p: (p[1,:] < 1e-3), lambda p: (p[0,:] > 1-1e-3), lambda p: (p[1,:] > 1-1e-3), lambda p: (p[0,:] < 1e-3)];
mesh['boundarycondition'] = numpy.array([1, 1, 1, 1]); # Set boundary condition for each boundary

# call exasim to generate and run C++ code to solve the PDE model
sol, pde, mesh = Postprocessing.exasim(pde,mesh)[0:3];

# visualize the numerical solution of the PDE model using Paraview
pde['visscalars'] = ["temperature", 0]; # list of scalar fields for visualization
pde['visvectors'] = ["temperature gradient", numpy.array([1, 2]).astype(int)]; # list of vector fields for visualization
Postprocessing.vis(sol,pde,mesh); # visualize the numerical solution
print("Done!");
# npf = dmd[0]['facecon'].shape[0];
# nf = dmd[0]['facecon'].shape[2];
# print(numpy.reshape(dmd[0]['facecon'][:,0,:],(npf,nf),'F').T)
# print(numpy.reshape(dmd[0]['facecon'][:,1,:],(npf,nf),'F').T)
# fileapp1 = cdir + "/datain/app.bin";
# app1 = Preprocessing.readapp(fileapp1);
# fileapp2 = cdir + "/Applications/Poisson/Poisson2d/datain/app.bin";
# app2 = Preprocessing.readapp(fileapp2);
# diff = Preprocessing.checkapp(app1,app2);
# print(app1['problem'])
# print(app2['problem'])
# print(diff)
# filemaster1 = cdir + "/datain/master.bin";
# tm1 = numpy.fromfile(open(filemaster1, "r"), dtype=numpy.float64);
# filemaster2 = cdir + "/Applications/Poisson/Poisson2d/datain/master.bin";
# tm2 = numpy.fromfile(open(filemaster2, "r"), dtype=numpy.float64);
# print(max(abs(tm1.flatten('F')-tm2.flatten('F'))))
# filemesh1 = cdir + "/datain/mesh1.bin";
# mesh1 = Preprocessing.readmesh(filemesh1);
# filemesh2 = cdir + "/Applications/Poisson/Poisson2d/datain/mesh1.bin";
# mesh2 = Preprocessing.readmesh(filemesh2);
# diff = Preprocessing.checkmesh(mesh1,mesh2);
# print(mesh1['nbsd'])
# print(mesh2['nbsd'])
# print(diff)
#
# filemesh1 = cdir + "/datain/mesh2.bin";
# mesh1 = Preprocessing.readmesh(filemesh1);
# filemesh2 = cdir + "/Applications/Poisson/Poisson2d/datain/mesh2.bin";
# mesh2 = Preprocessing.readmesh(filemesh2);
# diff = Preprocessing.checkmesh(mesh1,mesh2);
# print(mesh1['nbsd'])
# print(mesh2['nbsd'])
# print(diff)
# print(mesh1['ndims'])
# print(mesh2['ndims'])
# print(mesh1['nsize'])
# print(mesh2['nsize'])
# print(mesh1['facecon'][0:10])
# print(mesh2['facecon'][0:10])
# print(dmd[0]['facecon'][:,:,0])
# fileapp1 = cdir + "/datain/app['bin";
# app1 = Preprocessing.readapp(fileapp1);
# fileapp2 = cdir + "/Applications/Poisson2d/datain/app['bin";
# app2 = Preprocessing.readapp(fileapp2);
# diff = Preprocessing.checkapp(app1,app2);
# print(diff)
#
# filemaster1 = cdir + "/datain/master.bin";
# tm1 = numpy.fromfile(open(filemaster1, "r"), dtype=numpy.float64);
# filemaster2 = cdir + "/Applications/Poisson2d/datain/master.bin";
# tm2 = numpy.fromfile(open(filemaster2, "r"), dtype=numpy.float64);
# print(max(abs(tm1.flatten('F')-tm2.flatten('F'))))
#
# filemesh1 = cdir + "/datain/mesh.bin";
# mesh1 = Preprocessing.readmesh(filemesh1);
# filemesh2 = cdir + "/Applications/Poisson2d/datain/mesh.bin";
# mesh2 = Preprocessing.readmesh(filemesh2);
# diff = Preprocessing.checkmesh(mesh1,mesh2);
# print(diff)
# tm1 = numpy.fromfile(open(filemesh1, "r"), dtype=numpy.float64);
# tm2 = numpy.fromfile(open(filemesh2, "r"), dtype=numpy.float64);
# print(mesh1['nsize'])
# print(mesh2['nsize'])
# k1 = 0; k2 = 20;
# print(max(abs(tm1[k1:k2].flatten('F')-tm2[k1:k2].flatten('F'))))
# k1 = 20; k2 = 1152+20;
# print(max(abs(tm1[k1:k2].flatten('F')-tm2[k1:k2].flatten('F'))))
# print(tm1[k1:k2])
# print(tm2[k1:k2])
# print(mesh1['facecon'].flatten('F'))
# print(mesh2['facecon'].flatten('F'))
# print(tm1.shape)
# print(tm2.shape)
# print(mesh1['colent2elem'].T)
# print(mesh2['colent2elem'].T)
# print(mesh['f'].T)
# print(mesh['dgnodes'][:,:,0])
# print(mesh['dgnodes'][:,:,-1])
| [
"os.getcwd",
"Preprocessing.initializeexasim",
"Postprocessing.exasim",
"numpy.array",
"Mesh.SquareMesh",
"Postprocessing.vis"
] | [((86, 97), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (95, 97), False, 'import numpy, os\n'), ((316, 348), 'Preprocessing.initializeexasim', 'Preprocessing.initializeexasim', ([], {}), '()\n', (346, 348), False, 'import Preprocessing, Postprocessing, Gencode, Mesh\n'), ((881, 899), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (892, 899), False, 'import numpy, os\n'), ((944, 962), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (955, 962), False, 'import numpy, os\n'), ((1309, 1334), 'numpy.array', 'numpy.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (1320, 1334), False, 'import numpy, os\n'), ((1773, 1807), 'Postprocessing.vis', 'Postprocessing.vis', (['sol', 'pde', 'mesh'], {}), '(sol, pde, mesh)\n', (1791, 1807), False, 'import Preprocessing, Postprocessing, Gencode, Mesh\n'), ((1079, 1103), 'Mesh.SquareMesh', 'Mesh.SquareMesh', (['(8)', '(8)', '(1)'], {}), '(8, 8, 1)\n', (1094, 1103), False, 'import Preprocessing, Postprocessing, Gencode, Mesh\n'), ((1464, 1496), 'Postprocessing.exasim', 'Postprocessing.exasim', (['pde', 'mesh'], {}), '(pde, mesh)\n', (1485, 1496), False, 'import Preprocessing, Postprocessing, Gencode, Mesh\n'), ((1697, 1716), 'numpy.array', 'numpy.array', (['[1, 2]'], {}), '([1, 2])\n', (1708, 1716), False, 'import numpy, os\n')] |
"""
IMGO - Process, augment, and balance image data.
------------------------------------------------
UPTOOLS module
Classes
-------
Image_Dataset: Class representing an image dataset, being a collection
of X (square image data) and y (label data) arrays.
Class Attributes:
base_path (str): path to the directory containing images or class
subdirectories.
-
mode (str): format of source image data: "img" if raw images,
"np" if numpy-arrays, or "h5" if HDF5 format.
-
reduce (str): statistical reduction performed on raw data:
"norm" for pixel value normalization, "std" for pixel value
standardization (using training data statistics). Note that
any reduction will be undone if the datasets are saved to disk.
-
class_list (list): list of classes in the dataset.
-
class_no (int): number of classes in the dataset.
-
split (int): number of splits in the dataset: 2 if split into
training, validation, and testing subsets; 1 if split into
training and testing subsets; and 0 if not split (or merged).
-
dims (tuple): dimensions of the images in the dataset (set to
"various" (str) if the images are not of a consistent size).
-
img_shape (tuple): dimensions and channels of the images in the
dataset.
-
expand (str): statistical expansion performed on raw data:
"de_norm" for de-normalization of pixel values, and "de_std" for
de-standardization of pixel values.
-
shadow (dict): image and label data for each data subset "train",
"val", "test", and "data" (if unsplit or merged), in integer
(ie non-normalized and non-standardized form).
-
X_train (numpy-array) training image data arrays if split using
data_split method.
-
y_train (numpy-array) training label data arrays if split using
data_split method.
-
X_val (numpy-array) validation image data arrays if split using
data_split method.
-
y_val (numpy-array) validation label data arrays if split using
data_split method.
-
X_test (numpy-array) testing image data arrays if split using
data_split method.
-
y_test (numpy-array) testing label data arrays if split using
data_split method.
-
X_data (numpy-array): image data arrays (unsplit or merged
using data_merge method.).
-
y_data (numpy-array): data label arrays (one-hot-encoded, unsplit
or merged using data_merge method).
-
min_pv (float): minimum pixel value across entire dataset.
-
max_pv (float): maximum pixel value across entire dataset.
-
size (int): total number of images in the dataset.
Class Methods:
init: constructs the necessary attributes for the dataset.
-
details: prints or displays summary details about the dataset.
-
map_classes: maps class names to label data from new list of
class names.
-
normalize: normalizes pixel values to range [0,1].
-
standardize: standardizes pixel values using the mean and
standard deviation of the training subset (note that the dataset
must be split in order to standardize).
-
data_split: splits X and y data into training, validation and
testing subsets.
-
data_merge: merges X and y training and testing (and validation,
if applicable) subsets into single dataset.
-
display_batch: displays random batch of images from the dataset.
-
save_arrays: saves dataset (or subsets) as numpy arrays in
HDF5 format.
-
save_imgs: saves dataset into main directory and subdirectories
for each class.
-
augment_training_set: calls on an (initialized) imgo.augtools
augmenter to apply image augmentation to the Image_Dataset's
X_train subset.
-
split_rebalance: splits dataset into training and testing
(and validation) subsets and rebalances class sizes by calling
on an (initialized) imgo.augtools augmenter to generate new
training images (without affecting the validation/testing
subsets).
Module-Wide Functions
---------------------
get_class_names: fetch class names from image data directories.
-
img_to_df: compile image directories into pandas-DataFrame.
-
display_img_df: display batches of images from a pandas-DataFrame.
-
read_img_df: read images from pandas-DataFrame.
-
one_hot_encode: one-hot-encode image data labels.
-
auto_rescale: rescale image to square of specified dimensions.
-
threshold_rescale: rescale image to square of specified dimensions
if outside threshold dimension range.
-
rescale_flow: apply either auto or threshold rescaling to images located
on local disk (with option to save).
"""
import os
import numpy as np
import pandas as pd
import random
import imageio
import cv2
import matplotlib.pyplot as plt
import h5py
from imgaug import augmenters as iaa
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from send2trash import send2trash
# ------------------------------------------------------------------------
def get_class_names(base_path):
    """
    Fetch class names from subdirectories of the given base directory.

    Walks the tree rooted at base_path; if no subdirectory is found the
    dataset is considered class-less and an empty list is returned.
    Otherwise the non-hidden entries directly under base_path are
    returned, sorted case-insensitively.

    Arguments:
        base_path (str): path to the directory containing images or class
        subdirectories.

    Returns:
        class_list (list): list of classes identified from subdirectories.
    """
    has_subdirs = True
    for _root, dirs, _files in os.walk(base_path):
        if dirs:
            break
        has_subdirs = False
    if not has_subdirs:
        return []
    entries = [e for e in os.listdir(base_path) if not e.startswith(".")]
    return sorted(entries, key=str.lower)
# ------------------------------------------------------------------------
def img_to_df(base_path):
    """
    Build a DataFrame of image paths and class names from a directory.

    Class names are taken from subdirectories of base_path (via
    get_class_names); if there are none, all images sit directly under
    base_path and are assigned the placeholder class "no_class".
    Hidden files (leading dot) are ignored.

    Arguments:
        base_path (str): path to the directory containing images or class
        subdirectories.

    Returns:
        df (pandas-DataFrame): DataFrame of size x-by-2 (where column 0
        is the image path, column 1 is the class name, and x is the
        number of images).
    """
    classes = get_class_names(base_path)
    if not classes:
        paths = [
            f"{base_path}/{name}"
            for name in os.listdir(base_path)
            if not name.startswith(".")
        ]
        frame = pd.DataFrame(paths, columns=["image"])
        frame["class"] = "no_class"
        return frame.reset_index(drop=True)
    frames = []
    for cls in classes:
        paths = [
            f"{base_path}/{cls}/{name}"
            for name in os.listdir(f"{base_path}/{cls}")
            if not name.startswith(".")
        ]
        sub = pd.DataFrame(paths, columns=["image"])
        sub["class"] = cls
        frames.append(sub)
    return pd.concat(frames).reset_index(drop=True)
# ------------------------------------------------------------------------
def display_img_df(df, batch_no, batch_size, n_rows, n_cols):
    """
    Displays images contained in an x-by-2 DataFrame (where column 0 is
    the image path, column 1 is the class name, and x is the number of
    images).

    Arguments:
        df (pandas-DataFrame): x-by-2 DataFrame (where column 0 is the
        image path, column 1 is the class name, and x is the number of
        images)
        -
        batch_size (int): size of subset (batch) of images.
        -
        batch_no (int): which batch from the DataFrame to display
        (0-indexed).
        -
        n_rows (int): number of rows of images to display.
        -
        n_cols (int): number of columns of images to display.

    Raises:
        Exception: if the n_rows-by-n_cols grid does not hold exactly
        batch_size images, or if batch_no is out of range.

    Returns:
        Visualization of image batch specified.
    """
    # the display grid must hold exactly one batch
    if n_rows * n_cols != batch_size:
        raise Exception(
            f"Cannot display {batch_size} images in {n_rows} rows and {n_cols} cols."
        )
    # number of batches: full batches plus one partial batch if there is
    # any remainder
    batches = np.divmod(len(df), batch_size)[0] + bool(
        np.divmod(len(df), batch_size)[1]
    )
    if (batch_no + 1) > batches:
        raise Exception(
            f"'batch_no' argument out of range; final batch is {batches-1}."
        )
    # row range [bottom, top) of the requested batch; the final batch may
    # be shorter than batch_size
    bottom = np.arange(0, len(df), batch_size)[batch_no]
    if batch_no == batches - 1:
        top = len(df)
    else:
        top = np.arange(0, len(df), batch_size)[batch_no + 1]
    batch_df = df[bottom:top]
    img_list = []
    label_list = []
    n = 0  # running index used to title images that have no class
    for i, j in batch_df.iterrows():
        img = imageio.imread(j[0])
        img_list.append(img)
        if j[1] == "no_class":
            label = f"batch {batch_no}, img {n}"
            n += 1
        else:
            label = j[1]
        label_list.append(label)
    img_array = np.array(img_list)
    label_array = np.array(label_list)
    plt.rcParams["font.family"] = "sans-serif"
    plt.rcParams["font.sans-serif"] = "Helvetica"
    plt.rcParams["text.color"] = "#333F4B"
    fig = plt.figure(figsize=(12, 8))
    # one subplot per image (matplotlib subplot indices are 1-based)
    for i in range(1, (img_array.shape[0]) + 1):
        ax = fig.add_subplot(n_rows, n_cols, i)
        ax.imshow(img_array[i - 1])
        ax.set_title(label_array[i - 1])
        ax.set_xticks([])
        ax.set_yticks([])
    fig.tight_layout()
    plt.show()
# ------------------------------------------------------------------------
def read_img_df(df, img_scale=None, class_name=None, save=False):
    """
    Reads images contained in an x-by-2 DataFrame (where column 0 is the
    image path, column 1 is the class name, and x is the number of
    images).

    Arguments:
        df (pandas-DataFrame): x-by-2 DataFrame (where column 0 is the
        image path, column 1 is the class name, and x is the number of
        images)

    Keyword Arguments:
        img_scale (int) optional: dimensions for desired (square) output
        images. If None, no resizing will occur. Defaults to None.
        -
        class_name (str) optional: name of a class in the DataFrame. If
        given, only images belonging to that class will be read. Defaults
        to None.
        -
        save (bool) optional: whether or not to save resulting array of
        image data as a .h5 file in the path
        'imgo_output/uptools/preprocessing'. Note that images can only
        be saved if they have been rescaled using the 'img_scale'
        argument. Defaults to False.

    Raises:
        Exception: if save is True but no img_scale was given (images of
        inconsistent dimensions cannot be stacked into one array).

    Returns:
        img_array (numpy-array): images as numpy-array.
    """
    if class_name:
        data_df = df.loc[df["class"] == class_name]
    else:
        data_df = df
    img_list = []
    label_list = []
    n = 0  # running index used to label images that have no class
    for i, j in data_df.iterrows():
        raw_img = imageio.imread(j[0])
        if img_scale:
            img = auto_rescale(raw_img, img_scale)
        else:
            img = raw_img
        img_list.append(img)
        if j[1] == "no_class":
            label = n
            n += 1
        else:
            label = j[1]
        label_list.append(label)
    img_array = np.array(img_list)
    label_array = np.array(label_list)
    if save:
        if img_scale is None:
            raise Exception(
                "Cannot save images with inconsistent dimensions."
            )
        my_path = "imgo_output/uptools/preprocessing"
        # create the whole output tree in one call (replaces the previous
        # level-by-level mkdir loop; no-op if it already exists)
        os.makedirs(my_path, exist_ok=True)
        with h5py.File(f"{my_path}/X_data.h5", "w") as hf:
            hf.create_dataset("X_data", data=img_array)
        print(f"{my_path}/X_data.h5 saved successfully.")
    return img_array
# ------------------------------------------------------------------------
def one_hot_encode(y_data, class_list, save=False):
    """
    One-hot encodes a list of class labels.

    Note that the one-hot encoded data returned will be based on the
    class_list given sorted in (case-insensitive) alphabetical order:
    column k of the output corresponds to the k-th class in that order.

    Arguments:
        y_data (list, tuple, or 1D-array): list of class labels.
        -
        class_list (list): list of class names against which the one-hot
        encoding occurs.

    Keyword Arguments:
        save (bool) optional: whether or not to save resulting array of
        one-hot encoded data as a .h5 file in the path
        'imgo_output/uptools/preprocessing'. Defaults to False.

    Raises:
        Exception: if class_list is not a list. A label in y_data that is
        absent from class_list raises ValueError (from list.index).

    Returns:
        y_data (numpy-array): one-hot encoded class label data as numpy-
        array of shape (len(y_data), number of unique classes).
    """
    if type(class_list) is not list:
        raise Exception(
            f"class_list must be a list; {type(class_list)} given."
        )
    # deduplicate, then sort case-insensitively to fix the column order
    classes = sorted(set(class_list), key=lambda f: f.lower())
    class_no = len(classes)
    encoded = []
    for label in y_data:
        row = np.zeros(class_no)
        row[classes.index(label)] = 1
        encoded.append(row)
    y_data = np.array(encoded)
    if save:
        my_path = "imgo_output/uptools/preprocessing"
        # create the whole output tree in one call (replaces the previous
        # level-by-level mkdir loop; no-op if it already exists)
        os.makedirs(my_path, exist_ok=True)
        with h5py.File(f"{my_path}/y_data.h5", "w") as hf:
            hf.create_dataset("y_data", data=y_data)
        print(f"{my_path}/y_data.h5 saved successfully.")
    return y_data
# ------------------------------------------------------------------------
def auto_rescale(img, dim):
    """
    Rescale an image to a dim-by-dim square.

    The interpolation method depends on the direction of scaling: cv2
    'inter cubic' when upscaling (largest original dimension smaller than
    the target), cv2 'inter area' otherwise. A grayscale (2D) result
    gains an explicit single-channel axis.

    Arguments:
        img (numpy-array): original image to rescale.
        -
        dim (int): number of pixels in the target dimensions.

    Returns:
        scaled_img (numpy-array): image rescaled into a square of length
        and height equal to 'dim'.
    """
    target = (dim, dim)
    interp = cv2.INTER_CUBIC if np.max(img.shape) < dim else cv2.INTER_AREA
    result = cv2.resize(img, target, interpolation=interp)
    if result.ndim == 2:
        result = np.expand_dims(result, 2)
    return result
# ------------------------------------------------------------------------
def threshold_rescale(img, lower=None, upper=None):
    """
    Conditionally rescale an image to a square, based on its size.

    The image's size is taken as the square root of the product of its
    first two dimensions. If that size is below 'lower', the image is
    upscaled to a (lower, lower) square with cv2 'inter cubic'
    interpolation; if it is above 'upper', it is downscaled to an
    (upper, upper) square with cv2 'inter area' interpolation. Otherwise
    the image is returned unchanged. A grayscale (2D) result gains an
    explicit single-channel axis.

    Arguments:
        img (numpy-array): original image to rescale.

    Keyword Arguments:
        lower (int) optional: the lower bound of the threshold. Defaults
        to None (no lower bound).
        -
        upper (int) optional: the upper bound of the threshold. Defaults
        to None (no upper bound).

    Returns:
        scaled_img (numpy-array): image rescaled into square of length
        and height equal to 'lower' or 'upper', depending on which bound
        was crossed; the unmodified image if neither was.
    """
    size = np.sqrt(img.shape[0] * img.shape[1])
    scaled_img = img
    # an image can only ever cross one bound, so a single guard chain
    # covers all four lower/upper combinations of the original
    if lower is not None and size < lower:
        scaled_img = cv2.resize(
            img, (lower, lower), interpolation=cv2.INTER_CUBIC
        )
    elif upper is not None and size > upper:
        scaled_img = cv2.resize(
            img, (upper, upper), interpolation=cv2.INTER_AREA
        )
    if len(scaled_img.shape) == 2:
        scaled_img = np.expand_dims(scaled_img, 2)
    return scaled_img
# ------------------------------------------------------------------------
def rescale_flow(
    base_path,
    rescale_mode,
    dim=None,
    lower=None,
    upper=None,
    class_selection=None,
    save=False,
):
    """
    Fetches images and class names from subdirectories in the directory
    given as the base path and rescales the images using either the
    'auto_rescale' or the 'threshold_rescale' function, with the option
    to save the rescaled images in place of the original images.

    Arguments:
        base_path (str): path to the directory containing images or class
        subdirectories.
        -
        rescale_mode (str): which rescale function to use; either 'auto'
        or 'threshold'.

    Keyword Arguments:
        dim (int) optional: number of pixels in the target dimensions
        (used by 'auto' mode). Defaults to None.
        -
        lower (int) optional: the lower bound of the threshold (used by
        'threshold' mode). Defaults to None.
        -
        upper (int) optional: the upper bound of the threshold (used by
        'threshold' mode). Defaults to None.
        -
        class_selection (list) optional: list of class names on which
        to perform the rescaling. If not given, will apply to all the
        identified in the directories. Defaults to None.
        -
        save (bool) optional: whether or not to save the rescaled images
        in the directories from which they were drawn. Note that saving
        will OVERWRITE the original images in place, and that nothing is
        returned in that case. Defaults to False.

    Returns:
        X (numpy-array): images in array form (if 'save' is False).
        -
        y (numpy-array): one-hot encoded label data (if 'save' is False).
    """
    if rescale_mode not in ["auto", "threshold"]:
        raise Exception(
            "Choose valid rescale mode: 'auto' or 'threshold'."
        )
    df = img_to_df(base_path)
    scaled_imgs = []
    scaled_imgs_labels = []
    if class_selection:
        if type(class_selection) is not list:
            raise Exception(
                f"Class selection must be a list; {type(class_selection)} given."
            )
        else:
            # restrict processing to the requested classes only
            df = df.loc[df["class"].isin(class_selection)]
    # class order for one-hot encoding: case-insensitive alphabetical
    class_list = sorted(
        list(df["class"].unique()), key=lambda f: f.lower()
    )
    for i, j in tqdm(df.iterrows(), total=len(df)):
        img = imageio.imread(j[0])
        if rescale_mode == "auto":
            scaled_img = auto_rescale(img, dim)
        elif rescale_mode == "threshold":
            scaled_img = threshold_rescale(
                img, lower=lower, upper=upper
            )
        else:
            scaled_img = img
        # ensure grayscale images carry an explicit channel axis
        if len(scaled_img.shape) == 2:
            scaled_img = np.expand_dims(scaled_img, 2)
        if save:
            # overwrite the source image on disk with the rescaled one
            imageio.imwrite(j[0], scaled_img)
        else:
            scaled_imgs.append(scaled_img)
            scaled_imgs_labels.append(j[1])
    if not save:
        X = np.array(scaled_imgs)
        y = one_hot_encode(scaled_imgs_labels, class_list)
        return X, y
# ------------------------------------------------------------------------
class Image_Dataset:
"""
Image_Dataset: Class representing an image dataset, being a collection
of X (square image data) and y (label data) arrays.
Attributes
----------
base_path (str): path to the directory containing images or class
subdirectories.
-
mode (str): format of source image data: "imgs" if raw images, "np" if
numpy-arrays, or "h5" if HDF5 format.
-
reduce (str): statistical reduction performed on raw data: "norm" for
pixel value normalization, "std" for pixel value standardization
(using training data statistics). Note that any reduction will be
undone if the datasets are saved to disk.
-
class_list (list): list of classes in the dataset.
-
class_no (int): number of classes in the dataset.
-
split (int): number of splits in the dataset: 2 if split into
training, validation, and testing subsets; 1 if split into
training and testing subsets; and 0 if not split (or merged).
-
dims (tuple): dimensions of the images in the dataset (set to
"various" (str) if the images are not of a consistent size).
-
img_shape (tuple): dimensions and channels of the images in the
dataset.
-
expand (str): statistical expansion performed on raw data: "de_norm"
for de-normalization of pixel values, and "de_std" for
de-standardization of pixel values.
-
shadow (dict): image and label data for each data subset "train",
"val", "test", and "data" (if unsplit or merged), in integer (ie
non-normalized and non-standardized form).
-
X_train (numpy-array) training image data arrays if split using
data_split method.
-
y_train (numpy-array) training label data arrays if split using
data_split method.
-
X_val (numpy-array) validation image data arrays if split using
data_split method.
-
y_val (numpy-array) validation label data arrays if split using
data_split method.
-
X_test (numpy-array) testing image data arrays if split using
data_split method.
-
y_test (numpy-array) testing label data arrays if split using
data_split method.
-
X_data (numpy-array): image data arrays (unsplit or merged using
data_merge method.).
-
y_data (numpy-array): data label arrays (one-hot-encoded, unsplit
or merged using data_merge method).
-
min_pv (float): minimum pixel value across entire dataset.
-
max_pv (float): maximum pixel value across entire dataset.
-
size (int): total number of images in the dataset.
Methods
-------
init: constructs the necessary attributes for the dataset.
-
details: prints or displays summary details about the dataset.
-
map_classes: maps class names to label data from new list of class
names.
-
normalize: normalizes pixel values to range [0,1].
-
standardize: standardizes pixel values using the mean and standard
deviation of the training subset (note that the dataset must be split
in order to standardize).
-
data_split: splits X and y data into training, validation and testing
subsets.
-
data_merge: merges X and y training and testing (and validation, if
applicable) subsets into single dataset.
-
display_batch: displays random batch of images from the dataset.
-
save_arrays: saves dataset (or subsets) as numpy arrays in HDF5
format.
-
save_imgs: saves dataset into main directory and subdirectories for
each class.
-
augment_training_set: calls on an (initialized) imgo.augtools
augmenter to apply image augmentation to the Image_Dataset's X_train
subset.
-
split_rebalance: splits dataset into training and testing
(and validation) subsets and rebalances class sizes by calling on
an (initialized) imgo.augtools augmenter to generate new training
images (without affecting the validation/testing subsets).
"""
def __init__(
self,
base_path,
mode,
img_scale,
pre_norm=False,
pre_std=False,
normalize=False,
standardize=False,
manual_classes=None,
):
"""
Constructs all the necessary attributes for the dataset.
Arguments:
base_path (str): path to the directory containing images or
class subdirectories.
-
mode (str): format of source image data: "imgs" if raw images,
"np" if numpy-arrays, or "h5" if HDF5 format.
-
img_scale (int): dimensions for desired (square) output
images.
Keyword Arguments:
pre_norm (bool) optional: whether or not the numpy data has
been normalized prior to initialization of the Image_Dataset
object. If it has been normalized, not setting this argument
to True will result in error. Defaults to False.
-
pre_std (bool) optional: whether or not the numpy data has
been standardized prior to initialization of the Image_Dataset
object. If it has been normalized, not setting this argument
to True will result in error. Defaults to False.
-
normalize (bool) optional: whether or not to normalize image
pixel values to range [0,1]. Note that normalized datasets
will be saved in non-normalized form if saved to disk.
Defaults to False.
-
standardize (bool) optional: whether or not to standardize
the pixel values in the training and testing (and validation)
sets using the mean and standard deviation of the training
data. Note that the standardization operation will occur only
when the dataset is split using the 'data_split' method. Note
also that standardized datasets will be saved in
non-standardized form if saved to disk. Defaults to False.
-
manual_classes (list) optional: list of class names if using
"np" or "h5" modes. Classes in this list will be tagged onto
the y data in alphabetical order (ie column 0 of the y data
will be named as the first class in the list when sorted
alphabetically).
Yields:
Image_Dataset object with attributes as specified.
"""
self.base_path = base_path
if mode in ["imgs", "np", "h5"]:
self.mode = mode
else:
raise Exception(
"Must select valid mode: 'imgs', 'np', or 'h5'."
)
if type(img_scale) is int:
rescale_dims = img_scale
else:
raise Exception(
f"'img_scale' argument must be integer, {type(img_scale)} given."
)
rescale_dims = None
if normalize and standardize:
raise Exception(
"Can either normalize or standardize data, cannot do both."
)
self.reduce = None
elif normalize and (not standardize):
self.reduce = "norm"
elif (not normalize) and standardize:
self.reduce = "std"
else:
self.reduce = None
class_list_pending = False
if manual_classes is not None:
if mode == "imgs":
raise Exception(
"'manual_classes' argument can only be given for 'np' and 'h5' modes."
)
self.class_list = None
self.class_no = None
else:
if type(manual_classes) is list:
self.class_list = sorted(
[str(i) for i in manual_classes],
key=lambda f: f.lower(),
)
self.class_no = len(self.class_list)
else:
raise Exception(
f"'manual_classes' argument must be list, {type(manual_classes)} given."
)
self.class_list = None
self.class_no = None
else:
if self.mode == "imgs":
self.class_list = get_class_names(self.base_path)
if self.class_list == []:
self.class_list.append("no_class")
self.class_no = len(self.class_list)
else:
class_list_pending = True
self.class_list = None
self.class_no = None
combo_sets = {
"train": [None, None],
"val": [None, None],
"test": [None, None],
"data": [None, None],
}
if (self.mode == "np") or (self.mode == "h5"):
file_list = []
for r, d, f in os.walk(base_path):
for i in f:
if not i.startswith("."):
file_list.append(
[i, os.path.relpath(os.path.join(r, i))]
)
for file in file_list:
if file[0].lower().endswith(".npz"):
for k, v in combo_sets.items():
if file[0].lower().startswith(f"x_{k}".lower()):
data_load = np.load(
file[1], allow_pickle=True
)
combo_sets[k][0] = data_load[
data_load.files[0]
]
elif (
file[0].lower().startswith(f"y_{k}".lower())
):
data_load = np.load(
file[1], allow_pickle=True
)
combo_sets[k][1] = data_load[
data_load.files[0]
]
elif file[0].lower().endswith(".npy"):
for k, v in combo_sets.items():
if file[0].lower().startswith(f"x_{k}".lower()):
combo_sets[k][0] = np.load(
file[1], allow_pickle=True
)
elif (
file[0].lower().startswith(f"y_{k}".lower())
):
combo_sets[k][1] = np.load(
file[1], allow_pickle=True
)
elif file[0].lower().endswith(".h5"):
for k, v in combo_sets.items():
if file[0].lower().startswith(f"x_{k}".lower()):
with h5py.File(f"{file[1]}", "r") as hf:
combo_sets[k][0] = hf[f"X_{k}"][:]
elif (
file[0].lower().startswith(f"y_{k}".lower())
):
with h5py.File(f"{file[1]}", "r") as hf:
combo_sets[k][1] = hf[f"y_{k}"][:]
else:
raise Exception(
"No valid '.npy', '.npz', or '.h5' files identified."
)
if class_list_pending:
y_shapes = {}
for v in [
v
for k, v in combo_sets.items()
if (v[1] is not None)
]:
y_shapes[k] = v[1].shape[1]
if len(set(y_shapes.values())) == 1:
clist = []
self.class_no = list(set(y_shapes.values()))[0]
c = str(0) * len(str(self.class_no))
for i in range(1, self.class_no + 1):
len_dif = len(str(self.class_no)) - (
len(c + str(i)) - len(str(self.class_no))
)
cl = (str(0) * len_dif) + str(i)
clist.append(f"class_{cl}")
self.class_list = clist
else:
raise Exception(
"Inconsistent number of classes in y-arrays."
)
elif self.mode == "imgs":
data_df = img_to_df(self.base_path)
X_list = []
y_list = []
for i, j in tqdm(
data_df.iterrows(),
total=len(data_df),
desc="Reading images",
position=0,
):
ohe_init = np.zeros(self.class_no)
label = j[1]
ohe_init[self.class_list.index(label)] = 1
ohe_label = ohe_init
y_list.append(ohe_label)
img_data = imageio.imread(j[0])
if rescale_dims:
img = auto_rescale(img_data, rescale_dims)
else:
img = img_data
X_list.append(img)
combo_sets["data"][0] = np.array(X_list)
combo_sets["data"][1] = np.array(y_list)
self.split = int(
len([1 for k, v in combo_sets.items() if v[0] is not None])
- 1
)
x_eqdims = []
for k, v in combo_sets.items():
if v[0] is not None:
if (
(len(v[0].shape) == 4) or (len(v[0].shape) == 3)
) and (v[0].shape[1] == v[0].shape[2]):
x_eqdims.append(v[0].shape[1])
if (len(set(x_eqdims)) == 1) and (
len(x_eqdims) - 1 == self.split
):
self.dims = (list(set(x_eqdims))[0], list(set(x_eqdims))[0])
else:
self.dims = "various"
if pre_norm and pre_std:
raise Exception(
"Cannot expand data if both pre-normalized and pre-standardized."
)
self.expand = None
elif pre_norm and (not pre_std):
self.expand = "de_norm"
elif (not pre_norm) and pre_std:
if self.dims == "various":
raise Exception(
"Cannot expand data if image dimensions are not the same."
)
else:
if self.split == 0:
raise Exception(
"Cannot de-standardize unsplit data."
)
self.expand = None
else:
self.expand = "de_std"
else:
self.expand = None
X_sets = {"train": [], "val": [], "test": [], "data": []}
min_pv = None
max_pv = None
for k, v in X_sets.items():
if combo_sets[k][0] is not None:
for i in tqdm(
np.arange(combo_sets[k][0].shape[0]),
total=combo_sets[k][0].shape[0],
desc=f"Processing X_{k}",
):
if self.expand is not None:
if self.expand == "de_std":
img_min = np.min(combo_sets[k][0][i])
img_max = np.max(combo_sets[k][0][i])
raw_img = (
((combo_sets[k][0][i] - img_min) * 255)
/ (img_max - img_min)
).astype(np.uint8)
clean_img = np.clip(raw_img, 0, 255).astype(
np.uint8
)
elif self.expand == "de_norm":
raw_img = (
combo_sets[k][0][i] * 255
).astype(np.uint8)
clean_img = np.clip(raw_img, 0, 255).astype(
np.uint8
)
else:
raw_img = combo_sets[k][0][i]
clean_img = np.clip(raw_img, 0, 255).astype(
np.uint8
)
else:
raw_img = combo_sets[k][0][i]
clean_img = np.clip(raw_img, 0, 255).astype(
np.uint8
)
if self.mode == "imgs":
img = clean_img
else:
if rescale_dims:
img = auto_rescale(clean_img, rescale_dims)
self.dims = (rescale_dims, rescale_dims)
else:
img = clean_img
v.append(img)
img_min = np.min(img)
img_max = np.max(img)
if min_pv is None:
min_pv = img_min
else:
if img_min < min_pv:
min_pv = img_min
if max_pv is None:
max_pv = img_max
else:
if img_max > max_pv:
max_pv = img_max
combo_sets[k][0] = np.array(v)
for k, v in combo_sets.items():
if v[0] is not None:
if len(v[0].shape) == 3:
v[0] = np.expand_dims(v[0], 3)
channels = []
for k, v in combo_sets.items():
if v[0] is not None:
channels.append(v[0].shape[-1])
if len(set(channels)) != 1:
raise Exception(
"Cannot create ImageDataset with inconsistent number of channels."
)
channels = list(set(channels))[0]
self.img_shape = (self.dims[0], self.dims[1], channels)
self.shadow = combo_sets
if self.reduce == "std":
if self.dims == "various":
raise Exception(
"Cannot standardize data if image dimensions are not the same."
)
else:
if self.split == 0:
print(
"Data will be standardized when split using 'data_split'."
)
for k, v in combo_sets.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(self, f"X_{k}", v[0])
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
self.min_pv = min_pv
self.max_pv = max_pv
else:
print("Standardizing...")
self.mu = (
np.sum(combo_sets["train"][0])
/ combo_sets["train"][0].size
)
self.sigma = np.sqrt(
np.sum((combo_sets["train"][0] - self.mu) ** 2)
/ combo_sets["train"][0].size
)
for k, v in combo_sets.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(
self,
f"X_{k}",
(v[0] - self.mu) / self.sigma,
)
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
self.min_pv = (min_pv - self.mu) / self.sigma
self.max_pv = (max_pv - self.mu) / self.sigma
elif self.reduce == "norm":
print("Normalizing...")
for k, v in combo_sets.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(self, f"X_{k}", v[0] / 255)
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
self.min_pv = min_pv / 255
self.max_pv = max_pv / 255
else:
for k, v in combo_sets.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(self, f"X_{k}", v[0])
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
self.min_pv = min_pv
self.max_pv = max_pv
if self.split == 0:
self.size = self.y_data.shape[0]
elif self.split == 1:
self.size = self.y_train.shape[0] + self.y_test.shape[0]
else:
self.size = (
self.y_train.shape[0]
+ self.y_val.shape[0]
+ self.y_test.shape[0]
)
print("Image_Datset initialized successfully.")
# ----------
def details(self, plot=False):
"""
Prints summary details of Image_Dataset object, or displays the
details as a visualization if kwarg 'plot' is given as True.
"""
labels = {}
labs_nums = {}
for k, v in self.shadow.items():
if v[1] is not None:
labels[k] = []
for i in np.arange(v[1].shape[0]):
label_index = np.argmax(v[1][i], axis=0)
label = self.class_list[label_index]
labels[k].append(label)
for k, v in labels.items():
lab, num = np.unique(labels[k], return_counts=True)
labs_nums[k] = dict(zip(list(lab), list(num)))
if self.split == 0:
imgs_per_class = labs_nums["data"]
df_cols = ["data"]
splits = None
colors = ["#81ecec"]
elif self.split == 1:
imgs_per_class = labs_nums
df_cols = ["train", "test"]
splits = df_cols
colors = ["#81ecec", "#a29bfe"]
else:
imgs_per_class = labs_nums
df_cols = ["train", "val", "test"]
splits = df_cols
colors = ["#81ecec", "#74b9ff", "#a29bfe"]
val_ranges = {"min": self.min_pv, "max": self.max_pv}
ds_dict = {
"total_images": self.size,
"splits": splits,
"images_per_class": imgs_per_class,
"image_shape": self.img_shape,
"pixel_values": val_ranges,
}
if plot:
ldf = pd.DataFrame(labs_nums, columns=df_cols).fillna(0)
ldf = ldf.iloc[::-1]
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = "Helvetica"
plt.rcParams["axes.edgecolor"] = "#333F4B"
plt.rcParams["axes.linewidth"] = 0.8
plt.rcParams["xtick.color"] = "#333F4B"
plt.rcParams["ytick.color"] = "#333F4B"
plt.rcParams["text.color"] = "#333F4B"
fig, ax = plt.subplots(figsize=(10, 6))
fig.text(
0,
0.9,
"Class",
fontsize=15,
fontweight="black",
color="#333F4B",
)
ldf.plot.barh(stacked=True, ax=ax, color=colors)
ax.set_xlabel(
"Images",
fontsize=15,
fontweight="black",
color="#333F4B",
)
ax.set_ylabel("")
ax.tick_params(axis="both", which="major", labelsize=12)
if self.split == 0:
ax.legend().set_visible(False)
else:
ax.legend(bbox_to_anchor=(1, 0.5))
ax.spines["top"].set_color("none")
ax.spines["right"].set_color("none")
plt.show()
else:
print("Image_Dataset details")
print("---------------------")
for k, v in {
i: ds_dict[i]
for i in ds_dict
if i != "images_per_class"
}.items():
print(f"{k:<20}{v}\n---")
print("images_per_class\n-")
for k, v in ds_dict["images_per_class"].items():
print(f"{k:<20}{v}\n-")
if self.split != 0:
totals = {}
for c in self.class_list:
c_total = []
for k, v in ds_dict["images_per_class"].items():
c_total.append(
ds_dict["images_per_class"][k][c]
)
totals[c] = sum(c_total)
print(f"{'totals':<20}{totals}")
# ----------
def map_classes(self, class_list):
"""
Maps class names from a given list of class names.
"""
if type(class_list) is list:
new_list = sorted(
[str(i) for i in class_list], key=lambda f: f.lower()
)
if len(new_list) == len(self.class_list):
for i in range(len(self.class_list)):
self.class_list[i] = new_list[i]
else:
raise Exception(
"Number of classes given does not match number in Image_Dataset."
)
else:
raise Exception(
f"'class_list' argument must be list, {type(class_list)} given."
)
# ----------
def normalize(self):
"""
Normalizes pixel values to range [0,1].
"""
if self.reduce == "norm":
raise Exception("Data has already been normalized.")
elif self.reduce == "std":
raise Exception("Cannot normalize standardized data.")
else:
subsets = self.shadow
print("Normalizing...")
for k, v in subsets.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(self, f"X_{k}", v[0] / 255)
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
self.min_pv = self.min_pv / 255
self.max_pv = self.max_pv / 255
self.reduce = "norm"
print("Normalization complete.")
# ----------
def standardize(self):
"""
Standardizes pixel values using the mean and standard deviation of
the training subset (note that the dataset must be split in order
to standardize).
"""
if self.reduce == "std":
raise Exception("Data has already been standardized.")
elif self.reduce == "norm":
raise Exception("Cannot standardize normalized data.")
else:
subsets = self.shadow
if self.dims == "various":
raise Exception(
"Cannot standardize data if image dimensions are not the same."
)
elif self.split == 0:
raise Exception("Cannot standardize unsplit data.")
else:
print("Standardizing...")
self.mu = (
np.sum(subsets["train"][0])
/ subsets["train"][0].size
)
self.sigma = np.sqrt(
np.sum((subsets["train"][0] - self.mu) ** 2)
/ subsets["train"][0].size
)
for k, v in subsets.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(
self,
f"X_{k}",
(v[0] - self.mu) / self.sigma,
)
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
self.min_pv = (self.min_pv - self.mu) / self.sigma
self.max_pv = (self.max_pv - self.mu) / self.sigma
self.reduce = "std"
print("Standardization complete.")
# ----------
    def data_split(self, split_ratio, seed=None, stratify=False):
        """
        Splits the Image_Dataset object into training and testing, and/or
        validation subsets.

        Note that this method is built using Scikit-Learn's
        train_test_split. For more information see:
        https://scikit-learn.org/stable/modules/classes.html#

        Arguments:
            split_ratio (tuple): ratios in the form (a, b, c) used
            to split the dataset; where a, b, and c are float values
            representing the desired proportions of training,
            validation, and testing subsets, repectively; and
            a + b + c = 1. If only two values are given, ie in the
            form (a, b); the dataset will be split into training and
            testing subsets only. In this case, a + b must be equal
            to 1.

        Keyword Arguments:
            seed (int) optional: random seed for use in the data split.
            Defaults to None.
            -
            stratify (bool) optional: whether or not to preserve the class
            balances that exist in the un-split data. Defaults to False.

        Yields:
            Training, testing (and validation) subset arrays as 'X_train'
            and 'y_train', 'X_test' and 'y_test' (and 'X_val' and 'y_val'
            if 3 values passed into the 'split_ratio' argument) attributes
            of the Image_Dataset object. None as 'X_data' and 'y_data'
            attributes of the Image_Dataset object.
        """
        if self.split != 0:
            raise Exception(
                "Cannot split dataset that has already been split."
            )
        else:
            if (type(split_ratio) is list) or (
                type(split_ratio) is tuple
            ):
                # Round each ratio to 2 dp so small float artefacts do
                # not break the exact sum-to-1 check below.
                split_ratio = [np.round(i, 2) for i in split_ratio]
                if sum(split_ratio) == 1:
                    print("Splitting...")
                    if len(split_ratio) == 3:
                        # Three-way split is done in two stages: first
                        # carve off the combined val+test portion, then
                        # split that remainder into val and test.
                        if stratify:
                            (
                                self.shadow["train"][0],
                                Xtv,
                                self.shadow["train"][1],
                                ytv,
                            ) = train_test_split(
                                self.shadow["data"][0],
                                self.shadow["data"][1],
                                test_size=round(
                                    split_ratio[1] + split_ratio[2], 2
                                ),
                                stratify=self.shadow["data"][1],
                                random_state=seed,
                            )
                            (
                                self.shadow["val"][0],
                                self.shadow["test"][0],
                                self.shadow["val"][1],
                                self.shadow["test"][1],
                            ) = train_test_split(
                                Xtv,
                                ytv,
                                test_size=split_ratio[2]
                                / round(
                                    split_ratio[1] + split_ratio[2], 2
                                ),
                                stratify=ytv,
                                random_state=seed,
                            )
                        else:
                            (
                                self.shadow["train"][0],
                                Xtv,
                                self.shadow["train"][1],
                                ytv,
                            ) = train_test_split(
                                self.shadow["data"][0],
                                self.shadow["data"][1],
                                test_size=round(
                                    split_ratio[1] + split_ratio[2], 2
                                ),
                                random_state=seed,
                            )
                            (
                                self.shadow["val"][0],
                                self.shadow["test"][0],
                                self.shadow["val"][1],
                                self.shadow["test"][1],
                            ) = train_test_split(
                                Xtv,
                                ytv,
                                test_size=split_ratio[2]
                                / round(
                                    split_ratio[1] + split_ratio[2], 2
                                ),
                                random_state=seed,
                            )
                        # Free the intermediate val+test arrays.
                        del Xtv
                        del ytv
                        self.shadow["data"][0] = None
                        self.shadow["data"][1] = None
                        self.split = 2
                    elif len(split_ratio) == 2:
                        if stratify:
                            (
                                self.shadow["train"][0],
                                self.shadow["test"][0],
                                self.shadow["train"][1],
                                self.shadow["test"][1],
                            ) = train_test_split(
                                self.shadow["data"][0],
                                self.shadow["data"][1],
                                test_size=split_ratio[1],
                                stratify=self.shadow["data"][1],
                                random_state=seed,
                            )
                        else:
                            (
                                self.shadow["train"][0],
                                self.shadow["test"][0],
                                self.shadow["train"][1],
                                self.shadow["test"][1],
                            ) = train_test_split(
                                self.shadow["data"][0],
                                self.shadow["data"][1],
                                test_size=split_ratio[1],
                                random_state=seed,
                            )
                        self.shadow["data"][0] = None
                        self.shadow["data"][1] = None
                        self.shadow["val"][0] = None
                        self.shadow["val"][1] = None
                        self.split = 1
                    else:
                        raise Exception(
                            "'split_ratio' argument must be list or tuple of 2 or 3 floats."
                        )
                else:
                    raise Exception(
                        "'split_ratio' argument must sum to 1."
                    )
            else:
                raise Exception(
                    f"'split_ratio' argument must be list or tuple, {type(split_ratio)} given."
                )
            # Re-derive the public X_/y_ attributes from the raw shadow
            # arrays, re-applying the active reduction mode if any.
            subsets = self.shadow
            if self.reduce == "std":
                if self.dims == "various":
                    raise Exception(
                        "Cannot standardize data if image dimensions are not the same."
                    )
                else:
                    print("Standardizing...")
                    # Statistics come from the (new) training subset only.
                    self.mu = (
                        np.sum(subsets["train"][0])
                        / subsets["train"][0].size
                    )
                    self.sigma = np.sqrt(
                        np.sum((subsets["train"][0] - self.mu) ** 2)
                        / subsets["train"][0].size
                    )
                    for k, v in subsets.items():
                        if v[0] is None:
                            setattr(self, f"X_{k}", None)
                        else:
                            setattr(
                                self,
                                f"X_{k}",
                                (v[0] - self.mu) / self.sigma,
                            )
                        if v[1] is None:
                            setattr(self, f"y_{k}", None)
                        else:
                            setattr(self, f"y_{k}", v[1])
                    self.min_pv = (self.min_pv - self.mu) / self.sigma
                    self.max_pv = (self.max_pv - self.mu) / self.sigma
            elif self.reduce == "norm":
                print("Normalizing...")
                for k, v in subsets.items():
                    if v[0] is None:
                        setattr(self, f"X_{k}", None)
                    else:
                        setattr(self, f"X_{k}", v[0] / 255)
                    if v[1] is None:
                        setattr(self, f"y_{k}", None)
                    else:
                        setattr(self, f"y_{k}", v[1])
            else:
                for k, v in subsets.items():
                    if v[0] is None:
                        setattr(self, f"X_{k}", None)
                    else:
                        setattr(self, f"X_{k}", v[0])
                    if v[1] is None:
                        setattr(self, f"y_{k}", None)
                    else:
                        setattr(self, f"y_{k}", v[1])
            if self.split == 1:
                print(
                    "Data sucessfully split into training and testing subsets."
                )
            elif self.split == 2:
                print(
                    "Data sucessfully split into training, validation, and testing subsets."
                )
# ----------
def data_merge(self):
"""
Merges a split Image_Dataset object into a single dataset.
Yields full X and y data arrays as 'X_data' and 'y_data'
attributes of the Image_Dataset object, and None as the 'X_train',
'y_train', 'X_val', 'y_val', 'X_test', and 'y_test' attributes of
the Image_Dataset object.
"""
if self.split == 0:
raise Exception(
"Cannot merge dataset that has not been split."
)
else:
print("Merging...")
x_merge = [
v[0]
for k, v in self.shadow.items()
if (v[0] is not None)
]
y_merge = [
v[1]
for k, v in self.shadow.items()
if (v[1] is not None)
]
try:
self.shadow["data"][0] = np.concatenate(
[
v[0]
for k, v in self.shadow.items()
if (v[0] is not None)
]
)
self.shadow["data"][1] = np.concatenate(
[
v[1]
for k, v in self.shadow.items()
if (v[1] is not None)
]
)
except:
merge_x_listwise = []
merge_y_listwise = []
for i in x_merge:
for x in np.arange(i.shape[0]):
merge_x_listwise.append(i[x])
for i in y_merge:
for y in np.arange(i.shape[0]):
merge_y_listwise.append(i[y])
self.shadow["data"][0] = np.array(merge_x_listwise)
self.shadow["data"][1] = np.array(merge_y_listwise)
self.shadow["train"] = [None, None]
self.shadow["val"] = [None, None]
self.shadow["test"] = [None, None]
self.split = 0
subsets = self.shadow
if self.reduce == "std":
if self.dims == "various":
raise Exception(
"Cannot standardize data if image dimensions are not the same."
)
else:
print(
"Note: merged dataset will not be standardized."
)
for k, v in subsets.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(self, f"X_{k}", v[0])
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
self.min_pv = (
(self.min_pv * self.sigma) + self.mu
).astype(np.uint8)
self.max_pv = (
(self.max_pv * self.sigma) + self.mu
).astype(np.uint8)
elif self.reduce == "norm":
print("Normalizing...")
for k, v in subsets.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(self, f"X_{k}", v[0] / 255)
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
else:
for k, v in subsets.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(self, f"X_{k}", v[0])
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
print("Data sucessfully merged into single data set.")
# ----------
def display_batch(self, n_rows, n_cols):
"""
Displays random batch of images from the Image_Dataset object,
along with class label and data subset if drawn from a split
dataset.
Arguments:
n_rows (int): number of rows of images to display.
-
n_cols (int): number of columns of images to display.
Returns:
Visualization of random batch of images from the dataset.
"""
if n_rows * n_cols > self.size:
raise Exception(
f"Cannot display {n_rows*n_cols} images because only {self.size} in dataset."
)
else:
ds_array = np.arange(self.size)
np.random.shuffle(ds_array)
index_list = ds_array[0 : n_rows * n_cols]
display_list = []
for k, v in self.shadow.items():
if v[1] is not None:
for i in np.arange(v[1].shape[0]):
if v[0][i].shape[2] == 1:
img = np.squeeze(v[0][i], 2)
else:
img = v[0][i]
label_index = np.argmax(v[1][i], axis=0)
label = self.class_list[label_index]
if self.split == 0:
display_list.append([img, label])
else:
display_list.append([img, f"{label} ({k})"])
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = "Helvetica"
plt.rcParams["text.color"] = "#333F4B"
fig = plt.figure(figsize=(12, 8))
for i in range(1, (n_rows * n_cols) + 1):
ax = fig.add_subplot(n_rows, n_cols, i)
ax.imshow(display_list[index_list[i - 1]][0])
ax.set_title(display_list[index_list[i - 1]][1])
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
plt.show()
# ----------
def save_arrays(self, save_dir):
"""
Saves the dataset in HDF5 format into a directory specified by
the 'save_dir' argument. Note that the directory will be
created if it does not already exist, and that existing data
within the specified directory will be overwritten.
"""
my_path = "imgo_output/uptools/save_arrays/" + save_dir
r = None
for i in my_path.split("/"):
if r == None:
if not os.path.exists(i):
os.mkdir(i)
r = i
else:
if not os.path.exists(r + "/" + i):
os.mkdir(r + "/" + i)
r = r + "/" + i
if self.reduce == "std":
print("Saving non-standardized arrays.")
elif self.reduce == "norm":
print("Saving non-normalized arrays.")
for k, v in self.shadow.items():
if v[0] is not None:
with h5py.File(f"{r}/X_{k}.h5", "w") as hf:
hf.create_dataset(f"X_{k}", data=v[0])
print(f"{r}/X_{k}.h5 saved successfully.")
if v[1] is not None:
with h5py.File(f"{r}/y_{k}.h5", "w") as hf:
hf.create_dataset(f"y_{k}", data=v[1])
print(f"{r}/y_{k}.h5 saved successfully.")
# ----------
def save_imgs(self, save_dir):
"""
Saves the dataset in image format into a directory specified
by the 'save_dir' argument (images are saved into
subdirectories for each class within the this directory).
Note that the directory will be created if it does not already
exist, and that existing data within the specified directory
will be overwritten.
"""
my_path = "imgo_output/uptools/save_imgs/" + save_dir
r = None
for i in my_path.split("/"):
if r == None:
if not os.path.exists(i):
os.mkdir(i)
r = i
else:
if not os.path.exists(r + "/" + i):
os.mkdir(r + "/" + i)
r = r + "/" + i
class_counter = {}
for c in self.class_list:
class_counter[c] = 0
if not os.path.exists(r + "/" + c):
os.mkdir(r + "/" + c)
for k, v in self.shadow.items():
if (v[0] is not None) and (v[1] is not None):
for i in tqdm(
np.arange(v[1].shape[0]),
total=v[1].shape[0],
desc="Saving",
):
img = v[0][i]
label_index = np.argmax(v[1][i], axis=0)
label = self.class_list[label_index]
class_counter[label] += 1
path = my_path + "/" + label
imageio.imwrite(
f"{path}/{label}_{class_counter[label]}.jpg",
img,
)
# ----------
def augment_training_set(
self,
portion,
augmenter=None,
augment_scale=None,
augment_type="random",
order=None,
):
"""
Calls on an (initialized) imgo.augtools augmenter to apply image
augmentation to the Image_Dataset's X_train subset.
Arguments:
portion (float): float within the range [0,1]. This is the
portion of images in the set that will be augmented.
Keyword Arguments:
augmenter (imgo.uptools Augmenter object) optional: the
augmenter to apply to the images. Defaults to None.
-
augment_scale (int) optional: square dimensions to which the
images are temporarily rescaled prior to augmentation (note
that larger values result in better quality augmentations).
The images will be rescaled back to their previous (square)
dimensions after augmentation. Defaults to None.
-
augment_type (str) optional: either "random" or "simple".
If "random", the class' "random_augment" method will be used
for the augmentation. If "simple", the "simple_augment" method
will be used. If None, "random_augment" is used. Defaults to
None.
-
order (list) optional: list of indices (integer type) to
determine the order in which the transformation functions are
applied. Note that the transformation functions are ordered
alphabetically by default. Only relevant if using "simple"
as the "augment_type" (see above). Defaults to None.
Returns:
X_train_aug (numpy-array): the Image_Dataset's X_train object
with augmented images added.
Yields:
Augmented images (in numpy-array form) added to the 'X_train'
attribute of the Image_Dataset object.
"""
from imgo import augtools
if self.split == 0:
raise Exception("Data has not been split.")
else:
if (portion >= 0) and (portion <= 1):
n = int(np.round(self.X_train.shape[0] * (portion)))
img_indices = np.random.choice(
self.X_train.shape[0], n, replace=False
)
else:
raise Exception(
"Portion argument must be in range [0,1]."
)
X_aug_list = []
y_aug_list = []
for x in tqdm(
np.arange(self.shadow["train"][0].shape[0]), position=0
):
if x in img_indices:
img = self.shadow["train"][0][x]
label = self.shadow["train"][1][x]
if augment_scale:
scaled_img = auto_rescale(img, augment_scale)
if augment_type == "simple":
aug_scaled_img = augmenter.simple_augment(
scaled_img, order=order
)
else:
aug_scaled_img = augmenter.random_augment(
scaled_img
)
aug_img = auto_rescale(
aug_scaled_img, self.dims[0]
)
else:
if augment_type == "simple":
aug_img = augmenter.simple_augment(
img, order=order
)
else:
aug_img = augmenter.random_augment(
self.shadow["train"][0][x]
)
X_aug_list.append(aug_img)
y_aug_list.append(label)
X_train_aug = np.array(X_aug_list)
y_train_aug = np.array(y_aug_list)
full_X = np.concatenate(
(self.shadow["train"][0], X_train_aug), axis=0
)
full_y = np.concatenate(
(self.shadow["train"][1], y_train_aug), axis=0
)
shuffle_indices = np.random.choice(
full_X.shape[0], full_X.shape[0], replace=False
)
self.shadow["train"][0] = full_X[shuffle_indices]
self.shadow["train"][1] = full_y[shuffle_indices]
self.y_train = self.shadow["train"][1]
if self.reduce == "std":
if self.dims == "various":
raise Exception(
"Cannot standardize data if image dimensions are not the same."
)
else:
print("Standardizing...")
self.mu = (
np.sum(self.shadow["train"][0])
/ self.shadow["train"][0].size
)
self.sigma = np.sqrt(
np.sum((self.shadow["train"][0] - self.mu) ** 2)
/ self.shadow["train"][0].size
)
for k, v in self.shadow.items():
if v[0] is None:
setattr(self, f"X_{k}", None)
else:
setattr(
self,
f"X_{k}",
(v[0] - self.mu) / self.sigma,
)
if v[1] is None:
setattr(self, f"y_{k}", None)
else:
setattr(self, f"y_{k}", v[1])
self.min_pv = (self.min_pv - self.mu) / self.sigma
self.max_pv = (self.max_pv - self.mu) / self.sigma
if self.reduce == "norm":
print("Normalizing...")
self.X_train = self.shadow["train"][0] / 255
else:
self.X_train = self.shadow["train"][0]
self.y_train = self.shadow["train"][1]
if self.split == 1:
print("Training data sucessfully augmented.")
self.size = self.y_train.shape[0] + self.y_test.shape[0]
elif self.split == 2:
print("Training data sucessfully augmented.")
self.size = (
self.y_train.shape[0]
+ self.y_val.shape[0]
+ self.y_test.shape[0]
)
# ----------
    def split_rebalance(
        self,
        split_ratio,
        augmenter=None,
        augment_scale=None,
        augment_type=None,
        order=None,
        force=False,
    ):
        """
        Splits dataset into training and testing (and validation) subsets
        and rebalances class sizes by calling on an (initialized)
        imgo.augtools augmenter to generate new training images (without
        affecting the validation/testing subsets). The number of images
        generated for each class will depend on the ratios given by the
        'split_ratio' argument as well as the number of images already
        included in each class. The number of new images will be maximum
        possible such that the ratios are preserved and that at the total
        of the training and validation subsets are no larger than half of
        the smallest class.

        Arguments:
            split_ratio (tuple): ratios in the form (a, b, c) used
            to split the dataset; where a, b, and c are float values
            representing the desired proportions of training,
            validation, and testing subsets, repectively; and
            a + b + c = 1. If only two values are given, ie in the
            form (a, b); the dataset will be split into training and
            testing subsets only. In this case, a + b must be equal
            to 1. The number of images generated by the augmenter will
            depend on the ratios given as well as the number of images
            in each class.

        Keyword Arguments:
            augmenter (imgo.uptools Augmenter object) optional: the
            augmenter to apply to the images. Defaults to None.
            -
            augment_scale (int) optional: square dimensions to which the
            images are temporarily rescaled prior to augmentation (note
            that larger values result in better quality augmentations).
            The images will be rescaled back to their previous (square)
            dimensions after augmentation. Defaults to None.
            -
            augment_type (str) optional: either "random" or "simple".
            If "random", the class' "random_augment" method will be used
            for the augmentation. If "simple", the "simple_augment" method
            will be used. If None, "random_augment" is used. Defaults to
            None.
            -
            order (list) optional: list of indices (integer type) to
            determine the order in which the transformation functions are
            applied. Note that the transformation functions are ordered
            alphabetically by default. Only relevant if using "simple"
            as the "augment_type" (see above). Defaults to None.
            -
            force (bool) optional: whether or not to force the method to
            apply augmentation to datasets that are already balanced. The
            method will check if the classes are already balanced and
            raise an exception if not set to 'True'. Defaults to False.

        Returns:
            X_train_aug (numpy-array): the Image_Dataset's X_train object
            with augmented images (if inplace argument set to False).

        Yields:
            Augmented images (in numpy-array form) as the 'X_train'
            attribute of the Image_Dataset object (if inplace argument
            set to True).
        """
        # NOTE(review): augtools is not referenced directly below; the
        # augmentation happens through the passed-in `augmenter` object.
        from imgo import augtools
        # --- Validate and normalize the split ratios. ---
        if (type(split_ratio) is list) or (type(split_ratio) is tuple):
            split_ratio = [np.round(i, 2) for i in split_ratio]
            if np.round(sum(split_ratio), 2) == 1:
                if len(split_ratio) == 3:
                    tr_r = split_ratio[0]
                    va_r = split_ratio[1]
                    te_r = split_ratio[2]
                    vt_r = split_ratio[1] + split_ratio[2]
                elif len(split_ratio) == 2:
                    tr_r = split_ratio[0]
                    va_r = 0
                    te_r = split_ratio[1]
                    vt_r = split_ratio[1]
                else:
                    raise Exception(
                        "'split_ratio' argument must be list or tuple of 2 or 3 floats."
                    )
            else:
                raise Exception("'split_ratio' argument must sum to 1.")
        else:
            raise Exception(
                f"'split_ratio' argument must be list or tuple, {type(split_ratio)} given."
            )
        if self.split != 0:
            raise Exception(
                "Cannot split dataset that has already been split."
            )
        else:
            # --- Group image indices by class label. ---
            indices = {}
            for i in self.class_list:
                indices[i] = {
                    "imgs": [],
                    "to_bal": [],
                    "to_rem": [],
                    "valtest": [],
                    "val": [],
                    "test": [],
                }
            for i in np.arange(self.shadow["data"][1].shape[0]):
                for c in indices.keys():
                    if (
                        self.class_list[
                            np.argmax(self.shadow["data"][1][i], axis=0)
                        ]
                        == c
                    ):
                        indices[c]["imgs"].append(i)
            # Refuse to rebalance already-balanced classes unless forced.
            lens = []
            for c in indices.keys():
                lens.append(len(indices[c]["imgs"]))
            if all(i == lens[0] for i in lens):
                if not force:
                    raise Exception(
                        "Classes already appear to be balanced, set 'force' to 'True' to rebalance."
                    )
                else:
                    pass
            # --- Derive target subset sizes from the smallest class:
            # val+test take at most half of it; train size follows from
            # the requested ratios. ---
            min_class_size = np.min(
                [len(indices[c]["imgs"]) for c in indices.keys()]
            )
            max_vt_size = int((min_class_size / 2) // 1)
            max_tr_size = int(((max_vt_size / vt_r) * tr_r) // 1)
            max_total_size = max_vt_size + max_tr_size
            train_indices = []
            val_indices = []
            test_indices = []
            # --- Sample val/test indices per class and withdraw them
            # from the per-class training pools. ---
            for c in indices.keys():
                indices[c]["valtest"] = list(
                    np.random.choice(
                        indices[c]["imgs"],
                        size=max_vt_size,
                        replace=False,
                    )
                )
                indices[c]["val"] = list(
                    np.random.choice(
                        indices[c]["valtest"],
                        size=int((max_total_size * (va_r)) // 1),
                        replace=False,
                    )
                )
                indices[c]["test"] = [
                    i
                    for i in indices[c]["valtest"]
                    if i not in indices[c]["val"]
                ]
                for i in indices[c]["valtest"]:
                    indices[c]["imgs"].remove(i)
                val_indices += indices[c]["val"]
                test_indices += indices[c]["test"]
            # --- Per class, choose indices either to duplicate-augment
            # (undersized classes) or to drop (oversized classes) so
            # every class reaches max_tr_size training images. ---
            bal_indices = []
            for c in indices.keys():
                class_tr_size = len(indices[c]["imgs"])
                inds = []
                dif = np.abs(class_tr_size - max_tr_size)
                # Full passes over the class plus a partial remainder.
                passes = np.divmod(dif, class_tr_size)
                for i in range(passes[0]):
                    inds += list(
                        np.random.choice(
                            indices[c]["imgs"],
                            size=class_tr_size,
                            replace=False,
                        )
                    )
                inds += list(
                    np.random.choice(
                        indices[c]["imgs"],
                        size=passes[1],
                        replace=False,
                    )
                )
                if class_tr_size > max_tr_size:
                    indices[c]["to_rem"] = inds
                    for i in indices[c]["to_rem"]:
                        indices[c]["imgs"].remove(i)
                else:
                    indices[c]["to_bal"] = inds
                bal_indices += indices[c]["to_bal"]
                train_indices += indices[c]["imgs"]
            # --- Materialize the subsets in shuffled order. ---
            X_train = []
            y_train = []
            X_val = []
            y_val = []
            X_test = []
            y_test = []
            data_range = np.arange(self.shadow["data"][1].shape[0])
            shuffle = np.random.choice(
                data_range, size=data_range.shape[0], replace=False
            )
            for i in shuffle:
                img = self.shadow["data"][0][i]
                label = self.shadow["data"][1][i]
                if i in train_indices:
                    X_train.append(img)
                    y_train.append(label)
                if i in val_indices:
                    X_val.append(img)
                    y_val.append(label)
                elif i in test_indices:
                    X_test.append(img)
                    y_test.append(label)
            # --- Generate augmented copies for the balancing indices and
            # append them to the training set. ---
            for i in tqdm(
                bal_indices,
                total=len(bal_indices),
                desc="Rebalancing",
                position=0,
            ):
                img = self.shadow["data"][0][i]
                label = self.shadow["data"][1][i]
                if augment_scale:
                    # Upscale, augment, then restore square dimensions.
                    scaled_img = auto_rescale(img, augment_scale)
                    if augment_type == "simple":
                        aug_scaled_img = augmenter.simple_augment(
                            scaled_img, order=order
                        )
                    else:
                        aug_scaled_img = augmenter.random_augment(
                            scaled_img
                        )
                    aug_img = auto_rescale(aug_scaled_img, self.dims[0])
                else:
                    if augment_type == "simple":
                        aug_img = augmenter.simple_augment(
                            img, order=order
                        )
                    else:
                        aug_img = augmenter.random_augment(img)
                X_train.append(aug_img)
                y_train.append(label)
            X_train_array = np.array(X_train)
            y_train_array = np.array(y_train)
            # Reshuffle so originals and augmentations are interleaved.
            data_range_2 = np.arange(y_train_array.shape[0])
            shuffle_2 = np.random.choice(
                data_range_2,
                size=data_range_2.shape[0],
                replace=False,
            )
            self.shadow["train"][0] = X_train_array[shuffle_2]
            self.shadow["train"][1] = y_train_array[shuffle_2]
            if len(split_ratio) == 3:
                self.shadow["val"][0] = np.array(X_val)
                self.shadow["val"][1] = np.array(y_val)
                self.shadow["test"][0] = np.array(X_test)
                self.shadow["test"][1] = np.array(y_test)
                self.split = 2
            if len(split_ratio) == 2:
                # With two ratios there is no validation subset; fold any
                # val-sampled images into the test subset.
                self.shadow["val"][0] = None
                self.shadow["val"][1] = None
                self.shadow["test"][0] = np.array(X_val + X_test)
                self.shadow["test"][1] = np.array(y_val + y_test)
                self.split = 1
            self.shadow["data"][0] = None
            self.shadow["data"][1] = None
            # --- Re-derive the public X_/y_ attributes from the shadow
            # store, re-applying the active reduction mode if any. ---
            subsets = self.shadow
            if self.reduce == "std":
                if self.dims == "various":
                    raise Exception(
                        "Cannot standardize data if image dimensions are not the same."
                    )
                else:
                    print("Standardizing...")
                    self.mu = (
                        np.sum(subsets["train"][0])
                        / subsets["train"][0].size
                    )
                    self.sigma = np.sqrt(
                        np.sum((subsets["train"][0] - self.mu) ** 2)
                        / subsets["train"][0].size
                    )
                    for k, v in subsets.items():
                        if v[0] is None:
                            setattr(self, f"X_{k}", None)
                        else:
                            setattr(
                                self,
                                f"X_{k}",
                                (v[0] - self.mu) / self.sigma,
                            )
                        if v[1] is None:
                            setattr(self, f"y_{k}", None)
                        else:
                            setattr(self, f"y_{k}", v[1])
                    self.min_pv = (self.min_pv - self.mu) / self.sigma
                    self.max_pv = (self.max_pv - self.mu) / self.sigma
            elif self.reduce == "norm":
                print("Normalizing...")
                for k, v in subsets.items():
                    if v[0] is None:
                        setattr(self, f"X_{k}", None)
                    else:
                        setattr(self, f"X_{k}", v[0] / 255)
                    if v[1] is None:
                        setattr(self, f"y_{k}", None)
                    else:
                        setattr(self, f"y_{k}", v[1])
            else:
                for k, v in subsets.items():
                    if v[0] is None:
                        setattr(self, f"X_{k}", None)
                    else:
                        setattr(self, f"X_{k}", v[0])
                    if v[1] is None:
                        setattr(self, f"y_{k}", None)
                    else:
                        setattr(self, f"y_{k}", v[1])
            if self.split == 1:
                print(
                    "Data sucessfully rebalanced and split into training and testing subsets."
                )
                self.size = self.y_train.shape[0] + self.y_test.shape[0]
            elif self.split == 2:
                print(
                    "Data sucessfully rebalanced and split into training, validation, and testing subsets."
                )
                self.size = (
                    self.y_train.shape[0]
                    + self.y_val.shape[0]
                    + self.y_test.shape[0]
                )
| [
"os.mkdir",
"numpy.load",
"numpy.abs",
"numpy.sum",
"numpy.argmax",
"sklearn.model_selection.train_test_split",
"os.walk",
"numpy.clip",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.round",
"os.path.join",
"numpy.unique",
"pandas.DataFrame",
"os.path.exists",
"numpy.divmod",
"n... | [((5871, 5889), 'os.walk', 'os.walk', (['base_path'], {}), '(base_path)\n', (5878, 5889), False, 'import os\n'), ((9361, 9379), 'numpy.array', 'np.array', (['img_list'], {}), '(img_list)\n', (9369, 9379), True, 'import numpy as np\n'), ((9398, 9418), 'numpy.array', 'np.array', (['label_list'], {}), '(label_list)\n', (9406, 9418), True, 'import numpy as np\n'), ((9571, 9598), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (9581, 9598), True, 'import matplotlib.pyplot as plt\n'), ((9854, 9864), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9862, 9864), True, 'import matplotlib.pyplot as plt\n'), ((11590, 11608), 'numpy.array', 'np.array', (['img_list'], {}), '(img_list)\n', (11598, 11608), True, 'import numpy as np\n'), ((11627, 11647), 'numpy.array', 'np.array', (['label_list'], {}), '(label_list)\n', (11635, 11647), True, 'import numpy as np\n'), ((13764, 13780), 'numpy.array', 'np.array', (['y_list'], {}), '(y_list)\n', (13772, 13780), True, 'import numpy as np\n'), ((15104, 15121), 'numpy.max', 'np.max', (['img.shape'], {}), '(img.shape)\n', (15110, 15121), True, 'import numpy as np\n'), ((16553, 16589), 'numpy.sqrt', 'np.sqrt', (['(img.shape[0] * img.shape[1])'], {}), '(img.shape[0] * img.shape[1])\n', (16560, 16589), True, 'import numpy as np\n'), ((6969, 7010), 'pandas.DataFrame', 'pd.DataFrame', (['img_list'], {'columns': "['image']"}), "(img_list, columns=['image'])\n", (6981, 7010), True, 'import pandas as pd\n'), ((7485, 7503), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (7494, 7503), True, 'import pandas as pd\n'), ((9124, 9144), 'imageio.imread', 'imageio.imread', (['j[0]'], {}), '(j[0])\n', (9138, 9144), False, 'import imageio\n'), ((11267, 11287), 'imageio.imread', 'imageio.imread', (['j[0]'], {}), '(j[0])\n', (11281, 11287), False, 'import imageio\n'), ((13579, 13597), 'numpy.zeros', 'np.zeros', (['class_no'], {}), '(class_no)\n', (13587, 13597), True, 'import numpy 
as np\n'), ((15191, 15244), 'cv2.resize', 'cv2.resize', (['img', 'scale'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, scale, interpolation=cv2.INTER_CUBIC)\n', (15201, 15244), False, 'import cv2\n'), ((15298, 15350), 'cv2.resize', 'cv2.resize', (['img', 'scale'], {'interpolation': 'cv2.INTER_AREA'}), '(img, scale, interpolation=cv2.INTER_AREA)\n', (15308, 15350), False, 'import cv2\n'), ((15430, 15459), 'numpy.expand_dims', 'np.expand_dims', (['scaled_img', '(2)'], {}), '(scaled_img, 2)\n', (15444, 15459), True, 'import numpy as np\n'), ((17548, 17577), 'numpy.expand_dims', 'np.expand_dims', (['scaled_img', '(2)'], {}), '(scaled_img, 2)\n', (17562, 17577), True, 'import numpy as np\n'), ((19908, 19928), 'imageio.imread', 'imageio.imread', (['j[0]'], {}), '(j[0])\n', (19922, 19928), False, 'import imageio\n'), ((20492, 20513), 'numpy.array', 'np.array', (['scaled_imgs'], {}), '(scaled_imgs)\n', (20500, 20513), True, 'import numpy as np\n'), ((7359, 7400), 'pandas.DataFrame', 'pd.DataFrame', (['img_list'], {'columns': "['image']"}), "(img_list, columns=['image'])\n", (7371, 7400), True, 'import pandas as pd\n'), ((14185, 14217), 'h5py.File', 'h5py.File', (['f"""{r}/y_data.h5"""', '"""w"""'], {}), "(f'{r}/y_data.h5', 'w')\n", (14194, 14217), False, 'import h5py\n'), ((16692, 16754), 'cv2.resize', 'cv2.resize', (['img', '(lower, lower)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (lower, lower), interpolation=cv2.INTER_CUBIC)\n', (16702, 16754), False, 'import cv2\n'), ((20266, 20295), 'numpy.expand_dims', 'np.expand_dims', (['scaled_img', '(2)'], {}), '(scaled_img, 2)\n', (20280, 20295), True, 'import numpy as np\n'), ((20326, 20359), 'imageio.imwrite', 'imageio.imwrite', (['j[0]', 'scaled_img'], {}), '(j[0], scaled_img)\n', (20341, 20359), False, 'import imageio\n'), ((29545, 29563), 'os.walk', 'os.walk', (['base_path'], {}), '(base_path)\n', (29552, 29563), False, 'import os\n'), ((42692, 42732), 'numpy.unique', 'np.unique', (['labels[k]'], {'return_counts': 
'(True)'}), '(labels[k], return_counts=True)\n', (42701, 42732), True, 'import numpy as np\n'), ((44135, 44164), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (44147, 44164), True, 'import matplotlib.pyplot as plt\n'), ((44948, 44958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (44956, 44958), True, 'import matplotlib.pyplot as plt\n'), ((63820, 63840), 'numpy.arange', 'np.arange', (['self.size'], {}), '(self.size)\n', (63829, 63840), True, 'import numpy as np\n'), ((63853, 63880), 'numpy.random.shuffle', 'np.random.shuffle', (['ds_array'], {}), '(ds_array)\n', (63870, 63880), True, 'import numpy as np\n'), ((64803, 64830), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (64813, 64830), True, 'import matplotlib.pyplot as plt\n'), ((65181, 65191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (65189, 65191), True, 'import matplotlib.pyplot as plt\n'), ((72229, 72249), 'numpy.array', 'np.array', (['X_aug_list'], {}), '(X_aug_list)\n', (72237, 72249), True, 'import numpy as np\n'), ((72276, 72296), 'numpy.array', 'np.array', (['y_aug_list'], {}), '(y_aug_list)\n', (72284, 72296), True, 'import numpy as np\n'), ((72319, 72381), 'numpy.concatenate', 'np.concatenate', (["(self.shadow['train'][0], X_train_aug)"], {'axis': '(0)'}), "((self.shadow['train'][0], X_train_aug), axis=0)\n", (72333, 72381), True, 'import numpy as np\n'), ((72433, 72495), 'numpy.concatenate', 'np.concatenate', (["(self.shadow['train'][1], y_train_aug)"], {'axis': '(0)'}), "((self.shadow['train'][1], y_train_aug), axis=0)\n", (72447, 72495), True, 'import numpy as np\n'), ((72557, 72622), 'numpy.random.choice', 'np.random.choice', (['full_X.shape[0]', 'full_X.shape[0]'], {'replace': '(False)'}), '(full_X.shape[0], full_X.shape[0], replace=False)\n', (72573, 72622), True, 'import numpy as np\n'), ((79881, 79923), 'numpy.arange', 'np.arange', (["self.shadow['data'][1].shape[0]"], 
{}), "(self.shadow['data'][1].shape[0])\n", (79890, 79923), True, 'import numpy as np\n'), ((83311, 83353), 'numpy.arange', 'np.arange', (["self.shadow['data'][1].shape[0]"], {}), "(self.shadow['data'][1].shape[0])\n", (83320, 83353), True, 'import numpy as np\n'), ((83376, 83445), 'numpy.random.choice', 'np.random.choice', (['data_range'], {'size': 'data_range.shape[0]', 'replace': '(False)'}), '(data_range, size=data_range.shape[0], replace=False)\n', (83392, 83445), True, 'import numpy as np\n'), ((6886, 6907), 'os.listdir', 'os.listdir', (['base_path'], {}), '(base_path)\n', (6896, 6907), False, 'import os\n'), ((12254, 12286), 'h5py.File', 'h5py.File', (['f"""{r}/X_data.h5"""', '"""w"""'], {}), "(f'{r}/X_data.h5', 'w')\n", (12263, 12286), False, 'import h5py\n'), ((16931, 16992), 'cv2.resize', 'cv2.resize', (['img', '(upper, upper)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (upper, upper), interpolation=cv2.INTER_AREA)\n', (16941, 16992), False, 'import cv2\n'), ((33833, 33849), 'numpy.array', 'np.array', (['X_list'], {}), '(X_list)\n', (33841, 33849), True, 'import numpy as np\n'), ((33886, 33902), 'numpy.array', 'np.array', (['y_list'], {}), '(y_list)\n', (33894, 33902), True, 'import numpy as np\n'), ((38039, 38050), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (38047, 38050), True, 'import numpy as np\n'), ((42444, 42468), 'numpy.arange', 'np.arange', (['v[1].shape[0]'], {}), '(v[1].shape[0])\n', (42453, 42468), True, 'import numpy as np\n'), ((67510, 67537), 'os.path.exists', 'os.path.exists', (["(r + '/' + c)"], {}), "(r + '/' + c)\n", (67524, 67537), False, 'import os\n'), ((67555, 67576), 'os.mkdir', 'os.mkdir', (["(r + '/' + c)"], {}), "(r + '/' + c)\n", (67563, 67576), False, 'import os\n'), ((70534, 70591), 'numpy.random.choice', 'np.random.choice', (['self.X_train.shape[0]', 'n'], {'replace': '(False)'}), '(self.X_train.shape[0], n, replace=False)\n', (70550, 70591), True, 'import numpy as np\n'), ((70863, 70906), 'numpy.arange', 
'np.arange', (["self.shadow['train'][0].shape[0]"], {}), "(self.shadow['train'][0].shape[0])\n", (70872, 70906), True, 'import numpy as np\n'), ((78469, 78483), 'numpy.round', 'np.round', (['i', '(2)'], {}), '(i, 2)\n', (78477, 78483), True, 'import numpy as np\n'), ((82130, 82165), 'numpy.abs', 'np.abs', (['(class_tr_size - max_tr_size)'], {}), '(class_tr_size - max_tr_size)\n', (82136, 82165), True, 'import numpy as np\n'), ((82191, 82220), 'numpy.divmod', 'np.divmod', (['dif', 'class_tr_size'], {}), '(dif, class_tr_size)\n', (82200, 82220), True, 'import numpy as np\n'), ((85170, 85187), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (85178, 85187), True, 'import numpy as np\n'), ((85220, 85237), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (85228, 85237), True, 'import numpy as np\n'), ((85270, 85303), 'numpy.arange', 'np.arange', (['y_train_array.shape[0]'], {}), '(y_train_array.shape[0])\n', (85279, 85303), True, 'import numpy as np\n'), ((85332, 85405), 'numpy.random.choice', 'np.random.choice', (['data_range_2'], {'size': 'data_range_2.shape[0]', 'replace': '(False)'}), '(data_range_2, size=data_range_2.shape[0], replace=False)\n', (85348, 85405), True, 'import numpy as np\n'), ((85690, 85705), 'numpy.array', 'np.array', (['X_val'], {}), '(X_val)\n', (85698, 85705), True, 'import numpy as np\n'), ((85746, 85761), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (85754, 85761), True, 'import numpy as np\n'), ((85803, 85819), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (85811, 85819), True, 'import numpy as np\n'), ((85861, 85877), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (85869, 85877), True, 'import numpy as np\n'), ((86078, 86102), 'numpy.array', 'np.array', (['(X_val + X_test)'], {}), '(X_val + X_test)\n', (86086, 86102), True, 'import numpy as np\n'), ((86144, 86168), 'numpy.array', 'np.array', (['(y_val + y_test)'], {}), '(y_val + y_test)\n', (86152, 86168), True, 'import numpy as 
np\n'), ((6082, 6103), 'os.listdir', 'os.listdir', (['base_path'], {}), '(base_path)\n', (6092, 6103), False, 'import os\n'), ((7250, 7280), 'os.listdir', 'os.listdir', (['f"""{base_path}/{c}"""'], {}), "(f'{base_path}/{c}')\n", (7260, 7280), False, 'import os\n'), ((13954, 13971), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (13968, 13971), False, 'import os\n'), ((13993, 14004), 'os.mkdir', 'os.mkdir', (['i'], {}), '(i)\n', (14001, 14004), False, 'import os\n'), ((14068, 14095), 'os.path.exists', 'os.path.exists', (["(r + '/' + i)"], {}), "(r + '/' + i)\n", (14082, 14095), False, 'import os\n'), ((14117, 14138), 'os.mkdir', 'os.mkdir', (["(r + '/' + i)"], {}), "(r + '/' + i)\n", (14125, 14138), False, 'import os\n'), ((17173, 17235), 'cv2.resize', 'cv2.resize', (['img', '(lower, lower)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (lower, lower), interpolation=cv2.INTER_CUBIC)\n', (17183, 17235), False, 'import cv2\n'), ((33368, 33391), 'numpy.zeros', 'np.zeros', (['self.class_no'], {}), '(self.class_no)\n', (33376, 33391), True, 'import numpy as np\n'), ((33586, 33606), 'imageio.imread', 'imageio.imread', (['j[0]'], {}), '(j[0])\n', (33600, 33606), False, 'import imageio\n'), ((35588, 35624), 'numpy.arange', 'np.arange', (['combo_sets[k][0].shape[0]'], {}), '(combo_sets[k][0].shape[0])\n', (35597, 35624), True, 'import numpy as np\n'), ((37555, 37566), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (37561, 37566), True, 'import numpy as np\n'), ((37597, 37608), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (37603, 37608), True, 'import numpy as np\n'), ((38193, 38216), 'numpy.expand_dims', 'np.expand_dims', (['v[0]', '(3)'], {}), '(v[0], 3)\n', (38207, 38216), True, 'import numpy as np\n'), ((42504, 42530), 'numpy.argmax', 'np.argmax', (['v[1][i]'], {'axis': '(0)'}), '(v[1][i], axis=0)\n', (42513, 42530), True, 'import numpy as np\n'), ((43655, 43695), 'pandas.DataFrame', 'pd.DataFrame', (['labs_nums'], {'columns': 'df_cols'}), '(labs_nums, 
columns=df_cols)\n', (43667, 43695), True, 'import pandas as pd\n'), ((51247, 51261), 'numpy.round', 'np.round', (['i', '(2)'], {}), '(i, 2)\n', (51255, 51261), True, 'import numpy as np\n'), ((60789, 60815), 'numpy.array', 'np.array', (['merge_x_listwise'], {}), '(merge_x_listwise)\n', (60797, 60815), True, 'import numpy as np\n'), ((60857, 60883), 'numpy.array', 'np.array', (['merge_y_listwise'], {}), '(merge_y_listwise)\n', (60865, 60883), True, 'import numpy as np\n'), ((64079, 64103), 'numpy.arange', 'np.arange', (['v[1].shape[0]'], {}), '(v[1].shape[0])\n', (64088, 64103), True, 'import numpy as np\n'), ((65711, 65728), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (65725, 65728), False, 'import os\n'), ((65750, 65761), 'os.mkdir', 'os.mkdir', (['i'], {}), '(i)\n', (65758, 65761), False, 'import os\n'), ((65825, 65852), 'os.path.exists', 'os.path.exists', (["(r + '/' + i)"], {}), "(r + '/' + i)\n", (65839, 65852), False, 'import os\n'), ((65874, 65895), 'os.mkdir', 'os.mkdir', (["(r + '/' + i)"], {}), "(r + '/' + i)\n", (65882, 65895), False, 'import os\n'), ((66198, 66229), 'h5py.File', 'h5py.File', (['f"""{r}/X_{k}.h5"""', '"""w"""'], {}), "(f'{r}/X_{k}.h5', 'w')\n", (66207, 66229), False, 'import h5py\n'), ((66409, 66440), 'h5py.File', 'h5py.File', (['f"""{r}/y_{k}.h5"""', '"""w"""'], {}), "(f'{r}/y_{k}.h5', 'w')\n", (66418, 66440), False, 'import h5py\n'), ((67178, 67195), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (67192, 67195), False, 'import os\n'), ((67217, 67228), 'os.mkdir', 'os.mkdir', (['i'], {}), '(i)\n', (67225, 67228), False, 'import os\n'), ((67292, 67319), 'os.path.exists', 'os.path.exists', (["(r + '/' + i)"], {}), "(r + '/' + i)\n", (67306, 67319), False, 'import os\n'), ((67341, 67362), 'os.mkdir', 'os.mkdir', (["(r + '/' + i)"], {}), "(r + '/' + i)\n", (67349, 67362), False, 'import os\n'), ((67728, 67752), 'numpy.arange', 'np.arange', (['v[1].shape[0]'], {}), '(v[1].shape[0])\n', (67737, 67752), True, 'import 
numpy as np\n'), ((67917, 67943), 'numpy.argmax', 'np.argmax', (['v[1][i]'], {'axis': '(0)'}), '(v[1][i], axis=0)\n', (67926, 67943), True, 'import numpy as np\n'), ((68116, 68182), 'imageio.imwrite', 'imageio.imwrite', (['f"""{path}/{label}_{class_counter[label]}.jpg"""', 'img'], {}), "(f'{path}/{label}_{class_counter[label]}.jpg', img)\n", (68131, 68182), False, 'import imageio\n'), ((70459, 70500), 'numpy.round', 'np.round', (['(self.X_train.shape[0] * portion)'], {}), '(self.X_train.shape[0] * portion)\n', (70467, 70500), True, 'import numpy as np\n'), ((81124, 81193), 'numpy.random.choice', 'np.random.choice', (["indices[c]['imgs']"], {'size': 'max_vt_size', 'replace': '(False)'}), "(indices[c]['imgs'], size=max_vt_size, replace=False)\n", (81140, 81193), True, 'import numpy as np\n'), ((82578, 82645), 'numpy.random.choice', 'np.random.choice', (["indices[c]['imgs']"], {'size': 'passes[1]', 'replace': '(False)'}), "(indices[c]['imgs'], size=passes[1], replace=False)\n", (82594, 82645), True, 'import numpy as np\n'), ((11995, 12012), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (12009, 12012), False, 'import os\n'), ((12038, 12049), 'os.mkdir', 'os.mkdir', (['i'], {}), '(i)\n', (12046, 12049), False, 'import os\n'), ((12125, 12152), 'os.path.exists', 'os.path.exists', (["(r + '/' + i)"], {}), "(r + '/' + i)\n", (12139, 12152), False, 'import os\n'), ((12178, 12199), 'os.mkdir', 'os.mkdir', (["(r + '/' + i)"], {}), "(r + '/' + i)\n", (12186, 12199), False, 'import os\n'), ((17321, 17382), 'cv2.resize', 'cv2.resize', (['img', '(upper, upper)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (upper, upper), interpolation=cv2.INTER_AREA)\n', (17331, 17382), False, 'import cv2\n'), ((39690, 39720), 'numpy.sum', 'np.sum', (["combo_sets['train'][0]"], {}), "(combo_sets['train'][0])\n", (39696, 39720), True, 'import numpy as np\n'), ((56788, 56815), 'numpy.sum', 'np.sum', (["subsets['train'][0]"], {}), "(subsets['train'][0])\n", (56794, 56815), True, 'import 
numpy as np\n'), ((60530, 60551), 'numpy.arange', 'np.arange', (['i.shape[0]'], {}), '(i.shape[0])\n', (60539, 60551), True, 'import numpy as np\n'), ((60670, 60691), 'numpy.arange', 'np.arange', (['i.shape[0]'], {}), '(i.shape[0])\n', (60679, 60691), True, 'import numpy as np\n'), ((64322, 64348), 'numpy.argmax', 'np.argmax', (['v[1][i]'], {'axis': '(0)'}), '(v[1][i], axis=0)\n', (64331, 64348), True, 'import numpy as np\n'), ((73181, 73212), 'numpy.sum', 'np.sum', (["self.shadow['train'][0]"], {}), "(self.shadow['train'][0])\n", (73187, 73212), True, 'import numpy as np\n'), ((82323, 82394), 'numpy.random.choice', 'np.random.choice', (["indices[c]['imgs']"], {'size': 'class_tr_size', 'replace': '(False)'}), "(indices[c]['imgs'], size=class_tr_size, replace=False)\n", (82339, 82394), True, 'import numpy as np\n'), ((86671, 86698), 'numpy.sum', 'np.sum', (["subsets['train'][0]"], {}), "(subsets['train'][0])\n", (86677, 86698), True, 'import numpy as np\n'), ((30030, 30065), 'numpy.load', 'np.load', (['file[1]'], {'allow_pickle': '(True)'}), '(file[1], allow_pickle=True)\n', (30037, 30065), True, 'import numpy as np\n'), ((35883, 35910), 'numpy.min', 'np.min', (['combo_sets[k][0][i]'], {}), '(combo_sets[k][0][i])\n', (35889, 35910), True, 'import numpy as np\n'), ((35949, 35976), 'numpy.max', 'np.max', (['combo_sets[k][0][i]'], {}), '(combo_sets[k][0][i])\n', (35955, 35976), True, 'import numpy as np\n'), ((39863, 39910), 'numpy.sum', 'np.sum', (["((combo_sets['train'][0] - self.mu) ** 2)"], {}), "((combo_sets['train'][0] - self.mu) ** 2)\n", (39869, 39910), True, 'import numpy as np\n'), ((48401, 48428), 'numpy.sum', 'np.sum', (["subsets['train'][0]"], {}), "(subsets['train'][0])\n", (48407, 48428), True, 'import numpy as np\n'), ((56955, 56999), 'numpy.sum', 'np.sum', (["((subsets['train'][0] - self.mu) ** 2)"], {}), "((subsets['train'][0] - self.mu) ** 2)\n", (56961, 56999), True, 'import numpy as np\n'), ((64189, 64211), 'numpy.squeeze', 'np.squeeze', 
(['v[0][i]', '(2)'], {}), '(v[0][i], 2)\n', (64199, 64211), True, 'import numpy as np\n'), ((73356, 73404), 'numpy.sum', 'np.sum', (["((self.shadow['train'][0] - self.mu) ** 2)"], {}), "((self.shadow['train'][0] - self.mu) ** 2)\n", (73362, 73404), True, 'import numpy as np\n'), ((80060, 80104), 'numpy.argmax', 'np.argmax', (["self.shadow['data'][1][i]"], {'axis': '(0)'}), "(self.shadow['data'][1][i], axis=0)\n", (80069, 80104), True, 'import numpy as np\n'), ((86838, 86882), 'numpy.sum', 'np.sum', (["((subsets['train'][0] - self.mu) ** 2)"], {}), "((subsets['train'][0] - self.mu) ** 2)\n", (86844, 86882), True, 'import numpy as np\n'), ((30438, 30473), 'numpy.load', 'np.load', (['file[1]'], {'allow_pickle': '(True)'}), '(file[1], allow_pickle=True)\n', (30445, 30473), True, 'import numpy as np\n'), ((30903, 30938), 'numpy.load', 'np.load', (['file[1]'], {'allow_pickle': '(True)'}), '(file[1], allow_pickle=True)\n', (30910, 30938), True, 'import numpy as np\n'), ((37026, 37050), 'numpy.clip', 'np.clip', (['raw_img', '(0)', '(255)'], {}), '(raw_img, 0, 255)\n', (37033, 37050), True, 'import numpy as np\n'), ((48552, 48596), 'numpy.sum', 'np.sum', (["((subsets['train'][0] - self.mu) ** 2)"], {}), "((subsets['train'][0] - self.mu) ** 2)\n", (48558, 48596), True, 'import numpy as np\n'), ((54731, 54878), 'sklearn.model_selection.train_test_split', 'train_test_split', (["self.shadow['data'][0]", "self.shadow['data'][1]"], {'test_size': 'split_ratio[1]', 'stratify': "self.shadow['data'][1]", 'random_state': 'seed'}), "(self.shadow['data'][0], self.shadow['data'][1], test_size=\n split_ratio[1], stratify=self.shadow['data'][1], random_state=seed)\n", (54747, 54878), False, 'from sklearn.model_selection import train_test_split\n'), ((55383, 55497), 'sklearn.model_selection.train_test_split', 'train_test_split', (["self.shadow['data'][0]", "self.shadow['data'][1]"], {'test_size': 'split_ratio[1]', 'random_state': 'seed'}), "(self.shadow['data'][0], self.shadow['data'][1], 
test_size=\n split_ratio[1], random_state=seed)\n", (55399, 55497), False, 'from sklearn.model_selection import train_test_split\n'), ((29729, 29747), 'os.path.join', 'os.path.join', (['r', 'i'], {}), '(r, i)\n', (29741, 29747), False, 'import os\n'), ((31179, 31214), 'numpy.load', 'np.load', (['file[1]'], {'allow_pickle': '(True)'}), '(file[1], allow_pickle=True)\n', (31186, 31214), True, 'import numpy as np\n'), ((36230, 36254), 'numpy.clip', 'np.clip', (['raw_img', '(0)', '(255)'], {}), '(raw_img, 0, 255)\n', (36237, 36254), True, 'import numpy as np\n'), ((31490, 31518), 'h5py.File', 'h5py.File', (['f"""{file[1]}"""', '"""r"""'], {}), "(f'{file[1]}', 'r')\n", (31499, 31518), False, 'import h5py\n'), ((36574, 36598), 'numpy.clip', 'np.clip', (['raw_img', '(0)', '(255)'], {}), '(raw_img, 0, 255)\n', (36581, 36598), True, 'import numpy as np\n'), ((36806, 36830), 'numpy.clip', 'np.clip', (['raw_img', '(0)', '(255)'], {}), '(raw_img, 0, 255)\n', (36813, 36830), True, 'import numpy as np\n'), ((31757, 31785), 'h5py.File', 'h5py.File', (['f"""{file[1]}"""', '"""r"""'], {}), "(f'{file[1]}', 'r')\n", (31766, 31785), False, 'import h5py\n')] |
from couplib.myreportservice import *
from couplib.constants import *
from configuration import *
from math import *
import numpy as np
#-------------------------------------------------------------------------------
class AtomInterface():
    """Per-atom record: Cartesian coordinates plus PDB bookkeeping fields."""
    def __init__(self, x, y, z, ElSym, AtomPDBID = ATOM_PDB_ID_NONE, AtomName = ATOM_PDB_NAME_NONE, AltLoc = ATOM_PDB_ALT_LOC_NONE, ResID = ATOM_PDB_RES_ID_NONE, ResName=ATOM_PDB_RES_NAME_NONE, ChainID=ATOM_PDB_CHAIN_ID_NONE):
        """Store coordinates (Angstrom) and the PDB metadata of one atom."""
        # Cartesian coordinates of the atom in Angstroms
        self.x, self.y, self.z = x, y, z
        self.ElSym = ElSym          # element symbol from the PDB file
        self.AtomPDBID = AtomPDBID  # unique atom PDB ID
        self.AtomName = AtomName    # atom PDB name
        self.AltLoc = AltLoc        # alternate location indicator
        self.ResID = ResID          # residue ID (shared by all atoms of a fragment)
        self.ResName = ResName      # residue name (shared by all atoms of a fragment)
        self.ChainID = ChainID      # chain identifier
    def MyPrint(self, ID = -1, ALabelLen = STR_LEN_ALABEL, Round_XYZ = INT_ROUND, XYZ_Len = STR_LEN_FLOAT):
        """Print one padded line: atom label followed by rounded x, y, z."""
        if ID == -1:
            ID = self.AtomPDBID  # fall back to the stored PDB ID
        label = str(self.ElSym + str(ID) + str(self.AltLoc)).ljust(ALabelLen)
        col_x = str(round(self.x, Round_XYZ)).ljust(XYZ_Len)
        col_y = str(round(self.y, Round_XYZ)).ljust(XYZ_Len)
        col_z = str(round(self.z, Round_XYZ)).ljust(XYZ_Len)
        print("{} {} {} {}".format(label, col_x, col_y, col_z))
#-------------------------------------------------------------------------------
class OriginInterface():
    """Origin of a fragment: a single point in Cartesian space (Angstrom)."""
    def __init__(self, x, y, z):
        """Store the Cartesian coordinates of the origin."""
        self.x = x
        self.y = y
        self.z = z
    def GetNPArray(self):
        """Return the coordinates as a NumPy array [x, y, z]."""
        return np.asarray((self.x, self.y, self.z))
    def MyPrint(self):
        """Print the origin as a padded "(x y z)" line."""
        cols = [str(round(c, INT_ROUND)).ljust(STR_LEN_FLOAT)
                for c in (self.x, self.y, self.z)]
        print("({} {} {})".format(cols[0], cols[1], cols[2]))
#-------------------------------------------------------------------------------
class QDVibInterface():
    """Quantum-dynamics vibrational parameters of a fragment's excited state."""
    def __init__(self, ExStID, VibModeID, VibModeID_Internal, Vib_cm1, ElVibCoupl_cm1, Vib_Decay_ps1):
        """Store the mode identifiers and vibrational parameters."""
        self.ExStID = ExStID                          # excited-state ID
        self.VibModeID = VibModeID                    # vibrational mode ID
        self.VibModeID_Internal = VibModeID_Internal  # internal mode index -- mapping to VibModeID defined elsewhere
        self.Vib_cm1 = Vib_cm1                        # vibrational frequency, cm-1
        self.ElVibCoupl_cm1 = ElVibCoupl_cm1          # electron-vibrational coupling, cm-1
        self.Vib_Decay_ps1 = Vib_Decay_ps1            # vibrational decay rate, ps-1
    def MyPrint(self):
        """Print the quantum-dynamics parameters as padded columns."""
        fields = (str(self.ExStID).ljust(STR_LEN_FLOAT),
                  str(self.VibModeID).ljust(STR_LEN_FLOAT),
                  str(round(self.Vib_cm1, INT_ROUND)).ljust(STR_LEN_FLOAT),
                  str(round(self.ElVibCoupl_cm1, INT_ROUND)).ljust(STR_LEN_FLOAT),
                  str(round(self.Vib_Decay_ps1, INT_ROUND)).ljust(STR_LEN_FLOAT))
        print("{} {} {} {} {}".format(*fields))
#-------------------------------------------------------------------------------
class ExciteStateInterface():
    """Interface class for the excited-state information of a fragment.

    Holds absorption/emission maxima (cm-1 and nm), the transition dipole
    moment, photophysical constants, and spectra read from input files.
    """
    def __init__(self, ExStID, Abs_cm1, x, y, z, Ems_cm1, El_Deph_Rate_ps1, Epsilon_M1cm1,Phi_D,FlLifetime_s, FlLifetime_sb_s):
        """Initialize with the excited state properties"""
        self.ExStID = ExStID #Excited state id (1-based index, read from an external file)
        self.Abs_cm1 = Abs_cm1 #Absorption maximum, cm-1
        # Guard against division by zero when no absorption maximum is given
        if (Abs_cm1 != 0 ):
            self.Abs_nm = CM1_NM/Abs_cm1 #Absorption maximum, nm (CM1_NM: cm-1 <-> nm conversion constant)
        else:
            self.Abs_nm = 0.0
        #Transition dipole moment components (x,y,z) a.u.
        self.x = x
        self.y = y
        self.z = z
        self.Ems_cm1 = Ems_cm1 #Emission maximum, cm-1
        # Same zero guard for the emission maximum
        if (Ems_cm1 != 0 ):
            self.Ems_nm = CM1_NM/Ems_cm1 #Emission maximum, nm
        else:
            self.Ems_nm = 0.0
        self.El_Deph_Rate_ps1 = El_Deph_Rate_ps1 #electronic dephasing rate, ps-1 (per name -- confirm units at source)
        self.Epsilon_M1cm1 = Epsilon_M1cm1 #Extinction coefficient, M-1 cm-1
        self.Phi_D = Phi_D #Fluorescence quantum yield
        self.FlLifetime_s = FlLifetime_s #fluorescence lifetime from input in s
        self.FlLifetime_sb_s = FlLifetime_sb_s #Strickler-Berg fluorescence lifetime in s
        # Spectra and integration limits are filled in later by external readers
        self.Abs_Spec = [] #Absorption spectrum from input
        self.Ems_Spec = [] #Emission spectrum from input
        self.Abs_Spec_nm = [] #Absorption spectrum from input (nm axis)
        self.Ems_Spec_nm = [] #Emission spectrum from input (nm axis)
        self.Abs_Int_Lim_Low_nm = 0.0 #Lower integration limit of absorption spec. in nm
        self.Abs_Int_Lim_Up_nm = 0.0 #Upper integration limit of absorption spec. in nm
        self.Ems_Int_Lim_Low_nm = 0.0 #Lower integration limit of emission spec. in nm
        self.Ems_Int_Lim_Up_nm = 0.0 #Upper integration limit of emission spec. in nm
        self.warning = "Warning: zero transition dipole moment!"
        return
    #-------------------------------------------------------------------------------
    def MyPrint(self,JobType=""):
        """Print excited state properites (does not print quantum dynamics parameters e.g. el. dephasing rate)"""
        # Magnitude of the transition dipole moment in a.u. and in Debye
        Norm = sqrt(self.x**2+self.y**2+self.z**2)
        NormD = Norm*TDM_auToDebye
        Format_CM1 = 2
        Format_nm = 1
        INTX_LEN = 5
        #Special print if calculation of lifetimes only
        if ( JobType == CFG_MET_ARX_LFT):
            print("{}\t{}\t{}\t{}\t{}".format(
                str(self.ExStID).ljust(INTX_LEN),
                str(round(self.Abs_nm,Format_nm)).ljust(STR_LEN_FLOAT),
                str(round(self.Ems_nm,Format_nm)).ljust(STR_LEN_FLOAT),
                str(round(self.Epsilon_M1cm1,Format_CM1)).ljust(STR_LEN_FLOAT),
                str(round(self.Phi_D,Format_CM1)).ljust(STR_LEN_FLOAT)))
        else:
            # Full line: ID, abs max, TDM components, |TDM| (a.u. and Debye), ems max
            print("{} {} ({} {} {}) {} {} {}".format( str(self.ExStID).ljust(INTX_LEN),
                str(round(self.Abs_nm,Format_nm)).ljust(STR_LEN_FLOAT),
                str(round(self.x,INT_ROUND)).ljust(STR_LEN_FLOAT),
                str(round(self.y,INT_ROUND)).ljust(STR_LEN_FLOAT),
                str(round(self.z,INT_ROUND)).ljust(STR_LEN_FLOAT),
                str(round(Norm,INT_ROUND)).ljust(STR_LEN_FLOAT),
                str(round(NormD,INT_ROUND)).ljust(STR_LEN_FLOAT),
                str(round(self.Ems_nm,Format_nm)).ljust(STR_LEN_FLOAT)), end=' ')
            # Append a warning instead of a newline when the TDM vanishes
            if ( Norm == 0.0):
                print(self.warning)
            else:
                print()
        return
    def MyTDMPrint(self):
        """Print excited state transition dipole momment"""
        Norm = sqrt(self.x**2+self.y**2+self.z**2)
        # No trailing newline (end=' ') so the caller can continue the line
        print("{} ({} {} {}) {}".format( str(self.ExStID).ljust(3),
            str(round(self.x,INT_ROUND)).ljust(STR_LEN_FLOAT),
            str(round(self.y,INT_ROUND)).ljust(STR_LEN_FLOAT),
            str(round(self.z,INT_ROUND)).ljust(STR_LEN_FLOAT), round(Norm,INT_ROUND)), end=' ')
        return
#-------------------------------------------------------------------------------
class CouplingInterface():
    """Interface class for the Forster coupling between two excited states."""
    def __init__(self, State1 = 0, State2 = 0, R = 0, AMuAMuD = 0, OriFact = 0, OriPercent = 0, K = 0, Coupl = 0, ElScreening = 0, ScreenedCoupl = 0):
        """Store the pairwise coupling quantities."""
        # Excited states under consideration
        self.State1 = State1
        self.State2 = State2
        self.R = R                          # interfragment distance, Angstrom
        self.OriFact = OriFact              # orientation factor, unitless, range -2..2
        self.OriPercent = OriPercent        # normalized orientation percent
        self.AMuAMuD = AMuAMuD              # |mu1|*|mu2| product of transition dipole moments, a.u.^2
        self.K = K                          # distance-independent factor: AMuAMuD times OriFact
        self.Coupl = Coupl                  # Forster coupling, Hartree
        self.ElScreening = ElScreening      # electrostatic screening factor
        self.ScreenedCoupl = ScreenedCoupl  # screened coupling, Hartree
#-------------------------------------------------------------------------------
class ResonanceInterface():
    """Interface class for the information about detected resonances (matching excitation energies of fragments)"""
    def __init__(self, Ecm1_1, Ecm1_2, Diff, Overlap, Flag):
        """Initialize with the resonance properties.

        Ecm1_1, Ecm1_2 -- excitation energies of the two states, cm-1
        Diff           -- difference of the excitation energies, cm-1
        Overlap        -- spectral overlap associated with the state pair
        Flag           -- True = resonance, False = no resonance
        """
        #Excitation energies of states under consideration in cm-1
        self.Ecm1_1 = Ecm1_1
        self.Ecm1_2 = Ecm1_2
        self.Diff = Diff #Difference of excitation energies in cm-1
        # Bug fix: 'Overlap' was accepted by the constructor but never stored,
        # silently discarding the value the caller passed in.
        self.Overlap = Overlap
        self.Flag = Flag # true=resonance, false:=no resonance
        return
#-------------------------------------------------------------------------------
class OverlapInterface():
    """Spectral-overlap record together with an availability flag."""
    def __init__(self, Overlap_M1cm1nm4 = 0.0, Flag = True):
        """Store the overlap value and whether overlaps are available."""
        self.Overlap_M1cm1nm4 = Overlap_M1cm1nm4  # spectral overlap, M-1 cm-1 nm^4
        self.Flag = Flag                          # True = overlaps are available
#-------------------------------------------------------------------------------
class RateInterface():
    """Forster energy-transfer rate plus its correction factors."""
    def __init__(self, Rate_s1 = 0.0, BoltzmannFactor = 0.0, AlphaCorrection=0.0):
        """Store the rate (s-1) and the Boltzmann/alpha correction factors."""
        self.Rate_s1 = Rate_s1                    # transfer rate, s-1
        self.BoltzmannFactor = BoltzmannFactor    # Boltzmann weighting factor
        self.AlphaCorrection = AlphaCorrection    # alpha correction factor
| [
"numpy.asarray"
] | [((1870, 1906), 'numpy.asarray', 'np.asarray', (['[self.x, self.y, self.z]'], {}), '([self.x, self.y, self.z])\n', (1880, 1906), True, 'import numpy as np\n')] |
import datajoint as dj
import numpy as np
from . import get_schema_name
# Declare the 'lab' database schema; every table class below is bound to it
# through the @schema decorator.  The actual schema name is resolved at
# import time by get_schema_name.
schema = dj.schema(get_schema_name('lab'))
@schema
class Person(dj.Manual):
    """Lab personnel; referenced by Subject and Surgery as the responsible person."""
    definition = """
    username : varchar(24)
    ----
    fullname : varchar(255)
    """
@schema
class Rig(dj.Manual):
    """Experimental rig and the room it is located in."""
    definition = """
    rig : varchar(24)
    ---
    room : varchar(20) # example 2w.342
    rig_description : varchar(1024)
    """
@schema
class AnimalStrain(dj.Lookup):
    """Lookup of known animal strain names (e.g. pl56, kj18)."""
    definition = """
    animal_strain : varchar(30)
    """
    # zip() wraps each strain name in a 1-tuple, the row format DataJoint
    # expects for a single-attribute lookup table.
    contents = zip(['pl56', 'kj18'])
@schema
class AnimalSource(dj.Lookup):
    """Lookup of vendors/institutions animals can be ordered from."""
    definition = """
    animal_source : varchar(30)
    """
    # Each source name becomes a 1-tuple row via zip().
    contents = zip(['Jackson Labs', 'Allen Institute', 'Charles River', 'MMRRC', 'Taconic', 'Other'])
@schema
class ModifiedGene(dj.Manual):
    """Catalog of gene modifications that subjects may carry."""
    definition = """
    gene_modification : varchar(60)
    ---
    gene_modification_description = '' : varchar(256)
    """
@schema
class Subject(dj.Manual):
    """Experimental animal subject with basic husbandry metadata."""
    definition = """
    subject_id : int # institution 6 digit animal ID
    ---
    -> [nullable] Person # person responsible for the animal
    cage_number : int # institution 6 digit animal ID
    date_of_birth : date # format: yyyy-mm-dd
    sex : enum('M','F','Unknown')
    -> [nullable] AnimalSource # where was the animal ordered from
    """
    class Strain(dj.Part):
        """Strain(s) assigned to a subject, one row per strain."""
        definition = """
        # Subject strains
        -> master
        -> AnimalStrain
        """
    class GeneModification(dj.Part):
        """Gene modification(s) carried by a subject, with zygosity and type."""
        # NOTE(review): this part references '-> Subject' while Strain uses
        # '-> master'; both resolve to the parent table, but the style differs.
        definition = """
        # Subject gene modifications
        -> Subject
        -> ModifiedGene
        ---
        zygosity = 'Unknown' : enum('Het', 'Hom', 'Unknown')
        type = 'Unknown' : enum('Knock-in', 'Transgene', 'Unknown')
        """
@schema
class CompleteGenotype(dj.Computed):
    """Full genotype string per subject; population logic is not yet implemented."""
    # should be computed
    definition = """
    -> Subject
    ---
    complete_genotype : varchar(1000)
    """
    def make(self, key):
        # Stub: until this is implemented the table stays empty.
        pass
@schema
class WaterRestriction(dj.Manual):
    """Water-restriction episode of a subject: WR number, cage, start date and weight."""
    definition = """
    -> Subject
    ---
    water_restriction_number : varchar(16) # WR number
    cage_number : int
    wr_start_date : date
    wr_start_weight : Decimal(6,3)
    """
@schema
class VirusSource(dj.Lookup):
    """Lookup of virus providers."""
    definition = """
    virus_source   : varchar(60)
    """
    contents = zip(['Janelia', 'UPenn', 'Addgene', 'UNC', 'Other'])
@schema
class Serotype(dj.Manual):
    """Virus serotype; entered manually (no fixed lookup contents)."""
    definition = """
    serotype   : varchar(60)
    """
@schema
class Virus(dj.Manual):
    """A virus order/batch, with free-form notes attached as a part table."""
    definition = """
    virus_id : int unsigned
    ---
    -> VirusSource
    -> Serotype
    -> Person
    virus_name : varchar(256)
    titer : Decimal(20,1) # 
    order_date : date
    remarks : varchar(256)
    """

    class Notes(dj.Part):
        """Free-form notes about the virus."""
        # Consistency fix: reference the master table via `-> master`, matching
        # the part-table idiom used throughout this module (Subject.Strain,
        # Surgery.VirusInjection, ...). `-> master` and `-> Virus` declare the
        # same foreign key in DataJoint.
        definition = """
        # Notes for virus
        -> master
        note_id : int
        ---
        note : varchar(256)
        """
@schema
class SkullReference(dj.Lookup):
    """Skull landmark used as stereotaxic reference (Bregma or Lambda)."""
    definition = """
    skull_reference   : varchar(60)
    """
    contents = zip(['Bregma', 'Lambda'])
@schema
class BrainArea(dj.Lookup):
    """Lookup of brain areas in lab terminology (not necessarily AIBS atlas names)."""
    definition = """
    brain_area: varchar(32)
    ---
    description = null : varchar (4000) # name of the brain area (lab terms, not necessarily in AIBS)
    """
    contents = [('ALM', 'anterior lateral motor cortex'),
                ('vS1', 'vibrissal primary somatosensory cortex ("barrel cortex")'),
                ('Thalamus', 'Thalamus'), ('Medulla', 'Medulla'),
                ('Striatum', 'Striatum'), ('Midbrain', 'Midbrain')]
@schema
class Hemisphere(dj.Lookup):
    """Lookup of brain hemispheres ('left', 'right', 'both')."""
    definition = """
    hemisphere: varchar(32)
    """
    contents = zip(['left', 'right', 'both'])
@schema
class Surgery(dj.Manual):
    """A surgery performed on a subject.

    Virus injections and other procedures performed during the surgery are
    recorded in the part tables below.
    """
    definition = """
    -> Subject
    surgery_id          : int      # surgery number
    ---
    -> Person
    start_time          : datetime # start time
    end_time            : datetime # end time
    surgery_description : varchar(256)
    """

    class VirusInjection(dj.Part):
        """A virus injection performed during this surgery."""
        definition = """
        # Virus injections
        -> master
        injection_id : int
        ---
        -> Virus
        -> SkullReference
        ap_location : Decimal(8,3) # um from ref anterior is positive
        ml_location : Decimal(8,3) # um from ref right is positive
        dv_location : Decimal(8,3) # um from dura dorsal is positive
        volume : Decimal(10,3) # in nl
        dilution : Decimal (10, 2) # 1 to how much
        description : varchar(256)
        """

    class Procedure(dj.Part):
        """Any other (non-injection) procedure performed during this surgery."""
        definition = """
        # Other things you did to the animal
        -> master
        procedure_id : int
        ---
        -> SkullReference
        ap_location=null : Decimal(8,3) # um from ref anterior is positive
        ml_location=null : Decimal(8,3) # um from ref right is positive
        dv_location=null : Decimal(8,3) # um from dura dorsal is positive
        surgery_procedure_description : varchar(1000)
        """
@schema
class SurgeryLocation(dj.Manual):
    """Hemisphere and brain area of a surgery procedure."""
    definition = """
    -> Surgery.Procedure
    ---
    -> Hemisphere
    -> BrainArea
    """
@schema
class ProbeType(dj.Lookup):
    """Probe model (e.g. Neuropixels 1.0/2.0) and its per-electrode geometry."""
    definition = """
    probe_type: varchar(32)  # e.g. neuropixels_1.0
    """

    class Electrode(dj.Part):
        """Per-electrode position within the probe; (0, 0) is the probe tip."""
        definition = """
        -> master
        electrode: int       # electrode index, starts at 0
        ---
        shank: int           # shank index, starts at 0, advance left to right
        shank_col: int       # column index, starts at 0, advance left to right
        shank_row: int       # row index, starts at 0, advance tip to tail
        x_coord=NULL: float  # (um) x coordinate of the electrode within the probe, (0, 0) is the tip of the probe
        y_coord=NULL: float  # (um) y coordinate of the electrode within the probe, (0, 0) is the tip of the probe
        z_coord=0: float     # (um) z coordinate of the electrode within the probe, (0, 0) is the tip of the probe
        """

    @property
    def contents(self):
        # Known probe types; DataJoint expects an iterable of primary-key tuples.
        return zip(['silicon_probe', 'tetrode_array',
                    'neuropixels 1.0 - 3A', 'neuropixels 1.0 - 3B',
                    'neuropixels 2.0 - SS', 'neuropixels 2.0 - MS'])

    @staticmethod
    def create_neuropixels_probe(probe_type='neuropixels 1.0 - 3A'):
        """
        Create `ProbeType` and `Electrode` for neuropixels probe 1.0 (3A and 3B), 2.0 (SS and MS)
        For electrode location, the (0, 0) is the bottom left corner of the probe (ignore the tip portion)
        Electrode numbering is 1-indexing

        Unknown `probe_type` values are a silent no-op (matching the original
        behavior of the copy-pasted if-chain this replaces).
        """

        def build_electrodes(site_count, col_spacing, row_spacing, white_spacing, col_count=2,
                             shank_count=1, shank_spacing=250):
            """
            :param site_count: site count per shank
            :param col_spacing: (um) horrizontal spacing between sites
            :param row_spacing: (um) vertical spacing between columns
            :param white_spacing: (um) offset spacing
            :param col_count: number of column per shank
            :param shank_count: number of shank
            :param shank_spacing: spacing between shanks
            :return: list of electrode dicts (1-indexed electrode/shank/col/row)
            """
            row_count = int(site_count / col_count)
            # Two columns per row; every other row pair is offset horizontally
            # by `white_spacing` (staggered layout). Assumes row_count is even.
            x_coords = np.tile([0, 0 + col_spacing], row_count)
            x_white_spaces = np.tile([white_spacing, white_spacing, 0, 0], int(row_count / 2))
            x_coords = x_coords + x_white_spaces
            y_coords = np.repeat(np.arange(row_count) * row_spacing, 2)
            shank_cols = np.tile([0, 1], row_count)
            shank_rows = np.repeat(range(row_count), 2)

            npx_electrodes = []
            for shank_no in range(shank_count):
                npx_electrodes.extend([{'electrode': (site_count * shank_no) + e_id + 1,  # electrode number is 1-based index
                                        'shank': shank_no + 1,      # shank number is 1-based index
                                        'shank_col': c_id + 1,      # column number is 1-based index
                                        'shank_row': r_id + 1,      # row number is 1-based index
                                        'x_coord': x + (shank_no * shank_spacing),
                                        'y_coord': y,
                                        'z_coord': 0} for e_id, (c_id, r_id, x, y) in enumerate(
                    zip(shank_cols, shank_rows, x_coords, y_coords))])
            return npx_electrodes

        # Geometry parameters per supported probe model. This replaces four
        # near-identical if-branches; the original also rebound the `probe_type`
        # string parameter to a dict mid-chain, making the later `if` tests
        # compare a dict against a string.
        npx_probe_geometry = {
            'neuropixels 1.0 - 3A': dict(site_count=960, col_spacing=32, row_spacing=20,
                                         white_spacing=16, col_count=2,
                                         shank_count=1, shank_spacing=250),
            'neuropixels 1.0 - 3B': dict(site_count=960, col_spacing=32, row_spacing=20,
                                         white_spacing=16, col_count=2,
                                         shank_count=1, shank_spacing=250),
            'neuropixels 2.0 - SS': dict(site_count=1280, col_spacing=32, row_spacing=15,
                                         white_spacing=0, col_count=2,
                                         shank_count=1, shank_spacing=250),
            'neuropixels 2.0 - MS': dict(site_count=1280, col_spacing=32, row_spacing=15,
                                         white_spacing=0, col_count=2,
                                         shank_count=4, shank_spacing=250)}

        if probe_type not in npx_probe_geometry:
            return  # unknown probe type: do nothing (original behavior)

        electrodes = build_electrodes(**npx_probe_geometry[probe_type])
        probe_type_key = {'probe_type': probe_type}
        # Insert the probe type and its electrodes atomically.
        with ProbeType.connection.transaction:
            ProbeType.insert1(probe_type_key, skip_duplicates=True)
            ProbeType.Electrode.insert([{**probe_type_key, **e} for e in electrodes],
                                       skip_duplicates=True)
@schema
class ElectrodeConfig(dj.Lookup):
    """A named selection of a probe type's electrodes used for recording."""
    definition = """
    -> ProbeType
    electrode_config_name: varchar(64)  # user friendly name
    ---
    electrode_config_hash: varchar(36)  # hash of the group and group_member (ensure uniqueness)
    unique index (electrode_config_hash)
    """

    class ElectrodeGroup(dj.Part):
        """Group of electrodes to be clustered together."""
        definition = """
        # grouping of electrodes to be clustered together (e.g. a neuropixel electrode config - 384/960)
        -> master
        electrode_group: int  # electrode group
        """

    class Electrode(dj.Part):
        """Membership of a ProbeType electrode in an electrode group."""
        definition = """
        -> master.ElectrodeGroup
        -> ProbeType.Electrode
        ---
        is_used: bool  # is this channel used for spatial average (ref channels are by default not used)
        """
@schema
class Probe(dj.Lookup):
    """A physical probe, identified e.g. by its part number, of a given ProbeType."""
    definition = """ # represent a physical probe
    probe: varchar(32)  # unique identifier for this model of probe (e.g. part number)
    ---
    -> ProbeType
    probe_comment='' :  varchar(1000)
    """
@schema
class PhotostimDevice(dj.Lookup):
    """Photostimulation light source and its excitation wavelength (nm)."""
    definition = """
    photostim_device  : varchar(20)
    ---
    excitation_wavelength :  decimal(5,1)  # (nm)
    photostim_device_description : varchar(255)
    """
    # NOTE(review): 'OBIS470' is listed at 473 nm and its description says
    # "OBIS 473nm" — the name/wavelength mismatch looks intentional but should
    # be confirmed against the hardware inventory.
    contents =[
       ('LaserGem473', 473, 'Laser (Laser Quantum, Gem 473)'),
       ('LED470', 470, 'LED (Thor Labs, M470F3 - 470 nm, 17.2 mW (Min) Fiber-Coupled LED)'),
       ('OBIS470', 473, 'OBIS 473nm LX 50mW Laser System: Fiber Pigtail (Coherent Inc)')]
| [
"numpy.arange",
"numpy.tile"
] | [((7464, 7504), 'numpy.tile', 'np.tile', (['[0, 0 + col_spacing]', 'row_count'], {}), '([0, 0 + col_spacing], row_count)\n', (7471, 7504), True, 'import numpy as np\n'), ((7748, 7774), 'numpy.tile', 'np.tile', (['[0, 1]', 'row_count'], {}), '([0, 1], row_count)\n', (7755, 7774), True, 'import numpy as np\n'), ((7683, 7703), 'numpy.arange', 'np.arange', (['row_count'], {}), '(row_count)\n', (7692, 7703), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.