code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/bin/python3
"""
This script simulates the Diffusion on a fine grid (N1) and on a coarse grid (N2).
The IC on the coarse grid is the interpolated IC of the fine grid. Then we plot
the Diffusion of the fine grid, the Diffusion on the coarse grid, and the difference between
Diffusion of fine grid interpolated on coarse grid vs Diffusion on coarse grid.
"""
# Discretization fine grid (DNS)
N1 = 1024
# Discretization coarse grid
N2 = 32
import matplotlib
matplotlib.use('Agg')  # headless backend: figures are only written to disk, never shown
import matplotlib.pyplot as plt
import sys
import argparse
sys.path.append('./../../_model/')
from scipy import interpolate
import numpy as np
from Diffusion import *
#------------------------------------------------------------------------------
## set parameters and initialize simulation
L = 2*np.pi
dt = 0.001
tEnd = 5
nu = 0.01
ic = 'box'
# fine-grid reference ("direct numerical simulation") run
dns = Diffusion(L=L, N=N1, dt=dt, nu=nu, tend=tEnd)
dns.IC(case=ic)
print("simulate dns..")
## simulate
dns.simulate()
# convert to physical space
dns.fou2real()
## for plotting (fine-grid reference solution, time axis, space axis)
uTruth = dns.uu
tTruth = dns.tt
xTruth = dns.x
#------------------------------------------------------------------------------
## restart
# take the fine-grid initial condition and build a 1D interpolant over x
u_restart = dns.uu[0,:].copy()
f_restart = interpolate.interp1d(xTruth, u_restart)
# restart from coarse physical space
subgrid = Diffusion(L=L, N=N2, dt=dt, nu=nu, tend=tEnd)
subgrid.IC(case=ic)
subgrid.setGroundTruth(tTruth, xTruth, uTruth)
# create interpolated IC: evaluate the fine-grid interpolant on the coarse grid
xCoarse = subgrid.x
uRestartCoarse = f_restart(xCoarse)
subgrid.IC( u0 = uRestartCoarse)
# continue simulation
print("simulate sgs..")
subgrid.simulate( nsteps=int(tEnd/dt), restart=True )
# convert to physical space
subgrid.fou2real()
# get solution
uCoarse = subgrid.uu
# eval truth on coarse grid
uTruthToCoarse = subgrid.mapGroundTruth()
#------------------------------------------------------------------------------
## plot comparison: fine solution | coarse solution | absolute difference
print("plotting..")
fig, axs = plt.subplots(1,3)
cs0 = axs[0].contourf(dns.x, dns.tt, uTruth, 50, cmap=plt.get_cmap("seismic"))
cs1 = axs[1].contourf(subgrid.x, subgrid.tt, uCoarse, 50, cmap=plt.get_cmap("seismic"))
diff = np.abs(uCoarse-uTruthToCoarse)
cs2 = axs[2].contourf(subgrid.x, subgrid.tt, diff, 50, cmap=plt.get_cmap("seismic"))
# plt.colorbar(cs0, ax=axs[0])
plt.colorbar(cs1, ax=axs[1])
plt.colorbar(cs2, ax=axs[2])
plt.setp(axs[:], xlabel='$x$')
plt.setp(axs[0], ylabel='$t$')
# for c in cs.collections: c.set_rasterized(True)
axs[1].set_yticklabels([])
axs[2].set_yticklabels([])
fig.savefig('interpolate.png')
| [
"sys.path.append",
"numpy.abs",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.colorbar",
"matplotlib.use",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.subplots"
] | [((462, 483), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (476, 483), False, 'import matplotlib\n'), ((544, 578), 'sys.path.append', 'sys.path.append', (['"""./../../_model/"""'], {}), "('./../../_model/')\n", (559, 578), False, 'import sys\n'), ((1202, 1241), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['xTruth', 'u_restart'], {}), '(xTruth, u_restart)\n', (1222, 1241), False, 'from scipy import interpolate\n'), ((1906, 1924), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (1918, 1924), True, 'import matplotlib.pyplot as plt\n'), ((2098, 2130), 'numpy.abs', 'np.abs', (['(uCoarse - uTruthToCoarse)'], {}), '(uCoarse - uTruthToCoarse)\n', (2104, 2130), True, 'import numpy as np\n'), ((2246, 2274), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cs1'], {'ax': 'axs[1]'}), '(cs1, ax=axs[1])\n', (2258, 2274), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2303), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cs2'], {'ax': 'axs[2]'}), '(cs2, ax=axs[2])\n', (2287, 2303), True, 'import matplotlib.pyplot as plt\n'), ((2304, 2334), 'matplotlib.pyplot.setp', 'plt.setp', (['axs[:]'], {'xlabel': '"""$x$"""'}), "(axs[:], xlabel='$x$')\n", (2312, 2334), True, 'import matplotlib.pyplot as plt\n'), ((2335, 2365), 'matplotlib.pyplot.setp', 'plt.setp', (['axs[0]'], {'ylabel': '"""$t$"""'}), "(axs[0], ylabel='$t$')\n", (2343, 2365), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2001), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""seismic"""'], {}), "('seismic')\n", (1990, 2001), True, 'import matplotlib.pyplot as plt\n'), ((2066, 2089), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""seismic"""'], {}), "('seismic')\n", (2078, 2089), True, 'import matplotlib.pyplot as plt\n'), ((2189, 2212), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""seismic"""'], {}), "('seismic')\n", (2201, 2212), True, 'import matplotlib.pyplot as plt\n')] |
# from here: https://github.com/Sentdex/cyberpython2077/blob/main/grabscreen.py
# Done by Frannecklp
import cv2
import numpy as np
import win32gui, win32ui, win32con, win32api
def grab_screen(region=None):
    """Capture the screen (or a sub-region of it) into an RGB numpy array.

    Parameters
    ----------
    region : tuple, optional
        (left, top, x2, y2) bounding box in screen coordinates. When omitted,
        the full virtual screen (all monitors) is captured.

    Returns
    -------
    numpy.ndarray
        Image of shape (height, width, 3), channels in RGB order.
    """
    hwin = win32gui.GetDesktopWindow()

    if region:
        left, top, x2, y2 = region
        width = x2 - left
        height = y2 - top
    else:
        # full virtual screen dimensions (covers multi-monitor setups)
        width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
        left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)

    # blit the desktop into an in-memory bitmap
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(srcdc, width, height)
    memdc.SelectObject(bmp)
    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)

    signedIntsArray = bmp.GetBitmapBits(True)
    # BUG FIX: np.fromstring is deprecated (and rejects binary input in recent
    # numpy releases); np.frombuffer is the supported replacement. It returns
    # a read-only view over the bytes, so copy before reshaping in place.
    img = np.frombuffer(signedIntsArray, dtype='uint8').copy()
    img.shape = (height, width, 4)

    # release GDI resources to avoid handle leaks
    srcdc.DeleteDC()
    memdc.DeleteDC()
    win32gui.ReleaseDC(hwin, hwindc)
    win32gui.DeleteObject(bmp.GetHandle())

    return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
"win32gui.GetDesktopWindow",
"win32gui.ReleaseDC",
"cv2.cvtColor",
"win32gui.GetWindowDC",
"win32ui.CreateBitmap",
"win32ui.CreateDCFromHandle",
"numpy.fromstring",
"win32api.GetSystemMetrics"
] | [((220, 247), 'win32gui.GetDesktopWindow', 'win32gui.GetDesktopWindow', ([], {}), '()\n', (245, 247), False, 'import win32gui, win32ui, win32con, win32api\n'), ((655, 681), 'win32gui.GetWindowDC', 'win32gui.GetWindowDC', (['hwin'], {}), '(hwin)\n', (675, 681), False, 'import win32gui, win32ui, win32con, win32api\n'), ((694, 728), 'win32ui.CreateDCFromHandle', 'win32ui.CreateDCFromHandle', (['hwindc'], {}), '(hwindc)\n', (720, 728), False, 'import win32gui, win32ui, win32con, win32api\n'), ((778, 800), 'win32ui.CreateBitmap', 'win32ui.CreateBitmap', ([], {}), '()\n', (798, 800), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1019, 1064), 'numpy.fromstring', 'np.fromstring', (['signedIntsArray'], {'dtype': '"""uint8"""'}), "(signedIntsArray, dtype='uint8')\n", (1032, 1064), True, 'import numpy as np\n'), ((1147, 1179), 'win32gui.ReleaseDC', 'win32gui.ReleaseDC', (['hwin', 'hwindc'], {}), '(hwin, hwindc)\n', (1165, 1179), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1235, 1272), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGRA2RGB'], {}), '(img, cv2.COLOR_BGRA2RGB)\n', (1247, 1272), False, 'import cv2\n'), ((377, 431), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_CXVIRTUALSCREEN'], {}), '(win32con.SM_CXVIRTUALSCREEN)\n', (402, 431), False, 'import win32gui, win32ui, win32con, win32api\n'), ((449, 503), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_CYVIRTUALSCREEN'], {}), '(win32con.SM_CYVIRTUALSCREEN)\n', (474, 503), False, 'import win32gui, win32ui, win32con, win32api\n'), ((519, 572), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_XVIRTUALSCREEN'], {}), '(win32con.SM_XVIRTUALSCREEN)\n', (544, 572), False, 'import win32gui, win32ui, win32con, win32api\n'), ((587, 640), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_YVIRTUALSCREEN'], {}), '(win32con.SM_YVIRTUALSCREEN)\n', (612, 640), False, 'import win32gui, win32ui, 
win32con, win32api\n')] |
import argparse
import collections
import numpy as np
import utils
from newsroom.analyze.rouge import ROUGE_L, ROUGE_N
def print_mean(results, rouge_types):
    """Print the rounded mean precision/recall/F-score for each ROUGE type."""
    for rt in rouge_types:
        scores = results[rt]
        p, r, f = (round(np.mean(scores[key]), 3) for key in ('p', 'r', 'f'))
        print(rt, 'p:', p, 'r:', r, 'f:', f)
def evaluate(ref_summaries, pred_summaries, lowercase=False):
    """Score predicted summaries against references with ROUGE-1/2/L.

    Returns a dict mapping each ROUGE type to its mean precision ('p'),
    recall ('r') and F-score ('f'), each rounded to 3 decimals.
    """
    rouge_types = ['rouge-1', 'rouge-2', 'rouge-l']
    results = {rt: collections.defaultdict(list) for rt in rouge_types}
    for ref, pred in zip(ref_summaries, pred_summaries):
        if lowercase:
            pred, ref = pred.lower(), ref.lower()
        # one score object per ROUGE variant, in the same order as rouge_types
        all_scores = [ROUGE_N(ref, pred, n=1),
                      ROUGE_N(ref, pred, n=2),
                      ROUGE_L(ref, pred)]
        for rt, scores in zip(rouge_types, all_scores):
            results[rt]['p'].append(scores.precision)
            results[rt]['r'].append(scores.recall)
            results[rt]['f'].append(scores.fscore)
    return {rt: {key: round(np.mean(results[rt][key]), 3)
                 for key in ('p', 'r', 'f')}
            for rt in rouge_types}
def evaluate_from_path(dataset_path, pred_path, start, stop, lowercase=False):
    """Stream clusters and predictions from jsonl files and report mean ROUGE.

    `start`/`stop` (use -1 to disable) restrict scoring to cluster indices in
    [start, stop). Prints progress every 100 clusters and a final average;
    returns the per-type lists of precision/recall/F scores.
    """
    dataset = utils.read_jsonl(dataset_path)
    predictions = utils.read_jsonl(pred_path)
    rouge_types = ['rouge-1', 'rouge-2', 'rouge-l']
    results = {rt: collections.defaultdict(list) for rt in rouge_types}
    for i, cluster in enumerate(dataset):
        if start > -1 and i < start:
            continue
        if stop > -1 and i >= stop:
            break
        prediction = next(predictions)
        # predictions must be aligned 1:1 with the dataset clusters
        assert prediction['cluster_id'] == cluster['id']
        hyp, ref = prediction['summary'], cluster['summary']
        if lowercase:
            hyp, ref = hyp.lower(), ref.lower()
        all_scores = [ROUGE_N(ref, hyp, n=1),
                      ROUGE_N(ref, hyp, n=2),
                      ROUGE_L(ref, hyp)]
        for rt, scores in zip(rouge_types, all_scores):
            results[rt]['p'].append(scores.precision)
            results[rt]['r'].append(scores.recall)
            results[rt]['f'].append(scores.fscore)
        if i % 100 == 0:
            print(i)
    print('Final Average:')
    print_mean(results, rouge_types)
    return results
def main(args):
    """Run ROUGE evaluation from file paths given parsed CLI arguments.

    BUG FIX: the original called evaluate(args.dataset, args.preds,
    args.start, args.stop, args.lowercase), but evaluate() takes only
    (ref_summaries, pred_summaries, lowercase) — 5 positional arguments
    would raise a TypeError. The CLI provides file paths plus start/stop,
    which matches evaluate_from_path()'s signature.
    """
    results = evaluate_from_path(args.dataset, args.preds, args.start,
                                 args.stop, args.lowercase)
    utils.write_json(results, args.o)
if __name__ == '__main__':
    # Command-line interface: dataset/prediction paths, output path,
    # optional index window and lowercasing.
    cli = argparse.ArgumentParser()
    for name in ('--dataset', '--preds', '--o'):
        cli.add_argument(name)
    for name in ('--start', '--stop'):
        cli.add_argument(name, type=int, default=-1)
    cli.add_argument('--lowercase', action='store_true')
    main(cli.parse_args())
| [
"argparse.ArgumentParser",
"newsroom.analyze.rouge.ROUGE_L",
"collections.defaultdict",
"numpy.mean",
"newsroom.analyze.rouge.ROUGE_N",
"utils.write_json",
"utils.read_jsonl"
] | [((1704, 1734), 'utils.read_jsonl', 'utils.read_jsonl', (['dataset_path'], {}), '(dataset_path)\n', (1720, 1734), False, 'import utils\n'), ((1753, 1780), 'utils.read_jsonl', 'utils.read_jsonl', (['pred_path'], {}), '(pred_path)\n', (1769, 1780), False, 'import utils\n'), ((3017, 3050), 'utils.write_json', 'utils.write_json', (['results', 'args.o'], {}), '(results, args.o)\n', (3033, 3050), False, 'import utils\n'), ((3093, 3118), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3116, 3118), False, 'import argparse\n'), ((876, 899), 'newsroom.analyze.rouge.ROUGE_N', 'ROUGE_N', (['ref', 'pred'], {'n': '(1)'}), '(ref, pred, n=1)\n', (883, 899), False, 'from newsroom.analyze.rouge import ROUGE_L, ROUGE_N\n'), ((913, 936), 'newsroom.analyze.rouge.ROUGE_N', 'ROUGE_N', (['ref', 'pred'], {'n': '(2)'}), '(ref, pred, n=2)\n', (920, 936), False, 'from newsroom.analyze.rouge import ROUGE_L, ROUGE_N\n'), ((950, 968), 'newsroom.analyze.rouge.ROUGE_L', 'ROUGE_L', (['ref', 'pred'], {}), '(ref, pred)\n', (957, 968), False, 'from newsroom.analyze.rouge import ROUGE_L, ROUGE_N\n'), ((2366, 2388), 'newsroom.analyze.rouge.ROUGE_N', 'ROUGE_N', (['ref', 'hyp'], {'n': '(1)'}), '(ref, hyp, n=1)\n', (2373, 2388), False, 'from newsroom.analyze.rouge import ROUGE_L, ROUGE_N\n'), ((2402, 2424), 'newsroom.analyze.rouge.ROUGE_N', 'ROUGE_N', (['ref', 'hyp'], {'n': '(2)'}), '(ref, hyp, n=2)\n', (2409, 2424), False, 'from newsroom.analyze.rouge import ROUGE_L, ROUGE_N\n'), ((2438, 2455), 'newsroom.analyze.rouge.ROUGE_L', 'ROUGE_L', (['ref', 'hyp'], {}), '(ref, hyp)\n', (2445, 2455), False, 'from newsroom.analyze.rouge import ROUGE_L, ROUGE_N\n'), ((339, 353), 'numpy.mean', 'np.mean', (['precs'], {}), '(precs)\n', (346, 353), True, 'import numpy as np\n'), ((376, 392), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (383, 392), True, 'import numpy as np\n'), ((415, 431), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (422, 431), True, 'import numpy 
as np\n'), ((638, 667), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (661, 667), False, 'import collections\n'), ((1464, 1478), 'numpy.mean', 'np.mean', (['precs'], {}), '(precs)\n', (1471, 1478), True, 'import numpy as np\n'), ((1507, 1523), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (1514, 1523), True, 'import numpy as np\n'), ((1552, 1568), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (1559, 1568), True, 'import numpy as np\n'), ((1866, 1895), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1889, 1895), False, 'import collections\n')] |
import numpy
import torch
import torch.nn.functional as F
from babyai.rl.algos.base import BaseAlgo
class PPOAlgo(BaseAlgo):
    """The class for the Proximal Policy Optimization algorithm
    ([Schulman et al., 2017](https://arxiv.org/abs/1707.06347))."""

    def __init__(self, envs, acmodel, num_frames_per_proc=None, discount=0.99, lr=7e-4, beta1=0.9, beta2=0.999,
                 gae_lambda=0.95,
                 entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, recurrence=4,
                 adam_eps=1e-5, clip_eps=0.2, epochs=4, batch_size=256, preprocess_obss=None,
                 reshape_reward=None, aux_info=None, reward_fn='babyai'):
        # default rollout length per process
        num_frames_per_proc = num_frames_per_proc or 128

        super().__init__(envs, acmodel, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,
                         value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward,
                         aux_info, reward_fn)

        self.clip_eps = clip_eps  # PPO clipping range epsilon
        self.epochs = epochs  # optimization epochs per collected batch
        self.batch_size = batch_size

        # each batch is partitioned into sub-batches of length `recurrence`
        assert self.batch_size % self.recurrence == 0

        self.optimizer = torch.optim.Adam(self.acmodel.parameters(), lr, (beta1, beta2), eps=adam_eps)
        self.batch_num = 0

    def update_parameters(self):
        """Collect experiences and run `epochs` passes of clipped-PPO updates.

        Returns the logs dict from collect_experiences(), augmented with the
        mean entropy, value, policy loss, value loss, grad norm and total loss
        over all updates.
        """
        # Collect experiences

        exps, logs = self.collect_experiences()
        '''
        exps is a DictList with the following keys ['obs', 'memory', 'mask', 'action', 'value', 'reward',
        'advantage', 'returnn', 'log_prob'] and ['collected_info', 'extra_predictions'] if we use aux_info
        exps.obs is a DictList with the following keys ['image', 'instr']
        exps.obj.image is a (n_procs * n_frames_per_proc) x image_size 4D tensor
        exps.obs.instr is a (n_procs * n_frames_per_proc) x (max number of words in an instruction) 2D tensor
        exps.memory is a (n_procs * n_frames_per_proc) x (memory_size = 2*image_embedding_size) 2D tensor
        exps.mask is (n_procs * n_frames_per_proc) x 1 2D tensor
        if we use aux_info: exps.collected_info and exps.extra_predictions are DictLists with keys
        being the added information. They are either (n_procs * n_frames_per_proc) 1D tensors or
        (n_procs * n_frames_per_proc) x k 2D tensors where k is the number of classes for multiclass classification
        '''

        for _ in range(self.epochs):
            # Initialize log values

            log_entropies = []
            log_values = []
            log_policy_losses = []
            log_value_losses = []
            log_grad_norms = []

            log_losses = []

            '''
            For each epoch, we create int(total_frames / batch_size + 1) batches, each of size batch_size (except
            maybe the last one. Each batch is divided into sub-batches of size recurrence (frames are contiguous in
            a sub-batch), but the position of each sub-batch in a batch and the position of each batch in the whole
            list of frames is random thanks to self._get_batches_starting_indexes().
            '''

            for inds in self._get_batches_starting_indexes():
                # inds is a numpy array of indices that correspond to the beginning of a sub-batch
                # there are as many inds as there are batches
                # Initialize batch values

                batch_entropy = 0
                batch_value = 0
                batch_policy_loss = 0
                batch_value_loss = 0
                batch_loss = 0

                # Initialize memory

                memory = exps.memory[inds]

                # unroll the recurrent policy over `recurrence` consecutive frames
                for i in range(self.recurrence):
                    # Create a sub-batch of experience

                    sb = exps[inds + i]

                    # Compute loss

                    model_results = self.acmodel(sb.obs, memory * sb.mask)
                    dist = model_results['dist']
                    value = model_results['value']
                    memory = model_results['memory']
                    extra_predictions = model_results['extra_predictions']

                    entropy = dist.entropy().mean()

                    # clipped surrogate objective on the probability ratio
                    ratio = torch.exp(dist.log_prob(sb.action) - sb.log_prob)
                    surr1 = ratio * sb.advantage
                    surr2 = torch.clamp(ratio, 1.0 - self.clip_eps, 1.0 + self.clip_eps) * sb.advantage
                    policy_loss = -torch.min(surr1, surr2).mean()

                    # clipped value loss (pessimistic max of the two surrogates)
                    value_clipped = sb.value + torch.clamp(value - sb.value, -self.clip_eps, self.clip_eps)
                    surr1 = (value - sb.returnn).pow(2)
                    surr2 = (value_clipped - sb.returnn).pow(2)
                    value_loss = torch.max(surr1, surr2).mean()

                    loss = policy_loss - self.entropy_coef * entropy + self.value_loss_coef * value_loss

                    # Update batch values

                    batch_entropy += entropy.item()
                    batch_value += value.mean().item()
                    batch_policy_loss += policy_loss.item()
                    batch_value_loss += value_loss.item()
                    batch_loss += loss

                    # Update memories for next epoch
                    # (detach so the next epoch does not backprop through this one)

                    if i < self.recurrence - 1:
                        exps.memory[inds + i + 1] = memory.detach()

                # Update batch values (average over the recurrence steps)

                batch_entropy /= self.recurrence
                batch_value /= self.recurrence
                batch_policy_loss /= self.recurrence
                batch_value_loss /= self.recurrence
                batch_loss /= self.recurrence

                # Update actor-critic

                self.optimizer.zero_grad()
                batch_loss.backward()
                # global gradient L2 norm, logged before clipping
                grad_norm = sum(p.grad.data.norm(2) ** 2 for p in self.acmodel.parameters() if p.grad is not None) ** 0.5
                torch.nn.utils.clip_grad_norm_(self.acmodel.parameters(), self.max_grad_norm)
                self.optimizer.step()

                # Update log values

                log_entropies.append(batch_entropy)
                log_values.append(batch_value)
                log_policy_losses.append(batch_policy_loss)
                log_value_losses.append(batch_value_loss)
                log_grad_norms.append(grad_norm.item())
                log_losses.append(batch_loss.item())

        # Log some values (averaged over all batches of all epochs)

        logs["entropy"] = numpy.mean(log_entropies)
        logs["value"] = numpy.mean(log_values)
        logs["policy_loss"] = numpy.mean(log_policy_losses)
        logs["value_loss"] = numpy.mean(log_value_losses)
        logs["grad_norm"] = numpy.mean(log_grad_norms)
        logs["loss"] = numpy.mean(log_losses)

        return logs

    def _get_batches_starting_indexes(self):
        """Gives, for each batch, the indexes of the observations given to
        the model and the experiences used to compute the loss at first.

        Returns
        -------
        batches_starting_indexes : list of list of int
            the indexes of the experiences to be used at first for each batch
        """
        # one candidate start index per sub-batch of `recurrence` frames,
        # shuffled, then grouped into batches of batch_size // recurrence
        indexes = numpy.arange(0, self.num_frames, self.recurrence)
        indexes = numpy.random.permutation(indexes)

        num_indexes = self.batch_size // self.recurrence
        batches_starting_indexes = [indexes[i:i + num_indexes] for i in range(0, len(indexes), num_indexes)]

        return batches_starting_indexes
| [
"numpy.mean",
"torch.max",
"numpy.arange",
"torch.clamp",
"numpy.random.permutation",
"torch.min"
] | [((6400, 6425), 'numpy.mean', 'numpy.mean', (['log_entropies'], {}), '(log_entropies)\n', (6410, 6425), False, 'import numpy\n'), ((6450, 6472), 'numpy.mean', 'numpy.mean', (['log_values'], {}), '(log_values)\n', (6460, 6472), False, 'import numpy\n'), ((6503, 6532), 'numpy.mean', 'numpy.mean', (['log_policy_losses'], {}), '(log_policy_losses)\n', (6513, 6532), False, 'import numpy\n'), ((6562, 6590), 'numpy.mean', 'numpy.mean', (['log_value_losses'], {}), '(log_value_losses)\n', (6572, 6590), False, 'import numpy\n'), ((6619, 6645), 'numpy.mean', 'numpy.mean', (['log_grad_norms'], {}), '(log_grad_norms)\n', (6629, 6645), False, 'import numpy\n'), ((6669, 6691), 'numpy.mean', 'numpy.mean', (['log_losses'], {}), '(log_losses)\n', (6679, 6691), False, 'import numpy\n'), ((7104, 7153), 'numpy.arange', 'numpy.arange', (['(0)', 'self.num_frames', 'self.recurrence'], {}), '(0, self.num_frames, self.recurrence)\n', (7116, 7153), False, 'import numpy\n'), ((7172, 7205), 'numpy.random.permutation', 'numpy.random.permutation', (['indexes'], {}), '(indexes)\n', (7196, 7205), False, 'import numpy\n'), ((4300, 4360), 'torch.clamp', 'torch.clamp', (['ratio', '(1.0 - self.clip_eps)', '(1.0 + self.clip_eps)'], {}), '(ratio, 1.0 - self.clip_eps, 1.0 + self.clip_eps)\n', (4311, 4360), False, 'import torch\n'), ((4490, 4550), 'torch.clamp', 'torch.clamp', (['(value - sb.value)', '(-self.clip_eps)', 'self.clip_eps'], {}), '(value - sb.value, -self.clip_eps, self.clip_eps)\n', (4501, 4550), False, 'import torch\n'), ((4704, 4727), 'torch.max', 'torch.max', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (4713, 4727), False, 'import torch\n'), ((4411, 4434), 'torch.min', 'torch.min', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (4420, 4434), False, 'import torch\n')] |
"""Test script for single agent problems.
This scripts runs the best model found by one of the executions of `singleagent.py`
Example
-------
To run the script, type in a terminal:
$ python test_singleagent.py --exp ./results/save-<env>-<algo>-<obs>-<act>-<time_date>
"""
import sys
sys.path.append('/home/mullin/WorkSpace/gym-pybullet-drones-master/gym_pybullet_drones/envs/single_agent_rl/FlyToTarget.py')
#from FlyToTarget import FlyToTarget
#import FlyToTarget
import os
import time
from datetime import datetime
import argparse
import re
import numpy as np
import gym
import torch
from stable_baselines3.common.env_checker import check_env
from stable_baselines3 import A2C
from stable_baselines3 import PPO
from stable_baselines3 import SAC
from stable_baselines3 import TD3
from stable_baselines3 import DDPG
from stable_baselines3.common.policies import ActorCriticPolicy as a2cppoMlpPolicy
from stable_baselines3.common.policies import ActorCriticCnnPolicy as a2cppoCnnPolicy
from stable_baselines3.sac.policies import SACPolicy as sacMlpPolicy
from stable_baselines3.sac import CnnPolicy as sacCnnPolicy
from stable_baselines3.td3 import MlpPolicy as td3ddpgMlpPolicy
from stable_baselines3.td3 import CnnPolicy as td3ddpgCnnPolicy
from stable_baselines3.common.evaluation import evaluate_policy
from gym_pybullet_drones.utils.utils import sync
from gym_pybullet_drones.utils.Logger import Logger
from gym_pybullet_drones.envs.single_agent_rl.TakeoffAviary import TakeoffAviary
from gym_pybullet_drones.envs.single_agent_rl.HoverAviary import HoverAviary
from gym_pybullet_drones.envs.single_agent_rl.FlyThruGateAviary import FlyThruGateAviary
from gym_pybullet_drones.envs.single_agent_rl.TuneAviary import TuneAviary
from gym_pybullet_drones.envs.single_agent_rl.FlyToTarget import FlyToTarget
from gym_pybullet_drones.envs.single_agent_rl.BaseSingleAgentAviary import ActionType, ObservationType
import shared_constants
if __name__ == "__main__":
    #### Define and parse (optional) arguments for the script ##
    parser = argparse.ArgumentParser(description='Single agent reinforcement learning example script using TakeoffAviary')
    parser.add_argument('--exp', type=str, help='The experiment folder written as ./results/save-<env>-<algo>-<obs>-<act>-<time_date>', metavar='')
    ARGS = parser.parse_args()

    #### Load the model from file ##############################
    # the folder name encodes the run: save-<env>-<algo>-<obs>-<act>-<date>
    algo = ARGS.exp.split("-")[2]

    # prefer the success model, fall back to the best checkpoint
    if os.path.isfile(ARGS.exp+'/success_model.zip'):
        path = ARGS.exp+'/success_model.zip'
    elif os.path.isfile(ARGS.exp+'/best_model.zip'):
        path = ARGS.exp+'/best_model.zip'
    else:
        print("[ERROR]: no model under the specified path", ARGS.exp)
    if algo == 'a2c':
        model = A2C.load(path)
    if algo == 'ppo':
        model = PPO.load(path)
    if algo == 'sac':
        model = SAC.load(path)
    if algo == 'td3':
        model = TD3.load(path)
    if algo == 'ddpg':
        model = DDPG.load(path)

    #### Parameters to recreate the environment ################
    env_name = ARGS.exp.split("-")[1]+"-aviary-v0"
    OBS = ObservationType.KIN if ARGS.exp.split("-")[3] == 'kin' else ObservationType.RGB
    # map the action token from the folder name to the ActionType enum
    if ARGS.exp.split("-")[4] == 'rpm':
        ACT = ActionType.RPM
    elif ARGS.exp.split("-")[4] == 'dyn':
        ACT = ActionType.DYN
    elif ARGS.exp.split("-")[4] == 'pid':
        ACT = ActionType.PID
    elif ARGS.exp.split("-")[4] == 'vel':
        ACT = ActionType.VEL
    elif ARGS.exp.split("-")[4] == 'tun':
        ACT = ActionType.TUN
    elif ARGS.exp.split("-")[4] == 'one_d_rpm':
        ACT = ActionType.ONE_D_RPM
    elif ARGS.exp.split("-")[4] == 'one_d_dyn':
        ACT = ActionType.ONE_D_DYN
    elif ARGS.exp.split("-")[4] == 'one_d_pid':
        ACT = ActionType.ONE_D_PID

    #### Evaluate the model ####################################
    eval_env = gym.make(env_name,
                        aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
                        obs=OBS,
                        act=ACT
                        )
    mean_reward, std_reward = evaluate_policy(model,
                                              eval_env,
                                              n_eval_episodes=10
                                              )
    print("\n\n\nMean reward ", mean_reward, " +- ", std_reward, "\n\n")

    #### Show, record a video, and log the model's performance #
    test_env = gym.make(env_name,
                        gui=True,
                        record=False,
                        aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
                        obs=OBS,
                        act=ACT
                        )
    logger = Logger(logging_freq_hz=int(test_env.SIM_FREQ/test_env.AGGR_PHY_STEPS),
                    num_drones=1
                    )
    obs = test_env.reset()
    start = time.time()
    for i in range(6*int(test_env.SIM_FREQ/test_env.AGGR_PHY_STEPS)): # Up to 6''
        action, _states = model.predict(obs,
                                        deterministic=True # OPTIONAL 'deterministic=False'
                                        )
        obs, reward, done, info = test_env.step(action)
        test_env.render()
        if OBS==ObservationType.KIN:
            logger.log(drone=0,
                       timestamp=i/test_env.SIM_FREQ,
                       state= np.hstack([obs[0:3], np.zeros(4), obs[3:15], np.resize(action, (4))]),
                       control=np.zeros(12)
                       )
        # keep the rendering synchronized with wall-clock time
        sync(np.floor(i*test_env.AGGR_PHY_STEPS), start, test_env.TIMESTEP)
        # if done: obs = test_env.reset() # OPTIONAL EPISODE HALT
    test_env.close()
    logger.save_as_csv("sa") # Optional CSV save
    logger.plot()

    # with np.load(ARGS.exp+'/evaluations.npz') as data:
    #     print(data.files)
    #     print(data['timesteps'])
    #     print(data['results'])
    #     print(data['ep_lengths'])
| [
"sys.path.append",
"stable_baselines3.PPO.load",
"gym.make",
"argparse.ArgumentParser",
"numpy.resize",
"stable_baselines3.common.evaluation.evaluate_policy",
"numpy.floor",
"stable_baselines3.TD3.load",
"numpy.zeros",
"time.time",
"os.path.isfile",
"stable_baselines3.A2C.load",
"stable_base... | [((291, 425), 'sys.path.append', 'sys.path.append', (['"""/home/mullin/WorkSpace/gym-pybullet-drones-master/gym_pybullet_drones/envs/single_agent_rl/FlyToTarget.py"""'], {}), "(\n '/home/mullin/WorkSpace/gym-pybullet-drones-master/gym_pybullet_drones/envs/single_agent_rl/FlyToTarget.py'\n )\n", (306, 425), False, 'import sys\n'), ((2052, 2166), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Single agent reinforcement learning example script using TakeoffAviary"""'}), "(description=\n 'Single agent reinforcement learning example script using TakeoffAviary')\n", (2075, 2166), False, 'import argparse\n'), ((2486, 2533), 'os.path.isfile', 'os.path.isfile', (["(ARGS.exp + '/success_model.zip')"], {}), "(ARGS.exp + '/success_model.zip')\n", (2500, 2533), False, 'import os\n'), ((3910, 4004), 'gym.make', 'gym.make', (['env_name'], {'aggregate_phy_steps': 'shared_constants.AGGR_PHY_STEPS', 'obs': 'OBS', 'act': 'ACT'}), '(env_name, aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS, obs\n =OBS, act=ACT)\n', (3918, 4004), False, 'import gym\n'), ((4127, 4179), 'stable_baselines3.common.evaluation.evaluate_policy', 'evaluate_policy', (['model', 'eval_env'], {'n_eval_episodes': '(10)'}), '(model, eval_env, n_eval_episodes=10)\n', (4142, 4179), False, 'from stable_baselines3.common.evaluation import evaluate_policy\n'), ((4473, 4591), 'gym.make', 'gym.make', (['env_name'], {'gui': '(True)', 'record': '(False)', 'aggregate_phy_steps': 'shared_constants.AGGR_PHY_STEPS', 'obs': 'OBS', 'act': 'ACT'}), '(env_name, gui=True, record=False, aggregate_phy_steps=\n shared_constants.AGGR_PHY_STEPS, obs=OBS, act=ACT)\n', (4481, 4591), False, 'import gym\n'), ((4910, 4921), 'time.time', 'time.time', ([], {}), '()\n', (4919, 4921), False, 'import time\n'), ((2587, 2631), 'os.path.isfile', 'os.path.isfile', (["(ARGS.exp + '/best_model.zip')"], {}), "(ARGS.exp + '/best_model.zip')\n", (2601, 2631), False, 'import os\n'), ((2791, 2805), 
'stable_baselines3.A2C.load', 'A2C.load', (['path'], {}), '(path)\n', (2799, 2805), False, 'from stable_baselines3 import A2C\n'), ((2844, 2858), 'stable_baselines3.PPO.load', 'PPO.load', (['path'], {}), '(path)\n', (2852, 2858), False, 'from stable_baselines3 import PPO\n'), ((2897, 2911), 'stable_baselines3.SAC.load', 'SAC.load', (['path'], {}), '(path)\n', (2905, 2911), False, 'from stable_baselines3 import SAC\n'), ((2950, 2964), 'stable_baselines3.TD3.load', 'TD3.load', (['path'], {}), '(path)\n', (2958, 2964), False, 'from stable_baselines3 import TD3\n'), ((3004, 3019), 'stable_baselines3.DDPG.load', 'DDPG.load', (['path'], {}), '(path)\n', (3013, 3019), False, 'from stable_baselines3 import DDPG\n'), ((5572, 5609), 'numpy.floor', 'np.floor', (['(i * test_env.AGGR_PHY_STEPS)'], {}), '(i * test_env.AGGR_PHY_STEPS)\n', (5580, 5609), True, 'import numpy as np\n'), ((5521, 5533), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (5529, 5533), True, 'import numpy as np\n'), ((5439, 5450), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (5447, 5450), True, 'import numpy as np\n'), ((5464, 5484), 'numpy.resize', 'np.resize', (['action', '(4)'], {}), '(action, 4)\n', (5473, 5484), True, 'import numpy as np\n')] |
import sys
import multiprocessing as mp
import numpy as np
import scipy.optimize as op
def get_default_executor():
    """
    Provide a default executor (a context manager
    returning an object with a map method).

    On Python 3 this is simply multiprocessing.Pool, which already supports
    the context-manager protocol. On Python 2, where Pool lacks __enter__
    and __exit__, a small backport wrapper is returned instead.

    Returns
    -------
    Pool : executor-like object
        An object with context manager (__enter__, __exit__) and map method.
    """
    if sys.version_info <= (3, 0):
        # Python 2 backport: wrap mp.Pool in a generator-based context manager
        from contextlib import contextmanager
        from functools import wraps

        @wraps(mp.Pool)
        @contextmanager
        def _pool_cm(*args, **kwargs):
            pool = mp.Pool(*args, **kwargs)
            yield pool
            pool.terminate()

        return _pool_cm
    return mp.Pool
def search(f, box, n, m, batch, resfile,
rho0=0.5, p=1.0, nrand=10000, nrand_frac=0.05,
executor=get_default_executor()):
"""
Minimize given expensive black-box function and save results into text file.
Parameters
----------
f : callable
The objective function to be minimized.
box : list of lists
List of ranges for each parameter.
n : int
Number of initial function calls.
m : int
Number of subsequent function calls.
batch : int
Number of function calls evaluated simultaneously (in parallel).
resfile : str
Text file to save results.
rho0 : float, optional
Initial "balls density".
p : float, optional
Rate of "balls density" decay (p=1 - linear, p>1 - faster, 0<p<1 - slower).
nrand : int, optional
Number of random samples that is generated for space rescaling.
nrand_frac : float, optional
Fraction of nrand that is actually used for space rescaling.
executor : callable, optional
Should have a map method and behave as a context manager.
Allows the user to use various parallelisation tools
as dask.distributed or pathos.
"""
# space size
d = len(box)
# adjusting the number of function calls to the batch size
if n % batch != 0:
n = n - n % batch + batch
if m % batch != 0:
m = m - m % batch + batch
# go from normalized values (unit cube) to absolute values (box)
def cubetobox(x):
return [box[i][0]+(box[i][1]-box[i][0])*x[i] for i in range(d)]
# generating latin hypercube
points = np.zeros((n, d+1))
points[:, 0:-1] = latin(n, d)
# initial sampling
for i in range(n//batch):
with executor() as e:
points[batch*i:batch*(i+1), -1] = list(e.map(f, list(map(cubetobox, points[batch*i:batch*(i+1), 0:-1]))))
# normalizing function values
fmax = max(abs(points[:, -1]))
points[:, -1] = points[:, -1]/fmax
# volume of d-dimensional ball (r = 1)
if d % 2 == 0:
v1 = np.pi**(d/2)/np.math.factorial(d/2)
else:
v1 = 2*(4*np.pi)**((d-1)/2)*np.math.factorial((d-1)/2)/np.math.factorial(d)
# subsequent iterations (current subsequent iteration = i*batch+j)
T = np.identity(d)
for i in range(m//batch):
# refining scaling matrix T
if d > 1:
fit_noscale = rbf(points, np.identity(d))
population = np.zeros((nrand, d+1))
population[:, 0:-1] = np.random.rand(nrand, d)
population[:, -1] = list(map(fit_noscale, population[:, 0:-1]))
cloud = population[population[:, -1].argsort()][0:int(nrand*nrand_frac), 0:-1]
eigval, eigvec = np.linalg.eig(np.cov(np.transpose(cloud)))
T = [eigvec[:, j]/np.sqrt(eigval[j]) for j in range(d)]
T = T/np.linalg.norm(T)
# sampling next batch of points
fit = rbf(points, T)
points = np.append(points, np.zeros((batch, d+1)), axis=0)
for j in range(batch):
r = ((rho0*((m-1.-(i*batch+j))/(m-1.))**p)/(v1*(n+i*batch+j)))**(1./d)
cons = [{'type': 'ineq', 'fun': lambda x, localk=k: np.linalg.norm(np.subtract(x, points[localk, 0:-1])) - r}
for k in range(n+i*batch+j)]
while True:
minfit = op.minimize(fit, np.random.rand(d), method='SLSQP', bounds=[[0., 1.]]*d, constraints=cons)
if np.isnan(minfit.x)[0] == False:
break
points[n+i*batch+j, 0:-1] = np.copy(minfit.x)
with executor() as e:
points[n+batch*i:n+batch*(i+1), -1] = list(e.map(f, list(map(cubetobox, points[n+batch*i:n+batch*(i+1), 0:-1]))))/fmax
# saving results into text file
points[:, 0:-1] = list(map(cubetobox, points[:, 0:-1]))
points[:, -1] = points[:, -1]*fmax
points = points[points[:, -1].argsort()]
#Receive sorted list of candidate args + valuation.
return points
#labels = [' par_'+str(i+1)+(7-len(str(i+1)))*' '+',' for i in range(d)]+[' f_value ']
#np.savetxt(resfile, points, delimiter=',', fmt=' %+1.4e', header=''.join(labels), comments='')
def latin(n, d):
"""
Build latin hypercube.
Parameters
----------
n : int
Number of points.
d : int
Size of space.
Returns
-------
lh : ndarray
Array of points uniformly placed in d-dimensional unit cube.
"""
# spread function
def spread(points):
return sum(1./np.linalg.norm(np.subtract(points[i], points[j])) for i in range(n) for j in range(n) if i > j)
# starting with diagonal shape
lh = [[i/(n-1.)]*d for i in range(n)]
# minimizing spread function by shuffling
minspread = spread(lh)
for i in range(1000):
point1 = np.random.randint(n)
point2 = np.random.randint(n)
dim = np.random.randint(d)
newlh = np.copy(lh)
newlh[point1, dim], newlh[point2, dim] = newlh[point2, dim], newlh[point1, dim]
newspread = spread(newlh)
if newspread < minspread:
lh = np.copy(newlh)
minspread = newspread
return lh
def rbf(points, T):
"""
Build RBF-fit for given points (see Holmstrom, 2008 for details) using scaling matrix.
Parameters
----------
points : ndarray
Array of multi-d points with corresponding values [[x1, x2, .., xd, val], ...].
T : ndarray
Scaling matrix.
Returns
-------
fit : callable
Function that returns the value of the RBF-fit at a given point.
"""
n = len(points)
d = len(points[0])-1
def phi(r):
return r*r*r
Phi = [[phi(np.linalg.norm(np.dot(T, np.subtract(points[i, 0:-1], points[j, 0:-1])))) for j in range(n)] for i in range(n)]
P = np.ones((n, d+1))
P[:, 0:-1] = points[:, 0:-1]
F = points[:, -1]
M = np.zeros((n+d+1, n+d+1))
M[0:n, 0:n] = Phi
M[0:n, n:n+d+1] = P
M[n:n+d+1, 0:n] = np.transpose(P)
v = np.zeros(n+d+1)
v[0:n] = F
sol = np.linalg.solve(M, v)
lam, b, a = sol[0:n], sol[n:n+d], sol[n+d]
def fit(x):
return sum(lam[i]*phi(np.linalg.norm(np.dot(T, np.subtract(x, points[i, 0:-1])))) for i in range(n)) + np.dot(b, x) + a
return fit
| [
"numpy.subtract",
"numpy.copy",
"numpy.zeros",
"numpy.ones",
"numpy.identity",
"numpy.transpose",
"numpy.isnan",
"numpy.random.randint",
"numpy.math.factorial",
"numpy.linalg.norm",
"functools.wraps",
"multiprocessing.Pool",
"numpy.random.rand",
"numpy.dot",
"numpy.linalg.solve",
"nump... | [((2612, 2632), 'numpy.zeros', 'np.zeros', (['(n, d + 1)'], {}), '((n, d + 1))\n', (2620, 2632), True, 'import numpy as np\n'), ((3262, 3276), 'numpy.identity', 'np.identity', (['d'], {}), '(d)\n', (3273, 3276), True, 'import numpy as np\n'), ((6824, 6843), 'numpy.ones', 'np.ones', (['(n, d + 1)'], {}), '((n, d + 1))\n', (6831, 6843), True, 'import numpy as np\n'), ((6907, 6939), 'numpy.zeros', 'np.zeros', (['(n + d + 1, n + d + 1)'], {}), '((n + d + 1, n + d + 1))\n', (6915, 6939), True, 'import numpy as np\n'), ((7000, 7015), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (7012, 7015), True, 'import numpy as np\n'), ((7025, 7044), 'numpy.zeros', 'np.zeros', (['(n + d + 1)'], {}), '(n + d + 1)\n', (7033, 7044), True, 'import numpy as np\n'), ((7067, 7088), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'v'], {}), '(M, v)\n', (7082, 7088), True, 'import numpy as np\n'), ((771, 785), 'functools.wraps', 'wraps', (['mp.Pool'], {}), '(mp.Pool)\n', (776, 785), False, 'from functools import wraps\n'), ((5816, 5836), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (5833, 5836), True, 'import numpy as np\n'), ((5854, 5874), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (5871, 5874), True, 'import numpy as np\n'), ((5889, 5909), 'numpy.random.randint', 'np.random.randint', (['d'], {}), '(d)\n', (5906, 5909), True, 'import numpy as np\n'), ((5927, 5938), 'numpy.copy', 'np.copy', (['lh'], {}), '(lh)\n', (5934, 5938), True, 'import numpy as np\n'), ((864, 888), 'multiprocessing.Pool', 'mp.Pool', (['*args'], {}), '(*args, **kwargs)\n', (871, 888), True, 'import multiprocessing as mp\n'), ((3065, 3089), 'numpy.math.factorial', 'np.math.factorial', (['(d / 2)'], {}), '(d / 2)\n', (3082, 3089), True, 'import numpy as np\n'), ((3161, 3181), 'numpy.math.factorial', 'np.math.factorial', (['d'], {}), '(d)\n', (3178, 3181), True, 'import numpy as np\n'), ((3442, 3466), 'numpy.zeros', 'np.zeros', (['(nrand, d + 1)'], {}), 
'((nrand, d + 1))\n', (3450, 3466), True, 'import numpy as np\n'), ((3499, 3523), 'numpy.random.rand', 'np.random.rand', (['nrand', 'd'], {}), '(nrand, d)\n', (3513, 3523), True, 'import numpy as np\n'), ((3973, 3997), 'numpy.zeros', 'np.zeros', (['(batch, d + 1)'], {}), '((batch, d + 1))\n', (3981, 3997), True, 'import numpy as np\n'), ((4548, 4565), 'numpy.copy', 'np.copy', (['minfit.x'], {}), '(minfit.x)\n', (4555, 4565), True, 'import numpy as np\n'), ((6113, 6127), 'numpy.copy', 'np.copy', (['newlh'], {}), '(newlh)\n', (6120, 6127), True, 'import numpy as np\n'), ((3134, 3164), 'numpy.math.factorial', 'np.math.factorial', (['((d - 1) / 2)'], {}), '((d - 1) / 2)\n', (3151, 3164), True, 'import numpy as np\n'), ((3401, 3415), 'numpy.identity', 'np.identity', (['d'], {}), '(d)\n', (3412, 3415), True, 'import numpy as np\n'), ((3850, 3867), 'numpy.linalg.norm', 'np.linalg.norm', (['T'], {}), '(T)\n', (3864, 3867), True, 'import numpy as np\n'), ((7264, 7276), 'numpy.dot', 'np.dot', (['b', 'x'], {}), '(b, x)\n', (7270, 7276), True, 'import numpy as np\n'), ((3742, 3761), 'numpy.transpose', 'np.transpose', (['cloud'], {}), '(cloud)\n', (3754, 3761), True, 'import numpy as np\n'), ((3794, 3812), 'numpy.sqrt', 'np.sqrt', (['eigval[j]'], {}), '(eigval[j])\n', (3801, 3812), True, 'import numpy as np\n'), ((4357, 4374), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (4371, 4374), True, 'import numpy as np\n'), ((4450, 4468), 'numpy.isnan', 'np.isnan', (['minfit.x'], {}), '(minfit.x)\n', (4458, 4468), True, 'import numpy as np\n'), ((5539, 5572), 'numpy.subtract', 'np.subtract', (['points[i]', 'points[j]'], {}), '(points[i], points[j])\n', (5550, 5572), True, 'import numpy as np\n'), ((6728, 6773), 'numpy.subtract', 'np.subtract', (['points[i, 0:-1]', 'points[j, 0:-1]'], {}), '(points[i, 0:-1], points[j, 0:-1])\n', (6739, 6773), True, 'import numpy as np\n'), ((4199, 4235), 'numpy.subtract', 'np.subtract', (['x', 'points[localk, 0:-1]'], {}), '(x, 
points[localk, 0:-1])\n', (4210, 4235), True, 'import numpy as np\n'), ((7208, 7239), 'numpy.subtract', 'np.subtract', (['x', 'points[i, 0:-1]'], {}), '(x, points[i, 0:-1])\n', (7219, 7239), True, 'import numpy as np\n')] |
'''This script loads pre-trained word embeddings (GloVe embeddings)
into a frozen Keras Embedding layer, and uses it to
train a text classification model on the 20 Newsgroup dataset
(classication of newsgroup messages into 20 different categories).
GloVe embedding data can be found at:
http://nlp.stanford.edu/data/glove.6B.zip
(source page: http://nlp.stanford.edu/projects/glove/)
20 Newsgroup data can be found at:
http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html
'''
from __future__ import print_function
import os
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
BASE_DIR = ''
GLOVE_DIR = BASE_DIR + '/glove.6B/'
TEXT_DATA_DIR = BASE_DIR + '/20_newsgroup/'
MAX_SEQUENCE_LENGTH = 1000
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
# first, build index mapping words in the embeddings set
# to their embedding vector
print('Indexing word vectors.')
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
if sys.version_info < (3,):
f = open(fpath)
else:
f = open(fpath, encoding='latin-1')
t = f.read()
i = t.find('\n\n') # skip header
if 0 < i:
t = t[i:]
texts.append(t)
f.close()
labels.append(label_id)
print('Found %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
num_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-num_validation_samples]
y_train = labels[:-num_validation_samples]
x_val = data[-num_validation_samples:]
y_val = labels[-num_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
num_words = min(MAX_NB_WORDS, len(word_index))
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
# happy learning!
model.fit(x_train, y_train, validation_data=(x_val, y_val),
epochs=10, batch_size=128)
| [
"keras.preprocessing.sequence.pad_sequences",
"os.path.isdir",
"numpy.asarray",
"numpy.zeros",
"keras.layers.Flatten",
"keras.models.Model",
"keras.layers.Conv1D",
"keras.layers.MaxPooling1D",
"keras.preprocessing.text.Tokenizer",
"numpy.arange",
"keras.layers.Embedding",
"keras.layers.Dense",... | [((2501, 2534), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'MAX_NB_WORDS'}), '(num_words=MAX_NB_WORDS)\n', (2510, 2534), False, 'from keras.preprocessing.text import Tokenizer\n'), ((2707, 2759), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'MAX_SEQUENCE_LENGTH'}), '(sequences, maxlen=MAX_SEQUENCE_LENGTH)\n', (2720, 2759), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2963, 2987), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (2972, 2987), True, 'import numpy as np\n'), ((2988, 3014), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (3005, 3014), True, 'import numpy as np\n'), ((3421, 3457), 'numpy.zeros', 'np.zeros', (['(num_words, EMBEDDING_DIM)'], {}), '((num_words, EMBEDDING_DIM))\n', (3429, 3457), True, 'import numpy as np\n'), ((3884, 4002), 'keras.layers.Embedding', 'Embedding', (['num_words', 'EMBEDDING_DIM'], {'weights': '[embedding_matrix]', 'input_length': 'MAX_SEQUENCE_LENGTH', 'trainable': '(False)'}), '(num_words, EMBEDDING_DIM, weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH, trainable=False)\n', (3893, 4002), False, 'from keras.layers import Conv1D, MaxPooling1D, Embedding\n'), ((4199, 4249), 'keras.layers.Input', 'Input', ([], {'shape': '(MAX_SEQUENCE_LENGTH,)', 'dtype': '"""int32"""'}), "(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n", (4204, 4249), False, 'from keras.layers import Dense, Input, Flatten\n'), ((4634, 4662), 'keras.models.Model', 'Model', (['sequence_input', 'preds'], {}), '(sequence_input, preds)\n', (4639, 4662), False, 'from keras.models import Model\n'), ((1193, 1237), 'os.path.join', 'os.path.join', (['GLOVE_DIR', '"""glove.6B.100d.txt"""'], {}), "(GLOVE_DIR, 'glove.6B.100d.txt')\n", (1205, 1237), False, 'import os\n'), ((1313, 1352), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], 
dtype='float32')\n", (1323, 1352), True, 'import numpy as np\n'), ((1690, 1715), 'os.listdir', 'os.listdir', (['TEXT_DATA_DIR'], {}), '(TEXT_DATA_DIR)\n', (1700, 1715), False, 'import os\n'), ((1729, 1762), 'os.path.join', 'os.path.join', (['TEXT_DATA_DIR', 'name'], {}), '(TEXT_DATA_DIR, name)\n', (1741, 1762), False, 'import os\n'), ((1770, 1789), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1783, 1789), False, 'import os\n'), ((2785, 2803), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (2795, 2803), True, 'import numpy as np\n'), ((4307, 4340), 'keras.layers.Conv1D', 'Conv1D', (['(128)', '(5)'], {'activation': '"""relu"""'}), "(128, 5, activation='relu')\n", (4313, 4340), False, 'from keras.layers import Conv1D, MaxPooling1D, Embedding\n'), ((4365, 4380), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(5)'], {}), '(5)\n', (4377, 4380), False, 'from keras.layers import Conv1D, MaxPooling1D, Embedding\n'), ((4388, 4421), 'keras.layers.Conv1D', 'Conv1D', (['(128)', '(5)'], {'activation': '"""relu"""'}), "(128, 5, activation='relu')\n", (4394, 4421), False, 'from keras.layers import Conv1D, MaxPooling1D, Embedding\n'), ((4429, 4444), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(5)'], {}), '(5)\n', (4441, 4444), False, 'from keras.layers import Conv1D, MaxPooling1D, Embedding\n'), ((4452, 4485), 'keras.layers.Conv1D', 'Conv1D', (['(128)', '(5)'], {'activation': '"""relu"""'}), "(128, 5, activation='relu')\n", (4458, 4485), False, 'from keras.layers import Conv1D, MaxPooling1D, Embedding\n'), ((4493, 4509), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(35)'], {}), '(35)\n', (4505, 4509), False, 'from keras.layers import Conv1D, MaxPooling1D, Embedding\n'), ((4517, 4526), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4524, 4526), False, 'from keras.layers import Dense, Input, Flatten\n'), ((4534, 4563), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (4539, 4563), 
False, 'from keras.layers import Dense, Input, Flatten\n'), ((1894, 1910), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1904, 1910), False, 'import os\n'), ((1969, 1994), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (1981, 1994), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)
def plot_forest(max_depth=1):
plt.figure()
ax = plt.gca()
h = 0.02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
if max_depth != 0:
forest = RandomForestClassifier(n_estimators=20, max_depth=max_depth,
random_state=1).fit(X, y)
Z = forest.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=.4)
ax.set_title("max_depth = %d" % max_depth)
else:
ax.set_title("data set")
ax.scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
def plot_forest_interactive():
from IPython.html.widgets import interactive, IntSlider
slider = IntSlider(min=0, max=8, step=1, value=0)
return interactive(plot_forest, max_depth=slider)
| [
"sklearn.ensemble.RandomForestClassifier",
"IPython.html.widgets.interactive",
"IPython.html.widgets.IntSlider",
"sklearn.datasets.make_blobs",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.array",
"matplotlib.pyplot.gca"
] | [((160, 230), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'centers': '[[0, 0], [1, 1]]', 'random_state': '(61526)', 'n_samples': '(50)'}), '(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)\n', (170, 230), False, 'from sklearn.datasets import make_blobs\n'), ((271, 283), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (281, 283), True, 'import matplotlib.pyplot as plt\n'), ((294, 303), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (301, 303), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1261), 'IPython.html.widgets.IntSlider', 'IntSlider', ([], {'min': '(0)', 'max': '(8)', 'step': '(1)', 'value': '(0)'}), '(min=0, max=8, step=1, value=0)\n', (1230, 1261), False, 'from IPython.html.widgets import interactive, IntSlider\n'), ((1274, 1316), 'IPython.html.widgets.interactive', 'interactive', (['plot_forest'], {'max_depth': 'slider'}), '(plot_forest, max_depth=slider)\n', (1285, 1316), False, 'from IPython.html.widgets import interactive, IntSlider\n'), ((464, 490), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (473, 490), True, 'import numpy as np\n'), ((492, 518), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (501, 518), True, 'import numpy as np\n'), ((564, 640), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(20)', 'max_depth': 'max_depth', 'random_state': '(1)'}), '(n_estimators=20, max_depth=max_depth, random_state=1)\n', (586, 640), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((971, 991), 'numpy.array', 'np.array', (["['b', 'r']"], {}), "(['b', 'r'])\n", (979, 991), True, 'import numpy as np\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to migrate legacy protos to their modern equivalents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorboard.compat.proto import event_pb2
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import tensor_util
def migrate_event(event):
if not event.HasField("summary"):
return event
old_values = event.summary.value
new_values = [migrate_value(value) for value in old_values]
# Optimization: Don't create a new event if there were no changes.
if len(old_values) == len(new_values) and all(
x is y for (x, y) in zip(old_values, new_values)
):
return event
result = event_pb2.Event()
result.CopyFrom(event)
del result.summary.value[:]
result.summary.value.extend(new_values)
return result
def migrate_value(value):
"""Convert `value` to a new-style value, if necessary and possible.
An "old-style" value is a value that uses any `value` field other than
the `tensor` field. A "new-style" value is a value that uses the
`tensor` field. TensorBoard continues to support old-style values on
disk; this method converts them to new-style values so that further
code need only deal with one data format.
Arguments:
value: A `Summary.Value` object. This argument is not modified.
Returns:
If the `value` is an old-style value for which there is a new-style
equivalent, the result is the new-style value. Otherwise---if the
value is already new-style or does not yet have a new-style
equivalent---the value will be returned unchanged.
:type value: Summary.Value
:rtype: Summary.Value
"""
handler = {
"histo": _migrate_histogram_value,
"image": _migrate_image_value,
"audio": _migrate_audio_value,
"simple_value": _migrate_scalar_value,
}.get(value.WhichOneof("value"))
return handler(value) if handler else value
def make_summary(tag, metadata, data):
tensor_proto = tensor_util.make_tensor_proto(data)
return summary_pb2.Summary.Value(
tag=tag, metadata=metadata, tensor=tensor_proto
)
def _migrate_histogram_value(value):
histogram_value = value.histo
bucket_lefts = [histogram_value.min] + histogram_value.bucket_limit[:-1]
bucket_rights = histogram_value.bucket_limit[:-1] + [histogram_value.max]
bucket_counts = histogram_value.bucket
buckets = np.array(
[bucket_lefts, bucket_rights, bucket_counts], dtype=np.float32
).transpose()
summary_metadata = histogram_metadata.create_summary_metadata(
display_name=value.metadata.display_name or value.tag,
description=value.metadata.summary_description,
)
return make_summary(value.tag, summary_metadata, buckets)
def _migrate_image_value(value):
image_value = value.image
data = [
str(image_value.width).encode("ascii"),
str(image_value.height).encode("ascii"),
image_value.encoded_image_string,
]
summary_metadata = image_metadata.create_summary_metadata(
display_name=value.metadata.display_name or value.tag,
description=value.metadata.summary_description,
)
return make_summary(value.tag, summary_metadata, data)
def _migrate_audio_value(value):
audio_value = value.audio
data = [[audio_value.encoded_audio_string, b""]] # empty label
summary_metadata = audio_metadata.create_summary_metadata(
display_name=value.metadata.display_name or value.tag,
description=value.metadata.summary_description,
encoding=audio_metadata.Encoding.Value("WAV"),
)
return make_summary(value.tag, summary_metadata, data)
def _migrate_scalar_value(value):
scalar_value = value.simple_value
summary_metadata = scalar_metadata.create_summary_metadata(
display_name=value.metadata.display_name or value.tag,
description=value.metadata.summary_description,
)
return make_summary(value.tag, summary_metadata, scalar_value)
| [
"tensorboard.plugins.audio.metadata.Encoding.Value",
"tensorboard.plugins.image.metadata.create_summary_metadata",
"tensorboard.compat.proto.event_pb2.Event",
"tensorboard.plugins.scalar.metadata.create_summary_metadata",
"tensorboard.plugins.histogram.metadata.create_summary_metadata",
"tensorboard.util.... | [((1705, 1722), 'tensorboard.compat.proto.event_pb2.Event', 'event_pb2.Event', ([], {}), '()\n', (1720, 1722), False, 'from tensorboard.compat.proto import event_pb2\n'), ((3044, 3079), 'tensorboard.util.tensor_util.make_tensor_proto', 'tensor_util.make_tensor_proto', (['data'], {}), '(data)\n', (3073, 3079), False, 'from tensorboard.util import tensor_util\n'), ((3091, 3165), 'tensorboard.compat.proto.summary_pb2.Summary.Value', 'summary_pb2.Summary.Value', ([], {'tag': 'tag', 'metadata': 'metadata', 'tensor': 'tensor_proto'}), '(tag=tag, metadata=metadata, tensor=tensor_proto)\n', (3116, 3165), False, 'from tensorboard.compat.proto import summary_pb2\n'), ((3588, 3738), 'tensorboard.plugins.histogram.metadata.create_summary_metadata', 'histogram_metadata.create_summary_metadata', ([], {'display_name': '(value.metadata.display_name or value.tag)', 'description': 'value.metadata.summary_description'}), '(display_name=value.metadata.\n display_name or value.tag, description=value.metadata.summary_description)\n', (3630, 3738), True, 'from tensorboard.plugins.histogram import metadata as histogram_metadata\n'), ((4067, 4213), 'tensorboard.plugins.image.metadata.create_summary_metadata', 'image_metadata.create_summary_metadata', ([], {'display_name': '(value.metadata.display_name or value.tag)', 'description': 'value.metadata.summary_description'}), '(display_name=value.metadata.\n display_name or value.tag, description=value.metadata.summary_description)\n', (4105, 4213), True, 'from tensorboard.plugins.image import metadata as image_metadata\n'), ((4823, 4970), 'tensorboard.plugins.scalar.metadata.create_summary_metadata', 'scalar_metadata.create_summary_metadata', ([], {'display_name': '(value.metadata.display_name or value.tag)', 'description': 'value.metadata.summary_description'}), '(display_name=value.metadata.\n display_name or value.tag, description=value.metadata.summary_description)\n', (4862, 4970), True, 'from 
tensorboard.plugins.scalar import metadata as scalar_metadata\n'), ((3465, 3537), 'numpy.array', 'np.array', (['[bucket_lefts, bucket_rights, bucket_counts]'], {'dtype': 'np.float32'}), '([bucket_lefts, bucket_rights, bucket_counts], dtype=np.float32)\n', (3473, 3537), True, 'import numpy as np\n'), ((4623, 4659), 'tensorboard.plugins.audio.metadata.Encoding.Value', 'audio_metadata.Encoding.Value', (['"""WAV"""'], {}), "('WAV')\n", (4652, 4659), True, 'from tensorboard.plugins.audio import metadata as audio_metadata\n')] |
# pip install https://github.com/fogleman/sdf/archive/refs/heads/main.zip
import math
import numpy as np
import sdf
import zengl
from window import Window
c = sdf.cylinder(0.5)
f = sdf.sphere(1) & sdf.box(1.5)
f -= c.orient(sdf.X) | c.orient(sdf.Y) | c.orient(sdf.Z)
# f = sdf.sphere(2) & sdf.slab(z0=-0.5, z1=0.5).k(0.1)
# f -= sdf.cylinder(1).k(0.1)
# f -= sdf.cylinder(0.25).circular_array(16, 2).k(0.1)
# s = sdf.sphere(0.75)
# s = s.translate(sdf.Z * -3) | s.translate(sdf.Z * 3)
# s = s.union(sdf.capsule(sdf.Z * -3, sdf.Z * 3, 0.5), k=1)
# f = sdf.sphere(1.5).union(s.orient(sdf.X), s.orient(sdf.Y), s.orient(sdf.Z), k=1)
# f = sdf.rounded_cylinder(1, 0.1, 5)
# x = sdf.box((1, 1, 4)).rotate(sdf.pi / 4)
# x = x.circular_array(24, 1.6)
# x = x.twist(0.75) | x.twist(-0.75)
# f -= x.k(0.1)
# f -= sdf.cylinder(0.5).k(0.1)
# c = sdf.cylinder(0.25).orient(sdf.X)
# f -= c.translate(sdf.Z * -2.5).k(0.1)
# f -= c.translate(sdf.Z * 2.5).k(0.1)
# f = sdf.rounded_box([3.2, 1, 0.25], 0.1).translate((1.5, 0, 0.0625))
# f = f.bend_linear(sdf.X * 0.75, sdf.X * 2.25, sdf.Z * -0.1875, sdf.ease.in_out_quad)
# f = f.circular_array(3, 0)
# f = f.repeat((2.7, 5.4, 0), padding=1)
# f |= f.translate((2.7 / 2, 2.7, 0))
# f &= sdf.cylinder(10)
# f |= (sdf.cylinder(12) - sdf.cylinder(10)) & sdf.slab(z0=-0.5, z1=0.5).k(0.25)
points = np.array(f.generate())
tmp = points.reshape(-1, 3, 3)
normals = np.repeat(np.cross(tmp[:, 1] - tmp[:, 0], tmp[:, 2] - tmp[:, 0]), 3, axis=0)
radius = np.max(np.sqrt(np.sum(points * points, axis=1)))
window = Window(1280, 720)
ctx = zengl.context()
image = ctx.image(window.size, 'rgba8unorm', samples=4)
depth = ctx.image(window.size, 'depth24plus', samples=4)
image.clear_value = (0.2, 0.2, 0.2, 1.0)
mesh = np.concatenate([points, normals], axis=1).astype('f4').tobytes()
vertex_buffer = ctx.buffer(mesh)
uniform_buffer = ctx.buffer(size=80)
model = ctx.pipeline(
vertex_shader='''
#version 330
layout (std140) uniform Common {
mat4 mvp;
};
layout (location = 0) in vec3 in_vert;
layout (location = 1) in vec3 in_norm;
out vec3 v_norm;
void main() {
gl_Position = mvp * vec4(in_vert, 1.0);
v_norm = in_norm;
}
''',
fragment_shader='''
#version 330
in vec3 v_norm;
layout (location = 0) out vec4 out_color;
void main() {
vec3 color = vec3(0.1, 0.7, 1.0);
vec3 light = vec3(4.0, 3.0, 10.0);
float lum = dot(normalize(light), normalize(v_norm)) * 0.4 + 0.6;
out_color = vec4(color * lum, 1.0);
}
''',
layout=[
{
'name': 'Common',
'binding': 0,
},
],
resources=[
{
'type': 'uniform_buffer',
'binding': 0,
'buffer': uniform_buffer,
},
],
framebuffer=[image, depth],
topology='triangles',
cull_face='back',
vertex_buffers=zengl.bind(vertex_buffer, '3f 3f', 0, 1),
vertex_count=vertex_buffer.size // 24,
)
@window.render
def render():
x, y = math.sin(window.time * 0.5) * 2.5 * radius, math.cos(window.time * 0.5) * 2.5 * radius
camera = zengl.camera((x, y, 1.5 * radius), (0.0, 0.0, 0.0), aspect=window.aspect, fov=45.0)
uniform_buffer.write(camera)
image.clear()
depth.clear()
model.render()
image.blit()
window.run()
| [
"zengl.camera",
"sdf.box",
"numpy.sum",
"sdf.cylinder",
"window.Window",
"numpy.cross",
"math.sin",
"sdf.sphere",
"zengl.bind",
"math.cos",
"zengl.context",
"numpy.concatenate"
] | [((162, 179), 'sdf.cylinder', 'sdf.cylinder', (['(0.5)'], {}), '(0.5)\n', (174, 179), False, 'import sdf\n'), ((1543, 1560), 'window.Window', 'Window', (['(1280)', '(720)'], {}), '(1280, 720)\n', (1549, 1560), False, 'from window import Window\n'), ((1567, 1582), 'zengl.context', 'zengl.context', ([], {}), '()\n', (1580, 1582), False, 'import zengl\n'), ((184, 197), 'sdf.sphere', 'sdf.sphere', (['(1)'], {}), '(1)\n', (194, 197), False, 'import sdf\n'), ((200, 212), 'sdf.box', 'sdf.box', (['(1.5)'], {}), '(1.5)\n', (207, 212), False, 'import sdf\n'), ((1408, 1462), 'numpy.cross', 'np.cross', (['(tmp[:, 1] - tmp[:, 0])', '(tmp[:, 2] - tmp[:, 0])'], {}), '(tmp[:, 1] - tmp[:, 0], tmp[:, 2] - tmp[:, 0])\n', (1416, 1462), True, 'import numpy as np\n'), ((3222, 3309), 'zengl.camera', 'zengl.camera', (['(x, y, 1.5 * radius)', '(0.0, 0.0, 0.0)'], {'aspect': 'window.aspect', 'fov': '(45.0)'}), '((x, y, 1.5 * radius), (0.0, 0.0, 0.0), aspect=window.aspect,\n fov=45.0)\n', (3234, 3309), False, 'import zengl\n'), ((1499, 1530), 'numpy.sum', 'np.sum', (['(points * points)'], {'axis': '(1)'}), '(points * points, axis=1)\n', (1505, 1530), True, 'import numpy as np\n'), ((2993, 3033), 'zengl.bind', 'zengl.bind', (['vertex_buffer', '"""3f 3f"""', '(0)', '(1)'], {}), "(vertex_buffer, '3f 3f', 0, 1)\n", (3003, 3033), False, 'import zengl\n'), ((1746, 1787), 'numpy.concatenate', 'np.concatenate', (['[points, normals]'], {'axis': '(1)'}), '([points, normals], axis=1)\n', (1760, 1787), True, 'import numpy as np\n'), ((3122, 3149), 'math.sin', 'math.sin', (['(window.time * 0.5)'], {}), '(window.time * 0.5)\n', (3130, 3149), False, 'import math\n'), ((3166, 3193), 'math.cos', 'math.cos', (['(window.time * 0.5)'], {}), '(window.time * 0.5)\n', (3174, 3193), False, 'import math\n')] |
import numpy as np
def project_vectors(vx, vy, onto_x, onto_y, eps=1e-6):
    """Project 2-D vectors (vx, vy) onto target vectors (onto_x, onto_y), elementwise.

    Entries whose target vector has magnitude <= ``eps`` are dropped to avoid
    division by (near-)zero; the returned mask records which entries survived.

    Args:
        vx, vy: array-like x/y components of the vectors to project.
        onto_x, onto_y: array-like x/y components of the projection targets.
        eps: magnitude threshold below which a target vector is treated as
            degenerate and excluded. Defaults to 1e-6.

    Returns:
        Tuple ``(sel_valid, (proj_x, proj_y), (orth_x, orth_y))`` where
        ``sel_valid`` is the boolean keep-mask over the input entries, and the
        projected / orthogonal components are filtered to those entries so
        that ``proj + orth == v`` for each retained vector.
    """
    mag_onto = np.sqrt(onto_x * onto_x + onto_y * onto_y)
    # mag_onto is a non-negative sqrt, so no abs() is needed for the test.
    sel_valid = mag_onto > eps
    vx = vx[sel_valid]
    vy = vy[sel_valid]
    mag_onto = mag_onto[sel_valid]
    # Unit vector along each projection target (computed once, reused below).
    onto_x_norm = onto_x[sel_valid] / mag_onto
    onto_y_norm = onto_y[sel_valid] / mag_onto
    # Scalar projection: dot(v, onto) / |onto|.
    projected_length = vx * onto_x_norm + vy * onto_y_norm
    projected_v_x = projected_length * onto_x_norm
    projected_v_y = projected_length * onto_y_norm
    # Orthogonal residual: v minus its projection onto the target.
    orthogonal_v_x = vx - projected_v_x
    orthogonal_v_y = vy - projected_v_y
    return sel_valid, (projected_v_x, projected_v_y), (orthogonal_v_x, orthogonal_v_y)
| [
"numpy.abs",
"numpy.sqrt"
] | [((132, 174), 'numpy.sqrt', 'np.sqrt', (['(onto_x * onto_x + onto_y * onto_y)'], {}), '(onto_x * onto_x + onto_y * onto_y)\n', (139, 174), True, 'import numpy as np\n'), ((185, 201), 'numpy.abs', 'np.abs', (['mag_onto'], {}), '(mag_onto)\n', (191, 201), True, 'import numpy as np\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Premade Linear models."""
import numpy as np
import tensorflow.compat.v2 as tf
from keras import backend
from keras import losses
from keras.engine import input_layer
from keras.engine import sequential
from keras.engine import training
from keras.feature_column import dense_features_v2
from keras.layers import core
from keras.optimizers.optimizer_v2 import gradient_descent
from keras.premade_models import linear
from keras.testing_infra import test_combinations
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class LinearModelTest(test_combinations.TestCase):
    """Tests for the premade `linear.LinearModel`: dense, list, dict and
    sparse inputs, feature-column pipelines, use as a layer, and config
    round-tripping."""

    def test_linear_model_with_single_input(self):
        """Training on a single 2-feature array builds the model."""
        model = linear.LinearModel()
        inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 2))
        # Target is an exact linear function of the input columns, so the
        # regression is well posed for SGD on MSE.
        output = 0.3 * inp[:, 0] + 0.2 * inp[:, 1]
        model.compile("sgd", "mse", [])
        model.fit(inp, output, epochs=5)
        self.assertTrue(model.built)

    def test_linear_model_with_list_input(self):
        """The model accepts a list of input arrays."""
        model = linear.LinearModel()
        input_a = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
        input_b = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
        output = 0.3 * input_a + 0.2 * input_b
        model.compile("sgd", "mse", [])
        model.fit([input_a, input_b], output, epochs=5)

    def test_linear_model_with_mismatched_dict_inputs(self):
        """fit() raises when a dict input's keys don't match the build keys."""
        model = linear.LinearModel()
        input_a = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
        input_b = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
        output = 0.3 * input_a + 0.2 * input_b
        model.compile("sgd", "mse", [])
        model.build(
            {"a": tf.TensorShape([None, 1]), "b": tf.TensorShape([None, 1])}
        )
        # "c" was never part of the build signature, so fitting must fail.
        with self.assertRaisesRegex(ValueError, "Missing keys"):
            model.fit({"c": input_a, "b": input_b}, output, epochs=5)

    def test_linear_model_with_dict_input(self):
        """The model accepts a dict of named input arrays."""
        model = linear.LinearModel()
        input_a = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
        input_b = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
        output = 0.3 * input_a + 0.2 * input_b
        model.compile("sgd", "mse", [])
        model.fit({"a": input_a, "b": input_b}, output, epochs=5)

    def test_linear_model_as_layer(self):
        """LinearModel composes as a layer inside a functional model."""
        input_a = input_layer.Input(shape=(1,), name="a")
        output_a = linear.LinearModel()(input_a)
        input_b = input_layer.Input(shape=(1,), name="b")
        output_b = core.Dense(units=1)(input_b)
        output = output_a + output_b
        model = training.Model(inputs=[input_a, input_b], outputs=[output])
        input_a_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
        input_b_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
        output_np = 0.3 * input_a_np + 0.2 * input_b_np
        model.compile("sgd", "mse", [])
        model.fit([input_a_np, input_b_np], output_np, epochs=5)

    def test_linear_model_with_sparse_input(self):
        """A SparseTensor input is accepted; with an all-ones kernel the
        output is the per-row sum of the sparse values."""
        indices = tf.constant([[0, 0], [0, 2], [1, 0], [1, 1]], dtype=tf.int64)
        values = tf.constant([0.4, 0.6, 0.8, 0.5])
        shape = tf.constant([2, 3], dtype=tf.int64)
        model = linear.LinearModel()
        inp = tf.SparseTensor(indices, values, shape)
        output = model(inp)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        if tf.executing_eagerly():
            weights = model.get_weights()
            # With a kernel of ones, each output row is the sum of that
            # row's sparse values: 0.4 + 0.6 = 1.0 and 0.8 + 0.5 = 1.3.
            weights[0] = np.ones((3, 1))
            model.set_weights(weights)
            output = model(inp)
            self.assertAllClose([[1.0], [1.3]], self.evaluate(output))

    def test_linear_model_with_sparse_input_and_custom_training(self):
        """A custom GradientTape training loop works with sparse input."""
        batch_size = 64
        indices = []
        values = []
        target = np.zeros((batch_size, 1))
        # Randomly populate each row with feature 0, feature 1, or both, and
        # set the target to the matching linear combination (0.3, 0.2).
        for i in range(64):
            rand_int = np.random.randint(3)
            if rand_int == 0:
                indices.append((i, 0))
                val = np.random.uniform(low=-5.0, high=5.0)
                values.append(val)
                target[i] = 0.3 * val
            elif rand_int == 1:
                indices.append((i, 1))
                val = np.random.uniform(low=-5.0, high=5.0)
                values.append(val)
                target[i] = 0.2 * val
            else:
                indices.append((i, 0))
                indices.append((i, 1))
                val_1 = np.random.uniform(low=-5.0, high=5.0)
                val_2 = np.random.uniform(low=-5.0, high=5.0)
                values.append(val_1)
                values.append(val_2)
                target[i] = 0.3 * val_1 + 0.2 * val_2
        indices = np.asarray(indices)
        values = np.asarray(values)
        shape = tf.constant([batch_size, 2], dtype=tf.int64)
        inp = tf.SparseTensor(indices, values, shape)
        model = linear.LinearModel(use_bias=False)
        opt = gradient_descent.SGD()
        for _ in range(20):
            with tf.GradientTape() as t:
                output = model(inp)
                loss = backend.mean(losses.mean_squared_error(target, output))
            grads = t.gradient(loss, model.trainable_variables)
            grads_and_vars = zip(grads, model.trainable_variables)
            opt.apply_gradients(grads_and_vars)

    # This test is an example for a regression on categorical inputs, i.e.,
    # the output is 0.4, 0.6, 0.9 when input is 'alpha', 'beta', 'gamma'
    # separately.
    def test_linear_model_with_feature_column(self):
        """Regression on indicator (one-hot) feature columns recovers the
        per-category target values in the kernel."""
        vocab_list = ["alpha", "beta", "gamma"]
        vocab_val = [0.4, 0.6, 0.9]
        data = np.random.choice(vocab_list, size=256)
        y = np.zeros_like(data, dtype=np.float32)
        for vocab, val in zip(vocab_list, vocab_val):
            indices = np.where(data == vocab)
            # Jitter the targets slightly so the fit is a genuine regression
            # rather than exact interpolation.
            y[indices] = val + np.random.uniform(
                low=-0.01, high=0.01, size=indices[0].shape
            )
        cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
            key="symbol", vocabulary_list=vocab_list
        )
        ind_column = tf.feature_column.indicator_column(cat_column)
        dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
        linear_model = linear.LinearModel(
            use_bias=False, kernel_initializer="zeros"
        )
        combined = sequential.Sequential([dense_feature_layer, linear_model])
        opt = gradient_descent.SGD(learning_rate=0.1)
        combined.compile(opt, "mse", [])
        combined.fit(x={"symbol": data}, y=y, batch_size=32, epochs=10)
        self.assertAllClose(
            [[0.4], [0.6], [0.9]],
            combined.layers[1].dense_layers[0].kernel.numpy(),
            atol=0.01,
        )

    def test_config(self):
        """get_config()/from_config() round-trips the model configuration."""
        linear_model = linear.LinearModel(units=3, use_bias=True)
        config = linear_model.get_config()
        cloned_linear_model = linear.LinearModel.from_config(config)
        self.assertEqual(linear_model.units, cloned_linear_model.units)
if __name__ == "__main__":
    # Run this module's tests under TensorFlow's test runner.
    tf.test.main()
| [
"tensorflow.compat.v2.feature_column.indicator_column",
"keras.optimizers.optimizer_v2.gradient_descent.SGD",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.SparseTensor",
"numpy.ones",
"numpy.random.randint",
"tensorflow.compat.v2.compat.v1.global_variables_initializer",
"keras.engine.input_l... | [((1180, 1238), 'keras.testing_infra.test_combinations.run_all_keras_modes', 'test_combinations.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), '(always_skip_v1=True)\n', (1217, 1238), False, 'from keras.testing_infra import test_combinations\n'), ((7680, 7694), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (7692, 7694), True, 'import tensorflow.compat.v2 as tf\n'), ((1357, 1377), 'keras.premade_models.linear.LinearModel', 'linear.LinearModel', ([], {}), '()\n', (1375, 1377), False, 'from keras.premade_models import linear\n'), ((1392, 1443), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(64, 2)'}), '(low=-5.0, high=5.0, size=(64, 2))\n', (1409, 1443), True, 'import numpy as np\n'), ((1679, 1699), 'keras.premade_models.linear.LinearModel', 'linear.LinearModel', ([], {}), '()\n', (1697, 1699), False, 'from keras.premade_models import linear\n'), ((1718, 1769), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(64, 1)'}), '(low=-5.0, high=5.0, size=(64, 1))\n', (1735, 1769), True, 'import numpy as np\n'), ((1788, 1839), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(64, 1)'}), '(low=-5.0, high=5.0, size=(64, 1))\n', (1805, 1839), True, 'import numpy as np\n'), ((2061, 2081), 'keras.premade_models.linear.LinearModel', 'linear.LinearModel', ([], {}), '()\n', (2079, 2081), False, 'from keras.premade_models import linear\n'), ((2100, 2151), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(64, 1)'}), '(low=-5.0, high=5.0, size=(64, 1))\n', (2117, 2151), True, 'import numpy as np\n'), ((2170, 2221), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(64, 1)'}), '(low=-5.0, high=5.0, size=(64, 1))\n', (2187, 2221), True, 'import numpy as np\n'), ((2618, 2638), 
'keras.premade_models.linear.LinearModel', 'linear.LinearModel', ([], {}), '()\n', (2636, 2638), False, 'from keras.premade_models import linear\n'), ((2657, 2708), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(64, 1)'}), '(low=-5.0, high=5.0, size=(64, 1))\n', (2674, 2708), True, 'import numpy as np\n'), ((2727, 2778), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(64, 1)'}), '(low=-5.0, high=5.0, size=(64, 1))\n', (2744, 2778), True, 'import numpy as np\n'), ((2993, 3032), 'keras.engine.input_layer.Input', 'input_layer.Input', ([], {'shape': '(1,)', 'name': '"""a"""'}), "(shape=(1,), name='a')\n", (3010, 3032), False, 'from keras.engine import input_layer\n'), ((3100, 3139), 'keras.engine.input_layer.Input', 'input_layer.Input', ([], {'shape': '(1,)', 'name': '"""b"""'}), "(shape=(1,), name='b')\n", (3117, 3139), False, 'from keras.engine import input_layer\n'), ((3241, 3300), 'keras.engine.training.Model', 'training.Model', ([], {'inputs': '[input_a, input_b]', 'outputs': '[output]'}), '(inputs=[input_a, input_b], outputs=[output])\n', (3255, 3300), False, 'from keras.engine import training\n'), ((3322, 3373), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(64, 1)'}), '(low=-5.0, high=5.0, size=(64, 1))\n', (3339, 3373), True, 'import numpy as np\n'), ((3395, 3446), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(64, 1)'}), '(low=-5.0, high=5.0, size=(64, 1))\n', (3412, 3446), True, 'import numpy as np\n'), ((3678, 3739), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0, 0], [0, 2], [1, 0], [1, 1]]'], {'dtype': 'tf.int64'}), '([[0, 0], [0, 2], [1, 0], [1, 1]], dtype=tf.int64)\n', (3689, 3739), True, 'import tensorflow.compat.v2 as tf\n'), ((3757, 3790), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0.4, 0.6, 0.8, 0.5]'], {}), '([0.4, 0.6, 0.8, 0.5])\n', (3768, 
3790), True, 'import tensorflow.compat.v2 as tf\n'), ((3807, 3842), 'tensorflow.compat.v2.constant', 'tf.constant', (['[2, 3]'], {'dtype': 'tf.int64'}), '([2, 3], dtype=tf.int64)\n', (3818, 3842), True, 'import tensorflow.compat.v2 as tf\n'), ((3859, 3879), 'keras.premade_models.linear.LinearModel', 'linear.LinearModel', ([], {}), '()\n', (3877, 3879), False, 'from keras.premade_models import linear\n'), ((3894, 3933), 'tensorflow.compat.v2.SparseTensor', 'tf.SparseTensor', (['indices', 'values', 'shape'], {}), '(indices, values, shape)\n', (3909, 3933), True, 'import tensorflow.compat.v2 as tf\n'), ((4040, 4062), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (4060, 4062), True, 'import tensorflow.compat.v2 as tf\n'), ((4443, 4468), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (4451, 4468), True, 'import numpy as np\n'), ((5314, 5333), 'numpy.asarray', 'np.asarray', (['indices'], {}), '(indices)\n', (5324, 5333), True, 'import numpy as np\n'), ((5351, 5369), 'numpy.asarray', 'np.asarray', (['values'], {}), '(values)\n', (5361, 5369), True, 'import numpy as np\n'), ((5386, 5430), 'tensorflow.compat.v2.constant', 'tf.constant', (['[batch_size, 2]'], {'dtype': 'tf.int64'}), '([batch_size, 2], dtype=tf.int64)\n', (5397, 5430), True, 'import tensorflow.compat.v2 as tf\n'), ((5445, 5484), 'tensorflow.compat.v2.SparseTensor', 'tf.SparseTensor', (['indices', 'values', 'shape'], {}), '(indices, values, shape)\n', (5460, 5484), True, 'import tensorflow.compat.v2 as tf\n'), ((5501, 5535), 'keras.premade_models.linear.LinearModel', 'linear.LinearModel', ([], {'use_bias': '(False)'}), '(use_bias=False)\n', (5519, 5535), False, 'from keras.premade_models import linear\n'), ((5550, 5572), 'keras.optimizers.optimizer_v2.gradient_descent.SGD', 'gradient_descent.SGD', ([], {}), '()\n', (5570, 5572), False, 'from keras.optimizers.optimizer_v2 import gradient_descent\n'), ((6256, 6294), 'numpy.random.choice', 
'np.random.choice', (['vocab_list'], {'size': '(256)'}), '(vocab_list, size=256)\n', (6272, 6294), True, 'import numpy as np\n'), ((6307, 6344), 'numpy.zeros_like', 'np.zeros_like', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (6320, 6344), True, 'import numpy as np\n'), ((6590, 6693), 'tensorflow.compat.v2.feature_column.categorical_column_with_vocabulary_list', 'tf.feature_column.categorical_column_with_vocabulary_list', ([], {'key': '"""symbol"""', 'vocabulary_list': 'vocab_list'}), "(key='symbol',\n vocabulary_list=vocab_list)\n", (6647, 6693), True, 'import tensorflow.compat.v2 as tf\n'), ((6733, 6779), 'tensorflow.compat.v2.feature_column.indicator_column', 'tf.feature_column.indicator_column', (['cat_column'], {}), '(cat_column)\n', (6767, 6779), True, 'import tensorflow.compat.v2 as tf\n'), ((6810, 6855), 'keras.feature_column.dense_features_v2.DenseFeatures', 'dense_features_v2.DenseFeatures', (['[ind_column]'], {}), '([ind_column])\n', (6841, 6855), False, 'from keras.feature_column import dense_features_v2\n'), ((6879, 6941), 'keras.premade_models.linear.LinearModel', 'linear.LinearModel', ([], {'use_bias': '(False)', 'kernel_initializer': '"""zeros"""'}), "(use_bias=False, kernel_initializer='zeros')\n", (6897, 6941), False, 'from keras.premade_models import linear\n'), ((6983, 7041), 'keras.engine.sequential.Sequential', 'sequential.Sequential', (['[dense_feature_layer, linear_model]'], {}), '([dense_feature_layer, linear_model])\n', (7004, 7041), False, 'from keras.engine import sequential\n'), ((7056, 7095), 'keras.optimizers.optimizer_v2.gradient_descent.SGD', 'gradient_descent.SGD', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (7076, 7095), False, 'from keras.optimizers.optimizer_v2 import gradient_descent\n'), ((7420, 7462), 'keras.premade_models.linear.LinearModel', 'linear.LinearModel', ([], {'units': '(3)', 'use_bias': '(True)'}), '(units=3, use_bias=True)\n', (7438, 7462), False, 'from keras.premade_models 
import linear\n'), ((7536, 7574), 'keras.premade_models.linear.LinearModel.from_config', 'linear.LinearModel.from_config', (['config'], {}), '(config)\n', (7566, 7574), False, 'from keras.premade_models import linear\n'), ((3052, 3072), 'keras.premade_models.linear.LinearModel', 'linear.LinearModel', ([], {}), '()\n', (3070, 3072), False, 'from keras.premade_models import linear\n'), ((3159, 3178), 'keras.layers.core.Dense', 'core.Dense', ([], {'units': '(1)'}), '(units=1)\n', (3169, 3178), False, 'from keras.layers import core\n'), ((3984, 4027), 'tensorflow.compat.v2.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (4025, 4027), True, 'import tensorflow.compat.v2 as tf\n'), ((4131, 4146), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (4138, 4146), True, 'import numpy as np\n'), ((4520, 4540), 'numpy.random.randint', 'np.random.randint', (['(3)'], {}), '(3)\n', (4537, 4540), True, 'import numpy as np\n'), ((6421, 6444), 'numpy.where', 'np.where', (['(data == vocab)'], {}), '(data == vocab)\n', (6429, 6444), True, 'import numpy as np\n'), ((2348, 2373), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[None, 1]'], {}), '([None, 1])\n', (2362, 2373), True, 'import tensorflow.compat.v2 as tf\n'), ((2380, 2405), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[None, 1]'], {}), '([None, 1])\n', (2394, 2405), True, 'import tensorflow.compat.v2 as tf\n'), ((4632, 4669), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)'}), '(low=-5.0, high=5.0)\n', (4649, 4669), True, 'import numpy as np\n'), ((5618, 5635), 'tensorflow.compat.v2.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5633, 5635), True, 'import tensorflow.compat.v2 as tf\n'), ((6476, 6538), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.01)', 'high': '(0.01)', 'size': 'indices[0].shape'}), '(low=-0.01, high=0.01, size=indices[0].shape)\n', (6493, 6538), True, 'import numpy as 
np\n'), ((4836, 4873), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)'}), '(low=-5.0, high=5.0)\n', (4853, 4873), True, 'import numpy as np\n'), ((5067, 5104), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)'}), '(low=-5.0, high=5.0)\n', (5084, 5104), True, 'import numpy as np\n'), ((5129, 5166), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)'}), '(low=-5.0, high=5.0)\n', (5146, 5166), True, 'import numpy as np\n'), ((5714, 5755), 'keras.losses.mean_squared_error', 'losses.mean_squared_error', (['target', 'output'], {}), '(target, output)\n', (5739, 5755), False, 'from keras import losses\n')] |
import unittest
import mock
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer.functions.connection import convolution_2d
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
    'c_contiguous': [True, False],
    'cover_all': [True, False],
}))
class TestConvolution2DFunction(unittest.TestCase):
    """Forward-consistency (CPU vs GPU) and numerical-gradient tests for
    `functions.convolution_2d`, parameterized over memory layout
    (`c_contiguous`) and the `cover_all` output-coverage flag."""

    def setUp(self, use_cudnn=True):
        in_channels = 3
        out_channels = 2
        kh, kw = (3, 3)
        self.stride = 2
        self.pad = 1
        self.use_cudnn = use_cudnn
        # Weights scaled by 1/sqrt(fan_in) to keep outputs in a sane range.
        self.W = numpy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels)),
            (out_channels, in_channels, kh, kw)).astype(numpy.float32)
        self.b = numpy.random.uniform(
            -1, 1, out_channels).astype(numpy.float32)

        self.x = numpy.random.uniform(-1, 1,
                                      (2, 3, 4, 3)).astype(numpy.float32)
        if self.cover_all:
            # cover_all=True yields one extra output row for this input
            # size/stride, hence the (.., 3, ..) upstream-gradient shape.
            self.gy = numpy.random.uniform(-1, 1,
                                           (2, 2, 3, 2)).astype(numpy.float32)
        else:
            self.gy = numpy.random.uniform(-1, 1,
                                           (2, 2, 2, 2)).astype(numpy.float32)

    @attr.cudnn
    def test_forward_consistency(self, nobias=False):
        """The CPU and GPU forward passes must produce matching outputs."""
        x_cpu = chainer.Variable(self.x)
        W_cpu = chainer.Variable(self.W)
        b_cpu = None if nobias else chainer.Variable(self.b)
        y_cpu = functions.convolution_2d(
            x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
            use_cudnn=self.use_cudnn, cover_all=self.cover_all)

        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        W_gpu = chainer.Variable(cuda.to_gpu(self.W))
        b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
        y_gpu = functions.convolution_2d(
            x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
            use_cudnn=self.use_cudnn, cover_all=self.cover_all)

        gradient_check.assert_allclose(y_cpu.data, y_gpu.data.get())

    @attr.gpu
    def test_forward_consistency_im2col(self):
        """Same consistency check via the im2col (non-cuDNN) GPU path."""
        self.use_cudnn = False
        self.test_forward_consistency()

    @attr.gpu
    def test_forward_consistency_im2col_nobias(self):
        """im2col consistency check with the bias term disabled."""
        self.use_cudnn = False
        self.test_forward_consistency(nobias=True)

    def check_backward(self, x_data, W_data, b_data, y_grad):
        """Numerically verify gradients, optionally on non-contiguous data."""
        xp = cuda.get_array_module(x_data)
        if not self.c_contiguous:
            # Exercise the non-C-contiguous code paths explicitly.
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(y_grad.flags.c_contiguous)
            if b_data is not None:
                # A 1-D array can't be Fortran-ordered into non-contiguity;
                # use a strided view of a double-sized buffer instead.
                b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)

        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)

        gradient_check.check_backward(
            convolution_2d.Convolution2DFunction(
                self.stride, self.pad, self.use_cudnn, self.cover_all),
            args, y_grad, eps=1e-2)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.W, self.b, self.gy)

    @condition.retry(3)
    def test_backward_cpu_nobias(self):
        self.check_backward(self.x, self.W, None, self.gy)

    @attr.cudnn
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            cuda.to_gpu(self.b), cuda.to_gpu(self.gy))

    @attr.cudnn
    @condition.retry(3)
    def test_backward_gpu_nobias(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            None, cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_im2col(self):
        self.use_cudnn = False
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            cuda.to_gpu(self.b), cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_im2col_nobias(self):
        self.use_cudnn = False
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            None, cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
    'use_cudnn': [True, False],
}))
@attr.cudnn
class TestConvolution2DCudnnCall(unittest.TestCase):
    """Verifies (via mocks) that the cuDNN kernels are invoked exactly when
    `use_cudnn` is True, for both forward and backward passes."""

    def setUp(self):
        in_channels = 3
        out_channels = 2
        kh, kw = (3, 3)
        self.stride = 2
        self.pad = 1
        self.x = cuda.cupy.random.uniform(
            -1, 1, (2, 3, 4, 3)).astype(numpy.float32)
        self.W = cuda.cupy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels)),
            (out_channels, in_channels, kh, kw)).astype(numpy.float32)
        self.gy = cuda.cupy.random.uniform(
            -1, 1, (2, 2, 2, 2)).astype(numpy.float32)

    def forward(self):
        """Run one bias-free convolution on the GPU test data."""
        x = chainer.Variable(self.x)
        W = chainer.Variable(self.W)
        return functions.convolution_2d(
            x, W, None, stride=self.stride, pad=self.pad,
            use_cudnn=self.use_cudnn)

    def test_call_cudnn_forward(self):
        with mock.patch('cupy.cudnn.cudnn.convolutionForward') as func:
            self.forward()
            self.assertEqual(func.called, self.use_cudnn)

    # NOTE(review): method name has a typo ("backrward"); kept as-is since
    # renaming would change which tests external tooling selects by name.
    def test_call_cudnn_backrward(self):
        y = self.forward()
        y.grad = self.gy
        # Different cuDNN versions expose either the _v2 or _v3 entry point;
        # accept a call to either one.
        v2 = 'cupy.cudnn.cudnn.convolutionBackwardData_v2'
        v3 = 'cupy.cudnn.cudnn.convolutionBackwardData_v3'
        with mock.patch(v2) as func_v2, mock.patch(v3) as func_v3:
            y.backward()
            self.assertEqual(func_v2.called or func_v3.called, self.use_cudnn)
testing.run_module(__name__, __file__)
| [
"chainer.Variable",
"chainer.testing.product",
"numpy.random.uniform",
"chainer.cuda.cupy.random.uniform",
"chainer.functions.convolution_2d",
"chainer.cuda.get_array_module",
"mock.patch",
"chainer.functions.connection.convolution_2d.Convolution2DFunction",
"chainer.cuda.to_gpu",
"chainer.testing... | [((6038, 6076), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (6056, 6076), False, 'from chainer import testing\n'), ((3417, 3435), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (3432, 3435), False, 'from chainer.testing import condition\n'), ((3536, 3554), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (3551, 3554), False, 'from chainer.testing import condition\n'), ((3676, 3694), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (3691, 3694), False, 'from chainer.testing import condition\n'), ((3891, 3909), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (3906, 3909), False, 'from chainer.testing import condition\n'), ((4096, 4114), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (4111, 4114), False, 'from chainer.testing import condition\n'), ((4347, 4365), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (4362, 4365), False, 'from chainer.testing import condition\n'), ((1426, 1450), 'chainer.Variable', 'chainer.Variable', (['self.x'], {}), '(self.x)\n', (1442, 1450), False, 'import chainer\n'), ((1467, 1491), 'chainer.Variable', 'chainer.Variable', (['self.W'], {}), '(self.W)\n', (1483, 1491), False, 'import chainer\n'), ((1569, 1705), 'chainer.functions.convolution_2d', 'functions.convolution_2d', (['x_cpu', 'W_cpu', 'b_cpu'], {'stride': 'self.stride', 'pad': 'self.pad', 'use_cudnn': 'self.use_cudnn', 'cover_all': 'self.cover_all'}), '(x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.\n pad, use_cudnn=self.use_cudnn, cover_all=self.cover_all)\n', (1593, 1705), False, 'from chainer import functions\n'), ((1925, 2061), 'chainer.functions.convolution_2d', 'functions.convolution_2d', (['x_gpu', 'W_gpu', 'b_gpu'], {'stride': 'self.stride', 'pad': 'self.pad', 'use_cudnn': 'self.use_cudnn', 'cover_all': 
'self.cover_all'}), '(x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.\n pad, use_cudnn=self.use_cudnn, cover_all=self.cover_all)\n', (1949, 2061), False, 'from chainer import functions\n'), ((2512, 2541), 'chainer.cuda.get_array_module', 'cuda.get_array_module', (['x_data'], {}), '(x_data)\n', (2533, 2541), False, 'from chainer import cuda\n'), ((328, 404), 'chainer.testing.product', 'testing.product', (["{'c_contiguous': [True, False], 'cover_all': [True, False]}"], {}), "({'c_contiguous': [True, False], 'cover_all': [True, False]})\n", (343, 404), False, 'from chainer import testing\n'), ((5257, 5281), 'chainer.Variable', 'chainer.Variable', (['self.x'], {}), '(self.x)\n', (5273, 5281), False, 'import chainer\n'), ((5294, 5318), 'chainer.Variable', 'chainer.Variable', (['self.W'], {}), '(self.W)\n', (5310, 5318), False, 'import chainer\n'), ((5334, 5434), 'chainer.functions.convolution_2d', 'functions.convolution_2d', (['x', 'W', 'None'], {'stride': 'self.stride', 'pad': 'self.pad', 'use_cudnn': 'self.use_cudnn'}), '(x, W, None, stride=self.stride, pad=self.pad,\n use_cudnn=self.use_cudnn)\n', (5358, 5434), False, 'from chainer import functions\n'), ((4595, 4640), 'chainer.testing.product', 'testing.product', (["{'use_cudnn': [True, False]}"], {}), "({'use_cudnn': [True, False]})\n", (4610, 4640), False, 'from chainer import testing\n'), ((1528, 1552), 'chainer.Variable', 'chainer.Variable', (['self.b'], {}), '(self.b)\n', (1544, 1552), False, 'import chainer\n'), ((1760, 1779), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1771, 1779), False, 'from chainer import cuda\n'), ((1814, 1833), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.W'], {}), '(self.W)\n', (1825, 1833), False, 'from chainer import cuda\n'), ((3265, 3360), 'chainer.functions.connection.convolution_2d.Convolution2DFunction', 'convolution_2d.Convolution2DFunction', (['self.stride', 'self.pad', 'self.use_cudnn', 'self.cover_all'], {}), '(self.stride, self.pad, 
self.use_cudnn,\n self.cover_all)\n', (3301, 3360), False, 'from chainer.functions.connection import convolution_2d\n'), ((3756, 3775), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (3767, 3775), False, 'from chainer import cuda\n'), ((3777, 3796), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.W'], {}), '(self.W)\n', (3788, 3796), False, 'from chainer import cuda\n'), ((3826, 3845), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.b'], {}), '(self.b)\n', (3837, 3845), False, 'from chainer import cuda\n'), ((3847, 3867), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.gy'], {}), '(self.gy)\n', (3858, 3867), False, 'from chainer import cuda\n'), ((3978, 3997), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (3989, 3997), False, 'from chainer import cuda\n'), ((3999, 4018), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.W'], {}), '(self.W)\n', (4010, 4018), False, 'from chainer import cuda\n'), ((4054, 4074), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.gy'], {}), '(self.gy)\n', (4065, 4074), False, 'from chainer import cuda\n'), ((4214, 4233), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (4225, 4233), False, 'from chainer import cuda\n'), ((4235, 4254), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.W'], {}), '(self.W)\n', (4246, 4254), False, 'from chainer import cuda\n'), ((4284, 4303), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.b'], {}), '(self.b)\n', (4295, 4303), False, 'from chainer import cuda\n'), ((4305, 4325), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.gy'], {}), '(self.gy)\n', (4316, 4325), False, 'from chainer import cuda\n'), ((4472, 4491), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (4483, 4491), False, 'from chainer import cuda\n'), ((4493, 4512), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.W'], {}), '(self.W)\n', (4504, 4512), False, 'from chainer import cuda\n'), ((4548, 4568), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.gy'], {}), '(self.gy)\n', 
(4559, 4568), False, 'from chainer import cuda\n'), ((5509, 5558), 'mock.patch', 'mock.patch', (['"""cupy.cudnn.cudnn.convolutionForward"""'], {}), "('cupy.cudnn.cudnn.convolutionForward')\n", (5519, 5558), False, 'import mock\n'), ((5878, 5892), 'mock.patch', 'mock.patch', (['v2'], {}), '(v2)\n', (5888, 5892), False, 'import mock\n'), ((5905, 5919), 'mock.patch', 'mock.patch', (['v3'], {}), '(v3)\n', (5915, 5919), False, 'import mock\n'), ((843, 884), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'out_channels'], {}), '(-1, 1, out_channels)\n', (863, 884), False, 'import numpy\n'), ((938, 979), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(2, 3, 4, 3)'], {}), '(-1, 1, (2, 3, 4, 3))\n', (958, 979), False, 'import numpy\n'), ((1888, 1907), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.b'], {}), '(self.b)\n', (1899, 1907), False, 'from chainer import cuda\n'), ((4871, 4916), 'chainer.cuda.cupy.random.uniform', 'cuda.cupy.random.uniform', (['(-1)', '(1)', '(2, 3, 4, 3)'], {}), '(-1, 1, (2, 3, 4, 3))\n', (4895, 4916), False, 'from chainer import cuda\n'), ((5140, 5185), 'chainer.cuda.cupy.random.uniform', 'cuda.cupy.random.uniform', (['(-1)', '(1)', '(2, 2, 2, 2)'], {}), '(-1, 1, (2, 2, 2, 2))\n', (5164, 5185), False, 'from chainer import cuda\n'), ((713, 754), 'numpy.sqrt', 'numpy.sqrt', (['(1.0 / (kh * kw * in_channels))'], {}), '(1.0 / (kh * kw * in_channels))\n', (723, 754), False, 'import numpy\n'), ((1089, 1130), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(2, 2, 3, 2)'], {}), '(-1, 1, (2, 2, 3, 2))\n', (1109, 1130), False, 'import numpy\n'), ((1232, 1273), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(2, 2, 2, 2)'], {}), '(-1, 1, (2, 2, 2, 2))\n', (1252, 1273), False, 'import numpy\n'), ((5009, 5050), 'numpy.sqrt', 'numpy.sqrt', (['(1.0 / (kh * kw * in_channels))'], {}), '(1.0 / (kh * kw * in_channels))\n', (5019, 5050), False, 'import numpy\n')] |
from numpy import genfromtxt
import numpy as np
import os
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import matplotlib as mpl
import math
from scipy.signal import savgol_filter
# Plot regret/NashConv curves for several PSRO meta-solvers (DO = double
# oracle, FP = fictitious play, PRD) in Leduc Poker, comparing regret measured
# under different metrics (NE-based, uniform-based, PRD-based). Mean curves
# are drawn with a +/- one-standard-deviation shaded band across runs.
# yhat = savgol_filter(y, 51, 2) # window size 51, polynomial order 3
# Savitzky-Golay smoothing parameters (used by the commented-out loaders below).
window_size = 55
order = 2
# dqn_abs = savgol_filter(genfromtxt('./data/dqn_abs.csv', delimiter=','), window_size, order)
# dqn_do = savgol_filter(genfromtxt('./data/dqn_do.csv', delimiter=','), window_size, order)
# dqn_fic = savgol_filter(genfromtxt('./data/dqn_fic.csv', delimiter=','), window_size, order)
# dqn_prd = savgol_filter(genfromtxt('./data/dqn_prd.csv', delimiter=','), window_size, order)
# dqn_do_uniform = savgol_filter(genfromtxt('./data/dqn_do_uniform.csv', delimiter=','), window_size, order)
# dqn_abs = genfromtxt('./data/dqn_abs.csv', delimiter=',')
# dqn_do = genfromtxt('./data/dqn_do.csv', delimiter=',')
# dqn_fic = genfromtxt('./data/dqn_fic.csv', delimiter=',')
# dqn_prd = genfromtxt('./data/dqn_prd.csv', delimiter=',')
# dqn_do_uniform = genfromtxt('./data/dqn_do_uniform.csv', delimiter=',')
# axes = plt.gca()
# axes.set_ylim([0.5,4])
# x = np.arange(1, len(dqn_abs)+1)
# plt.plot(x, dqn_abs, '-C1', label= "HBS")
# plt.plot(x, dqn_do, '-C5', label= "DO")
# plt.plot(x, dqn_fic, '-C4', label= "Uniform")
# plt.plot(x, dqn_prd, '-C3', label= "PRD")
# plt.plot(x, dqn_do_uniform, '-C2', label= "DO+Unifrom")
#
#
# plt.xlabel("Number of Iterations")
# plt.ylabel("NashConv")
# plt.title("Average NashConv over 10 runs in Leduc Poker")
# plt.legend(loc="best")
# plt.show()
################### Draw different NashConvs ##########################
# deepmind_fic = savgol_filter(genfromtxt('./data/2Nash_merged_csv/deepmind_fic.csv', delimiter=','), window_size, order)
# Mike_fic = savgol_filter(genfromtxt('./data/2Nash_merged_csv/Mike_fic.csv', delimiter=','), window_size, order)
# dqn_do = savgol_filter(genfromtxt('./data/2Nash_merged_csv/dqn_do.csv', delimiter=','), window_size, order)
# Load mean and std curves (one value per PSRO iteration) for each
# solver/metric combination.
deepmind_fic_mean = genfromtxt('./data/2Nash_merged_csv/dqn_fic_deepmind_mean.csv', delimiter=',')
Mike_fic_mean = genfromtxt('./data/2Nash_merged_csv/dqn_fic_Mike_mean.csv', delimiter=',')
dqn_do_mean = genfromtxt('./data/2Nash_merged_csv/dqn_DO_mean.csv', delimiter=',')
deepmind_prd_mean = genfromtxt('./data/2Nash_merged_csv/dqn_prd_deepmind_mean.csv', delimiter=',')
Mike_prd_mean = genfromtxt('./data/2Nash_merged_csv/dqn_prd_Mike_mean.csv', delimiter=',')
deepmind_fic_std = genfromtxt('./data/2Nash_merged_csv/dqn_fic_deepmind_std.csv', delimiter=',')
Mike_fic_std = genfromtxt('./data/2Nash_merged_csv/dqn_fic_Mike_std.csv', delimiter=',')
dqn_do_std = genfromtxt('./data/2Nash_merged_csv/dqn_DO_std.csv', delimiter=',')
deepmind_prd_std = genfromtxt('./data/2Nash_merged_csv/dqn_prd_deepmind_std.csv', delimiter=',')
Mike_prd_std = genfromtxt('./data/2Nash_merged_csv/dqn_prd_Mike_std.csv', delimiter=',')
dqn_do_prd_prd_mean = genfromtxt('./data/2Nash_merged_csv/dqn_do_prd_prd_mean.csv', delimiter=',')
dqn_do_prd_prd_std = genfromtxt('./data/2Nash_merged_csv/dqn_do_prd_prd_std.csv', delimiter=',')
axes = plt.gca()
axes.set_ylim([0.5,2])
# NOTE(review): assumes each series has exactly 150 iterations — confirm
# against the CSV lengths (a mismatch will raise in plt.plot below).
x = np.arange(1, 151)
plt.plot(x, dqn_do_mean, '-b', label= "NE-based regret of DO")
plt.fill_between(x, dqn_do_mean+dqn_do_std, dqn_do_mean-dqn_do_std, alpha=0.1, color="b")
plt.plot(x, deepmind_fic_mean, '-C2', label= "uniform-based regret of FP")
plt.fill_between(x, deepmind_fic_mean+deepmind_fic_std, deepmind_fic_mean-deepmind_fic_std, alpha=0.1, color="C2")
#
plt.plot(x, Mike_fic_mean, '-C1', label= "NE-based regret of FP")
plt.fill_between(x, Mike_fic_mean+Mike_fic_std, Mike_fic_mean-Mike_fic_std, alpha=0.1, color="C1")
# plt.plot(x, deepmind_prd_mean, '-C2', label= "PRD-based regret of PRD")
# plt.fill_between(x, deepmind_prd_mean+deepmind_prd_std, deepmind_prd_mean-deepmind_prd_std, alpha=0.1, color="C2")
#
# plt.plot(x, Mike_prd_mean, '-C1', label= "NE-based regret of PRD")
# plt.fill_between(x, Mike_prd_mean+Mike_prd_std, Mike_prd_mean-Mike_prd_std, alpha=0.1, color="C1")
# plt.plot(x, dqn_do_prd_prd_mean, '-C5', label= "PRD-based regret of DO")
# plt.fill_between(x, dqn_do_prd_prd_mean+dqn_do_prd_prd_std, dqn_do_prd_prd_mean-dqn_do_prd_prd_std, alpha=0.1, color="C5")
# Axis cosmetics for the final figure.
plt.xticks(size = 17)
plt.yticks(size = 17)
plt.xlabel('Number of Iterations', fontsize = 22)
plt.ylabel('Regret', fontsize = 19)
# plt.xlabel("Number of Iterations")
# plt.ylabel("NashConv")
# plt.title("NashConvs under Different Metrics")
plt.legend(loc="best", prop={'size': 16})
plt.show()
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"numpy.genfromtxt",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xticks",
"matplotlib.pyplo... | [((76, 99), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (90, 99), False, 'import matplotlib\n'), ((2033, 2111), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_fic_deepmind_mean.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_fic_deepmind_mean.csv', delimiter=',')\n", (2043, 2111), False, 'from numpy import genfromtxt\n'), ((2128, 2202), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_fic_Mike_mean.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_fic_Mike_mean.csv', delimiter=',')\n", (2138, 2202), False, 'from numpy import genfromtxt\n'), ((2217, 2285), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_DO_mean.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_DO_mean.csv', delimiter=',')\n", (2227, 2285), False, 'from numpy import genfromtxt\n'), ((2306, 2384), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_prd_deepmind_mean.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_prd_deepmind_mean.csv', delimiter=',')\n", (2316, 2384), False, 'from numpy import genfromtxt\n'), ((2401, 2475), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_prd_Mike_mean.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_prd_Mike_mean.csv', delimiter=',')\n", (2411, 2475), False, 'from numpy import genfromtxt\n'), ((2496, 2573), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_fic_deepmind_std.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_fic_deepmind_std.csv', delimiter=',')\n", (2506, 2573), False, 'from numpy import genfromtxt\n'), ((2589, 2662), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_fic_Mike_std.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_fic_Mike_std.csv', delimiter=',')\n", (2599, 2662), False, 'from numpy import genfromtxt\n'), ((2676, 2743), 
'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_DO_std.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_DO_std.csv', delimiter=',')\n", (2686, 2743), False, 'from numpy import genfromtxt\n'), ((2763, 2840), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_prd_deepmind_std.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_prd_deepmind_std.csv', delimiter=',')\n", (2773, 2840), False, 'from numpy import genfromtxt\n'), ((2856, 2929), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_prd_Mike_std.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_prd_Mike_std.csv', delimiter=',')\n", (2866, 2929), False, 'from numpy import genfromtxt\n'), ((2953, 3029), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_do_prd_prd_mean.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_do_prd_prd_mean.csv', delimiter=',')\n", (2963, 3029), False, 'from numpy import genfromtxt\n'), ((3051, 3126), 'numpy.genfromtxt', 'genfromtxt', (['"""./data/2Nash_merged_csv/dqn_do_prd_prd_std.csv"""'], {'delimiter': '""","""'}), "('./data/2Nash_merged_csv/dqn_do_prd_prd_std.csv', delimiter=',')\n", (3061, 3126), False, 'from numpy import genfromtxt\n'), ((3135, 3144), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3142, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3173, 3190), 'numpy.arange', 'np.arange', (['(1)', '(151)'], {}), '(1, 151)\n', (3182, 3190), True, 'import numpy as np\n'), ((3191, 3252), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'dqn_do_mean', '"""-b"""'], {'label': '"""NE-based regret of DO"""'}), "(x, dqn_do_mean, '-b', label='NE-based regret of DO')\n", (3199, 3252), True, 'import matplotlib.pyplot as plt\n'), ((3254, 3351), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(dqn_do_mean + dqn_do_std)', '(dqn_do_mean - dqn_do_std)'], {'alpha': '(0.1)', 'color': '"""b"""'}), "(x, dqn_do_mean + dqn_do_std, 
dqn_do_mean - dqn_do_std,\n alpha=0.1, color='b')\n", (3270, 3351), True, 'import matplotlib.pyplot as plt\n'), ((3345, 3418), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'deepmind_fic_mean', '"""-C2"""'], {'label': '"""uniform-based regret of FP"""'}), "(x, deepmind_fic_mean, '-C2', label='uniform-based regret of FP')\n", (3353, 3418), True, 'import matplotlib.pyplot as plt\n'), ((3420, 3542), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(deepmind_fic_mean + deepmind_fic_std)', '(deepmind_fic_mean - deepmind_fic_std)'], {'alpha': '(0.1)', 'color': '"""C2"""'}), "(x, deepmind_fic_mean + deepmind_fic_std, deepmind_fic_mean -\n deepmind_fic_std, alpha=0.1, color='C2')\n", (3436, 3542), True, 'import matplotlib.pyplot as plt\n'), ((3537, 3601), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'Mike_fic_mean', '"""-C1"""'], {'label': '"""NE-based regret of FP"""'}), "(x, Mike_fic_mean, '-C1', label='NE-based regret of FP')\n", (3545, 3601), True, 'import matplotlib.pyplot as plt\n'), ((3603, 3709), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(Mike_fic_mean + Mike_fic_std)', '(Mike_fic_mean - Mike_fic_std)'], {'alpha': '(0.1)', 'color': '"""C1"""'}), "(x, Mike_fic_mean + Mike_fic_std, Mike_fic_mean -\n Mike_fic_std, alpha=0.1, color='C1')\n", (3619, 3709), True, 'import matplotlib.pyplot as plt\n'), ((4268, 4287), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(17)'}), '(size=17)\n', (4278, 4287), True, 'import matplotlib.pyplot as plt\n'), ((4290, 4309), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(17)'}), '(size=17)\n', (4300, 4309), True, 'import matplotlib.pyplot as plt\n'), ((4313, 4360), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Iterations"""'], {'fontsize': '(22)'}), "('Number of Iterations', fontsize=22)\n", (4323, 4360), True, 'import matplotlib.pyplot as plt\n'), ((4363, 4396), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Regret"""'], {'fontsize': '(19)'}), "('Regret', fontsize=19)\n", 
(4373, 4396), True, 'import matplotlib.pyplot as plt\n'), ((4512, 4553), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'prop': "{'size': 16}"}), "(loc='best', prop={'size': 16})\n", (4522, 4553), True, 'import matplotlib.pyplot as plt\n'), ((4554, 4564), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4562, 4564), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from matplotlib import _api, ticker as mticker
from matplotlib.transforms import Bbox, Transform
from .clip_path import clip_line_to_rect
class ExtremeFinderSimple:
    """
    A helper class to figure out the range of grid lines that need to be drawn.
    """

    def __init__(self, nx, ny):
        """
        Parameters
        ----------
        nx, ny : int
            The number of samples in each direction.
        """
        self.nx = nx
        self.ny = ny

    def __call__(self, transform_xy, x1, y1, x2, y2):
        """
        Approximate the bounding box obtained by applying *transform_xy*
        to the box delimited by ``(x1, y1, x2, y2)``.

        ``(x1, y1, x2, y2)`` is typically the axes box in axes coordinates
        and *transform_xy* the axes->data transform; the return value is
        then the data-coordinate range spanned by the axes.

        The box is sampled at ``nx * ny`` equispaced points; the extremal
        transformed coordinates are found and padded by one sampling step
        (a relative 1/nx or 1/ny of the span) to account for the finite
        sampling density.
        """
        sample_x, sample_y = np.meshgrid(
            np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny))
        tx, ty = transform_xy(np.ravel(sample_x), np.ravel(sample_y))
        return self._add_pad(tx.min(), tx.max(), ty.min(), ty.max())

    def _add_pad(self, x_min, x_max, y_min, y_max):
        """Perform the padding mentioned in `__call__`."""
        pad_x = (x_max - x_min) / self.nx
        pad_y = (y_max - y_min) / self.ny
        return x_min - pad_x, x_max + pad_x, y_min - pad_y, y_max + pad_y
class GridFinder:
    """
    Compute grid lines (and their tick positions/labels on each axes side)
    for a curvilinear coordinate system defined by a transform.
    """

    def __init__(self,
                 transform,
                 extreme_finder=None,
                 grid_locator1=None,
                 grid_locator2=None,
                 tick_formatter1=None,
                 tick_formatter2=None):
        """
        transform : transform from the image coordinate (which will be
        the transData of the axes to the world coordinate.
        or transform = (transform_xy, inv_transform_xy)
        locator1, locator2 : grid locator for 1st and 2nd axis.
        """
        # Fall back to sensible defaults for any helper not supplied.
        if extreme_finder is None:
            extreme_finder = ExtremeFinderSimple(20, 20)
        if grid_locator1 is None:
            grid_locator1 = MaxNLocator()
        if grid_locator2 is None:
            grid_locator2 = MaxNLocator()
        if tick_formatter1 is None:
            tick_formatter1 = FormatterPrettyPrint()
        if tick_formatter2 is None:
            tick_formatter2 = FormatterPrettyPrint()
        self.extreme_finder = extreme_finder
        self.grid_locator1 = grid_locator1
        self.grid_locator2 = grid_locator2
        self.tick_formatter1 = tick_formatter1
        self.tick_formatter2 = tick_formatter2
        self.update_transform(transform)

    def get_grid_info(self, x1, y1, x2, y2):
        """
        lon_values, lat_values : list of grid values. if integer is given,
        rough number of grids in each direction.
        """
        # Data-coordinate extremes covered by the axes box (x1, y1, x2, y2).
        extremes = self.extreme_finder(self.inv_transform_xy, x1, y1, x2, y2)
        # min & max rage of lat (or lon) for each grid line will be drawn.
        # i.e., gridline of lon=0 will be drawn from lat_min to lat_max.
        lon_min, lon_max, lat_min, lat_max = extremes
        lon_levs, lon_n, lon_factor = self.grid_locator1(lon_min, lon_max)
        lat_levs, lat_n, lat_factor = self.grid_locator2(lat_min, lat_max)
        # Locators return scaled levels; divide by the factor to recover
        # the actual coordinate values.
        lon_values = lon_levs[:lon_n] / lon_factor
        lat_values = lat_levs[:lat_n] / lat_factor
        lon_lines, lat_lines = self._get_raw_grid_lines(lon_values,
                                                        lat_values,
                                                        lon_min, lon_max,
                                                        lat_min, lat_max)
        # Slightly expanded clip box, so lines lying exactly on the boundary
        # are not dropped due to floating-point error.
        ddx = (x2-x1)*1.e-10
        ddy = (y2-y1)*1.e-10
        bb = Bbox.from_extents(x1-ddx, y1-ddy, x2+ddx, y2+ddy)
        grid_info = {
            "extremes": extremes,
            "lon_lines": lon_lines,
            "lat_lines": lat_lines,
            "lon": self._clip_grid_lines_and_find_ticks(
                lon_lines, lon_values, lon_levs, bb),
            "lat": self._clip_grid_lines_and_find_ticks(
                lat_lines, lat_values, lat_levs, bb),
        }
        # Format tick labels for each of the four axes sides.
        tck_labels = grid_info["lon"]["tick_labels"] = {}
        for direction in ["left", "bottom", "right", "top"]:
            levs = grid_info["lon"]["tick_levels"][direction]
            tck_labels[direction] = self.tick_formatter1(
                direction, lon_factor, levs)
        tck_labels = grid_info["lat"]["tick_labels"] = {}
        for direction in ["left", "bottom", "right", "top"]:
            levs = grid_info["lat"]["tick_levels"][direction]
            tck_labels[direction] = self.tick_formatter2(
                direction, lat_factor, levs)
        return grid_info

    def _get_raw_grid_lines(self,
                            lon_values, lat_values,
                            lon_min, lon_max, lat_min, lat_max):
        # Sample each grid line with 100 points along the free coordinate.
        lons_i = np.linspace(lon_min, lon_max, 100)  # for interpolation
        lats_i = np.linspace(lat_min, lat_max, 100)
        lon_lines = [self.transform_xy(np.full_like(lats_i, lon), lats_i)
                     for lon in lon_values]
        lat_lines = [self.transform_xy(lons_i, np.full_like(lons_i, lat))
                     for lat in lat_values]
        return lon_lines, lat_lines

    def _clip_grid_lines_and_find_ticks(self, lines, values, levs, bb):
        # Clip each raw grid line to the (expanded) axes bbox *bb*, and record
        # where the clipped segments cross each of the four sides.
        # NOTE(review): gi["values"] is never populated here — confirm whether
        # downstream consumers rely on it.
        gi = {
            "values": [],
            "levels": [],
            "tick_levels": dict(left=[], bottom=[], right=[], top=[]),
            "tick_locs": dict(left=[], bottom=[], right=[], top=[]),
            "lines": [],
        }
        tck_levels = gi["tick_levels"]
        tck_locs = gi["tick_locs"]
        for (lx, ly), v, lev in zip(lines, values, levs):
            xy, tcks = clip_line_to_rect(lx, ly, bb)
            if not xy:
                continue  # line lies entirely outside the box
            gi["levels"].append(v)
            gi["lines"].append(xy)
            for tck, direction in zip(tcks,
                                      ["left", "bottom", "right", "top"]):
                for t in tck:
                    tck_levels[direction].append(lev)
                    tck_locs[direction].append(t)
        return gi

    def update_transform(self, aux_trans):
        """Set the transform: a `.Transform` instance or a
        ``(transform_xy, inv_transform_xy)`` pair of callables."""
        if not isinstance(aux_trans, Transform) and len(aux_trans) != 2:
            raise TypeError("'aux_trans' must be either a Transform instance "
                            "or a pair of callables")
        self._aux_transform = aux_trans

    def transform_xy(self, x, y):
        """Transform *x*, *y* from source to target coordinates."""
        aux_trf = self._aux_transform
        if isinstance(aux_trf, Transform):
            return aux_trf.transform(np.column_stack([x, y])).T
        else:
            transform_xy, inv_transform_xy = aux_trf
            return transform_xy(x, y)

    def inv_transform_xy(self, x, y):
        """Transform *x*, *y* from target back to source coordinates."""
        aux_trf = self._aux_transform
        if isinstance(aux_trf, Transform):
            return aux_trf.inverted().transform(np.column_stack([x, y])).T
        else:
            transform_xy, inv_transform_xy = aux_trf
            return inv_transform_xy(x, y)

    def update(self, **kw):
        """Update any of the helper components by keyword."""
        for k in kw:
            if k in ["extreme_finder",
                     "grid_locator1",
                     "grid_locator2",
                     "tick_formatter1",
                     "tick_formatter2"]:
                setattr(self, k, kw[k])
            else:
                raise ValueError("Unknown update property '%s'" % k)
class MaxNLocator(mticker.MaxNLocator):
    """Grid locator wrapping `.ticker.MaxNLocator` for use with GridFinder."""

    def __init__(self, nbins=10, steps=None, trim=True, integer=False,
                 symmetric=False, prune=None):
        # *trim* is accepted but ignored; it remains for API compatibility.
        super().__init__(nbins, steps=steps, integer=integer,
                         symmetric=symmetric, prune=prune)
        self.create_dummy_axis()
        self._factor = 1

    def __call__(self, v1, v2):
        """Return ``(locs, count, factor)`` for the scaled range [v1, v2]."""
        self.set_bounds(self._factor * v1, self._factor * v2)
        tick_locs = super().__call__()
        return np.array(tick_locs), len(tick_locs), self._factor

    @_api.deprecated("3.3")
    def set_factor(self, f):
        self._factor = f
class FixedLocator:
    """Grid locator returning a fixed, pre-specified set of tick values."""

    def __init__(self, locs):
        self._locs = locs
        self._factor = 1

    def __call__(self, v1, v2):
        """Return the subset of locs inside the scaled range [v1, v2]."""
        lo, hi = sorted((v1 * self._factor, v2 * self._factor))
        inside = np.array([loc for loc in self._locs if lo <= loc <= hi])
        return inside, len(inside), self._factor

    @_api.deprecated("3.3")
    def set_factor(self, f):
        self._factor = f
# Tick Formatter
class FormatterPrettyPrint:
    """Tick formatter delegating to `.ticker.ScalarFormatter`."""

    def __init__(self, useMathText=True):
        self._fmt = mticker.ScalarFormatter(useMathText=useMathText,
                                            useOffset=False)
        self._fmt.create_dummy_axis()

    def __call__(self, direction, factor, values):
        """Format *values*; *direction* and *factor* are ignored."""
        return self._fmt.format_ticks(values)
class DictFormatter:
    """Tick formatter that looks up labels in a dictionary."""

    def __init__(self, format_dict, formatter=None):
        """
        format_dict : dictionary for format strings to be used.
        formatter : fall-back formatter
        """
        super().__init__()
        self._format_dict = format_dict
        self._fallback_formatter = formatter

    def __call__(self, direction, factor, values):
        """
        factor is ignored if value is found in the dictionary
        """
        if self._fallback_formatter:
            defaults = self._fallback_formatter(direction, factor, values)
        else:
            defaults = len(values) * [""]
        labels = []
        for value, default in zip(values, defaults):
            labels.append(self._format_dict.get(value, default))
        return labels
| [
"numpy.full_like",
"numpy.ravel",
"matplotlib.transforms.Bbox.from_extents",
"matplotlib._api.deprecated",
"numpy.array",
"numpy.linspace",
"numpy.column_stack",
"matplotlib.ticker.ScalarFormatter"
] | [((8607, 8629), 'matplotlib._api.deprecated', '_api.deprecated', (['"""3.3"""'], {}), "('3.3')\n", (8622, 8629), False, 'from matplotlib import _api, ticker as mticker\n'), ((9001, 9023), 'matplotlib._api.deprecated', '_api.deprecated', (['"""3.3"""'], {}), "('3.3')\n", (9016, 9023), False, 'from matplotlib import _api, ticker as mticker\n'), ((4246, 4303), 'matplotlib.transforms.Bbox.from_extents', 'Bbox.from_extents', (['(x1 - ddx)', '(y1 - ddy)', '(x2 + ddx)', '(y2 + ddy)'], {}), '(x1 - ddx, y1 - ddy, x2 + ddx, y2 + ddy)\n', (4263, 4303), False, 'from matplotlib.transforms import Bbox, Transform\n'), ((5423, 5457), 'numpy.linspace', 'np.linspace', (['lon_min', 'lon_max', '(100)'], {}), '(lon_min, lon_max, 100)\n', (5434, 5457), True, 'import numpy as np\n'), ((5496, 5530), 'numpy.linspace', 'np.linspace', (['lat_min', 'lat_max', '(100)'], {}), '(lat_min, lat_max, 100)\n', (5507, 5530), True, 'import numpy as np\n'), ((8899, 8949), 'numpy.array', 'np.array', (['[l for l in self._locs if v1 <= l <= v2]'], {}), '([l for l in self._locs if v1 <= l <= v2])\n', (8907, 8949), True, 'import numpy as np\n'), ((9188, 9253), 'matplotlib.ticker.ScalarFormatter', 'mticker.ScalarFormatter', ([], {'useMathText': 'useMathText', 'useOffset': '(False)'}), '(useMathText=useMathText, useOffset=False)\n', (9211, 9253), True, 'from matplotlib import _api, ticker as mticker\n'), ((1477, 1505), 'numpy.linspace', 'np.linspace', (['x1', 'x2', 'self.nx'], {}), '(x1, x2, self.nx)\n', (1488, 1505), True, 'import numpy as np\n'), ((1507, 1535), 'numpy.linspace', 'np.linspace', (['y1', 'y2', 'self.ny'], {}), '(y1, y2, self.ny)\n', (1518, 1535), True, 'import numpy as np\n'), ((1567, 1578), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (1575, 1578), True, 'import numpy as np\n'), ((1580, 1591), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (1588, 1591), True, 'import numpy as np\n'), ((8561, 8575), 'numpy.array', 'np.array', (['locs'], {}), '(locs)\n', (8569, 8575), True, 'import 
numpy as np\n'), ((5571, 5596), 'numpy.full_like', 'np.full_like', (['lats_i', 'lon'], {}), '(lats_i, lon)\n', (5583, 5596), True, 'import numpy as np\n'), ((5697, 5722), 'numpy.full_like', 'np.full_like', (['lons_i', 'lat'], {}), '(lons_i, lat)\n', (5709, 5722), True, 'import numpy as np\n'), ((7140, 7163), 'numpy.column_stack', 'np.column_stack', (['[x, y]'], {}), '([x, y])\n', (7155, 7163), True, 'import numpy as np\n'), ((7440, 7463), 'numpy.column_stack', 'np.column_stack', (['[x, y]'], {}), '([x, y])\n', (7455, 7463), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2019 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser, OptionGroup
import glob
import json
import os
import pdb
import shutil
import sys
from natsort import natsorted
import numpy as np
import pandas as pd
from scipy.stats import wilcoxon, ttest_rel
import matplotlib.pyplot as plt
import seaborn as sns
import slurm
"""
basenji_test_folds.py
Train Basenji model replicates using given parameters and data.
"""
################################################################################
# main
################################################################################
def main():
  """Launch SLURM jobs to evaluate trained fold/cross replicates (optionally
  on the train split and for specificity), then read the per-replicate
  accuracy files, summarize the chosen metric, and compare the experiment
  against a reference directory with paired statistical tests."""
  # NOTE(review): the usage string lists <exp_dir> as a positional argument,
  # but the experiment directory is actually taken from -o below.
  usage = 'usage: %prog [options] <exp_dir> <params_file> <data1_dir> ...'
  parser = OptionParser(usage)
  parser.add_option('-a', '--alt', dest='alternative',
      default='two-sided', help='Statistical test alternative [Default: %default]')
  parser.add_option('-c', dest='crosses',
      default=1, type='int',
      help='Number of cross-fold rounds [Default:%default]')
  parser.add_option('-d', dest='dataset_i',
      default=None, type='int',
      help='Dataset index [Default:%default]')
  parser.add_option('--d_ref', dest='dataset_ref_i',
      default=None, type='int',
      help='Reference Dataset index [Default:%default]')
  parser.add_option('-e', dest='conda_env',
      default='tf2.6',
      help='Anaconda environment [Default: %default]')
  parser.add_option('-f', dest='fold_subset',
      default=None, type='int',
      help='Run a subset of folds [Default:%default]')
  parser.add_option('--label_exp', dest='label_exp',
      default='Experiment', help='Experiment label [Default: %default]')
  parser.add_option('--label_ref', dest='label_ref',
      default='Reference', help='Reference label [Default: %default]')
  parser.add_option('-m', dest='metric',
      default=None, help='Train/test metric [Default: Pearsonr or AUPRC]')
  parser.add_option('--name', dest='name',
      default='test', help='SLURM name prefix [Default: %default]')
  parser.add_option('-o', dest='exp_dir',
      default=None, help='Output experiment directory [Default: %default]')
  parser.add_option('-p', dest='out_stem',
      default=None, help='Output plot stem [Default: %default]')
  parser.add_option('-q', dest='queue',
      default='geforce')
  parser.add_option('-r', dest='ref_dir',
      default=None, help='Reference directory for statistical tests')
  parser.add_option('--rc', dest='rc',
      default=False, action='store_true',
      help='Average forward and reverse complement predictions [Default: %default]')
  parser.add_option('--shifts', dest='shifts',
      default='0', type='str',
      help='Ensemble prediction shifts [Default: %default]')
  parser.add_option('--spec', dest='specificity',
      default=False, action='store_true',
      help='Test specificity [Default: %default]')
  parser.add_option('--spec_step', dest='spec_step',
      default=1, type='int',
      help='Positional step for specificity predict [Default: %default]')
  parser.add_option('--status', dest='status',
      default=False, action='store_true',
      help='Update metric status; do not run jobs [Default: %default]')
  parser.add_option('--train', dest='train',
      default=False, action='store_true',
      help='Test on the training set, too [Default: %default]')
  (options, args) = parser.parse_args()
  if len(args) < 2:
    parser.error('Must provide parameters file and data directory')
  else:
    params_file = args[0]
    data_dirs = [os.path.abspath(arg) for arg in args[1:]]
  # using -o for required argument for compatibility with the training script
  # NOTE(review): assert is stripped under `python -O`; parser.error would be
  # a more robust required-option check.
  assert(options.exp_dir is not None)
  # read data parameters
  data_stats_file = '%s/statistics.json' % data_dirs[0]
  with open(data_stats_file) as data_stats_open:
    data_stats = json.load(data_stats_open)
  # model head index: dataset index when given, otherwise the first head
  if options.dataset_i is None:
    head_i = 0
  else:
    head_i = options.dataset_i
  # count folds
  num_folds = len([dkey for dkey in data_stats if dkey.startswith('fold')])
  # subset folds
  if options.fold_subset is not None:
    num_folds = min(options.fold_subset, num_folds)
  # SLURM resources: CPU-only on the 'standard' queue, otherwise one GPU
  if options.queue == 'standard':
    num_cpu = 8
    num_gpu = 0
    time_base = 24
  else:
    num_cpu = 2
    num_gpu = 1
    time_base = 6
  ################################################################
  # test check
  ################################################################
  jobs = []
  if options.train:
    for ci in range(options.crosses):
      for fi in range(num_folds):
        it_dir = '%s/f%dc%d' % (options.exp_dir, fi, ci)
        if options.dataset_i is None:
          out_dir = '%s/test_train' % it_dir
          model_file = '%s/train/model_check.h5' % it_dir
        else:
          out_dir = '%s/test%d_train' % (it_dir, options.dataset_i)
          model_file = '%s/train/model%d_check.h5' % (it_dir, options.dataset_i)
        # check if done
        acc_file = '%s/acc.txt' % out_dir
        if os.path.isfile(acc_file):
          # print('%s already generated.' % acc_file)
          pass
        else:
          # evaluate the checkpoint model on the train split
          cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
          cmd += ' conda activate %s;' % options.conda_env
          cmd += ' basenji_test.py'
          cmd += ' --head %d' % head_i
          cmd += ' -o %s' % out_dir
          if options.rc:
            cmd += ' --rc'
          if options.shifts:
            cmd += ' --shifts %s' % options.shifts
          cmd += ' --split train'
          cmd += ' %s' % params_file
          cmd += ' %s' % model_file
          cmd += ' %s/data%d' % (it_dir, head_i)
          name = '%s-testtr-f%dc%d' % (options.name, fi, ci)
          j = slurm.Job(cmd,
                        name=name,
                        out_file='%s.out'%out_dir,
                        err_file='%s.err'%out_dir,
                        queue=options.queue,
                        cpu=num_cpu, gpu=num_gpu,
                        mem=23000,
                        time='%d:00:00' % (2*time_base))
          jobs.append(j)
  ################################################################
  # test best
  ################################################################
  for ci in range(options.crosses):
    for fi in range(num_folds):
      it_dir = '%s/f%dc%d' % (options.exp_dir, fi, ci)
      if options.dataset_i is None:
        out_dir = '%s/test' % it_dir
        model_file = '%s/train/model_best.h5' % it_dir
      else:
        out_dir = '%s/test%d' % (it_dir, options.dataset_i)
        model_file = '%s/train/model%d_best.h5' % (it_dir, options.dataset_i)
      # check if done
      acc_file = '%s/acc.txt' % out_dir
      if os.path.isfile(acc_file):
        # print('%s already generated.' % acc_file)
        pass
      else:
        # basenji test
        cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
        cmd += ' conda activate %s;' % options.conda_env
        cmd += ' basenji_test.py'
        cmd += ' --head %d' % head_i
        cmd += ' -o %s' % out_dir
        if options.rc:
          cmd += ' --rc'
        if options.shifts:
          cmd += ' --shifts %s' % options.shifts
        cmd += ' %s' % params_file
        cmd += ' %s' % model_file
        cmd += ' %s/data%d' % (it_dir, head_i)
        name = '%s-test-f%dc%d' % (options.name, fi, ci)
        j = slurm.Job(cmd,
                      name=name,
                      out_file='%s.out'%out_dir,
                      err_file='%s.err'%out_dir,
                      queue=options.queue,
                      cpu=num_cpu, gpu=num_gpu,
                      mem=23000,
                      time='%d:00:00' % time_base)
        jobs.append(j)
  ################################################################
  # test best specificity
  ################################################################
  if options.specificity:
    for ci in range(options.crosses):
      for fi in range(num_folds):
        it_dir = '%s/f%dc%d' % (options.exp_dir, fi, ci)
        if options.dataset_i is None:
          out_dir = '%s/test_spec' % it_dir
          model_file = '%s/train/model_best.h5' % it_dir
        else:
          out_dir = '%s/test%d_spec' % (it_dir, options.dataset_i)
          model_file = '%s/train/model%d_best.h5' % (it_dir, options.dataset_i)
        # check if done
        acc_file = '%s/acc.txt' % out_dir
        if os.path.isfile(acc_file):
          # print('%s already generated.' % acc_file)
          pass
        else:
          # basenji test
          cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
          cmd += ' conda activate %s;' % options.conda_env
          cmd += ' basenji_test_specificity.py'
          cmd += ' --head %d' % head_i
          cmd += ' -o %s' % out_dir
          cmd += ' -s %d' % options.spec_step
          if options.rc:
            cmd += ' --rc'
          if options.shifts:
            cmd += ' --shifts %s' % options.shifts
          cmd += ' %s' % params_file
          cmd += ' %s' % model_file
          cmd += ' %s/data%d' % (it_dir, head_i)
          name = '%s-spec-f%dc%d' % (options.name, fi, ci)
          j = slurm.Job(cmd,
                        name=name,
                        out_file='%s.out'%out_dir,
                        err_file='%s.err'%out_dir,
                        queue=options.queue,
                        cpu=num_cpu, gpu=num_gpu,
                        mem=90000,
                        time='%d:00:00' % (3*time_base))
          jobs.append(j)
  # block until all evaluation jobs complete (unless only reporting status)
  if not options.status:
    slurm.multi_run(jobs, verbose=True)
  if options.dataset_i is None:
    test_prefix = 'test'
  else:
    test_prefix = 'test%d' % options.dataset_i
  if options.dataset_ref_i is None:
    test_ref_prefix = 'test'
  else:
    test_ref_prefix = 'test%d' % options.dataset_ref_i
  # classification or regression
  if options.metric is None:
    with open('%s/f0c0/%s/acc.txt' % (options.exp_dir,test_prefix)) as test0_open:
      header = test0_open.readline().split()
    if 'pearsonr' in header:
      options.metric = 'pearsonr'
    else:
      options.metric = 'auprc'
  ################################################################
  # compare checkpoint on training set
  ################################################################
  # NOTE(review): in each comparison section below, ref_cors/mwp/tp and the
  # jointplot call are only defined when -r was given; supplying -p without
  # -r raises NameError. The "Mann-Whitney U p-value" label also reports what
  # stat_tests computes via scipy's wilcoxon (a Wilcoxon signed-rank test) —
  # confirm the intended wording.
  if options.train:
    exp_glob_str = '%s/*/%s_train/acc.txt' % (options.exp_dir, test_prefix)
    exp_cors, exp_mean, exp_stdm = read_metrics(exp_glob_str, options.metric)
    if options.ref_dir is not None:
      ref_glob_str = '%s/*/%s_train/acc.txt' % (options.ref_dir, test_ref_prefix)
      ref_cors, ref_mean, ref_stdm = read_metrics(ref_glob_str, options.metric)
      mwp, tp = stat_tests(ref_cors, exp_cors, options.alternative)
    print('\nTrain (%d reps):' % len(exp_cors))
    print('%12s %s: %.4f (%.4f)' % (options.label_exp, options.metric, exp_mean, exp_stdm))
    if options.ref_dir is not None:
      print('%12s %s: %.4f (%.4f)' % (options.label_ref, options.metric, ref_mean, ref_stdm))
      print('Mann-Whitney U p-value: %.3g' % mwp)
      print('T-test p-value: %.3g' % tp)
    if options.out_stem is not None:
      jointplot(ref_cors, exp_cors,
                '%s_train.pdf' % options.out_stem,
                options.label_ref, options.label_exp)
  ################################################################
  # compare best on test set
  ################################################################
  exp_glob_str = '%s/*/%s/acc.txt' % (options.exp_dir, test_prefix)
  exp_cors, exp_mean, exp_stdm = read_metrics(exp_glob_str, options.metric)
  if options.ref_dir is not None:
    ref_glob_str = '%s/*/%s/acc.txt' % (options.ref_dir, test_ref_prefix)
    ref_cors, ref_mean, ref_stdm = read_metrics(ref_glob_str, options.metric)
    mwp, tp = stat_tests(ref_cors, exp_cors, options.alternative)
  print('\nTest (%d reps):' % len(exp_cors))
  print('%12s %s: %.4f (%.4f)' % (options.label_exp, options.metric, exp_mean, exp_stdm))
  if options.ref_dir is not None:
    print('%12s %s: %.4f (%.4f)' % (options.label_ref, options.metric, ref_mean, ref_stdm))
    print('Mann-Whitney U p-value: %.3g' % mwp)
    print('T-test p-value: %.3g' % tp)
  if options.out_stem is not None:
    jointplot(ref_cors, exp_cors,
              '%s_test.pdf' % options.out_stem,
              options.label_ref, options.label_exp)
  ################################################################
  # compare best on test set specificity
  ################################################################
  if options.specificity:
    exp_glob_str = '%s/*/%s_spec/acc.txt' % (options.exp_dir, test_prefix)
    exp_cors, exp_mean, exp_stdm = read_metrics(exp_glob_str, options.metric)
    if options.ref_dir is not None:
      ref_glob_str = '%s/*/%s_spec/acc.txt' % (options.ref_dir, test_ref_prefix)
      ref_cors, ref_mean, ref_stdm = read_metrics(ref_glob_str, options.metric)
      mwp, tp = stat_tests(ref_cors, exp_cors, options.alternative)
    print('\nSpecificity:')
    print('%12s %s: %.4f (%.4f)' % (options.label_exp, options.metric, exp_mean, exp_stdm))
    if options.ref_dir is not None:
      print('%12s %s: %.4f (%.4f)' % (options.label_ref, options.metric, ref_mean, ref_stdm))
      print('Mann-Whitney U p-value: %.3g' % mwp)
      print('T-test p-value: %.3g' % tp)
    if options.out_stem is not None:
      jointplot(ref_cors, exp_cors,
                '%s_spec.pdf' % options.out_stem,
                options.label_ref, options.label_exp)
def jointplot(ref_cors, exp_cors, out_pdf, label1, label2):
vmin = min(np.min(ref_cors), np.min(exp_cors))
vmax = max(np.max(ref_cors), np.max(exp_cors))
vspan = vmax - vmin
vbuf = vspan * 0.1
vmin -= vbuf
vmax += vbuf
g = sns.jointplot(ref_cors, exp_cors, space=0)
eps = 0.05
g.ax_joint.text(1-eps, eps, 'Mean: %.4f' % np.mean(ref_cors),
horizontalalignment='right', transform=g.ax_joint.transAxes)
g.ax_joint.text(eps, 1-eps, 'Mean: %.4f' % np.mean(exp_cors),
verticalalignment='top', transform=g.ax_joint.transAxes)
g.ax_joint.plot([vmin,vmax], [vmin,vmax], linestyle='--', color='orange')
g.ax_joint.set_xlabel(label1)
g.ax_joint.set_ylabel(label2)
plt.tight_layout(w_pad=0, h_pad=0)
plt.savefig(out_pdf)
def read_metrics(acc_glob_str, metric='pearsonr'):
rep_cors = []
acc_files = natsorted(glob.glob(acc_glob_str))
for acc_file in acc_files:
try:
# tf2 version
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
rep_cors.append(acc_df.loc[:,metric].mean())
except:
# tf1 version
cors = []
for line in open(acc_file):
a = line.split()
cors.append(float(a[3]))
rep_cors.append(np.mean(cors))
cors_mean = np.mean(rep_cors)
cors_stdm = np.std(rep_cors) / np.sqrt(len(rep_cors))
return rep_cors, cors_mean, cors_stdm
def stat_tests(ref_cors, exp_cors, alternative):
# hack for the common situtation where I have more reference
# crosses than experiment crosses
if len(ref_cors) == 2*len(exp_cors):
ref_cors = [ref_cors[i] for i in range(len(ref_cors)) if i % 2 == 0]
_, mwp = wilcoxon(exp_cors, ref_cors, alternative=alternative)
tt, tp = ttest_rel(exp_cors, ref_cors)
if alternative == 'less':
if tt <= 0:
tp /= 2
else:
tp = 1 - (1-tp)/2
elif alternative == 'greater':
if tt >= 0:
tp /= 2
else:
tp = 1 - (1-tp)/2
return mwp, tp
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| [
"os.path.abspath",
"json.load",
"optparse.OptionParser",
"scipy.stats.ttest_rel",
"numpy.std",
"pandas.read_csv",
"slurm.multi_run",
"numpy.min",
"numpy.mean",
"numpy.max",
"os.path.isfile",
"seaborn.jointplot",
"glob.glob",
"slurm.Job",
"scipy.stats.wilcoxon",
"matplotlib.pyplot.tight... | [((1333, 1352), 'optparse.OptionParser', 'OptionParser', (['usage'], {}), '(usage)\n', (1345, 1352), False, 'from optparse import OptionParser, OptionGroup\n'), ((14335, 14377), 'seaborn.jointplot', 'sns.jointplot', (['ref_cors', 'exp_cors'], {'space': '(0)'}), '(ref_cors, exp_cors, space=0)\n', (14348, 14377), True, 'import seaborn as sns\n'), ((14792, 14826), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'w_pad': '(0)', 'h_pad': '(0)'}), '(w_pad=0, h_pad=0)\n', (14808, 14826), True, 'import matplotlib.pyplot as plt\n'), ((14829, 14849), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_pdf'], {}), '(out_pdf)\n', (14840, 14849), True, 'import matplotlib.pyplot as plt\n'), ((15332, 15349), 'numpy.mean', 'np.mean', (['rep_cors'], {}), '(rep_cors)\n', (15339, 15349), True, 'import numpy as np\n'), ((15721, 15774), 'scipy.stats.wilcoxon', 'wilcoxon', (['exp_cors', 'ref_cors'], {'alternative': 'alternative'}), '(exp_cors, ref_cors, alternative=alternative)\n', (15729, 15774), False, 'from scipy.stats import wilcoxon, ttest_rel\n'), ((15786, 15815), 'scipy.stats.ttest_rel', 'ttest_rel', (['exp_cors', 'ref_cors'], {}), '(exp_cors, ref_cors)\n', (15795, 15815), False, 'from scipy.stats import wilcoxon, ttest_rel\n'), ((4426, 4452), 'json.load', 'json.load', (['data_stats_open'], {}), '(data_stats_open)\n', (4435, 4452), False, 'import json\n'), ((10156, 10191), 'slurm.multi_run', 'slurm.multi_run', (['jobs'], {'verbose': '(True)'}), '(jobs, verbose=True)\n', (10171, 10191), False, 'import slurm\n'), ((14170, 14186), 'numpy.min', 'np.min', (['ref_cors'], {}), '(ref_cors)\n', (14176, 14186), True, 'import numpy as np\n'), ((14188, 14204), 'numpy.min', 'np.min', (['exp_cors'], {}), '(exp_cors)\n', (14194, 14204), True, 'import numpy as np\n'), ((14219, 14235), 'numpy.max', 'np.max', (['ref_cors'], {}), '(ref_cors)\n', (14225, 14235), True, 'import numpy as np\n'), ((14237, 14253), 'numpy.max', 'np.max', (['exp_cors'], {}), 
'(exp_cors)\n', (14243, 14253), True, 'import numpy as np\n'), ((14943, 14966), 'glob.glob', 'glob.glob', (['acc_glob_str'], {}), '(acc_glob_str)\n', (14952, 14966), False, 'import glob\n'), ((15364, 15380), 'numpy.std', 'np.std', (['rep_cors'], {}), '(rep_cors)\n', (15370, 15380), True, 'import numpy as np\n'), ((4119, 4139), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (4134, 4139), False, 'import os\n'), ((7295, 7319), 'os.path.isfile', 'os.path.isfile', (['acc_file'], {}), '(acc_file)\n', (7309, 7319), False, 'import os\n'), ((14437, 14454), 'numpy.mean', 'np.mean', (['ref_cors'], {}), '(ref_cors)\n', (14444, 14454), True, 'import numpy as np\n'), ((14566, 14583), 'numpy.mean', 'np.mean', (['exp_cors'], {}), '(exp_cors)\n', (14573, 14583), True, 'import numpy as np\n'), ((15041, 15085), 'pandas.read_csv', 'pd.read_csv', (['acc_file'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(acc_file, sep='\\t', index_col=0)\n", (15052, 15085), True, 'import pandas as pd\n'), ((5588, 5612), 'os.path.isfile', 'os.path.isfile', (['acc_file'], {}), '(acc_file)\n', (5602, 5612), False, 'import os\n'), ((7955, 8134), 'slurm.Job', 'slurm.Job', (['cmd'], {'name': 'name', 'out_file': "('%s.out' % out_dir)", 'err_file': "('%s.err' % out_dir)", 'queue': 'options.queue', 'cpu': 'num_cpu', 'gpu': 'num_gpu', 'mem': '(23000)', 'time': "('%d:00:00' % time_base)"}), "(cmd, name=name, out_file='%s.out' % out_dir, err_file='%s.err' %\n out_dir, queue=options.queue, cpu=num_cpu, gpu=num_gpu, mem=23000, time\n ='%d:00:00' % time_base)\n", (7964, 8134), False, 'import slurm\n'), ((8994, 9018), 'os.path.isfile', 'os.path.isfile', (['acc_file'], {}), '(acc_file)\n', (9008, 9018), False, 'import os\n'), ((6307, 6492), 'slurm.Job', 'slurm.Job', (['cmd'], {'name': 'name', 'out_file': "('%s.out' % out_dir)", 'err_file': "('%s.err' % out_dir)", 'queue': 'options.queue', 'cpu': 'num_cpu', 'gpu': 'num_gpu', 'mem': '(23000)', 'time': "('%d:00:00' % (2 * time_base))"}), "(cmd, name=name, 
out_file='%s.out' % out_dir, err_file='%s.err' %\n out_dir, queue=options.queue, cpu=num_cpu, gpu=num_gpu, mem=23000, time\n ='%d:00:00' % (2 * time_base))\n", (6316, 6492), False, 'import slurm\n'), ((9748, 9933), 'slurm.Job', 'slurm.Job', (['cmd'], {'name': 'name', 'out_file': "('%s.out' % out_dir)", 'err_file': "('%s.err' % out_dir)", 'queue': 'options.queue', 'cpu': 'num_cpu', 'gpu': 'num_gpu', 'mem': '(90000)', 'time': "('%d:00:00' % (3 * time_base))"}), "(cmd, name=name, out_file='%s.out' % out_dir, err_file='%s.err' %\n out_dir, queue=options.queue, cpu=num_cpu, gpu=num_gpu, mem=90000, time\n ='%d:00:00' % (3 * time_base))\n", (9757, 9933), False, 'import slurm\n'), ((15300, 15313), 'numpy.mean', 'np.mean', (['cors'], {}), '(cors)\n', (15307, 15313), True, 'import numpy as np\n')] |
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
import gzip
import os
import pdb
import subprocess
import sys
import tempfile
import numpy as np
import pandas as pd
import pysam
import basenji.dna_io
"""vcf.py
Methods and classes to support .vcf SNP analysis.
"""
def cap_allele(allele, cap=5):
""" Cap the length of an allele in the figures """
if len(allele) > cap:
allele = allele[:cap] + '*'
return allele
def intersect_seqs_snps(vcf_file, gene_seqs, vision_p=1):
""" Intersect a VCF file with a list of sequence coordinates.
In
vcf_file:
gene_seqs: list of GeneSeq's
vision_p: proportion of sequences visible to center genes.
Out
seqs_snps: list of list mapping segment indexes to overlapping SNP indexes
"""
# print segments to BED
# hash segments to indexes
seq_temp = tempfile.NamedTemporaryFile()
seq_bed_file = seq_temp.name
seq_bed_out = open(seq_bed_file, 'w')
seq_indexes = {}
for si in range(len(gene_seqs)):
gs = gene_seqs[si]
gene_seq_key = (gs.chrom, gs.start)
seq_indexes[gene_seq_key] = si
print('%s\t%d\t%d' % (gs.chrom, gs.start, gs.end), file=seq_bed_out)
seq_bed_out.close()
# hash SNPs to indexes
snp_indexes = {}
si = 0
vcf_in = open(vcf_file)
line = vcf_in.readline()
while line[0] == '#':
line = vcf_in.readline()
while line:
a = line.split()
snp_id = a[2]
if snp_id in snp_indexes:
raise Exception('Duplicate SNP id %s will break the script' % snp_id)
snp_indexes[snp_id] = si
si += 1
line = vcf_in.readline()
vcf_in.close()
# initialize list of lists
seqs_snps = []
for _ in range(len(gene_seqs)):
seqs_snps.append([])
# intersect
p = subprocess.Popen(
'bedtools intersect -wo -a %s -b %s' % (vcf_file, seq_bed_file),
shell=True,
stdout=subprocess.PIPE)
for line in p.stdout:
line = line.decode('UTF-8')
a = line.split()
pos = int(a[1])
snp_id = a[2]
seq_chrom = a[-4]
seq_start = int(a[-3])
seq_end = int(a[-2])
seq_key = (seq_chrom, seq_start)
vision_buffer = (seq_end - seq_start) * (1 - vision_p) // 2
if seq_start + vision_buffer < pos < seq_end - vision_buffer:
seqs_snps[seq_indexes[seq_key]].append(snp_indexes[snp_id])
p.communicate()
return seqs_snps
def intersect_snps_seqs(vcf_file, seq_coords, vision_p=1):
""" Intersect a VCF file with a list of sequence coordinates.
In
vcf_file:
seq_coords: list of sequence coordinates
vision_p: proportion of sequences visible to center genes.
Out
snp_segs: list of list mapping SNP indexes to overlapping sequence indexes
"""
# print segments to BED
# hash segments to indexes
seg_temp = tempfile.NamedTemporaryFile()
seg_bed_file = seg_temp.name
seg_bed_out = open(seg_bed_file, 'w')
segment_indexes = {}
for si in range(len(seq_coords)):
segment_indexes[seq_coords[si]] = si
print('%s\t%d\t%d' % seq_coords[si], file=seg_bed_out)
seg_bed_out.close()
# hash SNPs to indexes
snp_indexes = {}
si = 0
vcf_in = open(vcf_file)
line = vcf_in.readline()
while line[0] == '#':
line = vcf_in.readline()
while line:
a = line.split()
snp_id = a[2]
if snp_id in snp_indexes:
raise Exception('Duplicate SNP id %s will break the script' % snp_id)
snp_indexes[snp_id] = si
si += 1
line = vcf_in.readline()
vcf_in.close()
# initialize list of lists
snp_segs = []
for i in range(len(snp_indexes)):
snp_segs.append([])
# intersect
p = subprocess.Popen(
'bedtools intersect -wo -a %s -b %s' % (vcf_file, seg_bed_file),
shell=True,
stdout=subprocess.PIPE)
for line in p.stdout:
line = line.decode('UTF-8')
a = line.split()
pos = int(a[1])
snp_id = a[2]
seg_chrom = a[-4]
seg_start = int(a[-3])
seg_end = int(a[-2])
seg_key = (seg_chrom, seg_start, seg_end)
vision_buffer = (seg_end - seg_start) * (1 - vision_p) // 2
if seg_start + vision_buffer < pos < seg_end - vision_buffer:
snp_segs[snp_indexes[snp_id]].append(segment_indexes[seg_key])
p.communicate()
return snp_segs
def snp_seq1(snp, seq_len, genome_open):
""" Produce a one hot coded sequences for a SNP.
Attrs:
snp [SNP] :
seq_len (int) : sequence length to code
genome_open (File) : open genome FASTA file
Return:
seq_vecs_list [array] : list of one hot coded sequences surrounding the
SNP
"""
left_len = seq_len // 2 - 1
right_len = seq_len // 2
# initialize one hot coded vector list
seq_vecs_list = []
# specify positions in GFF-style 1-based
seq_start = snp.pos - left_len
seq_end = snp.pos + right_len + max(0,
len(snp.ref_allele) - snp.longest_alt())
# extract sequence as BED style
if seq_start < 0:
seq = 'N'*(1-seq_start) + genome_open.fetch(snp.chr, 0, seq_end).upper()
else:
seq = genome_open.fetch(snp.chr, seq_start - 1, seq_end).upper()
# extend to full length
if len(seq) < seq_end - seq_start:
seq += 'N' * (seq_end - seq_start - len(seq))
# verify that ref allele matches ref sequence
seq_ref = seq[left_len:left_len + len(snp.ref_allele)]
ref_found = True
if seq_ref != snp.ref_allele:
# search for reference allele in alternatives
ref_found = False
# for each alternative allele
for alt_al in snp.alt_alleles:
# grab reference sequence matching alt length
seq_ref_alt = seq[left_len:left_len + len(alt_al)]
if seq_ref_alt == alt_al:
# found it!
ref_found = True
# warn user
print(
'WARNING: %s - alt (as opposed to ref) allele matches reference genome; changing reference genome to match.'
% (snp.rsid),
file=sys.stderr)
# remove alt allele and include ref allele
seq = seq[:left_len] + snp.ref_allele + seq[left_len + len(alt_al):]
break
if not ref_found:
print('WARNING: %s - reference genome does not match any allele' % snp.rsid, file=sys.stderr)
else:
# one hot code ref allele
seq_vecs_ref, seq_ref = dna_length_1hot(seq, seq_len)
seq_vecs_list.append(seq_vecs_ref)
for alt_al in snp.alt_alleles:
# remove ref allele and include alt allele
seq_alt = seq[:left_len] + alt_al + seq[left_len + len(snp.ref_allele):]
# one hot code
seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len)
seq_vecs_list.append(seq_vecs_alt)
return seq_vecs_list
def snps_seq1(snps, seq_len, genome_fasta, return_seqs=False):
""" Produce an array of one hot coded sequences for a list of SNPs.
Attrs:
snps [SNP] : list of SNPs
seq_len (int) : sequence length to code
genome_fasta (str) : genome FASTA file
Return:
seq_vecs (array) : one hot coded sequences surrounding the SNPs
seq_headers [str] : headers for sequences
seq_snps [SNP] : list of used SNPs
"""
left_len = seq_len // 2 - 1
right_len = seq_len // 2
# initialize one hot coded vector list
seq_vecs_list = []
# save successful SNPs
seq_snps = []
# save sequence strings, too
seqs = []
# name sequences
seq_headers = []
# open genome FASTA
genome_open = pysam.Fastafile(genome_fasta)
for snp in snps:
# specify positions in GFF-style 1-based
seq_start = snp.pos - left_len
seq_end = snp.pos + right_len + max(0,
len(snp.ref_allele) - snp.longest_alt())
# extract sequence as BED style
if seq_start < 0:
seq = 'N' * (-seq_start) + genome_open.fetch(snp.chr, 0,
seq_end).upper()
else:
seq = genome_open.fetch(snp.chr, seq_start - 1, seq_end).upper()
# extend to full length
if len(seq) < seq_end - seq_start:
seq += 'N' * (seq_end - seq_start - len(seq))
# verify that ref allele matches ref sequence
seq_ref = seq[left_len:left_len + len(snp.ref_allele)]
if seq_ref != snp.ref_allele:
# search for reference allele in alternatives
ref_found = False
# for each alternative allele
for alt_al in snp.alt_alleles:
# grab reference sequence matching alt length
seq_ref_alt = seq[left_len:left_len + len(alt_al)]
if seq_ref_alt == alt_al:
# found it!
ref_found = True
# warn user
print(
'WARNING: %s - alt (as opposed to ref) allele matches reference genome; changing reference genome to match.'
% (snp.rsid),
file=sys.stderr)
# remove alt allele and include ref allele
seq = seq[:left_len] + snp.ref_allele + seq[left_len + len(alt_al):]
break
if not ref_found:
print(
'WARNING: %s - reference genome does not match any allele; skipping'
% (snp.rsid),
file=sys.stderr)
continue
seq_snps.append(snp)
# one hot code ref allele
seq_vecs_ref, seq_ref = dna_length_1hot(seq, seq_len)
seq_vecs_list.append(seq_vecs_ref)
if return_seqs:
seqs.append(seq_ref)
# name ref allele
seq_headers.append('%s_%s' % (snp.rsid, cap_allele(snp.ref_allele)))
for alt_al in snp.alt_alleles:
# remove ref allele and include alt allele
seq_alt = seq[:left_len] + alt_al + seq[left_len + len(snp.ref_allele):]
# one hot code
seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len)
seq_vecs_list.append(seq_vecs_alt)
if return_seqs:
seqs.append(seq_alt) # not using right now
# name
seq_headers.append('%s_%s' % (snp.rsid, cap_allele(alt_al)))
# convert to array
seq_vecs = np.array(seq_vecs_list)
if return_seqs:
return seq_vecs, seq_headers, seq_snps, seqs
else:
return seq_vecs, seq_headers, seq_snps
def snps2_seq1(snps, seq_len, genome1_fasta, genome2_fasta, return_seqs=False):
""" Produce an array of one hot coded sequences for a list of SNPs.
Attrs:
snps [SNP] : list of SNPs
seq_len (int) : sequence length to code
genome_fasta (str) : major allele genome FASTA file
genome2_fasta (str) : minor allele genome FASTA file
Return:
seq_vecs (array) : one hot coded sequences surrounding the SNPs
seq_headers [str] : headers for sequences
seq_snps [SNP] : list of used SNPs
"""
left_len = seq_len // 2 - 1
right_len = seq_len // 2
# open genome FASTA
genome1 = pysam.Fastafile(genome1_fasta)
genome2 = pysam.Fastafile(genome2_fasta)
# initialize one hot coded vector list
seq_vecs_list = []
# save successful SNPs
seq_snps = []
# save sequence strings, too
seqs = []
# name sequences
seq_headers = []
for snp in snps:
if len(snp.alt_alleles) > 1:
raise Exception(
'Major/minor genome mode requires only two alleles: %s' % snp.rsid)
alt_al = snp.alt_alleles[0]
# specify positions in GFF-style 1-based
seq_start = snp.pos - left_len
seq_end = snp.pos + right_len + len(snp.ref_allele)
# extract sequence as BED style
if seq_start < 0:
seq_ref = 'N' * (-seq_start) + genome1.fetch(snp.chr, 0,
seq_end).upper()
else:
seq_ref = genome1.fetch(snp.chr, seq_start - 1, seq_end).upper()
# extend to full length
if len(seq_ref) < seq_end - seq_start:
seq_ref += 'N' * (seq_end - seq_start - len(seq_ref))
# verify that ref allele matches ref sequence
seq_ref_snp = seq_ref[left_len:left_len + len(snp.ref_allele)]
if seq_ref_snp != snp.ref_allele:
raise Exception(
'WARNING: Major allele SNP %s doesnt match reference genome: %s vs %s'
% (snp.rsid, snp.ref_allele, seq_ref_snp))
# specify positions in GFF-style 1-based
seq_start = snp.pos2 - left_len
seq_end = snp.pos2 + right_len + len(alt_al)
# extract sequence as BED style
if seq_start < 0:
seq_alt = 'N' * (-seq_start) + genome2.fetch(snp.chr, 0,
seq_end).upper()
else:
seq_alt = genome2.fetch(snp.chr, seq_start - 1, seq_end).upper()
# extend to full length
if len(seq_alt) < seq_end - seq_start:
seq_alt += 'N' * (seq_end - seq_start - len(seq_alt))
# verify that ref allele matches ref sequence
seq_alt_snp = seq_alt[left_len:left_len + len(alt_al)]
if seq_alt_snp != alt_al:
raise Exception(
'WARNING: Minor allele SNP %s doesnt match reference genome: %s vs %s'
% (snp.rsid, snp.alt_alleles[0], seq_alt_snp))
seq_snps.append(snp)
# one hot code ref allele
seq_vecs_ref, seq_ref = dna_length_1hot(seq_ref, seq_len)
seq_vecs_list.append(seq_vecs_ref)
if return_seqs:
seqs.append(seq_ref)
# name ref allele
seq_headers.append('%s_%s' % (snp.rsid, cap_allele(snp.ref_allele)))
# one hot code alt allele
seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len)
seq_vecs_list.append(seq_vecs_alt)
if return_seqs:
seqs.append(seq_alt)
# name
seq_headers.append('%s_%s' % (snp.rsid, cap_allele(alt_al)))
# convert to array
seq_vecs = np.array(seq_vecs_list)
if return_seqs:
return seq_vecs, seq_headers, seq_snps, seqs
else:
return seq_vecs, seq_headers, seq_snps
def dna_length_1hot(seq, length):
""" Adjust the sequence length and compute
a 1hot coding. """
if length < len(seq):
# trim the sequence
seq_trim = (len(seq) - length) // 2
seq = seq[seq_trim:seq_trim + length]
elif length > len(seq):
# extend with N's
nfront = (length - len(seq)) // 2
nback = length - len(seq) - nfront
seq = 'N' * nfront + seq + 'N' * nback
seq_1hot = basenji.dna_io.dna_1hot(seq)
return seq_1hot, seq
def vcf_snps(vcf_file, require_sorted=False, validate_ref_fasta=None, flip_ref=False, pos2=False):
""" Load SNPs from a VCF file """
if vcf_file[-3:] == '.gz':
vcf_in = gzip.open(vcf_file, 'rt')
else:
vcf_in = open(vcf_file)
# read through header
line = vcf_in.readline()
while line[0] == '#':
line = vcf_in.readline()
# to check sorted
if require_sorted:
seen_chrs = set()
prev_chr = None
prev_pos = -1
# to check reference
if validate_ref_fasta is not None:
genome_open = pysam.Fastafile(validate_ref_fasta)
# read in SNPs
snps = []
while line:
snps.append(SNP(line, pos2))
if require_sorted:
if prev_chr is not None:
# same chromosome
if prev_chr == snps[-1].chr:
if snps[-1].pos < prev_pos:
print('Sorted VCF required. Mis-ordered position: %s' % line.rstrip(),
file=sys.stderr)
exit(1)
elif snps[-1].chr in seen_chrs:
print('Sorted VCF required. Mis-ordered chromosome: %s' % line.rstrip(),
file=sys.stderr)
exit(1)
seen_chrs.add(snps[-1].chr)
prev_chr = snps[-1].chr
prev_pos = snps[-1].pos
if validate_ref_fasta is not None:
ref_n = len(snps[-1].ref_allele)
snp_pos = snps[-1].pos-1
ref_snp = genome_open.fetch(snps[-1].chr, snp_pos, snp_pos+ref_n)
if snps[-1].ref_allele != ref_snp:
if not flip_ref:
# bail
print('ERROR: %s does not match reference %s' % (snps[-1], ref_snp), file=sys.stderr)
exit(1)
else:
alt_n = len(snps[-1].alt_alleles[0])
ref_snp = genome_open.fetch(snps[-1].chr, snp_pos, snp_pos+alt_n)
# if alt matches fasta reference
if snps[-1].alt_alleles[0] == ref_snp:
# flip alleles
snps[-1].flip_alleles()
else:
# bail
print('ERROR: %s does not match reference %s' % (snps[-1], ref_snp), file=sys.stderr)
exit(1)
line = vcf_in.readline()
vcf_in.close()
return snps
def vcf_sort(vcf_file):
# move
os.rename(vcf_file, '%s.tmp' % vcf_file)
# print header
vcf_out = open(vcf_file, 'w')
print('##fileformat=VCFv4.0', file=vcf_out)
vcf_out.close()
# sort
subprocess.call(
'bedtools sort -i %s.tmp >> %s' % (vcf_file, vcf_file), shell=True)
# clean
os.remove('%s.tmp' % vcf_file)
class SNP:
""" SNP
Represent SNPs read in from a VCF file
Attributes:
vcf_line (str)
"""
def __init__(self, vcf_line, pos2=False):
a = vcf_line.split()
if a[0].startswith('chr'):
self.chr = a[0]
else:
self.chr = 'chr%s' % a[0]
self.pos = int(a[1])
self.rsid = a[2]
self.ref_allele = a[3]
self.alt_alleles = a[4].split(',')
# self.alt_allele = self.alt_alleles[0]
self.flipped = False
if self.rsid == '.':
self.rsid = '%s:%d' % (self.chr, self.pos)
self.pos2 = None
if pos2:
self.pos2 = int(a[5])
def flip_alleles(self):
""" Flip reference and first alt allele."""
assert(len(self.alt_alleles) == 1)
self.ref_allele, self.alt_alleles[0] = self.alt_alleles[0], self.ref_allele
self.flipped = True
def get_alleles(self):
""" Return a list of all alleles """
alleles = [self.ref_allele] + self.alt_alleles
return alleles
def longest_alt(self):
""" Return the longest alt allele. """
return max([len(al) for al in self.alt_alleles])
def __str__(self):
return 'SNP(%s, %s:%d, %s/%s)' % (self.rsid, self.chr, self.pos,
self.ref_allele,
','.join(self.alt_alleles))
| [
"tempfile.NamedTemporaryFile",
"subprocess.Popen",
"os.remove",
"gzip.open",
"os.rename",
"numpy.array",
"subprocess.call",
"pysam.Fastafile"
] | [((1475, 1504), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1502, 1504), False, 'import tempfile\n'), ((2357, 2478), 'subprocess.Popen', 'subprocess.Popen', (["('bedtools intersect -wo -a %s -b %s' % (vcf_file, seq_bed_file))"], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "('bedtools intersect -wo -a %s -b %s' % (vcf_file,\n seq_bed_file), shell=True, stdout=subprocess.PIPE)\n", (2373, 2478), False, 'import subprocess\n'), ((3379, 3408), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3406, 3408), False, 'import tempfile\n'), ((4197, 4318), 'subprocess.Popen', 'subprocess.Popen', (["('bedtools intersect -wo -a %s -b %s' % (vcf_file, seg_bed_file))"], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "('bedtools intersect -wo -a %s -b %s' % (vcf_file,\n seg_bed_file), shell=True, stdout=subprocess.PIPE)\n", (4213, 4318), False, 'import subprocess\n'), ((7944, 7973), 'pysam.Fastafile', 'pysam.Fastafile', (['genome_fasta'], {}), '(genome_fasta)\n', (7959, 7973), False, 'import pysam\n'), ((10423, 10446), 'numpy.array', 'np.array', (['seq_vecs_list'], {}), '(seq_vecs_list)\n', (10431, 10446), True, 'import numpy as np\n'), ((11211, 11241), 'pysam.Fastafile', 'pysam.Fastafile', (['genome1_fasta'], {}), '(genome1_fasta)\n', (11226, 11241), False, 'import pysam\n'), ((11254, 11284), 'pysam.Fastafile', 'pysam.Fastafile', (['genome2_fasta'], {}), '(genome2_fasta)\n', (11269, 11284), False, 'import pysam\n'), ((13940, 13963), 'numpy.array', 'np.array', (['seq_vecs_list'], {}), '(seq_vecs_list)\n', (13948, 13963), True, 'import numpy as np\n'), ((16689, 16729), 'os.rename', 'os.rename', (['vcf_file', "('%s.tmp' % vcf_file)"], {}), "(vcf_file, '%s.tmp' % vcf_file)\n", (16698, 16729), False, 'import os\n'), ((16856, 16943), 'subprocess.call', 'subprocess.call', (["('bedtools sort -i %s.tmp >> %s' % (vcf_file, vcf_file))"], {'shell': '(True)'}), "('bedtools sort -i %s.tmp >> %s' % (vcf_file, 
vcf_file),\n shell=True)\n", (16871, 16943), False, 'import subprocess\n'), ((16960, 16990), 'os.remove', 'os.remove', (["('%s.tmp' % vcf_file)"], {}), "('%s.tmp' % vcf_file)\n", (16969, 16990), False, 'import os\n'), ((14737, 14762), 'gzip.open', 'gzip.open', (['vcf_file', '"""rt"""'], {}), "(vcf_file, 'rt')\n", (14746, 14762), False, 'import gzip\n'), ((15085, 15120), 'pysam.Fastafile', 'pysam.Fastafile', (['validate_ref_fasta'], {}), '(validate_ref_fasta)\n', (15100, 15120), False, 'import pysam\n')] |
from __future__ import absolute_import
import sys
sys.path.append('./')
import argparse
import os
import os.path as osp
import numpy as np
import math
import time
from PIL import Image, ImageFile
import torch
from torch import nn, optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
from config import get_args
from lib import datasets, evaluation_metrics, models
from lib.models.model_builder import ModelBuilder
from lib.datasets.dataset import LmdbDataset, AlignCollate
from lib.loss import SequenceCrossEntropyLoss
from lib.trainers import Trainer
from lib.evaluators import Evaluator
from lib.utils.logging import Logger, TFLogger
from lib.utils.serialization import load_checkpoint, save_checkpoint
from lib.utils.osutils import make_symlink_if_not_exists
from lib.evaluation_metrics.metrics import get_str_list
from lib.utils.labelmaps import get_vocabulary, labels2strs
global_args = get_args(sys.argv[1:])
def image_process(image_path, imgH=32, imgW=100, keep_ratio=False, min_ratio=1):
img = Image.open(image_path).convert('RGB')
if keep_ratio:
w, h = img.size
ratio = w / float(h)
imgW = int(np.floor(ratio * imgH))
imgW = max(imgH * min_ratio, imgW)
img = img.resize((imgW, imgH), Image.BILINEAR)
img = transforms.ToTensor()(img)
img.sub_(0.5).div_(0.5)
return img
class DataInfo(object):
"""
Save the info about the dataset.
This a code snippet from dataset.py
"""
def __init__(self, voc_type):
super(DataInfo, self).__init__()
self.voc_type = voc_type
assert voc_type in ['LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS']
self.EOS = 'EOS'
self.PADDING = 'PADDING'
self.UNKNOWN = 'UNKNOWN'
self.voc = get_vocabulary(voc_type, EOS=self.EOS, PADDING=self.PADDING, UNKNOWN=self.UNKNOWN)
self.char2id = dict(zip(self.voc, range(len(self.voc))))
self.id2char = dict(zip(range(len(self.voc)), self.voc))
self.rec_num_classes = len(self.voc)
def main(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
args.cuda = args.cuda and torch.cuda.is_available()
if args.cuda:
print('using cuda.')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
# Create data loaders
if args.height is None or args.width is None:
args.height, args.width = (32, 100)
dataset_info = DataInfo(args.voc_type)
# Create model
model = ModelBuilder(arch=args.arch, rec_num_classes=dataset_info.rec_num_classes,
sDim=args.decoder_sdim, attDim=args.attDim, max_len_labels=args.max_len,
eos=dataset_info.char2id[dataset_info.EOS], STN_ON=args.STN_ON)
# Load from checkpoint
if args.resume:
checkpoint = load_checkpoint(args.resume)
model.load_state_dict(checkpoint['state_dict'])
if args.cuda:
device = torch.device("cuda")
model = model.to(device)
model = nn.DataParallel(model)
# Evaluation
model.eval()
img = image_process(args.image_path)
with torch.no_grad():
img = img.to(device)
input_dict = {}
input_dict['images'] = img.unsqueeze(0)
# TODO: testing should be more clean.
# to be compatible with the lmdb-based testing, need to construct some meaningless variables.
rec_targets = torch.IntTensor(1, args.max_len).fill_(1)
rec_targets[:, args.max_len - 1] = dataset_info.char2id[dataset_info.EOS]
input_dict['rec_targets'] = rec_targets
input_dict['rec_lengths'] = [args.max_len]
output_dict = model(input_dict)
pred_rec = output_dict['output']['pred_rec']
pred_str, _ = get_str_list(pred_rec, input_dict['rec_targets'], dataset=dataset_info)
print('Recognition result: {0}'.format(pred_str[0]))
if __name__ == '__main__':
# parse the config
args = get_args(sys.argv[1:])
main(args)
| [
"numpy.random.seed",
"numpy.floor",
"torch.set_default_tensor_type",
"torch.device",
"torch.no_grad",
"sys.path.append",
"config.get_args",
"lib.evaluation_metrics.metrics.get_str_list",
"lib.models.model_builder.ModelBuilder",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.cuda.is_avai... | [((51, 72), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (66, 72), False, 'import sys\n'), ((959, 981), 'config.get_args', 'get_args', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (967, 981), False, 'from config import get_args\n'), ((2098, 2123), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2112, 2123), True, 'import numpy as np\n'), ((2128, 2156), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2145, 2156), False, 'import torch\n'), ((2161, 2194), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2183, 2194), False, 'import torch\n'), ((2199, 2236), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (2225, 2236), False, 'import torch\n'), ((2744, 2963), 'lib.models.model_builder.ModelBuilder', 'ModelBuilder', ([], {'arch': 'args.arch', 'rec_num_classes': 'dataset_info.rec_num_classes', 'sDim': 'args.decoder_sdim', 'attDim': 'args.attDim', 'max_len_labels': 'args.max_len', 'eos': 'dataset_info.char2id[dataset_info.EOS]', 'STN_ON': 'args.STN_ON'}), '(arch=args.arch, rec_num_classes=dataset_info.rec_num_classes,\n sDim=args.decoder_sdim, attDim=args.attDim, max_len_labels=args.max_len,\n eos=dataset_info.char2id[dataset_info.EOS], STN_ON=args.STN_ON)\n', (2756, 2963), False, 'from lib.models.model_builder import ModelBuilder\n'), ((3956, 4027), 'lib.evaluation_metrics.metrics.get_str_list', 'get_str_list', (['pred_rec', "input_dict['rec_targets']"], {'dataset': 'dataset_info'}), "(pred_rec, input_dict['rec_targets'], dataset=dataset_info)\n", (3968, 4027), False, 'from lib.evaluation_metrics.metrics import get_str_list\n'), ((4148, 4170), 'config.get_args', 'get_args', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (4156, 4170), False, 'from config import get_args\n'), ((1334, 1355), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1353, 
1355), False, 'from torchvision import transforms\n'), ((1817, 1904), 'lib.utils.labelmaps.get_vocabulary', 'get_vocabulary', (['voc_type'], {'EOS': 'self.EOS', 'PADDING': 'self.PADDING', 'UNKNOWN': 'self.UNKNOWN'}), '(voc_type, EOS=self.EOS, PADDING=self.PADDING, UNKNOWN=self.\n UNKNOWN)\n', (1831, 1904), False, 'from lib.utils.labelmaps import get_vocabulary, labels2strs\n'), ((2341, 2366), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2364, 2366), False, 'import torch\n'), ((2422, 2477), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.cuda.FloatTensor"""'], {}), "('torch.cuda.FloatTensor')\n", (2451, 2477), False, 'import torch\n'), ((2496, 2546), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.FloatTensor"""'], {}), "('torch.FloatTensor')\n", (2525, 2546), False, 'import torch\n'), ((3075, 3103), 'lib.utils.serialization.load_checkpoint', 'load_checkpoint', (['args.resume'], {}), '(args.resume)\n', (3090, 3103), False, 'from lib.utils.serialization import load_checkpoint, save_checkpoint\n'), ((3196, 3216), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3208, 3216), False, 'import torch\n'), ((3266, 3288), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (3281, 3288), False, 'from torch import nn, optim\n'), ((3374, 3389), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3387, 3389), False, 'import torch\n'), ((1075, 1097), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1085, 1097), False, 'from PIL import Image, ImageFile\n'), ((1205, 1227), 'numpy.floor', 'np.floor', (['(ratio * imgH)'], {}), '(ratio * imgH)\n', (1213, 1227), True, 'import numpy as np\n'), ((3642, 3674), 'torch.IntTensor', 'torch.IntTensor', (['(1)', 'args.max_len'], {}), '(1, args.max_len)\n', (3657, 3674), False, 'import torch\n')] |
from io import StringIO
import unittest
import pandas as pd
import numpy as np
from connector.stateful import StatefulConnector
from data_manager.base_manager import DataParam
from data_manager.time_series_manager import TimeSeriesDataManager
from proto.aiengine.v1 import aiengine_pb2
class StatefulConnectorTests(unittest.TestCase):
def __init__(self, method_name='runTest'):
super().__init__(method_name)
self.original_csv = "time,baz\n5,0.0\n9,2.0\n20,2.0\n30,3.0\n40,4.0\n50,5.0"
self.original_data = pd.read_csv(StringIO(self.original_csv))
self.original_data["time"] = pd.to_datetime(self.original_data["time"], unit="s")
self.original_data = self.original_data.set_index("time")
self.granularity = pd.to_timedelta(10, unit="s")
self.epoch_time = pd.to_datetime(10, unit="s")
self.period = pd.to_timedelta(50, unit="s")
self.interval = pd.to_timedelta(20, unit="s")
def setUp(self):
self.data_manager = TimeSeriesDataManager(
param=DataParam(
epoch_time=self.epoch_time,
period_secs=self.period,
interval_secs=self.interval,
granularity_secs=self.granularity),
fields={
"foo": aiengine_pb2.FieldData(initializer=10.0),
"bar": aiengine_pb2.FieldData(initializer=5.0),
"baz": aiengine_pb2.FieldData(initializer=1.0),
},
action_rewards={
"foo_action": "reward = 1",
"bar_action": "reward = 1",
},
actions_order={
"foo_action": 0,
"bar_action": 1,
},
laws=["bar >= 0"],
external_reward_funcs="",
)
self.data_manager.merge_data(self.original_data)
self.data_manager.reset()
self.data_manager.start_training()
def tearDown(self):
self.data_manager.end_training()
def test_apply_action(self):
action_effects = {
"foo_action": "foo += 5\nbar -= 1",
"bar_action": "foo += baz",
}
stateful_connector = StatefulConnector(self.data_manager, action_effects)
current_window = self.data_manager.get_current_window()
is_valid = stateful_connector.apply_action(0, current_window)
self.assertTrue(is_valid)
index_to_check = pd.to_datetime(30, unit="s")
expected_bar = 4.0
expected_foo = 15.0
actual_bar = self.data_manager.massive_table_training_filled.loc[index_to_check]["bar"]
actual_foo = self.data_manager.massive_table_training_filled.loc[index_to_check]["foo"]
self.assertEqual(expected_bar, actual_bar)
self.assertEqual(expected_foo, actual_foo)
self.assertTrue(
np.isnan(
self.data_manager.massive_table_sparse.loc[index_to_check + self.granularity][
"bar"
]
)
)
def test_laws(self):
action_effects = {
"foo_action": "foo += 5\nbar -= 10",
"bar_action": "foo += baz",
}
stateful_connector = StatefulConnector(self.data_manager, action_effects)
current_window = self.data_manager.get_current_window()
# This should not be valid and not apply the update
is_valid = stateful_connector.apply_action(0, current_window)
self.assertFalse(is_valid)
index_to_check = pd.to_datetime(30, unit="s")
actual_bar = self.data_manager.massive_table_sparse.loc[index_to_check]["bar"]
actual_foo = self.data_manager.massive_table_sparse.loc[index_to_check]["foo"]
self.assertTrue(np.isnan(actual_bar))
self.assertTrue(np.isnan(actual_foo))
def test_is_calling_merge_row(self):
original_fill_table = self.data_manager._fill_table # pylint: disable=protected-access
def new_fill_table():
raise Exception("Should not call this on apply_action")
try:
self.data_manager._fill_table = new_fill_table # pylint: disable=protected-access
action_effects = {
"foo_action": "foo += 5\nbar -= 1",
"bar_action": "foo += baz",
}
stateful_connector = StatefulConnector(self.data_manager, action_effects)
current_window = self.data_manager.get_current_window()
is_valid = stateful_connector.apply_action(0, current_window)
self.assertTrue(is_valid)
finally:
self.data_manager._fill_table = original_fill_table # pylint: disable=protected-access
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"proto.aiengine.v1.aiengine_pb2.FieldData",
"io.StringIO",
"data_manager.base_manager.DataParam",
"numpy.isnan",
"pandas.to_timedelta",
"pandas.to_datetime",
"connector.stateful.StatefulConnector"
] | [((4705, 4720), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4718, 4720), False, 'import unittest\n'), ((617, 669), 'pandas.to_datetime', 'pd.to_datetime', (["self.original_data['time']"], {'unit': '"""s"""'}), "(self.original_data['time'], unit='s')\n", (631, 669), True, 'import pandas as pd\n'), ((764, 793), 'pandas.to_timedelta', 'pd.to_timedelta', (['(10)'], {'unit': '"""s"""'}), "(10, unit='s')\n", (779, 793), True, 'import pandas as pd\n'), ((820, 848), 'pandas.to_datetime', 'pd.to_datetime', (['(10)'], {'unit': '"""s"""'}), "(10, unit='s')\n", (834, 848), True, 'import pandas as pd\n'), ((871, 900), 'pandas.to_timedelta', 'pd.to_timedelta', (['(50)'], {'unit': '"""s"""'}), "(50, unit='s')\n", (886, 900), True, 'import pandas as pd\n'), ((925, 954), 'pandas.to_timedelta', 'pd.to_timedelta', (['(20)'], {'unit': '"""s"""'}), "(20, unit='s')\n", (940, 954), True, 'import pandas as pd\n'), ((2177, 2229), 'connector.stateful.StatefulConnector', 'StatefulConnector', (['self.data_manager', 'action_effects'], {}), '(self.data_manager, action_effects)\n', (2194, 2229), False, 'from connector.stateful import StatefulConnector\n'), ((2425, 2453), 'pandas.to_datetime', 'pd.to_datetime', (['(30)'], {'unit': '"""s"""'}), "(30, unit='s')\n", (2439, 2453), True, 'import pandas as pd\n'), ((3195, 3247), 'connector.stateful.StatefulConnector', 'StatefulConnector', (['self.data_manager', 'action_effects'], {}), '(self.data_manager, action_effects)\n', (3212, 3247), False, 'from connector.stateful import StatefulConnector\n'), ((3504, 3532), 'pandas.to_datetime', 'pd.to_datetime', (['(30)'], {'unit': '"""s"""'}), "(30, unit='s')\n", (3518, 3532), True, 'import pandas as pd\n'), ((551, 578), 'io.StringIO', 'StringIO', (['self.original_csv'], {}), '(self.original_csv)\n', (559, 578), False, 'from io import StringIO\n'), ((2841, 2940), 'numpy.isnan', 'np.isnan', (["self.data_manager.massive_table_sparse.loc[index_to_check + self.granularity][\n 'bar']"], {}), 
"(self.data_manager.massive_table_sparse.loc[index_to_check + self.\n granularity]['bar'])\n", (2849, 2940), True, 'import numpy as np\n'), ((3731, 3751), 'numpy.isnan', 'np.isnan', (['actual_bar'], {}), '(actual_bar)\n', (3739, 3751), True, 'import numpy as np\n'), ((3777, 3797), 'numpy.isnan', 'np.isnan', (['actual_foo'], {}), '(actual_foo)\n', (3785, 3797), True, 'import numpy as np\n'), ((4320, 4372), 'connector.stateful.StatefulConnector', 'StatefulConnector', (['self.data_manager', 'action_effects'], {}), '(self.data_manager, action_effects)\n', (4337, 4372), False, 'from connector.stateful import StatefulConnector\n'), ((1046, 1176), 'data_manager.base_manager.DataParam', 'DataParam', ([], {'epoch_time': 'self.epoch_time', 'period_secs': 'self.period', 'interval_secs': 'self.interval', 'granularity_secs': 'self.granularity'}), '(epoch_time=self.epoch_time, period_secs=self.period,\n interval_secs=self.interval, granularity_secs=self.granularity)\n', (1055, 1176), False, 'from data_manager.base_manager import DataParam\n'), ((1283, 1323), 'proto.aiengine.v1.aiengine_pb2.FieldData', 'aiengine_pb2.FieldData', ([], {'initializer': '(10.0)'}), '(initializer=10.0)\n', (1305, 1323), False, 'from proto.aiengine.v1 import aiengine_pb2\n'), ((1348, 1387), 'proto.aiengine.v1.aiengine_pb2.FieldData', 'aiengine_pb2.FieldData', ([], {'initializer': '(5.0)'}), '(initializer=5.0)\n', (1370, 1387), False, 'from proto.aiengine.v1 import aiengine_pb2\n'), ((1412, 1451), 'proto.aiengine.v1.aiengine_pb2.FieldData', 'aiengine_pb2.FieldData', ([], {'initializer': '(1.0)'}), '(initializer=1.0)\n', (1434, 1451), False, 'from proto.aiengine.v1 import aiengine_pb2\n')] |
import numpy as np
from market.baselines.baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
def __init__(self, env, model, nsteps, nstack):
super().__init__(env=env, model=model, nsteps=nsteps)
self.nstack = nstack
nh, nw, nc = env.observation_space.shape
self.nc = nc # nc = 1 for atari, but just in case
self.nact = env.action_space.n
nenv = self.nenv
self.nbatch = nenv * nsteps
self.batch_ob_shape = (nenv*(nsteps+1), nh, nw, nc*nstack)
self.obs = np.zeros((nenv, nh, nw, nc * nstack), dtype=np.uint8)
obs = env.reset()
self.update_obs(obs)
def update_obs(self, obs, dones=None):
#self.obs = obs
if dones is not None:
self.obs *= (1 - dones.astype(np.uint8))[:, None, None, None]
self.obs = np.roll(self.obs, shift=-self.nc, axis=3)
self.obs[:, :, :, -self.nc:] = obs[:, :, :, :]
def run(self):
enc_obs = np.split(self.obs, self.nstack, axis=3) # so now list of obs steps
mb_obs, mb_actions, mb_mus, mb_dones, mb_rewards = [], [], [], [], []
for _ in range(self.nsteps):
actions, mus, states = self.model._step(self.obs, S=self.states, M=self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_mus.append(mus)
mb_dones.append(self.dones)
obs, rewards, dones, _ = self.env.step(actions)
# states information for statefull models like LSTM
self.states = states
self.dones = dones
self.update_obs(obs, dones)
mb_rewards.append(rewards)
enc_obs.append(obs)
mb_obs.append(np.copy(self.obs))
mb_dones.append(self.dones)
enc_obs = np.asarray(enc_obs, dtype=np.uint8).swapaxes(1, 0)
mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_mus = np.asarray(mb_mus, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones # Used for statefull models like LSTM's to mask state when done
mb_dones = mb_dones[:, 1:] # Used for calculating returns. The dones array is now aligned with rewards
# shapes are now [nenv, nsteps, []]
# When pulling from buffer, arrays will now be reshaped in place, preventing a deep copy.
return enc_obs, mb_obs, mb_actions, mb_rewards, mb_mus, mb_dones, mb_masks
| [
"numpy.copy",
"numpy.roll",
"numpy.asarray",
"numpy.zeros",
"numpy.split"
] | [((563, 616), 'numpy.zeros', 'np.zeros', (['(nenv, nh, nw, nc * nstack)'], {'dtype': 'np.uint8'}), '((nenv, nh, nw, nc * nstack), dtype=np.uint8)\n', (571, 616), True, 'import numpy as np\n'), ((863, 904), 'numpy.roll', 'np.roll', (['self.obs'], {'shift': '(-self.nc)', 'axis': '(3)'}), '(self.obs, shift=-self.nc, axis=3)\n', (870, 904), True, 'import numpy as np\n'), ((998, 1037), 'numpy.split', 'np.split', (['self.obs', 'self.nstack'], {'axis': '(3)'}), '(self.obs, self.nstack, axis=3)\n', (1006, 1037), True, 'import numpy as np\n'), ((1748, 1765), 'numpy.copy', 'np.copy', (['self.obs'], {}), '(self.obs)\n', (1755, 1765), True, 'import numpy as np\n'), ((1298, 1315), 'numpy.copy', 'np.copy', (['self.obs'], {}), '(self.obs)\n', (1305, 1315), True, 'import numpy as np\n'), ((1822, 1857), 'numpy.asarray', 'np.asarray', (['enc_obs'], {'dtype': 'np.uint8'}), '(enc_obs, dtype=np.uint8)\n', (1832, 1857), True, 'import numpy as np\n'), ((1890, 1924), 'numpy.asarray', 'np.asarray', (['mb_obs'], {'dtype': 'np.uint8'}), '(mb_obs, dtype=np.uint8)\n', (1900, 1924), True, 'import numpy as np\n'), ((1961, 1999), 'numpy.asarray', 'np.asarray', (['mb_actions'], {'dtype': 'np.int32'}), '(mb_actions, dtype=np.int32)\n', (1971, 1999), True, 'import numpy as np\n'), ((2036, 2076), 'numpy.asarray', 'np.asarray', (['mb_rewards'], {'dtype': 'np.float32'}), '(mb_rewards, dtype=np.float32)\n', (2046, 2076), True, 'import numpy as np\n'), ((2109, 2145), 'numpy.asarray', 'np.asarray', (['mb_mus'], {'dtype': 'np.float32'}), '(mb_mus, dtype=np.float32)\n', (2119, 2145), True, 'import numpy as np\n'), ((2181, 2216), 'numpy.asarray', 'np.asarray', (['mb_dones'], {'dtype': 'np.bool'}), '(mb_dones, dtype=np.bool)\n', (2191, 2216), True, 'import numpy as np\n')] |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy
import autograd.numpy as anp
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping \
import OneDimensionalWarping, Warping
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.constants \
import DATA_TYPE, NUMERICAL_JITTER
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.gluon_blocks_helpers \
import LogarithmScalarEncoding, PositiveScalarEncoding
def test_warping_encoding():
input_range = (0., 2.)
warping = OneDimensionalWarping(input_range)
assert isinstance(warping.encoding, LogarithmScalarEncoding)
assert warping.encoding.dimension == 2
warping = OneDimensionalWarping(input_range, encoding_type="positive")
assert isinstance(warping.encoding, PositiveScalarEncoding)
def test_warping_default_parameters():
x = anp.array([0., 1., 2.], dtype=DATA_TYPE)
input_range = (0., 2.)
warping = OneDimensionalWarping(input_range)
warping.collect_params().initialize()
warping_parameters = warping.encoding.get(warping.warping_internal.data())
numpy.testing.assert_almost_equal(warping_parameters, anp.ones(2))
numpy.testing.assert_almost_equal(warping(x), anp.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER]))
def test_warping_with_arbitrary_parameters():
x = anp.array([0., 1., 2.], dtype=DATA_TYPE)
input_range = (0., 2.)
warping = OneDimensionalWarping(input_range)
warping.collect_params().initialize()
warping.encoding.set(warping.warping_internal, [2., 0.5])
warping_parameters = warping.encoding.get(warping.warping_internal.data())
numpy.testing.assert_almost_equal(warping_parameters, [2., 0.5])
# In that case (with parameters [2., 0.5]), the warping is given by x => 1. - sqrt(1. - x^2)
def expected_warping(x):
return 1. - anp.sqrt(1. - x*x)
numpy.testing.assert_almost_equal(warping(x), expected_warping(anp.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER])))
def test_warping_with_multidimension_and_arbitrary_parameters():
X = anp.array([[0., 1., 0.], [1.,2.,1.], [2., 0., 2.]], dtype=DATA_TYPE)
dimension=3
# We transform only the columns {0,2} of the 3-dimensional data X
input_range = (0., 2.)
warping = Warping(index_to_range={0:input_range, 2:input_range}, dimension=dimension)
assert len(warping.transformations) == dimension
warping.collect_params().initialize()
# We change the warping parameters of the first dimension only
w0 = warping.transformations[0]
w0.encoding.set(w0.warping_internal, [2., 0.5])
w2 = warping.transformations[2]
w2_parameters = w2.encoding.get(w2.warping_internal.data())
# The parameters of w2 should be the default ones (as there was no set operations)
numpy.testing.assert_almost_equal(w2_parameters, anp.ones(2))
# With parameters [2., 0.5], the warping is given by x => 1. - sqrt(1. - x^2)
def expected_warping(x):
return 1. - anp.sqrt(1. - x*x)
expected_column0 = expected_warping(anp.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER])).reshape((-1,1))
expected_column1 = anp.array([1., 2., 0.]).reshape((-1,1))
expected_column2 = anp.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER]).reshape((-1,1))
numpy.testing.assert_almost_equal(warping(X), anp.hstack([expected_column0, expected_column1, expected_column2]))
| [
"autograd.numpy.sqrt",
"numpy.testing.assert_almost_equal",
"autograd.numpy.array",
"autograd.numpy.ones",
"syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping.Warping",
"autograd.numpy.hstack",
"syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping.OneDimensionalWarping"... | [((1073, 1107), 'syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping.OneDimensionalWarping', 'OneDimensionalWarping', (['input_range'], {}), '(input_range)\n', (1094, 1107), False, 'from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping import OneDimensionalWarping, Warping\n'), ((1230, 1290), 'syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping.OneDimensionalWarping', 'OneDimensionalWarping', (['input_range'], {'encoding_type': '"""positive"""'}), "(input_range, encoding_type='positive')\n", (1251, 1290), False, 'from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping import OneDimensionalWarping, Warping\n'), ((1404, 1447), 'autograd.numpy.array', 'anp.array', (['[0.0, 1.0, 2.0]'], {'dtype': 'DATA_TYPE'}), '([0.0, 1.0, 2.0], dtype=DATA_TYPE)\n', (1413, 1447), True, 'import autograd.numpy as anp\n'), ((1486, 1520), 'syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping.OneDimensionalWarping', 'OneDimensionalWarping', (['input_range'], {}), '(input_range)\n', (1507, 1520), False, 'from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping import OneDimensionalWarping, Warping\n'), ((1886, 1929), 'autograd.numpy.array', 'anp.array', (['[0.0, 1.0, 2.0]'], {'dtype': 'DATA_TYPE'}), '([0.0, 1.0, 2.0], dtype=DATA_TYPE)\n', (1895, 1929), True, 'import autograd.numpy as anp\n'), ((1968, 2002), 'syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping.OneDimensionalWarping', 'OneDimensionalWarping', (['input_range'], {}), '(input_range)\n', (1989, 2002), False, 'from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping import OneDimensionalWarping, Warping\n'), ((2190, 2255), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['warping_parameters', '[2.0, 0.5]'], {}), '(warping_parameters, [2.0, 0.5])\n', (2223, 2255), False, 
'import numpy\n'), ((2627, 2706), 'autograd.numpy.array', 'anp.array', (['[[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [2.0, 0.0, 2.0]]'], {'dtype': 'DATA_TYPE'}), '([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [2.0, 0.0, 2.0]], dtype=DATA_TYPE)\n', (2636, 2706), True, 'import autograd.numpy as anp\n'), ((2833, 2919), 'syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping.Warping', 'Warping', ([], {'index_to_range': '{(0): input_range, (2): input_range}', 'dimension': 'dimension'}), '(index_to_range={(0): input_range, (2): input_range}, dimension=\n dimension)\n', (2840, 2919), False, 'from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping import OneDimensionalWarping, Warping\n'), ((1710, 1721), 'autograd.numpy.ones', 'anp.ones', (['(2)'], {}), '(2)\n', (1718, 1721), True, 'import autograd.numpy as anp\n'), ((1773, 1831), 'autograd.numpy.array', 'anp.array', (['[NUMERICAL_JITTER, 0.5, 1.0 - NUMERICAL_JITTER]'], {}), '([NUMERICAL_JITTER, 0.5, 1.0 - NUMERICAL_JITTER])\n', (1782, 1831), True, 'import autograd.numpy as anp\n'), ((3424, 3435), 'autograd.numpy.ones', 'anp.ones', (['(2)'], {}), '(2)\n', (3432, 3435), True, 'import autograd.numpy as anp\n'), ((3927, 3993), 'autograd.numpy.hstack', 'anp.hstack', (['[expected_column0, expected_column1, expected_column2]'], {}), '([expected_column0, expected_column1, expected_column2])\n', (3937, 3993), True, 'import autograd.numpy as anp\n'), ((2404, 2425), 'autograd.numpy.sqrt', 'anp.sqrt', (['(1.0 - x * x)'], {}), '(1.0 - x * x)\n', (2412, 2425), True, 'import autograd.numpy as anp\n'), ((2490, 2548), 'autograd.numpy.array', 'anp.array', (['[NUMERICAL_JITTER, 0.5, 1.0 - NUMERICAL_JITTER]'], {}), '([NUMERICAL_JITTER, 0.5, 1.0 - NUMERICAL_JITTER])\n', (2499, 2548), True, 'import autograd.numpy as anp\n'), ((3573, 3594), 'autograd.numpy.sqrt', 'anp.sqrt', (['(1.0 - x * x)'], {}), '(1.0 - x * x)\n', (3581, 3594), True, 'import autograd.numpy as anp\n'), ((3737, 3763), 'autograd.numpy.array', 'anp.array', 
(['[1.0, 2.0, 0.0]'], {}), '([1.0, 2.0, 0.0])\n', (3746, 3763), True, 'import autograd.numpy as anp\n'), ((3800, 3858), 'autograd.numpy.array', 'anp.array', (['[NUMERICAL_JITTER, 0.5, 1.0 - NUMERICAL_JITTER]'], {}), '([NUMERICAL_JITTER, 0.5, 1.0 - NUMERICAL_JITTER])\n', (3809, 3858), True, 'import autograd.numpy as anp\n'), ((3641, 3699), 'autograd.numpy.array', 'anp.array', (['[NUMERICAL_JITTER, 0.5, 1.0 - NUMERICAL_JITTER]'], {}), '([NUMERICAL_JITTER, 0.5, 1.0 - NUMERICAL_JITTER])\n', (3650, 3699), True, 'import autograd.numpy as anp\n')] |
import numpy as np
from pylearn2.datasets.four_regions import FourRegions
def test_four_regions():
dataset = FourRegions(5000)
X = dataset.get_design_matrix()
np.testing.assert_(((X < 1.) & (X > -1.)).all())
y = dataset.get_targets()
np.testing.assert_equal(np.unique(y), [0, 1, 2, 3])
| [
"pylearn2.datasets.four_regions.FourRegions",
"numpy.unique"
] | [((115, 132), 'pylearn2.datasets.four_regions.FourRegions', 'FourRegions', (['(5000)'], {}), '(5000)\n', (126, 132), False, 'from pylearn2.datasets.four_regions import FourRegions\n'), ((280, 292), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (289, 292), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
from constants import *
import os
import sys
############################################################## Variables ##############################################################
# variables for matching
# Initiate SIFT description detector
Orb = cv2.ORB_create()
# create BFMatcher object
Bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# reference images
ref_parking = cv2.imread(os.path.dirname(os.path.abspath(__file__)) + '/parking.jpg', cv2.IMREAD_GRAYSCALE)
# print("path: ", os.getcwd())
print("ref parking shape1: ",ref_parking.shape)
ref_left = cv2.imread(os.path.dirname(os.path.abspath(__file__))+'/left.png', cv2.IMREAD_GRAYSCALE)
ref_right = cv2.imread(os.path.dirname(os.path.abspath(__file__))+'/right.png', cv2.IMREAD_GRAYSCALE)
ref_intersection = cv2.imread(os.path.dirname(os.path.abspath(__file__))+ '/T_2.png', cv2.IMREAD_GRAYSCALE)
ref_construction = cv2.imread(os.path.dirname(os.path.abspath(__file__))+ '/construction.jpg', cv2.IMREAD_GRAYSCALE)
ref_turnel = cv2.imread(os.path.dirname(os.path.abspath(__file__))+ '/turnel.png', cv2.IMREAD_GRAYSCALE)
# reference keypoints & descriptor
just_init = 1
ref_parking_keypoints = just_init
ref_parking_descriptor = just_init
ref_left_keypoints = just_init
ref_left_descriptor = just_init
ref_right_keypoints = just_init
ref_right_descriptor = just_init
ref_intersection_keypoints = just_init
ref_intersection_descriptor = just_init
ref_construction_keypoints = just_init
ref_construction_descriptor = just_init
ref_turnel_keypoints = just_init
ref_turnel_descriptor = just_init
# threshold values
threshold_parking = THRESHOLDS["parking"]
threshold_lrmin = THRESHOLDS["left_right_min"]
threshold_lrmax = THRESHOLDS["left_right_max"]
threshold_intersection = THRESHOLDS["intersection"]
threshold_turnel = THRESHOLDS["turnel"]
threshold_construction = THRESHOLDS["construction"]
# HSV Ranges used in each state
lower_blue = np.array([90,80,0])
upper_blue = np.array([130,255,255])
#lower_blue = np.array([93, 40, 60])
#upper_blue = np.array([115, 140, 140])
lower_red = np.array([0, 100, 100])
upper_red = np.array([20, 255, 255])
lower_green = np.array([65, 60, 60])
upper_green = np.array([80, 255, 255])
############################################################## Functions ##############################################################
def refimage_init():
global ref_parking; global ref_left; global ref_right; global ref_intersection; global ref_construction; global ref_turnel
global ref_parking_keypoints; global ref_left_keypoints; global ref_right_keypoints; global ref_intersection_keypoints; global ref_construction_keypoints; global ref_turnel_keypoints
global ref_parking_descriptor; global ref_left_descriptor; global ref_right_descriptor; global ref_intersection_descriptor; global ref_construction_descriptor; global ref_turnel_descriptor
ref_parking = cv2.GaussianBlur(ref_parking,(11, 11), 0)
ref_parking = cv2.medianBlur(ref_parking, 11)
ref_parking_keypoints, ref_parking_descriptor = Orb.detectAndCompute(
ref_parking, None)
ref_left = cv2.GaussianBlur(ref_left, (11, 11), 0)
ref_left = cv2.medianBlur(ref_left, 11)
ref_left_keypoints, ref_left_descriptor = Orb.detectAndCompute(
ref_left, None)
ref_right = cv2.GaussianBlur(ref_right, (11, 11), 0)
ref_right = cv2.medianBlur(ref_right, 11)
ref_right_keypoints, ref_right_descriptor = Orb.detectAndCompute(
ref_right, None)
ref_intersection = cv2.GaussianBlur(ref_intersection, (11, 11), 0)
ref_intersection = cv2.medianBlur(ref_intersection, 11)
ref_intersection_keypoints, ref_intersection_descriptor = Orb.detectAndCompute(
ref_intersection, None)
ref_construction = cv2.GaussianBlur(ref_construction, (11, 11), 0)
ref_construction = cv2.medianBlur(ref_construction, 11)
ref_construction_keypoints, ref_construction_descriptor = Orb.detectAndCompute(
ref_construction, None)
ref_turnel = cv2.GaussianBlur(ref_turnel, (11, 11), 0)
ref_turnel = cv2.medianBlur(ref_turnel, 11)
ref_turnel_keypoints, ref_turnel_descriptor = Orb.detectAndCompute(
ref_turnel, None)
def blob_parameter(_recog_type):
''' blob_parameter function for making detector for some blob shapes(circle or triangle)
& setting parameter of detector
* Input
_recog_type : recognition type in recognition_list (ex : 'parking')
* Output
blob_detector : blob detector that has parameters setted by recognition type
'''
if _recog_type == 'traffic_light':
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 0
params.maxThreshold = 256
# Filter by Area.
params.filterByArea = True
params.minArea = 500
params.maxArea = 2300
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.4
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.1
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.01
elif _recog_type == 'intersection' or _recog_type == 'construction' or _recog_type == 'turnel':
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 10
params.maxThreshold = 200
# Filter by Area.
params.filterByArea = True
params.minArea = 1000
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.1
# Filter by Convexity
params.filterByConvexity = False
params.minConvexity = 0.1
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.01
else:
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 0
params.maxThreshold = 256
# Filter by Area.
params.filterByArea = True
params.minArea = 3000
params.maxArea = 35000
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.5
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.1
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.01
# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3:
blob_detector = cv2.SimpleBlobDetector(params)
else:
blob_detector = cv2.SimpleBlobDetector_create(params)
return blob_detector
def blob_detecting(_input_img, _blob_detector, _recog_type):
''' blob_detecting function for finding center point of ROI
by HSV range thresholding & detecting specific blob shape
* Input
_input_img : front camera image from pi camera --> BGR image
_blob_detector : HSV blob detector made by blob_parameter() function
* Output
blob_centers : center points of blob (specific blob shape -> potential ROI center point)
centers : if `blob_centers` is detected, change the type of data to pt array
'''
_hsv_maxThreshold = just_init
_hsv_minThreshold = just_init
if _recog_type=='intersection' or _recog_type == 'construction' or _recog_type == 'turnel':
_hsv_maxThreshold = upper_red
_hsv_minThreshold = lower_red
elif _recog_type=='traffic_light':
_hsv_maxThreshold = upper_green
_hsv_minThreshold = lower_green
else:
_hsv_maxThreshold = upper_blue
_hsv_minThreshold = lower_blue
# thresholding process rgb_image to hsv_image by HSV Threshold
hsv = cv2.cvtColor(_input_img,cv2.COLOR_BGR2HSV)
# make mask and pre-image-processing : morphology (erode or dilate)
kernel = np.ones((3, 3), np.uint8)
mask = cv2.inRange(hsv, _hsv_minThreshold, _hsv_maxThreshold)
mask = cv2.dilate(mask, kernel, iterations=5)
#maks = cv2.erode(mask, kernel, iterations = 3)
reversemask = 255-mask
# Detect specific blobshape and center point of blob
if _recog_type=='intersection' or _recog_type == 'construction' or _recog_type == 'turnel':
blob_centers = _blob_detector.detect(mask)
else:
blob_centers = _blob_detector.detect(reversemask)
BGR_ROI = cv2.cvtColor(reversemask, cv2.COLOR_GRAY2BGR)
if IS_DEBUG_MODE == True:
print(len(blob_centers))
show_centers = cv2.drawKeypoints(reversemask, blob_centers, np.array(
[]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) # ! code for debugging
cv2.imshow('hsv',hsv) #! code for debugging
cv2.imshow('mask',mask) #! code for debugging
cv2.imshow('reverse',reversemask) #! code for debugging
cv2.imshow('result', show_centers) # ! code for debugging
cv2.waitKey() # ! code for debugging
if len(blob_centers) >= 1:
centers = []
for i in blob_centers:
centers.append(i.pt)
return centers
else:
return blob_centers
def signROI_detecting(_input_img, _recog_type):
''' signROI_detecting function for detecting signROI
using HSV range thresholding and specific blob shape detectiong, different by cases
* Input
_input_img : front camera image from pi camera --> BGR image
_recog_type : recognition type
* Output
signROI : sign detection result image --> BGR image
True : thiere is singROI
False : there is no signROI
'''
# _input_img ROI detecting using HSV range
#_input_img = cv2.GaussianBlur(_input_img, (5, 5), 0)
sign_detector = blob_parameter(_recog_type)
sign_centers = blob_detecting(_input_img, sign_detector, _recog_type)
# cv2.imshow('input',_input_img) #! code for debugging
# cv2.waitKey() #! code for debugging
# print 'test' #! code for debugging
# print len(sign_centers) #! code for debugging
if len(sign_centers) >= 1:
xx = int(sign_centers[0][0])
yy = int(sign_centers[0][1])
# print sign_centers[0][1], sign_centers[0][0] #! code for debugging
if sign_centers[0][1]-70 < 0 or sign_centers[0][0] < 70:
if sign_centers[0][0] < sign_centers[0][1]:
signROI_size = int(sign_centers[0][0])
else:
signROI_size = int(sign_centers[0][1])
else:
signROI_size = 70
signROI = _input_img[yy - signROI_size: yy +
signROI_size, xx - signROI_size: xx + signROI_size]
# cv2.imshow('ROI',signROI) #! code for debugging
# cv2.waitKey() #! code for debugging
return signROI, True
else:
signROI = _input_img
return signROI, False
def ORB_matching(_roi, _ref_img, _ref_keypoints, _ref_descriptor):
''' ORB_matching function for matching two input image and output is matching result
* Input
_roi : sign ROI image --> BGR image
_ref : sign ref image --> gray image
* Output
matches : ORB descriptor matching result
'''
global Orb
global Bf
# image pretreatment
_roi = cv2.cvtColor(_roi, cv2.COLOR_BGR2GRAY)
_roi = cv2.medianBlur(_roi, 5)
# find the keypoints and descriptors with SIFT
ROI_keypoints, ROI_descriptor = Orb.detectAndCompute(_roi, None)
# Match descriptors.
matches = Bf.match(ROI_descriptor, _ref_descriptor)
# Sort them in the order of their distance.
# Not use distance values yet, but can use it at thresholding
matches = sorted(matches, key=lambda x: x.distance)
if IS_DEBUG_MODE == True:
print(len(matches)) #! code for debugging
matching_image = cv2.drawMatches(
_roi, ROI_keypoints, _ref_img, _ref_keypoints, matches, None, flags=2) # ! code for debugging
cv2.imshow('matching',matching_image) #! code for debugging
cv2.waitKey() #! code for debugging
return matches
def left_or_right(_frontcam_roi):
    '''Decide the direction of an arrow sign from its ROI.

    * Input
        _frontcam_roi : sign ROI from the front pi camera --> BGR image
    * Output
        'left' or 'right'

    The ROI is Otsu-binarized, its bottom half is split into left/right
    halves, and the brighter half decides the direction.
    '''
    gray = cv2.cvtColor(_frontcam_roi, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 0)
    # Otsu's method picks the binarization threshold automatically.
    _, binary_roi = cv2.threshold(
        blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    half_h = binary_roi.shape[0] // 2
    half_w = binary_roi.shape[1] // 2
    # Bottom half of the image, rows [half_h, 2*half_h) as in the original
    # index loops (the last row is dropped when the height is odd).
    bottom = binary_roi[half_h:2 * half_h]
    sum_left = int(np.sum(bottom[:, :half_w]))
    sum_right = int(np.sum(bottom[:, half_w:2 * half_w]))
    print("=========================================")
    print("sum left: ", sum_left/255, ", sum right: ", sum_right/255)  #! code for debugging
    print("==============sum printing================")
    # NOTE(review): a brighter LEFT half maps to 'right' — presumably the
    # arrow head leaves dark pixels on its own side; confirm with real signs.
    if sum_left > sum_right:
        return 'right'
    else:
        return 'left'
def is_light_green(image):
    """Check whether the traffic light in the front image is green.

    image: front camera image (BGR)
    Returns True when at least one green blob is detected, False otherwise.
    """
    detector = blob_parameter('traffic_light')
    centers = blob_detecting(image, detector, 'traffic_light')
    # Any detected blob center counts as a green light.
    return len(centers) >= 1
def is_intersection(image):
    """Check whether the image contains an intersection sign.

    Returns True when the ROI matches the intersection reference with at
    least `threshold_intersection` ORB matches, False otherwise.
    """
    ROI_img, ROI_OX = signROI_detecting(image, 'intersection')
    if not ROI_OX:
        return False
    # Matching & thresholding against the intersection reference features.
    result_matching = ORB_matching(ROI_img, ref_intersection,
                                  ref_intersection_keypoints,
                                  ref_intersection_descriptor)
    return len(result_matching) >= threshold_intersection
def is_parking(image):
    """Check whether the image contains a parking sign.

    Returns True when the ROI matches the parking reference with at least
    `threshold_parking` ORB matches, False otherwise.
    """
    # BUG FIX: the original ended with an unreachable `return False / True`
    # (a division expression, not a choice) after both branches had already
    # returned; the dead code is removed.
    ROI_img, ROI_OX = signROI_detecting(image, 'parking')
    if ROI_OX != False:
        # matching & thresholding
        result_matching = ORB_matching(
            ROI_img, ref_parking, ref_parking_keypoints, ref_parking_descriptor)
        if len(result_matching) >= threshold_parking:
            return True
        else:
            return False
    else:
        return False
def is_construction(image):
    """Check whether the image contains a construction sign.

    Returns True when the ROI matches the construction reference with at
    least `threshold_construction` ORB matches, False otherwise.
    """
    ROI_img, ROI_OX = signROI_detecting(image, 'construction')
    if ROI_OX != False:
        # matching & thresholding
        result_matching = ORB_matching(
            ROI_img, ref_construction, ref_construction_keypoints,
            ref_construction_descriptor)
        if IS_DEBUG_MODE == True:
            # BUG FIX: the original concatenated str + int, which raises
            # TypeError whenever debug mode is on; convert the count first.
            print("result matching : " + str(len(result_matching)))
        if len(result_matching) >= threshold_construction:
            return True
        else:
            return False
    else:
        return False
def is_turnel(image):
    """Check whether the image contains a tunnel ("turnel") sign.

    Returns True when the ROI matches the tunnel reference with at least
    `threshold_turnel` ORB matches, False otherwise.
    """
    ROI_img, ROI_OX = signROI_detecting(image, 'turnel')
    if ROI_OX != False:
        # matching & thresholding
        result_matching = ORB_matching(
            ROI_img, ref_turnel, ref_turnel_keypoints, ref_turnel_descriptor)
        if IS_DEBUG_MODE == True:
            # BUG FIX: the original concatenated str + int, which raises
            # TypeError whenever debug mode is on; convert the count first.
            print("result matching : " + str(len(result_matching)))
        if len(result_matching) >= threshold_turnel:
            return True
        else:
            return False
    else:
        return False
def check_left_right_sign(_frontcam):
    """Check for a left/right arrow sign in the front camera frame.

    Returns 'left' or 'right' when a sign is confidently matched against
    both reference arrows, and 'none' when no sign ROI is found or the
    match counts are outside the accepted band.
    """
    ROI_img, ROI_OX = signROI_detecting(_frontcam, 'left_or_right')
    if not ROI_OX:
        return 'none'
    # Match the ROI against both reference arrows (left first, as before).
    n_left = len(ORB_matching(
        ROI_img, ref_left, ref_left_keypoints, ref_left_descriptor))
    n_right = len(ORB_matching(
        ROI_img, ref_right, ref_right_keypoints, ref_right_descriptor))
    # Both match counts must fall inside [threshold_lrmin, threshold_lrmax]
    # before the pixel-based orientation test is trusted.
    if (threshold_lrmin <= n_left <= threshold_lrmax
            and threshold_lrmin <= n_right <= threshold_lrmax):
        return left_or_right(ROI_img)
    return 'none'
def check_sign(image):
    """Classify the road sign visible in the front image.

    image: front camera image
    Returns 'intersection', 'parking', or 'nothing' when no sign matched.

    NOTE(review): the original docstring also promised 'construction' and
    'tunnel' results, but only intersection and parking are checked here —
    see is_construction / is_turnel for the unchecked detectors.
    """
    if is_intersection(image):
        return 'intersection'
    if is_parking(image):
        return 'parking'
    return 'nothing'
def has_curve_in(distance, image):
    """Report whether a curve lies within `distance` in the image.

    Placeholder — curve detection is not implemented yet, so this always
    answers False.
    """
    # TODO: future task — implement curve detection.
    return False
def is_straight_in(distance, image):
    """Report whether the road is straight within `distance`.

    Placeholder — not implemented yet, so this always answers False.
    """
    # TODO: future task — implement straightness detection.
    return False
def is_stopping_sign(image):
    """Report whether the image shows a stop sign.

    Placeholder — not implemented yet, so this always answers False.
    """
    # TODO: future task — implement stop-sign detection.
    return False
def has_crossing_line(image):
    """Report whether a crossing line runs from left to right in the image.

    Placeholder — not implemented yet, so this always answers False.
    """
    # TODO: future task — implement crossing-line detection.
    return False
# Module initialization: load the reference sign images and precompute
# their ORB keypoints & descriptors (runs once at import time).
refimage_init()
| [
"cv2.GaussianBlur",
"os.path.abspath",
"cv2.SimpleBlobDetector_Params",
"cv2.medianBlur",
"cv2.cvtColor",
"cv2.dilate",
"cv2.threshold",
"cv2.BFMatcher",
"cv2.waitKey",
"numpy.ones",
"cv2.drawMatches",
"cv2.SimpleBlobDetector_create",
"cv2.SimpleBlobDetector",
"numpy.array",
"cv2.ORB_cre... | [((323, 339), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (337, 339), False, 'import cv2\n'), ((371, 419), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv2.NORM_HAMMING, crossCheck=True)\n', (384, 419), False, 'import cv2\n'), ((1977, 1998), 'numpy.array', 'np.array', (['[90, 80, 0]'], {}), '([90, 80, 0])\n', (1985, 1998), True, 'import numpy as np\n'), ((2010, 2035), 'numpy.array', 'np.array', (['[130, 255, 255]'], {}), '([130, 255, 255])\n', (2018, 2035), True, 'import numpy as np\n'), ((2123, 2146), 'numpy.array', 'np.array', (['[0, 100, 100]'], {}), '([0, 100, 100])\n', (2131, 2146), True, 'import numpy as np\n'), ((2159, 2183), 'numpy.array', 'np.array', (['[20, 255, 255]'], {}), '([20, 255, 255])\n', (2167, 2183), True, 'import numpy as np\n'), ((2198, 2220), 'numpy.array', 'np.array', (['[65, 60, 60]'], {}), '([65, 60, 60])\n', (2206, 2220), True, 'import numpy as np\n'), ((2235, 2259), 'numpy.array', 'np.array', (['[80, 255, 255]'], {}), '([80, 255, 255])\n', (2243, 2259), True, 'import numpy as np\n'), ((2945, 2987), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['ref_parking', '(11, 11)', '(0)'], {}), '(ref_parking, (11, 11), 0)\n', (2961, 2987), False, 'import cv2\n'), ((3005, 3036), 'cv2.medianBlur', 'cv2.medianBlur', (['ref_parking', '(11)'], {}), '(ref_parking, 11)\n', (3019, 3036), False, 'import cv2\n'), ((3154, 3193), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['ref_left', '(11, 11)', '(0)'], {}), '(ref_left, (11, 11), 0)\n', (3170, 3193), False, 'import cv2\n'), ((3209, 3237), 'cv2.medianBlur', 'cv2.medianBlur', (['ref_left', '(11)'], {}), '(ref_left, 11)\n', (3223, 3237), False, 'import cv2\n'), ((3347, 3387), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['ref_right', '(11, 11)', '(0)'], {}), '(ref_right, (11, 11), 0)\n', (3363, 3387), False, 'import cv2\n'), ((3404, 3433), 'cv2.medianBlur', 'cv2.medianBlur', (['ref_right', '(11)'], {}), '(ref_right, 11)\n', (3418, 3433), False, 'import 
cv2\n'), ((3553, 3600), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['ref_intersection', '(11, 11)', '(0)'], {}), '(ref_intersection, (11, 11), 0)\n', (3569, 3600), False, 'import cv2\n'), ((3624, 3660), 'cv2.medianBlur', 'cv2.medianBlur', (['ref_intersection', '(11)'], {}), '(ref_intersection, 11)\n', (3638, 3660), False, 'import cv2\n'), ((3801, 3848), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['ref_construction', '(11, 11)', '(0)'], {}), '(ref_construction, (11, 11), 0)\n', (3817, 3848), False, 'import cv2\n'), ((3872, 3908), 'cv2.medianBlur', 'cv2.medianBlur', (['ref_construction', '(11)'], {}), '(ref_construction, 11)\n', (3886, 3908), False, 'import cv2\n'), ((4043, 4084), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['ref_turnel', '(11, 11)', '(0)'], {}), '(ref_turnel, (11, 11), 0)\n', (4059, 4084), False, 'import cv2\n'), ((4102, 4132), 'cv2.medianBlur', 'cv2.medianBlur', (['ref_turnel', '(11)'], {}), '(ref_turnel, 11)\n', (4116, 4132), False, 'import cv2\n'), ((6696, 6722), 'cv2.__version__.split', 'cv2.__version__.split', (['"""."""'], {}), "('.')\n", (6717, 6722), False, 'import cv2\n'), ((8008, 8051), 'cv2.cvtColor', 'cv2.cvtColor', (['_input_img', 'cv2.COLOR_BGR2HSV'], {}), '(_input_img, cv2.COLOR_BGR2HSV)\n', (8020, 8051), False, 'import cv2\n'), ((8136, 8161), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (8143, 8161), True, 'import numpy as np\n'), ((8173, 8227), 'cv2.inRange', 'cv2.inRange', (['hsv', '_hsv_minThreshold', '_hsv_maxThreshold'], {}), '(hsv, _hsv_minThreshold, _hsv_maxThreshold)\n', (8184, 8227), False, 'import cv2\n'), ((8239, 8277), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel'], {'iterations': '(5)'}), '(mask, kernel, iterations=5)\n', (8249, 8277), False, 'import cv2\n'), ((8645, 8690), 'cv2.cvtColor', 'cv2.cvtColor', (['reversemask', 'cv2.COLOR_GRAY2BGR'], {}), '(reversemask, cv2.COLOR_GRAY2BGR)\n', (8657, 8690), False, 'import cv2\n'), ((12313, 12351), 'cv2.cvtColor', 'cv2.cvtColor', (['_roi', 
'cv2.COLOR_BGR2GRAY'], {}), '(_roi, cv2.COLOR_BGR2GRAY)\n', (12325, 12351), False, 'import cv2\n'), ((12363, 12386), 'cv2.medianBlur', 'cv2.medianBlur', (['_roi', '(5)'], {}), '(_roi, 5)\n', (12377, 12386), False, 'import cv2\n'), ((13663, 13710), 'cv2.cvtColor', 'cv2.cvtColor', (['_frontcam_roi', 'cv2.COLOR_BGR2GRAY'], {}), '(_frontcam_roi, cv2.COLOR_BGR2GRAY)\n', (13675, 13710), False, 'import cv2\n'), ((13835, 13877), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['_frontcam_roi', '(7, 7)', '(0)'], {}), '(_frontcam_roi, (7, 7), 0)\n', (13851, 13877), False, 'import cv2\n'), ((13961, 14034), 'cv2.threshold', 'cv2.threshold', (['_frontcam_roi', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(_frontcam_roi, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (13974, 14034), False, 'import cv2\n'), ((4719, 4750), 'cv2.SimpleBlobDetector_Params', 'cv2.SimpleBlobDetector_Params', ([], {}), '()\n', (4748, 4750), False, 'import cv2\n'), ((6774, 6804), 'cv2.SimpleBlobDetector', 'cv2.SimpleBlobDetector', (['params'], {}), '(params)\n', (6796, 6804), False, 'import cv2\n'), ((6839, 6876), 'cv2.SimpleBlobDetector_create', 'cv2.SimpleBlobDetector_create', (['params'], {}), '(params)\n', (6868, 6876), False, 'import cv2\n'), ((8952, 8974), 'cv2.imshow', 'cv2.imshow', (['"""hsv"""', 'hsv'], {}), "('hsv', hsv)\n", (8962, 8974), False, 'import cv2\n'), ((9109, 9133), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (9119, 9133), False, 'import cv2\n'), ((9266, 9300), 'cv2.imshow', 'cv2.imshow', (['"""reverse"""', 'reversemask'], {}), "('reverse', reversemask)\n", (9276, 9300), False, 'import cv2\n'), ((9424, 9458), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'show_centers'], {}), "('result', show_centers)\n", (9434, 9458), False, 'import cv2\n'), ((9491, 9504), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (9502, 9504), False, 'import cv2\n'), ((12953, 13043), 'cv2.drawMatches', 'cv2.drawMatches', (['_roi', 'ROI_keypoints', '_ref_img', 
'_ref_keypoints', 'matches', 'None'], {'flags': '(2)'}), '(_roi, ROI_keypoints, _ref_img, _ref_keypoints, matches,\n None, flags=2)\n', (12968, 13043), False, 'import cv2\n'), ((13085, 13123), 'cv2.imshow', 'cv2.imshow', (['"""matching"""', 'matching_image'], {}), "('matching', matching_image)\n", (13095, 13123), False, 'import cv2\n'), ((13226, 13239), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (13237, 13239), False, 'import cv2\n'), ((481, 506), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (496, 506), False, 'import os\n'), ((665, 690), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (680, 690), False, 'import os\n'), ((766, 791), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (781, 791), False, 'import os\n'), ((875, 900), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (890, 900), False, 'import os\n'), ((983, 1008), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (998, 1008), False, 'import os\n'), ((1094, 1119), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1109, 1119), False, 'import os\n'), ((5453, 5484), 'cv2.SimpleBlobDetector_Params', 'cv2.SimpleBlobDetector_Params', ([], {}), '()\n', (5482, 5484), False, 'import cv2\n'), ((6070, 6101), 'cv2.SimpleBlobDetector_Params', 'cv2.SimpleBlobDetector_Params', ([], {}), '()\n', (6099, 6101), False, 'import cv2\n'), ((8827, 8839), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8835, 8839), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2021 by ExopyHqcLegacy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""
This module defines drivers for agilent PXA.
:Contains:
SpecDescriptor
AgilentPXA
"""
from inspect import cleandoc
import numpy as np
from ..driver_tools import (InstrIOError, secure_communication,
instrument_property)
from ..visa_tools import VisaInstrument
# Mapping from human-readable data-format names to the numeric codes the
# PXA uses when selecting trace data formatting.
# NOTE(review): the codes presumably match the instrument's programming
# manual — confirm before adding new entries.
DATA_FORMATTING_DICT = {'raw I/Q data': 0,
                        'descriptor': 1,
                        '(I,Q) vs time': 3,
                        'log(mag) vs freq': 4,
                        'average of log(mag) vs freq': 7,
                        'mag vs freq in Vrms': 11,
                        'average of mag vs freq in Vrms': 12}
class SpecDescriptor():
    """Plain container for the header of a PXA spectrum measurement.

    Holds neutral defaults until `AgilentPXA.get_spec_header` fills it in
    with the values fetched from the instrument (frequencies in GHz).
    """

    def __init__(self):
        # True once the header has been fetched from the instrument.
        self.initialized = False
        # FFT section: peak amplitude, peak frequency, number of steps.
        self.FFTpeak = self.FFTfreq = 0
        self.FFTnbrSteps = 2
        # Frequency axis: first point and step size.
        self.Firstfreq = self.Freqstep = 0
        # Time axis: number of steps, first point, step size.
        self.TimenbrSteps = 2
        self.firsttime = 0
        self.TimeStep = 0.1
        # Misc: time-domain flag, total duration, averaging count.
        self.timedomaincheck = 1
        self.totaltime = 1.0
        self.averagenbr = 1
class AgilentPXA(VisaInstrument):
"""
"""
caching_permissions = {'start_frequency_SA': False,
'stop_frequency_SA': False,
'mode': False}
def __init__(self, connection_info, caching_allowed=True,
caching_permissions={}, auto_open=True):
super(AgilentPXA, self).__init__(connection_info,
caching_allowed,
caching_permissions,
auto_open)
self.mode = 'SA'
@secure_communication(2)
def get_spec_header(self):
"""
"""
if self.mode == 'SPEC':
answer = self.query_ascii_values("FETCH:SPEC1?")
if answer:
self.spec_header.initialized = True
self.spec_header.FFTpeak = answer[0]
self.spec_header.FFTfreq = answer[1]/1e9
self.spec_header.FFTnbrSteps = answer[2]
self.spec_header.Firstfreq = answer[3]/1e9
self.spec_header.Freqstep = answer[4]/1e9
self.spec_header.TimenbrSteps = answer[5]
self.spec_header.firsttime = answer[6]
self.spec_header.TimeStep = answer[7]
self.spec_header.timedomaincheck = answer[8]
self.spec_header.totaltime = answer[9]
self.spec_header.averagenbr = answer[10]
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return its
mode'''))
else:
raise '''PXA is not in Spectrum mode'''
@secure_communication()
def read_data(self, trace):
"""
"""
# must be read in ASCii format
self.write("FORM:DATA ASCii")
# stop all the measurements
self.write(":ABORT")
# go to the "Single sweep" mode
self.write(":INIT:CONT OFF")
# initiate measurement
self.write(":INIT")
#
self.query("SWEEP:TIME?")
self.write("*WAI") # SA waits until the averaging is done
# Loop to see when the averaging is done
while True:
try:
self.query("SWEEP:TIME?")
break
except:
pass
data = self.query_ascii_values('trace? trace{}'.format(trace))
if data:
freq = np.linspace(self.start_frequency_SA,
self.stop_frequency_SA,
self.sweep_points_SA)
return np.rec.fromarrays([freq, np.array(data)],
names=['Frequency',
'Data'])
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
trace {} data'''.format(trace)))
@instrument_property
@secure_communication()
def start_frequency_SA(self):
"""Start frequency getter method
"""
if self.mode == 'SA':
freq = self.query('FREQ:STAR?')
if freq:
return float(freq)/1e9
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
start frequency'''))
elif self.mode == 'SPEC':
if not self.spec_header.initialized:
self.get_spec_header()
return self.spec_header.Firstfreq
else:
raise '''PXA is not in the appropriate mode to get correctly the
start frequency'''
@start_frequency_SA.setter
@secure_communication()
def start_frequency_SA(self, value):
"""Start frequency setter method
"""
if self.mode == 'SA':
self.write('FREQ:STAR {} GHz'.format(value))
result = self.query('FREQ:STAR?')
if result:
if abs(float(result)/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the start frequency'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
start frequency'''))
else:
raise '''PXA is not in the appropriate mode to set correctly the
start frequency'''
@instrument_property
@secure_communication()
def stop_frequency_SA(self):
"""Stop frequency getter method
"""
if self.mode == 'SA':
freq = self.query('FREQ:STOP?')
if freq:
return float(freq)/1e9
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
stop frequency'''))
else:
raise '''PXA is not in the appropriate mode to get correctly the
stop frequency'''
@stop_frequency_SA.setter
@secure_communication()
def stop_frequency_SA(self, value):
"""Stop frequency setter method
"""
if self.mode == 'SA':
self.write('FREQ:STOP {} GHz'.format(value))
result = self.query('FREQ:STOP?')
if result:
if abs(float(result)/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the stop frequency'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
stop frequency'''))
else:
raise '''PXA is not in the appropriate mode to set correctly the
stop frequency'''
@instrument_property
@secure_communication()
def center_frequency(self):
"""Center frequency getter method
"""
freq = self.query('FREQ:CENT?')
if freq:
return float(freq)/1e9
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
center frequency'''))
@center_frequency.setter
@secure_communication()
def center_frequency(self, value):
"""center frequency setter method
"""
self.write('FREQ:CENT {} GHz'.format(value))
result = self.query('FREQ:CENT?')
if result:
if abs(float(result)/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly the
center frequency'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
center frequency'''))
@instrument_property
@secure_communication()
def span_frequency(self):
"""Span frequency getter method
"""
if self.mode == 'SPEC':
freq = self.query('SENS:SPEC:FREQ:SPAN?')
if freq:
return float(freq)/1e9
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
span frequency'''))
elif self.mode == 'SA':
freq = self.query('FREQ:SPAN?')
if freq:
return float(freq)/1e9
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
span frequency'''))
else:
raise '''PXA is not in the appropriate mode to get correctly the
span frequency'''
@span_frequency.setter
@secure_communication()
def span_frequency(self, value):
"""span frequency setter method
"""
if self.mode == 'SA':
self.write('FREQ:SPAN {} GHz'.format(value))
result = self.query('FREQ:SPAN?')
if result:
if abs(float(result)/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the span frequency'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
span frequency'''))
elif self.mode == 'SPEC':
self.write('SENS:SPEC:FREQ:SPAN {} GHz'.format(value))
result = self.query('SENS:SPEC:FREQ:SPAN?')
if result:
if abs(float(result)/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the span frequency'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
span frequency'''))
else:
raise '''PXA is not in the appropriate mode to set correctly the
span frequency'''
@instrument_property
@secure_communication()
def sweep_time(self):
"""Sweep time getter method
"""
if self.mode == 'WAV':
sweep = self.query('SENS:WAV:SWEEP:TIME?')
if sweep:
return float(sweep)
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
sweep time'''))
elif self.mode == 'SA':
sweep = self.query('SWEEP:TIME?')
if sweep:
return float(sweep)
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
sweep time'''))
else:
raise '''PXA is not in the appropriate mode to get correctly the
sweep time'''
@sweep_time.setter
@secure_communication()
def sweep_time(self, value):
"""sweep time setter method
"""
if self.mode == 'WAV':
self.write('SENS:WAV:SWEEP:TIME {}'.format(value))
result = self.query('SENS:WAV:SWEEP:TIME?')
if result:
if abs(float(result) - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the sweep time'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
sweep time'''))
elif self.mode == 'SA':
self.write('SWEEP:TIME {}'.format(value))
result = self.query('SWEEP:TIME?')
if result:
if abs(float(result) - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the sweep time'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
sweep time'''))
else:
raise '''PXA is not in the appropriate mode to set correctly the
sweep time'''
@instrument_property
@secure_communication()
def RBW(self):
"""
"""
if self.mode == 'WAV':
rbw = self.query('SENS:WAV:BWIDTH?')
if rbw:
return float(rbw)
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
RBW'''))
elif self.mode == 'SPEC':
rbw = self.query('SENS:SPEC:BWIDTH?')
if rbw:
return float(rbw)
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
RBW'''))
else:
rbw = self.query('BWIDTH?')
if rbw:
return float(rbw)
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
channel Resolution bandwidth'''))
@RBW.setter
@secure_communication()
def RBW(self, value):
"""
"""
if self.mode == 'WAV':
self.write('SENS:WAV:BWIDTH {}'.format(value))
result = self.query('SENS:WAV:BWIDTH?')
if result:
if abs(float(result) - value) > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the channel Resolution bandwidth'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
channel Resolution bandwidth'''))
elif self.mode == 'SPEC':
self.write('SENS:SPEC:BWIDTH {}'.format(value))
result = self.query('SENS:SPEC:BWIDTH?')
if result:
if abs(float(result) - value) > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the channel Resolution bandwidth'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
channel Resolution bandwidth'''))
else:
self.write('BAND {}'.format(value))
result = self.query('BWIDTH?')
if result:
if abs(float(result) - value) > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the channel Resolution bandwidth'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
channel Resolution bandwidth'''))
@instrument_property
@secure_communication()
def VBW_SA(self):
"""
"""
if self.mode == 'SA':
vbw = self.query('BAND:VID?')
if vbw:
return float(vbw)
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
channel Video bandwidth'''))
else:
raise '''PXA is not in the appropriate mode to set correctly the
sweep time'''
@VBW_SA.setter
@secure_communication()
def VBW_SA(self, value):
"""
"""
if self.mode == 'WAV':
raise InstrIOError(cleandoc('''PXA did not set correctly the
channel Resolution bandwidth'''))
elif self.mode == 'SPEC':
raise InstrIOError(cleandoc('''PXA did not set correctly the
channel Resolution bandwidth'''))
else:
self.write('BAND:VID {}'.format(value))
result = self.query('BAND:VID?')
if result:
if abs(float(result) - value) > 10**-12:
raise InstrIOError(cleandoc('''PXA did not set correctly
the channel Video bandwidth'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
channel Video bandwidth'''))
@instrument_property
@secure_communication()
def sweep_points_SA(self):
"""
"""
points = self.query('SENSe:SWEep:POINts?')
if points:
return int(points)
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
sweep point number'''))
@sweep_points_SA.setter
@secure_communication()
def sweep_points_SA(self, value):
"""
"""
self.write('SENSe:SWEep:POINts {}'.format(value))
result = self.query('SENSe:SWEep:POINts?')
if result:
if int(result) != value:
raise InstrIOError(cleandoc('''PXA did not set correctly the
sweep point number'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
sweep point number'''))
@instrument_property
@secure_communication()
def average_count_SA(self):
"""
"""
count = self.query('AVERage:COUNt?')
if count:
return int(count)
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
average count'''))
@average_count_SA.setter
@secure_communication()
def average_count_SA(self, value):
"""
"""
self.write('AVERage:COUNt {}'.format(value))
result = self.query('AVERage:COUNt?')
if result:
if int(result) != value:
raise InstrIOError(cleandoc('''PXA did not set correctly the
average count'''))
else:
raise InstrIOError(cleandoc('''PXA did not set correctly the
average count'''))
@instrument_property
@secure_communication()
def average_state_SA(self):
"""
"""
mode = self.query('AVERage?')
if mode:
return mode
else:
raise InstrIOError(cleandoc('''Agilent PXA did not return the
average state'''))
@average_state_SA.setter
@secure_communication()
def average_state_SA(self, value):
"""
"""
self.write('AVERage:STATE {}'.format(value))
result = self.query('AVERage?')
if result.lower() != value.lower()[:len(result)]:
raise InstrIOError(cleandoc('''PXA did not set correctly the
average state''')) | [
"numpy.array",
"inspect.cleandoc",
"numpy.linspace"
] | [((3861, 3948), 'numpy.linspace', 'np.linspace', (['self.start_frequency_SA', 'self.stop_frequency_SA', 'self.sweep_points_SA'], {}), '(self.start_frequency_SA, self.stop_frequency_SA, self.\n sweep_points_SA)\n', (3872, 3948), True, 'import numpy as np\n'), ((7376, 7464), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n center frequency"""'], {}), '(\n """Agilent PXA did not return the\n center frequency""")\n', (7384, 7464), False, 'from inspect import cleandoc\n'), ((7954, 8041), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n center frequency"""'], {}), '(\n """PXA did not set correctly the\n center frequency""")\n', (7962, 8041), False, 'from inspect import cleandoc\n'), ((15244, 15348), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n channel Resolution bandwidth"""'], {}), '(\n """PXA did not set correctly the\n channel Resolution bandwidth"""\n )\n', (15252, 15348), False, 'from inspect import cleandoc\n'), ((16221, 16316), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n sweep point number"""'], {}), '(\n """Agilent PXA did not return the\n sweep point number"""\n )\n', (16229, 16316), False, 'from inspect import cleandoc\n'), ((16758, 16847), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n sweep point number"""'], {}), '(\n """PXA did not set correctly the\n sweep point number""")\n', (16766, 16847), False, 'from inspect import cleandoc\n'), ((17092, 17178), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n average count"""'], {}), '(\n """Agilent PXA did not return the\n average count""")\n', (17100, 17178), False, 'from inspect import cleandoc\n'), ((17613, 17698), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n average count"""'], {}), '("""PXA did not set correctly the\n average count"""\n )\n', (17621, 17698), False, 'from inspect import cleandoc\n'), ((17929, 18014), 'inspect.cleandoc', 
'cleandoc', (['"""Agilent PXA did not return the\n average state"""'], {}), '("""Agilent PXA did not return the\n average state"""\n )\n', (17937, 18014), False, 'from inspect import cleandoc\n'), ((18315, 18390), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n average state"""'], {}), '("""PXA did not set correctly the\n average state""")\n', (18323, 18390), False, 'from inspect import cleandoc\n'), ((2942, 3017), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return its\n mode"""'], {}), '("""Agilent PXA did not return its\n mode""")\n', (2950, 3017), False, 'from inspect import cleandoc\n'), ((4050, 4064), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4058, 4064), True, 'import numpy as np\n'), ((4651, 4738), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n start frequency"""'], {}), '(\n """Agilent PXA did not return the\n start frequency""")\n', (4659, 4738), False, 'from inspect import cleandoc\n'), ((5587, 5673), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n start frequency"""'], {}), '(\n """PXA did not set correctly the\n start frequency""")\n', (5595, 5673), False, 'from inspect import cleandoc\n'), ((6126, 6212), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n stop frequency"""'], {}), '(\n """Agilent PXA did not return the\n stop frequency""")\n', (6134, 6212), False, 'from inspect import cleandoc\n'), ((6887, 6972), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n stop frequency"""'], {}), '("""PXA did not set correctly the\n stop frequency"""\n )\n', (6895, 6972), False, 'from inspect import cleandoc\n'), ((7825, 7912), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n center frequency"""'], {}), '(\n """PXA did not set correctly the\n center frequency""")\n', (7833, 7912), False, 'from inspect import cleandoc\n'), ((8374, 8460), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n 
span frequency"""'], {}), '(\n """Agilent PXA did not return the\n span frequency""")\n', (8382, 8460), False, 'from inspect import cleandoc\n'), ((9401, 9486), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n span frequency"""'], {}), '("""PXA did not set correctly the\n span frequency"""\n )\n', (9409, 9486), False, 'from inspect import cleandoc\n'), ((10443, 10520), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n sweep time"""'], {}), '("""Agilent PXA did not return the\n sweep time""")\n', (10451, 10520), False, 'from inspect import cleandoc\n'), ((11455, 11531), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n sweep time"""'], {}), '("""PXA did not set correctly the\n sweep time""")\n', (11463, 11531), False, 'from inspect import cleandoc\n'), ((12409, 12479), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n RBW"""'], {}), '("""Agilent PXA did not return the\n RBW""")\n', (12417, 12479), False, 'from inspect import cleandoc\n'), ((13507, 13611), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n channel Resolution bandwidth"""'], {}), '(\n """PXA did not set correctly the\n channel Resolution bandwidth"""\n )\n', (13515, 13611), False, 'from inspect import cleandoc\n'), ((14864, 14964), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n channel Video bandwidth"""'], {}), '(\n """Agilent PXA did not return the\n channel Video bandwidth"""\n )\n', (14872, 14964), False, 'from inspect import cleandoc\n'), ((15405, 15509), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n channel Resolution bandwidth"""'], {}), '(\n """PXA did not set correctly the\n channel Resolution bandwidth"""\n )\n', (15413, 15509), False, 'from inspect import cleandoc\n'), ((16627, 16716), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n sweep point number"""'], {}), '(\n """PXA did not set correctly the\n sweep point 
number""")\n', (16635, 16716), False, 'from inspect import cleandoc\n'), ((17486, 17571), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n average count"""'], {}), '("""PXA did not set correctly the\n average count"""\n )\n', (17494, 17571), False, 'from inspect import cleandoc\n'), ((5451, 5537), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the start frequency"""'], {}), '(\n """PXA did not set correctly\n the start frequency""")\n', (5459, 5537), False, 'from inspect import cleandoc\n'), ((6752, 6837), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the stop frequency"""'], {}), '("""PXA did not set correctly\n the stop frequency"""\n )\n', (6760, 6837), False, 'from inspect import cleandoc\n'), ((8646, 8732), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n span frequency"""'], {}), '(\n """Agilent PXA did not return the\n span frequency""")\n', (8654, 8732), False, 'from inspect import cleandoc\n'), ((9266, 9351), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the span frequency"""'], {}), '("""PXA did not set correctly\n the span frequency"""\n )\n', (9274, 9351), False, 'from inspect import cleandoc\n'), ((9905, 9990), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n span frequency"""'], {}), '("""PXA did not set correctly the\n span frequency"""\n )\n', (9913, 9990), False, 'from inspect import cleandoc\n'), ((10711, 10788), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n sweep time"""'], {}), '("""Agilent PXA did not return the\n sweep time""")\n', (10719, 10788), False, 'from inspect import cleandoc\n'), ((11324, 11400), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the sweep time"""'], {}), '("""PXA did not set correctly\n the sweep time""")\n', (11332, 11400), False, 'from inspect import cleandoc\n'), ((11922, 11998), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n sweep 
time"""'], {}), '("""PXA did not set correctly the\n sweep time""")\n', (11930, 11998), False, 'from inspect import cleandoc\n'), ((12672, 12742), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n RBW"""'], {}), '("""Agilent PXA did not return the\n RBW""")\n', (12680, 12742), False, 'from inspect import cleandoc\n'), ((12905, 13010), 'inspect.cleandoc', 'cleandoc', (['"""Agilent PXA did not return the\n channel Resolution bandwidth"""'], {}), '(\n """Agilent PXA did not return the\n channel Resolution bandwidth"""\n )\n', (12913, 13010), False, 'from inspect import cleandoc\n'), ((13358, 13462), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the channel Resolution bandwidth"""'], {}), '(\n """PXA did not set correctly\n the channel Resolution bandwidth"""\n )\n', (13366, 13462), False, 'from inspect import cleandoc\n'), ((14019, 14123), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n channel Resolution bandwidth"""'], {}), '(\n """PXA did not set correctly the\n channel Resolution bandwidth"""\n )\n', (14027, 14123), False, 'from inspect import cleandoc\n'), ((14488, 14592), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n channel Resolution bandwidth"""'], {}), '(\n """PXA did not set correctly the\n channel Resolution bandwidth"""\n )\n', (14496, 14592), False, 'from inspect import cleandoc\n'), ((15875, 15974), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly the\n channel Video bandwidth"""'], {}), '(\n """PXA did not set correctly the\n channel Video bandwidth"""\n )\n', (15883, 15974), False, 'from inspect import cleandoc\n'), ((9770, 9855), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the span frequency"""'], {}), '("""PXA did not set correctly\n the span frequency"""\n )\n', (9778, 9855), False, 'from inspect import cleandoc\n'), ((11791, 11867), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the sweep time"""'], {}), 
'("""PXA did not set correctly\n the sweep time""")\n', (11799, 11867), False, 'from inspect import cleandoc\n'), ((13870, 13974), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the channel Resolution bandwidth"""'], {}), '(\n """PXA did not set correctly\n the channel Resolution bandwidth"""\n )\n', (13878, 13974), False, 'from inspect import cleandoc\n'), ((14339, 14443), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the channel Resolution bandwidth"""'], {}), '(\n """PXA did not set correctly\n the channel Resolution bandwidth"""\n )\n', (14347, 14443), False, 'from inspect import cleandoc\n'), ((15731, 15830), 'inspect.cleandoc', 'cleandoc', (['"""PXA did not set correctly\n the channel Video bandwidth"""'], {}), '(\n """PXA did not set correctly\n the channel Video bandwidth"""\n )\n', (15739, 15830), False, 'from inspect import cleandoc\n')] |
# -*- coding: utf-8 -*-
"""transferlearning.py
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oD3EweYMUiNQi9TGbXH8BHenN9vKCLxB
"""
# loading basic dependencies
import pandas as pd
import numpy as np
import os
import keras
import matplotlib.pyplot as plt
from keras.layers import Dense,GlobalAveragePooling2D
from keras.applications import MobileNet
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score,confusion_matrix
from keras.models import load_model
from PIL import Image
import numpy as np
from skimage import transform
#function to load file
def load(filename, size=(224, 224, 3)):
    """Load an image file and preprocess it for model prediction.

    The image is converted to float32, scaled into the [0, 1] range,
    resized, and given a leading batch axis so it can be passed directly
    to ``model.predict``.

    Args:
        filename: Path of the image file to load.
        size: Target (height, width, channels) shape expected by the
            network. Defaults to (224, 224, 3), the MobileNet input shape
            used elsewhere in this script.

    Returns:
        A float32 numpy array of shape (1, *size).
    """
    np_image = Image.open(filename)
    # Normalize raw pixel values to [0, 1] before resizing.
    np_image = np.array(np_image).astype('float32')/255
    np_image = transform.resize(np_image, size)
    # Add the batch dimension expected by Keras models.
    np_image = np.expand_dims(np_image, axis=0)
    return np_image
# Load and preprocess the input image (path is hard-coded here).
img=load("img.jpg")
# Restore the previously trained transfer-learning model from disk.
model = load_model('transfer_learning.h5')
y=model.predict(img)
print(y) | [
"keras.models.load_model",
"numpy.expand_dims",
"PIL.Image.open",
"skimage.transform.resize",
"numpy.array"
] | [((1212, 1246), 'keras.models.load_model', 'load_model', (['"""transfer_learning.h5"""'], {}), "('transfer_learning.h5')\n", (1222, 1246), False, 'from keras.models import load_model\n'), ((915, 935), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (925, 935), False, 'from PIL import Image\n'), ((1005, 1046), 'skimage.transform.resize', 'transform.resize', (['np_image', '(224, 224, 3)'], {}), '(np_image, (224, 224, 3))\n', (1021, 1046), False, 'from skimage import transform\n'), ((1061, 1093), 'numpy.expand_dims', 'np.expand_dims', (['np_image'], {'axis': '(0)'}), '(np_image, axis=0)\n', (1075, 1093), True, 'import numpy as np\n'), ((950, 968), 'numpy.array', 'np.array', (['np_image'], {}), '(np_image)\n', (958, 968), True, 'import numpy as np\n')] |
import six
import math
import numpy as np
from numba import jit
from .bbox import *
from .mask import *
@jit
def generate_rpn_anchor_target(anchors,
                               gt_boxes,
                               is_crowd,
                               im_info,
                               rpn_straddle_thresh,
                               rpn_batch_size_per_im,
                               rpn_positive_overlap,
                               rpn_negative_overlap,
                               rpn_fg_fraction,
                               use_random=True,
                               anchor_reg_weights=[1., 1., 1., 1.]):
    """Build RPN training targets for a batch of images.

    For each image this filters out anchors that straddle the image
    border, matches the remaining anchors to ground-truth boxes, samples
    a fixed-size batch of positive/negative anchors, and computes the
    box-regression deltas for the positives.

    Args:
        anchors: (A, 4) anchor boxes shared across the batch.
        gt_boxes: per-image ground-truth boxes (batch-first).
        is_crowd: per-image crowd flags; crowd boxes are excluded.
        im_info: per-image (height, width, scale) triples.
        rpn_straddle_thresh: anchors extending more than this many pixels
            outside the image are discarded; a negative value disables
            the filter.
        rpn_batch_size_per_im: number of anchors sampled per image.
        rpn_positive_overlap: IoU at or above which an anchor is positive.
        rpn_negative_overlap: IoU below which an anchor is negative.
        rpn_fg_fraction: target fraction of positives in the sample.
        use_random: randomly subsample surplus positives when True.
        anchor_reg_weights: per-coordinate weights passed to bbox2delta.
            NOTE(review): mutable default argument -- safe only because
            it is never mutated here.

    Returns:
        Tuple (loc_indexes, cls_indexes, tgt_labels, tgt_deltas,
        anchor_inside_weights); indexes are offset into a flattened
        (batch * A) anchor layout.
    """
    anchor_num = anchors.shape[0]
    batch_size = gt_boxes.shape[0]
    loc_indexes = []
    cls_indexes = []
    tgt_labels = []
    tgt_deltas = []
    anchor_inside_weights = []
    for i in range(batch_size):
        # TODO: move anchor filter into anchor generator
        im_height = im_info[i][0]
        im_width = im_info[i][1]
        im_scale = im_info[i][2]
        if rpn_straddle_thresh >= 0:
            # Keep only anchors whose corners stay within the (padded)
            # image border.
            anchor_inds = np.where((anchors[:, 0] >= -rpn_straddle_thresh) & (
                anchors[:, 1] >= -rpn_straddle_thresh) & (
                    anchors[:, 2] < im_width + rpn_straddle_thresh) & (
                        anchors[:, 3] < im_height + rpn_straddle_thresh))[0]
            anchor = anchors[anchor_inds, :]
        else:
            anchor_inds = np.arange(anchors.shape[0])
            anchor = anchors
        # Ground-truth boxes are stored unscaled; bring them to input scale
        # and drop crowd annotations.
        gt_bbox = gt_boxes[i] * im_scale
        is_crowd_slice = is_crowd[i]
        not_crowd_inds = np.where(is_crowd_slice == 0)[0]
        gt_bbox = gt_bbox[not_crowd_inds]
        # Step1: match anchor and gt_bbox
        anchor_gt_bbox_inds, anchor_gt_bbox_iou, labels = label_anchor(anchor,
                                                                      gt_bbox)
        # Step2: sample anchor
        fg_inds, bg_inds, fg_fake_inds, fake_num = sample_anchor(
            anchor_gt_bbox_iou, labels, rpn_positive_overlap,
            rpn_negative_overlap, rpn_batch_size_per_im, rpn_fg_fraction,
            use_random)
        # Step3: make output
        loc_inds = np.hstack([fg_fake_inds, fg_inds])
        cls_inds = np.hstack([fg_inds, bg_inds])
        sampled_labels = labels[cls_inds]
        sampled_anchors = anchor[loc_inds]
        sampled_gt_boxes = gt_bbox[anchor_gt_bbox_inds[loc_inds]]
        sampled_deltas = bbox2delta(sampled_anchors, sampled_gt_boxes,
                                    anchor_reg_weights)
        # Fake foreground entries sit at the front of loc_inds and get a
        # zero inside-weight so they do not affect the regression loss.
        anchor_inside_weight = np.zeros((len(loc_inds), 4), dtype=np.float32)
        anchor_inside_weight[fake_num:, :] = 1
        # Offset per-image indexes into the flattened (batch * A) layout.
        loc_indexes.append(anchor_inds[loc_inds] + i * anchor_num)
        cls_indexes.append(anchor_inds[cls_inds] + i * anchor_num)
        tgt_labels.append(sampled_labels)
        tgt_deltas.append(sampled_deltas)
        anchor_inside_weights.append(anchor_inside_weight)
    loc_indexes = np.concatenate(loc_indexes)
    cls_indexes = np.concatenate(cls_indexes)
    tgt_labels = np.concatenate(tgt_labels).astype('float32')
    tgt_deltas = np.vstack(tgt_deltas).astype('float32')
    anchor_inside_weights = np.vstack(anchor_inside_weights)
    return loc_indexes, cls_indexes, tgt_labels, tgt_deltas, anchor_inside_weights
@jit
def label_anchor(anchors, gt_boxes):
    """Match anchors to ground-truth boxes via IoU.

    Returns, for every anchor, the index of its best-overlapping gt box,
    the corresponding IoU value, and an initial label vector that is 1
    for anchors achieving the per-gt maximum IoU (ties included) and -1
    everywhere else.
    """
    overlap = bbox_overlaps(anchors, gt_boxes)
    num_anchors, num_gt = overlap.shape
    # Best anchor for each ground-truth box.
    best_anchor_per_gt = overlap.argmax(axis=0)
    best_iou_per_gt = overlap[best_anchor_per_gt, np.arange(num_gt)]
    # Anchor rows that reach a per-gt maximum IoU, including ties.
    max_iou_rows = np.where(overlap == best_iou_per_gt)[0]
    # Best ground-truth box for each anchor.
    anchor_gt_bbox_inds = overlap.argmax(axis=1)
    anchor_gt_bbox_iou = overlap[np.arange(num_anchors), anchor_gt_bbox_inds]
    labels = np.full((num_anchors, ), -1, dtype=np.int32)
    labels[max_iou_rows] = 1
    return anchor_gt_bbox_inds, anchor_gt_bbox_iou, labels
@jit
def sample_anchor(anchor_gt_bbox_iou,
                  labels,
                  rpn_positive_overlap,
                  rpn_negative_overlap,
                  rpn_batch_size_per_im,
                  rpn_fg_fraction,
                  use_random=True):
    """Sample foreground and background anchors for one image.

    Mutates ``labels`` in place: anchors with IoU >= rpn_positive_overlap
    become 1 (foreground), sampled negatives become 0, and surplus
    positives are disabled with -1.

    Args:
        anchor_gt_bbox_iou: (A,) best IoU of each anchor with any gt box.
        labels: (A,) int labels pre-seeded by label_anchor (1 for gt-argmax
            anchors, -1 elsewhere); modified in place.
        rpn_positive_overlap: IoU threshold for positives.
        rpn_negative_overlap: IoU upper bound for negatives.
        rpn_batch_size_per_im: total number of anchors to sample.
        rpn_fg_fraction: desired fraction of foreground anchors.
        use_random: subsample randomly when True, deterministically
            (prefix slice) otherwise.

    Returns:
        (fg_inds, bg_inds, fg_fake_inds, fake_num) where fg_fake_inds
        repeats the first foreground index once per sampled background
        anchor that is also foreground, letting the caller pad the
        regression batch with entries that get zero loss weight.
    """
    labels[anchor_gt_bbox_iou >= rpn_positive_overlap] = 1

    # Keep at most num_fg positives; disable the rest with -1.
    num_fg = int(rpn_fg_fraction * rpn_batch_size_per_im)
    fg_inds = np.where(labels == 1)[0]
    if len(fg_inds) > num_fg and use_random:
        disable_inds = np.random.choice(
            fg_inds, size=(len(fg_inds) - num_fg), replace=False)
    else:
        disable_inds = fg_inds[num_fg:]
    labels[disable_inds] = -1
    fg_inds = np.where(labels == 1)[0]

    # Fill the remaining budget with low-IoU negatives.
    num_bg = rpn_batch_size_per_im - np.sum(labels == 1)
    bg_inds = np.where(anchor_gt_bbox_iou < rpn_negative_overlap)[0]
    if len(bg_inds) > num_bg and use_random:
        enable_inds = bg_inds[np.random.randint(len(bg_inds), size=num_bg)]
    else:
        enable_inds = bg_inds[:num_bg]

    fg_fake_inds = np.array([], np.int32)
    # BUGFIX: guard against an empty foreground set -- fg_inds[0] raised
    # IndexError when no anchor was positive; the slice keeps the exact
    # same value when fg_inds is non-empty.
    fg_value = np.asarray(fg_inds[:1], np.int32)
    fake_num = 0
    for bg_id in enable_inds:
        if bg_id in fg_inds:
            fake_num += 1
            fg_fake_inds = np.hstack([fg_fake_inds, fg_value])
    labels[enable_inds] = 0

    fg_inds = np.where(labels == 1)[0]
    bg_inds = np.where(labels == 0)[0]
    return fg_inds, bg_inds, fg_fake_inds, fake_num
@jit
def generate_proposal_target(rpn_rois,
                             rpn_rois_num,
                             gt_classes,
                             is_crowd,
                             gt_boxes,
                             im_info,
                             batch_size_per_im,
                             fg_fraction,
                             fg_thresh,
                             bg_thresh_hi,
                             bg_thresh_lo,
                             bbox_reg_weights,
                             class_nums=81,
                             use_random=True,
                             is_cls_agnostic=False,
                             is_cascade_rcnn=False):
    """Build RCNN head training targets from RPN proposals.

    For each image: rescale its proposals back to the original image,
    prepend the ground-truth boxes, label and sample RoIs, and compute
    classification labels plus per-class regression deltas and weights.

    Args:
        rpn_rois: concatenated proposals for the whole batch.
        rpn_rois_num: number of proposals belonging to each image.
        gt_classes, is_crowd, gt_boxes: per-image ground-truth data.
        im_info: per-image (height, width, scale) triples.
        batch_size_per_im: RoIs sampled per image.
        fg_fraction, fg_thresh, bg_thresh_hi, bg_thresh_lo: sampling
            thresholds forwarded to sample_bbox.
        bbox_reg_weights: weights forwarded to compute_bbox_targets.
        class_nums: number of classes including background.
        use_random: random subsampling toggle.
        is_cls_agnostic: class-agnostic regression toggle.
        is_cascade_rcnn: cascade-stage behavior toggle.

    Returns:
        (rois, tgt_labels, tgt_deltas, rois_inside_weights,
        rois_outside_weights, new_rois_num) concatenated over the batch.
    """
    rois = []
    tgt_labels = []
    tgt_deltas = []
    rois_inside_weights = []
    rois_outside_weights = []
    new_rois_num = []
    st_num = 0
    end_num = 0
    for im_i in range(len(rpn_rois_num)):
        # rpn_rois is a flat concatenation; slice this image's proposals.
        length = rpn_rois_num[im_i]
        end_num += length
        rpn_roi = rpn_rois[st_num:end_num]
        im_scale = im_info[im_i][2]
        # Undo the input scaling so RoIs align with the unscaled gt boxes.
        rpn_roi = rpn_roi / im_scale
        gt_bbox = gt_boxes[im_i]
        if is_cascade_rcnn:
            # Earlier stage already prepended gt boxes; drop them here.
            rpn_roi = rpn_roi[gt_bbox.shape[0]:, :]
        bbox = np.vstack([gt_bbox, rpn_roi])
        # Step1: label bbox
        roi_gt_bbox_inds, roi_gt_bbox_iou, labels, = label_bbox(
            bbox, gt_bbox, gt_classes[im_i], is_crowd[im_i])
        # Step2: sample bbox
        if is_cascade_rcnn:
            # Discard degenerate boxes with non-positive width/height.
            ws = bbox[:, 2] - bbox[:, 0] + 1
            hs = bbox[:, 3] - bbox[:, 1] + 1
            keep = np.where((ws > 0) & (hs > 0))[0]
            bbox = bbox[keep]
        fg_inds, bg_inds, fg_nums = sample_bbox(
            roi_gt_bbox_iou, batch_size_per_im, fg_fraction, fg_thresh,
            bg_thresh_hi, bg_thresh_lo, bbox_reg_weights, class_nums,
            use_random, is_cls_agnostic, is_cascade_rcnn)
        # Step3: make output
        sampled_inds = np.append(fg_inds, bg_inds)
        sampled_labels = labels[sampled_inds]
        # Everything after the foreground block is background (label 0).
        sampled_labels[fg_nums:] = 0
        sampled_boxes = bbox[sampled_inds]
        sampled_gt_boxes = gt_bbox[roi_gt_bbox_inds[sampled_inds]]
        # Background rows get a placeholder gt box; their deltas are
        # masked out by the zero inside-weights below.
        sampled_gt_boxes[fg_nums:, :] = gt_bbox[0]
        sampled_deltas = compute_bbox_targets(sampled_boxes, sampled_gt_boxes,
                                              sampled_labels, bbox_reg_weights)
        sampled_deltas, bbox_inside_weights = expand_bbox_targets(
            sampled_deltas, class_nums, is_cls_agnostic)
        bbox_outside_weights = np.array(
            bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype)
        # Return RoIs in input-image coordinates.
        roi = sampled_boxes * im_scale
        st_num += length
        rois.append(roi)
        new_rois_num.append(roi.shape[0])
        tgt_labels.append(sampled_labels)
        tgt_deltas.append(sampled_deltas)
        rois_inside_weights.append(bbox_inside_weights)
        rois_outside_weights.append(bbox_outside_weights)
    rois = np.concatenate(rois, axis=0).astype(np.float32)
    tgt_labels = np.concatenate(
        tgt_labels, axis=0).astype(np.int32).reshape(-1, 1)
    tgt_deltas = np.concatenate(tgt_deltas, axis=0).astype(np.float32)
    rois_inside_weights = np.concatenate(
        rois_inside_weights, axis=0).astype(np.float32)
    rois_outside_weights = np.concatenate(
        rois_outside_weights, axis=0).astype(np.float32)
    new_rois_num = np.asarray(new_rois_num, np.int32)
    return rois, tgt_labels, tgt_deltas, rois_inside_weights, rois_outside_weights, new_rois_num
@jit
def label_bbox(boxes,
               gt_boxes,
               gt_classes,
               is_crowd,
               class_nums=81,
               is_cascade_rcnn=False):
    """Assign a class label and best-matching gt box to each RoI.

    Builds a per-RoI, per-class IoU table: for each RoI the IoU with its
    best-overlapping gt box is written into the column of that box's
    class. The argmax over classes then yields the label (0 = background
    when a RoI overlaps nothing). Crowd RoIs are forced to -1 IoU so the
    background column wins.

    Returns:
        (roi_gt_bbox_inds, roi_gt_bbox_iou, labels) with shapes
        (R,), (R, class_nums), (R,).
    """
    iou = bbox_overlaps(boxes, gt_boxes)
    # every roi's gt box's index
    roi_gt_bbox_inds = np.zeros((boxes.shape[0]), dtype=np.int32)
    roi_gt_bbox_iou = np.zeros((boxes.shape[0], class_nums))
    iou_argmax = iou.argmax(axis=1)
    iou_max = iou.max(axis=1)
    # Only RoIs with positive overlap are matched; the rest keep index 0
    # and an all-zero IoU row (i.e. background).
    overlapped_boxes_ind = np.where(iou_max > 0)[0].astype('int32')
    roi_gt_bbox_inds[overlapped_boxes_ind] = iou_argmax[overlapped_boxes_ind]
    overlapped_boxes_gt_classes = gt_classes[iou_argmax[
        overlapped_boxes_ind]].astype('int32')
    roi_gt_bbox_iou[overlapped_boxes_ind,
                    overlapped_boxes_gt_classes] = iou_max[overlapped_boxes_ind]
    crowd_ind = np.where(is_crowd)[0]
    roi_gt_bbox_iou[crowd_ind] = -1
    labels = roi_gt_bbox_iou.argmax(axis=1)
    return roi_gt_bbox_inds, roi_gt_bbox_iou, labels
@jit
def sample_bbox(roi_gt_bbox_iou,
                batch_size_per_im,
                fg_fraction,
                fg_thresh,
                bg_thresh_hi,
                bg_thresh_lo,
                bbox_reg_weights,
                class_nums,
                use_random=True,
                is_cls_agnostic=False,
                is_cascade_rcnn=False):
    """Select foreground and background RoI indices by IoU thresholds.

    A RoI is foreground when its best per-class IoU reaches fg_thresh,
    and background when that IoU falls in [bg_thresh_lo, bg_thresh_hi).
    In cascade mode every candidate is kept; otherwise the sample is
    capped at batch_size_per_im with roughly fg_fraction foreground.

    Returns:
        (fg_inds, bg_inds, fg_nums) - sampled indices and the number of
        foreground RoIs.
    """
    max_overlap = roi_gt_bbox_iou.max(axis=1)
    roi_budget = int(batch_size_per_im)
    fg_budget = int(np.round(fg_fraction * roi_budget))
    fg_candidates = np.where(max_overlap >= fg_thresh)[0]
    bg_mask = (max_overlap < bg_thresh_hi) & (max_overlap >= bg_thresh_lo)
    bg_candidates = np.where(bg_mask)[0]
    if is_cascade_rcnn:
        # Cascade stages keep every candidate; no subsampling.
        return fg_candidates, bg_candidates, fg_candidates.shape[0]
    # Foreground: cap at the budget, optionally picking at random.
    fg_nums = np.minimum(fg_budget, fg_candidates.shape[0])
    if use_random and fg_candidates.shape[0] > fg_nums:
        fg_candidates = np.random.choice(
            fg_candidates, size=fg_nums, replace=False)
    fg_inds = fg_candidates[:fg_nums]
    # Background: fill the remaining slots of the RoI budget.
    bg_nums = np.minimum(roi_budget - fg_nums, bg_candidates.shape[0])
    if use_random and bg_candidates.shape[0] > bg_nums:
        bg_candidates = np.random.choice(
            bg_candidates, size=bg_nums, replace=False)
    bg_inds = bg_candidates[:bg_nums]
    return fg_inds, bg_inds, fg_nums
@jit
def generate_mask_target(im_info, gt_classes, is_crowd, gt_segms, rois,
                         rois_num, labels_int32, num_classes, resolution):
    """Build mask-head training targets from sampled RoIs.

    For each image this strips the (-1, -1) padding from the ground-truth
    polygons, rescales the image's RoIs back to original coordinates, and
    delegates to sample_mask to rasterize per-RoI mask targets.

    Args:
        im_info: per-image (height, width, scale) triples.
        gt_classes, is_crowd, gt_segms: per-image ground-truth data;
            gt_segms holds padded polygon coordinate arrays.
        rois: concatenated sampled RoIs for the whole batch.
        rois_num: number of RoIs belonging to each image.
        labels_int32: class labels of the sampled RoIs.
        num_classes: number of classes including background.
        resolution: output mask side length (mask is resolution**2 flat).

    Returns:
        (mask_rois, mask_rois_num, rois_has_mask_int32, mask_int32)
        concatenated over the batch.
    """
    mask_rois = []
    mask_rois_num = []
    rois_has_mask_int32 = []
    mask_int32 = []
    st_num = 0
    end_num = 0
    for k in range(len(rois_num)):
        length = rois_num[k]
        end_num += length
        # remove padding
        gt_polys = gt_segms[k]
        new_gt_polys = []
        for i in range(gt_polys.shape[0]):
            gt_segs = []
            for j in range(gt_polys[i].shape[0]):
                new_poly = []
                polys = gt_polys[i][j]
                for ii in range(polys.shape[0]):
                    x, y = polys[ii]
                    # (-1, -1) marks padded vertices; skip them.
                    if (x == -1 and y == -1):
                        continue
                    elif (x >= 0 and y >= 0):
                        new_poly.append([x, y])  # array, one poly
                if len(new_poly) > 0:
                    gt_segs.append(new_poly)
            new_gt_polys.append(gt_segs)
        im_scale = im_info[k][2]
        # RoIs are in input-image scale; polygons are not -- rescale.
        boxes = rois[st_num:end_num] / im_scale
        bbox_fg, bbox_has_mask, masks = sample_mask(
            boxes, new_gt_polys, labels_int32[st_num:end_num], gt_classes[k],
            is_crowd[k], num_classes, resolution)
        st_num += length
        mask_rois.append(bbox_fg * im_scale)
        mask_rois_num.append(len(bbox_fg))
        rois_has_mask_int32.append(bbox_has_mask)
        mask_int32.append(masks)
    mask_rois = np.concatenate(mask_rois, axis=0).astype(np.float32)
    mask_rois_num = np.array(mask_rois_num).astype(np.int32)
    rois_has_mask_int32 = np.concatenate(
        rois_has_mask_int32, axis=0).astype(np.int32)
    mask_int32 = np.concatenate(mask_int32, axis=0).astype(np.int32)
    return mask_rois, mask_rois_num, rois_has_mask_int32, mask_int32
@jit
def sample_mask(boxes, gt_polys, label_int32, gt_classes, is_crowd, num_classes,
                resolution):
    """Rasterize per-RoI binary mask targets for one image.

    Foreground RoIs (label > 0) are matched by IoU to the non-crowd
    ground-truth polygons and the matching polygon is rasterized inside
    the RoI at the given resolution. When no foreground RoI exists, a
    single dummy entry with an all -1 mask and label 0 is emitted so the
    batch is never empty.

    Returns:
        (bbox_fg, bbox_has_mask, masks) - the RoIs that received masks,
        the foreground indices, and one-hot-expanded mask targets.
    """
    gt_polys_inds = np.where((gt_classes > 0) & (is_crowd == 0))[0]
    _gt_polys = [gt_polys[i] for i in gt_polys_inds]
    boxes_from_polys = polys_to_boxes(_gt_polys)
    fg_inds = np.where(label_int32 > 0)[0]
    bbox_has_mask = fg_inds.copy()
    if fg_inds.shape[0] > 0:
        labels_fg = label_int32[fg_inds]
        masks_fg = np.zeros((fg_inds.shape[0], resolution**2), dtype=np.int32)
        bbox_fg = boxes[fg_inds]
        # Match each foreground RoI to its best-overlapping polygon.
        iou = bbox_overlaps_mask(bbox_fg, boxes_from_polys)
        fg_polys_inds = np.argmax(iou, axis=1)
        for i in range(bbox_fg.shape[0]):
            poly_gt = _gt_polys[fg_polys_inds[i]]
            roi_fg = bbox_fg[i]
            mask = polys_to_mask_wrt_box(poly_gt, roi_fg, resolution)
            # Binarize the rasterized mask and flatten it row-major.
            mask = np.array(mask > 0, dtype=np.int32)
            masks_fg[i, :] = np.reshape(mask, resolution**2)
    else:
        # No foreground: emit one dummy background RoI with -1 mask.
        bg_inds = np.where(label_int32 == 0)[0]
        bbox_fg = boxes[bg_inds[0]].reshape((1, -1))
        masks_fg = -np.ones((1, resolution**2), dtype=np.int32)
        labels_fg = np.zeros((1, ))
        bbox_has_mask = np.append(bbox_has_mask, 0)
    masks = expand_mask_targets(masks_fg, labels_fg, resolution, num_classes)
    return bbox_fg, bbox_has_mask, masks
| [
"numpy.minimum",
"numpy.sum",
"numpy.concatenate",
"numpy.argmax",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.append",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.reshape",
"numpy.random.choice",
"numpy.round",
"numpy.vstack"
] | [((2969, 2996), 'numpy.concatenate', 'np.concatenate', (['loc_indexes'], {}), '(loc_indexes)\n', (2983, 2996), True, 'import numpy as np\n'), ((3015, 3042), 'numpy.concatenate', 'np.concatenate', (['cls_indexes'], {}), '(cls_indexes)\n', (3029, 3042), True, 'import numpy as np\n'), ((3190, 3222), 'numpy.vstack', 'np.vstack', (['anchor_inside_weights'], {}), '(anchor_inside_weights)\n', (3199, 3222), True, 'import numpy as np\n'), ((4944, 4966), 'numpy.array', 'np.array', (['[]', 'np.int32'], {}), '([], np.int32)\n', (4952, 4966), True, 'import numpy as np\n'), ((4982, 5014), 'numpy.array', 'np.array', (['[fg_inds[0]]', 'np.int32'], {}), '([fg_inds[0]], np.int32)\n', (4990, 5014), True, 'import numpy as np\n'), ((8722, 8756), 'numpy.asarray', 'np.asarray', (['new_rois_num', 'np.int32'], {}), '(new_rois_num, np.int32)\n', (8732, 8756), True, 'import numpy as np\n'), ((9130, 9170), 'numpy.zeros', 'np.zeros', (['boxes.shape[0]'], {'dtype': 'np.int32'}), '(boxes.shape[0], dtype=np.int32)\n', (9138, 9170), True, 'import numpy as np\n'), ((9195, 9233), 'numpy.zeros', 'np.zeros', (['(boxes.shape[0], class_nums)'], {}), '((boxes.shape[0], class_nums))\n', (9203, 9233), True, 'import numpy as np\n'), ((2182, 2216), 'numpy.hstack', 'np.hstack', (['[fg_fake_inds, fg_inds]'], {}), '([fg_fake_inds, fg_inds])\n', (2191, 2216), True, 'import numpy as np\n'), ((2236, 2265), 'numpy.hstack', 'np.hstack', (['[fg_inds, bg_inds]'], {}), '([fg_inds, bg_inds])\n', (2245, 2265), True, 'import numpy as np\n'), ((3577, 3612), 'numpy.where', 'np.where', (['(iou == gt_bbox_anchor_iou)'], {}), '(iou == gt_bbox_anchor_iou)\n', (3585, 3612), True, 'import numpy as np\n'), ((3789, 3829), 'numpy.ones', 'np.ones', (['(iou.shape[0],)'], {'dtype': 'np.int32'}), '((iou.shape[0],), dtype=np.int32)\n', (3796, 3829), True, 'import numpy as np\n'), ((4331, 4352), 'numpy.where', 'np.where', (['(labels == 1)'], {}), '(labels == 1)\n', (4339, 4352), True, 'import numpy as np\n'), ((4602, 4623), 
'numpy.where', 'np.where', (['(labels == 1)'], {}), '(labels == 1)\n', (4610, 4623), True, 'import numpy as np\n'), ((4665, 4684), 'numpy.sum', 'np.sum', (['(labels == 1)'], {}), '(labels == 1)\n', (4671, 4684), True, 'import numpy as np\n'), ((4699, 4750), 'numpy.where', 'np.where', (['(anchor_gt_bbox_iou < rpn_negative_overlap)'], {}), '(anchor_gt_bbox_iou < rpn_negative_overlap)\n', (4707, 4750), True, 'import numpy as np\n'), ((5223, 5244), 'numpy.where', 'np.where', (['(labels == 1)'], {}), '(labels == 1)\n', (5231, 5244), True, 'import numpy as np\n'), ((5262, 5283), 'numpy.where', 'np.where', (['(labels == 0)'], {}), '(labels == 0)\n', (5270, 5283), True, 'import numpy as np\n'), ((6561, 6590), 'numpy.vstack', 'np.vstack', (['[gt_bbox, rpn_roi]'], {}), '([gt_bbox, rpn_roi])\n', (6570, 6590), True, 'import numpy as np\n'), ((7282, 7309), 'numpy.append', 'np.append', (['fg_inds', 'bg_inds'], {}), '(fg_inds, bg_inds)\n', (7291, 7309), True, 'import numpy as np\n'), ((7870, 7936), 'numpy.array', 'np.array', (['(bbox_inside_weights > 0)'], {'dtype': 'bbox_inside_weights.dtype'}), '(bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype)\n', (7878, 7936), True, 'import numpy as np\n'), ((9691, 9709), 'numpy.where', 'np.where', (['is_crowd'], {}), '(is_crowd)\n', (9699, 9709), True, 'import numpy as np\n'), ((10337, 10375), 'numpy.round', 'np.round', (['(fg_fraction * rois_per_image)'], {}), '(fg_fraction * rois_per_image)\n', (10345, 10375), True, 'import numpy as np\n'), ((10769, 10813), 'numpy.minimum', 'np.minimum', (['fg_rois_per_im', 'fg_inds.shape[0]'], {}), '(fg_rois_per_im, fg_inds.shape[0])\n', (10779, 10813), True, 'import numpy as np\n'), ((11187, 11224), 'numpy.minimum', 'np.minimum', (['bg_nums', 'bg_inds.shape[0]'], {}), '(bg_nums, bg_inds.shape[0])\n', (11197, 11224), True, 'import numpy as np\n'), ((13436, 13480), 'numpy.where', 'np.where', (['((gt_classes > 0) & (is_crowd == 0))'], {}), '((gt_classes > 0) & (is_crowd == 0))\n', (13444, 13480), 
True, 'import numpy as np\n'), ((13601, 13626), 'numpy.where', 'np.where', (['(label_int32 > 0)'], {}), '(label_int32 > 0)\n', (13609, 13626), True, 'import numpy as np\n'), ((13755, 13816), 'numpy.zeros', 'np.zeros', (['(fg_inds.shape[0], resolution ** 2)'], {'dtype': 'np.int32'}), '((fg_inds.shape[0], resolution ** 2), dtype=np.int32)\n', (13763, 13816), True, 'import numpy as np\n'), ((13933, 13955), 'numpy.argmax', 'np.argmax', (['iou'], {'axis': '(1)'}), '(iou, axis=1)\n', (13942, 13955), True, 'import numpy as np\n'), ((14462, 14476), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (14470, 14476), True, 'import numpy as np\n'), ((14502, 14529), 'numpy.append', 'np.append', (['bbox_has_mask', '(0)'], {}), '(bbox_has_mask, 0)\n', (14511, 14529), True, 'import numpy as np\n'), ((1434, 1461), 'numpy.arange', 'np.arange', (['anchors.shape[0]'], {}), '(anchors.shape[0])\n', (1443, 1461), True, 'import numpy as np\n'), ((1595, 1624), 'numpy.where', 'np.where', (['(is_crowd_slice == 0)'], {}), '(is_crowd_slice == 0)\n', (1603, 1624), True, 'import numpy as np\n'), ((3060, 3086), 'numpy.concatenate', 'np.concatenate', (['tgt_labels'], {}), '(tgt_labels)\n', (3074, 3086), True, 'import numpy as np\n'), ((3122, 3143), 'numpy.vstack', 'np.vstack', (['tgt_deltas'], {}), '(tgt_deltas)\n', (3131, 3143), True, 'import numpy as np\n'), ((3522, 3545), 'numpy.arange', 'np.arange', (['iou.shape[1]'], {}), '(iou.shape[1])\n', (3531, 3545), True, 'import numpy as np\n'), ((3729, 3752), 'numpy.arange', 'np.arange', (['iou.shape[0]'], {}), '(iou.shape[0])\n', (3738, 3752), True, 'import numpy as np\n'), ((5144, 5179), 'numpy.hstack', 'np.hstack', (['[fg_fake_inds, fg_value]'], {}), '([fg_fake_inds, fg_value])\n', (5153, 5179), True, 'import numpy as np\n'), ((8293, 8321), 'numpy.concatenate', 'np.concatenate', (['rois'], {'axis': '(0)'}), '(rois, axis=0)\n', (8307, 8321), True, 'import numpy as np\n'), ((8451, 8485), 'numpy.concatenate', 'np.concatenate', (['tgt_deltas'], 
{'axis': '(0)'}), '(tgt_deltas, axis=0)\n', (8465, 8485), True, 'import numpy as np\n'), ((8531, 8574), 'numpy.concatenate', 'np.concatenate', (['rois_inside_weights'], {'axis': '(0)'}), '(rois_inside_weights, axis=0)\n', (8545, 8574), True, 'import numpy as np\n'), ((8630, 8674), 'numpy.concatenate', 'np.concatenate', (['rois_outside_weights'], {'axis': '(0)'}), '(rois_outside_weights, axis=0)\n', (8644, 8674), True, 'import numpy as np\n'), ((10420, 10462), 'numpy.where', 'np.where', (['(roi_gt_bbox_iou_max >= fg_thresh)'], {}), '(roi_gt_bbox_iou_max >= fg_thresh)\n', (10428, 10462), True, 'import numpy as np\n'), ((10484, 10574), 'numpy.where', 'np.where', (['((roi_gt_bbox_iou_max < bg_thresh_hi) & (roi_gt_bbox_iou_max >= bg_thresh_lo))'], {}), '((roi_gt_bbox_iou_max < bg_thresh_hi) & (roi_gt_bbox_iou_max >=\n bg_thresh_lo))\n', (10492, 10574), True, 'import numpy as np\n'), ((10705, 10747), 'numpy.where', 'np.where', (['(roi_gt_bbox_iou_max >= fg_thresh)'], {}), '(roi_gt_bbox_iou_max >= fg_thresh)\n', (10713, 10747), True, 'import numpy as np\n'), ((10892, 10946), 'numpy.random.choice', 'np.random.choice', (['fg_inds'], {'size': 'fg_nums', 'replace': '(False)'}), '(fg_inds, size=fg_nums, replace=False)\n', (10908, 10946), True, 'import numpy as np\n'), ((11023, 11113), 'numpy.where', 'np.where', (['((roi_gt_bbox_iou_max < bg_thresh_hi) & (roi_gt_bbox_iou_max >= bg_thresh_lo))'], {}), '((roi_gt_bbox_iou_max < bg_thresh_hi) & (roi_gt_bbox_iou_max >=\n bg_thresh_lo))\n', (11031, 11113), True, 'import numpy as np\n'), ((11303, 11357), 'numpy.random.choice', 'np.random.choice', (['bg_inds'], {'size': 'bg_nums', 'replace': '(False)'}), '(bg_inds, size=bg_nums, replace=False)\n', (11319, 11357), True, 'import numpy as np\n'), ((12949, 12982), 'numpy.concatenate', 'np.concatenate', (['mask_rois'], {'axis': '(0)'}), '(mask_rois, axis=0)\n', (12963, 12982), True, 'import numpy as np\n'), ((13022, 13045), 'numpy.array', 'np.array', (['mask_rois_num'], {}), 
'(mask_rois_num)\n', (13030, 13045), True, 'import numpy as np\n'), ((13089, 13132), 'numpy.concatenate', 'np.concatenate', (['rois_has_mask_int32'], {'axis': '(0)'}), '(rois_has_mask_int32, axis=0)\n', (13103, 13132), True, 'import numpy as np\n'), ((13176, 13210), 'numpy.concatenate', 'np.concatenate', (['mask_int32'], {'axis': '(0)'}), '(mask_int32, axis=0)\n', (13190, 13210), True, 'import numpy as np\n'), ((14171, 14205), 'numpy.array', 'np.array', (['(mask > 0)'], {'dtype': 'np.int32'}), '(mask > 0, dtype=np.int32)\n', (14179, 14205), True, 'import numpy as np\n'), ((14235, 14268), 'numpy.reshape', 'np.reshape', (['mask', '(resolution ** 2)'], {}), '(mask, resolution ** 2)\n', (14245, 14268), True, 'import numpy as np\n'), ((14295, 14321), 'numpy.where', 'np.where', (['(label_int32 == 0)'], {}), '(label_int32 == 0)\n', (14303, 14321), True, 'import numpy as np\n'), ((14398, 14443), 'numpy.ones', 'np.ones', (['(1, resolution ** 2)'], {'dtype': 'np.int32'}), '((1, resolution ** 2), dtype=np.int32)\n', (14405, 14443), True, 'import numpy as np\n'), ((1088, 1291), 'numpy.where', 'np.where', (['((anchors[:, 0] >= -rpn_straddle_thresh) & (anchors[:, 1] >= -\n rpn_straddle_thresh) & (anchors[:, 2] < im_width + rpn_straddle_thresh) &\n (anchors[:, 3] < im_height + rpn_straddle_thresh))'], {}), '((anchors[:, 0] >= -rpn_straddle_thresh) & (anchors[:, 1] >= -\n rpn_straddle_thresh) & (anchors[:, 2] < im_width + rpn_straddle_thresh) &\n (anchors[:, 3] < im_height + rpn_straddle_thresh))\n', (1096, 1291), True, 'import numpy as np\n'), ((6915, 6944), 'numpy.where', 'np.where', (['((ws > 0) & (hs > 0))'], {}), '((ws > 0) & (hs > 0))\n', (6923, 6944), True, 'import numpy as np\n'), ((9328, 9349), 'numpy.where', 'np.where', (['(iou_max > 0)'], {}), '(iou_max > 0)\n', (9336, 9349), True, 'import numpy as np\n'), ((8358, 8392), 'numpy.concatenate', 'np.concatenate', (['tgt_labels'], {'axis': '(0)'}), '(tgt_labels, axis=0)\n', (8372, 8392), True, 'import numpy as np\n')] |
import random
import numpy as np
import dialog_config
import db_helper
class user_simulator():
# init file path and load data
    def __init__(self):
        """Load slot definitions and event data, then reset dialog state."""
        # Path kept for reference; loading itself goes through db_helper.
        self.path_events_db = 'events_db.json'
        self.information_slot_names = dialog_config.INFORMATION_SLOTS
        self.request_slot_names = dialog_config.REQUEST_SLOTS
        self.event_data = self.__load_event_Data__()
        self.start_conversation()
# init user goal and state
def start_conversation(self):
self.state = {}
self.state['history_slots'] = {}
self.state['inform_slots'] = {}
self.state['request_slots'] = []
self.state['rest_slots'] = []
self.state['turn'] = 1
self.state['act'] = dialog_config.DIALOG_ACT['INFORM']
self.goal = {}
self.goal['inform_slots'] = {}
self.goal['request_slots'] = []
self.dialog_status = dialog_config.DIALOG_STATUS['NO_OUTCOME_YET']
self.agent_proposed = 0
self.deny_count = 0
# load event data which is used for generating user goal and state
    def __load_event_Data__(self):
        """Return the event records used to build user goals and states."""
        return db_helper.get_training_data()
# first round: generate user agenda (user goal and initial state)
    # first round: generate user agenda (user goal and initial state)
    def generate_user_agenda(self, max_turn):
        """Sample a random user goal from the event data and emit the first turn.

        Picks a random event record, chooses 6-7 of its slots, splits them
        between inform slots (constraints the user knows) and request slots
        (what the user wants to find out), and returns the opening INFORM
        action together with the current dialog status.

        Args:
            max_turn: maximum number of turns before the dialog is failed
                (non-positive disables the limit check).

        Returns:
            (sample_action, dialog_status) - the user's first action dict
            and the current status constant.
        """
        self.max_turn = max_turn
        self.start_conversation()
        data = self.event_data[np.random.randint(len(self.event_data))]
        all_slots = list(data.keys())
        # random select several slots
        number_of_slots = np.random.randint(6,8)
        random_choose_slots = [all_slots[i] for i in random.sample(range(len(all_slots)), number_of_slots)]
        # Roughly half of the chosen slots become inform constraints.
        random_inform_slots = int(number_of_slots/2)
        for slot in random_choose_slots:
            if slot in self.information_slot_names and slot != 'event' and \
                len(self.goal['inform_slots']) < random_inform_slots:
                self.goal['inform_slots'][slot] = data[slot]
                # Reveal only 1-2 constraints up front; the rest are held
                # back in rest_slots for later turns.
                if len(self.state['inform_slots']) < np.random.randint(1,3):
                    self.state['inform_slots'][slot] = data[slot]
                else:
                    self.state['rest_slots'].append(slot)
            elif slot in self.request_slot_names:
                self.goal['request_slots'].append(slot)
                self.state['rest_slots'].append(slot)
        # The user always wants an event recommendation.
        self.state['request_slots'].append('event')
        print('User Goal: {}\nUser State: {}'.format(self.goal, self.state))
        sample_action = {}
        sample_action['act'] = self.state['act']
        sample_action['inform_slots'] = self.state['inform_slots']
        sample_action['request_slots'] = self.state['request_slots']
        sample_action['turn'] = self.state['turn']
        sample_action['sentence'] = ''
        return sample_action, self.dialog_status
# user respond after the first round
    # user respond after the first round
    def generate_user_response(self, system_action):
        """Advance the dialog one turn in reaction to the system's action.

        Dispatches to the per-act response handler, enforces the turn
        limit, and fails the dialog when the agent's proposals and the
        user's denials get out of step.

        Args:
            system_action: dict with 'act', 'inform_slots', and
                'request_slots' describing the system's last move.

        Returns:
            (response_action, dialog_status, state) - the user's action
            dict, the updated status constant, and the full user state.
        """
        self.state['turn'] += 1
        self.dialog_status = dialog_config.DIALOG_STATUS['NO_OUTCOME_YET']
        sys_act = system_action['act']
        if (self.max_turn > 0 and self.state['turn'] > self.max_turn):
            # Turn budget exhausted: fail and close.
            self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
            self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_DIALOG']
        else:
            # Slots informed last turn move into history before replying.
            self.state['history_slots'].update(self.state['inform_slots'])
            self.state['inform_slots'].clear()
            if sys_act == dialog_config.DIALOG_ACT['INFORM']:
                self.response_inform(system_action)
            elif sys_act == dialog_config.DIALOG_ACT['REQUEST']:
                self.response_request(system_action)
            elif sys_act == dialog_config.DIALOG_ACT['CONFIRM_ANSWER']:
                self.response_confirm_answer(system_action)
            elif sys_act == dialog_config.DIALOG_ACT['CLOSING']:
                self.response_closing(system_action)
            elif sys_act == dialog_config.DIALOG_ACT['GREETING']:
                self.response_greeting(system_action)
        # After a denial the user expects a new event proposal (an INFORM
        # carrying an 'event' slot); anything else fails the dialog.
        if self.agent_proposed < self.deny_count:
            if system_action['act'] != dialog_config.DIALOG_ACT['INFORM']:
                self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_DIALOG']
                self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
                self.state['inform_slots'].clear()
                self.state['request_slots'] = []
            elif 'event' not in system_action['inform_slots'].keys():
                self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_DIALOG']
                self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
                self.state['inform_slots'].clear()
                self.state['request_slots'] = []
        # The agent must not re-propose more than once past the user's
        # denial count.
        if self.agent_proposed > self.deny_count+1:
            self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_DIALOG']
            self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
            self.state['inform_slots'].clear()
            self.state['request_slots'] = []
        response_action = {}
        response_action['act'] = self.state['act']
        response_action['inform_slots'] = self.state['inform_slots']
        response_action['request_slots'] = self.state['request_slots']
        response_action['turn'] = self.state['turn']
        response_action['sentence'] = ''
        return response_action, self.dialog_status, self.state
def response_inform(self, system_action):
        """Handle an agent INFORM action.

        If the agent proposed an 'event', the user either denies it (with a
        probability that shrinks as more constraint slots have been
        communicated) or accepts it and moves on to its next request slot.
        Otherwise the inform is treated as an answer to the user's pending
        request slot. Updates ``self.state``/``self.dialog_status`` in place.
        """
        self.dialog_status = dialog_config.DIALOG_STATUS['SUCCESS_TEMP']
        # Agent proposed an event
        if 'event' in system_action['inform_slots'].keys():
            self.agent_proposed += 1
            if system_action['inform_slots']['event'] == dialog_config.SPECIAL_SLOT_VALUES['NO_VALUE_MATCH']:
                # NOTE(review): the agent found no matching event, yet the
                # dialog is marked SUCCESS_DIALOG here — confirm this is the
                # intended outcome for a no-match closing.
                self.dialog_status = dialog_config.DIALOG_STATUS['SUCCESS_DIALOG']
                self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
                return
            else:
                # Constraint slots the user has not communicated yet.
                rest_inform = list(set(self.state['rest_slots']) & set(self.goal['inform_slots']))
                if np.random.rand() <= (4 - len(self.state['history_slots']))*0.3:#min(1, len(rest_inform)*0.5): # deny
                    self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_TEMP']
                    self.state['act'] = dialog_config.DIALOG_ACT['DENY']
                    if len(rest_inform) > 0:
                        # Back the denial with one more constraint.
                        selected_slot = random.choice(rest_inform)
                        self.state['inform_slots'][selected_slot] = self.goal['inform_slots'][selected_slot]
                        self.state['rest_slots'].remove(selected_slot)
                    self.deny_count += 1
                    return
                else:
                    # Accept the proposal: remaining constraint slots are
                    # considered settled.
                    rest_inform = list(set(self.state['rest_slots']) & set(self.goal['inform_slots']))
                    for inform in rest_inform:
                        self.state['rest_slots'].remove(inform)
                    # Ask new a question
                    rest_request = list(set(self.state['rest_slots']) & set(self.goal['request_slots']))
                    if len(rest_request) > 0:
                        selected_slot = random.choice(rest_request)
                        self.state['act'] = dialog_config.DIALOG_ACT['REQUEST']
                        self.state['request_slots'] = [selected_slot]
                        self.state['rest_slots'].remove(selected_slot)
                        return
                    else:
                        # Nothing left to ask: close with success.
                        self.state['request_slots'] = []
                        self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
                        self.dialog_status = dialog_config.DIALOG_STATUS['SUCCESS_DIALOG']
                        return
        if len(self.state['request_slots']) > 0 :
            if self.state['request_slots'][0] in system_action['inform_slots'].keys():
                # The agent answered the user's pending request.
                self.dialog_status = dialog_config.DIALOG_STATUS['SUCCESS_TEMP']
                # Ask new a question
                rest_request = list(set(self.state['rest_slots']) & set(self.goal['request_slots']))
                if len(rest_request) > 0:
                    selected_slot = random.choice(rest_request)
                    self.state['act'] = dialog_config.DIALOG_ACT['REQUEST']
                    self.state['request_slots'] = [selected_slot]
                    self.state['rest_slots'].remove(selected_slot)
                else:
                    self.state['request_slots'] = []
                    self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
                    self.dialog_status = dialog_config.DIALOG_STATUS['SUCCESS_DIALOG']
            else:
                self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_TEMP']
                # Ask the original question again
                self.state['act'] = dialog_config.DIALOG_ACT['REQUEST']
        else:
            # No pending request slot: treat the inform as a closing success.
            self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
            self.dialog_status = dialog_config.DIALOG_STATUS['SUCCESS_DIALOG']
def response_request(self, system_action):
        """Handle an agent REQUEST action: answer the single requested slot.

        The slot value is looked up in the goal constraints first, then in
        already answered history, and finally falls back to a "don't know"
        or "don't care" value, volunteering one extra constraint slot when
        one is still pending.
        """
        if len(system_action['request_slots']) > 0:
            # NOTE(review): the status depends on whether the user is still
            # waiting for the 'event' answer — confirm intended semantics.
            if 'event' not in self.state['request_slots']:
                self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_TEMP']
            elif system_action['request_slots'][0] not in self.state['history_slots']:
                self.dialog_status = dialog_config.DIALOG_STATUS['PROVIDE_INFO']
            else:
                # The agent re-asked something already answered.
                self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_TEMP']
        self.state['act'] = dialog_config.DIALOG_ACT['INFORM']
        slot = system_action['request_slots'][0] # only one slot
        # request slot in user's constraints #and slot not in self.state['request_slots'].keys():
        if slot in self.goal['inform_slots'].keys():
            self.state['inform_slots'][slot] = self.goal['inform_slots'][slot]
            if slot in self.state['rest_slots']:
                self.state['rest_slots'].remove(slot)
            if slot in self.state['request_slots']:
                index = self.state['request_slots'].index(slot)
                del self.state['request_slots'][index]
        # the requested slot has been answered
        elif slot in self.goal['request_slots'] and slot not in self.state['rest_slots'] and slot in \
                self.state['history_slots'].keys():
            self.state['inform_slots'][slot] = self.state['history_slots'][slot]
        # request slot in user's goal's request slots, and not answered yet
        elif slot in self.goal['request_slots'] and slot in self.state['rest_slots']:
            self.state['inform_slots'][slot] = dialog_config.SPECIAL_SLOT_VALUES['I_DO_NOT_KNOW']
            # Volunteer one pending constraint alongside the "don't know".
            if len(self.state['rest_slots']) > 0:
                for selected_slot in self.state['rest_slots']:
                    if selected_slot in self.goal['inform_slots'].keys():
                        self.state['inform_slots'][selected_slot] = self.goal['inform_slots'][selected_slot]
                        self.state['rest_slots'].remove(selected_slot)
                        break
        else:
            # Slot outside the user's goal: answer "don't care" and still
            # volunteer one pending constraint if any.
            self.state['inform_slots'][slot] = dialog_config.SPECIAL_SLOT_VALUES['I_DO_NOT_CARE']
            if len(self.state['rest_slots']) > 0:
                for selected_slot in self.state['rest_slots']:
                    if selected_slot in self.goal['inform_slots'].keys():
                        self.state['inform_slots'][selected_slot] = self.goal['inform_slots'][selected_slot]
                        self.state['rest_slots'].remove(selected_slot)
                        break
def response_closing(self, system_action):
        """Handle an agent CLOSING action and settle the final outcome.

        The dialog succeeds unless (a) the user still has pending request
        or rest slots and the agent never informed an 'event', or (b) more
        than one informed slot contradicts the user's goal.
        """
        self.dialog_status = dialog_config.DIALOG_STATUS['SUCCESS_DIALOG']
        self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
        request_slot_set = self.state['request_slots']
        rest_slot_set = self.state['rest_slots']
        if (len(request_slot_set) > 0 or len(rest_slot_set) > 0) and \
                'event' not in system_action['inform_slots'].keys():
            self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_DIALOG']
        # Find mistakes in previous turn
        sum_mistakes = 0
        for info_slot in self.state['history_slots'].keys():
            # A slot that ended up unmatched counts as a mistake.
            if self.state['history_slots'][info_slot] == dialog_config.SPECIAL_SLOT_VALUES['NO_VALUE_MATCH']:
                sum_mistakes += 1
            # So does a slot whose informed value contradicts the goal.
            if info_slot in self.goal['inform_slots'].keys():
                if self.state['history_slots'][info_slot] != self.goal['inform_slots'][info_slot]:
                    sum_mistakes += 1
            # More than one mistake overall fails the dialog (checked inside
            # the loop; the status assignment is idempotent).
            if sum_mistakes > 1:
                self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_DIALOG']
def response_confirm_answer(self, system_action):
        """Handle an agent CONFIRM_ANSWER action ("Okay!").

        If a request is pending, keep asking it; otherwise pick one of the
        remaining goal slots and either inform it (constraint) or request
        it (question). With nothing left, close the dialog.
        """
        self.dialog_status = dialog_config.DIALOG_STATUS['NO_OUTCOME_YET']
        if len(self.state['request_slots']) != 0:
            if 'event' not in self.state['request_slots']:
                self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_TEMP']
            self.state['act'] = dialog_config.DIALOG_ACT['REQUEST']
        elif len(self.state['rest_slots']) > 0:
            # Prefer a pending constraint slot; otherwise any pending slot.
            rest_inform = list(set(self.state['rest_slots']) & set(self.goal['inform_slots']))
            if len(rest_inform) > 0:
                selected_slot = random.choice(rest_inform)
            else:
                selected_slot = random.choice(self.state['rest_slots'])
            if selected_slot in self.goal['inform_slots']:
                self.state['act'] = dialog_config.DIALOG_ACT['INFORM']
                self.state['inform_slots'][selected_slot] = self.goal['inform_slots'][selected_slot]
            elif selected_slot in self.goal['request_slots']:
                self.state['act'] = dialog_config.DIALOG_ACT['REQUEST']
                self.state['request_slots'] = [selected_slot]
            self.state['rest_slots'].remove(selected_slot)
        else:
            self.state['act'] = dialog_config.DIALOG_ACT['CLOSING']
def response_greeting(self, system_action):
        """Handle an agent GREETING action.

        While the user still has pending request slots it keeps asking
        (act REQUEST); the status is marked FAILED_TEMP unless the pending
        request is the 'event' slot.
        """
        pending = self.state['request_slots']
        if pending:
            if 'event' not in pending:
                self.dialog_status = dialog_config.DIALOG_STATUS['FAILED_TEMP']
            self.state['act'] = dialog_config.DIALOG_ACT['REQUEST']
# Script entry point: run a quick smoke test of the simulator.
# NOTE: dataset-extraction residue that had been fused onto these lines
# (breaking the file's syntax) was removed.
if __name__ == '__main__':
    user = user_simulator()
    user.start_conversation()
    user.generate_user_agenda()
# coding: utf-8
#
# Copyright 2018 <NAME>
"""
Basic module that provides the means for evaluating the B-Splines basis
functions and their derivatives. In order to simplify automatic Fortran code
generation with Pyccel, no object-oriented features are employed.
References
----------
.. [1] <NAME> and <NAME>. The NURBS Book, 2nd ed.,
Springer-Verlag Berlin Heidelberg GmbH, 1997.
.. [2] SELALIB, Semi-Lagrangian Library. http://selalib.gforge.inria.fr
"""
import numpy as np
from psydac.core.bsplines_pyccel import (find_span_p,
find_spans_p,
basis_funs_p,
basis_funs_array_p,
basis_funs_1st_der_p,
basis_funs_all_ders_p,
collocation_matrix_p,
histopolation_matrix_p,
greville_p,
breakpoints_p,
elements_spans_p,
make_knots_p,
elevate_knots_p,
quadrature_grid_p,
basis_ders_on_quad_grid_p,
basis_integrals_p)
__all__ = ['find_span',
'find_spans',
'basis_funs',
'basis_funs_array',
'basis_funs_1st_der',
'basis_funs_all_ders',
'collocation_matrix',
'histopolation_matrix',
'breakpoints',
'greville',
'elements_spans',
'make_knots',
'elevate_knots',
'quadrature_grid',
'basis_integrals',
'basis_ders_on_quad_grid']
#==============================================================================
def find_span(knots, degree, x):
    """
    Locate the knot span index at point ``x`` (Algorithm A2.1 in
    The NURBS Book). For degree p, span index i identifies the indices
    [i-p:i] of all p+1 non-vanishing basis functions at ``x``.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    x : float
        Location of interest.

    Returns
    -------
    span : int
        Knot span index.
    """
    xval = float(x)
    knot_arr = np.asarray(knots, dtype=float)
    return find_span_p(knot_arr, degree, xval)
#==============================================================================
def find_spans(knots, degree, x, out=None):
    """
    Vectorised version of `find_span`: compute the knot span index at each
    location in ``x``.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    x : array_like of floats
        Locations of interest.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    spans : array
        Knot span indices, one per entry of ``x``.
    """
    knot_arr = np.asarray(knots, dtype=float)
    x_arr = np.asarray(x, dtype=float)
    # NOTE(review): zeros_like(x_arr) allocates a float array although the
    # result holds integer span indices — kept as-is to preserve behavior.
    result = np.zeros_like(x_arr) if out is None else out
    find_spans_p(knot_arr, degree, x_arr, result)
    return result
#==============================================================================
def basis_funs(knots, degree, x, span, out=None):
    """
    Evaluate the non-vanishing B-splines at a single location.

    Parameters
    ----------
    knots : array_like of floats
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    x : float
        Evaluation point.

    span : int
        Knot span index.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    array
        1D array with the values of the ``degree + 1`` non-zero B-splines
        at ``x``.
    """
    knot_arr = np.asarray(knots, dtype=float)
    xval = float(x)
    result = np.zeros(degree + 1) if out is None else out
    basis_funs_p(knot_arr, degree, xval, span, result)
    return result
#==============================================================================
def basis_funs_array(knots, degree, span, x, out=None):
    """Compute the non-vanishing B-splines at several locations.

    Parameters
    ----------
    knots : array_like of floats
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    span : array_like of int
        Knot span indexes.

    x : array_like of floats
        Evaluation points.

    out : array, optional
        If provided, the result will be inserted into this array.
        It should be of the appropriate shape and dtype.

    Returns
    -------
    array
        2D array of shape ``(len(x), degree + 1)`` containing the values of
        the ``degree + 1`` non-zero B-splines at each location in ``x``.
    """
    knots = np.asarray(knots, dtype=float)
    x = np.asarray(x, dtype=float)
    if out is None:
        # BUGFIX: previously np.zeros((x.shape, degree + 1)) passed the
        # whole shape *tuple* as the first dimension, raising TypeError.
        # The output is 2D of shape (len(x), degree + 1) as documented.
        out = np.zeros((x.shape[0], degree + 1))
    basis_funs_array_p(knots, degree, x, span, out)
    return out
#==============================================================================
def basis_funs_1st_der(knots, degree, x, span, out=None):
    """
    Evaluate the first derivative of the non-vanishing B-splines at one
    location.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    x : float
        Evaluation point.

    span : int
        Knot span index.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    array
        1D array of size ``degree + 1`` with the derivatives of the
        non-vanishing B-splines at ``x``.

    Notes
    -----
    See function 's_bsplines_non_uniform__eval_deriv' in Selalib's source
    file 'src/splines/sll_m_bsplines_non_uniform.F90'
    (SELALIB, Semi-Lagrangian Library, http://selalib.gforge.inria.fr).
    """
    knot_arr = np.asarray(knots, dtype=float)
    xval = float(x)
    result = np.zeros(degree + 1) if out is None else out
    basis_funs_1st_der_p(knot_arr, degree, xval, span, result)
    return result
#==============================================================================
def basis_funs_all_ders(knots, degree, x, span, n, normalization='B', out=None):
    """
    Evaluate value and ``n`` derivatives at ``x`` of all basis functions
    with support in the interval :math:`[x_{span-1}, x_{span}]`.
    With normalization='M', M-splines are used instead of B-splines.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    x : float
        Evaluation point.

    span : int
        Knot span index.

    n : int
        Max derivative of interest.

    normalization : str
        'B' for B-splines, 'M' for M-splines.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    ders : array
        2D array of the 0-th to n-th derivatives at ``x`` of the degree+1
        non-vanishing basis functions in the given span:
        ders[i, j] = (d/dx)^i B_k(x) with k = span - degree + j.
    """
    knot_arr = np.asarray(knots, dtype=float)
    xval = float(x)
    ders = np.zeros((n + 1, degree + 1)) if out is None else out
    basis_funs_all_ders_p(knot_arr, degree, xval, span, n, normalization == 'M', ders)
    return ders
#==============================================================================
def collocation_matrix(knots, degree, periodic, normalization, xgrid, out=None):
    """Build the collocation matrix :math:`C_{ij} = B_j(x_i)`: the value of
    each basis function :math:`B_j` at every point :math:`x_i`.
    With normalization='M', M-splines are used instead of B-splines.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of spline space.

    periodic : bool
        True if domain is periodic, False otherwise.

    normalization : str
        'B' for B-splines, 'M' for M-splines.

    xgrid : array_like
        Evaluation points.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    colloc_matrix : ndarray of floats
        The collocation matrix, shape (len(xgrid), nbasis).
    """
    knot_arr = np.asarray(knots, dtype=float)
    xgrid_arr = np.asarray(xgrid, dtype=float)
    if out is None:
        # Number of basis functions; periodic spaces drop `degree` of them.
        n_basis = len(knot_arr) - degree - 1
        if periodic:
            n_basis -= degree
        out = np.zeros((xgrid_arr.shape[0], n_basis))
    collocation_matrix_p(knot_arr, degree, periodic, normalization == "M", xgrid_arr, out)
    return out
#==============================================================================
def histopolation_matrix(knots, degree, periodic, normalization, xgrid, check_boundary=True, out=None):
    """Build the histopolation matrix
    :math:`H_{ij} = \\int_{x_i}^{x_{i+1}} B_j(x)\\,dx`, the integrals of
    each basis function between successive grid points.
    With normalization='M', M-splines are used instead of B-splines.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of spline space.

    periodic : bool
        True if domain is periodic, False otherwise.

    normalization : str
        'B' for B-splines, 'M' for M-splines.

    xgrid : array_like
        Grid points.

    check_boundary : bool, default=True
        If true and ``periodic``, will check the boundaries of ``xgrid``.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    array
        Histopolation matrix.
    """
    # --- Input validation (order and messages preserved) ---
    # Knots must be non-decreasing (repetitions allowed).
    if not np.all(np.diff(knots) >= 0):
        raise ValueError("Cannot accept knot sequence: {}".format(knots))
    # Degree must be a non-negative integer.
    if not isinstance(degree, (int, np.integer)):
        raise TypeError("Degree {} must be integer, got type {} instead".format(degree, type(degree)))
    if degree < 0:
        raise ValueError("Cannot accept negative degree: {}".format(degree))
    if not isinstance(periodic, bool):
        raise TypeError("Cannot accept non-boolean 'periodic' parameter: {}".format(periodic))
    if normalization not in ['B', 'M']:
        raise ValueError("Cannot accept 'normalization' parameter: {}".format(normalization))
    # Grid points must be strictly increasing (no repetitions).
    if not np.all(np.diff(xgrid) > 0):
        raise ValueError("Grid points must be ordered, with no repetitions: {}".format(xgrid))
    knot_arr = np.asarray(knots, dtype=float)
    xgrid_arr = np.asarray(xgrid, dtype=float)
    # Knots of the degree p+1 space, needed by the kernel.
    elevated = elevate_knots(knot_arr, degree, periodic)
    use_msplines = normalization == "M"
    if out is None:
        if periodic:
            shape = (len(xgrid_arr), len(knot_arr) - 2 * degree - 1)
        else:
            shape = (len(xgrid_arr) - 1, len(elevated) - (degree + 1) - 1 - 1)
        out = np.zeros(shape)
    histopolation_matrix_p(knot_arr, degree, periodic, use_msplines, xgrid_arr, check_boundary, elevated, out)
    return out
#==============================================================================
def breakpoints(knots, degree, tol=1e-15, out=None):
    """
    Determine the breakpoints' coordinates.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    tol : float
        Knots closer than ``tol`` are treated as one repeated knot, i.e.
        the same breakpoint.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    breaks : numpy.ndarray (1D)
        Abscissas of all breakpoints.
    """
    knot_arr = np.asarray(knots, dtype=float)
    result = np.zeros(len(knot_arr)) if out is None else out
    # The kernel returns the number of breakpoints actually found.
    n_breaks = breakpoints_p(knot_arr, degree, result, tol)
    return result[:n_breaks]
#==============================================================================
def greville(knots, degree, periodic, out=None):
    """
    Compute the coordinates of all Greville points.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    periodic : bool
        True if domain is periodic, False otherwise.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    greville : numpy.ndarray (1D)
        Abscissas of all Greville points.
    """
    knot_arr = np.asarray(knots, dtype=float)
    if out is None:
        # One point per basis function; periodic spaces have `degree` fewer.
        if periodic:
            n_points = len(knot_arr) - 2 * degree - 1
        else:
            n_points = len(knot_arr) - degree - 1
        out = np.zeros(n_points)
    greville_p(knot_arr, degree, periodic, out)
    return out
#===============================================================================
def elements_spans(knots, degree, out=None):
    """
    Compute the index of the last non-vanishing spline on each grid element
    (cell). The length of the returned array is the number of cells.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    spans : numpy.ndarray (1D)
        Index of the last non-vanishing spline on each grid element.

    Examples
    --------
    >>> import numpy as np
    >>> from psydac.core.bsplines import make_knots, elements_spans
    >>> p = 3 ; n = 8
    >>> grid  = np.arange( n-p+1 )
    >>> knots = make_knots( breaks=grid, degree=p, periodic=False )
    >>> spans = elements_spans( knots=knots, degree=p )
    >>> spans
    array([3, 4, 5, 6, 7])

    Notes
    -----
    1) Numbering of basis functions starts from 0, not 1.
    2) Equivalent two-line formulation:
       breaks = breakpoints( knots, degree )
       spans  = np.searchsorted( knots, breaks[:-1], side='right' ) - 1
    """
    knot_arr = np.asarray(knots, dtype=float)
    result = np.zeros(len(knot_arr), dtype=int) if out is None else out
    # The kernel returns the number of cells actually filled in.
    n_cells = elements_spans_p(knot_arr, degree, result)
    return result[:n_cells]
#===============================================================================
def make_knots(breaks, degree, periodic, multiplicity=1, out=None):
    """
    Create spline knots from breakpoints, with appropriate boundary conditions.
    Let p be spline degree. If domain is periodic, knot sequence is extended
    by periodicity so that first p basis functions are identical to last p.
    Otherwise, knot sequence is clamped (i.e. endpoints are repeated p times).

    Parameters
    ----------
    breaks : array_like
        Coordinates of breakpoints (= cell edges); given in increasing order and
        with no duplicates.

    degree : int
        Spline degree (= polynomial degree within each interval).

    periodic : bool
        True if domain is periodic, False otherwise.

    multiplicity : int
        Multiplicity of the knots in the knot sequence, we assume that the same
        multiplicity applies to each interior knot.

    out : array, optional
        If provided, the result will be inserted into this array.
        It should be of the appropriate shape and dtype.

    Returns
    -------
    T : numpy.ndarray (1D)
        Coordinates of spline knots.
    """
    # Type checking. NumPy integers are accepted as well, for consistency
    # with the degree check in histopolation_matrix.
    assert isinstance( degree  , (int, np.integer) )
    assert isinstance( periodic, bool )
    # Consistency checks
    assert len(breaks) > 1
    assert all( np.diff(breaks) > 0 )
    assert degree > 0
    assert 1 <= multiplicity and multiplicity <= degree + 1
    if periodic:
        assert len(breaks) > degree
    breaks = np.asarray(breaks, dtype=float)
    # (Removed unused local `p = degree`.)
    if out is None:
        # Interior knots appear `multiplicity` times; ends add 2*degree + 2.
        out = np.zeros(multiplicity * len(breaks[1:-1]) + 2 + 2 * degree)
    make_knots_p(breaks, degree, periodic, out, multiplicity)
    return out
#==============================================================================
def elevate_knots(knots, degree, periodic, multiplicity=1, tol=1e-15, out=None):
    """
    Given the knot sequence of a spline space S of degree p, compute the
    knot sequence of a spline space S_0 of degree p+1 such that u' is in S
    for all u in S_0.

    On bounded domains the first and last knots are repeated in the
    sequence; in the periodic case the sequence is extended by periodicity.

    Parameters
    ----------
    knots : array_like
        Knots sequence of spline space of degree p.

    degree : int
        Spline degree (= polynomial degree within each interval).

    periodic : bool
        True if domain is periodic, False otherwise.

    multiplicity : int
        Multiplicity of the knots in the knot sequence, we assume that the
        same multiplicity applies to each interior knot.

    tol : float
        Knots closer than ``tol`` are treated as one repeated knot, i.e.
        the same breakpoint.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    new_knots : ndarray
        Knots sequence of spline space of degree p+1.
    """
    knot_arr = np.asarray(knots, dtype=float)
    if out is None:
        if periodic:
            # Periodic extension adds one knot on each side.
            out = np.zeros(knot_arr.shape[0] + 2)
        else:
            # Boundary knots of the elevated space.
            size = 2 * (degree + 2)
            if len(knot_arr) - 2 * (degree + 1) > 0:
                # Count distinct interior breakpoints (gaps larger than tol).
                interior_gaps = np.diff(knot_arr[degree + 1:-degree - 1])
                n_distinct = 1 + np.count_nonzero(interior_gaps > tol)
                size += multiplicity * n_distinct
            out = np.zeros(size)
    elevate_knots_p(knot_arr, degree, periodic, out, multiplicity, tol)
    return out
#==============================================================================
def quadrature_grid(breaks, quad_rule_x, quad_rule_w):
    """
    Map a Gaussian quadrature rule defined on the canonical interval
    :math:`[-1,+1]` onto every element (interval) of the 1D domain, giving
    the quadrature points and weights needed to integrate over each element.

    An n-point Gaussian rule with abscissas :math:`x_i` and weights
    :math:`w_i` satisfies, for polynomials f of degree :math:`2n-1` or less:

    .. math :: \\int_{-1}^{+1} f(x) dx = \\sum_{i=0}^{n-1} w_i f(x_i)

    Parameters
    ----------
    breaks : array_like of floats
        Coordinates of spline breakpoints.

    quad_rule_x : array_like of ints
        Coordinates of quadrature points on canonical interval [-1,1].

    quad_rule_w : array_like of ints
        Weights assigned to quadrature points on canonical interval [-1,1].

    Returns
    -------
    quad_x : 2D numpy.ndarray
        Abscissas of quadrature points on each element; indexed [ie, iq]
        with ie the element index and iq the local quadrature point index.

    quad_w : 2D numpy.ndarray
        Weights of quadrature points on each element; same indexing.
    """
    # Sanity checks on the inputs.
    assert len(breaks) >= 2
    assert len(quad_rule_x) == len(quad_rule_w)
    # The rule must live on the canonical interval [-1,1].
    assert min(quad_rule_x) >= -1
    assert max(quad_rule_x) <= +1
    breaks_arr = np.asarray(breaks, dtype=float)
    rule_x = np.asarray(quad_rule_x)
    rule_w = np.asarray(quad_rule_w)
    quad_x = np.zeros((len(breaks_arr) - 1, len(rule_x)))
    quad_w = np.zeros_like(quad_x)
    quadrature_grid_p(breaks_arr, rule_x, rule_w, quad_x, quad_w)
    return quad_x, quad_w
#==============================================================================
def basis_ders_on_quad_grid(knots, degree, quad_grid, nders, normalization, out=None):
    """
    Evaluate B-splines and their derivatives on the quadrature grid.
    With normalization='M', M-splines are used instead of B-splines.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    quad_grid : ndarray
        2D array of shape (ne, nq): coordinates of the quadrature points of
        each element of the 1D domain.

    nders : int
        Maximum derivative of interest.

    normalization : str
        'B' for B-splines, 'M' for M-splines.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    basis : ndarray
        Values of the basis functions and their derivatives at the
        quadrature points of each element, indexed [ie, il, id, iq]:
        . ie: global element  (0 <= ie <  ne    )
        . il: local basis fun (0 <= il <= degree)
        . id: derivative      (0 <= id <= nders )
        . iq: local quad point(0 <= iq <  nq    )

    Examples
    --------
    >>> knots = np.array([0.0, 0.0, 0.25, 0.5, 0.75, 1., 1.])
    >>> degree = 2
    >>> bk = breakpoints(knots, degree)
    >>> grid = np.array([np.linspace(bk[i], bk[i+1], 4, endpoint=False) for i in range(len(bk) - 1)])
    >>> basis_ders_on_quad_grid(knots, degree, grid, 0, "B")
    array([[[[0.5, 0.28125, 0.125, 0.03125]],
            [[0.5, 0.6875 , 0.75 , 0.6875 ]],
            [[0. , 0.03125, 0.125, 0.28125]]],
           [[[0.5, 0.28125, 0.125, 0.03125]],
            [[0.5, 0.6875 , 0.75 , 0.6875 ]],
            [[0. , 0.03125, 0.125, 0.28125]]]])
    """
    ne, nq = quad_grid.shape
    knot_arr = np.asarray(knots, dtype=float)
    # The kernel requires a contiguous float array of quadrature points.
    quad_pts = np.ascontiguousarray(quad_grid, dtype=float)
    basis = np.zeros((ne, degree + 1, nders + 1, nq)) if out is None else out
    basis_ders_on_quad_grid_p(knot_arr, degree, quad_pts, nders, normalization == 'M', basis)
    return basis
#==============================================================================
def basis_integrals(knots, degree, out=None):
    """
    Return the integral of each B-spline basis function over the real line:

    .. math:: K[i] = \\int_{-\\infty}^{+\\infty} B_i(x) dx = (T[i+p+1]-T[i]) / (p+1).

    This array can be used to convert B-splines to M-splines, which have
    unit integral over the real line but no partition-of-unity property.

    Parameters
    ----------
    knots : array_like
        Knots sequence.

    degree : int
        Polynomial degree of B-splines.

    out : array, optional
        If provided, results are written into this array.

    Returns
    -------
    K : numpy.ndarray
        Integral of each B-spline basis function.

    Notes
    -----
    This function does not distinguish between periodic and non-periodic
    spaces: the output always has length len(knots) - degree - 1. In the
    periodic case the last `degree` values simply duplicate the first
    `degree` values.
    """
    knot_arr = np.asarray(knots, dtype=float)
    result = np.zeros(len(knot_arr) - degree - 1) if out is None else out
    basis_integrals_p(knot_arr, degree, result)
    return result
| [
"psydac.core.bsplines_pyccel.histopolation_matrix_p",
"psydac.core.bsplines_pyccel.elements_spans_p",
"psydac.core.bsplines_pyccel.basis_funs_all_ders_p",
"psydac.core.bsplines_pyccel.elevate_knots_p",
"psydac.core.bsplines_pyccel.basis_integrals_p",
"psydac.core.bsplines_pyccel.breakpoints_p",
"psydac.... | [((2585, 2615), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (2595, 2615), True, 'import numpy as np\n'), ((2627, 2656), 'psydac.core.bsplines_pyccel.find_span_p', 'find_span_p', (['knots', 'degree', 'x'], {}), '(knots, degree, x)\n', (2638, 2656), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((3521, 3551), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (3531, 3551), True, 'import numpy as np\n'), ((3560, 3586), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'float'}), '(x, dtype=float)\n', (3570, 3586), True, 'import numpy as np\n'), ((3642, 3677), 'psydac.core.bsplines_pyccel.find_spans_p', 'find_spans_p', (['knots', 'degree', 'x', 'out'], {}), '(knots, degree, x, out)\n', (3654, 3677), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((4429, 4459), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (4439, 4459), True, 'import numpy as np\n'), ((4536, 4577), 'psydac.core.bsplines_pyccel.basis_funs_p', 'basis_funs_p', (['knots', 'degree', 'x', 'span', 'out'], {}), '(knots, degree, x, span, out)\n', (4548, 4577), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, 
elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((5404, 5434), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (5414, 5434), True, 'import numpy as np\n'), ((5443, 5469), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'float'}), '(x, dtype=float)\n', (5453, 5469), True, 'import numpy as np\n'), ((5540, 5587), 'psydac.core.bsplines_pyccel.basis_funs_array_p', 'basis_funs_array_p', (['knots', 'degree', 'x', 'span', 'out'], {}), '(knots, degree, x, span, out)\n', (5558, 5587), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((6652, 6682), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (6662, 6682), True, 'import numpy as np\n'), ((6759, 6808), 'psydac.core.bsplines_pyccel.basis_funs_1st_der_p', 'basis_funs_1st_der_p', (['knots', 'degree', 'x', 'span', 'out'], {}), '(knots, degree, x, span, out)\n', (6779, 6808), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((8005, 8035), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (8015, 8035), True, 'import numpy as np\n'), ((8121, 8196), 'psydac.core.bsplines_pyccel.basis_funs_all_ders_p', 'basis_funs_all_ders_p', (['knots', 'degree', 'x', 'span', 'n', "(normalization == 'M')", 'out'], {}), "(knots, degree, x, span, n, normalization == 'M', 
out)\n", (8142, 8196), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((9293, 9323), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (9303, 9323), True, 'import numpy as np\n'), ((9336, 9366), 'numpy.asarray', 'np.asarray', (['xgrid'], {'dtype': 'float'}), '(xgrid, dtype=float)\n', (9346, 9366), True, 'import numpy as np\n'), ((9568, 9645), 'psydac.core.bsplines_pyccel.collocation_matrix_p', 'collocation_matrix_p', (['knots', 'degree', 'periodic', 'bool_normalization', 'xgrid', 'out'], {}), '(knots, degree, periodic, bool_normalization, xgrid, out)\n', (9588, 9645), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((11890, 11920), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (11900, 11920), True, 'import numpy as np\n'), ((11933, 11963), 'numpy.asarray', 'np.asarray', (['xgrid'], {'dtype': 'float'}), '(xgrid, dtype=float)\n', (11943, 11963), True, 'import numpy as np\n'), ((12286, 12396), 'psydac.core.bsplines_pyccel.histopolation_matrix_p', 'histopolation_matrix_p', (['knots', 'degree', 'periodic', 'normalization', 'xgrid', 'check_boundary', 'elevated_knots', 'out'], {}), '(knots, degree, periodic, normalization, xgrid,\n check_boundary, elevated_knots, out)\n', (12308, 12396), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, 
basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((13157, 13187), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (13167, 13187), True, 'import numpy as np\n'), ((13257, 13295), 'psydac.core.bsplines_pyccel.breakpoints_p', 'breakpoints_p', (['knots', 'degree', 'out', 'tol'], {}), '(knots, degree, out, tol)\n', (13270, 13295), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((13989, 14019), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (13999, 14019), True, 'import numpy as np\n'), ((14151, 14191), 'psydac.core.bsplines_pyccel.greville_p', 'greville_p', (['knots', 'degree', 'periodic', 'out'], {}), '(knots, degree, periodic, out)\n', (14161, 14191), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((15509, 15539), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (15519, 15539), True, 'import numpy as np\n'), ((15621, 15657), 'psydac.core.bsplines_pyccel.elements_spans_p', 'elements_spans_p', (['knots', 'degree', 'out'], {}), '(knots, degree, out)\n', (15637, 15657), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, 
basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((17218, 17249), 'numpy.asarray', 'np.asarray', (['breaks'], {'dtype': 'float'}), '(breaks, dtype=float)\n', (17228, 17249), True, 'import numpy as np\n'), ((17363, 17420), 'psydac.core.bsplines_pyccel.make_knots_p', 'make_knots_p', (['breaks', 'degree', 'periodic', 'out', 'multiplicity'], {}), '(breaks, degree, periodic, out, multiplicity)\n', (17375, 17420), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((18808, 18838), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (18818, 18838), True, 'import numpy as np\n'), ((19223, 19287), 'psydac.core.bsplines_pyccel.elevate_knots_p', 'elevate_knots_p', (['knots', 'degree', 'periodic', 'out', 'multiplicity', 'tol'], {}), '(knots, degree, periodic, out, multiplicity, tol)\n', (19238, 19287), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((21184, 21215), 'numpy.asarray', 'np.asarray', (['breaks'], {'dtype': 'float'}), '(breaks, dtype=float)\n', (21194, 21215), True, 'import numpy as np\n'), ((21234, 21257), 'numpy.asarray', 'np.asarray', (['quad_rule_x'], {}), '(quad_rule_x)\n', (21244, 21257), True, 'import numpy as np\n'), ((21278, 21301), 'numpy.asarray', 
'np.asarray', (['quad_rule_w'], {}), '(quad_rule_w)\n', (21288, 21301), True, 'import numpy as np\n'), ((21373, 21392), 'numpy.zeros_like', 'np.zeros_like', (['out1'], {}), '(out1)\n', (21386, 21392), True, 'import numpy as np\n'), ((21397, 21460), 'psydac.core.bsplines_pyccel.quadrature_grid_p', 'quadrature_grid_p', (['breaks', 'quad_rule_x', 'quad_rule_w', 'out1', 'out2'], {}), '(breaks, quad_rule_x, quad_rule_w, out1, out2)\n', (21414, 21460), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((23398, 23428), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (23408, 23428), True, 'import numpy as np\n'), ((23445, 23489), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['quad_grid'], {'dtype': 'float'}), '(quad_grid, dtype=float)\n', (23465, 23489), True, 'import numpy as np\n'), ((23570, 23659), 'psydac.core.bsplines_pyccel.basis_ders_on_quad_grid_p', 'basis_ders_on_quad_grid_p', (['knots', 'degree', 'quad_grid', 'nders', "(normalization == 'M')", 'out'], {}), "(knots, degree, quad_grid, nders, normalization ==\n 'M', out)\n", (23595, 23659), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((24869, 24899), 'numpy.asarray', 'np.asarray', (['knots'], {'dtype': 'float'}), '(knots, dtype=float)\n', (24879, 24899), True, 'import numpy as np\n'), ((24972, 25009), 'psydac.core.bsplines_pyccel.basis_integrals_p', 'basis_integrals_p', (['knots', 
'degree', 'out'], {}), '(knots, degree, out)\n', (24989, 25009), False, 'from psydac.core.bsplines_pyccel import find_span_p, find_spans_p, basis_funs_p, basis_funs_array_p, basis_funs_1st_der_p, basis_funs_all_ders_p, collocation_matrix_p, histopolation_matrix_p, greville_p, breakpoints_p, elements_spans_p, make_knots_p, elevate_knots_p, quadrature_grid_p, basis_ders_on_quad_grid_p, basis_integrals_p\n'), ((3621, 3637), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (3634, 3637), True, 'import numpy as np\n'), ((4511, 4531), 'numpy.zeros', 'np.zeros', (['(degree + 1)'], {}), '(degree + 1)\n', (4519, 4531), True, 'import numpy as np\n'), ((5504, 5535), 'numpy.zeros', 'np.zeros', (['(x.shape, degree + 1)'], {}), '((x.shape, degree + 1))\n', (5512, 5535), True, 'import numpy as np\n'), ((6734, 6754), 'numpy.zeros', 'np.zeros', (['(degree + 1)'], {}), '(degree + 1)\n', (6742, 6754), True, 'import numpy as np\n'), ((8087, 8116), 'numpy.zeros', 'np.zeros', (['(n + 1, degree + 1)'], {}), '((n + 1, degree + 1))\n', (8095, 8116), True, 'import numpy as np\n'), ((9485, 9515), 'numpy.zeros', 'np.zeros', (['(xgrid.shape[0], nb)'], {}), '((xgrid.shape[0], nb))\n', (9493, 9515), True, 'import numpy as np\n'), ((14135, 14146), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (14143, 14146), True, 'import numpy as np\n'), ((23524, 23565), 'numpy.zeros', 'np.zeros', (['(ne, degree + 1, nders + 1, nq)'], {}), '((ne, degree + 1, nders + 1, nq))\n', (23532, 23565), True, 'import numpy as np\n'), ((17047, 17062), 'numpy.diff', 'np.diff', (['breaks'], {}), '(breaks)\n', (17054, 17062), True, 'import numpy as np\n'), ((18898, 18926), 'numpy.zeros', 'np.zeros', (['(knots.shape[0] + 2)'], {}), '(knots.shape[0] + 2)\n', (18906, 18926), True, 'import numpy as np\n'), ((19203, 19218), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (19211, 19218), True, 'import numpy as np\n'), ((10937, 10951), 'numpy.diff', 'np.diff', (['knots'], {}), '(knots)\n', (10944, 10951), True, 
'import numpy as np\n'), ((11761, 11775), 'numpy.diff', 'np.diff', (['xgrid'], {}), '(xgrid)\n', (11768, 11775), True, 'import numpy as np\n'), ((19063, 19101), 'numpy.diff', 'np.diff', (['knots[degree + 1:-degree - 1]'], {}), '(knots[degree + 1:-degree - 1])\n', (19070, 19101), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# A demo of ridge noise.
import numpy as np
import sys
import util
def noise_octave(shape, f):
    # One octave of fractal Brownian motion, band-limited to frequencies [f, 2f).
    octave = util.fbm(shape, -1, lower=f, upper=2 * f)
    return octave
def main(argv):
    """Build a 512x512 ridge-noise image and save it as 'ridge.npy'."""
    size = (512, 512)
    accum = np.zeros(size)
    # Sum |fbm - 0.5| over octaves 2..512, each scaled down by its frequency.
    for octave in range(1, 10):
        freq = 2 ** octave
        accum += np.abs(noise_octave(size, freq) - 0.5) / freq
    # Invert and square the normalized field to sharpen the ridges.
    ridge = (1.0 - util.normalize(accum)) ** 2
    np.save('ridge', ridge)
# Script entry point: forwards CLI arguments (currently unused by main) on.
if __name__ == '__main__':
    main(sys.argv)
| [
"util.normalize",
"numpy.save",
"numpy.zeros",
"util.fbm"
] | [((135, 176), 'util.fbm', 'util.fbm', (['shape', '(-1)'], {'lower': 'f', 'upper': '(2 * f)'}), '(shape, -1, lower=f, upper=2 * f)\n', (143, 176), False, 'import util\n'), ((234, 249), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (242, 249), True, 'import numpy as np\n'), ((401, 425), 'numpy.save', 'np.save', (['"""ridge"""', 'result'], {}), "('ridge', result)\n", (408, 425), True, 'import numpy as np\n'), ((367, 389), 'util.normalize', 'util.normalize', (['values'], {}), '(values)\n', (381, 389), False, 'import util\n')] |
from functools import partial
from typing import Optional, Tuple
import numpy as np
from gdsfactory.component import Component
from gdsfactory.config import logger
from gdsfactory.port import Port, read_port_markers, sort_ports_clockwise
from gdsfactory.snap import snap_to_grid
from gdsfactory.types import Layer
def add_ports_from_markers_square(
    component: Component,
    pin_layer: Layer = (69, 0),
    port_layer: Optional[Layer] = None,
    orientation: Optional[int] = 90,
    min_pin_area_um2: float = 0,
    max_pin_area_um2: float = 150 * 150,
    pin_extra_width: float = 0.0,
    port_names: Optional[Tuple[str, ...]] = None,
    port_name_prefix: Optional[str] = None,
    port_type: str = "optical",
) -> Component:
    """Create ports at the centers of square pin markers drawn in pin_layer.

    A marker yields a port only when it is square (equal snapped width and
    height) and its area lies strictly between min_pin_area_um2 and
    max_pin_area_um2.

    Args:
        component: to read polygons from and to write ports to.
        pin_layer: layer holding the port markers.
        port_layer: layer for the created ports (defaults to pin_layer).
        orientation: in degrees 90: north, 0: east, 180: west, 270: south.
        min_pin_area_um2: markers with smaller area are ignored.
        max_pin_area_um2: markers with larger area are ignored.
        pin_extra_width: 2*offset from pin to straight; subtracted from width.
        port_names: explicit names for the ports (defaults to prefix + index).
        port_name_prefix: defaults to 'o' for optical and 'e' for electrical.
        port_type: optical, electrical.
    """
    default_prefix = "o" if port_type == "optical" else "e"
    prefix = port_name_prefix or default_prefix

    markers = read_port_markers(component, [pin_layer])
    names = port_names or [
        f"{prefix}{index + 1}" for index in range(len(markers.polygons))
    ]
    out_layer = port_layer or pin_layer

    for name, polygon in zip(names, markers.polygons):
        height = snap_to_grid(polygon.ymax - polygon.ymin)
        width = snap_to_grid(polygon.xmax - polygon.xmin)
        if width != height:
            # non-square markers are skipped (orientation would be ambiguous)
            continue
        if not (min_pin_area_um2 < width * height < max_pin_area_um2):
            continue
        component.add_port(
            name,
            midpoint=(polygon.x, polygon.y),
            width=width - pin_extra_width,
            orientation=orientation,
            layer=out_layer,
        )
    return component
def add_ports_from_markers_center(
    component: Component,
    pin_layer: Layer = (1, 10),
    port_layer: Optional[Layer] = None,
    inside: bool = False,
    tol: float = 0.1,
    pin_extra_width: float = 0.0,
    min_pin_area_um2: Optional[float] = None,
    max_pin_area_um2: float = 150.0 * 150.0,
    skip_square_ports: bool = False,
    xcenter: Optional[float] = None,
    ycenter: Optional[float] = None,
    port_name_prefix: Optional[str] = None,
    port_type: str = "optical",
    auto_rename_ports: bool = True,
) -> Component:
    """Add ports from rectangular pin markers.

    markers at port center, so half of the marker goes inside and half outside the port.

    guess port orientation from the component center (xcenter)

    Args:
        component: to read polygons from and to write ports to.
        pin_layer: GDS layer for marker [int, int].
        port_layer: for the new created port
        inside: True-> markers inside. False-> markers at center
        tol: tolerance for comparing how rectangular is the pin
        pin_extra_width: 2*offset from pin to straight
        min_pin_area_um2: ignores pins with area smaller than min_pin_area_um2
        max_pin_area_um2: ignore pins for area above certain size
        skip_square_ports: skips square ports (hard to guess orientation)
        xcenter: for guessing orientation of rectangular ports
        ycenter: for guessing orientation of rectangular ports
        port_name_prefix: defaults to 'o' for optical and 'e' for electrical ports.
        port_type: type of port (optical, electrical ...)
        auto_rename_ports:

    For inside=False the port location is at the middle of the PIN

    .. code::
           _______________
          |               |
          |               |
         |||             |||____  | pin_extra_width/2 > 0
         |||             |||
         |||             |||____
         |||             |||
          |      __       |
          |_____|__|______|
                |__|

    For inside=True all the pin is inside the port

    .. code::
           _______________
          |               |
          |               |
          |_              |
          | |             |
          |_|             |
          |               |
          |      __       |
          |_____|__|______|

    dx < dy: port is east or west
        x > xc: east
        x < xc: west

    dx > dy: port is north or south
        y > yc: north
        y < yc: south

    dx = dy
        x > xc: east
        x < xc: west
    """
    # fall back to the component center when no pivot is given
    xc = xcenter or component.x
    yc = ycenter or component.y
    xmax = component.xmax
    xmin = component.xmin
    ymax = component.ymax
    ymin = component.ymin

    port_markers = read_port_markers(component, layers=(pin_layer,))
    layer = port_layer or pin_layer
    port_locations = []

    ports = {}

    port_name_prefix_default = "o" if port_type == "optical" else "e"
    port_name_prefix = port_name_prefix or port_name_prefix_default

    for i, p in enumerate(port_markers.polygons):
        # port_name_prefix is always truthy after the fallback above,
        # so port_name is always the f-string branch
        port_name = f"{port_name_prefix}{i+1}" if port_name_prefix else i
        dy = p.ymax - p.ymin
        dx = p.xmax - p.xmin
        x = p.x
        y = p.y
        # area-based filtering of markers
        if min_pin_area_um2 and dx * dy < min_pin_area_um2:
            logger.debug(f"skipping port at ({x}, {y}) with min_pin_area_um2 {dx * dy}")
            continue

        if max_pin_area_um2 and dx * dy > max_pin_area_um2:
            continue

        # snap before comparing so near-square markers count as square
        if skip_square_ports and snap_to_grid(dx) == snap_to_grid(dy):
            logger.debug(f"skipping square port at ({x}, {y})")
            continue

        pxmax = p.xmax
        pxmin = p.xmin
        pymax = p.ymax
        pymin = p.ymin

        # Rectangular markers: orientation from aspect ratio + side of center.
        if dx < dy and x > xc:  # east
            orientation = 0
            width = dy
            x = p.xmax if inside else p.x
        elif dx < dy and x < xc:  # west
            orientation = 180
            width = dy
            x = p.xmin if inside else p.x
        elif dx > dy and y > yc:  # north
            orientation = 90
            width = dx
            y = p.ymax if inside else p.y
        elif dx > dy and y < yc:  # south
            orientation = 270
            width = dx
            y = p.ymin if inside else p.y
        # square port markers have same width and height
        # check which edge (E, W, N, S) they are closer to
        elif pxmax > xmax - tol:  # east
            orientation = 0
            width = dy
            x = p.xmax if inside else p.x
        elif pxmin < xmin + tol:  # west
            orientation = 180
            width = dy
            x = p.xmin if inside else p.x
        elif pymax > ymax - tol:  # north
            orientation = 90
            width = dx
            y = p.ymax if inside else p.y
        elif pymin < ymin + tol:  # south
            orientation = 270
            width = dx
            y = p.ymin if inside else p.y
        elif pxmax > xc:
            orientation = 0
            width = dy
            x = p.xmax if inside else p.x
        elif pxmax < xc:
            orientation = 180
            width = dy
            x = p.xmin if inside else p.x
        # NOTE(review): if no branch above matches (e.g. a square marker with
        # pxmax == xc exactly), `orientation`/`width` are unbound and the Port
        # construction below raises NameError — worth confirming upstream.

        x = snap_to_grid(x)
        y = snap_to_grid(y)
        width = np.round(width - pin_extra_width, 3)

        # de-duplicate markers that resolve to the same snapped location
        if (x, y) not in port_locations:
            port_locations.append((x, y))
            ports[port_name] = Port(
                name=port_name,
                midpoint=(x, y),
                width=width,
                orientation=orientation,
                layer=layer,
                port_type=port_type,
            )

    ports = sort_ports_clockwise(ports)

    for port_name, port in ports.items():
        if port_name in component.ports:
            component_ports = list(component.ports.keys())
            raise ValueError(
                f"port {port_name!r} already in {component_ports}. "
                "You can pass a port_name_prefix to add it with a different name."
            )
        else:
            component.add_port(name=port_name, port=port)

    if auto_rename_ports:
        component.auto_rename_ports()
    return component
add_ports_from_markers_inside = partial(add_ports_from_markers_center, inside=True)
def add_ports_from_labels(
    component: Component,
    port_width: float,
    port_layer: Layer,
    xcenter: Optional[float] = None,
    port_name_prefix: Optional[str] = None,
    port_type: str = "optical",
) -> Component:
    """Create one port per label in the component, centered on the label.

    Labels carry no width, so port_width must be supplied explicitly.
    Orientation is guessed from the label position relative to the component
    center: east/west from the x comparison first, then north/south from y.

    Args:
        component: to read labels from and to write ports to.
        port_width: width assigned to every created port.
        port_layer: layer for the created ports.
        xcenter: east/west pivot (defaults to component.x).
        port_name_prefix: defaults to 'o' for optical and 'e' for electrical.
        port_type: optical, electrical.
    """
    default_prefix = "o" if port_type == "optical" else "e"
    prefix = port_name_prefix or default_prefix

    pivot_x = xcenter or component.x
    pivot_y = component.y

    for index, label in enumerate(component.labels):
        x, y = label.position
        # prefix is always truthy after the fallback, so the f-string is used
        port_name = f"{prefix}{index + 1}" if prefix else index

        if x > pivot_x:  # east
            orientation = 0
        elif x < pivot_x:  # west
            orientation = 180
        elif y > pivot_y:  # north
            orientation = 90
        elif y < pivot_y:  # south
            orientation = 270

        if port_name in component.ports:
            component_ports = list(component.ports.keys())
            raise ValueError(
                f"port {port_name!r} already in {component_ports}. "
                "You can pass a port_name_prefix to add it with a different name."
            )
        component.add_port(
            name=port_name,
            midpoint=(x, y),
            width=port_width,
            orientation=orientation,
            port_type=port_type,
            layer=port_layer,
        )
    return component
if __name__ == "__main__":
    # no demo code; this module is meant to be imported
    pass
| [
"functools.partial",
"gdsfactory.port.Port",
"gdsfactory.port.read_port_markers",
"gdsfactory.port.sort_ports_clockwise",
"gdsfactory.config.logger.debug",
"gdsfactory.snap.snap_to_grid",
"numpy.round"
] | [((8453, 8504), 'functools.partial', 'partial', (['add_ports_from_markers_center'], {'inside': '(True)'}), '(add_ports_from_markers_center, inside=True)\n', (8460, 8504), False, 'from functools import partial\n'), ((1579, 1620), 'gdsfactory.port.read_port_markers', 'read_port_markers', (['component', '[pin_layer]'], {}), '(component, [pin_layer])\n', (1596, 1620), False, 'from gdsfactory.port import Port, read_port_markers, sort_ports_clockwise\n'), ((5009, 5058), 'gdsfactory.port.read_port_markers', 'read_port_markers', (['component'], {'layers': '(pin_layer,)'}), '(component, layers=(pin_layer,))\n', (5026, 5058), False, 'from gdsfactory.port import Port, read_port_markers, sort_ports_clockwise\n'), ((7894, 7921), 'gdsfactory.port.sort_ports_clockwise', 'sort_ports_clockwise', (['ports'], {}), '(ports)\n', (7914, 7921), False, 'from gdsfactory.port import Port, read_port_markers, sort_ports_clockwise\n'), ((1852, 1881), 'gdsfactory.snap.snap_to_grid', 'snap_to_grid', (['(p.ymax - p.ymin)'], {}), '(p.ymax - p.ymin)\n', (1864, 1881), False, 'from gdsfactory.snap import snap_to_grid\n'), ((1895, 1924), 'gdsfactory.snap.snap_to_grid', 'snap_to_grid', (['(p.xmax - p.xmin)'], {}), '(p.xmax - p.xmin)\n', (1907, 1924), False, 'from gdsfactory.snap import snap_to_grid\n'), ((7448, 7463), 'gdsfactory.snap.snap_to_grid', 'snap_to_grid', (['x'], {}), '(x)\n', (7460, 7463), False, 'from gdsfactory.snap import snap_to_grid\n'), ((7476, 7491), 'gdsfactory.snap.snap_to_grid', 'snap_to_grid', (['y'], {}), '(y)\n', (7488, 7491), False, 'from gdsfactory.snap import snap_to_grid\n'), ((7508, 7544), 'numpy.round', 'np.round', (['(width - pin_extra_width)', '(3)'], {}), '(width - pin_extra_width, 3)\n', (7516, 7544), True, 'import numpy as np\n'), ((5562, 5638), 'gdsfactory.config.logger.debug', 'logger.debug', (['f"""skipping port at ({x}, {y}) with min_pin_area_um2 {dx * dy}"""'], {}), "(f'skipping port at ({x}, {y}) with min_pin_area_um2 {dx * dy}')\n", (5574, 5638), False, 
'from gdsfactory.config import logger\n'), ((5826, 5877), 'gdsfactory.config.logger.debug', 'logger.debug', (['f"""skipping square port at ({x}, {y})"""'], {}), "(f'skipping square port at ({x}, {y})')\n", (5838, 5877), False, 'from gdsfactory.config import logger\n'), ((7660, 7773), 'gdsfactory.port.Port', 'Port', ([], {'name': 'port_name', 'midpoint': '(x, y)', 'width': 'width', 'orientation': 'orientation', 'layer': 'layer', 'port_type': 'port_type'}), '(name=port_name, midpoint=(x, y), width=width, orientation=orientation,\n layer=layer, port_type=port_type)\n', (7664, 7773), False, 'from gdsfactory.port import Port, read_port_markers, sort_ports_clockwise\n'), ((5776, 5792), 'gdsfactory.snap.snap_to_grid', 'snap_to_grid', (['dx'], {}), '(dx)\n', (5788, 5792), False, 'from gdsfactory.snap import snap_to_grid\n'), ((5796, 5812), 'gdsfactory.snap.snap_to_grid', 'snap_to_grid', (['dy'], {}), '(dy)\n', (5808, 5812), False, 'from gdsfactory.snap import snap_to_grid\n')] |
from src.config import get_params
from src.utils import init_experiment
from src.dataloader import get_dataloader, get_conll2003_dataloader, get_dataloader_for_bilstmtagger
from src.trainer import BaseTrainer
from src.model import BertTagger, BiLSTMTagger
from src.coach.dataloader import get_dataloader_for_coach
from src.coach.model import EntityPredictor
from src.coach.trainer import CoachTrainer
import torch
import numpy as np
from tqdm import tqdm
import random
def random_seed(seed):
    """Seed the Python, NumPy and PyTorch RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    # Force deterministic cuDNN kernels (slower, but reproducible).
    torch.backends.cudnn.deterministic = True
def train(params):
    """Train an NER tagger on the target domain with early stopping on dev F1.

    Builds one of three model/trainer combinations depending on ``params``:
    BiLSTM-CRF (``params.bilstm``), the Coach two-stage model
    (``params.coach``), or a BERT-based tagger (default). Optionally
    pre-trains on CoNLL-2003 first. Stops when dev-set F1 has not improved
    for ``params.early_stop`` consecutive epochs.
    """
    # initialize experiment (logging, output dirs, ...)
    logger = init_experiment(params, logger_filename=params.logger_filename)

    if params.bilstm:
        # dataloader
        dataloader_train, dataloader_dev, dataloader_test, vocab = get_dataloader_for_bilstmtagger(params)
        # bilstm-crf model
        model = BiLSTMTagger(params, vocab)
        model.cuda()
        # trainer
        trainer = BaseTrainer(params, model)
    elif params.coach:
        # dataloader
        dataloader_train, dataloader_dev, dataloader_test, vocab = get_dataloader_for_coach(params)
        # coach model: binary mention detector + entity-type predictor
        binary_tagger = BiLSTMTagger(params, vocab)
        entity_predictor = EntityPredictor(params)
        binary_tagger.cuda()
        entity_predictor.cuda()
        # trainer
        trainer = CoachTrainer(params, binary_tagger, entity_predictor)
    else:
        # dataloader
        dataloader_train, dataloader_dev, dataloader_test = get_dataloader(params)
        # BERT-based NER Tagger
        model = BertTagger(params)
        model.cuda()
        # trainer
        trainer = BaseTrainer(params, model)

    # optional pre-training on CoNLL-2003 before the target domain
    if params.conll and not params.joint:
        conll_trainloader, conll_devloader, conll_testloader = get_conll2003_dataloader(params.batch_size, params.tgt_dm)
        trainer.train_conll(conll_trainloader, conll_devloader, conll_testloader, params.tgt_dm)

    no_improvement_num = 0
    best_f1 = 0
    logger.info("Training on target domain ...")
    for e in range(params.epoch):
        logger.info("============== epoch %d ==============" % e)
        pbar = tqdm(enumerate(dataloader_train), total=len(dataloader_train))
        if params.bilstm:
            loss_list = []
            for i, (X, lengths, y) in pbar:
                X, lengths = X.cuda(), lengths.cuda()
                loss = trainer.train_step_for_bilstm(X, lengths, y)
                loss_list.append(loss)
                # running mean of the loss in the progress bar
                pbar.set_description("(Epoch {}) LOSS:{:.4f}".format(e, np.mean(loss_list)))
            logger.info("Finish training epoch %d. loss: %.4f" % (e, np.mean(loss_list)))
        elif params.coach:
            loss_bin_list, loss_entity_list = [], []
            for i, (X, lengths, y_bin, y_final) in pbar:
                X, lengths = X.cuda(), lengths.cuda()
                # two losses: binary mention detection + entity typing
                loss_bin, loss_entityname = trainer.train_step(X, lengths, y_bin, y_final)
                loss_bin_list.append(loss_bin)
                loss_entity_list.append(loss_entityname)
                pbar.set_description("(Epoch {}) LOSS BIN:{:.4f}; LOSS ENTITY:{:.4f}".format(e, np.mean(loss_bin_list), np.mean(loss_entity_list)))
            logger.info("Finish training epoch %d. loss_bin: %.4f. loss_entity: %.4f" % (e, np.mean(loss_bin_list), np.mean(loss_entity_list)))
        else:
            loss_list = []
            for i, (X, y) in pbar:
                X, y = X.cuda(), y.cuda()
                loss = trainer.train_step(X, y)
                loss_list.append(loss)
                pbar.set_description("(Epoch {}) LOSS:{:.4f}".format(e, np.mean(loss_list)))
            logger.info("Finish training epoch %d. loss: %.4f" % (e, np.mean(loss_list)))

        logger.info("============== Evaluate epoch %d on Train Set ==============" % e)
        f1_train, _, _ = trainer.evaluate(dataloader_train, params.tgt_dm, use_bilstm=params.bilstm)
        logger.info("Evaluate on Train Set. F1: %.4f." % f1_train)

        logger.info("============== Evaluate epoch %d on Dev Set ==============" % e)
        f1_dev, _, _ = trainer.evaluate(dataloader_dev, params.tgt_dm, use_bilstm=params.bilstm)
        logger.info("Evaluate on Dev Set. F1: %.4f." % f1_dev)

        logger.info("============== Evaluate epoch %d on Test Set ==============" % e)
        f1_test, precision_test, recall_test = trainer.evaluate(dataloader_test, params.tgt_dm, use_bilstm=params.bilstm)
        logger.info("Evaluate on Test Set. precision: %.4f." % precision_test)
        logger.info("Evaluate on Test Set. recall: %.4f." % recall_test)
        logger.info("Evaluate on Test Set. F1: %.4f." % f1_test)

        # early stopping bookkeeping on dev-set F1
        if f1_dev > best_f1:
            logger.info("Found better model!!")
            best_f1 = f1_dev
            no_improvement_num = 0
            # NOTE(review): checkpointing is disabled, so the best model is
            # not persisted — only tracked via best_f1.
            # trainer.save_model()
        else:
            no_improvement_num += 1
            logger.info("No better model found (%d/%d)" % (no_improvement_num, params.early_stop))

        if no_improvement_num >= params.early_stop:
            break
if __name__ == "__main__":
    # parse CLI flags, fix all RNG seeds, then run the training loop
    params = get_params()
    random_seed(params.seed)
    train(params)
| [
"src.utils.init_experiment",
"numpy.random.seed",
"torch.manual_seed",
"src.coach.trainer.CoachTrainer",
"torch.cuda.manual_seed",
"src.dataloader.get_conll2003_dataloader",
"src.config.get_params",
"src.model.BiLSTMTagger",
"numpy.mean",
"random.seed",
"src.dataloader.get_dataloader_for_bilstmt... | [((500, 517), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (511, 517), False, 'import random\n'), ((522, 542), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (536, 542), True, 'import numpy as np\n'), ((547, 570), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (564, 570), False, 'import torch\n'), ((575, 603), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (597, 603), False, 'import torch\n'), ((712, 775), 'src.utils.init_experiment', 'init_experiment', (['params'], {'logger_filename': 'params.logger_filename'}), '(params, logger_filename=params.logger_filename)\n', (727, 775), False, 'from src.utils import init_experiment\n'), ((5216, 5228), 'src.config.get_params', 'get_params', ([], {}), '()\n', (5226, 5228), False, 'from src.config import get_params\n'), ((891, 930), 'src.dataloader.get_dataloader_for_bilstmtagger', 'get_dataloader_for_bilstmtagger', (['params'], {}), '(params)\n', (922, 930), False, 'from src.dataloader import get_dataloader, get_conll2003_dataloader, get_dataloader_for_bilstmtagger\n'), ((974, 1001), 'src.model.BiLSTMTagger', 'BiLSTMTagger', (['params', 'vocab'], {}), '(params, vocab)\n', (986, 1001), False, 'from src.model import BertTagger, BiLSTMTagger\n'), ((1059, 1085), 'src.trainer.BaseTrainer', 'BaseTrainer', (['params', 'model'], {}), '(params, model)\n', (1070, 1085), False, 'from src.trainer import BaseTrainer\n'), ((1877, 1935), 'src.dataloader.get_conll2003_dataloader', 'get_conll2003_dataloader', (['params.batch_size', 'params.tgt_dm'], {}), '(params.batch_size, params.tgt_dm)\n', (1901, 1935), False, 'from src.dataloader import get_dataloader, get_conll2003_dataloader, get_dataloader_for_bilstmtagger\n'), ((1197, 1229), 'src.coach.dataloader.get_dataloader_for_coach', 'get_dataloader_for_coach', (['params'], {}), '(params)\n', (1221, 1229), False, 'from src.coach.dataloader import 
get_dataloader_for_coach\n'), ((1276, 1303), 'src.model.BiLSTMTagger', 'BiLSTMTagger', (['params', 'vocab'], {}), '(params, vocab)\n', (1288, 1303), False, 'from src.model import BertTagger, BiLSTMTagger\n'), ((1331, 1354), 'src.coach.model.EntityPredictor', 'EntityPredictor', (['params'], {}), '(params)\n', (1346, 1354), False, 'from src.coach.model import EntityPredictor\n'), ((1452, 1505), 'src.coach.trainer.CoachTrainer', 'CoachTrainer', (['params', 'binary_tagger', 'entity_predictor'], {}), '(params, binary_tagger, entity_predictor)\n', (1464, 1505), False, 'from src.coach.trainer import CoachTrainer\n'), ((1597, 1619), 'src.dataloader.get_dataloader', 'get_dataloader', (['params'], {}), '(params)\n', (1611, 1619), False, 'from src.dataloader import get_dataloader, get_conll2003_dataloader, get_dataloader_for_bilstmtagger\n'), ((1668, 1686), 'src.model.BertTagger', 'BertTagger', (['params'], {}), '(params)\n', (1678, 1686), False, 'from src.model import BertTagger, BiLSTMTagger\n'), ((1744, 1770), 'src.trainer.BaseTrainer', 'BaseTrainer', (['params', 'model'], {}), '(params, model)\n', (1755, 1770), False, 'from src.trainer import BaseTrainer\n'), ((2643, 2661), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (2650, 2661), True, 'import numpy as np\n'), ((2734, 2752), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (2741, 2752), True, 'import numpy as np\n'), ((3238, 3260), 'numpy.mean', 'np.mean', (['loss_bin_list'], {}), '(loss_bin_list)\n', (3245, 3260), True, 'import numpy as np\n'), ((3262, 3287), 'numpy.mean', 'np.mean', (['loss_entity_list'], {}), '(loss_entity_list)\n', (3269, 3287), True, 'import numpy as np\n'), ((3395, 3417), 'numpy.mean', 'np.mean', (['loss_bin_list'], {}), '(loss_bin_list)\n', (3402, 3417), True, 'import numpy as np\n'), ((3419, 3444), 'numpy.mean', 'np.mean', (['loss_entity_list'], {}), '(loss_entity_list)\n', (3426, 3444), True, 'import numpy as np\n'), ((3725, 3743), 'numpy.mean', 'np.mean', 
(['loss_list'], {}), '(loss_list)\n', (3732, 3743), True, 'import numpy as np\n'), ((3816, 3834), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (3823, 3834), True, 'import numpy as np\n')] |
# https://github.com/flyingdoog/PGExplainer/blob/master/MUTAG.ipynb
import yaml
import torch
import numpy as np
import pickle as pkl
from pathlib import Path
from torch_geometric.data import InMemoryDataset, Data
class Mutag(InMemoryDataset):
    def __init__(self, root):
        """Load the processed Mutagenicity dataset stored under *root*.

        NOTE(review): relies on InMemoryDataset.__init__ having materialized
        the processed file (via process()) before the torch.load below —
        confirm against the installed torch_geometric version.
        """
        super().__init__(root=root)
        self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
return ['Mutagenicity_A.txt', 'Mutagenicity_edge_gt.txt', 'Mutagenicity_edge_labels.txt',
'Mutagenicity_graph_indicator.txt', 'Mutagenicity_graph_labels.txt', 'Mutagenicity_label_readme.txt',
'Mutagenicity_node_labels.txt', 'Mutagenicity.pkl']
@property
def processed_file_names(self):
return ['data.pt']
def download(self):
raise NotImplementedError
def process(self):
with open(self.raw_dir + '/Mutagenicity.pkl', 'rb') as fin:
_, original_features, original_labels = pkl.load(fin)
edge_lists, graph_labels, edge_label_lists, node_type_lists = self.get_graph_data()
data_list = []
for i in range(original_labels.shape[0]):
num_nodes = len(node_type_lists[i])
edge_index = torch.tensor(edge_lists[i], dtype=torch.long).T
y = torch.tensor(graph_labels[i]).float().reshape(-1, 1)
x = torch.tensor(original_features[i][:num_nodes]).float()
assert original_features[i][num_nodes:].sum() == 0
edge_label = torch.tensor(edge_label_lists[i]).float()
if y.item() != 0:
edge_label = torch.zeros_like(edge_label).float()
node_label = torch.zeros(x.shape[0])
signal_nodes = list(set(edge_index[:, edge_label.bool()].reshape(-1).tolist()))
if y.item() == 0:
node_label[signal_nodes] = 1
if len(signal_nodes) != 0:
node_type = torch.tensor(node_type_lists[i])
node_type = set(node_type[signal_nodes].tolist())
assert node_type in ({4, 1}, {4, 3}, {4, 1, 3}) # NO or NH
if y.item() == 0 and len(signal_nodes) == 0:
continue
data_list.append(Data(x=x, y=y, edge_index=edge_index, node_label=node_label, edge_label=edge_label, node_type=torch.tensor(node_type_lists[i])))
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
def get_graph_data(self):
pri = self.raw_dir + '/Mutagenicity_'
file_edges = pri + 'A.txt'
# file_edge_labels = pri + 'edge_labels.txt'
file_edge_labels = pri + 'edge_gt.txt'
file_graph_indicator = pri + 'graph_indicator.txt'
file_graph_labels = pri + 'graph_labels.txt'
file_node_labels = pri + 'node_labels.txt'
edges = np.loadtxt(file_edges, delimiter=',').astype(np.int32)
try:
edge_labels = np.loadtxt(file_edge_labels, delimiter=',').astype(np.int32)
except Exception as e:
print(e)
print('use edge label 0')
edge_labels = np.zeros(edges.shape[0]).astype(np.int32)
graph_indicator = np.loadtxt(file_graph_indicator, delimiter=',').astype(np.int32)
graph_labels = np.loadtxt(file_graph_labels, delimiter=',').astype(np.int32)
try:
node_labels = np.loadtxt(file_node_labels, delimiter=',').astype(np.int32)
except Exception as e:
print(e)
print('use node label 0')
node_labels = np.zeros(graph_indicator.shape[0]).astype(np.int32)
graph_id = 1
starts = [1]
node2graph = {}
for i in range(len(graph_indicator)):
if graph_indicator[i] != graph_id:
graph_id = graph_indicator[i]
starts.append(i+1)
node2graph[i+1] = len(starts)-1
# print(starts)
# print(node2graph)
graphid = 0
edge_lists = []
edge_label_lists = []
edge_list = []
edge_label_list = []
for (s, t), l in list(zip(edges, edge_labels)):
sgid = node2graph[s]
tgid = node2graph[t]
if sgid != tgid:
print('edges connecting different graphs, error here, please check.')
print(s, t, 'graph id', sgid, tgid)
exit(1)
gid = sgid
if gid != graphid:
edge_lists.append(edge_list)
edge_label_lists.append(edge_label_list)
edge_list = []
edge_label_list = []
graphid = gid
start = starts[gid]
edge_list.append((s-start, t-start))
edge_label_list.append(l)
edge_lists.append(edge_list)
edge_label_lists.append(edge_label_list)
# node labels
node_label_lists = []
graphid = 0
node_label_list = []
for i in range(len(node_labels)):
nid = i+1
gid = node2graph[nid]
# start = starts[gid]
if gid != graphid:
node_label_lists.append(node_label_list)
graphid = gid
node_label_list = []
node_label_list.append(node_labels[i])
node_label_lists.append(node_label_list)
return edge_lists, graph_labels, edge_label_lists, node_label_lists
| [
"torch.zeros_like",
"torch.load",
"numpy.zeros",
"torch.save",
"pickle.load",
"numpy.loadtxt",
"torch.zeros",
"torch.tensor"
] | [((345, 380), 'torch.load', 'torch.load', (['self.processed_paths[0]'], {}), '(self.processed_paths[0])\n', (355, 380), False, 'import torch\n'), ((2418, 2469), 'torch.save', 'torch.save', (['(data, slices)', 'self.processed_paths[0]'], {}), '((data, slices), self.processed_paths[0])\n', (2428, 2469), False, 'import torch\n'), ((991, 1004), 'pickle.load', 'pkl.load', (['fin'], {}), '(fin)\n', (999, 1004), True, 'import pickle as pkl\n'), ((1686, 1709), 'torch.zeros', 'torch.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (1697, 1709), False, 'import torch\n'), ((1245, 1290), 'torch.tensor', 'torch.tensor', (['edge_lists[i]'], {'dtype': 'torch.long'}), '(edge_lists[i], dtype=torch.long)\n', (1257, 1290), False, 'import torch\n'), ((1945, 1977), 'torch.tensor', 'torch.tensor', (['node_type_lists[i]'], {}), '(node_type_lists[i])\n', (1957, 1977), False, 'import torch\n'), ((2863, 2900), 'numpy.loadtxt', 'np.loadtxt', (['file_edges'], {'delimiter': '""","""'}), "(file_edges, delimiter=',')\n", (2873, 2900), True, 'import numpy as np\n'), ((3203, 3250), 'numpy.loadtxt', 'np.loadtxt', (['file_graph_indicator'], {'delimiter': '""","""'}), "(file_graph_indicator, delimiter=',')\n", (3213, 3250), True, 'import numpy as np\n'), ((3291, 3335), 'numpy.loadtxt', 'np.loadtxt', (['file_graph_labels'], {'delimiter': '""","""'}), "(file_graph_labels, delimiter=',')\n", (3301, 3335), True, 'import numpy as np\n'), ((1379, 1425), 'torch.tensor', 'torch.tensor', (['original_features[i][:num_nodes]'], {}), '(original_features[i][:num_nodes])\n', (1391, 1425), False, 'import torch\n'), ((1522, 1555), 'torch.tensor', 'torch.tensor', (['edge_label_lists[i]'], {}), '(edge_label_lists[i])\n', (1534, 1555), False, 'import torch\n'), ((2957, 3000), 'numpy.loadtxt', 'np.loadtxt', (['file_edge_labels'], {'delimiter': '""","""'}), "(file_edge_labels, delimiter=',')\n", (2967, 3000), True, 'import numpy as np\n'), ((3393, 3436), 'numpy.loadtxt', 'np.loadtxt', (['file_node_labels'], 
{'delimiter': '""","""'}), "(file_node_labels, delimiter=',')\n", (3403, 3436), True, 'import numpy as np\n'), ((1623, 1651), 'torch.zeros_like', 'torch.zeros_like', (['edge_label'], {}), '(edge_label)\n', (1639, 1651), False, 'import torch\n'), ((2327, 2359), 'torch.tensor', 'torch.tensor', (['node_type_lists[i]'], {}), '(node_type_lists[i])\n', (2339, 2359), False, 'import torch\n'), ((3134, 3158), 'numpy.zeros', 'np.zeros', (['edges.shape[0]'], {}), '(edges.shape[0])\n', (3142, 3158), True, 'import numpy as np\n'), ((3570, 3604), 'numpy.zeros', 'np.zeros', (['graph_indicator.shape[0]'], {}), '(graph_indicator.shape[0])\n', (3578, 3604), True, 'import numpy as np\n'), ((1310, 1339), 'torch.tensor', 'torch.tensor', (['graph_labels[i]'], {}), '(graph_labels[i])\n', (1322, 1339), False, 'import torch\n')] |
import numpy as np
import cv2
cam = cv2.VideoCapture(0)
def find_shape(image):
    """Binarize *image*: grayscale, 5x5 Gaussian blur, then threshold at 30.

    Returns the binary (0/255) single-channel image.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.GaussianBlur(grayscale, (5, 5), 0)
    _, binary = cv2.threshold(smoothed, 30, 255, cv2.THRESH_BINARY)
    return binary
def color_mask(image, color = "Red"):
if color == "Blue":
boundary = blueboundaries = [([55, 1, 1], [255, 80, 80])]
elif color == "Green":
boundary = greenboundaries = [([1, 55, 1], [80, 255, 80])]
else:
boundary = redboundaries = [([1, 1, 55], [80, 80, 255])]
for (lower, upper) in boundary:
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
mask = cv2.inRange(image, lower, upper)
return cv2.bitwise_and(image, image, mask = mask)
def _shrink(frame):
    # Uniform 0.7x downscale so the preview windows fit on screen.
    return cv2.resize(frame, None, fx=0.7, fy=0.7, interpolation=cv2.INTER_CUBIC)

# Live preview loop: grab a frame, keep only the red pixels, binarize the
# result, and show both views until 'q' is pressed.
while True:
    ret, frame = cam.read()
    masked = color_mask(frame, color="Red")
    binary = find_shape(masked)
    cv2.imshow('Thresh', _shrink(binary))
    cv2.imshow('Filter', np.hstack([_shrink(frame), _shrink(masked)]))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
| [
"cv2.GaussianBlur",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.threshold",
"cv2.imshow",
"numpy.hstack",
"cv2.VideoCapture",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.inRange",
"cv2.resize"
] | [((37, 56), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (53, 56), False, 'import cv2\n'), ((1492, 1515), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1513, 1515), False, 'import cv2\n'), ((92, 131), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (104, 131), False, 'import cv2\n'), ((146, 179), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (162, 179), False, 'import cv2\n'), ((1044, 1117), 'cv2.resize', 'cv2.resize', (['threshed', 'None'], {'fx': '(0.7)', 'fy': '(0.7)', 'interpolation': 'cv2.INTER_CUBIC'}), '(threshed, None, fx=0.7, fy=0.7, interpolation=cv2.INTER_CUBIC)\n', (1054, 1117), False, 'import cv2\n'), ((1139, 1209), 'cv2.resize', 'cv2.resize', (['image', 'None'], {'fx': '(0.7)', 'fy': '(0.7)', 'interpolation': 'cv2.INTER_CUBIC'}), '(image, None, fx=0.7, fy=0.7, interpolation=cv2.INTER_CUBIC)\n', (1149, 1209), False, 'import cv2\n'), ((1234, 1307), 'cv2.resize', 'cv2.resize', (['filtered', 'None'], {'fx': '(0.7)', 'fy': '(0.7)', 'interpolation': 'cv2.INTER_CUBIC'}), '(filtered, None, fx=0.7, fy=0.7, interpolation=cv2.INTER_CUBIC)\n', (1244, 1307), False, 'import cv2\n'), ((1314, 1351), 'cv2.imshow', 'cv2.imshow', (['"""Thresh"""', 'threshed_scaled'], {}), "('Thresh', threshed_scaled)\n", (1324, 1351), False, 'import cv2\n'), ((191, 241), 'cv2.threshold', 'cv2.threshold', (['blurred', '(30)', '(255)', 'cv2.THRESH_BINARY'], {}), '(blurred, 30, 255, cv2.THRESH_BINARY)\n', (204, 241), False, 'import cv2\n'), ((596, 626), 'numpy.array', 'np.array', (['lower'], {'dtype': '"""uint8"""'}), "(lower, dtype='uint8')\n", (604, 626), True, 'import numpy as np\n'), ((645, 675), 'numpy.array', 'np.array', (['upper'], {'dtype': '"""uint8"""'}), "(upper, dtype='uint8')\n", (653, 675), True, 'import numpy as np\n'), ((694, 726), 'cv2.inRange', 'cv2.inRange', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', 
(705, 726), False, 'import cv2\n'), ((742, 782), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (757, 782), False, 'import cv2\n'), ((1377, 1419), 'numpy.hstack', 'np.hstack', (['[image_scaled, filtered_scaled]'], {}), '([image_scaled, filtered_scaled])\n', (1386, 1419), True, 'import numpy as np\n'), ((1428, 1442), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1439, 1442), False, 'import cv2\n')] |
"""Multi Transforms.
Applies the same transformation on a COCO dataset image and annotation pair.
"""
from PIL import Image
import random
import torchvision.transforms.functional as F
from torchvision.transforms.transforms import Normalize
import numpy as np
__all__ = ["MultiCompose", "MultiToTensor", "MultiNormalize", "MultiResize",
"MultiRandomFlip", "MultiToPILImage"]
class MultiCompose(object):
    """Chain several paired transforms into a single callable.

    Args:
        transforms (list of ``Transform`` objects): transforms applied in
            order; each must accept and return an ``(img, annotation)`` pair.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, annotation):
        # Thread the pair through every transform in sequence.
        for transform in self.transforms:
            img, annotation = transform(img, annotation)
        return img, annotation

    def __repr__(self):
        inner = ''.join('\n    {0}'.format(t) for t in self.transforms)
        return '{0}({1}\n)'.format(self.__class__.__name__, inner)
class MultiToTensor(object):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to a tensor.

    A PIL Image or numpy.ndarray (H x W x C) in the range [0, 255] becomes a
    torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] when the
    input is one of the scalable modes/dtypes; otherwise the tensor is
    returned without scaling. The annotation is passed through untouched.
    """

    def __call__(self, pic, target):
        """Convert ``pic`` and return it together with ``target``.

        Args:
            pic (PIL Image or numpy.ndarray): image to convert.
            target (object array): annotation array, returned unchanged.

        Returns:
            tuple: ``(tensor_image, target)``.
        """
        tensor_image = F.to_tensor(pic)
        # The annotation carries no pixel data, so nothing to convert.
        return tensor_image, target

    def __repr__(self):
        return '{}()'.format(type(self).__name__)
class MultiResize(object):
    """Resize a PIL image and rescale its COCO-style annotations to match."""

    def __init__(self, size):
        """Store the target size.

        Args:
            size (tuple): target size as ``(h, w)``.
        """
        self.h, self.w = size
        self.area = self.h * self.w

    def _scale_coords(self, coords, w_ratio, h_ratio):
        # COCO stores flat [x0, y0, x1, y1, ...]: even indices are x values
        # (scaled by the width ratio), odd indices are y values (height ratio).
        return [c * (w_ratio if i % 2 == 0 else h_ratio)
                for i, c in enumerate(coords)]

    def __call__(self, img, target):
        """Resize the image and rescale every annotation accordingly.

        Args:
            img (PIL.Image.Image): A PIL Image.
            target (object array): The annotation array (list of COCO boxes).

        Returns:
            tuple: the resized image and the rescaled annotation list.

        Raises:
            NotImplementedError: for crowd (``iscrowd != 0``) annotations.
        """
        img_w, img_h = img.size
        w_ratio = float(self.w) / float(img_w)
        h_ratio = float(self.h) / float(img_h)
        area_ratio = self.area / (img_w * img_h)
        img = img.resize((self.w, self.h), Image.BICUBIC)
        out_target = []
        for box in target:
            current_box = {'id': box['id'],
                           'iscrowd': box['iscrowd'],
                           'image_id': box['image_id'],
                           'category_id': box['category_id'],
                           # Areas scale with the total pixel-count ratio.
                           'area': box['area'] * area_ratio}
            if box['iscrowd'] == 0:
                current_box['segmentation'] = [
                    self._scale_coords(seg, w_ratio, h_ratio)
                    for seg in box['segmentation']]
            else:
                # Crowd (RLE) masks are not supported.
                raise NotImplementedError
            current_box['bbox'] = self._scale_coords(box['bbox'], w_ratio, h_ratio)
            out_target.append(current_box)
        return img, out_target

    def __repr__(self):
        # Fixed: the original omitted the closing parentheses and reported
        # the size as (w, h) despite the constructor taking (h, w).
        return self.__class__.__name__ + '(size=({}, {}))'.format(self.h, self.w)
class MultiRandomFlip:
    def __init__(self, probability):
        """Randomly flips vertically or horizontally with a given probability.
        Args:
            probability (float): Probability to flip.
        """
        self.probability = probability
    def __call__(self, img, target):
        """Flips an image and its target.
        Args:
            img (PIL.Image.Image): A PIL Image.
            target (object array): The annotation array.
        """
        # One Bernoulli draw: flip with self.probability, else pass through.
        # NOTE: uses np.random, then random.choice below — seeded
        # reproducibility depends on this exact call order.
        should_flip = np.random.choice(
            np.array([True, False]),
            p=np.array([self.probability, 1 - self.probability]))
        if not should_flip:
            return img, target
        # Pick the flip axis uniformly at random.
        method = random.choice((Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM))
        out_target = []
        # Transform the target
        for box in target:
            current_box = {'id': box['id'],
                           'iscrowd': box['iscrowd'],
                           'image_id': box['image_id'],
                           'category_id': box['category_id'],
                           'area': box['area']}
            # Deal with first case: Not crowd
            if box['iscrowd'] == 0:
                current_segmentation = []
                # Mirror the flat polygon coordinates: x values (even
                # positions) are reflected for a horizontal flip, y values
                # (odd positions) for a vertical flip; the other axis is
                # left unchanged.
                for segmentation in box['segmentation']:
                    count = 0
                    current_coordinates = []
                    for coords in segmentation:
                        if count % 2 == 0:
                            # Horizontal transform
                            if method == Image.FLIP_LEFT_RIGHT:
                                current_coordinates.append(img.size[0] - coords)
                            else:
                                current_coordinates.append(coords)
                        else:
                            if method == Image.FLIP_TOP_BOTTOM:
                                current_coordinates.append(img.size[1] - coords)
                            else:
                                current_coordinates.append(coords)
                        count += 1
                    current_segmentation.append(current_coordinates)
                current_box['segmentation'] = current_segmentation
            else:
                # Crowd (RLE) annotations are not supported.
                raise NotImplementedError
            # Next do bboxes
            # The box origin moves to the mirrored corner; width and height
            # are unchanged.
            x_pos, y_pos, width, height = box['bbox']
            if method == Image.FLIP_LEFT_RIGHT:
                x_pos = img.size[0] - x_pos - width
            else:
                y_pos = img.size[1] - y_pos - height
            current_box['bbox'] = [x_pos, y_pos, width, height]
            out_target.append(current_box)
        return img.transpose(method), out_target
class MultiNormalize:
    def __init__(self, mean, std, **kwargs):
        """Channel-wise normalization of the image half of a sample pair.

        Args:
            mean (list): per-channel means.
            std (list): per-channel standard deviations.
        """
        self.normalize = Normalize(mean, std)

    def __call__(self, img, target):
        """Normalize ``img``; ``target`` is returned untouched.

        Args:
            img (PIL.Image.Image): A PIL Image.
            target (object array): The annotation array.
        """
        normalized = self.normalize(img)
        return normalized, target
class MultiToPILImage:
    def __call__(self, img, ann):
        """Convert the tensor ``img`` back to a PIL Image; ``ann`` passes through."""
        pil_image = F.to_pil_image(img)
        return pil_image, ann
| [
"torchvision.transforms.functional.to_tensor",
"random.choice",
"torchvision.transforms.functional.to_pil_image",
"numpy.array",
"torchvision.transforms.transforms.Normalize"
] | [((5478, 5539), 'random.choice', 'random.choice', (['(Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM)'], {}), '((Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM))\n', (5491, 5539), False, 'import random\n'), ((7844, 7864), 'torchvision.transforms.transforms.Normalize', 'Normalize', (['mean', 'std'], {}), '(mean, std)\n', (7853, 7864), False, 'from torchvision.transforms.transforms import Normalize\n'), ((2050, 2066), 'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['pic'], {}), '(pic)\n', (2061, 2066), True, 'import torchvision.transforms.functional as F\n'), ((5310, 5333), 'numpy.array', 'np.array', (['[True, False]'], {}), '([True, False])\n', (5318, 5333), True, 'import numpy as np\n'), ((8241, 8260), 'torchvision.transforms.functional.to_pil_image', 'F.to_pil_image', (['img'], {}), '(img)\n', (8255, 8260), True, 'import torchvision.transforms.functional as F\n'), ((5349, 5399), 'numpy.array', 'np.array', (['[self.probability, 1 - self.probability]'], {}), '([self.probability, 1 - self.probability])\n', (5357, 5399), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 6 15:31:23 2019
@author: qls
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 5 20:35:53 2019
@author: qls
"""
import pandas as pd
import os
import pandas
import keras.backend as K
import tensorflow as tf
import random
import numpy as np
import scipy.io
import keras
from keras.layers import SimpleRNN,Permute,Reshape,CuDNNLSTM,LSTM,Dropout,Input, Add, Dense,\
Activation,ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D,AveragePooling1D,MaxPooling1D,MaxPooling2D,GlobalMaxPooling1D\
,AveragePooling1D,UpSampling1D,concatenate,Permute,SeparableConv1D,LeakyReLU,Conv2DTranspose
from keras.models import Model, load_model
from keras.callbacks import ReduceLROnPlateau
from scipy import signal
import pywt
def wavelet_data(data):
    """7-level 'db5' discrete wavelet decomposition of a 1-D signal.

    Each level's approximation and detail band is reconstructed back to
    (roughly) the original signal length from that single band, and the 14
    reconstructions are stacked as columns.

    Returns:
        ndarray with 14 columns: 7 approximation reconstructions followed
        by 7 detail reconstructions (one per decomposition level).
    """
    wavelet = 'db5'
    mode = pywt.Modes.smooth
    approx_coeffs = []
    detail_coeffs = []
    current = data
    for _ in range(7):
        current, detail = pywt.dwt(current, wavelet, mode)
        approx_coeffs.append(current)
        detail_coeffs.append(detail)
    # Reconstruct level k from its lone band by zero-filling the other
    # coefficient slots ([band, None, ..., None] of length k+2).
    rec_a = [pywt.waverec([coeff, None] + [None] * level, wavelet)
             for level, coeff in enumerate(approx_coeffs)]
    rec_d = [pywt.waverec([None, coeff] + [None] * level, wavelet)
             for level, coeff in enumerate(detail_coeffs)]
    return np.concatenate((np.array(rec_a), np.array(rec_d))).T
def wavelet_all_data(data):
    """Apply :func:`wavelet_data` to every signal in *data*.

    Args:
        data: iterable of 1-D signals (same length, so the results stack).

    Returns:
        ndarray stacking one wavelet feature matrix per input signal.
    """
    # Idiom fix: the original built the list with an index loop + append.
    return np.array([wavelet_data(signal) for signal in data])
# alldata=scipy.io.loadmat('/home/lirongwang/CWQ/XG/jiangnanSTYLE/alldata_meiquzao.mat')
# alllabel=scipy.io.loadmat('/home/lirongwang/CWQ/XG/jiangnanSTYLE/alllabel_new.mat')
# new_data3=scipy.io.loadmat('/home/lirongwang/CWQ/XG/jiangnanSTYLE/alldata_7_9.mat')
# new_label3=scipy.io.loadmat('/home/lirongwang/CWQ/XG/jiangnanSTYLE/alllabel_7_9.mat')
# AFdata=scipy.io.loadmat('/home/lirongwang/CWQ/XG/jiangnanSTYLE/AFdata.mat')#D:/心梗/2019生理参数竞赛/自采ECG信号/房颤数据/合并/
# AFlabel=scipy.io.loadmat('/home/lirongwang/CWQ/XG/jiangnanSTYLE/AFlabel.mat')
# tianchi_data=scipy.io.loadmat('/home/lirongwang/CWQ/XG/jiangnanSTYLE/tianchi_data.mat')#D:/心梗/2019生理参数竞赛/自采ECG信号/房颤数据/合并/
# tianchi_label=scipy.io.loadmat('/home/lirongwang/CWQ/XG/jiangnanSTYLE/tianchi_label.mat')
# alldata=alldata['alldata_meiquzao']
# alllabel=alllabel['alllabel_new']
# new_data3=new_data3['alldata_7_9'][2000:]
# new_label3=new_label3['alllabel_7_9'][2000:]
# AFdata=AFdata['AFdata']
# AFlabel=AFlabel['AFlabel']
# tianchi_data=tianchi_data['tianchi_data']
# tianchi_label=tianchi_label['tianchi_label']
# random.seed(1)
# length=len(alldata)
# #length=2000
# c = range(0,length)
# q = random.sample(c,int(length*0.8))
# c=set(c)
# q=set(q)
# e=c-q
# e=list(e)
# q=list(q)
# train_x=alldata[q]
# train_y=alllabel[q]
# test_x=alldata[e]
# test_y=alllabel[e]
# train_x=np.concatenate((train_x,new_data3,AFdata,tianchi_data))
# train_y=np.concatenate((train_y,new_label3,AFlabel,tianchi_label))
# train_x=wavelet_all_data(train_x)
# test_x=wavelet_all_data(test_x)
# train_x=train_x.reshape(train_x.shape[0],train_x.shape[1],14)
# test_x=test_x.reshape(test_x.shape[0],test_x.shape[1],14)
# train_y=train_y.reshape(train_y.shape[0],train_y.shape[1],1)
# test_y=test_y.reshape(test_y.shape[0],test_y.shape[1],1)
# 1-D U-Net-style segmentation network: input is a 5000-sample signal with
# 14 channels (the wavelet decomposition produced by wavelet_all_data);
# output is a per-sample sigmoid mask of the same length.
inputs = Input((5000,14),name='inputs')
# --- Encoder stage 1: three Conv1D(16, k=20) blocks, then 10x pooling.
conv1 = BatchNormalization()(inputs)
conv1 = Conv1D(16, 20, padding='same',kernel_initializer='he_normal')(conv1)
conv1 = BatchNormalization()(conv1)
conv1=Activation('relu')(conv1)
conv1 = Conv1D(16, 20, padding='same',kernel_initializer='he_normal')(conv1)
conv1 = BatchNormalization()(conv1)
conv1=Activation('relu')(conv1)
conv1 = Dropout(0.15)(conv1)
conv1 = Conv1D(16, 20, padding='same', kernel_initializer='he_normal')(conv1)
conv1 = BatchNormalization()(conv1)
conv1=Activation('relu')(conv1)
pool1 = MaxPooling1D(pool_size=10)(conv1)
# --- Encoder stage 2: Conv1D(24, k=10) blocks, then 5x pooling.
conv2 = Conv1D(24, 10, padding='same', kernel_initializer='he_normal')(pool1)
conv2 = BatchNormalization()(conv2)
conv2=Activation('relu')(conv2)
conv2 = Conv1D(24, 10,padding='same', kernel_initializer='he_normal')(conv2)
conv2 = BatchNormalization()(conv2)
conv2=Activation('relu')(conv2)
conv2 = Dropout(0.15)(conv2)
conv2 = Conv1D(24, 10,padding='same', kernel_initializer='he_normal')(conv2)
conv2 = BatchNormalization()(conv2)
conv2=Activation('relu')(conv2)
pool2 = MaxPooling1D(pool_size=5)(conv2)
# --- Encoder stage 3: Conv1D(48, k=5) blocks, then 2x pooling.
conv3 = Conv1D(48, 5, padding='same', kernel_initializer='he_normal')(pool2)
conv3 = BatchNormalization()(conv3)
conv3 =Activation('relu')(conv3)
conv3 = Conv1D(48, 5, padding='same', kernel_initializer='he_normal')(conv3)
conv3 = BatchNormalization()(conv3)
conv3=Activation('relu')(conv3)
conv3 = Dropout(0.15)(conv3)
conv3 = Conv1D(48, 5, padding='same', kernel_initializer='he_normal')(conv3)
conv3 = BatchNormalization()(conv3)
conv3=Activation('relu')(conv3)
pool3 = MaxPooling1D(pool_size=2)(conv3)
# --- Bottleneck: two parallel Conv1D(96) branches — plain (conv4_1) and
# dilated with rate 10 (conv4_2) — plus their difference (conv4_3),
# concatenated channel-wise.
conv4_1 = Conv1D(96, 5, padding='same', kernel_initializer='he_normal')(pool3)
conv4_1 = BatchNormalization()(conv4_1)
conv4_1=Activation('relu')(conv4_1)
conv4_1 = Dropout(0.15)(conv4_1)
conv4_1 = Conv1D(96, 5, padding='same', kernel_initializer='he_normal')(conv4_1)
conv4_1 = BatchNormalization()(conv4_1)
conv4_1=Activation('relu')(conv4_1)
conv4_2 = Conv1D(96, 5, padding='same',dilation_rate=10, kernel_initializer='he_normal')(pool3)
conv4_2 = BatchNormalization()(conv4_2)
conv4_2=Activation('relu')(conv4_2)
conv4_2 = Dropout(0.15)(conv4_2)
conv4_2 = Conv1D(96, 5, padding='same',dilation_rate=10, kernel_initializer='he_normal')(conv4_2)
conv4_2 = BatchNormalization()(conv4_2)
conv4_2=Activation('relu')(conv4_2)
conv4_3 = keras.layers.Subtract()([conv4_1, conv4_2])
conv4=concatenate([conv4_1,conv4_2,conv4_3], axis=-1)
# --- Decoder stage 1: 2x upsample, skip-connect conv3, Conv1D(48) blocks.
temp1=UpSampling1D(size=2)(conv4)
merge1 = concatenate([temp1, conv3], axis=-1)
conv5 = Conv1D(48, 5, padding='same', kernel_initializer='he_normal')(merge1)
conv5 = BatchNormalization()(conv5)
conv5=Activation('relu')(conv5)
conv5 = Dropout(0.15)(conv5)
conv5 = Conv1D(48, 5,padding='same', kernel_initializer='he_normal')(conv5)
conv5 = BatchNormalization()(conv5)
conv5=Activation('relu')(conv5)
conv5 = Dropout(0.15)(conv5)
conv5 = Conv1D(48, 5,padding='same', kernel_initializer='he_normal')(conv5)
conv5 = BatchNormalization()(conv5)
conv5=Activation('relu')(conv5)
# --- Decoder stage 2: 5x upsample, skip-connect conv2, Conv1D(24) blocks.
temp2=UpSampling1D(size=5)(conv5)
merge2 = concatenate([temp2, conv2], axis=-1)
conv6 = Conv1D(24, 10, padding='same', kernel_initializer = 'he_normal')(merge2)
conv6 = BatchNormalization()(conv6)
conv6=Activation('relu')(conv6)
conv6 = Dropout(0.15)(conv6)
conv6 = Conv1D(24, 10, padding='same', kernel_initializer = 'he_normal')(conv6)
conv6 = BatchNormalization()(conv6)
conv6=Activation('relu')(conv6)
conv6 = Dropout(0.15)(conv6)
conv6 = Conv1D(24, 10, padding='same', kernel_initializer = 'he_normal')(conv6)
conv6 = BatchNormalization()(conv6)
conv6=Activation('relu')(conv6)
# --- Decoder stage 3: 10x upsample back to 5000 samples, skip-connect
# conv1, Conv1D(16) blocks.
temp3=UpSampling1D(size=10)(conv6)
merge3 = concatenate([temp3, conv1], axis=-1)
conv7 = Conv1D(16, 20,padding='same', kernel_initializer='he_normal')(merge3)
conv7 = BatchNormalization()(conv7)
conv7=Activation('relu')(conv7)
conv7 = Dropout(0.15)(conv7)
conv7 = Conv1D(16, 20, padding='same', kernel_initializer='he_normal')(conv7)
conv7 = BatchNormalization()(conv7)
conv7=Activation('relu')(conv7)
conv7 = Dropout(0.15)(conv7)
conv7 = Conv1D(16, 20, padding='same', kernel_initializer='he_normal')(conv7)
conv7 = BatchNormalization()(conv7)
conv7=Activation('relu')(conv7)
# --- Head: 1x1 conv to one channel, per-sample sigmoid.
conv8 = Conv1D(1, 1,padding='same', kernel_initializer='he_normal')(conv7,)
conv8 = Reshape((5000, 1))(conv8)
conv9 = Activation('sigmoid',name='output')(conv8)
model = Model(inputs=inputs, outputs=conv9)
# Checkpoint keeps the best val_acc weights; only takes effect if the
# (commented-out) model.fit call below is enabled.
checkpoint= keras.callbacks.ModelCheckpoint('jiangnastyle_10_7.h5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')
adam=keras.optimizers.Adam(lr=0.001,beta_1=0.9, beta_2=0.999,epsilon=1e-08,clipvalue=0.5)
model.compile(loss='binary_crossentropy', optimizer=adam,metrics=['accuracy'])
# model.fit({'inputs':train_x}, {'output':train_y}, validation_data=(test_x,test_y),epochs=10000,batch_size=256,callbacks=[checkpoint])
| [
"keras.layers.Activation",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropout",
"pywt.dwt",
"keras.optimizers.Adam",
"keras.models.Model",
"keras.layers.Conv1D",
"keras.layers.UpSampling1D",
"keras.layers.BatchNormalization",
"keras.layers.MaxPooling1D",
"pywt.waverec",
"numpy.array",
... | [((3442, 3474), 'keras.layers.Input', 'Input', (['(5000, 14)'], {'name': '"""inputs"""'}), "((5000, 14), name='inputs')\n", (3447, 3474), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5820, 5869), 'keras.layers.concatenate', 'concatenate', (['[conv4_1, conv4_2, conv4_3]'], {'axis': '(-1)'}), '([conv4_1, conv4_2, conv4_3], axis=-1)\n', (5831, 5869), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5913, 5949), 'keras.layers.concatenate', 'concatenate', (['[temp1, conv3]'], {'axis': '(-1)'}), '([temp1, conv3], axis=-1)\n', (5924, 5949), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6488, 6524), 'keras.layers.concatenate', 'concatenate', (['[temp2, conv2]'], {'axis': '(-1)'}), '([temp2, conv2], axis=-1)\n', (6499, 6524), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7074, 
7110), 'keras.layers.concatenate', 'concatenate', (['[temp3, conv1]'], {'axis': '(-1)'}), '([temp3, conv1], axis=-1)\n', (7085, 7110), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7778, 7813), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'conv9'}), '(inputs=inputs, outputs=conv9)\n', (7783, 7813), False, 'from keras.models import Model, load_model\n'), ((7827, 7949), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['"""jiangnastyle_10_7.h5"""'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "('jiangnastyle_10_7.h5', monitor='val_acc',\n verbose=1, save_best_only=True, mode='max')\n", (7858, 7949), False, 'import keras\n'), ((7952, 8043), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'clipvalue': '(0.5)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08,\n clipvalue=0.5)\n', (7973, 8043), False, 'import keras\n'), ((1330, 1345), 'numpy.array', 'np.array', (['rec_a'], {}), '(rec_a)\n', (1338, 1345), True, 'import numpy as np\n'), ((1356, 1371), 'numpy.array', 'np.array', (['rec_d'], {}), '(rec_d)\n', (1364, 1371), True, 'import numpy as np\n'), ((1380, 1410), 'numpy.concatenate', 'np.concatenate', (['(rec_a, rec_d)'], {}), '((rec_a, rec_d))\n', (1394, 1410), True, 'import numpy as np\n'), ((1601, 1618), 'numpy.array', 'np.array', (['rec_all'], {}), '(rec_all)\n', (1609, 1618), True, 'import numpy as np\n'), ((3482, 3502), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3500, 3502), False, 'from keras.layers import SimpleRNN, Permute, Reshape, 
CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3519, 3581), 'keras.layers.Conv1D', 'Conv1D', (['(16)', '(20)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(16, 20, padding='same', kernel_initializer='he_normal')\n", (3525, 3581), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3596, 3616), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3614, 3616), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3630, 3648), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3640, 3648), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3664, 3726), 'keras.layers.Conv1D', 'Conv1D', (['(16)', '(20)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(16, 20, padding='same', kernel_initializer='he_normal')\n", (3670, 3726), False, 'from keras.layers import 
SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3741, 3761), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3759, 3761), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3775, 3793), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3785, 3793), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3809, 3822), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (3816, 3822), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3838, 3900), 'keras.layers.Conv1D', 'Conv1D', (['(16)', '(20)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(16, 20, padding='same', kernel_initializer='he_normal')\n", (3844, 3900), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, 
ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3916, 3936), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3934, 3936), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3950, 3968), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3960, 3968), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((3984, 4010), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(10)'}), '(pool_size=10)\n', (3996, 4010), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4027, 4089), 'keras.layers.Conv1D', 'Conv1D', (['(24)', '(10)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(24, 10, padding='same', kernel_initializer='he_normal')\n", (4033, 4089), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, 
AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4105, 4125), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4123, 4125), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4139, 4157), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4149, 4157), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4173, 4235), 'keras.layers.Conv1D', 'Conv1D', (['(24)', '(10)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(24, 10, padding='same', kernel_initializer='he_normal')\n", (4179, 4235), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4250, 4270), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4268, 4270), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, 
AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4284, 4302), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4294, 4302), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4318, 4331), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (4325, 4331), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4347, 4409), 'keras.layers.Conv1D', 'Conv1D', (['(24)', '(10)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(24, 10, padding='same', kernel_initializer='he_normal')\n", (4353, 4409), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4424, 4444), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4442, 4444), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, 
Conv2DTranspose\n'), ((4458, 4476), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4468, 4476), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4492, 4517), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(5)'}), '(pool_size=5)\n', (4504, 4517), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4534, 4595), 'keras.layers.Conv1D', 'Conv1D', (['(48)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(48, 5, padding='same', kernel_initializer='he_normal')\n", (4540, 4595), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4611, 4631), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4629, 4631), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4646, 4664), 'keras.layers.Activation', 
'Activation', (['"""relu"""'], {}), "('relu')\n", (4656, 4664), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4680, 4741), 'keras.layers.Conv1D', 'Conv1D', (['(48)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(48, 5, padding='same', kernel_initializer='he_normal')\n", (4686, 4741), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4757, 4777), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4775, 4777), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4791, 4809), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4801, 4809), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4825, 4838), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (4832, 4838), False, 'from keras.layers 
import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4854, 4915), 'keras.layers.Conv1D', 'Conv1D', (['(48)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(48, 5, padding='same', kernel_initializer='he_normal')\n", (4860, 4915), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4931, 4951), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4949, 4951), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4965, 4983), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4975, 4983), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((4999, 5024), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (5011, 5024), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, 
Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5044, 5105), 'keras.layers.Conv1D', 'Conv1D', (['(96)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(96, 5, padding='same', kernel_initializer='he_normal')\n", (5050, 5105), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5123, 5143), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5141, 5143), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5161, 5179), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5171, 5179), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5199, 5212), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (5206, 5212), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, 
AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5232, 5293), 'keras.layers.Conv1D', 'Conv1D', (['(96)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(96, 5, padding='same', kernel_initializer='he_normal')\n", (5238, 5293), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5313, 5333), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5331, 5333), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5351, 5369), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5361, 5369), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5389, 5468), 'keras.layers.Conv1D', 'Conv1D', (['(96)', '(5)'], {'padding': '"""same"""', 'dilation_rate': '(10)', 'kernel_initializer': '"""he_normal"""'}), "(96, 5, padding='same', dilation_rate=10, kernel_initializer='he_normal')\n", (5395, 5468), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, 
Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5485, 5505), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5503, 5505), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5523, 5541), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5533, 5541), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5561, 5574), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (5568, 5574), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5594, 5673), 'keras.layers.Conv1D', 'Conv1D', (['(96)', '(5)'], {'padding': '"""same"""', 'dilation_rate': '(10)', 'kernel_initializer': '"""he_normal"""'}), "(96, 5, padding='same', dilation_rate=10, kernel_initializer='he_normal')\n", (5600, 5673), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, 
Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5692, 5712), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5710, 5712), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5730, 5748), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5740, 5748), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5768, 5791), 'keras.layers.Subtract', 'keras.layers.Subtract', ([], {}), '()\n', (5789, 5791), False, 'import keras\n'), ((5876, 5896), 'keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(2)'}), '(size=2)\n', (5888, 5896), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((5958, 6019), 'keras.layers.Conv1D', 'Conv1D', (['(48)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(48, 5, padding='same', kernel_initializer='he_normal')\n", (5964, 6019), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, 
Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6036, 6056), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6054, 6056), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6070, 6088), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6080, 6088), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6104, 6117), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (6111, 6117), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6133, 6194), 'keras.layers.Conv1D', 'Conv1D', (['(48)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(48, 5, padding='same', kernel_initializer='he_normal')\n", (6139, 6194), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, 
MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6210, 6230), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6228, 6230), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6244, 6262), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6254, 6262), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6278, 6291), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (6285, 6291), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6307, 6368), 'keras.layers.Conv1D', 'Conv1D', (['(48)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(48, 5, padding='same', kernel_initializer='he_normal')\n", (6313, 6368), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, 
Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6383, 6403), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6401, 6403), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6417, 6435), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6427, 6435), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6451, 6471), 'keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(5)'}), '(size=5)\n', (6463, 6471), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6533, 6595), 'keras.layers.Conv1D', 'Conv1D', (['(24)', '(10)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(24, 10, padding='same', kernel_initializer='he_normal')\n", (6539, 6595), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6614, 6634), 
'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6632, 6634), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6648, 6666), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6658, 6666), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6682, 6695), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (6689, 6695), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6711, 6773), 'keras.layers.Conv1D', 'Conv1D', (['(24)', '(10)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(24, 10, padding='same', kernel_initializer='he_normal')\n", (6717, 6773), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6791, 6811), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6809, 6811), 
False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6825, 6843), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6835, 6843), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6859, 6872), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (6866, 6872), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6888, 6950), 'keras.layers.Conv1D', 'Conv1D', (['(24)', '(10)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(24, 10, padding='same', kernel_initializer='he_normal')\n", (6894, 6950), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((6968, 6988), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6986, 6988), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, 
Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7002, 7020), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7012, 7020), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7036, 7057), 'keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(10)'}), '(size=10)\n', (7048, 7057), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7119, 7181), 'keras.layers.Conv1D', 'Conv1D', (['(16)', '(20)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(16, 20, padding='same', kernel_initializer='he_normal')\n", (7125, 7181), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7197, 7217), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7215, 7217), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, 
Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7231, 7249), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7241, 7249), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7265, 7278), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (7272, 7278), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7294, 7356), 'keras.layers.Conv1D', 'Conv1D', (['(16)', '(20)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(16, 20, padding='same', kernel_initializer='he_normal')\n", (7300, 7356), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7372, 7392), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7390, 7392), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, 
AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7406, 7424), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7416, 7424), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7440, 7453), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (7447, 7453), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7469, 7531), 'keras.layers.Conv1D', 'Conv1D', (['(16)', '(20)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(16, 20, padding='same', kernel_initializer='he_normal')\n", (7475, 7531), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7547, 7567), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7565, 7567), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, 
Conv2DTranspose\n'), ((7581, 7599), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7591, 7599), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7616, 7676), 'keras.layers.Conv1D', 'Conv1D', (['(1)', '(1)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(1, 1, padding='same', kernel_initializer='he_normal')\n", (7622, 7676), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7692, 7710), 'keras.layers.Reshape', 'Reshape', (['(5000, 1)'], {}), '((5000, 1))\n', (7699, 7710), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((7726, 7762), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {'name': '"""output"""'}), "('sigmoid', name='output')\n", (7736, 7762), False, 'from keras.layers import SimpleRNN, Permute, Reshape, CuDNNLSTM, LSTM, Dropout, Input, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, AveragePooling1D, UpSampling1D, concatenate, Permute, SeparableConv1D, LeakyReLU, Conv2DTranspose\n'), ((927, 947), 
'pywt.dwt', 'pywt.dwt', (['a', 'w', 'mode'], {}), '(a, w, mode)\n', (935, 947), False, 'import pywt\n'), ((1150, 1177), 'pywt.waverec', 'pywt.waverec', (['coeff_list', 'w'], {}), '(coeff_list, w)\n', (1162, 1177), False, 'import pywt\n'), ((1286, 1313), 'pywt.waverec', 'pywt.waverec', (['coeff_list', 'w'], {}), '(coeff_list, w)\n', (1298, 1313), False, 'import pywt\n')] |
#!/usr/bin/env python3
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
# Keyboard control for CARLA. Please refer to client_example.py for a simpler
# and more documented example.
"""
Welcome to CARLA manual control.
Use ARROWS or WASD keys for control.
W : throttle
S : brake
AD : steer
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
R : restart level
STARTING in a moment...
"""
import argparse
import cv2
import logging
import random
import time
import math
import colorsys
import os
try:
import pygame
except ImportError:
raise RuntimeError(
'cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
from numpy.linalg import pinv, inv
except ImportError:
raise RuntimeError(
'cannot import numpy, make sure numpy package is installed')
from carla import image_converter
from carla.client import make_carla_client, VehicleControl
from carla.planner.map import CarlaMap
from carla.tcp import TCPConnectionError
from carla.transform import Transform, Scale
from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians
from datadescriptor import KittiDescriptor
from dataexport import *
from bounding_box import create_kitti_datapoint
from carla_utils import KeyboardHelper, MeasurementsDisplayHelper
from constants import *
from settings import make_carla_settings
import lidar_utils # from lidar_utils import project_point_cloud
import time
from math import cos, sin
""" OUTPUT FOLDER GENERATION """
PHASE = "training"
OUTPUT_FOLDER = os.path.join("_out", PHASE)
folders = ['calib', 'image_2', 'label_2', 'velodyne', 'planes']
def maybe_create_dir(path):
    """Create directory *path* if it does not already exist.

    Bug fix: the original body tested and created the module-level
    ``directory`` variable instead of the ``path`` argument, so the
    parameter was silently ignored (it only worked because the caller's
    loop happened to assign ``directory`` first).
    """
    if not os.path.exists(path):
        os.makedirs(path)
# Create the full output directory tree up front so later writes cannot fail.
for folder in folders:
    directory = os.path.join(OUTPUT_FOLDER, folder)
    maybe_create_dir(directory)
""" DATA SAVE PATHS """
GROUNDPLANE_PATH = os.path.join(OUTPUT_FOLDER, 'planes/{0:06}.txt')
LIDAR_PATH = os.path.join(OUTPUT_FOLDER, 'velodyne/{0:06}.bin')
LABEL_PATH = os.path.join(OUTPUT_FOLDER, 'label_2/{0:06}.txt')
IMAGE_PATH = os.path.join(OUTPUT_FOLDER, 'image_2/{0:06}.png')
CALIBRATION_PATH = os.path.join(OUTPUT_FOLDER, 'calib/{0:06}.txt')
class CarlaGame(object):
    def __init__(self, carla_client, args):
        """Initialize the game state for a CARLA data-capture session.

        Args:
            carla_client: Connected CARLA client used to run the episode.
            args: Parsed command-line arguments (autopilot flag, map name, ...).
        """
        self.client = carla_client
        # Simulator settings plus camera intrinsics and the camera/lidar
        # extrinsic transforms derived from them.
        self._carla_settings, self._intrinsic, self._camera_to_car_transform, self._lidar_to_car_transform = make_carla_settings(
            args)
        self._timer = None
        self._display = None
        self._main_image = None
        self._mini_view_image1 = None
        self._mini_view_image2 = None
        self._enable_autopilot = args.autopilot
        self._lidar_measurement = None
        self._map_view = None
        self._is_on_reverse = False
        self._city_name = args.map_name
        # Map data is only loaded when a map name was supplied on the CLI.
        self._map = CarlaMap(self._city_name, 16.43,
                             50.0) if self._city_name is not None else None
        self._map_shape = self._map.map_image.shape if self._city_name is not None else None
        self._map_view = self._map.get_map(
            WINDOW_HEIGHT) if self._city_name is not None else None
        self._position = None
        self._agent_positions = None
        # Resume frame numbering from any dataset already on disk.
        self.captured_frame_no = self.current_captured_frame_num()
        self._measurements = None
        self._extrinsic = None
        # To keep track of how far the car has driven since the last capture of data
        self._agent_location_on_last_capture = None
        self._frames_since_last_capture = 0
        # How many frames we have captured since reset
        self._captured_frames_since_restart = 0
def current_captured_frame_num(self):
# Figures out which frame number we currently are on
# This is run once, when we start the simulator in case we already have a dataset.
# The user can then choose to overwrite or append to the dataset.
label_path = os.path.join(OUTPUT_FOLDER, 'label_2/')
num_existing_data_files = len(
[name for name in os.listdir(label_path) if name.endswith('.txt')])
print(num_existing_data_files)
if num_existing_data_files == 0:
return 0
answer = input(
"There already exists a dataset in {}. Would you like to (O)verwrite or (A)ppend the dataset? (O/A)".format(OUTPUT_FOLDER))
if answer.upper() == "O":
logging.info(
"Resetting frame number to 0 and overwriting existing")
# Overwrite the data
return 0
logging.info("Continuing recording data on frame number {}".format(
num_existing_data_files))
return num_existing_data_files
def execute(self):
"""Launch the PyGame."""
pygame.init()
self._initialize_game()
try:
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
reset = self._on_loop()
if not reset:
self._on_render()
finally:
pygame.quit()
def _initialize_game(self):
if self._city_name is not None:
self._display = pygame.display.set_mode(
(WINDOW_WIDTH + int((WINDOW_HEIGHT /
float(self._map.map_image.shape[0]))*self._map.map_image.shape[1]), WINDOW_HEIGHT),
pygame.HWSURFACE | pygame.DOUBLEBUF)
else:
self._display = pygame.display.set_mode(
(WINDOW_WIDTH, WINDOW_HEIGHT),
pygame.HWSURFACE | pygame.DOUBLEBUF)
logging.debug('pygame started')
self._on_new_episode()
def _on_new_episode(self):
self._carla_settings.randomize_seeds()
self._carla_settings.randomize_weather()
scene = self.client.load_settings(self._carla_settings)
number_of_player_starts = len(scene.player_start_spots)
player_start = np.random.randint(number_of_player_starts)
logging.info('Starting new episode...')
self.client.start_episode(player_start)
self._timer = Timer()
self._is_on_reverse = False
# Reset all tracking variables
self._agent_location_on_last_capture = None
self._frames_since_last_capture = 0
self._captured_frames_since_restart = 0
def _on_loop(self):
self._timer.tick()
measurements, sensor_data = self.client.read_data()
# logging.info("Frame no: {}, = {}".format(self.captured_frame_no,
# (self.captured_frame_no + 1) % NUM_RECORDINGS_BEFORE_RESET))
# Reset the environment if the agent is stuck or can't find any agents or if we have captured enough frames in this one
is_stuck = self._frames_since_last_capture >= NUM_EMPTY_FRAMES_BEFORE_RESET
is_enough_datapoints = (
self._captured_frames_since_restart + 1) % NUM_RECORDINGS_BEFORE_RESET == 0
if (is_stuck or is_enough_datapoints) and GEN_DATA:
logging.warning("Is stucK: {}, is_enough_datapoints: {}".format(
is_stuck, is_enough_datapoints))
self._on_new_episode()
# If we dont sleep, the client will continue to render
return True
# (Extrinsic) Rt Matrix
# (Camera) local 3d to world 3d.
# Get the transform from the player protobuf transformation.
world_transform = Transform(
measurements.player_measurements.transform
)
# Compute the final transformation matrix.
self._extrinsic = world_transform * self._camera_to_car_transform
self._measurements = measurements
self._last_player_location = measurements.player_measurements.transform.location
self._main_image = sensor_data.get('CameraRGB', None)
self._lidar_measurement = sensor_data.get('Lidar32', None)
self._depth_image = sensor_data.get('DepthCamera', None)
# Print measurements every second.
if self._timer.elapsed_seconds_since_lap() > 1.0:
if self._city_name is not None:
# Function to get car position on map.
map_position = self._map.convert_to_pixel([
measurements.player_measurements.transform.location.x,
measurements.player_measurements.transform.location.y,
measurements.player_measurements.transform.location.z])
# Function to get orientation of the road car is in.
lane_orientation = self._map.get_lane_orientation([
measurements.player_measurements.transform.location.x,
measurements.player_measurements.transform.location.y,
measurements.player_measurements.transform.location.z])
MeasurementsDisplayHelper.print_player_measurements_map(
measurements.player_measurements,
map_position,
lane_orientation, self._timer)
else:
MeasurementsDisplayHelper.print_player_measurements(
measurements.player_measurements, self._timer)
# Plot position on the map as well.
self._timer.lap()
control = self._get_keyboard_control(pygame.key.get_pressed())
# Set the player position
if self._city_name is not None:
self._position = self._map.convert_to_pixel([
measurements.player_measurements.transform.location.x,
measurements.player_measurements.transform.location.y,
measurements.player_measurements.transform.location.z])
self._agent_positions = measurements.non_player_agents
if control is None:
self._on_new_episode()
elif self._enable_autopilot:
self.client.send_control(
measurements.player_measurements.autopilot_control)
else:
self.client.send_control(control)
def _get_keyboard_control(self, keys):
"""
Return a VehicleControl message based on the pressed keys. Return None
if a new episode was requested.
"""
control = KeyboardHelper.get_keyboard_control(
keys, self._is_on_reverse, self._enable_autopilot)
if control is not None:
control, self._is_on_reverse, self._enable_autopilot = control
return control
def _on_render(self):
datapoints = []
if self._main_image is not None and self._depth_image is not None:
# Convert main image
image = image_converter.to_rgb_array(self._main_image)
# Retrieve and draw datapoints
image, datapoints = self._generate_datapoints(image)
# Draw lidar
# Camera coordinate system is left, up, forwards
if VISUALIZE_LIDAR:
# Calculation to shift bboxes relative to pitch and roll of player
rotation = self._measurements.player_measurements.transform.rotation
pitch, roll, yaw = rotation.pitch, rotation.roll, rotation.yaw
# Since measurements are in degrees, convert to radians
pitch = degrees_to_radians(pitch)
roll = degrees_to_radians(roll)
yaw = degrees_to_radians(yaw)
print('pitch: ', pitch)
print('roll: ', roll)
print('yaw: ', yaw)
# Rotation matrix for pitch
rotP = np.array([[cos(pitch), 0, sin(pitch)],
[0, 1, 0],
[-sin(pitch), 0, cos(pitch)]])
# Rotation matrix for roll
rotR = np.array([[1, 0, 0],
[0, cos(roll), -sin(roll)],
[0, sin(roll), cos(roll)]])
# combined rotation matrix, must be in order roll, pitch, yaw
rotRP = np.matmul(rotR, rotP)
# Take the points from the point cloud and transform to car space
point_cloud = np.array(self._lidar_to_car_transform.transform_points(
self._lidar_measurement.data))
point_cloud[:, 2] -= LIDAR_HEIGHT_POS
point_cloud = np.matmul(rotRP, point_cloud.T).T
# print(self._lidar_to_car_transform.matrix)
# print(self._camera_to_car_transform.matrix)
# Transform to camera space by the inverse of camera_to_car transform
point_cloud_cam = self._camera_to_car_transform.inverse().transform_points(point_cloud)
point_cloud_cam[:, 1] += LIDAR_HEIGHT_POS
image = lidar_utils.project_point_cloud(
image, point_cloud_cam, self._intrinsic, 1)
# Display image
surface = pygame.surfarray.make_surface(image.swapaxes(0, 1))
self._display.blit(surface, (0, 0))
if self._map_view is not None:
self._display_agents(self._map_view)
pygame.display.flip()
# Determine whether to save files
distance_driven = self._distance_since_last_recording()
#print("Distance driven since last recording: {}".format(distance_driven))
has_driven_long_enough = distance_driven is None or distance_driven > DISTANCE_SINCE_LAST_RECORDING
if (self._timer.step + 1) % STEPS_BETWEEN_RECORDINGS == 0:
if has_driven_long_enough and datapoints:
# Avoid doing this twice or unnecessarily often
if not VISUALIZE_LIDAR:
# Calculation to shift bboxes relative to pitch and roll of player
rotation = self._measurements.player_measurements.transform.rotation
pitch, roll, yaw = rotation.pitch, rotation.roll, rotation.yaw
# Since measurements are in degrees, convert to radians
pitch = degrees_to_radians(pitch)
roll = degrees_to_radians(roll)
yaw = degrees_to_radians(yaw)
print('pitch: ', pitch)
print('roll: ', roll)
print('yaw: ', yaw)
# Rotation matrix for pitch
rotP = np.array([[cos(pitch), 0, sin(pitch)],
[0, 1, 0],
[-sin(pitch), 0, cos(pitch)]])
# Rotation matrix for roll
rotR = np.array([[1, 0, 0],
[0, cos(
roll), -sin(roll)],
[0, sin(roll), cos(roll)]])
# combined rotation matrix, must be in order roll, pitch, yaw
rotRP = np.matmul(rotR, rotP)
# Take the points from the point cloud and transform to car space
point_cloud = np.array(self._lidar_to_car_transform.transform_points(
self._lidar_measurement.data))
point_cloud[:, 2] -= LIDAR_HEIGHT_POS
point_cloud = np.matmul(rotRP, point_cloud.T).T
self._update_agent_location()
# Save screen, lidar and kitti training labels together with calibration and groundplane files
self._save_training_files(datapoints, point_cloud)
self.captured_frame_no += 1
self._captured_frames_since_restart += 1
self._frames_since_last_capture = 0
else:
logging.debug("Could save datapoint, but agent has not driven {} meters since last recording (Currently {} meters)".format(
DISTANCE_SINCE_LAST_RECORDING, distance_driven))
else:
self._frames_since_last_capture += 1
logging.debug(
"Could not save training data - no visible agents of selected classes in scene")
def _distance_since_last_recording(self):
if self._agent_location_on_last_capture is None:
return None
cur_pos = vector3d_to_array(
self._measurements.player_measurements.transform.location)
last_pos = vector3d_to_array(self._agent_location_on_last_capture)
def dist_func(x, y): return sum((x - y)**2)
return dist_func(cur_pos, last_pos)
def _update_agent_location(self):
self._agent_location_on_last_capture = self._measurements.player_measurements.transform.location
def _generate_datapoints(self, image):
""" Returns a list of datapoints (labels and such) that are generated this frame together with the main image image """
datapoints = []
image = image.copy()
# Remove this
rotRP = np.identity(3)
# Stores all datapoints for the current frames
for agent in self._measurements.non_player_agents:
if should_detect_class(agent) and GEN_DATA:
image, kitti_datapoint = create_kitti_datapoint(
agent, self._intrinsic, self._extrinsic.matrix, image, self._depth_image, self._measurements.player_measurements, rotRP)
if kitti_datapoint:
datapoints.append(kitti_datapoint)
return image, datapoints
def _save_training_files(self, datapoints, point_cloud):
logging.info("Attempting to save at timer step {}, frame no: {}".format(
self._timer.step, self.captured_frame_no))
groundplane_fname = GROUNDPLANE_PATH.format(self.captured_frame_no)
lidar_fname = LIDAR_PATH.format(self.captured_frame_no)
kitti_fname = LABEL_PATH.format(self.captured_frame_no)
img_fname = IMAGE_PATH.format(self.captured_frame_no)
calib_filename = CALIBRATION_PATH.format(self.captured_frame_no)
save_groundplanes(
groundplane_fname, self._measurements.player_measurements, LIDAR_HEIGHT_POS)
save_ref_files(OUTPUT_FOLDER, self.captured_frame_no)
save_image_data(
img_fname, image_converter.to_rgb_array(self._main_image))
save_kitti_data(kitti_fname, datapoints)
save_lidar_data(lidar_fname, point_cloud,
LIDAR_HEIGHT_POS, LIDAR_DATA_FORMAT)
save_calibration_matrices(
calib_filename, self._intrinsic, self._extrinsic)
def _display_agents(self, map_view):
image = image[:, :, :3]
new_window_width = (float(WINDOW_HEIGHT) / float(self._map_shape[0])) * \
float(self._map_shape[1])
surface = pygame.surfarray.make_surface(image.swapaxes(0, 1))
w_pos = int(
self._position[0]*(float(WINDOW_HEIGHT)/float(self._map_shape[0])))
h_pos = int(self._position[1] *
(new_window_width/float(self._map_shape[1])))
pygame.draw.circle(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0)
for agent in self._agent_positions:
if agent.HasField('vehicle'):
agent_position = self._map.convert_to_pixel([
agent.vehicle.transform.location.x,
agent.vehicle.transform.location.y,
agent.vehicle.transform.location.z])
w_pos = int(
agent_position[0]*(float(WINDOW_HEIGHT)/float(self._map_shape[0])))
h_pos = int(
agent_position[1] * (new_window_width/float(self._map_shape[1])))
pygame.draw.circle(
surface, [255, 0, 255, 255], (w_pos, h_pos), 4, 0)
self._display.blit(surface, (WINDOW_WIDTH, 0))
def should_detect_class(agent):
    """Return True if *agent* belongs to one of the classes we want to label.

    Note that Carla has class types in lowercase, hence the .lower() call.
    Rewritten with any() to short-circuit instead of materializing a list of
    HasField results and testing ``True in [...]``.
    """
    return any(agent.HasField(class_type.lower()) for class_type in CLASSES_TO_LABEL)
def parse_args():
    """Define the command line interface of the data generator and return
    the parsed options."""
    parser = argparse.ArgumentParser(
        description='CARLA Manual Control Client')
    # (flags, keyword arguments) specification for every supported option.
    option_specs = [
        (('-v', '--verbose'),
         dict(action='store_true', dest='debug',
              help='logging.info debug information')),
        (('--host',),
         dict(metavar='H', default='localhost',
              help='IP of the host server (default: localhost)')),
        (('-p', '--port'),
         dict(metavar='P', default=2000, type=int,
              help='TCP port to listen to (default: 2000)')),
        (('-a', '--autopilot'),
         dict(action='store_true', help='enable autopilot')),
        (('-l', '--lidar'),
         dict(action='store_true', help='enable Lidar')),
        (('-q', '--quality-level'),
         dict(choices=['Low', 'Epic'], type=lambda s: s.title(),
              default='Epic',
              help='graphics quality level, a lower level makes the simulation run considerably faster.')),
        (('-m', '--map-name'),
         dict(metavar='M', default=None,
              help='plot the map of the current city (needs to match active map in '
                   'server, options: Town01 or Town02)')),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
def main():
    """Entry point: configure logging, then keep reconnecting to the CARLA
    server until one game session completes."""
    args = parse_args()
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=level)
    logging.info('listening to server %s:%s', args.host, args.port)
    logging.info(__doc__)
    finished = False
    while not finished:
        try:
            with make_carla_client(args.host, args.port) as client:
                CarlaGame(client, args).execute()
                finished = True
        except TCPConnectionError as error:
            # Server not up yet (or dropped the connection): retry shortly.
            logging.error(error)
            time.sleep(1)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the generator; exit quietly.
        logging.info('\nCancelled by user. Bye!')
| [
"argparse.ArgumentParser",
"pygame.event.get",
"numpy.random.randint",
"carla.planner.map.CarlaMap",
"os.path.join",
"carla_utils.MeasurementsDisplayHelper.print_player_measurements",
"logging.error",
"carla.image_converter.to_rgb_array",
"pygame.display.set_mode",
"os.path.exists",
"numpy.ident... | [((1817, 1844), 'os.path.join', 'os.path.join', (['"""_out"""', 'PHASE'], {}), "('_out', PHASE)\n", (1829, 1844), False, 'import os\n'), ((2161, 2209), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', '"""planes/{0:06}.txt"""'], {}), "(OUTPUT_FOLDER, 'planes/{0:06}.txt')\n", (2173, 2209), False, 'import os\n'), ((2223, 2273), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', '"""velodyne/{0:06}.bin"""'], {}), "(OUTPUT_FOLDER, 'velodyne/{0:06}.bin')\n", (2235, 2273), False, 'import os\n'), ((2287, 2336), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', '"""label_2/{0:06}.txt"""'], {}), "(OUTPUT_FOLDER, 'label_2/{0:06}.txt')\n", (2299, 2336), False, 'import os\n'), ((2350, 2399), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', '"""image_2/{0:06}.png"""'], {}), "(OUTPUT_FOLDER, 'image_2/{0:06}.png')\n", (2362, 2399), False, 'import os\n'), ((2419, 2466), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', '"""calib/{0:06}.txt"""'], {}), "(OUTPUT_FOLDER, 'calib/{0:06}.txt')\n", (2431, 2466), False, 'import os\n'), ((2049, 2084), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', 'folder'], {}), '(OUTPUT_FOLDER, folder)\n', (2061, 2084), False, 'import os\n'), ((20722, 20788), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CARLA Manual Control Client"""'}), "(description='CARLA Manual Control Client')\n", (20745, 20788), False, 'import argparse\n'), ((22138, 22211), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'log_level'}), "(format='%(levelname)s: %(message)s', level=log_level)\n", (22157, 22211), False, 'import logging\n'), ((22216, 22279), 'logging.info', 'logging.info', (['"""listening to server %s:%s"""', 'args.host', 'args.port'], {}), "('listening to server %s:%s', args.host, args.port)\n", (22228, 22279), False, 'import logging\n'), ((22284, 22305), 'logging.info', 'logging.info', (['__doc__'], {}), '(__doc__)\n', (22296, 22305), False, 'import 
logging\n'), ((1950, 1975), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1964, 1975), False, 'import os\n'), ((1985, 2007), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1996, 2007), False, 'import os\n'), ((2682, 2707), 'settings.make_carla_settings', 'make_carla_settings', (['args'], {}), '(args)\n', (2701, 2707), False, 'from settings import make_carla_settings\n'), ((4185, 4224), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', '"""label_2/"""'], {}), "(OUTPUT_FOLDER, 'label_2/')\n", (4197, 4224), False, 'import os\n'), ((5009, 5022), 'pygame.init', 'pygame.init', ([], {}), '()\n', (5020, 5022), False, 'import pygame\n'), ((5902, 5933), 'logging.debug', 'logging.debug', (['"""pygame started"""'], {}), "('pygame started')\n", (5915, 5933), False, 'import logging\n'), ((6244, 6286), 'numpy.random.randint', 'np.random.randint', (['number_of_player_starts'], {}), '(number_of_player_starts)\n', (6261, 6286), True, 'import numpy as np\n'), ((6295, 6334), 'logging.info', 'logging.info', (['"""Starting new episode..."""'], {}), "('Starting new episode...')\n", (6307, 6334), False, 'import logging\n'), ((6405, 6412), 'utils.Timer', 'Timer', ([], {}), '()\n', (6410, 6412), False, 'from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians\n'), ((7746, 7799), 'carla.transform.Transform', 'Transform', (['measurements.player_measurements.transform'], {}), '(measurements.player_measurements.transform)\n', (7755, 7799), False, 'from carla.transform import Transform, Scale\n'), ((10523, 10614), 'carla_utils.KeyboardHelper.get_keyboard_control', 'KeyboardHelper.get_keyboard_control', (['keys', 'self._is_on_reverse', 'self._enable_autopilot'], {}), '(keys, self._is_on_reverse, self.\n _enable_autopilot)\n', (10558, 10614), False, 'from carla_utils import KeyboardHelper, MeasurementsDisplayHelper\n'), ((16915, 16991), 'utils.vector3d_to_array', 'vector3d_to_array', 
(['self._measurements.player_measurements.transform.location'], {}), '(self._measurements.player_measurements.transform.location)\n', (16932, 16991), False, 'from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians\n'), ((17024, 17079), 'utils.vector3d_to_array', 'vector3d_to_array', (['self._agent_location_on_last_capture'], {}), '(self._agent_location_on_last_capture)\n', (17041, 17079), False, 'from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians\n'), ((17585, 17599), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (17596, 17599), True, 'import numpy as np\n'), ((19649, 19716), 'pygame.draw.circle', 'pygame.draw.circle', (['surface', '[255, 0, 0, 255]', '(w_pos, h_pos)', '(6)', '(0)'], {}), '(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0)\n', (19667, 19716), False, 'import pygame\n'), ((3098, 3136), 'carla.planner.map.CarlaMap', 'CarlaMap', (['self._city_name', '(16.43)', '(50.0)'], {}), '(self._city_name, 16.43, 50.0)\n', (3106, 3136), False, 'from carla.planner.map import CarlaMap\n'), ((4651, 4719), 'logging.info', 'logging.info', (['"""Resetting frame number to 0 and overwriting existing"""'], {}), "('Resetting frame number to 0 and overwriting existing')\n", (4663, 4719), False, 'import logging\n'), ((5359, 5372), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (5370, 5372), False, 'import pygame\n'), ((5768, 5863), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WINDOW_WIDTH, WINDOW_HEIGHT)', '(pygame.HWSURFACE | pygame.DOUBLEBUF)'], {}), '((WINDOW_WIDTH, WINDOW_HEIGHT), pygame.HWSURFACE |\n pygame.DOUBLEBUF)\n', (5791, 5863), False, 'import pygame\n'), ((9612, 9636), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (9634, 9636), False, 'import pygame\n'), ((10933, 10979), 'carla.image_converter.to_rgb_array', 'image_converter.to_rgb_array', (['self._main_image'], {}), '(self._main_image)\n', (10961, 10979), False, 'from carla import image_converter\n'), ((13531, 13552), 
'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (13550, 13552), False, 'import pygame\n'), ((18865, 18911), 'carla.image_converter.to_rgb_array', 'image_converter.to_rgb_array', (['self._main_image'], {}), '(self._main_image)\n', (18893, 18911), False, 'from carla import image_converter\n'), ((22698, 22742), 'logging.info', 'logging.info', (['"""\nCancelled by user. Bye!"""'], {}), '("""\nCancelled by user. Bye!""")\n', (22710, 22742), False, 'import logging\n'), ((5121, 5139), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5137, 5139), False, 'import pygame\n'), ((9138, 9277), 'carla_utils.MeasurementsDisplayHelper.print_player_measurements_map', 'MeasurementsDisplayHelper.print_player_measurements_map', (['measurements.player_measurements', 'map_position', 'lane_orientation', 'self._timer'], {}), '(measurements.\n player_measurements, map_position, lane_orientation, self._timer)\n', (9193, 9277), False, 'from carla_utils import KeyboardHelper, MeasurementsDisplayHelper\n'), ((9368, 9471), 'carla_utils.MeasurementsDisplayHelper.print_player_measurements', 'MeasurementsDisplayHelper.print_player_measurements', (['measurements.player_measurements', 'self._timer'], {}), '(measurements.\n player_measurements, self._timer)\n', (9419, 9471), False, 'from carla_utils import KeyboardHelper, MeasurementsDisplayHelper\n'), ((11552, 11577), 'utils.degrees_to_radians', 'degrees_to_radians', (['pitch'], {}), '(pitch)\n', (11570, 11577), False, 'from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians\n'), ((11601, 11625), 'utils.degrees_to_radians', 'degrees_to_radians', (['roll'], {}), '(roll)\n', (11619, 11625), False, 'from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians\n'), ((11648, 11671), 'utils.degrees_to_radians', 'degrees_to_radians', (['yaw'], {}), '(yaw)\n', (11666, 11671), False, 'from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians\n'), ((12421, 12442), 'numpy.matmul', 
'np.matmul', (['rotR', 'rotP'], {}), '(rotR, rotP)\n', (12430, 12442), True, 'import numpy as np\n'), ((13175, 13250), 'lidar_utils.project_point_cloud', 'lidar_utils.project_point_cloud', (['image', 'point_cloud_cam', 'self._intrinsic', '(1)'], {}), '(image, point_cloud_cam, self._intrinsic, 1)\n', (13206, 13250), False, 'import lidar_utils\n'), ((16653, 16757), 'logging.debug', 'logging.debug', (['"""Could not save training data - no visible agents of selected classes in scene"""'], {}), "(\n 'Could not save training data - no visible agents of selected classes in scene'\n )\n", (16666, 16757), False, 'import logging\n'), ((17811, 17958), 'bounding_box.create_kitti_datapoint', 'create_kitti_datapoint', (['agent', 'self._intrinsic', 'self._extrinsic.matrix', 'image', 'self._depth_image', 'self._measurements.player_measurements', 'rotRP'], {}), '(agent, self._intrinsic, self._extrinsic.matrix,\n image, self._depth_image, self._measurements.player_measurements, rotRP)\n', (17833, 17958), False, 'from bounding_box import create_kitti_datapoint\n'), ((20282, 20351), 'pygame.draw.circle', 'pygame.draw.circle', (['surface', '[255, 0, 255, 255]', '(w_pos, h_pos)', '(4)', '(0)'], {}), '(surface, [255, 0, 255, 255], (w_pos, h_pos), 4, 0)\n', (20300, 20351), False, 'import pygame\n'), ((22353, 22392), 'carla.client.make_carla_client', 'make_carla_client', (['args.host', 'args.port'], {}), '(args.host, args.port)\n', (22370, 22392), False, 'from carla.client import make_carla_client, VehicleControl\n'), ((22560, 22580), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (22573, 22580), False, 'import logging\n'), ((22593, 22606), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (22603, 22606), False, 'import time\n'), ((4294, 4316), 'os.listdir', 'os.listdir', (['label_path'], {}), '(label_path)\n', (4304, 4316), False, 'import os\n'), ((12746, 12777), 'numpy.matmul', 'np.matmul', (['rotRP', 'point_cloud.T'], {}), '(rotRP, point_cloud.T)\n', (12755, 12777), 
True, 'import numpy as np\n'), ((14492, 14517), 'utils.degrees_to_radians', 'degrees_to_radians', (['pitch'], {}), '(pitch)\n', (14510, 14517), False, 'from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians\n'), ((14549, 14573), 'utils.degrees_to_radians', 'degrees_to_radians', (['roll'], {}), '(roll)\n', (14567, 14573), False, 'from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians\n'), ((14604, 14627), 'utils.degrees_to_radians', 'degrees_to_radians', (['yaw'], {}), '(yaw)\n', (14622, 14627), False, 'from utils import Timer, rand_color, vector3d_to_array, degrees_to_radians\n'), ((15527, 15548), 'numpy.matmul', 'np.matmul', (['rotR', 'rotP'], {}), '(rotR, rotP)\n', (15536, 15548), True, 'import numpy as np\n'), ((11865, 11875), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (11868, 11875), False, 'from math import cos, sin\n'), ((11904, 11914), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (11907, 11914), False, 'from math import cos, sin\n'), ((12041, 12051), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (12044, 12051), False, 'from math import cos, sin\n'), ((12214, 12223), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (12217, 12223), False, 'from math import cos, sin\n'), ((12290, 12299), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (12293, 12299), False, 'from math import cos, sin\n'), ((12305, 12314), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (12308, 12314), False, 'from math import cos, sin\n'), ((15892, 15923), 'numpy.matmul', 'np.matmul', (['rotRP', 'point_cloud.T'], {}), '(rotRP, point_cloud.T)\n', (15901, 15923), True, 'import numpy as np\n'), ((12011, 12021), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (12014, 12021), False, 'from math import cos, sin\n'), ((12230, 12239), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (12233, 12239), False, 'from math import cos, sin\n'), ((14861, 14871), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (14864, 14871), False, 'from math import cos, sin\n'), 
((14900, 14910), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (14903, 14910), False, 'from math import cos, sin\n'), ((15053, 15063), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (15056, 15063), False, 'from math import cos, sin\n'), ((15250, 15259), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (15253, 15259), False, 'from math import cos, sin\n'), ((15380, 15389), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (15383, 15389), False, 'from math import cos, sin\n'), ((15395, 15404), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (15398, 15404), False, 'from math import cos, sin\n'), ((15023, 15033), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (15026, 15033), False, 'from math import cos, sin\n'), ((15312, 15321), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (15315, 15321), False, 'from math import cos, sin\n')] |
import math
from datetime import timedelta
import numpy as np
from scipy.signal import convolve
from scipy.signal import cspline1d
from scipy.signal import savgol_filter
from pandas import DataFrame
from utils.SettingsReader import SettingsReader
from utils import Utils
class MultiData():
"""Basic data structure to hold various data channels.
Includes some helper methods to select different data channels and filter by timestamp.
"""
def __init__(self, main):
self.main = main
self.settingsReader = main.settingsReader
self.multiData = {}
self.multiData['availColumns'] = {}
self.multiData['samples'] = {}
self.multiData['messages'] = {}
#----
self.multiData['fixation'] = {}
self.multiData['saccade'] = {}
self.multiData['pursuit'] = {}
#----
self.multiData['gaze'] = {}
self.empty = True
    def genChannelIds(self, channel:str) -> tuple:
        """Generator of ids present in multiData in particular channel (i.e. 'type' tag).

        Consults the settings for the ids declared under *channel* and yields
        only those that actually have data loaded in multiData.

        :param channel: Channel name (a 'type' tag from settings).
        :return: Tuple with current channel and id, if such id present in multiData.
        """
        if self.settingsReader.check() and self.check():
            channelZeroName = self.settingsReader.substVersatileChannels(channel)
            typeList = self.settingsReader.getTypes(channelZeroName)
            # In case there is no combined type in the settings, fall back to
            # looking the channel up under its own name.
            if not len(typeList):
                typeList = self.settingsReader.getTypes(channel)
            for file in typeList:
                id = file.get('id')
                if self.hasChannelById(channel, id):
                    yield (channel, id)
    def reset(self) -> None:
        """Makes this multiData empty.

        Re-runs __init__ so all channel dicts are recreated from scratch and
        the ``empty`` flag is restored.

        :return: None.
        """
        self.__init__(self.main)
def setNode(self, channel: str, id: str, data: object) -> None:
"""Sets chosen node in hierarchy of multiData to given data object.
:param channel: string of type from settings.
:param id: string of channel id from settings.
:param data: object with some data, probably pandas dataframe or python list.
:return:
"""
#self.main.logger.debug('setting data node...')
self.multiData[channel][id] = data
self.empty = False
#FILTERING methods
def getChannelById(self, channel:str, id:str, format:str='as_is') -> object:
"""Returns what's inside multiData[channel][id] dict hierarchy, possibly converting to dataframe.
:param channel: string of type from settings.
:param id: string of channel id from settings.
:param format: specify this str to convert to DataFrame type
:return: data object, can be converted to dataframe.
"""
#self.main.logger.debug('get channel by id')
result = self.multiData[channel][id]
if format=='dataframe':
if type(result) == DataFrame:
return result
#elif type(result) == your custom data type:
# return yourParserFunction(self.main, result, settingsReader = self.settingsReader)
else:
self.main.printToOut('WARNING: Converting this type of data to DataFrame not implemented.')
return None
elif format=='as_is':
return result
    def getChannelAndTag(self, channel:str, id:str, block:str, format:str='as_is', ignoreEmpty:bool=True) -> object:
        """Returns what's inside the given channel, but tags the data by record tag, id and interval first.

        NOTE(review): the channel data is mutated in place by the inserts
        below — presumably callers do not rely on the untagged original;
        confirm.

        :param channel: string of type from settings.
        :param id: string of channel id from settings.
        :param block: which block to tag by.
        :param format: str for type conversion, passed to getChannelById.
        :param ignoreEmpty: Whether to cut off the empty and utility intervals.
        :return: Tagged data after tagIntervals.
        """
        chData = self.getChannelById(channel, id, format=format)
        channelZeroName = self.settingsReader.substVersatileChannels(channel)
        #zeroTime tag only applicable to INTERVALS block, because it is predefined for data channels
        if block == 'interval':
            startFrom = self.settingsReader.getZeroTimeById(channelZeroName, id)
        else:
            startFrom = 0
        pathAttr = self.settingsReader.getPathAttrById(type=channelZeroName, id=id)
        # First tagging pass: record the source file tag and channel id.
        if ('Record tag' not in chData.columns) and ('Id' not in chData.columns):
            chData.insert(2, 'Record tag', pathAttr)
            chData.insert(3, 'Id', id)
        #FIXME hotfix: data was already tagged once — overwrite the record tag
        #and add a second id column instead of failing on a duplicate insert.
        elif 'Id 2' not in chData.columns:
            chData['Record tag'] = pathAttr
            chData.insert(8, 'Id 2', id)
        return self.tagIntervals(chData, startFrom, block=block, ignoreEmpty=ignoreEmpty)
def getDataBetween(self, data:object, timeStart:object, timeEnd:object) -> object:
"""Selects and returns those data where timestamp is in given interval range.
Assuming timestamp in column 0.
:param data: data to trim from, usually after getChannelById method.
:param timeStart: timestamp to begin data with in 'M:S.f' str or timedelta format.
:param timeEnd: timestamp to end data with in 'M:S.f' str or timedelta format.
:return: Trimmed data.
"""
#self.main.logger.debug('get data between')
parsedTime = Utils.parseTimeV(data.iloc[:,0])
try:
data.insert(1, 'Timedelta', parsedTime)
except ValueError:
pass
if type(timeStart) is not timedelta:
timeStart = Utils.parseTime(timeStart)
if type(timeEnd) is not timedelta:
timeEnd = Utils.parseTime(timeEnd)
return data.loc[(data['Timedelta'] >= timeStart) & (data['Timedelta'] < timeEnd)]
def getDataInterval(self, data:object, startFrom:object, interval:str, block:str) -> object:
"""Selects and returns data where timestamp is inside interval defined by its id name.
:param data: data to trim from, usually after getChannelById method.
:param startFrom: Time value to start first interval from.
:param interval: id of interval in str format from settings.
:param block:
:return: Trimmed data.
"""
if type(startFrom) is not timedelta:
startFrom = Utils.parseTime(startFrom)
startTime=self.settingsReader.getStartTimeById(interval, block=block) + startFrom
endTime = self.settingsReader.getEndTimeById(interval, block=block) + startFrom
return self.getDataBetween(data, startTime, endTime)
def tagIntervals(self, chData:object, startFrom:object, block:str, ignoreEmpty:bool=True) -> DataFrame:
"""Tags given data by intervals, then returns a single dataframe.
:param chData: data to stack intervals from, usually after getChannelById method.
:param startFrom: zeroTime to start from.
:param block: which type of block to tag by.
:param ignoreEmpty: Whether to cut off the empty and utility intervals.
:return: DataFrame object ready to group by intervals.
"""
data = []
ints = self.settingsReader.getIntervals(block=block, ignoreEmpty=ignoreEmpty)
for interval in ints:
intData = self.getDataInterval(chData, startFrom, interval.get('id'), block=block)
intData.insert(4, block, interval.get('id'))
intData.insert(5, '{0} duration'.format(block), interval.get('duration'))
data.append(intData)
#case when there is no interval block in settings at all - nothing to tag
if len(ints)==0:
return chData
if len(ints)==1:
data = data[0]
else:
data = data[0].append(data[1:])
zeroBased=[]
zeroTime = data.iloc[0, 0]
for timestamp in data.iloc[:, 0]:
zeroBased.append(timestamp - zeroTime)
#FIXME should be no duplicates
data.insert(1, 'TimestampZeroBased', zeroBased, allow_duplicates=True)
#inheriting metadata
try:
data.metadata = chData.metadata
except AttributeError:
pass
return data
#EYE MOVEMENT methods
    def getVelocity(self, samplesData:DataFrame, smooth:str, convertToDeg:bool) -> DataFrame:
        """Method for calculating eye velocity, normally pixels converted to degrees first.

        :param samplesData: dataframe to operate on, containing appropriate eyetracker columns (Time, X, Y, etc.).
        :param smooth: algo to use ('savgol', 'spline' or 'conv'), normally passed by command line argument.
        :param convertToDeg: whether data is passed in raw pixel values or visual angle degrees.
        :return: data with added *Velocity columns (and smoothed position columns).
        """
        #TODO data column names hard-coded, need refactor to global name dictionary mapper (SMI, Tobii variants)
        # mapping goes to multiData metadata property
        #TODO B side (binocular) variant not implemented (applicable for SMI ETG)
        if all(samplesData['L POR X [px]'] == samplesData['R POR X [px]']) and all(samplesData['L POR Y [px]'] == samplesData['R POR Y [px]']):
            self.main.printToOut('Left and right channels detected equivalent. Working with one channel only.')
            samplesData.metadata['equivalent'] = True
        #NOTE(review): 'equivalent' is only set in the branch above; when channels differ it is
        #presumably initialized elsewhere, otherwise the lookup below raises KeyError - verify.
        metadata = samplesData.metadata
        self.main.printToOut('WARNING: Dimensions metadata from samples file is considered correct and precise, and used in pixel-to-degree conversions.')
        self.main.printToOut('Now calculating velocity, be patient.')
        if metadata['equivalent']:
            #the dominant eye should be used here
            sides = ['R']
        else:
            sides = ['L', 'R']
        #TODO skipping one channel if same
        for side in sides:
            for dim in ['X', 'Y']:
                # smoothing
                dataToSmooth = samplesData['{0} POR {1} [px]'.format(side, dim)]
                if smooth == 'savgol':
                    #Savitzky-Golay filter: 15-sample window, 2nd order polynomial
                    samplesData['{0}POR{1}PxSmoothed'.format(side, dim)] = savgol_filter(dataToSmooth, 15, 2)
                elif smooth == 'spline':
                    #scipy.interpolate.UnivariateSpline(x,y, k=1).get_coeffs()
                    samplesData['{0}POR{1}PxSmoothed'.format(side, dim)] = cspline1d(np.array(dataToSmooth), lamb=3)
                elif smooth == 'conv':
                    #width and shape of convolution, equivalent to moving average if all 1
                    win = np.array([1,1,1,1,1,1])
                    samplesData['{0}POR{1}PxSmoothed'.format(side, dim)] = convolve(np.array(dataToSmooth), in2=win, mode='same') / win.sum()
                else:
                    self.main.printToOut('ERROR: Invalid smoothing function specified.')
                #per-axis screen geometry; Y uses multiplier -1 (axis inverted relative to X)
                if dim == 'X':
                    screenDim = metadata['screenWidthPx']
                    screenRes = metadata['screenHResMm']
                    multiplier = 1
                elif dim == 'Y':
                    screenDim = metadata['screenHeightPx']
                    screenRes = metadata['screenVResMm']
                    multiplier = -1
                if not convertToDeg:
                    self.main.printToOut('ERROR: Raw pixels in data are currently assumed, column names hard-coded.')
                    raise NotImplementedError
                else:
                    #converting to DEGREES: center the pixel coordinate, scale to mm,
                    #then map to visual angle via Utils.getSeparation
                    samplesData['{0}POR{1}Mm'.format(side, dim)] = multiplier * (samplesData['{0} POR {1} [px]'.format(side, dim)] - screenDim / 2) * screenRes
                    coordsMm = samplesData['{0}POR{1}Mm'.format(side, dim)]
                    samplesData['{0}POR{1}Deg'.format(side, dim)] = np.sign(coordsMm) * coordsMm.apply(lambda x: Utils.getSeparation(x,0, 0,0, z=metadata['headDistanceMm'], mode='fromCartesian'))
                    #---- same conversion for the smoothed position channel
                    samplesData['{0}POR{1}MmSmoothed'.format(side, dim)] = multiplier * (samplesData['{0}POR{1}PxSmoothed'.format(side, dim)] - screenDim / 2) * screenRes
                    coordsMm = samplesData['{0}POR{1}MmSmoothed'.format(side, dim)]
                    samplesData['{0}POR{1}DegSmoothed'.format(side, dim)] = np.sign(coordsMm) * coordsMm.apply(lambda x: Utils.getSeparation(x,0, 0,0, z=metadata['headDistanceMm'], mode='fromCartesian'))
            #VELOCITY calculation: angular separation of consecutive samples divided by the
            #time lag; the first sample is padded with 1/1 to keep the column length
            x = samplesData['{0}PORXDeg'.format(side)]
            y = samplesData['{0}PORYDeg'.format(side)]
            row = DataFrame({'x1':x[1:].reset_index(drop=True), 'y1':y[1:].reset_index(drop=True), 'x0':x[:(len(x) - 1)].reset_index(drop=True), 'y0':y[:(len(y) - 1)].reset_index(drop=True)})
            seps = row.apply(lambda rowApply: Utils.getSeparation(x1=rowApply['x1'], y1=rowApply['y1'], x2=rowApply['x0'], y2=rowApply['y0'], z=metadata['headDistanceMm'], mode='fromPolar'), axis=1)
            separation = np.hstack((1, seps))
            timelag = np.hstack((1, np.diff(samplesData['Time'])))
            samplesData['{0}Velocity'.format(side)] = separation / timelag
            #---- same velocity computation on the smoothed channel
            x = samplesData['{0}PORXDegSmoothed'.format(side)]
            y = samplesData['{0}PORYDegSmoothed'.format(side)]
            row = DataFrame({'x1': x[1:].reset_index(drop=True), 'y1': y[1:].reset_index(drop=True), 'x0': x[:(len(x) - 1)].reset_index(drop=True), 'y0': y[:(len(y) - 1)].reset_index(drop=True)})
            seps = row.apply(lambda rowApply: Utils.getSeparation(x1=rowApply['x1'], y1=rowApply['y1'], x2=rowApply['x0'], y2=rowApply['y0'], z=metadata['headDistanceMm'], mode='fromPolar'), axis=1)
            separation = np.hstack((1, seps))
            timelag = np.hstack(( 1, np.diff(samplesData['Time']) ))
            samplesData['{0}VelocitySmoothed'.format(side)] = separation / timelag
        self.main.printToOut('Done.', status='ok')
        return samplesData
#SANITY check methods
def hasColumn(self, column:str, id:str) -> bool:
"""Checks if multiData contains such column in its gaze channel.
:param column: Column name from Tobii gaze data.
:param id: string of channel id from settings.
:return: True if column present, False otherwise.
"""
return column in self.multiData['availColumns'][id]
def hasAllColumns(self, columns:list, id:str) -> bool:
"""Checks if multiData contains ALL these columns passed in list.
:param columns: List of strings with column names.
:param id: string of channel id from settings.
:return: True if all columns present, False otherwise.
"""
for col in columns:
if col not in self.multiData['availColumns'][id]:
return False
return True
def hasChannelById(self, channel:str, id:str) -> bool:
"""Checks if multiData contains this channel.id node in its hierarchy.
:param channel: string of type from settings.
:param id: string of channel id from settings.
:return: True if such id in such channel present, False otherwise.
"""
try:
self.multiData[channel][id]
return True
except KeyError:
return False
def check(self) -> bool:
"""Helper method that checks if multiData present at all.
:return: True if it is, False otherwise.
"""
#self.main.logger.debug('check data')
if not self.empty:
return True
else:
self.main.printToOut('WARNING: No data loaded yet. Read data first!')
return False | [
"scipy.signal.savgol_filter",
"utils.Utils.parseTime",
"utils.Utils.parseTimeV",
"numpy.hstack",
"numpy.diff",
"utils.Utils.getSeparation",
"numpy.array",
"numpy.sign"
] | [((5413, 5446), 'utils.Utils.parseTimeV', 'Utils.parseTimeV', (['data.iloc[:, 0]'], {}), '(data.iloc[:, 0])\n', (5429, 5446), False, 'from utils import Utils\n'), ((5625, 5651), 'utils.Utils.parseTime', 'Utils.parseTime', (['timeStart'], {}), '(timeStart)\n', (5640, 5651), False, 'from utils import Utils\n'), ((5717, 5741), 'utils.Utils.parseTime', 'Utils.parseTime', (['timeEnd'], {}), '(timeEnd)\n', (5732, 5741), False, 'from utils import Utils\n'), ((6381, 6407), 'utils.Utils.parseTime', 'Utils.parseTime', (['startFrom'], {}), '(startFrom)\n', (6396, 6407), False, 'from utils import Utils\n'), ((12997, 13017), 'numpy.hstack', 'np.hstack', (['(1, seps)'], {}), '((1, seps))\n', (13006, 13017), True, 'import numpy as np\n'), ((13725, 13745), 'numpy.hstack', 'np.hstack', (['(1, seps)'], {}), '((1, seps))\n', (13734, 13745), True, 'import numpy as np\n'), ((10166, 10200), 'scipy.signal.savgol_filter', 'savgol_filter', (['dataToSmooth', '(15)', '(2)'], {}), '(dataToSmooth, 15, 2)\n', (10179, 10200), False, 'from scipy.signal import savgol_filter\n'), ((12816, 12963), 'utils.Utils.getSeparation', 'Utils.getSeparation', ([], {'x1': "rowApply['x1']", 'y1': "rowApply['y1']", 'x2': "rowApply['x0']", 'y2': "rowApply['y0']", 'z': "metadata['headDistanceMm']", 'mode': '"""fromPolar"""'}), "(x1=rowApply['x1'], y1=rowApply['y1'], x2=rowApply['x0'],\n y2=rowApply['y0'], z=metadata['headDistanceMm'], mode='fromPolar')\n", (12835, 12963), False, 'from utils import Utils\n'), ((13054, 13082), 'numpy.diff', 'np.diff', (["samplesData['Time']"], {}), "(samplesData['Time'])\n", (13061, 13082), True, 'import numpy as np\n'), ((13547, 13694), 'utils.Utils.getSeparation', 'Utils.getSeparation', ([], {'x1': "rowApply['x1']", 'y1': "rowApply['y1']", 'x2': "rowApply['x0']", 'y2': "rowApply['y0']", 'z': "metadata['headDistanceMm']", 'mode': '"""fromPolar"""'}), "(x1=rowApply['x1'], y1=rowApply['y1'], x2=rowApply['x0'],\n y2=rowApply['y0'], z=metadata['headDistanceMm'], 
mode='fromPolar')\n", (13566, 13694), False, 'from utils import Utils\n'), ((13783, 13811), 'numpy.diff', 'np.diff', (["samplesData['Time']"], {}), "(samplesData['Time'])\n", (13790, 13811), True, 'import numpy as np\n'), ((11813, 11830), 'numpy.sign', 'np.sign', (['coordsMm'], {}), '(coordsMm)\n', (11820, 11830), True, 'import numpy as np\n'), ((12300, 12317), 'numpy.sign', 'np.sign', (['coordsMm'], {}), '(coordsMm)\n', (12307, 12317), True, 'import numpy as np\n'), ((10406, 10428), 'numpy.array', 'np.array', (['dataToSmooth'], {}), '(dataToSmooth)\n', (10414, 10428), True, 'import numpy as np\n'), ((10594, 10622), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (10602, 10622), True, 'import numpy as np\n'), ((11858, 11946), 'utils.Utils.getSeparation', 'Utils.getSeparation', (['x', '(0)', '(0)', '(0)'], {'z': "metadata['headDistanceMm']", 'mode': '"""fromCartesian"""'}), "(x, 0, 0, 0, z=metadata['headDistanceMm'], mode=\n 'fromCartesian')\n", (11877, 11946), False, 'from utils import Utils\n'), ((12345, 12433), 'utils.Utils.getSeparation', 'Utils.getSeparation', (['x', '(0)', '(0)', '(0)'], {'z': "metadata['headDistanceMm']", 'mode': '"""fromCartesian"""'}), "(x, 0, 0, 0, z=metadata['headDistanceMm'], mode=\n 'fromCartesian')\n", (12364, 12433), False, 'from utils import Utils\n'), ((10702, 10724), 'numpy.array', 'np.array', (['dataToSmooth'], {}), '(dataToSmooth)\n', (10710, 10724), True, 'import numpy as np\n')] |
"""
This file contains a wrapper for sampling environment states
from a set of demonstrations on every reset. The main use case is for
altering the start state distribution of training episodes for
learning RL policies.
"""
import random
import os
import h5py
import time
import numpy as np
from robosuite.utils.mjcf_utils import postprocess_model_xml
from robosuite.wrappers import Wrapper
class DemoSamplerWrapper(Wrapper):
    env = None

    def __init__(
        self,
        env,
        demo_path,
        need_xml=False,
        num_traj=-1,
        sampling_schemes=["uniform", "random"],
        scheme_ratios=[0.9, 0.1],
        open_loop_increment_freq=100,
        open_loop_initial_window_width=25,
        open_loop_window_increment=25,
    ):
        """
        Initializes a wrapper that provides support for resetting the environment
        state to one from a demonstration. It also supports curriculums for
        altering how often to sample from demonstration vs. sampling a reset
        state from the environment.

        Args:
            env (MujocoEnv instance): The environment to wrap.

            demo_path (string): The path to the folder containing the demonstrations.
                There should be a `demo.hdf5` file and a folder named `models` with
                all of the stored model xml files from the demonstrations.

            need_xml (bool): If True, the mujoco model needs to be reloaded when
                sampling a state from a demonstration. This could be because every
                demonstration was taken under varied object properties, for example.
                In this case, every sampled state comes with a corresponding xml to
                be used for the environment reset.

            num_traj (int): If provided, subsample @number demonstrations from the
                provided set of demonstrations instead of using all of them.

            sampling_schemes (list of strings): A list of sampling schemes
                to be used. The following strings are valid schemes:

                    "random" : sample a reset state directly from the wrapped environment

                    "uniform" : sample a state from a demonstration uniformly at random

                    "forward" : sample a state from a window that grows progressively from
                        the start of demonstrations

                    "reverse" : sample a state from a window that grows progressively from
                        the end of demonstrations

            scheme_ratios (list of floats): A list of probability values to
                assign to each member of @sampling_schemes. Must be non-negative and
                sum to 1.

            open_loop_increment_freq (int): How frequently to increase
                the window size in open loop schemes ("forward" and "reverse"). The
                window size will increase by @open_loop_window_increment every
                @open_loop_increment_freq samples. Only samples that are generated
                by open loop schemes contribute to this count.

            open_loop_initial_window_width (int): The width of the initial sampling
                window, in terms of number of demonstration time steps, for
                open loop schemes.

            open_loop_window_increment (int): The window size will increase by
                @open_loop_window_increment every @open_loop_increment_freq samples.
                This number is in terms of number of demonstration time steps.
        """
        super().__init__(env)

        self.demo_path = demo_path
        hdf5_path = os.path.join(self.demo_path, "demo.hdf5")
        self.demo_file = h5py.File(hdf5_path, "r")

        # ensure that wrapped env matches the env on which demonstrations were collected
        env_name = self.demo_file["data"].attrs["env"]
        assert (
            env_name == self.unwrapped.__class__.__name__
        ), "Wrapped env {} does not match env on which demos were collected ({})".format(
            env.__class__.__name__, env_name
        )

        # list of all demonstrations episodes
        self.demo_list = list(self.demo_file["data"].keys())

        # subsample a selection of demonstrations if requested
        if num_traj > 0:
            random.seed(3141)  # ensure that the same set is sampled every time
            self.demo_list = random.sample(self.demo_list, num_traj)

        self.need_xml = need_xml
        self.demo_sampled = 0

        self.sample_method_dict = {
            "random": "_random_sample",
            "uniform": "_uniform_sample",
            "forward": "_forward_sample_open_loop",
            "reverse": "_reverse_sample_open_loop",
        }

        self.sampling_schemes = sampling_schemes
        self.scheme_ratios = np.asarray(scheme_ratios)

        # make sure the list of schemes is valid
        schemes = self.sample_method_dict.keys()
        assert np.all([(s in schemes) for s in self.sampling_schemes])

        # make sure the distribution is the correct size
        assert len(self.sampling_schemes) == len(self.scheme_ratios)

        # make sure the distribution lies in the probability simplex.
        # Non-negative (as documented) rather than strictly positive: a
        # zero-ratio scheme is simply never selected by the cumsum draw.
        assert np.all(self.scheme_ratios >= 0.0)
        # float-tolerant sum check instead of exact equality, which can fail
        # for ratios like [0.7, 0.2, 0.1] due to rounding
        assert np.isclose(np.sum(self.scheme_ratios), 1.0)

        # open loop configuration
        self.open_loop_increment_freq = open_loop_increment_freq
        self.open_loop_window_increment = open_loop_window_increment

        # keep track of window size
        self.open_loop_window_size = open_loop_initial_window_width

    def reset(self):
        """
        Logic for sampling a state from the demonstration and resetting
        the simulation to that state.
        """
        state = self.sample()
        if state is None:
            # None indicates that a normal env reset should occur
            return self.env.reset()
        else:
            if self.need_xml:
                # reset the simulation from the model if necessary
                state, xml = state
                self.env.reset_from_xml_string(xml)

            if isinstance(state, tuple):
                state = state[0]

            # force simulator state to one from the demo
            self.sim.set_state_from_flattened(state)
            self.sim.forward()

            return self.env._get_observation()

    def sample(self):
        """
        This is the core sampling method. Samples a state from a
        demonstration, in accordance with the configuration.
        """

        # chooses a sampling scheme randomly based on the mixing ratios
        seed = random.uniform(0, 1)
        ratio = np.cumsum(self.scheme_ratios)
        ratio = ratio > seed
        # pick the first scheme whose cumulative probability exceeds the draw
        for i, v in enumerate(ratio):
            if v:
                break

        sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
        return sample_method()

    def _random_sample(self):
        """
        Sampling method.

        Return None to indicate that the state should be sampled directly
        from the environment.
        """
        return None

    def _uniform_sample(self):
        """
        Sampling method.

        First uniformly sample a demonstration from the set of demonstrations.
        Then uniformly sample a state from the selected demonstration.
        """

        # get a random episode index
        ep_ind = random.choice(self.demo_list)

        # select a flattened mujoco state uniformly from this episode.
        # ds[()] reads the whole dataset; the .value attribute it replaces
        # was deprecated and removed in h5py 3.0.
        states = self.demo_file["data/{}/states".format(ep_ind)][()]
        state = random.choice(states)

        if self.need_xml:
            model_xml = self._xml_for_episode_index(ep_ind)
            xml = postprocess_model_xml(model_xml)
            return state, xml
        return state

    def _reverse_sample_open_loop(self):
        """
        Sampling method.

        Open loop reverse sampling from demonstrations. Starts by
        sampling from states near the end of the demonstrations.
        Increases the window backwards as the number of calls to
        this sampling method increases at a fixed rate.
        """

        # get a random episode index
        ep_ind = random.choice(self.demo_list)

        # sample uniformly in a window that grows backwards from the end of the demos
        states = self.demo_file["data/{}/states".format(ep_ind)][()]
        eps_len = states.shape[0]
        index = np.random.randint(max(eps_len - self.open_loop_window_size, 0), eps_len)
        state = states[index]

        # increase window size at a fixed frequency (open loop)
        self.demo_sampled += 1
        if self.demo_sampled >= self.open_loop_increment_freq:
            if self.open_loop_window_size < eps_len:
                self.open_loop_window_size += self.open_loop_window_increment
            self.demo_sampled = 0

        if self.need_xml:
            model_xml = self._xml_for_episode_index(ep_ind)
            xml = postprocess_model_xml(model_xml)
            return state, xml

        return state

    def _forward_sample_open_loop(self):
        """
        Sampling method.

        Open loop forward sampling from demonstrations. Starts by
        sampling from states near the beginning of the demonstrations.
        Increases the window forwards as the number of calls to
        this sampling method increases at a fixed rate.
        """

        # get a random episode index
        ep_ind = random.choice(self.demo_list)

        # sample uniformly in a window that grows forwards from the beginning of the demos
        states = self.demo_file["data/{}/states".format(ep_ind)][()]
        eps_len = states.shape[0]
        index = np.random.randint(0, min(self.open_loop_window_size, eps_len))
        state = states[index]

        # increase window size at a fixed frequency (open loop)
        self.demo_sampled += 1
        if self.demo_sampled >= self.open_loop_increment_freq:
            if self.open_loop_window_size < eps_len:
                self.open_loop_window_size += self.open_loop_window_increment
            self.demo_sampled = 0

        if self.need_xml:
            model_xml = self._xml_for_episode_index(ep_ind)
            xml = postprocess_model_xml(model_xml)
            return state, xml

        return state

    def _xml_for_episode_index(self, ep_ind):
        """
        Helper method to retrieve the corresponding model xml string
        for the passed episode index.
        """

        # read the model xml, using the metadata stored in the attribute for this episode
        model_file = self.demo_file["data/{}".format(ep_ind)].attrs["model_file"]
        model_path = os.path.join(self.demo_path, "models", model_file)
        with open(model_path, "r") as model_f:
            model_xml = model_f.read()
        return model_xml
| [
"h5py.File",
"random.uniform",
"random.sample",
"numpy.asarray",
"random.choice",
"numpy.cumsum",
"random.seed",
"robosuite.utils.mjcf_utils.postprocess_model_xml",
"os.path.join",
"numpy.all"
] | [((3819, 3860), 'os.path.join', 'os.path.join', (['self.demo_path', '"""demo.hdf5"""'], {}), "(self.demo_path, 'demo.hdf5')\n", (3831, 3860), False, 'import os\n'), ((3886, 3911), 'h5py.File', 'h5py.File', (['hdf5_path', '"""r"""'], {}), "(hdf5_path, 'r')\n", (3895, 3911), False, 'import h5py\n'), ((4999, 5024), 'numpy.asarray', 'np.asarray', (['scheme_ratios'], {}), '(scheme_ratios)\n', (5009, 5024), True, 'import numpy as np\n'), ((5139, 5194), 'numpy.all', 'np.all', (['[(s in schemes) for s in self.sampling_schemes]'], {}), '([(s in schemes) for s in self.sampling_schemes])\n', (5145, 5194), True, 'import numpy as np\n'), ((5407, 5439), 'numpy.all', 'np.all', (['(self.scheme_ratios > 0.0)'], {}), '(self.scheme_ratios > 0.0)\n', (5413, 5439), True, 'import numpy as np\n'), ((6798, 6818), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (6812, 6818), False, 'import random\n'), ((6835, 6864), 'numpy.cumsum', 'np.cumsum', (['self.scheme_ratios'], {}), '(self.scheme_ratios)\n', (6844, 6864), True, 'import numpy as np\n'), ((7585, 7614), 'random.choice', 'random.choice', (['self.demo_list'], {}), '(self.demo_list)\n', (7598, 7614), False, 'import random\n'), ((7773, 7794), 'random.choice', 'random.choice', (['states'], {}), '(states)\n', (7786, 7794), False, 'import random\n'), ((8384, 8413), 'random.choice', 'random.choice', (['self.demo_list'], {}), '(self.demo_list)\n', (8397, 8413), False, 'import random\n'), ((9643, 9672), 'random.choice', 'random.choice', (['self.demo_list'], {}), '(self.demo_list)\n', (9656, 9672), False, 'import random\n'), ((10865, 10915), 'os.path.join', 'os.path.join', (['self.demo_path', '"""models"""', 'model_file'], {}), "(self.demo_path, 'models', model_file)\n", (10877, 10915), False, 'import os\n'), ((4486, 4503), 'random.seed', 'random.seed', (['(3141)'], {}), '(3141)\n', (4497, 4503), False, 'import random\n'), ((4583, 4622), 'random.sample', 'random.sample', (['self.demo_list', 'num_traj'], {}), 
'(self.demo_list, num_traj)\n', (4596, 4622), False, 'import random\n'), ((7900, 7932), 'robosuite.utils.mjcf_utils.postprocess_model_xml', 'postprocess_model_xml', (['model_xml'], {}), '(model_xml)\n', (7921, 7932), False, 'from robosuite.utils.mjcf_utils import postprocess_model_xml\n'), ((9154, 9186), 'robosuite.utils.mjcf_utils.postprocess_model_xml', 'postprocess_model_xml', (['model_xml'], {}), '(model_xml)\n', (9175, 9186), False, 'from robosuite.utils.mjcf_utils import postprocess_model_xml\n'), ((10408, 10440), 'robosuite.utils.mjcf_utils.postprocess_model_xml', 'postprocess_model_xml', (['model_xml'], {}), '(model_xml)\n', (10429, 10440), False, 'from robosuite.utils.mjcf_utils import postprocess_model_xml\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Version : Python 3.6
import os
import json
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
class WordEmbeddingLoader(object):
    """
    A loader for pre-trained word embedding.

    Produces a token->index vocabulary and a matching torch tensor of
    embedding vectors. Rows 0/1 are reserved for 'PAD'/'UNK', followed by
    the TACRED entity placeholder tokens; those reserved rows get random
    vectors (the 'PAD' row is zeroed afterwards).
    """

    def __init__(self, config):
        self.path_word = config.embedding_path  # path of pre-trained word embedding
        self.word_dim = config.word_dim  # dimension of word embedding

    def load_embedding(self):
        """Parse the embedding text file and return (word2id, word_vec)."""
        special_char = ['SUBJ-ORGANIZATION', 'SUBJ-PERSON', 'OBJ-PERSON',
                        'OBJ-ORGANIZATION', 'OBJ-DATE', 'OBJ-NUMBER',
                        'OBJ-TITLE', 'OBJ-COUNTRY', 'OBJ-LOCATION',
                        'OBJ-CITY', 'OBJ-MISC', 'OBJ-STATE_OR_PROVINCE',
                        'OBJ-DURATION', 'OBJ-NATIONALITY', 'OBJ-CAUSE_OF_DEATH',
                        'OBJ-CRIMINAL_CHARGE', 'OBJ-RELIGION', 'OBJ-URL',
                        'OBJ-IDEOLOGY']
        # reserved vocabulary entries first: PAD, UNK, then entity placeholders
        word2id = {'PAD': 0, 'UNK': 1}
        for token in special_char:
            word2id[token] = len(word2id)
        # real vectors read from disk; malformed lines are skipped silently
        vectors = []
        with open(self.path_word, 'r', encoding='utf-8') as handle:
            for raw in handle:
                fields = raw.strip().split()
                if len(fields) != self.word_dim + 1:
                    continue
                word2id[fields[0]] = len(word2id)
                vectors.append(np.asarray(fields[1:], dtype=np.float32))
        # random vectors for the reserved rows (PAD + UNK + placeholders)
        reserved = np.random.uniform(-1, 1, (len(special_char) + 2, self.word_dim))
        embedding = np.concatenate((reserved, vectors), axis=0)
        embedding[0] = 0  # 'PAD' is initialized as zero
        embedding = embedding.astype(np.float32).reshape(-1, self.word_dim)
        return word2id, torch.from_numpy(embedding)
class RelationLoader(object):
    """Reads the relation-name <-> id mapping from ``relation2id.txt``."""

    def __init__(self, config):
        # directory that contains the relation2id.txt mapping file
        self.data_dir = config.data_dir

    def __load_relation(self):
        """Parse the mapping file (one ``<name> <id>`` pair per line).

        Returns a tuple ``(rel2id, id2rel, num_relations)``.
        """
        mapping_path = os.path.join(self.data_dir, 'relation2id.txt')
        rel2id, id2rel = {}, {}
        with open(mapping_path, 'r', encoding='utf-8') as handle:
            for raw in handle:
                name, idx_str = raw.strip().split()
                idx = int(idx_str)
                rel2id[name] = idx
                id2rel[idx] = name
        return rel2id, id2rel, len(rel2id)

    def get_relation(self):
        """Public accessor for the relation mapping triple."""
        return self.__load_relation()
class TacredDateset(Dataset):
    """Torch Dataset over a TACRED-style json-lines split file.

    Each item is a ``(features, label)`` pair where ``features`` has shape
    ``(1, 4, max_len)`` with channels ``[word ids, pos-to-subject,
    pos-to-object, segment mask]``.
    NOTE(review): class name keeps the original 'Dateset' spelling because
    external callers reference it.
    """

    def __init__(self, filename, rel2id, word2id, config):
        self.filename = filename        # split file name inside config.data_dir
        self.rel2id = rel2id            # relation name -> label index
        self.word2id = word2id          # token -> vocabulary index
        self.max_len = config.max_len   # fixed sequence length (pad/truncate)
        self.data_dir = config.data_dir
        self.dataset, self.label = self.__load_data()

    def __get_pos_index(self, x):
        # shift a signed relative offset into a non-negative embedding index
        return x + self.max_len - 1

    def __get_relative_pos(self, x, entity_pos):
        """Relative position of token ``x`` w.r.t. the entity span (0 inside the span)."""
        if x < entity_pos[0]:
            return self.__get_pos_index(x-entity_pos[0])
        elif x > entity_pos[1]:
            return self.__get_pos_index(x-entity_pos[1])
        else:
            return self.__get_pos_index(0)

    def __symbolize_sentence(self, e1_pos, e2_pos, sentence):
        """
        Convert one tokenized sentence into the (1, 4, max_len) feature array.

        Args:
            e1_pos (tuple) span of e1
            e2_pos (tuple) span of e2
            sentence (list)
        """
        # segment mask: 1 before the first entity, 2 between the entities
        # (inclusive), 3 after the last entity, 0 for padding
        mask = [1] * len(sentence)
        if e1_pos[0] < e2_pos[0]:
            for i in range(e1_pos[0], e2_pos[1]+1):
                mask[i] = 2
            for i in range(e2_pos[1]+1, len(sentence)):
                mask[i] = 3
        else:
            for i in range(e2_pos[0], e1_pos[1]+1):
                mask[i] = 2
            for i in range(e1_pos[1]+1, len(sentence)):
                mask[i] = 3

        words = []
        pos1 = []
        pos2 = []
        length = min(self.max_len, len(sentence))
        mask = mask[:length]
        for i in range(length):
            words.append(self.word2id.get(sentence[i], self.word2id['UNK']))
            pos1.append(self.__get_relative_pos(i, e1_pos))
            pos2.append(self.__get_relative_pos(i, e2_pos))

        # pad every channel out to max_len
        if length < self.max_len:
            for i in range(length, self.max_len):
                mask.append(0)  # 'PAD' mask is zero
                words.append(self.word2id['PAD'])
                pos1.append(self.__get_relative_pos(i, e1_pos))
                pos2.append(self.__get_relative_pos(i, e2_pos))
        unit = np.asarray([words, pos1, pos2, mask], dtype=np.int64)
        unit = np.reshape(unit, newshape=(1, 4, self.max_len))
        return unit

    def __load_data(self):
        """Read the json-lines split file and featurize every record."""
        path_data_file = os.path.join(self.data_dir, self.filename)
        data = []
        labels = []
        with open(path_data_file, 'r', encoding='utf-8') as fr:
            for line in fr:
                line = json.loads(line.strip())
                label = line['relation']
                sentence = line['token']
                e1_pos = (line['subj_start'], line['subj_end'])
                e2_pos = (line['obj_start'], line['obj_end'])
                label_idx = self.rel2id[label]

                ss, se = line['subj_start'], line['subj_end']  # entity1 span
                oss, oe = line['obj_start'], line['obj_end']  # entity2 span
                # replace entity tokens with their typed placeholder tokens
                sentence[ss:se+1] = ['SUBJ-'+line['subj_type']] * (se-ss+1)
                sentence[oss:oe+1] = ['OBJ-'+line['obj_type']] * (oe-oss+1)

                one_sentence = self.__symbolize_sentence(e1_pos, e2_pos, sentence)
                data.append(one_sentence)
                labels.append(label_idx)
        return data, labels

    def __getitem__(self, index):
        data = self.dataset[index]
        label = self.label[index]
        return data, label

    def __len__(self):
        return len(self.label)
class TacredDataLoader(object):
    """Factory for train/dev/test ``DataLoader`` objects over TACRED splits."""

    def __init__(self, rel2id, word2id, config):
        self.rel2id = rel2id    # relation name -> label index
        self.word2id = word2id  # token -> vocabulary index
        self.config = config   # carries batch_size, data_dir, max_len, ...

    def __collate_fn(self, batch):
        """Merge a list of (features, label) pairs into batched tensors."""
        features, targets = zip(*batch)  # unzip the batch data
        stacked = np.concatenate(list(features), axis=0)
        batched_data = torch.from_numpy(stacked)
        batched_label = torch.from_numpy(np.asarray(list(targets), dtype=np.int64))
        return batched_data, batched_label

    def __get_data(self, filename, shuffle=False):
        """Build a ``DataLoader`` over the given split file."""
        split = TacredDateset(filename, self.rel2id, self.word2id, self.config)
        return DataLoader(
            dataset=split,
            batch_size=self.config.batch_size,
            shuffle=shuffle,
            num_workers=2,
            collate_fn=self.__collate_fn,
        )

    def get_train(self):
        """DataLoader over the (shuffled) training split."""
        return self.__get_data('train.json', shuffle=True)

    def get_dev(self):
        """DataLoader over the development split."""
        return self.__get_data('dev.json', shuffle=False)

    def get_test(self):
        """DataLoader over the test split."""
        return self.__get_data('test.json', shuffle=False)
if __name__ == '__main__':
    # smoke test: load embeddings and relations, build the dev loader and
    # print the min/max relative-position index seen across all batches
    from config import Config
    config = Config()
    word2id, word_vec = WordEmbeddingLoader(config).load_embedding()
    rel2id, id2rel, class_num = RelationLoader(config).get_relation()
    loader = TacredDataLoader(rel2id, word2id, config)
    test_loader = loader.get_dev()
    min_v, max_v = float('inf'), -float('inf')
    for step, (data, label) in enumerate(test_loader):
        # print(type(data), data.shape)
        # print(type(label), label.shape)
        # break
        # channel layout of ``data`` along dim 1: [words, pos1, pos2, mask]
        pos1 = data[:, 1, :].view(-1, config.max_len)
        pos2 = data[:, 2, :].view(-1, config.max_len)
        mask = data[:, 3, :].view(-1, config.max_len)  # NOTE(review): unused below
        min_v = min(min_v, torch.min(pos1).item())
        max_v = max(max_v, torch.max(pos1).item())
        min_v = min(min_v, torch.min(pos2).item())
        max_v = max(max_v, torch.max(pos2).item())
    print(min_v, max_v)
| [
"config.Config",
"torch.utils.data.DataLoader",
"numpy.asarray",
"torch.max",
"numpy.reshape",
"os.path.join",
"torch.min",
"numpy.concatenate",
"torch.from_numpy"
] | [((7101, 7109), 'config.Config', 'Config', ([], {}), '()\n', (7107, 7109), False, 'from config import Config\n'), ((1707, 1754), 'numpy.concatenate', 'np.concatenate', (['(special_emb, word_vec)'], {'axis': '(0)'}), '((special_emb, word_vec), axis=0)\n', (1721, 1754), True, 'import numpy as np\n'), ((1904, 1930), 'torch.from_numpy', 'torch.from_numpy', (['word_vec'], {}), '(word_vec)\n', (1920, 1930), False, 'import torch\n'), ((2124, 2170), 'os.path.join', 'os.path.join', (['self.data_dir', '"""relation2id.txt"""'], {}), "(self.data_dir, 'relation2id.txt')\n", (2136, 2170), False, 'import os\n'), ((4578, 4631), 'numpy.asarray', 'np.asarray', (['[words, pos1, pos2, mask]'], {'dtype': 'np.int64'}), '([words, pos1, pos2, mask], dtype=np.int64)\n', (4588, 4631), True, 'import numpy as np\n'), ((4647, 4694), 'numpy.reshape', 'np.reshape', (['unit'], {'newshape': '(1, 4, self.max_len)'}), '(unit, newshape=(1, 4, self.max_len))\n', (4657, 4694), True, 'import numpy as np\n'), ((4768, 4810), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.filename'], {}), '(self.data_dir, self.filename)\n', (4780, 4810), False, 'import os\n'), ((6561, 6690), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'self.config.batch_size', 'shuffle': 'shuffle', 'num_workers': '(2)', 'collate_fn': 'self.__collate_fn'}), '(dataset=dataset, batch_size=self.config.batch_size, shuffle=\n shuffle, num_workers=2, collate_fn=self.__collate_fn)\n', (6571, 6690), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6285, 6313), 'numpy.concatenate', 'np.concatenate', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6299, 6313), True, 'import numpy as np\n'), ((6348, 6381), 'numpy.asarray', 'np.asarray', (['label'], {'dtype': 'np.int64'}), '(label, dtype=np.int64)\n', (6358, 6381), True, 'import numpy as np\n'), ((1562, 1600), 'numpy.asarray', 'np.asarray', (['line[1:]'], {'dtype': 'np.float32'}), '(line[1:], dtype=np.float32)\n', (1572, 1600), 
True, 'import numpy as np\n'), ((7729, 7744), 'torch.min', 'torch.min', (['pos1'], {}), '(pos1)\n', (7738, 7744), False, 'import torch\n'), ((7780, 7795), 'torch.max', 'torch.max', (['pos1'], {}), '(pos1)\n', (7789, 7795), False, 'import torch\n'), ((7831, 7846), 'torch.min', 'torch.min', (['pos2'], {}), '(pos2)\n', (7840, 7846), False, 'import torch\n'), ((7882, 7897), 'torch.max', 'torch.max', (['pos2'], {}), '(pos2)\n', (7891, 7897), False, 'import torch\n')] |
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
    """Tests for ``Series.replace``.

    Covers scalar, list, and dict replacement targets/values; regex
    replacement; in-place vs. copying semantics; dtype preservation for
    bool, numeric, nullable (EA), string, categorical, datetime-like,
    interval, and period dtypes; and the ``method="pad"`` fill fallback.
    GH#NNNNN comments reference pandas GitHub issues.
    """

    def test_replace_explicit_none(self):
        # GH#36984 if the user explicitly passes value=None, give it to them
        ser = pd.Series([0, 0, ""], dtype=object)
        result = ser.replace("", None)
        expected = pd.Series([0, 0, None], dtype=object)
        tm.assert_series_equal(result, expected)
        df = pd.DataFrame(np.zeros((3, 3)))
        df.iloc[2, 2] = ""
        result = df.replace("", None)
        expected = pd.DataFrame(
            {
                0: np.zeros(3),
                1: np.zeros(3),
                2: np.array([0.0, 0.0, None], dtype=object),
            }
        )
        assert expected.iloc[2, 2] is None
        tm.assert_frame_equal(result, expected)
        # GH#19998 same thing with object dtype
        ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
        result = ser.replace("a", None)
        expected = pd.Series([10, 20, 30, None, None, "b", None])
        assert expected.iloc[-1] is None
        tm.assert_series_equal(result, expected)
    def test_replace_noop_doesnt_downcast(self):
        # GH#44498
        ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
        res = ser.replace({np.nan: None})  # should be a no-op
        tm.assert_series_equal(res, ser)
        assert res.dtype == object
        # same thing but different calling convention
        res = ser.replace(np.nan, None)
        tm.assert_series_equal(res, ser)
        assert res.dtype == object
    def test_replace(self):
        """Replace with scalars, lists, and dicts on float and object Series."""
        N = 100
        ser = pd.Series(np.random.randn(N))
        ser[0:4] = np.nan
        ser[6:10] = 0
        # replace list with a single value
        return_value = ser.replace([np.nan], -1, inplace=True)
        assert return_value is None
        exp = ser.fillna(-1)
        tm.assert_series_equal(ser, exp)
        rs = ser.replace(0.0, np.nan)
        ser[ser == 0.0] = np.nan
        tm.assert_series_equal(rs, ser)
        ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
        ser[:5] = np.nan
        ser[6:10] = "foo"
        ser[20:30] = "bar"
        # replace list with a single value
        rs = ser.replace([np.nan, "foo", "bar"], -1)
        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -1).all()
        assert (rs[20:30] == -1).all()
        assert (pd.isna(ser[:5])).all()
        # replace with different values
        rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -2).all()
        assert (rs[20:30] == -3).all()
        assert (pd.isna(ser[:5])).all()
        # replace with different values with 2 lists
        rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)
        # replace inplace
        return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
        assert return_value is None
        assert (ser[:5] == -1).all()
        assert (ser[6:10] == -1).all()
        assert (ser[20:30] == -1).all()
    def test_replace_nan_with_inf(self):
        """Replacing NaN matches fillna; inf is replaceable like any value."""
        ser = pd.Series([np.nan, 0, np.inf])
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
        ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
        filled = ser.copy()
        filled[4] = 0
        tm.assert_series_equal(ser.replace(np.inf, 0), filled)
    def test_replace_listlike_value_listlike_target(self, datetime_series):
        ser = pd.Series(datetime_series.index)
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
        # malformed
        msg = r"Replacement lists must match in length\. Expecting 3 got 2"
        with pytest.raises(ValueError, match=msg):
            ser.replace([1, 2, 3], [np.nan, 0])
        # ser is dt64 so can't hold 1 or 2, so this replace is a no-op
        result = ser.replace([1, 2], [np.nan, 0])
        tm.assert_series_equal(result, ser)
        ser = pd.Series([0, 1, 2, 3, 4])
        result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
        tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
    def test_replace_gh5319(self):
        # API change from 0.12?
        # GH 5319
        ser = pd.Series([0, np.nan, 2, 3, 4])
        expected = ser.ffill()
        result = ser.replace([np.nan])
        tm.assert_series_equal(result, expected)
        ser = pd.Series([0, np.nan, 2, 3, 4])
        expected = ser.ffill()
        result = ser.replace(np.nan)
        tm.assert_series_equal(result, expected)
    def test_replace_datetime64(self):
        # GH 5797
        ser = pd.Series(pd.date_range("20130101", periods=5))
        expected = ser.copy()
        expected.loc[2] = pd.Timestamp("20120101")
        result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
        tm.assert_series_equal(result, expected)
        result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
        tm.assert_series_equal(result, expected)
    def test_replace_nat_with_tz(self):
        # GH 11792: Test with replacing NaT in a list with tz data
        ts = pd.Timestamp("2015/01/01", tz="UTC")
        s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
        result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
        expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
        tm.assert_series_equal(expected, result)
    def test_replace_timedelta_td64(self):
        tdi = pd.timedelta_range(0, periods=5)
        ser = pd.Series(tdi)
        # Using a single dict argument means we go through replace_list
        result = ser.replace({ser[1]: ser[3]})
        expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
        tm.assert_series_equal(result, expected)
    def test_replace_with_single_list(self):
        """A bare list of targets with no value falls back to pad-filling."""
        ser = pd.Series([0, 1, 2, 3, 4])
        result = ser.replace([1, 2, 3])
        tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
        s = ser.copy()
        return_value = s.replace([1, 2, 3], inplace=True)
        assert return_value is None
        tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
        # make sure things don't get corrupted when fillna call fails
        s = ser.copy()
        msg = (
            r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
            r"\(bfill\)\. Got crash_cymbal"
        )
        with pytest.raises(ValueError, match=msg):
            return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
        assert return_value is None
        tm.assert_series_equal(s, ser)
    def test_replace_mixed_types(self):
        """Dtype of the result after replace: upcast only when necessary."""
        ser = pd.Series(np.arange(5), dtype="int64")
        def check_replace(to_rep, val, expected):
            # verify copying and in-place replace agree with `expected`
            sc = ser.copy()
            result = ser.replace(to_rep, val)
            return_value = sc.replace(to_rep, val, inplace=True)
            assert return_value is None
            tm.assert_series_equal(expected, result)
            tm.assert_series_equal(expected, sc)
        # 3.0 can still be held in our int64 series, so we do not upcast GH#44940
        tr, v = [3], [3.0]
        check_replace(tr, v, ser)
        # Note this matches what we get with the scalars 3 and 3.0
        check_replace(tr[0], v[0], ser)
        # MUST upcast to float
        e = pd.Series([0, 1, 2, 3.5, 4])
        tr, v = [3], [3.5]
        check_replace(tr, v, e)
        # casts to object
        e = pd.Series([0, 1, 2, 3.5, "a"])
        tr, v = [3, 4], [3.5, "a"]
        check_replace(tr, v, e)
        # again casts to object
        e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
        tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
        check_replace(tr, v, e)
        # casts to object
        e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
        tr, v = [3, 4], [3.5, True]
        check_replace(tr, v, e)
        # test an object with dates + floats + integers + strings
        dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
        result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
        expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
        tm.assert_series_equal(result, expected)
    def test_replace_bool_with_string_no_op(self):
        """Replacing a string absent from a bool Series is a no-op."""
        s = pd.Series([True, False, True])
        result = s.replace("fun", "in-the-sun")
        tm.assert_series_equal(s, result)
    def test_replace_bool_with_string(self):
        # nonexistent elements
        s = pd.Series([True, False, True])
        result = s.replace(True, "2u")
        expected = pd.Series(["2u", False, "2u"])
        tm.assert_series_equal(expected, result)
    def test_replace_bool_with_bool(self):
        s = pd.Series([True, False, True])
        result = s.replace(True, False)
        expected = pd.Series([False] * len(s))
        tm.assert_series_equal(expected, result)
    def test_replace_with_dict_with_bool_keys(self):
        # bool keys in the mapping must match bool values, not truthiness
        s = pd.Series([True, False, True])
        result = s.replace({"asdf": "asdb", True: "yes"})
        expected = pd.Series(["yes", False, "yes"])
        tm.assert_series_equal(result, expected)
    def test_replace_Int_with_na(self, any_int_ea_dtype):
        # GH 38267
        result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
        expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
        tm.assert_series_equal(result, expected)
        result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
        result.replace(1, pd.NA, inplace=True)
        tm.assert_series_equal(result, expected)
    def test_replace2(self):
        """Object-dtype Series with a date index: list and dict replacement."""
        N = 100
        ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
        ser[:5] = np.nan
        ser[6:10] = "foo"
        ser[20:30] = "bar"
        # replace list with a single value
        rs = ser.replace([np.nan, "foo", "bar"], -1)
        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -1).all()
        assert (rs[20:30] == -1).all()
        assert (pd.isna(ser[:5])).all()
        # replace with different values
        rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -2).all()
        assert (rs[20:30] == -3).all()
        assert (pd.isna(ser[:5])).all()
        # replace with different values with 2 lists
        rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)
        # replace inplace
        return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
        assert return_value is None
        assert (ser[:5] == -1).all()
        assert (ser[6:10] == -1).all()
        assert (ser[20:30] == -1).all()
    def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
        # GH 32621, GH#44940
        ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
        expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
        result = ser.replace({"one": "1", "two": "2"})
        tm.assert_series_equal(expected, result)
    def test_replace_with_empty_dictlike(self):
        # GH 15289
        s = pd.Series(list("abcd"))
        tm.assert_series_equal(s, s.replace({}))
        with tm.assert_produces_warning(FutureWarning):
            empty_series = pd.Series([])
            tm.assert_series_equal(s, s.replace(empty_series))
    def test_replace_string_with_number(self):
        # GH 15743
        s = pd.Series([1, 2, 3])
        result = s.replace("2", np.nan)
        expected = pd.Series([1, 2, 3])
        tm.assert_series_equal(expected, result)
    def test_replace_replacer_equals_replacement(self):
        # GH 20656
        # make sure all replacers are matching against original values
        s = pd.Series(["a", "b"])
        expected = pd.Series(["b", "a"])
        result = s.replace({"a": "b", "b": "a"})
        tm.assert_series_equal(expected, result)
    def test_replace_unicode_with_number(self):
        # GH 15743
        s = pd.Series([1, 2, 3])
        result = s.replace("2", np.nan)
        expected = pd.Series([1, 2, 3])
        tm.assert_series_equal(expected, result)
    def test_replace_mixed_types_with_string(self):
        # Testing mixed
        s = pd.Series([1, 2, 3, "4", 4, 5])
        result = s.replace([2, "4"], np.nan)
        expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
        tm.assert_series_equal(expected, result)
    @pytest.mark.parametrize(
        "categorical, numeric",
        [
            (pd.Categorical(["A"], categories=["A", "B"]), [1]),
            (pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
        ],
    )
    def test_replace_categorical(self, categorical, numeric):
        # GH 24971, GH#23305
        ser = pd.Series(categorical)
        result = ser.replace({"A": 1, "B": 2})
        expected = pd.Series(numeric).astype("category")
        if 2 not in expected.cat.categories:
            # i.e. categories should be [1, 2] even if there are no "B"s present
            # GH#44940
            expected = expected.cat.add_categories(2)
        tm.assert_series_equal(expected, result)
    def test_replace_categorical_single(self):
        # GH 26988
        dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
        s = pd.Series(dti)
        c = s.astype("category")
        expected = c.copy()
        expected = expected.cat.add_categories("foo")
        expected[2] = "foo"
        expected = expected.cat.remove_unused_categories()
        assert c[2] != "foo"
        result = c.replace(c[2], "foo")
        tm.assert_series_equal(expected, result)
        assert c[2] != "foo"  # ensure non-inplace call does not alter original
        return_value = c.replace(c[2], "foo", inplace=True)
        assert return_value is None
        tm.assert_series_equal(expected, c)
        first_value = c[0]
        return_value = c.replace(c[1], c[0], inplace=True)
        assert return_value is None
        assert c[0] == c[1] == first_value  # test replacing with existing value
    def test_replace_with_no_overflowerror(self):
        # GH 25616
        # casts to object without Exception from OverflowError
        s = pd.Series([0, 1, 2, 3, 4])
        result = s.replace([3], ["100000000000000000000"])
        expected = pd.Series([0, 1, 2, "100000000000000000000", 4])
        tm.assert_series_equal(result, expected)
        s = pd.Series([0, "100000000000000000000", "100000000000000000001"])
        result = s.replace(["100000000000000000000"], [1])
        expected = pd.Series([0, 1, "100000000000000000001"])
        tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize(
        "ser, to_replace, exp",
        [
            ([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),
            (["1", "2", "3"], {"1": "2", "2": "3", "3": "4"}, ["2", "3", "4"]),
        ],
    )
    def test_replace_commutative(self, ser, to_replace, exp):
        # GH 16051
        # DataFrame.replace() overwrites when values are non-numeric
        series = pd.Series(ser)
        expected = pd.Series(exp)
        result = series.replace(to_replace)
        tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize(
        "ser, exp", [([1, 2, 3], [1, True, 3]), (["x", 2, 3], ["x", True, 3])]
    )
    def test_replace_no_cast(self, ser, exp):
        # GH 9113
        # BUG: replace int64 dtype with bool coerces to int64
        series = pd.Series(ser)
        result = series.replace(2, True)
        expected = pd.Series(exp)
        tm.assert_series_equal(result, expected)
    def test_replace_invalid_to_replace(self):
        # GH 18634
        # API: replace() should raise an exception if invalid argument is given
        series = pd.Series(["a", "b", "c "])
        msg = (
            r"Expecting 'to_replace' to be either a scalar, array-like, "
            r"dict or None, got invalid type.*"
        )
        with pytest.raises(TypeError, match=msg):
            series.replace(lambda x: x.strip())
    @pytest.mark.parametrize("frame", [False, True])
    def test_replace_nonbool_regex(self, frame):
        obj = pd.Series(["a", "b", "c "])
        if frame:
            obj = obj.to_frame()
        msg = "'to_replace' must be 'None' if 'regex' is not a bool"
        with pytest.raises(ValueError, match=msg):
            obj.replace(to_replace=["a"], regex="foo")
    @pytest.mark.parametrize("frame", [False, True])
    def test_replace_empty_copy(self, frame):
        obj = pd.Series([], dtype=np.float64)
        if frame:
            obj = obj.to_frame()
        res = obj.replace(4, 5, inplace=True)
        assert res is None
        res = obj.replace(4, 5, inplace=False)
        tm.assert_equal(res, obj)
        assert res is not obj
    def test_replace_only_one_dictlike_arg(self, fixed_now_ts):
        # GH#33340
        ser = pd.Series([1, 2, "A", fixed_now_ts, True])
        to_replace = {0: 1, 2: "A"}
        value = "foo"
        msg = "Series.replace cannot use dict-like to_replace and non-None value"
        with pytest.raises(ValueError, match=msg):
            ser.replace(to_replace, value)
        to_replace = 1
        value = {0: "foo", 2: "bar"}
        msg = "Series.replace cannot use dict-value and non-None to_replace"
        with pytest.raises(ValueError, match=msg):
            ser.replace(to_replace, value)
    def test_replace_extension_other(self, frame_or_series):
        # https://github.com/pandas-dev/pandas/issues/34530
        obj = frame_or_series(pd.array([1, 2, 3], dtype="Int64"))
        result = obj.replace("", "")  # no exception
        # should not have changed dtype
        tm.assert_equal(obj, result)
    def _check_replace_with_method(self, ser: pd.Series):
        """Shared helper: replace ``ser[1]`` via ``method="pad"`` and check
        Series, DataFrame, and in-place variants preserve dtype."""
        df = ser.to_frame()
        res = ser.replace(ser[1], method="pad")
        expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
        tm.assert_series_equal(res, expected)
        res_df = df.replace(ser[1], method="pad")
        tm.assert_frame_equal(res_df, expected.to_frame())
        ser2 = ser.copy()
        res2 = ser2.replace(ser[1], method="pad", inplace=True)
        assert res2 is None
        tm.assert_series_equal(ser2, expected)
        res_df2 = df.replace(ser[1], method="pad", inplace=True)
        assert res_df2 is None
        tm.assert_frame_equal(df, expected.to_frame())
    def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype):
        arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)
        ser = pd.Series(arr)
        self._check_replace_with_method(ser)
    @pytest.mark.parametrize("as_categorical", [True, False])
    def test_replace_interval_with_method(self, as_categorical):
        # in particular interval that can't hold NA
        idx = pd.IntervalIndex.from_breaks(range(4))
        ser = pd.Series(idx)
        if as_categorical:
            ser = ser.astype("category")
        self._check_replace_with_method(ser)
    @pytest.mark.parametrize("as_period", [True, False])
    @pytest.mark.parametrize("as_categorical", [True, False])
    def test_replace_datetimelike_with_method(self, as_period, as_categorical):
        idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific")
        if as_period:
            idx = idx.tz_localize(None).to_period("D")
        ser = pd.Series(idx)
        ser.iloc[-2] = pd.NaT
        if as_categorical:
            ser = ser.astype("category")
        self._check_replace_with_method(ser)
    def test_replace_with_compiled_regex(self):
        # https://github.com/pandas-dev/pandas/issues/35680
        s = pd.Series(["a", "b", "c"])
        regex = re.compile("^a$")
        result = s.replace({regex: "z"}, regex=True)
        expected = pd.Series(["z", "b", "c"])
        tm.assert_series_equal(result, expected)
    def test_pandas_replace_na(self):
        # GH#43344
        ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA], dtype="string")
        regex_mapping = {
            "AA": "CC",
            "BB": "CC",
            "EE": "CC",
            "CC": "CC-REPL",
        }
        result = ser.replace(regex_mapping, regex=True)
        exp = pd.Series(["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA], dtype="string")
        tm.assert_series_equal(result, exp)
    @pytest.mark.parametrize(
        "dtype, input_data, to_replace, expected_data",
        [
            ("bool", [True, False], {True: False}, [False, False]),
            ("int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
            ("Int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
            ("float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
            ("Float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
            ("string", ["one", "two"], {"one": "1", "two": "2"}, ["1", "2"]),
            (
                pd.IntervalDtype("int64"),
                IntervalArray([pd.Interval(1, 2), pd.Interval(2, 3)]),
                {pd.Interval(1, 2): pd.Interval(10, 20)},
                IntervalArray([pd.Interval(10, 20), pd.Interval(2, 3)]),
            ),
            (
                pd.IntervalDtype("float64"),
                IntervalArray([pd.Interval(1.0, 2.7), pd.Interval(2.8, 3.1)]),
                {pd.Interval(1.0, 2.7): pd.Interval(10.6, 20.8)},
                IntervalArray([pd.Interval(10.6, 20.8), pd.Interval(2.8, 3.1)]),
            ),
            (
                pd.PeriodDtype("M"),
                [pd.Period("2020-05", freq="M")],
                {pd.Period("2020-05", freq="M"): pd.Period("2020-06", freq="M")},
                [pd.Period("2020-06", freq="M")],
            ),
        ],
    )
    def test_replace_dtype(self, dtype, input_data, to_replace, expected_data):
        # GH#33484
        ser = pd.Series(input_data, dtype=dtype)
        result = ser.replace(to_replace)
        expected = pd.Series(expected_data, dtype=dtype)
        tm.assert_series_equal(result, expected)
    def test_replace_string_dtype(self):
        # GH#40732, GH#44940
        ser = pd.Series(["one", "two", np.nan], dtype="string")
        res = ser.replace({"one": "1", "two": "2"})
        expected = pd.Series(["1", "2", np.nan], dtype="string")
        tm.assert_series_equal(res, expected)
        # GH#31644
        ser2 = pd.Series(["A", np.nan], dtype="string")
        res2 = ser2.replace("A", "B")
        expected2 = pd.Series(["B", np.nan], dtype="string")
        tm.assert_series_equal(res2, expected2)
        ser3 = pd.Series(["A", "B"], dtype="string")
        res3 = ser3.replace("A", pd.NA)
        expected3 = pd.Series([pd.NA, "B"], dtype="string")
        tm.assert_series_equal(res3, expected3)
    def test_replace_string_dtype_list_to_replace(self):
        # GH#41215, GH#44940
        ser = pd.Series(["abc", "def"], dtype="string")
        res = ser.replace(["abc", "any other string"], "xyz")
        expected = pd.Series(["xyz", "def"], dtype="string")
        tm.assert_series_equal(res, expected)
    def test_replace_string_dtype_regex(self):
        # GH#31644
        ser = pd.Series(["A", "B"], dtype="string")
        res = ser.replace(r".", "C", regex=True)
        expected = pd.Series(["C", "C"], dtype="string")
        tm.assert_series_equal(res, expected)
    def test_replace_nullable_numeric(self):
        # GH#40732, GH#44940
        floats = pd.Series([1.0, 2.0, 3.999, 4.4], dtype=pd.Float64Dtype())
        assert floats.replace({1.0: 9}).dtype == floats.dtype
        assert floats.replace(1.0, 9).dtype == floats.dtype
        assert floats.replace({1.0: 9.0}).dtype == floats.dtype
        assert floats.replace(1.0, 9.0).dtype == floats.dtype
        res = floats.replace(to_replace=[1.0, 2.0], value=[9.0, 10.0])
        assert res.dtype == floats.dtype
        ints = pd.Series([1, 2, 3, 4], dtype=pd.Int64Dtype())
        assert ints.replace({1: 9}).dtype == ints.dtype
        assert ints.replace(1, 9).dtype == ints.dtype
        assert ints.replace({1: 9.0}).dtype == ints.dtype
        assert ints.replace(1, 9.0).dtype == ints.dtype
        # FIXME: ints.replace({1: 9.5}) raises bc of incorrect _can_hold_element
    @pytest.mark.parametrize("regex", [False, True])
    def test_replace_regex_dtype_series(self, regex):
        # GH-48644
        series = pd.Series(["0"])
        expected = pd.Series([1])
        result = series.replace(to_replace="0", value=1, regex=regex)
        tm.assert_series_equal(result, expected)
| [
"pandas._testing.assert_equal",
"pandas.Interval",
"numpy.arange",
"pytest.mark.parametrize",
"pandas.Int64Dtype",
"numpy.random.randn",
"pandas.IntervalDtype",
"pandas.Float64Dtype",
"pandas._testing.assert_series_equal",
"pytest.raises",
"pandas.Period",
"pandas.PeriodDtype",
"pandas.isna"... | [((14907, 15084), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ser, to_replace, exp"""', "[([1, 2, 3], {(1): 2, (2): 3, (3): 4}, [2, 3, 4]), (['1', '2', '3'], {'1':\n '2', '2': '3', '3': '4'}, ['2', '3', '4'])]"], {}), "('ser, to_replace, exp', [([1, 2, 3], {(1): 2, (2): \n 3, (3): 4}, [2, 3, 4]), (['1', '2', '3'], {'1': '2', '2': '3', '3': '4'\n }, ['2', '3', '4'])])\n", (14930, 15084), False, 'import pytest\n'), ((15445, 15545), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ser, exp"""', "[([1, 2, 3], [1, True, 3]), (['x', 2, 3], ['x', True, 3])]"], {}), "('ser, exp', [([1, 2, 3], [1, True, 3]), (['x', 2, 3\n ], ['x', True, 3])])\n", (15468, 15545), False, 'import pytest\n'), ((16283, 16330), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""frame"""', '[False, True]'], {}), "('frame', [False, True])\n", (16306, 16330), False, 'import pytest\n'), ((16655, 16702), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""frame"""', '[False, True]'], {}), "('frame', [False, True])\n", (16678, 16702), False, 'import pytest\n'), ((18870, 18926), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_categorical"""', '[True, False]'], {}), "('as_categorical', [True, False])\n", (18893, 18926), False, 'import pytest\n'), ((19247, 19298), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_period"""', '[True, False]'], {}), "('as_period', [True, False])\n", (19270, 19298), False, 'import pytest\n'), ((19304, 19360), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""as_categorical"""', '[True, False]'], {}), "('as_categorical', [True, False])\n", (19327, 19360), False, 'import pytest\n'), ((24392, 24439), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""regex"""', '[False, True]'], {}), "('regex', [False, True])\n", (24415, 24439), False, 'import pytest\n'), ((299, 334), 'pandas.Series', 'pd.Series', (["[0, 0, '']"], {'dtype': 'object'}), "([0, 0, ''], dtype=object)\n", 
(308, 334), True, 'import pandas as pd\n'), ((393, 430), 'pandas.Series', 'pd.Series', (['[0, 0, None]'], {'dtype': 'object'}), '([0, 0, None], dtype=object)\n', (402, 430), True, 'import pandas as pd\n'), ((439, 479), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (461, 479), True, 'import pandas._testing as tm\n'), ((837, 876), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (858, 876), True, 'import pandas._testing as tm\n'), ((940, 983), 'pandas.Series', 'pd.Series', (["[10, 20, 30, 'a', 'a', 'b', 'a']"], {}), "([10, 20, 30, 'a', 'a', 'b', 'a'])\n", (949, 983), True, 'import pandas as pd\n'), ((1043, 1089), 'pandas.Series', 'pd.Series', (["[10, 20, 30, None, None, 'b', None]"], {}), "([10, 20, 30, None, None, 'b', None])\n", (1052, 1089), True, 'import pandas as pd\n'), ((1139, 1179), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (1161, 1179), True, 'import pandas._testing as tm\n'), ((1406, 1438), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'ser'], {}), '(res, ser)\n', (1428, 1438), True, 'import pandas._testing as tm\n'), ((1577, 1609), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'ser'], {}), '(res, ser)\n', (1599, 1609), True, 'import pandas._testing as tm\n'), ((1963, 1995), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['ser', 'exp'], {}), '(ser, exp)\n', (1985, 1995), True, 'import pandas._testing as tm\n'), ((2076, 2107), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['rs', 'ser'], {}), '(rs, ser)\n', (2098, 2107), True, 'import pandas._testing as tm\n'), ((2908, 2939), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['rs', 'rs2'], {}), '(rs, rs2)\n', (2930, 2939), True, 'import pandas._testing as tm\n'), ((3253, 3283), 
'pandas.Series', 'pd.Series', (['[np.nan, 0, np.inf]'], {}), '([np.nan, 0, np.inf])\n', (3262, 3283), True, 'import pandas as pd\n'), ((3369, 3427), 'pandas.Series', 'pd.Series', (["[np.nan, 0, 'foo', 'bar', np.inf, None, pd.NaT]"], {}), "([np.nan, 0, 'foo', 'bar', np.inf, None, pd.NaT])\n", (3378, 3427), True, 'import pandas as pd\n'), ((3702, 3734), 'pandas.Series', 'pd.Series', (['datetime_series.index'], {}), '(datetime_series.index)\n', (3711, 3734), True, 'import pandas as pd\n'), ((4131, 4166), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'ser'], {}), '(result, ser)\n', (4153, 4166), True, 'import pandas._testing as tm\n'), ((4182, 4208), 'pandas.Series', 'pd.Series', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (4191, 4208), True, 'import pandas as pd\n'), ((4439, 4470), 'pandas.Series', 'pd.Series', (['[0, np.nan, 2, 3, 4]'], {}), '([0, np.nan, 2, 3, 4])\n', (4448, 4470), True, 'import pandas as pd\n'), ((4549, 4589), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4571, 4589), True, 'import pandas._testing as tm\n'), ((4605, 4636), 'pandas.Series', 'pd.Series', (['[0, np.nan, 2, 3, 4]'], {}), '([0, np.nan, 2, 3, 4])\n', (4614, 4636), True, 'import pandas as pd\n'), ((4713, 4753), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4735, 4753), True, 'import pandas._testing as tm\n'), ((4930, 4954), 'pandas.Timestamp', 'pd.Timestamp', (['"""20120101"""'], {}), "('20120101')\n", (4942, 4954), True, 'import pandas as pd\n'), ((5046, 5086), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5068, 5086), True, 'import pandas._testing as tm\n'), ((5176, 5216), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5198, 5216), True, 'import pandas._testing 
as tm\n'), ((5338, 5374), 'pandas.Timestamp', 'pd.Timestamp', (['"""2015/01/01"""'], {'tz': '"""UTC"""'}), "('2015/01/01', tz='UTC')\n", (5350, 5374), True, 'import pandas as pd\n'), ((5527, 5574), 'pandas.Series', 'pd.Series', (['[pd.Timestamp.min, ts]'], {'dtype': 'object'}), '([pd.Timestamp.min, ts], dtype=object)\n', (5536, 5574), True, 'import pandas as pd\n'), ((5583, 5623), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (5605, 5623), True, 'import pandas._testing as tm\n'), ((5682, 5714), 'pandas.timedelta_range', 'pd.timedelta_range', (['(0)'], {'periods': '(5)'}), '(0, periods=5)\n', (5700, 5714), True, 'import pandas as pd\n'), ((5729, 5743), 'pandas.Series', 'pd.Series', (['tdi'], {}), '(tdi)\n', (5738, 5743), True, 'import pandas as pd\n'), ((5884, 5935), 'pandas.Series', 'pd.Series', (['[ser[0], ser[3], ser[2], ser[3], ser[4]]'], {}), '([ser[0], ser[3], ser[2], ser[3], ser[4]])\n', (5893, 5935), True, 'import pandas as pd\n'), ((5944, 5984), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5966, 5984), True, 'import pandas._testing as tm\n'), ((6045, 6071), 'pandas.Series', 'pd.Series', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (6054, 6071), True, 'import pandas as pd\n'), ((6781, 6811), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['s', 'ser'], {}), '(s, ser)\n', (6803, 6811), True, 'import pandas._testing as tm\n'), ((7533, 7561), 'pandas.Series', 'pd.Series', (['[0, 1, 2, 3.5, 4]'], {}), '([0, 1, 2, 3.5, 4])\n', (7542, 7561), True, 'import pandas as pd\n'), ((7660, 7690), 'pandas.Series', 'pd.Series', (["[0, 1, 2, 3.5, 'a']"], {}), "([0, 1, 2, 3.5, 'a'])\n", (7669, 7690), True, 'import pandas as pd\n'), ((7982, 8029), 'pandas.Series', 'pd.Series', (['[0, 1, 2, 3.5, True]'], {'dtype': '"""object"""'}), "([0, 1, 2, 3.5, True], dtype='object')\n", (7991, 8029), True, 'import pandas 
as pd\n'), ((8403, 8443), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (8425, 8443), True, 'import pandas._testing as tm\n'), ((8508, 8538), 'pandas.Series', 'pd.Series', (['[True, False, True]'], {}), '([True, False, True])\n', (8517, 8538), True, 'import pandas as pd\n'), ((8595, 8628), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['s', 'result'], {}), '(s, result)\n', (8617, 8628), True, 'import pandas._testing as tm\n'), ((8718, 8748), 'pandas.Series', 'pd.Series', (['[True, False, True]'], {}), '([True, False, True])\n', (8727, 8748), True, 'import pandas as pd\n'), ((8807, 8837), 'pandas.Series', 'pd.Series', (["['2u', False, '2u']"], {}), "(['2u', False, '2u'])\n", (8816, 8837), True, 'import pandas as pd\n'), ((8846, 8886), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (8868, 8886), True, 'import pandas._testing as tm\n'), ((8943, 8973), 'pandas.Series', 'pd.Series', (['[True, False, True]'], {}), '([True, False, True])\n', (8952, 8973), True, 'import pandas as pd\n'), ((9069, 9109), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (9091, 9109), True, 'import pandas._testing as tm\n'), ((9176, 9206), 'pandas.Series', 'pd.Series', (['[True, False, True]'], {}), '([True, False, True])\n', (9185, 9206), True, 'import pandas as pd\n'), ((9284, 9316), 'pandas.Series', 'pd.Series', (["['yes', False, 'yes']"], {}), "(['yes', False, 'yes'])\n", (9293, 9316), True, 'import pandas as pd\n'), ((9325, 9365), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (9347, 9365), True, 'import pandas._testing as tm\n'), ((9543, 9592), 'pandas.Series', 'pd.Series', (['[pd.NA, pd.NA]'], {'dtype': 'any_int_ea_dtype'}), '([pd.NA, pd.NA], dtype=any_int_ea_dtype)\n', (9552, 9592), 
True, 'import pandas as pd\n'), ((9601, 9641), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (9623, 9641), True, 'import pandas._testing as tm\n'), ((9774, 9814), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (9796, 9814), True, 'import pandas._testing as tm\n'), ((10660, 10691), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['rs', 'rs2'], {}), '(rs, rs2)\n', (10682, 10691), True, 'import pandas._testing as tm\n'), ((11074, 11136), 'pandas.Series', 'pd.Series', (["['one', 'two', np.nan]"], {'dtype': 'nullable_string_dtype'}), "(['one', 'two', np.nan], dtype=nullable_string_dtype)\n", (11083, 11136), True, 'import pandas as pd\n'), ((11156, 11214), 'pandas.Series', 'pd.Series', (["['1', '2', np.nan]"], {'dtype': 'nullable_string_dtype'}), "(['1', '2', np.nan], dtype=nullable_string_dtype)\n", (11165, 11214), True, 'import pandas as pd\n'), ((11278, 11318), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (11300, 11318), True, 'import pandas._testing as tm\n'), ((11708, 11728), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (11717, 11728), True, 'import pandas as pd\n'), ((11788, 11808), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (11797, 11808), True, 'import pandas as pd\n'), ((11817, 11857), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (11839, 11857), True, 'import pandas._testing as tm\n'), ((12017, 12038), 'pandas.Series', 'pd.Series', (["['a', 'b']"], {}), "(['a', 'b'])\n", (12026, 12038), True, 'import pandas as pd\n'), ((12058, 12079), 'pandas.Series', 'pd.Series', (["['b', 'a']"], {}), "(['b', 'a'])\n", (12067, 12079), True, 'import pandas as pd\n'), ((12137, 12177), 'pandas._testing.assert_series_equal', 
'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (12159, 12177), True, 'import pandas._testing as tm\n'), ((12258, 12278), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (12267, 12278), True, 'import pandas as pd\n'), ((12338, 12358), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (12347, 12358), True, 'import pandas as pd\n'), ((12367, 12407), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (12389, 12407), True, 'import pandas._testing as tm\n'), ((12497, 12528), 'pandas.Series', 'pd.Series', (["[1, 2, 3, '4', 4, 5]"], {}), "([1, 2, 3, '4', 4, 5])\n", (12506, 12528), True, 'import pandas as pd\n'), ((12593, 12632), 'pandas.Series', 'pd.Series', (['[1, np.nan, 3, np.nan, 4, 5]'], {}), '([1, np.nan, 3, np.nan, 4, 5])\n', (12602, 12632), True, 'import pandas as pd\n'), ((12641, 12681), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (12663, 12681), True, 'import pandas._testing as tm\n'), ((13015, 13037), 'pandas.Series', 'pd.Series', (['categorical'], {}), '(categorical)\n', (13024, 13037), True, 'import pandas as pd\n'), ((13353, 13393), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (13375, 13393), True, 'import pandas._testing as tm\n'), ((13475, 13530), 'pandas.date_range', 'pd.date_range', (['"""2016-01-01"""'], {'periods': '(3)', 'tz': '"""US/Pacific"""'}), "('2016-01-01', periods=3, tz='US/Pacific')\n", (13488, 13530), True, 'import pandas as pd\n'), ((13543, 13557), 'pandas.Series', 'pd.Series', (['dti'], {}), '(dti)\n', (13552, 13557), True, 'import pandas as pd\n'), ((13839, 13879), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (13861, 13879), True, 'import pandas._testing as tm\n'), ((14065, 14100), 
'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'c'], {}), '(expected, c)\n', (14087, 14100), True, 'import pandas._testing as tm\n'), ((14450, 14476), 'pandas.Series', 'pd.Series', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (14459, 14476), True, 'import pandas as pd\n'), ((14555, 14603), 'pandas.Series', 'pd.Series', (["[0, 1, 2, '100000000000000000000', 4]"], {}), "([0, 1, 2, '100000000000000000000', 4])\n", (14564, 14603), True, 'import pandas as pd\n'), ((14612, 14652), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (14634, 14652), True, 'import pandas._testing as tm\n'), ((14666, 14730), 'pandas.Series', 'pd.Series', (["[0, '100000000000000000000', '100000000000000000001']"], {}), "([0, '100000000000000000000', '100000000000000000001'])\n", (14675, 14730), True, 'import pandas as pd\n'), ((14809, 14851), 'pandas.Series', 'pd.Series', (["[0, 1, '100000000000000000001']"], {}), "([0, 1, '100000000000000000001'])\n", (14818, 14851), True, 'import pandas as pd\n'), ((14860, 14900), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (14882, 14900), True, 'import pandas._testing as tm\n'), ((15295, 15309), 'pandas.Series', 'pd.Series', (['ser'], {}), '(ser)\n', (15304, 15309), True, 'import pandas as pd\n'), ((15330, 15344), 'pandas.Series', 'pd.Series', (['exp'], {}), '(exp)\n', (15339, 15344), True, 'import pandas as pd\n'), ((15398, 15438), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (15420, 15438), True, 'import pandas._testing as tm\n'), ((15699, 15713), 'pandas.Series', 'pd.Series', (['ser'], {}), '(ser)\n', (15708, 15713), True, 'import pandas as pd\n'), ((15774, 15788), 'pandas.Series', 'pd.Series', (['exp'], {}), '(exp)\n', (15783, 15788), True, 'import pandas as pd\n'), ((15798, 15838), 
'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (15820, 15838), True, 'import pandas._testing as tm\n'), ((16003, 16030), 'pandas.Series', 'pd.Series', (["['a', 'b', 'c ']"], {}), "(['a', 'b', 'c '])\n", (16012, 16030), True, 'import pandas as pd\n'), ((16394, 16421), 'pandas.Series', 'pd.Series', (["['a', 'b', 'c ']"], {}), "(['a', 'b', 'c '])\n", (16403, 16421), True, 'import pandas as pd\n'), ((16763, 16794), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (16772, 16794), True, 'import pandas as pd\n'), ((16976, 17001), 'pandas._testing.assert_equal', 'tm.assert_equal', (['res', 'obj'], {}), '(res, obj)\n', (16991, 17001), True, 'import pandas._testing as tm\n'), ((17131, 17173), 'pandas.Series', 'pd.Series', (["[1, 2, 'A', fixed_now_ts, True]"], {}), "([1, 2, 'A', fixed_now_ts, True])\n", (17140, 17173), True, 'import pandas as pd\n'), ((17929, 17957), 'pandas._testing.assert_equal', 'tm.assert_equal', (['obj', 'result'], {}), '(obj, result)\n', (17944, 17957), True, 'import pandas._testing as tm\n'), ((18182, 18219), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (18204, 18219), True, 'import pandas._testing as tm\n'), ((18457, 18495), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['ser2', 'expected'], {}), '(ser2, expected)\n', (18479, 18495), True, 'import pandas._testing as tm\n'), ((18734, 18788), 'pandas.array', 'pd.array', (['[1, 2, pd.NA, 4]'], {'dtype': 'any_numeric_ea_dtype'}), '([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)\n', (18742, 18788), True, 'import pandas as pd\n'), ((18803, 18817), 'pandas.Series', 'pd.Series', (['arr'], {}), '(arr)\n', (18812, 18817), True, 'import pandas as pd\n'), ((19112, 19126), 'pandas.Series', 'pd.Series', (['idx'], {}), '(idx)\n', (19121, 19126), True, 'import pandas as pd\n'), ((19455, 19510), 'pandas.date_range', 
'pd.date_range', (['"""2016-01-01"""'], {'periods': '(5)', 'tz': '"""US/Pacific"""'}), "('2016-01-01', periods=5, tz='US/Pacific')\n", (19468, 19510), True, 'import pandas as pd\n'), ((19603, 19617), 'pandas.Series', 'pd.Series', (['idx'], {}), '(idx)\n', (19612, 19617), True, 'import pandas as pd\n'), ((19883, 19909), 'pandas.Series', 'pd.Series', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (19892, 19909), True, 'import pandas as pd\n'), ((19926, 19943), 're.compile', 're.compile', (['"""^a$"""'], {}), "('^a$')\n", (19936, 19943), False, 'import re\n'), ((20016, 20042), 'pandas.Series', 'pd.Series', (["['z', 'b', 'c']"], {}), "(['z', 'b', 'c'])\n", (20025, 20042), True, 'import pandas as pd\n'), ((20051, 20091), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (20073, 20091), True, 'import pandas._testing as tm\n'), ((20164, 20232), 'pandas.Series', 'pd.Series', (["['AA', 'BB', 'CC', 'DD', 'EE', '', pd.NA]"], {'dtype': '"""string"""'}), "(['AA', 'BB', 'CC', 'DD', 'EE', '', pd.NA], dtype='string')\n", (20173, 20232), True, 'import pandas as pd\n'), ((20440, 20513), 'pandas.Series', 'pd.Series', (["['CC', 'CC', 'CC-REPL', 'DD', 'CC', '', pd.NA]"], {'dtype': '"""string"""'}), "(['CC', 'CC', 'CC-REPL', 'DD', 'CC', '', pd.NA], dtype='string')\n", (20449, 20513), True, 'import pandas as pd\n'), ((20522, 20557), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (20544, 20557), True, 'import pandas._testing as tm\n'), ((22017, 22051), 'pandas.Series', 'pd.Series', (['input_data'], {'dtype': 'dtype'}), '(input_data, dtype=dtype)\n', (22026, 22051), True, 'import pandas as pd\n'), ((22112, 22149), 'pandas.Series', 'pd.Series', (['expected_data'], {'dtype': 'dtype'}), '(expected_data, dtype=dtype)\n', (22121, 22149), True, 'import pandas as pd\n'), ((22158, 22198), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 
'expected'], {}), '(result, expected)\n', (22180, 22198), True, 'import pandas._testing as tm\n'), ((22284, 22333), 'pandas.Series', 'pd.Series', (["['one', 'two', np.nan]"], {'dtype': '"""string"""'}), "(['one', 'two', np.nan], dtype='string')\n", (22293, 22333), True, 'import pandas as pd\n'), ((22405, 22450), 'pandas.Series', 'pd.Series', (["['1', '2', np.nan]"], {'dtype': '"""string"""'}), "(['1', '2', np.nan], dtype='string')\n", (22414, 22450), True, 'import pandas as pd\n'), ((22459, 22496), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (22481, 22496), True, 'import pandas._testing as tm\n'), ((22532, 22572), 'pandas.Series', 'pd.Series', (["['A', np.nan]"], {'dtype': '"""string"""'}), "(['A', np.nan], dtype='string')\n", (22541, 22572), True, 'import pandas as pd\n'), ((22631, 22671), 'pandas.Series', 'pd.Series', (["['B', np.nan]"], {'dtype': '"""string"""'}), "(['B', np.nan], dtype='string')\n", (22640, 22671), True, 'import pandas as pd\n'), ((22680, 22719), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['res2', 'expected2'], {}), '(res2, expected2)\n', (22702, 22719), True, 'import pandas._testing as tm\n'), ((22736, 22773), 'pandas.Series', 'pd.Series', (["['A', 'B']"], {'dtype': '"""string"""'}), "(['A', 'B'], dtype='string')\n", (22745, 22773), True, 'import pandas as pd\n'), ((22834, 22873), 'pandas.Series', 'pd.Series', (["[pd.NA, 'B']"], {'dtype': '"""string"""'}), "([pd.NA, 'B'], dtype='string')\n", (22843, 22873), True, 'import pandas as pd\n'), ((22882, 22921), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['res3', 'expected3'], {}), '(res3, expected3)\n', (22904, 22921), True, 'import pandas._testing as tm\n'), ((23023, 23064), 'pandas.Series', 'pd.Series', (["['abc', 'def']"], {'dtype': '"""string"""'}), "(['abc', 'def'], dtype='string')\n", (23032, 23064), True, 'import pandas as pd\n'), ((23146, 23187), 'pandas.Series', 'pd.Series', 
(["['xyz', 'def']"], {'dtype': '"""string"""'}), "(['xyz', 'def'], dtype='string')\n", (23155, 23187), True, 'import pandas as pd\n'), ((23196, 23233), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (23218, 23233), True, 'import pandas._testing as tm\n'), ((23315, 23352), 'pandas.Series', 'pd.Series', (["['A', 'B']"], {'dtype': '"""string"""'}), "(['A', 'B'], dtype='string')\n", (23324, 23352), True, 'import pandas as pd\n'), ((23421, 23458), 'pandas.Series', 'pd.Series', (["['C', 'C']"], {'dtype': '"""string"""'}), "(['C', 'C'], dtype='string')\n", (23430, 23458), True, 'import pandas as pd\n'), ((23467, 23504), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (23489, 23504), True, 'import pandas._testing as tm\n'), ((24530, 24546), 'pandas.Series', 'pd.Series', (["['0']"], {}), "(['0'])\n", (24539, 24546), True, 'import pandas as pd\n'), ((24566, 24580), 'pandas.Series', 'pd.Series', (['[1]'], {}), '([1])\n', (24575, 24580), True, 'import pandas as pd\n'), ((24659, 24699), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (24681, 24699), True, 'import pandas._testing as tm\n'), ((507, 523), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (515, 523), True, 'import numpy as np\n'), ((1714, 1732), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (1729, 1732), True, 'import numpy as np\n'), ((2162, 2181), 'pandas._testing.makeDateIndex', 'tm.makeDateIndex', (['N'], {}), '(N)\n', (2178, 2181), True, 'import pandas._testing as tm\n'), ((3915, 3951), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (3928, 3951), False, 'import pytest\n'), ((4311, 4337), 'pandas.Series', 'pd.Series', (['[4, 3, 2, 1, 0]'], {}), '([4, 3, 2, 1, 0])\n', (4320, 4337), True, 'import pandas as pd\n'), ((4836, 4872), 
'pandas.date_range', 'pd.date_range', (['"""20130101"""'], {'periods': '(5)'}), "('20130101', periods=5)\n", (4849, 4872), True, 'import pandas as pd\n'), ((5116, 5140), 'pandas.Timestamp', 'pd.Timestamp', (['"""20130103"""'], {}), "('20130103')\n", (5128, 5140), True, 'import pandas as pd\n'), ((5142, 5166), 'pandas.Timestamp', 'pd.Timestamp', (['"""20120101"""'], {}), "('20120101')\n", (5154, 5166), True, 'import pandas as pd\n'), ((6151, 6177), 'pandas.Series', 'pd.Series', (['[0, 0, 0, 0, 4]'], {}), '([0, 0, 0, 0, 4])\n', (6160, 6177), True, 'import pandas as pd\n'), ((6331, 6357), 'pandas.Series', 'pd.Series', (['[0, 0, 0, 0, 4]'], {}), '([0, 0, 0, 0, 4])\n', (6340, 6357), True, 'import pandas as pd\n'), ((6610, 6646), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (6623, 6646), False, 'import pytest\n'), ((6877, 6889), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6886, 6889), True, 'import numpy as np\n'), ((7148, 7188), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'result'], {}), '(expected, result)\n', (7170, 7188), True, 'import pandas._testing as tm\n'), ((7201, 7237), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 'sc'], {}), '(expected, sc)\n', (7223, 7237), True, 'import pandas._testing as tm\n'), ((8188, 8236), 'pandas.date_range', 'pd.date_range', (['"""1/1/2001"""', '"""1/10/2001"""'], {'freq': '"""D"""'}), "('1/1/2001', '1/10/2001', freq='D')\n", (8201, 8236), True, 'import pandas as pd\n'), ((9914, 9933), 'pandas._testing.makeDateIndex', 'tm.makeDateIndex', (['N'], {}), '(N)\n', (9930, 9933), True, 'import pandas._testing as tm\n'), ((11486, 11527), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (11512, 11527), True, 'import pandas._testing as tm\n'), ((11556, 11569), 'pandas.Series', 'pd.Series', (['[]'], {}), '([])\n', (11565, 11569), True, 'import 
pandas as pd\n'), ((16192, 16227), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (16205, 16227), False, 'import pytest\n'), ((16556, 16592), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (16569, 16592), False, 'import pytest\n'), ((17327, 17363), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (17340, 17363), False, 'import pytest\n'), ((17559, 17595), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (17572, 17595), False, 'import pytest\n'), ((17792, 17826), 'pandas.array', 'pd.array', (['[1, 2, 3]'], {'dtype': '"""Int64"""'}), "([1, 2, 3], dtype='Int64')\n", (17800, 17826), True, 'import pandas as pd\n'), ((656, 667), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (664, 667), True, 'import numpy as np\n'), ((688, 699), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (696, 699), True, 'import numpy as np\n'), ((720, 760), 'numpy.array', 'np.array', (['[0.0, 0.0, None]'], {'dtype': 'object'}), '([0.0, 0.0, None], dtype=object)\n', (728, 760), True, 'import numpy as np\n'), ((1286, 1318), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-12-16 17:31"""'], {}), "('2021-12-16 17:31')\n", (1298, 1318), True, 'import pandas as pd\n'), ((2141, 2159), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (2156, 2159), True, 'import numpy as np\n'), ((2502, 2518), 'pandas.isna', 'pd.isna', (['ser[:5]'], {}), '(ser[:5])\n', (2509, 2518), True, 'import pandas as pd\n'), ((2758, 2774), 'pandas.isna', 'pd.isna', (['ser[:5]'], {}), '(ser[:5])\n', (2765, 2774), True, 'import pandas as pd\n'), ((4985, 5009), 'pandas.Timestamp', 'pd.Timestamp', (['"""20130103"""'], {}), "('20130103')\n", (4997, 5009), True, 'import pandas as pd\n'), ((5011, 5035), 'pandas.Timestamp', 'pd.Timestamp', (['"""20120101"""'], {}), "('20120101')\n", (5023, 5035), True, 'import pandas as 
pd\n'), ((5406, 5442), 'pandas.Timestamp', 'pd.Timestamp', (['"""2015/01/01"""'], {'tz': '"""UTC"""'}), "('2015/01/01', tz='UTC')\n", (5418, 5442), True, 'import pandas as pd\n'), ((7828, 7852), 'pandas.Timestamp', 'pd.Timestamp', (['"""20130101"""'], {}), "('20130101')\n", (7840, 7852), True, 'import pandas as pd\n'), ((7885, 7909), 'pandas.Timestamp', 'pd.Timestamp', (['"""20130101"""'], {}), "('20130101')\n", (7897, 7909), True, 'import pandas as pd\n'), ((9461, 9505), 'pandas.Series', 'pd.Series', (['[0, None]'], {'dtype': 'any_int_ea_dtype'}), '([0, None], dtype=any_int_ea_dtype)\n', (9470, 9505), True, 'import pandas as pd\n'), ((9659, 9700), 'pandas.Series', 'pd.Series', (['[0, 1]'], {'dtype': 'any_int_ea_dtype'}), '([0, 1], dtype=any_int_ea_dtype)\n', (9668, 9700), True, 'import pandas as pd\n'), ((9893, 9911), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (9908, 9911), True, 'import numpy as np\n'), ((10254, 10270), 'pandas.isna', 'pd.isna', (['ser[:5]'], {}), '(ser[:5])\n', (10261, 10270), True, 'import pandas as pd\n'), ((10510, 10526), 'pandas.isna', 'pd.isna', (['ser[:5]'], {}), '(ser[:5])\n', (10517, 10526), True, 'import pandas as pd\n'), ((13104, 13122), 'pandas.Series', 'pd.Series', (['numeric'], {}), '(numeric)\n', (13113, 13122), True, 'import pandas as pd\n'), ((12768, 12812), 'pandas.Categorical', 'pd.Categorical', (["['A']"], {'categories': "['A', 'B']"}), "(['A'], categories=['A', 'B'])\n", (12782, 12812), True, 'import pandas as pd\n'), ((12833, 12882), 'pandas.Categorical', 'pd.Categorical', (["['A', 'B']"], {'categories': "['A', 'B']"}), "(['A', 'B'], categories=['A', 'B'])\n", (12847, 12882), True, 'import pandas as pd\n'), ((21095, 21120), 'pandas.IntervalDtype', 'pd.IntervalDtype', (['"""int64"""'], {}), "('int64')\n", (21111, 21120), True, 'import pandas as pd\n'), ((21369, 21396), 'pandas.IntervalDtype', 'pd.IntervalDtype', (['"""float64"""'], {}), "('float64')\n", (21385, 21396), True, 'import pandas as pd\n'), 
((21669, 21688), 'pandas.PeriodDtype', 'pd.PeriodDtype', (['"""M"""'], {}), "('M')\n", (21683, 21688), True, 'import pandas as pd\n'), ((23638, 23655), 'pandas.Float64Dtype', 'pd.Float64Dtype', ([], {}), '()\n', (23653, 23655), True, 'import pandas as pd\n'), ((24064, 24079), 'pandas.Int64Dtype', 'pd.Int64Dtype', ([], {}), '()\n', (24077, 24079), True, 'import pandas as pd\n'), ((21210, 21227), 'pandas.Interval', 'pd.Interval', (['(1)', '(2)'], {}), '(1, 2)\n', (21221, 21227), True, 'import pandas as pd\n'), ((21229, 21248), 'pandas.Interval', 'pd.Interval', (['(10)', '(20)'], {}), '(10, 20)\n', (21240, 21248), True, 'import pandas as pd\n'), ((21494, 21515), 'pandas.Interval', 'pd.Interval', (['(1.0)', '(2.7)'], {}), '(1.0, 2.7)\n', (21505, 21515), True, 'import pandas as pd\n'), ((21517, 21540), 'pandas.Interval', 'pd.Interval', (['(10.6)', '(20.8)'], {}), '(10.6, 20.8)\n', (21528, 21540), True, 'import pandas as pd\n'), ((21707, 21737), 'pandas.Period', 'pd.Period', (['"""2020-05"""'], {'freq': '"""M"""'}), "('2020-05', freq='M')\n", (21716, 21737), True, 'import pandas as pd\n'), ((21757, 21787), 'pandas.Period', 'pd.Period', (['"""2020-05"""'], {'freq': '"""M"""'}), "('2020-05', freq='M')\n", (21766, 21787), True, 'import pandas as pd\n'), ((21789, 21819), 'pandas.Period', 'pd.Period', (['"""2020-06"""'], {'freq': '"""M"""'}), "('2020-06', freq='M')\n", (21798, 21819), True, 'import pandas as pd\n'), ((21839, 21869), 'pandas.Period', 'pd.Period', (['"""2020-06"""'], {'freq': '"""M"""'}), "('2020-06', freq='M')\n", (21848, 21869), True, 'import pandas as pd\n'), ((21153, 21170), 'pandas.Interval', 'pd.Interval', (['(1)', '(2)'], {}), '(1, 2)\n', (21164, 21170), True, 'import pandas as pd\n'), ((21172, 21189), 'pandas.Interval', 'pd.Interval', (['(2)', '(3)'], {}), '(2, 3)\n', (21183, 21189), True, 'import pandas as pd\n'), ((21282, 21301), 'pandas.Interval', 'pd.Interval', (['(10)', '(20)'], {}), '(10, 20)\n', (21293, 21301), True, 'import pandas as pd\n'), 
((21303, 21320), 'pandas.Interval', 'pd.Interval', (['(2)', '(3)'], {}), '(2, 3)\n', (21314, 21320), True, 'import pandas as pd\n'), ((21429, 21450), 'pandas.Interval', 'pd.Interval', (['(1.0)', '(2.7)'], {}), '(1.0, 2.7)\n', (21440, 21450), True, 'import pandas as pd\n'), ((21452, 21473), 'pandas.Interval', 'pd.Interval', (['(2.8)', '(3.1)'], {}), '(2.8, 3.1)\n', (21463, 21473), True, 'import pandas as pd\n'), ((21574, 21597), 'pandas.Interval', 'pd.Interval', (['(10.6)', '(20.8)'], {}), '(10.6, 20.8)\n', (21585, 21597), True, 'import pandas as pd\n'), ((21599, 21620), 'pandas.Interval', 'pd.Interval', (['(2.8)', '(3.1)'], {}), '(2.8, 3.1)\n', (21610, 21620), True, 'import pandas as pd\n')] |
import pathlib
import imageio
import numpy as np
from PIL import Image
class AttrDict(dict):
  # Dict subclass allowing attribute-style reads: d.key == d['key'].
  # Missing keys raise KeyError (not AttributeError) because __getattr__
  # delegates directly to dict.__getitem__.
  __getattr__ = dict.__getitem__
class staticproperty:
  """Descriptor that evaluates a zero-argument function on attribute access.

  Unlike ``property``, the wrapped function receives neither ``self`` nor
  ``cls``, so the attribute also works when read on the class itself.
  """
  def __init__(self, function):
    # Zero-argument callable evaluated on every attribute access.
    self.function = function
  def __get__(self, instance, owner=None):
    # Ignore instance/owner entirely; just call the function.
    return self.function()
class World:
  """2-D grid world holding one terrain material and at most one object per cell."""

  def __init__(self, area, seed=None):
    """Create an empty world.

    :param area: (width, height) of the grid.
    :param seed: optional seed for the world's random state.
    """
    self._terrain = np.zeros(area, np.uint8)
    # Bidirectional mapping between material names and small int ids;
    # id 0 is reserved for 'no material' (None).
    self._material_names = {0: None}
    self._material_ids = {None: 0}
    # Slot 0 is a sentinel so that a zero entry in _coords means 'no object'.
    self._objects = [None]
    self._coords = np.zeros(area, np.uint32)
    self._random = np.random.RandomState(seed)

  @property
  def area(self):
    return self._terrain.shape

  @property
  def random(self):
    return self._random

  @property
  def objects(self):
    # Skip freed slots (None) left behind by remove().
    yield from (obj for obj in self._objects if obj)

  def reset(self, seed=None):
    # TODO: Not really needed. Can just create new instance.
    self._random = np.random.RandomState(seed)
    self._terrain[:] = 0
    self._objects = [None]
    self._coords[:] = 0

  def _in_bounds(self, pos):
    # True when pos lies inside the grid (half-open ranges [0, w) x [0, h)).
    return (0 <= pos[0] < self.area[0]) and (0 <= pos[1] < self.area[1])

  def __setitem__(self, pos, material):
    if material not in self._material_ids:
      # First time we see this material: assign it the next free id.
      id_ = len(self._material_ids)
      self._material_ids[material] = id_
      self._material_names[id_] = material
    self._terrain[pos] = self._material_ids[material]

  def __getitem__(self, pos):
    """Return (material, obj) at pos.

    Out-of-bounds positions yield material None and obj False; an empty
    in-bounds cell yields obj None.
    """
    if self._in_bounds(pos):
      material = self._material_names[self._terrain[tuple(pos)]]
    else:
      material = None
    if not (0 <= pos[0] < self._coords.shape[0]):
      obj = False
    elif not (0 <= pos[1] < self._coords.shape[1]):
      obj = False
    else:
      obj = self._objects[self._coords[tuple(pos)]]
    return material, obj

  def nearby(self, pos, distance):
    # TODO: Return both nearby materials and objects.
    # Clamp the lower bounds at zero: a negative start index would wrap
    # around and select cells from the opposite edge of the map, returning
    # wrong (often empty) neighborhoods near the border.
    xmin = max(0, pos[0] - distance)
    ymin = max(0, pos[1] - distance)
    ids = set(self._terrain[
        xmin: pos[0] + distance,
        ymin: pos[1] + distance].flatten().tolist())
    return tuple(self._material_names[x] for x in ids)

  def count(self, material):
    """Number of cells covered by the given material (0 if unknown)."""
    if material not in self._material_ids:
      return 0
    return (self._terrain == self._material_ids[material]).sum()

  def add(self, obj):
    assert hasattr(obj, 'pos')
    obj.pos = np.array(obj.pos)
    assert self[obj.pos][1] is None
    self._coords[tuple(obj.pos)] = len(self._objects)
    self._objects.append(obj)

  def remove(self, obj):
    # Free the slot; the generator in `objects` skips None entries.
    self._objects[self._coords[tuple(obj.pos)]] = None
    self._coords[tuple(obj.pos)] = 0

  def move(self, obj, pos):
    pos = np.array(pos)
    assert self[pos][1] is None
    self._coords[tuple(pos)] = self._coords[tuple(obj.pos)]
    self._coords[tuple(obj.pos)] = 0
    obj.pos = pos
class Textures:

  def __init__(self, directory):
    """Load every PNG in `directory`, keyed by the file's stem."""
    self._originals = {}
    self._textures = {}
    for path in pathlib.Path(directory).glob('*.png'):
      img = imageio.imread(path.read_bytes())
      # Swap the first two axes so textures are indexed (x, y, ...).
      axes = (1, 0) + tuple(range(2, len(img.shape)))
      img = img.transpose(axes)
      self._originals[path.stem] = img
      self._textures[(path.stem, img.shape[:2])] = img

  def get(self, name, size):
    """Return texture `name` resized to `size` pixels, caching each size."""
    size = int(size[0]), int(size[1])
    key = name, size
    if key not in self._textures:
      # Resize lazily with nearest-neighbor to keep pixel-art edges crisp.
      img = Image.fromarray(self._originals[name])
      img = img.resize(size[::-1], resample=Image.NEAREST)
      self._textures[key] = np.array(img)
    return self._textures[key]
class GlobalView:
  # Placeholder — not implemented yet.
  pass
class UncoverView:
  # Placeholder — not implemented yet.
  pass
class LocalView:

  def __init__(self, world, textures, unit, grid):
    """Render an egocentric crop of the world around a player.

    :param unit: pixel size of one cell (w, h).
    :param grid: number of cells rendered (w, h); the player is centered.
    """
    self._world = world
    self._textures = textures
    self._unit = np.array(unit)
    self._grid = np.array(grid)
    self._offset = self._grid // 2
    self._area = np.array(self._world.area)
    self._center = None

  def __call__(self, player):
    self._center = np.array(player.pos)
    # Gray (127) background shows through for cells outside the map.
    shape = tuple(self._grid * self._unit) + (3,)
    canvas = np.zeros(shape, np.uint8) + 127
    for col in range(self._grid[0]):
      for row in range(self._grid[1]):
        cell = np.array([col, row])
        pos = self._center + cell - self._offset
        if not _inside((0, 0), pos, self._area):
          continue
        texture = self._textures.get(self._world[pos][0], self._unit)
        _draw(canvas, cell * self._unit, texture)
    # Objects are alpha-blended on top of the terrain layer.
    for obj in self._world.objects:
      pos = obj.pos - self._center + self._offset
      if not _inside((0, 0), pos, self._grid):
        continue
      texture = self._textures.get(obj.texture, self._unit)
      _draw_alpha(canvas, pos * self._unit, texture)
    return canvas
class ItemView:

  def __init__(self, textures, unit, grid):
    """Render an inventory as a grid of item icons with amount labels."""
    self._textures = textures
    self._unit = np.array(unit)
    self._grid = np.array(grid)

  def __call__(self, inventory):
    shape = tuple(self._grid * self._unit) + (3,)
    canvas = np.zeros(shape, np.uint8)
    for index, (item, amount) in enumerate(inventory.items()):
      if amount < 1:
        continue  # Empty stacks are not drawn.
      self._item(canvas, index, item)
      self._amount(canvas, index, amount)
    return canvas

  def _cell(self, index):
    # Top-left cell coordinate of slot `index` (row-major layout).
    return index % self._grid[0], index // self._grid[0]

  def _item(self, canvas, index, item):
    # Icon fills 80% of the cell, inset by 10% on each side.
    pos = (self._cell(index) * self._unit + 0.1 * self._unit).astype(np.int32)
    texture = self._textures.get(item, 0.8 * self._unit)
    _draw_alpha(canvas, pos, texture)

  def _amount(self, canvas, index, amount):
    # Label sits in the lower-right area of the cell.
    pos = (self._cell(index) * self._unit + 0.4 * self._unit).astype(np.int32)
    if amount == float('inf'):
      text = 'infinity'
    elif amount in (1, 2, 3, 4, 5):
      text = str(amount)
    else:
      text = 'unknown'
    texture = self._textures.get(text, 0.6 * self._unit)
    _draw_alpha(canvas, pos, texture)
def _inside(lhs, mid, rhs):
return (lhs[0] <= mid[0] < rhs[0]) and (lhs[1] <= mid[1] < rhs[1])
def _draw(canvas, pos, texture):
(x, y), (w, h) = pos, texture.shape[:2]
if texture.shape[-1] == 4:
texture = texture[..., :3]
canvas[x: x + w, y: y + h] = texture
def _draw_alpha(canvas, pos, texture):
(x, y), (w, h) = pos, texture.shape[:2]
if texture.shape[-1] == 4:
alpha = texture[..., 3:].astype(np.float32) / 255
texture = texture[..., :3].astype(np.float32) / 255
current = canvas[x: x + w, y: y + h].astype(np.float32) / 255
blended = alpha * texture + (1 - alpha) * current
texture = (255 * blended).astype(np.uint8)
canvas[x: x + w, y: y + h] = texture
| [
"numpy.zeros",
"numpy.random.RandomState",
"pathlib.Path",
"numpy.array",
"PIL.Image.fromarray"
] | [((362, 386), 'numpy.zeros', 'np.zeros', (['area', 'np.uint8'], {}), '(area, np.uint8)\n', (370, 386), True, 'import numpy as np\n'), ((505, 530), 'numpy.zeros', 'np.zeros', (['area', 'np.uint32'], {}), '(area, np.uint32)\n', (513, 530), True, 'import numpy as np\n'), ((550, 577), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (571, 577), True, 'import numpy as np\n'), ((895, 922), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (916, 922), True, 'import numpy as np\n'), ((2157, 2174), 'numpy.array', 'np.array', (['obj.pos'], {}), '(obj.pos)\n', (2165, 2174), True, 'import numpy as np\n'), ((2452, 2465), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (2460, 2465), True, 'import numpy as np\n'), ((3560, 3574), 'numpy.array', 'np.array', (['unit'], {}), '(unit)\n', (3568, 3574), True, 'import numpy as np\n'), ((3592, 3606), 'numpy.array', 'np.array', (['grid'], {}), '(grid)\n', (3600, 3606), True, 'import numpy as np\n'), ((3659, 3685), 'numpy.array', 'np.array', (['self._world.area'], {}), '(self._world.area)\n', (3667, 3685), True, 'import numpy as np\n'), ((3760, 3780), 'numpy.array', 'np.array', (['player.pos'], {}), '(player.pos)\n', (3768, 3780), True, 'import numpy as np\n'), ((4582, 4596), 'numpy.array', 'np.array', (['unit'], {}), '(unit)\n', (4590, 4596), True, 'import numpy as np\n'), ((4614, 4628), 'numpy.array', 'np.array', (['grid'], {}), '(grid)\n', (4622, 4628), True, 'import numpy as np\n'), ((3180, 3202), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (3195, 3202), False, 'from PIL import Image\n'), ((3280, 3295), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3288, 3295), True, 'import numpy as np\n'), ((2734, 2757), 'pathlib.Path', 'pathlib.Path', (['directory'], {}), '(directory)\n', (2746, 2757), False, 'import pathlib\n'), ((3959, 3975), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (3967, 3975), True, 'import numpy as 
np\n'), ((4151, 4167), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (4159, 4167), True, 'import numpy as np\n')] |
import logging
from collections import defaultdict
from copy import deepcopy
from typing import Dict, List, Optional, Union, Generator
from uuid import uuid4
import time
import numpy as np
from scipy.spatial.distance import cosine
from tqdm import tqdm
from haystack import Document, Label
from haystack.document_store.base import BaseDocumentStore, DuplicateDocumentError
from haystack.retriever.base import BaseRetriever
from haystack.utils import get_batches_from_generator
logger = logging.getLogger(__name__)
class InMemoryDocumentStore(BaseDocumentStore):
"""
In-memory document store
"""
    def __init__(
        self,
        index: str = "document",
        label_index: str = "label",
        embedding_field: Optional[str] = "embedding",
        embedding_dim: int = 768,
        return_embedding: bool = False,
        similarity: str = "dot_product",
        progress_bar: bool = True,
        duplicate_documents: str = 'overwrite',
    ):
        """
        In-memory store keeping documents per index in plain dicts.

        :param index: The documents are scoped to an index attribute that can be used when writing, querying,
            or deleting documents. This parameter sets the default value for document index.
        :param label_index: The default value of index attribute for the labels.
        :param embedding_field: Name of field containing an embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)
        :param embedding_dim: The size of the embedding vector.
        :param return_embedding: To return document embedding
        :param similarity: The similarity function used to compare document vectors. 'dot_product' is the default since it is
            more performant with DPR embeddings. 'cosine' is recommended if you are using a Sentence BERT model.
        :param progress_bar: Whether to show a tqdm progress bar or not.
            Can be helpful to disable in production deployments to keep the logs clean.
        :param duplicate_documents: Handle duplicates document based on parameter options.
            Parameter options : ( 'skip','overwrite','fail')
            skip: Ignore the duplicates documents
            overwrite: Update any existing documents with the same ID when adding documents.
            fail: an error is raised if the document ID of the document being added already
            exists.
        """
        # save init parameters to enable export of component config as YAML
        self.set_config(
            index=index, label_index=label_index, embedding_field=embedding_field, embedding_dim=embedding_dim,
            return_embedding=return_embedding, similarity=similarity, progress_bar=progress_bar,
            duplicate_documents=duplicate_documents,
        )
        # One dict of id -> document per index name; created lazily on first write.
        self.indexes: Dict[str, Dict] = defaultdict(dict)
        self.index: str = index
        self.label_index: str = label_index
        self.embedding_field = embedding_field
        self.embedding_dim = embedding_dim
        self.return_embedding = return_embedding
        self.similarity = similarity
        self.progress_bar = progress_bar
        self.duplicate_documents = duplicate_documents
    def write_documents(self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, # type: ignore
                        duplicate_documents: Optional[str] = None):
        """
        Indexes documents for later queries.
        :param documents: a list of Python dictionaries or a list of Haystack Document objects.
                          For documents as dictionaries, the format is {"text": "<the-actual-text>"}.
                          Optionally: Include meta data via {"text": "<the-actual-text>",
                          "meta": {"name": "<some-document-name>, "author": "somebody", ...}}
                          It can be used for filtering and is accessible in the responses of the Finder.
        :param index: write documents to a custom namespace. For instance, documents for evaluation can be indexed in a
                      separate index than the documents for search.
        :param duplicate_documents: Handle duplicates document based on parameter options.
                                    Parameter options : ( 'skip','overwrite','fail')
                                    skip: Ignore the duplicates documents
                                    overwrite: Update any existing documents with the same ID when adding documents.
                                    fail: an error is raised if the document ID of the document being added already
                                    exists.
        :raises DuplicateDocumentError: Exception trigger on duplicate document
        :return: None
        """
        # Fall back to the defaults configured at construction time.
        index = index or self.index
        duplicate_documents = duplicate_documents or self.duplicate_documents
        assert duplicate_documents in self.duplicate_documents_options, \
            f"duplicate_documents parameter must be {', '.join(self.duplicate_documents_options)}"
        field_map = self._create_document_field_map()
        # Deep copy so the caller's dicts/Documents are never mutated by this store.
        documents = deepcopy(documents)
        # Normalize: plain dicts become Document objects; Documents pass through unchanged.
        documents_objects = [Document.from_dict(d, field_map=field_map) if isinstance(d, dict) else d for d in
                             documents]
        for document in documents_objects:
            if document.id in self.indexes[index]:
                if duplicate_documents == "fail":
                    raise DuplicateDocumentError(f"Document with id '{document.id} already "
                                                 f"exists in index '{index}'")
                elif duplicate_documents == "skip":
                    logger.warning(f"Duplicate Documents: Document with id '{document.id} already exists in index "
                                   f"'{index}'")
                    continue
            # 'overwrite' (and new ids) fall through and (re)store the document.
            self.indexes[index][document.id] = document
def _create_document_field_map(self):
return {
self.embedding_field: "embedding",
}
    def write_labels(self, labels: Union[List[dict], List[Label]], index: Optional[str] = None):
        """Write annotation labels into document store.
        :param labels: a list of Python dictionaries or a list of Haystack Label objects.
        :param index: write labels to a custom namespace; defaults to self.label_index.
        """
        index = index or self.label_index
        # Normalize: plain dicts become Label objects; Labels pass through unchanged.
        label_objects = [Label.from_dict(l) if isinstance(l, dict) else l for l in labels]
        # Duplicate ids only produce a warning; the last write wins below.
        duplicate_ids: list = [label.id for label in self._get_duplicate_labels(label_objects, index=index)]
        if len(duplicate_ids) > 0:
            logger.warning(f"Duplicate Label IDs: Inserting a Label whose id already exists in this document store."
                           f" This will overwrite the old Label. Please make sure Label.id is a unique identifier of"
                           f" the answer annotation and not the question."
                           f" Problematic ids: {','.join(duplicate_ids)}")
        for label in label_objects:
            # create timestamps if not available yet
            if not label.created_at:
                label.created_at = time.strftime("%Y-%m-%d %H:%M:%S")
            if not label.updated_at:
                label.updated_at = label.created_at
            self.indexes[index][label.id] = label
def get_document_by_id(self, id: str, index: Optional[str] = None) -> Optional[Document]:
"""Fetch a document by specifying its text id string"""
index = index or self.index
documents = self.get_documents_by_id([id], index=index)
if documents:
return documents[0]
else:
return None
def get_documents_by_id(self, ids: List[str], index: Optional[str] = None) -> List[Document]: # type: ignore
"""Fetch documents by specifying a list of text id strings"""
index = index or self.index
documents = [self.indexes[index][id] for id in ids]
return documents
def query_by_embedding(self,
query_emb: np.ndarray,
filters: Optional[Dict[str, List[str]]] = None,
top_k: int = 10,
index: Optional[str] = None,
return_embedding: Optional[bool] = None) -> List[Document]:
"""
Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.
:param query_emb: Embedding of the query (e.g. gathered from DPR)
:param filters: Optional filters to narrow down the search space.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param top_k: How many documents to return
:param index: Index name for storing the docs and metadata
:param return_embedding: To return document embedding
:return:
"""
from numpy import dot
from numpy.linalg import norm
index = index or self.index
if return_embedding is None:
return_embedding = self.return_embedding
if query_emb is None:
return []
document_to_search = self.get_all_documents(index=index, filters=filters, return_embedding=True)
candidate_docs = []
for doc in document_to_search:
curr_meta = deepcopy(doc.meta)
new_document = Document(
id=doc.id,
text=doc.text,
meta=curr_meta,
embedding=doc.embedding
)
new_document.embedding = doc.embedding if return_embedding is True else None
if self.similarity == "dot_product":
score = dot(query_emb, doc.embedding) / (
norm(query_emb) * norm(doc.embedding)
)
elif self.similarity == "cosine":
# cosine similarity score = 1 - cosine distance
score = 1 - cosine(query_emb, doc.embedding)
new_document.score = (score + 1) / 2
candidate_docs.append(new_document)
return sorted(candidate_docs, key=lambda x: x.score if x.score is not None else 0.0, reverse=True)[0:top_k]
    def update_embeddings(
        self,
        retriever: BaseRetriever,
        index: Optional[str] = None,
        filters: Optional[Dict[str, List[str]]] = None,
        update_existing_embeddings: bool = True,
        batch_size: int = 10_000,
    ):
        """
        Updates the embeddings in the document store using the encoding model specified in the retriever.
        This can be useful if want to add or change the embeddings for your documents (e.g. after changing the retriever config).
        :param retriever: Retriever to use to get embeddings for text
        :param index: Index name for which embeddings are to be updated. If set to None, the default self.index is used.
        :param update_existing_embeddings: Whether to update existing embeddings of the documents. If set to False,
                                           only documents without embeddings are processed. This mode can be used for
                                           incremental updating of embeddings, wherein, only newly indexed documents
                                           get processed.
        :param filters: Optional filters to narrow down the documents for which embeddings are to be updated.
                        Example: {"name": ["some", "more"], "category": ["only_one"]}
        :param batch_size: When working with large number of documents, batching can help reduce memory footprint.
        :return: None
        """
        if index is None:
            index = self.index

        if not self.embedding_field:
            raise RuntimeError("Specify the arg embedding_field when initializing InMemoryDocumentStore()")

        # TODO Index embeddings every X batches to avoid OOM for huge document collections
        result = self._query(
            index=index, filters=filters, only_documents_without_embedding=not update_existing_embeddings
        )
        document_count = len(result)
        logger.info(f"Updating embeddings for {document_count} docs ...")
        batched_documents = get_batches_from_generator(result, batch_size)
        with tqdm(total=document_count, disable=not self.progress_bar, position=0, unit=" docs",
                  desc="Updating Embedding") as progress_bar:
            for document_batch in batched_documents:
                # Retriever computes one embedding per document in the batch.
                embeddings = retriever.embed_passages(document_batch)  # type: ignore
                assert len(document_batch) == len(embeddings)
                # Fail early if the model's output dim disagrees with the store config.
                if embeddings[0].shape[0] != self.embedding_dim:
                    raise RuntimeError(f"Embedding dim. of model ({embeddings[0].shape[0]})"
                                       f" doesn't match embedding dim. in DocumentStore ({self.embedding_dim})."
                                       "Specify the arg `embedding_dim` when initializing InMemoryDocumentStore()")
                # Attach embeddings directly to the stored documents.
                for doc, emb in zip(document_batch, embeddings):
                    self.indexes[index][doc.id].embedding = emb
                progress_bar.set_description_str("Documents Processed")
                progress_bar.update(batch_size)
def get_document_count(self, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None) -> int:
"""
Return the number of documents in the document store.
"""
documents = self.get_all_documents(index=index, filters=filters)
return len(documents)
def get_embedding_count(self, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None) -> int:
"""
Return the count of embeddings in the document store.
"""
documents = self.get_all_documents(filters=filters, index=index)
embedding_count = sum(doc.embedding is not None for doc in documents)
return embedding_count
def get_label_count(self, index: Optional[str] = None) -> int:
"""
Return the number of labels in the document store
"""
index = index or self.label_index
return len(self.indexes[index].items())
def _query(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
only_documents_without_embedding: bool = False,
batch_size: int = 10_000,
):
index = index or self.index
documents = deepcopy(list(self.indexes[index].values()))
filtered_documents = []
if return_embedding is None:
return_embedding = self.return_embedding
if return_embedding is False:
for doc in documents:
doc.embedding = None
if only_documents_without_embedding:
documents = [doc for doc in documents if doc.embedding is None]
if filters:
for doc in documents:
is_hit = True
for key, values in filters.items():
if doc.meta.get(key):
if doc.meta[key] not in values:
is_hit = False
else:
is_hit = False
if is_hit:
filtered_documents.append(doc)
else:
filtered_documents = documents
return filtered_documents
def get_all_documents(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> List[Document]:
result = self.get_all_documents_generator(index=index, filters=filters, return_embedding=return_embedding)
documents = list(result)
return documents
def get_all_documents_generator(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> Generator[Document, None, None]:
"""
Get all documents from the document store. The methods returns a Python Generator that yields individual
documents.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
"""
result = self._query(
index=index,
filters=filters,
return_embedding=return_embedding,
batch_size=batch_size
)
yield from result
def get_all_labels(self, index: str = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Label]:
"""
Return all labels in the document store
"""
index = index or self.label_index
if filters:
result = []
for label in self.indexes[index].values():
label_dict = label.to_dict()
is_hit = True
for key, values in filters.items():
if label_dict[key] not in values:
is_hit = False
break
if is_hit:
result.append(label)
else:
result = list(self.indexes[index].values())
return result
    def delete_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None):
        """
        Delete documents in an index. All documents are deleted if no filters are passed.
        Deprecated: use delete_documents() instead.
        :param index: Index name to delete the document from.
        :param filters: Optional filters to narrow down the documents to be deleted.
        :return: None
        """
        # Thin wrapper kept for backward compatibility; warns and delegates.
        logger.warning(
            """DEPRECATION WARNINGS:
                1. delete_all_documents() method is deprecated, please use delete_documents method
                For more details, please refer to the issue: https://github.com/deepset-ai/haystack/issues/1045
                """
        )
        self.delete_documents(index, filters)
def delete_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None):
"""
Delete documents in an index. All documents are deleted if no filters are passed.
:param index: Index name to delete the document from.
:param filters: Optional filters to narrow down the documents to be deleted.
:return: None
"""
if filters:
raise NotImplementedError("Delete by filters is not implemented for InMemoryDocumentStore.")
index = index or self.index
self.indexes[index] = {}
| [
"copy.deepcopy",
"tqdm.tqdm",
"haystack.Label.from_dict",
"scipy.spatial.distance.cosine",
"time.strftime",
"logging.getLogger",
"haystack.document_store.base.DuplicateDocumentError",
"collections.defaultdict",
"haystack.Document.from_dict",
"haystack.utils.get_batches_from_generator",
"numpy.li... | [((489, 516), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (506, 516), False, 'import logging\n'), ((2974, 2991), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (2985, 2991), False, 'from collections import defaultdict\n'), ((5266, 5285), 'copy.deepcopy', 'deepcopy', (['documents'], {}), '(documents)\n', (5274, 5285), False, 'from copy import deepcopy\n'), ((12242, 12288), 'haystack.utils.get_batches_from_generator', 'get_batches_from_generator', (['result', 'batch_size'], {}), '(result, batch_size)\n', (12268, 12288), False, 'from haystack.utils import get_batches_from_generator\n'), ((9347, 9365), 'copy.deepcopy', 'deepcopy', (['doc.meta'], {}), '(doc.meta)\n', (9355, 9365), False, 'from copy import deepcopy\n'), ((9393, 9468), 'haystack.Document', 'Document', ([], {'id': 'doc.id', 'text': 'doc.text', 'meta': 'curr_meta', 'embedding': 'doc.embedding'}), '(id=doc.id, text=doc.text, meta=curr_meta, embedding=doc.embedding)\n', (9401, 9468), False, 'from haystack import Document, Label\n'), ((12302, 12417), 'tqdm.tqdm', 'tqdm', ([], {'total': 'document_count', 'disable': '(not self.progress_bar)', 'position': '(0)', 'unit': '""" docs"""', 'desc': '"""Updating Embedding"""'}), "(total=document_count, disable=not self.progress_bar, position=0, unit=\n ' docs', desc='Updating Embedding')\n", (12306, 12417), False, 'from tqdm import tqdm\n'), ((5315, 5357), 'haystack.Document.from_dict', 'Document.from_dict', (['d'], {'field_map': 'field_map'}), '(d, field_map=field_map)\n', (5333, 5357), False, 'from haystack import Document, Label\n'), ((6397, 6415), 'haystack.Label.from_dict', 'Label.from_dict', (['l'], {}), '(l)\n', (6412, 6415), False, 'from haystack import Document, Label\n'), ((7155, 7189), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (7168, 7189), False, 'import time\n'), ((5608, 5705), 'haystack.document_store.base.DuplicateDocumentError', 
'DuplicateDocumentError', (['f"""Document with id \'{document.id} already exists in index \'{index}\'"""'], {}), '(\n f"Document with id \'{document.id} already exists in index \'{index}\'")\n', (5630, 5705), False, 'from haystack.document_store.base import BaseDocumentStore, DuplicateDocumentError\n'), ((9710, 9739), 'numpy.dot', 'dot', (['query_emb', 'doc.embedding'], {}), '(query_emb, doc.embedding)\n', (9713, 9739), False, 'from numpy import dot\n'), ((9764, 9779), 'numpy.linalg.norm', 'norm', (['query_emb'], {}), '(query_emb)\n', (9768, 9779), False, 'from numpy.linalg import norm\n'), ((9782, 9801), 'numpy.linalg.norm', 'norm', (['doc.embedding'], {}), '(doc.embedding)\n', (9786, 9801), False, 'from numpy.linalg import norm\n'), ((9958, 9990), 'scipy.spatial.distance.cosine', 'cosine', (['query_emb', 'doc.embedding'], {}), '(query_emb, doc.embedding)\n', (9964, 9990), False, 'from scipy.spatial.distance import cosine\n')] |
# load sentera csv format
import csv
import fileinput
import math
import numpy as np
import os
import re
import navpy
d2r = math.pi / 180.0  # degrees -> radians conversion factor
g = 9.81               # standard gravity (m/s^2); scales accelerometer g-units to m/s^2
# empty class we'll fill in with data members (attributes attached dynamically per row)
class Record: pass
def isFloat(string):
    """Return True when *string* parses as a float, False otherwise."""
    try:
        float(string)
    except ValueError:
        return False
    return True
def load(flight_dir):
    """Load Sentera-format flight logs from *flight_dir*.

    Reads imu.csv and gps.csv (and filter-post-ins.txt when present) and
    returns a dict with keys 'imu', 'gps' and 'filter', each a list of
    Record objects.  Rows that fail to parse are reported and skipped.
    """
    result = {}
    gps_data = []     # NOTE(review): unused local; kept as-is
    filter_data = []  # NOTE(review): unused local; kept as-is
    # load imu/gps data files
    imu_file = os.path.join(flight_dir, "imu.csv")
    gps_file = os.path.join(flight_dir, "gps.csv")
    filter_post = os.path.join(flight_dir, "filter-post-ins.txt")
    # calibration by plotting and eye-balling (just finding center point, no
    # normalization cooked into calibration.)
    #hx_coeffs = np.array([ 1.0, -1.5], dtype=np.float64)
    #hy_coeffs = np.array([ 1.0, -78.5], dtype=np.float64)
    #hz_coeffs = np.array([ 1.0, -156.5], dtype=np.float64)
    #~/Projects/PILLS/Phantom\ 3\ Flight\ Data/2016-03-22\ --\ imagery_0012\ -\ 400\ ft\ survey
    #hx_coeffs = np.array([ 0.01857771, -0.18006661], dtype=np.float64)
    #hy_coeffs = np.array([ 0.01856938, -1.20854406], dtype=np.float64)
    #hz_coeffs = np.array([ 0.01559645, 2.81011976], dtype=np.float64)
    # ~/Projects/PILLS/Phantom\ 3\ Flight\ Data/imagery_0009 - 0012
    #hx_coeffs = np.array([ 0.01789447, 3.70605872], dtype=np.float64)
    #hy_coeffs = np.array([ 0.017071, 0.7125617], dtype=np.float64)
    #hz_coeffs = np.array([ 0.01447557, -6.54621951], dtype=np.float64)
    # ~/Projects/PILLS/2016-04-04\ --\ imagery_0002
    # ~/Projects/PILLS/2016-04-14\ --\ imagery_0003
    # ~/Projects/PILLS/2016-04-14\ --\ imagery_0004
    #hx_coeffs = np.array([ 0.01658555, -0.07790598], dtype=np.float64)
    #hy_coeffs = np.array([ 0.01880532, -1.26548151], dtype=np.float64)
    #hz_coeffs = np.array([ 0.01339084, 2.61905809], dtype=np.float64)
    # ~/Projects/PILLS/2016-05-12\ --\ imagery_0004
    #hx_coeffs = np.array([ 0.01925678, 0.01527908], dtype=np.float64)
    #hy_coeffs = np.array([ 0.01890112, -1.18040666], dtype=np.float64)
    #hz_coeffs = np.array([ 0.01645011, 2.87769626], dtype=np.float64)
    #hx_func = np.poly1d(hx_coeffs)
    #hy_func = np.poly1d(hy_coeffs)
    #hz_func = np.poly1d(hz_coeffs)
    # ~/Projects/PILLS/2016-06-29\ --\ calibration_0002/
    # mag_affine = np.array(
    #     [[ 0.0223062041, -0.0002700799, -0.0001325525,  1.2016235718],
    #      [-0.0002700799,  0.0229484854,  0.0000356172,  0.1177744077],
    #      [-0.0001325525,  0.0000356172,  0.0206129279, -3.2713740483],
    #      [ 0.          ,  0.          ,  0.          ,  1.          ]]
    # )
    # Phantom 3 - Aug 2016 (ellipse cal)
    # mag_affine = np.array(
    #     [[ 0.0189725067,  0.0000203615,  0.0002139272, -0.0134053645],
    #      [ 0.0000760692,  0.0180178765,  0.0000389461, -1.044762755 ],
    #      [ 0.0002417847,  0.0000458039,  0.0171450614,  2.647911793 ],
    #      [ 0.          ,  0.          ,  0.          ,  1.          ]]
    # )
    # Phantom 3 - Aug 2016 (ekf cal)
    # mag_affine = np.array(
    #     [[ 0.0181297161,  0.000774339, -0.002037224 , -0.2576406372],
    #      [ 0.0002434548,  0.018469032,  0.0016475328, -0.8452362072],
    #      [ 0.0000145964,  0.000267444,  0.0159433791,  2.5630653789],
    #      [ 0.          ,  0.         ,  0.          ,  1.          ]]
    # )
    # 2017-06-07_23-43-50
    # Affine magnetometer calibration currently in use (4x4 homogeneous form).
    mag_affine = np.array(
        [[ 0.0182094965,  0.0001891445,  0.0005079058, -1.0275778093],
         [ 0.0001891445,  0.0188836673,  0.0003014306, -0.7472003813],
         [ 0.0005079058,  0.0003014306,  0.0176589615,  0.9988130618],
         [ 0.          ,  0.          ,  0.          ,  1.          ]]
    )
    print(mag_affine)
    result['imu'] = []
    with open(imu_file, 'r') as fimu:
        reader = csv.DictReader(fimu)
        for row in reader:
            # print(row)
            imu = Record()
            try:
                # timestamps are logged in ns since boot; convert to seconds
                imu.time = float(row['Time Stamp (ns since boot)']) / 1000000000.0
                imu.p = float(row['xGyro (rad/s)'])
                imu.q = float(row['yGyro (rad/s)'])
                imu.r = float(row['zGyro (rad/s)'])
                imu.ax = float(row[' xAccel (g)'])*g
                imu.ay = float(row[' yAccel (g)'])*g
                imu.az = float(row[' zAccel (g)'])*g
                # NOTE(review): hy and hz also read ' xMag (uT)' -- looks like a
                # placeholder since the columns are "not logged"; confirm before
                # using magnetometer data.
                imu.hx = float(row[' xMag (uT)']) # not logged
                imu.hy = float(row[' xMag (uT)']) # not logged
                imu.hz = float(row[' xMag (uT)']) # not logged
                temp = row[' Temp (C)']
                if temp != 'N/A':
                    imu.temp = float(row[' Temp (C)']) # not logged
                else:
                    imu.temp = 0.0
                result['imu'].append( imu )
            # NOTE(review): bare except silently drops any malformed row and can
            # also mask programming errors.
            except:
                print('[IMU] failed to parse incomplete row:', row)
    result['gps'] = []
    with open(gps_file, 'r') as fgps:
        reader = csv.DictReader(fgps)
        for row in reader:
            #print(row)
            gps = Record()
            try:
                gps.time = float(row['Timestamp (ns since boot)']) / 1000000000.0
                gps.unix_sec = gps.time # hack
                #gps.lat = float(row['Lat (deg)'])
                #gps.lon = float(row['Lon (deg)'])
                #gps.alt = float(row['Alt Geoid EGM 96 (m)'])
                # ECEF position/velocity are logged in centimeters (cm, cm/s)
                ecefx = float(row['ecefX (cm)'])
                ecefy = float(row['ecefY (cm)'])
                ecefz = float(row['ecefZ (cm)'])
                ecefvx = float(row['ecefVX (cm/s)'])
                ecefvy = float(row['ecefVY (cm/s)'])
                ecefvz = float(row['ecefVZ (cm/s)'])
                gps.sats = int(row['Num SVs Used'])
                # wgs84 position
                pos_source = 'llh'  # 'llh' or 'ecef'
                llh = navpy.ecef2lla([float(ecefx)/100.0,
                                      float(ecefy)/100.0,
                                      float(ecefz)/100.0], "deg")
                gps.lat = llh[0]
                gps.lon = llh[1]
                gps.alt = llh[2]
                # velocity
                ned = navpy.ecef2ned([float(ecefvx)/100.0,
                                      float(ecefvy)/100.0,
                                      float(ecefvz)/100.0],
                                     llh[0], llh[1], llh[2])
                gps.vn = ned[0]
                gps.ve = ned[1]
                gps.vd = ned[2]
                # only keep rows with a full 3D fix
                if int(row['Fix Type']) == 3:
                    result['gps'].append(gps)
            # NOTE(review): bare except -- see note in the IMU loop above.
            except:
                print('[GPS] failed to parse incomplete row:', row)
    result['filter'] = []
    # load filter (post process) records if they exist (for comparison
    # purposes)
    if os.path.exists(filter_post):
        print('found filter-post-ins.txt, using that for ekf results')
        result['filter'] = []
        # NOTE(review): fileinput stream is never closed explicitly here.
        ffilter = fileinput.input(filter_post)
        for line in ffilter:
            tokens = re.split('[,\s]+', line.rstrip())
            lat = float(tokens[1])
            lon = float(tokens[2])
            # skip rows before the filter converged (near-zero lat/lon)
            if abs(lat) > 0.0001 and abs(lon) > 0.0001:
                filterpt = Record()
                filterpt.time = float(tokens[0])
                filterpt.lat = lat*d2r
                filterpt.lon = lon*d2r
                filterpt.alt = float(tokens[3])
                filterpt.vn = float(tokens[4])
                filterpt.ve = float(tokens[5])
                filterpt.vd = float(tokens[6])
                filterpt.phi = float(tokens[7])*d2r
                filterpt.the = float(tokens[8])*d2r
                # normalize heading into [-180, 180] before converting to radians
                psi = float(tokens[9])
                if psi > 180.0:
                    psi = psi - 360.0
                if psi < -180.0:
                    psi = psi + 360.0
                filterpt.psi = psi*d2r
                result['filter'].append(filterpt)
    return result
def save_filter_result(filename, data_store):
    """Write the filter solution to *filename*, one CSV row per time sample.

    Columns: time, lat (deg), lon (deg), alt, vn, ve, vd, phi (deg),
    the (deg), psi (deg), 0.  Angles in *data_store* are radians and are
    converted to degrees here.
    """
    # 'with' guarantees the file is closed even if formatting raises
    # (the previous version leaked the handle on error).
    with open(filename, 'w') as f:
        size = len(data_store.time)
        for i in range(size):
            line = "%.3f,%.10f,%.10f,%.2f,%.4f,%.4f,%.4f,%.2f,%.2f,%.2f,0" % \
                   (data_store.time[i],
                    data_store.lat[i]*180.0/math.pi,
                    data_store.lon[i]*180.0/math.pi,
                    data_store.alt[i], data_store.vn[i],
                    data_store.ve[i], data_store.vd[i],
                    data_store.phi[i]*180.0/math.pi,
                    data_store.the[i]*180.0/math.pi,
                    data_store.psi[i]*180.0/math.pi)
            f.write(line + '\n')
def rewrite_image_metadata_txt(base_dir, data_store):
    """Rewrite image-metadata.txt as image-metadata-ekf.txt using EKF poses.

    For each image row, find the first nav sample at/after the image
    timestamp and emit the EKF position/attitude in place of the logged one.
    Does nothing when image-metadata.txt is absent.
    """
    meta_file = os.path.join(base_dir, 'image-metadata.txt')
    new_file = os.path.join(base_dir, 'image-metadata-ekf.txt')
    if not os.path.isfile(meta_file):
        return
    # Context managers close both streams even on error (the previous
    # version leaked f_out and the fileinput stream on exceptions).
    with open(new_file, 'w') as f_out, fileinput.input(meta_file) as f_in:
        f_out.write('File Name,Lat (decimal degrees),Lon (decimal degrees),Alt (meters MSL),Yaw (decimal degrees),Pitch (decimal degrees),Roll (decimal degrees),GPS Time (us since epoch)\n')
        i = 0
        for line in f_in:
            # skip the header row of the input file
            if fileinput.isfirstline():
                continue
            tokens = line.split(',')
            image = tokens[0]
            (lat, lon, alt, psi, the, phi, time_orig) = map(float, tokens[1:])
            time_sec = time_orig / 1000000.0  # convert us -> seconds
            # NOTE(review): assumes data_store.time extends past every image
            # timestamp; otherwise this raises IndexError -- confirm upstream.
            while data_store.time[i] < time_sec:
                i += 1
            line = "%s,%.8f,%.8f,%.4f,%.4f,%.4f,%.4f,%.0f" % \
                   (image,
                    data_store.nav_lat[i]*180.0/math.pi,
                    data_store.nav_lon[i]*180.0/math.pi,
                    data_store.nav_alt[i],
                    data_store.psi[i]*180.0/math.pi,
                    data_store.the[i]*180.0/math.pi,
                    data_store.phi[i]*180.0/math.pi,
                    time_orig)
            f_out.write(line + '\n')
def rewrite_pix4d_csv(base_dir, data_store):
    """Write pix4d-ekf.csv from image-metadata.txt using EKF poses.

    Same matching logic as rewrite_image_metadata_txt, but in the Pix4D
    column order (roll, pitch, yaw) and without the timestamp column.
    Does nothing when image-metadata.txt is absent.
    """
    meta_file = os.path.join(base_dir, 'image-metadata.txt')
    pix4d_file = os.path.join(base_dir, 'pix4d-ekf.csv')
    if not os.path.isfile(meta_file):
        return
    # Context managers close both streams even on error (the previous
    # version leaked f_out and the fileinput stream on exceptions).
    with open(pix4d_file, 'w') as f_out, fileinput.input(meta_file) as f_in:
        f_out.write('File Name,Lat (decimal degrees),Lon (decimal degrees),Alt (meters MSL),Roll (decimal degrees),Pitch (decimal degrees),Yaw (decimal degrees)\n')
        i = 0
        for line in f_in:
            # skip the header row of the input file
            if fileinput.isfirstline():
                continue
            tokens = line.split(',')
            image = tokens[0]
            (lat, lon, alt, psi, the, phi, time) = map(float, tokens[1:8])
            time /= 1000000.0  # convert us -> seconds
            # NOTE(review): assumes data_store.time extends past every image
            # timestamp; otherwise this raises IndexError -- confirm upstream.
            while data_store.time[i] < time:
                print(i, data_store.time[i], '<', time)
                i += 1
            line = "%s,%.8f,%.8f,%.4f,%.4f,%.4f,%.4f" % \
                   (image,
                    data_store.nav_lat[i]*180.0/math.pi,
                    data_store.nav_lon[i]*180.0/math.pi,
                    data_store.nav_alt[i],
                    data_store.phi[i]*180.0/math.pi,
                    data_store.the[i]*180.0/math.pi,
                    data_store.psi[i]*180.0/math.pi)
            f_out.write(line + '\n')
| [
"fileinput.isfirstline",
"fileinput.input",
"csv.DictReader",
"os.path.exists",
"os.path.isfile",
"numpy.array",
"os.path.join"
] | [((468, 503), 'os.path.join', 'os.path.join', (['flight_dir', '"""imu.csv"""'], {}), "(flight_dir, 'imu.csv')\n", (480, 503), False, 'import os\n'), ((519, 554), 'os.path.join', 'os.path.join', (['flight_dir', '"""gps.csv"""'], {}), "(flight_dir, 'gps.csv')\n", (531, 554), False, 'import os\n'), ((573, 620), 'os.path.join', 'os.path.join', (['flight_dir', '"""filter-post-ins.txt"""'], {}), "(flight_dir, 'filter-post-ins.txt')\n", (585, 620), False, 'import os\n'), ((3451, 3668), 'numpy.array', 'np.array', (['[[0.0182094965, 0.0001891445, 0.0005079058, -1.0275778093], [0.0001891445, \n 0.0188836673, 0.0003014306, -0.7472003813], [0.0005079058, 0.0003014306,\n 0.0176589615, 0.9988130618], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.0182094965, 0.0001891445, 0.0005079058, -1.0275778093], [\n 0.0001891445, 0.0188836673, 0.0003014306, -0.7472003813], [0.0005079058,\n 0.0003014306, 0.0176589615, 0.9988130618], [0.0, 0.0, 0.0, 1.0]])\n', (3459, 3668), True, 'import numpy as np\n'), ((6766, 6793), 'os.path.exists', 'os.path.exists', (['filter_post'], {}), '(filter_post)\n', (6780, 6793), False, 'import os\n'), ((8605, 8649), 'os.path.join', 'os.path.join', (['base_dir', '"""image-metadata.txt"""'], {}), "(base_dir, 'image-metadata.txt')\n", (8617, 8649), False, 'import os\n'), ((8665, 8713), 'os.path.join', 'os.path.join', (['base_dir', '"""image-metadata-ekf.txt"""'], {}), "(base_dir, 'image-metadata-ekf.txt')\n", (8677, 8713), False, 'import os\n'), ((9019, 9045), 'fileinput.input', 'fileinput.input', (['meta_file'], {}), '(meta_file)\n', (9034, 9045), False, 'import fileinput\n'), ((9882, 9926), 'os.path.join', 'os.path.join', (['base_dir', '"""image-metadata.txt"""'], {}), "(base_dir, 'image-metadata.txt')\n", (9894, 9926), False, 'import os\n'), ((9944, 9983), 'os.path.join', 'os.path.join', (['base_dir', '"""pix4d-ekf.csv"""'], {}), "(base_dir, 'pix4d-ekf.csv')\n", (9956, 9983), False, 'import os\n'), ((10265, 10291), 'fileinput.input', 'fileinput.input', (['meta_file'], 
{}), '(meta_file)\n', (10280, 10291), False, 'import fileinput\n'), ((3852, 3872), 'csv.DictReader', 'csv.DictReader', (['fimu'], {}), '(fimu)\n', (3866, 3872), False, 'import csv\n'), ((4967, 4987), 'csv.DictReader', 'csv.DictReader', (['fgps'], {}), '(fgps)\n', (4981, 4987), False, 'import csv\n'), ((6914, 6942), 'fileinput.input', 'fileinput.input', (['filter_post'], {}), '(filter_post)\n', (6929, 6942), False, 'import fileinput\n'), ((8726, 8751), 'os.path.isfile', 'os.path.isfile', (['meta_file'], {}), '(meta_file)\n', (8740, 8751), False, 'import os\n'), ((9058, 9081), 'fileinput.isfirstline', 'fileinput.isfirstline', ([], {}), '()\n', (9079, 9081), False, 'import fileinput\n'), ((9996, 10021), 'os.path.isfile', 'os.path.isfile', (['meta_file'], {}), '(meta_file)\n', (10010, 10021), False, 'import os\n'), ((10304, 10327), 'fileinput.isfirstline', 'fileinput.isfirstline', ([], {}), '()\n', (10325, 10327), False, 'import fileinput\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""See primitives_test docstring for how the Jax2TfLimitations are used"""
import itertools
import numpy as np
from typing import Any, Callable, Optional, Sequence
from jax import dtypes
from jax import lax
from jax import numpy as jnp
from jax.experimental.jax2tf.tests import primitive_harness
DType = Any  # dtypes are passed around as plain numpy/jax dtype objects
class Jax2TfLimitation(primitive_harness.Limitation):
"""Specific primitive limitations for jax2tf.
See the primitive_test module docstring for details.
"""
  def __init__(
      self,
      description: str,
      *,
      devices: Sequence[str] = ("cpu", "gpu", "tpu"),
      dtypes: Sequence[DType] = (),
      enabled: bool = True,
      # jax2tf specific
      modes=("eager", "graph", "compiled"),
      skip_tf_run=False,
      expect_tf_error: bool = True,
      skip_comparison=False,
      custom_assert: Optional[Callable] = None,
      tol=None):
    """See the primitive_harness.Limitation common arguments.

    Args :
      modes: one of "eager", "graph", "compiled"
      skip_tf_run: if set will skip the TF execution. Use this sparingly,
        prefer `expect_tf_error`. Use only when the test cannot recover from
        the TF error.
      expect_tf_error: if set, then expect a TF error in the given mode when
        executing the result of jax2tf conversion. If not set, then the
        limitation must have a custom_assert or non-default tol.
      skip_comparison: skips the numeric comparison.
      tol: a tolerance to use for both atol and rtol. We will use the maximum
        tolerance over all the applicable limitations, irrespective of their
        order.
      custom_assert: if given, then execute as
        `custom_assert(tst, result_jax, result_tf, args=args, tol=tol)`, where
        `tst` is the current TestCase instance, and args are the input
        arguments that the harness created. The `tol` is the maximum tolerance
        based on the applicable limitations.
        `result_tf` is already converted to NumPy arrays.
    """
    super().__init__(
        description,
        devices=devices,
        dtypes=dtypes,
        enabled=enabled)
    # Accept a single mode given as a bare string.
    if isinstance(modes, str):
      modes = (modes,)
    assert all(m in ["eager", "graph", "compiled"] for m in modes)
    self.modes = modes
    self.expect_tf_error = expect_tf_error
    self.skip_tf_run = skip_tf_run
    self.custom_assert = custom_assert
    self.tol = tol
    self.skip_comparison = skip_comparison
def get_max_tolerance_limitation(
self, limitations: Sequence["Jax2TfLimitation"]) -> Optional["Jax2TfLimitation"]:
"""Pick the tolerance limitation that establishes the maximum tolerance"""
# TODO: it would be best if the limitations with tolerance are mutually exclusive
# and we don't have to compute the maximum
# TODO: we made this an instance method only so that we don't have to import
# this module from tf_test.util.
max_tol_lim = None
for l in limitations:
if l.tol is not None:
if max_tol_lim is None or l.tol > max_tol_lim.tol:
max_tol_lim = l
return max_tol_lim
  def filter(self, # type: ignore[override]
             dtype: Optional[DType] = None,
             device: Optional[str] = None,
             mode: Optional[str] = None) -> bool:
    """Return True iff this limitation applies to the given dtype/device/mode."""
    # A None mode matches any mode; dtype/device matching is inherited.
    return ((mode is None or mode in self.modes) and
            super().filter(device=device, dtype=dtype))
  @classmethod
  def limitations_for_harness(
      cls, harness: primitive_harness.Harness) -> Sequence["Jax2TfLimitation"]:
    """Look up the limitations for *harness* via its group-name classmethod.

    Each harness group must either appear in `harness_groups_no_limitations`
    or have a classmethod of the same name on this class -- never both.
    """
    group_method = getattr(cls, harness.group_name, None)
    if harness.group_name in cls.harness_groups_no_limitations:
      assert group_method is None, (
          f"Harness group {harness.group_name} is both in "
          f"'harness_groups_no_limitations' and has a custom "
          f"Jax2TfLimitation.classmethod defined (see module docstring)"
      )
      return []
    else:
      assert group_method is not None, (
          f"Harness group {harness.group_name} must be either part of "
          f"'harness_groups_no_limitations' or must have a custom "
          f"Jax2TfLimitation.classmethod defined (see module docstring)"
      )
      limitations = group_method(harness)
      assert isinstance(limitations, (list, tuple))
      return limitations
# We keep here the explicit set of groups for which we don't have limitations
harness_groups_no_limitations = {
"abs", "and", "argmin", "argmax", "broadcast", "broadcast_in_dim", "ceil",
"concatenate", "cos", "complex", "conj", "device_put", "dynamic_slice",
"dynamic_update_slice", "exp", "eq", "floor", "log", "gather", "imag",
"iota", "is_finite", "ne", "not", "or", "pad", "random_split",
"reduce_and", "reduce_prod", "reduce_or", "reduce_sum", "real", "reshape",
"select", "shift_left", "shift_right_logical", "shift_right_arithmetic",
"sin", "slice", "sqrt", "squeeze", "stop_gradient", "tie_in", "transpose",
"xor", "zeros_like"
}
  @classmethod
  def helper_get_trig_custom_limitation(cls, np_inverse):
    """Build a numeric limitation that checks a trig result via its inverse.

    Instead of comparing against the JAX value directly, verify that applying
    *np_inverse* to the TF result round-trips back to the operand.
    """
    def custom_assert(tst, result_jax, result_tf, *, args, tol):
      operand, = args
      tst.assertAllClose(operand, np_inverse(result_tf), atol=tol, rtol=tol)

    return custom_numeric(
        description="May return different but still correct results",
        dtypes=[np.complex64, np.complex128],
        custom_assert=custom_assert,
        modes=("eager", "graph"))
  @classmethod
  def acos(cls, harness: primitive_harness.Harness):
    # Missing TF kernels for low-precision/complex dtypes, relaxed tolerances,
    # and a round-trip check via np.cos.
    return [
        missing_tf_kernel(
            dtypes=[np.float16, dtypes.bfloat16, np.complex64],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        missing_tf_kernel(
            dtypes=[np.complex128],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=np.complex128, tol=1e-13),
        custom_numeric(dtypes=np.complex64, devices="tpu", tol=1e-3),
        custom_numeric(dtypes=np.complex64, devices=("cpu", "gpu"), tol=1e-4),
        cls.helper_get_trig_custom_limitation(np.cos),
    ]
  @classmethod
  def acosh(cls, harness: primitive_harness.Harness):
    # Round-trip checked via np.cosh.
    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16, np.float16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=np.complex64, devices=("cpu", "gpu"), tol=1e-3),
        custom_numeric(dtypes=np.complex128, devices=("cpu", "gpu"), tol=1e-12),
        cls.helper_get_trig_custom_limitation(np.cosh)
    ]
  @classmethod
  def add(cls, harness: primitive_harness.Harness):
    # TF add kernels missing for some unsigned integer dtypes.
    return [
        missing_tf_kernel(dtypes=[np.uint16]),
        missing_tf_kernel(dtypes=[np.uint64], devices=("cpu", "gpu"))
    ]
  @classmethod
  # Also called add_jaxvals
  def add_any(cls, harness: primitive_harness.Harness):
    return [missing_tf_kernel(dtypes=[np.uint16, np.uint64])]
  @classmethod
  def asin(cls, harness: primitive_harness.Harness):
    # Round-trip checked via np.sin.
    return [
        missing_tf_kernel(
            dtypes=[np.float16, dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        missing_tf_kernel(dtypes=[np.complex64, np.complex128]),
        cls.helper_get_trig_custom_limitation(np.sin)
    ]
  @classmethod
  def asinh(cls, harness: primitive_harness.Harness):
    # Round-trip checked via np.sinh.
    return [
        missing_tf_kernel(
            dtypes=[np.float16, dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=np.complex64, devices=("cpu", "gpu"), tol=1e-3),
        custom_numeric(dtypes=np.complex128, devices=("cpu", "gpu"), tol=1e-12),
        cls.helper_get_trig_custom_limitation(np.sinh)
    ]
  @classmethod
  def atan(cls, harness: primitive_harness.Harness):
    # Round-trip checked via np.tan.
    return [
        missing_tf_kernel(
            dtypes=[np.float16, dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        missing_tf_kernel(dtypes=[np.complex64, np.complex128]),
        cls.helper_get_trig_custom_limitation(np.tan)
    ]
  @classmethod
  def atanh(cls, harness: primitive_harness.Harness):
    # Round-trip checked via np.tanh.
    return [
        missing_tf_kernel(
            dtypes=[np.float16, dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=np.float64, tol=1e-14),
        custom_numeric(dtypes=np.complex64, tol=1e-3),
        custom_numeric(dtypes=np.complex128, devices=("cpu", "gpu"), tol=1e-12),
        cls.helper_get_trig_custom_limitation(np.tanh)
    ]
  @classmethod
  def atan2(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[np.float16, dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def bessel_i0e(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def bessel_i1e(cls, harness: primitive_harness.Harness):
    # Same kernel gaps as bessel_i0e; delegate.
    return cls.bessel_i0e(harness)
  @classmethod
  def bitcast_convert_type(cls, harness: primitive_harness.Harness):
    # TF bitcast has no bool kernel.
    return [missing_tf_kernel(dtypes=[np.bool_])]
  @classmethod
  def cholesky(cls, harness: primitive_harness.Harness):
    # Compares only the lower-triangular part; see custom_assert below.
    def custom_assert(tst, result_jax, result_tf, *, tol, **_):
      # cholesky_p returns garbage in the strictly upper triangular part of the
      # result, so we can safely ignore that part.
      tst.assertAllClose(jnp.tril(result_jax), result_tf, atol=tol)
    return [
        # See https://github.com/google/jax/pull/3775#issuecomment-659407824;
        Jax2TfLimitation(
            "function not compilable",
            dtypes=[np.complex64, np.complex128],
            devices=("cpu", "gpu"),
            modes="compiled"),
        missing_tf_kernel(
            # Interesting: on TPU, complex64 works in eager
            # mode, but fails otherwise.
            dtypes=[np.complex64, np.complex128],
            devices="tpu",
            modes=("graph", "compiled")),
        # TODO(bchetioui): very high discrepancy in the float32/complex64 case
        custom_numeric(dtypes=[np.float32, np.complex64], tol=1e-2),
        custom_numeric(dtypes=[np.float64, np.complex128], tol=1e-6),
        custom_numeric(dtypes=[dtypes.bfloat16, np.float16], tol=5e-2),
        custom_numeric(
            custom_assert=custom_assert,
            description=(
                "May return different values in the strictly upper triangular "
                "part of the result. This does not matter for correctness, "
                "because this part of the matrix is not considered in the result."
            ))
    ]
  @classmethod
  def clamp(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(dtypes=[np.int8, np.uint16, np.uint32, np.uint64])
    ]
  @classmethod
  def convert_element_type(cls, harness: primitive_harness.Harness):
    # No known limitations.
    return []
  @classmethod
  def conv_general_dilated(cls, harness: primitive_harness.Harness):
    # Convolutions have large accumulated rounding differences between JAX and
    # TF, hence the many per-dtype/per-device tolerance relaxations.
    return [
        Jax2TfLimitation(
            "jax2tf BUG: batch_group_count > 1 not yet converted",
            enabled=(harness.params["batch_group_count"] > 1)),
        custom_numeric(devices="gpu", tol=1e-4),
        custom_numeric(devices="tpu", tol=1e-3),
        # TODO(bchetioui): significant discrepancies in some float16 cases.
        custom_numeric(dtypes=np.float16, tol=1),
        # TODO(bchetioui): slight occasional discrepancy in float32 cases.
        custom_numeric(dtypes=np.float32, devices="tpu", tol=0.5),
        custom_numeric(dtypes=np.float32, devices="gpu", tol=1e-3),
        custom_numeric(dtypes=np.float32, devices="cpu", tol=1e-4),
        custom_numeric(dtypes=np.complex64, devices="tpu", tol=0.1),
        custom_numeric(dtypes=[np.complex64, np.complex128], devices=("cpu", "gpu"), tol=5e-4),
        # TODO(bchetioui): slight discrepancy when going through the path using
        # tf.nn.convolution.
        custom_numeric(dtypes=np.float64, devices="cpu", tol=1e-13),
    ]
# TODO(bchetioui): unidentified bug in compiled mode. The test that fails is
#
# test_conv_general_dilated_tf_conversion_path_3d_lhs=float32[1,4,28,28,1]_rhs=float32[2,3,3,1,16]_windowstrides=(1,1,1)_padding=VALID_lhsdilation=(1,1,1)_rhsdilation=(1,1,2)_dimensionnumbers=('NDHWC','DHWIO','NDHWC')_featuregroupcount=1_batchgroupcount=1_precision=None_enablexla=False
#
# with the following assertion error in TensorFlowTrace.process_primitive:
#
# AssertionError: conv_general_dilated: out.aval = ShapedArray(float32[1,3,24,26,16]); expected ShapedArray(float32[1,3,26,24,16])
#
# Deactivating this assertion is enough to pass the test, which suggests
# that the end shape is indeed the correct one (i.e. (1,3,26,24,16)).
# Further investigation is required to really understand this behavior,
# which we have not managed to reproduce as a pure TF test.
#
# This bug is low priority since it only occurs when using a non-TFXLA
# conversion path in compiled mode, i.e. in a context where using the
# TFXLA path is possible.
# if harness.name == "_tf_conversion_path_3d_lhs=float32[1,4,28,28,1]_rhs=float32[2,3,3,1,16]_windowstrides=(1,1,1)_padding=VALID_lhsdilation=(1,1,1)_rhsdilation=(1,1,2)_dimensionnumbers=('NDHWC','DHWIO','NDHWC')_featuregroupcount=1_batchgroupcount=1_precision=None_enablexla=False":
# raise unittest.SkipTest("TODO: known but unidentified bug in compiled "
# "mode")
  @classmethod
  def cosh(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[np.float16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def cummax(cls, harness):
    # Cumulative reductions: kernel gaps plus loose tolerances for
    # low-precision floats (errors accumulate along the scan axis).
    return [
        missing_tf_kernel(
            dtypes=[np.uint64, np.complex128],
            devices=("cpu", "gpu"),
        ),
        missing_tf_kernel(
            dtypes=[np.uint16, np.uint32, np.int8, np.complex64],),
        custom_numeric(dtypes=np.float16, tol=0.1),
        custom_numeric(dtypes=dtypes.bfloat16, tol=0.5)
    ]
  @classmethod
  def cummin(cls, harness):
    return [
        missing_tf_kernel(
            dtypes=[np.uint64, np.complex128],
            devices=("cpu", "gpu"),
        ),
        missing_tf_kernel(
            dtypes=[np.uint16, np.uint32, np.int8, np.complex64],),
        custom_numeric(dtypes=np.float16, tol=0.1),
        custom_numeric(dtypes=dtypes.bfloat16, tol=0.5),
    ]
  @classmethod
  def cumprod(cls, harness):
    return [
        missing_tf_kernel(
            dtypes=[np.uint64],
            devices=("cpu", "gpu"),
        ),
        missing_tf_kernel(dtypes=[np.uint32]),
        custom_numeric(dtypes=np.float16, tol=0.1),
        custom_numeric(dtypes=dtypes.bfloat16, tol=0.5),
    ]
  @classmethod
  def cumsum(cls, harness):
    return [
        missing_tf_kernel(
            dtypes=[np.uint64],
            devices=("cpu", "gpu"),
        ),
        missing_tf_kernel(dtypes=[np.complex64], devices="tpu"),
        missing_tf_kernel(dtypes=[np.uint16]),
        custom_numeric(dtypes=np.float16, tol=0.1),
        custom_numeric(dtypes=dtypes.bfloat16, tol=0.5),
    ]
  @classmethod
  def custom_linear_solve(cls, harness: primitive_harness.Harness):
    return [
        Jax2TfLimitation(
            "TODO: large numerical discrepancy",
            dtypes=np.float32,
            devices="tpu",
            expect_tf_error=False,
            skip_comparison=True),
        custom_numeric(dtypes=np.float32, devices="tpu", tol=0.01),
        custom_numeric(tol=1e-3),
    ]
  @classmethod
  def digamma(cls, harness: primitive_harness.Harness):
    dtype = harness.dtype
    # In the bfloat16 case, TF and lax both return NaN in undefined cases.
    # digamma is not defined at 0 and -1
    def custom_assert(tst, result_jax, result_tf, *, args, tol):
      # lax.digamma returns NaN and tf.math.digamma returns inf
      arg, = args
      special_cases = (arg == 0.) | (arg == -1.)
      nr_special_cases = np.count_nonzero(special_cases)
      tst.assertAllClose(
          np.full((nr_special_cases,), dtype(np.nan)),
          result_jax[special_cases])
      tst.assertAllClose(
          np.full((nr_special_cases,), dtype(np.inf)), result_tf[special_cases])
      # non-special cases are equal
      tst.assertAllClose(
          result_jax[~special_cases],
          result_tf[~special_cases],
          atol=tol,
          rtol=tol)
    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=np.float64, tol=1e-13),
        custom_numeric(dtypes=np.float32, devices=["cpu", "gpu"], tol=1e-3),
        custom_numeric(
            dtypes=dtypes.bfloat16,
            custom_assert=custom_assert,
            description=(
                "May return different results at singularity points 0 and -1."
                "JAX returns nan and TF returns inf"),
            modes=("eager", "graph"))
    ]
@classmethod
def div(cls, harness: primitive_harness.Harness):
return [
missing_tf_kernel(
dtypes=[
np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16
],),
Jax2TfLimitation(
"TF integer division fails if divisor contains 0; JAX returns NaN",
dtypes=[
np.uint8, np.int8, np.uint16, np.uint32, np.uint64, np.int8,
np.int16, np.int32, np.int64
],
# Only the harnesses with "singularity" will have divide by 0
enabled=("singularity" in harness.name))
]
  @classmethod
  def dot_general(cls, harness: primitive_harness.Harness):
    # Matmul accumulates rounding error, hence the per-dtype/device tolerances.
    return [
        missing_tf_kernel(
            dtypes=[
                np.bool_, np.uint8, np.uint16, np.uint32, np.uint64, np.int8,
                np.int16
            ],),
        missing_tf_kernel(
            dtypes=[np.int64], devices=("cpu", "gpu"), modes="compiled"),
        custom_numeric(dtypes=dtypes.bfloat16, tol=0.3),
        custom_numeric(
            dtypes=[np.complex64, np.float32], devices=("cpu", "gpu"),
            tol=1e-5),
        custom_numeric(dtypes=np.float32, devices="tpu", tol=0.1),
        custom_numeric(dtypes=np.complex64, devices="tpu", tol=0.3),
        custom_numeric(dtypes=np.float16, devices=("gpu", "tpu"), tol=0.1),
        custom_numeric(dtypes=np.float16, devices="cpu", tol=0.01)
    ]
  @classmethod
  def eig(cls, harness: primitive_harness.Harness):
    # Eigendecompositions are unique only up to ordering/phase, so the custom
    # assert checks eigenvalue set membership and the eigenvector equations
    # instead of comparing arrays elementwise.
    compute_left_eigenvectors = harness.params["compute_left_eigenvectors"]
    compute_right_eigenvectors = harness.params["compute_right_eigenvectors"]
    dtype = harness.dtype
    def custom_assert(tst, result_jax, result_tf, *, args, tol):
      operand, = args
      inner_dimension = operand.shape[-1]
      # Test ported from tests.linlag_test.testEig
      # Norm, adjusted for dimension and type.
      def norm(x):
        norm = np.linalg.norm(x, axis=(-2, -1))
        return norm / ((inner_dimension + 1) * jnp.finfo(dtype).eps)
      def check_right_eigenvectors(a, w, vr):
        tst.assertTrue(
            np.all(norm(np.matmul(a, vr) - w[..., None, :] * vr) < 100))
      def check_left_eigenvectors(a, w, vl):
        # Left eigenvectors of `a` are right eigenvectors of its conjugate
        # transpose, with conjugated eigenvalues.
        rank = len(a.shape)
        aH = jnp.conj(a.transpose(list(range(rank - 2)) + [rank - 1, rank - 2]))
        wC = jnp.conj(w)
        check_right_eigenvectors(aH, wC, vl)
      def check_eigenvalue_is_in_array(eigenvalue, eigenvalues_array):
        tol = None
        # TODO(bchetioui): numerical discrepancies
        if dtype in [np.float32, np.complex64]:
          tol = 1e-4
        elif dtype in [np.float64, np.complex128]:
          tol = 1e-13
        closest_diff = min(abs(eigenvalues_array - eigenvalue))
        tst.assertAllClose(
            closest_diff, np.array(0., closest_diff.dtype), atol=tol)
      all_w_jax, all_w_tf = result_jax[0], result_tf[0]
      for idx in itertools.product(*map(range, operand.shape[:-2])):
        w_jax, w_tf = all_w_jax[idx], all_w_tf[idx]
        for i in range(inner_dimension):
          check_eigenvalue_is_in_array(w_jax[i], w_tf)
          check_eigenvalue_is_in_array(w_tf[i], w_jax)
      if compute_left_eigenvectors:
        check_left_eigenvectors(operand, all_w_tf, result_tf[1])
      if compute_right_eigenvectors:
        check_right_eigenvectors(operand, all_w_tf,
                                 result_tf[1 + compute_left_eigenvectors])
    return [
        # Eig does not work in JAX on gpu or tpu
        Jax2TfLimitation("function not compilable", modes="compiled",
                         devices="cpu"),
        Jax2TfLimitation(
            "TF Conversion of eig is not implemented when both compute_left_eigenvectors and compute_right_eigenvectors are set to True",
            enabled=(compute_left_eigenvectors and compute_right_eigenvectors)),
        custom_numeric(
            custom_assert=custom_assert,
            description=("May return the eigenvalues and eigenvectors in a "
                         "potentially different order. The eigenvectors may "
                         "also be different, but equally valid."),
            modes=("eager", "graph"))
    ]
  @classmethod
  def eigh(cls, harness: primitive_harness.Harness):
    # Like `eig`, compares eigenvalue sets and the eigenvector equation rather
    # than raw arrays, since the decomposition is unique only up to ordering.
    dtype = harness.dtype
    shape = harness.params["shape"]
    def custom_assert(tst, result_jax, result_tf, *, args, tol):
      operand, = args
      inner_dimension = operand.shape[-1]
      def check_right_eigenvectors(a, w, vr):
        tol = 1e-16
        # TODO(bchetioui): tolerance needs to be very high in compiled mode,
        # specifically for eigenvectors.
        if dtype == np.float64:
          tol = 1e-6
        elif dtype == np.float32:
          tol = 1e-2
        elif dtype in [dtypes.bfloat16, np.complex64]:
          tol = 1e-3
        elif dtype == np.complex128:
          tol = 1e-13
        tst.assertAllClose(
            np.matmul(a, vr) - w[..., None, :] * vr,
            np.zeros(a.shape, dtype=vr.dtype),
            atol=tol)
      def check_eigenvalue_is_in_array(eigenvalue, eigenvalues_array):
        tol = None
        if dtype in [dtypes.bfloat16, np.float32, np.complex64]:
          tol = 1e-3
        elif dtype in [np.float64, np.complex128]:
          tol = 1e-11
        closest_diff = min(abs(eigenvalues_array - eigenvalue))
        tst.assertAllClose(
            closest_diff, np.array(0., closest_diff.dtype), atol=tol)
      _, all_w_jax = result_jax
      all_vr_tf, all_w_tf = result_tf
      for idx in itertools.product(*map(range, operand.shape[:-2])):
        w_jax, w_tf = all_w_jax[idx], all_w_tf[idx]
        for i in range(inner_dimension):
          check_eigenvalue_is_in_array(w_jax[i], w_tf)
          check_eigenvalue_is_in_array(w_tf[i], w_jax)
      check_right_eigenvectors(operand, all_w_tf, all_vr_tf)
    return [
        # See https://github.com/google/jax/pull/3775#issuecomment-659407824;
        Jax2TfLimitation(
            "function not compilable",
            dtypes=[np.complex64, np.complex128],
            modes="compiled",
            enabled=(shape[0] > 0)),
        Jax2TfLimitation(
            "TODO: numeric discrepancies",
            dtypes=[np.float64],
            modes="compiled",
            devices=("cpu", "gpu"),
            expect_tf_error=False,
            skip_comparison=True),
        Jax2TfLimitation(
            "TODO: numeric discrepancies",
            dtypes=[np.float16],
            devices=("tpu",),
            expect_tf_error=False,
            skip_comparison=True),
        custom_numeric(
            custom_assert=custom_assert,
            description=("May return the eigenvalues and eigenvectors in a "
                         "potentially different order. The eigenvectors may "
                         "also be different, but equally valid."))
    ]
  @classmethod
  def ge(cls, harness: primitive_harness.Harness):
    # Comparison kernels missing for bool and unsigned integers on CPU/GPU.
    return [
        missing_tf_kernel(dtypes=[np.bool_]),
        missing_tf_kernel(
            dtypes=[np.uint16, np.uint32],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        missing_tf_kernel(
            dtypes=[np.uint64],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def gt(cls, harness: primitive_harness.Harness):
    # Same kernel gaps as `ge`.
    return cls.ge(harness)
  @classmethod
  def erf(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
@classmethod
def erfc(cls, harness: primitive_harness.Harness):
return [
missing_tf_kernel(
dtypes=[dtypes.bfloat16],
devices=("cpu", "gpu"),
modes=("eager", "graph"))
]
  @classmethod
  def erf_inv(cls, harness: primitive_harness.Harness):
    # erf_inv is not defined for arg <= -1 or arg >= 1
    def custom_assert(tst, result_jax, result_tf, *, args, tol):  # noqa: F811
      arg, = args
      # for arg < -1 or arg > 1
      # lax.erf_inv returns NaN; tf.math.erf_inv return +/- inf
      special_cases = (arg < -1.) | (arg > 1.)
      # non-special cases are equal
      tst.assertAllClose(
          result_jax[~special_cases],
          result_tf[~special_cases],
          atol=tol,
          rtol=tol)
    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16, np.float16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=[np.float32, np.float64], tol=1e-4),
        custom_numeric(
            dtypes=[np.float32, np.float64],
            custom_assert=custom_assert,
            description=(
                "May return different results at undefined points (< -1 or > 1):"
                " JAX returns `NaN` and TF returns `+inf` or `-inf`."))
    ]
  @classmethod
  def expm1(cls, harness: primitive_harness.Harness):
    return [custom_numeric(dtypes=np.float64, tol=1e-5)]
  @classmethod
  def fft(cls, harness):
    return [
        Jax2TfLimitation(
            "TF function not compileable",
            devices=("cpu", "gpu"),
            dtypes=[np.float64, np.complex128],
            modes="compiled"),
        custom_numeric(tol=1e-3)
    ]
  @classmethod
  def _pow_test_util(cls, harness: primitive_harness.Harness):
    # Shared limitations for `pow` and `integer_pow`.
    def custom_assert(tst, result_jax, result_tf, *, args, tol):
      # NaNs are mismatched, but assertAllClose will also behave weirdly for
      # complex numbers containing np.inf as one of their components. See
      # https://github.com/numpy/numpy/issues/15959 for more details.
      mask = (
          np.isnan(result_jax) + np.isnan(result_tf) + np.isinf(result_jax) +
          np.isinf(result_tf))
      tst.assertAllClose(result_jax[~mask], result_tf[~mask], rtol=tol)
    return [
        custom_numeric(
            dtypes=[np.float32, np.complex64], devices="tpu", tol=1e-2),
        custom_numeric(
            dtypes=[np.float32, np.complex64], devices=("cpu", "gpu"),
            tol=1e-3),
        custom_numeric(dtypes=[np.float64, np.complex128], tol=1e-12),
        custom_numeric(dtypes=np.float16, tol=1),
        # Values get really small for large negative powers.
        custom_numeric(dtypes=dtypes.bfloat16, tol=3),
        custom_numeric(
            dtypes=[np.complex64, np.complex128],
            custom_assert=custom_assert,
        )
    ]
  @classmethod
  def igamma(cls, harness: primitive_harness.Harness):
    dtype = harness.dtype
    # igamma is not defined when the first argument is <=0
    def custom_assert(tst, result_jax, result_tf, *, args, tol):
      arg1, arg2 = args
      # lax.igamma returns NaN when arg1 == arg2 == 0; tf.math.igamma returns 0
      special_cases = (arg1 == 0.) & (arg2 == 0.)
      nr_special_cases = np.count_nonzero(special_cases)
      tst.assertAllClose(
          np.full((nr_special_cases,), np.nan, dtype=dtype),
          result_jax[special_cases])
      tst.assertAllClose(
          np.full((nr_special_cases,), 0., dtype=dtype),
          result_tf[special_cases])
      # non-special cases are equal
      tst.assertAllClose(result_jax[~special_cases], result_tf[~special_cases])
    return [
        custom_numeric(
            custom_assert=custom_assert,
            description=(
                "May return different results at undefined points "
                "(both arguments 0). JAX returns `NaN` and TF returns 0 or "
                "JAX returns 1 and TF returns `NaN`"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def igammac(cls, harness: primitive_harness.Harness):
    dtype = harness.dtype
    # igammac is not defined when the first argument is <=0
    def custom_assert(tst, result_jax, result_tf, *, args, tol):  # noqa: F811
      arg1, arg2 = args
      # lax.igammac returns 1. when arg1 <= 0; tf.math.igammac returns NaN
      special_cases = (arg1 <= 0.) | (arg2 <= 0)
      nr_special_cases = np.count_nonzero(special_cases)
      tst.assertAllClose(
          np.full((nr_special_cases,), 1., dtype=dtype),
          result_jax[special_cases])
      tst.assertAllClose(
          np.full((nr_special_cases,), np.nan, dtype=dtype),
          result_tf[special_cases])
      # non-special cases are equal
      tst.assertAllClose(
          result_jax[~special_cases],
          result_tf[~special_cases],
          atol=tol,
          rtol=tol)
    return [
        custom_numeric(dtypes=np.float64, tol=1e-9),
        custom_numeric(devices="gpu", tol=1e-3),
        custom_numeric(
            custom_assert=custom_assert,
            devices=("cpu", "gpu"),
            modes=("eager", "graph"),
            description=(
                "May return different results at undefined points "
                "(both arguments less or equal 0). JAX returns `NaN` and TF returns 0 or "
                "JAX returns 1 and TF returns `NaN`")),
    ]
  @classmethod
  def integer_pow(cls, harness: primitive_harness.Harness):
    # Extends the shared pow limitations with overflow-behavior differences
    # for large exponents.
    y = harness.params["y"]
    return [
        missing_tf_kernel(
            dtypes=[
                np.uint8, np.uint16, np.int8, np.int16, np.uint32, np.uint64
            ],),
        # hitting rtol = nan
        Jax2TfLimitation(("Different overflow behavior for large exponents. It "
                          "and `+inf`/`-inf` differently in JAX and TF."),
                         devices="tpu",
                         dtypes=np.complex64,
                         enabled=(y in [1000, -1000]),
                         expect_tf_error=False,
                         skip_comparison=True),
        Jax2TfLimitation(
            "Different overflow behavior for large exponents. ",
            dtypes=[np.int8, np.int16, np.int32, np.int64, np.float32],
            enabled=(y > 10),
            expect_tf_error=False,
            skip_comparison=True)
    ] + list(cls._pow_test_util(harness))
  @classmethod
  def pow(cls, harness: primitive_harness.Harness):
    return cls._pow_test_util(harness)
  @classmethod
  def le(cls, harness: primitive_harness.Harness):
    # Same kernel gaps as `ge` (the two lists are identical).
    return [
        missing_tf_kernel(dtypes=[np.bool_]),
        missing_tf_kernel(
            dtypes=[np.uint16, np.uint32],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        missing_tf_kernel(
            dtypes=[np.uint64],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def lt(cls, harness: primitive_harness.Harness):
    # Delegates to `ge`, which declares the same limitations as `le`.
    return cls.ge(harness)
  @classmethod
  def lgamma(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=np.float64, tol=1e-11),
        custom_numeric(dtypes=np.float32, tol=1e-3)
    ]
  @classmethod
  def log1p(cls, harness: primitive_harness.Harness):
    return [
        custom_numeric(dtypes=np.float64, tol=1e-10),
        custom_numeric(dtypes=np.float32, tol=1e-3)
    ]
  @classmethod
  def lu(cls, harness: primitive_harness.Harness):
    # The LU decomposition is not unique, so the custom assert validates the
    # factorization (P @ A == L @ U and pivot/permutation consistency) instead
    # of comparing the raw factors.
    dtype = harness.dtype
    def custom_assert(tst, result_jax, result_tf, *, args, tol):
      operand, = args
      lu, pivots, perm = result_tf
      batch_dims = operand.shape[:-2]
      m, n = operand.shape[-2], operand.shape[-1]
      def _make_permutation_matrix(perm):
        result = []
        for idx in itertools.product(*map(range, operand.shape[:-1])):
          result += [0 if c != perm[idx] else 1 for c in range(m)]
        result = np.reshape(np.array(result, dtype=dtype), [*batch_dims, m, m])
        return result
      k = min(m, n)
      l = jnp.tril(lu, -1)[..., :, :k] + jnp.eye(m, k, dtype=dtype)
      u = jnp.triu(lu)[..., :k, :]
      p_mat = _make_permutation_matrix(perm)
      tst.assertArraysEqual(
          lax.linalg.lu_pivots_to_permutation(pivots, m), perm)
      tst.assertAllClose(
          jnp.matmul(p_mat, operand), jnp.matmul(l, u), atol=tol, rtol=tol)
    return [
        missing_tf_kernel(dtypes=[np.complex64], devices="tpu"),
        custom_numeric(
            dtypes=[np.float32, np.complex64], devices="tpu", tol=0.1),
        custom_numeric(
            dtypes=[np.float32, np.complex64], devices=("cpu", "gpu"),
            tol=1e-5),
        custom_numeric(dtypes=[np.float64, np.complex128], tol=1e-13),
        custom_numeric(
            custom_assert=custom_assert,
            description=("May return different, but also correct, results when "
                         "the decomposition is not unique")),
    ]
  @classmethod
  def _min_max_test_util(cls, harness: primitive_harness.Harness):
    # TODO(bchetioui): discrepancies between TF & JAX when comparing with NaN;
    # JAX always returns NaN, while TF returns the value NaN is compared with.
    def custom_assert(tst, result_jax, result_tf, **_):
      mask = np.isnan(result_jax)
      tst.assertAllClose(result_jax[~mask], result_tf[~mask])
    return [
        missing_tf_kernel(
            dtypes=[
                np.bool_, np.int8, np.complex64, np.uint16, np.uint32, np.uint64
            ],),
        missing_tf_kernel(
            dtypes=[np.complex128],
            devices=("cpu", "gpu"),
        ),
        custom_numeric(
            custom_assert=custom_assert,
            description=(
                "May return different values when one of the values is NaN. "
                "JAX always returns NaN, while TF returns the value NaN is compared with."
            ))
    ]
  @classmethod
  def max(cls, harness: primitive_harness.Harness):
    # Shares limitations with `min`.
    return cls._min_max_test_util(harness)
  @classmethod
  def min(cls, harness: primitive_harness.Harness):
    return cls._min_max_test_util(harness)
  @classmethod
  def mul(cls, harness: primitive_harness.Harness):
    return [missing_tf_kernel(dtypes=[np.uint32, np.uint64])]
  @classmethod
  def neg(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(dtypes=[np.uint8, np.uint16, np.uint32, np.uint64],)
    ]
  @classmethod
  def nextafter(cls, harness: primitive_harness.Harness):
    return [missing_tf_kernel(dtypes=[np.float16, dtypes.bfloat16])]
  @classmethod
  def population_count(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[np.uint32, np.uint64],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def qr(cls, harness: primitive_harness.Harness):
    # See https://github.com/google/jax/pull/3775#issuecomment-659407824;
    # # jit_compile=True breaks for complex types.
    # TODO: see https://github.com/google/jax/pull/3775#issuecomment-659407824.
    # - for now, the performance of the HLO QR implementation called when
    #   compiling with TF is expected to have worse performance than the
    #   custom calls made in JAX.
    return [
        custom_numeric(tol=1e-5),
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16],
            devices="tpu",
        )
    ]
  @classmethod
  def random_gamma(cls, harness: primitive_harness.Harness):
    return [custom_numeric(devices="tpu", tol=1e-3)]
  @classmethod
  def reduce_max(cls, harness: primitive_harness.Harness):
    # Complex reductions have no TF kernel.
    return [
        missing_tf_kernel(dtypes=[np.complex64]),
        missing_tf_kernel(dtypes=[np.complex128])
    ]
  @classmethod
  def reduce_min(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(dtypes=[np.complex64]),
        missing_tf_kernel(dtypes=[np.complex128])
    ]
  @classmethod
  def reduce_window_add(cls, harness):
    # The asserts guard that each classmethod only sees its own reducer.
    assert "add" == harness.params["computation"].__name__
    return [
        missing_tf_kernel(dtypes=[np.uint16]),
        missing_tf_kernel(dtypes=[np.complex64], devices="tpu"),
        missing_tf_kernel(dtypes=[np.uint64], devices=("cpu", "gpu"))
    ]
  @classmethod
  def reduce_window_mul(cls, harness):
    assert "mul" == harness.params["computation"].__name__
    return [
        missing_tf_kernel(dtypes=[np.uint32]),
        missing_tf_kernel(dtypes=[np.uint64], devices=("cpu", "gpu"))
    ]
  @classmethod
  def reduce_window_min(cls, harness):
    assert "min" == harness.params["computation"].__name__
    return [
        missing_tf_kernel(
            dtypes=[np.uint32, np.uint16, np.bool_, np.complex64, np.int8],),
        missing_tf_kernel(
            dtypes=[np.uint64, np.complex128],
            devices=("cpu", "gpu"),
        )
    ]
  @classmethod
  def reduce_window_max(cls, harness):
    assert "max" == harness.params["computation"].__name__
    dtype = harness.dtype
    init_value = harness.params["init_value"]
    return [
        missing_tf_kernel(dtypes=[np.uint32, np.bool_, np.complex64]),
        missing_tf_kernel(
            dtypes=[np.uint64, np.complex128],
            devices=("cpu", "gpu"),
        ),
        Jax2TfLimitation(
            "TF kernel missing, except when the initial_value is the minimum for the dtype",
            dtypes=[np.uint16, np.int8],
            enabled=((dtype == np.uint16 and init_value != 0) or
                     (dtype == np.int8 and init_value != -128)))
    ]
  @classmethod
  def regularized_incomplete_beta(cls, harness: primitive_harness.Harness):
    return [
        custom_numeric(dtypes=np.float64, tol=1e-14),
        missing_tf_kernel(dtypes=[np.float16, dtypes.bfloat16])
    ]
@classmethod
def rem(cls, harness: primitive_harness.Harness):
return [
missing_tf_kernel(
dtypes=[
np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16
],),
Jax2TfLimitation(
"TF integer division fails if divisor contains 0; JAX returns NaN",
dtypes=[
np.uint8, np.int8, np.uint16, np.uint32, np.uint64, np.int8,
np.int16, np.int32, np.int64
],
# Only the harnesses with "singularity" will have divide by 0
enabled=("singularity" in harness.name)),
missing_tf_kernel(
dtypes=[np.float16],
devices=("cpu", "gpu"),
modes=("eager", "graph")),
]
  @classmethod
  def rev(cls, harness: primitive_harness.Harness):
    return [missing_tf_kernel(dtypes=[np.uint32, np.uint64])]
  @classmethod
  def round(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def rsqrt(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def scatter_add(cls, harness):
    return [
        missing_tf_kernel(dtypes=[np.uint16, np.uint64, np.bool_],),
        missing_tf_kernel(
            dtypes=[np.complex64],
            devices="tpu",
        ),
    ]
  @classmethod
  def scatter_max(cls, harness):
    return [
        missing_tf_kernel(
            dtypes=[
                np.int8, np.uint16, np.uint32, np.uint64, np.complex64,
                np.complex128, np.bool_
            ],)
    ]
  @classmethod
  def scatter_min(cls, harness):
    return [
        missing_tf_kernel(
            dtypes=[
                np.int8, np.uint16, np.uint32, np.complex64, np.bool_,
                np.uint64, np.complex128
            ],)
    ]
  @classmethod
  def scatter_mul(cls, harness):
    return [
        missing_tf_kernel(dtypes=[np.uint32, np.uint64, np.bool_],),
        missing_tf_kernel(
            dtypes=[np.complex64],
            devices="tpu",
        ),
    ]
  @classmethod
  def select_and_gather_add(cls, harness):
    # NOTE(review): the first description below reads "is not not exposed" —
    # looks like a typo in the runtime string; left untouched here.
    return [
        missing_tf_kernel(
            dtypes=[np.float32],
            devices="tpu",
            description=(
                "This JAX primitives is not not exposed directly in the JAX API "
                "but arises from JVP of `lax.reduce_window` for reducers "
                "`lax.max` or `lax.min`. It also arises from second-order "
                "VJP of the same. Implemented using XlaReduceWindow")),
        Jax2TfLimitation((
            "jax2tf unimplemented for 64-bit inputs because the current implementation "
            "relies on packing two values into a single value. This can be "
            "fixed by using a variadic XlaReduceWindow, when available"),
                         dtypes=[np.float64],
                         devices=("cpu", "gpu"))
    ]
  @classmethod
  def select_and_scatter_add(cls, harness):
    return [
        missing_tf_kernel(dtypes=[np.uint16]),
        missing_tf_kernel(
            dtypes=[np.uint64],
            devices=("cpu", "gpu"),
        )
    ]
  @classmethod
  def sign(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[
                np.uint32, np.uint16, np.int16, np.int8, np.uint8, np.uint64
            ],)
    ]
  @classmethod
  def sinh(cls, harness: primitive_harness.Harness):
    return [
        missing_tf_kernel(
            dtypes=[np.float16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph"))
    ]
  @classmethod
  def sort(cls, harness: primitive_harness.Harness):
    return [
        Jax2TfLimitation(
            # I think that this is because TF is running on CPU even for GPU tests?
            "TODO: TF non-stable multiple-array sort",
            devices="gpu",
            enabled=(harness.params["num_arrays"] > 1 and
                     not harness.params["is_stable"]),
            expect_tf_error=False,
            skip_comparison=True),
        missing_tf_kernel(
            dtypes=[np.complex128, np.float64], devices=("cpu", "gpu")),
        missing_tf_kernel(dtypes=[np.bool_],),
    ]
  @classmethod
  def sub(cls, harness):
    return [missing_tf_kernel(dtypes=[np.uint64])]
  @classmethod
  def svd(cls, harness: primitive_harness.Harness):
    # TODO: slow test
    # SVD factors are unique only up to signs/ordering, so when compute_uv is
    # set the custom assert reconstructs the operand from the factors and
    # compares reconstructions instead of the raw factors.
    def custom_assert(tst, r_jax, r_tf, *, args, tol):
      def _reconstruct_operand(result, is_tf: bool):
        # Reconstructing operand as documented in numpy.linalg.svd (see
        # https://numpy.org/doc/stable/reference/generated/numpy.linalg.svd.html)
        s, u, v = result
        U = u[..., :s.shape[-1]]
        V = v[..., :s.shape[-1], :]
        S = s[..., None, :]
        return jnp.matmul(U * S, V), s.shape, u.shape, v.shape
      if harness.params["compute_uv"]:
        r_jax_reconstructed = _reconstruct_operand(r_jax, False)
        r_tf_reconstructed = _reconstruct_operand(r_tf, True)
        tst.assertAllClose(
            r_jax_reconstructed, r_tf_reconstructed, atol=tol, rtol=tol)
      else:
        tst.assertAllClose(r_jax, r_tf, atol=tol, rtol=tol)
    return [
        # Works in JAX for complex due to custom calls on cpu and gpu
        Jax2TfLimitation(
            "function not compilable. Implemented using `tf.linalg.svd` and `tf.linalg.adjoint`",
            dtypes=[np.complex64, np.complex128],
            devices=("cpu", "gpu"),
            modes=("compiled",)),
        missing_tf_kernel(dtypes=[dtypes.bfloat16], devices="tpu"),
        custom_numeric(tol=1e-4),
        custom_numeric(custom_assert=custom_assert)
    ]
  @classmethod
  def tan(cls, harness):
    return [
        custom_numeric(dtypes=np.complex64, devices="tpu", tol=1e-4),
        custom_numeric(dtypes=np.complex64, devices=("cpu", "gpu"), tol=1e-3),
        custom_numeric(dtypes=np.complex128, devices=("cpu", "gpu"), tol=1e-12)]
  @classmethod
  def tanh(cls, harness):
    return [
        custom_numeric(dtypes=np.complex128, tol=1e-7),
        custom_numeric(dtypes=np.complex64, tol=1e-4)]
  @classmethod
  def top_k(cls, harness):
    # If the values agree exactly, indices must match too; otherwise compare
    # values with NaNs masked out (TF and XLA sort inf/NaN differently).
    def custom_assert(tst, result_jax, result_tf, **_):
      assert len(result_jax) == len(result_tf)
      # TODO: TF and JAX sort [inf, nan] differently.
      first_arr_jax, first_arr_tf = result_jax[0], result_tf[0]
      if np.all(first_arr_jax == first_arr_tf):
        for arr_jax, arr_tf in zip(result_jax, result_tf):
          tst.assertArraysEqual(arr_jax, arr_tf)
      else:
        mask_jax, mask_tf = np.isnan(first_arr_jax), np.isnan(first_arr_tf)
        tst.assertArraysEqual(first_arr_jax[~mask_jax], first_arr_tf[~mask_tf])
    return [
        missing_tf_kernel(
            dtypes=[np.uint64, np.int64],
            devices=("cpu", "gpu"),
            modes="compiled"),
        custom_numeric(
            dtypes=[np.float16, dtypes.bfloat16, np.float32, np.float64],
            custom_assert=custom_assert,
            description=(
                "Produces different results when the array contains `inf` and `NaN`"
                " (they are sorted differently in TF vs. XLA).")
        )]
@classmethod
def triangular_solve(cls, harness: primitive_harness.Harness):
return [
missing_tf_kernel(dtypes=[dtypes.bfloat16]),
missing_tf_kernel(
dtypes=[np.float16],
devices=("gpu", "cpu"),
modes=("eager", "graph")),
custom_numeric(dtypes=np.float32, tol=5e-3)
]
def custom_numeric(
*,
description="custom numeric comparison",
dtypes=(), # All
modes=("eager", "graph", "compiled"),
devices=("cpu", "gpu", "tpu"),
custom_assert=None,
tol=None) -> Jax2TfLimitation:
return Jax2TfLimitation(
description,
expect_tf_error=False,
dtypes=dtypes,
devices=devices,
modes=modes,
custom_assert=custom_assert,
tol=tol)
def missing_tf_kernel(
*,
description="op not defined for dtype",
dtypes,
modes=("eager", "graph", "compiled"),
devices=("cpu", "gpu", "tpu")
) -> Jax2TfLimitation:
return Jax2TfLimitation(
description,
dtypes=dtypes,
devices=devices,
modes=modes)
| [
"numpy.full",
"numpy.count_nonzero",
"jax.numpy.triu",
"jax.numpy.finfo",
"jax.numpy.eye",
"numpy.zeros",
"numpy.isinf",
"numpy.isnan",
"jax.numpy.tril",
"jax.numpy.matmul",
"numpy.linalg.norm",
"jax.numpy.conj",
"numpy.array",
"jax.lax.linalg.lu_pivots_to_permutation",
"numpy.matmul",
... | [((16769, 16800), 'numpy.count_nonzero', 'np.count_nonzero', (['special_cases'], {}), '(special_cases)\n', (16785, 16800), True, 'import numpy as np\n'), ((28662, 28693), 'numpy.count_nonzero', 'np.count_nonzero', (['special_cases'], {}), '(special_cases)\n', (28678, 28693), True, 'import numpy as np\n'), ((29813, 29844), 'numpy.count_nonzero', 'np.count_nonzero', (['special_cases'], {}), '(special_cases)\n', (29829, 29844), True, 'import numpy as np\n'), ((34754, 34774), 'numpy.isnan', 'np.isnan', (['result_jax'], {}), '(result_jax)\n', (34762, 34774), True, 'import numpy as np\n'), ((45899, 45936), 'numpy.all', 'np.all', (['(first_arr_jax == first_arr_tf)'], {}), '(first_arr_jax == first_arr_tf)\n', (45905, 45936), True, 'import numpy as np\n'), ((10142, 10162), 'jax.numpy.tril', 'jnp.tril', (['result_jax'], {}), '(result_jax)\n', (10150, 10162), True, 'from jax import numpy as jnp\n'), ((19742, 19774), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': '(-2, -1)'}), '(x, axis=(-2, -1))\n', (19756, 19774), True, 'import numpy as np\n'), ((20156, 20167), 'jax.numpy.conj', 'jnp.conj', (['w'], {}), '(w)\n', (20164, 20167), True, 'from jax import numpy as jnp\n'), ((27571, 27590), 'numpy.isinf', 'np.isinf', (['result_tf'], {}), '(result_tf)\n', (27579, 27590), True, 'import numpy as np\n'), ((28730, 28779), 'numpy.full', 'np.full', (['(nr_special_cases,)', 'np.nan'], {'dtype': 'dtype'}), '((nr_special_cases,), np.nan, dtype=dtype)\n', (28737, 28779), True, 'import numpy as np\n'), ((28854, 28900), 'numpy.full', 'np.full', (['(nr_special_cases,)', '(0.0)'], {'dtype': 'dtype'}), '((nr_special_cases,), 0.0, dtype=dtype)\n', (28861, 28900), True, 'import numpy as np\n'), ((29881, 29927), 'numpy.full', 'np.full', (['(nr_special_cases,)', '(1.0)'], {'dtype': 'dtype'}), '((nr_special_cases,), 1.0, dtype=dtype)\n', (29888, 29927), True, 'import numpy as np\n'), ((30001, 30050), 'numpy.full', 'np.full', (['(nr_special_cases,)', 'np.nan'], {'dtype': 'dtype'}), 
'((nr_special_cases,), np.nan, dtype=dtype)\n', (30008, 30050), True, 'import numpy as np\n'), ((33563, 33589), 'jax.numpy.eye', 'jnp.eye', (['m', 'k'], {'dtype': 'dtype'}), '(m, k, dtype=dtype)\n', (33570, 33589), True, 'from jax import numpy as jnp\n'), ((33600, 33612), 'jax.numpy.triu', 'jnp.triu', (['lu'], {}), '(lu)\n', (33608, 33612), True, 'from jax import numpy as jnp\n'), ((33710, 33756), 'jax.lax.linalg.lu_pivots_to_permutation', 'lax.linalg.lu_pivots_to_permutation', (['pivots', 'm'], {}), '(pivots, m)\n', (33745, 33756), False, 'from jax import lax\n'), ((33800, 33826), 'jax.numpy.matmul', 'jnp.matmul', (['p_mat', 'operand'], {}), '(p_mat, operand)\n', (33810, 33826), True, 'from jax import numpy as jnp\n'), ((33828, 33844), 'jax.numpy.matmul', 'jnp.matmul', (['l', 'u'], {}), '(l, u)\n', (33838, 33844), True, 'from jax import numpy as jnp\n'), ((20615, 20648), 'numpy.array', 'np.array', (['(0.0)', 'closest_diff.dtype'], {}), '(0.0, closest_diff.dtype)\n', (20623, 20648), True, 'import numpy as np\n'), ((22786, 22819), 'numpy.zeros', 'np.zeros', (['a.shape'], {'dtype': 'vr.dtype'}), '(a.shape, dtype=vr.dtype)\n', (22794, 22819), True, 'import numpy as np\n'), ((23211, 23244), 'numpy.array', 'np.array', (['(0.0)', 'closest_diff.dtype'], {}), '(0.0, closest_diff.dtype)\n', (23219, 23244), True, 'import numpy as np\n'), ((27538, 27558), 'numpy.isinf', 'np.isinf', (['result_jax'], {}), '(result_jax)\n', (27546, 27558), True, 'import numpy as np\n'), ((33427, 33456), 'numpy.array', 'np.array', (['result'], {'dtype': 'dtype'}), '(result, dtype=dtype)\n', (33435, 33456), True, 'import numpy as np\n'), ((33532, 33548), 'jax.numpy.tril', 'jnp.tril', (['lu', '(-1)'], {}), '(lu, -1)\n', (33540, 33548), True, 'from jax import numpy as jnp\n'), ((44299, 44319), 'jax.numpy.matmul', 'jnp.matmul', (['(U * S)', 'V'], {}), '(U * S, V)\n', (44309, 44319), True, 'from jax import numpy as jnp\n'), ((46086, 46109), 'numpy.isnan', 'np.isnan', (['first_arr_jax'], {}), 
'(first_arr_jax)\n', (46094, 46109), True, 'import numpy as np\n'), ((46111, 46133), 'numpy.isnan', 'np.isnan', (['first_arr_tf'], {}), '(first_arr_tf)\n', (46119, 46133), True, 'import numpy as np\n'), ((22733, 22749), 'numpy.matmul', 'np.matmul', (['a', 'vr'], {}), '(a, vr)\n', (22742, 22749), True, 'import numpy as np\n'), ((27493, 27513), 'numpy.isnan', 'np.isnan', (['result_jax'], {}), '(result_jax)\n', (27501, 27513), True, 'import numpy as np\n'), ((27516, 27535), 'numpy.isnan', 'np.isnan', (['result_tf'], {}), '(result_tf)\n', (27524, 27535), True, 'import numpy as np\n'), ((19822, 19838), 'jax.numpy.finfo', 'jnp.finfo', (['dtype'], {}), '(dtype)\n', (19831, 19838), True, 'from jax import numpy as jnp\n'), ((19939, 19955), 'numpy.matmul', 'np.matmul', (['a', 'vr'], {}), '(a, vr)\n', (19948, 19955), True, 'import numpy as np\n')] |
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import turbulence as tb
from ROHSApy import ROHSA
#DATA
hdu = fits.open("/data/amarchal/ROHSA_paper/data/observation/GHIGLS_NEP_Tb.fits")
hdr = hdu[0].header
hdr['CRPIX3'] -= 200; hdr['NAXIS3'] = 300
w = tb.proj.wcs2D(hdr)
cube = hdu[0].data[0][200:500,:,:]
core = ROHSA(cube, hdr=hdr)
#FULL FIELD
core = ROHSA(np.zeros((cube.shape[0],cube.shape[1],cube.shape[2])), hdr=hdr)
output = core.read_gaussian("/data/amarchal/ROHSA_paper/ROHSA/GHIGLS_NEP_Tb_gauss_run_4.dat")
params = core.physical_gaussian(output)
reconstructed_cube = core.return_result_cube(output)
field = params[0::3] * params[2::3] * np.sqrt(2.*np.pi) * tb.cst.C / 1.e18
ampfield = params[0::3]
vfield = params[1::3]
sigfield = params[2::3]
ampfield_pix = output[0::3]
vfield_pix = output[1::3]
sigfield_pix = output[2::3]
iddx = np.argsort(np.mean(vfield, axis=(1,2)))
field = [field[idd] for idd in iddx]
vfield = [vfield[idd] for idd in iddx]
ampfield = [ampfield[idd] for idd in iddx]
sigfield = [sigfield[idd] for idd in iddx]
vfield_pix = [vfield_pix[idd] for idd in iddx]
ampfield_pix = [ampfield_pix[idd] for idd in iddx]
sigfield_pix = [sigfield_pix[idd] for idd in iddx]
#CNM/LNM/WNM fraction
#Sort sigfield
iddx = np.argsort(np.median(sigfield, axis=(1,2)))
field = [field[idd] for idd in iddx]
vfield = [vfield[idd] for idd in iddx]
ampfield = [ampfield[idd] for idd in iddx]
sigfield = [sigfield[idd] for idd in iddx]
vfield_pix = [vfield_pix[idd] for idd in iddx]
ampfield_pix = [ampfield_pix[idd] for idd in iddx]
sigfield_pix = [sigfield_pix[idd] for idd in iddx]
idx_CNM_local = np.where((np.median(sigfield, axis=(1,2)) < 3.) & (np.median(vfield, axis=(1,2)) > -20.) & (np.median(vfield, axis=(1,2)) < 20.))[0]
idx_LNM_local = np.where((np.median(sigfield, axis=(1,2)) > 3.) & (np.median(sigfield, axis=(1,2)) < 6.) & (np.median(vfield, axis=(1,2)) > -20.) & (np.median(vfield, axis=(1,2)) < 20.))[0]
idx_WNM_local = np.where((np.median(sigfield, axis=(1,2)) > 6.) & (np.median(vfield, axis=(1,2)) > -20.) & (np.median(vfield, axis=(1,2)) < 20.))[0]
idx_CNM_ivc = np.where((np.median(sigfield, axis=(1,2)) < 3.) & (np.median(vfield, axis=(1,2)) < -20.) | (np.median(vfield, axis=(1,2)) > 20.))[0]
idx_LNM_ivc = np.where((np.median(sigfield, axis=(1,2)) > 3.) & (np.median(sigfield, axis=(1,2)) < 8.) & (np.median(vfield, axis=(1,2)) < -20.) | (np.median(vfield, axis=(1,2)) > 20.))[0]
idx_WNM_ivc = np.where((np.median(sigfield, axis=(1,2)) > 8.) & (np.median(vfield, axis=(1,2)) < -20.) | (np.median(vfield, axis=(1,2)) > 20.))[0]
#Reconstruct cube phase
model_CNM_local = core.return_result_cube(ampfield=np.array(ampfield_pix)[idx_CNM_local], pixfield=np.array(vfield_pix)[idx_CNM_local], sigfield=np.array(sigfield_pix)[idx_CNM_local])
model_WNM_local = core.return_result_cube(ampfield=np.array(ampfield_pix)[idx_WNM_local], pixfield=np.array(vfield_pix)[idx_WNM_local], sigfield=np.array(sigfield_pix)[idx_WNM_local])
model_LNM_local = core.return_result_cube(ampfield=np.array(ampfield_pix)[idx_LNM_local], pixfield=np.array(vfield_pix)[idx_LNM_local], sigfield=np.array(sigfield_pix)[idx_LNM_local])
model_CNM_ivc = core.return_result_cube(ampfield=np.array(ampfield_pix)[idx_CNM_ivc], pixfield=np.array(vfield_pix)[idx_CNM_ivc], sigfield=np.array(sigfield_pix)[idx_CNM_ivc])
model_WNM_ivc = core.return_result_cube(ampfield=np.array(ampfield_pix)[idx_WNM_ivc], pixfield=np.array(vfield_pix)[idx_WNM_ivc], sigfield=np.array(sigfield_pix)[idx_WNM_ivc])
model_LNM_ivc = core.return_result_cube(ampfield=np.array(ampfield_pix)[idx_LNM_ivc], pixfield=np.array(vfield_pix)[idx_LNM_ivc], sigfield=np.array(sigfield_pix)[idx_LNM_ivc])
stop
#Write output local
hdu0 = fits.PrimaryHDU(model_CNM_local)
hdu0.header = hdr
hdulist = fits.HDUList([hdu0])
hdulist.writeto("/home/amarchal/Projects/ROHSA_paper/OBS/output/GHIGLS_NEP_Tb_CNM_local.fits", overwrite=True)
hdu0 = fits.PrimaryHDU(model_LNM_local)
hdu0.header = hdr
hdulist = fits.HDUList([hdu0])
hdulist.writeto("/home/amarchal/Projects/ROHSA_paper/OBS/output/GHIGLS_NEP_Tb_LNM_local.fits", overwrite=True)
hdu0 = fits.PrimaryHDU(model_WNM_local)
hdu0.header = hdr
hdulist = fits.HDUList([hdu0])
hdulist.writeto("/home/amarchal/Projects/ROHSA_paper/OBS/output/GHIGLS_NEP_Tb_WNM_local.fits", overwrite=True)
#Write output ivc
hdu0 = fits.PrimaryHDU(model_CNM_ivc)
hdu0.header = hdr
hdulist = fits.HDUList([hdu0])
hdulist.writeto("/home/amarchal/Projects/ROHSA_paper/OBS/output/GHIGLS_NEP_Tb_CNM_ivc.fits", overwrite=True)
hdu0 = fits.PrimaryHDU(model_LNM_ivc)
hdu0.header = hdr
hdulist = fits.HDUList([hdu0])
hdulist.writeto("/home/amarchal/Projects/ROHSA_paper/OBS/output/GHIGLS_NEP_Tb_LNM_ivc.fits", overwrite=True)
hdu0 = fits.PrimaryHDU(model_WNM_ivc)
hdu0.header = hdr
hdulist = fits.HDUList([hdu0])
hdulist.writeto("/home/amarchal/Projects/ROHSA_paper/OBS/output/GHIGLS_NEP_Tb_WNM_ivc.fits", overwrite=True)
| [
"numpy.median",
"astropy.io.fits.PrimaryHDU",
"turbulence.proj.wcs2D",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"astropy.io.fits.open",
"ROHSApy.ROHSA",
"astropy.io.fits.HDUList",
"numpy.sqrt"
] | [((462, 537), 'astropy.io.fits.open', 'fits.open', (['"""/data/amarchal/ROHSA_paper/data/observation/GHIGLS_NEP_Tb.fits"""'], {}), "('/data/amarchal/ROHSA_paper/data/observation/GHIGLS_NEP_Tb.fits')\n", (471, 537), False, 'from astropy.io import fits\n'), ((604, 622), 'turbulence.proj.wcs2D', 'tb.proj.wcs2D', (['hdr'], {}), '(hdr)\n', (617, 622), True, 'import turbulence as tb\n'), ((665, 685), 'ROHSApy.ROHSA', 'ROHSA', (['cube'], {'hdr': 'hdr'}), '(cube, hdr=hdr)\n', (670, 685), False, 'from ROHSApy import ROHSA\n'), ((5376, 5408), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['model_CNM_local'], {}), '(model_CNM_local)\n', (5391, 5408), False, 'from astropy.io import fits\n'), ((5437, 5457), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu0]'], {}), '([hdu0])\n', (5449, 5457), False, 'from astropy.io import fits\n'), ((5577, 5609), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['model_LNM_local'], {}), '(model_LNM_local)\n', (5592, 5609), False, 'from astropy.io import fits\n'), ((5638, 5658), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu0]'], {}), '([hdu0])\n', (5650, 5658), False, 'from astropy.io import fits\n'), ((5778, 5810), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['model_WNM_local'], {}), '(model_WNM_local)\n', (5793, 5810), False, 'from astropy.io import fits\n'), ((5839, 5859), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu0]'], {}), '([hdu0])\n', (5851, 5859), False, 'from astropy.io import fits\n'), ((6261, 6291), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['model_CNM_ivc'], {}), '(model_CNM_ivc)\n', (6276, 6291), False, 'from astropy.io import fits\n'), ((6320, 6340), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu0]'], {}), '([hdu0])\n', (6332, 6340), False, 'from astropy.io import fits\n'), ((6458, 6488), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['model_LNM_ivc'], {}), '(model_LNM_ivc)\n', (6473, 6488), False, 'from astropy.io import fits\n'), ((6517, 6537), 'astropy.io.fits.HDUList', 
'fits.HDUList', (['[hdu0]'], {}), '([hdu0])\n', (6529, 6537), False, 'from astropy.io import fits\n'), ((6655, 6685), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['model_WNM_ivc'], {}), '(model_WNM_ivc)\n', (6670, 6685), False, 'from astropy.io import fits\n'), ((6714, 6734), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu0]'], {}), '([hdu0])\n', (6726, 6734), False, 'from astropy.io import fits\n'), ((979, 1034), 'numpy.zeros', 'np.zeros', (['(cube.shape[0], cube.shape[1], cube.shape[2])'], {}), '((cube.shape[0], cube.shape[1], cube.shape[2]))\n', (987, 1034), True, 'import numpy as np\n'), ((1480, 1508), 'numpy.mean', 'np.mean', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (1487, 1508), True, 'import numpy as np\n'), ((2399, 2431), 'numpy.median', 'np.median', (['sigfield'], {'axis': '(1, 2)'}), '(sigfield, axis=(1, 2))\n', (2408, 2431), True, 'import numpy as np\n'), ((1271, 1291), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (1278, 1291), True, 'import numpy as np\n'), ((4048, 4070), 'numpy.array', 'np.array', (['ampfield_pix'], {}), '(ampfield_pix)\n', (4056, 4070), True, 'import numpy as np\n'), ((4096, 4116), 'numpy.array', 'np.array', (['vfield_pix'], {}), '(vfield_pix)\n', (4104, 4116), True, 'import numpy as np\n'), ((4142, 4164), 'numpy.array', 'np.array', (['sigfield_pix'], {}), '(sigfield_pix)\n', (4150, 4164), True, 'import numpy as np\n'), ((4232, 4254), 'numpy.array', 'np.array', (['ampfield_pix'], {}), '(ampfield_pix)\n', (4240, 4254), True, 'import numpy as np\n'), ((4280, 4300), 'numpy.array', 'np.array', (['vfield_pix'], {}), '(vfield_pix)\n', (4288, 4300), True, 'import numpy as np\n'), ((4326, 4348), 'numpy.array', 'np.array', (['sigfield_pix'], {}), '(sigfield_pix)\n', (4334, 4348), True, 'import numpy as np\n'), ((4416, 4438), 'numpy.array', 'np.array', (['ampfield_pix'], {}), '(ampfield_pix)\n', (4424, 4438), True, 'import numpy as np\n'), ((4464, 4484), 'numpy.array', 'np.array', 
(['vfield_pix'], {}), '(vfield_pix)\n', (4472, 4484), True, 'import numpy as np\n'), ((4510, 4532), 'numpy.array', 'np.array', (['sigfield_pix'], {}), '(sigfield_pix)\n', (4518, 4532), True, 'import numpy as np\n'), ((4599, 4621), 'numpy.array', 'np.array', (['ampfield_pix'], {}), '(ampfield_pix)\n', (4607, 4621), True, 'import numpy as np\n'), ((4645, 4665), 'numpy.array', 'np.array', (['vfield_pix'], {}), '(vfield_pix)\n', (4653, 4665), True, 'import numpy as np\n'), ((4689, 4711), 'numpy.array', 'np.array', (['sigfield_pix'], {}), '(sigfield_pix)\n', (4697, 4711), True, 'import numpy as np\n'), ((4775, 4797), 'numpy.array', 'np.array', (['ampfield_pix'], {}), '(ampfield_pix)\n', (4783, 4797), True, 'import numpy as np\n'), ((4821, 4841), 'numpy.array', 'np.array', (['vfield_pix'], {}), '(vfield_pix)\n', (4829, 4841), True, 'import numpy as np\n'), ((4865, 4887), 'numpy.array', 'np.array', (['sigfield_pix'], {}), '(sigfield_pix)\n', (4873, 4887), True, 'import numpy as np\n'), ((4951, 4973), 'numpy.array', 'np.array', (['ampfield_pix'], {}), '(ampfield_pix)\n', (4959, 4973), True, 'import numpy as np\n'), ((4997, 5017), 'numpy.array', 'np.array', (['vfield_pix'], {}), '(vfield_pix)\n', (5005, 5017), True, 'import numpy as np\n'), ((5041, 5063), 'numpy.array', 'np.array', (['sigfield_pix'], {}), '(sigfield_pix)\n', (5049, 5063), True, 'import numpy as np\n'), ((2854, 2884), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (2863, 2884), True, 'import numpy as np\n'), ((3044, 3074), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3053, 3074), True, 'import numpy as np\n'), ((3193, 3223), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3202, 3223), True, 'import numpy as np\n'), ((3341, 3371), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3350, 3371), True, 'import numpy as np\n'), ((3529, 3559), 
'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3538, 3559), True, 'import numpy as np\n'), ((3676, 3706), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3685, 3706), True, 'import numpy as np\n'), ((2772, 2804), 'numpy.median', 'np.median', (['sigfield'], {'axis': '(1, 2)'}), '(sigfield, axis=(1, 2))\n', (2781, 2804), True, 'import numpy as np\n'), ((2813, 2843), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (2822, 2843), True, 'import numpy as np\n'), ((3003, 3033), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3012, 3033), True, 'import numpy as np\n'), ((3111, 3143), 'numpy.median', 'np.median', (['sigfield'], {'axis': '(1, 2)'}), '(sigfield, axis=(1, 2))\n', (3120, 3143), True, 'import numpy as np\n'), ((3152, 3182), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3161, 3182), True, 'import numpy as np\n'), ((3259, 3291), 'numpy.median', 'np.median', (['sigfield'], {'axis': '(1, 2)'}), '(sigfield, axis=(1, 2))\n', (3268, 3291), True, 'import numpy as np\n'), ((3300, 3330), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3309, 3330), True, 'import numpy as np\n'), ((3488, 3518), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3497, 3518), True, 'import numpy as np\n'), ((3594, 3626), 'numpy.median', 'np.median', (['sigfield'], {'axis': '(1, 2)'}), '(sigfield, axis=(1, 2))\n', (3603, 3626), True, 'import numpy as np\n'), ((3635, 3665), 'numpy.median', 'np.median', (['vfield'], {'axis': '(1, 2)'}), '(vfield, axis=(1, 2))\n', (3644, 3665), True, 'import numpy as np\n'), ((2921, 2953), 'numpy.median', 'np.median', (['sigfield'], {'axis': '(1, 2)'}), '(sigfield, axis=(1, 2))\n', (2930, 2953), True, 'import numpy as np\n'), ((2962, 2994), 'numpy.median', 'np.median', 
(['sigfield'], {'axis': '(1, 2)'}), '(sigfield, axis=(1, 2))\n', (2971, 2994), True, 'import numpy as np\n'), ((3406, 3438), 'numpy.median', 'np.median', (['sigfield'], {'axis': '(1, 2)'}), '(sigfield, axis=(1, 2))\n', (3415, 3438), True, 'import numpy as np\n'), ((3447, 3479), 'numpy.median', 'np.median', (['sigfield'], {'axis': '(1, 2)'}), '(sigfield, axis=(1, 2))\n', (3456, 3479), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
from multiprocessing import Pool
import pydensecrf.densecrf as dcrf
# File I/O
INPUT_PATH = 'D:/Datasets/penguinguy/%03d%03d.png'
OUTPUT_PATH = "output/penguinguy_crf_8"
# Arch Camera Configurlation
TOTAL_CAMERA = 10
TOTAL_IMAGE_PER_CAMERA = 40
THRESHOLD_RATIO = 0.05 # Difference betweem image [0 - 1]
# Rotate
USE_ROTATE_TO_IMAGE = True
ROTATE_DIRECTION = cv2.ROTATE_90_CLOCKWISE
# Remove noise when differnece 2 image
USE_REMOVE_PEPPER_NOISE = True
OPENNING_KERNEL_SIZE = (5,5)
THRESHOLD_STRENG = 20
THRESHOLD_USE_TRIANGLE = False
USE_CLOSING_FOREGROUND = True
CLOSING_KERNEL = (30,30)
MP_POOL_SIZE = 3
# CRF configure
CRF_TOTAL_LABEL = 2
#Maximize varience
FOREGROUND_CLUSTER = 4
BACKGROUND_CLUSTER = 4
# BACKGROUND
USE_BLUR_EDGE = False
BLUR_KERNEL = (11,11)
BLUR_ERODE_KERNEL = (5,5)
USE_MEAN_BACKGROUND = True
NEW_BACKGROUND_COLOR = (0,0,0) # range 0-255 / active when not use mean background
USE_DENOISE_FOREGROUND_MASK = True
FOREGROUND_OPENNING_KERNEL_SIZE = (7,7)
def get_diff_mask(camerea_id,current_shot):
previous_shot = (current_shot - 1) % TOTAL_IMAGE_PER_CAMERA
# read image
image_prev_uint = cv2.imread(INPUT_PATH % (camerea_id,previous_shot))
image_current_uint = cv2.imread(INPUT_PATH % (camerea_id,current_shot))
# convert to RGB
image_prev_uint = cv2.cvtColor(image_prev_uint,cv2.COLOR_BGR2RGB)
image_current_uint = cv2.cvtColor(image_current_uint,cv2.COLOR_BGR2RGB)
# rotate
if USE_ROTATE_TO_IMAGE:
image_prev_uint = cv2.rotate(image_prev_uint, ROTATE_DIRECTION)
image_current_uint = cv2.rotate(image_current_uint, ROTATE_DIRECTION)
# convert to [0-1]
image_prev = image_prev_uint / 255.0
image_current = image_current_uint / 255.0
# difference mask between 2 images
diff_mask = np.linalg.norm(image_current - image_prev, axis=-1)
diff_mask = (diff_mask > THRESHOLD_RATIO) * 1.0
#remove noise from sensor (hope this not ruin image)
if USE_REMOVE_PEPPER_NOISE:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,OPENNING_KERNEL_SIZE)
denoised_mask = cv2.morphologyEx(diff_mask, cv2.MORPH_OPEN, kernel)
else:
denoised_mask = diff_mask
return denoised_mask
def processed_camera(CAMERA_NUMBER):
print("Intializing Camera:%02d" % (CAMERA_NUMBER, ))
foreground_prob = get_diff_mask(CAMERA_NUMBER,0)
for i in range(1,40):
foreground_prob = cv2.bitwise_or(foreground_prob,get_diff_mask(CAMERA_NUMBER,i))
if USE_CLOSING_FOREGROUND:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,CLOSING_KERNEL)
mask_closed = cv2.morphologyEx(foreground_prob, cv2.MORPH_CLOSE, kernel)
else:
mask_closed = foreground_prob
# find boundary (rectangle) of object
mask_y, mask_x = np.nonzero(mask_closed)
min_x = np.min(mask_x)
max_x = np.max(mask_x)
min_y = np.min(mask_y)
max_y = np.max(mask_y)
#flood fill to remove hole
image_flooded = (mask_closed.copy() * 255.0).astype(np.uint8)
image_height, image_width = image_flooded.shape[:2]
flood_mask = np.zeros((image_height+2,image_width+2),dtype=np.uint8)
has_flooded = False
# top bar
if min_y != 0:
for i in range(image_flooded.shape[1]):
if image_flooded[0,i] != 255:
has_flooded = True
cv2.floodFill(image_flooded, flood_mask, (i,0), 255)
# left bar
if min_x != 0:
for i in range(image_flooded.shape[0]):
if image_flooded[i,0] != 255:
has_flooded = True
cv2.floodFill(image_flooded, flood_mask, (0,i), 255)
# right bar
most_right = image_flooded.shape[1] -1
if max_x != most_right:
for i in range(image_flooded.shape[0]):
if image_flooded[i,most_right] != 255:
has_flooded = True
cv2.floodFill(image_flooded, flood_mask, (most_right,i), 255)
# bottom bar
most_bottom = image_flooded.shape[0] -1
if max_y != most_bottom:
for i in range(image_flooded.shape[1]):
if image_flooded[most_bottom,i] != 255:
has_flooded = True
cv2.floodFill(image_flooded, flood_mask, (i,most_bottom), 255)
# we get background from floodfill
if has_flooded:
background_mask = flood_mask[1:-1,1:-1]
else:
background_mask = 1 - mask_closed
is_background = background_mask == 1
# backgroud mm model
default_image = cv2.imread(INPUT_PATH % (CAMERA_NUMBER,0))
if USE_ROTATE_TO_IMAGE:
default_image = cv2.rotate(default_image, ROTATE_DIRECTION)
default_image = default_image / 255.0
background_mm_model = cv2.ml.EM_create()
background_mm_model.setClustersNumber(BACKGROUND_CLUSTER)
background_mm_model.trainEM(default_image[is_background])
background_weights = background_mm_model.getWeights()
for IMAGE_NUMBER in range(TOTAL_IMAGE_PER_CAMERA):
print("working on Cam:%02d, Shot:%02d" % (CAMERA_NUMBER, IMAGE_NUMBER))
image_current_uint = cv2.imread(INPUT_PATH % (CAMERA_NUMBER,IMAGE_NUMBER))
if USE_ROTATE_TO_IMAGE:
image_current_uint = cv2.rotate(image_current_uint, ROTATE_DIRECTION)
HEIGHT, WIDTH, CHANNEL = image_current_uint.shape
#Threshold for foreground
image_gray = cv2.cvtColor(image_current_uint,cv2.COLOR_BGR2GRAY)
if THRESHOLD_USE_TRIANGLE:
ret2,object_threshold = cv2.threshold(image_gray,200,255,cv2.THRESH_TRIANGLE)
else:
ret2,object_threshold = cv2.threshold(image_gray,THRESHOLD_STRENG,255,cv2.THRESH_BINARY)
if USE_DENOISE_FOREGROUND_MASK:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,FOREGROUND_OPENNING_KERNEL_SIZE)
object_threshold_opened = cv2.morphologyEx(object_threshold, cv2.MORPH_OPEN, kernel)
else:
object_threshold_opened = object_threshold
is_foreground = object_threshold_opened > 0
# normalize image to float32 [0,1]
image_current_float32 = image_current_uint / 255.0
# unknown mask
unknown_mask = np.ones((HEIGHT, WIDTH))
unknown_mask[is_background] = 0
unknown_mask[is_foreground] = 0
is_unknown = unknown_mask > 0
#Forground mm model
foreground_mm_model = cv2.ml.EM_create()
foreground_mm_model.setClustersNumber(FOREGROUND_CLUSTER)
foreground_mm_model.trainEM(image_current_float32[is_foreground])
foreground_weights = foreground_mm_model.getWeights()
# prediction foreground
_, foreground_predicteds = foreground_mm_model.predict(image_current_float32[is_unknown])
foreground_probability = foreground_predicteds.dot(foreground_weights.T)
# prediction foreground
_, background_predicteds = background_mm_model.predict(image_current_float32[is_unknown])
background_probability = background_predicteds.dot(background_weights.T)
# Foreground probability map
foreground_probability_map = np.zeros((HEIGHT, WIDTH))
foreground_probability_map[is_unknown] = foreground_probability[:,0]
foreground_probability_map[is_foreground] = 1.0
foreground_probability_map[is_background] = 0.0
# Background probability map
background_probability_map = np.zeros((HEIGHT, WIDTH))
background_probability_map[is_unknown] = background_probability[:,0]
background_probability_map[is_foreground] = 0.0
background_probability_map[is_background] = 1.0
# DENSE CRF
denseCRF = dcrf.DenseCRF2D(WIDTH, HEIGHT, CRF_TOTAL_LABEL)
unary = np.dstack((foreground_probability_map,background_probability_map))
unary = -np.log(unary) # denseCRF require negative log probability
unary = unary.astype(np.float32) #require float32
unary = unary.transpose(2, 0, 1).reshape((CRF_TOTAL_LABEL,-1)) # unary need to be flat.
unary = np.ascontiguousarray(unary) #avoid cython problem :X
denseCRF.setUnaryEnergy(unary)
image_current_rgb = cv2.cvtColor(image_current_uint,cv2.COLOR_BGR2RGB)
CRF_PAIRWISE_GAUSSIAN_SXY = 3
CRF_PAIRWISE_GAUSSIAN_COMPACT = 3
CRF_PAIRWISE_BILATERAL_SXY = 80
CRF_PAIRWISE_BILATERAL_SRGB = 13
CRF_PAIRWISE_BILATERAL_COMPACT = 10
CRF_ITERATION = 20
denseCRF.addPairwiseGaussian(
sxy=CRF_PAIRWISE_GAUSSIAN_SXY,
compat=CRF_PAIRWISE_GAUSSIAN_COMPACT
)
denseCRF.addPairwiseBilateral(
sxy=CRF_PAIRWISE_BILATERAL_SXY,
srgb=CRF_PAIRWISE_BILATERAL_SRGB,
rgbim=image_current_rgb,
compat=CRF_PAIRWISE_BILATERAL_COMPACT
)
segmented_probability = denseCRF.inference(CRF_ITERATION)
segmented_mask = np.argmax(segmented_probability, axis=0).reshape((HEIGHT,WIDTH))
segmented_foreground = segmented_mask == 0
segmented_background = segmented_mask == 1
# Find background color
if USE_MEAN_BACKGROUND:
background_color = np.mean(image_current_uint[segmented_background],axis=0)
else:
background_color = NEW_BACKGROUND_COLOR
output_image = image_current_uint.copy()
output_image[segmented_background] = background_color
if USE_BLUR_EDGE:
output_image = cv2.blur(output_image, BLUR_KERNEL)
new_mask = 1 - segmented_mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, BLUR_ERODE_KERNEL)
eroded_mask = cv2.erode(new_mask.astype(np.uint8),kernel)
remain_foreground = eroded_mask == 1
output_image[remain_foreground] = image_current_uint[remain_foreground]
#create output image
cv2.imwrite("%s/cam%03d_%05d.png"%(OUTPUT_PATH,CAMERA_NUMBER,IMAGE_NUMBER), output_image)
if __name__ == '__main__':
if not os.path.exists(OUTPUT_PATH):
os.mkdir(OUTPUT_PATH)
#params = list(range(TOTAL_CAMERA))
#params = [6,8,9]
#pool = Pool(MP_POOL_SIZE)
#pool.map(processed_camera, params)
processed_camera(8) | [
"os.mkdir",
"numpy.argmax",
"numpy.ones",
"numpy.mean",
"numpy.linalg.norm",
"cv2.floodFill",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"numpy.max",
"pydensecrf.densecrf.DenseCRF2D",
"numpy.dstack",
"cv2.morphologyEx",
"numpy.min",
"numpy.log",
"cv2.rotate",
"cv2.getStructurin... | [((1208, 1260), 'cv2.imread', 'cv2.imread', (['(INPUT_PATH % (camerea_id, previous_shot))'], {}), '(INPUT_PATH % (camerea_id, previous_shot))\n', (1218, 1260), False, 'import cv2\n'), ((1286, 1337), 'cv2.imread', 'cv2.imread', (['(INPUT_PATH % (camerea_id, current_shot))'], {}), '(INPUT_PATH % (camerea_id, current_shot))\n', (1296, 1337), False, 'import cv2\n'), ((1381, 1429), 'cv2.cvtColor', 'cv2.cvtColor', (['image_prev_uint', 'cv2.COLOR_BGR2RGB'], {}), '(image_prev_uint, cv2.COLOR_BGR2RGB)\n', (1393, 1429), False, 'import cv2\n'), ((1454, 1505), 'cv2.cvtColor', 'cv2.cvtColor', (['image_current_uint', 'cv2.COLOR_BGR2RGB'], {}), '(image_current_uint, cv2.COLOR_BGR2RGB)\n', (1466, 1505), False, 'import cv2\n'), ((1863, 1914), 'numpy.linalg.norm', 'np.linalg.norm', (['(image_current - image_prev)'], {'axis': '(-1)'}), '(image_current - image_prev, axis=-1)\n', (1877, 1914), True, 'import numpy as np\n'), ((2849, 2872), 'numpy.nonzero', 'np.nonzero', (['mask_closed'], {}), '(mask_closed)\n', (2859, 2872), True, 'import numpy as np\n'), ((2885, 2899), 'numpy.min', 'np.min', (['mask_x'], {}), '(mask_x)\n', (2891, 2899), True, 'import numpy as np\n'), ((2912, 2926), 'numpy.max', 'np.max', (['mask_x'], {}), '(mask_x)\n', (2918, 2926), True, 'import numpy as np\n'), ((2939, 2953), 'numpy.min', 'np.min', (['mask_y'], {}), '(mask_y)\n', (2945, 2953), True, 'import numpy as np\n'), ((2966, 2980), 'numpy.max', 'np.max', (['mask_y'], {}), '(mask_y)\n', (2972, 2980), True, 'import numpy as np\n'), ((3152, 3213), 'numpy.zeros', 'np.zeros', (['(image_height + 2, image_width + 2)'], {'dtype': 'np.uint8'}), '((image_height + 2, image_width + 2), dtype=np.uint8)\n', (3160, 3213), True, 'import numpy as np\n'), ((4541, 4584), 'cv2.imread', 'cv2.imread', (['(INPUT_PATH % (CAMERA_NUMBER, 0))'], {}), '(INPUT_PATH % (CAMERA_NUMBER, 0))\n', (4551, 4584), False, 'import cv2\n'), ((4750, 4768), 'cv2.ml.EM_create', 'cv2.ml.EM_create', ([], {}), '()\n', (4766, 4768), 
False, 'import cv2\n'), ((1572, 1617), 'cv2.rotate', 'cv2.rotate', (['image_prev_uint', 'ROTATE_DIRECTION'], {}), '(image_prev_uint, ROTATE_DIRECTION)\n', (1582, 1617), False, 'import cv2\n'), ((1647, 1695), 'cv2.rotate', 'cv2.rotate', (['image_current_uint', 'ROTATE_DIRECTION'], {}), '(image_current_uint, ROTATE_DIRECTION)\n', (1657, 1695), False, 'import cv2\n'), ((2073, 2139), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', 'OPENNING_KERNEL_SIZE'], {}), '(cv2.MORPH_ELLIPSE, OPENNING_KERNEL_SIZE)\n', (2098, 2139), False, 'import cv2\n'), ((2163, 2214), 'cv2.morphologyEx', 'cv2.morphologyEx', (['diff_mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(diff_mask, cv2.MORPH_OPEN, kernel)\n', (2179, 2214), False, 'import cv2\n'), ((2595, 2655), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', 'CLOSING_KERNEL'], {}), '(cv2.MORPH_ELLIPSE, CLOSING_KERNEL)\n', (2620, 2655), False, 'import cv2\n'), ((2677, 2735), 'cv2.morphologyEx', 'cv2.morphologyEx', (['foreground_prob', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(foreground_prob, cv2.MORPH_CLOSE, kernel)\n', (2693, 2735), False, 'import cv2\n'), ((4637, 4680), 'cv2.rotate', 'cv2.rotate', (['default_image', 'ROTATE_DIRECTION'], {}), '(default_image, ROTATE_DIRECTION)\n', (4647, 4680), False, 'import cv2\n'), ((5120, 5174), 'cv2.imread', 'cv2.imread', (['(INPUT_PATH % (CAMERA_NUMBER, IMAGE_NUMBER))'], {}), '(INPUT_PATH % (CAMERA_NUMBER, IMAGE_NUMBER))\n', (5130, 5174), False, 'import cv2\n'), ((5412, 5464), 'cv2.cvtColor', 'cv2.cvtColor', (['image_current_uint', 'cv2.COLOR_BGR2GRAY'], {}), '(image_current_uint, cv2.COLOR_BGR2GRAY)\n', (5424, 5464), False, 'import cv2\n'), ((6228, 6252), 'numpy.ones', 'np.ones', (['(HEIGHT, WIDTH)'], {}), '((HEIGHT, WIDTH))\n', (6235, 6252), True, 'import numpy as np\n'), ((6430, 6448), 'cv2.ml.EM_create', 'cv2.ml.EM_create', ([], {}), '()\n', (6446, 6448), False, 'import cv2\n'), ((7150, 7175), 'numpy.zeros', 'np.zeros', (['(HEIGHT, 
WIDTH)'], {}), '((HEIGHT, WIDTH))\n', (7158, 7175), True, 'import numpy as np\n'), ((7440, 7465), 'numpy.zeros', 'np.zeros', (['(HEIGHT, WIDTH)'], {}), '((HEIGHT, WIDTH))\n', (7448, 7465), True, 'import numpy as np\n'), ((7696, 7743), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['WIDTH', 'HEIGHT', 'CRF_TOTAL_LABEL'], {}), '(WIDTH, HEIGHT, CRF_TOTAL_LABEL)\n', (7711, 7743), True, 'import pydensecrf.densecrf as dcrf\n'), ((7760, 7827), 'numpy.dstack', 'np.dstack', (['(foreground_probability_map, background_probability_map)'], {}), '((foreground_probability_map, background_probability_map))\n', (7769, 7827), True, 'import numpy as np\n'), ((8072, 8099), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['unary'], {}), '(unary)\n', (8092, 8099), True, 'import numpy as np\n'), ((8193, 8244), 'cv2.cvtColor', 'cv2.cvtColor', (['image_current_uint', 'cv2.COLOR_BGR2RGB'], {}), '(image_current_uint, cv2.COLOR_BGR2RGB)\n', (8205, 8244), False, 'import cv2\n'), ((9906, 10003), 'cv2.imwrite', 'cv2.imwrite', (["('%s/cam%03d_%05d.png' % (OUTPUT_PATH, CAMERA_NUMBER, IMAGE_NUMBER))", 'output_image'], {}), "('%s/cam%03d_%05d.png' % (OUTPUT_PATH, CAMERA_NUMBER,\n IMAGE_NUMBER), output_image)\n", (9917, 10003), False, 'import cv2\n'), ((10035, 10062), 'os.path.exists', 'os.path.exists', (['OUTPUT_PATH'], {}), '(OUTPUT_PATH)\n', (10049, 10062), False, 'import os\n'), ((10072, 10093), 'os.mkdir', 'os.mkdir', (['OUTPUT_PATH'], {}), '(OUTPUT_PATH)\n', (10080, 10093), False, 'import os\n'), ((5240, 5288), 'cv2.rotate', 'cv2.rotate', (['image_current_uint', 'ROTATE_DIRECTION'], {}), '(image_current_uint, ROTATE_DIRECTION)\n', (5250, 5288), False, 'import cv2\n'), ((5535, 5591), 'cv2.threshold', 'cv2.threshold', (['image_gray', '(200)', '(255)', 'cv2.THRESH_TRIANGLE'], {}), '(image_gray, 200, 255, cv2.THRESH_TRIANGLE)\n', (5548, 5591), False, 'import cv2\n'), ((5639, 5706), 'cv2.threshold', 'cv2.threshold', (['image_gray', 'THRESHOLD_STRENG', '(255)', 'cv2.THRESH_BINARY'], {}), 
'(image_gray, THRESHOLD_STRENG, 255, cv2.THRESH_BINARY)\n', (5652, 5706), False, 'import cv2\n'), ((5774, 5851), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', 'FOREGROUND_OPENNING_KERNEL_SIZE'], {}), '(cv2.MORPH_ELLIPSE, FOREGROUND_OPENNING_KERNEL_SIZE)\n', (5799, 5851), False, 'import cv2\n'), ((5889, 5947), 'cv2.morphologyEx', 'cv2.morphologyEx', (['object_threshold', 'cv2.MORPH_OPEN', 'kernel'], {}), '(object_threshold, cv2.MORPH_OPEN, kernel)\n', (5905, 5947), False, 'import cv2\n'), ((7844, 7857), 'numpy.log', 'np.log', (['unary'], {}), '(unary)\n', (7850, 7857), True, 'import numpy as np\n'), ((9206, 9263), 'numpy.mean', 'np.mean', (['image_current_uint[segmented_background]'], {'axis': '(0)'}), '(image_current_uint[segmented_background], axis=0)\n', (9213, 9263), True, 'import numpy as np\n'), ((9502, 9537), 'cv2.blur', 'cv2.blur', (['output_image', 'BLUR_KERNEL'], {}), '(output_image, BLUR_KERNEL)\n', (9510, 9537), False, 'import cv2\n'), ((9601, 9664), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', 'BLUR_ERODE_KERNEL'], {}), '(cv2.MORPH_ELLIPSE, BLUR_ERODE_KERNEL)\n', (9626, 9664), False, 'import cv2\n'), ((3406, 3459), 'cv2.floodFill', 'cv2.floodFill', (['image_flooded', 'flood_mask', '(i, 0)', '(255)'], {}), '(image_flooded, flood_mask, (i, 0), 255)\n', (3419, 3459), False, 'import cv2\n'), ((3634, 3687), 'cv2.floodFill', 'cv2.floodFill', (['image_flooded', 'flood_mask', '(0, i)', '(255)'], {}), '(image_flooded, flood_mask, (0, i), 255)\n', (3647, 3687), False, 'import cv2\n'), ((3925, 3987), 'cv2.floodFill', 'cv2.floodFill', (['image_flooded', 'flood_mask', '(most_right, i)', '(255)'], {}), '(image_flooded, flood_mask, (most_right, i), 255)\n', (3938, 3987), False, 'import cv2\n'), ((4230, 4293), 'cv2.floodFill', 'cv2.floodFill', (['image_flooded', 'flood_mask', '(i, most_bottom)', '(255)'], {}), '(image_flooded, flood_mask, (i, most_bottom), 255)\n', (4243, 4293), False, 'import 
cv2\n'), ((8943, 8983), 'numpy.argmax', 'np.argmax', (['segmented_probability'], {'axis': '(0)'}), '(segmented_probability, axis=0)\n', (8952, 8983), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
from tqdm.auto import tqdm
from multiprocessing import Pool
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.cm import ScalarMappable
from matplotlib.colors import Normalize, LogNorm
from matplotlib.offsetbox import AnchoredText
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.shapereader as shpreader
# In[2]:
# https://gist.github.com/JeffPaine/3083347
# https://gist.github.com/tlancon/9794920a0c3a9990279de704f936050c
# Mapping from full US state / district name to its two-letter postal code.
# Used to select and order the US rows of the CSSE time-series data.
US_STATES = {
    'Alabama': 'AL',
    'Alaska': 'AK',
    'Arizona': 'AZ',
    'Arkansas': 'AR',
    'California': 'CA',
    'Colorado': 'CO',
    'Connecticut': 'CT',
    'Delaware': 'DE',
    'District of Columbia': 'DC',
    'Florida': 'FL',
    'Georgia': 'GA',
    'Hawaii': 'HI',
    'Idaho': 'ID',
    'Illinois': 'IL',
    'Indiana': 'IN',
    'Iowa': 'IA',
    'Kansas': 'KS',
    'Kentucky': 'KY',
    'Louisiana': 'LA',
    'Maine': 'ME',
    'Maryland': 'MD',
    'Massachusetts': 'MA',
    'Michigan': 'MI',
    'Minnesota': 'MN',
    'Mississippi': 'MS',
    'Missouri': 'MO',
    'Montana': 'MT',
    'Nebraska': 'NE',
    'Nevada': 'NV',
    'New Hampshire': 'NH',
    'New Jersey': 'NJ',
    'New Mexico': 'NM',
    'New York': 'NY',
    'North Carolina': 'NC',
    'North Dakota': 'ND',
    'Ohio': 'OH',
    'Oklahoma': 'OK',
    'Oregon': 'OR',
    'Pennsylvania': 'PA',
    'Rhode Island': 'RI',
    'South Carolina': 'SC',
    'South Dakota': 'SD',
    'Tennessee': 'TN',
    'Texas': 'TX',
    'Utah': 'UT',
    'Vermont': 'VT',
    'Virginia': 'VA',
    'Washington': 'WA',
    'West Virginia': 'WV',
    'Wisconsin': 'WI',
    'Wyoming': 'WY'
}
# In[3]:
# Rename map: province names as spelled in the CSSE COVID dataset ->
# names as spelled in the GADM 3.6 level-1 shapefile (NAME_1 attribute).
# The 'Ningxia'/'Xinjiang' targets had been replaced by '<NAME>'
# placeholders (anonymization artifact); restored to the GADM spellings.
name2map = {
    'Inner Mongolia': 'Nei Mongol',
    'Ningxia': 'Ningxia Hui',
    'Xinjiang': 'Xinjiang Uygur',
    'Macau': 'Macao',
    'Tibet': 'Xizang'
}
# In[4]:
# Geometry lookup tables: province/state name -> shapely geometry.
china_geo_data = {}
usa_geo_data = {}
# Mainland China provinces (GADM level-1 shapefile).
for record in shpreader.Reader('./data_GADM/gadm36_CHN_1.shp').records():
    name = record.attributes['NAME_1']
    geo = record.geometry
    china_geo_data[name] = geo
# Hong Kong, Macao, Taiwan: each is a separate GADM level-0 (country) shapefile.
for sp in ['HKG','MAC','TWN']:
    record = list(shpreader.Reader('./data_GADM/gadm36_{:s}_0.shp'.format(sp)).records())[0]
    name = record.attributes['NAME_0']
    geo = record.geometry
    china_geo_data[name] = geo
# USA states from the bundled Natural Earth 110m cultural layer.
for record in shpreader.Reader(shpreader.natural_earth(resolution='110m',category='cultural', name='admin_1_states_provinces')).records():
    name = record.attributes['name']
    geo = record.geometry
    usa_geo_data[name] = geo
# In[5]:
# Map log10(case count), clipped into [0, 5], onto the 'Reds' colormap;
# plot_states() below calls color_mapper.to_rgba(np.log10(count)).
color_mapper = ScalarMappable(norm=Normalize(0,5,clip=True), cmap='Reds')
# color_mapper = ScalarMappable(norm=LogNorm(1,5,clip=True), cmap='Reds')
# plt.scatter(np.arange(1,5e4,50),np.arange(1,5e4,50),c=color_mapper.to_rgba(np.log10(np.arange(1,5e4,50))))
# In[6]:
# Build one DataFrame per series (confirmed / deaths / recovered) for each
# country, indexed by province/state, with ISO-date column labels.
usa_data = []
china_data = []
for file_type in ['Confirmed','Deaths','Recovered']:
    # Load csv
    total_data_df = pd.read_csv('time_series_19-covid-{:s}.csv'.format(file_type)).set_index('Country/Region')
    # Save TimeSeries as strings
    date_idx = total_data_df.columns.drop(['Province/State','Lat','Long'])
    date_str = pd.to_datetime(date_idx.tolist()).strftime('%Y-%m-%d')
    data_dict = dict(zip(date_idx, date_str))
    usa_data_df = (
        total_data_df.loc['US']
        .set_index('Province/State')
        .loc[US_STATES.keys()]
        .fillna(0)
        .rename(columns=data_dict)
    )
    china_data_df = (
        total_data_df.loc['China']
        .append(
            total_data_df.loc['Taiwan*'].fillna('Taiwan')
        )
        .set_index('Province/State')
        # Rename for some provinces
        .rename(index=name2map)
        .loc[china_geo_data.keys()]
        .fillna(0)
        .rename(columns=data_dict)
    )
    # Convert to int
    usa_data_df.loc[ :,date_str] = usa_data_df.loc[ :,date_str].astype(int)
    china_data_df.loc[:,date_str] = china_data_df.loc[:,date_str].astype(int)
    usa_data.append(usa_data_df)
    china_data.append(china_data_df)
# Define `existed` = `confirmed` - `cured` - `dead`
usa_data_df = usa_data[0] - usa_data[1] - usa_data[2]
china_data_df = china_data[0] - china_data[1] - china_data[2]
# The Lat/Long columns are identical in all three frames, so the double
# subtraction above turns them into -Lat/-Long (L - L - L = -L);
# multiply by -1 to restore the original coordinates.
usa_data_df.loc[ :,['Lat','Long']] *= -1
china_data_df.loc[:,['Lat','Long']] *= -1
# ---
# In[7]:
def plot_main_land(ax):
    """Draw the base-map layers (land, ocean, coastline, borders) on *ax*."""
    scale = '110m'
    layers = [
        (cfeature.LAND.with_scale(scale), {'facecolor': 'white', 'alpha': 0.5}),
        (cfeature.OCEAN.with_scale(scale), {}),
        (cfeature.COASTLINE.with_scale(scale), {'zorder': 100}),
        (cfeature.BORDERS.with_scale(scale), {'zorder': 100}),
    ]
    for feature, options in layers:
        ax.add_feature(feature, **options)
def plot_states(ax, df, geo_data):
    """Color each province/state polygon by log10 of its case count.

    Regions with a zero count are drawn white; others are shaded via the
    module-level ``color_mapper``. A small inset colorbar with 10^k tick
    labels is added to *ax*.
    """
    for region, shape in geo_data.items():
        count = df[region]
        face = 'white' if count == 0 else color_mapper.to_rgba(np.log10(count))
        ax.add_geometries(
            shape,
            crs=ccrs.PlateCarree(),
            facecolor=face,
            lw=0.1,
            edgecolor='k',
            zorder=0
        )
    cax = ax.inset_axes([0.9, 0.1, 0.02, 0.35])
    plt.colorbar(color_mapper, cax=cax, extend='max', ticks=np.arange(0, 6))
    labels = ['$10^{:d}$'.format(x) for x in np.arange(0, 6)]
    cax.set_yticklabels(labels, fontsize=10, ha='left', va='center')
# In[8]:
def two_countries_plot(df1, df2):
    """Draw side-by-side choropleth maps: China (*df1*) and the US (*df2*).

    Returns the matplotlib Figure so the caller can add a title and save it.
    """
    fig = plt.figure(figsize=(14, 5))
    ax1 = fig.add_subplot(
        121,
        projection=ccrs.LambertConformal(central_latitude=90,
                                          central_longitude=105))
    ax2 = fig.add_subplot(122, projection=ccrs.LambertConformal())

    # Titles, map extents and base layers for both panels.
    panels = ((ax1, 'China', [80, 130, 15, 53]),
              (ax2, 'US', [-120, -70, 22, 53]))
    for ax, title, extent in panels:
        ax.set_title(title, fontsize=24)
        ax.set_extent(extent)
        plot_main_land(ax)

    plot_states(ax1, df1, china_geo_data)
    plot_states(ax2, df2, usa_geo_data)

    # Watermarks / attribution.
    ax1.add_artist(AnchoredText(
        'Visualization by ch',
        loc='lower left',
        prop={'size': 8, 'alpha': 0.25}, frameon=False,
    ))
    ax2.add_artist(AnchoredText(
        'Data from CSSE @ Johns Hopkins University',
        loc='lower right',
        bbox_to_anchor=(1.015, -0.15),
        bbox_transform=ax2.transAxes,
        prop={'size': 10}, frameon=True,
    ))
    ax2.add_artist(AnchoredText(
        '@chAwater',
        loc='lower left',
        prop={'size': 8, 'alpha': 0.25}, frameon=False,
    ))
    return fig
# In[9]:
def worker(idx):
    """Render the map frame for day *idx* to frames/frame_<idx>.jpg.

    Reads the module-level ``date_str``, ``china_data_df`` and
    ``usa_data_df``; returns 1 so the pool driver can count completions.
    """
    day = date_str[idx]
    out_path = 'frames/frame_{:02d}.jpg'.format(idx)
    fig = two_countries_plot(china_data_df[day], usa_data_df[day])
    fig.suptitle('Existing COVID-19\n' + day,
                 y=1.1, fontsize=28, ha='center', va='top')
    fig.savefig(out_path, dpi=150, bbox_inches='tight', facecolor=None)
    plt.close(fig)
    return 1
# In[ ]:
# In[10]:
# Render every daily frame in parallel (4 worker processes);
# tqdm wraps the imap iterator to show progress.
pool = Pool(4)
_ = list(
    tqdm(
        pool.imap(worker, np.arange(date_str.shape[0])), total=date_str.shape[0]
    )
)
pool.close()
pool.join()
# In[11]:
# Assemble the frames into an animated GIF at 8 fps with ffmpeg.
get_ipython().system(' ffmpeg -f image2 -framerate 8 -y -i ./frames/frame_%002d.jpg China_vs_US.gif')
# In[12]:
# Same frames as MP4; pad width/height to even numbers as the encoder requires.
get_ipython().system(' ffmpeg -f image2 -framerate 8 -y -i ./frames/frame_%002d.jpg -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" China_vs_US.mp4')
# In[ ]:
| [
"cartopy.crs.LambertConformal",
"matplotlib.offsetbox.AnchoredText",
"cartopy.feature.OCEAN.with_scale",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.close",
"cartopy.feature.LAND.with_scale",
"cartopy.io.shapereader.natural_earth",
"numpy.log10",
"matplotlib.pyplot.figure",
"cartopy.io.shape... | [((6959, 6966), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6963, 6966), False, 'from multiprocessing import Pool\n'), ((5375, 5402), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 5)'}), '(figsize=(14, 5))\n', (5385, 5402), True, 'import matplotlib.pyplot as plt\n'), ((5886, 5991), 'matplotlib.offsetbox.AnchoredText', 'AnchoredText', (['"""Visualization by ch"""'], {'loc': '"""lower left"""', 'prop': "{'size': 8, 'alpha': 0.25}", 'frameon': '(False)'}), "('Visualization by ch', loc='lower left', prop={'size': 8,\n 'alpha': 0.25}, frameon=False)\n", (5898, 5991), False, 'from matplotlib.offsetbox import AnchoredText\n'), ((6058, 6237), 'matplotlib.offsetbox.AnchoredText', 'AnchoredText', (['"""Data from CSSE @ Johns Hopkins University"""'], {'loc': '"""lower right"""', 'bbox_to_anchor': '(1.015, -0.15)', 'bbox_transform': 'ax2.transAxes', 'prop': "{'size': 10}", 'frameon': '(True)'}), "('Data from CSSE @ Johns Hopkins University', loc='lower right',\n bbox_to_anchor=(1.015, -0.15), bbox_transform=ax2.transAxes, prop={\n 'size': 10}, frameon=True)\n", (6070, 6237), False, 'from matplotlib.offsetbox import AnchoredText\n'), ((6316, 6411), 'matplotlib.offsetbox.AnchoredText', 'AnchoredText', (['"""@chAwater"""'], {'loc': '"""lower left"""', 'prop': "{'size': 8, 'alpha': 0.25}", 'frameon': '(False)'}), "('@chAwater', loc='lower left', prop={'size': 8, 'alpha': 0.25},\n frameon=False)\n", (6328, 6411), False, 'from matplotlib.offsetbox import AnchoredText\n'), ((6891, 6905), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6900, 6905), True, 'import matplotlib.pyplot as plt\n'), ((1840, 1888), 'cartopy.io.shapereader.Reader', 'shpreader.Reader', (['"""./data_GADM/gadm36_CHN_1.shp"""'], {}), "('./data_GADM/gadm36_CHN_1.shp')\n", (1856, 1888), True, 'import cartopy.io.shapereader as shpreader\n'), ((2517, 2543), 'matplotlib.colors.Normalize', 'Normalize', (['(0)', '(5)'], {'clip': '(True)'}), '(0, 5, 
clip=True)\n', (2526, 2543), False, 'from matplotlib.colors import Normalize, LogNorm\n'), ((4364, 4396), 'cartopy.feature.LAND.with_scale', 'cfeature.LAND.with_scale', (['"""110m"""'], {}), "('110m')\n", (4388, 4396), True, 'import cartopy.feature as cfeature\n'), ((4447, 4480), 'cartopy.feature.OCEAN.with_scale', 'cfeature.OCEAN.with_scale', (['"""110m"""'], {}), "('110m')\n", (4472, 4480), True, 'import cartopy.feature as cfeature\n'), ((4501, 4538), 'cartopy.feature.COASTLINE.with_scale', 'cfeature.COASTLINE.with_scale', (['"""110m"""'], {}), "('110m')\n", (4530, 4538), True, 'import cartopy.feature as cfeature\n'), ((4571, 4606), 'cartopy.feature.BORDERS.with_scale', 'cfeature.BORDERS.with_scale', (['"""110m"""'], {}), "('110m')\n", (4598, 4606), True, 'import cartopy.feature as cfeature\n'), ((2268, 2369), 'cartopy.io.shapereader.natural_earth', 'shpreader.natural_earth', ([], {'resolution': '"""110m"""', 'category': '"""cultural"""', 'name': '"""admin_1_states_provinces"""'}), "(resolution='110m', category='cultural', name=\n 'admin_1_states_provinces')\n", (2291, 2369), True, 'import cartopy.io.shapereader as shpreader\n'), ((5180, 5195), 'numpy.arange', 'np.arange', (['(0)', '(6)'], {}), '(0, 6)\n', (5189, 5195), True, 'import numpy as np\n'), ((5444, 5509), 'cartopy.crs.LambertConformal', 'ccrs.LambertConformal', ([], {'central_latitude': '(90)', 'central_longitude': '(105)'}), '(central_latitude=90, central_longitude=105)\n', (5465, 5509), True, 'import cartopy.crs as ccrs\n'), ((5552, 5575), 'cartopy.crs.LambertConformal', 'ccrs.LambertConformal', ([], {}), '()\n', (5573, 5575), True, 'import cartopy.crs as ccrs\n'), ((7014, 7042), 'numpy.arange', 'np.arange', (['date_str.shape[0]'], {}), '(date_str.shape[0])\n', (7023, 7042), True, 'import numpy as np\n'), ((4851, 4866), 'numpy.log10', 'np.log10', (['df[k]'], {}), '(df[k])\n', (4859, 4866), True, 'import numpy as np\n'), ((4942, 4960), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', 
(4958, 4960), True, 'import cartopy.crs as ccrs\n'), ((5253, 5268), 'numpy.arange', 'np.arange', (['(0)', '(6)'], {}), '(0, 6)\n', (5262, 5268), True, 'import numpy as np\n')] |
from setuptools.command.build_ext import build_ext
from setuptools import dist, setup, Extension, find_packages
import os
# Make numpy importable before setup() runs (needed for np.get_include()).
# NOTE(review): fetch_build_eggs is deprecated in recent setuptools; kept for
# compatibility with the existing build flow.
dist.Distribution().fetch_build_eggs(['numpy>=1.12'])

import numpy as np  # noqa

descr = 'Fast algorithm with dual extrapolation for sparse problems'

# Read __version__ from celer/__init__.py without importing the package
# (importing would require the compiled extensions to already exist).
version = None
with open(os.path.join('celer', '__init__.py'), 'r') as fid:
    for line in (line.strip() for line in fid):
        if line.startswith('__version__'):
            version = line.split('=')[1].strip().strip('\'')
            break
if version is None:
    raise RuntimeError('Could not determine version')

DISTNAME = 'celer'
DESCRIPTION = descr
MAINTAINER = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/mathurinm/celer.git'
VERSION = version
URL = 'https://mathurinm.github.io/celer'


def _cython_extension(name):
    """Build the C++ Cython Extension for celer/<name>.pyx.

    All five extensions share the exact same configuration; this helper
    replaces the previously copy-pasted Extension stanzas.
    """
    return Extension('celer.{}'.format(name),
                     sources=['celer/{}.pyx'.format(name)],
                     language='c++',
                     include_dirs=[np.get_include()],
                     extra_compile_args=["-O3"])


setup(name='celer',
      version=VERSION,
      description=DESCRIPTION,
      long_description=open('README.rst').read(),
      license=LICENSE,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      url=URL,
      download_url=DOWNLOAD_URL,
      install_requires=['numpy>=1.12', 'seaborn>=0.7', 'scipy>=0.18.0',
                        'matplotlib>=2.0.0', 'Cython>=0.26', 'libsvmdata',
                        'scikit-learn>=0.23', 'xarray', 'download', 'tqdm'],
      packages=find_packages(),
      cmdclass={'build_ext': build_ext},
      ext_modules=[_cython_extension(mod) for mod in
                   ('lasso_fast', 'cython_utils', 'PN_logreg',
                    'multitask_fast', 'group_fast')],
      )
| [
"setuptools.dist.Distribution",
"os.path.join",
"setuptools.find_packages",
"numpy.get_include"
] | [((122, 141), 'setuptools.dist.Distribution', 'dist.Distribution', ([], {}), '()\n', (139, 141), False, 'from setuptools import dist, setup, Extension, find_packages\n'), ((299, 335), 'os.path.join', 'os.path.join', (['"""celer"""', '"""__init__.py"""'], {}), "('celer', '__init__.py')\n", (311, 335), False, 'import os\n'), ((1333, 1348), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1346, 1348), False, 'from setuptools import dist, setup, Extension, find_packages\n'), ((1575, 1591), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (1589, 1591), True, 'import numpy as np\n'), ((1811, 1827), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (1825, 1827), True, 'import numpy as np\n'), ((2041, 2057), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (2055, 2057), True, 'import numpy as np\n'), ((2281, 2297), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (2295, 2297), True, 'import numpy as np\n'), ((2513, 2529), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (2527, 2529), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Aggregated conformal predictors
"""
# Authors: <NAME>
import numpy as np
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
from sklearn.base import clone
from cqr.nonconformist_base import BaseEstimator
from cqr.nonconformist_util import calc_p
# -----------------------------------------------------------------------------
# Sampling strategies
# -----------------------------------------------------------------------------
class BootstrapSampler(object):
    """Bootstrap sampler.

    Each sample draws ``y.size`` training indices with replacement; the
    calibration set is every index NOT drawn for training (out-of-bag).

    See also
    --------
    CrossSampler, RandomSubSampler

    Examples
    --------
    """
    def gen_samples(self, y, n_samples, problem_type):
        """Yield ``n_samples`` (train, cal) index-array pairs.

        ``problem_type`` is accepted for interface compatibility with the
        other samplers but is not used here.
        """
        for _ in range(n_samples):
            idx = np.arange(y.size)
            train = np.random.choice(y.size, y.size, replace=True)
            # Out-of-bag mask: vectorized fancy-index assignment replaces the
            # former per-element Python loop, and np.ones(..., dtype=bool)
            # replaces the redundant np.array(np.ones(...), dtype=bool).
            cal_mask = np.ones(idx.size, dtype=bool)
            cal_mask[train] = False
            cal = idx[cal_mask]
            yield train, cal
class CrossSampler(object):
    """Cross-fold sampler.

    Splits the data into ``n_samples`` folds; each fold in turn becomes the
    calibration set while the rest is used for training.

    See also
    --------
    BootstrapSampler, RandomSubSampler

    Examples
    --------
    """
    def gen_samples(self, y, n_samples, problem_type):
        """Yield (train, cal) index pairs, one per fold.

        Uses stratified folds for classification so class proportions are
        preserved in every fold.
        """
        # sklearn >= 0.18 (the sklearn.model_selection module imported at the
        # top of this file) configures splitters with n_splits and produces
        # index pairs via .split(); the old ``KFold(y.size, n_folds=...)``
        # call signature used before no longer exists there.
        if problem_type == 'classification':
            splitter = StratifiedKFold(n_splits=n_samples)
            folds = splitter.split(np.zeros((y.size, 1)), y)
        else:
            splitter = KFold(n_splits=n_samples)
            folds = splitter.split(np.zeros((y.size, 1)))
        for train, cal in folds:
            yield train, cal
class RandomSubSampler(object):
    """Random subsample sampler.

    Each sample is an independent random train/calibration split.

    Parameters
    ----------
    calibration_portion : float
        Ratio (0-1) of examples to use for calibration.

    See also
    --------
    BootstrapSampler, CrossSampler

    Examples
    --------
    """
    def __init__(self, calibration_portion=0.3):
        self.cal_portion = calibration_portion

    def gen_samples(self, y, n_samples, problem_type):
        """Yield ``n_samples`` random (train, cal) index pairs.

        Uses stratified splits for classification so class proportions are
        preserved in the calibration sets.
        """
        # sklearn >= 0.18 splitters take n_splits (not n_iter) and produce
        # index pairs via .split(); the old ``StratifiedShuffleSplit(y,
        # n_iter=...)`` constructor signature used before no longer exists
        # in the sklearn.model_selection module imported by this file.
        if problem_type == 'classification':
            splitter = StratifiedShuffleSplit(n_splits=n_samples,
                                              test_size=self.cal_portion)
            splits = splitter.split(np.zeros((y.size, 1)), y)
        else:
            splitter = ShuffleSplit(n_splits=n_samples,
                                    test_size=self.cal_portion)
            splits = splitter.split(np.zeros((y.size, 1)))
        for train, cal in splits:
            yield train, cal
# -----------------------------------------------------------------------------
# Conformal ensemble
# -----------------------------------------------------------------------------
class AggregatedCp(BaseEstimator):
    """Aggregated conformal predictor.

    Combines multiple IcpClassifier or IcpRegressor predictors into an
    aggregated model.

    Parameters
    ----------
    predictor : object
        Prototype conformal predictor (e.g. IcpClassifier or IcpRegressor)
        used for defining conformal predictors included in the aggregate model.

    sampler : object or None
        Sampler object used to generate training and calibration examples
        for the underlying conformal predictors. ``None`` (the default)
        creates a fresh ``BootstrapSampler()`` for this instance.

    aggregation_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors. Defaults to ``numpy.mean``.

    n_models : int
        Number of models to aggregate.

    Attributes
    ----------
    predictor : object
        Prototype conformal predictor.

    predictors : list
        List of underlying conformal predictors.

    sampler : object
        Sampler object used to generate training and calibration examples.

    agg_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors

    References
    ----------
    .. [1] <NAME>. (2013). Cross-conformal predictors. Annals of Mathematics
        and Artificial Intelligence, 1-20.

    .. [2] <NAME>., <NAME>., & <NAME>. (2014). Aggregated
        Conformal Prediction. In Artificial Intelligence Applications and
        Innovations (pp. 231-240). Springer Berlin Heidelberg.

    Examples
    --------
    """
    def __init__(self,
                 predictor,
                 sampler=None,
                 aggregation_func=None,
                 n_models=10):
        self.predictors = []
        self.n_models = n_models
        self.predictor = predictor
        # Use a None sentinel instead of ``sampler=BootstrapSampler()`` so a
        # fresh sampler is created per instance rather than a single shared
        # default instance being built at class-definition time.
        self.sampler = BootstrapSampler() if sampler is None else sampler

        if aggregation_func is not None:
            self.agg_func = aggregation_func
        else:
            # Default aggregation: mean over the ensemble axis (axis 2).
            self.agg_func = lambda x: np.mean(x, axis=2)

    def fit(self, x, y):
        """Fit underlying conformal predictors.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples for fitting the underlying conformal predictors.

        y : numpy array of shape [n_samples]
            Outputs of examples for fitting the underlying conformal predictors.

        Returns
        -------
        None
        """
        self.n_train = y.size
        self.predictors = []
        # Shuffle the data once so sequentially generated splits are random.
        idx = np.random.permutation(y.size)
        x, y = x[idx, :], y[idx]
        problem_type = self.predictor.__class__.get_problem_type()
        samples = self.sampler.gen_samples(y,
                                           self.n_models,
                                           problem_type)
        for train, cal in samples:
            # Each ensemble member is a fresh clone of the prototype,
            # fitted on its training split and calibrated on its cal split.
            predictor = clone(self.predictor)
            predictor.fit(x[train, :], y[train])
            predictor.calibrate(x[cal, :], y[cal])
            self.predictors.append(predictor)

        if problem_type == 'classification':
            self.classes = self.predictors[0].classes

    def predict(self, x, significance=None):
        """Predict the output values for a set of input patterns.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patters for which to predict output values.

        significance : float or None
            Significance level (maximum allowed error rate) of predictions.
            Should be a float between 0 and 1. If ``None``, then the p-values
            are output rather than the predictions. Note: ``significance=None``
            is applicable to classification problems only.

        Returns
        -------
        p : numpy array of shape [n_samples, n_classes] or [n_samples, 2]
            For classification problems: If significance is ``None``, then p
            contains the p-values for each sample-class pair; if significance
            is a float between 0 and 1, then p is a boolean array denoting
            which labels are included in the prediction sets.

            For regression problems: Prediction interval (minimum and maximum
            boundaries) for the set of test patterns.
        """
        is_regression = \
            self.predictor.__class__.get_problem_type() == 'regression'

        n_examples = x.shape[0]

        if is_regression and significance is None:
            # Regression without a fixed significance: sweep a grid of
            # significance levels and return one aggregated interval each.
            signs = np.arange(0.01, 1.0, 0.01)
            pred = np.zeros((n_examples, 2, signs.size))
            for i, s in enumerate(signs):
                predictions = np.dstack([p.predict(x, s)
                                        for p in self.predictors])
                predictions = self.agg_func(predictions)
                pred[:, :, i] = predictions
            return pred
        else:
            def f(p, x):
                return p.predict(x, significance if is_regression else None)

            # Stack each member's output along a new third axis, then
            # aggregate (default: mean) across the ensemble.
            predictions = np.dstack([f(p, x) for p in self.predictors])
            predictions = self.agg_func(predictions)

            if significance and not is_regression:
                # Classification with a significance level: boolean matrix of
                # prediction-set membership.
                return predictions >= significance
            else:
                return predictions
class CrossConformalClassifier(AggregatedCp):
    """Cross-conformal classifier.

    Combines multiple IcpClassifiers into a cross-conformal classifier.

    Parameters
    ----------
    predictor : object
        Prototype conformal predictor (e.g. IcpClassifier or IcpRegressor)
        used for defining conformal predictors included in the aggregate model.

    n_models : int
        Number of models to aggregate.

    Attributes
    ----------
    predictor : object
        Prototype conformal predictor.

    predictors : list
        List of underlying conformal predictors.

    sampler : object
        Sampler object used to generate training and calibration examples.

    agg_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors

    References
    ----------
    .. [1] <NAME>. (2013). Cross-conformal predictors. Annals of Mathematics
        and Artificial Intelligence, 1-20.

    Examples
    --------
    """
    def __init__(self,
                 predictor,
                 n_models=10):
        # BUGFIX: pass n_models by keyword. The parent signature is
        # (predictor, sampler, aggregation_func, n_models); passing it as
        # the third positional argument bound it to ``aggregation_func``,
        # setting agg_func to the integer and leaving n_models at default.
        super(CrossConformalClassifier, self).__init__(predictor,
                                                       CrossSampler(),
                                                       n_models=n_models)

    def predict(self, x, significance=None):
        """Return pooled p-values (or prediction sets) for *x*.

        The per-predictor calibration statistics are summed across the
        ensemble before a single p-value is computed per (example, class).
        """
        ncal_ngt_neq = np.stack([p._get_stats(x) for p in self.predictors],
                                axis=3)
        ncal_ngt_neq = ncal_ngt_neq.sum(axis=3)

        # Stats layout along axis 2: [0]=n_cal, [1]=n_greater, [2]=n_equal.
        p = calc_p(ncal_ngt_neq[:, :, 0],
                   ncal_ngt_neq[:, :, 1],
                   ncal_ngt_neq[:, :, 2],
                   smoothing=self.predictors[0].smoothing)

        if significance:
            return p > significance
        else:
            return p
class BootstrapConformalClassifier(AggregatedCp):
    """Bootstrap conformal classifier.

    Combines multiple IcpClassifiers into a bootstrap conformal classifier.

    Parameters
    ----------
    predictor : object
        Prototype conformal predictor (e.g. IcpClassifier or IcpRegressor)
        used for defining conformal predictors included in the aggregate model.

    n_models : int
        Number of models to aggregate.

    Attributes
    ----------
    predictor : object
        Prototype conformal predictor.

    predictors : list
        List of underlying conformal predictors.

    sampler : object
        Sampler object used to generate training and calibration examples.

    agg_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors

    References
    ----------
    .. [1] <NAME>. (2013). Cross-conformal predictors. Annals of Mathematics
        and Artificial Intelligence, 1-20.

    Examples
    --------
    """
    def __init__(self,
                 predictor,
                 n_models=10):
        # BUGFIX: pass n_models by keyword. The parent signature is
        # (predictor, sampler, aggregation_func, n_models); passing it as
        # the third positional argument bound it to ``aggregation_func``,
        # setting agg_func to the integer and leaving n_models at default.
        super(BootstrapConformalClassifier, self).__init__(predictor,
                                                           BootstrapSampler(),
                                                           n_models=n_models)

    def predict(self, x, significance=None):
        """Return pooled p-values (or prediction sets) for *x*.

        Calibration statistics are summed across the ensemble; a small
        +ncal/n_train correction is applied before computing p-values.
        """
        ncal_ngt_neq = np.stack([p._get_stats(x) for p in self.predictors],
                                axis=3)
        ncal_ngt_neq = ncal_ngt_neq.sum(axis=3)

        # Stats layout along axis 2: [0]=n_cal, [1]=n_greater, [2]=n_equal.
        # NOTE(review): the ncal/n_train term is added to both the n_cal and
        # n_greater arguments — presumably a finite-sample correction;
        # confirm against the reference implementation.
        p = calc_p(ncal_ngt_neq[:, :, 0] + ncal_ngt_neq[:, :, 0] / self.n_train,
                   ncal_ngt_neq[:, :, 1] + ncal_ngt_neq[:, :, 0] / self.n_train,
                   ncal_ngt_neq[:, :, 2],
                   smoothing=self.predictors[0].smoothing)

        if significance:
            return p > significance
        else:
            return p
| [
"cqr.nonconformist_util.calc_p",
"numpy.zeros",
"numpy.ones",
"sklearn.model_selection.KFold",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.mean",
"sklearn.model_selection.StratifiedKFold",
"numpy.arange",
"numpy.random.choice",
"numpy.random.permutation",
"sklearn.model_selection.Sh... | [((4463, 4492), 'numpy.random.permutation', 'np.random.permutation', (['y.size'], {}), '(y.size)\n', (4484, 4492), True, 'import numpy as np\n'), ((8165, 8284), 'cqr.nonconformist_util.calc_p', 'calc_p', (['ncal_ngt_neq[:, :, 0]', 'ncal_ngt_neq[:, :, 1]', 'ncal_ngt_neq[:, :, 2]'], {'smoothing': 'self.predictors[0].smoothing'}), '(ncal_ngt_neq[:, :, 0], ncal_ngt_neq[:, :, 1], ncal_ngt_neq[:, :, 2],\n smoothing=self.predictors[0].smoothing)\n', (8171, 8284), False, 'from cqr.nonconformist_util import calc_p\n'), ((9819, 10021), 'cqr.nonconformist_util.calc_p', 'calc_p', (['(ncal_ngt_neq[:, :, 0] + ncal_ngt_neq[:, :, 0] / self.n_train)', '(ncal_ngt_neq[:, :, 1] + ncal_ngt_neq[:, :, 0] / self.n_train)', 'ncal_ngt_neq[:, :, 2]'], {'smoothing': 'self.predictors[0].smoothing'}), '(ncal_ngt_neq[:, :, 0] + ncal_ngt_neq[:, :, 0] / self.n_train, \n ncal_ngt_neq[:, :, 1] + ncal_ngt_neq[:, :, 0] / self.n_train,\n ncal_ngt_neq[:, :, 2], smoothing=self.predictors[0].smoothing)\n', (9825, 10021), False, 'from cqr.nonconformist_util import calc_p\n'), ((799, 845), 'numpy.random.choice', 'np.random.choice', (['y.size', 'y.size'], {'replace': '(True)'}), '(y.size, y.size, replace=True)\n', (815, 845), True, 'import numpy as np\n'), ((1226, 1263), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['y'], {'n_folds': 'n_samples'}), '(y, n_folds=n_samples)\n', (1241, 1263), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((1283, 1315), 'sklearn.model_selection.KFold', 'KFold', (['y.size'], {'n_folds': 'n_samples'}), '(y.size, n_folds=n_samples)\n', (1288, 1315), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((1801, 1872), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', (['y'], {'n_iter': 'n_samples', 'test_size': 'self.cal_portion'}), '(y, n_iter=n_samples, test_size=self.cal_portion)\n', (1823, 1872), False, 'from sklearn.model_selection import ShuffleSplit, 
StratifiedShuffleSplit\n'), ((1963, 2029), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', (['y.size'], {'n_iter': 'n_samples', 'test_size': 'self.cal_portion'}), '(y.size, n_iter=n_samples, test_size=self.cal_portion)\n', (1975, 2029), False, 'from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit\n'), ((4768, 4789), 'sklearn.base.clone', 'clone', (['self.predictor'], {}), '(self.predictor)\n', (4773, 4789), False, 'from sklearn.base import clone\n'), ((6153, 6179), 'numpy.arange', 'np.arange', (['(0.01)', '(1.0)', '(0.01)'], {}), '(0.01, 1.0, 0.01)\n', (6162, 6179), True, 'import numpy as np\n'), ((6190, 6227), 'numpy.zeros', 'np.zeros', (['(n_examples, 2, signs.size)'], {}), '((n_examples, 2, signs.size))\n', (6198, 6227), True, 'import numpy as np\n'), ((869, 886), 'numpy.ones', 'np.ones', (['idx.size'], {}), '(idx.size)\n', (876, 886), True, 'import numpy as np\n'), ((4029, 4047), 'numpy.mean', 'np.mean', (['x'], {'axis': '(2)'}), '(x, axis=2)\n', (4036, 4047), True, 'import numpy as np\n')] |
from polynomial import *
from chebyshev import *
from polyutils import *
from numpy.testing import Tester
# Expose numpy.testing's Tester entry points for this subpackage.
# NOTE(review): numpy.testing.Tester was deprecated and later removed in
# modern numpy releases — this only works with old numpy versions.
test = Tester().test
bench = Tester().bench
| [
"numpy.testing.Tester"
] | [((114, 122), 'numpy.testing.Tester', 'Tester', ([], {}), '()\n', (120, 122), False, 'from numpy.testing import Tester\n'), ((136, 144), 'numpy.testing.Tester', 'Tester', ([], {}), '()\n', (142, 144), False, 'from numpy.testing import Tester\n')] |
# Copyright (C) 2017 <NAME>, Carnegie Mellon University
# Copyright (C) 2020 <NAME>, UCLA
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
from loguru import logger
# Data feed
class LongDataLoader(object):
    """A special efficient data loader for TBPTT"""
    # Class-level defaults; the real values are assigned per epoch in
    # epoch_init() (or by subclasses for indexes/data_lens/data_size/name).
    batch_size = 0
    backward_size = 0
    step_size = 0
    ptr = 0  # index of the next batch to serve from batch_indexes
    num_batch = None
    batch_indexes = None
    grid_indexes = None
    indexes = None
    data_lens = None
    data_size = None
    prev_alive_size = 0
    name = None
    def _shuffle_batch_indexes(self):
        # Shuffle the order of the prepared batches in place (epoch-level
        # shuffling; the contents of each batch are unchanged).
        np.random.shuffle(self.batch_indexes)
    def _prepare_batch(self, cur_grid, prev_grid):
        # Subclasses must build and return the actual batch from the indexes.
        # NOTE(review): next_batch() calls this with a single positional
        # argument — confirm subclass overrides match that call signature.
        raise NotImplementedError("Have to override prepare batch")
def epoch_init(self,
batch_size,
shuffle=True,
intra_shuffle=True,
no_leftover=False):
assert len(self.indexes) == self.data_size and len(
self.data_lens) == self.data_size
self.ptr = 0
self.batch_size = batch_size
self.prev_alive_size = batch_size
# create batch indexes
temp_num_batch = self.data_size // batch_size
self.batch_indexes = []
for i in range(temp_num_batch):
self.batch_indexes.append(
self.indexes[i * self.batch_size:(i + 1) * self.batch_size])
# No left over
if no_leftover:
self.batch_indexes.append(self.indexes[(temp_num_batch *
self.batch_size):])
left_over = self.data_size - temp_num_batch * batch_size
# shuffle batch indexes
if shuffle:
self._shuffle_batch_indexes()
"""
# create grid indexes
self.grid_indexes = []
for idx, b_ids in enumerate(self.batch_indexes):
# assume the b_ids are sorted
all_lens = [self.data_lens[i] for i in b_ids]
max_len = self.data_lens[b_ids[-1]]
min_len = self.data_lens[b_ids[0]]
assert np.max(all_lens) == max_len
assert np.min(all_lens) == min_len
num_seg = (max_len-self.backward_size) // self.step_size
if num_seg > 0:
cut_start = range(0, num_seg*self.step_size, step_size)
cut_end = range(self.backward_size, num_seg*self.step_size+self.backward_size, step_size)
assert cut_end[-1] < max_len
cut_start = [0] * (self.backward_size-2) +cut_start # since we give up on the seq training idea
cut_end = range(2, self.backward_size) + cut_end
else:
cut_start = [0] * (max_len-2)
cut_end = range(2, max_len)
new_grids = [(idx, s_id, e_id) for s_id, e_id in zip(cut_start, cut_end) if s_id < min_len-1]
if intra_shuffle and shuffle:
np.random.shuffle(new_grids)
self.grid_indexes.extend(new_grids)
"""
self.num_batch = len(self.batch_indexes)
print("%s begins with %d batches with %d left over samples" %
(self.name, self.num_batch, left_over))
def next_batch(self):
if self.ptr < self.num_batch:
current_index_list = self.batch_indexes[self.ptr]
self.ptr += 1
return self._prepare_batch(current_index_list)
else:
if self.labeled:
current_index_list = self.batch_indexes[0]
return self._prepare_batch(current_index_list)
else:
return None
class SWDADataLoader(LongDataLoader):
def __init__(self,
name,
data,
max_utt_len,
max_dialog_len,
labeled=False,
device='cpu'):
# assert len(data) == len(meta_data)
self.name = name
self.data = data
# self.meta_data = meta_data
self.data_size = len(data)
self.data_lens = all_lens = [len(line) for line in self.data]
self.max_utt_size = max_utt_len
self.max_dialog_size = max_dialog_len
self.labeled = labeled
self.device = device
logger.info(
"Max dialog len %d and min dialog len %d and avg len %f" %
(np.max(all_lens), np.min(all_lens), float(np.mean(all_lens))))
# self.indexes = list(np.argsort(all_lens))
self.indexes = list(range(self.data_size))
np.random.shuffle(self.indexes)
def pad_to(self, tokens, do_pad=True):
if len(tokens) >= self.max_utt_size:
return tokens[0:(self.max_utt_size -
1)] + [tokens[-1]], [1] * self.max_utt_size
elif do_pad:
return tokens + [0] * (self.max_utt_size - len(tokens)), [1] * len(
tokens) + [0] * (self.max_utt_size - len(tokens))
else:
return tokens
def pad_dialog(self, dialog):
dialog_usr_input, dialog_sys_input, dialog_usr_mask, dialog_sys_mask = [], [], [], []
if len(dialog) >= self.max_dialog_size:
for turn in dialog[:self.max_dialog_size]:
usr_input, usr_mask = self.pad_to(turn[0])
sys_input, sys_mask = self.pad_to(turn[1])
dialog_usr_input.append(usr_input)
dialog_sys_input.append(sys_input)
dialog_usr_mask.append(usr_mask)
dialog_sys_mask.append(sys_mask)
else:
all_pad_input, all_pad_mask = self.pad_to([])
for turn in dialog:
usr_input, usr_mask = self.pad_to(turn[0])
sys_input, sys_mask = self.pad_to(turn[1])
dialog_usr_input.append(usr_input)
dialog_sys_input.append(sys_input)
dialog_usr_mask.append(usr_mask)
dialog_sys_mask.append(sys_mask)
for _ in range(self.max_dialog_size - len(dialog)):
dialog_usr_input.append(all_pad_input)
dialog_sys_input.append(all_pad_input)
dialog_usr_mask.append(all_pad_mask)
dialog_sys_mask.append(all_pad_mask)
assert len(dialog_usr_input) == len(dialog_sys_input) == len(
dialog_usr_mask) == len(dialog_sys_mask) == self.max_dialog_size
return dialog_usr_input, dialog_sys_input, dialog_usr_mask, dialog_sys_mask
def _prepare_batch(self, cur_index_list):
# the batch index, the starting point and end point for segment
# need usr_input_sent, sys_input_sent, dialog_len_mask, usr_full_mask, sys_full_mask = batch
dialogs = [self.data[idx] for idx in cur_index_list]
dialog_lens = [self.data_lens[idx] for idx in cur_index_list]
usr_input_sent, sys_input_sent, usr_full_mask, sys_full_mask = [], [], [], []
for dialog in dialogs:
dialog_usr_input, dialog_sys_input, dialog_usr_mask, dialog_sys_mask = self.pad_dialog(
dialog)
usr_input_sent.append(dialog_usr_input)
sys_input_sent.append(dialog_sys_input)
usr_full_mask.append(dialog_usr_mask)
sys_full_mask.append(dialog_sys_mask)
# logger.info(f"Preparing batch, batch size: {len(cur_index_list)}")
# logger.info(f"usr_input_sent: {usr_input_sent[0]}")
# logger.info(f"sys_input_sent: {sys_input_sent}")
# logger.info(f"usr_full_mask: {usr_full_mask[0]}")
# logger.info(f"sys_full_mask: {sys_full_mask}")
# logger.info(f"dialog_lens: {dialog_lens}")
return torch.tensor(usr_input_sent).to(self.device), torch.tensor(sys_input_sent).to(self.device), torch.tensor(dialog_lens).to(self.device), \
torch.tensor(usr_full_mask).to(self.device), torch.tensor(sys_full_mask).to(self.device)
| [
"numpy.random.shuffle",
"numpy.max",
"numpy.mean",
"numpy.min",
"torch.tensor"
] | [((604, 641), 'numpy.random.shuffle', 'np.random.shuffle', (['self.batch_indexes'], {}), '(self.batch_indexes)\n', (621, 641), True, 'import numpy as np\n'), ((4531, 4562), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (4548, 4562), True, 'import numpy as np\n'), ((4357, 4373), 'numpy.max', 'np.max', (['all_lens'], {}), '(all_lens)\n', (4363, 4373), True, 'import numpy as np\n'), ((4375, 4391), 'numpy.min', 'np.min', (['all_lens'], {}), '(all_lens)\n', (4381, 4391), True, 'import numpy as np\n'), ((7647, 7675), 'torch.tensor', 'torch.tensor', (['usr_input_sent'], {}), '(usr_input_sent)\n', (7659, 7675), False, 'import torch\n'), ((7693, 7721), 'torch.tensor', 'torch.tensor', (['sys_input_sent'], {}), '(sys_input_sent)\n', (7705, 7721), False, 'import torch\n'), ((7739, 7764), 'torch.tensor', 'torch.tensor', (['dialog_lens'], {}), '(dialog_lens)\n', (7751, 7764), False, 'import torch\n'), ((7799, 7826), 'torch.tensor', 'torch.tensor', (['usr_full_mask'], {}), '(usr_full_mask)\n', (7811, 7826), False, 'import torch\n'), ((7844, 7871), 'torch.tensor', 'torch.tensor', (['sys_full_mask'], {}), '(sys_full_mask)\n', (7856, 7871), False, 'import torch\n'), ((4399, 4416), 'numpy.mean', 'np.mean', (['all_lens'], {}), '(all_lens)\n', (4406, 4416), True, 'import numpy as np\n')] |
from __future__ import print_function
import torch
import numpy as np
import os
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
MESH_EXTENSIONS = [
'.vtk',
]
def is_mesh_file(filename):
return any(filename.endswith(extension) for extension in MESH_EXTENSIONS)
def pad(input_arr, target_length, val=0, dim=1):
shp = input_arr.shape
npad = [(0, 0) for _ in range(len(shp))]
npad[dim] = (0, target_length - shp[dim])
return np.pad(input_arr, pad_width=npad, mode='constant', constant_values=val)
def seg_accuracy(predicted, ssegs, meshes):
correct = 0
"""
ssegs = ssegs.squeeze(-1)
correct_mat = ssegs.gather(2, predicted.cpu().unsqueeze(dim=2))
for mesh_id, mesh in enumerate(meshes):
correct_vec = correct_mat[mesh_id, :mesh.edges_count, 0]
edge_areas = torch.from_numpy(mesh.get_edge_areas())
correct += (correct_vec.float() * edge_areas).sum()
"""
for mesh_id, mesh in enumerate(meshes):
correct += (predicted.cpu()[0,:mesh.edges_count] * ssegs[0,:mesh.edges_count]).sum()/mesh.edges_count
return correct/len(meshes)
def print_network(net):
"""Print the total number of parameters in the network
Parameters:
network
"""
print('---------- Network initialized -------------')
num_params = 0
for param in net.parameters():
num_params += param.numel()
print('[Network] Total number of parameters : %.3f M' % (num_params / 1e6))
print('-----------------------------------------------')
def get_heatmap_color(value, minimum=0, maximum=1):
minimum, maximum = float(minimum), float(maximum)
ratio = 2 * (value-minimum) / (maximum - minimum)
b = int(max(0, 255*(1 - ratio)))
r = int(max(0, 255*(ratio - 1)))
g = 255 - b - r
return r, g, b
def normalize_np_array(np_array):
min_value = np.min(np_array)
max_value = np.max(np_array)
return (np_array - min_value) / (max_value - min_value)
def calculate_entropy(np_array):
entropy = 0
np_array /= np.sum(np_array)
for a in np_array:
if a != 0:
entropy -= a * np.log(a)
entropy /= np.log(np_array.shape[0])
return entropy
| [
"numpy.pad",
"numpy.sum",
"numpy.log",
"os.makedirs",
"os.path.exists",
"numpy.min",
"numpy.max"
] | [((478, 549), 'numpy.pad', 'np.pad', (['input_arr'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': 'val'}), "(input_arr, pad_width=npad, mode='constant', constant_values=val)\n", (484, 549), True, 'import numpy as np\n'), ((1888, 1904), 'numpy.min', 'np.min', (['np_array'], {}), '(np_array)\n', (1894, 1904), True, 'import numpy as np\n'), ((1921, 1937), 'numpy.max', 'np.max', (['np_array'], {}), '(np_array)\n', (1927, 1937), True, 'import numpy as np\n'), ((2065, 2081), 'numpy.sum', 'np.sum', (['np_array'], {}), '(np_array)\n', (2071, 2081), True, 'import numpy as np\n'), ((2176, 2201), 'numpy.log', 'np.log', (['np_array.shape[0]'], {}), '(np_array.shape[0])\n', (2182, 2201), True, 'import numpy as np\n'), ((110, 130), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (124, 130), False, 'import os\n'), ((140, 157), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (151, 157), False, 'import os\n'), ((2151, 2160), 'numpy.log', 'np.log', (['a'], {}), '(a)\n', (2157, 2160), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright 2021 <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
"""
This script tests the particle boundary conditions.
The input file sets up absorbing, periodic, and reflecting boundary conditions
along each of the three axis. It launches particles heading toward each of the boundaries
and checks that they end up in the correct place (or are deleted).
"""
import os
import sys
import numpy as np
from scipy.constants import c, m_e
import yt
yt.funcs.mylog.setLevel(0)
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
# The min and max size of the box along the three axis.
dmin = -1.
dmax = +1.
# Open plotfile specified in command line
filename = sys.argv[1]
ds = yt.load( filename )
ad = ds.all_data()
time = ds.current_time.to_value()
filename0 = filename[:-5] + '00000'
ds0 = yt.load( filename0 )
ad0 = ds0.all_data()
# Read in the particle initial values and the current values.
# They need to be sorted by their ids since they may be ordered
# differently in the diagnostic files.
# For the absorbing particles, an extra particle was added that won't be absorbed
# so that there will be something to read in here.
r_id0 = ad0['reflecting_particles', 'particle_id'].v
a_id0 = ad0['absorbing_particles', 'particle_id'].v
p_id0 = ad0['periodic_particles', 'particle_id'].v
xx0 = ad0['reflecting_particles', 'particle_position_x'].v[np.argsort(r_id0)]
zz0 = ad0['periodic_particles', 'particle_position_z'].v[np.argsort(p_id0)]
ux0 = ad0['reflecting_particles', 'particle_momentum_x'].v[np.argsort(r_id0)]/m_e/c
uz0 = ad0['periodic_particles', 'particle_momentum_z'].v[np.argsort(p_id0)]/m_e/c
gx0 = np.sqrt(1. + ux0**2)
gz0 = np.sqrt(1. + uz0**2)
vx0 = ux0/gx0*c
vz0 = uz0/gz0*c
r_id = ad['reflecting_particles', 'particle_id'].v
a_id = ad['absorbing_particles', 'particle_id'].v
p_id = ad['periodic_particles', 'particle_id'].v
xx = ad['reflecting_particles', 'particle_position_x'].v[np.argsort(r_id)]
zz = ad['periodic_particles', 'particle_position_z'].v[np.argsort(p_id)]
ux = ad['reflecting_particles', 'particle_momentum_x'].v[np.argsort(r_id)]/m_e/c
uz = ad['periodic_particles', 'particle_momentum_z'].v[np.argsort(p_id)]/m_e/c
gx = np.sqrt(1. + ux**2)
gz = np.sqrt(1. + uz**2)
vx = ux/gx*c
vz = uz/gz*c
def do_reflect(x):
if x < dmin:
return 2.*dmin - x
elif x > dmax:
return 2.*dmax - x
else:
return x
def do_periodic(x):
if x < dmin:
return x + (dmax - dmin)
elif x > dmax:
return x - (dmax - dmin)
else:
return x
# Calculate the analytic value of the current particle locations and
# apply the appropriate boundary conditions.
xxa = xx0 + vx0*time
xxa[0] = do_reflect(xxa[0])
xxa[1] = do_reflect(xxa[1])
zza = zz0 + vz0*time
zza[0] = do_periodic(zza[0])
zza[1] = do_periodic(zza[1])
assert (len(a_id) == 1), 'Absorbing particles not absorbed'
assert (np.all(vx == -vx0)), 'Reflecting particle velocity not correct'
assert (np.all(vz == +vz0)), 'Periodic particle velocity not correct'
assert (np.all(np.abs((xx - xxa)/xx) < 1.e-15)), 'Reflecting particle position not correct'
assert (np.all(np.abs((zz - zza)/zz) < 1.e-15)), 'Periodic particle position not correct'
test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, filename)
| [
"yt.funcs.mylog.setLevel",
"numpy.abs",
"os.getcwd",
"sys.path.insert",
"checksumAPI.evaluate_checksum",
"numpy.argsort",
"yt.load",
"numpy.all",
"numpy.sqrt"
] | [((497, 523), 'yt.funcs.mylog.setLevel', 'yt.funcs.mylog.setLevel', (['(0)'], {}), '(0)\n', (520, 523), False, 'import yt\n'), ((524, 584), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../../warpx/Regression/Checksum/"""'], {}), "(1, '../../../../warpx/Regression/Checksum/')\n", (539, 584), False, 'import sys\n'), ((754, 771), 'yt.load', 'yt.load', (['filename'], {}), '(filename)\n', (761, 771), False, 'import yt\n'), ((870, 888), 'yt.load', 'yt.load', (['filename0'], {}), '(filename0)\n', (877, 888), False, 'import yt\n'), ((1695, 1718), 'numpy.sqrt', 'np.sqrt', (['(1.0 + ux0 ** 2)'], {}), '(1.0 + ux0 ** 2)\n', (1702, 1718), True, 'import numpy as np\n'), ((1722, 1745), 'numpy.sqrt', 'np.sqrt', (['(1.0 + uz0 ** 2)'], {}), '(1.0 + uz0 ** 2)\n', (1729, 1745), True, 'import numpy as np\n'), ((2241, 2263), 'numpy.sqrt', 'np.sqrt', (['(1.0 + ux ** 2)'], {}), '(1.0 + ux ** 2)\n', (2248, 2263), True, 'import numpy as np\n'), ((2266, 2288), 'numpy.sqrt', 'np.sqrt', (['(1.0 + uz ** 2)'], {}), '(1.0 + uz ** 2)\n', (2273, 2288), True, 'import numpy as np\n'), ((2940, 2958), 'numpy.all', 'np.all', (['(vx == -vx0)'], {}), '(vx == -vx0)\n', (2946, 2958), True, 'import numpy as np\n'), ((3012, 3030), 'numpy.all', 'np.all', (['(vz == +vz0)'], {}), '(vz == +vz0)\n', (3018, 3030), True, 'import numpy as np\n'), ((3299, 3349), 'checksumAPI.evaluate_checksum', 'checksumAPI.evaluate_checksum', (['test_name', 'filename'], {}), '(test_name, filename)\n', (3328, 3349), False, 'import checksumAPI\n'), ((1427, 1444), 'numpy.argsort', 'np.argsort', (['r_id0'], {}), '(r_id0)\n', (1437, 1444), True, 'import numpy as np\n'), ((1503, 1520), 'numpy.argsort', 'np.argsort', (['p_id0'], {}), '(p_id0)\n', (1513, 1520), True, 'import numpy as np\n'), ((1984, 2000), 'numpy.argsort', 'np.argsort', (['r_id'], {}), '(r_id)\n', (1994, 2000), True, 'import numpy as np\n'), ((2057, 2073), 'numpy.argsort', 'np.argsort', (['p_id'], {}), '(p_id)\n', (2067, 2073), True, 'import numpy as np\n'), 
((3089, 3112), 'numpy.abs', 'np.abs', (['((xx - xxa) / xx)'], {}), '((xx - xxa) / xx)\n', (3095, 3112), True, 'import numpy as np\n'), ((3181, 3204), 'numpy.abs', 'np.abs', (['((zz - zza) / zz)'], {}), '((zz - zza) / zz)\n', (3187, 3204), True, 'import numpy as np\n'), ((3283, 3294), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3292, 3294), False, 'import os\n'), ((1582, 1599), 'numpy.argsort', 'np.argsort', (['r_id0'], {}), '(r_id0)\n', (1592, 1599), True, 'import numpy as np\n'), ((1664, 1681), 'numpy.argsort', 'np.argsort', (['p_id0'], {}), '(p_id0)\n', (1674, 1681), True, 'import numpy as np\n'), ((2133, 2149), 'numpy.argsort', 'np.argsort', (['r_id'], {}), '(r_id)\n', (2143, 2149), True, 'import numpy as np\n'), ((2212, 2228), 'numpy.argsort', 'np.argsort', (['p_id'], {}), '(p_id)\n', (2222, 2228), True, 'import numpy as np\n')] |
# vim: set fileencoding=<utf-8> :
'''Mash functions for database construction'''
# universal
import os
import sys
import subprocess
# additional
import collections
import pickle
import time
from tempfile import mkstemp
from multiprocessing import Pool, Lock
from functools import partial
from itertools import product
from glob import glob
from random import sample
import numpy as np
import sharedmem
import networkx as nx
from scipy import optimize
from .utils import iterDistRows
from .plot import plot_fit
DEFAULT_LENGTH = 2000000
def checkMashVersion(mash_exec):
"""Checks that mash can be run, and is version 2 or higher.
Exits if version < 2.
Args:
mash_exec (str)
Location of mash executable
"""
p = subprocess.Popen([mash_exec + ' --version'], shell=True, stdout=subprocess.PIPE)
version = 0
for line in iter(p.stdout.readline, ''):
if line != '':
version = line.rstrip().decode().split(".")[0]
break
if not version.isdigit() or int(version) < 2:
sys.stderr.write("Need mash v2 or higher\n")
sys.exit(1)
def getDatabaseName(prefix, k):
"""Gets the name for the mash database for a given k size
Args:
prefix (str)
db prefix
k (str)
k-mer size
Returns:
db_name (str)
Name of mash db
"""
return prefix + "/" + os.path.basename(prefix) + "." + k + ".msh"
def createDatabaseDir(outPrefix, kmers):
"""Creates the directory to write mash sketches to, removing old files if unnecessary
Args:
outPrefix (str)
output db prefix
kmers (list)
k-mer sizes in db
"""
# check for writing
if os.path.isdir(outPrefix):
# remove old database files if not needed
for msh_file in glob(outPrefix + "/" + os.path.basename(outPrefix) + "*.msh"):
knum = int(msh_file.split('.')[-2])
if not (kmers == knum).any():
sys.stderr.write("Removing old database " + msh_file + "\n")
sys.stderr.write("(k-mer size " + str(knum) +
" not in requested range " + str(knum) + ")\n")
os.remove(msh_file)
else:
try:
os.makedirs(outPrefix)
except OSError:
sys.stderr.write("Cannot create output directory\n")
sys.exit(1)
def getSketchSize(dbPrefix, klist, mash_exec = 'mash'):
"""Call to ``mash info`` to determine sketch size
``sys.exit(1)`` is called if DBs have different sketch sizes
Args:
dbprefix (str)
Prefix for mash databases
klist (list)
List of k-mer lengths which databases were constructed at
mash_exec (str)
Location of mash executable
Returns:
sketchdb (dict)
Dict of sketch sizes indexed by k-mer size
"""
sketchdb = {}
sketch = 0
oldSketch = 0
# iterate over kmer lengths
for k in klist:
dbname = dbPrefix + "/" + os.path.basename(dbPrefix) + "." + str(k) + ".msh"
try:
mash_cmd = mash_exec + " info -t " + dbname
mash_info = subprocess.Popen(mash_cmd, universal_newlines=True, shell=True, stdout=subprocess.PIPE)
for line in mash_info.stdout:
if (line.startswith("#") is False):
sketchValues = line.split("\t")
if len(sketchValues[0]) > 0:
if oldSketch == 0:
oldSketch = int(sketchValues[0])
else:
oldSketch = sketch
sketch = int(sketchValues[0])
if (sketch == oldSketch):
sketchdb[k] = sketch
else:
sys.stderr.write("Problem with database; sketch size for kmer length " +
str(k) + " is " + str(oldSketch) +
", but smaller kmers have sketch sizes of " + str(sketch) + "\n")
sys.exit(1)
break
mash_info.kill()
if sketch == 0:
raise RuntimeError("Could not find sketch size for " + str(k) + "\n")
except subprocess.CalledProcessError as e:
sys.stderr.write("Could not get info about " + dbname + "; command " + mash_exec +
" info -t " + dbname + " returned " + str(mash_info.returncode) +
": " + e.output + "\n")
sys.exit(1)
return sketchdb
def getSeqsInDb(mashSketch, mash_exec = 'mash'):
"""Return an array with the sequences in the passed mash database
Calls ``mash info -t``
Args:
mashSketch (str)
Mash sketches/database
mash_exec (str)
Location of mash executable
Returns:
seqs (list)
List of sequence names in sketch DB
"""
seqs = []
mash_cmd = str(mash_exec) + " info -t " + str(mashSketch)
try:
mash_info = subprocess.Popen(mash_cmd, universal_newlines=True, shell=True, stdout=subprocess.PIPE)
for line in mash_info.stdout:
line = line.rstrip()
if line != '':
if line.startswith("#") is False:
seqs.append(line.split("\t")[2])
# Make sure process executed correctly
wait = 0
while mash_info.poll() == None:
time.sleep(1)
wait += 1
if wait > 10:
break
if mash_info.poll() != 0:
raise RuntimeError('mash command "' + mash_cmd + '" failed')
except subprocess.CalledProcessError as e:
sys.stderr.write("Could not get info about " + str(mashSketch) + "; command " +
mash_cmd + " returned " + str(mash_info.returncode) + ": " + e.output + "\n")
sys.exit(1)
return seqs
def joinDBs(db1, db2, output, klist, mash_exec = 'mash'):
"""Join two mash sketch databases with ``mash paste``
Args:
db1 (str)
Prefix for db1
db2 (str)
Prefix for db2
output (str)
Prefix for joined output
klist (list)
List of k-mer sizes to sketch
mash_exec (str)
Location of mash executable
(default = 'mash')
"""
for kmer in klist:
try:
join_name = output + "/" + os.path.basename(output) + "." + str(kmer) + ".joined"
db1_name = db1 + "/" + os.path.basename(db1) + "." + str(kmer) + ".msh"
db2_name = db2 + "/" + os.path.basename(db2) + "." + str(kmer) + ".msh"
mash_cmd = mash_exec + " paste " + join_name + " " + db1_name + " " + db2_name
subprocess.run(mash_cmd, shell=True, check=True)
os.rename(join_name + ".msh", output + "/" + os.path.basename(output) + "." + str(kmer) + ".msh")
except subprocess.CalledProcessError as e:
sys.stderr.write("Could not run command " + mash_cmd + "; returned: " + e.output + "\n")
sys.exit(1)
def constructDatabase(assemblyList, klist, sketch, oPrefix, ignoreLengthOutliers = False,
threads = 1, mash_exec = 'mash', overwrite = False):
"""Sketch the input assemblies at the requested k-mer lengths
A multithread wrapper around :func:`~runSketch`. Threads are used to either run multiple sketch
processes for each klist value, or increase the threads used by each ``mash sketch`` process
if len(klist) > threads.
Also calculates random match probability based on length of first genome
in assemblyList.
Args:
assemblyList (str)
File with locations of assembly files to be sketched
klist (list)
List of k-mer sizes to sketch
sketch (int)
Size of sketch (``-s`` option)
oPrefix (str)
Output prefix for resulting sketch files
ignoreLengthOutliers (bool)
Whether to check for outlying genome lengths (and error
if found)
(default = False)
threads (int)
Number of threads to use
(default = 1)
mash_exec (str)
Location of mash executable
(default = 'mash')
overwrite (bool)
Whether to overwrite sketch DBs, if they already exist.
(default = False)
"""
# Genome length needed to calculate prob of random matches
genome_length = DEFAULT_LENGTH # assume 2 Mb in the absence of other information
try:
input_lengths = []
input_names = []
with open(assemblyList, 'r') as assemblyFiles:
for assembly in assemblyFiles:
with open(assembly.rstrip(), 'r') as exampleAssembly:
input_genome_length = 0
for line in exampleAssembly:
if line[0] != ">":
input_genome_length += len(line.rstrip())
input_lengths.append(input_genome_length)
input_names.append(assembly)
# Check for outliers
outliers = []
sigma = 5
if not ignoreLengthOutliers:
genome_length = np.mean(np.array(input_lengths))
outlier_low = genome_length - sigma*np.std(input_lengths)
outlier_high = genome_length + sigma*np.std(input_lengths)
for length, name in zip(input_lengths, input_names):
if length < outlier_low or length > outlier_high:
outliers.append(name)
if outliers:
sys.stderr.write("ERROR: Genomes with outlying lengths detected\n" +
"\n".join(outliers))
sys.exit(1)
except FileNotFoundError as e:
sys.stderr.write("Could not find sequence assembly " + e.filename + "\n"
"Assuming length of 2Mb for random match probs.\n")
except UnicodeDecodeError as e:
sys.stderr.write("Could not read input file. Is it zipped?\n"
"Assuming length of 2Mb for random match probs.\n")
# check minimum k-mer is above random probability threshold
if genome_length <= 0:
genome_length = DEFAULT_LENGTH
sys.stderr.write("WARNING: Could not detect genome length. Assuming 2Mb\n")
if genome_length > 10000000:
sys.stderr.write("WARNING: Average length over 10Mb - are these assemblies?\n")
k_min = min(klist)
if 1/(pow(4, k_min)/float(genome_length) + 1) > 0.05:
sys.stderr.write("Minimum k-mer length " + str(k_min) + " is too small; please increase to avoid nonsense results\n")
exit(1)
# create kmer databases
if threads > len(klist):
num_processes = 1
num_threads = threads
else:
num_processes = threads
num_threads = 1
# run database construction using multiprocessing
l = Lock()
with Pool(processes=num_processes, initializer=init_lock, initargs=(l,)) as pool:
pool.map(partial(runSketch, assemblyList=assemblyList, sketch=sketch,
genome_length=genome_length,oPrefix=oPrefix, mash_exec=mash_exec,
overwrite=overwrite, threads=num_threads), klist)
def init_lock(l):
"""Sets a global lock to use when writing to STDERR in :func:`~runSketch`"""
global lock
lock = l
def runSketch(k, assemblyList, sketch, genome_length, oPrefix, mash_exec = 'mash', overwrite = False, threads = 1):
"""Actually run the mash sketch command
Called by :func:`~constructDatabase`
Args:
k (int)
k-mer size to sketch
assemblyList (list)
Locations of assembly files to be sketched
sketch (int)
Size of sketch (``-s`` option)
genome_length (int)
Length of genomes being sketch, for random match probability calculation
oPrefix (str)
Output prefix for resulting sketch files
mash_exec (str)
Location of mash executable
(default = 'mash')
overwrite (bool)
Whether to overwrite sketch DB, if it already exists.
(default = False)
threads (int)
Number of threads to use in the mash process
(default = 1)
"""
# define database name
dbname = oPrefix + "/" + os.path.basename(oPrefix) + "." + str(k)
dbfilename = dbname + ".msh"
# Causes mash sketch to fail at end -- createDatabaseDir should stop this
if not os.path.isdir(oPrefix):
sys.stderr.write("Directory " + oPrefix + " does not exist\n")
sys.exit(1)
# calculate false positive rate
random_prob = 1/(pow(4, k)/float(genome_length) + 1)
# print info. Lock is released once all stderr printing is done to keep
# all messages from each k-mer length together
lock.acquire()
sys.stderr.write("Creating mash database for k = " + str(k) + "\n")
sys.stderr.write("Random " + str(k) + "-mer probability: " + "{:.2f}".format(random_prob) + "\n")
# overwrite existing file if instructed
if os.path.isfile(dbfilename) and overwrite == True:
sys.stderr.write("Overwriting db: " + dbfilename + "\n")
os.remove(dbfilename)
# create new file or leave original intact
if not os.path.isfile(dbfilename):
# Release lock before running sketch
lock.release()
# Run sketch
mash_cmd = mash_exec \
+ " sketch -w 1 -p " + str(threads) \
+ " -s " + str(sketch[k]) \
+ " -o " + dbname \
+ " -k " + str(k) \
+ " -l " + assemblyList \
+ " 2> /dev/null"
subprocess.run(mash_cmd, shell=True, check=True)
else:
sys.stderr.write("Found existing mash database " + dbname + ".msh for k = " + str(k) + "\n")
lock.release()
def queryDatabase(qFile, klist, dbPrefix, queryPrefix, self = True, number_plot_fits = 0,
no_stream = False, mash_exec = 'mash', threads = 1):
"""Calculate core and accessory distances between query sequences and a sketched database
For a reference database, runs the query against itself to find all pairwise
core and accessory distances.
Uses the relation :math:`pr(a, b) = (1-a)(1-c)^k`
To get the ref and query name for each row of the returned distances, call to the iterator
:func:`~PopPUNK.utils.iterDistRows` with the returned refList and queryList
Args:
qFile (str)
File with location of query sequences
klist (list)
K-mer sizes to use in the calculation
dbPrefix (str)
Prefix for reference mash sketch database created by :func:`~constructDatabase`
queryPrefix (str)
Prefix for query mash sketch database created by :func:`~constructDatabase`
self (bool)
Set true if query = ref
(default = True)
number_plot_fits (int)
If > 0, the number of k-mer length fits to plot (saved as pdfs).
Takes random pairs of comparisons and calls :func:`~PopPUNK.plot.plot_fit`
(default = 0)
no_stream (bool)
Rather than streaming mash dist input directly into parser, will write
through an intermediate temporary file
(default = False)
mash_exec (str)
Location of mash executable
(default = 'mash')
threads (int)
Number of threads to use in the mash process
(default = 1)
Returns:
refList (list)
Names of reference sequences
queryList (list)
Names of query sequences
distMat (numpy.array)
Core distances (column 0) and accessory distances (column 1) between
refList and queryList
"""
queryList = []
with open(qFile, 'r') as queryFile:
for line in queryFile:
queryList.append(line.rstrip())
refList = getSeqsInDb(dbPrefix + "/" + os.path.basename(dbPrefix) + "." + str(klist[0]) + ".msh", mash_exec)
if self:
if dbPrefix != queryPrefix:
raise RuntimeError("Must use same db for self query")
number_pairs = int(0.5 * len(refList) * (len(refList) - 1))
else:
number_pairs = int(len(refList) * len(queryList))
# Pre-assign array for storage. float32 sufficient accuracy for 10**4 sketch size, halves memory use
raw = sharedmem.empty((number_pairs, len(klist)), dtype=np.float32)
# iterate through kmer lengths
for k_idx, k in enumerate(klist):
row = 0
# run mash distance query based on current file
ref_dbname = dbPrefix + "/" + os.path.basename(dbPrefix) + "." + str(k) + ".msh"
query_dbname = queryPrefix + "/" + os.path.basename(queryPrefix) + "." + str(k) + ".msh"
# construct mash command
mash_cmd = mash_exec + " dist -p " + str(threads) + " " + ref_dbname + " " + query_dbname
if no_stream:
tmpDirName = "./" + os.path.basename(dbPrefix)
if not os.path.isdir(tmpDirName):
tmpDirName = None
tmpHandle, tmpName = mkstemp(prefix=os.path.basename(dbPrefix),
suffix=".tmp", dir=tmpDirName)
mash_cmd += " > " + tmpName
mash_cmd += " 2> " + os.path.basename(dbPrefix) + ".err.log"
sys.stderr.write(mash_cmd + "\n")
try:
if no_stream:
subprocess.run(mash_cmd, shell=True, check=True)
mashOut = open(tmpName, 'r')
else:
rawOutput = subprocess.Popen(mash_cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
mashOut = rawOutput.stdout
# Check mash output is consistent with expected order
# This is ok in all tests, but best to check and exit in case something changes between mash versions
expected_names = iterDistRows(refList, queryList, self)
prev_ref = ""
skip = 0
skipped = 0
for line in mashOut:
# Skip the first row with self and symmetric elements
if skipped < skip:
skipped += 1
continue
mashVals = line.rstrip().split("\t")
if (len(mashVals) > 2):
if self and mashVals[1] != prev_ref:
prev_ref = mashVals[1]
skip += 1
skipped = 1
else:
mashMatch = mashVals[-1].split('/')
(e_ref, e_query) = next(expected_names)
if mashVals[0] == e_ref and mashVals[1] == e_query:
raw[row, k_idx] = float(mashMatch[0])/int(mashMatch[1])
row += 1
else:
sys.stderr.write("mash dist output order:" + e_query + "," + e_ref + "\n" +
"not as expected: " + mashVals[0] + "," + mashVals[1] + "\n")
sys.exit(1)
if no_stream:
os.remove(tmpName)
else:
rawOutput.wait(timeout=1)
if rawOutput.poll() != 0:
raise RuntimeError('mash dist command "' + mash_cmd + '" failed with raw output ' + str(rawOutput.poll()))
# Remove the stderr file
if os.path.isfile(dbPrefix + ".err.log"):
os.remove(dbPrefix + ".err.log")
except subprocess.CalledProcessError as e:
sys.stderr.write("mash dist command " + mash_cmd + " failed with error " + e.message + "\n")
sys.exit(1)
# Pre-assign return (to higher precision)
sys.stderr.write("Calculating core and accessory distances\n")
# Hessian = 0, so Jacobian for regression is a constant
jacobian = -np.hstack((np.ones((klist.shape[0], 1)), klist.reshape(-1, 1)))
# option to plot core/accessory fits. Choose a random number from cmd line option
if number_plot_fits > 0:
examples = sample(range(number_pairs), k=number_plot_fits)
for plot_idx, plot_example in enumerate(sorted(examples)):
fit = fitKmerCurve(raw[plot_example, :], klist, jacobian)
plot_fit(klist, raw[plot_example, :], fit,
dbPrefix + "/fit_example_" + str(plot_idx + 1),
"Example fit " + str(plot_idx + 1) + " (row " + str(plot_example) + ")")
# run pairwise analyses across kmer lengths, mutating distMat
# Create range of rows that each thread will work with
rows_per_thread = int(number_pairs / threads)
big_threads = number_pairs % threads
start = 0
mat_chunks = []
for thread in range(threads):
end = start + rows_per_thread
if thread < big_threads:
end += 1
mat_chunks.append((start, end))
start = end
distMat = sharedmem.empty((number_pairs, 2))
with sharedmem.MapReduce(np = threads) as pool:
pool.map(partial(fitKmerBlock, distMat=distMat, raw = raw, klist=klist, jacobian=jacobian), mat_chunks)
return(refList, queryList, distMat)
def fitKmerBlock(idxRanges, distMat, raw, klist, jacobian):
    """Fit the k-mer curve for every row in a slice of the raw match matrix.

    Multirow wrapper around :func:`~fitKmerCurve`; results are written into
    ``distMat`` in place, so this can be used as a sharedmem worker.

    Args:
        idxRanges (int, int)
            Tuple of first and last row of slice to calculate
        distMat (numpy.array)
            sharedmem object to store core and accessory distances in (altered in place)
        raw (numpy.array)
            sharedmem object with proportion of k-mer matches for each query-ref pair
            by row, columns are at k-mer lengths in klist
        klist (list)
            List of k-mer lengths to use
        jacobian (numpy.array)
            The Jacobian for the fit, sent to :func:`~fitKmerCurve`
    """
    row_start, row_end = idxRanges
    # One fit per query-ref pair; each fit yields a (core, accessory) pair.
    for row in range(row_start, row_end):
        distMat[row, :] = fitKmerCurve(raw[row, :], klist, jacobian)
def fitKmerCurve(pairwise, klist, jacobian):
    """Fit the function :math:`pr = (1-a)(1-c)^k`

    Taking logs gives the linear model
    :math:`\\log(pr) = \\log(1-a) + k\\log(1-c)`, fit by bounded least
    squares with the constant analytic Jacobian supplied by the caller.

    Supply ``jacobian = -np.hstack((np.ones((klist.shape[0], 1)), klist.reshape(-1, 1)))``

    Args:
        pairwise (numpy.array)
            Proportion of shared k-mers at k-mer values in klist
        klist (list)
            k-mer sizes used
        jacobian (numpy.array)
            Should be set as above (set once to try and save memory)

    Returns:
        transformed_params (numpy.array)
            Column with core and accessory distance
    """
    # curve fit pr = (1-a)(1-c)^k
    # log pr = log(1-a) + k*log(1-c)
    # a = p[0]; c = p[1] (will flip on return)
    try:
        distFit = optimize.least_squares(fun=lambda p, x, y: y - (p[0] + p[1] * x),
                                         x0=[0.0, -0.01],
                                         jac=lambda p, x, y: jacobian,
                                         args=(klist, np.log(pairwise)),
                                         bounds=([-np.inf, -np.inf], [0, 0]))
        transformed_params = 1 - np.exp(distFit.x)
    except ValueError as e:
        sys.stderr.write("Fitting k-mer curve failed: " + format(e) +
                         "\nWith mash input " +
                         np.array2string(pairwise, precision=4, separator=',',suppress_small=True) +
                         "\nCheck for low quality input genomes\n")
        # Bug fix: this failure path previously called the site builtin
        # exit(0), which reports SUCCESS to the shell (and the builtin is
        # not guaranteed outside interactive sessions). Exit non-zero, as
        # every other error path in this file does.
        sys.exit(1)

    # Return core, accessory
    return(np.flipud(transformed_params))
def readMashDBParams(dbPrefix, kmers, sketch_sizes, mash_exec = 'mash'):
    """Get kmers lengths and sketch sizes from existing database

    Calls :func:`~getKmersFromReferenceDatabase` and :func:`~getSketchSize`
    Uses passed values if db missing

    Args:
        dbPrefix (str)
            Prefix for sketch DB files
        kmers (list)
            Kmers to use if db not found
        sketch_sizes (list)
            Sketch size to use if db not found
        mash_exec (str)
            Location of mash executable
            Default = 'mash'

    Returns:
        kmers (list)
            List of k-mer lengths used in database
        sketch_sizes (list)
            List of sketch sizes used in database
    """
    db_kmers = getKmersFromReferenceDatabase(dbPrefix)
    if len(db_kmers) > 0:
        # Values recovered from the database override the caller's input.
        kmers = db_kmers
        sketch_sizes = getSketchSize(dbPrefix, kmers, mash_exec)
    else:
        sys.stderr.write("Couldn't find mash sketches in " + dbPrefix + "\n"
                         "Using command line input parameters for k-mer and sketch sizes\n")
    return kmers, sketch_sizes
def getKmersFromReferenceDatabase(dbPrefix):
    """Get kmers lengths from existing database

    The k-mer sizes are parsed out of the sketch file names, which follow
    the pattern ``<dbPrefix>/<basename(dbPrefix)>.<k>.msh``.

    Args:
        dbPrefix (str)
            Prefix for sketch DB files

    Returns:
        kmers (numpy.array)
            Sorted array of k-mer lengths used in database
    """
    sketch_prefix = dbPrefix + "/" + os.path.basename(dbPrefix) + "."
    # Second-to-last dot-separated component of each file name is the k-mer size
    kmer_sizes = [int(msh_file.split('.')[-2])
                  for msh_file in glob(sketch_prefix + "*.msh")]
    return np.asarray(sorted(kmer_sizes))
| [
"os.remove",
"multiprocessing.Lock",
"numpy.ones",
"os.path.isfile",
"numpy.exp",
"glob.glob",
"numpy.std",
"numpy.apply_along_axis",
"sharedmem.empty",
"sharedmem.MapReduce",
"functools.partial",
"subprocess.Popen",
"os.path.basename",
"numpy.asarray",
"numpy.array2string",
"numpy.fli... | [((754, 839), 'subprocess.Popen', 'subprocess.Popen', (["[mash_exec + ' --version']"], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "([mash_exec + ' --version'], shell=True, stdout=subprocess.PIPE\n )\n", (770, 839), False, 'import subprocess\n'), ((1735, 1759), 'os.path.isdir', 'os.path.isdir', (['outPrefix'], {}), '(outPrefix)\n', (1748, 1759), False, 'import os\n'), ((11069, 11075), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (11073, 11075), False, 'from multiprocessing import Pool, Lock\n'), ((20058, 20120), 'sys.stderr.write', 'sys.stderr.write', (['"""Calculating core and accessory distances\n"""'], {}), "('Calculating core and accessory distances\\n')\n", (20074, 20120), False, 'import sys\n'), ((21250, 21284), 'sharedmem.empty', 'sharedmem.empty', (['(number_pairs, 2)'], {}), '((number_pairs, 2))\n', (21265, 21284), False, 'import sharedmem\n'), ((22258, 22330), 'numpy.apply_along_axis', 'np.apply_along_axis', (['fitKmerCurve', '(1)', 'raw[start:end, :]', 'klist', 'jacobian'], {}), '(fitKmerCurve, 1, raw[start:end, :], klist, jacobian)\n', (22277, 22330), True, 'import numpy as np\n'), ((23790, 23819), 'numpy.flipud', 'np.flipud', (['transformed_params'], {}), '(transformed_params)\n', (23799, 23819), True, 'import numpy as np\n'), ((25391, 25419), 'glob.glob', 'glob', (["(fullDbPrefix + '*.msh')"], {}), "(fullDbPrefix + '*.msh')\n", (25395, 25419), False, 'from glob import glob\n'), ((25524, 25540), 'numpy.asarray', 'np.asarray', (['knum'], {}), '(knum)\n', (25534, 25540), True, 'import numpy as np\n'), ((1054, 1098), 'sys.stderr.write', 'sys.stderr.write', (['"""Need mash v2 or higher\n"""'], {}), "('Need mash v2 or higher\\n')\n", (1070, 1098), False, 'import sys\n'), ((1107, 1118), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1115, 1118), False, 'import sys\n'), ((5139, 5231), 'subprocess.Popen', 'subprocess.Popen', (['mash_cmd'], {'universal_newlines': '(True)', 'shell': '(True)', 'stdout': 'subprocess.PIPE'}), 
'(mash_cmd, universal_newlines=True, shell=True, stdout=\n subprocess.PIPE)\n', (5155, 5231), False, 'import subprocess\n'), ((10405, 10480), 'sys.stderr.write', 'sys.stderr.write', (['"""WARNING: Could not detect genome length. Assuming 2Mb\n"""'], {}), "('WARNING: Could not detect genome length. Assuming 2Mb\\n')\n", (10421, 10480), False, 'import sys\n'), ((10522, 10601), 'sys.stderr.write', 'sys.stderr.write', (['"""WARNING: Average length over 10Mb - are these assemblies?\n"""'], {}), "('WARNING: Average length over 10Mb - are these assemblies?\\n')\n", (10538, 10601), False, 'import sys\n'), ((11085, 11152), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'num_processes', 'initializer': 'init_lock', 'initargs': '(l,)'}), '(processes=num_processes, initializer=init_lock, initargs=(l,))\n', (11089, 11152), False, 'from multiprocessing import Pool, Lock\n'), ((12684, 12706), 'os.path.isdir', 'os.path.isdir', (['oPrefix'], {}), '(oPrefix)\n', (12697, 12706), False, 'import os\n'), ((12716, 12778), 'sys.stderr.write', 'sys.stderr.write', (["('Directory ' + oPrefix + ' does not exist\\n')"], {}), "('Directory ' + oPrefix + ' does not exist\\n')\n", (12732, 12778), False, 'import sys\n'), ((12787, 12798), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12795, 12798), False, 'import sys\n'), ((13266, 13292), 'os.path.isfile', 'os.path.isfile', (['dbfilename'], {}), '(dbfilename)\n', (13280, 13292), False, 'import os\n'), ((13324, 13380), 'sys.stderr.write', 'sys.stderr.write', (["('Overwriting db: ' + dbfilename + '\\n')"], {}), "('Overwriting db: ' + dbfilename + '\\n')\n", (13340, 13380), False, 'import sys\n'), ((13389, 13410), 'os.remove', 'os.remove', (['dbfilename'], {}), '(dbfilename)\n', (13398, 13410), False, 'import os\n'), ((13470, 13496), 'os.path.isfile', 'os.path.isfile', (['dbfilename'], {}), '(dbfilename)\n', (13484, 13496), False, 'import os\n'), ((13893, 13941), 'subprocess.run', 'subprocess.run', (['mash_cmd'], {'shell': '(True)', 'check': 
'(True)'}), '(mash_cmd, shell=True, check=True)\n', (13907, 13941), False, 'import subprocess\n'), ((17615, 17648), 'sys.stderr.write', 'sys.stderr.write', (["(mash_cmd + '\\n')"], {}), "(mash_cmd + '\\n')\n", (17631, 17648), False, 'import sys\n'), ((21294, 21325), 'sharedmem.MapReduce', 'sharedmem.MapReduce', ([], {'np': 'threads'}), '(np=threads)\n', (21313, 21325), False, 'import sharedmem\n'), ((24641, 24780), 'sys.stderr.write', 'sys.stderr.write', (['("Couldn\'t find mash sketches in " + dbPrefix +\n """\nUsing command line input parameters for k-mer and sketch sizes\n""")'], {}), '("Couldn\'t find mash sketches in " + dbPrefix +\n """\nUsing command line input parameters for k-mer and sketch sizes\n""")\n', (24657, 24780), False, 'import sys\n'), ((2279, 2301), 'os.makedirs', 'os.makedirs', (['outPrefix'], {}), '(outPrefix)\n', (2290, 2301), False, 'import os\n'), ((3203, 3295), 'subprocess.Popen', 'subprocess.Popen', (['mash_cmd'], {'universal_newlines': '(True)', 'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(mash_cmd, universal_newlines=True, shell=True, stdout=\n subprocess.PIPE)\n', (3219, 3295), False, 'import subprocess\n'), ((5545, 5558), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5555, 5558), False, 'import time\n'), ((5973, 5984), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5981, 5984), False, 'import sys\n'), ((6847, 6895), 'subprocess.run', 'subprocess.run', (['mash_cmd'], {'shell': '(True)', 'check': '(True)'}), '(mash_cmd, shell=True, check=True)\n', (6861, 6895), False, 'import subprocess\n'), ((9932, 10059), 'sys.stderr.write', 'sys.stderr.write', (['(\'Could not find sequence assembly \' + e.filename +\n """\nAssuming length of 2Mb for random match probs.\n""")'], {}), '(\'Could not find sequence assembly \' + e.filename +\n """\nAssuming length of 2Mb for random match probs.\n""")\n', (9948, 10059), False, 'import sys\n'), ((10127, 10249), 'sys.stderr.write', 'sys.stderr.write', (['"""Could not read input file. 
Is it zipped?\nAssuming length of 2Mb for random match probs.\n"""'], {}), '(\n """Could not read input file. Is it zipped?\nAssuming length of 2Mb for random match probs.\n"""\n )\n', (10143, 10249), False, 'import sys\n'), ((11179, 11358), 'functools.partial', 'partial', (['runSketch'], {'assemblyList': 'assemblyList', 'sketch': 'sketch', 'genome_length': 'genome_length', 'oPrefix': 'oPrefix', 'mash_exec': 'mash_exec', 'overwrite': 'overwrite', 'threads': 'num_threads'}), '(runSketch, assemblyList=assemblyList, sketch=sketch, genome_length=\n genome_length, oPrefix=oPrefix, mash_exec=mash_exec, overwrite=\n overwrite, threads=num_threads)\n', (11186, 11358), False, 'from functools import partial\n'), ((19738, 19775), 'os.path.isfile', 'os.path.isfile', (["(dbPrefix + '.err.log')"], {}), "(dbPrefix + '.err.log')\n", (19752, 19775), False, 'import os\n'), ((21354, 21433), 'functools.partial', 'partial', (['fitKmerBlock'], {'distMat': 'distMat', 'raw': 'raw', 'klist': 'klist', 'jacobian': 'jacobian'}), '(fitKmerBlock, distMat=distMat, raw=raw, klist=klist, jacobian=jacobian)\n', (21361, 21433), False, 'from functools import partial\n'), ((23400, 23417), 'numpy.exp', 'np.exp', (['distFit.x'], {}), '(distFit.x)\n', (23406, 23417), True, 'import numpy as np\n'), ((25309, 25335), 'os.path.basename', 'os.path.basename', (['dbPrefix'], {}), '(dbPrefix)\n', (25325, 25335), False, 'import os\n'), ((2004, 2064), 'sys.stderr.write', 'sys.stderr.write', (["('Removing old database ' + msh_file + '\\n')"], {}), "('Removing old database ' + msh_file + '\\n')\n", (2020, 2064), False, 'import sys\n'), ((2224, 2243), 'os.remove', 'os.remove', (['msh_file'], {}), '(msh_file)\n', (2233, 2243), False, 'import os\n'), ((2338, 2390), 'sys.stderr.write', 'sys.stderr.write', (['"""Cannot create output directory\n"""'], {}), "('Cannot create output directory\\n')\n", (2354, 2390), False, 'import sys\n'), ((2403, 2414), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2411, 2414), False, 
'import sys\n'), ((4628, 4639), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4636, 4639), False, 'import sys\n'), ((7069, 7162), 'sys.stderr.write', 'sys.stderr.write', (["('Could not run command ' + mash_cmd + '; returned: ' + e.output + '\\n')"], {}), "('Could not run command ' + mash_cmd + '; returned: ' + e.\n output + '\\n')\n", (7085, 7162), False, 'import sys\n'), ((7170, 7181), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7178, 7181), False, 'import sys\n'), ((9357, 9380), 'numpy.array', 'np.array', (['input_lengths'], {}), '(input_lengths)\n', (9365, 9380), True, 'import numpy as np\n'), ((9876, 9887), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9884, 9887), False, 'import sys\n'), ((12521, 12546), 'os.path.basename', 'os.path.basename', (['oPrefix'], {}), '(oPrefix)\n', (12537, 12546), False, 'import os\n'), ((17242, 17268), 'os.path.basename', 'os.path.basename', (['dbPrefix'], {}), '(dbPrefix)\n', (17258, 17268), False, 'import os\n'), ((17288, 17313), 'os.path.isdir', 'os.path.isdir', (['tmpDirName'], {}), '(tmpDirName)\n', (17301, 17313), False, 'import os\n'), ((17567, 17593), 'os.path.basename', 'os.path.basename', (['dbPrefix'], {}), '(dbPrefix)\n', (17583, 17593), False, 'import os\n'), ((17705, 17753), 'subprocess.run', 'subprocess.run', (['mash_cmd'], {'shell': '(True)', 'check': '(True)'}), '(mash_cmd, shell=True, check=True)\n', (17719, 17753), False, 'import subprocess\n'), ((17845, 17936), 'subprocess.Popen', 'subprocess.Popen', (['mash_cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '(mash_cmd, shell=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n', (17861, 17936), False, 'import subprocess\n'), ((19437, 19455), 'os.remove', 'os.remove', (['tmpName'], {}), '(tmpName)\n', (19446, 19455), False, 'import os\n'), ((19793, 19825), 'os.remove', 'os.remove', (["(dbPrefix + '.err.log')"], {}), "(dbPrefix + '.err.log')\n", (19802, 19825), False, 'import os\n'), ((19890, 19986), 
'sys.stderr.write', 'sys.stderr.write', (["('mash dist command ' + mash_cmd + ' failed with error ' + e.message + '\\n')"], {}), "('mash dist command ' + mash_cmd + ' failed with error ' +\n e.message + '\\n')\n", (19906, 19986), False, 'import sys\n'), ((19995, 20006), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (20003, 20006), False, 'import sys\n'), ((20209, 20237), 'numpy.ones', 'np.ones', (['(klist.shape[0], 1)'], {}), '((klist.shape[0], 1))\n', (20216, 20237), True, 'import numpy as np\n'), ((1404, 1428), 'os.path.basename', 'os.path.basename', (['prefix'], {}), '(prefix)\n', (1420, 1428), False, 'import os\n'), ((1858, 1885), 'os.path.basename', 'os.path.basename', (['outPrefix'], {}), '(outPrefix)\n', (1874, 1885), False, 'import os\n'), ((9430, 9451), 'numpy.std', 'np.std', (['input_lengths'], {}), '(input_lengths)\n', (9436, 9451), True, 'import numpy as np\n'), ((9501, 9522), 'numpy.std', 'np.std', (['input_lengths'], {}), '(input_lengths)\n', (9507, 9522), True, 'import numpy as np\n'), ((17398, 17424), 'os.path.basename', 'os.path.basename', (['dbPrefix'], {}), '(dbPrefix)\n', (17414, 17424), False, 'import os\n'), ((23274, 23290), 'numpy.log', 'np.log', (['pairwise'], {}), '(pairwise)\n', (23280, 23290), True, 'import numpy as np\n'), ((3059, 3085), 'os.path.basename', 'os.path.basename', (['dbPrefix'], {}), '(dbPrefix)\n', (3075, 3085), False, 'import os\n'), ((16223, 16249), 'os.path.basename', 'os.path.basename', (['dbPrefix'], {}), '(dbPrefix)\n', (16239, 16249), False, 'import os\n'), ((16908, 16934), 'os.path.basename', 'os.path.basename', (['dbPrefix'], {}), '(dbPrefix)\n', (16924, 16934), False, 'import os\n'), ((17002, 17031), 'os.path.basename', 'os.path.basename', (['queryPrefix'], {}), '(queryPrefix)\n', (17018, 17031), False, 'import os\n'), ((23589, 23663), 'numpy.array2string', 'np.array2string', (['pairwise'], {'precision': '(4)', 'separator': '""","""', 'suppress_small': '(True)'}), "(pairwise, precision=4, separator=',', 
suppress_small=True)\n", (23604, 23663), True, 'import numpy as np\n'), ((4152, 4163), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4160, 4163), False, 'import sys\n'), ((6520, 6544), 'os.path.basename', 'os.path.basename', (['output'], {}), '(output)\n', (6536, 6544), False, 'import os\n'), ((6610, 6631), 'os.path.basename', 'os.path.basename', (['db1'], {}), '(db1)\n', (6626, 6631), False, 'import os\n'), ((6694, 6715), 'os.path.basename', 'os.path.basename', (['db2'], {}), '(db2)\n', (6710, 6715), False, 'import os\n'), ((19170, 19311), 'sys.stderr.write', 'sys.stderr.write', (["('mash dist output order:' + e_query + ',' + e_ref + '\\n' +\n 'not as expected: ' + mashVals[0] + ',' + mashVals[1] + '\\n')"], {}), "('mash dist output order:' + e_query + ',' + e_ref + '\\n' +\n 'not as expected: ' + mashVals[0] + ',' + mashVals[1] + '\\n')\n", (19186, 19311), False, 'import sys\n'), ((19381, 19392), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (19389, 19392), False, 'import sys\n'), ((6953, 6977), 'os.path.basename', 'os.path.basename', (['output'], {}), '(output)\n', (6969, 6977), False, 'import os\n')] |
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from matplotlib import pyplot
from numpy import array
# return training data
def get_train():
    """Return (X, y) training data for the toy sequence task.

    X has shape (5, 1, 1) (samples, timesteps, features); y is the value
    following each input in the 0.0..0.5 ramp.
    """
    pairs = array([[0.0, 0.1], [0.1, 0.2], [0.2, 0.3], [0.3, 0.4], [0.4, 0.5]])
    inputs = pairs[:, 0].reshape((5, 1, 1))
    targets = pairs[:, 1]
    return inputs, targets
# return validation data
def get_val():
    """Return (X, y) validation data for the toy sequence task.

    Same layout as :func:`get_train`, covering the 0.5..1.0 continuation
    of the ramp.
    """
    pairs = array([[0.5, 0.6], [0.6, 0.7], [0.7, 0.8], [0.8, 0.9], [0.9, 1.0]])
    inputs = pairs[:, 0].reshape((len(pairs), 1, 1))
    targets = pairs[:, 1]
    return inputs, targets
# Build a small LSTM regressor: one 10-unit recurrent layer feeding a
# single linear output (layers passed directly to the Sequential constructor).
model = Sequential([
    LSTM(10, input_shape=(1, 1)),
    Dense(1, activation='linear'),
])
model.compile(loss='mse', optimizer='adam')

# Train on the toy ramp, validating on its held-out continuation.
train_X, train_y = get_train()
val_X, val_y = get_val()
history = model.fit(train_X, train_y, epochs=800,
                    validation_data=(val_X, val_y), shuffle=False)

# Plot the training-loss and validation-loss curves side by side.
pyplot.plot(history.history['loss'])
pyplot.plot(history.history['val_loss'])
pyplot.title('model train vs validation loss')
pyplot.ylabel('loss')
pyplot.xlabel('epoch')
pyplot.legend(['train', 'validation'], loc='upper right')
pyplot.show() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"keras.layers.LSTM",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"keras.layers.Dense",
"numpy.array",
"keras.models.Sequential",
"matplotlib.pyplot.xlabel"
] | [((570, 582), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (580, 582), False, 'from keras.models import Sequential\n'), ((892, 928), 'matplotlib.pyplot.plot', 'pyplot.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (903, 928), False, 'from matplotlib import pyplot\n'), ((929, 969), 'matplotlib.pyplot.plot', 'pyplot.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (940, 969), False, 'from matplotlib import pyplot\n'), ((970, 1016), 'matplotlib.pyplot.title', 'pyplot.title', (['"""model train vs validation loss"""'], {}), "('model train vs validation loss')\n", (982, 1016), False, 'from matplotlib import pyplot\n'), ((1017, 1038), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""loss"""'], {}), "('loss')\n", (1030, 1038), False, 'from matplotlib import pyplot\n'), ((1039, 1061), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (1052, 1061), False, 'from matplotlib import pyplot\n'), ((1062, 1119), 'matplotlib.pyplot.legend', 'pyplot.legend', (["['train', 'validation']"], {'loc': '"""upper right"""'}), "(['train', 'validation'], loc='upper right')\n", (1075, 1119), False, 'from matplotlib import pyplot\n'), ((1120, 1133), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1131, 1133), False, 'from matplotlib import pyplot\n'), ((267, 277), 'numpy.array', 'array', (['seq'], {}), '(seq)\n', (272, 277), False, 'from numpy import array\n'), ((462, 472), 'numpy.array', 'array', (['seq'], {}), '(seq)\n', (467, 472), False, 'from numpy import array\n'), ((593, 621), 'keras.layers.LSTM', 'LSTM', (['(10)'], {'input_shape': '(1, 1)'}), '(10, input_shape=(1, 1))\n', (597, 621), False, 'from keras.layers import LSTM\n'), ((632, 661), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (637, 661), False, 'from keras.layers import Dense\n')] |
import numpy as np
import logging as log
import tinkerbell.app.make as tbamk
import tinkerbell.app.rcparams as tbarc
import tinkerbell.app.plot as tbapl
import tinkerbell.app.model as tbamd
import tinkerbell.domain.point as tbdpt
import example_regress_on_time_stage_training_set
def do_the_thing():
    """Run the trained stage-LSTM on a freshly generated decline curve.

    Generates a noisy declining series with a discontinuity, loads the
    saved normalizer and model, predicts the series from the initial value
    and the stage indicator, saves blind/predicted arrays to .npy files,
    and plots both.
    """
    # Simulation settings come from the app-level rcparams.
    y0 = tbarc.rcparams['shale.lstm.y0_mean']*tbarc.rcparams['shale.lstm.y0_mult']
    d = tbarc.rcparams['shale.lstm_stage.d']
    xmax = tbarc.rcparams['shale.lstm_stage.xmax']
    num_points = tbarc.rcparams['shale.lstm_stage.num_points']
    xdisc = tbarc.rcparams['shale.lstm_stage.xdisc_mean'] + xmax*tbarc.rcparams['shale.lstm_stage.xdisc_mult']

    np.random.seed(42)
    points, disc_idx = tbamk.points_exponential_discontinuous_declinelinear_noisy(y0, d, xmax, xdisc, num=num_points)
    xs, ys = tbdpt.point_coordinates(points)

    # Stage indicator: 0 before the discontinuity, 1 from it onwards.
    stage = np.zeros_like(ys)
    stage[disc_idx:] = 1.0

    # NOTE(review): these two objects are constructed but never used below.
    features = tbamd.Features(ys, stage)
    targets = tbamd.Targets(ys)

    normalizer = tbamd.Normalizer.load(tbarc.rcparams['shale.lstm_stage.fnamenormalizer'])
    model = tbamd.load(tbarc.rcparams['shale.lstm_stage.fnamenmodel'])
    y_pred = tbamd.predict(y0, stage, normalizer, model)

    # Last sample is dropped before saving/plotting — presumably to line up
    # with the prediction length; confirm against tbamd.predict.
    x_plot = xs[:-1]
    y_ref = ys[:-1]
    np.save(tbarc.rcparams['shale.lstm_stage.fnamenblinddata'], np.array([x_plot, y_ref]))
    np.save(tbarc.rcparams['shale.lstm_stage.fnamenpreddata'], np.array([x_plot, y_pred]))
    tbapl.plot([(x_plot, y_ref), (x_plot, y_pred)], styles=['p', 'l'], labels=['yblind', 'yhat'])
if __name__ == '__main__':
    #log.basicConfig(filename='debug00.log', level=log.DEBUG)
    # First run the training-set example (arguments are presumably a
    # retrain flag plus two training hyperparameters — confirm against that
    # module), then run the prediction/plotting above.
    example_regress_on_time_stage_training_set.do_the_thing(True, 500, 3)
    do_the_thing()
| [
"numpy.zeros_like",
"numpy.random.seed",
"tinkerbell.app.model.Normalizer.load",
"tinkerbell.app.model.Features",
"tinkerbell.app.plot.plot",
"tinkerbell.app.model.predict",
"example_regress_on_time_stage_training_set.do_the_thing",
"tinkerbell.app.model.Targets",
"tinkerbell.domain.point.point_coor... | [((648, 666), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (662, 666), True, 'import numpy as np\n'), ((683, 781), 'tinkerbell.app.make.points_exponential_discontinuous_declinelinear_noisy', 'tbamk.points_exponential_discontinuous_declinelinear_noisy', (['y0', 'd', 'xmax', 'xdisc'], {'num': 'num_points'}), '(y0, d, xmax,\n xdisc, num=num_points)\n', (741, 781), True, 'import tinkerbell.app.make as tbamk\n'), ((805, 833), 'tinkerbell.domain.point.point_coordinates', 'tbdpt.point_coordinates', (['pts'], {}), '(pts)\n', (828, 833), True, 'import tinkerbell.domain.point as tbdpt\n'), ((845, 869), 'numpy.zeros_like', 'np.zeros_like', (['ycomp_pts'], {}), '(ycomp_pts)\n', (858, 869), True, 'import numpy as np\n'), ((907, 939), 'tinkerbell.app.model.Features', 'tbamd.Features', (['ycomp_pts', 'stage'], {}), '(ycomp_pts, stage)\n', (921, 939), True, 'import tinkerbell.app.model as tbamd\n'), ((952, 976), 'tinkerbell.app.model.Targets', 'tbamd.Targets', (['ycomp_pts'], {}), '(ycomp_pts)\n', (965, 976), True, 'import tinkerbell.app.model as tbamd\n'), ((1065, 1104), 'tinkerbell.app.model.Normalizer.load', 'tbamd.Normalizer.load', (['fname_normalizer'], {}), '(fname_normalizer)\n', (1086, 1104), True, 'import tinkerbell.app.model as tbamd\n'), ((1179, 1202), 'tinkerbell.app.model.load', 'tbamd.load', (['fname_model'], {}), '(fname_model)\n', (1189, 1202), True, 'import tinkerbell.app.model as tbamd\n'), ((1213, 1256), 'tinkerbell.app.model.predict', 'tbamd.predict', (['y0', 'stage', 'normalizer', 'model'], {}), '(y0, stage, normalizer, model)\n', (1226, 1256), True, 'import tinkerbell.app.model as tbamd\n'), ((1552, 1645), 'tinkerbell.app.plot.plot', 'tbapl.plot', (['[(xplot, yref), (xplot, yhat)]'], {'styles': "['p', 'l']", 'labels': "['yblind', 'yhat']"}), "([(xplot, yref), (xplot, yhat)], styles=['p', 'l'], labels=[\n 'yblind', 'yhat'])\n", (1562, 1645), True, 'import tinkerbell.app.plot as tbapl\n'), ((1735, 
1804), 'example_regress_on_time_stage_training_set.do_the_thing', 'example_regress_on_time_stage_training_set.do_the_thing', (['(True)', '(500)', '(3)'], {}), '(True, 500, 3)\n', (1790, 1804), False, 'import example_regress_on_time_stage_training_set\n'), ((1474, 1497), 'numpy.array', 'np.array', (['[xplot, yref]'], {}), '([xplot, yref])\n', (1482, 1497), True, 'import numpy as np\n'), ((1525, 1548), 'numpy.array', 'np.array', (['[xplot, yhat]'], {}), '([xplot, yhat])\n', (1533, 1548), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 18 2019
Learn behavior policy for this dataset using annoy (approx kNN)
@author: josephfutoma
"""
import numpy as np
import pandas as pd
import os
from datetime import datetime
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
from time import time
import annoy #approximate kNN
# TODO: set this to the correct path where your preprocessed mimic data
# for RL for this hypotension task lives
MIMIC_DATA_PATH = "XXX"
# Show up to 101 columns when printing pandas frames.
pd.set_option("display.max_columns",101)
# Work from the MIMIC data directory so relative paths below resolve there.
os.chdir(MIMIC_DATA_PATH)
# Per-feature distance weights for the kNN behavior-policy lookup.
# Only a handful of features (time, lactate, urine, MAP, and the
# treatment-history columns) contribute to the neighbor metric; any
# feature weighted 0 is ignored entirely.
state_weights = [
    5, #normed_time
    0, #age
    0, #is_F
    0, #surg_ICU
    0, #is_not_white
    0, #is_emergency
    0, #is_urgent
    0, #hrs_from_admit_to_icu
    0, #bicarbonate
    0, #bicarbonate_ind
    0, #bun
    0, #bun_ind
    0, #creatinine
    0, #creatinine_ind
    0, #fio2
    0, #fio2_ind
    0, #glucose
    0, #glucose_ind
    0, #hct
    0, #hct_ind
    0, #hr
    0, #hr_ind
    5, #lactate
    1, #lactate_ind
    0, #magnesium
    0, #magnesium_ind
    0, #platelets
    0, #platelets_ind
    0, #potassium
    0, #potassium_ind
    0, #sodium
    0, #sodium_ind
    0, #spo2
    0, #spo2_ind
    0, #spontaneousrr
    0, #spontaneousrr_ind
    0, #temp
    0, #temp_ind
    5, #urine
    1, #urine_ind
    0, #wbc
    0, #wbc_ind
    0, #alt
    0, #alt_ind
    0, #ast
    0, #ast_ind
    0, #bilirubin_total
    0, #bilirubin_total_ind
    0, #co2
    0, #co2_ind
    0, #dbp
    0, #dbp_ind
    0, #hgb
    0, #hgb_ind
    5, #map
    1, #map_ind
    0, #pco2
    0, #pco2_ind
    0, #po2
    0, #po2_ind
    0, #sbp
    0, #sbp_ind
    0, #weight
    0, #weight_ind
    0, #gfr
    0, #GCS
    0, #GCS_ind
    1, #lactate_8ind
    1, #lactate_everind
    0, #po2_8ind
    0, #po2_everind
    0, #pco2_8ind
    0, #pco2_everind
    0, #fio2_everind
    0, #alt_everind
    0, #ast_everind
    0, #bilirubin_total_everind
    5, #last_vaso_1
    5, #last_vaso_2
    5, #last_vaso_3
    5, #last_vaso_4
    5, #last_fluid_1
    5, #last_fluid_2
    5, #last_fluid_3
    5, #total_all_prev_vasos
    5, #total_all_prev_fluids
    5, #total_last_8hrs_vasos
    5, #total_last_8hrs_fluids
    0, #last_reward
    ]
state_weights = np.array(state_weights)
#sqrt because rescale features by this, so weight in squared loss is original wt
state_weights = np.sqrt(state_weights)
#################
# Action discretization / cohort constants; they also appear in the file
# names of the preprocessed artifacts loaded below.
NUM_VASO_BINS = 5
NUM_FLUID_BINS = 4
N_ACTIONS = NUM_VASO_BINS*NUM_FLUID_BINS
TIME_WINDOW = 1
MAP_NUM_BELOW_THRESH = 3

# Load preprocessed per-ICU-stay states, discretized actions, and rewards.
# Fix: the original pickle.load(open(...)) calls never closed their file
# handles; use context managers instead.
with open(MIMIC_DATA_PATH+'model_data/processed_finalfeatures_vaso%d_fluid%d_states_%dhr_%dbpleq65.p' %(
        NUM_VASO_BINS,NUM_FLUID_BINS,int(TIME_WINDOW),MAP_NUM_BELOW_THRESH),'rb') as f:
    states_dat = pickle.load(f)
with open(MIMIC_DATA_PATH+'model_data/actions_discretized_vaso%d_fluid%d_%dhr_%dbpleq65.p'
        %(NUM_VASO_BINS,NUM_FLUID_BINS,int(TIME_WINDOW),MAP_NUM_BELOW_THRESH),'rb') as f:
    actions_dat = pickle.load(f)
with open(MIMIC_DATA_PATH+'model_data/rewards_%dhr_%dbpleq65.p' %(int(TIME_WINDOW),MAP_NUM_BELOW_THRESH),'rb') as f:
    rewards_dat = pickle.load(f)

### print out all weights along with state vars...
final_ICU_IDs = np.array(list(states_dat.keys()))
n_ids_tot = len(final_ICU_IDs)

### train/test split (effectively disabled: tr_perc = 1.0, so every
### patient is in the training set and the permutation code is commented out)
tr_perc = 1.0
n_tr_pat = int(n_ids_tot*tr_perc)
n_te_pat = n_ids_tot - n_tr_pat
seed = 12345
np.random.seed(seed)
tr_ids = final_ICU_IDs
# perm = np.random.permutation(n_ids_tot)
# tr_ids = final_ICU_IDs[perm[:n_tr_pat]]
# te_ids = final_ICU_IDs[perm[n_tr_pat:]]
#TODO: should probably sort to avoid issues later...
### train
# Flatten every training patient's trajectory into three parallel arrays:
# one row per transition, holding the state, the action taken next, and
# the owning patient ID (for mapping rows back to patients later).
all_tr_states = []
all_tr_actions = []
all_tr_ids = [] #tr_id for every single state; to map back and reconstruct later
for ID in tr_ids:
    # drop first column of state_dat (time);
    # drop last row (no action associated; only used for getting final reward)
    # only keep action cols of actions after discretization (2:4)
    # s_dat = np.array(states_dat[ID])[:-1,1:]
    # a_dat = np.array(actions_dat[ID])[:-1,2:4] #use the last action at last time, along with current state in knn
    # sa_dat = np.concatenate([s_dat,np.zeros((s_dat.shape[0],2))],1)
    # sa_dat[:,-2:] = a_dat #lining up [s0,0],[s1,a0],[s2,a1],...,[s_T-1,a_T-2]
    # for 1 hour: sa_dat[:,-2:] = a_dat
    #drop first column of state_dat (time);
    #drop last row (no action associated; only used for getting final reward)
    s_dat = np.array(states_dat[ID])[:-1,1:]
    all_tr_states.append(s_dat)
    # Actions are shifted by one row relative to states (states[:-1] pair
    # with actions[1:]), so each row holds the action taken after its state.
    next_actions = np.array(actions_dat[ID]['OVERALL_ACTION_ID'][1:])
    all_tr_actions.extend(next_actions)
    all_tr_ids.extend([ID]*len(next_actions))
all_tr_states = np.concatenate(all_tr_states,0)
all_tr_actions = np.array(all_tr_actions)
all_tr_ids = np.array(all_tr_ids)
assert all_tr_states.shape[0] == all_tr_actions.shape[0] == all_tr_ids.shape[0]
#normalize the cts columns (besides actions)
# tr_means = np.mean(all_tr_states,0)
# tr_sds = np.std(all_tr_states,0)
###TODO cache these for easy scaling in future
# pickle.dump([tr_means,tr_sds,state_cts_inds_norm],open('./model_data/state_means_sds_ctsinds_%dhr_bpleq65.p' %TIME_WINDOW,'wb'))
# all_tr_states[:,state_cts_inds_norm] = (all_tr_states[:,state_cts_inds_norm]-
#            tr_means[state_cts_inds_norm])/tr_sds[state_cts_inds_norm]
# Scale each feature by its (sqrt'd) weight so squared Euclidean distance
# in kNN applies the original weights.
all_tr_states = all_tr_states*state_weights #reweight
################## ready to run kNN on train!
# Build an Annoy index (Euclidean metric) over the weighted training
# states for fast approximate nearest-neighbor lookup.
n_dim = all_tr_states.shape[1]
n_trees = 500  # number of Annoy trees to build
#built in 6 min, 1 hr, 5/4
t = time()
knn = annoy.AnnoyIndex(n_dim,metric='euclidean')
for i in range(all_tr_states.shape[0]):
    knn.add_item(i, all_tr_states[i,:])
knn.build(n_trees)
print("built in %.1f" %(time()-t))
# For every training state, find its NUM_NN approximate nearest neighbors
# and estimate the behavior policy: the empirical frequency, among those
# neighbors, of the action actually taken at this state — plus the full
# unique-action counts for later use.
t = time()
all_action_probs = []
all_nn_actions_cts = []
NUM_NN = 100 #TODO: tune...??? try a few? 50, 100, 250, 500
for i in range(all_tr_states.shape[0]):
    if i%1000==0:
        print("%d / %d, took %.1f so far" %(i,all_tr_states.shape[0],time()-t))
    # Ask for NUM_NN+1 neighbors and drop the first: the query point itself.
    tmp = np.array(knn.get_nns_by_item(i,NUM_NN+1)[1:]) #exclude yourself. TODO: exclude all from same patient also??
    nn_actions = all_tr_actions[tmp]
    # Fraction of neighbors whose logged action matches this state's action.
    all_action_probs.append(np.mean(nn_actions==all_tr_actions[i]))
    # (unique_action_ids, counts) tuple for this state's neighborhood.
    all_nn_actions_cts.append(np.unique(nn_actions,return_counts=True))
print("matched all in %.1f" %(time()-t)) #
all_action_probs = np.array(all_action_probs)
all_nn_actions_cts = np.array(all_nn_actions_cts)
# Persist the kNN behavior-policy estimates.
# Bug fix: NUM_VASO / NUM_FLUID were never defined (NameError at runtime);
# the constants declared above are NUM_VASO_BINS / NUM_FLUID_BINS. Also
# close the output file deterministically via a context manager.
with open(MIMIC_DATA_PATH+'model_data/all_action_probs_%dnn_vaso%d_fluid%d_%dhr_%dbpleq65.p'
        %(NUM_NN,NUM_VASO_BINS,NUM_FLUID_BINS,TIME_WINDOW,MAP_NUM_BELOW_THRESH),'wb') as f:
    pickle.dump([all_action_probs,all_nn_actions_cts],f)
#######
#### convert all_nn_actions_cts into dict with all beh act probs for all acts...
#### useful if we want to do viz on the full behavior policy, incorporating *all* action probs
#### and not just action probs for the actions actually taken...
#######
all_behprob_acts = {}
# Row ranges of each patient's transitions inside the flat training arrays.
# NOTE(review): np.searchsorted requires all_tr_ids to be sorted; the TODO
# above about sorting suggests confirming this actually holds.
starts = np.searchsorted(all_tr_ids,tr_ids,'left')
ends = np.searchsorted(all_tr_ids,tr_ids,'right')
for i,ID in enumerate(tr_ids):
    # WAT...
    #skip first action for each ID, as this is the action taken at 0, and we only want beh probs for all others
    # this_acts_cts = all_nn_actions_cts[(starts[i]+1):ends[i],:]
    this_acts_cts = all_nn_actions_cts[starts[i]:ends[i],:]
    # One row per transition, one column per discrete action; spread the
    # neighbor counts into probability mass (counts sum to NUM_NN, so each
    # row sums to 1).
    tmp = np.zeros((this_acts_cts.shape[0],N_ACTIONS))
    for ii in range(this_acts_cts.shape[0]):
        tmp[ii,this_acts_cts[ii,0]] += 1/NUM_NN*this_acts_cts[ii,1]
    all_behprob_acts[ID] = tmp
pickle.dump(all_behprob_acts,open(MIMIC_DATA_PATH+'model_data/all_behprobs_allactions_%dnn_vaso%d_fluid%d_%dhr_%dbpleq65.p'
%(NUM_NN,NUM_VASO,NUM_FLUID,TIME_WINDOW,MAP_NUM_BELOW_THRESH),'wb'))
############# write out behavior action probs and use as ground truth
act_probs = pickle.load(open(MIMIC_DATA_PATH+'model_data/all_action_probs_%dnn_vaso%d_fluid%d_%dhr_%dbpleq65.p'
%(NUM_NN,NUM_VASO,NUM_FLUID,TIME_WINDOW,MAP_NUM_BELOW_THRESH),'rb'))
probs = act_probs[0]
# a moderate eps for settings with 0 prob to avoid numeric issues
probs[probs==0] = 0.002 #prev: .01 for 50 nn's
all_acts_with_probs = {}
starts = np.searchsorted(all_tr_ids,final_ICU_IDs,'left')
ends = np.searchsorted(all_tr_ids,final_ICU_IDs,'right')
for ii,ID in enumerate(final_ICU_IDs):
if ii%100==99:
print('%d/%d' %(ii,len(final_ICU_IDs)))
inds = np.arange(starts[ii],ends[ii])
this_probs = probs[inds]
# this_acts = all_tr_actions[inds]
act_dat = actions_dat[ID].iloc[1:,:] #cut first row since
act_dat = act_dat.assign(ACT_PROBS=this_probs)
all_acts_with_probs[ID] = act_dat
pickle.dump(all_acts_with_probs,open(MIMIC_DATA_PATH+'model_data/actions_discretized_withprobs_%dnn_vaso%d_fluid%d_%dhr_%dbpleq65.p'
%(NUM_NN,NUM_VASO,NUM_FLUID,TIME_WINDOW,MAP_NUM_BELOW_THRESH),'wb'))
| [
"numpy.random.seed",
"numpy.unique",
"numpy.zeros",
"numpy.searchsorted",
"time.time",
"numpy.mean",
"numpy.array",
"numpy.arange",
"annoy.AnnoyIndex",
"pandas.set_option",
"os.chdir",
"numpy.concatenate",
"numpy.sqrt"
] | [((512, 553), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(101)'], {}), "('display.max_columns', 101)\n", (525, 553), True, 'import pandas as pd\n'), ((553, 578), 'os.chdir', 'os.chdir', (['MIMIC_DATA_PATH'], {}), '(MIMIC_DATA_PATH)\n', (561, 578), False, 'import os\n'), ((1882, 1905), 'numpy.array', 'np.array', (['state_weights'], {}), '(state_weights)\n', (1890, 1905), True, 'import numpy as np\n'), ((2003, 2025), 'numpy.sqrt', 'np.sqrt', (['state_weights'], {}), '(state_weights)\n', (2010, 2025), True, 'import numpy as np\n'), ((2947, 2967), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2961, 2967), True, 'import numpy as np\n'), ((4200, 4232), 'numpy.concatenate', 'np.concatenate', (['all_tr_states', '(0)'], {}), '(all_tr_states, 0)\n', (4214, 4232), True, 'import numpy as np\n'), ((4249, 4273), 'numpy.array', 'np.array', (['all_tr_actions'], {}), '(all_tr_actions)\n', (4257, 4273), True, 'import numpy as np\n'), ((4287, 4307), 'numpy.array', 'np.array', (['all_tr_ids'], {}), '(all_tr_ids)\n', (4295, 4307), True, 'import numpy as np\n'), ((5014, 5020), 'time.time', 'time', ([], {}), '()\n', (5018, 5020), False, 'from time import time\n'), ((5028, 5071), 'annoy.AnnoyIndex', 'annoy.AnnoyIndex', (['n_dim'], {'metric': '"""euclidean"""'}), "(n_dim, metric='euclidean')\n", (5044, 5071), False, 'import annoy\n'), ((5210, 5216), 'time.time', 'time', ([], {}), '()\n', (5214, 5216), False, 'from time import time\n'), ((5800, 5826), 'numpy.array', 'np.array', (['all_action_probs'], {}), '(all_action_probs)\n', (5808, 5826), True, 'import numpy as np\n'), ((5848, 5876), 'numpy.array', 'np.array', (['all_nn_actions_cts'], {}), '(all_nn_actions_cts)\n', (5856, 5876), True, 'import numpy as np\n'), ((6381, 6424), 'numpy.searchsorted', 'np.searchsorted', (['all_tr_ids', 'tr_ids', '"""left"""'], {}), "(all_tr_ids, tr_ids, 'left')\n", (6396, 6424), True, 'import numpy as np\n'), ((6430, 6474), 'numpy.searchsorted', 
'np.searchsorted', (['all_tr_ids', 'tr_ids', '"""right"""'], {}), "(all_tr_ids, tr_ids, 'right')\n", (6445, 6474), True, 'import numpy as np\n'), ((7551, 7601), 'numpy.searchsorted', 'np.searchsorted', (['all_tr_ids', 'final_ICU_IDs', '"""left"""'], {}), "(all_tr_ids, final_ICU_IDs, 'left')\n", (7566, 7601), True, 'import numpy as np\n'), ((7607, 7658), 'numpy.searchsorted', 'np.searchsorted', (['all_tr_ids', 'final_ICU_IDs', '"""right"""'], {}), "(all_tr_ids, final_ICU_IDs, 'right')\n", (7622, 7658), True, 'import numpy as np\n'), ((4051, 4101), 'numpy.array', 'np.array', (["actions_dat[ID]['OVERALL_ACTION_ID'][1:]"], {}), "(actions_dat[ID]['OVERALL_ACTION_ID'][1:])\n", (4059, 4101), True, 'import numpy as np\n'), ((6753, 6798), 'numpy.zeros', 'np.zeros', (['(this_acts_cts.shape[0], N_ACTIONS)'], {}), '((this_acts_cts.shape[0], N_ACTIONS))\n', (6761, 6798), True, 'import numpy as np\n'), ((7764, 7795), 'numpy.arange', 'np.arange', (['starts[ii]', 'ends[ii]'], {}), '(starts[ii], ends[ii])\n', (7773, 7795), True, 'import numpy as np\n'), ((3972, 3996), 'numpy.array', 'np.array', (['states_dat[ID]'], {}), '(states_dat[ID])\n', (3980, 3996), True, 'import numpy as np\n'), ((5627, 5667), 'numpy.mean', 'np.mean', (['(nn_actions == all_tr_actions[i])'], {}), '(nn_actions == all_tr_actions[i])\n', (5634, 5667), True, 'import numpy as np\n'), ((5694, 5735), 'numpy.unique', 'np.unique', (['nn_actions'], {'return_counts': '(True)'}), '(nn_actions, return_counts=True)\n', (5703, 5735), True, 'import numpy as np\n'), ((5192, 5198), 'time.time', 'time', ([], {}), '()\n', (5196, 5198), False, 'from time import time\n'), ((5767, 5773), 'time.time', 'time', ([], {}), '()\n', (5771, 5773), False, 'from time import time\n'), ((5442, 5448), 'time.time', 'time', ([], {}), '()\n', (5446, 5448), False, 'from time import time\n')] |
#!/usr/bin/env python
"""
Module containing the main input class
"""
import glob
import inspect
import json
import os
from importlib import import_module
import numpy as np
import pandas as pd
from gwosc.datasets import event_gps
import bilby
from . import utils
from .utils import (
SAMPLER_SETTINGS,
BilbyPipeError,
BilbyPipeInternalError,
convert_string_to_dict,
convert_string_to_list,
get_colored_string,
get_function_from_string_path,
get_time_prior,
logger,
pretty_print_dictionary,
)
class Input(object):
""" Superclass of input handlers """
    @property
    def complete_ini_file(self):
        # Path of the fully-resolved ini file written into the run outdir.
        return f"{self.outdir}/{self.label}_config_complete.ini"
    @property
    def idx(self):
        """ The level A job index """
        return self._idx
    @idx.setter
    def idx(self, idx):
        self._idx = idx
    @property
    def known_detectors(self):
        """ Detector names for which bilby ships an interferometer file """
        # Names derive from the files in bilby's detectors data directory,
        # e.g. "H1.interferometer" -> "H1".
        dirs = os.path.join(os.path.dirname(bilby.gw.detector.__file__), "detectors")
        known_files = glob.glob(os.path.join(dirs, "*"))
        return [os.path.basename(kf).split(".")[0] for kf in known_files]
    @property
    def detectors(self):
        """ A list of the detectors to include, e.g., ['H1', 'L1'] """
        return self._detectors
    @detectors.setter
    def detectors(self, detectors):
        # Normalise user input (string or list) then warn about any detector
        # name bilby does not recognise.
        self._detectors = utils.convert_detectors_input(detectors)
        self._check_detectors_against_known_detectors()
    def _check_detectors_against_known_detectors(self):
        """ Warn (do not raise) for detectors unknown to bilby """
        for element in self.detectors:
            if element not in self.known_detectors:
                msg = (
                    'Argument detectors contains "{}" not in the known '
                    "detectors list: {} ".format(element, self.known_detectors)
                    + ". This will likely fail at the data generation step"
                )
                logger.warning(get_colored_string(msg))
@staticmethod
def _split_string_by_space(string):
""" Converts "H1 L1" to ["H1", "L1"] """
return string.split(" ")
@staticmethod
def _convert_string_to_list(string):
""" Converts various strings to a list """
string = string.replace(",", " ")
string = string.replace("[", "")
string = string.replace("]", "")
string = string.replace('"', "")
string = string.replace("'", "")
string_list = string.split()
return string_list
    @property
    def outdir(self):
        """ The path to the directory where output will be stored """
        # Created on first access if it does not already exist.
        utils.check_directory_exists_and_if_not_mkdir(self._outdir)
        return self._outdir
    @outdir.setter
    def outdir(self, outdir):
        # '.' is disallowed; the outdir is stored as a relative path.
        if outdir == ".":
            raise BilbyPipeError("Unable to use '.' as an outdir")
        self._outdir = os.path.relpath(outdir)
    @property
    def submit_directory(self):
        """ The path to the directory where submit output will be stored """
        path = os.path.join(self._outdir, "submit")
        utils.check_directory_exists_and_if_not_mkdir(path)
        return path
    @property
    def log_directory(self):
        """ The top-level directory for the log directories """
        utils.check_directory_exists_and_if_not_mkdir(self._log_directory)
        return self._log_directory
    @log_directory.setter
    def log_directory(self, log_directory):
        # Fall back to the run outdir when no explicit log directory given.
        if log_directory is None:
            self._log_directory = self._outdir
        else:
            self._log_directory = log_directory
    @property
    def data_generation_log_directory(self):
        """ The path to the directory where generation logs will be stored """
        path = os.path.join(self.log_directory, "log_data_generation")
        utils.check_directory_exists_and_if_not_mkdir(path)
        return path
    @property
    def data_analysis_log_directory(self):
        """ The path to the directory where analysis logs will be stored """
        path = os.path.join(self.log_directory, "log_data_analysis")
        utils.check_directory_exists_and_if_not_mkdir(path)
        return path
    @property
    def summary_log_directory(self):
        """ The path to the directory where pesummary logs will be stored """
        path = os.path.join(self.log_directory, "log_results_page")
        utils.check_directory_exists_and_if_not_mkdir(path)
        return path
    @property
    def data_directory(self):
        """ The path to the directory where data output will be stored """
        path = os.path.join(self._outdir, "data")
        utils.check_directory_exists_and_if_not_mkdir(path)
        return path
    @property
    def result_directory(self):
        """ The path to the directory where result output will be stored """
        path = os.path.join(self._outdir, "result")
        utils.check_directory_exists_and_if_not_mkdir(path)
        return path
    @property
    def webdir(self):
        """ The directory in which the results pages are built """
        utils.check_directory_exists_and_if_not_mkdir(self._webdir)
        return self._webdir
    @webdir.setter
    def webdir(self, webdir):
        # Default the web directory to <outdir>/results_page.
        if webdir is None:
            self._webdir = os.path.join(self.outdir, "results_page")
        else:
            self._webdir = webdir
    @property
    def gps_file(self):
        """ The gps file containing the list of gps times """
        return self._gps_file
    @gps_file.setter
    def gps_file(self, gps_file):
        """Set the gps_file
        At setting, will check the file exists, read the contents, identify
        which element to generate data for, and create the interferometers.
        """
        if gps_file is None:
            self._gps_file = None
            return
        elif os.path.isfile(gps_file):
            self._gps_file = os.path.relpath(gps_file)
        else:
            raise FileNotFoundError(f"Input file gps_file={gps_file} not understood")
        self._parse_gps_file()
    def _parse_gps_file(self):
        """ Read the gps file and cache the start times on the instance """
        gpstimes = self.read_gps_file()
        n = len(gpstimes)
        logger.info(f"{n} start times found in gps_file={self.gps_file}")
        self.gpstimes = gpstimes
    def read_gps_file(self):
        """ Read the gps file; returns a 1D array of segment start times """
        # ndmin=2 forces a 2D array even for a single-column/row file, so
        # the ndim > 1 branch below is always taken and column 0 selected.
        gpstimes = np.loadtxt(self.gps_file, ndmin=2, delimiter=",")
        if gpstimes.ndim > 1:
            logger.info(f"Reading column 0 from gps_file={self.gps_file}")
            gpstimes = gpstimes[:, 0]
        return gpstimes
    @property
    def timeslide_file(self):
        """Timeslide file.
        Timeslide file containing the list of timeslides to apply to each
        detector's start time.
        """
        return self._timeslide_file
    @timeslide_file.setter
    def timeslide_file(self, timeslide_file):
        """Set the timeslide_file.
        At setting, will check the file exists, read the contents,
        save the timeslide value for each of the detectors.
        """
        if timeslide_file is None:
            self._timeslide_file = None
            return
        elif os.path.isfile(timeslide_file):
            self._timeslide_file = os.path.relpath(timeslide_file)
        else:
            raise FileNotFoundError(
                f"Input file timeslide_file={timeslide_file} not understood"
            )
        # NOTE(review): this checks for the attribute "_timeslide_file",
        # which was just assigned above and so is always present; the log
        # message suggests "_parse_timeslide_file" was intended — confirm.
        if hasattr(self, "_timeslide_file"):
            self._parse_timeslide_file()
        else:
            logger.debug("No _parse_timeslide_file method present")
    def read_timeslide_file(self):
        """Read timeslide file.
        Each row of file is an array, hence ndmin = 2
        [ [timshift1,...], [], [] ...]
        """
        timeslides_list = np.loadtxt(self.timeslide_file, ndmin=2)
        return timeslides_list
    def _parse_timeslide_file(self):
        """Parse the timeslide file and check for correctness.
        Sets the attribute "timeslides" if timeslide file correctly formatted
        and passed to Inputs()
        """
        timeslides_list = self.read_timeslide_file()
        number_rows, number_columns = timeslides_list.shape
        # One column per detector, one row per gps time.
        if number_columns != len(self.detectors):
            raise BilbyPipeError(
                "The timeslide file must have one column for each of the detectors. "
                "Number Cols: {}, Number Detectors: {}".format(
                    number_columns, len(self.detectors)
                )
            )
        if number_rows != len(self.gpstimes):
            raise BilbyPipeError(
                "The timeslide file must have one row for each gps time. "
                "Number Rows: {}, Number Gps Times: {}".format(
                    number_rows, len(self.gpstimes)
                )
            )
        # Split column-wise into one 1D array of shifts per detector.
        times = np.hsplit(timeslides_list, len(self.detectors))
        self.timeslides = {}
        for i in range(len(self.detectors)):
            self.timeslides.update({self.detectors[i]: times[i].flatten()})
        logger.info(
            f"{number_rows} timeslides found in timeslide_file={self.timeslide_file}"
        )
    def get_timeslide_dict(self, idx):
        """Return a specific timeslide value from the timeslide file.
        Given an index, the dict of {detector: timeslide value} is created for
        the specific index and returned.
        """
        if not hasattr(self, "timeslides"):
            raise BilbyPipeError("Timeslide file must be provided.")
        if any(len(t) <= idx for t in self.timeslides.values()):
            raise BilbyPipeError(
                f"Timeslide index={idx} > number of timeslides available."
            )
        timeslide_val = {
            det: timeslide[idx] for det, timeslide in self.timeslides.items()
        }
        logger.info(f"Timeslide value: {timeslide_val}")
        return timeslide_val
    @property
    def bilby_frequency_domain_source_model(self):
        """
        The bilby function to pass to the waveform_generator
        This can be a function defined in an external package.
        """
        if self.frequency_domain_source_model in bilby.gw.source.__dict__.keys():
            model = self._frequency_domain_source_model
            logger.info(f"Using the {model} source model")
            return bilby.gw.source.__dict__[model]
        elif "." in self.frequency_domain_source_model:
            # A dotted name is treated as an import path, e.g. "pkg.mod.func".
            return get_function_from_string_path(self._frequency_domain_source_model)
        else:
            raise BilbyPipeError(
                f"No source model {self._frequency_domain_source_model} found."
            )
    @property
    def reference_frequency(self):
        # Reference frequency (Hz) passed to the waveform arguments.
        return self._reference_frequency
    @reference_frequency.setter
    def reference_frequency(self, reference_frequency):
        self._reference_frequency = float(reference_frequency)
    @property
    def mode_array(self):
        # A list of [l, m] mode pairs, or None when unset.
        return self._mode_array
    @mode_array.setter
    def mode_array(self, mode_array):
        # Pre sanitize the mode array
        if mode_array == [None]:
            mode_array = None
        if isinstance(mode_array, list):
            # Hack because configargparse splits the mode_array
            mode_array = ",".join(mode_array)
        if mode_array is not None:
            self._mode_array = convert_string_to_list(mode_array)
        else:
            logger.debug("mode_array not set")
            self._mode_array = None
        # Ensure it is a list of lists
        # (np.array(None).ndim == 0, so a None mode_array skips all checks)
        if np.array(self._mode_array).ndim == 1:
            self._mode_array = [self._mode_array]
        if np.array(self._mode_array).ndim == 2:
            for mode in self._mode_array:
                if len(mode) != 2:
                    raise BilbyPipeError(f"mode_array {self._mode_array} is invalid")
        if np.array(self._mode_array).ndim > 2:
            raise BilbyPipeError(f"mode_array {self._mode_array} is invalid")
    def get_default_waveform_arguments(self):
        """ Build the common waveform-argument dict from the input settings """
        wfa = dict(
            reference_frequency=self.reference_frequency,
            waveform_approximant=self.waveform_approximant,
            minimum_frequency=self.minimum_frequency,
            maximum_frequency=self.maximum_frequency,
            catch_waveform_errors=self.catch_waveform_errors,
            pn_spin_order=self.pn_spin_order,
            pn_tidal_order=self.pn_tidal_order,
            pn_phase_order=self.pn_phase_order,
            pn_amplitude_order=self.pn_amplitude_order,
            mode_array=self.mode_array,
        )
        # User-supplied waveform arguments override the defaults above.
        if self.waveform_arguments_dict is not None:
            wfa.update(convert_string_to_dict(self.waveform_arguments_dict))
        logger.debug(f"Default waveform_arguments: {pretty_print_dictionary(wfa)}")
        return wfa
    def get_injection_waveform_arguments(self):
        """Get the dict of the waveform arguments needed for creating injections.
        Defaults the injection-waveform-approximant to waveform-approximant, if
        no injection-waveform-approximant provided. Note that the default
        waveform-approximant is `IMRPhenomPv2`.
        """
        if self.injection_waveform_approximant is None:
            self.injection_waveform_approximant = self.waveform_approximant
        waveform_arguments = self.get_default_waveform_arguments()
        waveform_arguments["waveform_approximant"] = self.injection_waveform_approximant
        waveform_arguments["numerical_relativity_file"] = self.numerical_relativity_file
        return waveform_arguments
    @property
    def bilby_roq_frequency_domain_source_model(self):
        """ The ROQ variant of the frequency-domain source model """
        if "binary_neutron_star" in self.frequency_domain_source_model:
            logger.info("Using the binary_neutron_star_roq source model")
            return bilby.gw.source.binary_neutron_star_roq
        elif "binary_black_hole" in self.frequency_domain_source_model:
            logger.info("Using the binary_black_hole_roq source model")
            return bilby.gw.source.binary_black_hole_roq
        else:
            raise BilbyPipeError("Unable to determine roq_source from source model")
    @property
    def frequency_domain_source_model(self):
        """ String of which frequency domain source model to use """
        return self._frequency_domain_source_model
    @frequency_domain_source_model.setter
    def frequency_domain_source_model(self, frequency_domain_source_model):
        self._frequency_domain_source_model = frequency_domain_source_model
    @property
    def trigger_time(self):
        return self._trigger_time
    @trigger_time.setter
    def trigger_time(self, trigger_time):
        # Convert trigger time
        if trigger_time is None:
            logger.debug("No trigger time given")
        elif isinstance(trigger_time, str) and "GW" in trigger_time:
            # Event names like "GW150914" are resolved to a gps time via gwosc.
            logger.info(f"Using gwosc to find trigger time for event {trigger_time}")
            trigger_time = event_gps(trigger_time)
        else:
            trigger_time = float(trigger_time)
        self._trigger_time = trigger_time
        if trigger_time is not None:
            logger.info(f"Setting trigger time {trigger_time}")
    @property
    def start_time(self):
        """ Segment start time; derived from the trigger time if not set """
        if hasattr(self, "_start_time"):
            self._verify_start_time(self._start_time)
            return self._start_time
        try:
            # NOTE(review): only AttributeError is caught here; a None
            # trigger_time would raise TypeError instead — confirm intended.
            self._start_time = (
                self.trigger_time + self.post_trigger_duration - self.duration
            )
            return self._start_time
        except AttributeError:
            logger.warning("Unable to calculate default segment start time")
            return None
    def _verify_start_time(self, start_time):
        """ Raise if start_time disagrees with the trigger-time-derived value """
        try:
            inferred_start_time = (
                self.trigger_time + self.post_trigger_duration - self.duration
            )
        except AttributeError:
            logger.warning("Unable to verify start-time consistency")
            return
        if inferred_start_time != start_time:
            raise BilbyPipeError("Unexpected behaviour encountered with start time")
    @start_time.setter
    def start_time(self, start_time):
        self._verify_start_time(start_time)
        self._start_time = start_time
        if start_time is not None:
            logger.info(f"Setting segment start time {start_time}")
    @property
    def duration(self):
        # Segment duration in seconds.
        return self._duration
    @duration.setter
    def duration(self, duration):
        self._duration = duration
        if duration is not None:
            logger.info(f"Setting segment duration {duration}s")
    @property
    def injection_numbers(self):
        """ The subset of injection rows to use, or None for all """
        if hasattr(self, "_injection_numbers"):
            return self._injection_numbers
        else:
            raise BilbyPipeInternalError("Injection numbers requested, but not yet set")
    @injection_numbers.setter
    def injection_numbers(self, injection_numbers):
        # Accept the many "unset" spellings produced by the ini parser
        # (None, [], "None", ["None"], [None]) and normalise them to None.
        if (
            injection_numbers is None
            or len(injection_numbers) == 0
            or injection_numbers == "None"
            or injection_numbers[0] == "None"
            or injection_numbers[0] is None
        ):
            self._injection_numbers = None
        elif all(
            i is not None
            and not isinstance(i, float)
            and utils.check_if_represents_int(i)
            for i in injection_numbers
        ):
            self._injection_numbers = [int(i) for i in injection_numbers]
        else:
            raise BilbyPipeError(f"Invalid injection numbers {injection_numbers}")
    @property
    def injection_df(self):
        """ The injection parameters as a pandas DataFrame """
        return self._injection_df
    @injection_df.setter
    def injection_df(self, injection_df):
        if isinstance(injection_df, pd.DataFrame) is False:
            raise BilbyPipeError("Setting injection df with non-pandas DataFrame")
        elif self.injection_numbers is not None:
            # Keep only the requested rows of the injection set.
            logger.info(
                f"Truncating injection injection df to rows {self.injection_numbers}"
            )
            try:
                self._injection_df = injection_df.iloc[self.injection_numbers]
            except IndexError:
                raise BilbyPipeError(
                    "Your injection_numbers are incompatible with the injection set"
                )
        else:
            self._injection_df = injection_df
    @property
    def injection_file(self):
        return self._injection_file
    @injection_file.setter
    def injection_file(self, injection_file):
        # Setting a valid file also populates injection_df and flags
        # that injections are in use.
        if injection_file is None:
            logger.debug("No injection file set")
            self._injection_file = None
        elif os.path.isfile(injection_file):
            self._injection_file = os.path.relpath(injection_file)
            self.injection_df = self.read_injection_file(injection_file)
            self.total_number_of_injections = len(self.injection_df)
            self.injection = True
        else:
            raise FileNotFoundError(f"Injection file {injection_file} not found")
    @property
    def injection_dict(self):
        return self._injection_dict
    @injection_dict.setter
    def injection_dict(self, injection_dict):
        # A single injection given inline (string or dict) rather than a file.
        if injection_dict is None:
            self._injection_dict = None
            return
        elif isinstance(injection_dict, str):
            self._injection_dict = convert_string_to_dict(injection_dict)
        elif isinstance(injection_dict, dict):
            self._injection_dict = injection_dict
        else:
            raise BilbyPipeError("injection-dict can not be coerced to a dict")
        self.injection_df = pd.DataFrame(self._injection_dict, index=[0])
        self.total_number_of_injections = 1
        self.injection = True
@staticmethod
def read_injection_file(injection_file):
if "json" in injection_file:
return Input.read_json_injection_file(injection_file)
elif "dat" in injection_file:
return Input.read_dat_injection_file(injection_file)
    @staticmethod
    def read_json_injection_file(injection_file):
        """ Read a bilby-json injection file into a DataFrame """
        with open(injection_file, "r") as file:
            injection_dict = json.load(
                file, object_hook=bilby.core.utils.decode_bilby_json
            )
        injection_df = injection_dict["injections"]
        try:
            injection_df = pd.DataFrame(injection_df)
        except ValueError:
            # If injection_df is a dictionary of single elements, set the index-array in pandas
            injection_df = pd.DataFrame(injection_df, index=[0])
        return injection_df
    @staticmethod
    def read_dat_injection_file(injection_file):
        """ Read a whitespace-delimited injection file into a DataFrame """
        return pd.read_csv(injection_file, delim_whitespace=True)
    @property
    def spline_calibration_envelope_dict(self):
        # Per-detector paths to calibration envelope files, or None.
        return self._spline_calibration_envelope_dict
    @spline_calibration_envelope_dict.setter
    def spline_calibration_envelope_dict(self, spline_calibration_envelope_dict):
        if spline_calibration_envelope_dict is not None:
            self._spline_calibration_envelope_dict = convert_string_to_dict(
                spline_calibration_envelope_dict, "spline-calibration-envelope-dict"
            )
        else:
            logger.debug("spline_calibration_envelope_dict")
            self._spline_calibration_envelope_dict = None
    @property
    def spline_calibration_amplitude_uncertainty_dict(self):
        return self._spline_calibration_amplitude_uncertainty_dict
    @spline_calibration_amplitude_uncertainty_dict.setter
    def spline_calibration_amplitude_uncertainty_dict(
        self, spline_calibration_amplitude_uncertainty_dict
    ):
        if spline_calibration_amplitude_uncertainty_dict is not None:
            self._spline_calibration_amplitude_uncertainty_dict = (
                convert_string_to_dict(
                    spline_calibration_amplitude_uncertainty_dict,
                    "spline-calibration-amplitude-uncertainty-dict",
                )
            )
        else:
            logger.debug("spline_calibration_amplitude_uncertainty_dict")
            self._spline_calibration_amplitude_uncertainty_dict = None
    @property
    def spline_calibration_phase_uncertainty_dict(self):
        return self._spline_calibration_phase_uncertainty_dict
    @spline_calibration_phase_uncertainty_dict.setter
    def spline_calibration_phase_uncertainty_dict(
        self, spline_calibration_phase_uncertainty_dict
    ):
        if spline_calibration_phase_uncertainty_dict is not None:
            self._spline_calibration_phase_uncertainty_dict = convert_string_to_dict(
                spline_calibration_phase_uncertainty_dict,
                "spline-calibration-phase-uncertainty-dict",
            )
        else:
            logger.debug("spline_calibration_phase_uncertainty_dict")
            self._spline_calibration_phase_uncertainty_dict = None
    @property
    def minimum_frequency(self):
        """The minimum frequency
        If a per-detector dictionary is given, this will return the minimum
        frequency value. To access the dictionary,
        see self.minimum_frequency_dict
        """
        return self._minimum_frequency
    @minimum_frequency.setter
    def minimum_frequency(self, minimum_frequency):
        # Accept either a single number (applied to every detector) or a
        # per-detector dict encoded as a string.
        if minimum_frequency is None:
            self._minimum_frequency = None
            self.minimum_frequency_dict = {det: None for det in self.detectors}
        else:
            try:
                self._minimum_frequency = float(minimum_frequency)
                self.minimum_frequency_dict = {
                    det: float(minimum_frequency) for det in self.detectors
                }
            except ValueError:
                # Not a plain number: parse as a per-detector dict and take
                # the smallest value as the scalar minimum_frequency.
                self.minimum_frequency_dict = convert_string_to_dict(
                    minimum_frequency, "minimum-frequency"
                )
                self._minimum_frequency = np.min(
                    [xx for xx in self._minimum_frequency_dict.values()]
                ).item()
    @property
    def minimum_frequency_dict(self):
        return self._minimum_frequency_dict
    @minimum_frequency_dict.setter
    def minimum_frequency_dict(self, minimum_frequency_dict):
        self.test_frequency_dict(frequency_dict=minimum_frequency_dict, label="minimum")
        self._minimum_frequency_dict = minimum_frequency_dict
    @property
    def maximum_frequency(self):
        """The maximum frequency
        If a per-detector dictionary is given, this will return the maximum
        frequency value. To access the dictionary,
        see self.maximum_frequency_dict
        """
        return self._maximum_frequency
    @maximum_frequency.setter
    def maximum_frequency(self, maximum_frequency):
        # Mirrors minimum_frequency, but defaults to the Nyquist frequency
        # and takes the largest per-detector value as the scalar.
        if maximum_frequency is None:
            self._maximum_frequency = self.sampling_frequency / 2
            self.maximum_frequency_dict = {
                det: self._maximum_frequency for det in self.detectors
            }
            logger.info(
                "No maximum frequency given. "
                "Setting to sampling frequency / 2 = {}".format(self._maximum_frequency)
            )
        else:
            try:
                self._maximum_frequency = float(maximum_frequency)
                self.maximum_frequency_dict = {
                    det: float(maximum_frequency) for det in self.detectors
                }
            except ValueError:
                self.maximum_frequency_dict = convert_string_to_dict(
                    maximum_frequency, "maximum-frequency"
                )
                self._maximum_frequency = np.max(
                    [xx for xx in self._maximum_frequency_dict.values()]
                ).item()
    @property
    def maximum_frequency_dict(self):
        return self._maximum_frequency_dict
    @maximum_frequency_dict.setter
    def maximum_frequency_dict(self, maximum_frequency_dict):
        self.test_frequency_dict(frequency_dict=maximum_frequency_dict, label="maximum")
        self._maximum_frequency_dict = maximum_frequency_dict
def test_frequency_dict(self, frequency_dict, label=""):
for det in self.detectors:
if det not in frequency_dict.keys():
raise BilbyPipeError(
f"Input {label} frequency required for detector {det}"
)
return frequency_dict
    @property
    def default_prior_files(self):
        """ Dict of bundled default prior names -> file paths """
        return self.get_default_prior_files()
@staticmethod
def get_default_prior_files():
""" Returns a dictionary of the default priors """
prior_files_glob = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data_files/*prior"
)
filenames = glob.glob(prior_files_glob)
return {os.path.basename(ff).rstrip(".prior"): ff for ff in filenames}
    def get_distance_file_lookup_table(self, prior_file_str):
        """ Path of the distance-marginalization lookup table for a default prior """
        direc = os.path.dirname(self.default_prior_files[prior_file_str])
        fname = f"{prior_file_str}_distance_marginalization_lookup.npz"
        return os.path.join(direc, fname)
    @property
    def prior_file(self):
        return self._prior_file
    @prior_file.setter
    def prior_file(self, prior_file):
        # Resolution order: None -> literal path -> basename in the working
        # directory (file-transfer mechanism) -> bundled default prior name.
        if prior_file is None:
            self._prior_file = None
        elif os.path.isfile(prior_file):
            self._prior_file = prior_file
        elif os.path.isfile(os.path.basename(prior_file)):
            # Allows for the prior-file to be moved to the local directory (file-transfer mechanism)
            self._prior_file = os.path.basename(prior_file)
        elif prior_file in self.default_prior_files:
            self._prior_file = self.default_prior_files[prior_file]
            # Default priors ship a matching distance-marginalization table.
            self.distance_marginalization_lookup_table = (
                self.get_distance_file_lookup_table(prior_file)
            )
        else:
            raise FileNotFoundError(f"No prior file {prior_file} available")
        logger.info(f"Setting prior-file to {self._prior_file}")
    @property
    def prior_dict(self):
        """The input prior_dict from the ini (if given)
        Note, this is not the bilby prior (see self.priors for that), this is
        a key-val dictionary where the val's are strings which are converting
        into bilby priors in `_get_prior
        """
        return self._prior_dict
    @prior_dict.setter
    def prior_dict(self, prior_dict):
        if isinstance(prior_dict, dict):
            prior_dict = prior_dict
        elif isinstance(prior_dict, str):
            prior_dict = utils.convert_prior_string_input(prior_dict)
        elif prior_dict is None:
            self._prior_dict = None
            return
        else:
            raise BilbyPipeError(f"prior_dict={prior_dict} not understood")
        # Normalise ini-style dashed keys (mass-1) to underscore form (mass_1).
        self._prior_dict = {
            self._convert_prior_dict_key(key): val for key, val in prior_dict.items()
        }
@staticmethod
def _convert_prior_dict_key(key):
"""Converts the prior dict key to standard form
In the ini read, mass_1 -> mass-1, this corrects for that
"""
if "-" in key:
key_replaced = key.replace("-", "_")
logger.debug(f"Converting prior-dict key {key} to {key_replaced}")
key = key_replaced
return key
    @property
    def distance_marginalization_lookup_table(self):
        return self._distance_marginalization_lookup_table
    @distance_marginalization_lookup_table.setter
    def distance_marginalization_lookup_table(
        self, distance_marginalization_lookup_table
    ):
        # A None value never clobbers an existing table (which may have been
        # set implicitly by choosing a default prior file).
        if distance_marginalization_lookup_table is None:
            if hasattr(self, "_distance_marginalization_lookup_table"):
                pass
            else:
                self._distance_marginalization_lookup_table = None
        else:
            if hasattr(self, "_distance_marginalization_lookup_table"):
                logger.info("Overwriting distance_marginalization_lookup_table")
            self._distance_marginalization_lookup_table = (
                distance_marginalization_lookup_table
            )
    @property
    def default_prior(self):
        # Name of the default PriorDict class to use, or None.
        return getattr(self, "_default_prior", None)
    @default_prior.setter
    def default_prior(self, default_prior):
        self._default_prior = default_prior
    @property
    def combined_default_prior_dicts(self):
        """ All PriorDict classes known to bilby (core and gw) """
        d = bilby.core.prior.__dict__.copy()
        d.update(bilby.gw.prior.__dict__)
        return d
    @property
    def time_parameter(self):
        # e.g. "geocent_time" or "H1_time", depending on time_reference.
        return f"{self.time_reference}_time"
    def create_time_prior(self):
        """ Build the time prior from trigger_time +/- deltaT/2 """
        cond_a = getattr(self, "trigger_time", None) is not None
        cond_b = getattr(self, "deltaT", None) is not None
        if cond_a and cond_b:
            logger.debug(
                "Setting geocent time prior using trigger-time={} and deltaT={}".format(
                    self.trigger_time, self.deltaT
                )
            )
            if self.time_reference == "geocent":
                latex_label = "$t_c$"
            else:
                # First letter of the reference detector, e.g. "$t_H$".
                latex_label = f"$t_{self.time_reference[0]}$"
            time_prior = get_time_prior(
                time=self.trigger_time,
                uncertainty=self.deltaT / 2.0,
                name=self.time_parameter,
                latex_label=latex_label,
            )
        else:
            raise BilbyPipeError("Unable to set geocent_time prior from trigger_time")
        return time_prior
    @property
    def priors(self):
        """ Read in and compose the prior at run-time """
        # Lazily constructed and cached on first access.
        if getattr(self, "_priors", None) is None:
            self._priors = self._get_priors()
        return self._priors
    @priors.setter
    def priors(self, priors):
        self._priors = priors
    def _get_priors(self, add_time=True):
        """Construct the priors
        Parameters
        ----------
        add_time: bool
            If True, the time prior is constructed from either the
            prior file or the trigger time. If False (used for the overview
            page where a single time-prior doesn't make sense), this isn't
            added to the prior
        Returns
        -------
        prior: bilby.core.prior.PriorDict
            The generated prior
        """
        if self.default_prior in self.combined_default_prior_dicts.keys():
            prior_class = self.combined_default_prior_dicts[self.default_prior]
            # An inline prior-dict takes precedence over a prior file.
            if self.prior_dict is not None:
                priors = prior_class(dictionary=self.prior_dict)
            else:
                priors = prior_class(filename=self.prior_file)
        else:
            raise ValueError("Unable to set prior: default_prior unavailable")
        priors = self._update_default_prior_to_sky_frame_parameters(priors)
        if self.time_parameter in priors:
            logger.debug(f"Using {self.time_parameter} prior from prior_file")
        elif add_time:
            priors[self.time_parameter] = self.create_time_prior()
        else:
            logger.debug("No time prior available or requested")
        if self.calibration_model is not None:
            priors.update(self.calibration_prior)
        return priors
def _get_default_sky_priors(self):
return bilby.core.prior.PriorDict(
dict(
dec=bilby.core.prior.Cosine(name="dec"),
ra=bilby.core.prior.Uniform(
name="ra", minimum=0, maximum=2 * np.pi, boundary="periodic"
),
)
)
def _priors_contains_default_sky_prior(self, priors):
sky_priors = self._get_default_sky_priors()
for key in sky_priors:
if sky_priors[key] != priors.get(key, None):
return False
return True
    def _update_default_prior_to_sky_frame_parameters(self, priors):
        # When the user kept the default ra/dec sky prior but asked for a
        # non-"sky" reference frame, re-express the sky location in the
        # detector frame: drop ra/dec and add azimuth/zenith priors instead.
        if (
            self._priors_contains_default_sky_prior(priors)
            and self.reference_frame != "sky"
        ):
            if "ra" in priors:
                del priors["ra"]
            if "dec" in priors:
                del priors["dec"]
            if "azimuth" not in priors:
                priors["azimuth"] = bilby.core.prior.Uniform(
                    minimum=0,
                    maximum=2 * np.pi,
                    latex_label="$\\epsilon$",
                    boundary="periodic",
                )
            if "zenith" not in priors:
                priors["zenith"] = bilby.core.prior.Sine(latex_label="$\\kappa$")
        return priors
    @property
    def calibration_model(self):
        # Name of the calibration marginalization model; None disables it.
        return getattr(self, "_calibration_model", None)
    @calibration_model.setter
    def calibration_model(self, calibration_model):
        # Log whether calibration marginalization will be applied.
        if calibration_model is not None:
            logger.info(f"Setting calibration_model={calibration_model}")
            self._calibration_model = calibration_model
        else:
            logger.info(
                "No calibration_model model provided, calibration "
                "marginalization will not be used"
            )
            self._calibration_model = None
    @property
    def calibration_prior(self):
        # Build (once, then cache) the calibration spline prior per detector.
        # Returns None when no calibration model is configured.
        if self.calibration_model is None:
            return None
        if getattr(self, "_calibration_prior", None) is not None:
            return self._calibration_prior
        self._calibration_prior = bilby.core.prior.PriorDict()
        if self.calibration_model is not None:
            for det in self.detectors:
                # Prefer a measured envelope file; fall back to constant
                # amplitude/phase uncertainties; otherwise warn and skip.
                if det in self.spline_calibration_envelope_dict:
                    logger.info(
                        "Creating calibration prior for {} from {}".format(
                            det, self.spline_calibration_envelope_dict[det]
                        )
                    )
                    self._calibration_prior.update(
                        bilby.gw.prior.CalibrationPriorDict.from_envelope_file(
                            self.spline_calibration_envelope_dict[det],
                            minimum_frequency=self.minimum_frequency_dict[det],
                            maximum_frequency=self.maximum_frequency_dict[det],
                            n_nodes=self.spline_calibration_nodes,
                            label=det,
                            boundary=self.calibration_prior_boundary,
                        )
                    )
                elif (
                    det in self.spline_calibration_amplitude_uncertainty_dict
                    and det in self.spline_calibration_phase_uncertainty_dict
                ):
                    logger.info(
                        "Creating calibration prior for {} from "
                        "provided constant uncertainty values.".format(det)
                    )
                    self._calibration_prior.update(
                        bilby.gw.prior.CalibrationPriorDict.constant_uncertainty_spline(
                            amplitude_sigma=self.spline_calibration_amplitude_uncertainty_dict[
                                det
                            ],
                            phase_sigma=self.spline_calibration_phase_uncertainty_dict[
                                det
                            ],
                            minimum_frequency=self.minimum_frequency_dict[det],
                            maximum_frequency=self.maximum_frequency_dict[det],
                            n_nodes=self.spline_calibration_nodes,
                            label=det,
                        )
                    )
                else:
                    logger.warning(f"No calibration information for {det}")
        return self._calibration_prior
    # NOTE(review): this property/setter pair is an exact duplicate of the
    # calibration_model definitions earlier in the class; the later definition
    # silently shadows the earlier one. Consider removing one copy.
    @property
    def calibration_model(self):
        # Name of the calibration marginalization model; None disables it.
        return getattr(self, "_calibration_model", None)
    @calibration_model.setter
    def calibration_model(self, calibration_model):
        if calibration_model is not None:
            logger.info(f"Setting calibration_model={calibration_model}")
            self._calibration_model = calibration_model
        else:
            logger.info(
                "No calibration_model model provided, calibration "
                "marginalization will not be used"
            )
            self._calibration_model = None
@property
def likelihood(self):
self.search_priors = self.priors.copy()
likelihood_kwargs = dict(
interferometers=self.interferometers,
waveform_generator=self.waveform_generator,
priors=self.search_priors,
phase_marginalization=self.phase_marginalization,
distance_marginalization=self.distance_marginalization,
distance_marginalization_lookup_table=self.distance_marginalization_lookup_table,
time_marginalization=self.time_marginalization,
reference_frame=self.reference_frame,
time_reference=self.time_reference,
)
if getattr(self, "likelihood_lookup_table", None) is not None:
logger.info("Using internally loaded likelihood_lookup_table")
likelihood_kwargs["distance_marginalization_lookup_table"] = getattr(
self, "likelihood_lookup_table"
)
if self.likelihood_type in ["GravitationalWaveTransient", "zero"]:
Likelihood = bilby.gw.likelihood.GravitationalWaveTransient
likelihood_kwargs.update(jitter_time=self.jitter_time)
elif self.likelihood_type == "ROQGravitationalWaveTransient":
Likelihood = bilby.gw.likelihood.ROQGravitationalWaveTransient
if self.time_marginalization:
logger.warning(
"Time marginalization not implemented for "
"ROQGravitationalWaveTransient: option ignored"
)
likelihood_kwargs.pop("time_marginalization", None)
likelihood_kwargs.pop("jitter_time", None)
likelihood_kwargs.update(self.roq_likelihood_kwargs)
elif "." in self.likelihood_type:
split_path = self.likelihood_type.split(".")
module = ".".join(split_path[:-1])
likelihood_class = split_path[-1]
Likelihood = getattr(import_module(module), likelihood_class)
likelihood_kwargs.update(self.extra_likelihood_kwargs)
if "roq" in self.likelihood_type.lower():
likelihood_kwargs.pop("time_marginalization", None)
likelihood_kwargs.pop("jitter_time", None)
likelihood_kwargs.update(self.roq_likelihood_kwargs)
else:
raise ValueError("Unknown Likelihood class {}")
likelihood_kwargs = {
key: likelihood_kwargs[key]
for key in likelihood_kwargs
if key in inspect.getfullargspec(Likelihood.__init__).args
}
logger.debug(
f"Initialise likelihood {Likelihood} with kwargs: \n{likelihood_kwargs}"
)
likelihood = Likelihood(**likelihood_kwargs)
# If requested, use a zero likelihood: for testing purposes
if self.likelihood_type == "zero":
logger.info("Using a ZeroLikelihood")
likelihood = bilby.core.likelihood.ZeroLikelihood(likelihood)
return likelihood
    @property
    def extra_likelihood_kwargs(self):
        # Extra kwargs forwarded to user-supplied likelihood classes.
        return self._extra_likelihood_kwargs
    @extra_likelihood_kwargs.setter
    def extra_likelihood_kwargs(self, likelihood_kwargs):
        # Accept a dict, a string encoding of a dict, or None (-> empty dict).
        if isinstance(likelihood_kwargs, str):
            likelihood_kwargs = utils.convert_string_to_dict(likelihood_kwargs)
        elif likelihood_kwargs is None:
            likelihood_kwargs = dict()
        elif not isinstance(likelihood_kwargs, dict):
            raise TypeError(
                f"Type {type(likelihood_kwargs)} not understood for likelihood kwargs."
            )
        # These kwargs are managed by the pipeline itself; reject overrides.
        forbidden_keys = [
            "interferometers",
            "waveform_generator",
            "priors",
            "distance_marginalization",
            "time_marginalization",
            "phase_marginalization",
            "jitter_time",
            "distance_marginalization_lookup_table",
            "reference_frame",
            "time_reference",
        ]
        if "roq" in self.likelihood_type.lower():
            forbidden_keys += ["weights", "roq_params", "roq_scale_factor"]
        for key in forbidden_keys:
            if key in likelihood_kwargs:
                raise KeyError(
                    "{} should be passed through named argument not likelihood_kwargs".format(
                        key
                    )
                )
        self._extra_likelihood_kwargs = likelihood_kwargs
    @property
    def roq_likelihood_kwargs(self):
        # Prefer params/weights already attached to the instance; otherwise
        # read params from the ROQ folder and the weight file from meta_data.
        if hasattr(self, "likelihood_roq_params"):
            params = self.likelihood_roq_params
        else:
            params = np.genfromtxt(self.roq_folder + "/params.dat", names=True)
        if hasattr(self, "likelihood_roq_weights"):
            weights = self.likelihood_roq_weights
        else:
            weights = self.meta_data["weight_file"]
            logger.info(f"Loading ROQ weights from {weights}")
        return dict(
            weights=weights, roq_params=params, roq_scale_factor=self.roq_scale_factor
        )
    @property
    def parameter_conversion(self):
        # Pick the sample-conversion function: user-specified path first,
        # then BNS/BBH defaults inferred from the source model name.
        if self.conversion_function is not None:
            logger.info(
                f"Using user-specified conversion_function {self.conversion_function}"
            )
            return get_function_from_string_path(self.conversion_function)
        elif "binary_neutron_star" in self._frequency_domain_source_model:
            logger.info(
                "Using conversion_function convert_to_lal_binary_neutron_star_parameters"
            )
            return bilby.gw.conversion.convert_to_lal_binary_neutron_star_parameters
        elif "binary_black_hole" in self._frequency_domain_source_model:
            logger.info(
                "Using conversion_function convert_to_lal_binary_black_hole_parameters"
            )
            return bilby.gw.conversion.convert_to_lal_binary_black_hole_parameters
        else:
            logger.info("No conversion function")
            return None
    @property
    def waveform_generator(self):
        # Build the waveform generator; ROQ likelihoods additionally need the
        # (scaled) frequency nodes loaded from the ROQ folder.
        waveform_arguments = self.get_default_waveform_arguments()
        if "ROQ" in self.likelihood_type:
            logger.info(
                "Using {} likelihood with roq-folder={}".format(
                    self.likelihood_type, self.roq_folder
                )
            )
            freq_nodes_linear = np.load(self.roq_folder + "/fnodes_linear.npy")
            freq_nodes_quadratic = np.load(self.roq_folder + "/fnodes_quadratic.npy")
            # Rescale the basis nodes along with the ROQ scale factor.
            freq_nodes_linear *= self.roq_scale_factor
            freq_nodes_quadratic *= self.roq_scale_factor
            waveform_arguments["frequency_nodes_linear"] = freq_nodes_linear
            waveform_arguments["frequency_nodes_quadratic"] = freq_nodes_quadratic
            waveform_generator = self.waveform_generator_class(
                frequency_domain_source_model=self.bilby_roq_frequency_domain_source_model,
                sampling_frequency=self.interferometers.sampling_frequency,
                duration=self.interferometers.duration,
                start_time=self.interferometers.start_time,
                parameter_conversion=self.parameter_conversion,
                waveform_arguments=waveform_arguments,
            )
        else:
            waveform_generator = self.waveform_generator_class(
                frequency_domain_source_model=self.bilby_frequency_domain_source_model,
                sampling_frequency=self.interferometers.sampling_frequency,
                duration=self.interferometers.duration,
                parameter_conversion=self.parameter_conversion,
                start_time=self.interferometers.start_time,
                waveform_arguments=waveform_arguments,
            )
        return waveform_generator
    @property
    def waveform_generator_class(self):
        # Class object resolved by the setter from a (dotted) class name.
        return self._waveform_generator_class
@waveform_generator_class.setter
def waveform_generator_class(self, class_name):
if "." in class_name:
module = ".".join(class_name.split(".")[:-1])
class_name = class_name.split(".")[-1]
else:
module = "bilby.gw.waveform_generator"
wfg_class = getattr(import_module(module), class_name, None)
if wfg_class is not None:
self._waveform_generator_class = wfg_class
else:
raise BilbyPipeError(
f"Cannot import waveform generator class {module}.{class_name}"
)
    @property
    def parameter_generation(self):
        # Pick the posterior-generation function: user-specified path first,
        # then BNS/BBH defaults inferred from the source model name.
        if self.generation_function is not None:
            logger.info(f"Using user-specified generation {self.generation_function}")
            return get_function_from_string_path(self.generation_function)
        elif "binary_neutron_star" in self._frequency_domain_source_model:
            logger.info("Using generation_function generate_all_bns_parameters")
            return bilby.gw.conversion.generate_all_bns_parameters
        elif "binary_black_hole" in self._frequency_domain_source_model:
            logger.info("Using generation_function generate_all_bbh_parameters")
            return bilby.gw.conversion.generate_all_bbh_parameters
        else:
            logger.info("No conversion function")
            return None
    @property
    def summarypages_arguments(self):
        # Arguments forwarded to PESummary; dict, raw string, or None.
        return self._summarypages_arguments
    @summarypages_arguments.setter
    def summarypages_arguments(self, summarypages_arguments):
        if summarypages_arguments is None:
            self._summarypages_arguments = None
            return
        string = summarypages_arguments
        # A brace-delimited string is treated as an encoded dict.
        if "{" in string and "}" in string:
            self._summarypages_arguments = convert_string_to_dict(string)
        else:
            self._summarypages_arguments = summarypages_arguments
    @property
    def postprocessing_arguments(self):
        # Post-processing command tokens (list of str) or None.
        return self._postprocessing_arguments
@postprocessing_arguments.setter
def postprocessing_arguments(self, postprocessing_arguments):
if postprocessing_arguments in [None, "None"]:
self._postprocessing_arguments = None
elif postprocessing_arguments == [None]:
self._postprocessing_arguments = None
elif isinstance(postprocessing_arguments, str):
self._postprocessing_arguments = postprocessing_arguments.split(" ")
else:
self._postprocessing_arguments = postprocessing_arguments
    @property
    def sampler(self):
        # Name of the sampler validated by the setter.
        return self._sampler
    @sampler.setter
    def sampler(self, sampler):
        """Validate and set the sampler name.

        Raises BilbyPipeError if the input is not a string or names a
        sampler not implemented in bilby.
        """
        if not isinstance(sampler, str):
            raise BilbyPipeError("Sampler must be a single string")
        elif sampler in bilby.core.sampler.IMPLEMENTED_SAMPLERS:
            self._sampler = sampler
        else:
            raise BilbyPipeError(f"Requested sampler {sampler} not implemented")
    @property
    def sampler_kwargs(self):
        # Keyword-argument dict forwarded to the sampler.
        return self._sampler_kwargs
    @sampler_kwargs.setter
    def sampler_kwargs(self, sampler_kwargs):
        # Accept the preset names "default"/"fasttest" (case-insensitive),
        # an encoded dict string, or None (-> empty dict).
        if sampler_kwargs is not None:
            if sampler_kwargs.lower() == "default":
                self._sampler_kwargs = SAMPLER_SETTINGS["Default"]
            elif sampler_kwargs.lower() == "fasttest":
                self._sampler_kwargs = SAMPLER_SETTINGS["FastTest"]
            else:
                self._sampler_kwargs = convert_string_to_dict(
                    sampler_kwargs, "sampler-kwargs"
                )
        else:
            self._sampler_kwargs = dict()
        # Keep parallelisation settings consistent with request_cpus.
        self.update_sampler_kwargs_conditional_on_request_cpus()
def update_sampler_kwargs_conditional_on_request_cpus(self):
""" If the user adds request-cpu >1, update kwargs based on the sampler """
# Keys are samplers, values are the dictionary inputs to update
parallelisation_dict = dict(
bilby_mcmc=dict(npool=self.request_cpus),
dynesty=dict(npool=self.request_cpus),
ptemcee=dict(npool=self.request_cpus),
cpnest=dict(nthreads=self.request_cpus),
)
# Only run if request_cpus > 1
if self.request_cpus > 1:
# Only update if parallelisation_dict contains the sampler
self._sampler_kwargs.update(parallelisation_dict.get(self.sampler, dict()))
    def pretty_print_prior(self):
        # Build the prior (without a time prior) and log a readable rendering;
        # wrap any parsing failure in a BilbyPipeError with a coloured message.
        try:
            prior = self._get_priors(add_time=False)
        except Exception as e:
            raise BilbyPipeError(
                get_colored_string(f"Unable to parse prior, exception raised {e}")
            )
        pp = pretty_print_dictionary(prior)
        logger.info(f"Input prior = {pp}")
| [
"numpy.load",
"pandas.read_csv",
"bilby.core.prior.Cosine",
"os.path.isfile",
"glob.glob",
"bilby.core.prior.Uniform",
"os.path.join",
"bilby.gw.prior.CalibrationPriorDict.from_envelope_file",
"pandas.DataFrame",
"bilby.core.prior.Sine",
"os.path.dirname",
"numpy.genfromtxt",
"numpy.loadtxt"... | [((2827, 2850), 'os.path.relpath', 'os.path.relpath', (['outdir'], {}), '(outdir)\n', (2842, 2850), False, 'import os\n'), ((2990, 3026), 'os.path.join', 'os.path.join', (['self._outdir', '"""submit"""'], {}), "(self._outdir, 'submit')\n", (3002, 3026), False, 'import os\n'), ((3693, 3748), 'os.path.join', 'os.path.join', (['self.log_directory', '"""log_data_generation"""'], {}), "(self.log_directory, 'log_data_generation')\n", (3705, 3748), False, 'import os\n'), ((3979, 4032), 'os.path.join', 'os.path.join', (['self.log_directory', '"""log_data_analysis"""'], {}), "(self.log_directory, 'log_data_analysis')\n", (3991, 4032), False, 'import os\n'), ((4258, 4310), 'os.path.join', 'os.path.join', (['self.log_directory', '"""log_results_page"""'], {}), "(self.log_directory, 'log_results_page')\n", (4270, 4310), False, 'import os\n'), ((4526, 4560), 'os.path.join', 'os.path.join', (['self._outdir', '"""data"""'], {}), "(self._outdir, 'data')\n", (4538, 4560), False, 'import os\n'), ((4780, 4816), 'os.path.join', 'os.path.join', (['self._outdir', '"""result"""'], {}), "(self._outdir, 'result')\n", (4792, 4816), False, 'import os\n'), ((6166, 6215), 'numpy.loadtxt', 'np.loadtxt', (['self.gps_file'], {'ndmin': '(2)', 'delimiter': '""","""'}), "(self.gps_file, ndmin=2, delimiter=',')\n", (6176, 6215), True, 'import numpy as np\n'), ((7575, 7615), 'numpy.loadtxt', 'np.loadtxt', (['self.timeslide_file'], {'ndmin': '(2)'}), '(self.timeslide_file, ndmin=2)\n', (7585, 7615), True, 'import numpy as np\n'), ((19383, 19428), 'pandas.DataFrame', 'pd.DataFrame', (['self._injection_dict'], {'index': '[0]'}), '(self._injection_dict, index=[0])\n', (19395, 19428), True, 'import pandas as pd\n'), ((20431, 20481), 'pandas.read_csv', 'pd.read_csv', (['injection_file'], {'delim_whitespace': '(True)'}), '(injection_file, delim_whitespace=True)\n', (20442, 20481), True, 'import pandas as pd\n'), ((26487, 26514), 'glob.glob', 'glob.glob', (['prior_files_glob'], {}), 
'(prior_files_glob)\n', (26496, 26514), False, 'import glob\n'), ((26673, 26730), 'os.path.dirname', 'os.path.dirname', (['self.default_prior_files[prior_file_str]'], {}), '(self.default_prior_files[prior_file_str])\n', (26688, 26730), False, 'import os\n'), ((26818, 26844), 'os.path.join', 'os.path.join', (['direc', 'fname'], {}), '(direc, fname)\n', (26830, 26844), False, 'import os\n'), ((30150, 30182), 'bilby.core.prior.__dict__.copy', 'bilby.core.prior.__dict__.copy', ([], {}), '()\n', (30180, 30182), False, 'import bilby\n'), ((35128, 35156), 'bilby.core.prior.PriorDict', 'bilby.core.prior.PriorDict', ([], {}), '()\n', (35154, 35156), False, 'import bilby\n'), ((948, 991), 'os.path.dirname', 'os.path.dirname', (['bilby.gw.detector.__file__'], {}), '(bilby.gw.detector.__file__)\n', (963, 991), False, 'import os\n'), ((1038, 1061), 'os.path.join', 'os.path.join', (['dirs', '"""*"""'], {}), "(dirs, '*')\n", (1050, 1061), False, 'import os\n'), ((5134, 5175), 'os.path.join', 'os.path.join', (['self.outdir', '"""results_page"""'], {}), "(self.outdir, 'results_page')\n", (5146, 5175), False, 'import os\n'), ((5699, 5723), 'os.path.isfile', 'os.path.isfile', (['gps_file'], {}), '(gps_file)\n', (5713, 5723), False, 'import os\n'), ((6965, 6995), 'os.path.isfile', 'os.path.isfile', (['timeslide_file'], {}), '(timeslide_file)\n', (6979, 6995), False, 'import os\n'), ((9948, 9979), 'bilby.gw.source.__dict__.keys', 'bilby.gw.source.__dict__.keys', ([], {}), '()\n', (9977, 9979), False, 'import bilby\n'), ((18423, 18453), 'os.path.isfile', 'os.path.isfile', (['injection_file'], {}), '(injection_file)\n', (18437, 18453), False, 'import os\n'), ((19919, 19982), 'json.load', 'json.load', (['file'], {'object_hook': 'bilby.core.utils.decode_bilby_json'}), '(file, object_hook=bilby.core.utils.decode_bilby_json)\n', (19928, 19982), False, 'import json\n'), ((20105, 20131), 'pandas.DataFrame', 'pd.DataFrame', (['injection_df'], {}), '(injection_df)\n', (20117, 20131), True, 
'import pandas as pd\n'), ((27060, 27086), 'os.path.isfile', 'os.path.isfile', (['prior_file'], {}), '(prior_file)\n', (27074, 27086), False, 'import os\n'), ((40941, 40989), 'bilby.core.likelihood.ZeroLikelihood', 'bilby.core.likelihood.ZeroLikelihood', (['likelihood'], {}), '(likelihood)\n', (40977, 40989), False, 'import bilby\n'), ((42621, 42679), 'numpy.genfromtxt', 'np.genfromtxt', (["(self.roq_folder + '/params.dat')"], {'names': '(True)'}), "(self.roq_folder + '/params.dat', names=True)\n", (42634, 42679), True, 'import numpy as np\n'), ((44363, 44410), 'numpy.load', 'np.load', (["(self.roq_folder + '/fnodes_linear.npy')"], {}), "(self.roq_folder + '/fnodes_linear.npy')\n", (44370, 44410), True, 'import numpy as np\n'), ((44446, 44496), 'numpy.load', 'np.load', (["(self.roq_folder + '/fnodes_quadratic.npy')"], {}), "(self.roq_folder + '/fnodes_quadratic.npy')\n", (44453, 44496), True, 'import numpy as np\n'), ((46203, 46224), 'importlib.import_module', 'import_module', (['module'], {}), '(module)\n', (46216, 46224), False, 'from importlib import import_module\n'), ((5754, 5779), 'os.path.relpath', 'os.path.relpath', (['gps_file'], {}), '(gps_file)\n', (5769, 5779), False, 'import os\n'), ((7032, 7063), 'os.path.relpath', 'os.path.relpath', (['timeslide_file'], {}), '(timeslide_file)\n', (7047, 7063), False, 'import os\n'), ((11312, 11338), 'numpy.array', 'np.array', (['self._mode_array'], {}), '(self._mode_array)\n', (11320, 11338), True, 'import numpy as np\n'), ((11412, 11438), 'numpy.array', 'np.array', (['self._mode_array'], {}), '(self._mode_array)\n', (11420, 11438), True, 'import numpy as np\n'), ((11624, 11650), 'numpy.array', 'np.array', (['self._mode_array'], {}), '(self._mode_array)\n', (11632, 11650), True, 'import numpy as np\n'), ((14726, 14749), 'gwosc.datasets.event_gps', 'event_gps', (['trigger_time'], {}), '(trigger_time)\n', (14735, 14749), False, 'from gwosc.datasets import event_gps\n'), ((18490, 18521), 'os.path.relpath', 
'os.path.relpath', (['injection_file'], {}), '(injection_file)\n', (18505, 18521), False, 'import os\n'), ((20282, 20319), 'pandas.DataFrame', 'pd.DataFrame', (['injection_df'], {'index': '[0]'}), '(injection_df, index=[0])\n', (20294, 20319), True, 'import pandas as pd\n'), ((26408, 26434), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (26424, 26434), False, 'import os\n'), ((33950, 34057), 'bilby.core.prior.Uniform', 'bilby.core.prior.Uniform', ([], {'minimum': '(0)', 'maximum': '(2 * np.pi)', 'latex_label': '"""$\\\\epsilon$"""', 'boundary': '"""periodic"""'}), "(minimum=0, maximum=2 * np.pi, latex_label=\n '$\\\\epsilon$', boundary='periodic')\n", (33974, 34057), False, 'import bilby\n'), ((34226, 34272), 'bilby.core.prior.Sine', 'bilby.core.prior.Sine', ([], {'latex_label': '"""$\\\\kappa$"""'}), "(latex_label='$\\\\kappa$')\n", (34247, 34272), False, 'import bilby\n'), ((26531, 26551), 'os.path.basename', 'os.path.basename', (['ff'], {}), '(ff)\n', (26547, 26551), False, 'import os\n'), ((27158, 27186), 'os.path.basename', 'os.path.basename', (['prior_file'], {}), '(prior_file)\n', (27174, 27186), False, 'import os\n'), ((27321, 27349), 'os.path.basename', 'os.path.basename', (['prior_file'], {}), '(prior_file)\n', (27337, 27349), False, 'import os\n'), ((33090, 33125), 'bilby.core.prior.Cosine', 'bilby.core.prior.Cosine', ([], {'name': '"""dec"""'}), "(name='dec')\n", (33113, 33125), False, 'import bilby\n'), ((33146, 33237), 'bilby.core.prior.Uniform', 'bilby.core.prior.Uniform', ([], {'name': '"""ra"""', 'minimum': '(0)', 'maximum': '(2 * np.pi)', 'boundary': '"""periodic"""'}), "(name='ra', minimum=0, maximum=2 * np.pi, boundary=\n 'periodic')\n", (33170, 33237), False, 'import bilby\n'), ((1079, 1099), 'os.path.basename', 'os.path.basename', (['kf'], {}), '(kf)\n', (1095, 1099), False, 'import os\n'), ((35617, 35930), 'bilby.gw.prior.CalibrationPriorDict.from_envelope_file', 
'bilby.gw.prior.CalibrationPriorDict.from_envelope_file', (['self.spline_calibration_envelope_dict[det]'], {'minimum_frequency': 'self.minimum_frequency_dict[det]', 'maximum_frequency': 'self.maximum_frequency_dict[det]', 'n_nodes': 'self.spline_calibration_nodes', 'label': 'det', 'boundary': 'self.calibration_prior_boundary'}), '(self.\n spline_calibration_envelope_dict[det], minimum_frequency=self.\n minimum_frequency_dict[det], maximum_frequency=self.\n maximum_frequency_dict[det], n_nodes=self.spline_calibration_nodes,\n label=det, boundary=self.calibration_prior_boundary)\n', (35671, 35930), False, 'import bilby\n'), ((39957, 39978), 'importlib.import_module', 'import_module', (['module'], {}), '(module)\n', (39970, 39978), False, 'from importlib import import_module\n'), ((40523, 40566), 'inspect.getfullargspec', 'inspect.getfullargspec', (['Likelihood.__init__'], {}), '(Likelihood.__init__)\n', (40545, 40566), False, 'import inspect\n'), ((36600, 36979), 'bilby.gw.prior.CalibrationPriorDict.constant_uncertainty_spline', 'bilby.gw.prior.CalibrationPriorDict.constant_uncertainty_spline', ([], {'amplitude_sigma': 'self.spline_calibration_amplitude_uncertainty_dict[det]', 'phase_sigma': 'self.spline_calibration_phase_uncertainty_dict[det]', 'minimum_frequency': 'self.minimum_frequency_dict[det]', 'maximum_frequency': 'self.maximum_frequency_dict[det]', 'n_nodes': 'self.spline_calibration_nodes', 'label': 'det'}), '(amplitude_sigma\n =self.spline_calibration_amplitude_uncertainty_dict[det], phase_sigma=\n self.spline_calibration_phase_uncertainty_dict[det], minimum_frequency=\n self.minimum_frequency_dict[det], maximum_frequency=self.\n maximum_frequency_dict[det], n_nodes=self.spline_calibration_nodes,\n label=det)\n', (36663, 36979), False, 'import bilby\n')] |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Hadamard gate."""
import numpy
from qiskit.circuit.controlledgate import ControlledGate
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
from qiskit.qasm import pi
from .t import TGate, TdgGate
from .s import SGate, SdgGate
class HGate(Gate):
    r"""Single-qubit Hadamard gate.

    A :math:`\pi` rotation about the X+Z axis, mapping the computational
    basis :math:`|0\rangle,|1\rangle` onto :math:`|+\rangle,|-\rangle`
    and vice-versa.

    **Circuit symbol:**

    .. parsed-literal::

             ┌───┐
        q_0: ┤ H ├
             └───┘

    **Matrix Representation:**

    .. math::

        H = \frac{1}{\sqrt{2}}
            \begin{pmatrix}
                1 & 1 \\
                1 & -1
            \end{pmatrix}
    """
    def __init__(self, label=None):
        """Create new H gate."""
        super().__init__('h', 1, [], label=label)
    def _define(self):
        """Decompose as ``gate h a { u2(0,pi) a; }``."""
        # pylint: disable=cyclic-import
        from qiskit.circuit.quantumcircuit import QuantumCircuit
        from .u2 import U2Gate
        qreg = QuantumRegister(1, 'q')
        circuit = QuantumCircuit(qreg, name=self.name)
        circuit._data = [(U2Gate(0, pi), [qreg[0]], [])]
        self.definition = circuit
    def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):
        """Return a (multi-)controlled-H gate.

        One control qubit yields a CH gate.

        Args:
            num_ctrl_qubits (int): number of control qubits.
            label (str or None): An optional label for the gate [Default: None]
            ctrl_state (int or str or None): control state expressed as integer,
                string (e.g. '110'), or None. If None, use all 1s.

        Returns:
            ControlledGate: controlled version of this gate.
        """
        if num_ctrl_qubits != 1:
            return super().control(num_ctrl_qubits=num_ctrl_qubits,
                                   label=label, ctrl_state=ctrl_state)
        controlled = CHGate(label=label, ctrl_state=ctrl_state)
        controlled.base_gate.label = self.label
        return controlled
    def inverse(self):
        r"""Return inverted H gate (the H gate is self-inverse)."""
        return HGate()
    def to_matrix(self):
        """Return a Numpy.array for the H gate."""
        return numpy.array([[1, 1], [1, -1]], dtype=complex) / numpy.sqrt(2)
class CHGate(ControlledGate):
    r"""Controlled-Hadamard gate.

    Applies a Hadamard on the target qubit when the control qubit is in
    the :math:`|1\rangle` state.

    **Circuit symbol:**

    .. parsed-literal::

        q_0: ──■──
             ┌─┴─┐
        q_1: ┤ H ├
             └───┘

    **Matrix Representation:**

    .. math::

        CH\ q_0, q_1 =
            I \otimes |0\rangle\langle 0| + H \otimes |1\rangle\langle 1| =
            \begin{pmatrix}
                1 & 0 & 0 & 0 \\
                0 & \frac{1}{\sqrt{2}} & 0 & \frac{1}{\sqrt{2}} \\
                0 & 0 & 1 & 0 \\
                0 & \frac{1}{\sqrt{2}} & 0 & -\frac{1}{\sqrt{2}}
            \end{pmatrix}

    .. note::

        In Qiskit's little-endian convention, higher qubit indices are more
        significant. Textbooks that instead take q_1 as the control present
        the gate as

        .. math::

            CH\ q_1, q_0 =
                |0\rangle\langle 0| \otimes I + |1\rangle\langle 1| \otimes H =
                \frac{1}{\sqrt{2}}
                \begin{pmatrix}
                    1 & 0 & 0 & 0 \\
                    0 & 1 & 0 & 0 \\
                    0 & 0 & 1 & 1 \\
                    0 & 0 & 1 & -1
                \end{pmatrix}
    """
    # Precompute both matrices once at class-definition time.
    _sqrt2o2 = 1 / numpy.sqrt(2)
    # Matrix applied for control state |1> (the conventional CH).
    _matrix1 = numpy.array([[1, 0, 0, 0],
                          [0, _sqrt2o2, 0, _sqrt2o2],
                          [0, 0, 1, 0],
                          [0, _sqrt2o2, 0, -_sqrt2o2]],
                         dtype=complex)
    # Matrix applied for control state |0>.
    _matrix0 = numpy.array([[_sqrt2o2, 0, _sqrt2o2, 0],
                          [0, 1, 0, 0],
                          [_sqrt2o2, 0, -_sqrt2o2, 0],
                          [0, 0, 0, 1]],
                         dtype=complex)
    def __init__(self, label=None, ctrl_state=None):
        """Create new CH gate."""
        super().__init__('ch', 2, [], num_ctrl_qubits=1, label=label,
                         ctrl_state=ctrl_state)
        self.base_gate = HGate()
    def _define(self):
        """Decompose CH as S-H-T on the target, a CX, then Tdg-H-Sdg."""
        # pylint: disable=cyclic-import
        from qiskit.circuit.quantumcircuit import QuantumCircuit
        from .x import CXGate  # pylint: disable=cyclic-import
        qreg = QuantumRegister(2, 'q')
        circuit = QuantumCircuit(qreg, name=self.name)
        control, target = qreg[0], qreg[1]
        circuit._data = [
            (SGate(), [target], []),
            (HGate(), [target], []),
            (TGate(), [target], []),
            (CXGate(), [control, target], []),
            (TdgGate(), [target], []),
            (HGate(), [target], []),
            (SdgGate(), [target], []),
        ]
        self.definition = circuit
    def inverse(self):
        """Return inverted CH gate (the CH gate is self-inverse)."""
        return CHGate()
    def to_matrix(self):
        """Return a numpy.array for the CH gate."""
        return self._matrix1 if self.ctrl_state else self._matrix0
| [
"qiskit.circuit.quantumregister.QuantumRegister",
"numpy.array",
"qiskit.circuit.quantumcircuit.QuantumCircuit",
"numpy.sqrt"
] | [((4627, 4744), 'numpy.array', 'numpy.array', (['[[1, 0, 0, 0], [0, _sqrt2o2, 0, _sqrt2o2], [0, 0, 1, 0], [0, _sqrt2o2, 0, -\n _sqrt2o2]]'], {'dtype': 'complex'}), '([[1, 0, 0, 0], [0, _sqrt2o2, 0, _sqrt2o2], [0, 0, 1, 0], [0,\n _sqrt2o2, 0, -_sqrt2o2]], dtype=complex)\n', (4638, 4744), False, 'import numpy\n'), ((4867, 4985), 'numpy.array', 'numpy.array', (['[[_sqrt2o2, 0, _sqrt2o2, 0], [0, 1, 0, 0], [_sqrt2o2, 0, -_sqrt2o2, 0], [0,\n 0, 0, 1]]'], {'dtype': 'complex'}), '([[_sqrt2o2, 0, _sqrt2o2, 0], [0, 1, 0, 0], [_sqrt2o2, 0, -\n _sqrt2o2, 0], [0, 0, 0, 1]], dtype=complex)\n', (4878, 4985), False, 'import numpy\n'), ((1671, 1694), 'qiskit.circuit.quantumregister.QuantumRegister', 'QuantumRegister', (['(1)', '"""q"""'], {}), "(1, 'q')\n", (1686, 1694), False, 'from qiskit.circuit.quantumregister import QuantumRegister\n'), ((1708, 1741), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['q'], {'name': 'self.name'}), '(q, name=self.name)\n', (1722, 1741), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((4598, 4611), 'numpy.sqrt', 'numpy.sqrt', (['(2)'], {}), '(2)\n', (4608, 4611), False, 'import numpy\n'), ((5718, 5741), 'qiskit.circuit.quantumregister.QuantumRegister', 'QuantumRegister', (['(2)', '"""q"""'], {}), "(2, 'q')\n", (5733, 5741), False, 'from qiskit.circuit.quantumregister import QuantumRegister\n'), ((5755, 5788), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['q'], {'name': 'self.name'}), '(q, name=self.name)\n', (5769, 5788), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((2923, 2968), 'numpy.array', 'numpy.array', (['[[1, 1], [1, -1]]'], {'dtype': 'complex'}), '([[1, 1], [1, -1]], dtype=complex)\n', (2934, 2968), False, 'import numpy\n'), ((2999, 3012), 'numpy.sqrt', 'numpy.sqrt', (['(2)'], {}), '(2)\n', (3009, 3012), False, 'import numpy\n')] |
from astropy.io import fits
import pandas as pd
import numpy as np
from os.path import expanduser
from random import choice as choose_random_item
import os
from lightkurve import KeplerTargetPixelFile
from lightkurve.mast import ArchiveError
#load KeplerTargetPixelFile
# flatten with k2SFF i.e. create LightCurveFile
# transform to FlareLightCurveFile
# define methods
def Get(mode, file='', objectid='', win_size=3):
    '''
    Call a get function depending on the mode,
    processes loading steps common to all modes.
    Generates error from short time median scatter
    if not given elsewhere.
    Parameters:
    ------------
    mode: str
        type of light curve, e.g. EVEREST LC, Vanderburg LC,
        raw MAST .fits file, random K2
    file: '' or str
        lightcurve file location
    objectid: '' or str
        currently unused; the ID is parsed from ``file`` instead
    win_size: 3 or int
        window size for scatter generator
    Returns:
    ------------
    outfile: str
        output path derived from the mode and file name
    objectid: str
        identifier parsed from the file name
    lc: pandas DataFrame
        light curve with columns incl. [time, flux_raw, error, flags,
        exptime, qtr]
    '''
    def GetObjectID(mode):
        # Parse the target ID embedded in the file name of each format.
        if mode == 'kplr':
            return str(int(file[file.find('kplr') + 4:file.find('-')]))
        elif mode == 'ktwo':
            return str(int(file[file.find('ktwo') + 4:file.find('-')]))
        elif mode == 'vdb':
            # BUGFIX: this value was previously computed but not returned,
            # so the 'vdb' mode always yielded None as the object ID.
            return str(int(file[file.find('lightcurve_') + 11:file.find('-')]))
        elif mode == 'everest':
            return str(int(file[file.find('everest') + 15:file.find('-')]))
        elif mode == 'k2sc':
            return str(int(file[file.find('k2sc') + 12:file.find('-')]))
        elif mode == 'txt':
            return file[0:3]
        elif mode == 'csv':
            return '0000'
        elif mode == 'random':
            return 'random'
    def GetOutDir(mode):
        # Results live under ~/research/appaloosa/aprun/<mode>/
        home = expanduser("~")
        outdir = '{}/research/appaloosa/aprun/{}/'.format(home, mode)
        if not os.path.isdir(outdir):
            try:
                os.makedirs(outdir)
            except OSError:
                # Directory may have been created concurrently; ignore.
                pass
        return outdir
    def GetOutfile(mode, file='random'):
        # Build the output path from the mode-specific file-name layout.
        if mode == 'everest':
            return GetOutDir(mode) + file[file.find('everest') + 15:]
        elif mode == 'k2sc':
            return GetOutDir(mode) + file[file.find('k2sc') + 12:]
        elif mode == 'kplr':
            return GetOutDir(mode) + file[file.find('kplr') + 4:]
        elif mode in ('vdb', 'csv'):
            return GetOutDir(mode) + file[file.find('lightcurve_') + 11:]
        # BUGFIX: ('ktwo') is just the string 'ktwo', so this was a substring
        # test; a one-element tuple makes it a proper membership test.
        elif mode in ('ktwo',):
            return GetOutDir(mode) + file[file.find('ktwo') + 4:]
        elif mode in ('txt', 'random', 'test'):
            return GetOutDir(mode) + file[-6:]
    # Dispatch table: mode -> loader function
    modes = {'kplr': GetLCfits,
             'ktwo': GetLCfits,
             'vdb': GetLCvdb,
             'everest': GetLCeverest,
             'k2sc': GetLCk2sc,
             'txt': GetLCtxt,
             'csv': GetLCvdb,
             'random': GetLClightkurve}
    if mode == 'test':
        lc = pd.read_csv('test_suite/test/testlc.csv').dropna(how='any')
    else:
        lc = modes[mode](file=file).dropna(how='any')
    # Infer exposure time (short vs long cadence) from the median time step.
    t = lc.time.values
    dt = np.nanmedian(t[1:] - t[0:-1])
    if (dt < 0.01):
        dtime = 54.2 / 60. / 60. / 24.
    else:
        dtime = 30 * 54.2 / 60. / 60. / 24.
    lc['exptime'] = dtime
    lc['qtr'] = 0
    if 'flags' not in lc.columns:
        lc['flags'] = 0
    if 'error' not in lc.columns:
        # Estimate photometric error from the median short-window scatter.
        lc['error'] = np.nanmedian(lc.flux_raw.rolling(win_size, center=True).std())
    print(GetOutfile(mode, file=file))
    return GetOutfile(mode, file=file), GetObjectID(mode), lc
def GetLCfits(file=''):
    '''
    Read a MAST archive Kepler/K2 light curve.
    Parameters
    ----------
    file : light curve file location for a MAST archive .fits file
    Returns
    -------
    lc: light curve DataFrame with columns [time, flux_raw, error, flags]
    '''
    # output column name -> FITS record column; byteswap converts the
    # big-endian FITS data to the native byte order pandas expects
    fits_columns = {'time': 'TIME',
                    'flux_raw': 'SAP_FLUX',
                    'error': 'SAP_FLUX_ERR',
                    'flags': 'SAP_QUALITY'}
    data_rec = fits.open(file)[1].data
    return pd.DataFrame({name: data_rec[col].byteswap().newbyteorder()
                         for name, col in fits_columns.items()})
def GetLCvdb(file=''):
    '''
    Read a Vanderburg de-trended light curve.
    Parameters
    ----------
    file : light curve file location for a Vanderburg de-trended .txt file
    Returns
    -------
    lc: light curve DataFrame with columns [time, flux_raw]
    '''
    # map the archive's header names onto our standard column names;
    # index labels are converted to strings via the `index=str` mapper
    column_map = {'BJD - 2454833': 'time', ' Corrected Flux': 'flux_raw'}
    return pd.read_csv(file, index_col=False).rename(index=str, columns=column_map)
def GetLCeverest(file=''):
    '''
    Read an EVEREST de-trended light curve.
    Parameters
    ----------
    file : light curve file location for an EVEREST de-trended .fits file
    Returns
    -------
    lc: light curve DataFrame with columns [time, flux_raw]
    '''
    data_rec = fits.open(file)[1].data
    # convert the big-endian FITS columns to the native byte order
    time = np.array(data_rec['TIME']).byteswap().newbyteorder()
    flux = np.array(data_rec['FLUX']).byteswap().newbyteorder()
    # keep the outliers... for now
    # (an 'OUTLIER' quality column exists in the record but is not loaded)
    return pd.DataFrame({'time': time, 'flux_raw': flux})
def GetLCk2sc(file=''):
    '''
    Read a K2SC de-trended light curve.
    Parameters
    ----------
    file : light curve file location for a K2SC de-trended .fits file
    Returns
    -------
    lc: light curve DataFrame with columns [time, flux_raw]
    '''
    hdu = fits.open(file)
    data_rec = hdu[1].data
    # native byte order copies of the two columns we keep
    # (the 'error' column is deliberately not loaded)
    time = np.array(data_rec['time']).byteswap().newbyteorder()
    flux = np.array(data_rec['flux']).byteswap().newbyteorder()
    hdu.close()
    del data_rec
    # keep the outliers... for now
    return pd.DataFrame({'time': time, 'flux_raw': flux})
def GetLCtxt(file=''):
    '''
    Read a basic comma-separated light curve file.
    Parameters
    ----------
    file : '' or
        light curve file location for a basic .txt file
    Returns
    -------
    lc: light curve DataFrame with columns [time, flux_raw, error]
    '''
    # first line is skipped; only the first three columns are read
    read_opts = dict(index_col=False,
                     usecols=(0, 1, 2),
                     skiprows=1,
                     header=None,
                     names=['time', 'flux_raw', 'error'])
    return pd.read_csv(file, **read_opts)
def GetLClightkurve(file='',**kwargs):
    '''
    Construct a light curve from either
    - a local KeplerTargetPixelFile, or
    - a random K2 KeplerTargetPixelFile from the archive
    using the lightkurve built-in correct function.
    Parameters
    ----------
    file : '' or str
        light curve file path. Default will download random file from archive.
    **kwargs : dict
        Keyword arguments to that will be passed to the KeplerTargetPixelFile
        constructor.
    Returns
    -------
    lc: pandas DataFrame
        light curve with columns ['time', 'flux_raw', 'error', 'flags']
    '''
    # NOTE(review): **kwargs is accepted but currently never forwarded to the
    # KeplerTargetPixelFile constructor -- confirm whether that is intended
    if file == '':
        print('Choose a random LC from the archives...')
        # pick a random EPIC target from the campaign overview table
        idlist = pd.read_csv('stars_shortlist/share/helpers/GO_all_campaigns_to_date.csv',
                             usecols=['EPIC ID'])
        ID = choose_random_item(idlist['EPIC ID'].values)
        tpf = None
        try:
            tpf = KeplerTargetPixelFile.from_archive(ID, cadence='long')
        except ArchiveError:
            # target observed in several campaigns: probe campaigns 0..19
            # and keep the earliest one that resolves
            print('EPIC {} was observed during several campaigns.'
                  '\nChoose the earliest available.'.format(ID))
            C = 0
            while C < 20:
                print(C)
                try:
                    tpf = KeplerTargetPixelFile.from_archive(ID, cadence='long',
                                                             campaign=C)
                except ArchiveError:
                    C += 1
                # BUG FIX: identity comparison with None (was `tpf != None`)
                if tpf is not None:
                    break
            # NOTE(review): if every campaign fails, tpf stays None and
            # tpf.to_lightcurve below raises AttributeError -- confirm
            # whether an explicit error message would be preferable
    else:
        tpf = KeplerTargetPixelFile(file, quality_bitmask='default')
    lc = tpf.to_lightcurve(method='aperture')
    lc = lc.correct(windows=20)
    # native byte order for pandas; keep raw flux, errors and quality flags
    LC = pd.DataFrame({'flux_raw': lc.flux,
                       'time':np.copy(lc.time).byteswap().newbyteorder(),
                       'error':lc.flux_err,
                       'flags':np.copy(lc.quality).byteswap().newbyteorder(),
                       })
    return LC
# UNUSED, UNTESTED, DELETE?
# def OneCadence(data):
# '''
# Within each quarter of data from the database, pick the data with the
# fastest cadence. We want to study 1-min if available. Don't want
# multiple cadence observations in the same quarter, bad for detrending.
#
# Parameters
# ----------
# data : numpy array
# the result from MySQL database query, using the getLC() function
#
# Returns
# -------
# Data array
#
# '''
# # get the unique quarters
# qtr = data[:,0]
# cadence = data[:,5]
# uQtr = np.unique(qtr)
#
# indx = []
#
# # for each quarter, is there more than one cadence?
# for q in uQtr:
# x = np.where( (np.abs(qtr-q) < 0.1) )
#
# etimes = np.unique(cadence[x])
# y = np.where( (cadence[x] == min(etimes)) )
#
# indx = np.append(indx, x[0][y])
#
# indx = np.array(indx, dtype='int')
#
# data_out = data[indx,:]
# return data_out
#
| [
"lightkurve.KeplerTargetPixelFile",
"numpy.nanmedian",
"os.makedirs",
"pandas.read_csv",
"os.path.isdir",
"numpy.copy",
"random.choice",
"lightkurve.KeplerTargetPixelFile.from_archive",
"numpy.array",
"astropy.io.fits.open",
"os.path.expanduser"
] | [((3035, 3064), 'numpy.nanmedian', 'np.nanmedian', (['(t[1:] - t[0:-1])'], {}), '(t[1:] - t[0:-1])\n', (3047, 3064), True, 'import numpy as np\n'), ((3754, 3769), 'astropy.io.fits.open', 'fits.open', (['file'], {}), '(file)\n', (3763, 3769), False, 'from astropy.io import fits\n'), ((4374, 4408), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': '(False)'}), '(file, index_col=False)\n', (4385, 4408), True, 'import pandas as pd\n'), ((4816, 4831), 'astropy.io.fits.open', 'fits.open', (['file'], {}), '(file)\n', (4825, 4831), False, 'from astropy.io import fits\n'), ((5393, 5408), 'astropy.io.fits.open', 'fits.open', (['file'], {}), '(file)\n', (5402, 5408), False, 'from astropy.io import fits\n'), ((6088, 6208), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': '(False)', 'usecols': '(0, 1, 2)', 'skiprows': '(1)', 'header': 'None', 'names': "['time', 'flux_raw', 'error']"}), "(file, index_col=False, usecols=(0, 1, 2), skiprows=1, header=\n None, names=['time', 'flux_raw', 'error'])\n", (6099, 6208), True, 'import pandas as pd\n'), ((1702, 1717), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (1712, 1717), False, 'from os.path import expanduser\n'), ((7026, 7124), 'pandas.read_csv', 'pd.read_csv', (['"""stars_shortlist/share/helpers/GO_all_campaigns_to_date.csv"""'], {'usecols': "['EPIC ID']"}), "('stars_shortlist/share/helpers/GO_all_campaigns_to_date.csv',\n usecols=['EPIC ID'])\n", (7037, 7124), True, 'import pandas as pd\n'), ((7165, 7209), 'random.choice', 'choose_random_item', (["idlist['EPIC ID'].values"], {}), "(idlist['EPIC ID'].values)\n", (7183, 7209), True, 'from random import choice as choose_random_item\n'), ((7891, 7945), 'lightkurve.KeplerTargetPixelFile', 'KeplerTargetPixelFile', (['file'], {'quality_bitmask': '"""default"""'}), "(file, quality_bitmask='default')\n", (7912, 7945), False, 'from lightkurve import KeplerTargetPixelFile\n'), ((1802, 1823), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), 
'(outdir)\n', (1815, 1823), False, 'import os\n'), ((7260, 7314), 'lightkurve.KeplerTargetPixelFile.from_archive', 'KeplerTargetPixelFile.from_archive', (['ID'], {'cadence': '"""long"""'}), "(ID, cadence='long')\n", (7294, 7314), False, 'from lightkurve import KeplerTargetPixelFile\n'), ((1858, 1877), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (1869, 1877), False, 'import os\n'), ((2878, 2919), 'pandas.read_csv', 'pd.read_csv', (['"""test_suite/test/testlc.csv"""'], {}), "('test_suite/test/testlc.csv')\n", (2889, 2919), True, 'import pandas as pd\n'), ((7592, 7658), 'lightkurve.KeplerTargetPixelFile.from_archive', 'KeplerTargetPixelFile.from_archive', (['ID'], {'cadence': '"""long"""', 'campaign': 'C'}), "(ID, cadence='long', campaign=C)\n", (7626, 7658), False, 'from lightkurve import KeplerTargetPixelFile\n'), ((4889, 4915), 'numpy.array', 'np.array', (["data_rec['TIME']"], {}), "(data_rec['TIME'])\n", (4897, 4915), True, 'import numpy as np\n'), ((4976, 5002), 'numpy.array', 'np.array', (["data_rec['FLUX']"], {}), "(data_rec['FLUX'])\n", (4984, 5002), True, 'import numpy as np\n'), ((5467, 5493), 'numpy.array', 'np.array', (["data_rec['time']"], {}), "(data_rec['time'])\n", (5475, 5493), True, 'import numpy as np\n'), ((5554, 5580), 'numpy.array', 'np.array', (["data_rec['flux']"], {}), "(data_rec['flux'])\n", (5562, 5580), True, 'import numpy as np\n'), ((8100, 8116), 'numpy.copy', 'np.copy', (['lc.time'], {}), '(lc.time)\n', (8107, 8116), True, 'import numpy as np\n'), ((8221, 8240), 'numpy.copy', 'np.copy', (['lc.quality'], {}), '(lc.quality)\n', (8228, 8240), True, 'import numpy as np\n')] |
# pylint: disable=arguments-differ
# pylint: disable=unused-argument
# pylint: disable=abstract-method
import math
import os
from argparse import ArgumentParser
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torchtext.datasets as td
from pytorch_lightning.callbacks import (
EarlyStopping,
ModelCheckpoint,
LearningRateMonitor,
)
from sklearn.datasets import fetch_20newsgroups
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import IterDataPipe, random_split
from torchtext.data.functional import to_map_style_dataset
from torchtext.datasets import AG_NEWS
from transformers import BertModel, BertTokenizer, AdamW
import mlflow.pytorch
def get_20newsgroups(num_samples):
    """Fetch the 4-category 20newsgroups training subset and return
    *num_samples* random rows with columns ['description', 'label']."""
    categories = ["alt.atheism", "talk.religion.misc", "comp.graphics", "sci.space"]
    texts, labels = fetch_20newsgroups(subset="train", categories=categories, return_X_y=True)
    frame = pd.DataFrame(data=texts, columns=["description"])
    frame["label"] = labels
    return frame.sample(n=num_samples)
def get_ag_news(num_samples):
    """Download AG_NEWS via torchtext and return *num_samples* random rows
    with columns ['label', 'description'] and zero-based labels."""
    # trigger the torchtext download of the raw csv files
    td.AG_NEWS(root="data", split=("train", "test"))
    train_csv_path = "data/AG_NEWS/train.csv"
    frame = pd.read_csv(train_csv_path, usecols=[0, 2], names=["label", "description"])
    frame["label"] = frame["label"] - 1  # make labels zero-based
    return frame.sample(n=num_samples)
class NewsDataset(IterDataPipe):
    # Iterable dataset that tokenizes news text with a BERT tokenizer and
    # yields model-ready tensors.
    def __init__(self, tokenizer, source, max_length, num_samples, dataset="20newsgroups"):
        """
        Custom Dataset - Converts the input text and label to tensor
        :param tokenizer: bert tokenizer
        :param source: data source - Either a dataframe or DataPipe
        :param max_length: maximum length of the news text
        :param num_samples: number of samples to load
        :param dataset: Dataset type - 20newsgroups or ag_news
        """
        super(NewsDataset, self).__init__()
        self.source = source
        # iteration window [start, end) over the source samples
        self.start = 0
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.dataset = dataset
        self.end = num_samples
    def __iter__(self):
        """Yield tokenized samples, sharding the range across DataLoader workers."""
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is None:
            # single-process data loading: iterate the full range
            iter_start = self.start
            iter_end = self.end
        else:
            # split [start, end) into equal chunks, one per worker
            per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
            worker_id = worker_info.id
            iter_start = self.start + worker_id * per_worker
            iter_end = min(iter_start + per_worker, self.end)
        for idx in range(iter_start, iter_end):
            if self.dataset == "20newsgroups":
                # dataframe source: labels are already zero-based
                review = str(self.source["description"].iloc[idx])
                target = int(self.source["label"].iloc[idx])
            else:
                # raw AG_NEWS pipe yields (label, text) with 1-based labels
                target, review = self.source[idx]
                target -= 1
            # pad/truncate to max_length and return PyTorch tensors
            encoding = self.tokenizer.encode_plus(
                review,
                add_special_tokens=True,
                max_length=self.max_length,
                return_token_type_ids=False,
                padding="max_length",
                return_attention_mask=True,
                return_tensors="pt",
                truncation=True,
            )
            yield {
                "review_text": review,
                "input_ids": encoding["input_ids"].flatten(),
                "attention_mask": encoding["attention_mask"].flatten(),
                "targets": torch.tensor(target, dtype=torch.long),
            }
class BertDataModule(pl.LightningDataModule):
    # Lightning data module wrapping either the 20newsgroups or the AG_NEWS
    # corpus, tokenized for BERT.
    def __init__(self, **kwargs):
        """
        Initialization of inherited lightning data module
        """
        super(BertDataModule, self).__init__()
        self.PRE_TRAINED_MODEL_NAME = "bert-base-uncased"
        self.train_dataset = None
        self.val_dataset = None
        self.test_dataset = None
        # maximum token length fed to the BERT tokenizer
        self.MAX_LEN = 100
        self.encoding = None
        self.tokenizer = None
        self.args = kwargs
        self.dataset = self.args["dataset"]
        self.train_count = None
        self.val_count = None
        self.test_count = None
        self.RANDOM_SEED = 42
        self.news_group_df = None
    def prepare_data(self):
        """
        Downloads the ag_news or 20newsgroup dataset and initializes bert tokenizer
        """
        np.random.seed(self.RANDOM_SEED)
        torch.manual_seed(self.RANDOM_SEED)
        if self.dataset == "20newsgroups":
            num_samples = self.args["num_samples"]
            # NOTE(review): since self.dataset == self.args["dataset"], the
            # else-arm of this conditional expression is unreachable here
            self.news_group_df = (
                get_20newsgroups(num_samples)
                if self.args["dataset"] == "20newsgroups"
                else get_ag_news(num_samples)
            )
        else:
            # raw torchtext iterators, materialized to map-style datasets
            train_iter, test_iter = AG_NEWS()
            self.train_dataset = to_map_style_dataset(train_iter)
            self.test_dataset = to_map_style_dataset(test_iter)
        self.tokenizer = BertTokenizer.from_pretrained(self.PRE_TRAINED_MODEL_NAME)
    def setup(self, stage=None):
        """
        Split the data into train, test, validation data
        :param stage: Stage - training or testing
        """
        if stage == "fit":
            if self.dataset == "20newsgroups":
                # 70 / 15 / 15 stratified split of the sampled dataframe
                self.train_dataset, self.test_dataset = train_test_split(
                    self.news_group_df,
                    test_size=0.3,
                    random_state=self.RANDOM_SEED,
                    stratify=self.news_group_df["label"],
                )
                self.val_dataset, self.test_dataset = train_test_split(
                    self.test_dataset,
                    test_size=0.5,
                    random_state=self.RANDOM_SEED,
                    stratify=self.test_dataset["label"],
                )
                self.train_count = len(self.train_dataset)
                self.val_count = len(self.val_dataset)
                self.test_count = len(self.test_dataset)
            else:
                # 95 / 5 random split of the AG_NEWS training set
                num_train = int(len(self.train_dataset) * 0.95)
                self.train_dataset, self.val_dataset = random_split(
                    self.train_dataset, [num_train, len(self.train_dataset) - num_train]
                )
                # NOTE(review): these counts derive from the requested
                # num_samples, not from the actual random_split sizes above --
                # confirm the mismatch is intentional
                self.train_count = self.args.get("num_samples")
                self.val_count = int(self.train_count / 10)
                self.test_count = int(self.train_count / 10)
                self.train_count = self.train_count - (self.val_count + self.test_count)
            print("Number of samples used for training: {}".format(self.train_count))
            print("Number of samples used for validation: {}".format(self.val_count))
            print("Number of samples used for test: {}".format(self.test_count))
    @staticmethod
    def add_model_specific_args(parent_parser):
        """
        Adds data-module specific command-line arguments
        :param parent_parser: Application specific parser
        :return: Returns the augmented argument parser
        """
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument(
            "--batch_size",
            type=int,
            default=16,
            metavar="N",
            help="input batch size for training (default: 16)",
        )
        parser.add_argument(
            "--num_workers",
            type=int,
            default=3,
            metavar="N",
            help="number of workers (default: 3)",
        )
        return parser
    def create_data_loader(self, source, count):
        """
        Generic data loader function
        :param source: data source - dataframe or map-style dataset
        :param count: number of samples to iterate over
        :return: Returns the constructed dataloader
        """
        ds = NewsDataset(
            source=source,
            tokenizer=self.tokenizer,
            max_length=self.MAX_LEN,
            num_samples=count,
            dataset=self.dataset,
        )
        return DataLoader(
            ds, batch_size=self.args["batch_size"], num_workers=self.args["num_workers"]
        )
    def train_dataloader(self):
        """
        :return: output - Train data loader for the given input
        """
        return self.create_data_loader(source=self.train_dataset, count=self.train_count)
    def val_dataloader(self):
        """
        :return: output - Validation data loader for the given input
        """
        return self.create_data_loader(source=self.val_dataset, count=self.val_count)
    def test_dataloader(self):
        """
        :return: output - Test data loader for the given input
        """
        return self.create_data_loader(source=self.test_dataset, count=self.test_count)
class BertNewsClassifier(pl.LightningModule):
    # Frozen BERT encoder + small trainable classification head for
    # 4-class news topic classification.
    def __init__(self, **kwargs):
        """
        Initializes the network, optimizer and scheduler
        """
        super(BertNewsClassifier, self).__init__()
        self.PRE_TRAINED_MODEL_NAME = "bert-base-uncased"
        self.bert_model = BertModel.from_pretrained(self.PRE_TRAINED_MODEL_NAME)
        # freeze the pretrained encoder -- only the head below is trained
        for param in self.bert_model.parameters():
            param.requires_grad = False
        self.drop = nn.Dropout(p=0.2)
        # assigning labels
        self.class_names = (
            ["alt.atheism", "talk.religion.misc", "comp.graphics", "sci.space"]
            if kwargs["dataset"] == "20newsgroups"
            else ["world", "Sports", "Business", "Sci/Tech"]
        )
        n_classes = len(self.class_names)
        self.fc1 = nn.Linear(self.bert_model.config.hidden_size, 512)
        self.out = nn.Linear(512, n_classes)
        self.scheduler = None
        self.optimizer = None
        self.args = kwargs
    def forward(self, input_ids, attention_mask):
        """
        :param input_ids: Input data
        :param attention_mask: Attention mask value
        :return: output - Type of news for the given news snippet
        """
        # pooled [CLS] representation -> ReLU(fc1) -> dropout -> class logits
        output = self.bert_model(input_ids=input_ids, attention_mask=attention_mask)
        output = F.relu(self.fc1(output.pooler_output))
        output = self.drop(output)
        output = self.out(output)
        return output
    @staticmethod
    def add_model_specific_args(parent_parser):
        """
        Adds model specific command-line arguments
        :param parent_parser: Application specific parser
        :return: Returns the augmented argument parser
        """
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument(
            "--lr",
            type=float,
            default=0.001,
            metavar="LR",
            help="learning rate (default: 0.001)",
        )
        return parser
    def training_step(self, train_batch, batch_idx):
        """
        Training the data as batches and returns training loss on each batch
        :param train_batch: Batch data
        :param batch_idx: Batch indices
        :return: output - Training loss
        """
        input_ids = train_batch["input_ids"].to(self.device)
        attention_mask = train_batch["attention_mask"].to(self.device)
        targets = train_batch["targets"].to(self.device)
        output = self.forward(input_ids, attention_mask)
        loss = F.cross_entropy(output, targets)
        self.log("train_loss", loss)
        return {"loss": loss}
    def test_step(self, test_batch, batch_idx):
        """
        Performs test and computes the accuracy of the model
        :param test_batch: Batch data
        :param batch_idx: Batch indices
        :return: output - Testing accuracy
        """
        input_ids = test_batch["input_ids"].to(self.device)
        attention_mask = test_batch["attention_mask"].to(self.device)
        targets = test_batch["targets"].to(self.device)
        output = self.forward(input_ids, attention_mask)
        # accuracy of the arg-max predictions against the labels
        _, y_hat = torch.max(output, dim=1)
        test_acc = accuracy_score(y_hat.cpu(), targets.cpu())
        return {"test_acc": torch.tensor(test_acc)}
    def validation_step(self, val_batch, batch_idx):
        """
        Performs validation of data in batches
        :param val_batch: Batch data
        :param batch_idx: Batch indices
        :return: output - valid step loss
        """
        input_ids = val_batch["input_ids"].to(self.device)
        attention_mask = val_batch["attention_mask"].to(self.device)
        targets = val_batch["targets"].to(self.device)
        output = self.forward(input_ids, attention_mask)
        loss = F.cross_entropy(output, targets)
        return {"val_step_loss": loss}
    def validation_epoch_end(self, outputs):
        """
        Computes average validation loss
        :param outputs: outputs after every epoch end
        :return: output - average valid loss
        """
        avg_loss = torch.stack([x["val_step_loss"] for x in outputs]).mean()
        # sync_dist averages the metric across devices in distributed runs
        self.log("val_loss", avg_loss, sync_dist=True)
    def test_epoch_end(self, outputs):
        """
        Computes average test accuracy score
        :param outputs: outputs after every epoch end
        :return: output - average test accuracy
        """
        avg_test_acc = torch.stack([x["test_acc"] for x in outputs]).mean()
        self.log("avg_test_acc", avg_test_acc)
    def configure_optimizers(self):
        """
        Initializes the optimizer and learning rate scheduler
        :return: output - Initialized optimizer and scheduler
        """
        self.optimizer = AdamW(self.parameters(), lr=self.args["lr"])
        # shrink the LR by factor 0.2 when val_loss plateaus for 2 epochs
        self.scheduler = {
            "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer,
                mode="min",
                factor=0.2,
                patience=2,
                min_lr=1e-6,
                verbose=True,
            ),
            "monitor": "val_loss",
        }
        return [self.optimizer], [self.scheduler]
if __name__ == "__main__":
    parser = ArgumentParser(description="Bert-News Classifier Example")
    parser.add_argument(
        "--num_samples",
        type=int,
        default=2000,
        metavar="N",
        help="Number of samples to be used for training and evaluation steps (default: 15000) Maximum:100000",
    )
    parser.add_argument(
        "--dataset",
        default="20newsgroups",
        metavar="DATASET",
        help="Dataset to use",
        choices=["20newsgroups", "ag_news"],
    )
    # merge Trainer, model and data-module arguments into a single parser
    parser = pl.Trainer.add_argparse_args(parent_parser=parser)
    parser = BertNewsClassifier.add_model_specific_args(parent_parser=parser)
    parser = BertDataModule.add_model_specific_args(parent_parser=parser)
    # enable automatic MLflow logging of params, metrics and the model
    mlflow.pytorch.autolog()
    args = parser.parse_args()
    dict_args = vars(args)
    # argparse may deliver the literal string "None"; normalize it to None
    if "accelerator" in dict_args:
        if dict_args["accelerator"] == "None":
            dict_args["accelerator"] = None
    dm = BertDataModule(**dict_args)
    model = BertNewsClassifier(**dict_args)
    early_stopping = EarlyStopping(monitor="val_loss", mode="min", verbose=True)
    # keep only the best checkpoint (lowest val_loss) in the working directory
    checkpoint_callback = ModelCheckpoint(
        dirpath=os.getcwd(),
        save_top_k=1,
        verbose=True,
        monitor="val_loss",
        mode="min",
    )
    lr_logger = LearningRateMonitor()
    trainer = pl.Trainer.from_argparse_args(
        args,
        callbacks=[lr_logger, early_stopping, checkpoint_callback],
        enable_checkpointing=True,
    )
    trainer.fit(model, dm)
    trainer.test(model, datamodule=dm)
| [
"torch.nn.Dropout",
"numpy.random.seed",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"pytorch_lightning.Trainer.add_argparse_args",
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torchtext.datasets.AG_... | [((1008, 1082), 'sklearn.datasets.fetch_20newsgroups', 'fetch_20newsgroups', ([], {'subset': '"""train"""', 'categories': 'categories', 'return_X_y': '(True)'}), "(subset='train', categories=categories, return_X_y=True)\n", (1026, 1082), False, 'from sklearn.datasets import fetch_20newsgroups\n'), ((1238, 1286), 'torchtext.datasets.AG_NEWS', 'td.AG_NEWS', ([], {'root': '"""data"""', 'split': "('train', 'test')"}), "(root='data', split=('train', 'test'))\n", (1248, 1286), True, 'import torchtext.datasets as td\n'), ((14119, 14177), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Bert-News Classifier Example"""'}), "(description='Bert-News Classifier Example')\n", (14133, 14177), False, 'from argparse import ArgumentParser\n'), ((14607, 14657), 'pytorch_lightning.Trainer.add_argparse_args', 'pl.Trainer.add_argparse_args', ([], {'parent_parser': 'parser'}), '(parent_parser=parser)\n', (14635, 14657), True, 'import pytorch_lightning as pl\n'), ((15130, 15189), 'pytorch_lightning.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(True)'}), "(monitor='val_loss', mode='min', verbose=True)\n", (15143, 15189), False, 'from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, LearningRateMonitor\n'), ((15378, 15399), 'pytorch_lightning.callbacks.LearningRateMonitor', 'LearningRateMonitor', ([], {}), '()\n', (15397, 15399), False, 'from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, LearningRateMonitor\n'), ((15415, 15541), 'pytorch_lightning.Trainer.from_argparse_args', 'pl.Trainer.from_argparse_args', (['args'], {'callbacks': '[lr_logger, early_stopping, checkpoint_callback]', 'enable_checkpointing': '(True)'}), '(args, callbacks=[lr_logger, early_stopping,\n checkpoint_callback], enable_checkpointing=True)\n', (15444, 15541), True, 'import pytorch_lightning as pl\n'), ((2325, 2359), 'torch.utils.data.get_worker_info', 
'torch.utils.data.get_worker_info', ([], {}), '()\n', (2357, 2359), False, 'import torch\n'), ((4527, 4559), 'numpy.random.seed', 'np.random.seed', (['self.RANDOM_SEED'], {}), '(self.RANDOM_SEED)\n', (4541, 4559), True, 'import numpy as np\n'), ((4568, 4603), 'torch.manual_seed', 'torch.manual_seed', (['self.RANDOM_SEED'], {}), '(self.RANDOM_SEED)\n', (4585, 4603), False, 'import torch\n'), ((5114, 5172), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['self.PRE_TRAINED_MODEL_NAME'], {}), '(self.PRE_TRAINED_MODEL_NAME)\n', (5143, 5172), False, 'from transformers import BertModel, BertTokenizer, AdamW\n'), ((7215, 7270), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'parents': '[parent_parser]', 'add_help': '(False)'}), '(parents=[parent_parser], add_help=False)\n', (7229, 7270), False, 'from argparse import ArgumentParser\n'), ((8145, 8238), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'batch_size': "self.args['batch_size']", 'num_workers': "self.args['num_workers']"}), "(ds, batch_size=self.args['batch_size'], num_workers=self.args[\n 'num_workers'])\n", (8155, 8238), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((9182, 9236), 'transformers.BertModel.from_pretrained', 'BertModel.from_pretrained', (['self.PRE_TRAINED_MODEL_NAME'], {}), '(self.PRE_TRAINED_MODEL_NAME)\n', (9207, 9236), False, 'from transformers import BertModel, BertTokenizer, AdamW\n'), ((9348, 9365), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (9358, 9365), False, 'from torch import nn\n'), ((9686, 9736), 'torch.nn.Linear', 'nn.Linear', (['self.bert_model.config.hidden_size', '(512)'], {}), '(self.bert_model.config.hidden_size, 512)\n', (9695, 9736), False, 'from torch import nn\n'), ((9756, 9781), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'n_classes'], {}), '(512, n_classes)\n', (9765, 9781), False, 'from torch import nn\n'), ((10627, 10682), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'parents': 
'[parent_parser]', 'add_help': '(False)'}), '(parents=[parent_parser], add_help=False)\n', (10641, 10682), False, 'from argparse import ArgumentParser\n'), ((11428, 11460), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'targets'], {}), '(output, targets)\n', (11443, 11460), True, 'import torch.nn.functional as F\n'), ((12047, 12071), 'torch.max', 'torch.max', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (12056, 12071), False, 'import torch\n'), ((12688, 12720), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'targets'], {}), '(output, targets)\n', (12703, 12720), True, 'import torch.nn.functional as F\n'), ((4948, 4957), 'torchtext.datasets.AG_NEWS', 'AG_NEWS', ([], {}), '()\n', (4955, 4957), False, 'from torchtext.datasets import AG_NEWS\n'), ((4991, 5023), 'torchtext.data.functional.to_map_style_dataset', 'to_map_style_dataset', (['train_iter'], {}), '(train_iter)\n', (5011, 5023), False, 'from torchtext.data.functional import to_map_style_dataset\n'), ((5056, 5087), 'torchtext.data.functional.to_map_style_dataset', 'to_map_style_dataset', (['test_iter'], {}), '(test_iter)\n', (5076, 5087), False, 'from torchtext.data.functional import to_map_style_dataset\n'), ((12162, 12184), 'torch.tensor', 'torch.tensor', (['test_acc'], {}), '(test_acc)\n', (12174, 12184), False, 'import torch\n'), ((13748, 13874), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['self.optimizer'], {'mode': '"""min"""', 'factor': '(0.2)', 'patience': '(2)', 'min_lr': '(1e-06)', 'verbose': '(True)'}), "(self.optimizer, mode='min',\n factor=0.2, patience=2, min_lr=1e-06, verbose=True)\n", (13790, 13874), False, 'import torch\n'), ((15250, 15261), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15259, 15261), False, 'import os\n'), ((5469, 5594), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.news_group_df'], {'test_size': '(0.3)', 'random_state': 'self.RANDOM_SEED', 'stratify': 
"self.news_group_df['label']"}), "(self.news_group_df, test_size=0.3, random_state=self.\n RANDOM_SEED, stratify=self.news_group_df['label'])\n", (5485, 5594), False, 'from sklearn.model_selection import train_test_split\n'), ((5743, 5866), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.test_dataset'], {'test_size': '(0.5)', 'random_state': 'self.RANDOM_SEED', 'stratify': "self.test_dataset['label']"}), "(self.test_dataset, test_size=0.5, random_state=self.\n RANDOM_SEED, stratify=self.test_dataset['label'])\n", (5759, 5866), False, 'from sklearn.model_selection import train_test_split\n'), ((12995, 13045), 'torch.stack', 'torch.stack', (["[x['val_step_loss'] for x in outputs]"], {}), "([x['val_step_loss'] for x in outputs])\n", (13006, 13045), False, 'import torch\n'), ((13340, 13385), 'torch.stack', 'torch.stack', (["[x['test_acc'] for x in outputs]"], {}), "([x['test_acc'] for x in outputs])\n", (13351, 13385), False, 'import torch\n'), ((1094, 1139), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X', 'columns': "['description']"}), "(data=X, columns=['description'])\n", (1106, 1139), True, 'import pandas as pd\n'), ((1354, 1429), 'pandas.read_csv', 'pd.read_csv', (['train_csv_path'], {'usecols': '[0, 2]', 'names': "['label', 'description']"}), "(train_csv_path, usecols=[0, 2], names=['label', 'description'])\n", (1365, 1429), True, 'import pandas as pd\n'), ((3646, 3684), 'torch.tensor', 'torch.tensor', (['target'], {'dtype': 'torch.long'}), '(target, dtype=torch.long)\n', (3658, 3684), False, 'import torch\n')] |
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Tutorial/Advanced/colored_pointcloud_registration.py
import numpy as np
import copy
from open3d import *
def draw_registration_result_original_color(source, target, transformation):
    """Render *source* (moved by *transformation*) alongside *target*,
    keeping each point cloud's original colors."""
    moved_source = copy.deepcopy(source)
    moved_source.transform(transformation)
    draw_geometries([moved_source, target])
if __name__ == "__main__":
    print("1. Load two point clouds and show initial pose")
    source = read_point_cloud("../../TestData/ColoredICP/frag_115.ply")
    target = read_point_cloud("../../TestData/ColoredICP/frag_116.ply")
    # draw initial alignment
    current_transformation = np.identity(4)
    draw_registration_result_original_color(
            source, target, current_transformation)
    # point to plane ICP
    current_transformation = np.identity(4);
    print("2. Point-to-plane ICP registration is applied on original point")
    print("   clouds to refine the alignment. Distance threshold 0.02.")
    result_icp = registration_icp(source, target, 0.02,
            current_transformation, TransformationEstimationPointToPlane())
    print(result_icp)
    draw_registration_result_original_color(
            source, target, result_icp.transformation)
    # colored pointcloud registration
    # This is implementation of following paper
    # <NAME>, <NAME>, <NAME>,
    # Colored Point Cloud Registration Revisited, ICCV 2017
    # coarse-to-fine schedule: voxel size shrinks, iteration budget shrinks
    voxel_radius = [ 0.04, 0.02, 0.01 ];
    max_iter = [ 50, 30, 14 ];
    current_transformation = np.identity(4)
    print("3. Colored point cloud registration")
    for scale in range(3):
        # NOTE(review): `iter` shadows the builtin of the same name
        iter = max_iter[scale]
        radius = voxel_radius[scale]
        print([iter,radius,scale])
        print("3-1. Downsample with a voxel size %.2f" % radius)
        source_down = voxel_down_sample(source, radius)
        target_down = voxel_down_sample(target, radius)
        # normals are required by the colored-ICP objective
        print("3-2. Estimate normal.")
        estimate_normals(source_down, KDTreeSearchParamHybrid(
                radius = radius * 2, max_nn = 30))
        estimate_normals(target_down, KDTreeSearchParamHybrid(
                radius = radius * 2, max_nn = 30))
        print("3-3. Applying colored point cloud registration")
        # each scale refines the transformation produced by the previous one
        result_icp = registration_colored_icp(source_down, target_down,
                radius, current_transformation,
                ICPConvergenceCriteria(relative_fitness = 1e-6,
                relative_rmse = 1e-6, max_iteration = iter))
        current_transformation = result_icp.transformation
        print(result_icp)
    draw_registration_result_original_color(
        source, target, result_icp.transformation)
| [
"copy.deepcopy",
"numpy.identity"
] | [((326, 347), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (339, 347), False, 'import copy\n'), ((726, 740), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (737, 740), True, 'import numpy as np\n'), ((893, 907), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (904, 907), True, 'import numpy as np\n'), ((1591, 1605), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1602, 1605), True, 'import numpy as np\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2017 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from scipy.io import loadmat, mmread
from scipy.sparse import issparse
import numpy as np
import tempfile
import os
from contextlib import contextmanager
import shutil
from pymor.core.logger import getLogger
def _loadmat(path, key=None):
try:
data = loadmat(path, mat_dtype=True)
except Exception as e:
raise IOError(e)
if key:
try:
return data[key]
except KeyError:
raise IOError('"{}" not found in MATLAB file {}'.format(key, path))
data = [v for v in data.values() if isinstance(v, np.ndarray) or issparse(v)]
if len(data) == 0:
raise IOError('No matrix data contained in MATLAB file {}'.format(path))
elif len(data) > 1:
raise IOError('More than one matrix object stored in MATLAB file {}'.format(path))
else:
return data[0]
def _mmread(path, key=None):
if key:
raise IOError('Cannot specify "key" for Matrix Market file')
try:
matrix = mmread(path)
if issparse(matrix):
matrix = matrix.tocsc()
return matrix
except Exception as e:
raise IOError(e)
def _load(path, key=None):
data = np.load(path)
if isinstance(data, dict):
if key:
try:
matrix = data[key]
except KeyError:
raise IOError('"{}" not found in NPY file {}'.format(key, path))
elif len(data) == 0:
raise IOError('No data contained in NPY file {}'.format(path))
elif len(data) > 1:
raise IOError('More than one object stored in NPY file {} for key {}'.format(path, key))
else:
matrix = next(iter(data.values()))
else:
matrix = data
if not isinstance(matrix, np.ndarray) and not issparse(matrix):
raise IOError('Loaded data is not a matrix in NPY file {}').format(path)
return matrix
def _loadtxt(path, key=None):
if key:
raise IOError('Cannot specify "key" for TXT file')
try:
return np.loadtxt(path)
except Exception as e:
raise IOError(e)
def load_matrix(path, key=None):
    """Load a matrix from ``path``, guessing the format from the extension.

    Supported formats: MATLAB (.mat), Matrix Market (.mtx, .mtx.gz),
    NumPy (.npy/.npz) and plain text (.txt). When the extension is not
    recognized, every loader is tried in turn.

    Parameters
    ----------
    path
        Path of the file to load.
    key
        Optional name of the matrix inside the file (MATLAB / NPY only).

    Raises
    ------
    IOError
        If no loader could read the file.
    """
    logger = getLogger('pymor.tools.io.load_matrix')
    logger.info('Loading matrix from file ' + path)

    # Determine the (possibly double, e.g. 'mtx.gz') extension.
    path_parts = path.split('.')
    if len(path_parts[-1]) == 3:
        extension = path_parts[-1].lower()
    elif path_parts[-1].lower() == 'gz' and len(path_parts) >= 2 and len(path_parts[-2]) == 3:
        extension = '.'.join(path_parts[-2:]).lower()
    else:
        extension = None
    file_format_map = {'mat': ('MATLAB', _loadmat),
                       'mtx': ('Matrix Market', _mmread),
                       # BUG FIX: a gzipped Matrix Market file 'a.mtx.gz'
                       # yields the extension 'mtx.gz'; the old 'mtz.gz'
                       # entry (kept below for compatibility) was a typo and
                       # never matched, silently falling back to try-all.
                       'mtx.gz': ('Matrix Market', _mmread),
                       'mtz.gz': ('Matrix Market', _mmread),
                       'npy': ('NPY/NPZ', _load),
                       'npz': ('NPY/NPZ', _load),
                       'txt': ('Text', _loadtxt)}
    if extension in file_format_map:
        file_type, loader = file_format_map[extension]
        logger.info(file_type + ' file detected.')
        return loader(path, key)

    # Unknown extension: try every loader until one succeeds.
    logger.warning('Could not detect file format. Trying all loaders ...')
    loaders = [_loadmat, _mmread, _loadtxt, _load]
    for loader in loaders:
        try:
            return loader(path, key)
        except IOError:
            pass
    raise IOError('Could not load file {} (key = {})'.format(path, key))
@contextmanager
def SafeTemporaryFileName(name=None, parent_dir=None):
    """Cross Platform safe equivalent of re-opening a NamedTemporaryFile

    Creates an automatically cleaned up temporary directory with a single
    file therein.

    name: filename component, defaults to 'temp_file'
    parent_dir: the parent dir of the new tmp dir. defaults to
        tempfile.gettempdir()
    """
    parent_dir = parent_dir or tempfile.gettempdir()
    name = name or 'temp_file'
    dirname = tempfile.mkdtemp(dir=parent_dir)
    path = os.path.join(dirname, name)
    try:
        yield path
    finally:
        # BUG FIX: cleanup must run even when the with-body raises;
        # previously an exception in the body leaked the temp directory.
        shutil.rmtree(dirname)
| [
"numpy.load",
"scipy.io.loadmat",
"scipy.sparse.issparse",
"tempfile.gettempdir",
"scipy.io.mmread",
"pymor.core.logger.getLogger",
"tempfile.mkdtemp",
"numpy.loadtxt",
"shutil.rmtree",
"os.path.join"
] | [((1396, 1409), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1403, 1409), True, 'import numpy as np\n'), ((2357, 2396), 'pymor.core.logger.getLogger', 'getLogger', (['"""pymor.tools.io.load_matrix"""'], {}), "('pymor.tools.io.load_matrix')\n", (2366, 2396), False, 'from pymor.core.logger import getLogger\n'), ((4035, 4067), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'dir': 'parent_dir'}), '(dir=parent_dir)\n', (4051, 4067), False, 'import tempfile\n'), ((4079, 4106), 'os.path.join', 'os.path.join', (['dirname', 'name'], {}), '(dirname, name)\n', (4091, 4106), False, 'import os\n'), ((4126, 4148), 'shutil.rmtree', 'shutil.rmtree', (['dirname'], {}), '(dirname)\n', (4139, 4148), False, 'import shutil\n'), ((487, 516), 'scipy.io.loadmat', 'loadmat', (['path'], {'mat_dtype': '(True)'}), '(path, mat_dtype=True)\n', (494, 516), False, 'from scipy.io import loadmat, mmread\n'), ((1204, 1216), 'scipy.io.mmread', 'mmread', (['path'], {}), '(path)\n', (1210, 1216), False, 'from scipy.io import loadmat, mmread\n'), ((1228, 1244), 'scipy.sparse.issparse', 'issparse', (['matrix'], {}), '(matrix)\n', (1236, 1244), False, 'from scipy.sparse import issparse\n'), ((2239, 2255), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (2249, 2255), True, 'import numpy as np\n'), ((3968, 3989), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (3987, 3989), False, 'import tempfile\n'), ((1995, 2011), 'scipy.sparse.issparse', 'issparse', (['matrix'], {}), '(matrix)\n', (2003, 2011), False, 'from scipy.sparse import issparse\n'), ((799, 810), 'scipy.sparse.issparse', 'issparse', (['v'], {}), '(v)\n', (807, 810), False, 'from scipy.sparse import issparse\n')] |
import abc
import functools
import itertools
import os
import pickle
import time
import warnings
import weakref
from collections import defaultdict
from stat import ST_CTIME
import numpy as np
from yt.config import ytcfg
from yt.data_objects.data_containers import data_object_registry
from yt.data_objects.particle_filters import filter_registry
from yt.data_objects.particle_unions import ParticleUnion
from yt.data_objects.region_expression import RegionExpression
from yt.fields.derived_field import DerivedField, ValidateSpatial
from yt.fields.field_type_container import FieldTypeContainer
from yt.fields.fluid_fields import setup_gradient_fields
from yt.fields.particle_fields import DEP_MSG_SMOOTH_FIELD
from yt.funcs import (
ensure_list,
issue_deprecation_warning,
iterable,
mylog,
set_intersection,
setdefaultattr,
)
from yt.geometry.coordinates.api import (
CartesianCoordinateHandler,
CoordinateHandler,
CylindricalCoordinateHandler,
GeographicCoordinateHandler,
InternalGeographicCoordinateHandler,
PolarCoordinateHandler,
SpectralCubeCoordinateHandler,
SphericalCoordinateHandler,
)
from yt.units import UnitContainer, _wrap_display_ytarray
from yt.units.dimensions import current_mks
from yt.units.unit_object import Unit, define_unit
from yt.units.unit_registry import UnitRegistry
from yt.units.unit_systems import create_code_unit_system, unit_system_registry
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.cosmology import Cosmology
from yt.utilities.exceptions import (
YTFieldNotFound,
YTGeometryNotSupported,
YTIllDefinedParticleFilter,
YTObjectNotImplemented,
)
from yt.utilities.minimal_representation import MinimalDataset
from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only
from yt.utilities.parameter_file_storage import (
NoParameterShelf,
ParameterFileStore,
output_type_registry,
)
# We want to support the movie format in the future.
# When such a thing comes to pass, I'll move all the stuff that is constant up
# to here, and then have it instantiate EnzoDatasets as appropriate.

# Weak cache of instantiated datasets keyed on (abspath, args, kwargs);
# entries disappear automatically once a dataset is garbage collected.
_cached_datasets = weakref.WeakValueDictionary()
# Persistent store used to record known parameter files across sessions.
_ds_store = ParameterFileStore()
def _unsupported_object(ds, obj_name):
def _raise_unsupp(*args, **kwargs):
raise YTObjectNotImplemented(ds, obj_name)
return _raise_unsupp
class RegisteredDataset(abc.ABCMeta):
    """Metaclass that records every Dataset subclass in ``output_type_registry``.

    The registry is what lets the frontend machinery discover dataset
    classes by name at load time.
    """

    def __init__(cls, name, b, d):
        type.__init__(cls, name, b, d)
        output_type_registry[name] = cls
        mylog.debug("Registering: %s as %s", name, cls)
class IndexProxy:
    """Backwards-compatibility proxy so ``ds.h.sphere``, ``ds.h.print_stats``
    and ``ds.h.grid_left_edge`` keep working by forwarding to the dataset or
    its index object as appropriate."""

    def __init__(self, ds):
        # Weak reference avoids a proxy <-> dataset reference cycle;
        # touching ds.index forces index construction up front.
        self.ds = weakref.proxy(ds)
        ds.index

    def __getattr__(self, name):
        # Prefer attributes living directly on the dataset.
        if hasattr(self.ds, name):
            return getattr(self.ds, name)
        # Otherwise allow a whitelisted subset of index attributes.
        if name in self.ds.index._index_properties:
            return getattr(self.ds.index, name)
        raise AttributeError
class MutableAttribute:
    """A descriptor for mutable data.

    Values are stored per-instance in a WeakKeyDictionary and a *copy* is
    handed back on access, so callers cannot mutate the stored value in
    place. Class-level access returns None.
    """

    def __init__(self, display_array=False):
        self.data = weakref.WeakKeyDictionary()
        self.display_array = display_array

    def __get__(self, instance, owner):
        if not instance:
            return None
        value = self.data.get(instance, None)
        try:
            # Hand out a copy so the stored value stays untouched.
            value = value.copy()
        except AttributeError:
            pass
        if self.display_array:
            try:
                value._ipython_display_ = functools.partial(_wrap_display_ytarray, value)
            # This will error out if the items have yet to be turned into
            # YTArrays, in which case we just let it go.
            except AttributeError:
                pass
        return value

    def __set__(self, instance, value):
        self.data[instance] = value
def requires_index(attr_name):
    """Build a property whose getter forces index construction first.

    Accessing the property touches ``self.index``; building the index is
    what populates ``attr_name`` in the instance __dict__, which is then
    returned. The setter writes straight into ``__dict__``.
    """

    def _get(self, attr_name=attr_name):
        self.index
        # By now it should have been set
        return self.__dict__[attr_name]

    def _set(self, value, attr_name=attr_name):
        self.__dict__[attr_name] = value

    return property(_get, _set)
class Dataset(metaclass=RegisteredDataset):
    """Abstract base class for all frontend dataset types.

    Subclasses implement ``_is_valid``, ``_parse_parameter_file`` and
    ``_set_code_unit_attributes``; the metaclass registers every subclass
    in ``output_type_registry``.
    """

    default_fluid_type = "gas"
    default_field = ("gas", "density")
    fluid_types = ("gas", "deposit", "index")
    particle_types = ("io",)  # By default we have an 'all'
    particle_types_raw = ("io",)
    geometry = "cartesian"
    coordinates = None
    storage_filename = None
    particle_unions = None
    known_filters = None
    _index_class = None
    field_units = None
    # These two require the index to be built before they can be read.
    derived_field_list = requires_index("derived_field_list")
    fields = requires_index("fields")
    _instantiated = False
    _unique_identifier = None
    _particle_type_counts = None
    _proj_type = "quad_proj"
    _ionization_label_format = "roman_numeral"
    # these are set in self._parse_parameter_file()
    domain_left_edge = MutableAttribute()
    domain_right_edge = MutableAttribute()
    domain_dimensions = MutableAttribute()
    periodicity = MutableAttribute()
    # these are set in self._set_derived_attrs()
    domain_width = MutableAttribute()
    domain_center = MutableAttribute()
    def __new__(cls, filename=None, *args, **kwargs):
        """Create or reuse a dataset instance.

        String filenames are looked up in the module-level weak cache keyed
        on (abspath, pickled args/kwargs) so repeated loads of the same file
        return the same object; non-string inputs bypass the cache.
        """
        if not isinstance(filename, str):
            obj = object.__new__(cls)
            # The Stream frontend uses a StreamHandler object to pass metadata
            # to __init__.
            is_stream = hasattr(filename, "get_fields") and hasattr(
                filename, "get_particle_type"
            )
            if not is_stream:
                obj.__init__(filename, *args, **kwargs)
            return obj
        apath = os.path.abspath(filename)
        # pickle.dumps makes arbitrary arg combinations hashable cache keys.
        cache_key = (apath, pickle.dumps(args), pickle.dumps(kwargs))
        if ytcfg.getboolean("yt", "skip_dataset_cache"):
            obj = object.__new__(cls)
        elif cache_key not in _cached_datasets:
            obj = object.__new__(cls)
            if not obj._skip_cache:
                _cached_datasets[cache_key] = obj
        else:
            obj = _cached_datasets[cache_key]
        return obj
    def __init__(
        self,
        filename,
        dataset_type=None,
        file_style=None,
        units_override=None,
        unit_system="cgs",
    ):
        """
        Base class for generating new output types. Principally consists of
        a *filename* and a *dataset_type* which will be passed on to children.
        """
        # We return early and do NOT initialize a second time if this file has
        # already been initialized (the cache in __new__ can hand back an
        # already-initialized object).
        if self._instantiated:
            return
        self.dataset_type = dataset_type
        self.file_style = file_style
        self.conversion_factors = {}
        self.parameters = {}
        # ds.r is the shorthand region-slicing interface.
        self.region_expression = self.r = RegionExpression(self)
        self.known_filters = self.known_filters or {}
        self.particle_unions = self.particle_unions or {}
        self.field_units = self.field_units or {}
        if units_override is None:
            units_override = {}
        self.units_override = units_override
        # path stuff
        self.parameter_filename = str(filename)
        self.basename = os.path.basename(filename)
        self.directory = os.path.expanduser(os.path.dirname(filename))
        self.fullpath = os.path.abspath(self.directory)
        self.backup_filename = self.parameter_filename + "_backup.gdf"
        self.read_from_backup = False
        if os.path.exists(self.backup_filename):
            self.read_from_backup = True
        if len(self.directory) == 0:
            self.directory = "."
        # to get the timing right, do this before the heavy lifting
        self._instantiated = time.time()
        self.no_cgs_equiv_length = False
        if unit_system == "code":
            # create a fake MKS unit system which we will override later to
            # avoid chicken/egg issue of the unit registry needing a unit system
            # but code units need a unit registry to define the code units on
            used_unit_system = "mks"
        else:
            used_unit_system = unit_system
        self._create_unit_registry(used_unit_system)
        # Subclass hook that populates domain/parameter attributes.
        self._parse_parameter_file()
        self.set_units()
        self._assign_unit_system(unit_system)
        self._setup_coordinate_handler()
        # Because we need an instantiated class to check the ds's existence in
        # the cache, we move that check to here from __new__. This avoids
        # double-instantiation.
        try:
            _ds_store.check_ds(self)
        except NoParameterShelf:
            pass
        self.print_key_parameters()
        self._set_derived_attrs()
        self._setup_classes()
    @property
    def unique_identifier(self):
        """Per-dataset identifier, defaulting to the parameter file's ctime."""
        if self._unique_identifier is None:
            self._unique_identifier = int(os.stat(self.parameter_filename)[ST_CTIME])
        return self._unique_identifier

    @unique_identifier.setter
    def unique_identifier(self, value):
        self._unique_identifier = value
    # abstract methods require implementation in subclasses
    @classmethod
    @abc.abstractmethod
    def _is_valid(cls, *args, **kwargs):
        """Heuristic test of whether this frontend can interpret the data."""
        # A heuristic test to determine if the data format can be interpreted
        # with the present frontend
        return False

    @abc.abstractmethod
    def _parse_parameter_file(self):
        """Populate dataset attributes from ``self.parameter_filename``."""
        # set up various attributes from self.parameter_filename
        # see yt.frontends._skeleton.SkeletonDataset for a full description of what is required here
        pass

    @abc.abstractmethod
    def _set_code_unit_attributes(self):
        """Define code-unit to physical-unit conversion factors."""
        # set up code-units to physical units normalization factors
        # see yt.frontends._skeleton.SkeletonDataset for a full description of what is required here
        pass
    def _set_derived_attrs(self):
        """Derive domain_center/domain_width from the domain edges and coerce
        the domain attributes and current_time into unit-aware containers."""
        if self.domain_left_edge is None or self.domain_right_edge is None:
            # Datasets without a spatial domain get zero-size placeholders.
            self.domain_center = np.zeros(3)
            self.domain_width = np.zeros(3)
        else:
            self.domain_center = 0.5 * (self.domain_right_edge + self.domain_left_edge)
            self.domain_width = self.domain_right_edge - self.domain_left_edge
        if not isinstance(self.current_time, YTQuantity):
            self.current_time = self.quan(self.current_time, "code_time")
        for attr in ("center", "width", "left_edge", "right_edge"):
            n = "domain_%s" % attr
            v = getattr(self, n)
            if not isinstance(v, YTArray) and v is not None:
                # Note that we don't add on _ipython_display_ here because
                # everything is stored inside a MutableAttribute.
                v = self.arr(v, "code_length")
            setattr(self, n, v)
    def __reduce__(self):
        # Pickle support: datasets are reconstructed from their hash via
        # _reconstruct_ds rather than by serializing their full state.
        args = (self._hash(),)
        return (_reconstruct_ds, args)

    def __repr__(self):
        return self.basename

    def _hash(self):
        """Return a best-effort unique identifier string for this dataset."""
        s = "%s;%s;%s" % (self.basename, self.current_time, self.unique_identifier)
        try:
            import hashlib

            return hashlib.md5(s.encode("utf-8")).hexdigest()
        except ImportError:
            # Fall back to a sanitized plain string when hashlib is missing.
            return s.replace(";", "*")
    # Lazily-computed md5 digest, cached after first access.
    _checksum = None

    @property
    def checksum(self):
        """
        Computes md5 sum of a dataset.

        Note: Currently this property is unable to determine a complete set of
        files that are a part of a given dataset. As a first approximation, the
        checksum of :py:attr:`~parameter_file` is calculated. In case
        :py:attr:`~parameter_file` is a directory, checksum of all files inside
        the directory is calculated.
        """
        if self._checksum is None:
            try:
                import hashlib
            except ImportError:
                self._checksum = "nohashlib"
                return self._checksum

            def generate_file_md5(m, filename, blocksize=2 ** 20):
                # Stream the file through the digest in blocks to bound memory.
                with open(filename, "rb") as f:
                    while True:
                        buf = f.read(blocksize)
                        if not buf:
                            break
                        m.update(buf)

            m = hashlib.md5()
            if os.path.isdir(self.parameter_filename):
                # Directory datasets: fold every contained file into one digest.
                for root, _, files in os.walk(self.parameter_filename):
                    for fname in files:
                        fname = os.path.join(root, fname)
                        generate_file_md5(m, fname)
            elif os.path.isfile(self.parameter_filename):
                generate_file_md5(m, self.parameter_filename)
            else:
                m = "notafile"

            if hasattr(m, "hexdigest"):
                m = m.hexdigest()
            self._checksum = m
        return self._checksum
    @property
    def _mrep(self):
        # Minimal representation used for serialization/upload machinery.
        return MinimalDataset(self)

    @property
    def _skip_cache(self):
        # Subclasses may opt out of the module-level dataset cache.
        return False

    @classmethod
    def _guess_candidates(cls, base, directories, files):
        """
        This is a class method that accepts a directory (base), a list of files
        in that directory, and a list of subdirectories. It should return a
        list of filenames (defined relative to the supplied directory) and a
        boolean as to whether or not further directories should be recursed.

        This function doesn't need to catch all possibilities, nor does it need
        to filter possibilities.
        """
        return [], True

    def close(self):
        # No-op by default; frontends holding file handles override this.
        pass

    def __getitem__(self, key):
        """ Returns units, parameters, or conversion_factors in that order. """
        return self.parameters[key]

    def __iter__(self):
        # Iterating a dataset iterates its parameter names.
        for i in self.parameters:
            yield i
def get_smallest_appropriate_unit(
self, v, quantity="distance", return_quantity=False
):
"""
Returns the largest whole unit smaller than the YTQuantity passed to
it as a string.
The quantity keyword can be equal to `distance` or `time`. In the
case of distance, the units are: 'Mpc', 'kpc', 'pc', 'au', 'rsun',
'km', etc. For time, the units are: 'Myr', 'kyr', 'yr', 'day', 'hr',
's', 'ms', etc.
If return_quantity is set to True, it finds the largest YTQuantity
object with a whole unit and a power of ten as the coefficient, and it
returns this YTQuantity.
"""
good_u = None
if quantity == "distance":
unit_list = [
"Ppc",
"Tpc",
"Gpc",
"Mpc",
"kpc",
"pc",
"au",
"rsun",
"km",
"cm",
"um",
"nm",
"pm",
]
elif quantity == "time":
unit_list = [
"Yyr",
"Zyr",
"Eyr",
"Pyr",
"Tyr",
"Gyr",
"Myr",
"kyr",
"yr",
"day",
"hr",
"s",
"ms",
"us",
"ns",
"ps",
"fs",
]
else:
raise SyntaxError(
"Specified quantity must be equal to 'distance'" "or 'time'."
)
for unit in unit_list:
uq = self.quan(1.0, unit)
if uq <= v:
good_u = unit
break
if good_u is None and quantity == "distance":
good_u = "cm"
if good_u is None and quantity == "time":
good_u = "s"
if return_quantity:
unit_index = unit_list.index(good_u)
# This avoids indexing errors
if unit_index == 0:
return self.quan(1, unit_list[0])
# Number of orders of magnitude between unit and next one up
OOMs = np.ceil(
np.log10(
self.quan(1, unit_list[unit_index - 1])
/ self.quan(1, unit_list[unit_index])
)
)
# Backwards order of coefficients (e.g. [100, 10, 1])
coeffs = 10 ** np.arange(OOMs)[::-1]
for j in coeffs:
uq = self.quan(j, good_u)
if uq <= v:
return uq
else:
return good_u
    def has_key(self, key):
        """
        Checks units, parameters, and conversion factors. Returns a boolean.
        """
        return key in self.parameters
    # Lazily-built index object, shared by all accesses of ds.index.
    _instantiated_index = None

    @property
    def index(self):
        """The index (hierarchy) object, constructed on first access."""
        if self._instantiated_index is None:
            if self._index_class is None:
                raise RuntimeError("You should not instantiate Dataset.")
            self._instantiated_index = self._index_class(
                self, dataset_type=self.dataset_type
            )
            # Now we do things that we need an instantiated index for
            # ...first off, we create our field_info now.
            # numpy floating-point warnings are silenced during field setup
            # and the previous error state restored afterwards.
            oldsettings = np.geterr()
            np.seterr(all="ignore")
            self.create_field_info()
            np.seterr(**oldsettings)
        return self._instantiated_index
    # Lazily-built backwards-compatibility proxy (ds.h / ds.hierarchy).
    _index_proxy = None

    @property
    def h(self):
        if self._index_proxy is None:
            self._index_proxy = IndexProxy(self)
        return self._index_proxy

    # Legacy alias for the proxy property.
    hierarchy = h
    @parallel_root_only
    def print_key_parameters(self):
        """Log the dataset's principal parameters (root processor only)."""
        for a in [
            "current_time",
            "domain_dimensions",
            "domain_left_edge",
            "domain_right_edge",
            "cosmological_simulation",
        ]:
            if not hasattr(self, a):
                mylog.error("Missing %s in parameter file definition!", a)
                continue
            v = getattr(self, a)
            mylog.info("Parameters: %-25s = %s", a, v)
        if hasattr(self, "cosmological_simulation") and self.cosmological_simulation:
            # Cosmological runs carry an extra set of parameters.
            for a in [
                "current_redshift",
                "omega_lambda",
                "omega_matter",
                "omega_radiation",
                "hubble_constant",
            ]:
                if not hasattr(self, a):
                    mylog.error("Missing %s in parameter file definition!", a)
                    continue
                v = getattr(self, a)
                mylog.info("Parameters: %-25s = %s", a, v)

    @parallel_root_only
    def print_stats(self):
        # Delegates to the index's statistics printer.
        self.index.print_stats()

    @property
    def field_list(self):
        # On-disk field list discovered by the index.
        return self.index.field_list
    def create_field_info(self):
        """Build the field_info container: fluid fields, per-particle-type
        fields, the implicit 'all'/'nbody' particle unions and all derived
        fields. Called once, from the index property."""
        self.field_dependencies = {}
        self.derived_field_list = []
        self.filtered_particle_types = []
        self.field_info = self._field_info_class(self, self.field_list)
        self.coordinates.setup_fields(self.field_info)
        self.field_info.setup_fluid_fields()
        for ptype in self.particle_types:
            self.field_info.setup_particle_fields(ptype)
        self.field_info.setup_fluid_index_fields()
        if "all" not in self.particle_types:
            mylog.debug("Creating Particle Union 'all'")
            pu = ParticleUnion("all", list(self.particle_types_raw))
            nfields = self.add_particle_union(pu)
            if nfields == 0:
                mylog.debug("zero common fields: skipping particle union 'all'")
        if "nbody" not in self.particle_types:
            mylog.debug("Creating Particle Union 'nbody'")
            ptypes = list(self.particle_types_raw)
            # SPH particle types are excluded from the nbody union.
            if hasattr(self, "_sph_ptypes"):
                for sph_ptype in self._sph_ptypes:
                    if sph_ptype in ptypes:
                        ptypes.remove(sph_ptype)
            if ptypes:
                nbody_ptypes = []
                for ptype in ptypes:
                    # Only massive particle types qualify as nbody particles.
                    if (ptype, "particle_mass") in self.field_info:
                        nbody_ptypes.append(ptype)
                pu = ParticleUnion("nbody", nbody_ptypes)
                nfields = self.add_particle_union(pu)
                if nfields == 0:
                    mylog.debug("zero common fields, skipping particle union 'nbody'")
        self.field_info.setup_extra_union_fields()
        mylog.debug("Loading field plugins.")
        self.field_info.load_all_plugins(self.default_fluid_type)
        deps, unloaded = self.field_info.check_derived_fields()
        self.field_dependencies.update(deps)
        self.fields = FieldTypeContainer(self)
        self.index.field_list = sorted(self.field_list)
        # Reset the field-lookup memoization cache.
        self._last_freq = (None, None)
    def set_field_label_format(self, format_property, value):
        """
        Set format properties for how fields will be written
        out. Accepts

        format_property : string indicating what property to set
        value: the value to set for that format_property
        """
        available_formats = {"ionization_label": ("plus_minus", "roman_numeral")}
        if format_property in available_formats:
            if value in available_formats[format_property]:
                # Stored as e.g. self._ionization_label_format.
                setattr(self, "_%s_format" % format_property, value)
            else:
                raise ValueError(
                    "{0} not an acceptable value for format_property "
                    "{1}. Choices are {2}.".format(
                        value, format_property, available_formats[format_property]
                    )
                )
        else:
            raise ValueError(
                "{0} not a recognized format_property. Available"
                "properties are: {1}".format(
                    format_property, list(available_formats.keys())
                )
            )
    def setup_deprecated_fields(self):
        """Alias old field names (from _field_name_aliases) to their current
        equivalents in the 'gas' field type, where they exist."""
        from yt.fields.field_aliases import _field_name_aliases

        added = []
        for old_name, new_name in _field_name_aliases:
            try:
                fi = self._get_field_info(new_name)
            except YTFieldNotFound:
                # The new field doesn't exist in this dataset; skip the alias.
                continue
            self.field_info.alias(("gas", old_name), fi.name)
            added.append(("gas", old_name))
        self.field_info.find_dependencies(added)
    def _setup_coordinate_handler(self):
        """Instantiate the coordinate handler matching ``self.geometry``.

        ``geometry`` may be a name string, a (name, axis-ordering) tuple,
        an already-built CoordinateHandler, or a handler class/callable.
        """
        kwargs = {}
        if isinstance(self.geometry, tuple):
            self.geometry, ordering = self.geometry
            kwargs["ordering"] = ordering
        if isinstance(self.geometry, CoordinateHandler):
            # I kind of dislike this. The geometry field should always be a
            # string, but the way we're set up with subclassing, we can't
            # mandate that quite the way I'd like.
            self.coordinates = self.geometry
            return
        elif callable(self.geometry):
            cls = self.geometry
        elif self.geometry == "cartesian":
            cls = CartesianCoordinateHandler
        elif self.geometry == "cylindrical":
            cls = CylindricalCoordinateHandler
        elif self.geometry == "polar":
            cls = PolarCoordinateHandler
        elif self.geometry == "spherical":
            cls = SphericalCoordinateHandler
            # Angular coordinates have no CGS-length equivalent.
            self.no_cgs_equiv_length = True
        elif self.geometry == "geographic":
            cls = GeographicCoordinateHandler
            self.no_cgs_equiv_length = True
        elif self.geometry == "internal_geographic":
            cls = InternalGeographicCoordinateHandler
            self.no_cgs_equiv_length = True
        elif self.geometry == "spectral_cube":
            cls = SpectralCubeCoordinateHandler
        else:
            raise YTGeometryNotSupported(self.geometry)
        self.coordinates = cls(self, **kwargs)
    def add_particle_union(self, union):
        """Register a ParticleUnion and expose the fields its member particle
        types have in common under the union's name. Returns the number of
        fields added (0 means the union was not created)."""
        # No string lookups here, we need an actual union.
        f = self.particle_fields_by_type

        # find fields common to all particle types in the union
        fields = set_intersection([f[s] for s in union if s in self.particle_types_raw])
        if len(fields) == 0:
            # don't create this union if no fields are common to all
            # particle types
            return len(fields)

        for field in fields:
            units = set([])
            for s in union:
                # First we check our existing fields for units
                funits = self._get_field_info(s, field).units
                # Then we override with field_units settings.
                funits = self.field_units.get((s, field), funits)
                units.add(funits)
            # Only record a unit when all member types agree on it.
            if len(units) == 1:
                self.field_units[union.name, field] = list(units)[0]
        self.particle_types += (union.name,)
        self.particle_unions[union.name] = union
        fields = [(union.name, field) for field in fields]
        new_fields = [_ for _ in fields if _ not in self.field_list]
        self.field_list.extend(new_fields)
        new_field_info_fields = [
            _ for _ in fields if _ not in self.field_info.field_list
        ]
        self.field_info.field_list.extend(new_field_info_fields)
        self.index.field_list = sorted(self.field_list)
        # Give ourselves a chance to add them here, first, then...
        # ...if we can't find them, we set them up as defaults.
        new_fields = self._setup_particle_types([union.name])
        self.field_info.find_dependencies(new_fields)
        return len(new_fields)

    def add_particle_filter(self, filter):
        """Add particle filter to the dataset.

        Add ``filter`` to the dataset and set up relevant derived_field.

        It will also add any ``filtered_type`` that the ``filter`` depends on.
        """
        # This requires an index
        self.index
        # This is a dummy, which we set up to enable passthrough of "all"
        # concatenation fields.
        n = getattr(filter, "name", filter)
        self.known_filters[n] = None
        if isinstance(filter, str):
            used = False
            # Filter given by name: resolve it through the registry.
            f = filter_registry.get(filter, None)
            if f is None:
                return False
            used = self._setup_filtered_type(f)
            if used:
                filter = f
        else:
            used = self._setup_filtered_type(filter)
        if not used:
            # Setup failed; withdraw the placeholder registration.
            self.known_filters.pop(n, None)
            return False
        self.known_filters[filter.name] = filter
        return True
    def _setup_filtered_type(self, filter):
        """Wire a particle filter into field_info: add its dependency filter
        (recursively) and clone every field of the filtered type under the
        filter's name. Returns True when any fields were available."""
        # Check if the filtered_type of this filter is known,
        # otherwise add it first if it is in the filter_registry
        if filter.filtered_type not in self.known_filters.keys():
            if filter.filtered_type in filter_registry:
                add_success = self.add_particle_filter(filter.filtered_type)
                if add_success:
                    mylog.info(
                        "Added filter dependency '%s' for '%s'",
                        filter.filtered_type,
                        filter.name,
                    )
        if not filter.available(self.derived_field_list):
            raise YTIllDefinedParticleFilter(
                filter, filter.missing(self.derived_field_list)
            )
        fi = self.field_info
        fd = self.field_dependencies
        available = False
        for fn in self.derived_field_list:
            if fn[0] == filter.filtered_type:
                # Now we can add this
                available = True
                self.derived_field_list.append((filter.name, fn[1]))
                fi[filter.name, fn[1]] = filter.wrap_func(fn, fi[fn])
                # Now we append the dependencies
                fd[filter.name, fn[1]] = fd[fn]
        if available:
            if filter.name not in self.particle_types:
                self.particle_types += (filter.name,)
            if filter.name not in self.filtered_particle_types:
                self.filtered_particle_types.append(filter.name)
            if hasattr(self, "_sph_ptypes"):
                if filter.filtered_type == self._sph_ptypes[0]:
                    mylog.warning(
                        "It appears that you are filtering on an SPH field "
                        "type. It is recommended to use 'gas' as the "
                        "filtered particle type in this case instead."
                    )
                if filter.filtered_type in (self._sph_ptypes + ("gas",)):
                    # Filters over SPH types are themselves SPH types.
                    self._sph_ptypes = self._sph_ptypes + (filter.name,)
            new_fields = self._setup_particle_types([filter.name])
            deps, _ = self.field_info.check_derived_fields(new_fields)
            self.field_dependencies.update(deps)
        return available
def _setup_particle_types(self, ptypes=None):
df = []
if ptypes is None:
ptypes = self.ds.particle_types_raw
for ptype in set(ptypes):
df += self._setup_particle_type(ptype)
return df
    # Memoization of the most recent field lookup (field key and its info).
    _last_freq = (None, None)
    _last_finfo = None

    def _get_field_info(self, ftype, fname=None):
        """Resolve (ftype, fname) to a DerivedField, guessing the field type
        when only a name (or a DerivedField) is supplied.

        Raises YTFieldNotFound when no candidate type matches.
        """
        self.index
        if fname is None:
            if isinstance(ftype, DerivedField):
                ftype, fname = ftype.name
            else:
                ftype, fname = "unknown", ftype
        guessing_type = False
        if ftype == "unknown":
            guessing_type = True
            # Start from the last successfully resolved type, if any.
            ftype = self._last_freq[0] or ftype
        field = (ftype, fname)
        if field == self._last_freq:
            if field not in self.field_info.field_aliases.values():
                return self._last_finfo
        if field in self.field_info:
            self._last_freq = field
            self._last_finfo = self.field_info[(ftype, fname)]
            return self._last_finfo
        if fname in self.field_info:
            # Sometimes, if guessing_type == True, this will be switched for
            # the type of field it is. So we look at the field type and
            # determine if we need to change the type.
            fi = self._last_finfo = self.field_info[fname]
            if (
                fi.sampling_type == "particle"
                and self._last_freq[0] not in self.particle_types
            ):
                field = "all", field[1]
            elif (
                not fi.sampling_type == "particle"
                and self._last_freq[0] not in self.fluid_types
            ):
                field = self.default_fluid_type, field[1]
            self._last_freq = field
            return self._last_finfo
        # We also should check "all" for particles, which can show up if you're
        # mixing deposition/gas fields with particle fields.
        if guessing_type:
            if hasattr(self, "_sph_ptype"):
                to_guess = [self.default_fluid_type, "all"]
            else:
                to_guess = ["all", self.default_fluid_type]
            to_guess += list(self.fluid_types) + list(self.particle_types)
            for ftype in to_guess:
                if (ftype, fname) in self.field_info:
                    self._last_freq = (ftype, fname)
                    self._last_finfo = self.field_info[(ftype, fname)]
                    return self._last_finfo
        raise YTFieldNotFound((ftype, fname), self)
    def _setup_classes(self):
        """Attach every registered data-object class (sphere, region, ...) as
        a constructor attribute on this dataset."""
        # Called by subclass
        self.object_types = []
        self.objects = []
        self.plots = []
        for name, cls in sorted(data_object_registry.items()):
            if name in self._index_class._unsupported_objects:
                # Unsupported objects become callables that raise on use.
                setattr(self, name, _unsupported_object(self, name))
                continue
            self._add_object_class(name, cls)
        self.object_types.sort()

    def _add_object_class(self, name, base):
        """Bind one data-object class to this dataset under *name*."""
        # skip projection data objects that don't make sense
        # for this type of data
        if "proj" in name and name != self._proj_type:
            return
        elif "proj" in name:
            # The selected projection type is always exposed as "proj".
            name = "proj"
        self.object_types.append(name)
        # Weak ref prevents the bound constructor from keeping ds alive.
        obj = functools.partial(base, ds=weakref.proxy(self))
        obj.__doc__ = base.__doc__
        setattr(self, name, obj)
    def find_max(self, field):
        """
        Returns (value, location) of the maximum of a given field.
        """
        mylog.debug("Searching for maximum value of %s", field)
        source = self.all_data()
        max_val, mx, my, mz = source.quantities.max_location(field)
        # This is a hack to fix the fact that some non-cartesian datasets have
        # dimensionless quantities, and we can't yet handle that.
        if mx.units.is_dimensionless:
            mx = self.quan(mx.v, "code_length")
        if my.units.is_dimensionless:
            my = self.quan(my.v, "code_length")
        if mz.units.is_dimensionless:
            mz = self.quan(mz.v, "code_length")
        center = self.arr([mx, my, mz], dtype="float64").to("code_length")
        mylog.info(
            "Max Value is %0.5e at %0.16f %0.16f %0.16f",
            max_val,
            center[0],
            center[1],
            center[2],
        )
        return max_val, center
def find_min(self, field):
"""
Returns (value, location) for the minimum of a given field.
"""
mylog.debug("Searching for minimum value of %s", field)
source = self.all_data()
min_val, mx, my, mz = source.quantities.min_location(field)
center = self.arr([mx, my, mz], dtype="float64").to("code_length")
mylog.info(
"Min Value is %0.5e at %0.16f %0.16f %0.16f",
min_val,
center[0],
center[1],
center[2],
)
return min_val, center
def find_field_values_at_point(self, fields, coords):
"""
Returns the values [field1, field2,...] of the fields at the given
coordinates. Returns a list of field values in the same order as
the input *fields*.
"""
point = self.point(coords)
ret = []
field_list = ensure_list(fields)
for field in field_list:
ret.append(point[field])
if len(field_list) == 1:
return ret[0]
else:
return ret
def find_field_values_at_points(self, fields, coords):
"""
Returns the values [field1, field2,...] of the fields at the given
[(x1, y1, z2), (x2, y2, z2),...] points. Returns a list of field
values in the same order as the input *fields*.
"""
# If an optimized version exists on the Index object we'll use that
try:
return self.index._find_field_values_at_points(fields, coords)
except AttributeError:
pass
fields = ensure_list(fields)
out = []
# This may be slow because it creates a data object for each point
for field_index, field in enumerate(fields):
funit = self._get_field_info(field).units
out.append(self.arr(np.empty((len(coords),)), funit))
for coord_index, coord in enumerate(coords):
out[field_index][coord_index] = self.point(coord)[field]
if len(fields) == 1:
return out[0]
else:
return out
# Now all the object related stuff
def all_data(self, find_max=False, **kwargs):
    """
    all_data is a wrapper to the Region object for creating a region
    which covers the entire simulation domain.

    Parameters
    ----------
    find_max : bool
        If True, center the region on the location of the maximum
        density; otherwise center on the domain midpoint.
    **kwargs
        Forwarded to the region data-object constructor.
    """
    self.index  # touching .index forces the index to be built first
    if find_max:
        c = self.find_max("density")[1]
    else:
        c = (self.domain_right_edge + self.domain_left_edge) / 2.0
    return self.region(c, self.domain_left_edge, self.domain_right_edge, **kwargs)
def box(self, left_edge, right_edge, **kwargs):
    """
    box is a wrapper to the Region object for creating a region
    without having to specify a *center* value. It assumes the center
    is the midpoint between the left_edge and right_edge.

    Keyword arguments are passed to the initializer of the YTRegion object
    (e.g. ds.region).
    """
    # we handle units in the region data object
    # but need to check if left_edge or right_edge is a
    # list or other non-array iterable before calculating
    # the center
    if isinstance(left_edge[0], YTQuantity):
        # Promote lists of quantities to arrays so midpoint math works.
        left_edge = YTArray(left_edge)
        right_edge = YTArray(right_edge)
    left_edge = np.asanyarray(left_edge, dtype="float64")
    right_edge = np.asanyarray(right_edge, dtype="float64")
    c = (left_edge + right_edge) / 2.0
    return self.region(c, left_edge, right_edge, **kwargs)
def _setup_particle_type(self, ptype):
orig = set(self.field_info.items())
self.field_info.setup_particle_fields(ptype)
return [n for n, v in set(self.field_info.items()).difference(orig)]
@property
def particle_fields_by_type(self):
    # Group on-disk particle field names by their particle type, e.g.
    # {"io": ["particle_mass", ...]}.  Only raw particle types are included.
    fields = defaultdict(list)
    for field in self.field_list:
        if field[0] in self.particle_types_raw:
            fields[field[0]].append(field[1])
    return fields
@property
def particles_exist(self):
    # True if any raw particle type appears as a field type in field_list.
    for pt, f in itertools.product(self.particle_types_raw, self.field_list):
        if pt == f[0]:
            return True
    return False
@property
def particle_type_counts(self):
    # Mapping of particle type -> particle count; empty if no particles.
    self.index  # ensure the index exists before querying counts
    if not self.particles_exist:
        return {}
    # frontends or index implementation can populate this dict while
    # creating the index if they know particle counts at that time
    if self._particle_type_counts is not None:
        return self._particle_type_counts
    self._particle_type_counts = self.index._get_particle_type_counts()
    return self._particle_type_counts
@property
def ires_factor(self):
    """Return log2 of the refinement factor.

    Raises
    ------
    RuntimeError
        If ``refine_by`` is not a power of two (log2 is non-integral).
    """
    o2 = np.log2(self.refine_by)
    if o2 != int(o2):
        # Previously raised a bare RuntimeError; include the offending
        # value so the failure is diagnosable.
        raise RuntimeError(
            "refine_by (%s) is not a power of 2; cannot compute ires_factor"
            % self.refine_by
        )
    return int(o2)
def relative_refinement(self, l0, l1):
    """Return the refinement ratio between AMR levels *l0* and *l1*."""
    level_difference = l1 - l0
    return self.refine_by ** level_difference
def _assign_unit_system(self, unit_system):
    # Attach a unit system ("cgs", "mks", "code", ...) to this dataset and
    # reconcile the magnetic code unit with that system's conventions.
    # NOTE(review): `current_mks` is imported elsewhere in this module
    # (not visible here); it denotes the MKS current dimension (ampere).
    if unit_system == "cgs":
        current_mks_unit = None
    else:
        current_mks_unit = "A"
    magnetic_unit = getattr(self, "magnetic_unit", None)
    if magnetic_unit is not None:
        if unit_system == "mks":
            # Convert a CGS-dimensioned magnetic unit (gauss) into tesla.
            if current_mks not in self.magnetic_unit.units.dimensions.free_symbols:
                self.magnetic_unit = self.magnetic_unit.to("gauss").to("T")
            self.unit_registry.modify("code_magnetic", self.magnetic_unit.value)
        else:
            # if the magnetic unit is in T, we need to create the code unit
            # system as an MKS-like system
            if current_mks in self.magnetic_unit.units.dimensions.free_symbols:
                self.magnetic_unit = self.magnetic_unit.to("T").to("gauss")
            # The following modification ensures that we get the conversion to
            # cgs correct
            self.unit_registry.modify(
                "code_magnetic", self.magnetic_unit.value * 0.1 ** 0.5
            )
    us = create_code_unit_system(
        self.unit_registry, current_mks_unit=current_mks_unit
    )
    # Non-"code" systems resolve to a pre-registered named unit system.
    if unit_system != "code":
        us = unit_system_registry[str(unit_system).lower()]
    self.unit_system = us
    self.unit_registry.unit_system = self.unit_system
def _create_unit_registry(self, unit_system):
    # Build this dataset's unit registry, seeding placeholder "code_*"
    # units that set_code_units() later overwrites with real values.
    import yt.units.dimensions as dimensions

    # yt assumes a CGS unit system by default (for back compat reasons).
    # Since unyt is MKS by default we specify the MKS values of the base
    # units in the CGS system. So, for length, 1 cm = .01 m. And so on.
    self.unit_registry = UnitRegistry(unit_system=unit_system)
    self.unit_registry.add("code_length", 0.01, dimensions.length)
    self.unit_registry.add("code_mass", 0.001, dimensions.mass)
    self.unit_registry.add("code_density", 1000.0, dimensions.density)
    self.unit_registry.add(
        "code_specific_energy", 1.0, dimensions.energy / dimensions.mass
    )
    self.unit_registry.add("code_time", 1.0, dimensions.time)
    if unit_system == "mks":
        self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
    else:
        # 0.1**0.5 is the gauss <-> tesla conversion factor in base units.
        self.unit_registry.add(
            "code_magnetic", 0.1 ** 0.5, dimensions.magnetic_field_cgs
        )
    self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
    self.unit_registry.add("code_pressure", 0.1, dimensions.pressure)
    self.unit_registry.add("code_velocity", 0.01, dimensions.velocity)
    self.unit_registry.add("code_metallicity", 1.0, dimensions.dimensionless)
    # "h" and "a" support comoving/cosmological unit conversions.
    self.unit_registry.add("h", 1.0, dimensions.dimensionless, r"h")
    self.unit_registry.add("a", 1.0, dimensions.dimensionless)
def set_units(self):
    """
    Creates the unit registry for this dataset.

    For cosmological datasets this also registers comoving length units
    ("mcm", "pccm", ...) scaled by 1/(1+z), and constructs a Cosmology
    object plus derived quantities (critical density, scale factor).
    """
    from yt.units.dimensions import length

    if getattr(self, "cosmological_simulation", False):
        # this dataset is cosmological, so add cosmological units.
        self.unit_registry.modify("h", self.hubble_constant)
        # Comoving lengths
        for my_unit in ["m", "pc", "AU", "au"]:
            new_unit = "%scm" % my_unit
            my_u = Unit(my_unit, registry=self.unit_registry)
            self.unit_registry.add(
                new_unit,
                my_u.base_value / (1 + self.current_redshift),
                length,
                "\\rm{%s}/(1+z)" % my_unit,
                prefixable=True,
            )
        # Scale factor a = 1/(1+z).
        self.unit_registry.modify("a", 1 / (1 + self.current_redshift))
    self.set_code_units()
    if getattr(self, "cosmological_simulation", False):
        # this dataset is cosmological, add a cosmology object
        # Set dynamical dark energy parameters
        use_dark_factor = getattr(self, "use_dark_factor", False)
        w_0 = getattr(self, "w_0", -1.0)
        w_a = getattr(self, "w_a", 0.0)
        # many frontends do not set this
        setdefaultattr(self, "omega_radiation", 0.0)
        self.cosmology = Cosmology(
            hubble_constant=self.hubble_constant,
            omega_matter=self.omega_matter,
            omega_lambda=self.omega_lambda,
            omega_radiation=self.omega_radiation,
            use_dark_factor=use_dark_factor,
            w_0=w_0,
            w_a=w_a,
        )
        self.critical_density = self.cosmology.critical_density(
            self.current_redshift
        )
        self.scale_factor = 1.0 / (1.0 + self.current_redshift)
def get_unit_from_registry(self, unit_str):
    """
    Creates a unit object matching the string expression, using this
    dataset's unit registry.

    Parameters
    ----------
    unit_str : str
        string that we can parse for a sympy Expr.
    """
    return Unit(unit_str, registry=self.unit_registry)
def set_code_units(self):
    """Resolve the placeholder "code_*" units against this dataset's
    actual unit attributes (length_unit, mass_unit, ...)."""
    # here we override units, if overrides have been provided.
    self._override_code_units()
    # set attributes like ds.length_unit
    self._set_code_unit_attributes()
    self.unit_registry.modify("code_length", self.length_unit)
    self.unit_registry.modify("code_mass", self.mass_unit)
    self.unit_registry.modify("code_time", self.time_unit)
    # Derived units fall back to combinations of the base units when the
    # frontend has not set them explicitly.
    vel_unit = getattr(self, "velocity_unit", self.length_unit / self.time_unit)
    pressure_unit = getattr(
        self,
        "pressure_unit",
        self.mass_unit / (self.length_unit * (self.time_unit) ** 2),
    )
    temperature_unit = getattr(self, "temperature_unit", 1.0)
    density_unit = getattr(
        self, "density_unit", self.mass_unit / self.length_unit ** 3
    )
    specific_energy_unit = getattr(self, "specific_energy_unit", vel_unit ** 2)
    self.unit_registry.modify("code_velocity", vel_unit)
    self.unit_registry.modify("code_temperature", temperature_unit)
    self.unit_registry.modify("code_pressure", pressure_unit)
    self.unit_registry.modify("code_density", density_unit)
    self.unit_registry.modify("code_specific_energy", specific_energy_unit)
    # domain_width does not yet exist
    if self.domain_left_edge is not None and self.domain_right_edge is not None:
        # "unitary" normalizes lengths to the widest domain dimension.
        DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
        self.unit_registry.add(
            "unitary", float(DW.max() * DW.units.base_value), DW.units.dimensions
        )
def _override_code_units(self):
    # Apply user-supplied unit overrides (self.units_override) to the
    # base code units. Values may be YTQuantity, (value, unit) tuples,
    # or bare numbers (interpreted in the listed CGS unit).
    if len(self.units_override) == 0:
        return
    mylog.warning(
        "Overriding code units: Use this option only if you know that the "
        "dataset doesn't define the units correctly or at all."
    )
    for unit, cgs in [
        ("length", "cm"),
        ("time", "s"),
        ("mass", "g"),
        ("velocity", "cm/s"),
        ("magnetic", "gauss"),
        ("temperature", "K"),
    ]:
        val = self.units_override.get("%s_unit" % unit, None)
        if val is not None:
            # Normalize every accepted form to a (value, unit-string) pair.
            if isinstance(val, YTQuantity):
                val = (val.v, str(val.units))
            elif not isinstance(val, tuple):
                val = (val, cgs)
            mylog.info("Overriding %s_unit: %g %s.", unit, val[0], val[1])
            setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
# Cache for the UnitContainer and the registry state it was built against.
_units = None
_unit_system_id = None

@property
def units(self):
    # Rebuild the container only when the registry's unit system changed.
    current_uid = self.unit_registry.unit_system_id
    if self._units is not None and self._unit_system_id == current_uid:
        return self._units
    self._unit_system_id = current_uid
    self._units = UnitContainer(self.unit_registry)
    return self._units
# Lazily-built YTArray factory bound to this dataset's unit registry.
_arr = None

@property
def arr(self):
    """Converts an array into a :class:`yt.units.yt_array.YTArray`

    The returned YTArray will be dimensionless by default, but can be
    cast to arbitrary units using the ``units`` keyword argument.

    Parameters
    ----------
    input_array : Iterable
        A tuple, list, or array to attach units to
    units: String unit specification, unit symbol or astropy object
        The units of the array. Powers must be specified using python syntax
        (cm**3, not cm^3).
    input_units : Deprecated in favor of 'units'
    dtype : string or NumPy dtype object
        The dtype of the returned array data

    Examples
    --------
    >>> import yt
    >>> import numpy as np
    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    >>> a = ds.arr([1, 2, 3], 'cm')
    >>> b = ds.arr([4, 5, 6], 'm')
    >>> a + b
    YTArray([ 401.,  502.,  603.]) cm
    >>> b + a
    YTArray([ 4.01,  5.02,  6.03]) m

    Arrays returned by this function know about the dataset's unit system

    >>> a = ds.arr(np.ones(5), 'code_length')
    >>> a.in_units('Mpccm/h')
    YTArray([ 1.00010449,  1.00010449,  1.00010449,  1.00010449,
             1.00010449]) Mpc
    """
    if self._arr is not None:
        return self._arr
    # Bind the registry once; the partial is cached for later calls.
    self._arr = functools.partial(YTArray, registry=self.unit_registry)
    return self._arr
# Lazily-built YTQuantity factory bound to this dataset's unit registry.
_quan = None

@property
def quan(self):
    """Converts a scalar into a :class:`yt.units.yt_array.YTQuantity`

    The returned YTQuantity will be dimensionless by default, but can be
    cast to arbitrary units using the ``units`` keyword argument.

    Parameters
    ----------
    input_scalar : an integer or floating point scalar
        The scalar to attach units to
    units: String unit specification, unit symbol or astropy object
        The units of the quantity. Powers must be specified using python
        syntax (cm**3, not cm^3).
    input_units : Deprecated in favor of 'units'
    dtype : string or NumPy dtype object
        The dtype of the array data.

    Examples
    --------
    >>> import yt
    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    >>> a = ds.quan(1, 'cm')
    >>> b = ds.quan(2, 'm')
    >>> a + b
    201.0 cm
    >>> b + a
    2.01 m

    Quantities created this way automatically know about the unit system
    of the dataset.

    >>> a = ds.quan(5, 'code_length')
    >>> a.in_cgs()
    1.543e+25 cm
    """
    if self._quan is not None:
        return self._quan
    # Bind the registry once; the partial is cached for later calls.
    self._quan = functools.partial(YTQuantity, registry=self.unit_registry)
    return self._quan
def add_field(self, name, function=None, sampling_type=None, **kwargs):
    """
    Dataset-specific call to add_field

    Add a new field, along with supplemental metadata, to the list of
    available fields.  This respects a number of arguments, all of which
    are passed on to the constructor for
    :class:`~yt.data_objects.api.DerivedField`.

    Parameters
    ----------
    name : str
       is the name of the field.
    function : callable
       A function handle that defines the field.  Should accept
       arguments (field, data)
    units : str
       A plain text string encoding the unit.  Powers must be in
       python syntax (** instead of ^).
    take_log : bool
       Describes whether the field should be logged
    validators : list
       A list of :class:`FieldValidator` objects
    particle_type : bool
       Is this a particle (1D) field?  Deprecated in favor of
       ``sampling_type="particle"``.
    vector_field : bool
       Describes the dimensionality of the field.  Currently unused.
    display_name : str
       A name used in the plots
    force_override : bool
       Whether to override an existing derived field. Does not work with
       on-disk fields.

    Raises
    ------
    RuntimeError
       If *force_override* is used on an on-disk field, or if
       *particle_type* and *sampling_type* conflict.
    """
    self.index  # ensure field_list is populated before validation
    override = kwargs.get("force_override", False)
    if override and name in self.index.field_list:
        raise RuntimeError(
            "force_override is only meant to be used with "
            "derived fields, not on-disk fields."
        )
    # Handle the case where the field has already been added.
    if not override and name in self.field_info:
        mylog.error(
            "Field %s already exists. To override use " + "force_override=True.",
            name,
        )
    # Legacy particle_type flag: translate to sampling_type="particle",
    # rejecting contradictory combinations.
    if kwargs.setdefault("particle_type", False):
        if sampling_type is not None and sampling_type != "particle":
            raise RuntimeError(
                "Clashing definition of 'sampling_type' and "
                "'particle_type'. Note that 'particle_type' is "
                "deprecated. Please just use 'sampling_type'."
            )
        else:
            sampling_type = "particle"
    if sampling_type is None:
        warnings.warn(
            "Because 'sampling_type' not specified, yt will "
            "assume a cell 'sampling_type'",
            stacklevel=2,
        )
        sampling_type = "cell"
    self.field_info.add_field(name, sampling_type, function=function, **kwargs)
    self.field_info._show_field_errors.append(name)
    # Re-derive dependency information now that the field exists.
    deps, _ = self.field_info.check_derived_fields([name])
    self.field_dependencies.update(deps)
def add_mesh_sampling_particle_field(self, sample_field, ptype="all"):
    """Add a new mesh sampling particle field

    Creates a new particle field which has the value of the
    *deposit_field* at the location of each particle of type
    *ptype*.

    Parameters
    ----------
    sample_field : tuple
        The field name tuple of the mesh field to be deposited onto
        the particles. This must be a field name tuple so yt can
        appropriately infer the correct particle type.
    ptype : string, default 'all'
        The particle type onto which the deposition will occur.

    Returns
    -------
    The field name tuple for the newly created field.

    Raises
    ------
    RuntimeError
        If *sample_field* is not a (field type, field name) tuple.

    Examples
    --------
    >>> ds = yt.load('output_00080/info_00080.txt')
    ... ds.add_mesh_sampling_particle_field(('gas', 'density'), ptype='all')

    >>> print('The density at the location of the particle is:')
    ... print(ds.r['all', 'cell_gas_density'])
    The density at the location of the particle is:
    [9.33886124e-30 1.22174333e-28 1.20402333e-28 ... 2.77410331e-30
    8.79467609e-31 3.50665136e-30] g/cm**3

    >>> len(ds.r['all', 'cell_gas_density']) == len(ds.r['all', 'particle_ones'])
    True
    """
    # The field type is required to disambiguate the mesh field.
    if isinstance(sample_field, tuple):
        ftype, sample_field = sample_field[0], sample_field[1]
    else:
        raise RuntimeError
    return self.index._add_mesh_sampling_particle_field(sample_field, ftype, ptype)
def add_deposited_particle_field(
    self, deposit_field, method, kernel_name="cubic", weight_field="particle_mass"
):
    """Add a new deposited particle field

    Creates a new deposited field based on the particle *deposit_field*.

    Parameters
    ----------
    deposit_field : tuple
       The field name tuple of the particle field the deposited field will
       be created from.  This must be a field name tuple so yt can
       appropriately infer the correct particle type.
    method : string
       This is the "method name" which will be looked up in the
       `particle_deposit` namespace as `methodname_deposit`.  Current
       methods include `simple_smooth`, `sum`, `std`, `cic`, `weighted_mean`,
       `nearest` and `count`.
    kernel_name : string, default 'cubic'
       This is the name of the smoothing kernel to use. It is only used for
       the `simple_smooth` method and is otherwise ignored. Current
       supported kernel names include `cubic`, `quartic`, `quintic`,
       `wendland2`, `wendland4`, and `wendland6`.
    weight_field : string, default 'particle_mass'
       Weighting field name for deposition method `weighted_mean`.

    Returns
    -------
    The field name tuple for the newly created field.

    Raises
    ------
    RuntimeError
        If *deposit_field* is not a (field type, field name) tuple.
    """
    self.index  # ensure field_info is populated
    if isinstance(deposit_field, tuple):
        ptype, deposit_field = deposit_field[0], deposit_field[1]
    else:
        raise RuntimeError
    units = self.field_info[ptype, deposit_field].output_units
    take_log = self.field_info[ptype, deposit_field].take_log
    # Short method aliases used to build the deposited field's name.
    name_map = {
        "sum": "sum",
        "std": "std",
        "cic": "cic",
        "weighted_mean": "avg",
        "nearest": "nn",
        "simple_smooth": "ss",
        "count": "count",
    }
    field_name = "%s_" + name_map[method] + "_%s"
    field_name = field_name % (ptype, deposit_field.replace("particle_", ""))
    if method == "count":
        # Counting ignores the source field; the result is dimensionless.
        field_name = "%s_count" % ptype
        if ("deposit", field_name) in self.field_info:
            mylog.warning("The deposited field %s already exists" % field_name)
            return ("deposit", field_name)
        else:
            units = "dimensionless"
            take_log = False

    def _deposit_field(field, data):
        """
        Create a grid field for particle quantities using given method.
        """
        pos = data[ptype, "particle_position"]
        fields = [data[ptype, deposit_field]]
        if method == "weighted_mean":
            fields.append(data[ptype, weight_field])
        # Contiguous buffers are required by the deposition kernels.
        fields = [np.ascontiguousarray(f) for f in fields]
        d = data.deposit(pos, fields, method=method, kernel_name=kernel_name)
        d = data.ds.arr(d, units=units)
        if method == "weighted_mean":
            # Cells with zero total weight produce NaN; zero them out.
            d[np.isnan(d)] = 0.0
        return d

    self.add_field(
        ("deposit", field_name),
        function=_deposit_field,
        sampling_type="cell",
        units=units,
        take_log=take_log,
        validators=[ValidateSpatial()],
    )
    return ("deposit", field_name)
def add_smoothed_particle_field(
    self, smooth_field, method="volume_weighted", nneighbors=64, kernel_name="cubic"
):
    """Add a new smoothed particle field

    WARNING: This method is deprecated since yt-4.0.  It only emits a
    deprecation warning and returns None; no field is created.

    Creates a new smoothed field based on the particle *smooth_field*.

    Parameters
    ----------
    smooth_field : tuple
       The field name tuple of the particle field the smoothed field will
       be created from.  This must be a field name tuple so yt can
       appropriately infer the correct particle type.
    method : string, default 'volume_weighted'
       The particle smoothing method to use. Can only be 'volume_weighted'
       for now.
    nneighbors : int, default 64
        The number of neighbors to examine during the process.
    kernel_name : string, default `cubic`
        This is the name of the smoothing kernel to use. Current supported
        kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
        `wendland4`, and `wendland6`.

    Returns
    -------
    The field name tuple for the newly created field.
    """
    issue_deprecation_warning("This method is deprecated. " + DEP_MSG_SMOOTH_FIELD)
def add_gradient_fields(self, input_field):
    """Add gradient fields.

    Creates four new grid-based fields that represent the components of
    the gradient of an existing field, plus an extra field for the magnitude
    of the gradient. Currently only supported in Cartesian geometries. The
    gradient is computed using second-order centered differences.

    Parameters
    ----------
    input_field : tuple
       The field name tuple of the particle field the deposited field will
       be created from.  This must be a field name tuple so yt can
       appropriately infer the correct field type.

    Returns
    -------
    A list of field name tuples for the newly created fields.

    Raises
    ------
    TypeError
       If *input_field* is not a (field type, field name) tuple.

    Examples
    --------
    >>> grad_fields = ds.add_gradient_fields(("gas","temperature"))
    >>> print(grad_fields)
    [('gas', 'temperature_gradient_x'),
     ('gas', 'temperature_gradient_y'),
     ('gas', 'temperature_gradient_z'),
     ('gas', 'temperature_gradient_magnitude')]

    Note that the above example assumes ds.geometry == 'cartesian'. In general, the function
    will create gradients components along the axes of the dataset coordinate system.
    For instance, with cylindrical data, one gets 'temperature_gradient_<r,theta,z>'
    """
    self.index  # ensure field_info is populated
    if not isinstance(input_field, tuple):
        raise TypeError
    ftype, input_field = input_field[0], input_field[1]
    units = self.field_info[ftype, input_field].units
    setup_gradient_fields(self.field_info, (ftype, input_field), units)
    # Now we make a list of the fields that were just made, to check them
    # and to return them
    grad_fields = [
        (ftype, input_field + "_gradient_%s" % suffix)
        for suffix in self.coordinates.axis_order
    ]
    grad_fields.append((ftype, input_field + "_gradient_magnitude"))
    deps, _ = self.field_info.check_derived_fields(grad_fields)
    self.field_dependencies.update(deps)
    return grad_fields
# Cached maximum refinement level; lazily read from the index, but a
# frontend or user may override it via the setter.
_max_level = None

@property
def max_level(self):
    if self._max_level is None:
        self._max_level = self.index.max_level
    return self._max_level

@max_level.setter
def max_level(self, value):
    self._max_level = value

# Cached minimum refinement level, mirroring max_level above.
_min_level = None

@property
def min_level(self):
    if self._min_level is None:
        self._min_level = self.index.min_level
    return self._min_level

@min_level.setter
def min_level(self, value):
    self._min_level = value
def define_unit(self, symbol, value, tex_repr=None, offset=None, prefixable=False):
    """
    Define a new unit and add it to the dataset's unit registry.

    Parameters
    ----------
    symbol : string
        The symbol for the new unit.
    value : tuple or ~yt.units.yt_array.YTQuantity
        The definition of the new unit in terms of some other units. For example,
        one would define a new "mph" unit with (1.0, "mile/hr")
    tex_repr : string, optional
        The LaTeX representation of the new unit. If one is not supplied, it will
        be generated automatically based on the symbol string.
    offset : float, optional
        The default offset for the unit. If not set, an offset of 0 is assumed.
    prefixable : bool, optional
        Whether or not the new unit can use SI prefixes. Default: False

    Examples
    --------
    >>> ds.define_unit("mph", (1.0, "mile/hr"))
    >>> two_weeks = YTQuantity(14.0, "days")
    >>> ds.define_unit("fortnight", two_weeks)
    """
    # Delegate to the module-level define_unit, scoped to this dataset's
    # registry so the new unit is visible only to this dataset.
    define_unit(
        symbol,
        value,
        tex_repr=tex_repr,
        offset=offset,
        prefixable=prefixable,
        registry=self.unit_registry,
    )
def _reconstruct_ds(*args, **kwargs):
    # Rebuild a dataset from its stored hash via the parameter-file store.
    # NOTE(review): *kwargs* is accepted but unused here — presumably kept
    # for unpickling-signature compatibility; confirm against callers.
    datasets = ParameterFileStore()
    ds = datasets.get_ds_hash(*args)
    return ds
@functools.total_ordering
class ParticleFile:
    """A single data file belonging to a particle dataset.

    Tracks the file's name, id, and the [start, end) particle range it
    covers.  Instances order and compare by (filename, start).
    """

    def __init__(self, ds, io, filename, file_id, range=None):
        self.ds = ds
        # Weak proxy avoids a reference cycle between the IO handler and
        # the files it owns.
        self.io = weakref.proxy(io)
        self.filename = filename
        self.file_id = file_id
        if range is None:
            range = (None, None)
        self.start, self.end = range
        self.total_particles = self.io._count_particles(self)
        # Now we adjust our start/end, in case there are fewer particles than
        # we realized
        if self.start is None:
            self.start = 0
        self.end = max(self.total_particles.values()) + self.start

    def select(self, selector):
        # No-op hook; subclasses may implement selection.
        pass

    def count(self, selector):
        # No-op hook; subclasses may implement counting.
        pass

    def _calculate_offsets(self, fields, pcounts):
        # No-op hook; subclasses may implement offset calculation.
        pass

    def __lt__(self, other):
        if not isinstance(other, ParticleFile):
            return NotImplemented
        if self.filename != other.filename:
            return self.filename < other.filename
        return self.start < other.start

    def __eq__(self, other):
        if not isinstance(other, ParticleFile):
            return NotImplemented
        if self.filename != other.filename:
            return False
        return self.start == other.start

    def __hash__(self):
        # BUG FIX: __hash__ previously included file_id and end, which
        # __eq__ ignores — equal objects could hash differently, breaking
        # set/dict semantics.  Hash exactly the fields __eq__ compares.
        return hash((self.filename, self.start))
class ParticleDataset(Dataset):
    """Base class for particle-only datasets.

    Adds index-ordering parameters on top of Dataset and forces the
    particle projection type.
    """

    # Optional frontend-supplied unit base.
    _unit_base = None
    # Whether to filter particles to the bounding box on read.
    filter_bbox = False
    # Projections over particle datasets use the particle projection.
    _proj_type = "particle_proj"

    def __init__(
        self,
        filename,
        dataset_type=None,
        file_style=None,
        units_override=None,
        unit_system="cgs",
        index_order=None,
        index_filename=None,
    ):
        # Normalize index_order to a two-element tuple of ints.
        self.index_order = validate_index_order(index_order)
        self.index_filename = index_filename
        super(ParticleDataset, self).__init__(
            filename,
            dataset_type=dataset_type,
            file_style=file_style,
            units_override=units_override,
            unit_system=unit_system,
        )
def validate_index_order(index_order):
    """Normalize *index_order* into a two-element tuple of ints.

    None maps to the default (7, 5); a bare integer maps to (n, 1); a
    two-element iterable is coerced elementwise.  Anything else raises
    RuntimeError.
    """
    if index_order is None:
        return (7, 5)
    if not iterable(index_order):
        return (int(index_order), 1)
    if len(index_order) != 2:
        raise RuntimeError(
            "Tried to load a dataset with index_order={}, but "
            "index_order\nmust be an integer or a two-element tuple of "
            "integers.".format(index_order)
        )
    return tuple(int(o) for o in index_order)
| [
"yt.units.unit_systems.create_code_unit_system",
"yt.utilities.exceptions.YTFieldNotFound",
"numpy.geterr",
"os.walk",
"yt.utilities.exceptions.YTGeometryNotSupported",
"numpy.isnan",
"collections.defaultdict",
"yt.fields.fluid_fields.setup_gradient_fields",
"yt.funcs.issue_deprecation_warning",
"... | [((2177, 2206), 'weakref.WeakValueDictionary', 'weakref.WeakValueDictionary', ([], {}), '()\n', (2204, 2206), False, 'import weakref\n'), ((2219, 2239), 'yt.utilities.parameter_file_storage.ParameterFileStore', 'ParameterFileStore', ([], {}), '()\n', (2237, 2239), False, 'from yt.utilities.parameter_file_storage import NoParameterShelf, ParameterFileStore, output_type_registry\n'), ((62374, 62394), 'yt.utilities.parameter_file_storage.ParameterFileStore', 'ParameterFileStore', ([], {}), '()\n', (62392, 62394), False, 'from yt.utilities.parameter_file_storage import NoParameterShelf, ParameterFileStore, output_type_registry\n'), ((2335, 2371), 'yt.utilities.exceptions.YTObjectNotImplemented', 'YTObjectNotImplemented', (['ds', 'obj_name'], {}), '(ds, obj_name)\n', (2357, 2371), False, 'from yt.utilities.exceptions import YTFieldNotFound, YTGeometryNotSupported, YTIllDefinedParticleFilter, YTObjectNotImplemented\n'), ((2561, 2608), 'yt.funcs.mylog.debug', 'mylog.debug', (['"""Registering: %s as %s"""', 'name', 'cls'], {}), "('Registering: %s as %s', name, cls)\n", (2572, 2608), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((2918, 2935), 'weakref.proxy', 'weakref.proxy', (['ds'], {}), '(ds)\n', (2931, 2935), False, 'import weakref\n'), ((3427, 3454), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (3452, 3454), False, 'import weakref\n'), ((5967, 5992), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (5982, 5992), False, 'import os\n'), ((6074, 6118), 'yt.config.ytcfg.getboolean', 'ytcfg.getboolean', (['"""yt"""', '"""skip_dataset_cache"""'], {}), "('yt', 'skip_dataset_cache')\n", (6090, 6118), False, 'from yt.config import ytcfg\n'), ((7106, 7128), 'yt.data_objects.region_expression.RegionExpression', 'RegionExpression', (['self'], {}), '(self)\n', (7122, 7128), False, 'from yt.data_objects.region_expression import 
RegionExpression\n'), ((7497, 7523), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (7513, 7523), False, 'import os\n'), ((7619, 7650), 'os.path.abspath', 'os.path.abspath', (['self.directory'], {}), '(self.directory)\n', (7634, 7650), False, 'import os\n'), ((7771, 7807), 'os.path.exists', 'os.path.exists', (['self.backup_filename'], {}), '(self.backup_filename)\n', (7785, 7807), False, 'import os\n'), ((8018, 8029), 'time.time', 'time.time', ([], {}), '()\n', (8027, 8029), False, 'import time\n'), ((13121, 13141), 'yt.utilities.minimal_representation.MinimalDataset', 'MinimalDataset', (['self'], {}), '(self)\n', (13135, 13141), False, 'from yt.utilities.minimal_representation import MinimalDataset\n'), ((20605, 20642), 'yt.funcs.mylog.debug', 'mylog.debug', (['"""Loading field plugins."""'], {}), "('Loading field plugins.')\n", (20616, 20642), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((20840, 20864), 'yt.fields.field_type_container.FieldTypeContainer', 'FieldTypeContainer', (['self'], {}), '(self)\n', (20858, 20864), False, 'from yt.fields.field_type_container import FieldTypeContainer\n'), ((24237, 24308), 'yt.funcs.set_intersection', 'set_intersection', (['[f[s] for s in union if s in self.particle_types_raw]'], {}), '([f[s] for s in union if s in self.particle_types_raw])\n', (24253, 24308), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((31483, 31520), 'yt.utilities.exceptions.YTFieldNotFound', 'YTFieldNotFound', (['(ftype, fname)', 'self'], {}), '((ftype, fname), self)\n', (31498, 31520), False, 'from yt.utilities.exceptions import YTFieldNotFound, YTGeometryNotSupported, YTIllDefinedParticleFilter, YTObjectNotImplemented\n'), ((32529, 32584), 'yt.funcs.mylog.debug', 'mylog.debug', (['"""Searching for maximum value of %s"""', 'field'], {}), "('Searching for maximum value 
of %s', field)\n", (32540, 32584), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((33172, 33274), 'yt.funcs.mylog.info', 'mylog.info', (['"""Max Value is %0.5e at %0.16f %0.16f %0.16f"""', 'max_val', 'center[0]', 'center[1]', 'center[2]'], {}), "('Max Value is %0.5e at %0.16f %0.16f %0.16f', max_val, center[0],\n center[1], center[2])\n", (33182, 33274), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((33505, 33560), 'yt.funcs.mylog.debug', 'mylog.debug', (['"""Searching for minimum value of %s"""', 'field'], {}), "('Searching for minimum value of %s', field)\n", (33516, 33560), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((33745, 33847), 'yt.funcs.mylog.info', 'mylog.info', (['"""Min Value is %0.5e at %0.16f %0.16f %0.16f"""', 'min_val', 'center[0]', 'center[1]', 'center[2]'], {}), "('Min Value is %0.5e at %0.16f %0.16f %0.16f', min_val, center[0],\n center[1], center[2])\n", (33755, 33847), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((34278, 34297), 'yt.funcs.ensure_list', 'ensure_list', (['fields'], {}), '(fields)\n', (34289, 34297), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((34984, 35003), 'yt.funcs.ensure_list', 'ensure_list', (['fields'], {}), '(fields)\n', (34995, 35003), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((36727, 36768), 'numpy.asanyarray', 'np.asanyarray', (['left_edge'], {'dtype': '"""float64"""'}), "(left_edge, dtype='float64')\n", (36740, 36768), True, 'import numpy as np\n'), ((36790, 36832), 'numpy.asanyarray', 'np.asanyarray', (['right_edge'], {'dtype': 
'"""float64"""'}), "(right_edge, dtype='float64')\n", (36803, 36832), True, 'import numpy as np\n'), ((37228, 37245), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (37239, 37245), False, 'from collections import defaultdict\n'), ((37475, 37534), 'itertools.product', 'itertools.product', (['self.particle_types_raw', 'self.field_list'], {}), '(self.particle_types_raw, self.field_list)\n', (37492, 37534), False, 'import itertools\n'), ((38157, 38180), 'numpy.log2', 'np.log2', (['self.refine_by'], {}), '(self.refine_by)\n', (38164, 38180), True, 'import numpy as np\n'), ((39476, 39554), 'yt.units.unit_systems.create_code_unit_system', 'create_code_unit_system', (['self.unit_registry'], {'current_mks_unit': 'current_mks_unit'}), '(self.unit_registry, current_mks_unit=current_mks_unit)\n', (39499, 39554), False, 'from yt.units.unit_systems import create_code_unit_system, unit_system_registry\n'), ((40123, 40160), 'yt.units.unit_registry.UnitRegistry', 'UnitRegistry', ([], {'unit_system': 'unit_system'}), '(unit_system=unit_system)\n', (40135, 40160), False, 'from yt.units.unit_registry import UnitRegistry\n'), ((43487, 43530), 'yt.units.unit_object.Unit', 'Unit', (['unit_str'], {'registry': 'self.unit_registry'}), '(unit_str, registry=self.unit_registry)\n', (43491, 43530), False, 'from yt.units.unit_object import Unit, define_unit\n'), ((45284, 45429), 'yt.funcs.mylog.warning', 'mylog.warning', (['"""Overriding code units: Use this option only if you know that the dataset doesn\'t define the units correctly or at all."""'], {}), '(\n "Overriding code units: Use this option only if you know that the dataset doesn\'t define the units correctly or at all."\n )\n', (45297, 45429), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((46428, 46461), 'yt.units.UnitContainer', 'UnitContainer', (['self.unit_registry'], {}), '(self.unit_registry)\n', (46441, 46461), False, 'from 
yt.units import UnitContainer, _wrap_display_ytarray\n'), ((47929, 47984), 'functools.partial', 'functools.partial', (['YTArray'], {'registry': 'self.unit_registry'}), '(YTArray, registry=self.unit_registry)\n', (47946, 47984), False, 'import functools\n'), ((49299, 49357), 'functools.partial', 'functools.partial', (['YTQuantity'], {'registry': 'self.unit_registry'}), '(YTQuantity, registry=self.unit_registry)\n', (49316, 49357), False, 'import functools\n'), ((58273, 58352), 'yt.funcs.issue_deprecation_warning', 'issue_deprecation_warning', (["('This method is deprecated. ' + DEP_MSG_SMOOTH_FIELD)"], {}), "('This method is deprecated. ' + DEP_MSG_SMOOTH_FIELD)\n", (58298, 58352), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((59944, 60011), 'yt.fields.fluid_fields.setup_gradient_fields', 'setup_gradient_fields', (['self.field_info', '(ftype, input_field)', 'units'], {}), '(self.field_info, (ftype, input_field), units)\n', (59965, 60011), False, 'from yt.fields.fluid_fields import setup_gradient_fields\n'), ((62123, 62240), 'yt.units.unit_object.define_unit', 'define_unit', (['symbol', 'value'], {'tex_repr': 'tex_repr', 'offset': 'offset', 'prefixable': 'prefixable', 'registry': 'self.unit_registry'}), '(symbol, value, tex_repr=tex_repr, offset=offset, prefixable=\n prefixable, registry=self.unit_registry)\n', (62134, 62240), False, 'from yt.units.unit_object import Unit, define_unit\n'), ((62596, 62613), 'weakref.proxy', 'weakref.proxy', (['io'], {}), '(io)\n', (62609, 62613), False, 'import weakref\n'), ((6021, 6039), 'pickle.dumps', 'pickle.dumps', (['args'], {}), '(args)\n', (6033, 6039), False, 'import pickle\n'), ((6041, 6061), 'pickle.dumps', 'pickle.dumps', (['kwargs'], {}), '(kwargs)\n', (6053, 6061), False, 'import pickle\n'), ((7568, 7593), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (7583, 7593), False, 'import os\n'), ((10267, 10278), 
'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (10275, 10278), True, 'import numpy as np\n'), ((10311, 10322), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (10319, 10322), True, 'import numpy as np\n'), ((12474, 12487), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (12485, 12487), False, 'import hashlib\n'), ((12503, 12541), 'os.path.isdir', 'os.path.isdir', (['self.parameter_filename'], {}), '(self.parameter_filename)\n', (12516, 12541), False, 'import os\n'), ((17411, 17422), 'numpy.geterr', 'np.geterr', ([], {}), '()\n', (17420, 17422), True, 'import numpy as np\n'), ((17435, 17458), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (17444, 17458), True, 'import numpy as np\n'), ((17508, 17532), 'numpy.seterr', 'np.seterr', ([], {}), '(**oldsettings)\n', (17517, 17532), True, 'import numpy as np\n'), ((18207, 18249), 'yt.funcs.mylog.info', 'mylog.info', (['"""Parameters: %-25s = %s"""', 'a', 'v'], {}), "('Parameters: %-25s = %s', a, v)\n", (18217, 18249), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((19481, 19525), 'yt.funcs.mylog.debug', 'mylog.debug', (['"""Creating Particle Union \'all\'"""'], {}), '("Creating Particle Union \'all\'")\n', (19492, 19525), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((19814, 19860), 'yt.funcs.mylog.debug', 'mylog.debug', (['"""Creating Particle Union \'nbody\'"""'], {}), '("Creating Particle Union \'nbody\'")\n', (19825, 19860), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((26292, 26325), 'yt.data_objects.particle_filters.filter_registry.get', 'filter_registry.get', (['filter', 'None'], {}), '(filter, None)\n', (26311, 26325), False, 'from yt.data_objects.particle_filters import filter_registry\n'), ((31694, 31722), 
'yt.data_objects.data_containers.data_object_registry.items', 'data_object_registry.items', ([], {}), '()\n', (31720, 31722), False, 'from yt.data_objects.data_containers import data_object_registry\n'), ((36642, 36660), 'yt.units.yt_array.YTArray', 'YTArray', (['left_edge'], {}), '(left_edge)\n', (36649, 36660), False, 'from yt.units.yt_array import YTArray, YTQuantity\n'), ((36686, 36705), 'yt.units.yt_array.YTArray', 'YTArray', (['right_edge'], {}), '(right_edge)\n', (36693, 36705), False, 'from yt.units.yt_array import YTArray, YTQuantity\n'), ((42579, 42623), 'yt.funcs.setdefaultattr', 'setdefaultattr', (['self', '"""omega_radiation"""', '(0.0)'], {}), "(self, 'omega_radiation', 0.0)\n", (42593, 42623), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((42654, 42864), 'yt.utilities.cosmology.Cosmology', 'Cosmology', ([], {'hubble_constant': 'self.hubble_constant', 'omega_matter': 'self.omega_matter', 'omega_lambda': 'self.omega_lambda', 'omega_radiation': 'self.omega_radiation', 'use_dark_factor': 'use_dark_factor', 'w_0': 'w_0', 'w_a': 'w_a'}), '(hubble_constant=self.hubble_constant, omega_matter=self.\n omega_matter, omega_lambda=self.omega_lambda, omega_radiation=self.\n omega_radiation, use_dark_factor=use_dark_factor, w_0=w_0, w_a=w_a)\n', (42663, 42864), False, 'from yt.utilities.cosmology import Cosmology\n'), ((51090, 51181), 'yt.funcs.mylog.error', 'mylog.error', (["('Field %s already exists. To override use ' + 'force_override=True.')", 'name'], {}), "('Field %s already exists. 
To override use ' +\n 'force_override=True.', name)\n", (51101, 51181), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((51716, 51833), 'warnings.warn', 'warnings.warn', (['"""Because \'sampling_type\' not specified, yt will assume a cell \'sampling_type\'"""'], {'stacklevel': '(2)'}), '(\n "Because \'sampling_type\' not specified, yt will assume a cell \'sampling_type\'"\n , stacklevel=2)\n', (51729, 51833), False, 'import warnings\n'), ((64403, 64424), 'yt.funcs.iterable', 'iterable', (['index_order'], {}), '(index_order)\n', (64411, 64424), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((3810, 3855), 'functools.partial', 'functools.partial', (['_wrap_display_ytarray', 'ret'], {}), '(_wrap_display_ytarray, ret)\n', (3827, 3855), False, 'import functools\n'), ((12581, 12613), 'os.walk', 'os.walk', (['self.parameter_filename'], {}), '(self.parameter_filename)\n', (12588, 12613), False, 'import os\n'), ((12782, 12821), 'os.path.isfile', 'os.path.isfile', (['self.parameter_filename'], {}), '(self.parameter_filename)\n', (12796, 12821), False, 'import os\n'), ((18078, 18136), 'yt.funcs.mylog.error', 'mylog.error', (['"""Missing %s in parameter file definition!"""', 'a'], {}), "('Missing %s in parameter file definition!', a)\n", (18089, 18136), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((18746, 18788), 'yt.funcs.mylog.info', 'mylog.info', (['"""Parameters: %-25s = %s"""', 'a', 'v'], {}), "('Parameters: %-25s = %s', a, v)\n", (18756, 18788), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((19690, 19754), 'yt.funcs.mylog.debug', 'mylog.debug', (['"""zero common fields: skipping particle union \'all\'"""'], {}), '("zero common fields: skipping particle union 
\'all\'")\n', (19701, 19754), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((20335, 20371), 'yt.data_objects.particle_unions.ParticleUnion', 'ParticleUnion', (['"""nbody"""', 'nbody_ptypes'], {}), "('nbody', nbody_ptypes)\n", (20348, 20371), False, 'from yt.data_objects.particle_unions import ParticleUnion\n'), ((32309, 32328), 'weakref.proxy', 'weakref.proxy', (['self'], {}), '(self)\n', (32322, 32328), False, 'import weakref\n'), ((41764, 41806), 'yt.units.unit_object.Unit', 'Unit', (['my_unit'], {'registry': 'self.unit_registry'}), '(my_unit, registry=self.unit_registry)\n', (41768, 41806), False, 'from yt.units.unit_object import Unit, define_unit\n'), ((45980, 46042), 'yt.funcs.mylog.info', 'mylog.info', (['"""Overriding %s_unit: %g %s."""', 'unit', 'val[0]', 'val[1]'], {}), "('Overriding %s_unit: %g %s.', unit, val[0], val[1])\n", (45990, 46042), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((55947, 56014), 'yt.funcs.mylog.warning', 'mylog.warning', (["('The deposited field %s already exists' % field_name)"], {}), "('The deposited field %s already exists' % field_name)\n", (55960, 56014), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((56525, 56548), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['f'], {}), '(f)\n', (56545, 56548), True, 'import numpy as np\n'), ((9162, 9194), 'os.stat', 'os.stat', (['self.parameter_filename'], {}), '(self.parameter_filename)\n', (9169, 9194), False, 'import os\n'), ((16543, 16558), 'numpy.arange', 'np.arange', (['OOMs'], {}), '(OOMs)\n', (16552, 16558), True, 'import numpy as np\n'), ((18605, 18663), 'yt.funcs.mylog.error', 'mylog.error', (['"""Missing %s in parameter file definition!"""', 'a'], {}), "('Missing %s in parameter file definition!', a)\n", (18616, 18663), False, 'from 
yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((20479, 20545), 'yt.funcs.mylog.debug', 'mylog.debug', (['"""zero common fields, skipping particle union \'nbody\'"""'], {}), '("zero common fields, skipping particle union \'nbody\'")\n', (20490, 20545), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((27126, 27216), 'yt.funcs.mylog.info', 'mylog.info', (['"""Added filter dependency \'%s\' for \'%s\'"""', 'filter.filtered_type', 'filter.name'], {}), '("Added filter dependency \'%s\' for \'%s\'", filter.filtered_type,\n filter.name)\n', (27136, 27216), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((28368, 28533), 'yt.funcs.mylog.warning', 'mylog.warning', (['"""It appears that you are filtering on an SPH field type. It is recommended to use \'gas\' as the filtered particle type in this case instead."""'], {}), '(\n "It appears that you are filtering on an SPH field type. 
It is recommended to use \'gas\' as the filtered particle type in this case instead."\n )\n', (28381, 28533), False, 'from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog, set_intersection, setdefaultattr\n'), ((56752, 56763), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (56760, 56763), True, 'import numpy as np\n'), ((57005, 57022), 'yt.fields.derived_field.ValidateSpatial', 'ValidateSpatial', ([], {}), '()\n', (57020, 57022), False, 'from yt.fields.derived_field import DerivedField, ValidateSpatial\n'), ((12687, 12712), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (12699, 12712), False, 'import os\n'), ((23928, 23965), 'yt.utilities.exceptions.YTGeometryNotSupported', 'YTGeometryNotSupported', (['self.geometry'], {}), '(self.geometry)\n', (23950, 23965), False, 'from yt.utilities.exceptions import YTFieldNotFound, YTGeometryNotSupported, YTIllDefinedParticleFilter, YTObjectNotImplemented\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import generators
from __future__ import nested_scopes
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from util import INPUT_PATH, OUTPUT_PATH
from util.preprocess import load_pixels_by_patient
def find_max_mask(pixel, n=10):
    """Locate the n-by-n window of ``pixel`` with the largest sum.

    Fixes two issues in the previous version:
    - off-by-one: ``range(shape - n)`` skipped the last valid top-left
      corner (valid corners run 0 .. shape - n inclusive);
    - per-window cost: building a full-size zero mask and copying the
      whole image for every candidate window is O(H*W) per window; the
      masked sum is simply the sum of the ``n x n`` slice.

    :param pixel: 2-D numpy array (e.g. an edge map produced by
        ``differentiate``)
    :param n: side length of the square window
    :return: (row, col, sum) of the best window; ties keep the first
        (top-most, then left-most) window found.  With an all-nonpositive
        input the initial (0, 0, 0) is returned, matching the original.
    """
    max_sum = 0
    max_i = 0
    max_j = 0
    # +1 so the window flush with the bottom/right edge is considered too
    for i in range(pixel.shape[0] - n + 1):
        for j in range(pixel.shape[1] - n + 1):
            window_sum = np.sum(pixel[i:i + n, j:j + n])
            if window_sum > max_sum:
                max_i = i
                max_j = j
                max_sum = window_sum
    return max_i, max_j, max_sum
def differentiate(pixel, threshold=700):
    """Binarized 4-neighbour Laplacian edge response of ``pixel``.

    For every interior cell c with neighbours u, d, l, r the response is
    |(c-u) + (c-d) + (c-l) + (c-r)| = |4*c - (u + d + l + r)|.
    Cells whose response reaches ``threshold`` become 1, the rest 0.

    :param pixel: 2-D numpy array
    :param threshold: minimum absolute response to count as an edge
    :return: array of shape (H-2, W-2), same dtype as ``pixel``,
        containing only 0s and 1s
    """
    center = pixel[1:-1, 1:-1]
    up = pixel[:-2, 1:-1]
    down = pixel[2:, 1:-1]
    left = pixel[1:-1, :-2]
    right = pixel[1:-1, 2:]
    response = np.abs(4 * center - (up + down + left + right))
    # in-place thresholding keeps the input array's dtype in the result
    response[response < threshold] = 0
    response[response >= threshold] = 1
    return response
def multi_detect_with_mask(patients):
    """Run mask detection on every slice of every patient and dump a CSV.

    Writes ``<OUTPUT_PATH>/<patient>_mask.csv`` with one row per slice:
    patient_id, pixel_id, max_mask (best window sum), i, j (window corner).

    Fixes: the previous version wrote ``str.encode('utf-8')`` (bytes) into
    a file opened in text mode, which raises ``TypeError`` on Python 3;
    it also leaked the file handle if detection raised.

    :param patients: iterable of patient ids
    """
    for patient in patients:
        # 'with' guarantees the CSV is closed even if detection fails
        with open(OUTPUT_PATH + '/' + patient + '_mask.csv', 'w') as out_file:
            out_file.write('patient_id,pixel_id,max_mask,i,j\n')
            out_file.flush()
            pixels = load_pixels_by_patient(patient)
            for i in range(len(pixels)):
                print(i)  # progress indicator; detection is slow
                max_i, max_j, max_sum = detect_with_mask(pixels[i])
                out_file.write('%s,%s,%s,%s,%s\n' %
                               (patient, i, max_sum, max_i, max_j))
                # flush per row so partial results survive an interruption
                out_file.flush()
def detect_with_mask(pixel):
    """Edge-detect ``pixel`` and return the strongest 10x10 window.

    :param pixel: 2-D numpy array (raw slice)
    :return: (row, col, sum) of the best window on the edge map; the sum
        would be window*window if every cell under the mask were turned on
    """
    window = 10
    edge_map = differentiate(pixel)
    return find_max_mask(edge_map, window)
def load_diffs_by_patient(patient):
    """
    Load the diffs of pixels (np array) saved for this patient; on a cache
    miss, differentiate every slice, stack the results, and persist them
    as ``<INPUT_PATH>/<patient>/<patient>_diff.npy`` for next time.
    :param patient: patient id
    :return: np array of per-slice diffs
    """
    stem = INPUT_PATH + '/' + patient + '/' + patient + '_diff'
    # cache hit: just load the previously saved stack
    if os.path.exists(stem + '.npy'):
        return np.load(stem + '.npy')
    pixels = load_pixels_by_patient(patient)
    diffs = np.array([differentiate(pixel) for pixel in pixels])
    np.save(stem, diffs)  # np.save appends the '.npy' suffix itself
    return diffs
def multi_load_diffs_by_patient(patients):
    """Warm the diff cache for every patient, printing each stack's shape."""
    for patient_id in patients:
        print(load_diffs_by_patient(patient_id).shape)
def plot_mask(pixel, patient="Unknown", i=0):
    """
    Draw 4 subplots, explore 2-Means.

    Saves a figure to ``<OUTPUT_PATH>/<patient>_<i>`` with: the original
    slice, a 2-Means binarization, the raw diff (edge map), and the diff
    masked by the 2-Means threshold.

    NOTE: mutates ``pixel`` in place (minimum values are overwritten with
    -1000 below).

    :param pixel: np array; assumes at least 400x400 for the [100:400]
        crop below -- TODO confirm against caller
    :param patient: patient id for output
    :param i: pixel id for output
    """
    fig, axs = plt.subplots(2, 2)
    # hide all axes; these are images, not charts
    for ax_row in axs:
        for _ax in ax_row:
            _ax.axis('off')
    ax = axs[0]
    ax[0].set_title('Original')
    cax = ax[0].imshow(pixel)
    fig.colorbar(cax, ax=ax[0])
    my_min = np.min(pixel)
    pixel[pixel == my_min] = -1000  # This is to cut off the margin left by the machine
    # cluster only the central crop so the margin doesn't skew the centers
    middle = np.reshape(pixel[100:400, 100:400], [-1, 1])
    k_means = KMeans(n_clusters=2).fit(middle)
    # threshold = midpoint between the two cluster centers
    threshold = np.mean(k_means.cluster_centers_)
    ax[1].set_title('Apply 2-Means after pulling up')
    cax = ax[1].imshow(np.where(pixel < threshold, [0], [1]))
    fig.colorbar(cax, ax=ax[1])
    ax = axs[1]
    ax[0].set_title('Diff')
    diff = differentiate(pixel, 700)
    cax = ax[0].imshow(diff, cmap="Paired")
    fig.colorbar(cax, ax=ax[0])
    ax[1].set_title('Threshold diff')
    # zero out edge responses in below-threshold (dark) regions;
    # diff is 2 pixels smaller per axis, hence the [1:-1, 1:-1] crop
    diff[(pixel < threshold)[1:-1, 1:-1]] = 0
    cax = ax[1].imshow(diff, cmap="Paired")
    fig.colorbar(cax, ax=ax[1])
    fig.savefig(OUTPUT_PATH + '/%s_%s' % (patient, i), dpi=400)
| [
"numpy.load",
"numpy.save",
"numpy.abs",
"numpy.sum",
"sklearn.cluster.KMeans",
"os.path.exists",
"numpy.ones",
"numpy.zeros",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"numpy.where",
"util.preprocess.load_pixels_by_patient",
"matplotlib.pyplot.subplots"
] | [((518, 549), 'numpy.ones', 'np.ones', (['[n, n]'], {'dtype': 'np.int16'}), '([n, n], dtype=np.int16)\n', (525, 549), True, 'import numpy as np\n'), ((1492, 1509), 'numpy.abs', 'np.abs', (['sum_pixel'], {}), '(sum_pixel)\n', (1498, 1509), True, 'import numpy as np\n'), ((3419, 3437), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (3431, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3655), 'numpy.min', 'np.min', (['pixel'], {}), '(pixel)\n', (3648, 3655), True, 'import numpy as np\n'), ((3757, 3801), 'numpy.reshape', 'np.reshape', (['pixel[100:400, 100:400]', '[-1, 1]'], {}), '(pixel[100:400, 100:400], [-1, 1])\n', (3767, 3801), True, 'import numpy as np\n'), ((3865, 3898), 'numpy.mean', 'np.mean', (['k_means.cluster_centers_'], {}), '(k_means.cluster_centers_)\n', (3872, 3898), True, 'import numpy as np\n'), ((1874, 1905), 'util.preprocess.load_pixels_by_patient', 'load_pixels_by_patient', (['patient'], {}), '(patient)\n', (1896, 1905), False, 'from util.preprocess import load_pixels_by_patient\n'), ((2682, 2732), 'os.path.exists', 'os.path.exists', (["(path + '/' + patient + '_diff.npy')"], {}), "(path + '/' + patient + '_diff.npy')\n", (2696, 2732), False, 'import os\n'), ((2751, 2782), 'util.preprocess.load_pixels_by_patient', 'load_pixels_by_patient', (['patient'], {}), '(patient)\n', (2773, 2782), False, 'from util.preprocess import load_pixels_by_patient\n'), ((2894, 2909), 'numpy.array', 'np.array', (['diffs'], {}), '(diffs)\n', (2902, 2909), True, 'import numpy as np\n'), ((2918, 2964), 'numpy.save', 'np.save', (["(path + '/' + patient + '_diff')", 'diffs'], {}), "(path + '/' + patient + '_diff', diffs)\n", (2925, 2964), True, 'import numpy as np\n'), ((2991, 3034), 'numpy.load', 'np.load', (["(path + '/' + patient + '_diff.npy')"], {}), "(path + '/' + patient + '_diff.npy')\n", (2998, 3034), True, 'import numpy as np\n'), ((3976, 4013), 'numpy.where', 'np.where', (['(pixel < threshold)', '[0]', '[1]'], {}), 
'(pixel < threshold, [0], [1])\n', (3984, 4013), True, 'import numpy as np\n'), ((697, 718), 'numpy.zeros', 'np.zeros', (['pixel.shape'], {}), '(pixel.shape)\n', (705, 718), True, 'import numpy as np\n'), ((3816, 3836), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (3822, 3836), False, 'from sklearn.cluster import KMeans\n'), ((864, 876), 'numpy.sum', 'np.sum', (['temp'], {}), '(temp)\n', (870, 876), True, 'import numpy as np\n'), ((966, 978), 'numpy.sum', 'np.sum', (['temp'], {}), '(temp)\n', (972, 978), True, 'import numpy as np\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import six
import logging
from collections import defaultdict
import paddle
from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table
from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard
from ..fluid import framework
from ..fluid import layers
from ..fluid import unique_name
from ..fluid.backward import append_backward, _some_in_set_, _append_grad_suffix_, _get_no_grad_set_name
from ..fluid.clip import GradientClipBase, GradientClipByNorm, error_clip_callback, append_gradient_clip_ops
from ..fluid.framework import program_guard, Parameter
from ..fluid.initializer import Constant
from ..fluid.layer_helper import LayerHelper
from ..fluid.layers import ops
from ..fluid.dygraph import base as imperative_base
from ..fluid.dygraph import no_grad
from paddle.fluid import core
from paddle.fluid.layers import tensor
from functools import reduce
from ..fluid.wrapped_decorator import signature_safe_contextmanager
from .. import compat as cpt
from .lr import LRScheduler
import copy
from paddle import _C_ops
from paddle.fluid.framework import _in_legacy_dygraph, _in_eager_without_dygraph_check
__all__ = []
class Optimizer(object):
r"""Optimizer Base class.
Define the common interface of an optimizer.
User should not use this class directly,
but need to use one of it's implementation.
Args:
learning_rate (float|LRScheduler): The learning rate used to update ``Parameter``.
It can be a float value or any subclass of ``LRScheduler`` .
parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. And you can specify different options for \
different parameter groups such as the learning rate, weight decay, etc, \
then the parameters are list of dict. Note that the learning_rate in paramter groups \
represents the scale of base learning_rate. \
The default value is None in static mode, at this time all parameters will be updated.
weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
It canbe a float value as coeff of L2 regularization or \
:ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \
the regularization setting here in optimizer will be ignored for this parameter. \
Otherwise, the regularization setting here in optimizer will take effect. \
Default None, meaning there is no regularization.
grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of \
some derived class of ``GradientClipBase`` . There are three cliping strategies \
( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \
:ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
name (str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
The default value is None.
Returns:
Base class for optimizer.
Examples:
.. code-block:: python
#Take the subclass adam as an example
import paddle
linear = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear(inp)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters())
loss.backward()
adam.step()
adam.clear_grad()
#Take the subclass sgd as an example
#optimize parameters in linear_1 and linear2 in different options.
#Note that the learning_rate of linear_2 is 0.01.
linear_1 = paddle.nn.Linear(10, 10)
linear_2 = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear_1(inp)
out = linear_2(out)
loss = paddle.mean(out)
sgd = paddle.optimizer.SGD(
learning_rate=0.1,
parameters=[{
'params': linear_1.parameters()
}, {
'params': linear_2.parameters(),
'weight_decay': 0.001,
'learning_rate': 0.1
}],
weight_decay=0.01)
loss.backward()
sgd.step()
sgd.clear_grad()
"""
    @imperative_base.no_grad
    def __init__(self,
                 learning_rate,
                 parameters=None,
                 weight_decay=None,
                 grad_clip=None,
                 name=None):
        """Validate arguments and set up bookkeeping shared by all
        optimizer subclasses; see the class docstring for argument
        semantics."""
        if parameters is not None:
            # paddle.Tensor is also iterable, so here we don't check whether
            # the input is iterable, if the input is paddle.Tensor, the
            # list(paddle.Tensor) will be a error value
            if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
                raise TypeError(
                    "`parameters` argument given to the optimizer should be "
                    "an iterable of paddle Tensors, but got argument type is `{}`.".
                    format(type(parameters)))
            if isinstance(parameters, dict):
                raise TypeError(
                    "`parameters` argument should not get dict type, "
                    "if parameter groups is needed, please set `parameters`"
                    " as list of dict")
            self._parameter_list = list(parameters)
        else:
            self._parameter_list = None
        self._name = name
        if framework._non_static_mode():
            # dygraph mode requires explicit parameters
            if self._parameter_list is None:
                raise AttributeError(
                    "parameters argument given to the Optimizer should not be None in dygraph mode."
                )
            if weight_decay is not None:
                if not isinstance(self._parameter_list[0], dict):
                    # warn once: per-parameter regularizers override the
                    # optimizer-level weight_decay for those parameters
                    for param in self._parameter_list:
                        if hasattr(
                                param,
                                'regularizer') and param.regularizer is not None:
                            logging.info(
                                "If regularizer of a Parameter has been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr' already. "
                                "The weight_decay[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
                                % weight_decay.__str__())
                            break
        if not isinstance(learning_rate, (float, LRScheduler)):
            raise TypeError(
                "learning rate should be float or LRScheduler, got %s here" %
                type(learning_rate))
        if grad_clip is not None:
            if not isinstance(grad_clip, GradientClipBase):
                raise TypeError(
                    "'grad_clip' should be an instance of GradientClipBase's derived class"
                )
        # a bare float weight_decay is shorthand for L2 regularization
        if isinstance(weight_decay, float):
            from ..fluid.regularizer import L2Decay
            self.regularization = L2Decay(weight_decay)
        else:
            self.regularization = weight_decay
        self._grad_clip = grad_clip
        self._learning_rate = learning_rate
        self._dtype = None
        # Infer the dtype form parameter
        if self._parameter_list:
            if isinstance(self._parameter_list[0], dict):
                for param_group in self._parameter_list:
                    assert 'params' in param_group, \
                        'params should be set in parameters if parameter groups are optimized in different options'
                self._dtype = self._parameter_list[0]['params'][0].dtype
            else:
                self._dtype = self._parameter_list[0].dtype
        # each program should have a independent learning rate
        # program -> tensor(learning_rate)
        self._learning_rate_map = dict()
        # Dictionary of accumulators. Some optimizer subclasses need to
        # allocate and manage extra tensors associated with the parameters
        # to train. These tensors are called accumulators.
        # {accum_name : { paramter_name : accumulator_for_parameter, ...}, ...}
        self._accumulators = defaultdict(lambda: dict())
        self.helper = None
        self._opti_name_list = []
        self._accumulators_holder = {}
        self._param_device_map = dict()
        # backward-compatible alias for the old method name
        self.clear_gradients = self.clear_grad
        # defaults inherited by parameter groups that omit these options
        self._default_dict = {
            'weight_decay': self.regularization,
            'grad_clip': self._grad_clip
        }
        self._param_groups = []
        if self._parameter_list and isinstance(self._parameter_list[0], dict):
            for param_group in self._parameter_list:
                self._add_param_group(param_group.copy())
        else:
            self._param_groups = self._parameter_list
        # NOTE: Multi Tensor: Pass in all parameters and gradients to the op kernel of the Optimizer at one time for updating for dygraph mode.
        # Optimizer support list: [ paddle.optimizer.Momentum, paddle.optimizer.Adam].
        self._use_multi_tensor = None
        self._param_dict = {'FP32_LODTensor': [], 'FP16_LODTensor': []}
        self._auxiliary_vars = {}
def _set_auxiliary_var(self, key, val):
self._auxiliary_vars[key] = val
def _get_auxiliary_var(self, key):
return self._auxiliary_vars.get(key, None)
@framework.dygraph_only
def state_dict(self):
'''
Get state dict information from optimizer. It contain all the tensor used by optimizer. For Adam optimizer, contains beta1, beta2, momentum etc. If LRScheduler have been used, global_step will be include in state dict.
If the optimizer never be called(minimize function), the state_dict is empty.
Args:
None
Returns:
state_dict(dict) : dict contains all the Tensor used by optimizer
Examples:
.. code-block:: python
import paddle
emb = paddle.nn.Embedding(10, 10)
adam = paddle.optimizer.Adam(0.001, parameters=emb.parameters())
state_dict = adam.state_dict()
'''
state_dict = {}
for k, v in self._accumulators.items():
for para_name, var_tmp in v.items():
state_dict[var_tmp.name] = var_tmp
# if has master weight and then save master weight
if hasattr(self, "_master_weights"):
if len(self._master_weights) != 0:
state_dict["master_weights"] = self._master_weights
# global step if use lr decay
if isinstance(self._learning_rate, LRScheduler):
state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
return state_dict
@framework.dygraph_only
def set_state_dict(self, state_dict):
'''
Load optimizer state dict. For Adam optimizer, contains beta1, beta2, momentum etc. If LRScheduler have been used, global_step will be changed.
Args:
state_dict(dict) : Dict contains all the Tensor needed by optimizer
Return:
None
Examples:
.. code-block:: python
import paddle
emb = paddle.nn.Embedding(10, 10)
layer_state_dict = emb.state_dict()
paddle.save(layer_state_dict, "emb.pdparams")
scheduler = paddle.optimizer.lr.NoamDecay(
d_model=0.01, warmup_steps=100, verbose=True)
adam = paddle.optimizer.Adam(
learning_rate=scheduler,
parameters=emb.parameters())
opt_state_dict = adam.state_dict()
paddle.save(opt_state_dict, "adam.pdopt")
opti_state_dict = paddle.load("adam.pdopt")
adam.set_state_dict(opti_state_dict)
'''
if isinstance(self._learning_rate, LRScheduler):
self._learning_rate.set_dict(state_dict["LR_Scheduler"])
if isinstance(self._learning_rate, LRScheduler):
self._learning_rate.set_state_dict(state_dict["LR_Scheduler"])
# NOTE: exclude learning rate scheduler's state from
# _accumulators_holder.
state_dict = state_dict.copy()
if "LR_Scheduler" in state_dict:
state_dict.pop("LR_Scheduler")
if "master_weights" in state_dict:
if hasattr(self, "_master_weights"):
self._master_weights = state_dict["master_weights"]
state_dict.pop("master_weights")
self._accumulators_holder = state_dict
for k, v in self._accumulators.items():
for para_name, var_tmp in v.items():
assert var_tmp.name in state_dict, \
"optimizer Tensor {} not found".format( var_tmp.name )
var = var_tmp.value()
tensor = var.get_tensor()
model_np = np.array(tensor)
load_para = state_dict[var_tmp.name]
if isinstance(load_para, Variable):
load_para_np = load_para.numpy()
elif isinstance(load_para, core.VarBase):
load_para_np = load_para.numpy()
elif isinstance(load_para, np.ndarray):
load_para_np = load_para
else:
raise RuntimeError("State dict type {} not supprt".format(
str(type(load_para))))
assert model_np.shape == load_para_np.shape, \
"Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format(
model_np.name, model_np.shape, load_para_np.shape)
assert model_np.dtype == load_para_np.dtype, \
"Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {} but load tensor with dtype {}".format(
model_np.name, model_np.dtype, load_para_np.dtype)
tensor.set(load_para_np, framework._current_expected_place())
    def get_opti_var_name_list(self):
        # Expose the names of variables this optimizer has created.  Note
        # the internal list object itself is returned, not a copy.
        return self._opti_name_list
    def _create_global_learning_rate(self):
        """Create (once per program) the global learning-rate variable and
        register it in ``self._learning_rate_map``.

        For an ``LRScheduler`` the variable is (re)initialized with the
        scheduler's current value; for a plain float a persistable global
        variable holding that constant is created.
        """
        if isinstance(self._learning_rate, LRScheduler):
            lr_var = self._global_learning_rate()
            # only create global lr_var once
            if not isinstance(lr_var, framework.Variable):
                lr_name = unique_name.generate('learning_rate')
                self._learning_rate._var_name = lr_name
                lr_var = self.helper.create_global_variable(
                    name=lr_name,
                    shape=[1],
                    persistable=True,
                    stop_gradient=True,
                    dtype=paddle.get_default_dtype()
                    if self._dtype is None else self._dtype)
                main_prog = framework.default_main_program()
                # NOTE(review): 'lr_sheduler' (sic) is the attribute name the
                # framework reads elsewhere; do not "fix" the spelling here.
                main_prog.lr_sheduler = self._learning_rate
                main_prog.lr_var = lr_var
                self._learning_rate_map[framework.default_main_program(
                )] = lr_var
            # seed the variable with the scheduler's current value
            lr_value = float(self._learning_rate())
            self.helper.set_variable_initializer(
                lr_var, initializer=Constant(value=lr_value))
        elif isinstance(self._learning_rate, float):
            # only create global lr_var once
            lr = self._global_learning_rate()
            if isinstance(lr, framework.Variable):
                return
            else:
                self._learning_rate_map[framework.default_main_program(
                )] = layers.create_global_var(
                    name=unique_name.generate("learning_rate"),
                    shape=[1],
                    value=float(self._learning_rate),
                    dtype=paddle.get_default_dtype()
                    if self._dtype is None else self._dtype,
                    persistable=True)
    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative

        Set the value of the learning rate manually in the optimizer. If the optimizer use LRScheduler,
        this API cannot be invoked, because it will lead to conflict.

        Args:
            value (float): the value of learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                linear = paddle.nn.Linear(10, 10)

                adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())

                # set learning rate manually by python float value
                lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                for i in range(5):
                    adam.set_lr(lr_list[i])
                    lr = adam.get_lr()
                    print("current lr is {}".format(lr))
                # Print:
                #    current lr is 0.2
                #    current lr is 0.3
                #    current lr is 0.4
                #    current lr is 0.5
                #    current lr is 0.6
        """
        if not isinstance(value, (int, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be float, but received %s."
                % (type(value)))
        if isinstance(self._learning_rate, LRScheduler):
            raise RuntimeError(
                "optimizer's learning rate can't be LRScheduler when invoke this API, because this will lead to conflict."
            )
        self._learning_rate = float(value)
        current_lr = self._global_learning_rate()
        # if a learning-rate variable already exists, overwrite it in place
        # so any graph referencing it sees the new value
        if current_lr is not None:
            if framework._non_static_mode():
                # dygraph: fill the existing tensor directly via the C op
                _C_ops.fill_constant(current_lr, 'value',
                                     float(value), 'dtype', current_lr.dtype,
                                     'shape', list(current_lr.shape))
            else:
                # static graph: append a fill_constant op to the main program
                global_block = framework.default_main_program().global_block()
                global_block.append_op(
                    type='fill_constant',
                    outputs={'Out': [current_lr]},
                    attrs={
                        'dtype': current_lr.dtype,
                        'shape': list(current_lr.shape),
                        'value': float(value)
                    },
                    stop_gradient=True)
def get_lr(self):
"""
Get current learning rate of optimizer.
If 'LRScheduler' is not used, the return value is all the same.
If 'LRScheduler' is used, the return value is the current scheduled learing rete.
Returns:
float: The current learning rate of optimizer.
Examples:
.. code-block:: python
# train on default dynamic graph mode
import paddle
import numpy as np
emb = paddle.nn.Embedding(10, 3)
## example1: LRScheduler is not used, return the same value is all the same
adam = paddle.optimizer.Adam(0.01, parameters = emb.parameters())
for batch in range(10):
input = paddle.randint(low=0, high=5, shape=[5])
out = emb(input)
out.backward()
print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.01
adam.step()
## example2: StepDecay is used, return the scheduled learning rate
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
adam = paddle.optimizer.Adam(scheduler, parameters = emb.parameters())
for batch in range(10):
input = paddle.randint(low=0, high=5, shape=[5])
out = emb(input)
out.backward()
print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.5->0.05...
adam.step()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 10])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
adam = paddle.optimizer.Adam(learning_rate=scheduler)
adam.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for batch in range(10):
print("Learning rate of step{}: {}", adam.get_lr()) # 0.5->0.05->0.005...
out = exe.run(main_prog, feed={'x': np.random.randn(3, 10).astype('float32')})
scheduler.step()
"""
if isinstance(self._learning_rate, float):
return self._learning_rate
else:
return self._learning_rate()
def _global_learning_rate(self, program=None):
"""
get global decayed learning rate
:return:
"""
if program is None:
program = framework.default_main_program()
return self._learning_rate_map.get(program, None)
def _append_optimize_op(self, block, param_and_grad):
""" append optimize operator to block and return all the added optimize_op
"""
raise NotImplementedError(
"Class \"Optimizer\" connot be used directly as an optimizer, please use its subclasses such as \"Adam\""
)
    def _create_param_lr(self, param_and_grad):
        # Create the effective learning-rate tensor for one parameter: the
        # global learning rate, optionally scaled by the parameter's own
        # 'learning_rate' optimize attribute.
        param = param_and_grad[0]
        if hasattr(param, 'optimize_attr'):
            param_lr = param.optimize_attr['learning_rate']
            if type(param_lr) == Variable:
                # Already a tensor-valued per-parameter rate; use it directly.
                return param_lr
            else:
                if param_lr == 1.0:
                    # Scale factor 1.0: the global rate is used unchanged.
                    return self._global_learning_rate()
                else:
                    # Perform the multiply inside the LR-schedule guard and a
                    # dedicated name scope so the scaling op is attributed to
                    # per-parameter scaling rather than the schedule itself.
                    with default_main_program()._lr_schedule_guard(
                            is_with_opt=True), framework.name_scope(
                                'scale_with_param_lr'):
                        return self._global_learning_rate() * param_lr
        else:
            return self._global_learning_rate()
def _create_accumulators(self, block, parameters):
"""Create all accumulators needed by the parameters
Args:
block: the block in which the loss tensor is present
parameters: list of parameter tensors for the optimizer
"""
pass
def _finish_update(self, block, parameters_and_grads):
"""Finish any custom updates needed
before completing an optimization step
Args:
block: the block in which the loss tensor is present
parameters: list of parameter tensors for the optimizer
Returns:
None
"""
pass
def _add_accumulator(self,
name,
param,
dtype=None,
fill_value=0.0,
shape=None,
type=None,
device=None):
"""Utility function to add an accumulator for a parameter
Args:
block: the block in which the loss tensor is present
name: name of the accumulator
param: parameter tensor for which accumulator is to be added
dtype: data type of the accumulator tensor
fill_value: value to initialize the accumulator tensor
"""
if self._name is not None:
name = self._name + "_" + name
if (name in self._accumulators and
param.name in self._accumulators[name]):
if framework._non_static_mode():
return self._accumulators[name][param.name]
raise Exception("Accumulator {} already exists for parameter {}".
format(name, param.name))
if shape == None:
shape = param.shape
assert isinstance(self.helper, LayerHelper)
var_name = param.name + "_" + name
var_name = unique_name.generate(var_name)
self._opti_name_list.append(var_name)
var = self.helper.create_global_variable(
name=var_name,
persistable=True,
dtype=dtype or param.dtype,
type=core.VarDesc.VarType.LOD_TENSOR
if framework._in_eager_without_dygraph_check() else
(param.type if type is None else type),
shape=shape,
belong_to_optimizer=True)
if device is None:
device = self._get_device_for_param(param.name)
with device_guard(device):
self.helper.set_variable_initializer(
var, initializer=Constant(value=float(fill_value)))
if framework._non_static_mode():
if len(self._accumulators_holder) > 0:
assert var_name in self._accumulators_holder, \
"Optimizer set error, {} should in state dict".format( var_name )
var.set_value(self._accumulators_holder[var_name])
self._accumulators[name][param.name] = var
return var
def _get_accumulator(self, name, param):
"""Utility function to fetch an accumulator for a parameter
Args:
name: name of the accumulator
param: parameter tensor for which accumulator is to be fetched
Returns:
accumulator tensor for the parameter
"""
if self._name is not None:
name = self._name + "_" + name
if (name not in self._accumulators or
param.name not in self._accumulators[name]):
raise Exception("Accumulator {} does not exist for parameter {}".
format(name, param.name))
return self._accumulators[name][param.name]
def _update_param_device_map(self, parameters_and_grads, target_block):
for param_and_grad in parameters_and_grads:
if param_and_grad[0].stop_gradient is False:
param_name = param_and_grad[0].name
ops = target_block.ops
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
)
for op in ops:
input_arg_names = op.input_arg_names
if param_name in input_arg_names:
self._param_device_map[param_name] = op.attr(
device_attr_name)
break
def _get_device_for_param(self, param_name):
device = None
if param_name in self._param_device_map:
device = self._param_device_map[param_name]
return device
    def _create_optimization_pass(self, parameters_and_grads):
        """Add optimization operators to update gradients to tensors.
        Args:
          parameters_and_grads(list(tuple(Tensor, Tensor))):
            a list of (tensor, gradient) pair to update. May also be a dict
            (a parameter group) whose 'params' key holds the pairs.
        Returns:
          return_op_list: a list of operators that will complete one step of
            optimization. This will include parameter update ops, global step
            update ops and any other custom ops required by subclasses to manage
            their internal state.
        """
        # This is a default implementation of create_optimization_pass that
        # can be shared by most optimizers. This implementation assumes that
        # the subclass will implement the _append_optimize_op method and the
        # _initialize_tensors method. The subclass can extend the
        # _create_accumulators method if it needs to create accumulators
        # for parameters and extend _finish_update method to add custom ops.
        # Always called under program_guard: use the global block as the loss
        # block. But if the current block is in control flow, append the
        # optimize op in the grad block of the current block instead.
        global_block = framework.default_main_program().global_block()
        target_block = global_block
        current_block = framework.default_main_program().current_block()
        if current_block.idx != global_block.idx:
            assert current_block.backward_block_idx != -1, \
                "current block is not global_block, but it doesn't have backward block."
            target_block = framework.default_main_program().blocks[
                current_block.backward_block_idx]
        # Remember how many ops the block had so we can slice out exactly the
        # ops this pass appends.
        start = len(target_block.ops)
        self.helper = LayerHelper(self.__class__.__name__)
        self._create_global_learning_rate()
        # NOTE: Multi Tensor support [ Momentum, Adam ] for dygraph mode
        if self._use_multi_tensor and self.__class__.__name__ in [
                'Momentum', 'Adam'
        ]:
            # Lazily populate the per-dtype parameter groups on first use.
            if len(self._param_dict['FP32_LODTensor']) == 0 and len(
                    self._param_dict['FP16_LODTensor']) == 0:
                if isinstance(parameters_and_grads, list):
                    self._multi_tensor_init(target_block, [
                        p[0] for p in parameters_and_grads
                        if not p[0].stop_gradient
                    ])
                else:
                    self._update_param_group(parameters_and_grads)
                    self._multi_tensor_init(target_block, [
                        p[0] for p in parameters_and_grads['params']
                        if not p[0].stop_gradient
                    ])
            if framework._non_static_mode():
                self._append_optimize_multi_tensor_op(target_block,
                                                      parameters_and_grads)
            else:
                self._update_param_device_map(parameters_and_grads,
                                              target_block)
                # NOTE: Multi Tensor requires all parameters to be in the same device and program.
                # param_grad_list = [p_0,g_0,p_1,g_1,....]
                param_grad_list = []
                for param_and_grad in parameters_and_grads:
                    if not param_and_grad[0].stop_gradient and param_and_grad[
                            1] is not None:
                        param_grad_list.append(param_and_grad[0])
                        param_grad_list.append(param_and_grad[1])
                with param_grad_list[0].block.program._optimized_guard(
                        param_grad_list), name_scope("optimizer"):
                    device = self._get_device_for_param(param_grad_list[0].name)
                    with device_guard(device):
                        self._append_optimize_multi_tensor_op(
                            target_block, parameters_and_grads)
        else:
            # Regular (non multi-tensor) path.
            if not framework._non_static_mode():
                params_grads_device_map = parameters_and_grads[
                    'params'] if isinstance(parameters_and_grads,
                                            dict) else parameters_and_grads
                self._update_param_device_map(params_grads_device_map,
                                              target_block)
            # Create accumulators only for trainable parameters.
            if isinstance(parameters_and_grads, list):
                self._create_accumulators(target_block, [
                    p[0] for p in parameters_and_grads if not p[0].stop_gradient
                ])
            else:
                params_acc_dict = parameters_and_grads.copy()
                params_acc_dict['params'] = [
                    p[0] for p in params_acc_dict['params']
                    if not p[0].stop_gradient
                ]
                self._create_accumulators(target_block, params_acc_dict)
            if framework._non_static_mode():
                if isinstance(parameters_and_grads, list):
                    for param_and_grad in parameters_and_grads:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            self._append_optimize_op(target_block,
                                                     param_and_grad)
                else:
                    # Parameter-group dict: pass each pair together with the
                    # group's extra options (lr, weight_decay, ...).
                    for param_and_grad in parameters_and_grads['params']:
                        if param_and_grad[1] is None:
                            continue
                        if param_and_grad[0].stop_gradient is False:
                            param_grad_dict = dict()
                            param_grad_dict['params'] = param_and_grad
                            param_grad_dict.update({
                                k: v
                                for k, v in parameters_and_grads.items()
                                if k != 'params'
                            })
                            self._append_optimize_op(target_block,
                                                     param_grad_dict)
            else:
                for param_and_grad in parameters_and_grads:
                    if param_and_grad[1] is None:
                        continue
                    with param_and_grad[0].block.program._optimized_guard(
                            param_and_grad), name_scope("optimizer"):
                        if param_and_grad[0].stop_gradient is False:
                            device = self._get_device_for_param(param_and_grad[
                                0].name)
                            with device_guard(device):
                                optimize_op = self._append_optimize_op(
                                    target_block, param_and_grad)
        # Get custom finish ops for subclasses
        # FIXME: Need to fix this once we figure out how to handle dependencies
        self._finish_update(target_block, parameters_and_grads)
        end = len(target_block.ops)
        return target_block._slice_ops(start, end)
def _append_dgc_ops(self, param_and_grad):
pass
    def backward(self,
                 loss,
                 startup_program=None,
                 parameters=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        The first part of ``minimize``, do auto-diff to append backward operations for
        the current program.
        Args:
            loss (Tensor): ``loss`` tensor to run optimizations.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor`` or ``Tensor.name`` that don't need
                to be updated. The default value is None.
            callbacks (list, optional): list of callable objects to run when appending backward
                operator for one parameter. The default value is None.
        Return:
            list: list of (param, grad) tensor pairs, param is ``Parameter``,
                grad is the gradient value corresponding to the parameter.
        Examples:
            .. code-block:: python
                import paddle
                import numpy as np
                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """
        act_no_grad_set = None
        # In dygraph mode autograd already tracked gradients, so no no-grad
        # set needs to be computed.
        if framework._non_static_mode():
            pass
        else:
            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)
        # Infer dtype by loss if None
        if self._dtype is None:
            self._dtype = loss.dtype
        if framework._non_static_mode():
            # Dygraph: simply collect the already-computed gradient of every
            # trainable parameter.
            parameter_list = parameters if parameters \
                else self._parameter_list
            params_grads = []
            for param in parameter_list:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    # create gradient tensor
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))
        else:
            # Static graph: append backward ops to the program.
            if callbacks is None:
                callbacks = [error_clip_callback]
            else:
                assert (isinstance(callbacks, list))
            program = loss.block.program
            assert len(loss.shape) == 1 and loss.shape[0] == 1, \
                "The loss.shape should be (1L,), but the current loss.shape is {}. " \
                "Maybe that you should call paddle.mean to process the current loss.".format(
                    loss.shape)
            parameter_list = parameters if parameters \
                else self._parameter_list
            with program_guard(program, startup_program):
                params_grads = append_backward(loss, parameter_list,
                                               act_no_grad_set, callbacks)
                # Note: since we can't use all_reduce_op now,
                # dgc_op should be the last op of one grad.
                self._append_dgc_ops(params_grads)
        return params_grads
    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.
        Args:
            params_grads (list): list of (param, grad) pair to do optimization.
        Returns:
            list: A list of operators appended to the current program.
        Examples:
            .. code-block:: python
                import paddle
                import numpy as np
                inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
                linear = paddle.nn.Linear(10, 10)
                inp = paddle.to_tensor(inp)
                out = linear(inp)
                loss = paddle.mean(out)
                optimizer = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters())
                params_grads = optimizer.backward(loss)
                optimizer.apply_gradients(params_grads)
        """
        # Sort by parameter name for a deterministic op order.
        params_grads = sorted(params_grads, key=lambda x: x[0].name)
        # 'optimizer(grad_clip)' or 'set_gradient_clip'
        if self._grad_clip is not None:
            params_grads = self._grad_clip(params_grads)
        else:
            params_grads = append_gradient_clip_ops(params_grads)
        # Add regularization if any
        params_grads = self.append_regularization_ops(params_grads,
                                                      self.regularization)
        optimize_ops = self._create_optimization_pass(params_grads)
        return optimize_ops
    def _apply_optimize(self, loss, startup_program, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.
        Args:
            loss (Tensor): loss tensor to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameters`.
            params_grads (list): list of (param, grad) pair to do optimization.
                May also be a parameter-group dict carrying 'params' and
                per-group options such as 'grad_clip'.
        Returns:
            list: A list of operators appended to the current program.
        """
        if framework._non_static_mode():
            # Dygraph: apply clipping and regularization directly, then run
            # the optimization pass under the default programs.
            with program_guard(framework.default_main_program(),
                               framework.default_startup_program()):
                if isinstance(params_grads, list):
                    if self._grad_clip is not None:
                        params_grads = self._grad_clip(params_grads)
                    params_grads = self.append_regularization_ops(
                        params_grads, self.regularization)
                else:
                    # Parameter-group dict: the group carries its own clip.
                    grad_clip = params_grads['grad_clip']
                    if grad_clip is not None:
                        params_grads['params'] = grad_clip(params_grads[
                            'params'])
                    params_grads['params'] = self.append_regularization_ops(
                        params_grads['params'], self.regularization)
                optimize_ops = self._create_optimization_pass(params_grads)
        else:
            # Static graph: delegate to apply_gradients under the loss program.
            program = loss.block.program
            with program_guard(program, startup_program):
                optimize_ops = self.apply_gradients(params_grads)
        return optimize_ops
    def _create_regularization_of_grad(self, param, grad, regularization=None):
        """ Create and add backward regularization Operators
        Function helper of append_regularization_ops.

        The parameter's own regularizer takes precedence over the global
        *regularization*; returns the (possibly new) gradient variable.
        """
        # If no gradient or no regularization is specified, then we don't need to do anything
        if grad is None or ((not hasattr(param, 'regularizer') or
                             (hasattr(param, 'regularizer') and
                              param.regularizer is None)) and
                            regularization is None):
            return grad
        regularization_term = None
        if hasattr(param, 'regularizer') and param.regularizer is not None:
            # Add variable for regularization term in grad block
            regularization_term = param.regularizer(param, grad, grad.block)
        elif regularization is not None:
            regularization_term = regularization(param, grad, grad.block)
        assert regularization_term is not None
        if framework._non_static_mode():
            # Dygraph: sum eagerly, no new variable needed.
            return _C_ops.sum([grad, regularization_term])
        new_grad = grad
        if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
            # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
            # the grad's type and name will be changed. But the gradient's name
            # is used in ParallelExecutor Reduce mode, so I add a flag for
            # the new_grad here.
            new_grad = grad.block.create_var(
                name=grad.name + core.kNewGradSuffix(),
                dtype=param.dtype,
                shape=param.shape,
                lod_level=param.lod_level,
                type=core.VarDesc.VarType.LOD_TENSOR)
        inputs = {"X": [grad, regularization_term]}
        outputs = {"Out": [new_grad]}
        grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
        return new_grad
    def append_regularization_ops(self,
                                  parameters_and_grads,
                                  regularization=None):
        r"""Create and add backward regularization Operators
        Creates and adds backward regularization operators in the BlockDesc.
        This will add gradients of the regularizer function to the gradients
        of the parameters and return these modified gradients. This is the
        same as implementing weight decay in optimizers for regularization.
        Args:
            parameters_and_grads: A list of (parameters, gradients) pairs
                                  that need to be regularized.
            regularization: A global regularizer. If the parameter is not
                            set. It will be applied with regularizer.
        Returns:
            list[(Variable, Variable)]: list of (parameters, gradients) \
            pair with the regularized gradient
        Raises:
            Exception: Unknown regularization type
        """
        params_and_grads = []
        if framework._non_static_mode():
            for param, grad in parameters_and_grads:
                new_grad = self._create_regularization_of_grad(param, grad,
                                                               regularization)
                params_and_grads.append((param, new_grad))
        else:
            # Warn (once) when both a per-parameter regularizer and a global
            # one are present: the per-parameter one wins.
            repeate_regularizer = False
            with framework.name_scope('regularization'):
                for param, grad in parameters_and_grads:
                    if not repeate_regularizer and param.regularizer is not None and regularization is not None:
                        repeate_regularizer = True
                        logging.info(
                            "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. "
                            "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
                            % regularization.__str__())
                    with param.block.program._optimized_guard([param, grad]):
                        new_grad = self._create_regularization_of_grad(
                            param, grad, regularization)
                        params_and_grads.append((param, new_grad))
        return params_and_grads
def _get_no_grad_set(self, loss, no_grad_set=None):
no_grad_set = _get_no_grad_set_name(no_grad_set)
parameters = loss.block.program.global_block().all_parameters()
param_no_trainable = set([
param.name for param in parameters if param.stop_gradient is True
])
# If the parameter is no trainable, it should not have a gradient.
no_grad_set.update(param_no_trainable)
return no_grad_set
    @framework.dygraph_only
    def clear_grad(self, set_to_zero=True):
        """
        Clear the gradients of all optimized parameters for model.
        If not, new gradient will accumulat on previous gradient.
        There are two method to clear grad: set_to_zero or delete grad.
        Args:
            set_to_zero (bool, optional): If set grads to zero or not, default is True.
        Returns:
            None
        Examples:
            .. code-block:: python
                import numpy as np
                import paddle
                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                            parameters = linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """
        # Collect the trainable parameters, both for the flat-list form and
        # the parameter-group (list of dicts) form.
        param_list = []
        if self._parameter_list is None or not isinstance(
                self._parameter_list[0], dict):
            for p in self._parameter_list:
                if not p.stop_gradient:
                    param_list.append(p)
        else:
            for param_group in self._param_groups:
                for p in param_group['params']:
                    if not p.stop_gradient:
                        param_list.append(p)
        if _in_eager_without_dygraph_check():
            # Eager mode: each tensor clears its own gradient.
            for p in param_list:
                p.clear_gradient(set_to_zero)
        else:
            core.clear_gradients(param_list, set_to_zero)
    @imperative_base.no_grad
    def minimize(self,
                 loss,
                 startup_program=None,
                 parameters=None,
                 no_grad_set=None):
        """
        Add operations to minimize ``loss`` by updating ``parameters``.
        Args:
            loss (Tensor): A ``Tensor`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameters``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.
        Returns:
            tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by minimize and a list of (param, grad) tensor pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            In static graph mode, the returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.
        Examples:
            .. code-block:: python
                import paddle
                linear = paddle.nn.Linear(10, 10)
                input = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
                out = linear(input)
                loss = paddle.mean(out)
                beta1 = paddle.to_tensor([0.9], dtype="float32")
                beta2 = paddle.to_tensor([0.99], dtype="float32")
                adam = paddle.optimizer.Adam(learning_rate=0.1,
                        parameters=linear.parameters(),
                        weight_decay=0.01)
                loss.backward()
                adam.minimize(loss)
                adam.clear_grad()
        """
        assert isinstance(loss, Variable), "The loss should be an Tensor."
        parameter_list = parameters if parameters \
            else self._parameter_list
        # Step 1: append backward ops / collect gradients.
        params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameters=parameter_list,
            no_grad_set=no_grad_set)
        # Step 2: append the optimize ops that consume those gradients.
        optimize_ops = self._apply_optimize(
            loss, startup_program=startup_program, params_grads=params_grads)
        return optimize_ops, params_grads
    @imperative_base.no_grad
    @framework.dygraph_only
    def step(self):
        """
        Execute the optimizer and update parameters once.
        Returns:
            None
        Examples:
            .. code-block:: python
                import paddle
                import numpy as np
                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)
                linear = paddle.nn.Linear(13, 5)
                # This can be any optimizer supported by dygraph.
                adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                        parameters = linear.parameters())
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_grad()
        """
        if not isinstance(self._param_groups[0], dict):
            # Flat parameter list: gather every (param, grad) pair with an
            # existing gradient and apply one optimization pass.
            params_grads = []
            for param in self._param_groups:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))
            self._apply_optimize(
                loss=None, startup_program=None, params_grads=params_grads)
        else:
            # optimize parameters in groups
            for param_group in self._param_groups:
                params_grads = defaultdict(lambda: list())
                for param in param_group['params']:
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        grad_var = param._grad_ivar()
                        params_grads['params'].append((param, grad_var))
                # Carry the group's extra options (lr, weight_decay, ...)
                # alongside its (param, grad) pairs.
                params_grads.update(
                    {k: v
                     for k, v in param_group.items() if k != 'params'})
                self._apply_optimize(
                    loss=None, startup_program=None, params_grads=params_grads)
    def _add_param_group(self, param_group):
        """
        Add a param group to parameter_list.
        Args:
            param_group (dict): The group of Tensors to be optimzed with
            different optimization options.

        Raises:
            TypeError: if 'params' is an unordered set.
            ValueError: if a parameter already belongs to another group.
        """
        params = param_group['params']
        # Normalize 'params' to a list of parameters.
        if isinstance(params, Parameter):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError(
                "optimizer parameters should be in ordered collections,"
                "but received set, please use list instead.")
        else:
            param_group['params'] = list(params)
        # Update optimization options for each groups
        for k, v in self._default_dict.items():
            param_group.setdefault(k, v)
        # A parameter must belong to at most one group.
        param_set = set()
        for group in self._param_groups:
            param_set.update(set(group['params']))
        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError(
                "some parameters appear in more than one parameter group")
        for param in param_group['params']:
            # A float weight_decay is shorthand for L2 regularization.
            weight_decay = param_group['weight_decay']
            if isinstance(weight_decay, float):
                from ..fluid.regularizer import L2Decay
                regularization = L2Decay(weight_decay)
            else:
                regularization = weight_decay
            param.regularizer = regularization
            param.optimize_attr['learning_rate'] = param_group.get(
                'learning_rate', 1.)
        self._param_groups.append(param_group)
def _update_param_group(self, parameters):
"""
Update the param group with new entry
Args:
parameters (dict): The extra group of Tensors to be optimzed with
different optimization options. Only used in child class.
"""
pass
@framework.dygraph_only
def _multi_tensor_init(self, target_block, parameters):
"""
All parameters used for optimizer (such as: parameters, master_weight, velocity_acc for momentum) calculations are grouped into a python list by data type (float16, float32).
This function will be overridden in the corresponding optimizer file.
Args:
target_block: the block in which the loss tensor is present
parameters: list of parameter tensors for the optimizer
"""
pass
@framework.dygraph_only
def _append_optimize_multi_tensor_op(self, target_block,
parameters_and_grads):
"""
For Multi Tensor, append optimize merged_operator to block.
"""
pass
| [
"paddle._C_ops.sum",
"paddle.fluid.core.clear_gradients",
"paddle.get_default_dtype",
"paddle.fluid.framework.device_guard",
"paddle.fluid.framework.default_main_program",
"numpy.array",
"paddle.fluid.core.kNewGradSuffix",
"paddle.fluid.core.op_proto_and_checker_maker.kOpDeviceAttrName",
"paddle.flu... | [((48866, 48899), 'paddle.fluid.framework._in_eager_without_dygraph_check', '_in_eager_without_dygraph_check', ([], {}), '()\n', (48897, 48899), False, 'from paddle.fluid.framework import _in_legacy_dygraph, _in_eager_without_dygraph_check\n'), ((26333, 26353), 'paddle.fluid.framework.device_guard', 'device_guard', (['device'], {}), '(device)\n', (26345, 26353), False, 'from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard\n'), ((43606, 43645), 'paddle._C_ops.sum', '_C_ops.sum', (['[grad, regularization_term]'], {}), '([grad, regularization_term])\n', (43616, 43645), False, 'from paddle import _C_ops\n'), ((49006, 49051), 'paddle.fluid.core.clear_gradients', 'core.clear_gradients', (['param_list', 'set_to_zero'], {}), '(param_list, set_to_zero)\n', (49026, 49051), False, 'from paddle.fluid import core\n'), ((14136, 14152), 'numpy.array', 'np.array', (['tensor'], {}), '(tensor)\n', (14144, 14152), True, 'import numpy as np\n'), ((27864, 27915), 'paddle.fluid.core.op_proto_and_checker_maker.kOpDeviceAttrName', 'core.op_proto_and_checker_maker.kOpDeviceAttrName', ([], {}), '()\n', (27913, 27915), False, 'from paddle.fluid import core\n'), ((32087, 32110), 'paddle.fluid.framework.name_scope', 'name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (32097, 32110), False, 'from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard\n'), ((32218, 32238), 'paddle.fluid.framework.device_guard', 'device_guard', (['device'], {}), '(device)\n', (32230, 32238), False, 'from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard\n'), ((34810, 34833), 'paddle.fluid.framework.name_scope', 'name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (34820, 34833), False, 'from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, 
default_startup_program, device_guard\n'), ((44076, 44097), 'paddle.fluid.core.kNewGradSuffix', 'core.kNewGradSuffix', ([], {}), '()\n', (44095, 44097), False, 'from paddle.fluid import core\n'), ((16082, 16108), 'paddle.get_default_dtype', 'paddle.get_default_dtype', ([], {}), '()\n', (16106, 16108), False, 'import paddle\n'), ((17129, 17155), 'paddle.get_default_dtype', 'paddle.get_default_dtype', ([], {}), '()\n', (17153, 17155), False, 'import paddle\n'), ((23560, 23582), 'paddle.fluid.framework.default_main_program', 'default_main_program', ([], {}), '()\n', (23580, 23582), False, 'from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard\n'), ((35058, 35078), 'paddle.fluid.framework.device_guard', 'device_guard', (['device'], {}), '(device)\n', (35070, 35078), False, 'from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard\n')] |
from typing import TypeVar, Optional, Iterator, Dict, Callable, Iterable, Generic, List
from abc import ABC
from datetime import datetime
import operator, logging
import sys
import numpy as np
from hurry.filesize import size as _hurry
from psutil import virtual_memory
import pandas as pd
K = TypeVar('K')
class FacadePolicy(Generic[K]):
    """Strategy interface deciding which cached entries to archive (evict).

    Concrete policies observe cache events through the ``accessed``/``added``/
    ``removed`` hooks and expose an eviction order through ``items``.
    """

    def should_archive(self) -> bool:
        """Return True when the cache is over budget and should evict."""
        raise NotImplementedError

    def can_archive(self) -> bool:
        """Return True while at least one entry remains evictable."""
        raise NotImplementedError

    def reindex(self, items: Dict[K, pd.DataFrame]) -> None:
        """Resynchronize internal bookkeeping with the cache contents."""
        raise NotImplementedError

    def items(self) -> Iterator[K]:
        """Yield keys in eviction order (first yielded is evicted first)."""
        # TODO this is not overloaded!!
        raise NotImplementedError

    def accessed(self, key: K) -> None:
        """Hook: *key* was read from the cache. No-op by default."""
        pass

    def added(self, key: K, value: pd.DataFrame) -> None:
        """Hook: *value* was stored under *key*. No-op by default."""
        pass

    def removed(self, key: K) -> None:
        """Hook: *key* was evicted. No-op by default."""
        pass
class MemoryLimitingPolicy(FacadePolicy, Generic[K], ABC):
def __init__(self, max_memory_bytes: Optional[int] = None, max_fraction_available_bytes: Optional[float] = None):
self._max_memory_bytes = max_memory_bytes
self._max_fraction_available_bytes = max_fraction_available_bytes
self._total_memory_bytes = 0
self._usage_bytes = {} # type: Dict[K, int]
self._last_accessed = {} # type: Dict[K, datetime]
self._created = {} # type: Dict[K, datetime]
def can_archive(self) -> bool:
return len(self._last_accessed) > 0
def should_archive(self) -> bool:
return (
self._max_memory_bytes is not None and self._total_memory_bytes > self._max_memory_bytes
or self._max_fraction_available_bytes is not None and self._total_memory_bytes > virtual_memory().available * self._max_fraction_available_bytes
)
def reindex(self, items: Dict[K, pd.DataFrame]) -> None:
for key in set(self._last_accessed.keys()) - set(items.keys()):
if key not in items.keys():
self.removed(key)
for key, value in items.items():
self._usage_bytes[key] = sys.getsizeof(value)
self._total_memory_bytes = np.sum(self._usage_bytes.values())
def accessed(self, key: K) -> None:
self._last_accessed[key] = datetime.now()
def added(self, key: K, value: pd.DataFrame) -> None:
now = datetime.now()
self._created[key] = now
self._last_accessed[key] = now
self._usage_bytes[key] = sys.getsizeof(value)
self._total_memory_bytes += self._usage_bytes[key]
def removed(self, key: K) -> None:
self._total_memory_bytes -= self._usage_bytes[key]
del self._last_accessed[key]
del self._created[key]
del self._usage_bytes[key]
def __str__(self):
available = virtual_memory().available
return "{}(n={}, {}/{}, {}/{}={}%)".format(
type(self).__name__,
len(self._usage_bytes),
_hurry(self._total_memory_bytes),
'-' if self._max_memory_bytes is None else _hurry(self._max_memory_bytes),
_hurry(self._total_memory_bytes),
'-' if self._max_fraction_available_bytes is None else _hurry(available * self._max_fraction_available_bytes),
'-' if self._max_fraction_available_bytes is None else np.round(100 * self._total_memory_bytes / (available * self._max_fraction_available_bytes), 3)
)
def __repr__(self):
    """Verbose dump: every tracked key with size and last-access time, grouped by day."""
    ordered = list(self.items())  # ordering supplied by the concrete policy subclass
    ss = []
    if len(ordered) > 0:
        current_day = None
        for k in ordered:
            dt = self._last_accessed[k]
            # Emit a day header whenever the date changes between entries.
            if current_day is None or current_day.date() != dt.date():
                current_day = dt
                ss.append('#' + current_day.strftime('%Y-%m-%d') + '...')
            ss.append("{}:{}@{}".format(k, _hurry(self._usage_bytes[k]), self._last_accessed[k].strftime('%H:%M:%S')))
    return "{}@{}: [{}]".format(str(self), hex(id(self)), ', '.join(ss))
class MemoryLruPolicy(MemoryLimitingPolicy, Generic[K]):
    """Evict least-recently-used items first (oldest access time first)."""

    def items(self) -> Iterator[K]:
        by_access_time = sorted(self._last_accessed.items(), key=operator.itemgetter(1))
        return iter([key for key, _ in by_access_time])
class MemoryMruPolicy(MemoryLimitingPolicy, Generic[K]):
    """Evict most-recently-used items first (newest access time first)."""

    def items(self) -> Iterator[K]:
        # [::-1] matches reversed(sorted(...)), including tie order.
        by_access_time = sorted(self._last_accessed.items(), key=operator.itemgetter(1))
        return iter([key for key, _ in by_access_time[::-1]])
class DfFacade(Generic[K]):
    """Lazily-loading cache of DataFrames fronted by an eviction policy.

    Items are fetched through *loader* on first access and kept in memory;
    *policy* decides when and in which order cached items are evicted
    ("archived").
    """
    def __init__(self, loader: Callable[[K], pd.DataFrame], policy: FacadePolicy):
        self._loader = loader
        self._items = {}  # type: Dict[K, pd.DataFrame]
        self._policy = policy
    def __getitem__(self, key: K) -> pd.DataFrame:
        # Record the access first so the policy sees hits as well as misses.
        self._policy.accessed(key)
        if key in self._items:
            return self._items[key]
        else:
            value = self._loader(key)
            logging.debug("Loaded {}".format(key))
            self._items[key] = value
            self._policy.added(key, value)
            # Evict immediately if the new item pushed us over a limit.
            self.archive()
            return value
    def __call__(self, key: K) -> pd.DataFrame:
        # Function-call syntax is an alias for indexing.
        return self[key]
    def archive(self, at_least: Optional[int] = None) -> List[K]:
        """Evict items in policy order and return the evicted keys.

        Keeps evicting while the policy reports memory pressure; when
        *at_least* is given, evicts at least that many items regardless.
        """
        it = self._policy.items()
        archived = []
        while self._policy.can_archive() and (at_least is not None and len(archived) < at_least or self._policy.should_archive()):
            key = next(it)
            self._policy.removed(key)
            del self._items[key]
            archived.append(key)
        logging.debug("Archived {} items: {}".format(len(archived), archived))
        return archived
    def clear(self) -> None:
        """Evict everything the policy tracks."""
        it = self._policy.items()
        while self._policy.can_archive():
            key = next(it)
            self._policy.removed(key)
            del self._items[key]
    def remove(self, key: K) -> None:
        """Drop a single cached item if present (no-op otherwise)."""
        if key in self:
            self._policy.removed(key)
            del self._items[key]
    def __contains__(self, key: K):
        return key in self._items
    def __delitem__(self, key):
        self.remove(key)
    def __repr__(self):
        return "{}({})@{}".format(type(self).__name__, repr(self._policy), hex(id(self)))
    def __str__(self):
        return "{}({})".format(type(self).__name__, self._policy)
__all__ = ['FacadePolicy', 'DfFacade', 'MemoryLimitingPolicy', 'MemoryLruPolicy', 'MemoryMruPolicy']
| [
"psutil.virtual_memory",
"numpy.round",
"hurry.filesize.size",
"sys.getsizeof",
"typing.TypeVar",
"operator.itemgetter",
"datetime.datetime.now"
] | [((295, 307), 'typing.TypeVar', 'TypeVar', (['"""K"""'], {}), "('K')\n", (302, 307), False, 'from typing import TypeVar, Optional, Iterator, Dict, Callable, Iterable, Generic, List\n'), ((2031, 2045), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2043, 2045), False, 'from datetime import datetime\n'), ((2110, 2124), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2122, 2124), False, 'from datetime import datetime\n'), ((2212, 2232), 'sys.getsizeof', 'sys.getsizeof', (['value'], {}), '(value)\n', (2225, 2232), False, 'import sys\n'), ((1879, 1899), 'sys.getsizeof', 'sys.getsizeof', (['value'], {}), '(value)\n', (1892, 1899), False, 'import sys\n'), ((2496, 2512), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (2510, 2512), False, 'from psutil import virtual_memory\n'), ((2623, 2655), 'hurry.filesize.size', '_hurry', (['self._total_memory_bytes'], {}), '(self._total_memory_bytes)\n', (2629, 2655), True, 'from hurry.filesize import size as _hurry\n'), ((2738, 2770), 'hurry.filesize.size', '_hurry', (['self._total_memory_bytes'], {}), '(self._total_memory_bytes)\n', (2744, 2770), True, 'from hurry.filesize import size as _hurry\n'), ((2703, 2733), 'hurry.filesize.size', '_hurry', (['self._max_memory_bytes'], {}), '(self._max_memory_bytes)\n', (2709, 2733), True, 'from hurry.filesize import size as _hurry\n'), ((2830, 2884), 'hurry.filesize.size', '_hurry', (['(available * self._max_fraction_available_bytes)'], {}), '(available * self._max_fraction_available_bytes)\n', (2836, 2884), True, 'from hurry.filesize import size as _hurry\n'), ((2944, 3043), 'numpy.round', 'np.round', (['(100 * self._total_memory_bytes / (available * self.\n _max_fraction_available_bytes))', '(3)'], {}), '(100 * self._total_memory_bytes / (available * self.\n _max_fraction_available_bytes), 3)\n', (2952, 3043), True, 'import numpy as np\n'), ((3387, 3415), 'hurry.filesize.size', '_hurry', (['self._usage_bytes[k]'], {}), '(self._usage_bytes[k])\n', 
(3393, 3415), True, 'from hurry.filesize import size as _hurry\n'), ((1570, 1586), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (1584, 1586), False, 'from psutil import virtual_memory\n'), ((3695, 3717), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3714, 3717), False, 'import operator, logging\n'), ((3890, 3912), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3909, 3912), False, 'import operator, logging\n')] |
import os
import copy
import math
import time
import random
import numpy as np
from PIL import Image
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
def default_loader(path):
    """Open the image at *path* and return it as a single-channel ('L') PIL image."""
    return Image.open(path).convert('L')
def default_list_reader(fileList):
    """Parse an annotation file with one "path label domain" triple per line.

    Returns a list of (image_path, int(label), int(domain)) tuples.
    """
    entries = []
    with open(fileList, 'r') as handle:
        for raw_line in handle:
            img_path, label, domain = raw_line.strip().split(' ')
            entries.append((img_path, int(label), int(domain)))
    return entries
class ImageList(data.Dataset):
    """Dataset of grayscale images listed in an annotation file.

    Each sample is a dict with keys 'img' (transformed tensor), 'label'
    (int class id) and 'domain_flag' (int domain id).
    """
    def __init__(self, root, fileList, list_reader=default_list_reader, loader=default_loader):
        self.root = root
        self.imgList = list_reader(fileList)
        self.loader = loader
        # Random 128x128 crop as light augmentation, then convert to tensor.
        self.transform = transforms.Compose([
            transforms.RandomCrop(128),
            transforms.ToTensor(),
        ])
    def __getitem__(self, index):
        imgPath, target, domain = self.imgList[index]
        img = self.loader(os.path.join(self.root, imgPath))
        img = self.transform(img)
        return {'img':img, 'label': target, 'domain_flag': domain}
    def __len__(self):
        return len(self.imgList)
class SeparateBatchSampler(object):
    """Batch sampler mixing real and fake samples at a fixed ratio.

    Indices are handled in consecutive pairs (2*idx, 2*idx+1) — presumably
    NIR/VIS image pairs, matching SeparateImageList's layout (TODO confirm).
    Fake samples live after the real ones, offset by real_data_num.
    """
    def __init__(self, real_data_idx, fake_data_idx, batch_size=128, ratio=0.5, put_back=False):
        self.batch_size = batch_size
        self.ratio = ratio  # fraction of each batch drawn from the real data
        self.real_data_num = len(real_data_idx)
        self.fake_data_num = len(fake_data_idx)
        self.max_num_image = max(self.real_data_num, self.fake_data_num)
        self.real_data_idx = real_data_idx
        self.fake_data_idx = fake_data_idx
        self.processed_idx = copy.deepcopy(self.real_data_idx)
    def __len__(self):
        # Number of batches per epoch, driven by the larger pool.
        return self.max_num_image // (int(self.batch_size * self.ratio))
    def __iter__(self):
        batch_size_real_data = int(math.floor(self.ratio * self.batch_size))
        batch_size_fake_data = self.batch_size - batch_size_real_data
        self.processed_idx = copy.deepcopy(self.real_data_idx)
        # Permute over pairs, hence the // 2.
        rand_real_data_idx = np.random.permutation(len(self.real_data_idx) // 2)
        for i in range(self.__len__()):
            batch = []
            idx_fake_data = random.sample(self.fake_data_idx, batch_size_fake_data // 2)
            for j in range(batch_size_real_data // 2):
                idx = rand_real_data_idx[(i * batch_size_real_data + j) % (self.real_data_num // 2)]
                batch.append(self.processed_idx[2 * idx])
                batch.append(self.processed_idx[2 * idx + 1])
            for idx in idx_fake_data:
                # Fake samples sit after the real ones in the concatenated list.
                batch.append(2 * idx + self.real_data_num)
                batch.append(2 * idx + 1 + self.real_data_num)
            yield batch
class SeparateImageList(data.Dataset):
    """Dataset concatenating real (labelled) and fake (generated) NIR/VIS images.

    Real entries come from an annotation file; fake entries are numbered
    jpg files under 'nir_noise/' and 'vis_noise/' with label -1 and domain
    0 (NIR) or 1 (VIS).  Index lists for both halves are exposed through
    get_idx() for use with SeparateBatchSampler.
    """
    def __init__(self, real_data_path, real_list_path, fake_data_path, fake_total_num, ratio=0.5):
        self.transform = transforms.Compose([
            transforms.RandomCrop(128),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        # load real nir/vis data
        real_data_list, real_data_idx = self.list_reader(real_data_path, real_list_path)
        # load fake nir/vis data from noise, in a random file order
        _idx = np.random.permutation(fake_total_num)
        fake_data_list = []
        fake_data_idx = []
        for i in range(0, fake_total_num):
            _fake_img_name = str(_idx[i] + 1) + '.jpg'  # files are 1-based
            # nir_noise and vis_noise are the path of the fake data
            _fake_img_nir_name = 'nir_noise/' + _fake_img_name
            _fake_img_vis_name = 'vis_noise/' + _fake_img_name
            _fake_img_nir_path = os.path.join(fake_data_path, _fake_img_nir_name)
            _fake_img_vis_path = os.path.join(fake_data_path, _fake_img_vis_name)
            # Label -1 marks fake data; domain 0 = NIR, 1 = VIS.
            fake_data_list.append((_fake_img_nir_path, -1, 0))
            fake_data_list.append((_fake_img_vis_path, -1, 1))
            fake_data_idx.append(i)
        self.real_data_idx = real_data_idx
        self.fake_data_idx = fake_data_idx
        real_data_list.extend(fake_data_list)
        self.all_list = real_data_list
        self.ratio = ratio
        print('real: {}, fake: {}, total: {}, ratio: {}\n'.format(len(self.real_data_idx), len(self.fake_data_idx), len(self.all_list), self.ratio))
    def get_idx(self):
        """Return (real_data_idx, fake_data_idx) for the batch sampler."""
        return self.real_data_idx, self.fake_data_idx
    def list_reader(self, root_path, fileList):
        """Read "name label domain" lines; return (entry list, running index list)."""
        imgList = []
        imgIdx = []
        img_index = 0
        with open(fileList, 'r') as file:
            for line in file.readlines():
                img_name, label, domain = line.strip().split(' ')
                img_path = os.path.join(root_path, img_name)
                imgList.append((img_path, int(label), int(domain)))
                imgIdx.append(img_index)
                img_index += 1
        return imgList, imgIdx
    def loader(self, path):
        """Load an image as single-channel ('L') PIL image."""
        img = Image.open(path).convert('L')
        return img
    def __getitem__(self, index):
        imgPath, target, domain = self.all_list[index]
        img = self.loader(imgPath)
        img = self.transform(img)
        return {'img': img, 'label': target, 'domain_flag': domain}
    def __len__(self):
        return len(self.all_list)
| [
"copy.deepcopy",
"torchvision.transforms.RandomHorizontalFlip",
"random.sample",
"math.floor",
"PIL.Image.open",
"numpy.random.permutation",
"os.path.join",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.ToTensor"
] | [((1763, 1796), 'copy.deepcopy', 'copy.deepcopy', (['self.real_data_idx'], {}), '(self.real_data_idx)\n', (1776, 1796), False, 'import copy\n'), ((2105, 2138), 'copy.deepcopy', 'copy.deepcopy', (['self.real_data_idx'], {}), '(self.real_data_idx)\n', (2118, 2138), False, 'import copy\n'), ((3370, 3407), 'numpy.random.permutation', 'np.random.permutation', (['fake_total_num'], {}), '(fake_total_num)\n', (3391, 3407), True, 'import numpy as np\n'), ((243, 259), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (253, 259), False, 'from PIL import Image\n'), ((1059, 1091), 'os.path.join', 'os.path.join', (['self.root', 'imgPath'], {}), '(self.root, imgPath)\n', (1071, 1091), False, 'import os\n'), ((1960, 2000), 'math.floor', 'math.floor', (['(self.ratio * self.batch_size)'], {}), '(self.ratio * self.batch_size)\n', (1970, 2000), False, 'import math\n'), ((2315, 2375), 'random.sample', 'random.sample', (['self.fake_data_idx', '(batch_size_fake_data // 2)'], {}), '(self.fake_data_idx, batch_size_fake_data // 2)\n', (2328, 2375), False, 'import random\n'), ((3812, 3860), 'os.path.join', 'os.path.join', (['fake_data_path', '_fake_img_nir_name'], {}), '(fake_data_path, _fake_img_nir_name)\n', (3824, 3860), False, 'import os\n'), ((3895, 3943), 'os.path.join', 'os.path.join', (['fake_data_path', '_fake_img_vis_name'], {}), '(fake_data_path, _fake_img_vis_name)\n', (3907, 3943), False, 'import os\n'), ((864, 890), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(128)'], {}), '(128)\n', (885, 890), True, 'import torchvision.transforms as transforms\n'), ((905, 926), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (924, 926), True, 'import torchvision.transforms as transforms\n'), ((3058, 3084), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(128)'], {}), '(128)\n', (3079, 3084), True, 'import torchvision.transforms as transforms\n'), ((3099, 3132), 'torchvision.transforms.RandomHorizontalFlip', 
'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3130, 3132), True, 'import torchvision.transforms as transforms\n'), ((3147, 3168), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3166, 3168), True, 'import torchvision.transforms as transforms\n'), ((4849, 4882), 'os.path.join', 'os.path.join', (['root_path', 'img_name'], {}), '(root_path, img_name)\n', (4861, 4882), False, 'import os\n'), ((5104, 5120), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (5114, 5120), False, 'from PIL import Image\n')] |
"""Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, lars_path_gram
from sklearn.linear_model import lasso_path
from sklearn.datasets import make_regression
def compute_bench(samples_range, features_range):
    """Time lars_path / lasso_path (with and without a precomputed Gram
    matrix) over a grid of problem sizes.

    Returns a dict mapping benchmark label -> list of runtimes in seconds,
    one entry per (n_samples, n_features) pair, in iteration order.
    """
    def bench(label, fn):
        # Time one call, record it under *label*, and echo the duration.
        # Factored out of four near-identical copy-pasted stanzas.
        gc.collect()
        print("benchmarking %s:" % label, end='')
        sys.stdout.flush()
        tstart = time()
        fn()
        delta = time() - tstart
        print("%0.3fs" % delta)
        results[label].append(delta)

    it = 0
    results = defaultdict(list)  # defaultdict(list) over defaultdict(lambda: [])
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            dataset_kwargs = {
                'n_samples': n_samples,
                'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) / 10,
                #'effective_rank': None,
                'bias': 0.0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            X, y = make_regression(**dataset_kwargs)

            def lars_with_gram():
                # The Gram/Xy precomputation is deliberately inside the
                # timed region, as in the original benchmark.
                G = np.dot(X.T, X)  # precomputed Gram matrix
                Xy = np.dot(X.T, y)
                lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')

            bench('lars_path (with Gram)', lars_with_gram)
            bench('lars_path (without Gram)', lambda: lars_path(X, y, method='lasso'))
            bench('lasso_path (with Gram)', lambda: lasso_path(X, y, precompute=True))
            bench('lasso_path (without Gram)', lambda: lasso_path(X, y, precompute=False))
    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # Small 3x3 grid of problem sizes; one timing surface per benchmark.
    samples_range = np.linspace(10, 1430, 3).astype(int)
    features_range = np.linspace(10, 500 , 3).astype(int)
    results = compute_bench(samples_range, features_range)
    max_time = max(max(t) for t in results.values())
    fig = plt.figure('scikit-learn Lasso path benchmark results')
    i = 1
    for c, (label, timings) in zip('bcry', sorted(results.items())):
        ax = fig.add_subplot(2, 2, i, projection='3d')
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        # ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        ax.set_zlabel('Time (s)')
        ax.set_zlim3d(0.0, max_time * 1.1)
        ax.set_title(label)
        # ax.legend()
        i += 1
    #plt.show()
| [
"sklearn.linear_model.lasso_path",
"numpy.meshgrid",
"sklearn.linear_model.lars_path_gram",
"numpy.asarray",
"sklearn.datasets.make_regression",
"time.time",
"collections.defaultdict",
"gc.collect",
"matplotlib.pyplot.figure",
"sklearn.linear_model.lars_path",
"sys.stdout.flush",
"numpy.linspa... | [((467, 491), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (478, 491), False, 'from collections import defaultdict\n'), ((3095, 3150), 'matplotlib.pyplot.figure', 'plt.figure', (['"""scikit-learn Lasso path benchmark results"""'], {}), "('scikit-learn Lasso path benchmark results')\n", (3105, 3150), True, 'import matplotlib.pyplot as plt\n'), ((3300, 3342), 'numpy.meshgrid', 'np.meshgrid', (['samples_range', 'features_range'], {}), '(samples_range, features_range)\n', (3311, 3342), True, 'import numpy as np\n'), ((1217, 1250), 'sklearn.datasets.make_regression', 'make_regression', ([], {}), '(**dataset_kwargs)\n', (1232, 1250), False, 'from sklearn.datasets import make_regression\n'), ((1264, 1276), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1274, 1276), False, 'import gc\n'), ((1354, 1372), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1370, 1372), False, 'import sys\n'), ((1394, 1400), 'time.time', 'time', ([], {}), '()\n', (1398, 1400), False, 'from time import time\n'), ((1417, 1431), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (1423, 1431), True, 'import numpy as np\n'), ((1476, 1490), 'numpy.dot', 'np.dot', (['X.T', 'y'], {}), '(X.T, y)\n', (1482, 1490), True, 'import numpy as np\n'), ((1503, 1566), 'sklearn.linear_model.lars_path_gram', 'lars_path_gram', ([], {'Xy': 'Xy', 'Gram': 'G', 'n_samples': 'y.size', 'method': '"""lasso"""'}), "(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')\n", (1517, 1566), False, 'from sklearn.linear_model import lars_path, lars_path_gram\n'), ((1711, 1723), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1721, 1723), False, 'import gc\n'), ((1804, 1822), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1820, 1822), False, 'import sys\n'), ((1844, 1850), 'time.time', 'time', ([], {}), '()\n', (1848, 1850), False, 'from time import time\n'), ((1863, 1894), 'sklearn.linear_model.lars_path', 'lars_path', (['X', 'y'], {'method': 
'"""lasso"""'}), "(X, y, method='lasso')\n", (1872, 1894), False, 'from sklearn.linear_model import lars_path, lars_path_gram\n'), ((2042, 2054), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2052, 2054), False, 'import gc\n'), ((2133, 2151), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2149, 2151), False, 'import sys\n'), ((2173, 2179), 'time.time', 'time', ([], {}), '()\n', (2177, 2179), False, 'from time import time\n'), ((2192, 2225), 'sklearn.linear_model.lasso_path', 'lasso_path', (['X', 'y'], {'precompute': '(True)'}), '(X, y, precompute=True)\n', (2202, 2225), False, 'from sklearn.linear_model import lasso_path\n'), ((2371, 2383), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2381, 2383), False, 'import gc\n'), ((2465, 2483), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2481, 2483), False, 'import sys\n'), ((2505, 2511), 'time.time', 'time', ([], {}), '()\n', (2509, 2511), False, 'from time import time\n'), ((2524, 2558), 'sklearn.linear_model.lasso_path', 'lasso_path', (['X', 'y'], {'precompute': '(False)'}), '(X, y, precompute=False)\n', (2534, 2558), False, 'from sklearn.linear_model import lasso_path\n'), ((2874, 2898), 'numpy.linspace', 'np.linspace', (['(10)', '(1430)', '(3)'], {}), '(10, 1430, 3)\n', (2885, 2898), True, 'import numpy as np\n'), ((2933, 2956), 'numpy.linspace', 'np.linspace', (['(10)', '(500)', '(3)'], {}), '(10, 500, 3)\n', (2944, 2956), True, 'import numpy as np\n'), ((1587, 1593), 'time.time', 'time', ([], {}), '()\n', (1591, 1593), False, 'from time import time\n'), ((1915, 1921), 'time.time', 'time', ([], {}), '()\n', (1919, 1921), False, 'from time import time\n'), ((2246, 2252), 'time.time', 'time', ([], {}), '()\n', (2250, 2252), False, 'from time import time\n'), ((2579, 2585), 'time.time', 'time', ([], {}), '()\n', (2583, 2585), False, 'from time import time\n'), ((3355, 3374), 'numpy.asarray', 'np.asarray', (['timings'], {}), '(timings)\n', (3365, 3374), True, 'import numpy as np\n')] |
import os
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from pytorch_msssim import ssim_matlab
from model.RIFE import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Model()
# Weights are expected under train_log/ next to this script.
model.load_model(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train_log'))
model.eval()
model.device()
# Sequences of the Middlebury "other" set used for interpolation-error (IE) evaluation.
name = ['Beanbags', 'Dimetrodon', 'DogDance', 'Grove2', 'Grove3', 'Hydrangea', 'MiniCooper', 'RubberWhale', 'Urban2', 'Urban3', 'Venus', 'Walking']
IE_list = []
for i in name:
    # Frames as CHW float arrays in [0, 1]; ground truth kept as uint8 HWC.
    i0 = cv2.imread('other-data/{}/frame10.png'.format(i)).transpose(2, 0, 1) / 255.
    i1 = cv2.imread('other-data/{}/frame11.png'.format(i)).transpose(2, 0, 1) / 255.
    gt = cv2.imread('other-gt-interp/{}/frame10i11.png'.format(i))
    h, w = i0.shape[1], i0.shape[2]
    # Zero-pad both frames into a fixed 1x6x480x640 canvas.
    imgs = torch.zeros([1, 6, 480, 640])
    # ph/pw are computed but unused — frames are placed at the top-left
    # corner, not centred (TODO confirm this is intended).
    ph = (480 - h) // 2
    pw = (640 - w) // 2
    imgs[:, :3, :h, :w] = torch.from_numpy(i0).unsqueeze(0).float()
    imgs[:, 3:, :h, :w] = torch.from_numpy(i1).unsqueeze(0).float()
    I0 = imgs[:, :3]
    I2 = imgs[:, 3:]
    pred = model.inference(I0, I2)
    # Back to HWC uint8-range values, cropped to the original size.
    out = pred[0].cpu().numpy().transpose(1, 2, 0)
    out = np.round(out[:h, :w] * 255)
    IE_list.append(np.abs((out - gt * 1.0)).mean())
    # Running mean IE over the sequences processed so far.
    print(np.mean(IE_list))
| [
"sys.path.append",
"numpy.abs",
"os.path.realpath",
"model.RIFE.Model",
"numpy.mean",
"torch.cuda.is_available",
"torch.zeros",
"numpy.round",
"torch.from_numpy"
] | [((21, 41), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (36, 41), False, 'import sys\n'), ((297, 304), 'model.RIFE.Model', 'Model', ([], {}), '()\n', (302, 304), False, 'from model.RIFE import Model\n'), ((884, 913), 'torch.zeros', 'torch.zeros', (['[1, 6, 480, 640]'], {}), '([1, 6, 480, 640])\n', (895, 913), False, 'import torch\n'), ((1236, 1263), 'numpy.round', 'np.round', (['(out[:h, :w] * 255)'], {}), '(out[:h, :w] * 255)\n', (1244, 1263), True, 'import numpy as np\n'), ((250, 275), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (273, 275), False, 'import torch\n'), ((1326, 1342), 'numpy.mean', 'np.mean', (['IE_list'], {}), '(IE_list)\n', (1333, 1342), True, 'import numpy as np\n'), ((351, 377), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (367, 377), False, 'import os\n'), ((1283, 1305), 'numpy.abs', 'np.abs', (['(out - gt * 1.0)'], {}), '(out - gt * 1.0)\n', (1289, 1305), True, 'import numpy as np\n'), ((988, 1008), 'torch.from_numpy', 'torch.from_numpy', (['i0'], {}), '(i0)\n', (1004, 1008), False, 'import torch\n'), ((1056, 1076), 'torch.from_numpy', 'torch.from_numpy', (['i1'], {}), '(i1)\n', (1072, 1076), False, 'import torch\n')] |
from __future__ import annotations
import sys
sys.path += '..'
import ATL
from ATL import num
import numpy as np
import time
@ATL.func
def qform( N : size, x : num[N], A : num[N,N] ):
B[i:N,j:N] = Sum[k:N]( A[k,i] * A[k,j] )
y[i:N] = Sum[j:N]( B[i,j] * x[j] )
return Sum[i:N]( x[i] * y[i] )
DQ = qform.deriv(A=True).proj(1).simplify()
#qform = qform.simplify()
# --------------------------------------------------------------------------- #
print(DQ)
exit(0)
# --------------------------------------------------------------------------- #
# timing loop with random data
def time_for_N(N, niters=10):
    """Benchmark qform and its derivative DQ on random size-N inputs.

    Returns (base_ms, deriv_ms): average wall-clock milliseconds per call
    of qform and DQ respectively, averaged over *niters* iterations.
    """
    def _avg_ms(run):
        # Average wall-clock milliseconds per call over niters calls.
        # Factored out of two duplicated timing stanzas.
        start = time.perf_counter()
        for _ in range(0, niters):
            run()
        return 1e3 * (time.perf_counter() - start) / niters

    x = np.asfortranarray(np.random.rand(N))
    A = np.asfortranarray(np.random.rand(N, N))
    dA = np.asfortranarray(np.random.rand(N, N))
    scalar = np.zeros([1], order='F')

    # prime the pump and cause compilation etc.
    DQ(N, x, A, dA, output=scalar)
    qform(N, x, A, output=scalar)

    # DQ is timed first, then the primal, matching the original ordering.
    dtime = _avg_ms(lambda: DQ(N, x, A, dA, output=scalar))
    basetime = _avg_ms(lambda: qform(N, x, A, output=scalar))
    return basetime, dtime
print("qform2 timings as N varies")
print(f"{'N':8s} {'qform':8s} {'DQ':8s} Griewank")
for N in [100,200,400,800,1600,3200,6400]:
    # Grow the iteration count for small N so each row does roughly
    # 1e8 "work units" (N*N*niters) in total.
    niters = 10
    if N * N * niters < 1e8:
        niters = int(1e8)//(N*N)
    basetime, dtime = time_for_N(N,niters=niters)
    # Last column: derivative cost relative to the primal evaluation.
    print( f"{N:8d}: {basetime:8.3f} ms {dtime:8.3f} ms "
           f" {dtime/basetime:8.3f}" )
| [
"numpy.random.rand",
"numpy.zeros",
"time.perf_counter"
] | [((789, 813), 'numpy.zeros', 'np.zeros', (['[1]'], {'order': '"""F"""'}), "([1], order='F')\n", (797, 813), True, 'import numpy as np\n'), ((932, 951), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (949, 951), False, 'import time\n'), ((1024, 1043), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1041, 1043), False, 'import time\n'), ((1106, 1125), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1123, 1125), False, 'import time\n'), ((1198, 1217), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1215, 1217), False, 'import time\n'), ((656, 673), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (670, 673), True, 'import numpy as np\n'), ((705, 725), 'numpy.random.rand', 'np.random.rand', (['N', 'N'], {}), '(N, N)\n', (719, 725), True, 'import numpy as np\n'), ((756, 776), 'numpy.random.rand', 'np.random.rand', (['N', 'N'], {}), '(N, N)\n', (770, 776), True, 'import numpy as np\n')] |
#encoding=utf8
import os, re, sys
import cv2
import numpy as np
from lib.sky import *
from lib.kmorphology import *
from lib import kalgorithm
from lib import pyheal
repairfile = r"./output_images/phase3/phase3_repair.png"
repairmaskfile = r"./output_images/phase3/phase3_repair_mask.png"
skyfile = r"./input_images/phase3/phase3_sky.jpg"
outfile = r"./output_images/phase3/phase3_sky.jpg"
def findAngle(out):
    """Estimate the tilt angle of a binary edge image (original comment:
    "image tilt angle").

    Scans the leftmost and rightmost columns from the bottom up and takes
    the first (lowest) non-zero row on each side; the angle is
    arctan((lH - rH) / W).  A column with no non-zero pixel defaults to
    the bottom row.

    BUG FIX: the original used H-1 as a "not found" sentinel, so a hit
    exactly on the bottom row left the sentinel condition true and a
    later, higher hit silently overwrote it.
    """
    H, W = out.shape
    lH = None  # lowest non-zero row in the left column
    rH = None  # lowest non-zero row in the right column
    for ik in range(H - 1, -1, -1):
        if lH is None and out[ik, 0]:
            lH = ik
        if rH is None and out[ik, W - 1]:
            rH = ik
        if lH is not None and rH is not None:
            break
    if lH is None:
        lH = H - 1
    if rH is None:
        rH = H - 1
    return np.arctan(1.0 * (lH - rH) / W)
# Simple edge-repair algorithm.
def maskfill(imgsrc):
    """For every all-zero (hole) pixel, try to pull a value in from its
    neighbours via maskfillSearch (which mutates imgsrc in place)."""
    height, width, _channels = imgsrc.shape
    for row in range(height):
        for col in range(width):
            # Pixels that already carry a value are left untouched.
            if np.sum(imgsrc[row, col]):
                continue
            maskfillSearch(imgsrc, (row, col), 0)
def maskfillSearch(imgsrc, point, depth):
    """Recursively copy a neighbouring pixel value into the hole at *point*.

    The fill direction is chosen from the pixel's position: the bottom
    half of the image fills from below and the top half from above, then
    the right half fills from the right and the left half from the left.
    After a successful fill the search spreads to adjacent holes.
    Recursion is capped at depth 30.
    """
    if depth >= 30: return
    y, x = point
    H, W, C = imgsrc.shape
    if y < 0 or y >= H: return
    if x < 0 or x >= W: return
    # Already has a value.
    if np.sum(imgsrc[y, x]): return
    if y < H / 2:  # try filling from below
        if np.sum(imgsrc[y+1, x]):
            imgsrc[y, x] = imgsrc[y+1, x]
            maskfillSearch(imgsrc, (y, x-1), depth+1)  # spread to the left
            maskfillSearch(imgsrc, (y, x+1), depth+1)  # spread to the right
    else:  # try filling from above
        if np.sum(imgsrc[y-1, x]):
            imgsrc[y, x] = imgsrc[y-1, x]
            maskfillSearch(imgsrc, (y, x-1), depth+1)  # spread to the left
            maskfillSearch(imgsrc, (y, x+1), depth+1)  # spread to the right
    if x < W / 2:  # try filling from the right
        if np.sum(imgsrc[y, x+1]):
            imgsrc[y, x] = imgsrc[y, x+1]
            maskfillSearch(imgsrc, (y-1, x), depth+1)  # spread upwards
            maskfillSearch(imgsrc, (y+1, x), depth+1)  # spread downwards
    else:  # try filling from the left
        if np.sum(imgsrc[y, x-1]):
            imgsrc[y, x] = imgsrc[y, x-1]
            maskfillSearch(imgsrc, (y-1, x), depth+1)  # spread upwards
            maskfillSearch(imgsrc, (y+1, x), depth+1)  # spread downwards
# The two edge operators have complementary strengths; summing them works better.
def calculateMask(imgsrc):
    """Build a binary edge mask: sum Prewitt and Sobel gradient magnitudes,
    clip to [0, 255], then Otsu-threshold the result."""
    gray = kalgorithm.bgr2gray(imgsrc)
    fy, fx = kalgorithm.prewittFilter(gray, K_size=3)
    out1 = fy.astype(np.float32) + fx.astype(np.float32)
    fy, fx = kalgorithm.sobelFilter(gray, K_size=3)
    out2 = fy.astype(np.float32) + fx.astype(np.float32)
    out = out1 + out2
    out = np.clip(out, 0, 255)
    out = kalgorithm.thresholdOtsuBinarization(out)
    return out
def mainfixfile():
    """Repair and deskew the phase-2 output image, caching every stage on disk.

    Each stage is skipped when its output file already exists.
    """
    srcfile = "./output_images/phase2/phase2_broken_nn.jpg.png"
    dstfile = "./output_images/phase3/phase3_repair_original.png"
    imgsrc = None
    if not os.path.exists(dstfile):
        img = kalgorithm.imgRead(srcfile)
        H, W, C = img.shape
        # Downscale to one third with nearest-neighbour interpolation.
        img = kalgorithm.nnInterpolateRound(img, int(H / 3), int(W / 3))
        # Repair the image right away.
        # kalgorithm.imgSave(dstfile, img)
        from phase2_broken_repair import mainfix
        imgsrc = mainfix(img, dstfile, 240, onlyeasy=True)
    else:
        imgsrc = kalgorithm.imgRead(dstfile).astype(np.float32)
    if not os.path.exists(dstfile+".mask.png"):
        out = calculateMask(imgsrc)
        kalgorithm.imgSave(dstfile+".mask.png", out)
    # Separate out the horizontal (sea-level) line.
    if not os.path.exists(repairmaskfile):
        out = kalgorithm.imgRead(dstfile+".mask.png").astype(np.float32)
        out = kalgorithm.bgr2gray(out)
        # Line-shaped erode/dilate keeps only long horizontal structures.
        out = morphologyErodeLine(out, 1, linelen=40)
        out = morphologyDilateLine(out, 1, linelen=40)
        kalgorithm.imgSave(dstfile+".mask.line.png", out)
        # Use the horizontal line to deskew the original image.
        angle = findAngle(out)  # tilt angle
        print("angle", angle)
        imgsrc = kalgorithm.imgRead(dstfile).astype(np.float32)
        imgsrc = kalgorithm.affineRotation(imgsrc, angle)
        # Repair the border left empty by the rotation.
        # NOTE(review): maskfill returns None, so this loop runs exactly once.
        while maskfill(imgsrc):
            pass
        kalgorithm.imgSave(repairfile, imgsrc)
        out = calculateMask(imgsrc)
        kalgorithm.imgSave(repairmaskfile, out)
        ## Compute the sea-level line for an accurate separation.
        out = morphologyErodeLine(out, 1, linelen=40)
        out = morphologyDilateLine(out, 3, linelen=80)
        kalgorithm.imgSave(repairmaskfile+".mask.line.png", out)
def display(out, title="result"):
    """Debug helper: clamp *out* to uint8 range and show it in a blocking
    OpenCV window (closed on any key press)."""
    print(out.shape, out[0, 0])
    shown = np.clip(out, 0, 255)
    shown = shown.astype(np.uint8)
    cv2.imshow(title, shown)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main_fisher_girl_mask():
    """Compute and save the mask of the 'fisher girl' / island region.

    Skipped when the mask file already exists.
    """
    if os.path.exists(outfile+"_fisher_girl_mask.png"):
        return
    imgsrc = kalgorithm.imgRead(repairfile).astype(np.float32)
    sky = kalgorithm.imgRead(skyfile).astype(np.float32)
    # Use colour tracking plus morphology to get the region of interest.
    # RGB > HSV
    mask = BGR2HSV(imgsrc)
    # color tracking
    mask = get_mask(mask)
    # masking
    out = masking(imgsrc, mask)  # very dark areas are picked up too and treated as land (mainly the small island)
    out = kalgorithm.bgr2gray(out)
    mask = kalgorithm.thresholdOtsuBinarization(out).astype(np.float32)/255
    # closing: more white area, removes small black specks
    mask = Morphology_Closing(mask, time=1)
    # erode so the girl region comes out a little fatter in the final mask
    mask = Erode(mask, erodeTime=1)
    # masking
    out = masking(imgsrc, mask)
    #display(out)
    kalgorithm.imgSave(outfile+"_fisher_girl.png", out)  # the island is now cleanly segmented
    kalgorithm.imgSave(outfile+"_fisher_girl_mask.png", mask*255)
def findvline():
    """Locate the sea-horizon row in the repaired line mask.

    Binarises the mask, sums white pixels per row, and averages the row
    indices whose mass exceeds a threshold (skipping a 5-pixel border).

    Returns:
        The horizon row index as an int.

    Raises:
        ValueError: if no row passes the threshold.  (The original crashed
        later with int(nan); removed the unused ymin/ymax locals too.)
    """
    outv = kalgorithm.bgr2gray(kalgorithm.imgRead(repairmaskfile+".mask.line.png"))
    outv = kalgorithm.thresholdBinarization(outv)
    H, W = outv.shape
    yhistogram = np.sum(outv, axis=1)
    result = []
    for i in range(len(yhistogram)):
        if i > 5 and i < H-5 and yhistogram[i] > 1000:
            print(i, yhistogram[i])
            result.append(i)
    if not result:
        raise ValueError("findvline: no horizon line found in " + repairmaskfile)
    print("findvline", np.mean(result))
    return int(np.mean(result))
def main_ocean_wave():
    """Split the repaired image mask into sea-surface and sky masks.

    Skipped when the ocean-wave mask file already exists.
    """
    if os.path.exists(outfile+"_ocean_wave_mask.png"):
        return
    imgsrc = kalgorithm.imgRead(repairfile).astype(np.float32)
    sky = kalgorithm.imgRead(skyfile).astype(np.float32)
    masksrc = kalgorithm.bgr2gray(kalgorithm.imgRead(repairmaskfile))
    vline = findvline()  # horizon row
    fisher_girl_mask = 255-kalgorithm.bgr2gray(kalgorithm.imgRead(outfile+"_fisher_girl_mask.png"))
    # Subtract the fisher girl from the background, leaving the sea surface.
    outv = masksrc - masksrc * (fisher_girl_mask/255.0)
    outv[:vline, ...] = 0
    kalgorithm.imgSave(outfile+"_ocean_wave_mask.png", outv)
    # Everything above the horizon (minus the girl) becomes the sky mask.
    outv[:vline, ...] = 255
    outv[vline:, ...] = 0
    outv = outv.astype(np.float32)
    outv[:vline, ...] = outv[:vline, ...] * (1-(fisher_girl_mask[:vline, ...]/255.0))
    outv = np.clip(outv, 0, 255)
    kalgorithm.imgSave(outfile+"_sky_mask.png", outv)
def main_replace_sky():
    """Composite the replacement sky onto the repaired photo via the sky mask."""
    mask_fishergirl = outfile+"_fisher_girl_mask.png"
    mask_sea = outfile+"_ocean_wave_mask.png"
    mask_sky = outfile+"_sky_mask.png"
    imgsrc = kalgorithm.imgRead(repairfile).astype(np.float32)
    sky = kalgorithm.imgRead(skyfile).astype(np.float32)
    mask_sky = kalgorithm.imgRead(mask_sky).astype(np.float32)
    mask_sky = kalgorithm.meanFilter(mask_sky)  # mean filter softens the seam between regions
    sky = kalgorithm.blInterpolate(sky, 0.33333333334, 0.33333333334)
    print("imgsrc", imgsrc.shape, "sky", sky.shape) # (744, 1100, 3) (618, 1100, 3)
    print("mask_sky", mask_sky.shape) # (744, 1100, 3)
    # Pad the (smaller) sky into a canvas the size of the mask so the
    # element-wise blend below is shape-compatible.
    newsky = np.zeros((mask_sky.shape[0], mask_sky.shape[1], sky.shape[2]), np.uint8)
    newsky[:, :] = 0
    newsky[:sky.shape[0], :sky.shape[1]] = sky
    print("newsky", newsky.shape)
    mask_sky = mask_sky / 255
    print(newsky.shape, mask_sky.shape, imgsrc.shape)
    # Alpha blend: sky where the mask is white, original elsewhere.
    finalimage = newsky * mask_sky + imgsrc * (1-mask_sky)
    kalgorithm.imgSave(outfile+"_sky_cloud.png", finalimage)
if __name__ == "__main__":
    mainfixfile()
    # Step 1: compute the fisher-girl mask.
    main_fisher_girl_mask()
    # Step 2: separate the sea, fisher girl and sky templates.
    main_ocean_wave()
    # Step 3: composite the replacement sky.
    main_replace_sky()
| [
"numpy.sum",
"lib.kalgorithm.thresholdBinarization",
"numpy.clip",
"lib.kalgorithm.meanFilter",
"lib.kalgorithm.imgSave",
"lib.kalgorithm.affineRotation",
"lib.kalgorithm.prewittFilter",
"numpy.mean",
"cv2.imshow",
"lib.kalgorithm.sobelFilter",
"lib.kalgorithm.blInterpolate",
"os.path.exists",... | [((651, 681), 'numpy.arctan', 'np.arctan', (['(1.0 * (lH - rH) / W)'], {}), '(1.0 * (lH - rH) / W)\n', (660, 681), True, 'import numpy as np\n'), ((1108, 1128), 'numpy.sum', 'np.sum', (['imgsrc[y, x]'], {}), '(imgsrc[y, x])\n', (1114, 1128), True, 'import numpy as np\n'), ((2057, 2084), 'lib.kalgorithm.bgr2gray', 'kalgorithm.bgr2gray', (['imgsrc'], {}), '(imgsrc)\n', (2076, 2084), False, 'from lib import kalgorithm\n'), ((2098, 2138), 'lib.kalgorithm.prewittFilter', 'kalgorithm.prewittFilter', (['gray'], {'K_size': '(3)'}), '(gray, K_size=3)\n', (2122, 2138), False, 'from lib import kalgorithm\n'), ((2209, 2247), 'lib.kalgorithm.sobelFilter', 'kalgorithm.sobelFilter', (['gray'], {'K_size': '(3)'}), '(gray, K_size=3)\n', (2231, 2247), False, 'from lib import kalgorithm\n'), ((2337, 2357), 'numpy.clip', 'np.clip', (['out', '(0)', '(255)'], {}), '(out, 0, 255)\n', (2344, 2357), True, 'import numpy as np\n'), ((2368, 2409), 'lib.kalgorithm.thresholdOtsuBinarization', 'kalgorithm.thresholdOtsuBinarization', (['out'], {}), '(out)\n', (2404, 2409), False, 'from lib import kalgorithm\n'), ((4183, 4203), 'numpy.clip', 'np.clip', (['out', '(0)', '(255)'], {}), '(out, 0, 255)\n', (4190, 4203), True, 'import numpy as np\n'), ((4240, 4262), 'cv2.imshow', 'cv2.imshow', (['title', 'out'], {}), '(title, out)\n', (4250, 4262), False, 'import cv2\n'), ((4267, 4281), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4278, 4281), False, 'import cv2\n'), ((4286, 4309), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4307, 4309), False, 'import cv2\n'), ((4348, 4397), 'os.path.exists', 'os.path.exists', (["(outfile + '_fisher_girl_mask.png')"], {}), "(outfile + '_fisher_girl_mask.png')\n", (4362, 4397), False, 'import os, re, sys\n'), ((4738, 4762), 'lib.kalgorithm.bgr2gray', 'kalgorithm.bgr2gray', (['out'], {}), '(out)\n', (4757, 4762), False, 'from lib import kalgorithm\n'), ((5050, 5103), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', 
(["(outfile + '_fisher_girl.png')", 'out'], {}), "(outfile + '_fisher_girl.png', out)\n", (5068, 5103), False, 'from lib import kalgorithm\n'), ((5120, 5185), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', (["(outfile + '_fisher_girl_mask.png')", '(mask * 255)'], {}), "(outfile + '_fisher_girl_mask.png', mask * 255)\n", (5138, 5185), False, 'from lib import kalgorithm\n'), ((5295, 5333), 'lib.kalgorithm.thresholdBinarization', 'kalgorithm.thresholdBinarization', (['outv'], {}), '(outv)\n', (5327, 5333), False, 'from lib import kalgorithm\n'), ((5375, 5395), 'numpy.sum', 'np.sum', (['outv'], {'axis': '(1)'}), '(outv, axis=1)\n', (5381, 5395), True, 'import numpy as np\n'), ((5408, 5426), 'numpy.min', 'np.min', (['yhistogram'], {}), '(yhistogram)\n', (5414, 5426), True, 'import numpy as np\n'), ((5438, 5456), 'numpy.max', 'np.max', (['yhistogram'], {}), '(yhistogram)\n', (5444, 5456), True, 'import numpy as np\n'), ((5734, 5782), 'os.path.exists', 'os.path.exists', (["(outfile + '_ocean_wave_mask.png')"], {}), "(outfile + '_ocean_wave_mask.png')\n", (5748, 5782), False, 'import os, re, sys\n'), ((6225, 6283), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', (["(outfile + '_ocean_wave_mask.png')", 'outv'], {}), "(outfile + '_ocean_wave_mask.png', outv)\n", (6243, 6283), False, 'from lib import kalgorithm\n'), ((6469, 6490), 'numpy.clip', 'np.clip', (['outv', '(0)', '(255)'], {}), '(outv, 0, 255)\n', (6476, 6490), True, 'import numpy as np\n'), ((6495, 6546), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', (["(outfile + '_sky_mask.png')", 'outv'], {}), "(outfile + '_sky_mask.png', outv)\n", (6513, 6546), False, 'from lib import kalgorithm\n'), ((6922, 6953), 'lib.kalgorithm.meanFilter', 'kalgorithm.meanFilter', (['mask_sky'], {}), '(mask_sky)\n', (6943, 6953), False, 'from lib import kalgorithm\n'), ((6982, 7041), 'lib.kalgorithm.blInterpolate', 'kalgorithm.blInterpolate', (['sky', '(0.33333333334)', '(0.33333333334)'], {}), '(sky, 0.33333333334, 0.33333333334)\n', 
(7006, 7041), False, 'from lib import kalgorithm\n'), ((7195, 7267), 'numpy.zeros', 'np.zeros', (['(mask_sky.shape[0], mask_sky.shape[1], sky.shape[2])', 'np.uint8'], {}), '((mask_sky.shape[0], mask_sky.shape[1], sky.shape[2]), np.uint8)\n', (7203, 7267), True, 'import numpy as np\n'), ((7540, 7598), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', (["(outfile + '_sky_cloud.png')", 'finalimage'], {}), "(outfile + '_sky_cloud.png', finalimage)\n", (7558, 7598), False, 'from lib import kalgorithm\n'), ((1176, 1200), 'numpy.sum', 'np.sum', (['imgsrc[y + 1, x]'], {}), '(imgsrc[y + 1, x])\n', (1182, 1200), True, 'import numpy as np\n'), ((1387, 1411), 'numpy.sum', 'np.sum', (['imgsrc[y - 1, x]'], {}), '(imgsrc[y - 1, x])\n', (1393, 1411), True, 'import numpy as np\n'), ((1606, 1630), 'numpy.sum', 'np.sum', (['imgsrc[y, x + 1]'], {}), '(imgsrc[y, x + 1])\n', (1612, 1630), True, 'import numpy as np\n'), ((1817, 1841), 'numpy.sum', 'np.sum', (['imgsrc[y, x - 1]'], {}), '(imgsrc[y, x - 1])\n', (1823, 1841), True, 'import numpy as np\n'), ((2604, 2627), 'os.path.exists', 'os.path.exists', (['dstfile'], {}), '(dstfile)\n', (2618, 2627), False, 'import os, re, sys\n'), ((2643, 2670), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['srcfile'], {}), '(srcfile)\n', (2661, 2670), False, 'from lib import kalgorithm\n'), ((2901, 2942), 'phase2_broken_repair.mainfix', 'mainfix', (['img', 'dstfile', '(240)'], {'onlyeasy': '(True)'}), '(img, dstfile, 240, onlyeasy=True)\n', (2908, 2942), False, 'from phase2_broken_repair import mainfix\n'), ((3029, 3066), 'os.path.exists', 'os.path.exists', (["(dstfile + '.mask.png')"], {}), "(dstfile + '.mask.png')\n", (3043, 3066), False, 'import os, re, sys\n'), ((3110, 3156), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', (["(dstfile + '.mask.png')", 'out'], {}), "(dstfile + '.mask.png', out)\n", (3128, 3156), False, 'from lib import kalgorithm\n'), ((3180, 3210), 'os.path.exists', 'os.path.exists', (['repairmaskfile'], {}), '(repairmaskfile)\n', 
(3194, 3210), False, 'import os, re, sys\n'), ((3299, 3323), 'lib.kalgorithm.bgr2gray', 'kalgorithm.bgr2gray', (['out'], {}), '(out)\n', (3318, 3323), False, 'from lib import kalgorithm\n'), ((3441, 3492), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', (["(dstfile + '.mask.line.png')", 'out'], {}), "(dstfile + '.mask.line.png', out)\n", (3459, 3492), False, 'from lib import kalgorithm\n'), ((3667, 3707), 'lib.kalgorithm.affineRotation', 'kalgorithm.affineRotation', (['imgsrc', 'angle'], {}), '(imgsrc, angle)\n', (3692, 3707), False, 'from lib import kalgorithm\n'), ((3782, 3820), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', (['repairfile', 'imgsrc'], {}), '(repairfile, imgsrc)\n', (3800, 3820), False, 'from lib import kalgorithm\n'), ((3865, 3904), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', (['repairmaskfile', 'out'], {}), '(repairmaskfile, out)\n', (3883, 3904), False, 'from lib import kalgorithm\n'), ((4049, 4107), 'lib.kalgorithm.imgSave', 'kalgorithm.imgSave', (["(repairmaskfile + '.mask.line.png')", 'out'], {}), "(repairmaskfile + '.mask.line.png', out)\n", (4067, 4107), False, 'from lib import kalgorithm\n'), ((5231, 5284), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (["(repairmaskfile + '.mask.line.png')"], {}), "(repairmaskfile + '.mask.line.png')\n", (5249, 5284), False, 'from lib import kalgorithm\n'), ((5653, 5668), 'numpy.mean', 'np.mean', (['result'], {}), '(result)\n', (5660, 5668), True, 'import numpy as np\n'), ((5685, 5700), 'numpy.mean', 'np.mean', (['result'], {}), '(result)\n', (5692, 5700), True, 'import numpy as np\n'), ((5953, 5987), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['repairmaskfile'], {}), '(repairmaskfile)\n', (5971, 5987), False, 'from lib import kalgorithm\n'), ((808, 828), 'numpy.sum', 'np.sum', (['imgsrc[y, x]'], {}), '(imgsrc[y, x])\n', (814, 828), True, 'import numpy as np\n'), ((4426, 4456), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['repairfile'], {}), '(repairfile)\n', (4444, 4456), False, 'from 
lib import kalgorithm\n'), ((4486, 4513), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['skyfile'], {}), '(skyfile)\n', (4504, 4513), False, 'from lib import kalgorithm\n'), ((5811, 5841), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['repairfile'], {}), '(repairfile)\n', (5829, 5841), False, 'from lib import kalgorithm\n'), ((5871, 5898), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['skyfile'], {}), '(skyfile)\n', (5889, 5898), False, 'from lib import kalgorithm\n'), ((6062, 6115), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (["(outfile + '_fisher_girl_mask.png')"], {}), "(outfile + '_fisher_girl_mask.png')\n", (6080, 6115), False, 'from lib import kalgorithm\n'), ((6737, 6767), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['repairfile'], {}), '(repairfile)\n', (6755, 6767), False, 'from lib import kalgorithm\n'), ((6797, 6824), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['skyfile'], {}), '(skyfile)\n', (6815, 6824), False, 'from lib import kalgorithm\n'), ((6859, 6887), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['mask_sky'], {}), '(mask_sky)\n', (6877, 6887), False, 'from lib import kalgorithm\n'), ((2970, 2997), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['dstfile'], {}), '(dstfile)\n', (2988, 2997), False, 'from lib import kalgorithm\n'), ((3226, 3267), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (["(dstfile + '.mask.png')"], {}), "(dstfile + '.mask.png')\n", (3244, 3267), False, 'from lib import kalgorithm\n'), ((3602, 3629), 'lib.kalgorithm.imgRead', 'kalgorithm.imgRead', (['dstfile'], {}), '(dstfile)\n', (3620, 3629), False, 'from lib import kalgorithm\n'), ((4774, 4815), 'lib.kalgorithm.thresholdOtsuBinarization', 'kalgorithm.thresholdOtsuBinarization', (['out'], {}), '(out)\n', (4810, 4815), False, 'from lib import kalgorithm\n')] |
from __future__ import division, print_function
import numpy as np
import datetime
import warnings
# Drawing tools
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# Custom classes
from .box import *
from .chemistry import *
from .topology import *
from .species import *
class MolecularSystem:
    """A holder class containing all information for building a molecular system.

    Contains a simulation box and topology.
    Implements methods for generating atoms, visualizing the configuration,
    and writing data in a standard output format (LAMMPS data file).

    Parameters
    ----------
    box : Box
        Simulation box for the system
    """
    def __init__(self, box):
        self.box = box
        self.topology = Topology()
        # Set of every unique monomer type across all added species.
        self.monomers = set()
        # Registered species, keyed by species id.
        self.species = {}
        # Number of molecules to generate per species id (set by add_species).
        self.generate_nmol = {}
        # NOTE(review): `generated` is initialized here but never set to True
        # anywhere in this class (generate_molecules does not update it) — confirm intended.
        self.generated = False
    def __repr__(self):
        return "MolecularSystem(V = {:.2f}, {} species)".format(self.volume(), self.nspecies())
    def volume(self):
        """Return the volume of the simulation box."""
        return self.box.volume()
    def nspecies(self):
        """Return the number of registered species."""
        return len(self.species)
    def nmonomers(self):
        """Return the number of unique monomer types in the system."""
        return len(self.monomers)
    def natoms(self):
        """Return the total number of atoms currently in the topology."""
        return self.topology.natoms()
    def add_species(self, species, **kwargs):
        """Register a species for generation.

        Exactly one of the keyword arguments ``nmol`` or ``fill`` must be
        supplied.

        Parameters
        ----------
        species : Species
            Species to add. Replaces (with a RuntimeWarning) any previously
            added species sharing the same id.
        nmol : int, optional (keyword)
            Explicit number of molecules of this species to generate.
        fill : float, optional (keyword)
            Target volume fraction (0 < fill < 1) of the box to occupy with
            this species, net of the volume already claimed by previously
            added species.

        Raises
        ------
        ValueError
            If neither or both of ``nmol``/``fill`` are given.
        """
        if species.id in self.species:
            warnings.warn("Replacing species with id = {} in system.".format(species.id), RuntimeWarning)
        if not "nmol" in kwargs and not "fill" in kwargs:
            raise ValueError("One of either `nmol` or `fill` keyword arguments must be specified.")
        elif "nmol" in kwargs and "fill" in kwargs:
            raise ValueError("Only one of either `nmol` or `fill` keyword arguments should be specified.")
        if "nmol" in kwargs:
            self.generate_nmol[species.id] = int(kwargs.get("nmol"))
        elif "fill" in kwargs:
            fill = kwargs.get("fill")
            assert 0.0 < fill < 1.0, "Fill fraction must be between zero and unity."
            # Target volume of all monomers
            vol_target = fill * self.box.volume()
            # Get the current amount of volume taken up by other species
            vol_curr = 0.0
            for (sid, other) in self.species.items():
                vol_curr += self.generate_nmol[sid] * other.volume()
            vol_left = vol_target - vol_curr # Amount needed to fill
            # Round to the nearest whole molecule count, clamped at zero so an
            # over-full box simply yields no molecules of this species.
            nmol_fill = np.rint(vol_left / species.volume())
            nmol_fill = np.maximum(0, nmol_fill)
            self.generate_nmol[species.id] = int(nmol_fill)
        # Add the species to the system after the other stuff has worked out already
        self.species[species.id] = species
        for mon in species.monomers:
            self.monomers.add(mon)
    # Molecule generation and placement in the topology
    def _expected_charge(self):
        """Return the net charge of all molecules scheduled for generation."""
        charge_gen = 0.0
        for (sid, species) in self.species.items():
            nmol = self.generate_nmol[sid]
            charge_gen += nmol * species.charge()
        return charge_gen
    def _expected_volume(self):
        """Return the total molecular volume scheduled for generation."""
        vol_gen = 0.0
        for (sid, species) in self.species.items():
            nmol = self.generate_nmol[sid]
            vol_gen += nmol * species.volume()
        return vol_gen
    def generate_molecules(self, check_volume = True, check_charge = True):
        """Populate the internal topology with all registered species.

        Clears any existing topology first, then asks each species to
        generate its molecules into the shared topology.

        Parameters
        ----------
        check_volume : bool
            If True, raise ValueError when the expected molecular volume
            exceeds the box volume.
        check_charge : bool
            If True, raise ValueError when the expected net charge is not
            numerically zero.
        """
        # Safety checks on charge and volume
        vol_gen = self._expected_volume()
        charge_gen = self._expected_charge()
        if check_volume and vol_gen > self.box.volume():
            raise ValueError("Volume of molecules exceeds box volume.")
        if check_charge and not np.isclose(charge_gen, 0.0):
            raise ValueError("Net charge of molecules not equal to zero.")
        # Reset the internal topology
        self.topology.clear()
        # Actually generate the molecules
        # Add safety checks later on for space filling and overlaps????
        # mid0 tracks the next available molecule id so ids stay unique
        # across species.
        mid0 = 0
        for (sid, species) in self.species.items():
            nmol = self.generate_nmol[sid]
            if nmol > 0:
                species.generate(nmol, mid0, self.topology, self.box)
                mid0 += nmol
    # Plotting and visualization of system
    def _draw_box(self, ax):
        """Draw the (possibly triclinic) simulation box edges on 3D axes `ax`."""
        # Corners of the unit cube in fractional coordinates; each is
        # transformed by the box matrix h below.
        points = np.array([
            [0.0, 0.0, 0.0],
            [1.0, 0.0, 0.0],
            [1.0, 1.0, 0.0],
            [0.0, 1.0, 0.0],
            [0.0, 0.0, 1.0],
            [1.0, 0.0, 1.0],
            [1.0, 1.0, 1.0],
            [0.0, 1.0, 1.0]
        ])
        Z = np.zeros((8,3))
        for i in range(8): Z[i,:] = np.dot(points[i,:],self.box.h)
        # Scaled polygon sides
        verts = [[Z[0],Z[1],Z[2],Z[3]],
                 [Z[4],Z[5],Z[6],Z[7]],
                 [Z[0],Z[1],Z[5],Z[4]],
                 [Z[2],Z[3],Z[7],Z[6]],
                 [Z[1],Z[2],Z[6],Z[5]],
                 [Z[4],Z[7],Z[3],Z[0]]]
        # Nearly transparent faces so only the box outline is visible.
        ax.add_collection3d(Poly3DCollection(verts, linewidths=1, edgecolors='k', alpha=0.01))
    def _draw_molecules(self, ax):
        """Scatter all atoms and draw all bonds on 3D axes `ax`.

        Atoms are colored by monomer id and sized by monomer size.
        """
        # TODO: This is quite slow right now
        # Can we speed up the individual drawing of all bonds and atoms
        # by a single call to scatter3D/plot??
        nmon = self.nmonomers()
        cm = plt.get_cmap('gist_rainbow')
        colors = [cm(1.*i/nmon) for i in range(nmon)]
        # NOTE(review): `self.monomers` is a set, so the monomer -> color
        # assignment depends on set iteration order and may vary between runs.
        color_dict = {mon.id : colors[i] for i, mon in enumerate(self.monomers)}
        # Plot all the individual atoms
        for ai in self.topology.atoms:
            mon = ai.mon
            # Unwrap the atom position (wrapped position + image shift)
            pos = ai.pos + np.dot(self.box.h, ai.img)
            ax.scatter3D(pos[0], pos[1], pos[2], c = np.array([color_dict[mon.id]]),
                         s = 200 * mon.size, edgecolors = 'black')
        # Plot all the bonds between atoms
        for bi in self.topology.bonds:
            a1 = self.topology.atom(bi[0])
            a2 = self.topology.atom(bi[1])
            # Unwrap the positions
            pos1 = a1.pos + np.dot(self.box.h, a1.img)
            pos2 = a2.pos + np.dot(self.box.h, a2.img)
            points = np.array([pos1, pos2])
            # Plot line segment
            ax.plot(points[:,0], points[:,1], points[:,2], c = color_dict[a1.mon.id])
    def draw(self, figsize = (10, 8), **kwargs):
        """Render the box and all molecules in a new 3D matplotlib figure.

        Parameters
        ----------
        figsize : tuple
            Figure size in inches.
        elev, azim : float, optional (keyword)
            Initial camera elevation/azimuth (defaults 30 / -60).

        Note: the figure is created but neither shown nor returned; with a
        non-interactive backend the caller must retrieve it via pyplot.
        """
        elev = kwargs.get("elev", 30)
        azim = kwargs.get("azim", -60)
        fig = plt.figure(figsize = figsize)
        ax = fig.add_subplot(111, projection='3d', elev = elev, azim = azim)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        # Draw box and molecules
        self._draw_box(ax)
        self._draw_molecules(ax)
    # File IO methods
    def write_lammps_data(self, fname = None, **kwargs):
        """
        Write the system structure and topology as a LAMMPS data file.

        Parameters
        ----------
        fname : str, optional
            If given, the generated text is also written to this path.
        bonds, angles, dihedrals, impropers : bool, optional (keyword)
            Whether to emit the corresponding topology sections
            (each defaults to False).

        Returns
        -------
        str
            Full text of the data file.

        NOTE(review): the header only contains atom/bond counts; LAMMPS
        requires matching "N angles"/"N angle types" (etc.) header lines
        whenever an Angles/Dihedrals/Impropers section is written — confirm
        downstream readers accept files produced with those flags enabled.
        """
        # Keyword flags
        write_bonds = kwargs.get("bonds", False)
        write_angles = kwargs.get("angles", False)
        write_dihedrals = kwargs.get("dihedrals", False)
        write_impropers = kwargs.get("impropers", False)
        # Meta info on system
        box = self.box
        top = self.topology
        nmons = self.nmonomers()
        natoms = self.natoms()
        nbonds = top.nbonds()
        nbond_types = top.nbondtypes()
        # Create script header and system info
        now = datetime.datetime.now()
        header = "# LAMMPS data file created on {}\n\n".format(str(now.strftime("%Y-%m-%d %H:%M")))
        sys_info = ("{} atoms" "\n"
                    "{} atom types" "\n"
                    "{} bonds" "\n"
                    "{} bond types" "\n\n"
                    ).format(natoms, nmons, nbonds, nbond_types)
        # Write the box dimensions header
        if box.is_orthogonal():
            box_info = ("{0:<15.12f} {1:<15.12f} xlo xhi" "\n"
                        "{0:<15.12f} {2:<15.12f} ylo yhi" "\n"
                        "{0:<15.12f} {3:<15.12f} zlo zhi" "\n\n"
                        ).format(0, *box.dimensions[:3])
        else:
            # Must add the triclinic tilt factors to the file
            h = box.h
            box_info = ("{0:<15.12f} {1:<15.12f} xlo xhi" "\n"
                        "{0:<15.12f} {2:<15.12f} ylo yhi" "\n"
                        "{0:<15.12f} {3:<15.12f} zlo zhi" "\n"
                        "{4:<15.12f} {5:<15.12f} {6:<15.12f} xy xz yz" "\n\n"
                        ).format(0, h[0,0], h[1,1], h[2, 2], h[0,1], h[0,2], h[1,2])
        # Masses section: one line per monomer (atom) type.
        mass_info = "Masses\n\n"
        for mon in self.monomers:
            mass_info += "{} {}\n".format(mon.id, mon.mass)
        # Atom section
        # Per-atom line layout: id mol type charge x y z ix iy iz — this
        # looks like LAMMPS atom_style "full"; confirm against the reader.
        atom_info = "\nAtoms\n\n"
        for i, atom in enumerate(top.atoms):
            mon = atom.mon
            pos = atom.pos
            img = atom.img
            atom_info += "{:.0f} {:.0f} {:.0f} {:.13f} {:.13e} {:.13e} {:.13e} {:.0f} {:.0f} {:.0f}\n".format(
                atom.id, atom.mid, mon.id, mon.charge,
                pos[0], pos[1], pos[2], img[0], img[1], img[2]
            )
        # Bond section
        if write_bonds and top.nbonds() > 0:
            bond_info = "\nBonds\n"
            for i, bond in enumerate(top.bonds):
                bond_info += "\n{} {} {} {}".format(i+1, bond.type, bond[0], bond[1])
        else:
            bond_info = ""
        # Angle section
        if write_angles and top.nangles() > 0:
            angle_info = "\nAngles\n"
            for i, ang in enumerate(top.angles):
                angle_info += "\n{} {} {} {} {}".format(i+1, ang.type, ang[0], ang[1], ang[2])
        else:
            angle_info = ""
        # Dihedral section
        if write_dihedrals and top.ndihedrals() > 0:
            dihedral_info = "\nDihedrals\n"
            for i, dih in enumerate(top.dihedrals):
                dihedral_info += "\n{} {} {} {} {} {}".format(i+1, dih.type, dih[0], dih[1], dih[2], dih[3])
        else:
            dihedral_info = ""
        # Improper section
        if write_impropers and top.nimpropers() > 0:
            improper_info = "\nImpropers\n"
            for i, imp in enumerate(top.impropers):
                improper_info += "\n{} {} {} {} {} {}".format(i+1, imp.type, imp[0], imp[1], imp[2], imp[3])
        else:
            improper_info = ""
        # Piece together the final script
        script = header + sys_info + box_info + mass_info + atom_info
        script += bond_info + angle_info + dihedral_info + improper_info
        if fname is not None:
            with open(fname, "w") as file: file.write(script)
        return script
| [
"numpy.maximum",
"matplotlib.pyplot.get_cmap",
"numpy.zeros",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"matplotlib.pyplot.figure",
"numpy.isclose",
"numpy.array",
"numpy.dot",
"datetime.datetime.now"
] | [((4311, 4462), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, \n 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0\n ], [0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0]])\n', (4319, 4462), True, 'import numpy as np\n'), ((4582, 4598), 'numpy.zeros', 'np.zeros', (['(8, 3)'], {}), '((8, 3))\n', (4590, 4598), True, 'import numpy as np\n'), ((5244, 5272), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gist_rainbow"""'], {}), "('gist_rainbow')\n", (5256, 5272), True, 'import matplotlib.pyplot as plt\n'), ((6370, 6397), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6380, 6397), True, 'import matplotlib.pyplot as plt\n'), ((7344, 7367), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7365, 7367), False, 'import datetime\n'), ((4634, 4666), 'numpy.dot', 'np.dot', (['points[i, :]', 'self.box.h'], {}), '(points[i, :], self.box.h)\n', (4640, 4666), True, 'import numpy as np\n'), ((4929, 4994), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', (['verts'], {'linewidths': '(1)', 'edgecolors': '"""k"""', 'alpha': '(0.01)'}), "(verts, linewidths=1, edgecolors='k', alpha=0.01)\n", (4945, 4994), False, 'from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n'), ((6086, 6108), 'numpy.array', 'np.array', (['[pos1, pos2]'], {}), '([pos1, pos2])\n', (6094, 6108), True, 'import numpy as np\n'), ((2523, 2547), 'numpy.maximum', 'np.maximum', (['(0)', 'nmol_fill'], {}), '(0, nmol_fill)\n', (2533, 2547), True, 'import numpy as np\n'), ((3689, 3716), 'numpy.isclose', 'np.isclose', (['charge_gen', '(0.0)'], {}), '(charge_gen, 0.0)\n', (3699, 3716), True, 'import numpy as np\n'), ((5579, 5605), 'numpy.dot', 'np.dot', (['self.box.h', 'ai.img'], {}), '(self.box.h, ai.img)\n', (5585, 5605), True, 'import numpy as np\n'), ((5983, 6009), 
'numpy.dot', 'np.dot', (['self.box.h', 'a1.img'], {}), '(self.box.h, a1.img)\n', (5989, 6009), True, 'import numpy as np\n'), ((6038, 6064), 'numpy.dot', 'np.dot', (['self.box.h', 'a2.img'], {}), '(self.box.h, a2.img)\n', (6044, 6064), True, 'import numpy as np\n'), ((5659, 5689), 'numpy.array', 'np.array', (['[color_dict[mon.id]]'], {}), '([color_dict[mon.id]])\n', (5667, 5689), True, 'import numpy as np\n')] |
import numpy as np
from Constants import Constants
from TARNet_Manager import TARNet_Manager
from Utils import Utils
class TARNet_Experiments:
    """Trains and evaluates TARNet models (plain TARNet vs. TARNet on
    PM-GAN-balanced data) and returns their evaluation dictionaries.

    Parameters
    ----------
    input_nodes : int
        Number of input features. NOTE: currently unused — network sizes
        come from the project-wide Constants.
    device : torch.device or str
        Device on which models are trained and evaluated.
    """
    def __init__(self, input_nodes, device):
        self.data_loader_dict_test = None
        self.data_loader_dict_val = None
        self.input_nodes = input_nodes
        self.device = device
    def __build_tarnet(self):
        # Every TARNet in this class shares the same architecture, defined
        # by the project-wide Constants (previously copy-pasted 3x).
        return TARNet_Manager(input_nodes=Constants.TARNET_INPUT_NODES,
                              shared_nodes=Constants.TARNET_SHARED_NODES,
                              outcome_nodes=Constants.TARNET_OUTPUT_NODES,
                              device=self.device)
    def __eval_parameters_from_dict(self, data_loader_dict):
        # Build the treated/control tensor pair for a data-loader dict and
        # wrap it in the test/val parameter dict (previously copy-pasted 5x).
        tensor_treated = \
            Utils.create_tensors_from_tuple(data_loader_dict["treated_data"])
        tensor_control = \
            Utils.create_tensors_from_tuple(data_loader_dict["control_data"])
        return self.__get_test_parameters(tensor_treated, tensor_control)
    def semi_supervised_train_eval(self, train_set,
                                   data_loader_dict_val,
                                   eval_set, n_total, n_treated):
        """Train a TARNet semi-supervised on `train_set`, validating against
        `data_loader_dict_val`, then evaluate it on `eval_set`.

        Returns the result of TARNet_Manager.eval_semi_supervised().
        """
        _train_parameters = self.__get_train_parameters(train_set)
        val_parameters = self.__eval_parameters_from_dict(data_loader_dict_val)
        tarnet_ss = self.__build_tarnet()
        tarnet_ss.train_semi_supervised(_train_parameters, val_parameters, n_total, n_treated,
                                        self.device)
        _test_parameters = {
            "tensor_dataset": eval_set
        }
        return tarnet_ss.eval_semi_supervised(_test_parameters, self.device,
                                              treated_flag=True)
    def evaluate_TARNet_Model(self, tuple_treated_train_original,
                              tuple_control_train_original,
                              evaluate_TARNet_PM_GAN,
                              data_loader_dict_val,
                              data_loader_dict_test,
                              n_total_balanced_tarnet,
                              n_treated_balanced_tarnet):
        """Run both experiments and return their evaluation dicts.

        Model 1: TARNet trained on the original (unbalanced) data.
        Model 2: TARNet trained on PM-GAN-balanced data
                 (`evaluate_TARNet_PM_GAN` is the balanced tensor dataset).

        Returns
        -------
        dict with keys "tarnet_eval_dict" and "tarnet_pm_gan_eval_dict".
        """
        # data loader -> (np_treated_df_X, np_treated_ps_score, np_treated_df_Y_f, np_treated_df_Y_cf)
        self.data_loader_dict_test = data_loader_dict_test
        self.data_loader_dict_val = data_loader_dict_val
        # Model 1: TARNET
        print("--" * 20)
        print("###### Model 1: TARNET Supervised Training started ######")
        print("Treated: "+str(tuple_treated_train_original[0].shape[0]))
        print("Control: "+str(tuple_control_train_original[0].shape[0]))
        tarnet_eval_dict = self.evaluate_TARNet(tuple_treated_train_original,
                                                tuple_control_train_original)
        # Model 2: TARNET PM GAN
        print("--" * 20)
        print("###### Model 2: TARNET PM GAN Supervised Training started ######")
        print("Treated: "+str(n_treated_balanced_tarnet))
        print("Control: "+str(n_total_balanced_tarnet - n_treated_balanced_tarnet))
        tarnet_pm_gan_eval_dict = self.evaluate_TARNet_PM_GAN(evaluate_TARNet_PM_GAN,
                                                              n_total_balanced_tarnet,
                                                              n_treated_balanced_tarnet)
        return {
            "tarnet_eval_dict": tarnet_eval_dict,
            "tarnet_pm_gan_eval_dict": tarnet_pm_gan_eval_dict
        }
    def evaluate_TARNet(self, tuple_treated_train_original,
                        tuple_control_train_original):
        """Concatenate treated and control training data, train a TARNet on
        it (semi-supervised), and evaluate on the stored test loaders.
        """
        np_treated_x, np_treated_ps, np_treated_f, np_treated_cf = tuple_treated_train_original
        np_control_x, np_control_ps, np_control_f, np_control_cf = tuple_control_train_original
        # Treatment indicators: 1 for treated rows, 0 for control rows.
        t_1 = np.ones(np_treated_x.shape[0])
        t_0 = np.zeros(np_control_x.shape[0])
        n_treated = np_treated_x.shape[0]
        n_control = np_control_x.shape[0]
        n_total = n_treated + n_control
        # Stack treated first, control second (same order as t_1/t_0).
        np_train_ss_X = np.concatenate((np_treated_x, np_control_x), axis=0)
        np_train_ss_ps = np.concatenate((np_treated_ps, np_control_ps), axis=0)
        np_train_ss_T = np.concatenate((t_1, t_0), axis=0)
        np_train_ss_f = np.concatenate((np_treated_f, np_control_f), axis=0)
        np_train_ss_cf = np.concatenate((np_treated_cf, np_control_cf), axis=0)
        train_set = Utils.create_tensors_to_train_DCN_semi_supervised((np_train_ss_X, np_train_ss_ps,
                                                            np_train_ss_T, np_train_ss_f,
                                                            np_train_ss_cf))
        train_parameters = self.__get_train_parameters(train_set)
        val_parameters = self.__eval_parameters_from_dict(self.data_loader_dict_val)
        tarnet = self.__build_tarnet()
        tarnet.train_semi_supervised(train_parameters, val_parameters, n_total, n_treated, self.device)
        _test_parameters = self.__eval_parameters_from_dict(self.data_loader_dict_test)
        tarnet_eval_dict = tarnet.eval(_test_parameters, self.device)
        return tarnet_eval_dict
    def evaluate_TARNet_PM_GAN(self, tensor_balanced,
                               n_total_balanced_tarnet,
                               n_treated_balanced_tarnet):
        """Train a TARNet on the PM-GAN-balanced tensor dataset and evaluate
        on the stored test loaders.
        """
        _train_parameters = self.__get_train_parameters_PM_GAN(tensor_balanced)
        _test_parameters = self.__eval_parameters_from_dict(self.data_loader_dict_test)
        _val_parameters = self.__eval_parameters_from_dict(self.data_loader_dict_val)
        tarnet = self.__build_tarnet()
        tarnet.train_semi_supervised(_train_parameters, _val_parameters,
                                     n_total_balanced_tarnet,
                                     n_treated_balanced_tarnet, self.device)
        tarnet_eval_dict = tarnet.eval(_test_parameters, self.device)
        return tarnet_eval_dict
    @staticmethod
    def __get_train_parameters(train_set):
        """Standard TARNet training hyper-parameters around `train_set`."""
        return {
            "epochs": Constants.TARNET_EPOCHS,
            "lr": Constants.TARNET_LR,
            "lambda": Constants.TARNET_LAMBDA,
            "batch_size": Constants.TARNET_BATCH_SIZE,
            "shuffle": True,
            "tensor_dataset": train_set
        }
    @staticmethod
    def __get_train_parameters_ss(train_set):
        """Alternate (hard-coded epochs/batch) hyper-parameters.

        NOTE(review): not called anywhere in this class — confirm whether
        it is still needed before removing.
        """
        return {
            "epochs": 200,
            "lr": Constants.TARNET_LR,
            "lambda": Constants.TARNET_LAMBDA,
            "batch_size": 64,
            "shuffle": True,
            "tensor_dataset": train_set
        }
    @staticmethod
    def __get_train_parameters_PM_GAN(tensor_treated):
        # Hyper-parameters are identical to __get_train_parameters; the
        # duplicate body was consolidated into a delegation (the separate
        # name is kept for readability at call sites).
        return TARNet_Experiments.__get_train_parameters(tensor_treated)
    @staticmethod
    def __get_test_parameters(tensor_treated_test, tensor_control_test):
        """Wrap treated/control tensor datasets for eval/validation."""
        return {
            "treated_set": tensor_treated_test,
            "control_set": tensor_control_test
        }
| [
"TARNet_Manager.TARNet_Manager",
"numpy.zeros",
"numpy.ones",
"Utils.Utils.create_tensors_to_train_DCN_semi_supervised",
"Utils.Utils.create_tensors_from_tuple",
"numpy.concatenate"
] | [((628, 697), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["data_loader_dict_val['treated_data']"], {}), "(data_loader_dict_val['treated_data'])\n", (659, 697), False, 'from Utils import Utils\n'), ((741, 810), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["data_loader_dict_val['control_data']"], {}), "(data_loader_dict_val['control_data'])\n", (772, 810), False, 'from Utils import Utils\n'), ((924, 1099), 'TARNet_Manager.TARNet_Manager', 'TARNet_Manager', ([], {'input_nodes': 'Constants.TARNET_INPUT_NODES', 'shared_nodes': 'Constants.TARNET_SHARED_NODES', 'outcome_nodes': 'Constants.TARNET_OUTPUT_NODES', 'device': 'self.device'}), '(input_nodes=Constants.TARNET_INPUT_NODES, shared_nodes=\n Constants.TARNET_SHARED_NODES, outcome_nodes=Constants.\n TARNET_OUTPUT_NODES, device=self.device)\n', (938, 1099), False, 'from TARNet_Manager import TARNet_Manager\n'), ((3619, 3649), 'numpy.ones', 'np.ones', (['np_treated_x.shape[0]'], {}), '(np_treated_x.shape[0])\n', (3626, 3649), True, 'import numpy as np\n'), ((3664, 3695), 'numpy.zeros', 'np.zeros', (['np_control_x.shape[0]'], {}), '(np_control_x.shape[0])\n', (3672, 3695), True, 'import numpy as np\n'), ((3846, 3898), 'numpy.concatenate', 'np.concatenate', (['(np_treated_x, np_control_x)'], {'axis': '(0)'}), '((np_treated_x, np_control_x), axis=0)\n', (3860, 3898), True, 'import numpy as np\n'), ((3924, 3978), 'numpy.concatenate', 'np.concatenate', (['(np_treated_ps, np_control_ps)'], {'axis': '(0)'}), '((np_treated_ps, np_control_ps), axis=0)\n', (3938, 3978), True, 'import numpy as np\n'), ((4003, 4037), 'numpy.concatenate', 'np.concatenate', (['(t_1, t_0)'], {'axis': '(0)'}), '((t_1, t_0), axis=0)\n', (4017, 4037), True, 'import numpy as np\n'), ((4062, 4114), 'numpy.concatenate', 'np.concatenate', (['(np_treated_f, np_control_f)'], {'axis': '(0)'}), '((np_treated_f, np_control_f), axis=0)\n', (4076, 4114), True, 'import numpy as np\n'), ((4140, 
4194), 'numpy.concatenate', 'np.concatenate', (['(np_treated_cf, np_control_cf)'], {'axis': '(0)'}), '((np_treated_cf, np_control_cf), axis=0)\n', (4154, 4194), True, 'import numpy as np\n'), ((4216, 4348), 'Utils.Utils.create_tensors_to_train_DCN_semi_supervised', 'Utils.create_tensors_to_train_DCN_semi_supervised', (['(np_train_ss_X, np_train_ss_ps, np_train_ss_T, np_train_ss_f, np_train_ss_cf)'], {}), '((np_train_ss_X,\n np_train_ss_ps, np_train_ss_T, np_train_ss_f, np_train_ss_cf))\n', (4265, 4348), False, 'from Utils import Utils\n'), ((4598, 4672), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["self.data_loader_dict_val['treated_data']"], {}), "(self.data_loader_dict_val['treated_data'])\n", (4629, 4672), False, 'from Utils import Utils\n'), ((4716, 4790), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["self.data_loader_dict_val['control_data']"], {}), "(self.data_loader_dict_val['control_data'])\n", (4747, 4790), False, 'from Utils import Utils\n'), ((4901, 5076), 'TARNet_Manager.TARNet_Manager', 'TARNet_Manager', ([], {'input_nodes': 'Constants.TARNET_INPUT_NODES', 'shared_nodes': 'Constants.TARNET_SHARED_NODES', 'outcome_nodes': 'Constants.TARNET_OUTPUT_NODES', 'device': 'self.device'}), '(input_nodes=Constants.TARNET_INPUT_NODES, shared_nodes=\n Constants.TARNET_SHARED_NODES, outcome_nodes=Constants.\n TARNET_OUTPUT_NODES, device=self.device)\n', (4915, 5076), False, 'from TARNet_Manager import TARNet_Manager\n'), ((5313, 5388), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["self.data_loader_dict_test['treated_data']"], {}), "(self.data_loader_dict_test['treated_data'])\n", (5344, 5388), False, 'from Utils import Utils\n'), ((5433, 5508), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["self.data_loader_dict_test['control_data']"], {}), "(self.data_loader_dict_test['control_data'])\n", (5464, 5508), False, 'from Utils import 
Utils\n'), ((6004, 6079), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["self.data_loader_dict_test['treated_data']"], {}), "(self.data_loader_dict_test['treated_data'])\n", (6035, 6079), False, 'from Utils import Utils\n'), ((6124, 6199), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["self.data_loader_dict_test['control_data']"], {}), "(self.data_loader_dict_test['control_data'])\n", (6155, 6199), False, 'from Utils import Utils\n'), ((6394, 6468), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["self.data_loader_dict_val['treated_data']"], {}), "(self.data_loader_dict_val['treated_data'])\n", (6425, 6468), False, 'from Utils import Utils\n'), ((6512, 6586), 'Utils.Utils.create_tensors_from_tuple', 'Utils.create_tensors_from_tuple', (["self.data_loader_dict_val['control_data']"], {}), "(self.data_loader_dict_val['control_data'])\n", (6543, 6586), False, 'from Utils import Utils\n'), ((6698, 6873), 'TARNet_Manager.TARNet_Manager', 'TARNet_Manager', ([], {'input_nodes': 'Constants.TARNET_INPUT_NODES', 'shared_nodes': 'Constants.TARNET_SHARED_NODES', 'outcome_nodes': 'Constants.TARNET_OUTPUT_NODES', 'device': 'self.device'}), '(input_nodes=Constants.TARNET_INPUT_NODES, shared_nodes=\n Constants.TARNET_SHARED_NODES, outcome_nodes=Constants.\n TARNET_OUTPUT_NODES, device=self.device)\n', (6712, 6873), False, 'from TARNet_Manager import TARNet_Manager\n')] |
"""
This file provides a dataset class for working with the UA-detrac tracking dataset.
Provides:
- plotting of 2D bounding boxes
- training/testing loader mode (random images from across all tracks) using __getitem__()
- track mode - returns a single image, in order, using __next__()
"""
import os,sys
import numpy as np
import random
import pandas as pd
import csv
import _pickle as pickle
import torch
import torchvision.transforms.functional as F
import cv2
from PIL import Image
import torch
from torch.utils import data
from torchvision import transforms
def cache_corrected_frames(label_directory,video_directory,last_corrected_frame,output_dir,skip_frames = 29):
    """
    Caches all corrected frames for each file
    label_directory - string - path to label csv files
    video_directory - string - path to mp4 video files
    last_corrected_frame - a dict with keys like 'p1c1' and integer last frame values, -1 if file has not been corrected
    output_dir - output cached dataset directory. This directory should contain a subdirectory "frames"
    skip_frames - NOTE(review): accepted but never used in this function; verify whether frame skipping was intended
    Side effects: writes one PNG per frame into <output_dir>/frames and a
    pickled list of [frame_path, label_rows] pairs to <output_dir>/labels.cpkl.
    """
    # to prevent automatic overwriting, as this takes a decent amount of time and cannot be interrupted without corrupting files
    input("Press enter to confirm you would like to re-cache frames")
    total_frame_count = 0
    all_data = [] # each item will be a tuple of image_path,labels
    label_files = [os.path.join(label_directory,item) for item in os.listdir(label_directory)]
    for label_file in label_files:
        # filenames look like ".../rectified_<sequence>_track_outputs.csv"
        sequence_name = label_file.split("/")[-1].split("_track_outputs")[0].split("rectified_")[1]
        if sequence_name not in last_corrected_frame.keys():
            continue # no corrected frames for this sequence
        else:
            stop_frame = last_corrected_frame[sequence_name]
        print("Processing sequence {}".format(sequence_name))
        # load per-camera ignore polygon (regions to be blacked out), if one exists
        camera_name = sequence_name.split("_")[0]
        ignore_path = "ignored_regions/{}_ignored.csv".format(camera_name)
        ignore_polygon = []
        if os.path.exists(ignore_path):
            with open(ignore_path,"r") as f:
                read = csv.reader(f)
                for row in read:
                    ignore_polygon.append( np.array([int(row[0]),int(row[1])]).astype(np.int32) )
        # add a leading axis so the polygon matches cv2.fillPoly's expected shape
        ig = np.array(ignore_polygon)
        ig = ig[np.newaxis,:]
        frame_labels = {} # dictionary indexed by frame
        with open(label_file,"r") as f:
            read = csv.reader(f)
            HEADERS = True
            for row in read:
                if not HEADERS:
                    if len(row) == 0:
                        continue
                    frame_idx = int(row[0])
                    # labels past the last manually corrected frame are not trusted
                    if frame_idx > stop_frame:
                        break
                    if frame_idx not in frame_labels.keys():
                        frame_labels[frame_idx] = [row]
                    else:
                        frame_labels[frame_idx].append(row)
                if HEADERS and len(row) > 0:
                    if row[0][0:5] == "Frame":
                        HEADERS = False # all header lines have been read at this point
        # walk the video frame-by-frame up to the last corrected frame
        video_file = os.path.join(video_directory,sequence_name + ".mp4")
        cap = cv2.VideoCapture(video_file)
        ret,frame = cap.read()
        frame_num = 0
        # NOTE(review): REPLACE is assigned but never read in this function
        REPLACE = False
        while ret and frame_num <= stop_frame:
            # cache frame and append data to all_data if necessary
            if frame_num not in frame_labels.keys():
                frame_labels[frame_num] = []
            output_name = os.path.join(output_dir,"frames","{}_{}.png".format(sequence_name,frame_num))
            all_data.append([output_name,frame_labels[frame_num]])
            total_frame_count += 1
            # normalize resolution, black out the ignored region, and write to disk
            frame = cv2.resize(frame,(1920,1080))
            frame = cv2.fillPoly(frame,ig,(0,0,0))
            cv2.imwrite(output_name,frame)
            # get next frame
            ret,frame = cap.read()
            frame_num += 1
        cap.release()
        print("Cached frames for {}".format(sequence_name))
    # persist the (image path, label rows) index for Detection_Dataset to load
    all_labels = os.path.join(output_dir,"labels.cpkl")
    with open(all_labels,"wb") as f:
        pickle.dump(all_data,f)
    print("Cached {} total frames from all sequences.".format(total_frame_count))
def pil_to_cv(pil_im):
    """Convert a PIL image to an OpenCV-style array.

    PIL stores channels in RGB order while OpenCV expects BGR, so the
    channel axis is reversed after converting to a numpy array.
    """
    rgb = np.array(pil_im)
    bgr = rgb[:, :, ::-1]  # flip RGB -> BGR
    return bgr
def plot_text(im, offset, cls, idnum, class_colors, class_dict):
    """Draw a filled "<id>: <class>" label box onto *im* (modified in place).

    im           - cv2/numpy image to draw on
    offset       - (x, y) corner of the bbox above which the label is drawn
    cls          - class key used to look up both color and display name
    idnum        - object id shown before the class name
    class_colors - indexable collection of BGR color tuples
    class_dict   - maps class keys to display names
    """
    label = "{}: {}".format(idnum, class_dict[cls])
    scale = 2.0
    face = cv2.FONT_HERSHEY_PLAIN
    # background fill color is the class color
    fill_color = class_colors[cls]
    # measure the rendered text so the background box can be sized to fit
    box_w, box_h = cv2.getTextSize(label, face, fontScale=scale, thickness=1)[0]
    anchor_x = int(offset[0])
    anchor_y = int(offset[1])
    # background rectangle extends up and right from the anchor, with a
    # small 2-pixel inset
    top_left = (anchor_x, anchor_y)
    bottom_right = (anchor_x + box_w - 2, anchor_y - box_h - 2)
    cv2.rectangle(im, top_left, bottom_right, fill_color, cv2.FILLED)
    cv2.putText(im, label, (anchor_x, anchor_y), face, fontScale=scale, color=(0., 0., 0.), thickness=2)
class Detection_Dataset(data.Dataset):
    """
    Dataset of cached frames and 3D bounding-box labels for 3D detector training.

    Each label row is a length-21 tensor:
        [0:16]  - x,y pixel coords of the 8 projected 3D box corners
                  (4 bottom points, then 4 top points)
        [16:20] - 2D bounding box xmin, ymin, xmax, ymax
        [20]    - class index (see self.classes)
    In the full-frame path (CROP == 0), __getitem__ appends the 6
    vanishing-point coordinates to every row, giving 27 columns.
    A frame with no objects is represented by a single row of -1s.
    """
    def __init__(self, dataset_dir, label_format = "tailed_footprint", mode = "train", CROP = 0):
        """
        dataset_dir  - directory containing labels.cpkl (written by
                       cache_corrected_frames) and the cached frames
        label_format - label layout name; only "8_corners" is used by show()
        mode         - "train" keeps the first 90% of frames, anything else
                       keeps the remaining 10%
        CROP         - 0 for full-frame tiling augmentation, otherwise the
                       side length of the random square crop returned
        """
        self.mode = mode
        self.label_format = label_format
        self.CROP = CROP

        # photometric augmentation + ImageNet normalization for every image
        self.im_tf = transforms.Compose([
                transforms.RandomApply([
                    transforms.ColorJitter(brightness = 0.6, contrast = 0.6, saturation = 0.5)
                        ]),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
                ])

        # inverse of the normalization above, used by show() for display
        self.denorm = transforms.Normalize(mean = [-0.485/0.229, -0.456/0.224, -0.406/0.225],
                                             std = [1/0.229, 1/0.224, 1/0.225])

        # bidirectional class name <-> index mapping ("truck" aliases index 5)
        self.classes = { "sedan":0,
                    "midsize":1,
                    "van":2,
                    "pickup":3,
                    "semi":4,
                    "truck (other)":5,
                    "truck": 5,
                    "motorcycle":6,
                    "trailer":7,
                    0:"sedan",
                    1:"midsize",
                    2:"van",
                    3:"pickup",
                    4:"semi",
                    5:"truck (other)",
                    6:"motorcycle",
                    7:"trailer",
                    }

        # per-camera vanishing points keyed by camera id such as "p1c1"
        # NOTE(review): loaded from the working directory - verify cwd at runtime
        with open("camera_vps.cpkl","rb") as f:
            self.vps = pickle.load(f)

        self.labels = []
        self.data = []
        self.box_2d = []

        # load cached (image path, label rows) pairs and parse each frame
        label_file = os.path.join(dataset_dir, "labels.cpkl")
        with open(label_file,"rb") as f:
            all_labels = pickle.load(f)
        random.shuffle(all_labels)

        for item in all_labels:
            EXCLUDE = False
            frame_boxes = []
            if len(item[1]) == 0:
                # empty frame placeholder: one all-zero row
                frame_boxes = [torch.zeros(21)]
            else:
                for box in item[1]:
                    # class index; unknown / missing class falls back to 0
                    try:
                        cls = np.ones([1]) * self.classes[box[3]]
                    except Exception:
                        cls = np.zeros([1])
                    # 8 corner coords (16 values); a malformed row excludes the frame
                    try:
                        bbox3d = np.array(box[11:27]).astype(float)
                    except Exception:
                        EXCLUDE = True
                        # bbox3d is unbound here, so processing further boxes
                        # would raise; the frame is discarded either way
                        break
                    try:
                        bbox2d = np.array(box[4:8]).astype(float)
                    except Exception:
                        bbox2d = np.zeros([4])
                    # recompute the 2D box as the tight envelope of the 3D corners
                    bbox2d[0] = np.min(bbox3d[::2])
                    bbox2d[1] = np.min(bbox3d[1::2])
                    bbox2d[2] = np.max(bbox3d[::2])
                    bbox2d[3] = np.max(bbox3d[1::2])
                    # row layout: [16 corner coords, 4 bbox2d coords, class] = 21
                    bbox = np.concatenate((bbox3d, bbox2d, cls), axis = 0).astype(float)
                    bbox = torch.from_numpy(bbox)
                    frame_boxes.append(bbox)
            if not EXCLUDE:
                try:
                    frame_boxes = torch.stack(frame_boxes)
                except Exception:
                    pass
                self.data.append(item[0])
                self.labels.append(frame_boxes)

        # partition dataset: first 90% for training, last 10% otherwise
        if self.mode == "train":
            self.data = self.data[:int(len(self.data)*0.9)]
            self.labels = self.labels[:int(len(self.labels)*0.9)]
        else:
            self.data = self.data[int(len(self.data)*0.9):]
            self.labels = self.labels[int(len(self.labels)*0.9):]

    def __getitem__(self,index):
        """ returns item indexed from all frames in all tracks from training
        or testing indices depending on mode
        Returns (im_t, y): a normalized image tensor and its label rows.
        """
        no_labels = False

        # load image and get label
        y = self.labels[index].clone()
        im = Image.open(self.data[index])

        # look up the vanishing points of the camera that recorded this frame
        camera_id = self.data[index].split("/")[-1].split("_")[0]
        vps = self.vps[camera_id]
        vps = torch.tensor([vps[0][0],vps[0][1],vps[1][0],vps[1][1],vps[2][0],vps[2][1]])

        if y.numel() == 0:
            # no objects: single placeholder row of -1s
            y = torch.zeros([1,21]) -1
            no_labels = True
        elif camera_id in ["p2c2","p2c3","p2c4"]:
            # labels are expected left first then right, but these cameras
            # store them right first - swap paired x and y columns
            new_y = torch.clone(y)
            new_y[:,[0,2,4,6,8,10,12,14,16,18]] = y[:,[2,0,6,4,10,8,14,12,18,16]]
            new_y[:,[1,3,5,7,9,11,13,15,17,19]] = y[:,[3,1,7,5,11,9,15,13,19,17]]
            y = new_y

        # stretch and scale randomly by a small amount
        # (width by `scale`, height by `scale * aspect_ratio`)
        scale = max(1,np.random.normal(1,0.1))
        aspect_ratio = max(0.75,np.random.normal(1,0.2))
        size = im.size
        new_size = (int(im.size[1] * scale * aspect_ratio),int(im.size[0] * scale))
        im = F.resize(im,new_size)
        im = F.to_tensor(im)
        # paste the resized image into a random-noise canvas of the original size
        new_im = torch.rand([3,size[1],size[0]])
        new_im[:,:min(im.shape[1],new_im.shape[1]),:min(im.shape[2],new_im.shape[2])] = im[:,:min(im.shape[1],new_im.shape[1]),:min(im.shape[2],new_im.shape[2])]
        im = new_im
        im = F.to_pil_image(im)

        # scale label coordinates and vanishing points to match the resize
        y[:,[0,2,4,6,8,10,12,14,16,18]] = y[:,[0,2,4,6,8,10,12,14,16,18]] * scale
        y[:,[1,3,5,7,9,11,13,15,17,19]] = y[:,[1,3,5,7,9,11,13,15,17,19]] * scale * aspect_ratio
        vps[[0,2,4]] = vps[[0,2,4]] * scale
        vps[[1,3,5]] = vps[[1,3,5]] * scale * aspect_ratio

        # randomly flip horizontally (p = 0.5)
        FLIP = np.random.rand()
        if FLIP > 0.5:
            im= F.hflip(im)
            # reverse coords and also switch xmin and xmax
            new_y = torch.clone(y)
            new_y[:,[0,2,4,6,8,10,12,14,16,18]] = im.size[0] - y[:,[2,0,6,4,10,8,14,12,18,16]] # labels are expected left first then right, but are formatted right first
            new_y[:,[1,3,5,7,9,11,13,15,17,19]] = y[:,[3,1,7,5,11,9,15,13,19,17]]
            y = new_y
            new_vps = torch.clone(vps)
            vps[[0,2,4]] = im.size[0] - new_vps[[0,2,4]]
            if no_labels:
                # the flip math corrupts the placeholder row - reset it
                y = torch.zeros([1,21]) -1

        # randomly rotate by up to +/- 20 degrees
        angle = (np.random.rand()*40)-20
        im = F.rotate(im, angle, resample = Image.BILINEAR)
        if not no_labels:
            # rotate each label point about the image center by decomposing
            # it into (magnitude, angle) and shifting the angle
            y_mag = torch.sqrt((y[:,::2][:,:-1] - im.size[0]/2.0)**2 + (y[:,1::2] - im.size[1]/2.0)**2)
            y_theta = torch.atan2((y[:,1::2] - im.size[1]/2.0),(y[:,::2][:,:-1] - im.size[0]/2.0))
            y_theta -= angle*(np.pi/180.0)
            y_new = torch.clone(y)
            y_new[:,::2][:,:-1] = y_mag * torch.cos(y_theta)
            y_new[:,1::2] = y_mag * torch.sin(y_theta)
            y_new[:,::2][:,:-1] += im.size[0]/2.0
            y_new[:,1::2] += im.size[1]/2.0
            y = y_new

            # recompute the 2D box as the envelope of the rotated corners
            xmin = torch.min(y[:,::2][:,:-1],dim = 1)[0].unsqueeze(1)
            xmax = torch.max(y[:,::2][:,:-1],dim = 1)[0].unsqueeze(1)
            ymin = torch.min(y[:,1::2],dim = 1)[0].unsqueeze(1)
            ymax = torch.max(y[:,1::2],dim = 1)[0].unsqueeze(1)
            bbox_2d = torch.cat([xmin,ymin,xmax,ymax],dim = 1)
            y[:,16:20] = bbox_2d

            # remove all labels that now fall fully outside of the image
            keep = []
            for item in y:
                if min(item[[0,2,4,6,8,10,12,14,16,18]]) < im.size[0] and max(item[[0,2,4,6,8,10,12,14,16,18]]) >= 0 and min(item[[1,3,5,7,9,11,13,15,17,19]]) < im.size[1] and max(item[[1,3,5,7,9,11,13,15,17,19]]) >= 0:
                    keep.append(item)
            try:
                y = torch.stack(keep)
            except Exception:
                # nothing left - fall back to the placeholder row
                y = torch.zeros([1,21]) -1

        if self.CROP == 0:
            # full-frame path: normalize, then randomly tile-shuffle quadrants
            im_t = self.im_tf(im)

            TILE = np.random.rand()
            if TILE > 0.25:
                # find split lines that do not pass through any bbox, so the
                # tiling does not cut objects in half
                occupied_x = []
                occupied_y = []
                for box in y:
                    xmin = min(box[[0,2,4,6,8,10,12,14]])
                    xmax = max(box[[0,2,4,6,8,10,12,14]])
                    ymin = min(box[[1,3,5,7,9,11,13,15]])
                    ymax = max(box[[1,3,5,7,9,11,13,15]])
                    occupied_x.append([xmin,xmax])
                    occupied_y.append([ymin,ymax])

                attempts = 0
                good = False
                while not good and attempts < 10:
                    good = True
                    xsplit = np.random.randint(0,im.size[0])
                    for rang in occupied_x:
                        if xsplit > rang[0] and xsplit < rang[1]:
                            good = False
                            attempts += 1
                            break
                    if good:
                        break
                attempts = 0
                good = False
                while not good and attempts < 10:
                    good = True
                    ysplit = np.random.randint(0,im.size[1])
                    for rang in occupied_y:
                        if ysplit > rang[0] and ysplit < rang[1]:
                            good = False
                            attempts += 1
                            break
                    if good:
                        break

                # cut into 4 tiles and reassemble in a shuffled order
                im11 = im_t[:,:ysplit,:xsplit]
                im12 = im_t[:,ysplit:,:xsplit]
                im21 = im_t[:,:ysplit,xsplit:]
                im22 = im_t[:,ysplit:,xsplit:]
                if TILE > 0.25 and TILE < 0.5:
                    im_t = torch.cat((torch.cat((im21,im22),dim = 1),torch.cat((im11,im12),dim = 1)),dim = 2)
                elif TILE > 0.5 and TILE < 0.75:
                    im_t = torch.cat((torch.cat((im22,im21),dim = 1),torch.cat((im12,im11),dim = 1)),dim = 2)
                elif TILE > 0.75:
                    im_t = torch.cat((torch.cat((im12,im11),dim = 1),torch.cat((im22,im21),dim = 1)),dim = 2)

                # shift label x coords for the horizontal swap
                if TILE > 0.25 and TILE < 0.75:
                    for idx in range(0,len(y)):
                        if occupied_x[idx][0] > xsplit:
                            y[idx,[0,2,4,6,8,10,12,14,16,18]] = y[idx,[0,2,4,6,8,10,12,14,16,18]] - xsplit
                        else:
                            y[idx,[0,2,4,6,8,10,12,14,16,18]] = y[idx,[0,2,4,6,8,10,12,14,16,18]] + (im_t.shape[2] - xsplit)
                # shift label y coords for the vertical swap
                if TILE > 0.5:
                    for idx in range(0,len(y)):
                        if occupied_y[idx][0] > ysplit:
                            y[idx,[1,3,5,7,9,11,13,15,17,19]] = y[idx,[1,3,5,7,9,11,13,15,17,19]] - ysplit
                        else:
                            y[idx,[1,3,5,7,9,11,13,15,17,19]] = y[idx,[1,3,5,7,9,11,13,15,17,19]] + (im_t.shape[1] - ysplit)

            # append vp (actually we only need one copy but for simplicity append it to every label)
            vps = vps.unsqueeze(0).repeat(len(y),1).float()
            y = y.float()
            y = torch.cat((y,vps),dim = 1)

        elif self.CROP > 0:
            # crop path: return a CROP x CROP patch centered near one object
            classes = y[:,20].clone()

            if y[0,0] != -1:
                # use one randomly chosen object to define the crop center/size
                idx = np.random.randint(len(y))
                box = y[idx]
                centx = (box[16] + box[18])/2.0
                centy = (box[17] + box[19])/2.0
                noise = np.random.normal(0,20,size = 2)
                centx += noise[0]
                centy += noise[1]
                size = max(box[19]-box[17],box[18] - box[16])
                size_noise = max( -(size*1/4) , np.random.normal(size*1/4,size/4))
                size += size_noise
                if size < 50:
                    size = 50
            else:
                # no objects: take a random crop anywhere in the frame
                size = max(50,np.random.normal(300,25))
                centx = np.random.randint(100,1000)
                centy = np.random.randint(100,1000)

            try:
                minx = int(centx - size/2)
                miny = int(centy - size/2)
                maxx = int(centx + size/2)
                maxy = int(centy + size/2)
            except TypeError:
                # NOTE(review): if this fires, minx..maxy are unbound and the
                # crop below will raise - investigate the offending values
                print(centx,centy,size)

            try:
                im_crop = F.crop(im,miny,minx,maxy-miny,maxx-minx)
            except Exception:
                print (miny,minx,maxy,maxx,size,centx,centy)
                im_crop = im.copy()
                y = torch.zeros([1,21]) -1
            del im

            if im_crop.size[0] == 0 or im_crop.size[1] == 0:
                print("Oh no! {} {} {}".format(centx,centy,size))
                raise Exception

            # shift labels if there is at least one object
            if y[0,0] != -1:
                y[:,::2] -= minx
                y[:,1::2] -= miny

            # resize the crop to CROP x CROP and rescale labels to match
            crop_size = im_crop.size
            im_crop = F.resize(im_crop, (self.CROP,self.CROP))
            y[:,::2] *= self.CROP/crop_size[0]
            y[:,1::2] *= self.CROP/crop_size[1]

            # remove all labels whose 2D box isn't (mostly) inside the crop
            if torch.sum(y) != 0:
                keepers = []
                for i,item in enumerate(y):
                    if item[16] < self.CROP-15 and item[18] > 0+15 and item[17] < self.CROP-15 and item[19] > 0+15:
                        keepers.append(i)
                y = y[keepers]
                classes = classes[keepers]
            if len(y) == 0:
                y = torch.zeros([1,21]) -1
                classes = torch.tensor([-1])

            im_t = self.im_tf(im_crop)

            # with p = 0.1, occlude a random region with normalized noise
            OCCLUDE = np.random.rand()
            if OCCLUDE > 0.9:
                yo_min = np.random.randint(im_t.shape[2]/3,im_t.shape[2])
                yo_max = im_t.shape[2]
                xo_min = np.random.randint(0,im_t.shape[1]/3)
                xo_max = np.random.randint(im_t.shape[1]*2/3,im_t.shape[1])
                region = torch.tensor([xo_min,yo_min,xo_max,yo_max]).int()
                # fill with per-channel noise matching the normalization stats
                r = torch.normal(0.485,0.229,[int(region[3])-int(region[1]),int(region[2])-int(region[0])])
                g = torch.normal(0.456,0.224,[int(region[3])-int(region[1]),int(region[2])-int(region[0])])
                b = torch.normal(0.406,0.225,[int(region[3])-int(region[1]),int(region[2])-int(region[0])])
                rgb = torch.stack([r,g,b])
                im_t[:,int(region[1]):int(region[3]),int(region[0]):int(region[2])] = rgb

            # restore class indices (cropping math above may have altered col 20)
            y[:,20] = classes

        return im_t, y

    def __len__(self):
        """Number of frames in this partition."""
        return len(self.labels)

    def label_to_name(self,num):
        """Return the class name string for integer class index num."""
        # bug fix: self.class_dict was never defined; the mapping lives in
        # self.classes (which maps both names->ints and ints->names)
        return self.classes[num]

    def show(self,index):
        """ plots all bboxes for the item at index and displays the image
        until a key is pressed
        """
        cls_idx = 20

        im,label = self[index]

        # undo normalization, convert CHW RGB tensor to HWC BGR numpy image
        im = self.denorm(im)
        cv_im = np.array(im)
        cv_im = np.clip(cv_im, 0, 1)
        cv_im = cv_im[::-1, :, :]
        cv_im = np.moveaxis(cv_im,[0,1,2],[2,0,1])
        cv_im = cv_im.copy()

        # one BGR color per class index
        class_colors = [
            (0,255,0),
            (255,0,0),
            (0,0,255),
            (255,255,0),
            (255,0,255),
            (0,255,255),
            (255,100,0),
            (255,50,0),
            (0,255,150),
            (0,255,100),
            (0,255,50)]

        if self.label_format == "8_corners":
            for bbox in label:
                thickness = 1
                bbox = bbox.int().data.numpy()
                # bottom face (rear and front edges highlighted green/blue)
                cv2.line(cv_im,(bbox[0],bbox[1]),(bbox[2],bbox[3]), class_colors[bbox[cls_idx]], thickness)
                cv2.line(cv_im,(bbox[0],bbox[1]),(bbox[4],bbox[5]), class_colors[bbox[cls_idx]], thickness)
                cv2.line(cv_im,(bbox[2],bbox[3]),(bbox[6],bbox[7]), (0,255,0), thickness)
                cv2.line(cv_im,(bbox[4],bbox[5]),(bbox[6],bbox[7]), (255,0,0), thickness)
                # top face
                cv2.line(cv_im,(bbox[8],bbox[9]),(bbox[10],bbox[11]), class_colors[bbox[cls_idx]], thickness)
                cv2.line(cv_im,(bbox[8],bbox[9]),(bbox[12],bbox[13]), class_colors[bbox[cls_idx]], thickness)
                cv2.line(cv_im,(bbox[10],bbox[11]),(bbox[14],bbox[15]), class_colors[bbox[cls_idx]], thickness)
                cv2.line(cv_im,(bbox[12],bbox[13]),(bbox[14],bbox[15]), class_colors[bbox[cls_idx]], thickness)
                # vertical edges connecting the faces
                cv2.line(cv_im,(bbox[0],bbox[1]),(bbox[8],bbox[9]), class_colors[bbox[cls_idx]], thickness)
                cv2.line(cv_im,(bbox[2],bbox[3]),(bbox[10],bbox[11]), class_colors[bbox[cls_idx]], thickness)
                cv2.line(cv_im,(bbox[4],bbox[5]),(bbox[12],bbox[13]), class_colors[bbox[cls_idx]], thickness)
                cv2.line(cv_im,(bbox[6],bbox[7]),(bbox[14],bbox[15]), (0,0,255), thickness)
                # axis-aligned 2D box
                cv2.rectangle(cv_im, (bbox[16],bbox[17]),(bbox[18],bbox[19]),class_colors[bbox[cls_idx]],thickness)

        cv2.imshow("Frame",cv_im)
        cv2.waitKey(0)
def collate(inputs):
    """Batch a list of (image, label) pairs into a pair of stacked tensors.

    Images (each [3 x W x H]) are stacked along a new batch dimension.
    Each frame has a variable number of label rows, so labels are padded
    with -1 rows up to the largest object count in the batch.
    """
    images = [pair[0] for pair in inputs]
    per_frame_labels = [pair[1] for pair in inputs]

    # widest annotation count in the batch determines the padded size
    max_objs = max(len(lab) for lab in per_frame_labels)

    ims = torch.stack(images)

    # label row width is taken from the first row of the first frame
    row_len = len(per_frame_labels[0][0])

    # fill with -1 so real rows remain distinguishable from padding
    labels = torch.zeros([len(per_frame_labels), max_objs, row_len]) - 1
    for i, lab in enumerate(per_frame_labels):
        labels[i, :len(lab), :] = lab

    return ims, labels
if __name__ == "__main__":
#### Test script here
#%% cache frames
label_dir = "/home/worklab/Data/dataset_alpha/manual_correction"
vid_dir = "/home/worklab/Data/cv/video/ground_truth_video_06162021/segments"
cache_dir = "/home/worklab/Data/cv/dataset_alpha_cache_1a"
last_corrected_frame = {
"p1c1_0":-1,
"p1c2_0":1000,
"p1c3_0":2340,
"p1c4_0":8999,
"p1c5_0":1000,
"p1c6_0":320,
"p2c1_0":230,
"p2c2_0":215,
"p2c3_0":500,
"p2c4_0":405,
"p2c5_0":680,
"p2c6_0":300,
"p3c1_0":200,
"p3c2_0":300,
"p3c3_0":200,
"p3c4_0":-1,
"p3c5_0":-1,
"p3c6_0":-1
}
#cache_corrected_frames(label_dir,vid_dir,last_corrected_frame,cache_dir)
#%%
test = Detection_Dataset(cache_dir,label_format = "8_corners",mode = "test", CROP = 224)
for i in range(1000):
idx = np.random.randint(0,len(test))
test.show(idx)
cv2.destroyAllWindows()
| [
"numpy.moveaxis",
"csv.reader",
"torchvision.transforms.functional.to_tensor",
"torch.sqrt",
"random.shuffle",
"torch.cat",
"numpy.clip",
"cv2.fillPoly",
"numpy.ones",
"torch.cos",
"numpy.random.randint",
"numpy.random.normal",
"cv2.rectangle",
"torchvision.transforms.Normalize",
"cv2.im... | [((4410, 4449), 'os.path.join', 'os.path.join', (['output_dir', '"""labels.cpkl"""'], {}), "(output_dir, 'labels.cpkl')\n", (4422, 4449), False, 'import os, sys\n'), ((4691, 4707), 'numpy.array', 'np.array', (['pil_im'], {}), '(pil_im)\n', (4699, 4707), True, 'import numpy as np\n'), ((5854, 5928), 'cv2.rectangle', 'cv2.rectangle', (['im', 'box_coords[0]', 'box_coords[1]', 'rectangle_bgr', 'cv2.FILLED'], {}), '(im, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)\n', (5867, 5928), False, 'import cv2\n'), ((5933, 6055), 'cv2.putText', 'cv2.putText', (['im', 'text', '(text_offset_x, text_offset_y)', 'font'], {'fontScale': 'font_scale', 'color': '(0.0, 0.0, 0.0)', 'thickness': '(2)'}), '(im, text, (text_offset_x, text_offset_y), font, fontScale=\n font_scale, color=(0.0, 0.0, 0.0), thickness=2)\n', (5944, 6055), False, 'import cv2\n'), ((29616, 29631), 'torch.stack', 'torch.stack', (['im'], {}), '(im)\n', (29627, 29631), False, 'import torch\n'), ((31013, 31036), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (31034, 31036), False, 'import cv2\n'), ((1411, 1446), 'os.path.join', 'os.path.join', (['label_directory', 'item'], {}), '(label_directory, item)\n', (1423, 1446), False, 'import os, sys\n'), ((2100, 2127), 'os.path.exists', 'os.path.exists', (['ignore_path'], {}), '(ignore_path)\n', (2114, 2127), False, 'import os, sys\n'), ((2365, 2389), 'numpy.array', 'np.array', (['ignore_polygon'], {}), '(ignore_polygon)\n', (2373, 2389), True, 'import numpy as np\n'), ((3364, 3417), 'os.path.join', 'os.path.join', (['video_directory', "(sequence_name + '.mp4')"], {}), "(video_directory, sequence_name + '.mp4')\n", (3376, 3417), False, 'import os, sys\n'), ((3440, 3468), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_file'], {}), '(video_file)\n', (3456, 3468), False, 'import cv2\n'), ((4494, 4518), '_pickle.dump', 'pickle.dump', (['all_data', 'f'], {}), '(all_data, f)\n', (4505, 4518), True, 'import _pickle as pickle\n'), ((5490, 
5552), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font'], {'fontScale': 'font_scale', 'thickness': '(1)'}), '(text, font, fontScale=font_scale, thickness=1)\n', (5505, 5552), False, 'import cv2\n'), ((7420, 7538), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225]', 'std': '[1 / 0.229, 1 / 0.224, 1 / 0.225]'}), '(mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.225])\n', (7440, 7538), False, 'from torchvision import transforms\n'), ((8438, 8478), 'os.path.join', 'os.path.join', (['dataset_dir', '"""labels.cpkl"""'], {}), "(dataset_dir, 'labels.cpkl')\n", (8450, 8478), False, 'import os, sys\n'), ((8580, 8606), 'random.shuffle', 'random.shuffle', (['all_labels'], {}), '(all_labels)\n', (8594, 8606), False, 'import random\n'), ((11334, 11362), 'PIL.Image.open', 'Image.open', (['self.data[index]'], {}), '(self.data[index])\n', (11344, 11362), False, 'from PIL import Image\n'), ((11477, 11562), 'torch.tensor', 'torch.tensor', (['[vps[0][0], vps[0][1], vps[1][0], vps[1][1], vps[2][0], vps[2][1]]'], {}), '([vps[0][0], vps[0][1], vps[1][0], vps[1][1], vps[2][0], vps[2][1]]\n )\n', (11489, 11562), False, 'import torch\n'), ((12665, 12687), 'torchvision.transforms.functional.resize', 'F.resize', (['im', 'new_size'], {}), '(im, new_size)\n', (12673, 12687), True, 'import torchvision.transforms.functional as F\n'), ((12700, 12715), 'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['im'], {}), '(im)\n', (12711, 12715), True, 'import torchvision.transforms.functional as F\n'), ((12742, 12775), 'torch.rand', 'torch.rand', (['[3, size[1], size[0]]'], {}), '([3, size[1], size[0]])\n', (12752, 12775), False, 'import torch\n'), ((12978, 12996), 'torchvision.transforms.functional.to_pil_image', 'F.to_pil_image', (['im'], {}), '(im)\n', (12992, 12996), True, 'import torchvision.transforms.functional as F\n'), ((13336, 13352), 'numpy.random.rand', 
'np.random.rand', ([], {}), '()\n', (13350, 13352), True, 'import numpy as np\n'), ((14157, 14201), 'torchvision.transforms.functional.rotate', 'F.rotate', (['im', 'angle'], {'resample': 'Image.BILINEAR'}), '(im, angle, resample=Image.BILINEAR)\n', (14165, 14201), True, 'import torchvision.transforms.functional as F\n'), ((24119, 24150), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (24127, 24150), True, 'import numpy as np\n'), ((24168, 24199), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (24176, 24199), True, 'import numpy as np\n'), ((24306, 24318), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (24314, 24318), True, 'import numpy as np\n'), ((24336, 24356), 'numpy.clip', 'np.clip', (['cv_im', '(0)', '(1)'], {}), '(cv_im, 0, 1)\n', (24343, 24356), True, 'import numpy as np\n'), ((24464, 24504), 'numpy.moveaxis', 'np.moveaxis', (['cv_im', '[0, 1, 2]', '[2, 0, 1]'], {}), '(cv_im, [0, 1, 2], [2, 0, 1])\n', (24475, 24504), True, 'import numpy as np\n'), ((28912, 28938), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'cv_im'], {}), "('Frame', cv_im)\n", (28922, 28938), False, 'import cv2\n'), ((28946, 28960), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (28957, 28960), False, 'import cv2\n'), ((1458, 1485), 'os.listdir', 'os.listdir', (['label_directory'], {}), '(label_directory)\n', (1468, 1485), False, 'import os, sys\n'), ((2548, 2561), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2558, 2561), False, 'import csv\n'), ((4038, 4069), 'cv2.resize', 'cv2.resize', (['frame', '(1920, 1080)'], {}), '(frame, (1920, 1080))\n', (4048, 4069), False, 'import cv2\n'), ((4101, 4135), 'cv2.fillPoly', 'cv2.fillPoly', (['frame', 'ig', '(0, 0, 0)'], {}), '(frame, ig, (0, 0, 0))\n', (4113, 4135), False, 'import cv2\n'), ((4158, 4189), 'cv2.imwrite', 'cv2.imwrite', (['output_name', 'frame'], {}), '(output_name, frame)\n', (4169, 4189), False, 'import cv2\n'), ((8262, 8276), 
'_pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8273, 8276), True, 'import _pickle as pickle\n'), ((8544, 8558), '_pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8555, 8558), True, 'import _pickle as pickle\n'), ((12463, 12487), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (12479, 12487), True, 'import numpy as np\n'), ((12520, 12544), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.2)'], {}), '(1, 0.2)\n', (12536, 12544), True, 'import numpy as np\n'), ((13392, 13403), 'torchvision.transforms.functional.hflip', 'F.hflip', (['im'], {}), '(im)\n', (13399, 13403), True, 'import torchvision.transforms.functional as F\n'), ((13483, 13497), 'torch.clone', 'torch.clone', (['y'], {}), '(y)\n', (13494, 13497), False, 'import torch\n'), ((13903, 13919), 'torch.clone', 'torch.clone', (['vps'], {}), '(vps)\n', (13914, 13919), False, 'import torch\n'), ((14341, 14440), 'torch.sqrt', 'torch.sqrt', (['((y[:, ::2][:, :-1] - im.size[0] / 2.0) ** 2 + (y[:, 1::2] - im.size[1] / \n 2.0) ** 2)'], {}), '((y[:, ::2][:, :-1] - im.size[0] / 2.0) ** 2 + (y[:, 1::2] - im.\n size[1] / 2.0) ** 2)\n', (14351, 14440), False, 'import torch\n'), ((14447, 14532), 'torch.atan2', 'torch.atan2', (['(y[:, 1::2] - im.size[1] / 2.0)', '(y[:, ::2][:, :-1] - im.size[0] / 2.0)'], {}), '(y[:, 1::2] - im.size[1] / 2.0, y[:, ::2][:, :-1] - im.size[0] / 2.0\n )\n', (14458, 14532), False, 'import torch\n'), ((14600, 14614), 'torch.clone', 'torch.clone', (['y'], {}), '(y)\n', (14611, 14614), False, 'import torch\n'), ((15169, 15211), 'torch.cat', 'torch.cat', (['[xmin, ymin, xmax, ymax]'], {'dim': '(1)'}), '([xmin, ymin, xmax, ymax], dim=1)\n', (15178, 15211), False, 'import torch\n'), ((15697, 15714), 'torch.stack', 'torch.stack', (['keep'], {}), '(keep)\n', (15708, 15714), False, 'import torch\n'), ((16644, 16660), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (16658, 16660), True, 'import numpy as np\n'), ((20002, 20028), 'torch.cat', 
'torch.cat', (['(y, vps)'], {'dim': '(1)'}), '((y, vps), dim=1)\n', (20011, 20028), False, 'import torch\n'), ((2197, 2210), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2207, 2210), False, 'import csv\n'), ((6623, 6644), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6642, 6644), False, 'from torchvision import transforms\n'), ((7241, 7316), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (7261, 7316), False, 'from torchvision import transforms\n'), ((11657, 11677), 'torch.zeros', 'torch.zeros', (['[1, 21]'], {}), '([1, 21])\n', (11668, 11677), False, 'import torch\n'), ((11779, 11793), 'torch.clone', 'torch.clone', (['y'], {}), '(y)\n', (11790, 11793), False, 'import torch\n'), ((14120, 14136), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14134, 14136), True, 'import numpy as np\n'), ((14657, 14675), 'torch.cos', 'torch.cos', (['y_theta'], {}), '(y_theta)\n', (14666, 14675), False, 'import torch\n'), ((14712, 14730), 'torch.sin', 'torch.sin', (['y_theta'], {}), '(y_theta)\n', (14721, 14730), False, 'import torch\n'), ((21965, 22006), 'torchvision.transforms.functional.resize', 'F.resize', (['im_crop', '(self.CROP, self.CROP)'], {}), '(im_crop, (self.CROP, self.CROP))\n', (21973, 22006), True, 'import torchvision.transforms.functional as F\n'), ((22763, 22779), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (22777, 22779), True, 'import numpy as np\n'), ((26948, 27048), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[0], bbox[1])', '(bbox[2], bbox[3])', 'class_colors[bbox[cls_idx]]', 'thickness'], {}), '(cv_im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), class_colors[bbox[\n cls_idx]], thickness)\n', (26956, 27048), False, 'import cv2\n'), ((27056, 27156), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[0], bbox[1])', '(bbox[4], bbox[5])', 'class_colors[bbox[cls_idx]]', 
'thickness'], {}), '(cv_im, (bbox[0], bbox[1]), (bbox[4], bbox[5]), class_colors[bbox[\n cls_idx]], thickness)\n', (27064, 27156), False, 'import cv2\n'), ((27164, 27243), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[2], bbox[3])', '(bbox[6], bbox[7])', '(0, 255, 0)', 'thickness'], {}), '(cv_im, (bbox[2], bbox[3]), (bbox[6], bbox[7]), (0, 255, 0), thickness)\n', (27172, 27243), False, 'import cv2\n'), ((27254, 27333), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[4], bbox[5])', '(bbox[6], bbox[7])', '(255, 0, 0)', 'thickness'], {}), '(cv_im, (bbox[4], bbox[5]), (bbox[6], bbox[7]), (255, 0, 0), thickness)\n', (27262, 27333), False, 'import cv2\n'), ((27361, 27463), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[8], bbox[9])', '(bbox[10], bbox[11])', 'class_colors[bbox[cls_idx]]', 'thickness'], {}), '(cv_im, (bbox[8], bbox[9]), (bbox[10], bbox[11]), class_colors[bbox\n [cls_idx]], thickness)\n', (27369, 27463), False, 'import cv2\n'), ((27471, 27573), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[8], bbox[9])', '(bbox[12], bbox[13])', 'class_colors[bbox[cls_idx]]', 'thickness'], {}), '(cv_im, (bbox[8], bbox[9]), (bbox[12], bbox[13]), class_colors[bbox\n [cls_idx]], thickness)\n', (27479, 27573), False, 'import cv2\n'), ((27581, 27685), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[10], bbox[11])', '(bbox[14], bbox[15])', 'class_colors[bbox[cls_idx]]', 'thickness'], {}), '(cv_im, (bbox[10], bbox[11]), (bbox[14], bbox[15]), class_colors[\n bbox[cls_idx]], thickness)\n', (27589, 27685), False, 'import cv2\n'), ((27693, 27797), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[12], bbox[13])', '(bbox[14], bbox[15])', 'class_colors[bbox[cls_idx]]', 'thickness'], {}), '(cv_im, (bbox[12], bbox[13]), (bbox[14], bbox[15]), class_colors[\n bbox[cls_idx]], thickness)\n', (27701, 27797), False, 'import cv2\n'), ((27822, 27922), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[0], bbox[1])', '(bbox[8], bbox[9])', 'class_colors[bbox[cls_idx]]', 'thickness'], {}), '(cv_im, (bbox[0], bbox[1]), (bbox[8], bbox[9]), 
class_colors[bbox[\n cls_idx]], thickness)\n', (27830, 27922), False, 'import cv2\n'), ((27930, 28032), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[2], bbox[3])', '(bbox[10], bbox[11])', 'class_colors[bbox[cls_idx]]', 'thickness'], {}), '(cv_im, (bbox[2], bbox[3]), (bbox[10], bbox[11]), class_colors[bbox\n [cls_idx]], thickness)\n', (27938, 28032), False, 'import cv2\n'), ((28040, 28142), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[4], bbox[5])', '(bbox[12], bbox[13])', 'class_colors[bbox[cls_idx]]', 'thickness'], {}), '(cv_im, (bbox[4], bbox[5]), (bbox[12], bbox[13]), class_colors[bbox\n [cls_idx]], thickness)\n', (28048, 28142), False, 'import cv2\n'), ((28150, 28235), 'cv2.line', 'cv2.line', (['cv_im', '(bbox[6], bbox[7])', '(bbox[14], bbox[15])', '(0, 0, 255)', 'thickness'], {}), '(cv_im, (bbox[6], bbox[7]), (bbox[14], bbox[15]), (0, 0, 255),\n thickness)\n', (28158, 28235), False, 'import cv2\n'), ((28243, 28351), 'cv2.rectangle', 'cv2.rectangle', (['cv_im', '(bbox[16], bbox[17])', '(bbox[18], bbox[19])', 'class_colors[bbox[cls_idx]]', 'thickness'], {}), '(cv_im, (bbox[16], bbox[17]), (bbox[18], bbox[19]),\n class_colors[bbox[cls_idx]], thickness)\n', (28256, 28351), False, 'import cv2\n'), ((8822, 8837), 'torch.zeros', 'torch.zeros', (['(21)'], {}), '(21)\n', (8833, 8837), False, 'import torch\n'), ((10040, 10062), 'torch.from_numpy', 'torch.from_numpy', (['bbox'], {}), '(bbox)\n', (10056, 10062), False, 'import torch\n'), ((10221, 10245), 'torch.stack', 'torch.stack', (['frame_boxes'], {}), '(frame_boxes)\n', (10232, 10245), False, 'import torch\n'), ((14036, 14056), 'torch.zeros', 'torch.zeros', (['[1, 21]'], {}), '([1, 21])\n', (14047, 14056), False, 'import torch\n'), ((15747, 15767), 'torch.zeros', 'torch.zeros', (['[1, 21]'], {}), '([1, 21])\n', (15758, 15767), False, 'import torch\n'), ((17365, 17397), 'numpy.random.randint', 'np.random.randint', (['(0)', 'im.size[0]'], {}), '(0, im.size[0])\n', (17382, 17397), True, 'import numpy as np\n'), ((17869, 
17901), 'numpy.random.randint', 'np.random.randint', (['(0)', 'im.size[1]'], {}), '(0, im.size[1])\n', (17886, 17901), True, 'import numpy as np\n'), ((20414, 20445), 'numpy.random.normal', 'np.random.normal', (['(0)', '(20)'], {'size': '(2)'}), '(0, 20, size=2)\n', (20430, 20445), True, 'import numpy as np\n'), ((20886, 20914), 'numpy.random.randint', 'np.random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (20903, 20914), True, 'import numpy as np\n'), ((20938, 20966), 'numpy.random.randint', 'np.random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (20955, 20966), True, 'import numpy as np\n'), ((21294, 21342), 'torchvision.transforms.functional.crop', 'F.crop', (['im', 'miny', 'minx', '(maxy - miny)', '(maxx - minx)'], {}), '(im, miny, minx, maxy - miny, maxx - minx)\n', (21300, 21342), True, 'import torchvision.transforms.functional as F\n'), ((22203, 22215), 'torch.sum', 'torch.sum', (['y'], {}), '(y)\n', (22212, 22215), False, 'import torch\n'), ((22637, 22655), 'torch.tensor', 'torch.tensor', (['[-1]'], {}), '([-1])\n', (22649, 22655), False, 'import torch\n'), ((22906, 22957), 'numpy.random.randint', 'np.random.randint', (['(im_t.shape[2] / 3)', 'im_t.shape[2]'], {}), '(im_t.shape[2] / 3, im_t.shape[2])\n', (22923, 22957), True, 'import numpy as np\n'), ((23019, 23058), 'numpy.random.randint', 'np.random.randint', (['(0)', '(im_t.shape[1] / 3)'], {}), '(0, im_t.shape[1] / 3)\n', (23036, 23058), True, 'import numpy as np\n'), ((23081, 23136), 'numpy.random.randint', 'np.random.randint', (['(im_t.shape[1] * 2 / 3)', 'im_t.shape[1]'], {}), '(im_t.shape[1] * 2 / 3, im_t.shape[1])\n', (23098, 23136), True, 'import numpy as np\n'), ((23573, 23595), 'torch.stack', 'torch.stack', (['[r, g, b]'], {}), '([r, g, b])\n', (23584, 23595), False, 'import torch\n'), ((6506, 6574), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.6)', 'contrast': '(0.6)', 'saturation': '(0.5)'}), '(brightness=0.6, contrast=0.6, 
saturation=0.5)\n', (6528, 6574), False, 'from torchvision import transforms\n'), ((14898, 14933), 'torch.min', 'torch.min', (['y[:, ::2][:, :-1]'], {'dim': '(1)'}), '(y[:, ::2][:, :-1], dim=1)\n', (14907, 14933), False, 'import torch\n'), ((14968, 15003), 'torch.max', 'torch.max', (['y[:, ::2][:, :-1]'], {'dim': '(1)'}), '(y[:, ::2][:, :-1], dim=1)\n', (14977, 15003), False, 'import torch\n'), ((15038, 15066), 'torch.min', 'torch.min', (['y[:, 1::2]'], {'dim': '(1)'}), '(y[:, 1::2], dim=1)\n', (15047, 15066), False, 'import torch\n'), ((15102, 15130), 'torch.max', 'torch.max', (['y[:, 1::2]'], {'dim': '(1)'}), '(y[:, 1::2], dim=1)\n', (15111, 15130), False, 'import torch\n'), ((20641, 20681), 'numpy.random.normal', 'np.random.normal', (['(size * 1 / 4)', '(size / 4)'], {}), '(size * 1 / 4, size / 4)\n', (20657, 20681), True, 'import numpy as np\n'), ((20836, 20861), 'numpy.random.normal', 'np.random.normal', (['(300)', '(25)'], {}), '(300, 25)\n', (20852, 20861), True, 'import numpy as np\n'), ((22588, 22608), 'torch.zeros', 'torch.zeros', (['[1, 21]'], {}), '([1, 21])\n', (22599, 22608), False, 'import torch\n'), ((8989, 9001), 'numpy.ones', 'np.ones', (['[1]'], {}), '([1])\n', (8996, 9001), True, 'import numpy as np\n'), ((9082, 9095), 'numpy.zeros', 'np.zeros', (['[1]'], {}), '([1])\n', (9090, 9095), True, 'import numpy as np\n'), ((9476, 9489), 'numpy.zeros', 'np.zeros', (['[4]'], {}), '([4])\n', (9484, 9489), True, 'import numpy as np\n'), ((9526, 9545), 'numpy.min', 'np.min', (['bbox3d[::2]'], {}), '(bbox3d[::2])\n', (9532, 9545), True, 'import numpy as np\n'), ((9582, 9602), 'numpy.min', 'np.min', (['bbox3d[1::2]'], {}), '(bbox3d[1::2])\n', (9588, 9602), True, 'import numpy as np\n'), ((9639, 9658), 'numpy.max', 'np.max', (['bbox3d[::2]'], {}), '(bbox3d[::2])\n', (9645, 9658), True, 'import numpy as np\n'), ((9695, 9715), 'numpy.max', 'np.max', (['bbox3d[1::2]'], {}), '(bbox3d[1::2])\n', (9701, 9715), True, 'import numpy as np\n'), ((9954, 9999), 
'numpy.concatenate', 'np.concatenate', (['(bbox3d, bbox2d, cls)'], {'axis': '(0)'}), '((bbox3d, bbox2d, cls), axis=0)\n', (9968, 9999), True, 'import numpy as np\n'), ((18545, 18575), 'torch.cat', 'torch.cat', (['(im21, im22)'], {'dim': '(1)'}), '((im21, im22), dim=1)\n', (18554, 18575), False, 'import torch\n'), ((18576, 18606), 'torch.cat', 'torch.cat', (['(im11, im12)'], {'dim': '(1)'}), '((im11, im12), dim=1)\n', (18585, 18606), False, 'import torch\n'), ((21472, 21492), 'torch.zeros', 'torch.zeros', (['[1, 21]'], {}), '([1, 21])\n', (21483, 21492), False, 'import torch\n'), ((23157, 23203), 'torch.tensor', 'torch.tensor', (['[xo_min, yo_min, xo_max, yo_max]'], {}), '([xo_min, yo_min, xo_max, yo_max])\n', (23169, 23203), False, 'import torch\n'), ((9175, 9195), 'numpy.array', 'np.array', (['box[11:27]'], {}), '(box[11:27])\n', (9183, 9195), True, 'import numpy as np\n'), ((9357, 9375), 'numpy.array', 'np.array', (['box[4:8]'], {}), '(box[4:8])\n', (9365, 9375), True, 'import numpy as np\n'), ((18705, 18735), 'torch.cat', 'torch.cat', (['(im22, im21)'], {'dim': '(1)'}), '((im22, im21), dim=1)\n', (18714, 18735), False, 'import torch\n'), ((18736, 18766), 'torch.cat', 'torch.cat', (['(im12, im11)'], {'dim': '(1)'}), '((im12, im11), dim=1)\n', (18745, 18766), False, 'import torch\n'), ((18849, 18879), 'torch.cat', 'torch.cat', (['(im12, im11)'], {'dim': '(1)'}), '((im12, im11), dim=1)\n', (18858, 18879), False, 'import torch\n'), ((18880, 18910), 'torch.cat', 'torch.cat', (['(im22, im21)'], {'dim': '(1)'}), '((im22, im21), dim=1)\n', (18889, 18910), False, 'import torch\n')] |
import threading
from collections import OrderedDict
from random import Random
from typing import Dict, Iterator, List, Optional, Union, Tuple
import numpy as np
from torch.utils.data import DataLoader, Dataset, Sampler
from rdkit import Chem
from .scaler import StandardScaler
from chemprop.features import get_features_generator
from chemprop.features import BatchMolGraph, MolGraph
from chemprop.features import is_explicit_h, is_reaction
from chemprop.rdkit import make_mol
# Cache of graph featurizations
# Maps a SMILES string to its featurized MolGraph; caching is toggled via set_cache_graph().
CACHE_GRAPH = True
SMILES_TO_GRAPH: Dict[str, MolGraph] = {}
def cache_graph() -> bool:
    r"""Report whether :class:`~chemprop.features.MolGraph`\ s are currently being cached."""
    return CACHE_GRAPH
def set_cache_graph(cache_graph: bool) -> None:
    r"""Turn caching of :class:`~chemprop.features.MolGraph`\ s on or off."""
    global CACHE_GRAPH
    CACHE_GRAPH = cache_graph
def empty_cache():
    r"""Drop every cached :class:`~chemprop.features.MolGraph` and RDKit molecule."""
    SMILES_TO_GRAPH.clear()
    SMILES_TO_MOL.clear()
# Cache of RDKit molecules
# Maps a SMILES string to its RDKit Mol, or to a (reactant, product) pair for reaction SMILES.
CACHE_MOL = True
SMILES_TO_MOL: Dict[str, Union[Chem.Mol, Tuple[Chem.Mol, Chem.Mol]]] = {}
def cache_mol() -> bool:
    """Report whether RDKit molecules are currently being cached."""
    return CACHE_MOL
def set_cache_mol(cache_mol: bool) -> None:
    """Turn caching of RDKit molecules on or off."""
    global CACHE_MOL
    CACHE_MOL = cache_mol
class MoleculeDatapoint:
    """A :class:`MoleculeDatapoint` contains a single molecule and its associated features and targets."""
    def __init__(self,
                 smiles: List[str],
                 targets: List[Optional[float]] = None,
                 row: OrderedDict = None,
                 data_weight: float = 1,
                 features: np.ndarray = None,
                 features_generator: List[str] = None,
                 phase_features: List[float] = None,
                 atom_features: np.ndarray = None,
                 atom_descriptors: np.ndarray = None,
                 bond_features: np.ndarray = None,
                 overwrite_default_atom_features: bool = False,
                 overwrite_default_bond_features: bool = False):
        """
        :param smiles: A list of the SMILES strings for the molecules.
        :param targets: A list of targets for the molecule (contains None for unknown target values).
        :param row: The raw CSV row containing the information for this molecule.
        :param data_weight: Weighting of the datapoint for the loss function.
        :param features: A numpy array containing additional features (e.g., Morgan fingerprint).
        :param features_generator: A list of features generators to use.
        :param phase_features: A one-hot vector indicating the phase of the data, as used in spectra data.
        :param atom_features: A numpy array containing additional atom features to featurize the molecule
        :param atom_descriptors: A numpy array containing additional atom descriptors to featurize the molecule
        :param bond_features: A numpy array containing additional bond features to featurize the molecule
        :param overwrite_default_atom_features: Boolean to overwrite default atom features by atom_features
        :param overwrite_default_bond_features: Boolean to overwrite default bond features by bond_features
        """
        if features is not None and features_generator is not None:
            raise ValueError('Cannot provide both loaded features and a features generator.')
        self.smiles = smiles
        self.targets = targets
        self.row = row
        self.data_weight = data_weight
        self.features = features
        self.features_generator = features_generator
        self.phase_features = phase_features
        self.atom_descriptors = atom_descriptors
        self.atom_features = atom_features
        self.bond_features = bond_features
        self.overwrite_default_atom_features = overwrite_default_atom_features
        self.overwrite_default_bond_features = overwrite_default_bond_features
        self.is_reaction = is_reaction()
        self.is_explicit_h = is_explicit_h()
        # Generate additional features if given a generator
        if self.features_generator is not None:
            self.features = []
            for fg in self.features_generator:
                # Note: intentionally shadows the (already-saved) constructor argument.
                features_generator = get_features_generator(fg)
                for m in self.mol:
                    if not self.is_reaction:
                        if m is not None and m.GetNumHeavyAtoms() > 0:
                            self.features.extend(features_generator(m))
                        # for H2
                        elif m is not None and m.GetNumHeavyAtoms() == 0:
                            # not all features are equally long, so use methane as dummy molecule to determine length
                            self.features.extend(np.zeros(len(features_generator(Chem.MolFromSmiles('C')))))
                    else:
                        # Reactions: m is a (reactant, product) pair; features come from the reactant.
                        if m[0] is not None and m[1] is not None and m[0].GetNumHeavyAtoms() > 0:
                            self.features.extend(features_generator(m[0]))
                        elif m[0] is not None and m[1] is not None and m[0].GetNumHeavyAtoms() == 0:
                            # Zero-heavy-atom reactant: same methane-sized zero vector trick as above.
                            self.features.extend(np.zeros(len(features_generator(Chem.MolFromSmiles('C')))))
            self.features = np.array(self.features)
        # Fix nans in features
        replace_token = 0
        if self.features is not None:
            self.features = np.where(np.isnan(self.features), replace_token, self.features)
        # Fix nans in atom_descriptors
        if self.atom_descriptors is not None:
            self.atom_descriptors = np.where(np.isnan(self.atom_descriptors), replace_token, self.atom_descriptors)
        # Fix nans in atom_features
        if self.atom_features is not None:
            self.atom_features = np.where(np.isnan(self.atom_features), replace_token, self.atom_features)
        # Fix nans in bond_descriptors
        if self.bond_features is not None:
            self.bond_features = np.where(np.isnan(self.bond_features), replace_token, self.bond_features)
        # Save a copy of the raw features and targets to enable different scaling later on
        self.raw_features, self.raw_targets = self.features, self.targets
        self.raw_atom_descriptors, self.raw_atom_features, self.raw_bond_features = \
            self.atom_descriptors, self.atom_features, self.bond_features
    @property
    def mol(self) -> Union[List[Chem.Mol], List[Tuple[Chem.Mol, Chem.Mol]]]:
        """Gets the corresponding list of RDKit molecules for the corresponding SMILES list."""
        mol = make_mols(self.smiles, self.is_reaction, self.is_explicit_h)
        if cache_mol():
            for s, m in zip(self.smiles, mol):
                SMILES_TO_MOL[s] = m
        return mol
    @property
    def number_of_molecules(self) -> int:
        """
        Gets the number of molecules in the :class:`MoleculeDatapoint`.
        :return: The number of molecules.
        """
        return len(self.smiles)
    def set_features(self, features: np.ndarray) -> None:
        """
        Sets the features of the molecule.
        :param features: A 1D numpy array of features for the molecule.
        """
        self.features = features
    def set_atom_descriptors(self, atom_descriptors: np.ndarray) -> None:
        """
        Sets the atom descriptors of the molecule.
        :param atom_descriptors: A 1D numpy array of features for the molecule.
        """
        self.atom_descriptors = atom_descriptors
    def set_atom_features(self, atom_features: np.ndarray) -> None:
        """
        Sets the atom features of the molecule.
        :param atom_features: A 1D numpy array of features for the molecule.
        """
        self.atom_features = atom_features
    def set_bond_features(self, bond_features: np.ndarray) -> None:
        """
        Sets the bond features of the molecule.
        :param bond_features: A 1D numpy array of features for the molecule.
        """
        self.bond_features = bond_features
    def extend_features(self, features: np.ndarray) -> None:
        """
        Extends the features of the molecule.
        :param features: A 1D numpy array of extra features for the molecule.
        """
        self.features = np.append(self.features, features) if self.features is not None else features
    def num_tasks(self) -> int:
        """
        Returns the number of prediction tasks.
        :return: The number of tasks.
        """
        return len(self.targets)
    def set_targets(self, targets: List[Optional[float]]):
        """
        Sets the targets of a molecule.
        :param targets: A list of floats containing the targets.
        """
        self.targets = targets
    def reset_features_and_targets(self) -> None:
        """Resets the features (atom, bond, and molecule) and targets to their raw values."""
        self.features, self.targets = self.raw_features, self.raw_targets
        self.atom_descriptors, self.atom_features, self.bond_features = \
            self.raw_atom_descriptors, self.raw_atom_features, self.raw_bond_features
class MoleculeDataset(Dataset):
    r"""A :class:`MoleculeDataset` contains a list of :class:`MoleculeDatapoint`\ s with access to their attributes."""
    def __init__(self, data: List[MoleculeDatapoint]):
        r"""
        :param data: A list of :class:`MoleculeDatapoint`\ s.
        """
        self._data = data
        self._scaler = None
        self._batch_graph = None
        self._random = Random()
    def smiles(self, flatten: bool = False) -> Union[List[str], List[List[str]]]:
        """
        Returns a list containing the SMILES list associated with each :class:`MoleculeDatapoint`.
        :param flatten: Whether to flatten the returned SMILES to a list instead of a list of lists.
        :return: A list of SMILES or a list of lists of SMILES, depending on :code:`flatten`.
        """
        if flatten:
            return [smiles for d in self._data for smiles in d.smiles]
        return [d.smiles for d in self._data]
    def mols(self, flatten: bool = False) -> Union[List[Chem.Mol], List[List[Chem.Mol]], List[Tuple[Chem.Mol, Chem.Mol]], List[List[Tuple[Chem.Mol, Chem.Mol]]]]:
        """
        Returns a list of the RDKit molecules associated with each :class:`MoleculeDatapoint`.
        :param flatten: Whether to flatten the returned RDKit molecules to a list instead of a list of lists.
        :return: A list of SMILES or a list of lists of RDKit molecules, depending on :code:`flatten`.
        """
        if flatten:
            return [mol for d in self._data for mol in d.mol]
        return [d.mol for d in self._data]
    @property
    def number_of_molecules(self) -> int:
        """
        Gets the number of molecules in each :class:`MoleculeDatapoint`.
        :return: The number of molecules.
        """
        return self._data[0].number_of_molecules if len(self._data) > 0 else None
    def batch_graph(self) -> List[BatchMolGraph]:
        r"""
        Constructs a :class:`~chemprop.features.BatchMolGraph` with the graph featurization of all the molecules.
        .. note::
           The :class:`~chemprop.features.BatchMolGraph` is cached in after the first time it is computed
           and is simply accessed upon subsequent calls to :meth:`batch_graph`. This means that if the underlying
           set of :class:`MoleculeDatapoint`\ s changes, then the returned :class:`~chemprop.features.BatchMolGraph`
           will be incorrect for the underlying data.
        :return: A list of :class:`~chemprop.features.BatchMolGraph` containing the graph featurization of all the
                 molecules in each :class:`MoleculeDatapoint`.
        """
        if self._batch_graph is None:
            self._batch_graph = []
            # mol_graphs[d][i] is the MolGraph of molecule i of datapoint d.
            mol_graphs = []
            for d in self._data:
                mol_graphs_list = []
                for s, m in zip(d.smiles, d.mol):
                    if s in SMILES_TO_GRAPH:
                        mol_graph = SMILES_TO_GRAPH[s]
                    else:
                        if len(d.smiles) > 1 and (d.atom_features is not None or d.bond_features is not None):
                            raise NotImplementedError('Atom descriptors are currently only supported with one molecule '
                                                      'per input (i.e., number_of_molecules = 1).')
                        mol_graph = MolGraph(m, d.atom_features, d.bond_features,
                                             overwrite_default_atom_features=d.overwrite_default_atom_features,
                                             overwrite_default_bond_features=d.overwrite_default_bond_features)
                        if cache_graph():
                            SMILES_TO_GRAPH[s] = mol_graph
                    mol_graphs_list.append(mol_graph)
                mol_graphs.append(mol_graphs_list)
            # Transpose: batch element i groups molecule i of every datapoint together.
            self._batch_graph = [BatchMolGraph([g[i] for g in mol_graphs]) for i in range(len(mol_graphs[0]))]
        return self._batch_graph
    def features(self) -> List[np.ndarray]:
        """
        Returns the features associated with each molecule (if they exist).
        :return: A list of 1D numpy arrays containing the features for each molecule or None if there are no features.
        """
        if len(self._data) == 0 or self._data[0].features is None:
            return None
        return [d.features for d in self._data]
    def phase_features(self) -> List[np.ndarray]:
        """
        Returns the phase features associated with each molecule (if they exist).
        :return: A list of 1D numpy arrays containing the phase features for each molecule or None if there are no features.
        """
        if len(self._data) == 0 or self._data[0].phase_features is None:
            return None
        return [d.phase_features for d in self._data]
    def atom_features(self) -> List[np.ndarray]:
        """
        Returns the atom descriptors associated with each molecule (if they exit).
        :return: A list of 2D numpy arrays containing the atom descriptors
                 for each molecule or None if there are no features.
        """
        if len(self._data) == 0 or self._data[0].atom_features is None:
            return None
        return [d.atom_features for d in self._data]
    def atom_descriptors(self) -> List[np.ndarray]:
        """
        Returns the atom descriptors associated with each molecule (if they exit).
        :return: A list of 2D numpy arrays containing the atom descriptors
                 for each molecule or None if there are no features.
        """
        if len(self._data) == 0 or self._data[0].atom_descriptors is None:
            return None
        return [d.atom_descriptors for d in self._data]
    def bond_features(self) -> List[np.ndarray]:
        """
        Returns the bond features associated with each molecule (if they exit).
        :return: A list of 2D numpy arrays containing the bond features
                 for each molecule or None if there are no features.
        """
        if len(self._data) == 0 or self._data[0].bond_features is None:
            return None
        return [d.bond_features for d in self._data]
    def data_weights(self) -> List[float]:
        """
        Returns the loss weighting associated with each molecule
        """
        return [d.data_weight for d in self._data]
    def targets(self) -> List[List[Optional[float]]]:
        """
        Returns the targets associated with each molecule.
        :return: A list of lists of floats (or None) containing the targets.
        """
        return [d.targets for d in self._data]
    def num_tasks(self) -> int:
        """
        Returns the number of prediction tasks.
        :return: The number of tasks.
        """
        return self._data[0].num_tasks() if len(self._data) > 0 else None
    def features_size(self) -> int:
        """
        Returns the size of the additional features vector associated with the molecules.
        :return: The size of the additional features vector.
        """
        return len(self._data[0].features) if len(self._data) > 0 and self._data[0].features is not None else None
    def atom_descriptors_size(self) -> int:
        """
        Returns the size of custom additional atom descriptors vector associated with the molecules.
        :return: The size of the additional atom descriptor vector.
        """
        return len(self._data[0].atom_descriptors[0]) \
            if len(self._data) > 0 and self._data[0].atom_descriptors is not None else None
    def atom_features_size(self) -> int:
        """
        Returns the size of custom additional atom features vector associated with the molecules.
        :return: The size of the additional atom feature vector.
        """
        return len(self._data[0].atom_features[0]) \
            if len(self._data) > 0 and self._data[0].atom_features is not None else None
    def bond_features_size(self) -> int:
        """
        Returns the size of custom additional bond features vector associated with the molecules.
        :return: The size of the additional bond feature vector.
        """
        return len(self._data[0].bond_features[0]) \
            if len(self._data) > 0 and self._data[0].bond_features is not None else None
    def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0,
                           scale_atom_descriptors: bool = False, scale_bond_features: bool = False) -> StandardScaler:
        """
        Normalizes the features of the dataset using a :class:`~chemprop.data.StandardScaler`.
        The :class:`~chemprop.data.StandardScaler` subtracts the mean and divides by the standard deviation
        for each feature independently.
        If a :class:`~chemprop.data.StandardScaler` is provided, it is used to perform the normalization.
        Otherwise, a :class:`~chemprop.data.StandardScaler` is first fit to the features in this dataset
        and is then used to perform the normalization.
        :param scaler: A fitted :class:`~chemprop.data.StandardScaler`. If it is provided it is used,
                       otherwise a new :class:`~chemprop.data.StandardScaler` is first fitted to this
                       data and is then used.
        :param replace_nan_token: A token to use to replace NaN entries in the features.
        :param scale_atom_descriptors: If the features that need to be scaled are atom features rather than molecule.
        :param scale_bond_features: If the features that need to be scaled are bond descriptors rather than molecule.
        :return: A fitted :class:`~chemprop.data.StandardScaler`. If a :class:`~chemprop.data.StandardScaler`
                 is provided as a parameter, this is the same :class:`~chemprop.data.StandardScaler`. Otherwise,
                 this is a new :class:`~chemprop.data.StandardScaler` that has been fit on this dataset.
        """
        if len(self._data) == 0 or \
                (self._data[0].features is None and not scale_bond_features and not scale_atom_descriptors):
            return None
        if scaler is not None:
            self._scaler = scaler
        elif self._scaler is None:
            # Fit a new scaler on the *raw* (unscaled) values so repeated calls don't compound scaling.
            if scale_atom_descriptors and not self._data[0].atom_descriptors is None:
                features = np.vstack([d.raw_atom_descriptors for d in self._data])
            elif scale_atom_descriptors and not self._data[0].atom_features is None:
                features = np.vstack([d.raw_atom_features for d in self._data])
            elif scale_bond_features:
                features = np.vstack([d.raw_bond_features for d in self._data])
            else:
                features = np.vstack([d.raw_features for d in self._data])
            self._scaler = StandardScaler(replace_nan_token=replace_nan_token)
            self._scaler.fit(features)
        if scale_atom_descriptors and not self._data[0].atom_descriptors is None:
            for d in self._data:
                d.set_atom_descriptors(self._scaler.transform(d.raw_atom_descriptors))
        elif scale_atom_descriptors and not self._data[0].atom_features is None:
            for d in self._data:
                d.set_atom_features(self._scaler.transform(d.raw_atom_features))
        elif scale_bond_features:
            for d in self._data:
                d.set_bond_features(self._scaler.transform(d.raw_bond_features))
        else:
            for d in self._data:
                d.set_features(self._scaler.transform(d.raw_features.reshape(1, -1))[0])
        return self._scaler
    def normalize_targets(self) -> StandardScaler:
        """
        Normalizes the targets of the dataset using a :class:`~chemprop.data.StandardScaler`.
        The :class:`~chemprop.data.StandardScaler` subtracts the mean and divides by the standard deviation
        for each task independently.
        This should only be used for regression datasets.
        :return: A :class:`~chemprop.data.StandardScaler` fitted to the targets.
        """
        targets = [d.raw_targets for d in self._data]
        scaler = StandardScaler().fit(targets)
        scaled_targets = scaler.transform(targets).tolist()
        self.set_targets(scaled_targets)
        return scaler
    def set_targets(self, targets: List[List[Optional[float]]]) -> None:
        """
        Sets the targets for each molecule in the dataset. Assumes the targets are aligned with the datapoints.
        :param targets: A list of lists of floats (or None) containing targets for each molecule. This must be the
                        same length as the underlying dataset.
        """
        assert len(self._data) == len(targets)
        for i in range(len(self._data)):
            self._data[i].set_targets(targets[i])
    def reset_features_and_targets(self) -> None:
        """Resets the features (atom, bond, and molecule) and targets to their raw values."""
        for d in self._data:
            d.reset_features_and_targets()
    def __len__(self) -> int:
        """
        Returns the length of the dataset (i.e., the number of molecules).
        :return: The length of the dataset.
        """
        return len(self._data)
    def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:
        r"""
        Gets one or more :class:`MoleculeDatapoint`\ s via an index or slice.
        :param item: An index (int) or a slice object.
        :return: A :class:`MoleculeDatapoint` if an int is provided or a list of :class:`MoleculeDatapoint`\ s
                 if a slice is provided.
        """
        return self._data[item]
class MoleculeSampler(Sampler):
    """A :class:`MoleculeSampler` samples data from a :class:`MoleculeDataset` for a :class:`MoleculeDataLoader`."""
    def __init__(self,
                 dataset: 'MoleculeDataset',
                 class_balance: bool = False,
                 shuffle: bool = False,
                 seed: int = 0):
        """
        :param dataset: The dataset whose indices will be sampled.
        :param class_balance: If True, alternate positive and negative datapoints so each iteration
                              yields equal counts of both (truncated to the smaller class; combine
                              with ``shuffle=True`` to get a random subset of the larger class).
        :param shuffle: Whether to shuffle the sampling order.
        :param seed: Seed for the private RNG; only relevant when ``shuffle`` is True.
        """
        # Intentionally bypasses Sampler.__init__ (invokes object.__init__ instead).
        super(Sampler, self).__init__()
        self.dataset = dataset
        self.class_balance = class_balance
        self.shuffle = shuffle
        self._random = Random(seed)
        if not self.class_balance:
            self.positive_indices = self.negative_indices = None
            self.length = len(self.dataset)
        else:
            all_indices = np.arange(len(dataset))
            # A datapoint is "positive" when any of its targets equals 1.
            is_positive = np.array([any(t == 1 for t in dp.targets) for dp in dataset])
            self.positive_indices = all_indices[is_positive].tolist()
            self.negative_indices = all_indices[~is_positive].tolist()
            self.length = 2 * min(len(self.positive_indices), len(self.negative_indices))
    def __iter__(self) -> Iterator[int]:
        """Yield dataset indices in the order they should be sampled."""
        if not self.class_balance:
            order = list(range(len(self.dataset)))
            if self.shuffle:
                self._random.shuffle(order)
        else:
            if self.shuffle:
                self._random.shuffle(self.positive_indices)
                self._random.shuffle(self.negative_indices)
            # Interleave pos/neg; zip truncates to the smaller class.
            order = [idx for pair in zip(self.positive_indices, self.negative_indices) for idx in pair]
        return iter(order)
    def __len__(self) -> int:
        """Number of indices produced per full iteration."""
        return self.length
def construct_molecule_batch(data: List[MoleculeDatapoint]) -> MoleculeDataset:
    r"""
    Builds a :class:`MoleculeDataset` out of a list of :class:`MoleculeDatapoint`\ s and eagerly
    computes (and caches) its :class:`~chemprop.features.BatchMolGraph`.
    :param data: The :class:`MoleculeDatapoint`\ s to batch together.
    :return: A :class:`MoleculeDataset` wrapping all the given datapoints.
    """
    batched = MoleculeDataset(data)
    # Force graph featurization now so DataLoader workers do the work, not the training loop.
    batched.batch_graph()
    return batched
class MoleculeDataLoader(DataLoader):
    """A :class:`MoleculeDataLoader` is a PyTorch :class:`DataLoader` for loading a :class:`MoleculeDataset`."""
    def __init__(self,
                 dataset: MoleculeDataset,
                 batch_size: int = 50,
                 num_workers: int = 8,
                 class_balance: bool = False,
                 shuffle: bool = False,
                 seed: int = 0):
        """
        :param dataset: The :class:`MoleculeDataset` containing the molecules to load.
        :param batch_size: Batch size.
        :param num_workers: Number of workers used to build batches.
        :param class_balance: Whether to perform class balancing (i.e., use an equal number of positive
                              and negative molecules). Class balance is only available for single task
                              classification datasets. Set shuffle to True in order to get a random
                              subset of the larger class.
        :param shuffle: Whether to shuffle the data.
        :param seed: Random seed. Only needed if shuffle is True.
        """
        self._dataset = dataset
        self._batch_size = batch_size
        self._num_workers = num_workers
        self._class_balance = class_balance
        self._shuffle = shuffle
        self._seed = seed
        self._context = None
        self._timeout = 0
        # When constructed off the main thread with worker processes, switch the
        # multiprocessing start method and add a timeout (see comments below).
        is_main_thread = threading.current_thread() is threading.main_thread()
        if not is_main_thread and self._num_workers > 0:
            self._context = 'forkserver'  # In order to prevent a hanging
            self._timeout = 3600  # Just for sure that the DataLoader won't hang
        self._sampler = MoleculeSampler(
            dataset=self._dataset,
            class_balance=self._class_balance,
            shuffle=self._shuffle,
            seed=self._seed
        )
        super(MoleculeDataLoader, self).__init__(
            dataset=self._dataset,
            batch_size=self._batch_size,
            sampler=self._sampler,
            num_workers=self._num_workers,
            collate_fn=construct_molecule_batch,
            multiprocessing_context=self._context,
            timeout=self._timeout
        )
    @property
    def targets(self) -> List[List[Optional[float]]]:
        """
        Returns the targets associated with each molecule.
        :return: A list of lists of floats (or None) containing the targets.
        """
        if self._class_balance or self._shuffle:
            raise ValueError('Cannot safely extract targets when class balance or shuffle are enabled.')
        return [self._dataset[index].targets for index in self._sampler]
    @property
    def iter_size(self) -> int:
        """Returns the number of data points included in each full iteration through the :class:`MoleculeDataLoader`."""
        return len(self._sampler)
    def __iter__(self) -> Iterator[MoleculeDataset]:
        r"""Creates an iterator which returns :class:`MoleculeDataset`\ s"""
        return super(MoleculeDataLoader, self).__iter__()
def make_mols(smiles: List[str], reaction: bool, keep_h: bool):
    """
    Builds RDKit molecules for a list of SMILES strings, consulting the module-level cache first.
    :param smiles: List of SMILES strings.
    :param reaction: Whether each SMILES encodes a reaction ("reactant>...>product"); if so,
                     each entry becomes a (reactant Mol, product Mol) tuple.
    :param keep_h: Whether explicit hydrogens in the input SMILES are kept (never adds hydrogens).
    :return: List of RDKit molecules, or list of (reactant, product) tuples when ``reaction`` is True.
    """
    if reaction:
        return [SMILES_TO_MOL[s] if s in SMILES_TO_MOL
                else (make_mol(s.split(">")[0], keep_h), make_mol(s.split(">")[-1], keep_h))
                for s in smiles]
    return [SMILES_TO_MOL[s] if s in SMILES_TO_MOL else make_mol(s, keep_h) for s in smiles]
| [
"chemprop.features.is_reaction",
"chemprop.rdkit.make_mol",
"random.Random",
"numpy.isnan",
"chemprop.features.is_explicit_h",
"chemprop.features.get_features_generator",
"numpy.append",
"chemprop.features.MolGraph",
"numpy.array",
"rdkit.Chem.MolFromSmiles",
"threading.main_thread",
"threadin... | [((4011, 4024), 'chemprop.features.is_reaction', 'is_reaction', ([], {}), '()\n', (4022, 4024), False, 'from chemprop.features import is_explicit_h, is_reaction\n'), ((4054, 4069), 'chemprop.features.is_explicit_h', 'is_explicit_h', ([], {}), '()\n', (4067, 4069), False, 'from chemprop.features import is_explicit_h, is_reaction\n'), ((9647, 9655), 'random.Random', 'Random', ([], {}), '()\n', (9653, 9655), False, 'from random import Random\n'), ((23817, 23829), 'random.Random', 'Random', (['seed'], {}), '(seed)\n', (23823, 23829), False, 'from random import Random\n'), ((5377, 5400), 'numpy.array', 'np.array', (['self.features'], {}), '(self.features)\n', (5385, 5400), True, 'import numpy as np\n'), ((8385, 8419), 'numpy.append', 'np.append', (['self.features', 'features'], {}), '(self.features, features)\n', (8394, 8419), True, 'import numpy as np\n'), ((27075, 27101), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (27099, 27101), False, 'import threading\n'), ((27105, 27128), 'threading.main_thread', 'threading.main_thread', ([], {}), '()\n', (27126, 27128), False, 'import threading\n'), ((4304, 4330), 'chemprop.features.get_features_generator', 'get_features_generator', (['fg'], {}), '(fg)\n', (4326, 4330), False, 'from chemprop.features import get_features_generator\n'), ((5534, 5557), 'numpy.isnan', 'np.isnan', (['self.features'], {}), '(self.features)\n', (5542, 5557), True, 'import numpy as np\n'), ((5720, 5751), 'numpy.isnan', 'np.isnan', (['self.atom_descriptors'], {}), '(self.atom_descriptors)\n', (5728, 5751), True, 'import numpy as np\n'), ((5913, 5941), 'numpy.isnan', 'np.isnan', (['self.atom_features'], {}), '(self.atom_features)\n', (5921, 5941), True, 'import numpy as np\n'), ((6103, 6131), 'numpy.isnan', 'np.isnan', (['self.bond_features'], {}), '(self.bond_features)\n', (6111, 6131), True, 'import numpy as np\n'), ((13103, 13144), 'chemprop.features.BatchMolGraph', 'BatchMolGraph', (['[g[i] for g in 
mol_graphs]'], {}), '([g[i] for g in mol_graphs])\n', (13116, 13144), False, 'from chemprop.features import BatchMolGraph, MolGraph\n'), ((29516, 29535), 'chemprop.rdkit.make_mol', 'make_mol', (['s', 'keep_h'], {}), '(s, keep_h)\n', (29524, 29535), False, 'from chemprop.rdkit import make_mol\n'), ((19574, 19629), 'numpy.vstack', 'np.vstack', (['[d.raw_atom_descriptors for d in self._data]'], {}), '([d.raw_atom_descriptors for d in self._data])\n', (19583, 19629), True, 'import numpy as np\n'), ((12593, 12780), 'chemprop.features.MolGraph', 'MolGraph', (['m', 'd.atom_features', 'd.bond_features'], {'overwrite_default_atom_features': 'd.overwrite_default_atom_features', 'overwrite_default_bond_features': 'd.overwrite_default_bond_features'}), '(m, d.atom_features, d.bond_features,\n overwrite_default_atom_features=d.overwrite_default_atom_features,\n overwrite_default_bond_features=d.overwrite_default_bond_features)\n', (12601, 12780), False, 'from chemprop.features import BatchMolGraph, MolGraph\n'), ((19742, 19794), 'numpy.vstack', 'np.vstack', (['[d.raw_atom_features for d in self._data]'], {}), '([d.raw_atom_features for d in self._data])\n', (19751, 19794), True, 'import numpy as np\n'), ((19860, 19912), 'numpy.vstack', 'np.vstack', (['[d.raw_bond_features for d in self._data]'], {}), '([d.raw_bond_features for d in self._data])\n', (19869, 19912), True, 'import numpy as np\n'), ((19958, 20005), 'numpy.vstack', 'np.vstack', (['[d.raw_features for d in self._data]'], {}), '([d.raw_features for d in self._data])\n', (19967, 20005), True, 'import numpy as np\n'), ((4860, 4883), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""C"""'], {}), "('C')\n", (4878, 4883), False, 'from rdkit import Chem\n'), ((5296, 5319), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""C"""'], {}), "('C')\n", (5314, 5319), False, 'from rdkit import Chem\n')] |
import os
from platform import python_version_tuple
#if python_version_tuple()[0] == 3:
#xrange = range
xrange = range
import numpy as np
import pandas as pd
import cv2
import imhandle as imh
# Diagnosis labels used in the `is_ill` lists returned by the extractors below.
HEALTHY = 0
GLAUCOMA_OR_SUSPECT = 1
def extract_DRIONS_DB(db_folder, expert=1):
    """
    Full images with polygonal optic disc segmentation from 2 experts.
    400 x 600 original, 560 x 560 after post-processing.
    Images have unwanted text at the left, so it needs to be dropped out.
    Accepted values for `expert`: 1, 2.
    Required schema:
    db_folder/
        images/
            image_{:03}.jpg
        experts_annotation/
            anotExpert1_{:03}.txt
            anotExpert2_{:03}.txt
    Returns (X, Y, file_codes): images, binary disc masks of shape
    (H, W, 1), and the 3-character codes taken from the image file names.
    """
    orig_resolution = (400, 600)
    left_cut_thr = 40  # width of the text band cropped from the left edge
    result_resolution = (orig_resolution[1] - left_cut_thr, orig_resolution[1] - left_cut_thr)
    X, filenames = imh.load_set(os.path.join(db_folder, 'images'))
    file_codes = [fn[-7:-4] for fn in filenames]
    Y = []
    for code in file_codes:
        anot_filename = os.path.join(db_folder, 'experts_annotation',
                                     'anotExpert{}_{}.txt'.format(expert, code))
        with open(anot_filename) as anot_fin:
            anot_lines = anot_fin.readlines()
        # Each annotation line holds one "x , y" polygon vertex.  A list
        # comprehension is used instead of the previous nested map() calls:
        # on Python 3 (this module aliases xrange = range, so Python 3 is
        # supported) np.array() over a map object builds a useless 0-d
        # object array and the reshape below fails.  cv2.fillPoly also
        # requires an integer (int32) point array.
        coords = np.array([[int(round(float(v))) for v in line.split(' , ')]
                           for line in anot_lines], dtype=np.int32)
        segm_img = np.zeros(orig_resolution, dtype=np.uint8)
        # fillPoly expects a sequence of polygons, hence the extra leading axis
        cv2.fillPoly(segm_img, coords.reshape((1,) + coords.shape), color=1)
        Y.append(segm_img)
    for i in xrange(len(X)):
        side = result_resolution[0]
        X[i] = imh.resize_image_to_square(X[i][:, left_cut_thr:], side, pad_cval=0)
        Y[i] = imh.resize_image_to_square(Y[i][:, left_cut_thr:], side, pad_cval=0)
        Y[i] = Y[i].reshape(Y[i].shape + (1,))
    return X, Y, file_codes
def get_resolution_DRIONS_DB():
    """Square (height, width) of DRIONS-DB images after post-processing."""
    side = 560
    return (side, side)
def extract_RIM_ONE_v2(db_folder):
    """
    Cropped (to optic disc region) images with polygonal optic disc segmentation.
    380 x 394 original, 394 x 394 after post-processing.
    Required schema:
    db_folder/
        Normal/
            im{:03}.jpg (number from 0 to 255)
            im{:03}_gs.txt
        Glaucoma and glaucoma suspicious/
            im{:03}.jpg (number from 256 to 455)
            im{:03}_gs.txt

    Returns (X_all, Y_all, filecodes_all, is_ill): images, disc masks of
    shape (H, W, 1), 3-character file codes, and per-image diagnosis labels.
    """
    orig_resolution = (380, 394)
    result_resolution = (394, 394)
    X_all, Y_all, filecodes_all, is_ill = [], [], [], []
    for pic_type in ('Normal', 'Glaucoma and glaucoma suspicious'):
        X, filenames = imh.load_set(os.path.join(db_folder, pic_type))
        file_codes = [fn[-7:-4] for fn in filenames]
        Y = []
        for i, code in enumerate(file_codes):
            # NOTE(review): the annotation text file is read but `lines` is
            # never used by the live code path below (only by the disabled
            # alternatives kept in the triple-quoted strings) — the mask is
            # loaded from a pre-computed image instead.
            anot_filename = os.path.join(db_folder, pic_type, 'Im{}-gs.txt'.format(code))
            with open(anot_filename) as anot_fin:
                lines = anot_fin.readlines()
            '''
            # polygonal segmentation
            coords = lines[1:lines.index('Ellipse parameters\r\n')]
            coords = np.array(map(int, coords))
            if coords.size % 2 != 0:
                raise imh.ImLibException('n_coords % 2 != 0')
            coords = coords.reshape((coords.size / 2, 2))
            coords = coords[1:]   # optic disc center point is included in annotation for some reason
            segm_img = np.zeros(orig_resolution, dtype=np.uint8)
            cv2.fillPoly(segm_img, coords.reshape((1,) + coords.shape), color=1)
            '''
            '''
            # ellipse segmentation
            coords = lines[lines.index('Ellipse parameters\r\n') + 1:]
            coords = map(int, coords)
            i0, j0, a, b, angle = coords
            a /= 2
            b /= 2
            segm_img = np.zeros(orig_resolution, dtype=np.uint8)
            cv2.ellipse(segm_img, (i0, j0), (a, b), angle, 0, 360, color=1, thickness=-1)
            '''
            # acquiring segmentation from pre-computed image
            segm_img = imh.load_image(os.path.join(db_folder, pic_type + ' segmentation', 'Im{}-gs_mask.jpg'.format(code)))
            Y.append(segm_img)
            is_ill.append(HEALTHY if pic_type == 'Normal' else GLAUCOMA_OR_SUSPECT)
        # pad/resize every image and mask to the common square resolution
        for i in xrange(len(X)):
            side = result_resolution[0]
            X[i] = imh.resize_image_to_square(X[i], side, pad_cval=0)
            Y[i] = imh.resize_image_to_square(Y[i], side, pad_cval=0)
            Y[i] = Y[i].reshape(Y[i].shape + (1,))
        X_all.extend(X)
        Y_all.extend(Y)
        filecodes_all.extend(file_codes)
    return X_all, Y_all, filecodes_all, is_ill
def get_resolution_RIM_ONE_v2():
    """Square (height, width) of RIM-ONE v2 images after post-processing."""
    side = 394
    return (side, side)
def extract_RIM_ONE_v3(db_folder, expert='avg', return_disc=True, return_cup=True):
    """
    Cropped (to optic disc region, and a little more by vertical axis) images
    with polygonal optic disc segmentation. 1424 x 2144 original, 1424 x 1424 after post-processing.
    Images are two-channel (stereo) --- caught from 2 angles.
    But segmentation is given for only one view (see L/R letter in file name for clarification).
    So only one view of two is chosen.
    Accepted values for `expert`: 1, 2, 'avg'.
    Required schema:
    db_folder/
        Healthy/
            Stereo Images/
                N-{}-[L,R].jpg (number is without leading zeros, from 1 to 92)
                (image cannot be used as is. it is two-part image, divided by vertical border)
            Expert1_masks/
                N-{}-[L,R]-Cup-exp1.png (4 files for one image number and L/R characteristic)
                N-{}-[L,R]-Cup-exp1.txt
                N-{}-[L,R]-Disc-exp1.png
                N-{}-[L,R]-Disc-exp1.txt
            Expert2_masks/
                (same, suffix exp2)
            Average_masks/
                (same, suffix Avg)
        Glaucoma and suspects/
            (...) (the same as for Healthy, but images start with G not N)

    Returns X_all plus (depending on return_disc/return_cup) disc masks,
    cup masks, file codes, and per-image diagnosis labels.
    """
    orig_resolution = (1424, 2144)
    result_resolution = (1424, 1424)
    if expert == 1:
        expert_folder = 'Expert1_masks'
        suffix = 'exp1'
    elif expert == 2:
        expert_folder = 'Expert2_masks'
        suffix = 'exp2'
    elif expert == 'avg':
        expert_folder = 'Average_masks'
        suffix = 'Avg'
    else:
        raise imh.ImLibException('value for "expert" argument not understood')
    X_all, disc_all, cup_all, file_codes_all, is_ill = [], [], [], [], []
    for pic_type in ('Healthy', 'Glaucoma and suspects'):
        X, file_names = imh.load_set(os.path.join(db_folder, pic_type, 'Stereo Images'))
        X_all.extend(X)
        rel_file_names = [os.path.split(fn)[-1] for fn in file_names]
        file_codes = [fn[:fn.rfind('.')] for fn in rel_file_names]
        file_codes_all.extend(file_codes)
        for fc in file_codes:
            if return_disc:
                disc_segmn = imh.load_image(os.path.join(db_folder, pic_type, expert_folder,
                                                          '{}-Disc-{}.png'.format(fc, suffix)))
                disc_all.append(disc_segmn)
            if return_cup:
                cup_segmn = imh.load_image(os.path.join(db_folder, pic_type, expert_folder,
                                                         '{}-Cup-{}.png'.format(fc, suffix)))
                cup_all.append(cup_segmn)
            is_ill.append(HEALTHY if pic_type == 'Healthy' else GLAUCOMA_OR_SUSPECT)
    # Half-width of the stereo pair.  Floor division keeps this an int so it
    # can be used as a slice bound: the original "/ 2" yields a float on
    # Python 3 and indexing then raises TypeError.
    half_w = orig_resolution[1] // 2
    for i in xrange(len(X_all)):
        side = result_resolution[0]
        if file_codes_all[i][-1] == 'L':
            X_all[i] = X_all[i][:, :half_w]
        elif file_codes_all[i][-1] == 'R':
            X_all[i] = X_all[i][:, half_w:]
            # NOTE(review): masks are cropped for 'R' images only, exactly as
            # in the original code — presumably the mask files are already
            # single-view for 'L'; confirm against the data set.
            if return_disc:
                disc_all[i] = disc_all[i][:, :half_w]
            if return_cup:
                cup_all[i] = cup_all[i][:, :half_w]
        else:
            raise imh.ImLibException('image {} has no L/R characteristic'.format(file_codes_all[i]))
        X_all[i] = imh.resize_image_to_square(X_all[i], side, pad_cval=0)
        if return_disc:
            disc_all[i] = imh.resize_image_to_square(disc_all[i], side, pad_cval=0)
            disc_all[i] = disc_all[i].reshape(disc_all[i].shape + (1,))
        if return_cup:
            cup_all[i] = imh.resize_image_to_square(cup_all[i], side, pad_cval=0)
            cup_all[i] = cup_all[i].reshape(cup_all[i].shape + (1,))
    if return_disc:
        if return_cup:
            return X_all, disc_all, cup_all, file_codes_all, is_ill
        return X_all, disc_all, file_codes_all, is_ill
    if return_cup:
        return X_all, cup_all, file_codes_all, is_ill
    return X_all, file_codes_all, is_ill
def get_resolution_RIM_ONE_v3():
    """Square (height, width) of RIM-ONE v3 images after post-processing."""
    side = 1424
    return (side, side)
def extract_DRISHTI_GS_train(db_folder, return_disc=True, return_cup=True):
    """
    Full images with optic disc and optic cup segmentation.
    Average segmentation and "softmap" segmentation image are given.
    50 images of various resolution close to 2040 x 1740.
    Data set is split into training and test sets. Groundtruth is available for training set only.
    This function returns Training set only.
    Required schema:
    db_folder/
        Drishti-GS1_files/
            Training/
                Images/
                    drishtiGS_{:03}.png # some numbers are omitted, like 001, 003, 004, ...
                GT/
                    drishtiGS_{:03}/
                        drishtiGS_{:03}_cdrValues.txt
                        AvgBoundary/
                            drishtiGS_{:03}_ODAvgBoundary.txt
                            drishtiGS_{:03}_CupAvgBoundary.txt
                            drishtiGS_{:03}_diskCenter.txt
                        SoftMap/
                            drishtiGS_{:03}_ODsegSoftmap.png
                            drishtiGS_{:03}_cupsegSoftmap.png

    Returns X_all plus (depending on return_disc/return_cup) disc masks,
    cup masks, and the file codes ('Training_<number>').
    """
    result_resolution = (2040, 2040)
    disc_all, cup_all, file_codes_all = [], [], []
    set_path = os.path.join(db_folder, 'Drishti-GS1_files', 'Training')
    images_path = os.path.join(set_path, 'Images')
    X_all, file_names = imh.load_set(images_path)
    # strip directories and extensions; file codes keep the numeric suffix
    rel_file_names = [os.path.split(fn)[-1] for fn in file_names]
    rel_file_names_wo_ext = [fn[:fn.rfind('.')] for fn in rel_file_names]
    file_codes = ['Training' + fn[fn.find('_'):] for fn in rel_file_names_wo_ext]
    file_codes_all.extend(file_codes)
    for fn in rel_file_names_wo_ext:
        # load the pre-computed "softmap" masks that accompany each image
        if return_disc:
            disc_segmn = imh.load_image(os.path.join(set_path, 'GT', fn,
                                                      'SoftMap', fn + '_ODsegSoftmap.png'))
            disc_all.append(disc_segmn)
        if return_cup:
            cup_segmn = imh.load_image(os.path.join(set_path, 'GT', fn,
                                                     'SoftMap', fn + '_cupsegSoftmap.png'))
            cup_all.append(cup_segmn)
    # pad/resize everything to the common square resolution
    for i in xrange(len(X_all)):
        side = result_resolution[0]
        X_all[i] = imh.resize_image_to_square(X_all[i], side, pad_cval=0)
        if return_disc:
            disc_all[i] = imh.resize_image_to_square(disc_all[i], side, pad_cval=0)
            disc_all[i] = disc_all[i].reshape(disc_all[i].shape + (1,))
        if return_cup:
            cup_all[i] = imh.resize_image_to_square(cup_all[i], side, pad_cval=0)
            cup_all[i] = cup_all[i].reshape(cup_all[i].shape + (1,))
    if return_disc:
        if return_cup:
            return X_all, disc_all, cup_all, file_codes_all
        return X_all, disc_all, file_codes_all
    if return_cup:
        return X_all, cup_all, file_codes_all
    return X_all, file_codes_all
def extract_DRISHTI_GS_test(db_folder):
    """
    Full images of the DRISHTI-GS Test split (no groundtruth is published
    for it, so only images and file codes are returned).
    51 images of various resolution close to 2040 x 1740.
    Required schema:
    db_folder/
        Drishti-GS1_files/
            Test/
                Images/
                    drishtiGS_{:03}.png # numbers overlap with train

    Returns (X_all, file_codes) where file codes look like 'Test_<number>'.
    """
    result_resolution = (2040, 2040)
    set_path = os.path.join(db_folder, 'Drishti-GS1_files', 'Test')
    X_all, file_names = imh.load_set(os.path.join(set_path, 'Images'))
    file_codes = []
    for full_name in file_names:
        base = os.path.split(full_name)[-1]
        base = base[:base.rfind('.')]
        file_codes.append('Test' + base[base.find('_'):])
    side = result_resolution[0]
    for i in xrange(len(X_all)):
        X_all[i] = imh.resize_image_to_square(X_all[i], side, pad_cval=0)
    return X_all, file_codes
def get_resolution_DRISHTI_GS():
    """Square (height, width) of DRISHTI-GS images after post-processing."""
    side = 2040
    return (side, side)
def extract_HRF(db_folder, expert=1):
    """
    Full images with primitive optic disc segmentation (as a circle).
    2336 x 3504 original, 3504 x 3504 after preprocessing.
    Accepted values for `expert`: 1, 2.
    Required schema:
    db_folder/
        Healthy/
            {:02}_h.jpg (number from 01 to 15)
        Glaucomatous/
            {:02}_h.jpg (number from 01 to 15)
        optic_disk_centers_expert_A.csv
        optic_disk_centers_expert_B.csv

    Returns (X_all, Y_all, file_codes_all, is_ill): images, circular disc
    masks of shape (H, W, 1), file codes, and per-image diagnosis labels.
    """
    orig_resolution = (2336, 3504)
    result_resolution = (3504, 3504)
    if expert == 1:
        expert_letter = 'A'
    elif expert == 2:
        expert_letter = 'B'
    else:
        # fail fast with the library's own exception (same message as the
        # other extractors) instead of a confusing NameError further down
        raise imh.ImLibException('value for "expert" argument not understood')
    anot_df = pd.read_csv(os.path.join(db_folder, 'optic_disk_centers_expert_{}.csv'.format(expert_letter)),
                          index_col=0)
    X_all, Y_all, file_codes_all, is_ill = [], [], [], []
    for pic_type in ('Healthy', 'Glaucomatous'):
        X, file_names = imh.load_set(os.path.join(db_folder, pic_type))
        X_all.extend(X)
        rel_file_names = [os.path.split(fn)[-1] for fn in file_names]
        file_codes = [fn[:fn.rfind('.')] for fn in rel_file_names]
        file_codes_all.extend(file_codes)
        for i in xrange(len(X)):
            record_str = file_codes[i]
            if expert == 2:
                # expert B's csv index has no underscore in the file code
                record_str = record_str.replace('_', '')
            anot_record = anot_df.loc[record_str]
            # cv2.circle requires plain integer center/radius; "// 2" also
            # keeps the radius an int on Python 3, where the original "/ 2"
            # produced a float and cv2 rejected it
            od_center = (int(anot_record['Pap. Center x']),
                         int(anot_record['Pap. Center y']))
            #od_center = (anot_record['vessel orig. x'], anot_record['vessel orig. y'])
            od_radius = int(anot_record['disk diameter']) // 2
            segmn_img = np.zeros(orig_resolution, dtype=np.uint8)
            cv2.circle(segmn_img, od_center, od_radius, color=1, thickness=-1)
            Y_all.append(segmn_img)
            is_ill.append(HEALTHY if pic_type == 'Healthy' else GLAUCOMA_OR_SUSPECT)
    for i in xrange(len(X_all)):
        side = result_resolution[0]
        X_all[i] = imh.resize_image_to_square(X_all[i], side, pad_cval=0)
        Y_all[i] = imh.resize_image_to_square(Y_all[i], side, pad_cval=0)
        Y_all[i] = Y_all[i].reshape(Y_all[i].shape + (1,))
    return X_all, Y_all, file_codes_all, is_ill
def get_resolution_HRF():
    """Square (height, width) of HRF images after post-processing."""
    side = 3504
    return (side, side)
| [
"imhandle.ImLibException",
"cv2.circle",
"imhandle.load_set",
"numpy.zeros",
"imhandle.resize_image_to_square",
"numpy.array",
"os.path.split",
"os.path.join"
] | [((10579, 10635), 'os.path.join', 'os.path.join', (['db_folder', '"""Drishti-GS1_files"""', '"""Training"""'], {}), "(db_folder, 'Drishti-GS1_files', 'Training')\n", (10591, 10635), False, 'import os\n'), ((10654, 10686), 'os.path.join', 'os.path.join', (['set_path', '"""Images"""'], {}), "(set_path, 'Images')\n", (10666, 10686), False, 'import os\n'), ((10711, 10736), 'imhandle.load_set', 'imh.load_set', (['images_path'], {}), '(images_path)\n', (10723, 10736), True, 'import imhandle as imh\n'), ((12860, 12912), 'os.path.join', 'os.path.join', (['db_folder', '"""Drishti-GS1_files"""', '"""Test"""'], {}), "(db_folder, 'Drishti-GS1_files', 'Test')\n", (12872, 12912), False, 'import os\n'), ((12931, 12963), 'os.path.join', 'os.path.join', (['set_path', '"""Images"""'], {}), "(set_path, 'Images')\n", (12943, 12963), False, 'import os\n'), ((12988, 13013), 'imhandle.load_set', 'imh.load_set', (['images_path'], {}), '(images_path)\n', (13000, 13013), True, 'import imhandle as imh\n'), ((895, 928), 'os.path.join', 'os.path.join', (['db_folder', '"""images"""'], {}), "(db_folder, 'images')\n", (907, 928), False, 'import os\n'), ((1365, 1381), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (1373, 1381), True, 'import numpy as np\n'), ((1401, 1442), 'numpy.zeros', 'np.zeros', (['orig_resolution'], {'dtype': 'np.uint8'}), '(orig_resolution, dtype=np.uint8)\n', (1409, 1442), True, 'import numpy as np\n'), ((1628, 1696), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['X[i][:, left_cut_thr:]', 'side'], {'pad_cval': '(0)'}), '(X[i][:, left_cut_thr:], side, pad_cval=0)\n', (1654, 1696), True, 'import imhandle as imh\n'), ((1712, 1780), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['Y[i][:, left_cut_thr:]', 'side'], {'pad_cval': '(0)'}), '(Y[i][:, left_cut_thr:], side, pad_cval=0)\n', (1738, 1780), True, 'import imhandle as imh\n'), ((8541, 8595), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['X_all[i]', 
'side'], {'pad_cval': '(0)'}), '(X_all[i], side, pad_cval=0)\n', (8567, 8595), True, 'import imhandle as imh\n'), ((11578, 11632), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['X_all[i]', 'side'], {'pad_cval': '(0)'}), '(X_all[i], side, pad_cval=0)\n', (11604, 11632), True, 'import imhandle as imh\n'), ((13325, 13379), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['X_all[i]', 'side'], {'pad_cval': '(0)'}), '(X_all[i], side, pad_cval=0)\n', (13351, 13379), True, 'import imhandle as imh\n'), ((15541, 15595), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['X_all[i]', 'side'], {'pad_cval': '(0)'}), '(X_all[i], side, pad_cval=0)\n', (15567, 15595), True, 'import imhandle as imh\n'), ((15615, 15669), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['Y_all[i]', 'side'], {'pad_cval': '(0)'}), '(Y_all[i], side, pad_cval=0)\n', (15641, 15669), True, 'import imhandle as imh\n'), ((2642, 2675), 'os.path.join', 'os.path.join', (['db_folder', 'pic_type'], {}), '(db_folder, pic_type)\n', (2654, 2675), False, 'import os\n'), ((4396, 4446), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['X[i]', 'side'], {'pad_cval': '(0)'}), '(X[i], side, pad_cval=0)\n', (4422, 4446), True, 'import imhandle as imh\n'), ((4466, 4516), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['Y[i]', 'side'], {'pad_cval': '(0)'}), '(Y[i], side, pad_cval=0)\n', (4492, 4516), True, 'import imhandle as imh\n'), ((7064, 7114), 'os.path.join', 'os.path.join', (['db_folder', 'pic_type', '"""Stereo Images"""'], {}), "(db_folder, pic_type, 'Stereo Images')\n", (7076, 7114), False, 'import os\n'), ((8646, 8703), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['disc_all[i]', 'side'], {'pad_cval': '(0)'}), '(disc_all[i], side, pad_cval=0)\n', (8672, 8703), True, 'import imhandle as imh\n'), ((8824, 8880), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['cup_all[i]', 
'side'], {'pad_cval': '(0)'}), '(cup_all[i], side, pad_cval=0)\n', (8850, 8880), True, 'import imhandle as imh\n'), ((10759, 10776), 'os.path.split', 'os.path.split', (['fn'], {}), '(fn)\n', (10772, 10776), False, 'import os\n'), ((11683, 11740), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['disc_all[i]', 'side'], {'pad_cval': '(0)'}), '(disc_all[i], side, pad_cval=0)\n', (11709, 11740), True, 'import imhandle as imh\n'), ((11861, 11917), 'imhandle.resize_image_to_square', 'imh.resize_image_to_square', (['cup_all[i]', 'side'], {'pad_cval': '(0)'}), '(cup_all[i], side, pad_cval=0)\n', (11887, 11917), True, 'import imhandle as imh\n'), ((13036, 13053), 'os.path.split', 'os.path.split', (['fn'], {}), '(fn)\n', (13049, 13053), False, 'import os\n'), ((14509, 14542), 'os.path.join', 'os.path.join', (['db_folder', 'pic_type'], {}), '(db_folder, pic_type)\n', (14521, 14542), False, 'import os\n'), ((15210, 15251), 'numpy.zeros', 'np.zeros', (['orig_resolution'], {'dtype': 'np.uint8'}), '(orig_resolution, dtype=np.uint8)\n', (15218, 15251), True, 'import numpy as np\n'), ((15264, 15330), 'cv2.circle', 'cv2.circle', (['segmn_img', 'od_center', 'od_radius'], {'color': '(1)', 'thickness': '(-1)'}), '(segmn_img, od_center, od_radius, color=1, thickness=-1)\n', (15274, 15330), False, 'import cv2\n'), ((6829, 6893), 'imhandle.ImLibException', 'imh.ImLibException', (['"""value for "expert" argument not understood"""'], {}), '(\'value for "expert" argument not understood\')\n', (6847, 6893), True, 'import imhandle as imh\n'), ((7166, 7183), 'os.path.split', 'os.path.split', (['fn'], {}), '(fn)\n', (7179, 7183), False, 'import os\n'), ((11099, 11168), 'os.path.join', 'os.path.join', (['set_path', '"""GT"""', 'fn', '"""SoftMap"""', "(fn + '_ODsegSoftmap.png')"], {}), "(set_path, 'GT', fn, 'SoftMap', fn + '_ODsegSoftmap.png')\n", (11111, 11168), False, 'import os\n'), ((11326, 11396), 'os.path.join', 'os.path.join', (['set_path', '"""GT"""', 'fn', 
'"""SoftMap"""', "(fn + '_cupsegSoftmap.png')"], {}), "(set_path, 'GT', fn, 'SoftMap', fn + '_cupsegSoftmap.png')\n", (11338, 11396), False, 'import os\n'), ((14594, 14611), 'os.path.split', 'os.path.split', (['fn'], {}), '(fn)\n', (14607, 14611), False, 'import os\n')] |
# rough work to probe / understand / visualize
# the spherical multilevel grid data structures
# produced for a given input spherical polygon
# hopefully, this will help me discover some issues
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import lib
import copy
import scipy
import scipy.spatial
from scipy.spatial import geometric_slerp
from tqdm import tqdm
# the input will be a spherical triangle that
# covers exactly 1/8 the surface area of the unit
# sphere (front right spherical triangle)
spherical_polyon = np.array([[0, 1, 0],
                             [0, 0, 1],
                             [-1, 0, 0]], dtype=np.float64)
# try applying spherical linear interpolation to improve plot:
# each polygon edge becomes n_int points on the great-circle arc between
# consecutive vertices, so matplotlib draws curved edges
N = spherical_polyon.shape[0]
n_int = 900
interpolated_polygon = np.zeros((N * n_int, 3), dtype=np.float64)
t_values = np.float64(np.linspace(0, 1, n_int))
counter = 0
for i in range(N):
    # wrap around so the last vertex connects back to the first
    if i == (N-1):
        next_index = 0
    else:
        next_index = i + 1
    interpolated_polygon[counter:(counter + n_int), ...] = geometric_slerp(spherical_polyon[i],
                                                                           spherical_polyon[next_index],
                                                                           t_values)
    counter += n_int
# build the multilevel (4 levels deep) spherical grid data structure
results = lib.cast_subgrids(spherical_polyon=spherical_polyon,
                            MAXD=4)
# per-level outputs: spherical-polygon-edge containment counts and the
# Cartesian coordinates of the grid cells
(edge_count_array_L1,
 cartesian_coords_cells_L1,
 edge_count_array_L2,
 cartesian_coords_cells_L2,
 edge_count_array_L3,
 cartesian_coords_cells_L3,
 edge_count_array_L4,
 cartesian_coords_cells_L4) = results
# plot the level 1 grid on the unit sphere
# along with the spherical polygon, albeit with
# crude matplotlib 3D handling
fig_level_1 = plt.figure()
# plotting the plain grids + centers at each
# level is also useful for debugging/algorithm
# assessment purposes
fig_level_1_centers = plt.figure()
fig_level_2_centers = plt.figure()
fig_level_3_centers = plt.figure()
fig_level_4_centers = plt.figure()
# one 3D axes per figure
ax = fig_level_1.add_subplot(111, projection='3d')
ax_centers = fig_level_1_centers.add_subplot(111, projection='3d')
ax_centers_lvl_2 = fig_level_2_centers.add_subplot(111, projection='3d')
ax_centers_lvl_3 = fig_level_3_centers.add_subplot(111, projection='3d')
ax_centers_lvl_4 = fig_level_4_centers.add_subplot(111, projection='3d')
# grid-cell center coordinates for each level; [0] selects the first element
# of each helper's return value (presumably the centers array — see lib)
grid_cell_center_coords_L1 = lib.produce_level_1_grid_centers(
        spherical_polyon)[0]
grid_cell_center_coords_L2 = lib.produce_level_n_grid_centers(
        spherical_polyon,
        level=2)[0]
grid_cell_center_coords_L3 = lib.produce_level_n_grid_centers(
        spherical_polyon,
        level=3)[0]
grid_cell_center_coords_L4 = lib.produce_level_n_grid_centers(
        spherical_polyon,
        level=4)[0]
# scatter the centers on their per-level figures
ax_centers.scatter(grid_cell_center_coords_L1[..., 0],
                   grid_cell_center_coords_L1[..., 1],
                   grid_cell_center_coords_L1[..., 2],
                   marker='.',
                   color='black')
ax_centers_lvl_2.scatter(grid_cell_center_coords_L2[..., 0],
                         grid_cell_center_coords_L2[..., 1],
                         grid_cell_center_coords_L2[..., 2],
                         marker='.',
                         color='black')
ax_centers_lvl_3.scatter(grid_cell_center_coords_L3[..., 0],
                         grid_cell_center_coords_L3[..., 1],
                         grid_cell_center_coords_L3[..., 2],
                         marker='.',
                         color='black')
ax_centers_lvl_4.scatter(grid_cell_center_coords_L4[..., 0],
                         grid_cell_center_coords_L4[..., 1],
                         grid_cell_center_coords_L4[..., 2],
                         marker='.',
                         color='black')
# level 1 cell vertices on the combined figure
ax.scatter(cartesian_coords_cells_L1[...,0],
           cartesian_coords_cells_L1[...,1],
           cartesian_coords_cells_L1[...,2],
           marker='.',
           color='black')
def _plot_level_cells(level_cells, level_label, line_color, centers_ax):
    """Draw the grid-cell outlines of one subdivision level.

    Each level's Cartesian cell coordinates arrive as sub-arrays of
    "squares" (closed cell boundaries).  Every square is drawn on the
    combined 3D axes ``ax`` (color coded per level, with the legend label
    attached to the first segment only so the legend shows one entry per
    level) and, faintly, on that level's dedicated grid-centers axes.
    """
    labeled = False
    for sub_array in level_cells:
        for square in sub_array:
            ax.plot(square[..., 0],
                    square[..., 1],
                    square[..., 2],
                    label=None if labeled else level_label,
                    color=line_color)
            centers_ax.plot(square[..., 0],
                            square[..., 1],
                            square[..., 2],
                            color='k',
                            alpha=0.3)
            labeled = True


# levels 2-4 used to be three copy-pasted loops; one call per level keeps
# the colors/labels consistent while drawing exactly the same segments
_plot_level_cells(cartesian_coords_cells_L2, 'level 2', 'green', ax_centers_lvl_2)
_plot_level_cells(cartesian_coords_cells_L3, 'level 3', 'grey', ax_centers_lvl_3)
_plot_level_cells(cartesian_coords_cells_L4, 'level 4', 'blue', ax_centers_lvl_4)
# color code cells by amount of spherical
# polygon edges contained
# here I just happen to know the max is 2
colors = {0: 'black', 1: 'orange', 2: 'red'}
# we don't want to plot over edges already plotted
# with a higher containment count, so keep track of this
dict_edge_data = {}
counter = 0
for cell, edge_count in zip(cartesian_coords_cells_L1, edge_count_array_L1):
    # parse all four edges of the cell
    # (append the first vertex again so consecutive pairs form a closed cycle)
    cycle_cell = np.empty((5, 3))
    cycle_cell[:4, ...] = cell
    cycle_cell[4, ...] = cell[0, ...]
    for i in range(4):
        edge = cycle_cell[i:i+2]
        dict_edge_data[counter] = {'edge': edge,
                                   'edge_count': edge_count}
        counter += 1
# now move through dict_edge_data and plot edges using
# color that matches higher spherical polygon edge containment
# count only
internal_dict = copy.deepcopy(dict_edge_data)
iter_count = 0
# NOTE(review): total_iter is never used below
total_iter = len(dict_edge_data)
plot = True
# should probably switch to custom
# legend, but do this for now:
has_black_legend_entry = False
has_yellow_legend_entry = False
has_red_legend_entry = False
for key, edge_entry in tqdm(dict_edge_data.items(),
                           desc='iter_count'):
    current_edge = edge_entry['edge']
    current_edge_count = edge_entry['edge_count']
    # sanity check: an edge whose cell contains a polygon edge should be
    # reasonably close to the polygon vertices
    dist = scipy.spatial.distance.cdist(spherical_polyon,
                                        current_edge).min()
    if current_edge_count > 0:
        msg = ("dist violation for current_edge_count: " + str(current_edge_count) +
               "; edge: " + str(current_edge) +
               "; distance: " + str(dist))
        assert dist <= np.sqrt(2), msg
    # an edge shared by two cells occurs twice in the dict; skip drawing
    # this copy if another copy has a higher containment count
    for subkey, subentry in internal_dict.items():
        reference_edge = subentry['edge']
        reference_count = subentry['edge_count']
        if (np.allclose(current_edge, reference_edge) or
            np.allclose(current_edge, reference_edge[::-1])):
            if current_edge_count < reference_count:
                plot = False
                break
    if plot:
        # attach each legend label only the first time its color appears
        label=None
        if current_edge_count == 0 and not has_black_legend_entry:
            label='Level 1 no edge'
            has_black_legend_entry = True
        elif current_edge_count == 1 and not has_yellow_legend_entry:
            label='Level 1 with 1 edge'
            has_yellow_legend_entry = True
        elif current_edge_count == 2 and not has_red_legend_entry:
            label='Level 1 with 2 edges'
            has_red_legend_entry = True
        ax.plot(current_edge[..., 0],
                current_edge[..., 1],
                current_edge[..., 2],
                label=label,
                color=colors[current_edge_count])
        # for the L1 centers plot we just want
        # the grid outline for now
        ax_centers.plot(current_edge[..., 0],
                        current_edge[..., 1],
                        current_edge[..., 2],
                        color='k',
                        alpha=0.3)
    plot = True
    iter_count += 1
def _add_input_polygon(target_ax):
    """Overlay the slerp-interpolated input spherical polygon on *target_ax*
    as a translucent purple patch (previously copy-pasted four times)."""
    patch = Poly3DCollection([interpolated_polygon], alpha=0.3)
    # Poly3DCollection lacks the 2d color attributes matplotlib's legend
    # machinery reads; alias them so ax.legend() does not crash
    patch._facecolors2d = patch._facecolors3d
    patch._edgecolors2d = patch._edgecolors3d
    patch.set_color('purple')
    patch.set_label('input spherical polygon')
    target_ax.add_collection3d(patch)


_add_input_polygon(ax)
ax.azim = -30
ax.elev = -30
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax_centers.set_xlabel('x')
ax_centers.set_ylabel('y')
ax_centers.set_zlabel('z')
ax_centers_lvl_2.set_xlabel('x')
ax_centers_lvl_2.set_ylabel('y')
ax_centers_lvl_2.set_zlabel('z')
_add_input_polygon(ax_centers_lvl_2)
_add_input_polygon(ax_centers_lvl_3)
_add_input_polygon(ax_centers_lvl_4)
ax.legend(loc="lower left",
          bbox_to_anchor=(0,-0.1),
          ncol=2)
ax.set_title('Prototype Multilevel Spherical Grid Data '
             'Structure Based on Published Description by \n'
             'Li et al. (2017); pre-requisite for fastest '
             'known spherical point-in-polygon algorithm',
             y=1.12,
             fontsize=8)
fig_level_1.savefig("level_1_grid.png", dpi=300)
fig_level_1.set_size_inches(10,10)
ax_centers.azim = 70
ax_centers.elev = 50
ax_centers_lvl_2.azim = 90
ax_centers_lvl_2.elev = 50
ax_centers_lvl_3.azim = 90
ax_centers_lvl_3.elev = 50
ax_centers_lvl_3.set_title('Level 3 grid centers')
ax_centers_lvl_4.azim = 90
ax_centers_lvl_4.elev = 50
ax_centers_lvl_4.set_title('Level 4 grid centers')
fig_level_1_centers.savefig("level_1_centers.png", dpi=300)
fig_level_2_centers.savefig("level_2_centers.png", dpi=300)
fig_level_3_centers.savefig("level_3_centers.png", dpi=300)
fig_level_4_centers.savefig("level_4_centers.png", dpi=300)
| [
"scipy.spatial.geometric_slerp",
"scipy.spatial.distance.cdist",
"copy.deepcopy",
"lib.produce_level_n_grid_centers",
"numpy.empty",
"numpy.allclose",
"numpy.zeros",
"lib.produce_level_1_grid_centers",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"matplotlib.pyplot.figure",
"matplotlib.use",
... | [((213, 234), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (227, 234), False, 'import matplotlib\n'), ((662, 724), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [-1, 0, 0]]'], {'dtype': 'np.float64'}), '([[0, 1, 0], [0, 0, 1], [-1, 0, 0]], dtype=np.float64)\n', (670, 724), True, 'import numpy as np\n'), ((911, 953), 'numpy.zeros', 'np.zeros', (['(N * n_int, 3)'], {'dtype': 'np.float64'}), '((N * n_int, 3), dtype=np.float64)\n', (919, 953), True, 'import numpy as np\n'), ((1359, 1419), 'lib.cast_subgrids', 'lib.cast_subgrids', ([], {'spherical_polyon': 'spherical_polyon', 'MAXD': '(4)'}), '(spherical_polyon=spherical_polyon, MAXD=4)\n', (1376, 1419), False, 'import lib\n'), ((1789, 1801), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1799, 1801), True, 'import matplotlib.pyplot as plt\n'), ((1938, 1950), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1948, 1950), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1985), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1983, 1985), True, 'import matplotlib.pyplot as plt\n'), ((2008, 2020), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2018, 2020), True, 'import matplotlib.pyplot as plt\n'), ((2043, 2055), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2053, 2055), True, 'import matplotlib.pyplot as plt\n'), ((7967, 7996), 'copy.deepcopy', 'copy.deepcopy', (['dict_edge_data'], {}), '(dict_edge_data)\n', (7980, 7996), False, 'import copy\n'), ((10144, 10195), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', (['[interpolated_polygon]'], {'alpha': '(0.3)'}), '([interpolated_polygon], alpha=0.3)\n', (10160, 10195), False, 'from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n'), ((10662, 10713), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', (['[interpolated_polygon]'], {'alpha': '(0.3)'}), '([interpolated_polygon], alpha=0.3)\n', (10678, 10713), False, 'from 
mpl_toolkits.mplot3d.art3d import Poly3DCollection\n'), ((10928, 10979), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', (['[interpolated_polygon]'], {'alpha': '(0.3)'}), '([interpolated_polygon], alpha=0.3)\n', (10944, 10979), False, 'from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n'), ((11194, 11245), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', (['[interpolated_polygon]'], {'alpha': '(0.3)'}), '([interpolated_polygon], alpha=0.3)\n', (11210, 11245), False, 'from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n'), ((976, 1000), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_int'], {}), '(0, 1, n_int)\n', (987, 1000), True, 'import numpy as np\n'), ((1173, 1249), 'scipy.spatial.geometric_slerp', 'geometric_slerp', (['spherical_polyon[i]', 'spherical_polyon[next_index]', 't_values'], {}), '(spherical_polyon[i], spherical_polyon[next_index], t_values)\n', (1188, 1249), False, 'from scipy.spatial import geometric_slerp\n'), ((2422, 2472), 'lib.produce_level_1_grid_centers', 'lib.produce_level_1_grid_centers', (['spherical_polyon'], {}), '(spherical_polyon)\n', (2454, 2472), False, 'import lib\n'), ((2539, 2598), 'lib.produce_level_n_grid_centers', 'lib.produce_level_n_grid_centers', (['spherical_polyon'], {'level': '(2)'}), '(spherical_polyon, level=2)\n', (2571, 2598), False, 'import lib\n'), ((2698, 2757), 'lib.produce_level_n_grid_centers', 'lib.produce_level_n_grid_centers', (['spherical_polyon'], {'level': '(3)'}), '(spherical_polyon, level=3)\n', (2730, 2757), False, 'import lib\n'), ((2857, 2916), 'lib.produce_level_n_grid_centers', 'lib.produce_level_n_grid_centers', (['spherical_polyon'], {'level': '(4)'}), '(spherical_polyon, level=4)\n', (2889, 2916), False, 'import lib\n'), ((7546, 7562), 'numpy.empty', 'np.empty', (['(5, 3)'], {}), '((5, 3))\n', (7554, 7562), True, 'import numpy as np\n'), ((8415, 8475), 'scipy.spatial.distance.cdist', 'scipy.spatial.distance.cdist', (['spherical_polyon', 
'current_edge'], {}), '(spherical_polyon, current_edge)\n', (8443, 8475), False, 'import scipy\n'), ((8754, 8764), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8761, 8764), True, 'import numpy as np\n'), ((8925, 8966), 'numpy.allclose', 'np.allclose', (['current_edge', 'reference_edge'], {}), '(current_edge, reference_edge)\n', (8936, 8966), True, 'import numpy as np\n'), ((8982, 9029), 'numpy.allclose', 'np.allclose', (['current_edge', 'reference_edge[::-1]'], {}), '(current_edge, reference_edge[::-1])\n', (8993, 9029), True, 'import numpy as np\n')] |
from statsmodels.compat.pandas import Appender, is_numeric_dtype
from typing import Sequence, Union
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_categorical_dtype
from scipy import stats
from statsmodels.iolib.table import SimpleTable
from statsmodels.stats.stattools import jarque_bera
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.docstring import Docstring, Parameter
from statsmodels.tools.validation import (
array_like,
bool_like,
float_like,
int_like,
)
# Percentile cut points reported in the descriptive statistics (in percent).
PERCENTILES = (1, 5, 10, 25, 50, 75, 90, 95, 99)
# The same cut points expressed as quantiles on [0, 1].
QUANTILES = np.array(PERCENTILES) / 100.0
def pd_ptp(df):
    """Peak-to-peak range (max - min) of a pandas Series/DataFrame."""
    largest = df.max()
    smallest = df.min()
    return largest - smallest
def nancount(x, axis=0):
    """Number of non-NaN entries of ``x`` along ``axis``."""
    valid = ~np.isnan(x)
    return valid.sum(axis=axis)
def nanptp(arr, axis=0):
    """NaN-ignoring peak-to-peak range along ``axis``."""
    highest = np.nanmax(arr, axis=axis)
    lowest = np.nanmin(arr, axis=axis)
    return highest - lowest
def nanuss(arr, axis=0):
    """NaN-ignoring uncorrected sum of squares along ``axis``."""
    squared = np.square(arr)
    return np.nansum(squared, axis=axis)
def nanpercentile(arr, axis=0):
    """NaN-ignoring percentiles of ``arr`` at the module-level PERCENTILES."""
    return np.nanpercentile(arr, q=PERCENTILES, axis=axis)
def nankurtosis(arr, axis=0):
    """Excess kurtosis along ``axis``, ignoring NaNs."""
    return stats.kurtosis(arr, nan_policy="omit", axis=axis)
def nanskewness(arr, axis=0):
    """Skewness along ``axis``, ignoring NaNs."""
    return stats.skew(arr, nan_policy="omit", axis=axis)
# Map from statistic name to the NaN-aware implementation used when the data
# contain missing values.
MISSING = {
    "obs": nancount,
    "mean": np.nanmean,
    "std": np.nanstd,
    "max": np.nanmax,
    "min": np.nanmin,
    "ptp": nanptp,
    "var": np.nanvar,
    "skew": nanskewness,
    "uss": nanuss,
    "kurtosis": nankurtosis,
    "percentiles": nanpercentile,
}
def _kurtosis(a):
    """
    wrapper for scipy.stats.kurtosis that returns nan instead of raising Error

    missing options
    """
    try:
        return stats.kurtosis(a)
    except ValueError:
        return np.nan
def _skew(a):
    """
    wrapper for scipy.stats.skew that returns nan instead of raising Error

    missing options
    """
    try:
        return stats.skew(a)
    except ValueError:
        return np.nan
def sign_test(samp, mu0=0):
    """
    Signs test

    Parameters
    ----------
    samp : array_like
        1d array. The sample for which you want to perform the sign test.
    mu0 : float
        Null value of the location. 0 by default, but setting it to the
        median is also common.

    Returns
    -------
    M
        The sign statistic ``(N(+) - N(-)) / 2`` where ``N(+)``/``N(-)``
        count the values strictly above/below ``mu0``; ties are discarded.
    p-value
        Two-sided p-value computed from the Binomial(N(+) + N(-), 0.5)
        distribution, interpretable as for a t-test.

    See Also
    --------
    scipy.stats.wilcoxon
    """
    arr = np.asarray(samp)
    n_above = np.sum(arr > mu0)
    n_below = np.sum(arr < mu0)
    stat = (n_above - n_below) / 2.0
    k = min(n_above, n_below)
    n_trials = n_above + n_below
    try:
        pval = stats.binomtest(k, n_trials, 0.5).pvalue
    except AttributeError:
        # Remove after min SciPy >= 1.7 (binom_test was renamed binomtest)
        pval = stats.binom_test(k, n_trials, 0.5)
    return stat, pval
# Statistics available for numeric columns.
NUMERIC_STATISTICS = (
    "nobs",
    "missing",
    "mean",
    "std_err",
    "ci",
    "std",
    "iqr",
    "iqr_normal",
    "mad",
    "mad_normal",
    "coef_var",
    "range",
    "max",
    "min",
    "skew",
    "kurtosis",
    "jarque_bera",
    "mode",
    "median",
    "percentiles",
)
# Statistics available for categorical columns.
CATEGORICAL_STATISTICS = ("nobs", "missing", "distinct", "top", "freq")
# Categorical-only statistics, preserving order and avoiding duplicates.
_additional = [
    stat for stat in CATEGORICAL_STATISTICS if stat not in NUMERIC_STATISTICS
]
DEFAULT_STATISTICS = NUMERIC_STATISTICS + tuple(_additional)
class Description:
    """
    Extended descriptive statistics for data

    Parameters
    ----------
    data : array_like
        Data to describe. Must be convertible to a pandas DataFrame.
    stats : Sequence[str], optional
        Statistics to include. If not provided the full set of statistics is
        computed. This list may evolve across versions to reflect best
        practices. Supported options are:
        "nobs", "missing", "mean", "std_err", "ci", "std", "iqr",
        "iqr_normal", "mad", "mad_normal", "coef_var", "range", "max",
        "min", "skew", "kurtosis", "jarque_bera", "mode", "freq",
        "median", "percentiles", "distinct", "top", and "freq". See Notes for
        details.
    numeric : bool, default True
        Whether to include numeric columns in the descriptive statistics.
    categorical : bool, default True
        Whether to include categorical columns in the descriptive statistics.
    alpha : float, default 0.05
        A number between 0 and 1 representing the size used to compute the
        confidence interval, which has coverage 1 - alpha.
    use_t : bool, default False
        Use the Student's t distribution to construct confidence intervals.
    percentiles : sequence[float]
        A distinct sequence of floating point values all between 0 and 100.
        The default percentiles are 1, 5, 10, 25, 50, 75, 90, 95, 99.
    ntop : int, default 5
        The number of top categorical labels to report. Default is 5.

    Attributes
    ----------
    numeric_statistics
        The list of supported statistics for numeric data
    categorical_statistics
        The list of supported statistics for categorical data
    default_statistics
        The default list of statistics

    See Also
    --------
    pandas.DataFrame.describe
        Basic descriptive statistics
    describe
        A simplified version that returns a DataFrame

    Notes
    -----
    The selectable statistics include:

    * "nobs" - Number of observations
    * "missing" - Number of missing observations
    * "mean" - Mean
    * "std_err" - Standard Error of the mean assuming no correlation
    * "ci" - Confidence interval with coverage (1 - alpha) using the normal or
      t. This option creates two entries in any tables: lower_ci and upper_ci.
    * "std" - Standard Deviation
    * "iqr" - Interquartile range
    * "iqr_normal" - Interquartile range relative to a Normal
    * "mad" - Mean absolute deviation
    * "mad_normal" - Mean absolute deviation relative to a Normal
    * "coef_var" - Coefficient of variation
    * "range" - Range between the maximum and the minimum
    * "max" - The maximum
    * "min" - The minimum
    * "skew" - The skewness defined as the standardized 3rd central moment
    * "kurtosis" - The kurtosis defined as the standardized 4th central moment
    * "jarque_bera" - The Jarque-Bera test statistic for normality based on
      the skewness and kurtosis. This option creates two entries, jarque_bera
      and jarque_beta_pval.
    * "mode" - The mode of the data. This option creates two entries in all tables,
      mode and mode_freq which is the empirical frequency of the modal value.
    * "median" - The median of the data.
    * "percentiles" - The percentiles. Values included depend on the input value of
      ``percentiles``.
    * "distinct" - The number of distinct categories in a categorical.
    * "top" - The mode common categories. Labeled top_n for n in 1, 2, ..., ``ntop``.
    * "freq" - The frequency of the common categories. Labeled freq_n for n in 1,
      2, ..., ``ntop``.
    """

    # Statistics formatted as integers in summary tables.
    _int_fmt = ["nobs", "missing", "distinct"]
    numeric_statistics = NUMERIC_STATISTICS
    categorical_statistics = CATEGORICAL_STATISTICS
    default_statistics = DEFAULT_STATISTICS

    def __init__(
        self,
        data: Union[np.ndarray, pd.Series, pd.DataFrame],
        stats: Sequence[str] = None,
        *,
        numeric: bool = True,
        categorical: bool = True,
        alpha: float = 0.05,
        use_t: bool = False,
        percentiles: Sequence[Union[int, float]] = PERCENTILES,
        ntop: int = 5,  # FIX: annotation was ``bool``; ntop is a count
    ):
        data_arr = data
        if not isinstance(data, (pd.Series, pd.DataFrame)):
            data_arr = array_like(data, "data", maxdim=2)
        if data_arr.ndim == 1:
            data = pd.Series(data)
        numeric = bool_like(numeric, "numeric")
        categorical = bool_like(categorical, "categorical")
        include = []
        col_types = ""
        if numeric:
            include.append(np.number)
            col_types = "numeric"
        if categorical:
            include.append("category")
            # FIX: was "and " which rendered as "numericand categorical"
            col_types += " and " if col_types != "" else ""
            col_types += "categorical"
        if not numeric and not categorical:
            raise ValueError(
                "At least one of numeric and categorical must be True"
            )
        self._data = pd.DataFrame(data).select_dtypes(include)
        if self._data.shape[1] == 0:
            # BUG FIX: the message was a plain string, so "{col_types}" was
            # emitted literally; it must be an f-string.
            raise ValueError(
                f"Selecting {col_types} results in an empty DataFrame"
            )
        self._is_numeric = [is_numeric_dtype(dt) for dt in self._data.dtypes]
        self._is_cat_like = [
            is_categorical_dtype(dt) for dt in self._data.dtypes
        ]

        if stats is not None:
            undef = [stat for stat in stats if stat not in DEFAULT_STATISTICS]
            if undef:
                raise ValueError(
                    f"{', '.join(undef)} are not known statistics"
                )
        self._stats = (
            list(DEFAULT_STATISTICS) if stats is None else list(stats)
        )
        self._ntop = int_like(ntop, "ntop")
        self._compute_top = "top" in self._stats
        self._compute_freq = "freq" in self._stats
        if self._compute_top and self._ntop <= 0 < sum(self._is_cat_like):
            raise ValueError("top must be a non-negative integer")

        # Expand special stats that produce multiple output rows.
        replacements = {
            "mode": ["mode", "mode_freq"],
            "ci": ["upper_ci", "lower_ci"],
            "jarque_bera": ["jarque_bera", "jarque_bera_pval"],
            "top": [f"top_{i}" for i in range(1, self._ntop + 1)],
            "freq": [f"freq_{i}" for i in range(1, self._ntop + 1)],
        }

        for key in replacements:
            if key in self._stats:
                idx = self._stats.index(key)
                self._stats = (
                    self._stats[:idx]
                    + replacements[key]
                    + self._stats[idx + 1 :]
                )

        self._percentiles = array_like(
            percentiles, "percentiles", maxdim=1, dtype="d"
        )
        self._percentiles = np.sort(self._percentiles)
        if np.unique(self._percentiles).shape[0] != self._percentiles.shape[0]:
            raise ValueError("percentiles must be distinct")
        if np.any(self._percentiles >= 100) or np.any(self._percentiles <= 0):
            raise ValueError("percentiles must be strictly between 0 and 100")
        self._alpha = float_like(alpha, "alpha")
        if not 0 < alpha < 1:
            raise ValueError("alpha must be strictly between 0 and 1")
        self._use_t = bool_like(use_t, "use_t")

    def _reorder(self, df: pd.DataFrame) -> pd.DataFrame:
        # Present rows in the user-requested statistic order.
        return df.loc[[s for s in self._stats if s in df.index]]

    @cache_readonly
    def frame(self) -> pd.DataFrame:
        """
        Descriptive statistics for both numeric and categorical data

        Returns
        -------
        DataFrame
            The statistics
        """
        numeric = self.numeric
        categorical = self.categorical
        if categorical.shape[1] == 0:
            return numeric
        elif numeric.shape[1] == 0:
            return categorical
        df = pd.concat([numeric, categorical], axis=1)
        return self._reorder(df[self._data.columns])

    @cache_readonly
    def numeric(self) -> pd.DataFrame:
        """
        Descriptive statistics for numeric data

        Returns
        -------
        DataFrame
            The statistics of the numeric columns
        """
        df: pd.DataFrame = self._data.loc[:, self._is_numeric]
        cols = df.columns
        _, k = df.shape
        std = df.std()
        count = df.count()
        mean = df.mean()
        mad = (df - mean).abs().mean()
        std_err = std.copy()
        # BUG FIX: the standard error of the mean is std / sqrt(n); the
        # previous code divided by n.
        std_err.loc[count > 0] /= np.sqrt(count.loc[count > 0])
        if self._use_t:
            q = stats.t(count - 1).ppf(1.0 - self._alpha / 2)
        else:
            q = stats.norm.ppf(1.0 - self._alpha / 2)

        def _mode(ser):
            # Mode and its count; (nan, nan) when the column is all-missing.
            mode_res = stats.mode(ser.dropna())
            if mode_res[0].shape[0] > 0:
                return [float(val) for val in mode_res]
            return np.nan, np.nan

        mode_values = df.apply(_mode).T
        if mode_values.size > 0:
            if isinstance(mode_values, pd.DataFrame):
                # pandas 1.0 or later
                mode = np.asarray(mode_values[0], dtype=float)
                mode_counts = np.asarray(mode_values[1], dtype=np.int64)
            else:
                # pandas before 1.0 returns a Series of 2-elem list
                mode = []
                mode_counts = []
                for idx in mode_values.index:
                    val = mode_values.loc[idx]
                    mode.append(val[0])
                    mode_counts.append(val[1])
                mode = np.atleast_1d(mode)
                mode_counts = np.atleast_1d(mode_counts)
        else:
            mode = mode_counts = np.empty(0)
        loc = count > 0
        mode_freq = np.full(mode.shape[0], np.nan)
        mode_freq[loc] = mode_counts[loc] / count.loc[loc]

        # TODO: Workaround for pandas AbstractMethodError in extension
        #  types. Remove when quantile is supported for these
        _df = df
        try:
            from pandas.api.types import is_extension_array_dtype

            _df = df.copy()
            for col in df:
                if is_extension_array_dtype(df[col].dtype):
                    _df[col] = _df[col].astype(object).fillna(np.nan)
        except ImportError:
            pass

        if df.shape[1] > 0:
            iqr = _df.quantile(0.75) - _df.quantile(0.25)
        else:
            iqr = mean

        def _safe_jarque_bera(c):
            # Jarque-Bera needs at least 2 observations.
            a = np.asarray(c)
            if a.shape[0] < 2:
                return (np.nan,) * 4
            return jarque_bera(a)

        jb = df.apply(
            lambda x: list(_safe_jarque_bera(x.dropna())), result_type="expand"
        ).T
        nan_mean = mean.copy()
        # Avoid division by zero in the coefficient of variation.
        nan_mean.loc[nan_mean == 0] = np.nan
        coef_var = std / nan_mean

        results = {
            "nobs": pd.Series(
                np.ones(k, dtype=np.int64) * df.shape[0], index=cols
            ),
            "missing": df.shape[0] - count,
            "mean": mean,
            "std_err": std_err,
            "upper_ci": mean + q * std_err,
            "lower_ci": mean - q * std_err,
            "std": std,
            "iqr": iqr,
            "mad": mad,
            "coef_var": coef_var,
            "range": pd_ptp(df),
            "max": df.max(),
            "min": df.min(),
            "skew": jb[2],
            "kurtosis": jb[3],
            "iqr_normal": iqr / np.diff(stats.norm.ppf([0.25, 0.75])),
            "mad_normal": mad / np.sqrt(2 / np.pi),
            "jarque_bera": jb[0],
            "jarque_bera_pval": jb[1],
            "mode": pd.Series(mode, index=cols),
            "mode_freq": pd.Series(mode_freq, index=cols),
            "median": df.median(),
        }
        final = {k: v for k, v in results.items() if k in self._stats}
        results_df = pd.DataFrame(
            list(final.values()), columns=cols, index=list(final.keys())
        )
        if "percentiles" not in self._stats:
            return results_df
        # Pandas before 1.0 cannot handle empty DF
        if df.shape[1] > 0:
            # TODO: Remove when extension types support quantile
            perc = _df.quantile(self._percentiles / 100).astype(float)
        else:
            perc = pd.DataFrame(index=self._percentiles / 100, dtype=float)
        if np.all(np.floor(100 * perc.index) == (100 * perc.index)):
            perc.index = [f"{int(100 * idx)}%" for idx in perc.index]
        else:
            # Find a scale at which all percentile labels are distinct.
            dupe = True
            scale = 100
            index = perc.index
            while dupe:
                scale *= 10
                idx = np.floor(scale * perc.index)
                if np.all(np.diff(idx) > 0):
                    dupe = False
            index = np.floor(scale * index) / (scale / 100)
            fmt = f"0.{len(str(scale//100))-1}f"
            output = f"{{0:{fmt}}}%"
            perc.index = [output.format(val) for val in index]

        # Add in the names of the percentiles to the output
        self._stats = self._stats + perc.index.tolist()

        return self._reorder(pd.concat([results_df, perc], axis=0))

    @cache_readonly
    def categorical(self) -> pd.DataFrame:
        """
        Descriptive statistics for categorical data

        Returns
        -------
        DataFrame
            The statistics of the categorical columns
        """

        df = self._data.loc[:, [col for col in self._is_cat_like]]
        k = df.shape[1]
        cols = df.columns
        vc = {col: df[col].value_counts(normalize=True) for col in df}
        distinct = pd.Series(
            {col: vc[col].shape[0] for col in vc}, dtype=np.int64
        )
        top = {}
        freq = {}
        for col in vc:
            single = vc[col]
            if single.shape[0] >= self._ntop:
                top[col] = single.index[: self._ntop]
                # BUG FIX: was hard-coded ``single.iloc[:5]``; must honor ntop
                freq[col] = np.asarray(single.iloc[: self._ntop])
            else:
                # Fewer categories than ntop: pad with None / NaN.
                val = list(single.index)
                val += [None] * (self._ntop - len(val))
                top[col] = val
                freq_val = list(single)
                freq_val += [np.nan] * (self._ntop - len(freq_val))
                freq[col] = np.asarray(freq_val)
        index = [f"top_{i}" for i in range(1, self._ntop + 1)]
        top_df = pd.DataFrame(top, dtype="object", index=index, columns=cols)
        index = [f"freq_{i}" for i in range(1, self._ntop + 1)]
        freq_df = pd.DataFrame(freq, dtype="object", index=index, columns=cols)

        results = {
            "nobs": pd.Series(
                np.ones(k, dtype=np.int64) * df.shape[0], index=cols
            ),
            "missing": df.shape[0] - df.count(),
            "distinct": distinct,
        }
        final = {k: v for k, v in results.items() if k in self._stats}
        results_df = pd.DataFrame(
            list(final.values()),
            columns=cols,
            index=list(final.keys()),
            dtype="object",
        )
        if self._compute_top:
            results_df = pd.concat([results_df, top_df], axis=0)
        if self._compute_freq:
            results_df = pd.concat([results_df, freq_df], axis=0)
        return self._reorder(results_df)

    def summary(self) -> SimpleTable:
        """
        Summary table of the descriptive statistics

        Returns
        -------
        SimpleTable
            A table instance supporting export to text, csv and LaTeX
        """
        df = self.frame.astype(object)
        df = df.fillna("")
        cols = [str(col) for col in df.columns]
        stubs = [str(idx) for idx in df.index]
        data = []
        for _, row in df.iterrows():
            data.append([v for v in row])

        def _formatter(v):
            # Integers render without decimals; other numbers use 4 sig figs.
            if isinstance(v, str):
                return v
            elif v // 1 == v:
                return str(int(v))
            return f"{v:0.4g}"

        return SimpleTable(
            data,
            header=cols,
            stubs=stubs,
            title="Descriptive Statistics",
            txt_fmt={"data_fmts": {0: "%s", 1: _formatter}},
            datatypes=[1] * len(data),
        )

    def __str__(self) -> str:
        return str(self.summary().as_text())
# Reuse the Description class docstring for the describe() function below,
# rewriting the sections that differ: a function returns a DataFrame, has no
# attributes, and should cross-reference the class.
ds = Docstring(Description.__doc__)
ds.replace_block(
    "Returns", Parameter(None, "DataFrame", ["Descriptive statistics"])
)
ds.replace_block("Attributes", [])
ds.replace_block(
    "See Also",
    [
        (
            [("pandas.DataFrame.describe", None)],
            ["Basic descriptive statistics"],
        ),
        (
            [("Description", None)],
            ["Descriptive statistics class with additional output options"],
        ),
    ],
)
@Appender(str(ds))
def describe(
    data: Union[np.ndarray, pd.Series, pd.DataFrame],
    stats: Sequence[str] = None,
    *,
    numeric: bool = True,
    categorical: bool = True,
    alpha: float = 0.05,
    use_t: bool = False,
    percentiles: Sequence[Union[int, float]] = PERCENTILES,
    ntop: int = 5,  # FIX: annotation was ``bool``; ntop is a count
) -> pd.DataFrame:
    # Thin functional wrapper: build a Description and return its frame.
    # The docstring is appended from ``ds`` via the decorator above.
    return Description(
        data,
        stats,
        numeric=numeric,
        categorical=categorical,
        alpha=alpha,
        use_t=use_t,
        percentiles=percentiles,
        ntop=ntop,
    ).frame
class Describe(object):
    """
    Removed.
    """

    def __init__(self, dataset):
        # Kept only so existing imports fail loudly with a clear message.
        msg = "Describe has been removed"
        raise NotImplementedError(msg)
| [
"numpy.nanpercentile",
"numpy.sum",
"numpy.empty",
"numpy.floor",
"numpy.ones",
"numpy.isnan",
"statsmodels.tools.validation.bool_like",
"statsmodels.tools.docstring.Docstring",
"statsmodels.compat.pandas.is_numeric_dtype",
"pandas.core.dtypes.common.is_categorical_dtype",
"numpy.unique",
"sta... | [((19792, 19822), 'statsmodels.tools.docstring.Docstring', 'Docstring', (['Description.__doc__'], {}), '(Description.__doc__)\n', (19801, 19822), False, 'from statsmodels.tools.docstring import Docstring, Parameter\n'), ((610, 631), 'numpy.array', 'np.array', (['PERCENTILES'], {}), '(PERCENTILES)\n', (618, 631), True, 'import numpy as np\n'), ((890, 920), 'numpy.nansum', 'np.nansum', (['(arr ** 2)'], {'axis': 'axis'}), '(arr ** 2, axis=axis)\n', (899, 920), True, 'import numpy as np\n'), ((966, 1011), 'numpy.nanpercentile', 'np.nanpercentile', (['arr', 'PERCENTILES'], {'axis': 'axis'}), '(arr, PERCENTILES, axis=axis)\n', (982, 1011), True, 'import numpy as np\n'), ((1055, 1104), 'scipy.stats.kurtosis', 'stats.kurtosis', (['arr'], {'axis': 'axis', 'nan_policy': '"""omit"""'}), "(arr, axis=axis, nan_policy='omit')\n", (1069, 1104), False, 'from scipy import stats\n'), ((1148, 1193), 'scipy.stats.skew', 'stats.skew', (['arr'], {'axis': 'axis', 'nan_policy': '"""omit"""'}), "(arr, axis=axis, nan_policy='omit')\n", (1158, 1193), False, 'from scipy import stats\n'), ((2805, 2821), 'numpy.asarray', 'np.asarray', (['samp'], {}), '(samp)\n', (2815, 2821), True, 'import numpy as np\n'), ((2832, 2850), 'numpy.sum', 'np.sum', (['(samp > mu0)'], {}), '(samp > mu0)\n', (2838, 2850), True, 'import numpy as np\n'), ((2861, 2879), 'numpy.sum', 'np.sum', (['(samp < mu0)'], {}), '(samp < mu0)\n', (2867, 2879), True, 'import numpy as np\n'), ((19856, 19912), 'statsmodels.tools.docstring.Parameter', 'Parameter', (['None', '"""DataFrame"""', "['Descriptive statistics']"], {}), "(None, 'DataFrame', ['Descriptive statistics'])\n", (19865, 19912), False, 'from statsmodels.tools.docstring import Docstring, Parameter\n'), ((798, 823), 'numpy.nanmax', 'np.nanmax', (['arr'], {'axis': 'axis'}), '(arr, axis=axis)\n', (807, 823), True, 'import numpy as np\n'), ((826, 851), 'numpy.nanmin', 'np.nanmin', (['arr'], {'axis': 'axis'}), '(arr, axis=axis)\n', (835, 851), True, 'import numpy as 
np\n'), ((1628, 1645), 'scipy.stats.kurtosis', 'stats.kurtosis', (['a'], {}), '(a)\n', (1642, 1645), False, 'from scipy import stats\n'), ((1856, 1869), 'scipy.stats.skew', 'stats.skew', (['a'], {}), '(a)\n', (1866, 1869), False, 'from scipy import stats\n'), ((8044, 8073), 'statsmodels.tools.validation.bool_like', 'bool_like', (['numeric', '"""numeric"""'], {}), "(numeric, 'numeric')\n", (8053, 8073), False, 'from statsmodels.tools.validation import array_like, bool_like, float_like, int_like\n'), ((8096, 8133), 'statsmodels.tools.validation.bool_like', 'bool_like', (['categorical', '"""categorical"""'], {}), "(categorical, 'categorical')\n", (8105, 8133), False, 'from statsmodels.tools.validation import array_like, bool_like, float_like, int_like\n'), ((9365, 9387), 'statsmodels.tools.validation.int_like', 'int_like', (['ntop', '"""ntop"""'], {}), "(ntop, 'ntop')\n", (9373, 9387), False, 'from statsmodels.tools.validation import array_like, bool_like, float_like, int_like\n'), ((10300, 10359), 'statsmodels.tools.validation.array_like', 'array_like', (['percentiles', '"""percentiles"""'], {'maxdim': '(1)', 'dtype': '"""d"""'}), "(percentiles, 'percentiles', maxdim=1, dtype='d')\n", (10310, 10359), False, 'from statsmodels.tools.validation import array_like, bool_like, float_like, int_like\n'), ((10410, 10436), 'numpy.sort', 'np.sort', (['self._percentiles'], {}), '(self._percentiles)\n', (10417, 10436), True, 'import numpy as np\n'), ((10758, 10784), 'statsmodels.tools.validation.float_like', 'float_like', (['alpha', '"""alpha"""'], {}), "(alpha, 'alpha')\n", (10768, 10784), False, 'from statsmodels.tools.validation import array_like, bool_like, float_like, int_like\n'), ((10908, 10933), 'statsmodels.tools.validation.bool_like', 'bool_like', (['use_t', '"""use_t"""'], {}), "(use_t, 'use_t')\n", (10917, 10933), False, 'from statsmodels.tools.validation import array_like, bool_like, float_like, int_like\n'), ((11502, 11543), 'pandas.concat', 'pd.concat', 
(['[numeric, categorical]'], {'axis': '(1)'}), '([numeric, categorical], axis=1)\n', (11511, 11543), True, 'import pandas as pd\n'), ((13329, 13359), 'numpy.full', 'np.full', (['mode.shape[0]', 'np.nan'], {}), '(mode.shape[0], np.nan)\n', (13336, 13359), True, 'import numpy as np\n'), ((17155, 17219), 'pandas.Series', 'pd.Series', (['{col: vc[col].shape[0] for col in vc}'], {'dtype': 'np.int64'}), '({col: vc[col].shape[0] for col in vc}, dtype=np.int64)\n', (17164, 17219), True, 'import pandas as pd\n'), ((17868, 17928), 'pandas.DataFrame', 'pd.DataFrame', (['top'], {'dtype': '"""object"""', 'index': 'index', 'columns': 'cols'}), "(top, dtype='object', index=index, columns=cols)\n", (17880, 17928), True, 'import pandas as pd\n'), ((18011, 18072), 'pandas.DataFrame', 'pd.DataFrame', (['freq'], {'dtype': '"""object"""', 'index': 'index', 'columns': 'cols'}), "(freq, dtype='object', index=index, columns=cols)\n", (18023, 18072), True, 'import pandas as pd\n'), ((7925, 7959), 'statsmodels.tools.validation.array_like', 'array_like', (['data', '"""data"""'], {'maxdim': '(2)'}), "(data, 'data', maxdim=2)\n", (7935, 7959), False, 'from statsmodels.tools.validation import array_like, bool_like, float_like, int_like\n'), ((8010, 8025), 'pandas.Series', 'pd.Series', (['data'], {}), '(data)\n', (8019, 8025), True, 'import pandas as pd\n'), ((8833, 8853), 'statsmodels.compat.pandas.is_numeric_dtype', 'is_numeric_dtype', (['dt'], {}), '(dt)\n', (8849, 8853), False, 'from statsmodels.compat.pandas import Appender, is_numeric_dtype\n'), ((8925, 8949), 'pandas.core.dtypes.common.is_categorical_dtype', 'is_categorical_dtype', (['dt'], {}), '(dt)\n', (8945, 8949), False, 'from pandas.core.dtypes.common import is_categorical_dtype\n'), ((10589, 10621), 'numpy.any', 'np.any', (['(self._percentiles >= 100)'], {}), '(self._percentiles >= 100)\n', (10595, 10621), True, 'import numpy as np\n'), ((10625, 10655), 'numpy.any', 'np.any', (['(self._percentiles <= 0)'], {}), '(self._percentiles 
<= 0)\n', (10631, 10655), True, 'import numpy as np\n'), ((12257, 12294), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['(1.0 - self._alpha / 2)'], {}), '(1.0 - self._alpha / 2)\n', (12271, 12294), False, 'from scipy import stats\n'), ((13273, 13284), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (13281, 13284), True, 'import numpy as np\n'), ((14053, 14066), 'numpy.asarray', 'np.asarray', (['c'], {}), '(c)\n', (14063, 14066), True, 'import numpy as np\n'), ((14154, 14168), 'statsmodels.stats.stattools.jarque_bera', 'jarque_bera', (['a'], {}), '(a)\n', (14165, 14168), False, 'from statsmodels.stats.stattools import jarque_bera\n'), ((15192, 15219), 'pandas.Series', 'pd.Series', (['mode'], {'index': 'cols'}), '(mode, index=cols)\n', (15201, 15219), True, 'import pandas as pd\n'), ((15246, 15278), 'pandas.Series', 'pd.Series', (['mode_freq'], {'index': 'cols'}), '(mode_freq, index=cols)\n', (15255, 15278), True, 'import pandas as pd\n'), ((15837, 15893), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '(self._percentiles / 100)', 'dtype': 'float'}), '(index=self._percentiles / 100, dtype=float)\n', (15849, 15893), True, 'import pandas as pd\n'), ((16663, 16700), 'pandas.concat', 'pd.concat', (['[results_df, perc]'], {'axis': '(0)'}), '([results_df, perc], axis=0)\n', (16672, 16700), True, 'import pandas as pd\n'), ((18599, 18638), 'pandas.concat', 'pd.concat', (['[results_df, top_df]'], {'axis': '(0)'}), '([results_df, top_df], axis=0)\n', (18608, 18638), True, 'import pandas as pd\n'), ((18695, 18735), 'pandas.concat', 'pd.concat', (['[results_df, freq_df]'], {'axis': '(0)'}), '([results_df, freq_df], axis=0)\n', (18704, 18735), True, 'import pandas as pd\n'), ((732, 743), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (740, 743), True, 'import numpy as np\n'), ((8611, 8629), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (8623, 8629), True, 'import pandas as pd\n'), ((12688, 12727), 'numpy.asarray', 'np.asarray', (['mode_values[0]'], 
{'dtype': 'float'}), '(mode_values[0], dtype=float)\n', (12698, 12727), True, 'import numpy as np\n'), ((12758, 12800), 'numpy.asarray', 'np.asarray', (['mode_values[1]'], {'dtype': 'np.int64'}), '(mode_values[1], dtype=np.int64)\n', (12768, 12800), True, 'import numpy as np\n'), ((13149, 13168), 'numpy.atleast_1d', 'np.atleast_1d', (['mode'], {}), '(mode)\n', (13162, 13168), True, 'import numpy as np\n'), ((13199, 13225), 'numpy.atleast_1d', 'np.atleast_1d', (['mode_counts'], {}), '(mode_counts)\n', (13212, 13225), True, 'import numpy as np\n'), ((13722, 13761), 'pandas.api.types.is_extension_array_dtype', 'is_extension_array_dtype', (['df[col].dtype'], {}), '(df[col].dtype)\n', (13746, 13761), False, 'from pandas.api.types import is_extension_array_dtype\n'), ((15079, 15097), 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), '(2 / np.pi)\n', (15086, 15097), True, 'import numpy as np\n'), ((15912, 15938), 'numpy.floor', 'np.floor', (['(100 * perc.index)'], {}), '(100 * perc.index)\n', (15920, 15938), True, 'import numpy as np\n'), ((16200, 16228), 'numpy.floor', 'np.floor', (['(scale * perc.index)'], {}), '(scale * perc.index)\n', (16208, 16228), True, 'import numpy as np\n'), ((16327, 16350), 'numpy.floor', 'np.floor', (['(scale * index)'], {}), '(scale * index)\n', (16335, 16350), True, 'import numpy as np\n'), ((17457, 17484), 'numpy.asarray', 'np.asarray', (['single.iloc[:5]'], {}), '(single.iloc[:5])\n', (17467, 17484), True, 'import numpy as np\n'), ((17767, 17787), 'numpy.asarray', 'np.asarray', (['freq_val'], {}), '(freq_val)\n', (17777, 17787), True, 'import numpy as np\n'), ((10448, 10476), 'numpy.unique', 'np.unique', (['self._percentiles'], {}), '(self._percentiles)\n', (10457, 10476), True, 'import numpy as np\n'), ((12181, 12199), 'scipy.stats.t', 'stats.t', (['(count - 1)'], {}), '(count - 1)\n', (12188, 12199), False, 'from scipy import stats\n'), ((14463, 14489), 'numpy.ones', 'np.ones', (['k'], {'dtype': 'np.int64'}), '(k, dtype=np.int64)\n', 
(14470, 14489), True, 'import numpy as np\n'), ((15016, 15044), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['[0.25, 0.75]'], {}), '([0.25, 0.75])\n', (15030, 15044), False, 'from scipy import stats\n'), ((18141, 18167), 'numpy.ones', 'np.ones', (['k'], {'dtype': 'np.int64'}), '(k, dtype=np.int64)\n', (18148, 18167), True, 'import numpy as np\n'), ((16255, 16267), 'numpy.diff', 'np.diff', (['idx'], {}), '(idx)\n', (16262, 16267), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import open3d as o3d
# CLI: --file points at a .npy dump of point-cloud data (rows are points; the
# first three columns are assumed to be x, y, z -- see main()).
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=str, default='../carla_results/auto_pilot_v3_42/eval_routes_06_12_23_30_25/lidar_360/0000.npy', help='npy point cloud')
def main():
    """Load the .npy point cloud named by ``args.file`` and display it."""
    raw_points = np.load(args.file)
    cloud = o3d.geometry.PointCloud()
    # Only the first three columns (x, y, z) are used for geometry.
    cloud.points = o3d.utility.Vector3dVector(raw_points[:, 0:3])
    print(np.asarray(cloud.points))
    o3d.visualization.draw_geometries([cloud])
if __name__ == '__main__':
    # NOTE(review): ``global`` at module scope is a no-op; ``args`` is simply
    # a module-level name assigned here before main() reads it.
    global args
    args = parser.parse_args()
main() | [
"numpy.load",
"argparse.ArgumentParser",
"numpy.asarray",
"open3d.geometry.PointCloud",
"open3d.visualization.draw_geometries",
"open3d.utility.Vector3dVector"
] | [((70, 95), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (93, 95), False, 'import argparse\n'), ((282, 300), 'numpy.load', 'np.load', (['args.file'], {}), '(args.file)\n', (289, 300), True, 'import numpy as np\n'), ((312, 337), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (335, 337), True, 'import open3d as o3d\n'), ((356, 399), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pcd_npy[:, 0:3]'], {}), '(pcd_npy[:, 0:3])\n', (382, 399), True, 'import open3d as o3d\n'), ((439, 479), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[pcd]'], {}), '([pcd])\n', (472, 479), True, 'import open3d as o3d\n'), ((410, 432), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (420, 432), True, 'import numpy as np\n')] |
import gudhi
import numpy as np
from numpy import matlib
import random
import gensim
from gensim.models import Word2Vec
import scipy.sparse
# Some function that will be useful for the rest of the code
def signed_faces(s):
    """Return the codimension-1 faces of simplex ``s``.

    Each face is returned as ``(sign, face)`` where the sign alternates
    as ``(-1)**i`` for the face obtained by dropping vertex ``i``.
    """
    return [((-1) ** i, s[:i] + s[i + 1:]) for i in range(len(s))]
def signed_cofaces(s, cplx):
    # returns all cofaces (of codim 1) of simplex s
    # cplx.get_cofaces yields (coface, filtration_value) pairs; the
    # filtration value is discarded and the sign of s inside each coface
    # is attached instead.
    return [(sign(s, x), x) for (x, eps) in cplx.get_cofaces(s, 1)]
def sign(s, t):
    """Sign of face ``s`` inside simplex ``t``.

    Returns None when ``t`` is not one dimension higher than ``s``;
    otherwise ``(-1)**i`` where ``i`` is the position of the vertex of
    ``t`` that is missing from ``s``.
    """
    if len(t) != len(s) + 1:
        return None
    for position, vertex in enumerate(s):
        if vertex != t[position]:
            return (-1) ** position
    # All leading vertices matched: the dropped vertex is the last one.
    return (-1) ** len(s)
def hacky_get_idx(s, cplx):
    # Get index of the simplices
    # assemble() smuggles each k-simplex's integer index into its filtration
    # value; recover it here and check it is a whole number.
    i = cplx.filtration(s)
    assert(i.is_integer())
    return int(i)
def assemble(cplx, k, scheme = "uniform", laziness = None):
    ## Assemble the transition matrix over the k-simplices of cplx.
    ## Returns a sparse (N, N) CSR matrix, N = number of k-simplices.
    # We are using this incredibly ugly hack to store the indices of the simplices
    # as filtration values since keys are not accessible
    # through the GUDHI Python API.
    # 16777217 = 2**24 + 1: beyond this, float filtration values can no
    # longer represent every integer index exactly.
    assert(cplx.num_simplices() < 16777217)
    assert(scheme in ["uniform", "uniform-lazy", "uniform-multicount"])
    if scheme == "uniform-lazy":
        # laziness = probability of staying in place at each step
        assert(laziness is not None)
        assert(laziness >= 0 and laziness <= 1)
    simplices = [s for (s, eps) in cplx.get_filtration()]
    ordering = []
    N = 0
    # Index the k-simplices 0..N-1 via their filtration values; everything
    # else is pushed to +inf so only k-simplices appear first.
    for s in simplices:
        if len(s) == k + 1:
            cplx.assign_filtration(s, float(N))
            ordering.append(s)
            N += 1
        else:
            cplx.assign_filtration(s, np.inf)
    cplx.initialize_filtration()
    # COO-style triplets for the transition matrix.
    row_inds = []
    col_inds = []
    data = []
    for (s, i) in cplx.get_filtration():
        if i >= N:
            break
        assert(i.is_integer())
        i = int(i)
        # uniform, uniform-lazy, uniform-multicount
        if scheme.startswith("uniform"):
            # Neighbors of s: upper adjacency (shared coface) and lower
            # adjacency (shared face).
            s_faces = signed_faces(s)
            s_cofaces = signed_cofaces(s, cplx)
            s_up = []
            for (a, t) in s_cofaces:
                s_up += [(-a*b, u) for (b, u) in signed_faces(t)]
            s_down = []
            for (a, t) in s_faces:
                s_down += [(-a*b, u) for (b, u) in signed_cofaces(t, cplx)]
            ## We are not considering orientations so we set all signs to 1
            s_up = [(1, t) for (foo, t) in s_up]
            s_down = [(1, t) for (foo, t) in s_down]
            if scheme == "uniform-multicount":
                # Keep duplicates: a neighbor reachable both ways counts twice.
                s_neigh_idxs = [(a, hacky_get_idx(t, cplx)) for (a,t) in s_down + s_up]
            else:
                s_neigh_idxs = list(set([(a, hacky_get_idx(t, cplx)) for (a,t) in s_down + s_up]))
            if scheme == "uniform-lazy":
                if len(s_neigh_idxs) == 1:
                    # Only neighbor is s itself: no mass to distribute.
                    probs = 0.0
                else:
                    num_self_neigh = 0
                    for (sgn, j) in s_neigh_idxs:
                        if j == i:
                            num_self_neigh += 1
                    # Spread (1 - laziness) uniformly over the true neighbors.
                    probs = (1.0-laziness)/(len(s_neigh_idxs) - num_self_neigh)
            else:
                probs = 1.0/len(s_neigh_idxs)
            for (sgn, j) in s_neigh_idxs:
                row_inds.append(i)
                col_inds.append(j)
                if scheme == "uniform-lazy" and j == i:
                    data.append(laziness)
                else:
                    data.append(probs)
    return scipy.sparse.csr_matrix((data, (row_inds, col_inds)), shape=(N, N))
def walk(smplx, walk_length, P):
    """Perform one random walk of ``walk_length`` steps starting at ``smplx``.

    ``P`` is the precomputed transition matrix; ``P[s]`` is expected to
    index the row of transition probabilities out of simplex ``s``.
    Returns the trajectory as a list of ``walk_length + 1`` simplex indices.
    """
    states = np.arange(P.shape[0])
    trajectory = [smplx]
    current = smplx
    for _ in range(walk_length):
        current = np.random.choice(states, size=1, p=P[current])[0]
        trajectory.append(current)
    return trajectory
def RandomWalks(walk_length, number_walks, P, seed=None):
    """Perform a fixed number of random walks starting at each simplex.

    Args:
        walk_length: length of each random walk.
        number_walks: how many walks to start from every simplex.
        P: precomputed transition matrix over the simplicial complex.
        seed: optional RNG seed applied before shuffling for reproducibility.

    Returns:
        Shuffled list of all random walks (each a list of simplex indices).
    """
    Walks = []  # all random walks of length walk_length
    for _ in range(number_walks):
        for smplx in range(P.shape[0]):
            Walks.append(walk(smplx, walk_length, P))
    # Originally both branches shuffled; only the seeding is conditional.
    if seed is not None:
        np.random.seed(seed)
    np.random.shuffle(Walks)
    return Walks
def save_random_walks(Walks, filename):
    """Append the walks to a text file, one comma-separated line per walk.

    Each walk ``[a, b, c]`` is written as ``"a, b, c"`` (the list repr with
    its surrounding brackets stripped). Uses a context manager so the file
    is closed even if a write fails.
    """
    with open(filename, "a") as fh:
        for w in Walks:
            fh.write(str(w)[1:-1] + "\n")
def load_walks(filename):
    """Load a file with precomputed random walks.

    Each line is expected in the format produced by ``save_random_walks``
    (comma-separated integers). Uses ``with`` so the file handle is closed
    even on a parse error (the original leaked the handle).

    Returns:
        List of walks, each a list of ints.
    """
    with open(filename, 'r') as fh:
        lines = fh.readlines()
    walks = []
    for line in lines:
        steps = []
        line = line[0:-1]  # strip trailing newline
        for el in line.split('], ['):
            steps.append([int(s) for s in el.split(', ')])
        walks.append(steps[0])
    return walks
def Embedding(Walks, emb_dim, epochs=5, filename='k-simplex2vec_embedding.model'):
    """Embed the k-simplices using the gensim word2vec package.

    Args:
        Walks: list of random walks (lists of simplex indices).
        emb_dim: dimension of the embedding vectors.
        epochs: number of training iterations.
        filename: path under which the trained model is saved.

    Returns:
        The trained gensim Word2Vec model.
    """
    # word2vec expects sentences of string tokens, so stringify each step
    # (stripping spaces so tokens have a canonical form).
    sentences = [
        [str(step).replace(' ', '') for step in wlk]
        for wlk in Walks
    ]
    model = Word2Vec(sentences, size=emb_dim, window=3, min_count=0, sg=1, workers=1, iter=epochs)
    model.save(filename)
    return model
| [
"numpy.random.seed",
"gensim.models.Word2Vec",
"numpy.arange",
"numpy.random.choice",
"numpy.random.shuffle"
] | [((3892, 3913), 'numpy.arange', 'np.arange', (['P.shape[0]'], {}), '(P.shape[0])\n', (3901, 3913), True, 'import numpy as np\n'), ((5599, 5689), 'gensim.models.Word2Vec', 'Word2Vec', (['walks_str'], {'size': 'emb_dim', 'window': '(3)', 'min_count': '(0)', 'sg': '(1)', 'workers': '(1)', 'iter': 'epochs'}), '(walks_str, size=emb_dim, window=3, min_count=0, sg=1, workers=1,\n iter=epochs)\n', (5607, 5689), False, 'from gensim.models import Word2Vec\n'), ((4440, 4460), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4454, 4460), True, 'import numpy as np\n'), ((4469, 4493), 'numpy.random.shuffle', 'np.random.shuffle', (['Walks'], {}), '(Walks)\n', (4486, 4493), True, 'import numpy as np\n'), ((4513, 4537), 'numpy.random.shuffle', 'np.random.shuffle', (['Walks'], {}), '(Walks)\n', (4530, 4537), True, 'import numpy as np\n'), ((3993, 4032), 'numpy.random.choice', 'np.random.choice', (['c'], {'size': '(1)', 'p': 'P[smplx]'}), '(c, size=1, p=P[smplx])\n', (4009, 4032), True, 'import numpy as np\n')] |
import numpy as np
import random
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import cv2
class EnvMoveTogether(object):
    """Two-agent cooperative grid world: both agents must grab a box and
    push it together.

    The map is a 15x15 occupancy grid (1 = blocked, 0 = free). An agent
    "catches" the box when it stands on the same row, directly left or
    right of it; once both agents hold the box, the trio only moves when
    both agents pick the same direction. The episode ends with reward 10
    once both agents hold the box, and the environment auto-resets.

    Actions: 0 = up, 1 = down, 2 = left, 3 = right (anything else: no-op).
    """

    def __init__(self):
        # Static map layout; 1 = wall, 0 = free cell.
        self.raw_occupancy = np.zeros((15, 15))
        for i in range(15):
            # Outer border walls plus three full horizontal wall rows.
            self.raw_occupancy[0, i] = 1
            self.raw_occupancy[i, 0] = 1
            self.raw_occupancy[14, i] = 1
            self.raw_occupancy[i, 14] = 1
            self.raw_occupancy[1, i] = 1
            self.raw_occupancy[5, i] = 1
            self.raw_occupancy[6, i] = 1
        # Carve doorways/corridors out of the wall rows.
        for row, col in ((1, 6), (1, 7), (1, 8),
                         (5, 1), (5, 2), (5, 3), (5, 4),
                         (6, 1), (6, 2), (6, 3), (6, 4),
                         (6, 6), (6, 7), (6, 8)):
            self.raw_occupancy[row, col] = 0
        # Solid 3x3 obstacle block beneath the box start position.
        for row in (11, 12, 13):
            for col in (6, 7, 8):
                self.raw_occupancy[row, col] = 1
        self._init_episode_state()

    def _init_episode_state(self):
        """(Re)initialise per-episode state: positions, occupancy, catch flags."""
        self.occupancy = self.raw_occupancy.copy()
        self.agt1_pos = [13, 1]
        self.occupancy[self.agt1_pos[0], self.agt1_pos[1]] = 1
        self.agt2_pos = [13, 13]
        self.occupancy[self.agt2_pos[0], self.agt2_pos[1]] = 1
        self.box_pos = [10, 7]
        self.occupancy[self.box_pos[0], self.box_pos[1]] = 1
        self.is_1_catch_box = False
        self.is_2_catch_box = False

    def reset(self):
        """Restart the episode from the initial configuration."""
        self._init_episode_state()

    def _move_free_agent(self, pos, action):
        """Move an agent that is not holding the box; updates ``pos`` in place.

        The move is skipped when the target cell is occupied (wall, other
        agent, or box) or the action is not a movement action.
        """
        deltas = {0: (-1, 0), 1: (1, 0), 2: (0, -1), 3: (0, 1)}
        if action not in deltas:
            return  # unknown action: no-op
        d_row, d_col = deltas[action]
        if self.occupancy[pos[0] + d_row][pos[1] + d_col] != 1:
            self.occupancy[pos[0]][pos[1]] = 0
            pos[0] += d_row
            pos[1] += d_col
            self.occupancy[pos[0]][pos[1]] = 1

    def step(self, action_list):
        """Advance one time step.

        Args:
            action_list: [action_agent1, action_agent2], each in {0,1,2,3}.

        Returns:
            (reward, done): reward is 10 on success else 0; on success the
            environment resets itself before returning.
        """
        if not self.is_1_catch_box:
            self._move_free_agent(self.agt1_pos, action_list[0])
        if not self.is_2_catch_box:
            self._move_free_agent(self.agt2_pos, action_list[1])
        if self.is_1_catch_box and self.is_2_catch_box:
            # Both agents hold the box; it moves only on a joint action.
            if action_list[0] == 0 and action_list[1] == 0:  # up
                if self.occupancy[self.box_pos[0] - 1, self.box_pos[1]] == 0 and self.occupancy[self.box_pos[0] - 1, self.box_pos[1] - 1] == 0 and self.occupancy[self.box_pos[0] - 1, self.box_pos[1] + 1] == 0:
                    self.box_pos[0] = self.box_pos[0] - 1
                    self.agt1_pos[0] = self.agt1_pos[0] - 1
                    self.agt2_pos[0] = self.agt2_pos[0] - 1
                    self.occupancy[self.box_pos[0] + 1, self.box_pos[1]] = 0
                    self.occupancy[self.agt1_pos[0] + 1, self.agt1_pos[1]] = 0
                    self.occupancy[self.agt2_pos[0] + 1, self.agt2_pos[1]] = 0
                    self.occupancy[self.box_pos[0], self.box_pos[1]] = 1
                    self.occupancy[self.agt1_pos[0], self.agt1_pos[1]] = 1
                    self.occupancy[self.agt2_pos[0], self.agt2_pos[1]] = 1
            if action_list[0] == 1 and action_list[1] == 1:  # down
                if self.occupancy[self.box_pos[0] + 1, self.box_pos[1]] == 0 and self.occupancy[self.box_pos[0] + 1, self.box_pos[1] - 1] == 0 and self.occupancy[self.box_pos[0] + 1, self.box_pos[1] + 1] == 0:
                    self.box_pos[0] = self.box_pos[0] + 1
                    self.agt1_pos[0] = self.agt1_pos[0] + 1
                    self.agt2_pos[0] = self.agt2_pos[0] + 1
                    self.occupancy[self.box_pos[0] - 1, self.box_pos[1]] = 0
                    self.occupancy[self.agt1_pos[0] - 1, self.agt1_pos[1]] = 0
                    self.occupancy[self.agt2_pos[0] - 1, self.agt2_pos[1]] = 0
                    self.occupancy[self.box_pos[0], self.box_pos[1]] = 1
                    self.occupancy[self.agt1_pos[0], self.agt1_pos[1]] = 1
                    self.occupancy[self.agt2_pos[0], self.agt2_pos[1]] = 1
            if action_list[0] == 2 and action_list[1] == 2:  # left
                # The trio occupies columns box-1..box+1; only the cell two
                # to the left must be free.
                if self.occupancy[self.box_pos[0], self.box_pos[1] - 2] == 0:
                    self.box_pos[1] = self.box_pos[1] - 1
                    self.agt1_pos[1] = self.agt1_pos[1] - 1
                    self.agt2_pos[1] = self.agt2_pos[1] - 1
                    self.occupancy[self.box_pos[0], self.box_pos[1] - 1] = 1
                    self.occupancy[self.box_pos[0], self.box_pos[1] + 2] = 0
            if action_list[0] == 3 and action_list[1] == 3:  # right
                if self.occupancy[self.box_pos[0], self.box_pos[1] + 2] == 0:
                    self.box_pos[1] = self.box_pos[1] + 1
                    self.agt1_pos[1] = self.agt1_pos[1] + 1
                    self.agt2_pos[1] = self.agt2_pos[1] + 1
                    self.occupancy[self.box_pos[0], self.box_pos[1] + 1] = 1
                    self.occupancy[self.box_pos[0], self.box_pos[1] - 2] = 0
        # Update catch flags: an agent holds the box when horizontally
        # adjacent to it on the same row.
        if self.agt1_pos[0] == self.box_pos[0] and abs(self.agt1_pos[1] - self.box_pos[1]) == 1:
            self.is_1_catch_box = True
        if self.agt2_pos[0] == self.box_pos[0] and abs(self.agt2_pos[1] - self.box_pos[1]) == 1:
            self.is_2_catch_box = True
        done = False
        reward = 0
        # Success: both agents adjacent to the box. (Bug fix: the original
        # dropped "== 1" from agent 2's distance check, so any agent-2
        # position on the box row counted as success.)
        if (self.agt1_pos[0] == self.box_pos[0]
                and abs(self.agt1_pos[1] - self.box_pos[1]) == 1
                and self.agt2_pos[0] == self.box_pos[0]
                and abs(self.agt2_pos[1] - self.box_pos[1]) == 1):
            reward = 10
            done = True
            self.reset()
        return reward, done

    def get_global_obs(self):
        """Render the full map as a (15, 15, 3) RGB array in [0, 1].

        Walls black, free space white, agent 1 red, agent 2 blue, box green.
        """
        obs = np.ones((15, 15, 3))
        obs[self.raw_occupancy == 1] = 0.0
        obs[self.agt1_pos[0], self.agt1_pos[1]] = (1.0, 0.0, 0.0)  # agent 1: red
        obs[self.agt2_pos[0], self.agt2_pos[1]] = (0.0, 0.0, 1.0)  # agent 2: blue
        obs[self.box_pos[0], self.box_pos[1]] = (0.0, 1.0, 0.0)    # box: green
        return obs

    def get_agt1_obs(self):
        """Agent 1's egocentric (3, 3, 3) RGB observation, centred on itself."""
        obs = np.zeros((3, 3, 3))
        for i in range(3):
            for j in range(3):
                if self.raw_occupancy[self.agt1_pos[0] - 1 + i][self.agt1_pos[1] - 1 + j] == 0:
                    obs[i, j] = 1.0  # free cell: white
        # The other agent, if inside the window, shows up blue.
        d_x = self.agt2_pos[0] - self.agt1_pos[0]
        d_y = self.agt2_pos[1] - self.agt1_pos[1]
        if -1 <= d_x <= 1 and -1 <= d_y <= 1:
            obs[1 + d_x, 1 + d_y] = (0.0, 0.0, 1.0)
        # The box, if inside the window, shows up green.
        d_x = self.box_pos[0] - self.agt1_pos[0]
        d_y = self.box_pos[1] - self.agt1_pos[1]
        if -1 <= d_x <= 1 and -1 <= d_y <= 1:
            obs[1 + d_x, 1 + d_y] = (0.0, 1.0, 0.0)
        # Own cell (the centre) is red.
        obs[1, 1] = (1.0, 0.0, 0.0)
        return obs

    def get_agt2_obs(self):
        """Agent 2's egocentric (3, 3, 3) RGB observation, centred on itself."""
        obs = np.zeros((3, 3, 3))
        for i in range(3):
            for j in range(3):
                if self.raw_occupancy[self.agt2_pos[0] - 1 + i][self.agt2_pos[1] - 1 + j] == 0:
                    obs[i, j] = 1.0  # free cell: white
        # The other agent, if inside the window, shows up red.
        d_x = self.agt1_pos[0] - self.agt2_pos[0]
        d_y = self.agt1_pos[1] - self.agt2_pos[1]
        if -1 <= d_x <= 1 and -1 <= d_y <= 1:
            obs[1 + d_x, 1 + d_y] = (1.0, 0.0, 0.0)
        # The box, if inside the window, shows up green.
        d_x = self.box_pos[0] - self.agt2_pos[0]
        d_y = self.box_pos[1] - self.agt2_pos[1]
        if -1 <= d_x <= 1 and -1 <= d_y <= 1:
            obs[1 + d_x, 1 + d_y] = (0.0, 1.0, 0.0)
        # Own cell (the centre) is blue.
        obs[1, 1] = (0.0, 0.0, 1.0)
        return obs

    def get_state(self):
        """Normalised global state: a (1, 6) array of positions scaled by 1/15."""
        state = np.zeros((1, 6))
        state[0, 0:2] = [p / 15 for p in self.agt1_pos]
        state[0, 2:4] = [p / 15 for p in self.agt2_pos]
        state[0, 4:6] = [p / 15 for p in self.box_pos]
        return state

    def get_obs(self):
        """Return both agents' local observations as [obs1, obs2]."""
        return [self.get_agt1_obs(), self.get_agt2_obs()]

    def plot_scene(self):
        """Show a 2x2 matplotlib figure: global view, both local views, occupancy."""
        fig = plt.figure(figsize=(5, 5))
        gs = GridSpec(2, 2, figure=fig)
        ax1 = fig.add_subplot(gs[0, 0])
        ax2 = fig.add_subplot(gs[1, 0])
        ax3 = fig.add_subplot(gs[1, 1])
        ax4 = fig.add_subplot(gs[0, 1])
        for ax, img in ((ax1, self.get_global_obs()),
                        (ax2, self.get_agt1_obs()),
                        (ax3, self.get_agt2_obs()),
                        (ax4, self.occupancy)):
            ax.imshow(img)
            plt.xticks([])
            plt.yticks([])
        plt.show()

    def render(self):
        """Render the scene with OpenCV at 20 px per cell (waits ~100 ms)."""
        cell = 20
        obs = np.ones((15 * cell, 15 * cell, 3))
        for i in range(15):
            for j in range(15):
                if self.raw_occupancy[i, j] == 1:
                    cv2.rectangle(obs, (j * cell, i * cell),
                                  (j * cell + cell, i * cell + cell), (0, 0, 0), -1)
        # OpenCV colours are BGR: agent 1 red, agent 2 blue, box green.
        for pos, color in ((self.agt1_pos, (0, 0, 255)),
                           (self.agt2_pos, (255, 0, 0)),
                           (self.box_pos, (0, 255, 0))):
            cv2.rectangle(obs, (pos[1] * cell, pos[0] * cell),
                          (pos[1] * cell + cell, pos[0] * cell + cell), color, -1)
        cv2.imshow('image', obs)
        cv2.waitKey(100)
| [
"matplotlib.pyplot.show",
"cv2.waitKey",
"matplotlib.pyplot.yticks",
"numpy.zeros",
"numpy.ones",
"cv2.imshow",
"matplotlib.pyplot.figure",
"cv2.rectangle",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.xticks"
] | [((210, 228), 'numpy.zeros', 'np.zeros', (['(15, 15)'], {}), '((15, 15))\n', (218, 228), True, 'import numpy as np\n'), ((8824, 8844), 'numpy.ones', 'np.ones', (['(15, 15, 3)'], {}), '((15, 15, 3))\n', (8831, 8844), True, 'import numpy as np\n'), ((9646, 9665), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (9654, 9665), True, 'import numpy as np\n'), ((10781, 10800), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (10789, 10800), True, 'import numpy as np\n'), ((11915, 11931), 'numpy.zeros', 'np.zeros', (['(1, 6)'], {}), '((1, 6))\n', (11923, 11931), True, 'import numpy as np\n'), ((12351, 12377), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (12361, 12377), True, 'import matplotlib.pyplot as plt\n'), ((12392, 12418), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(2)'], {'figure': 'fig'}), '(2, 2, figure=fig)\n', (12400, 12418), False, 'from matplotlib.gridspec import GridSpec\n'), ((12635, 12649), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (12645, 12649), True, 'import matplotlib.pyplot as plt\n'), ((12659, 12673), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (12669, 12673), True, 'import matplotlib.pyplot as plt\n'), ((12724, 12738), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (12734, 12738), True, 'import matplotlib.pyplot as plt\n'), ((12748, 12762), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (12758, 12762), True, 'import matplotlib.pyplot as plt\n'), ((12813, 12827), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (12823, 12827), True, 'import matplotlib.pyplot as plt\n'), ((12837, 12851), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (12847, 12851), True, 'import matplotlib.pyplot as plt\n'), ((12897, 12911), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (12907, 12911), True, 'import matplotlib.pyplot as plt\n'), ((12921, 
12935), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (12931, 12935), True, 'import matplotlib.pyplot as plt\n'), ((12945, 12955), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12953, 12955), True, 'import matplotlib.pyplot as plt\n'), ((12996, 13026), 'numpy.ones', 'np.ones', (['(15 * 20, 15 * 20, 3)'], {}), '((15 * 20, 15 * 20, 3))\n', (13003, 13026), True, 'import numpy as np\n'), ((13238, 13384), 'cv2.rectangle', 'cv2.rectangle', (['obs', '(self.agt1_pos[1] * 20, self.agt1_pos[0] * 20)', '(self.agt1_pos[1] * 20 + 20, self.agt1_pos[0] * 20 + 20)', '(0, 0, 255)', '(-1)'], {}), '(obs, (self.agt1_pos[1] * 20, self.agt1_pos[0] * 20), (self.\n agt1_pos[1] * 20 + 20, self.agt1_pos[0] * 20 + 20), (0, 0, 255), -1)\n', (13251, 13384), False, 'import cv2\n'), ((13389, 13535), 'cv2.rectangle', 'cv2.rectangle', (['obs', '(self.agt2_pos[1] * 20, self.agt2_pos[0] * 20)', '(self.agt2_pos[1] * 20 + 20, self.agt2_pos[0] * 20 + 20)', '(255, 0, 0)', '(-1)'], {}), '(obs, (self.agt2_pos[1] * 20, self.agt2_pos[0] * 20), (self.\n agt2_pos[1] * 20 + 20, self.agt2_pos[0] * 20 + 20), (255, 0, 0), -1)\n', (13402, 13535), False, 'import cv2\n'), ((13540, 13682), 'cv2.rectangle', 'cv2.rectangle', (['obs', '(self.box_pos[1] * 20, self.box_pos[0] * 20)', '(self.box_pos[1] * 20 + 20, self.box_pos[0] * 20 + 20)', '(0, 255, 0)', '(-1)'], {}), '(obs, (self.box_pos[1] * 20, self.box_pos[0] * 20), (self.\n box_pos[1] * 20 + 20, self.box_pos[0] * 20 + 20), (0, 255, 0), -1)\n', (13553, 13682), False, 'import cv2\n'), ((13710, 13734), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'obs'], {}), "('image', obs)\n", (13720, 13734), False, 'import cv2\n'), ((13744, 13760), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (13755, 13760), False, 'import cv2\n'), ((13161, 13240), 'cv2.rectangle', 'cv2.rectangle', (['obs', '(j * 20, i * 20)', '(j * 20 + 20, i * 20 + 20)', '(0, 0, 0)', '(-1)'], {}), '(obs, (j * 20, i * 20), (j * 20 + 20, i * 20 + 20), (0, 0, 0), -1)\n', 
(13174, 13240), False, 'import cv2\n')] |
import numpy as np
from trimesh import Scene
class SmplScene(Scene):
    """A trimesh Scene specialised for SMPL-style meshes with joints."""

    def calculate_regressor(self):
        """Cast axis-aligned rays from the joints against the body mesh.

        Assumes the scene stores the body mesh under 'geometry_0' and the
        joint locations under 'joint_0' — TODO confirm against the loader.

        Returns:
            (locations, index_ray, index_tri) from
            ``trimesh`` ray/mesh intersection.
        """
        mesh = self.geometry['geometry_0']
        joint = self.geometry['joint_0']
        # One ray per coordinate axis (the rows of the identity matrix).
        ray_direction = np.eye(3)
        # Removed stray incomplete statement `mesh.clos`, which would raise
        # AttributeError at runtime.
        locations, index_ray, index_tri = mesh.ray.intersects_location(
            ray_origins=joint,
            ray_directions=ray_direction)
        # NOTE(review): the original discarded these results (the visible
        # code ended here); returning them makes the method usable.
        return locations, index_ray, index_tri
"numpy.eye"
] | [((219, 228), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (225, 228), True, 'import numpy as np\n')] |
import numpy as np
from skimage.color import rgb2hsv
from skimage.io import imread # skimage.io.imsave seems to be broken?
from scipy.misc import imsave
from tempfile import mkdtemp
from os import path
from shutil import rmtree
# Structured dtypes so spectra can be viewed and sorted field-wise
# (e.g. np.sort(..., order=['H', 'S', 'V'])).
hsv_dtype = np.dtype([('H', float), ('S', float), ('V', float)])
rgb_dtype = np.dtype([('R', float), ('G', float), ('B', float)])
def np_product(*arrays, **kwargs):
    """Cartesian product of 1-D arrays, returned as a 2-D array.

    Args:
        *arrays: 1-D sequences whose product is enumerated.
        repeat: optional keyword, repeats the argument list (like
            itertools.product's ``repeat``).
        **kwargs: remaining keywords forwarded to ``np.empty`` (e.g. dtype).

    Returns:
        Array of shape (prod(len(a_i)), num_arrays) whose rows enumerate
        the product in lexicographic order.
    """
    arrays = arrays * kwargs.pop('repeat', 1)
    num_arrays = len(arrays)
    # Bug fix: `map(len, arrays) + [num_arrays]` worked on Python 2 only;
    # on Python 3 `map` returns an iterator and `+` raises TypeError.
    arr = np.empty([len(a) for a in arrays] + [num_arrays], **kwargs)
    for index, array in enumerate(np.ix_(*arrays)):
        arr[..., index] = array
    return arr.reshape(-1, num_arrays)
def center(small, large):
    """Return the top-left offset that centres an extent `small` inside `large`."""
    margin = large - small
    return margin // 2
def get_rgb_spectrum():
    """Return all 256**3 RGB triples on a 1/256 grid as an (N, 3) float array."""
    channel = np.arange(0., 1., 1. / 256.)
    return np_product(channel, dtype=float, repeat=3)
def get_hsv_spectrum():
    """Return the full RGB spectrum converted to HSV, sorted by (H, S, V)."""
    # FIXME: optimize
    # rgb2hsv expects an image, hence the temporary leading axis.
    rgb_image = get_rgb_spectrum()[np.newaxis, ...]
    hsv = rgb2hsv(rgb_image)[0, ...]
    structured = hsv.view(hsv_dtype).reshape(-1)
    return np.sort(structured, order=['H', 'S', 'V'])
def read_image(f):
    """Read an image and rescale it to the 0-1 range.

    skimage is inconsistent with whether RGB is 0-1 or 0-255, hence the
    explicit division by 256.
    """
    raw = imread(f)
    return raw / 256.
def write_image(f, image):
    """Write `image` to `f`, a filename or a writable binary file object.

    `imsave` needs a real path, so for file objects the image is first
    saved to a temporary PNG whose bytes are then streamed into `f`.
    """
    if hasattr(f, 'write'):
        tmpdirname = mkdtemp()
        try:
            tmpfilename = path.join(tmpdirname, 'tmpfile.png')
            imsave(tmpfilename, image)
            with open(tmpfilename, 'rb') as tmpfile:
                f.write(tmpfile.read())
        finally:
            # Bug fix: clean up the temp dir even if imsave or the write
            # raises (the original leaked it on any exception).
            rmtree(tmpdirname)
    else:
        imsave(f, image)
"numpy.ix_",
"numpy.dtype",
"tempfile.mkdtemp",
"numpy.arange",
"scipy.misc.imsave",
"shutil.rmtree",
"os.path.join",
"skimage.io.imread"
] | [((241, 293), 'numpy.dtype', 'np.dtype', (["[('H', float), ('S', float), ('V', float)]"], {}), "([('H', float), ('S', float), ('V', float)])\n", (249, 293), True, 'import numpy as np\n'), ((306, 358), 'numpy.dtype', 'np.dtype', (["[('R', float), ('G', float), ('B', float)]"], {}), "([('R', float), ('G', float), ('B', float)])\n", (314, 358), True, 'import numpy as np\n'), ((558, 573), 'numpy.ix_', 'np.ix_', (['*arrays'], {}), '(*arrays)\n', (564, 573), True, 'import numpy as np\n'), ((743, 775), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', '(1.0 / 256.0)'], {}), '(0.0, 1.0, 1.0 / 256.0)\n', (752, 775), True, 'import numpy as np\n'), ((990, 999), 'skimage.io.imread', 'imread', (['f'], {}), '(f)\n', (996, 999), False, 'from skimage.io import imread\n'), ((1135, 1144), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (1142, 1144), False, 'from tempfile import mkdtemp\n'), ((1163, 1199), 'os.path.join', 'path.join', (['tmpdirname', '"""tmpfile.png"""'], {}), "(tmpdirname, 'tmpfile.png')\n", (1172, 1199), False, 'from os import path\n'), ((1204, 1230), 'scipy.misc.imsave', 'imsave', (['tmpfilename', 'image'], {}), '(tmpfilename, image)\n', (1210, 1230), False, 'from scipy.misc import imsave\n'), ((1310, 1328), 'shutil.rmtree', 'rmtree', (['tmpdirname'], {}), '(tmpdirname)\n', (1316, 1328), False, 'from shutil import rmtree\n'), ((1341, 1357), 'scipy.misc.imsave', 'imsave', (['f', 'image'], {}), '(f, image)\n', (1347, 1357), False, 'from scipy.misc import imsave\n')] |
import torch
import numpy as np
from scipy.io.wavfile import write
from audio.audio_processing import griffin_lim
def get_mel_from_wav(audio, _stft):
    """Compute the mel spectrogram and energy of a raw waveform.

    Args:
        audio: 1-D waveform (array-like); values are clipped to [-1, 1].
        _stft: object exposing ``mel_spectrogram(batch) -> (melspec, energy)``
            where both outputs carry a leading batch dimension of 1.

    Returns:
        Tuple ``(melspec, energy)`` as float32 numpy arrays with the batch
        dimension removed.
    """
    # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4);
    # a plain tensor with requires_grad=False (the default) is equivalent.
    audio = torch.clip(torch.FloatTensor(audio).unsqueeze(0), -1, 1)
    melspec, energy = _stft.mel_spectrogram(audio)
    melspec = torch.squeeze(melspec, 0).numpy().astype(np.float32)
    energy = torch.squeeze(energy, 0).numpy().astype(np.float32)
    return melspec, energy
def inv_mel_spec(mel, out_filename, _stft, griffin_iters=60):
    """Invert a mel spectrogram to audio via Griffin-Lim and write a wav file.

    Args:
        mel: mel spectrogram tensor (no batch dimension).
        out_filename: destination path for the wav file.
        _stft: STFT helper providing spectral_de_normalize, mel_basis,
            _stft_fn and sampling_rate.
        griffin_iters: number of Griffin-Lim iterations.
    """
    batched = torch.stack([mel])
    denorm = _stft.spectral_de_normalize(batched)
    denorm = denorm.transpose(1, 2).data.cpu()
    # Scale the mel-to-linear projection before phase reconstruction.
    spec_from_mel_scaling = 1000
    linear = torch.mm(denorm[0], _stft.mel_basis)
    linear = linear.transpose(0, 1).unsqueeze(0)
    linear = linear * spec_from_mel_scaling
    audio = griffin_lim(
        torch.autograd.Variable(linear[:, :, :-1]), _stft._stft_fn, griffin_iters
    )
    audio = audio.squeeze().cpu().numpy()
    write(out_filename, _stft.sampling_rate, audio)
def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
    """Compute the padding needed to align ``x`` to a whole number of frames.

    With ``pad_sides == 1`` all padding goes on the right (final frame);
    with ``pad_sides == 2`` it is split across both sides, the extra sample
    (if odd) going to the right.
    """
    assert pad_sides in (1, 2)
    total = (x.shape[0] // fshift + 1) * fshift - x.shape[0]
    if pad_sides == 1:
        return 0, total
    return total // 2, total // 2 + total % 2
# Conversions
def amp_to_db(x):
    """Convert amplitude to decibels, flooring at 1e-5 to avoid log(0)."""
    floored = np.maximum(1e-5, x)
    return 20 * np.log10(floored)
def normalize(S, min_level_db):
    """Linearly map ``S`` from [min_level_db, 0] dB to [0, 1]."""
    shifted = S - min_level_db
    return shifted / -min_level_db
| [
"numpy.maximum",
"torch.stack",
"torch.autograd.Variable",
"torch.FloatTensor",
"torch.mm",
"scipy.io.wavfile.write",
"torch.squeeze"
] | [((234, 285), 'torch.autograd.Variable', 'torch.autograd.Variable', (['audio'], {'requires_grad': '(False)'}), '(audio, requires_grad=False)\n', (257, 285), False, 'import torch\n'), ((571, 589), 'torch.stack', 'torch.stack', (['[mel]'], {}), '([mel])\n', (582, 589), False, 'import torch\n'), ((760, 804), 'torch.mm', 'torch.mm', (['mel_decompress[0]', '_stft.mel_basis'], {}), '(mel_decompress[0], _stft.mel_basis)\n', (768, 804), False, 'import torch\n'), ((1142, 1187), 'scipy.io.wavfile.write', 'write', (['audio_path', '_stft.sampling_rate', 'audio'], {}), '(audio_path, _stft.sampling_rate, audio)\n', (1147, 1187), False, 'from scipy.io.wavfile import write\n'), ((960, 1009), 'torch.autograd.Variable', 'torch.autograd.Variable', (['spec_from_mel[:, :, :-1]'], {}), '(spec_from_mel[:, :, :-1])\n', (983, 1009), False, 'import torch\n'), ((1616, 1636), 'numpy.maximum', 'np.maximum', (['(1e-05)', 'x'], {}), '(1e-05, x)\n', (1626, 1636), True, 'import numpy as np\n'), ((176, 200), 'torch.FloatTensor', 'torch.FloatTensor', (['audio'], {}), '(audio)\n', (193, 200), False, 'import torch\n'), ((351, 376), 'torch.squeeze', 'torch.squeeze', (['melspec', '(0)'], {}), '(melspec, 0)\n', (364, 376), False, 'import torch\n'), ((417, 441), 'torch.squeeze', 'torch.squeeze', (['energy', '(0)'], {}), '(energy, 0)\n', (430, 441), False, 'import torch\n')] |
import numpy as np
from Grid.GridProcessing import grid
from Shapes.ShapesFunctions import *
# Specify the file that includes dynamic systems
from dynamics.DubinsCar4D import *
import scipy.io as sio
import math
""" USER INTERFACES
- Define grid
- Generate initial values for grid using shape functions
- Time length for computations
- Run
"""
"""
# Grid field in this order: min_range, max_range, number of dims, grid dimensions, list of periodic dim: starting at 0
g = grid(np.array([-0.5, -1.0, 0.5, -2.0, -math.pi/2, -8.0]), np.array([0.5, 1.0, 1.5, 2.0, math.pi/2, 8.0]), 6, np.array([27, 26, 27, 26, 27, 26]))
# Define my object
my_car = Humanoid_6D()
# Use the grid to initialize initial value function
Initial_value_f = Rectangle6D(g)
# Look-back length and time step
lookback_length = 0.0
t_step = 0.05
tau = np.arange(start = 0, stop = lookback_length + t_step, step = t_step)
print("Welcome to optimized_dp \n")
# Use the following variable to specify the characteristics of computation
compMethod = "minVWithVInit"
my_object = my_car
my_shape = Initial_value_f
"""
# Grid fields (per the docstring above): min_range, max_range, number of
# dims, grid points per dim, list of periodic dims (0-indexed). Dim 3 spans
# [-pi, pi] and is periodic — presumably the heading angle; confirm in
# DubinsCar4D.
g = grid(np.array([-5.0, -5.0, -1.0, -math.pi]), np.array([5.0, 5.0, 1.0, math.pi]), 4, np.array([40, 40, 50, 50]), [3])
# Define my object
my_car = DubinsCar4D()
# Use the grid to initialize initial value function
Initial_value_f = CylinderShape(g, [3,4], np.zeros(4), 1)
# Look-back length and time step
lookback_length = 3.0
t_step = 0.05
# Small epsilon so the stop value itself is included in the time vector.
small_number = 1e-5
tau = np.arange(start = 0, stop = lookback_length + small_number, step = t_step)
print("Welcome to optimized_dp \n")
# Use the following variable to specify the characteristics of computation
compMethod = "none"
my_object  = my_car
my_shape = Initial_value_f
"""
g = grid(np.array([3, math.pi/18, 0, -2*math.pi]), np.array([10, math.pi/3, math.pi/3, 2*math.pi]), 4, np.array([50,50,50,50]), [3])
# Define my object
my_car = tailsitter()
#Use the grid to initualize initial value function
Initial_value_f = ShapeRectangle(g, np.array([5.5, math.pi/18, math.pi/18, 0]), np.array([6, math.pi/6, math.pi/4, 0]))
# look-back length and time step
lookback_length = 0.5
t_step = 0.01
small_number = 1e-5
tau = np.arange(start = 0, stop = lookback_length + small_number, step = t_step)
print("Welcome to optimized_dp \n")
# Use the following variable to specify the characteristics of computation
compMethod = "none"
my_object = my_car
my_shape = Initial_value_f
"""
| [
"numpy.array",
"numpy.zeros",
"numpy.arange"
] | [((1468, 1536), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(lookback_length + small_number)', 'step': 't_step'}), '(start=0, stop=lookback_length + small_number, step=t_step)\n', (1477, 1536), True, 'import numpy as np\n'), ((1105, 1143), 'numpy.array', 'np.array', (['[-5.0, -5.0, -1.0, -math.pi]'], {}), '([-5.0, -5.0, -1.0, -math.pi])\n', (1113, 1143), True, 'import numpy as np\n'), ((1145, 1179), 'numpy.array', 'np.array', (['[5.0, 5.0, 1.0, math.pi]'], {}), '([5.0, 5.0, 1.0, math.pi])\n', (1153, 1179), True, 'import numpy as np\n'), ((1184, 1210), 'numpy.array', 'np.array', (['[40, 40, 50, 50]'], {}), '([40, 40, 50, 50])\n', (1192, 1210), True, 'import numpy as np\n'), ((1355, 1366), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1363, 1366), True, 'import numpy as np\n')] |
# Copyright 2020 The OATomobile Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles the hosted CARLA autopilot expert demonstrations dataset."""
import glob
import os
import sys
import zipfile
from typing import Any
from typing import Callable
from typing import Generator
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Union
import carla
import matplotlib.pyplot as plt
import numpy as np
import tqdm
import wget
from absl import logging
from oatomobile.core.dataset import Dataset
from oatomobile.core.dataset import Episode
from oatomobile.util import carla as cutil
from oatomobile.util import graphics as gutil
class CARLADataset(Dataset):
"""The CARLA autopilot expert demonstrations dataset."""
def __init__(
self,
id: str,
) -> None:
"""Constructs a CARLA dataset.
Args:
id: One of {"raw", "examples", "processed"}.
"""
if id not in ("raw", "examples", "processed"):
raise ValueError("Unrecognised CARLA dataset id {}".format(id))
self.id = id
super(CARLADataset, self).__init__()
def _get_uuid(self) -> str:
"""Returns the universal unique identifier of the dataset."""
return "CARLATown01Autopilot{}-v0".format(self.id)
@property
def info(self) -> Mapping[str, Any]:
"""The dataset description."""
return dict(
uuid=self.uuid,
town="Town01",
agent="carsuite_baselines.rulebased.Autopilot",
noise=0.2,
)
@property
def url(self) -> str:
"""The URL where the dataset is hosted."""
return "https://www.cs.ox.ac.uk/people/angelos.filos/data/oatomobile/{}.zip".format(
self.id)
def download_and_prepare(self, output_dir: str) -> None:
"""Downloads and prepares the dataset from the host URL.
Args:
output_dir: The absolute path where the prepared dataset is stored.
"""
# Creates the necessary output directory.
os.makedirs(output_dir, exist_ok=True)
# Temporary zip file to use.
zfname = os.path.join(output_dir, "{}.zip".format(self.id))
# Downloads dataset from Google Drive.
logging.debug("Starts downloading '{}' dataset".format(self.id))
wget.download(
url=self.url,
out=zfname,
)
# Unzips data.
logging.debug("Unzips the data from {}".format(zfname))
with zipfile.ZipFile(zfname) as zfile:
zfile.extractall(output_dir)
# Removes the zip file.
logging.debug("Removes the compressed {}".format(zfname))
os.remove(zfname)
  @staticmethod
  def load_datum(
      fname: str,
      modalities: Sequence[str],
      mode: bool,
      dataformat: str = "HWC",
  ) -> Mapping[str, np.ndarray]:
    """Loads a single datum from the dataset.

    Args:
      fname: The absolute path to the ".npz" datum.
      modalities: The keys of the attributes to fetch.
      mode: If True, it labels its datum with {FORWARD, STOP, LEFT, RIGHT}.
      dataformat: The format of the 3D data, one of `{HWC, CHW}`.

    Returns:
      The datum in a dictionary, `NumPy`-friendly format.
    """
    assert dataformat in ("HWC", "CHW")
    # All fetched attributes are cast to float32.
    dtype = np.float32
    sample = dict()
    with np.load(fname) as datum:
      for attr in modalities:
        # Fetches the value.
        sample[attr] = datum[attr]
        # Converts scalars to 1D vectors.
        sample[attr] = np.atleast_1d(sample[attr])
        # Casts value to same type.
        sample[attr] = sample[attr].astype(dtype)
        if len(sample[attr].shape) == 3 and dataformat == "CHW":
          # Converts from HWC to CHW format.
          sample[attr] = np.transpose(sample[attr], (2, 0, 1))
    # Appends `mode` attribute where `{0: FORWARD, 1: STOP, 2: TURN}`.
    if mode and "player_future" in sample:
      plan = sample["player_future"]
      # Final (x, y) waypoint of the planned trajectory.
      x_T, y_T = plan[-1, :2]
      # Norm of the vector (x_T, y_T).
      norm = np.linalg.norm([x_T, y_T])
      # Angle of vector (0, 0) -> (x_T, y_T).
      # NOTE(review): np.arccos maps into [0, pi], so theta is always in
      # [0, 180] degrees and the `theta <= -15` branch below is
      # unreachable — label 3 (RIGHT) is never produced. A signed angle
      # (np.arctan2(y_T, x_T)) was likely intended; confirm against the
      # labels the models were trained on before changing it.
      theta = np.degrees(np.arccos(x_T / (norm + 1e-3)))
      if norm < 3:  # STOP
        sample["mode"] = 1
      elif theta > 15:  # LEFT
        sample["mode"] = 2
      elif theta <= -15:  # RIGHT
        sample["mode"] = 3
      else:  # FORWARD
        sample["mode"] = 0
      sample["mode"] = np.atleast_1d(sample["mode"])
      sample["mode"] = sample["mode"].astype(dtype)
    # Records the path to the sample.
    sample["name"] = fname
    return sample
  @staticmethod
  def collect(
      town: str,
      output_dir: str,
      num_vehicles: int,
      num_pedestrians: int,
      num_steps: int = 1000,
      spawn_point: Optional[Union[int, carla.Location]] = None,  # pylint: disable=no-member
      destination: Optional[Union[int, carla.Location]] = None,  # pylint: disable=no-member
      sensors: Sequence[str] = (
          "acceleration",
          "velocity",
          "lidar",
          "is_at_traffic_light",
          "traffic_light_state",
          "actors_tracker",
      ),
      render: bool = False,
      port: int = 2000,
  ) -> None:
    """Collects autopilot demonstrations for a single episode on CARLA.

    Args:
      town: The CARLA town id.
      output_dir: The full path to the output directory.
      num_vehicles: The number of other vehicles in the simulation.
      num_pedestrians: The number of pedestrians in the simulation.
      num_steps: The number of steps in the simulator.
      spawn_point: The hero vehicle spawn point. If an int is
        provided then the index of the spawn point is used.
        If None, then randomly selects a spawn point every time
        from the available spawn points of each map.
      destination: The final destination. If an int is
        provided then the index of the spawn point is used.
        If None, then randomly selects a spawn point every time
        from the available spawn points of each map.
      sensors: The list of recorded sensors.
      render: If True it spawn the `PyGame` display.
    """
    # NOTE(review): imported inside the function, presumably so that the
    # module can be imported without a CARLA/oatomobile runtime — confirm.
    from oatomobile.baselines.rulebased.autopilot.agent import AutopilotAgent
    from oatomobile.core.loop import EnvironmentLoop
    from oatomobile.core.rl import FiniteHorizonWrapper
    from oatomobile.core.rl import SaveToDiskWrapper
    from oatomobile.envs.carla import CARLAEnv, CARLANavEnv
    from oatomobile.envs.carla import TerminateOnCollisionWrapper, CollisionWrapper

    # Storage area.
    os.makedirs(output_dir, exist_ok=True)
    # Initializes a CARLA environment.
    env = CARLANavEnv(
        town=town,
        sensors=sensors,
        spawn_point=spawn_point,
        destination=destination,
        num_vehicles=num_vehicles,
        num_pedestrians=num_pedestrians,
        port=port
    )
    # Terminates episode if a collision occurs.
    # env = TerminateOnCollisionWrapper(env)
    # CollisionWrapper is used instead of the (commented-out) terminating
    # wrapper above — presumably recording collisions without ending the
    # episode; verify against the wrapper implementations.
    env = CollisionWrapper(env)
    # Wraps the environment in an episode handler to store <observation, action> pairs.
    env = SaveToDiskWrapper(env=env, output_dir=output_dir)
    # Caps environment's duration.
    env = FiniteHorizonWrapper(env=env, max_episode_steps=num_steps)
    # Run a full episode.
    EnvironmentLoop(
        agent_fn=AutopilotAgent,
        environment=env,
        render_mode="human" if render else "none",
    ).run()
@staticmethod
def process(
    dataset_dir: str,
    output_dir: str,
    future_length: int = 80,
    past_length: int = 20,
    num_frame_skips: int = 5,
) -> None:
    """Converts a raw dataset to demonstrations for imitation learning.

    Args:
      dataset_dir: The full path to the raw dataset.
      output_dir: The full path to the output directory.
      future_length: The length of the future trajectory (in frames).
      past_length: The length of the past trajectory (in frames).
      num_frame_skips: The number of frames to skip between anchor frames.
    """
    # Creates the necessary output directory.
    os.makedirs(output_dir, exist_ok=True)
    # Iterate over all episodes.
    for episode_token in tqdm.tqdm(os.listdir(dataset_dir)):
        logging.debug("Processes {} episode".format(episode_token))
        # Initializes episode handler.
        episode = Episode(parent_dir=dataset_dir, token=episode_token)
        # Fetches all `.npz` files from the raw dataset.
        try:
            sequence = episode.fetch()
        except FileNotFoundError:
            continue
        # Need `past_length + future_length + 1` frames around each anchor.
        assert len(sequence) >= past_length + future_length + 1
        for i in tqdm.trange(
            past_length,
            len(sequence) - future_length,
            num_frame_skips,
        ):
            try:
                # Player context/observation at the anchor frame.
                observation = episode.read_sample(sample_token=sequence[i])
                current_location = observation["location"]
                current_rotation = observation["rotation"]
                # Build past trajectory, oldest frame first.
                player_past = list()
                for j in range(past_length, 0, -1):
                    past_location = episode.read_sample(
                        sample_token=sequence[i - j],
                        attr="location",
                    )
                    player_past.append(past_location)
                player_past = np.asarray(player_past)
                assert len(player_past.shape) == 2
                # Express the past in the ego (current) frame.
                player_past = cutil.world2local(
                    current_location=current_location,
                    current_rotation=current_rotation,
                    world_locations=player_past,
                )
                # Build future trajectory.
                player_future = list()
                for j in range(1, future_length + 1):
                    future_location = episode.read_sample(
                        sample_token=sequence[i + j],
                        attr="location",
                    )
                    player_future.append(future_location)
                player_future = np.asarray(player_future)
                assert len(player_future.shape) == 2
                player_future = cutil.world2local(
                    current_location=current_location,
                    current_rotation=current_rotation,
                    world_locations=player_future,
                )
                # Store to output directory.
                np.savez_compressed(
                    os.path.join(output_dir, "{}.npz".format(sequence[i])),
                    **observation,
                    player_future=player_future,
                    player_past=player_past,
                )
            except KeyboardInterrupt:
                # BUGFIX: `KeyboardInterrupt` derives from `BaseException`,
                # not `Exception`, so the former `except Exception` +
                # `isinstance` check could never fire and Ctrl-C was ignored.
                sys.exit(0)
            except Exception:  # pylint: disable=broad-except
                # Best-effort processing: skip broken samples, but log them
                # instead of silently discarding every error as before.
                logging.exception(
                    "Failed to process sample %s of episode %s",
                    sequence[i], episode_token)
@staticmethod
def plot_datum(
    fname: str,
    output_dir: str,
) -> None:
    """Visualizes a datum from the dataset.

    Renders (when present in the datum) the LIDAR bird's-eye view, the
    front RGB camera, and the two bird's-eye cameras, overlaying the past
    and future trajectories and the goal. One PNG per modality is written
    to `output_dir`.

    Args:
      fname: The absolute path to the datum.
      output_dir: The full path to the output directory.
    """
    COLORS = [
        "#0071bc",
        "#d85218",
        "#ecb01f",
        "#7d2e8d",
        "#76ab2f",
        "#4cbded",
        "#a1132e",
    ]
    # Creates the necessary output directory.
    os.makedirs(output_dir, exist_ok=True)
    # Load datum.
    datum = np.load(fname)
    # Half-width of the bird's-eye views in meters; shared plot extent.
    bev_meters = 25.0
    extent = (-bev_meters, bev_meters, bev_meters, -bev_meters)

    def _finalize(fig, ax, out_name):
        # Strips the axes decorations and writes the figure to disk.
        ax.set(frame_on=False)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        fig.savefig(
            os.path.join(output_dir, out_name),
            bbox_inches="tight",
            pad_inches=0,
            transparent=True,
        )

    def _draw_trajectories(ax):
        # Overlays the past ("x") and future ("o") trajectories if available.
        if "player_past" in datum:
            player_past = datum["player_past"]
            ax.plot(
                player_past[..., 1],
                -player_past[..., 0],
                marker="x",
                markersize=4,
                color=COLORS[0],
                alpha=0.15,
            )
        if "player_future" in datum:
            player_future = datum["player_future"]
            ax.plot(
                player_future[..., 1],
                -player_future[..., 0],
                marker="o",
                markersize=4,
                color=COLORS[1],
                alpha=0.15,
            )

    # Draws LIDAR.
    if "lidar" in datum:
        lidar = gutil.lidar_2darray_to_rgb(datum["lidar"])
        fig, ax = plt.subplots(figsize=(3.0, 3.0))
        # Transposed so the vehicle's forward axis points up in the image.
        ax.imshow(np.transpose(lidar, (1, 0, 2)), extent=extent)
        _finalize(fig, ax, "lidar.png")
    # Draws first person camera-view.
    if "front_camera_rgb" in datum:
        fig, ax = plt.subplots(figsize=(3.0, 3.0))
        ax.imshow(datum["front_camera_rgb"])
        _finalize(fig, ax, "front_camera_rgb.png")
    # Draws bird-view (cityscapes) camera, with trajectories and goals.
    if "bird_view_camera_cityscapes" in datum:
        fig, ax = plt.subplots(figsize=(3.0, 3.0))
        ax.imshow(datum["bird_view_camera_cityscapes"], extent=extent)
        _draw_trajectories(ax)
        # Draws goals if available (only on the cityscapes view, as before).
        if "goal" in datum:
            goal = datum["goal"]
            ax.plot(
                goal[..., 1],
                -goal[..., 0],
                marker="D",
                markersize=6,
                color=COLORS[2],
                linestyle="None",
                alpha=0.25,
                label=r"$\mathcal{G}$",
            )
        _finalize(fig, ax, "bird_view_camera_cityscapes.png")
    # Draws bird-view (RGB) camera, with trajectories.
    if "bird_view_camera_rgb" in datum:
        fig, ax = plt.subplots(figsize=(3.0, 3.0))
        ax.imshow(datum["bird_view_camera_rgb"], extent=extent)
        _draw_trajectories(ax)
        _finalize(fig, ax, "bird_view_camera_rgb.png")
@classmethod
def plot_coverage(
    cls,
    dataset_dir: str,
    output_fname: str,
    color: int = 0,
) -> None:
    """Visualizes all the trajectories in the dataset.

    Args:
      dataset_dir: The parent directory of all the dataset.
      output_fname: The full path to the output filename.
      color: The index of the color to use for the trajectories.
    """
    COLORS = [
        "#0071bc",
        "#d85218",
        "#ecb01f",
        "#7d2e8d",
        "#76ab2f",
        "#4cbded",
        "#a1132e",
    ]
    # Fetches all the data points.
    data_files = glob.glob(
        os.path.join(dataset_dir, "**", "*.npz"),
        recursive=True,
    )
    # Container that stores all locations.
    locations = list()
    for npz_fname in tqdm.tqdm(data_files):
        try:
            locations.append(
                cls.load_datum(
                    npz_fname,
                    modalities=["location"],
                    mode=False,
                )["location"])
        except KeyboardInterrupt:
            # BUGFIX: `KeyboardInterrupt` derives from `BaseException`, not
            # `Exception`; the former `except Exception` + `isinstance`
            # check was unreachable, so Ctrl-C was ignored here.
            sys.exit(0)
        except Exception:  # pylint: disable=broad-except
            # Skip unreadable files but log them rather than dropping the
            # error silently.
            logging.exception("Failed to load %s", npz_fname)
    locations = np.asarray(locations)
    # Scatter plots all locations.
    fig, ax = plt.subplots(figsize=(3.0, 3.0))
    ax.scatter(
        locations[..., 0],
        locations[..., 1],
        s=5,
        alpha=0.01,
        color=COLORS[color % len(COLORS)],
    )
    ax.set(title=dataset_dir, frame_on=False)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    fig.savefig(
        os.path.join(output_fname),
        bbox_inches="tight",
        pad_inches=0,
        transparent=True,
    )
@classmethod
def as_tensorflow(
    cls,
    dataset_dir: str,
    modalities: Sequence[str],
    mode: bool = False,
) -> "tensorflow.data.Dataset":
    """Implements a data reader and loader for the expert demonstrations.

    Args:
      dataset_dir: The absolute path to the raw dataset.
      modalities: The keys of the attributes to fetch.
      mode: If True, it labels its datum with {FORWARD, STOP, LEFT, RIGHT}.

    Returns:
      The unbatched `TensorFlow` dataset.
    """
    import tensorflow as tf

    # All serialized samples on disk.
    npz_files = glob.glob(os.path.join(dataset_dir, "*.npz"))

    # Peek at one datum to derive the static shape of every modality.
    with np.load(npz_files[0]) as probe:
        output_shapes = {
            modality: tf.TensorShape(np.atleast_1d(probe[modality]).shape)
            for modality in modalities
        }
    # The optional `mode` label is a single scalar wrapped in a 1-vector.
    if mode:
        output_shapes["mode"] = tf.TensorShape((1,))
    # Every modality is exposed as `tf.float32`.
    output_types = {modality: tf.float32 for modality in output_shapes}

    def _generate():
        # Lazily loads one datum at a time, in HWC layout for TensorFlow.
        for npz_fname in npz_files:
            yield cls.load_datum(npz_fname, modalities, mode, dataformat="HWC")

    return tf.data.Dataset.from_generator(
        generator=_generate,
        output_types=output_types,
        output_shapes=output_shapes,
    )
@classmethod
def as_numpy(
    cls,
    dataset_dir: str,
    modalities: Sequence[str],
    mode: bool = False,
) -> Generator[Mapping[str, np.ndarray], None, None]:
    """Implements a data reader and loader for the expert demonstrations.

    Thin adapter: builds the `TensorFlow` dataset and converts it into a
    stream of `NumPy` dictionaries.

    Args:
      dataset_dir: The absolute path to the raw dataset.
      modalities: The keys of the attributes to fetch.
      mode: If True, it labels its datum with {FORWARD, STOP, LEFT, RIGHT}.

    Returns:
      The unbatched `NumPy` dataset.
    """
    import tensorflow_datasets as tfds

    tf_dataset = cls.as_tensorflow(dataset_dir, modalities, mode)
    return tfds.as_numpy(tf_dataset)
@classmethod
def as_torch(
    cls,
    dataset_dir: str,
    modalities: Sequence[str],
    transform: Optional[Callable[[Any], Any]] = None,
    mode: bool = False,
    only_array: bool = False,
) -> "torch.utils.data.Dataset":
    """Implements a data reader and loader for the expert demonstrations.

    Args:
      dataset_dir: The absolute path to the raw dataset.
      modalities: The keys of the attributes to fetch.
      transform: The transformations applied on each datum.
      mode: If True, it labels its datum with {FORWARD, STOP, LEFT, RIGHT}.
      only_array: If True, it removes all the keys that are non-array, useful
        when training a model and want to run `.to(device)` without errors.

    Returns:
      The unbatched `PyTorch` dataset.
    """
    import torch

    class PyTorchDataset(torch.utils.data.Dataset):
        """Implements a data reader for the expert demonstrations."""

        def __init__(
            self,
            dataset_dir: str,
            modalities: Sequence[str],
            transform: Optional[Callable[[Any], Any]] = None,
            mode: bool = False,
            only_array: bool = False,
        ) -> None:
            """A simple `PyTorch` dataset.

            Args:
              dataset_dir: The absolute path to the raw dataset.
              modalities: The keys of the attributes to fetch.
              transform: The transformations applied on each datum.
              mode: If True, it labels its datum with
                {FORWARD, STOP, LEFT, RIGHT}.
              only_array: If True, non-array keys are dropped from each datum.
            """
            # Internalise hyperparameters.
            self._modalities = modalities
            self._npz_files = glob.glob(os.path.join(dataset_dir, "*.npz"))
            self._transform = transform
            self._mode = mode
            self._only_array = only_array

        def __len__(self) -> int:
            """Returns the size of the dataset."""
            return len(self._npz_files)

        def __getitem__(
            self,
            idx: int,
        ) -> Mapping[str, np.ndarray]:
            """Loads a single datum.

            Returns:
              The datum in `NumPy`-friendly format.
            """
            # Loads datum from dataset.
            sample = cls.load_datum(
                fname=self._npz_files[idx],
                modalities=self._modalities,
                mode=self._mode,
                dataformat="CHW",
            )
            # BUGFIX: the `only_array` flag used to be ignored and the
            # non-array keys (e.g. the sample "name") were dropped
            # unconditionally; honour the documented flag instead.
            if self._only_array:
                sample = {
                    key: val
                    for (key, val) in sample.items()
                    if isinstance(val, np.ndarray)
                }
            # Applies (optional) transformation to all values.
            if self._transform is not None:
                sample = {key: self._transform(val) for (key, val) in sample.items()}
            return sample

    return PyTorchDataset(dataset_dir, modalities, transform, mode, only_array)
| [
"os.remove",
"numpy.load",
"numpy.linalg.norm",
"os.path.join",
"tensorflow.TensorShape",
"numpy.transpose",
"oatomobile.envs.carla.CARLANavEnv",
"numpy.arccos",
"matplotlib.pyplot.subplots",
"tqdm.tqdm",
"numpy.asarray",
"wget.download",
"oatomobile.util.graphics.lidar_2darray_to_rgb",
"o... | [((2559, 2597), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (2570, 2597), False, 'import os\n'), ((2812, 2851), 'wget.download', 'wget.download', ([], {'url': 'self.url', 'out': 'zfname'}), '(url=self.url, out=zfname)\n', (2825, 2851), False, 'import wget\n'), ((3126, 3143), 'os.remove', 'os.remove', (['zfname'], {}), '(zfname)\n', (3135, 3143), False, 'import os\n'), ((6996, 7034), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (7007, 7034), False, 'import os\n'), ((7085, 7254), 'oatomobile.envs.carla.CARLANavEnv', 'CARLANavEnv', ([], {'town': 'town', 'sensors': 'sensors', 'spawn_point': 'spawn_point', 'destination': 'destination', 'num_vehicles': 'num_vehicles', 'num_pedestrians': 'num_pedestrians', 'port': 'port'}), '(town=town, sensors=sensors, spawn_point=spawn_point,\n destination=destination, num_vehicles=num_vehicles, num_pedestrians=\n num_pedestrians, port=port)\n', (7096, 7254), False, 'from oatomobile.envs.carla import CARLAEnv, CARLANavEnv\n'), ((7411, 7432), 'oatomobile.envs.carla.CollisionWrapper', 'CollisionWrapper', (['env'], {}), '(env)\n', (7427, 7432), False, 'from oatomobile.envs.carla import TerminateOnCollisionWrapper, CollisionWrapper\n'), ((7531, 7580), 'oatomobile.core.rl.SaveToDiskWrapper', 'SaveToDiskWrapper', ([], {'env': 'env', 'output_dir': 'output_dir'}), '(env=env, output_dir=output_dir)\n', (7548, 7580), False, 'from oatomobile.core.rl import SaveToDiskWrapper\n'), ((7626, 7684), 'oatomobile.core.rl.FiniteHorizonWrapper', 'FiniteHorizonWrapper', ([], {'env': 'env', 'max_episode_steps': 'num_steps'}), '(env=env, max_episode_steps=num_steps)\n', (7646, 7684), False, 'from oatomobile.core.rl import FiniteHorizonWrapper\n'), ((8455, 8493), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (8466, 8493), False, 'import os\n'), ((11420, 11458), 'os.makedirs', 
'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (11431, 11458), False, 'import os\n'), ((11490, 11504), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (11497, 11504), True, 'import numpy as np\n'), ((16190, 16211), 'tqdm.tqdm', 'tqdm.tqdm', (['data_files'], {}), '(data_files)\n', (16199, 16211), False, 'import tqdm\n'), ((16513, 16534), 'numpy.asarray', 'np.asarray', (['locations'], {}), '(locations)\n', (16523, 16534), True, 'import numpy as np\n'), ((16585, 16617), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.0, 3.0)'}), '(figsize=(3.0, 3.0))\n', (16597, 16617), True, 'import matplotlib.pyplot as plt\n'), ((2963, 2986), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zfname'], {}), '(zfname)\n', (2978, 2986), False, 'import zipfile\n'), ((3792, 3806), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (3799, 3806), True, 'import numpy as np\n'), ((4497, 4523), 'numpy.linalg.norm', 'np.linalg.norm', (['[x_T, y_T]'], {}), '([x_T, y_T])\n', (4511, 4523), True, 'import numpy as np\n'), ((4873, 4902), 'numpy.atleast_1d', 'np.atleast_1d', (["sample['mode']"], {}), "(sample['mode'])\n", (4886, 4902), True, 'import numpy as np\n'), ((8563, 8586), 'os.listdir', 'os.listdir', (['dataset_dir'], {}), '(dataset_dir)\n', (8573, 8586), False, 'import os\n'), ((8708, 8760), 'oatomobile.core.dataset.Episode', 'Episode', ([], {'parent_dir': 'dataset_dir', 'token': 'episode_token'}), '(parent_dir=dataset_dir, token=episode_token)\n', (8715, 8760), False, 'from oatomobile.core.dataset import Episode\n'), ((11588, 11630), 'oatomobile.util.graphics.lidar_2darray_to_rgb', 'gutil.lidar_2darray_to_rgb', (["datum['lidar']"], {}), "(datum['lidar'])\n", (11614, 11630), True, 'from oatomobile.util import graphics as gutil\n'), ((11647, 11679), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.0, 3.0)'}), '(figsize=(3.0, 3.0))\n', (11659, 11679), True, 'import matplotlib.pyplot as plt\n'), ((12226, 12258), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.0, 3.0)'}), '(figsize=(3.0, 3.0))\n', (12238, 12258), True, 'import matplotlib.pyplot as plt\n'), ((12763, 12795), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.0, 3.0)'}), '(figsize=(3.0, 3.0))\n', (12775, 12795), True, 'import matplotlib.pyplot as plt\n'), ((14350, 14382), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.0, 3.0)'}), '(figsize=(3.0, 3.0))\n', (14362, 14382), True, 'import matplotlib.pyplot as plt\n'), ((16030, 16070), 'os.path.join', 'os.path.join', (['dataset_dir', '"""**"""', '"""*.npz"""'], {}), "(dataset_dir, '**', '*.npz')\n", (16042, 16070), False, 'import os\n'), ((16917, 16943), 'os.path.join', 'os.path.join', (['output_fname'], {}), '(output_fname)\n', (16929, 16943), False, 'import os\n'), ((17618, 17652), 'os.path.join', 'os.path.join', (['dataset_dir', '"""*.npz"""'], {}), "(dataset_dir, '*.npz')\n", (17630, 17652), False, 'import os\n'), ((17728, 17749), 'numpy.load', 'np.load', (['filenames[0]'], {}), '(filenames[0])\n', (17735, 17749), True, 'import numpy as np\n'), ((17970, 17990), 'tensorflow.TensorShape', 'tf.TensorShape', (['(1,)'], {}), '((1,))\n', (17984, 17990), True, 'import tensorflow as tf\n'), ((3976, 4003), 'numpy.atleast_1d', 'np.atleast_1d', (['sample[attr]'], {}), '(sample[attr])\n', (3989, 4003), True, 'import numpy as np\n'), ((4595, 4626), 'numpy.arccos', 'np.arccos', (['(x_T / (norm + 0.001))'], {}), '(x_T / (norm + 0.001))\n', (4604, 4626), True, 'import numpy as np\n'), ((7716, 7821), 'oatomobile.core.loop.EnvironmentLoop', 'EnvironmentLoop', ([], {'agent_fn': 'AutopilotAgent', 'environment': 'env', 'render_mode': "('human' if render else 'none')"}), "(agent_fn=AutopilotAgent, environment=env, render_mode=\n 'human' if render else 'none')\n", (7731, 7821), False, 'from oatomobile.core.loop import EnvironmentLoop\n'), ((11707, 11737), 'numpy.transpose', 'np.transpose', (['lidar', '(1, 0, 2)'], {}), '(lidar, 
(1, 0, 2))\n', (11719, 11737), True, 'import numpy as np\n'), ((11954, 11991), 'os.path.join', 'os.path.join', (['output_dir', '"""lidar.png"""'], {}), "(output_dir, 'lidar.png')\n", (11966, 11991), False, 'import os\n'), ((12431, 12479), 'os.path.join', 'os.path.join', (['output_dir', '"""front_camera_rgb.png"""'], {}), "(output_dir, 'front_camera_rgb.png')\n", (12443, 12479), False, 'import os\n'), ((14028, 14087), 'os.path.join', 'os.path.join', (['output_dir', '"""bird_view_camera_cityscapes.png"""'], {}), "(output_dir, 'bird_view_camera_cityscapes.png')\n", (14040, 14087), False, 'import os\n'), ((15270, 15322), 'os.path.join', 'os.path.join', (['output_dir', '"""bird_view_camera_rgb.png"""'], {}), "(output_dir, 'bird_view_camera_rgb.png')\n", (15282, 15322), False, 'import os\n'), ((4225, 4262), 'numpy.transpose', 'np.transpose', (['sample[attr]', '(2, 0, 1)'], {}), '(sample[attr], (2, 0, 1))\n', (4237, 4262), True, 'import numpy as np\n'), ((9718, 9741), 'numpy.asarray', 'np.asarray', (['player_past'], {}), '(player_past)\n', (9728, 9741), True, 'import numpy as np\n'), ((9811, 9932), 'oatomobile.util.carla.world2local', 'cutil.world2local', ([], {'current_location': 'current_location', 'current_rotation': 'current_rotation', 'world_locations': 'player_past'}), '(current_location=current_location, current_rotation=\n current_rotation, world_locations=player_past)\n', (9828, 9932), True, 'from oatomobile.util import carla as cutil\n'), ((10322, 10347), 'numpy.asarray', 'np.asarray', (['player_future'], {}), '(player_future)\n', (10332, 10347), True, 'import numpy as np\n'), ((10421, 10544), 'oatomobile.util.carla.world2local', 'cutil.world2local', ([], {'current_location': 'current_location', 'current_rotation': 'current_rotation', 'world_locations': 'player_future'}), '(current_location=current_location, current_rotation=\n current_rotation, world_locations=player_future)\n', (10438, 10544), True, 'from oatomobile.util import carla as cutil\n'), ((20567, 
20601), 'os.path.join', 'os.path.join', (['dataset_dir', '"""*.npz"""'], {}), "(dataset_dir, '*.npz')\n", (20579, 20601), False, 'import os\n'), ((16485, 16496), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (16493, 16496), False, 'import sys\n'), ((17856, 17886), 'numpy.atleast_1d', 'np.atleast_1d', (['datum[modality]'], {}), '(datum[modality])\n', (17869, 17886), True, 'import numpy as np\n'), ((10949, 10960), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (10957, 10960), False, 'import sys\n')] |
__author__ = "ajshajib", "sibirrer"
"""
Multi-Gaussian expansion fitting, based on Capellari 2002, http://adsabs.harvard.edu/abs/2002MNRAS.333..400C
"""
import numpy as np
from scipy.optimize import nnls
import warnings
from lenstronomy.LightModel.Profiles.gaussian import Gaussian
# Module-level instance of lenstronomy's Gaussian light profile.
# NOTE(review): it appears unused by the functions below, which call the
# local `gaussian()` helper instead -- confirm against other callers before
# removing.
gaussian_func = Gaussian()
def gaussian(R, sigma, amp):
    """Evaluate a normalized circular 2d Gaussian profile at radius R.

    :param R: radius (scalar or numpy array)
    :param sigma: Gaussian sigma
    :param amp: normalization (total integrated amplitude)
    :return: profile value(s) amp / (2 pi sigma^2) * exp(-R^2 / (2 sigma^2))
    """
    sig = float(sigma)
    # Peak value such that the profile integrates to `amp` over the plane.
    peak = amp / (2 * np.pi * sigma ** 2)
    return peak * np.exp(-0.5 * (R / sig) ** 2)
def mge_1d(r_array, flux_r, N=20, linspace=False):
    """Fit a 1d flux profile with a Multi-Gaussian Expansion (MGE).

    If the fit with N components fails, the routine retries recursively with
    one component fewer until it succeeds (or N reaches zero).

    :param r_array: list of radii (numpy array)
    :param flux_r: list of flux values (numpy array)
    :param N: number of Gaussians
    :param linspace: if True, place the Gaussian sigmas on a linear (rather
        than logarithmic) grid of radii
    :return: amplitudes, Gaussian sigmas, and residual norm of the best fit
    """
    if N == 0:
        # The recursive fallback removed every component; return a dummy fit.
        warnings.warn('Number of MGE went down to zero! This should not happen!', Warning)
        amplitudes = [0]
        sigmas = [1]
        norm = 0
        return amplitudes, sigmas, norm
    try:
        amplitudes, sigmas, norm = _mge_1d(r_array, flux_r, N, linspace=linspace)
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any genuine fitting failure (e.g. NNLS not
        # converging) still triggers the retry with one Gaussian fewer.
        amplitudes, sigmas, norm = mge_1d(r_array, flux_r, N=N - 1, linspace=linspace)
    return amplitudes, sigmas, norm
def _mge_1d(r_array, flux_r, N=20, linspace=False):
    """Single attempt at an N-component MGE fit of a 1d profile.

    :param r_array: radii (numpy array)
    :param flux_r: flux values at those radii (numpy array)
    :param N: number of Gaussian components
    :param linspace: if True, sigmas are placed on a linear grid instead of
        a logarithmic one
    :return: non-negative amplitudes, component sigmas, and NNLS residual
    """
    # Candidate sigmas: interior points of an (N+2)-point grid spanning
    # [r_min, r_max / 2] (end points are dropped).
    if linspace is True:
        sigmas = np.linspace(r_array[0], r_array[-1] / 2, N + 2)[1:-1]
    else:
        sigmas = np.logspace(np.log10(r_array[0]), np.log10((r_array[-1] + 0.0000001) / 2.), N + 2)[1:-1]
    # Design matrix: one unit-amplitude Gaussian per column.
    A = np.zeros((len(flux_r), N))
    for j, sigma_j in enumerate(sigmas):
        A[:, j] = gaussian(r_array, sigma_j, 1.)
    # Non-negative least squares gives the best-fit amplitudes.
    amplitudes, norm = nnls(A, flux_r)
    return amplitudes, sigmas, norm
def de_projection_3d(amplitudes, sigmas):
    """
    De-projects a Gaussian (or a list of multiple Gaussians) from a 2d
    projected profile to a 3d profile.

    The sigmas are unchanged by the de-projection; each projected amplitude
    is rescaled by 1 / (sigma * sqrt(2 pi)).

    :param amplitudes: amplitude(s) of the projected 2d Gaussian component(s)
    :param sigmas: Gaussian sigma(s) of the component(s)
    :return: 3d amplitudes and the (unchanged) sigmas
    """
    # De-projection of a 2d Gaussian to 3d: divide by sigma * sqrt(2 pi).
    amplitudes_3d = amplitudes / sigmas / np.sqrt(2*np.pi)
return amplitudes_3d, sigmas | [
"scipy.optimize.nnls",
"numpy.arange",
"numpy.linspace",
"warnings.warn",
"numpy.log10",
"lenstronomy.LightModel.Profiles.gaussian.Gaussian",
"numpy.sqrt"
] | [((299, 309), 'lenstronomy.LightModel.Profiles.gaussian.Gaussian', 'Gaussian', ([], {}), '()\n', (307, 309), False, 'from lenstronomy.LightModel.Profiles.gaussian import Gaussian\n'), ((1757, 1778), 'numpy.arange', 'np.arange', (['A.shape[1]'], {}), '(A.shape[1])\n', (1766, 1778), True, 'import numpy as np\n'), ((1854, 1869), 'scipy.optimize.nnls', 'nnls', (['A', 'flux_r'], {}), '(A, flux_r)\n', (1858, 1869), False, 'from scipy.optimize import nnls\n'), ((858, 944), 'warnings.warn', 'warnings.warn', (['"""Number of MGE went down to zero! This should not happen!"""', 'Warning'], {}), "('Number of MGE went down to zero! This should not happen!',\n Warning)\n", (871, 944), False, 'import warnings\n'), ((2158, 2176), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2165, 2176), True, 'import numpy as np\n'), ((1471, 1518), 'numpy.linspace', 'np.linspace', (['r_array[0]', '(r_array[-1] / 2)', '(N + 2)'], {}), '(r_array[0], r_array[-1] / 2, N + 2)\n', (1482, 1518), True, 'import numpy as np\n'), ((1564, 1584), 'numpy.log10', 'np.log10', (['r_array[0]'], {}), '(r_array[0])\n', (1572, 1584), True, 'import numpy as np\n'), ((1586, 1623), 'numpy.log10', 'np.log10', (['((r_array[-1] + 1e-07) / 2.0)'], {}), '((r_array[-1] + 1e-07) / 2.0)\n', (1594, 1623), True, 'import numpy as np\n')] |
from pathlib import Path
import torch
import glob
import numpy as np
from lib.pc_utils import read_plyfile, save_point_cloud
from concurrent.futures import ProcessPoolExecutor
# Input ScanNet point clouds (raw scans) and per-point label files.
SCANNET_DATA_RAW_PATH = Path('/home/aidrive1/workspace/luoly/dataset/scn/scan_dataset/')
SCANNET_LABEL_RAW_PATH = Path('/home/aidrive1/workspace/luoly/dataset/scn/scan_label/')
# Destination for the preprocessed dataset.
SCANNET_OUT_PATH = Path('/home/aidrive1/workspace/luoly/dataset/Min_scan/eff_100/')
TRAIN_DEST = 'train'
TEST_DEST = 'test'
# Maps output split directory -> input scan directory.
SUBSETS = {TRAIN_DEST: 'scans', TEST_DEST: 'scans_test'}
# Common suffix of every ScanNet point-cloud file.
POINTCLOUD_FILE = '_vh_clean_2.ply'
# Scene files with known-bad label ids; fixed up at the end of the script.
BUGS = {
    'train/scene0270_00.ply': 50,
    'train/scene0270_02.ply': 50,
    'train/scene0384_00.ply': 149,
}
print('start preprocess')
# Preprocess data.
# `t` maps scene id -> collection of point indices to keep (used by
# handle_process). NOTE(review): 'points100' is a torch-serialized file
# loaded relative to the current working directory -- confirm its schema.
t = torch.load('points100')
key = sorted(t.keys())
def handle_process(path):
    """Process one ScanNet scan: attach (filtered) labels and save the result.

    :param path: comma-joined pair "<ply_path>,<output_dir>" as built by the
        driver loop below.

    Relies on the module-level globals `t` (per-scene kept point indices
    loaded from 'points100') and `POINTCLOUD_FILE`.
    """
    f = Path(path.split(',')[0])
    phase_out_path = Path(path.split(',')[1])
    pointcloud = read_plyfile(f)
    # Make sure alpha value is meaningless.
    assert np.unique(pointcloud[:, -1]).size == 1
    # Load label file (mirrors the scan tree under `scan_label`).
    label_f = f.parent.parent.parent.parent / ('scan_label') / (f.parent.parent.stem + '/' + f.parent.stem + '/' + f.stem + '.labels' + f.suffix)
    if label_f.is_file():
        label = read_plyfile(label_f)
        # Sanity check that the pointcloud and its label has same vertices.
        assert pointcloud.shape[0] == label.shape[0]
        assert np.allclose(pointcloud[:, :3], label[:, :3])
    else:  # Label may not exist in test case.
        label = np.zeros_like(pointcloud)
    out_f = phase_out_path / (f.name[:-len(POINTCLOUD_FILE)] + f.suffix)
    w = np.array([label[:, -1]]).T
    # Default every point to the ignore label 255.
    k = np.ones(len(w)) * 255
    label_str = str(f)
    # Scene id (e.g. 'scene0000_00') sliced out of the absolute path.
    # NOTE(review): assumes a fixed path length -- fragile; verify layout.
    scene_id = label_str[-27:-15]
    print(scene_id)
    if scene_id in t:
        # PERF FIX: the original re-evaluated `t[label_str[-27:-15]]`
        # (string slice + dict lookup) on every loop iteration; hoist the
        # loop-invariant lookup. Testing `scene_id in t` is also O(1) and
        # gives an identical result to `in key`, since `key` is exactly
        # `sorted(t.keys())`.
        kept_points = t[scene_id]
        for c in range(len(w)):
            if c in kept_points:
                k[c] = w[c]
            else:
                k[c] = 255
    label[:, -1] = k
    processed = np.hstack((pointcloud[:, :6], np.array([label[:, -1]]).T))
    save_point_cloud(processed, out_f, with_label=True, verbose=False)
# Build "<ply_path>,<output_dir>" work items for every scan in every subset.
path_list = []
for out_path, in_path in SUBSETS.items():
    phase_out_path = SCANNET_OUT_PATH / out_path
    phase_out_path.mkdir(parents=True, exist_ok=True)
    for f in (SCANNET_DATA_RAW_PATH / in_path).glob('*/*' + POINTCLOUD_FILE):
        path_list.append(str(f) + ',' + str(phase_out_path))
#path_list = sorted(path_list)
# Process scans in parallel worker processes.
pool = ProcessPoolExecutor(max_workers=20)
result = list(pool.map(handle_process, path_list))
# Fix bug in the data.
for files, bug_index in BUGS.items():
    print(files)
    for f in SCANNET_OUT_PATH.glob(files):
        pointcloud = read_plyfile(f)
        bug_mask = pointcloud[:, -1] == bug_index
        print(f'Fixing {f} bugged label {bug_index} x {bug_mask.sum()}')
        # Reset the bugged label id to 0.
        pointcloud[bug_mask, -1] = 0
        save_point_cloud(pointcloud, f, with_label=True, verbose=False)
| [
"numpy.zeros_like",
"lib.pc_utils.read_plyfile",
"concurrent.futures.ProcessPoolExecutor",
"torch.load",
"lib.pc_utils.save_point_cloud",
"numpy.allclose",
"pathlib.Path",
"numpy.array",
"numpy.unique"
] | [((200, 264), 'pathlib.Path', 'Path', (['"""/home/aidrive1/workspace/luoly/dataset/scn/scan_dataset/"""'], {}), "('/home/aidrive1/workspace/luoly/dataset/scn/scan_dataset/')\n", (204, 264), False, 'from pathlib import Path\n'), ((290, 352), 'pathlib.Path', 'Path', (['"""/home/aidrive1/workspace/luoly/dataset/scn/scan_label/"""'], {}), "('/home/aidrive1/workspace/luoly/dataset/scn/scan_label/')\n", (294, 352), False, 'from pathlib import Path\n'), ((372, 436), 'pathlib.Path', 'Path', (['"""/home/aidrive1/workspace/luoly/dataset/Min_scan/eff_100/"""'], {}), "('/home/aidrive1/workspace/luoly/dataset/Min_scan/eff_100/')\n", (376, 436), False, 'from pathlib import Path\n'), ((733, 756), 'torch.load', 'torch.load', (['"""points100"""'], {}), "('points100')\n", (743, 756), False, 'import torch\n'), ((2323, 2358), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': '(20)'}), '(max_workers=20)\n', (2342, 2358), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((897, 912), 'lib.pc_utils.read_plyfile', 'read_plyfile', (['f'], {}), '(f)\n', (909, 912), False, 'from lib.pc_utils import read_plyfile, save_point_cloud\n'), ((1927, 1993), 'lib.pc_utils.save_point_cloud', 'save_point_cloud', (['processed', 'out_f'], {'with_label': '(True)', 'verbose': '(False)'}), '(processed, out_f, with_label=True, verbose=False)\n', (1943, 1993), False, 'from lib.pc_utils import read_plyfile, save_point_cloud\n'), ((1203, 1224), 'lib.pc_utils.read_plyfile', 'read_plyfile', (['label_f'], {}), '(label_f)\n', (1215, 1224), False, 'from lib.pc_utils import read_plyfile, save_point_cloud\n'), ((1357, 1401), 'numpy.allclose', 'np.allclose', (['pointcloud[:, :3]', 'label[:, :3]'], {}), '(pointcloud[:, :3], label[:, :3])\n', (1368, 1401), True, 'import numpy as np\n'), ((1459, 1484), 'numpy.zeros_like', 'np.zeros_like', (['pointcloud'], {}), '(pointcloud)\n', (1472, 1484), True, 'import numpy as np\n'), ((1562, 1586), 'numpy.array', 'np.array', 
(['[label[:, -1]]'], {}), '([label[:, -1]])\n', (1570, 1586), True, 'import numpy as np\n'), ((2546, 2561), 'lib.pc_utils.read_plyfile', 'read_plyfile', (['f'], {}), '(f)\n', (2558, 2561), False, 'from lib.pc_utils import read_plyfile, save_point_cloud\n'), ((2714, 2777), 'lib.pc_utils.save_point_cloud', 'save_point_cloud', (['pointcloud', 'f'], {'with_label': '(True)', 'verbose': '(False)'}), '(pointcloud, f, with_label=True, verbose=False)\n', (2730, 2777), False, 'from lib.pc_utils import read_plyfile, save_point_cloud\n'), ((964, 992), 'numpy.unique', 'np.unique', (['pointcloud[:, -1]'], {}), '(pointcloud[:, -1])\n', (973, 992), True, 'import numpy as np\n'), ((1896, 1920), 'numpy.array', 'np.array', (['[label[:, -1]]'], {}), '([label[:, -1]])\n', (1904, 1920), True, 'import numpy as np\n')] |
import time
import numpy as np
import pandas as pd
"""Function to take user input
1 user input is taken :
1. input_images : As set of 3 input image nodes taken from the user for PPR Algorithm
"""
def userInput():
    """Prompt the user on stdin for three input image IDs.

    :return: list of the three entered image-ID strings, in entry order
    """
    print('Please enter input image IDs :')
    return [input() for _ in range(3)]
"""
Main function to calculate the Page Rank of all the nodes of the graph.
This function takes 4 optional input parameters.
1. graph - Mandatory parameter of type DataFrame which takes an adjacency matrix of the graph.
2. input_nodes - Optional parameter which accepts a list of input graph nodes. If input nodes are not provided, it is taken as input from user.
3. beta - Damping factor. Default Value : 0.85
4. epsilon : Error Threshold of Page Rank scores. Default Value : 0.0000001
"""
def pageRank(graph, input_nodes=None, beta=0.85, epsilon=0.0000001):
    """Compute Personalized PageRank scores for every node of the graph.

    :param graph: adjacency matrix (DataFrame or 2d array); column j holds
        the out-links of node j.
    :param input_nodes: optional list of seed node ids; prompted from the
        user via `userInput()` when omitted.
    :param beta: damping factor. Default: 0.85.
    :param epsilon: convergence threshold on the score change. Default: 1e-7.
    :return: normalized PageRank score vector (sums to 1).
    """
    # Column-stochastic transition matrix used in the power iteration.
    transition = graph / np.sum(graph, axis=0)

    # Teleportation vector and initial scores: uniform mass over the seed
    # nodes, zero everywhere else.
    num_nodes = len(graph)
    teleport = np.zeros(num_nodes)
    scores = np.zeros(num_nodes)
    if input_nodes is None:
        input_nodes = userInput()
    seed_weight = 1 / len(input_nodes)
    for node_id in input_nodes:
        teleport[int(node_id)] = seed_weight
        scores[int(node_id)] = seed_weight

    print('Calculating Personalized PageRank Scores with a Damping Factor of ' + str(beta) + '...')

    # Power iteration until the scores stop moving.
    while True:
        previous = scores
        scores = beta * np.dot(transition, previous) + (1 - beta) * teleport
        if np.linalg.norm(scores - previous) < epsilon:
            break

    # Normalize so the scores sum to one.
    return scores / sum(scores)
return pageRankScores
if __name__ == '__main__':
    start_time = time.time()
    # Adjacency matrix CSV with the node ids in the first (index) column.
    file_name = 'NodeGraph.csv'
    adjacency_matrix = pd.DataFrame(pd.read_csv(file_name, index_col=0).values)
    # Seed nodes are prompted from the user (input_nodes defaults to None).
    pageRankScores = pageRank(graph=adjacency_matrix, beta=0.5)
    print(pageRankScores)
    end_time = time.time()
    print('Total Time : ', end_time - start_time)
| [
"numpy.sum",
"pandas.read_csv",
"numpy.zeros",
"time.time",
"numpy.linalg.norm",
"numpy.dot"
] | [((1189, 1204), 'numpy.zeros', 'np.zeros', (['nodes'], {}), '(nodes)\n', (1197, 1204), True, 'import numpy as np\n'), ((1226, 1241), 'numpy.zeros', 'np.zeros', (['nodes'], {}), '(nodes)\n', (1234, 1241), True, 'import numpy as np\n'), ((2176, 2187), 'time.time', 'time.time', ([], {}), '()\n', (2185, 2187), False, 'import time\n'), ((2409, 2420), 'time.time', 'time.time', ([], {}), '()\n', (2418, 2420), False, 'import time\n'), ((1024, 1045), 'numpy.sum', 'np.sum', (['graph'], {'axis': '(0)'}), '(graph, axis=0)\n', (1030, 1045), True, 'import numpy as np\n'), ((1929, 1979), 'numpy.linalg.norm', 'np.linalg.norm', (['(pageRankScores - oldPageRankScores)'], {}), '(pageRankScores - oldPageRankScores)\n', (1943, 1979), True, 'import numpy as np\n'), ((2257, 2292), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'index_col': '(0)'}), '(file_name, index_col=0)\n', (2268, 2292), True, 'import pandas as pd\n'), ((1853, 1878), 'numpy.dot', 'np.dot', (['M', 'pageRankScores'], {}), '(M, pageRankScores)\n', (1859, 1878), True, 'import numpy as np\n')] |
# Script to produce plots showing path taken by various optimisers
# from:
#
# <NAME>. 2012. Retreival of Forest Structure and Biomass From Radar Data using
# Backscatter Modelling and Inversion. PhD Thesis. Aberystwyth University.
#
# Using the Rosenbrock function:
# http://en.wikipedia.org/wiki/Rosenbrock_function
#
# Plot based on code from wikipedia article.
#
# <NAME> (<EMAIL>)
#
from scipy.optimize import *
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import Normalize
from matplotlib.path import Path
import matplotlib.patches as patches
import numpy as np
# For each optimiser: minimise the Rosenbrock function from x0 = (-1, -1),
# record every iterate (retall=1), and overlay the optimisation path on a
# contour plot of the function. One PDF is written per optimiser.
optimiserList = ['neldermead', 'cg', 'bfgs', 'powell']
for optimiser in optimiserList:
    x0 = [-1, -1]
    # NOTE(review): `iter` and `func_calls`/`funcalls` shadow/vary between
    # branches; only `allvecs` (the iterate history) is used afterwards.
    if optimiser == 'neldermead':
        (xopt, fopt, iter, funcalls, warnflag, allvecs) = fmin(rosen, x0, xtol=1e-8, full_output=1, retall=1)
    elif optimiser == 'cg':
        (xopt, fopt, iter, funcalls, warnflag, allvecs) = fmin_cg(rosen, x0, fprime=rosen_der, full_output=1, retall=1)
    elif optimiser == 'bfgs':
        (xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag, allvecs) = fmin_bfgs(rosen, x0, fprime=rosen_der, full_output=1, retall=1)
    elif (optimiser == 'powell'):
        (xopt, fopt, direc, iter, funcalls, warnflag, allvecs) = fmin_powell(rosen, x0, xtol=1e-8, full_output=1, retall=1)
    allvecs = np.array(allvecs)
    # Grid over [-2.1, 2.1)^2 for the contour background.
    x0 = np.arange(-2.1,2.1,0.1)
    y0 = np.ones(x0.size)
    x = []
    y = []
    z = []
    for i in range(x0.size):
        x.append(x0)
        for j in range(x0.size):
            y.append(y0 * x0[j])
    x = np.array(x)
    y = np.array(y)
    # Build a matplotlib Path from the iterate sequence: MOVETO to the first
    # point, LINETO for every subsequent one.
    path1 = []
    moveCmd = []
    first = True
    for val in allvecs:
        if first:
            moveCmd.append(Path.MOVETO)
            first = False
        else:
            moveCmd.append(Path.LINETO)
        path1.append((val[0], val[1]))
    # Evaluate the Rosenbrock function on the grid (row i is y = x0[i]).
    for i in range (x0.size):
        z0 = []
        for j in range(y0.size):
            val = rosen([x0[j], x0[i]])
            z0.append(val)
        z.append(z0)
    z = np.array(z)
    contours = [0,1,2,3,4,5,10,20,30,40,50,100,500,1000,2000]
    contoursLine = [10,20,30,40,50,100,500,1000,2000]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    CS = plt.contour(x0,x0,z,contoursLine,linewidths=0.5,colors='k')
    # NOTE(review): `col=` is not a contourf keyword — the colormap argument is
    # `cmap=`; confirm this ever applied cm.jet (recent matplotlib raises here).
    CS = plt.contourf(x0,x0,z,contours, norm=Normalize(vmin=0, vmax=100, clip=True), col=cm.jet)
    plt.colorbar() # draw colorbar
    # Plot the iterates and the connecting path on top of the contours.
    plt.scatter(allvecs[:,0],allvecs[:,1],marker='o',c='black',s=10)
    path = Path(path1, moveCmd)
    patch = patches.PathPatch(path, facecolor='none', lw=2)
    ax.add_patch(patch)
    plt.xlim(-2,2)
    plt.ylim(-2,2)
    # if optimiser != 'anneal':
    #     plt.annotate('Minima', xy=(1, 1))
    plt.xlabel("x")
    plt.ylabel("y")
    #plt.legend(('(1.5, 1.5)','(2, 2)','(-1.5, -1.5)'),loc='upper left')
    #plt.show()
    # One output file per optimiser.
    if optimiser == 'neldermead':
        plt.savefig('neldermead.pdf', format='pdf')
    elif optimiser == 'cg':
        plt.savefig('conjugategradient.pdf', format='pdf')
    elif optimiser == 'bfgs':
        plt.savefig('bfgs.pdf', format='pdf')
    elif (optimiser == 'powell'):
        plt.savefig('powell.pdf', format='pdf')
| [
"matplotlib.pyplot.xlim",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylabel",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"matplotlib.path.Path",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.contour"... | [((1355, 1372), 'numpy.array', 'np.array', (['allvecs'], {}), '(allvecs)\n', (1363, 1372), True, 'import numpy as np\n'), ((1387, 1412), 'numpy.arange', 'np.arange', (['(-2.1)', '(2.1)', '(0.1)'], {}), '(-2.1, 2.1, 0.1)\n', (1396, 1412), True, 'import numpy as np\n'), ((1420, 1436), 'numpy.ones', 'np.ones', (['x0.size'], {}), '(x0.size)\n', (1427, 1436), True, 'import numpy as np\n'), ((1605, 1616), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1613, 1616), True, 'import numpy as np\n'), ((1625, 1636), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1633, 1636), True, 'import numpy as np\n'), ((2087, 2098), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (2095, 2098), True, 'import numpy as np\n'), ((2230, 2242), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2240, 2242), True, 'import matplotlib.pyplot as plt\n'), ((2282, 2346), 'matplotlib.pyplot.contour', 'plt.contour', (['x0', 'x0', 'z', 'contoursLine'], {'linewidths': '(0.5)', 'colors': '"""k"""'}), "(x0, x0, z, contoursLine, linewidths=0.5, colors='k')\n", (2293, 2346), True, 'import matplotlib.pyplot as plt\n'), ((2443, 2457), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2455, 2457), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2572), 'matplotlib.pyplot.scatter', 'plt.scatter', (['allvecs[:, 0]', 'allvecs[:, 1]'], {'marker': '"""o"""', 'c': '"""black"""', 's': '(10)'}), "(allvecs[:, 0], allvecs[:, 1], marker='o', c='black', s=10)\n", (2513, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2583, 2603), 'matplotlib.path.Path', 'Path', (['path1', 'moveCmd'], {}), '(path1, moveCmd)\n', (2587, 2603), False, 'from matplotlib.path import Path\n'), ((2616, 2663), 'matplotlib.patches.PathPatch', 'patches.PathPatch', (['path'], {'facecolor': '"""none"""', 'lw': '(2)'}), "(path, facecolor='none', lw=2)\n", (2633, 2663), True, 'import matplotlib.patches as patches\n'), ((2697, 2712), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2)', 
'(2)'], {}), '(-2, 2)\n', (2705, 2712), True, 'import matplotlib.pyplot as plt\n'), ((2716, 2731), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2)', '(2)'], {}), '(-2, 2)\n', (2724, 2731), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2824), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2819, 2824), True, 'import matplotlib.pyplot as plt\n'), ((2829, 2844), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2839, 2844), True, 'import matplotlib.pyplot as plt\n'), ((2976, 3019), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""neldermead.pdf"""'], {'format': '"""pdf"""'}), "('neldermead.pdf', format='pdf')\n", (2987, 3019), True, 'import matplotlib.pyplot as plt\n'), ((2387, 2425), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0)', 'vmax': '(100)', 'clip': '(True)'}), '(vmin=0, vmax=100, clip=True)\n', (2396, 2425), False, 'from matplotlib.colors import Normalize\n'), ((3056, 3106), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""conjugategradient.pdf"""'], {'format': '"""pdf"""'}), "('conjugategradient.pdf', format='pdf')\n", (3067, 3106), True, 'import matplotlib.pyplot as plt\n'), ((3145, 3182), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bfgs.pdf"""'], {'format': '"""pdf"""'}), "('bfgs.pdf', format='pdf')\n", (3156, 3182), True, 'import matplotlib.pyplot as plt\n'), ((3225, 3264), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""powell.pdf"""'], {'format': '"""pdf"""'}), "('powell.pdf', format='pdf')\n", (3236, 3264), True, 'import matplotlib.pyplot as plt\n')] |
import os
from astropy.io import fits
from os.path import split, splitext
from random import randint
from glob import glob
import numpy as np
import torch
from scipy.ndimage import rotate
from torch.utils.data import Dataset
from torchvision.transforms import Compose, Lambda, ToTensor, Normalize, Pad
from PIL import Image
class CustomDataset(Dataset):
    """Paired Input/Target dataset stored under ./datasets/<dataset_name>.

    Files live in {Train,Test}/{Input,Target}; the two sorted file lists are
    assumed to be index-aligned (matching filenames). FITS ("fits"/"fts") and
    NumPy ("npy") samples are rescaled with the configured dynamic range;
    png/jpeg images are rescaled from [0, 255]. Every sample is returned as a
    float tensor normalized to [-1, 1]. In training mode a random rotation
    and random crop are drawn once per sample and applied to BOTH input and
    target so the pair stays spatially aligned.
    """
    def __init__(self, opt):
        """Collect the input/target file lists according to *opt*."""
        super(CustomDataset, self).__init__()
        self.opt = opt
        dataset_dir = os.path.join('./datasets', opt.dataset_name)
        self.input_format = opt.data_format_input
        self.target_format = opt.data_format_target
        if opt.is_train:
            self.label_path_list = sorted(glob(os.path.join(dataset_dir, 'Train', 'Input', '*.' + self.input_format)))
            self.target_path_list = sorted(glob(os.path.join(dataset_dir, 'Train', 'Target', '*.' + self.target_format)))
        else:
            self.label_path_list = sorted(glob(os.path.join(dataset_dir, 'Test', 'Input', '*.' + self.input_format)))
            self.target_path_list = sorted(glob(os.path.join(dataset_dir, 'Test', 'Target', '*.' + self.target_format)))
    def __getitem__(self, index):
        """Return (input_tensor, target_tensor, input_file_stem, target_file_stem).

        Raises
        ------
        NotImplementedError
            If the configured input/target data format is unsupported.
        """
        if self.opt.is_train:
            # One augmentation draw per call; reused for input AND target.
            self.angle = randint(-self.opt.max_rotation_angle, self.opt.max_rotation_angle)
            self.offset_x = randint(0, 2 * self.opt.padding_size - 1) if self.opt.padding_size > 0 else 0
            self.offset_y = randint(0, 2 * self.opt.padding_size - 1) if self.opt.padding_size > 0 else 0
            if self.input_format in ["fits", "fts", "npy"]:
                if self.input_format in ["fits", "fts"]:
                    label_array = np.array(fits.open(self.label_path_list[index]))
                else:
                    label_array = np.load(self.label_path_list[index], allow_pickle=True)
                # rotate -> zero-pad -> random crop, then map values to [0, 1].
                label_array = self.__rotate(label_array)
                label_array = self.__pad(label_array, self.opt.padding_size)
                label_array = self.__random_crop(label_array)
                label_array = self.__convert_range(label_array,
                                                   min=-self.opt.dynamic_range_input,
                                                   max=2 * self.opt.dynamic_range_input)
                # [0, 1] -> [-1, 1].  NOTE(review): unlike the target branch,
                # no explicit float32 dtype here — confirm if intended.
                label_tensor = torch.tensor(label_array)
                label_tensor -= 0.5
                label_tensor = label_tensor / 0.5
                if len(label_tensor.shape) == 2:  # HxW only: add channel axis.
                    label_tensor = label_tensor.unsqueeze(dim=0)
            elif self.input_format in ["png", "PNG", "jpeg", "JPEG", "jpg", "JPG"]:
                transforms = Compose([Lambda(lambda x: self.__to_numpy(x)),
                                      Lambda(lambda x: self.__rotate(x)),
                                      Lambda(lambda x: self.__pad(x, self.opt.padding_size)),
                                      Lambda(lambda x: self.__random_crop(x)),
                                      Lambda(lambda x: self.__convert_range(x, min=0, max=255)),
                                      ToTensor(),
                                      Normalize(mean=[0.5], std=[0.5])])
                label_array = Image.open(self.label_path_list[index])
                label_tensor = transforms(label_array)
            else:
                # Fix: the original constructed the exception without raising
                # it, which later surfaced as an unrelated NameError.
                raise NotImplementedError("Please check data_format_input option. It has to be npy or png.")
            if self.target_format in ["fits", "fts", "npy"]:
                if self.target_format in ["fits", "fts"]:
                    target_array = np.array(fits.open(self.target_path_list[index]))
                else:
                    target_array = np.load(self.target_path_list[index], allow_pickle=True)
                # Same augmentation parameters as the input branch above.
                target_array = self.__rotate(target_array)
                target_array = self.__pad(target_array, self.opt.padding_size)
                target_array = self.__random_crop(target_array)
                target_array = self.__convert_range(target_array,
                                                    min=-self.opt.dynamic_range_target,
                                                    max=2 * self.opt.dynamic_range_target)
                target_tensor = torch.tensor(target_array, dtype=torch.float32)
                target_tensor -= 0.5
                target_tensor = target_tensor / 0.5
                target_tensor = target_tensor.unsqueeze(dim=0)  # Add channel dimension.
            elif self.target_format in ["png", "PNG", "jpeg", "JPEG", "jpg", "JPG"]:
                transforms = Compose([Lambda(lambda x: self.__to_numpy(x)),
                                      Lambda(lambda x: self.__rotate(x)),
                                      Lambda(lambda x: self.__pad(x, self.opt.padding_size)),
                                      Lambda(lambda x: self.__random_crop(x)),
                                      Lambda(lambda x: self.__convert_range(x, min=0.0, max=255.0)),
                                      ToTensor(),
                                      Normalize(mean=[0.5], std=[0.5])])
                target_array = Image.open(self.target_path_list[index])
                target_tensor = transforms(target_array)
            else:
                raise NotImplementedError("Please check data_format_target option. It has to be fits, fit, npy, jpeg, jpg or png.")
        else:
            # Evaluation path: no rotation/pad/crop augmentation.
            if self.input_format in ["fits", "fts", "npy"]:
                if self.input_format in ["fits", "fts"]:
                    label_array = np.array(fits.open(self.label_path_list[index]))
                else:
                    label_array = np.load(self.label_path_list[index], allow_pickle=True)
                label_array = self.__convert_range(label_array,
                                                   min=-self.opt.dynamic_range_input,
                                                   max=2 * self.opt.dynamic_range_input)
                label_tensor = torch.tensor(label_array)
                label_tensor -= 0.5
                label_tensor = label_tensor / 0.5
                label_tensor = label_tensor.unsqueeze(dim=0)
            elif self.input_format in ["png", "PNG", "jpeg", "JPEG", "jpg", "JPG"]:
                transforms = Compose([Lambda(lambda x: self.__to_numpy(x)),
                                      Lambda(lambda x: self.__convert_range(x, min=0, max=255)),
                                      ToTensor(),
                                      Normalize(mean=[0.5], std=[0.5])])
                label_array = Image.open(self.label_path_list[index])
                label_tensor = transforms(label_array)
            else:
                raise NotImplementedError("Please check data_format option. It has to be npy or png.")
            if self.target_format in ["fits", "fts", "npy"]:
                if self.target_format in ["fits", "fts"]:
                    target_array = np.array(fits.open(self.target_path_list[index]))
                else:
                    target_array = np.load(self.target_path_list[index], allow_pickle=True)
                target_array = self.__convert_range(target_array,
                                                    min=-self.opt.dynamic_range_target,
                                                    max=2 * self.opt.dynamic_range_target)
                target_tensor = torch.tensor(target_array, dtype=torch.float32)
                target_tensor -= 0.5
                target_tensor = target_tensor / 0.5
                target_tensor = target_tensor.unsqueeze(dim=0)  # Add channel dimension.
            elif self.target_format in ["png", "PNG", "jpeg", "JPEG", "jpg", "JPG"]:
                transforms = Compose([Lambda(lambda x: self.__to_numpy(x)),
                                      Lambda(lambda x: self.__convert_range(x, min=0, max=255)),
                                      ToTensor(),
                                      Normalize(mean=[0.5], std=[0.5])])
                target_array = Image.open(self.target_path_list[index])
                target_tensor = transforms(target_array)
            else:
                raise NotImplementedError("Please check data_format option. It has to be fits, fit, npy, jpeg, jpg or png.")
        return label_tensor, target_tensor, splitext(split(self.label_path_list[index])[-1])[0], \
               splitext(split(self.target_path_list[index])[-1])[0]
    def __random_crop(self, x):
        """Crop an up-to-1024x1024 window starting at the per-sample offsets."""
        x = np.array(x)
        x = x[self.offset_x: self.offset_x + 1024, self.offset_y: self.offset_y + 1024]
        return x
    @staticmethod
    def __convert_range(x, min, max):
        """Affine map: subtract *min*, then divide by *max* (in place on arrays)."""
        x -= min
        x = x / max
        return x
    @staticmethod
    def __pad(x, padding_size):
        """Zero-pad *x*; an int pads every side of a 2-D array by that amount."""
        if type(padding_size) == int:
            padding_size = ((padding_size, padding_size), (padding_size, padding_size))
        return np.pad(x, pad_width=padding_size, mode="constant", constant_values=0)
    def __rotate(self, x):
        """Rotate by the per-sample angle (degrees) without changing shape."""
        return rotate(x, self.angle, reshape=False)
    @staticmethod
    def __to_numpy(x):
        """Convert a PIL image / array-like to a float32 numpy array."""
        return np.array(x, dtype=np.float32)
    def __len__(self):
        return len(self.label_path_list)
| [
"numpy.pad",
"numpy.load",
"random.randint",
"PIL.Image.open",
"numpy.array",
"astropy.io.fits.open",
"torchvision.transforms.Normalize",
"os.path.split",
"os.path.join",
"scipy.ndimage.rotate",
"torch.tensor",
"torchvision.transforms.ToTensor"
] | [((476, 520), 'os.path.join', 'os.path.join', (['"""./datasets"""', 'opt.dataset_name'], {}), "('./datasets', opt.dataset_name)\n", (488, 520), False, 'import os\n'), ((8843, 8854), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (8851, 8854), True, 'import numpy as np\n'), ((9263, 9332), 'numpy.pad', 'np.pad', (['x'], {'pad_width': 'padding_size', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(x, pad_width=padding_size, mode='constant', constant_values=0)\n", (9269, 9332), True, 'import numpy as np\n'), ((9376, 9412), 'scipy.ndimage.rotate', 'rotate', (['x', 'self.angle'], {'reshape': '(False)'}), '(x, self.angle, reshape=False)\n', (9382, 9412), False, 'from scipy.ndimage import rotate\n'), ((9470, 9499), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (9478, 9499), True, 'import numpy as np\n'), ((1294, 1360), 'random.randint', 'randint', (['(-self.opt.max_rotation_angle)', 'self.opt.max_rotation_angle'], {}), '(-self.opt.max_rotation_angle, self.opt.max_rotation_angle)\n', (1301, 1360), False, 'from random import randint\n'), ((1390, 1431), 'random.randint', 'randint', (['(0)', '(2 * self.opt.padding_size - 1)'], {}), '(0, 2 * self.opt.padding_size - 1)\n', (1397, 1431), False, 'from random import randint\n'), ((1496, 1537), 'random.randint', 'randint', (['(0)', '(2 * self.opt.padding_size - 1)'], {}), '(0, 2 * self.opt.padding_size - 1)\n', (1503, 1537), False, 'from random import randint\n'), ((2354, 2379), 'torch.tensor', 'torch.tensor', (['label_array'], {}), '(label_array)\n', (2366, 2379), False, 'import torch\n'), ((4384, 4431), 'torch.tensor', 'torch.tensor', (['target_array'], {'dtype': 'torch.float32'}), '(target_array, dtype=torch.float32)\n', (4396, 4431), False, 'import torch\n'), ((6198, 6223), 'torch.tensor', 'torch.tensor', (['label_array'], {}), '(label_array)\n', (6210, 6223), False, 'import torch\n'), ((7672, 7719), 'torch.tensor', 'torch.tensor', (['target_array'], {'dtype': 
'torch.float32'}), '(target_array, dtype=torch.float32)\n', (7684, 7719), False, 'import torch\n'), ((696, 765), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Train"""', '"""Input"""', "('*.' + self.input_format)"], {}), "(dataset_dir, 'Train', 'Input', '*.' + self.input_format)\n", (708, 765), False, 'import os\n'), ((816, 887), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Train"""', '"""Target"""', "('*.' + self.target_format)"], {}), "(dataset_dir, 'Train', 'Target', '*.' + self.target_format)\n", (828, 887), False, 'import os\n'), ((952, 1020), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Test"""', '"""Input"""', "('*.' + self.input_format)"], {}), "(dataset_dir, 'Test', 'Input', '*.' + self.input_format)\n", (964, 1020), False, 'import os\n'), ((1071, 1141), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Test"""', '"""Target"""', "('*.' + self.target_format)"], {}), "(dataset_dir, 'Test', 'Target', '*.' + self.target_format)\n", (1083, 1141), False, 'import os\n'), ((1831, 1886), 'numpy.load', 'np.load', (['self.label_path_list[index]'], {'allow_pickle': '(True)'}), '(self.label_path_list[index], allow_pickle=True)\n', (1838, 1886), True, 'import numpy as np\n'), ((3368, 3407), 'PIL.Image.open', 'Image.open', (['self.label_path_list[index]'], {}), '(self.label_path_list[index])\n', (3378, 3407), False, 'from PIL import Image\n'), ((3847, 3903), 'numpy.load', 'np.load', (['self.target_path_list[index]'], {'allow_pickle': '(True)'}), '(self.target_path_list[index], allow_pickle=True)\n', (3854, 3903), True, 'import numpy as np\n'), ((5357, 5397), 'PIL.Image.open', 'Image.open', (['self.target_path_list[index]'], {}), '(self.target_path_list[index])\n', (5367, 5397), False, 'from PIL import Image\n'), ((5871, 5926), 'numpy.load', 'np.load', (['self.label_path_list[index]'], {'allow_pickle': '(True)'}), '(self.label_path_list[index], allow_pickle=True)\n', (5878, 5926), True, 'import numpy as np\n'), ((6864, 6903), 'PIL.Image.open', 
'Image.open', (['self.label_path_list[index]'], {}), '(self.label_path_list[index])\n', (6874, 6903), False, 'from PIL import Image\n'), ((7337, 7393), 'numpy.load', 'np.load', (['self.target_path_list[index]'], {'allow_pickle': '(True)'}), '(self.target_path_list[index], allow_pickle=True)\n', (7344, 7393), True, 'import numpy as np\n'), ((8394, 8434), 'PIL.Image.open', 'Image.open', (['self.target_path_list[index]'], {}), '(self.target_path_list[index])\n', (8404, 8434), False, 'from PIL import Image\n'), ((1735, 1773), 'astropy.io.fits.open', 'fits.open', (['self.label_path_list[index]'], {}), '(self.label_path_list[index])\n', (1744, 1773), False, 'from astropy.io import fits\n'), ((3749, 3788), 'astropy.io.fits.open', 'fits.open', (['self.target_path_list[index]'], {}), '(self.target_path_list[index])\n', (3758, 3788), False, 'from astropy.io import fits\n'), ((5775, 5813), 'astropy.io.fits.open', 'fits.open', (['self.label_path_list[index]'], {}), '(self.label_path_list[index])\n', (5784, 5813), False, 'from astropy.io import fits\n'), ((7239, 7278), 'astropy.io.fits.open', 'fits.open', (['self.target_path_list[index]'], {}), '(self.target_path_list[index])\n', (7248, 7278), False, 'from astropy.io import fits\n'), ((8684, 8718), 'os.path.split', 'split', (['self.label_path_list[index]'], {}), '(self.label_path_list[index])\n', (8689, 8718), False, 'from os.path import split, splitext\n'), ((8754, 8789), 'os.path.split', 'split', (['self.target_path_list[index]'], {}), '(self.target_path_list[index])\n', (8759, 8789), False, 'from os.path import split, splitext\n'), ((3252, 3262), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (3260, 3262), False, 'from torchvision.transforms import Compose, Lambda, ToTensor, Normalize, Pad\n'), ((3302, 3334), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': '[0.5]', 'std': '[0.5]'}), '(mean=[0.5], std=[0.5])\n', (3311, 3334), False, 'from torchvision.transforms import Compose, Lambda, 
ToTensor, Normalize, Pad\n'), ((5240, 5250), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (5248, 5250), False, 'from torchvision.transforms import Compose, Lambda, ToTensor, Normalize, Pad\n'), ((5290, 5322), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': '[0.5]', 'std': '[0.5]'}), '(mean=[0.5], std=[0.5])\n', (5299, 5322), False, 'from torchvision.transforms import Compose, Lambda, ToTensor, Normalize, Pad\n'), ((6748, 6758), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (6756, 6758), False, 'from torchvision.transforms import Compose, Lambda, ToTensor, Normalize, Pad\n'), ((6798, 6830), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': '[0.5]', 'std': '[0.5]'}), '(mean=[0.5], std=[0.5])\n', (6807, 6830), False, 'from torchvision.transforms import Compose, Lambda, ToTensor, Normalize, Pad\n'), ((8277, 8287), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (8285, 8287), False, 'from torchvision.transforms import Compose, Lambda, ToTensor, Normalize, Pad\n'), ((8327, 8359), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': '[0.5]', 'std': '[0.5]'}), '(mean=[0.5], std=[0.5])\n', (8336, 8359), False, 'from torchvision.transforms import Compose, Lambda, ToTensor, Normalize, Pad\n')] |
import numpy as np
from Graphs import *
## I DON'T THINK THIS WORKS? ##
def makeAcyclic(N):
    """Random adjacency matrix of a DAG on N nodes.

    Only the strictly lower triangle is populated (edges always point from a
    higher-numbered node to a lower-numbered one), so cycles are impossible.
    Each candidate edge is present with probability 1/3.
    """
    adjacency = np.zeros([N, N])
    for row in range(N):
        for col in range(row):
            adjacency[row, col] = random.choice([1, 1, 0, 0, 0, 0])
    return adjacency
def booleanProduct(A, B):
    """Boolean matrix product: E[i, j] = 1.0 iff A[i, k] == 1 and B[k, j] == 1 for some k.

    Vectorized replacement for the original pure-Python triple loop: the
    "entry equals 1" masks are multiplied as integer matrices, and any
    positive count marks the pair. Semantics match the original exactly
    (entries are compared to 1, not merely truth-tested), and the result is
    a float array of 0.0/1.0 like the original np.zeros-based output.
    """
    left = (np.asarray(A) == 1).astype(int)
    right = (np.asarray(B) == 1).astype(int)
    return (np.dot(left, right) > 0).astype(float)
def booleanPower(A, n=2):
    """n-th boolean power of A via repeated booleanProduct.

    For n == 1 the input itself is returned (no copy), mirroring the
    original behaviour.
    """
    if n == 1:
        return A
    result = A.copy()
    remaining = n - 1
    while remaining > 0:
        result = booleanProduct(result, A)
        remaining -= 1
    return result
def pathMatrix(A):
    """Reachability (path) matrix of adjacency matrix A.

    Sums the boolean powers A^1 .. A^(n-1) and clamps every entry to at
    most 1, so out[i, j] == 1 iff j is reachable from i in 1..n-1 steps.
    """
    size = np.shape(A)[0]
    reach = np.zeros(np.shape(A))
    for power in range(1, size):
        reach += booleanPower(A, power)
    # Clamp accumulated counts down to a 0/1 indicator.
    reach[reach > 1] = 1
    return reach
def transitiveReduce(P):
    """Transitive reduction of the path matrix P.

    Removes edge (i, k) whenever it is implied transitively through some j
    (t[i, j] == 1 and t[j, k] == 1). The input is not modified; a reduced
    copy is returned.
    """
    t = P.copy()
    # Fix: the size was taken from a global `R` that is not defined in this
    # scope (R is local to makeAcyclic) — derive it from the argument instead.
    N = np.shape(P)[0]
    for j in range(N):
        for i in range(N):
            if t[i,j] == 1:
                for k in range(N):
                    if t[j,k] == 1:
                        t[i,k] = 0
    return t
# Demo: build a random DAG, compute its reachability matrix and its
# transitive reduction, then draw each with connectogram() (from Graphs).
N = 7
# Node labels A..G; `string` presumably comes via `from Graphs import *` —
# NOTE(review): `L` is never used below, confirm it is still needed.
L = string.ascii_uppercase[:N]
A = makeAcyclic(N)
P = pathMatrix(A)
T = transitiveReduce(P)
connectogram(A,title="Adjacency",size=[4,4])
connectogram(P,title="Path",size=[4,4])
connectogram(T,title="Reduction",size=[4,4])
"numpy.shape",
"numpy.zeros"
] | [((110, 126), 'numpy.zeros', 'np.zeros', (['[N, N]'], {}), '([N, N])\n', (118, 126), True, 'import numpy as np\n'), ((280, 291), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (288, 291), True, 'import numpy as np\n'), ((313, 324), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (321, 324), True, 'import numpy as np\n'), ((730, 741), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (738, 741), True, 'import numpy as np\n'), ((765, 776), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (773, 776), True, 'import numpy as np\n'), ((1042, 1053), 'numpy.shape', 'np.shape', (['R'], {}), '(R)\n', (1050, 1053), True, 'import numpy as np\n'), ((798, 809), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (806, 809), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@Title:
Identifiability and predictability of
integer- and fractional-order epidemiological models
using physics-informed neural networks
@author:
<NAME> & <NAME>
Division of Applied Mathematics
Brown University
<EMAIL>
Created on 2020
"""
import sys
sys.path.insert(0, '../../Utilities/')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import pandas
import math
import tensorflow as tf
import numpy as np
from numpy import *
# from numpy import matlib as mb
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.io
from scipy.interpolate import griddata
import time
from itertools import product, combinations
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
#from plotting import newfig, savefig
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
import datetime
from pyDOE import lhs
# from scipy.special import gamma
from scipy.special import jacobi
# Wall-clock start; the script reports elapsed time after training.
start_time = time.time()
# Seeding is intentionally disabled (runs are not reproducible):
# np.random.seed(1234)
# tf.set_random_seed(1234)
# tf.random.set_seed(1234)
#%%
class PhysicsInformedNN:
#Initialize the class
    def __init__(self, t_f, t_train, I_train, R_train, D_train,
                 U0, lb, ub, N, layers, layers_Beta):
        """Build the TF1 graph of the fractional-order SIRD PINN.

        Parameters
        ----------
        t_f : 2-D array of collocation times for the ODE residual
            (assumed uniformly spaced; used to build the L1 scheme in FDM1).
        t_train, I_train, R_train, D_train : 2-D arrays of observation times
            and infected/recovered/death data.
        U0 : initial values [S0, I0, R0, D0], each a 1x1 array.
        lb, ub : lower/upper time bounds used to scale the network input.
        N : total population (used in the conservation residual).
        layers, layers_Beta : layer widths of the SIRD and BetaI networks.
        """
        self.N = N
        # Data for training
        self.t_f = t_f
        self.t_train = t_train
        self.I_train = I_train
        self.R_train = R_train
        self.D_train = D_train
        self.S0 = U0[0]
        self.I0 = U0[1]
        self.R0 = U0[2]
        self.D0 = U0[3]
        # Time discretization: M intervals, uniform step tau.
        self.M = len(t_f)-1
        self.tau = t_f[1]-t_f[0]
        # Input scaling bounds
        self.lb = lb
        self.ub = ub
        # Initialize the SIRD network and the BetaI (transmission-rate) network.
        self.weights, self.biases = self.initialize_NN(layers)
        self.weights_Beta, self.biases_Beta = self.initialize_NN(layers_Beta)
        # Trainable Jacobi-polynomial coefficients of the four fractional orders.
        self.Kappa1_COEF = tf.Variable(tf.zeros([poly_order,1], dtype=tf.float64) , dtype=tf.float64, trainable=True)
        self.Kappa2_COEF = tf.Variable(tf.zeros([poly_order,1], dtype=tf.float64) , dtype=tf.float64, trainable=True)
        self.Kappa3_COEF = tf.Variable(tf.zeros([poly_order,1], dtype=tf.float64) , dtype=tf.float64, trainable=True)
        self.Kappa4_COEF = tf.Variable(tf.zeros([poly_order,1], dtype=tf.float64) , dtype=tf.float64, trainable=True)
        # Fixed (non-trainable) recovery and death rates.
        self.N = N
        self.a = tf.Variable(2.15e-2,dtype=tf.float64,trainable=False)
        self.b = tf.Variable(0.48e-2,dtype=tf.float64,trainable=False)
        # TF1 session and graph
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                                     log_device_placement=True))
        self.saver = tf.train.Saver()
        # Placeholders: collocation times, observation times/data, initial values.
        self.t_tf = tf.placeholder(tf.float64, shape=[None, self.t_f.shape[1]])
        self.t_u = tf.placeholder(tf.float64, shape=[None, self.t_train.shape[1]])
        self.I_u = tf.placeholder(tf.float64, shape=[None, self.I_train.shape[1]])
        self.R_u = tf.placeholder(tf.float64, shape=[None, self.R_train.shape[1]])
        self.D_u = tf.placeholder(tf.float64, shape=[None, self.D_train.shape[1]])
        self.S0_u = tf.placeholder(tf.float64, shape=[None, self.S0.shape[1]])
        self.I0_u = tf.placeholder(tf.float64, shape=[None, self.I0.shape[1]])
        self.R0_u = tf.placeholder(tf.float64, shape=[None, self.R0.shape[1]])
        self.D0_u = tf.placeholder(tf.float64, shape=[None, self.D0.shape[1]])
        # Network predictions at the observation times.
        self.S_pred, self.I_pred, self.R_pred, self.D_pred = self.net_u(self.t_u)
        self.BetaI = self.net_Beta(self.t_u)
        # Fractional orders evaluated on the plotting grid.
        self.Kappa_pred1 = self.net_Kappa1_plot()
        self.Kappa_pred2 = self.net_Kappa2_plot()
        self.Kappa_pred3 = self.net_Kappa3_plot()
        self.Kappa_pred4 = self.net_Kappa4_plot()
        # Predicted initial values = first row of the predictions.
        self.S0_pred = self.S_pred[0]
        self.I0_pred = self.I_pred[0]
        self.R0_pred = self.R_pred[0]
        self.D0_pred = self.D_pred[0]
        # ODE residuals at the collocation points.
        self.S_f, self.I_f, self.R_f, self.D_f, self.R_con = self.net_f(self.t_tf)
        # Loss terms: initial-condition mismatch (S0 term deliberately disabled),
        self.lossU0 = tf.reduce_mean(tf.square(self.I0_u - self.I0_pred)) + \
                      tf.reduce_mean(tf.square(self.R0_u - self.R0_pred)) + \
                      tf.reduce_mean(tf.square(self.D0_u - self.D0_pred))
                        # tf.reduce_mean(tf.square(self.S0_u - self.S0_pred))
        # ...weighted data misfit (I weighted 8x, D weighted 60x),
        self.lossU = 8*tf.reduce_mean(tf.square(self.I_pred - self.I_u)) + \
                     tf.reduce_mean(tf.square(self.R_pred - self.R_u)) + \
                     60*tf.reduce_mean(tf.square(self.D_pred - self.D_u))
        # ...and ODE residuals plus the population-conservation constraint.
        self.lossF =tf.reduce_mean(tf.square(self.S_f))\
            + tf.reduce_mean(tf.square(self.I_f))\
            + tf.reduce_mean(tf.square(self.D_f))\
            + tf.reduce_mean(tf.square(self.R_f))\
            + tf.reduce_mean(tf.square(self.R_con))
        self.loss = 1*self.lossU0 + 5*self.lossU + self.lossF
        # Optimizers: L-BFGS-B (used via its callback) and Adam.
        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
                                                                method = 'L-BFGS-B',
                                                                options = {'maxiter': 50000,
                                                                           'maxfun': 50000,
                                                                           'maxcor': 50,
                                                                           'maxls': 50,
                                                                           'ftol' : 1.0 * np.finfo(float).eps})
        self.optimizer_Adam = tf.train.AdamOptimizer()
        self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)
        init = tf.global_variables_initializer()
        self.sess.run(init)
#Initialize the nueral network
def initialize_NN(self, layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0,num_layers-1):
W = self.xavier_init(size=[layers[l], layers[l+1]]) #weights for the current layer
b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float64), dtype=tf.float64) #biases for the current layer
weights.append(W) #save the elements in W to weights (a row vector)
biases.append(b) #save the elements in b to biases (a 1Xsum(layers) row vector)
return weights, biases
#generating weights
def xavier_init(self, size):
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2/(in_dim + out_dim))
return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev, dtype=tf.float64), dtype=tf.float64)
def net_Beta(self, t):
BetaI = self.neural_net(t, self.weights_Beta, self.biases_Beta)
bound_b = [tf.constant(0.0, dtype=tf.float64), tf.constant(1.0, dtype=tf.float64)]
return bound_b[0]+(bound_b[1]-bound_b[0])*tf.sigmoid(BetaI)
#Architecture of the neural network
def neural_net(self, t, weights, biases):
num_layers = len(weights) + 1
H = 2.0*(t-self.lb)/(self.ub-self.lb) - 1.0
for l in range(0,num_layers-2):
W = weights[l]
b = biases[l]
H = tf.tanh(tf.add(tf.matmul(H, W), b))
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b)
return Y
def net_u(self, t):
SIDR = self.neural_net(t, self.weights, self.biases)
# SIDR = SIDR**2
S = SIDR[:,0:1]
I = SIDR[:,1:2]
R = SIDR[:,2:3]
D = SIDR[:,3:4]
return S, I, R, D
def net_Kappa1(self):
polys = tf.constant(np.transpose(Jacobi_polys[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa1_COEF)
# return tf.sigmoid(Kappa)
return 0.2 + 0.8 * tf.sigmoid(Kappa)
def net_Kappa2(self):
polys = tf.constant(np.transpose(Jacobi_polys[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa2_COEF)
# return tf.sigmoid(Kappa)
return 0.2 + 0.8 * tf.sigmoid(Kappa)
def net_Kappa3(self):
polys = tf.constant(np.transpose(Jacobi_polys[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa3_COEF)
# return tf.sigmoid(Kappa)
return 0.2 + 0.8 * tf.sigmoid(Kappa)
def net_Kappa4(self):
polys = tf.constant(np.transpose(Jacobi_polys[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa4_COEF)
# return tf.sigmoid(Kappa)
return 0.2 + 0.8 * tf.sigmoid(Kappa)
def net_Kappa1_plot(self):
polys = tf.constant(np.transpose(Jacobi_polys_plots[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa1_COEF)
# return tf.sigmoid(Kappa)
return 0.2 + 0.8 * tf.sigmoid(Kappa)
def net_Kappa2_plot(self):
polys = tf.constant(np.transpose(Jacobi_polys_plots[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa2_COEF)
# return tf.sigmoid(Kappa)
return 0.2 + 0.8 * tf.sigmoid(Kappa)
def net_Kappa3_plot(self):
polys = tf.constant(np.transpose(Jacobi_polys_plots[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa3_COEF)
# return tf.sigmoid(Kappa)
return 0.2 + 0.8 * tf.sigmoid(Kappa)
def net_Kappa4_plot(self):
polys = tf.constant(np.transpose(Jacobi_polys_plots[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa4_COEF)
# return tf.sigmoid(Kappa)
return 0.2 + 0.8 * tf.sigmoid(Kappa)
    # Fractional differentiation coefficients for the L1 approximation of the
    # Caputo derivative with a time-varying order Kappa.
    def FDM1(self, Kappa):
        """Build the (M+1)x(M+1) L1 finite-difference matrix for order Kappa(t).

        Multiplying this matrix with a state history vector approximates the
        Caputo fractional derivative of (pointwise) order Kappa on the
        uniform grid with step tau. Row 0 is zero except for the boundary
        handling below.
        """
        m = self.M #int
        Tau = self.tau #array
        kappa_vec = tf.reshape(Kappa, [m+1,1]) # per-time-step order, as a column tensor
        kappa_mat = tf.tile(kappa_vec, [1, m-1])
        # Lower-triangular index/difference templates used by the L1 weights.
        idx = np.tril_indices(m+1, k=-1)
        Temp1 = np.zeros([m+1,m+1])
        Temp1[idx] = idx[0]-idx[1]
        Temp1 = np.tril(Temp1, k=-2) #(m+1,m+1) numpy array
        Temp1 = tf.constant(Temp1, dtype = tf.float64) #(m+1,m+1) tensor
        Temp2 = -np.eye(m+1)
        Temp2[idx] = idx[0]-idx[1]-1
        Temp2 = np.tril(Temp2, k=-2)
        Temp2 = tf.constant(Temp2, dtype = tf.float64)
        Temp3 = -2*np.eye(m+1)
        Temp3[idx] = idx[0]-idx[1]-2
        Temp3 = np.tril(Temp3, k=-2)
        Temp3 = tf.constant(Temp3, dtype = tf.float64)
        A = np.concatenate((np.zeros((1,m)), np.eye(m)), axis=0, out=None)
        A = tf.constant(A[:,0:m-1], dtype = tf.float64)
        # Interior columns: second differences of (j)^(1-kappa), the L1 weights.
        Temp = tf.pow(Temp1[:,0:m-1],1.0-kappa_mat) -\
               2*tf.pow(Temp2[:,0:m-1],1.0-kappa_mat) +\
               tf.pow(Temp3[:,0:m-1],1.0-kappa_mat) + A
        # First column: weights multiplying the initial value u(t_0).
        L_Temp1 = tf.constant(np.arange(m), dtype = tf.float64) #np.arange(m)
        L_Temp1 = tf.pow(tf.reshape(L_Temp1, [m,1]), 1.0-kappa_vec[1:m+1, 0:1])
        L_Temp2 = tf.constant(np.arange(m)+1, dtype = tf.float64) #np.arange(m) + 1
        L_Temp2 = tf.pow(tf.reshape(L_Temp2, [m,1]), 1.0-kappa_vec[1:m+1, 0:1])
        L_Temp = tf.concat((tf.zeros((1,1), dtype = tf.float64), L_Temp1-L_Temp2), axis=0)
        # Last column: weight on the current (newest) time level.
        R_Temp = tf.concat((tf.zeros((m,1), dtype = tf.float64), tf.ones((1,1), dtype = tf.float64)), axis=0)
        coeff_mat = tf.concat((L_Temp, Temp, R_Temp), axis=1)
        # Scale each row by tau^(-kappa) / Gamma(2 - kappa).
        c = tf.tile(tf.math.divide(tf.pow(Tau, -kappa_vec), tf.exp(tf.lgamma(2-kappa_vec))), tf.constant([1, m+1], dtype = tf.int32))
        coeff_mat = tf.multiply(c, coeff_mat)
        return coeff_mat
def net_f(self, t):
    """Build the PINN residuals of the variable-order fractional SIRD system.

    Returns one residual tensor per equation (f_S, f_I, f_R, f_D) plus the
    conservation residual f_con = S + I + R + D - N.
    """
    # Time-dependent transmission rate from its own network.
    beta = self.net_Beta(t)
    # State predictions from the main network.
    S, I, R, D = self.net_u(t)
    # One learned fractional order and one L1 differentiation matrix per state.
    k1 = self.net_Kappa1()
    k2 = self.net_Kappa2()
    k3 = self.net_Kappa3()
    k4 = self.net_Kappa4()
    d1 = self.FDM1(k1)
    d2 = self.FDM1(k2)
    d3 = self.FDM1(k3)
    d4 = self.FDM1(k4)
    T = tf.constant(7.0, dtype=tf.float64)
    # Scaled fractional time derivatives: T^(kappa-1) * D^kappa u / Gamma(1+kappa).
    S_t = tf.pow(T, k1 - 1) * tf.matmul(d1, S) / tf.exp(tf.lgamma(1.0 + k1))
    I_t = tf.pow(T, k2 - 1) * tf.matmul(d2, I) / tf.exp(tf.lgamma(1.0 + k2))
    R_t = tf.pow(T, k3 - 1) * tf.matmul(d3, R) / tf.exp(tf.lgamma(1.0 + k3))
    D_t = tf.pow(T, k4 - 1) * tf.matmul(d4, D) / tf.exp(tf.lgamma(1.0 + k4))
    # SIRD dynamics residuals (a: recovery rate, b: death rate).
    f_S = S_t + beta * S * I
    f_I = I_t - beta * S * I + (self.a + self.b) * I
    f_R = R_t - self.a * I
    f_D = D_t - self.b * I
    f_con = S + I + R + D - self.N
    return f_S, f_I, f_R, f_D, f_con
def callback(self, loss, lossU0, lossU, lossF):
    """L-BFGS progress hook: record the loss components and echo them."""
    snapshot = np.array([loss, lossU0, lossU, lossF])
    total_records_LBFGS.append(snapshot)
    print('Loss: %.3e, LossU0: %.3e, LossU: %.3e, LossF: %.3e' % (loss, lossU0, lossU, lossF))
def train(self, nIter):
    """Run nIter+1 Adam steps (logging every 100), then L-BFGS if enabled.

    Side effects: appends to the module-level total_records and
    Kappa{1..4}_records lists on every logging iteration.
    """
    feed = {self.t_u: self.t_train, self.t_tf: self.t_f,
            self.I_u: self.I_train, self.R_u: self.R_train, self.D_u: self.D_train,
            self.S0_u: self.S0, self.I0_u: self.I0, self.R0_u: self.R0, self.D0_u: self.D0}
    tic = time.time()
    for it in range(nIter + 1):
        self.sess.run(self.train_op_Adam, feed)
        if it % 100 != 0:
            continue  # only log every 100th iteration
        elapsed = time.time() - tic
        loss_value = self.sess.run(self.loss, feed)
        lossU0_value = self.sess.run(self.lossU0, feed)
        lossU_value = self.sess.run(self.lossU, feed)
        lossF_value = self.sess.run(self.lossF, feed)
        # Snapshot the current fractional-order estimates.
        Kappa1_records.append(self.sess.run(self.Kappa_pred1))
        Kappa2_records.append(self.sess.run(self.Kappa_pred2))
        Kappa3_records.append(self.sess.run(self.Kappa_pred3))
        Kappa4_records.append(self.sess.run(self.Kappa_pred4))
        total_records.append(np.array([it, loss_value, lossU0_value, lossU_value, lossF_value]))
        print('It: %d, Loss: %.3e, LossU0: %.3e, LossU: %.3e, LossF: %.3e, Time: %.2f' %
              (it, loss_value, lossU0_value, lossU_value, lossF_value, elapsed))
        tic = time.time()  # restart the per-100-iteration timer
    if LBFGS:
        # Second-stage refinement with scipy's L-BFGS-B wrapper.
        self.optimizer.minimize(self.sess,
                                feed_dict=feed,
                                fetches=[self.loss, self.lossU0, self.lossU, self.lossF],
                                loss_callback=self.callback)
def predict(self, t_star):
    """Evaluate the trained networks at times t_star.

    Returns (S, I, R, D, Kappa1..Kappa4, Beta) as session-evaluated arrays.
    """
    feed = {self.t_u: t_star}
    run = self.sess.run  # hoist the attribute lookup
    S = run(self.S_pred, feed)
    I = run(self.I_pred, feed)
    R = run(self.R_pred, feed)
    D = run(self.D_pred, feed)
    Beta = run(self.BetaI, feed)
    Kappa1 = run(self.Kappa_pred1, feed)
    Kappa2 = run(self.Kappa_pred2, feed)
    Kappa3 = run(self.Kappa_pred3, feed)
    Kappa4 = run(self.Kappa_pred4, feed)
    return S, I, R, D, Kappa1, Kappa2, Kappa3, Kappa4, Beta
############################################################
#%%
if __name__=="__main__":
    # Network architectures: state network (4 outputs: S,I,R,D) and beta network (1 output).
    layers=[1] + 5*[20] + [4]
    layers_Beta=[1] + 5*[20] + [1]
    # Load the observed time series (one value per day, one column per file).
    I_star = np.loadtxt('Data/Infectious.txt') #T x 1 array
    I_star = I_star.reshape([len(I_star),1])
    R_star = np.loadtxt('Data/Recovered.txt') #T x 1 array
    R_star = R_star.reshape([len(R_star),1])
    D_star = np.loadtxt('Data/Death.txt') #T x 1 array
    D_star = D_star.reshape([len(D_star),1])
    t_star = np.arange(len(I_star))
    t_star = t_star[:,None]
    N = 60461826  # presumably the total population of the studied region -- confirm
    # Scale populations down so the network targets are O(1).
    sf = 1e-7
    N = N * sf
    I_star = I_star * sf
    R_star = R_star * sf
    D_star = D_star * sf
    # Lower and upper bounds of the time domain.
    lb = t_star.min(0)
    ub = t_star.max(0)
    # Initial conditions; S0 follows from conservation S+I+R+D = N.
    I0 = I_star[0:1,:]
    R0 = R_star[0:1,:]
    D0 = D_star[0:1,:]
    S0 = N - I0 - R0 - D0
    U0 = [S0, I0, R0, D0]
    # Collocation (residual) points for the physics loss.
    N_f = 500 #5 * len(t_star)
    t_f = np.linspace(lb, ub, num = N_f) #uniform grid for evaluating the fractional derivative
    poly_order = 10
    # Map both grids onto [-1, 1], the natural domain of the Jacobi polynomials.
    t_f_mapped = -1 + 2/(ub-lb) * (t_f - lb)
    t_star_mapped = -1 + 2/(ub-lb) * (t_star - lb)
    # Legendre basis (Jacobi with alpha=beta=0) evaluated on each grid; only
    # the first poly_order rows are used by the net_Kappa* methods.
    Jacobi_polys = np.asarray([ jacobi(n,0,0)(t_f_mapped.flatten()) for n in range(0, 15)])
    Jacobi_polys_plots = np.asarray([ jacobi(n,0,0)(t_star_mapped.flatten()) for n in range(0, 15)])
    #%%
    ######################################################################
    ######################## Training and Predicting ###############################
    ######################################################################
    # t_train = (t_star-lb)/(ub-lb)
    t_train = t_star
    I_train = I_star
    R_train = R_star
    D_train = D_star
    #%%
    from datetime import datetime
    now = datetime.now()
    # dt_string = now.strftime("%m-%d-%H-%M")
    dt_string = now.strftime("%m-%d")
    # Create date-stamped output directories.  NOTE(review): the loop always
    # runs all 10 iterations (the break below is commented out), so the
    # save_* paths used later point at 'set10'.
    current_directory = os.getcwd()
    for j in range(10):
        casenumber = 'set' + str(j+1)
        relative_path_results = '/SIRD-DiffKappa-Beta/Train-Results-'+dt_string+'-'+casenumber+'/'
        save_results_to = current_directory + relative_path_results
        if not os.path.exists(save_results_to):
            os.makedirs(save_results_to)
        relative_path = '/SIRD-DiffKappa-Beta/Train-model-'+dt_string+'-'+casenumber+'/'
        save_models_to = current_directory + relative_path
        if not os.path.exists(save_models_to):
            os.makedirs(save_models_to)
        # break
    #%%
    # Training: module-level history lists appended to by model.train/callback.
    total_records = []
    Kappa1_records = []
    Kappa2_records = []
    Kappa3_records = []
    Kappa4_records = []
    total_records_LBFGS = []
    model = PhysicsInformedNN(t_f, t_train, I_train, R_train, D_train, U0, lb, ub, N, layers, layers_Beta)
    LBFGS = True  # enable the second-stage L-BFGS refinement after Adam
    #%%
    # LBFGS = False
    model.train(10000) #Training with n iterations
    model.saver.save(model.sess, save_models_to+"model.ckpt")
    #%%
    # Predict all states and learned parameters on the observation grid.
    S, I, R, D, Kappa1, Kappa2, Kappa3, Kappa4, Beta = model.predict(t_star)
    # NOTE(review): this rebinds the name 'datetime' (previously the class
    # from 'from datetime import datetime') to the module, which is what
    # the timedelta call below needs.
    import datetime
    end_time = time.time()
    # start_time is presumably set near the top of the file at import time -- confirm.
    print(datetime.timedelta(seconds=int(end_time-start_time)))
    # #Calculate RC
    # Rc = BetaI /(1.0/6.0)
    #%%
    # Save the predicted trajectories as single-column text files.
    np.savetxt(save_results_to + 'S.txt', S.reshape((-1,1)))
    np.savetxt(save_results_to + 'I.txt', I.reshape((-1,1)))
    np.savetxt(save_results_to + 'R.txt', R.reshape((-1,1)))
    np.savetxt(save_results_to + 'D.txt', D.reshape((-1,1)))
    # Save the time grid and the learned time-dependent parameters.
    np.savetxt(save_results_to + 't_star.txt', t_star.reshape((-1,1)))
    np.savetxt(save_results_to + 'Kappa1.txt', Kappa1.reshape((-1,1)))
    np.savetxt(save_results_to + 'Kappa2.txt', Kappa2.reshape((-1,1)))
    np.savetxt(save_results_to + 'Kappa3.txt', Kappa3.reshape((-1,1)))
    np.savetxt(save_results_to + 'Kappa4.txt', Kappa4.reshape((-1,1)))
    np.savetxt(save_results_to + 'Beta.txt', Beta.reshape((-1,1)))
    #%%
    # Unpack the Adam loss history (columns: it, loss, lossU0, lossU, lossF).
    N_Iter = len(total_records)
    iteration = np.asarray(total_records)[:,0]
    loss_his = np.asarray(total_records)[:,1]
    loss_his_u0 = np.asarray(total_records)[:,2]
    loss_his_u = np.asarray(total_records)[:,3]
    loss_his_f = np.asarray(total_records)[:,4]
    # Unpack the L-BFGS history; iterations continue after the Adam phase
    # (Adam logged every 100 steps, hence the N_Iter*100 offset).
    if LBFGS:
        N_Iter_LBFGS = len(total_records_LBFGS)
        iteration_LBFGS = np.arange(N_Iter_LBFGS)+N_Iter*100
        loss_his_LBFGS = np.asarray(total_records_LBFGS)[:,0]
        loss_his_u0_LBFGS = np.asarray(total_records_LBFGS)[:,1]
        loss_his_u_LBFGS = np.asarray(total_records_LBFGS)[:,2]
        loss_his_f_LBFGS = np.asarray(total_records_LBFGS)[:,3]
    #%%
    # Persist the loss histories.
    np.savetxt(save_results_to + 'iteration.txt', iteration.reshape((-1,1)))
    np.savetxt(save_results_to + 'loss_his.txt', loss_his.reshape((-1,1)))
    np.savetxt(save_results_to + 'loss_his_u0.txt', loss_his_u0.reshape((-1,1)))
    np.savetxt(save_results_to + 'loss_his_u.txt', loss_his_u.reshape((-1,1)))
    np.savetxt(save_results_to + 'loss_his_f.txt', loss_his_f.reshape((-1,1)))
    if LBFGS:
        np.savetxt(save_results_to + 'iteration_LBFGS.txt', iteration_LBFGS.reshape((-1,1)))
        np.savetxt(save_results_to + 'loss_his_LBFGS.txt', loss_his_LBFGS.reshape((-1,1)))
        np.savetxt(save_results_to + 'loss_his_u0_LBFGS.txt', loss_his_u0_LBFGS.reshape((-1,1)))
        np.savetxt(save_results_to + 'loss_his_u_LBFGS.txt', loss_his_u_LBFGS.reshape((-1,1)))
        np.savetxt(save_results_to + 'loss_his_f_LBFGS.txt', loss_his_f_LBFGS.reshape((-1,1)))
    #%%
    # History of the loss components on a log scale (Adam phase, then L-BFGS).
    fig, ax = plt.subplots()
    plt.yscale('log')
    plt.plot(iteration, loss_his, 'k-', lw = 4, label='$loss$')
    plt.plot(iteration, loss_his_u0, 'r-', lw = 4, label='$loss_{u0}$')
    plt.plot(iteration, loss_his_u, 'b-', lw = 4, label='$loss_u$')
    plt.plot(iteration, loss_his_f, 'c-', lw = 4, label='$loss_f$')
    if LBFGS:
        # L-BFGS curves reuse the Adam colors; the legend covers both phases.
        plt.plot(iteration_LBFGS, loss_his_LBFGS, 'k-', lw = 4)
        plt.plot(iteration_LBFGS, loss_his_u0_LBFGS, 'r-', lw = 4)
        plt.plot(iteration_LBFGS, loss_his_u_LBFGS, 'b-', lw = 4)
        plt.plot(iteration_LBFGS, loss_his_f_LBFGS, 'c-', lw = 4)
    ax.legend(fontsize=50, ncol = 1, loc = 'best')
    ax.tick_params(axis='x', labelsize = 50)
    ax.tick_params(axis='y', labelsize = 50)
    plt.rc('font', size=60)
    ax.grid(True)
    ax.set_xlabel('Iteration', fontsize = 50)
    ax.set_ylabel('loss values', fontsize = 70)
    fig.set_size_inches(w=25, h=12.5)
    plt.savefig(save_results_to + 'History_loss.png', dpi=300)
    plt.savefig(save_results_to + 'History_loss.pdf', dpi=300)
    #%%
    ######################################################################
    ############################# Plotting ###############################
    ######################################################################
    #%%
    # date_total = np.arange('2020-03-08', '2020-11-13', dtype='datetime64[D]')[:,None]
    date_total = t_train  # x-axis values for all trajectory plots
#%%
#Current Suspectious
fig, ax = plt.subplots()
ax.plot(date_total, S/sf, 'k-', lw=5)
# ax.set_xlim(0-0.5,180)
# ax.set_ylim(0-0.5,6000+0.5)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
# ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
ax.ticklabel_format(axis='y', style='sci', scilimits=(5,5))
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel('$S$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Current_Suspectious.pdf', dpi=300)
plt.savefig(save_results_to + 'Current_Suspectious.png', dpi=300)
#%%
#Current Infectious
fig, ax = plt.subplots()
ax.plot(date_total, I_star/sf, 'ro', lw=5)
ax.plot(date_total, I/sf, 'k-', lw=5)
# ax.set_xlim(0-0.5,180)
# ax.set_ylim(0-0.5,6000+0.5)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
# ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
ax.ticklabel_format(axis='y', style='sci', scilimits=(5,5))
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel('$I$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Current_Infectious.pdf', dpi=300)
plt.savefig(save_results_to + 'Current_Infectious.png', dpi=300)
#%%
#Current Removed
fig, ax = plt.subplots()
ax.plot(date_total, R_star/sf, 'ro', lw=5)
ax.plot(date_total, R/sf, 'k-', lw=5)
# ax.set_xlim(0-0.5,180)
# ax.set_ylim(0-0.5,6000+0.5)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
# ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
ax.ticklabel_format(axis='y', style='sci', scilimits=(5,5))
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel('$R$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Current_Removed.pdf', dpi=300)
plt.savefig(save_results_to + 'Current_Removed.png', dpi=300)
#%%
#Current death
fig, ax = plt.subplots()
ax.plot(date_total, D_star/sf, 'ro', lw=5)
ax.plot(date_total, D/sf, 'k-', lw=5)
# ax.set_xlim(0-0.5,180)
# ax.set_ylim(0-0.5,6000+0.5)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
# ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
ax.ticklabel_format(axis='y', style='sci', scilimits=(5,5))
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel('$D$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Current_Death.pdf', dpi=300)
plt.savefig(save_results_to + 'Current_Death.png', dpi=300)
#%%
#Kappa
fig, ax = plt.subplots()
ax.plot(date_total, Kappa1, 'k-', lw=5, label = '$\kappa_{1}$')
ax.plot(date_total, Kappa2, 'r-', lw=5, label = '$\kappa_{2}$')
ax.plot(date_total, Kappa3, 'b-', lw=5, label = '$\kappa_{3}$')
ax.plot(date_total, Kappa4, 'm-', lw=5, label = '$\kappa_{4}$')
# ax.set_xlim(0-0.5,180)
# ax.set_ylim(0-0.5,6000+0.5)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel('$\kappa$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Kappa.pdf', dpi=300)
plt.savefig(save_results_to + 'Kappa.png', dpi=300)
#%%
fig, ax = plt.subplots()
# ax.plot(t_f, Kappa, 'k-', lw=5)
ax.plot(date_total, np.transpose(np.asarray(Kappa1_records)[:,:,0]) , 'k-', lw=2)
# ax.set_xlim(0-0.5,180)
ax.set_ylim(-0.021, 1.021)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
# ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel('$\kappa_1$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Kappa1_rec.pdf', dpi=300)
plt.savefig(save_results_to + 'Kappa1_rec.png', dpi=300)
fig, ax = plt.subplots()
# ax.plot(t_f, Kappa, 'k-', lw=5)
ax.plot(date_total, np.transpose(np.asarray(Kappa2_records)[:,:,0]) , 'k-', lw=2)
# ax.set_xlim(0-0.5,180)
ax.set_ylim(-0.021, 1.021)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
# ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel('$\kappa_2$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Kappa2_rec.pdf', dpi=300)
plt.savefig(save_results_to + 'Kappa2_rec.png', dpi=300)
fig, ax = plt.subplots()
# ax.plot(t_f, Kappa, 'k-', lw=5)
ax.plot(date_total, np.transpose(np.asarray(Kappa3_records)[:,:,0]) , 'k-', lw=2)
# ax.set_xlim(0-0.5,180)
ax.set_ylim(-0.021, 1.021)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
# ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel('$\kappa_3$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Kappa3_rec.pdf', dpi=300)
plt.savefig(save_results_to + 'Kappa3_rec.png', dpi=300)
fig, ax = plt.subplots()
# ax.plot(t_f, Kappa, 'k-', lw=5)
ax.plot(date_total, np.transpose(np.asarray(Kappa4_records)[:,:,0]) , 'k-', lw=2)
# ax.set_xlim(0-0.5,180)
ax.set_ylim(-0.021, 1.021)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
# ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel('$\kappa_4$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Kappa4_rec.pdf', dpi=300)
plt.savefig(save_results_to + 'Kappa4_rec.png', dpi=300)
#%%
fig, ax = plt.subplots()
# ax.plot(t_f, Kappa, 'k-', lw=5)
ax.plot(date_total, Beta , 'k-', lw=2)
# ax.set_xlim(0-0.5,180)
# ax.set_ylim(-0.021, 1.021)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
# ax.legend(fontsize=18, ncol = 1, loc = 'best')
ax.tick_params(axis='x', labelsize = 50)
ax.tick_params(axis='y', labelsize = 50)
plt.rc('font', size=60)
ax.grid(True)
# ax.set_xlabel('Date', fontsize = font)
ax.set_ylabel(r'$\beta$', fontsize = 80)
fig.set_size_inches(w=25, h=12.5)
plt.savefig(save_results_to + 'Beta.pdf', dpi=300)
plt.savefig(save_results_to + 'Beta.png', dpi=300)
| [
"matplotlib.pyplot.yscale",
"matplotlib.dates.MonthLocator",
"tensorflow.square",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.multiply",
"tensorflow.Variable",
"numpy.arange",
"tensorflow.ConfigProto",
"tensorflow.truncated_normal",
"scipy.special.jacobi",
"numpy.transpose",
"tens... | [((328, 366), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../Utilities/"""'], {}), "(0, '../../Utilities/')\n", (343, 366), False, 'import sys\n'), ((1096, 1107), 'time.time', 'time.time', ([], {}), '()\n', (1105, 1107), False, 'import time\n'), ((16814, 16847), 'numpy.loadtxt', 'np.loadtxt', (['"""Data/Infectious.txt"""'], {}), "('Data/Infectious.txt')\n", (16824, 16847), True, 'import numpy as np\n'), ((16922, 16954), 'numpy.loadtxt', 'np.loadtxt', (['"""Data/Recovered.txt"""'], {}), "('Data/Recovered.txt')\n", (16932, 16954), True, 'import numpy as np\n'), ((17028, 17056), 'numpy.loadtxt', 'np.loadtxt', (['"""Data/Death.txt"""'], {}), "('Data/Death.txt')\n", (17038, 17056), True, 'import numpy as np\n'), ((17661, 17689), 'numpy.linspace', 'np.linspace', (['lb', 'ub'], {'num': 'N_f'}), '(lb, ub, num=N_f)\n', (17672, 17689), True, 'import numpy as np\n'), ((18504, 18518), 'datetime.now', 'datetime.now', ([], {}), '()\n', (18516, 18518), False, 'import datetime\n'), ((18653, 18664), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (18662, 18664), False, 'import os\n'), ((2603, 2657), 'tensorflow.Variable', 'tf.Variable', (['(0.0215)'], {'dtype': 'tf.float64', 'trainable': '(False)'}), '(0.0215, dtype=tf.float64, trainable=False)\n', (2614, 2657), True, 'import tensorflow as tf\n'), ((2677, 2731), 'tensorflow.Variable', 'tf.Variable', (['(0.0048)'], {'dtype': 'tf.float64', 'trainable': '(False)'}), '(0.0048, dtype=tf.float64, trainable=False)\n', (2688, 2731), True, 'import tensorflow as tf\n'), ((2964, 2980), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2978, 2980), True, 'import tensorflow as tf\n'), ((3051, 3110), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, self.t_f.shape[1]]'}), '(tf.float64, shape=[None, self.t_f.shape[1]])\n', (3065, 3110), True, 'import tensorflow as tf\n'), ((3132, 3195), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, 
self.t_train.shape[1]]'}), '(tf.float64, shape=[None, self.t_train.shape[1]])\n', (3146, 3195), True, 'import tensorflow as tf\n'), ((3218, 3281), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, self.I_train.shape[1]]'}), '(tf.float64, shape=[None, self.I_train.shape[1]])\n', (3232, 3281), True, 'import tensorflow as tf\n'), ((3304, 3367), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, self.R_train.shape[1]]'}), '(tf.float64, shape=[None, self.R_train.shape[1]])\n', (3318, 3367), True, 'import tensorflow as tf\n'), ((3390, 3453), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, self.D_train.shape[1]]'}), '(tf.float64, shape=[None, self.D_train.shape[1]])\n', (3404, 3453), True, 'import tensorflow as tf\n'), ((3479, 3537), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, self.S0.shape[1]]'}), '(tf.float64, shape=[None, self.S0.shape[1]])\n', (3493, 3537), True, 'import tensorflow as tf\n'), ((3561, 3619), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, self.I0.shape[1]]'}), '(tf.float64, shape=[None, self.I0.shape[1]])\n', (3575, 3619), True, 'import tensorflow as tf\n'), ((3643, 3701), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, self.R0.shape[1]]'}), '(tf.float64, shape=[None, self.R0.shape[1]])\n', (3657, 3701), True, 'import tensorflow as tf\n'), ((3725, 3783), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, self.D0.shape[1]]'}), '(tf.float64, shape=[None, self.D0.shape[1]])\n', (3739, 3783), True, 'import tensorflow as tf\n'), ((6054, 6078), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (6076, 6078), True, 'import tensorflow as tf\n'), ((6175, 6208), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6206, 6208), True, 'import tensorflow as tf\n'), ((7023, 7054), 
'numpy.sqrt', 'np.sqrt', (['(2 / (in_dim + out_dim))'], {}), '(2 / (in_dim + out_dim))\n', (7030, 7054), True, 'import numpy as np\n'), ((8286, 8320), 'tensorflow.matmul', 'tf.matmul', (['polys', 'self.Kappa1_COEF'], {}), '(polys, self.Kappa1_COEF)\n', (8295, 8320), True, 'import tensorflow as tf\n'), ((8539, 8573), 'tensorflow.matmul', 'tf.matmul', (['polys', 'self.Kappa2_COEF'], {}), '(polys, self.Kappa2_COEF)\n', (8548, 8573), True, 'import tensorflow as tf\n'), ((8792, 8826), 'tensorflow.matmul', 'tf.matmul', (['polys', 'self.Kappa3_COEF'], {}), '(polys, self.Kappa3_COEF)\n', (8801, 8826), True, 'import tensorflow as tf\n'), ((9045, 9079), 'tensorflow.matmul', 'tf.matmul', (['polys', 'self.Kappa4_COEF'], {}), '(polys, self.Kappa4_COEF)\n', (9054, 9079), True, 'import tensorflow as tf\n'), ((9312, 9346), 'tensorflow.matmul', 'tf.matmul', (['polys', 'self.Kappa1_COEF'], {}), '(polys, self.Kappa1_COEF)\n', (9321, 9346), True, 'import tensorflow as tf\n'), ((9576, 9610), 'tensorflow.matmul', 'tf.matmul', (['polys', 'self.Kappa2_COEF'], {}), '(polys, self.Kappa2_COEF)\n', (9585, 9610), True, 'import tensorflow as tf\n'), ((9840, 9874), 'tensorflow.matmul', 'tf.matmul', (['polys', 'self.Kappa3_COEF'], {}), '(polys, self.Kappa3_COEF)\n', (9849, 9874), True, 'import tensorflow as tf\n'), ((10104, 10138), 'tensorflow.matmul', 'tf.matmul', (['polys', 'self.Kappa4_COEF'], {}), '(polys, self.Kappa4_COEF)\n', (10113, 10138), True, 'import tensorflow as tf\n'), ((10406, 10435), 'tensorflow.reshape', 'tf.reshape', (['Kappa', '[m + 1, 1]'], {}), '(Kappa, [m + 1, 1])\n', (10416, 10435), True, 'import tensorflow as tf\n'), ((10461, 10491), 'tensorflow.tile', 'tf.tile', (['kappa_vec', '[1, m - 1]'], {}), '(kappa_vec, [1, m - 1])\n', (10468, 10491), True, 'import tensorflow as tf\n'), ((10515, 10543), 'numpy.tril_indices', 'np.tril_indices', (['(m + 1)'], {'k': '(-1)'}), '(m + 1, k=-1)\n', (10530, 10543), True, 'import numpy as np\n'), ((10560, 10584), 'numpy.zeros', 'np.zeros', 
(['[m + 1, m + 1]'], {}), '([m + 1, m + 1])\n', (10568, 10584), True, 'import numpy as np\n'), ((10636, 10656), 'numpy.tril', 'np.tril', (['Temp1'], {'k': '(-2)'}), '(Temp1, k=-2)\n', (10643, 10656), True, 'import numpy as np\n'), ((10697, 10733), 'tensorflow.constant', 'tf.constant', (['Temp1'], {'dtype': 'tf.float64'}), '(Temp1, dtype=tf.float64)\n', (10708, 10733), True, 'import tensorflow as tf\n'), ((10841, 10861), 'numpy.tril', 'np.tril', (['Temp2'], {'k': '(-2)'}), '(Temp2, k=-2)\n', (10848, 10861), True, 'import numpy as np\n'), ((10880, 10916), 'tensorflow.constant', 'tf.constant', (['Temp2'], {'dtype': 'tf.float64'}), '(Temp2, dtype=tf.float64)\n', (10891, 10916), True, 'import tensorflow as tf\n'), ((11008, 11028), 'numpy.tril', 'np.tril', (['Temp3'], {'k': '(-2)'}), '(Temp3, k=-2)\n', (11015, 11028), True, 'import numpy as np\n'), ((11046, 11082), 'tensorflow.constant', 'tf.constant', (['Temp3'], {'dtype': 'tf.float64'}), '(Temp3, dtype=tf.float64)\n', (11057, 11082), True, 'import tensorflow as tf\n'), ((11175, 11219), 'tensorflow.constant', 'tf.constant', (['A[:, 0:m - 1]'], {'dtype': 'tf.float64'}), '(A[:, 0:m - 1], dtype=tf.float64)\n', (11186, 11219), True, 'import tensorflow as tf\n'), ((11979, 12020), 'tensorflow.concat', 'tf.concat', (['(L_Temp, Temp, R_Temp)'], {'axis': '(1)'}), '((L_Temp, Temp, R_Temp), axis=1)\n', (11988, 12020), True, 'import tensorflow as tf\n'), ((12197, 12222), 'tensorflow.multiply', 'tf.multiply', (['c', 'coeff_mat'], {}), '(c, coeff_mat)\n', (12208, 12222), True, 'import tensorflow as tf\n'), ((12994, 13016), 'tensorflow.matmul', 'tf.matmul', (['DiffMat1', 'S'], {}), '(DiffMat1, S)\n', (13003, 13016), True, 'import tensorflow as tf\n'), ((13033, 13055), 'tensorflow.matmul', 'tf.matmul', (['DiffMat2', 'I'], {}), '(DiffMat2, I)\n', (13042, 13055), True, 'import tensorflow as tf\n'), ((13072, 13094), 'tensorflow.matmul', 'tf.matmul', (['DiffMat3', 'R'], {}), '(DiffMat3, R)\n', (13081, 13094), True, 'import tensorflow as 
tf\n'), ((13110, 13132), 'tensorflow.matmul', 'tf.matmul', (['DiffMat4', 'D'], {}), '(DiffMat4, D)\n', (13119, 13132), True, 'import tensorflow as tf\n'), ((13159, 13193), 'tensorflow.constant', 'tf.constant', (['(7.0)'], {'dtype': 'tf.float64'}), '(7.0, dtype=tf.float64)\n', (13170, 13193), True, 'import tensorflow as tf\n'), ((14384, 14395), 'time.time', 'time.time', ([], {}), '()\n', (14393, 14395), False, 'import time\n'), ((19938, 19949), 'time.time', 'time.time', ([], {}), '()\n', (19947, 19949), False, 'import time\n'), ((22762, 22776), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (22774, 22776), True, 'import matplotlib.pyplot as plt\n'), ((22787, 22804), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (22797, 22804), True, 'import matplotlib.pyplot as plt\n'), ((22816, 22873), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration', 'loss_his', '"""k-"""'], {'lw': '(4)', 'label': '"""$loss$"""'}), "(iteration, loss_his, 'k-', lw=4, label='$loss$')\n", (22824, 22873), True, 'import matplotlib.pyplot as plt\n'), ((22885, 22950), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration', 'loss_his_u0', '"""r-"""'], {'lw': '(4)', 'label': '"""$loss_{u0}$"""'}), "(iteration, loss_his_u0, 'r-', lw=4, label='$loss_{u0}$')\n", (22893, 22950), True, 'import matplotlib.pyplot as plt\n'), ((22962, 23023), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration', 'loss_his_u', '"""b-"""'], {'lw': '(4)', 'label': '"""$loss_u$"""'}), "(iteration, loss_his_u, 'b-', lw=4, label='$loss_u$')\n", (22970, 23023), True, 'import matplotlib.pyplot as plt\n'), ((23035, 23096), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration', 'loss_his_f', '"""c-"""'], {'lw': '(4)', 'label': '"""$loss_f$"""'}), "(iteration, loss_his_f, 'c-', lw=4, label='$loss_f$')\n", (23043, 23096), True, 'import matplotlib.pyplot as plt\n'), ((23570, 23593), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (23576, 23593), True, 
'import matplotlib.pyplot as plt\n'), ((23775, 23833), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'History_loss.png')"], {'dpi': '(300)'}), "(save_results_to + 'History_loss.png', dpi=300)\n", (23786, 23833), True, 'import matplotlib.pyplot as plt\n'), ((23844, 23902), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'History_loss.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'History_loss.pdf', dpi=300)\n", (23855, 23902), True, 'import matplotlib.pyplot as plt\n'), ((24363, 24377), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (24375, 24377), True, 'import matplotlib.pyplot as plt\n'), ((24716, 24739), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (24726, 24739), True, 'import matplotlib.pyplot as plt\n'), ((24976, 24999), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (24982, 24999), True, 'import matplotlib.pyplot as plt\n'), ((25172, 25237), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Current_Suspectious.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Current_Suspectious.pdf', dpi=300)\n", (25183, 25237), True, 'import matplotlib.pyplot as plt\n'), ((25248, 25313), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Current_Suspectious.png')"], {'dpi': '(300)'}), "(save_results_to + 'Current_Suspectious.png', dpi=300)\n", (25259, 25313), True, 'import matplotlib.pyplot as plt\n'), ((25379, 25393), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (25391, 25393), True, 'import matplotlib.pyplot as plt\n'), ((25785, 25808), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (25795, 25808), True, 'import matplotlib.pyplot as plt\n'), ((26045, 26068), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (26051, 26068), True, 'import matplotlib.pyplot as plt\n'), ((26241, 26305), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Current_Infectious.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Current_Infectious.pdf', dpi=300)\n", (26252, 26305), True, 'import matplotlib.pyplot as plt\n'), ((26316, 26380), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Current_Infectious.png')"], {'dpi': '(300)'}), "(save_results_to + 'Current_Infectious.png', dpi=300)\n", (26327, 26380), True, 'import matplotlib.pyplot as plt\n'), ((26443, 26457), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (26455, 26457), True, 'import matplotlib.pyplot as plt\n'), ((26849, 26872), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (26859, 26872), True, 'import matplotlib.pyplot as plt\n'), ((27109, 27132), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (27115, 27132), True, 'import matplotlib.pyplot as plt\n'), ((27305, 27366), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Current_Removed.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Current_Removed.pdf', dpi=300)\n", (27316, 27366), True, 'import matplotlib.pyplot as plt\n'), ((27377, 27438), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Current_Removed.png')"], {'dpi': '(300)'}), "(save_results_to + 'Current_Removed.png', dpi=300)\n", (27388, 27438), True, 'import matplotlib.pyplot as plt\n'), ((27501, 27515), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (27513, 27515), True, 'import matplotlib.pyplot as plt\n'), ((27907, 27930), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (27917, 27930), True, 'import matplotlib.pyplot as plt\n'), ((28167, 28190), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (28173, 28190), True, 'import matplotlib.pyplot as plt\n'), ((28363, 28422), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(["(save_results_to + 'Current_Death.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Current_Death.pdf', dpi=300)\n", (28374, 28422), True, 'import matplotlib.pyplot as plt\n'), ((28433, 28492), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Current_Death.png')"], {'dpi': '(300)'}), "(save_results_to + 'Current_Death.png', dpi=300)\n", (28444, 28492), True, 'import matplotlib.pyplot as plt\n'), ((28544, 28558), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (28556, 28558), True, 'import matplotlib.pyplot as plt\n'), ((29147, 29170), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (29157, 29170), True, 'import matplotlib.pyplot as plt\n'), ((29337, 29360), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (29343, 29360), True, 'import matplotlib.pyplot as plt\n'), ((29538, 29589), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Kappa.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa.pdf', dpi=300)\n", (29549, 29589), True, 'import matplotlib.pyplot as plt\n'), ((29600, 29651), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Kappa.png')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa.png', dpi=300)\n", (29611, 29651), True, 'import matplotlib.pyplot as plt\n'), ((29698, 29712), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (29710, 29712), True, 'import matplotlib.pyplot as plt\n'), ((30135, 30158), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (30145, 30158), True, 'import matplotlib.pyplot as plt\n'), ((30327, 30350), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (30333, 30350), True, 'import matplotlib.pyplot as plt\n'), ((30530, 30586), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Kappa1_rec.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa1_rec.pdf', dpi=300)\n", 
(30541, 30586), True, 'import matplotlib.pyplot as plt\n'), ((30597, 30653), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Kappa1_rec.png')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa1_rec.png', dpi=300)\n", (30608, 30653), True, 'import matplotlib.pyplot as plt\n'), ((30687, 30701), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (30699, 30701), True, 'import matplotlib.pyplot as plt\n'), ((31124, 31147), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (31134, 31147), True, 'import matplotlib.pyplot as plt\n'), ((31316, 31339), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (31322, 31339), True, 'import matplotlib.pyplot as plt\n'), ((31519, 31575), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Kappa2_rec.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa2_rec.pdf', dpi=300)\n", (31530, 31575), True, 'import matplotlib.pyplot as plt\n'), ((31586, 31642), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Kappa2_rec.png')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa2_rec.png', dpi=300)\n", (31597, 31642), True, 'import matplotlib.pyplot as plt\n'), ((31676, 31690), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (31688, 31690), True, 'import matplotlib.pyplot as plt\n'), ((32113, 32136), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (32123, 32136), True, 'import matplotlib.pyplot as plt\n'), ((32305, 32328), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (32311, 32328), True, 'import matplotlib.pyplot as plt\n'), ((32508, 32564), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Kappa3_rec.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa3_rec.pdf', dpi=300)\n", (32519, 32564), True, 'import matplotlib.pyplot as plt\n'), ((32575, 32631), 'matplotlib.pyplot.savefig', 
'plt.savefig', (["(save_results_to + 'Kappa3_rec.png')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa3_rec.png', dpi=300)\n", (32586, 32631), True, 'import matplotlib.pyplot as plt\n'), ((32665, 32679), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (32677, 32679), True, 'import matplotlib.pyplot as plt\n'), ((33102, 33125), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (33112, 33125), True, 'import matplotlib.pyplot as plt\n'), ((33294, 33317), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (33300, 33317), True, 'import matplotlib.pyplot as plt\n'), ((33497, 33553), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Kappa4_rec.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa4_rec.pdf', dpi=300)\n", (33508, 33553), True, 'import matplotlib.pyplot as plt\n'), ((33564, 33620), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Kappa4_rec.png')"], {'dpi': '(300)'}), "(save_results_to + 'Kappa4_rec.png', dpi=300)\n", (33575, 33620), True, 'import matplotlib.pyplot as plt\n'), ((33658, 33672), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (33670, 33672), True, 'import matplotlib.pyplot as plt\n'), ((34054, 34077), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (34064, 34077), True, 'import matplotlib.pyplot as plt\n'), ((34246, 34269), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(60)'}), "('font', size=60)\n", (34252, 34269), True, 'import matplotlib.pyplot as plt\n'), ((34447, 34497), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Beta.pdf')"], {'dpi': '(300)'}), "(save_results_to + 'Beta.pdf', dpi=300)\n", (34458, 34497), True, 'import matplotlib.pyplot as plt\n'), ((34508, 34558), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_results_to + 'Beta.png')"], {'dpi': '(300)'}), "(save_results_to + 'Beta.png', dpi=300)\n", 
(34519, 34558), True, 'import matplotlib.pyplot as plt\n'), ((2091, 2134), 'tensorflow.zeros', 'tf.zeros', (['[poly_order, 1]'], {'dtype': 'tf.float64'}), '([poly_order, 1], dtype=tf.float64)\n', (2099, 2134), True, 'import tensorflow as tf\n'), ((2210, 2253), 'tensorflow.zeros', 'tf.zeros', (['[poly_order, 1]'], {'dtype': 'tf.float64'}), '([poly_order, 1], dtype=tf.float64)\n', (2218, 2253), True, 'import tensorflow as tf\n'), ((2329, 2372), 'tensorflow.zeros', 'tf.zeros', (['[poly_order, 1]'], {'dtype': 'tf.float64'}), '([poly_order, 1], dtype=tf.float64)\n', (2337, 2372), True, 'import tensorflow as tf\n'), ((2448, 2491), 'tensorflow.zeros', 'tf.zeros', (['[poly_order, 1]'], {'dtype': 'tf.float64'}), '([poly_order, 1], dtype=tf.float64)\n', (2456, 2491), True, 'import tensorflow as tf\n'), ((7081, 7159), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[in_dim, out_dim]'], {'stddev': 'xavier_stddev', 'dtype': 'tf.float64'}), '([in_dim, out_dim], stddev=xavier_stddev, dtype=tf.float64)\n', (7100, 7159), True, 'import tensorflow as tf\n'), ((7302, 7336), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float64'}), '(0.0, dtype=tf.float64)\n', (7313, 7336), True, 'import tensorflow as tf\n'), ((7338, 7372), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'dtype': 'tf.float64'}), '(1.0, dtype=tf.float64)\n', (7349, 7372), True, 'import tensorflow as tf\n'), ((7859, 7874), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (7868, 7874), True, 'import tensorflow as tf\n'), ((8206, 8248), 'numpy.transpose', 'np.transpose', (['Jacobi_polys[:poly_order, :]'], {}), '(Jacobi_polys[:poly_order, :])\n', (8218, 8248), True, 'import numpy as np\n'), ((8459, 8501), 'numpy.transpose', 'np.transpose', (['Jacobi_polys[:poly_order, :]'], {}), '(Jacobi_polys[:poly_order, :])\n', (8471, 8501), True, 'import numpy as np\n'), ((8712, 8754), 'numpy.transpose', 'np.transpose', (['Jacobi_polys[:poly_order, :]'], {}), '(Jacobi_polys[:poly_order, 
:])\n', (8724, 8754), True, 'import numpy as np\n'), ((8965, 9007), 'numpy.transpose', 'np.transpose', (['Jacobi_polys[:poly_order, :]'], {}), '(Jacobi_polys[:poly_order, :])\n', (8977, 9007), True, 'import numpy as np\n'), ((9226, 9274), 'numpy.transpose', 'np.transpose', (['Jacobi_polys_plots[:poly_order, :]'], {}), '(Jacobi_polys_plots[:poly_order, :])\n', (9238, 9274), True, 'import numpy as np\n'), ((9490, 9538), 'numpy.transpose', 'np.transpose', (['Jacobi_polys_plots[:poly_order, :]'], {}), '(Jacobi_polys_plots[:poly_order, :])\n', (9502, 9538), True, 'import numpy as np\n'), ((9754, 9802), 'numpy.transpose', 'np.transpose', (['Jacobi_polys_plots[:poly_order, :]'], {}), '(Jacobi_polys_plots[:poly_order, :])\n', (9766, 9802), True, 'import numpy as np\n'), ((10018, 10066), 'numpy.transpose', 'np.transpose', (['Jacobi_polys_plots[:poly_order, :]'], {}), '(Jacobi_polys_plots[:poly_order, :])\n', (10030, 10066), True, 'import numpy as np\n'), ((10773, 10786), 'numpy.eye', 'np.eye', (['(m + 1)'], {}), '(m + 1)\n', (10779, 10786), True, 'import numpy as np\n'), ((10939, 10952), 'numpy.eye', 'np.eye', (['(m + 1)'], {}), '(m + 1)\n', (10945, 10952), True, 'import numpy as np\n'), ((11438, 11450), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (11447, 11450), True, 'import numpy as np\n'), ((11513, 11540), 'tensorflow.reshape', 'tf.reshape', (['L_Temp1', '[m, 1]'], {}), '(L_Temp1, [m, 1])\n', (11523, 11540), True, 'import tensorflow as tf\n'), ((11680, 11707), 'tensorflow.reshape', 'tf.reshape', (['L_Temp2', '[m, 1]'], {}), '(L_Temp2, [m, 1])\n', (11690, 11707), True, 'import tensorflow as tf\n'), ((12125, 12164), 'tensorflow.constant', 'tf.constant', (['[1, m + 1]'], {'dtype': 'tf.int32'}), '([1, m + 1], dtype=tf.int32)\n', (12136, 12164), True, 'import tensorflow as tf\n'), ((13891, 13929), 'numpy.array', 'np.array', (['[loss, lossU0, lossU, lossF]'], {}), '([loss, lossU0, lossU, lossF])\n', (13899, 13929), True, 'import numpy as np\n'), ((18916, 18947), 
'os.path.exists', 'os.path.exists', (['save_results_to'], {}), '(save_results_to)\n', (18930, 18947), False, 'import os\n'), ((18962, 18990), 'os.makedirs', 'os.makedirs', (['save_results_to'], {}), '(save_results_to)\n', (18973, 18990), False, 'import os\n'), ((19160, 19190), 'os.path.exists', 'os.path.exists', (['save_models_to'], {}), '(save_models_to)\n', (19174, 19190), False, 'import os\n'), ((19205, 19232), 'os.makedirs', 'os.makedirs', (['save_models_to'], {}), '(save_models_to)\n', (19216, 19232), False, 'import os\n'), ((20999, 21024), 'numpy.asarray', 'np.asarray', (['total_records'], {}), '(total_records)\n', (21009, 21024), True, 'import numpy as np\n'), ((21050, 21075), 'numpy.asarray', 'np.asarray', (['total_records'], {}), '(total_records)\n', (21060, 21075), True, 'import numpy as np\n'), ((21105, 21130), 'numpy.asarray', 'np.asarray', (['total_records'], {}), '(total_records)\n', (21115, 21130), True, 'import numpy as np\n'), ((21159, 21184), 'numpy.asarray', 'np.asarray', (['total_records'], {}), '(total_records)\n', (21169, 21184), True, 'import numpy as np\n'), ((21213, 21238), 'numpy.asarray', 'np.asarray', (['total_records'], {}), '(total_records)\n', (21223, 21238), True, 'import numpy as np\n'), ((23133, 23186), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration_LBFGS', 'loss_his_LBFGS', '"""k-"""'], {'lw': '(4)'}), "(iteration_LBFGS, loss_his_LBFGS, 'k-', lw=4)\n", (23141, 23186), True, 'import matplotlib.pyplot as plt\n'), ((23202, 23258), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration_LBFGS', 'loss_his_u0_LBFGS', '"""r-"""'], {'lw': '(4)'}), "(iteration_LBFGS, loss_his_u0_LBFGS, 'r-', lw=4)\n", (23210, 23258), True, 'import matplotlib.pyplot as plt\n'), ((23274, 23329), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration_LBFGS', 'loss_his_u_LBFGS', '"""b-"""'], {'lw': '(4)'}), "(iteration_LBFGS, loss_his_u_LBFGS, 'b-', lw=4)\n", (23282, 23329), True, 'import matplotlib.pyplot as plt\n'), ((23345, 23400), 'matplotlib.pyplot.plot', 
'plt.plot', (['iteration_LBFGS', 'loss_his_f_LBFGS', '"""c-"""'], {'lw': '(4)'}), "(iteration_LBFGS, loss_his_f_LBFGS, 'c-', lw=4)\n", (23353, 23400), True, 'import matplotlib.pyplot as plt\n'), ((24538, 24569), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (24557, 24569), True, 'import matplotlib.dates as mdates\n'), ((24609, 24638), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (24629, 24638), True, 'import matplotlib.dates as mdates\n'), ((24676, 24705), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (24693, 24705), True, 'import matplotlib.dates as mdates\n'), ((25607, 25638), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (25626, 25638), True, 'import matplotlib.dates as mdates\n'), ((25678, 25707), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (25698, 25707), True, 'import matplotlib.dates as mdates\n'), ((25745, 25774), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (25762, 25774), True, 'import matplotlib.dates as mdates\n'), ((26671, 26702), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (26690, 26702), True, 'import matplotlib.dates as mdates\n'), ((26742, 26771), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (26762, 26771), True, 'import matplotlib.dates as mdates\n'), ((26809, 26838), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (26826, 26838), True, 'import matplotlib.dates as mdates\n'), ((27729, 27760), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (27748, 27760), True, 'import matplotlib.dates as mdates\n'), ((27800, 27829), 
'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (27820, 27829), True, 'import matplotlib.dates as mdates\n'), ((27867, 27896), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (27884, 27896), True, 'import matplotlib.dates as mdates\n'), ((28969, 29000), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (28988, 29000), True, 'import matplotlib.dates as mdates\n'), ((29040, 29069), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (29060, 29069), True, 'import matplotlib.dates as mdates\n'), ((29107, 29136), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (29124, 29136), True, 'import matplotlib.dates as mdates\n'), ((29957, 29988), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (29976, 29988), True, 'import matplotlib.dates as mdates\n'), ((30028, 30057), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (30048, 30057), True, 'import matplotlib.dates as mdates\n'), ((30095, 30124), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (30112, 30124), True, 'import matplotlib.dates as mdates\n'), ((30946, 30977), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (30965, 30977), True, 'import matplotlib.dates as mdates\n'), ((31017, 31046), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (31037, 31046), True, 'import matplotlib.dates as mdates\n'), ((31084, 31113), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (31101, 31113), True, 'import matplotlib.dates as mdates\n'), ((31935, 31966), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', 
([], {'interval': '(1)'}), '(interval=1)\n', (31954, 31966), True, 'import matplotlib.dates as mdates\n'), ((32006, 32035), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (32026, 32035), True, 'import matplotlib.dates as mdates\n'), ((32073, 32102), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (32090, 32102), True, 'import matplotlib.dates as mdates\n'), ((32924, 32955), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (32943, 32955), True, 'import matplotlib.dates as mdates\n'), ((32995, 33024), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (33015, 33024), True, 'import matplotlib.dates as mdates\n'), ((33062, 33091), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (33079, 33091), True, 'import matplotlib.dates as mdates\n'), ((33876, 33907), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (33895, 33907), True, 'import matplotlib.dates as mdates\n'), ((33947, 33976), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (33967, 33976), True, 'import matplotlib.dates as mdates\n'), ((34014, 34043), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {'interval': '(7)'}), '(interval=7)\n', (34031, 34043), True, 'import matplotlib.dates as mdates\n'), ((2818, 2886), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(True)'}), '(allow_soft_placement=True, log_device_placement=True)\n', (2832, 2886), True, 'import tensorflow as tf\n'), ((4648, 4683), 'tensorflow.square', 'tf.square', (['(self.D0_u - self.D0_pred)'], {}), '(self.D0_u - self.D0_pred)\n', (4657, 4683), True, 'import tensorflow as tf\n'), ((5260, 5281), 'tensorflow.square', 'tf.square', (['self.R_con'], 
{}), '(self.R_con)\n', (5269, 5281), True, 'import tensorflow as tf\n'), ((6568, 6614), 'tensorflow.zeros', 'tf.zeros', (['[1, layers[l + 1]]'], {'dtype': 'tf.float64'}), '([1, layers[l + 1]], dtype=tf.float64)\n', (6576, 6614), True, 'import tensorflow as tf\n'), ((7426, 7443), 'tensorflow.sigmoid', 'tf.sigmoid', (['BetaI'], {}), '(BetaI)\n', (7436, 7443), True, 'import tensorflow as tf\n'), ((8385, 8402), 'tensorflow.sigmoid', 'tf.sigmoid', (['Kappa'], {}), '(Kappa)\n', (8395, 8402), True, 'import tensorflow as tf\n'), ((8638, 8655), 'tensorflow.sigmoid', 'tf.sigmoid', (['Kappa'], {}), '(Kappa)\n', (8648, 8655), True, 'import tensorflow as tf\n'), ((8891, 8908), 'tensorflow.sigmoid', 'tf.sigmoid', (['Kappa'], {}), '(Kappa)\n', (8901, 8908), True, 'import tensorflow as tf\n'), ((9145, 9162), 'tensorflow.sigmoid', 'tf.sigmoid', (['Kappa'], {}), '(Kappa)\n', (9155, 9162), True, 'import tensorflow as tf\n'), ((9411, 9428), 'tensorflow.sigmoid', 'tf.sigmoid', (['Kappa'], {}), '(Kappa)\n', (9421, 9428), True, 'import tensorflow as tf\n'), ((9675, 9692), 'tensorflow.sigmoid', 'tf.sigmoid', (['Kappa'], {}), '(Kappa)\n', (9685, 9692), True, 'import tensorflow as tf\n'), ((9939, 9956), 'tensorflow.sigmoid', 'tf.sigmoid', (['Kappa'], {}), '(Kappa)\n', (9949, 9956), True, 'import tensorflow as tf\n'), ((10204, 10221), 'tensorflow.sigmoid', 'tf.sigmoid', (['Kappa'], {}), '(Kappa)\n', (10214, 10221), True, 'import tensorflow as tf\n'), ((11115, 11131), 'numpy.zeros', 'np.zeros', (['(1, m)'], {}), '((1, m))\n', (11123, 11131), True, 'import numpy as np\n'), ((11132, 11141), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (11138, 11141), True, 'import numpy as np\n'), ((11348, 11390), 'tensorflow.pow', 'tf.pow', (['Temp3[:, 0:m - 1]', '(1.0 - kappa_mat)'], {}), '(Temp3[:, 0:m - 1], 1.0 - kappa_mat)\n', (11354, 11390), True, 'import tensorflow as tf\n'), ((11599, 11611), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (11608, 11611), True, 'import numpy as np\n'), ((11764, 
11798), 'tensorflow.zeros', 'tf.zeros', (['(1, 1)'], {'dtype': 'tf.float64'}), '((1, 1), dtype=tf.float64)\n', (11772, 11798), True, 'import tensorflow as tf\n'), ((11866, 11900), 'tensorflow.zeros', 'tf.zeros', (['(m, 1)'], {'dtype': 'tf.float64'}), '((m, 1), dtype=tf.float64)\n', (11874, 11900), True, 'import tensorflow as tf\n'), ((11903, 11936), 'tensorflow.ones', 'tf.ones', (['(1, 1)'], {'dtype': 'tf.float64'}), '((1, 1), dtype=tf.float64)\n', (11910, 11936), True, 'import tensorflow as tf\n'), ((12067, 12090), 'tensorflow.pow', 'tf.pow', (['Tau', '(-kappa_vec)'], {}), '(Tau, -kappa_vec)\n', (12073, 12090), True, 'import tensorflow as tf\n'), ((13257, 13278), 'tensorflow.pow', 'tf.pow', (['T', '(Kappa1 - 1)'], {}), '(T, Kappa1 - 1)\n', (13263, 13278), True, 'import tensorflow as tf\n'), ((13288, 13311), 'tensorflow.lgamma', 'tf.lgamma', (['(1.0 + Kappa1)'], {}), '(1.0 + Kappa1)\n', (13297, 13311), True, 'import tensorflow as tf\n'), ((13326, 13347), 'tensorflow.pow', 'tf.pow', (['T', '(Kappa2 - 1)'], {}), '(T, Kappa2 - 1)\n', (13332, 13347), True, 'import tensorflow as tf\n'), ((13357, 13380), 'tensorflow.lgamma', 'tf.lgamma', (['(1.0 + Kappa2)'], {}), '(1.0 + Kappa2)\n', (13366, 13380), True, 'import tensorflow as tf\n'), ((13395, 13416), 'tensorflow.pow', 'tf.pow', (['T', '(Kappa3 - 1)'], {}), '(T, Kappa3 - 1)\n', (13401, 13416), True, 'import tensorflow as tf\n'), ((13426, 13449), 'tensorflow.lgamma', 'tf.lgamma', (['(1.0 + Kappa3)'], {}), '(1.0 + Kappa3)\n', (13435, 13449), True, 'import tensorflow as tf\n'), ((13464, 13485), 'tensorflow.pow', 'tf.pow', (['T', '(Kappa4 - 1)'], {}), '(T, Kappa4 - 1)\n', (13470, 13485), True, 'import tensorflow as tf\n'), ((13495, 13518), 'tensorflow.lgamma', 'tf.lgamma', (['(1.0 + Kappa4)'], {}), '(1.0 + Kappa4)\n', (13504, 13518), True, 'import tensorflow as tf\n'), ((15486, 15497), 'time.time', 'time.time', ([], {}), '()\n', (15495, 15497), False, 'import time\n'), ((17900, 17915), 'scipy.special.jacobi', 'jacobi', (['n', 
'(0)', '(0)'], {}), '(n, 0, 0)\n', (17906, 17915), False, 'from scipy.special import jacobi\n'), ((18000, 18015), 'scipy.special.jacobi', 'jacobi', (['n', '(0)', '(0)'], {}), '(n, 0, 0)\n', (18006, 18015), False, 'from scipy.special import jacobi\n'), ((21387, 21410), 'numpy.arange', 'np.arange', (['N_Iter_LBFGS'], {}), '(N_Iter_LBFGS)\n', (21396, 21410), True, 'import numpy as np\n'), ((21452, 21483), 'numpy.asarray', 'np.asarray', (['total_records_LBFGS'], {}), '(total_records_LBFGS)\n', (21462, 21483), True, 'import numpy as np\n'), ((21522, 21553), 'numpy.asarray', 'np.asarray', (['total_records_LBFGS'], {}), '(total_records_LBFGS)\n', (21532, 21553), True, 'import numpy as np\n'), ((21592, 21623), 'numpy.asarray', 'np.asarray', (['total_records_LBFGS'], {}), '(total_records_LBFGS)\n', (21602, 21623), True, 'import numpy as np\n'), ((21662, 21693), 'numpy.asarray', 'np.asarray', (['total_records_LBFGS'], {}), '(total_records_LBFGS)\n', (21672, 21693), True, 'import numpy as np\n'), ((4510, 4545), 'tensorflow.square', 'tf.square', (['(self.I0_u - self.I0_pred)'], {}), '(self.I0_u - self.I0_pred)\n', (4519, 4545), True, 'import tensorflow as tf\n'), ((4579, 4614), 'tensorflow.square', 'tf.square', (['(self.R0_u - self.R0_pred)'], {}), '(self.R0_u - self.R0_pred)\n', (4588, 4614), True, 'import tensorflow as tf\n'), ((4869, 4902), 'tensorflow.square', 'tf.square', (['(self.R_pred - self.R_u)'], {}), '(self.R_pred - self.R_u)\n', (4878, 4902), True, 'import tensorflow as tf\n'), ((4939, 4972), 'tensorflow.square', 'tf.square', (['(self.D_pred - self.D_u)'], {}), '(self.D_pred - self.D_u)\n', (4948, 4972), True, 'import tensorflow as tf\n'), ((5200, 5219), 'tensorflow.square', 'tf.square', (['self.R_f'], {}), '(self.R_f)\n', (5209, 5219), True, 'import tensorflow as tf\n'), ((7769, 7784), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (7778, 7784), True, 'import tensorflow as tf\n'), ((11236, 11278), 'tensorflow.pow', 'tf.pow', (['Temp1[:, 0:m - 
1]', '(1.0 - kappa_mat)'], {}), '(Temp1[:, 0:m - 1], 1.0 - kappa_mat)\n', (11242, 11278), True, 'import tensorflow as tf\n'), ((12099, 12123), 'tensorflow.lgamma', 'tf.lgamma', (['(2 - kappa_vec)'], {}), '(2 - kappa_vec)\n', (12108, 12123), True, 'import tensorflow as tf\n'), ((14580, 14591), 'time.time', 'time.time', ([], {}), '()\n', (14589, 14591), False, 'import time\n'), ((15199, 15265), 'numpy.array', 'np.array', (['[it, loss_value, lossU0_value, lossU_value, lossF_value]'], {}), '([it, loss_value, lossU0_value, lossU_value, lossF_value])\n', (15207, 15265), True, 'import numpy as np\n'), ((29800, 29826), 'numpy.asarray', 'np.asarray', (['Kappa1_records'], {}), '(Kappa1_records)\n', (29810, 29826), True, 'import numpy as np\n'), ((30789, 30815), 'numpy.asarray', 'np.asarray', (['Kappa2_records'], {}), '(Kappa2_records)\n', (30799, 30815), True, 'import numpy as np\n'), ((31778, 31804), 'numpy.asarray', 'np.asarray', (['Kappa3_records'], {}), '(Kappa3_records)\n', (31788, 31804), True, 'import numpy as np\n'), ((32767, 32793), 'numpy.asarray', 'np.asarray', (['Kappa4_records'], {}), '(Kappa4_records)\n', (32777, 32793), True, 'import numpy as np\n'), ((4802, 4835), 'tensorflow.square', 'tf.square', (['(self.I_pred - self.I_u)'], {}), '(self.I_pred - self.I_u)\n', (4811, 4835), True, 'import tensorflow as tf\n'), ((5140, 5159), 'tensorflow.square', 'tf.square', (['self.D_f'], {}), '(self.D_f)\n', (5149, 5159), True, 'import tensorflow as tf\n'), ((11291, 11333), 'tensorflow.pow', 'tf.pow', (['Temp2[:, 0:m - 1]', '(1.0 - kappa_mat)'], {}), '(Temp2[:, 0:m - 1], 1.0 - kappa_mat)\n', (11297, 11333), True, 'import tensorflow as tf\n'), ((5020, 5039), 'tensorflow.square', 'tf.square', (['self.S_f'], {}), '(self.S_f)\n', (5029, 5039), True, 'import tensorflow as tf\n'), ((5080, 5099), 'tensorflow.square', 'tf.square', (['self.I_f'], {}), '(self.I_f)\n', (5089, 5099), True, 'import tensorflow as tf\n'), ((5995, 6010), 'numpy.finfo', 'np.finfo', (['float'], {}), 
'(float)\n', (6003, 6010), True, 'import numpy as np\n')] |
""" modified vibration.py for internal coordinate hessian fitting
"""
from __future__ import division
from builtins import zip
from builtins import range
import os
import shutil
from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, bohr2ang, warn_press_key, pvec1d, pmat2d
import numpy as np
from numpy.linalg import multi_dot
from forcebalance.target import Target
from forcebalance.molecule import Molecule, format_xyz_coord
from re import match, sub
import subprocess
from subprocess import PIPE
from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd
# from ._assign import Assign
from scipy import optimize
from collections import OrderedDict
#from _increment import Vibration_Build
from forcebalance.output import getLogger
from forcebalance.optimizer import Counter
from forcebalance.vibration import read_reference_vdata, vib_overlap
import copy
logger = getLogger(__name__)
Bohr2nm = 0.0529177210903
bohr2ang = 0.529177210903
Hartree2kJmol = 2625.4996394798254
class Hessian(Target):
    """Fitting target that matches the MM Hessian, expressed in redundant
    internal coordinates, against a QM reference Hessian.

    Reference data comes from two files under the target directory:
    ``hdata.txt`` (flattened QM internal-coordinate Hessian) and
    ``vdata.txt`` (QM geometry, vibrational frequencies, normal modes).
    """
    def __init__(self,options,tgt_opts,forcefield):
        """Initialization."""
        # Initialize the SuperClass!
        super(Hessian,self).__init__(options,tgt_opts,forcefield)
        #======================================#
        # Options that are given by the parser #
        #======================================#
        # Weighting scheme for Hessian elements; only type 0 (uniform
        # weights) is implemented -- see get_wts().
        self.set_option(tgt_opts,'hess_normalize_type')
        ## Option for how much data to write to disk.
        self.set_option(tgt_opts,'writelevel','writelevel')
        ## Option for normal mode calculation w/ or w/o geometry optimization
        self.set_option(tgt_opts,'optimize_geometry', default=1)
        #======================================#
        # Variables which are set here #
        #======================================#
        ## Build internal coordinates.
        self._build_internal_coordinates()
        ## The vdata.txt file that contains the qm hessian.
        self.hfnm = os.path.join(self.tgtdir,"hdata.txt")
        ## The vdata.txt file that contains the vibrations.
        self.vfnm = os.path.join(self.tgtdir,"vdata.txt")
        ## Read in the reference data
        self.read_reference_data()
        ## Build keyword dictionaries to pass to engine.
        engine_args = OrderedDict(list(self.OptionDict.items()) + list(options.items()))
        # 'name' would collide with the engine's own keyword; drop it.
        engine_args.pop('name', None)
        ## Create engine object.
        self.engine = self.engine_(target=self, **engine_args)
        ## create wts and denominator
        self.get_wts()
        self.denom = 1
    def _build_internal_coordinates(self):
        """Build primitive (redundant) internal coordinates for the
        molecule read from input.mol2 in the target directory."""
        from geometric.internal import PrimitiveInternalCoordinates
        m = Molecule(os.path.join(self.tgtdir, "input.mol2"))
        IC = PrimitiveInternalCoordinates(m)
        self.IC = IC
    def read_reference_data(self): # HJ: copied from vibration.py and modified
        """ Read the reference hessian data from a file. """
        # hdata.txt stores the QM Hessian as a flat vector; the matrix is
        # square, so its dimension is the square root of the length.
        self.ref_Hq_flat = np.loadtxt(self.hfnm)
        Hq_size =int(np.sqrt(len(self.ref_Hq_flat)))
        self.ref_Hq = self.ref_Hq_flat.reshape((Hq_size, Hq_size))
        """ Read the reference vibrational data from a file. """
        self.na, self.ref_xyz, self.ref_eigvals, self.ref_eigvecs = read_reference_vdata(self.vfnm)
        return
    def get_wts(self):
        """Set per-element least-squares weights self.wts for the Hessian.

        Only hess_normalize_type == 0 (uniform weights) is implemented;
        the weights are normalized to sum to 1.
        """
        from geometric.internal import Distance, Angle, Dihedral
        # Counts of bond / bond+angle / bond+angle+dihedral internals.
        # NOTE(review): currently unused by the implemented (uniform)
        # weighting scheme; presumably intended for non-uniform schemes.
        nb = len([ic for ic in self.IC.Internals if isinstance(ic,Distance) ])
        nba = nb + len([ic for ic in self.IC.Internals if isinstance(ic,Angle) ])
        nbap = nba + len([ic for ic in self.IC.Internals if isinstance(ic,Dihedral) ])
        Hq_size =int(np.sqrt(len(self.ref_Hq_flat)))
        if self.hess_normalize_type == 0 :
            self.wts = np.ones(len(self.ref_Hq_flat))
        else:
            raise NotImplementedError
        # normalize weights
        self.wts /= np.sum(self.wts)
    def indicate(self):
        """ Print qualitative indicator. """
        # if self.reassign == 'overlap' : count_assignment(self.c2r)
        banner = "Hessian"
        headings = ["Diagonal", "Reference", "Calculated", "Difference"]
        # Tabulate QM vs. MM Hessian diagonal elements and their difference.
        data = OrderedDict([(i, ["%.4f" % self.ref_Hq.diagonal()[i], "%.4f" % self.Hq.diagonal()[i], "%.4f" % (self.Hq.diagonal()[i] - self.ref_Hq.diagonal()[i])]) for i in range(len(self.ref_Hq))])
        self.printcool_table(data, headings, banner)
        return
    def hessian_driver(self):
        """Run the engine's normal mode analysis.

        Returns the tuple (Xx, Gx, Hx, freqs, normal_modes, M_opt) that
        get() unpacks; skips geometry optimization when
        optimize_geometry != 1.
        """
        if hasattr(self, 'engine') and hasattr(self.engine, 'normal_modes'):
            if self.optimize_geometry == 1:
                return self.engine.normal_modes(for_hessian_target=True)
            else:
                return self.engine.normal_modes(optimize=False, for_hessian_target=True)
        else:
            logger.error('Internal coordinate hessian calculation not supported, try using a different engine\n')
            raise NotImplementedError
    def converting_to_int_vec(self, xyz, dx):
        """Convert a Cartesian displacement dx at geometry xyz into an
        internal-coordinate displacement via the Wilson B matrix (dq = B dx)."""
        dx = np.array(dx).flatten()
        Bmat = self.IC.wilsonB(xyz)
        dq = multi_dot([Bmat,dx])
        return dq
    def calc_int_normal_mode(self, xyz, cart_normal_mode):
        """Convert each Cartesian normal-mode displacement vector into
        internal coordinates, keeping only the bond/angle/dihedral/
        out-of-plane components."""
        from geometric.internal import Distance, Angle, Dihedral, OutOfPlane
        ninternals_eff= len([ic for ic in self.IC.Internals if isinstance(ic,(Distance, Angle, Dihedral, OutOfPlane))])
        int_normal_mode = []
        for idx, vec in enumerate(cart_normal_mode):
            # convert cartesian coordinates displacement to internal coordinates
            dq = self.converting_to_int_vec(xyz, vec)
            int_normal_mode.append(dq[:ninternals_eff]) # disregard Translations and Rotations
        return np.array(int_normal_mode)
    def get(self, mvals, AGrad=False, AHess=False):
        """ Evaluate objective function. """
        Answer = {'X':0.0, 'G':np.zeros(self.FF.np), 'H':np.zeros((self.FF.np, self.FF.np))}
        def compute(mvals_):
            # Rebuild the force field at these parameter values, then run
            # the engine's normal-mode / Hessian calculation.
            self.FF.make(mvals_)
            Xx, Gx, Hx, freqs, normal_modes, M_opt = self.hessian_driver()
            # convert into internal hessian
            # Unit conversion: geometry nm -> Bohr; gradient and Hessian
            # from kJ/mol-based units to atomic units.  NOTE(review):
            # assumes the engine reports GROMACS-style units -- confirm
            # for other engines.
            Xx *= 1/ Bohr2nm
            Gx *= Bohr2nm/ Hartree2kJmol
            Hx *= Bohr2nm**2/ Hartree2kJmol
            Hq = self.IC.calcHess(Xx, Gx, Hx)
            # Stash side products on the function object so the outer
            # scope can read them after (finite-difference) calls.
            compute.Hq_flat = Hq.flatten()
            compute.freqs = freqs
            compute.normal_modes = normal_modes
            compute.M_opt = M_opt
            # NOTE(review): 'diff' is computed but never used.
            diff = Hq - self.ref_Hq
            # Weighted residual vector of the flattened Hessian.
            return (np.sqrt(self.wts)/self.denom) * (compute.Hq_flat - self.ref_Hq_flat)
        V = compute(mvals)
        Answer['X'] = np.dot(V,V) * len(compute.freqs) # HJ: len(compute.freqs) is multiplied to match the scale of X2 with vib freq target X2
        # compute gradients and hessian
        dV = np.zeros((self.FF.np,len(V)))
        if AGrad or AHess:
            for p in self.pgrad:
                # Central finite difference of the residual vector w.r.t.
                # parameter p.
                dV[p,:], _ = f12d3p(fdwrap(compute, mvals, p), h = self.h, f0 = V)
        for p in self.pgrad:
            # Gauss-Newton style derivatives: G = 2 J^T V, H ~ 2 J^T J,
            # scaled by len(freqs) to match Answer['X'] above.
            Answer['G'][p] = 2*np.dot(V, dV[p,:]) * len(compute.freqs)
            for q in self.pgrad:
                Answer['H'][p,q] = 2*np.dot(dV[p,:], dV[q,:]) * len(compute.freqs)
        if not in_fd():
            self.Hq_flat = compute.Hq_flat
            self.Hq = self.Hq_flat.reshape(self.ref_Hq.shape)
            self.objective = Answer['X']
        self.FF.make(mvals)
        if self.writelevel > 0:
            # 1. write HessianCompare.txt
            hessian_comparison = np.array([
                self.ref_Hq_flat,
                compute.Hq_flat,
                compute.Hq_flat - self.ref_Hq_flat,
                np.sqrt(self.wts)/self.denom
            ]).T
            np.savetxt("HessianCompare.txt", hessian_comparison, header="%11s  %12s  %12s  %12s" % ("QMHessian", "MMHessian", "Delta(MM-QM)", "Weight"), fmt="% 12.6e")
            # 2. rearrange MM vibrational frequencies using overlap between normal modes in redundant internal coordinates
            ref_int_normal_modes = self.calc_int_normal_mode(self.ref_xyz, self.ref_eigvecs)
            int_normal_modes = self.calc_int_normal_mode(np.array(compute.M_opt.xyzs[0]), compute.normal_modes)
            # Cost matrix: 1 - |cosine similarity| between each QM/MM
            # internal-coordinate mode pair.
            a = np.array([[(1.0-np.abs(np.dot(v1/np.linalg.norm(v1),v2/np.linalg.norm(v2)))) for v2 in int_normal_modes] for v1 in ref_int_normal_modes])
            # Optimal one-to-one assignment of MM modes to QM modes.
            row, c2r = optimize.linear_sum_assignment(a)
            # old arrangement method, which uses overlap between mass weighted vibrational modes in cartesian coordinates
            # a = np.array([[(1.0-self.vib_overlap(v1, v2)) for v2 in compute.normal_modes] for v1 in self.ref_eigvecs])
            # row, c2r = optimize.linear_sum_assignment(a)
            freqs_rearr = compute.freqs[c2r]
            normal_modes_rearr = compute.normal_modes[c2r]
            # 3. Save rearranged frequencies and normal modes into a file for post-analysis
            with open('mm_vdata.txt', 'w') as outfile:
                outfile.writelines('%s\n' % line for line in compute.M_opt.write_xyz([0]))
                outfile.write('\n')
                for freq, normal_mode in zip(freqs_rearr, normal_modes_rearr):
                    outfile.write(f'{freq}\n')
                    for nx, ny, nz in normal_mode:
                        outfile.write(f'{nx:13.4f} {ny:13.4f} {nz:13.4f}\n')
                    outfile.write('\n')
                outfile.close()
            # 4. draw a scatter plot of vibrational frequencies and an overlap matrix of normal modes in cartessian coordinates
            draw_vibfreq_scatter_plot_n_overlap_matrix(self.name, self.engine, self.ref_eigvals, self.ref_eigvecs, freqs_rearr, normal_modes_rearr)
        return Answer
def cal_corr_coef(A):
    """Return the correlation coefficient of matrix *A*.

    The matrix entries are treated as weights over the grid of
    (row index, column index) pairs, both indices running 1..len(A);
    equations follow https://math.stackexchange.com/a/1393907.
    """
    size = len(A)
    ones = np.ones(size)
    idx = np.array(range(1, size + 1))
    idx_sq = idx * idx
    # Weighted moments of the row/column indices under A.
    total = ones @ A @ ones
    sum_x = idx @ A @ ones
    sum_y = ones @ A @ idx
    sum_x2 = idx_sq @ A @ ones
    sum_y2 = ones @ A @ idx_sq
    sum_xy = idx @ A @ idx
    numer = total * sum_xy - sum_x * sum_y
    denom = np.sqrt(total * sum_x2 - sum_x ** 2) * np.sqrt(total * sum_y2 - sum_y ** 2)
    return numer / denom
def draw_normal_modes(elem, ref_xyz, ref_eigvals, ref_eigvecs, mm_xyz, freqs_rearr, normal_modes_rearr):
    """Overlay QM (blue) and MM (red) normal modes as 3-D quiver plots.

    One 3-D subplot is drawn per normal mode; atoms are shown as black dots
    labelled with their element symbols.  The figure is saved to
    'mm_vdata.pdf'.

    Args:
        elem: element symbols, one per atom.
        ref_xyz: QM geometry; unpacked as (x, y, z) columns via ``.T``.
        ref_eigvals: QM vibrational frequencies (used in the titles).
        ref_eigvecs: QM normal modes, one (n_atoms x 3) array per mode.
        mm_xyz: MM-optimised geometry.
        freqs_rearr: MM frequencies rearranged to match the QM mode order.
        normal_modes_rearr: MM normal modes rearranged likewise.
    """
    import matplotlib.pyplot as plt
    # draw qm and mm normal mode overlay
    fig, axs = plt.subplots(len(normal_modes_rearr), 1, figsize=(4, 4*len(normal_modes_rearr)), subplot_kw={'projection':'3d'})
    def render_normal_modes(elem, xyz, eigvecs, color, qm=False, ref_eigvals=None, eigvals_rearr=None):
        # Draw every mode in *eigvecs* onto its subplot in the given color;
        # only the QM pass (qm=True) writes the subplot titles.
        for idx, eigvec in enumerate(eigvecs):
            x, y, z = xyz.T
            u, v, w = eigvec.T *5  # displacement vectors, scaled x5 for visibility
            origin = np.array([x, y, z])
            axs[idx].quiver(*origin, u, v, w, color=color)
            axs[idx].set_xlabel('x')
            axs[idx].set_ylabel('y')
            axs[idx].set_zlabel('z')
            if qm:
                axs[idx].set_title(f'normal mode #{idx} (blue:QM({ref_eigvals[idx]:.2f}), red:MM({eigvals_rearr[idx]:.2f}))')
            axs[idx].scatter(x, y, z, color='black', s=30)
            # Fit the axis limits to the arrow tips.
            axs[idx].set_xlim(min(u+x), max(u+x))
            axs[idx].set_ylim(min(v+y), max(v+y))
            axs[idx].set_zlim(min(w+z), max(w+z))
            for i, elm in enumerate(elem):
                axs[idx].text(x[i], y[i], z[i],elm)
    render_normal_modes(elem, ref_xyz, ref_eigvecs, 'blue', qm=True, ref_eigvals=ref_eigvals, eigvals_rearr=freqs_rearr)
    render_normal_modes(elem, np.array(mm_xyz), normal_modes_rearr, 'red')
    plt.tight_layout()
    plt.savefig('mm_vdata.pdf')
def draw_vibfreq_scatter_plot_n_overlap_matrix(name, engine, ref_eigvals, ref_eigvecs, freqs_rearr, normal_modes_rearr):
    """Plot QM-vs-MM vibrational frequencies and the normal-mode overlap matrix.

    Left panel: scatter of QM frequencies against the rearranged MM
    frequencies with the MAE in the title.  Right panel: the QM/MM
    normal-mode overlap matrix, annotated with a correlation coefficient
    and a relative error against the QM self-overlap matrix.  The figure is
    saved to 'vibfreq_scatter_plot_n_overlap_matrix.pdf'.
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
    plt.switch_backend('agg')
    fig, axs = plt.subplots(1,2, figsize=(10,6))
    # Pairwise overlaps between QM and (rearranged) MM modes, plus the QM
    # self-overlap matrix used as the error reference below.
    overlap_matrix = np.array([[(vib_overlap(engine, v1, v2)) for v2 in normal_modes_rearr] for v1 in ref_eigvecs])
    qm_overlap_matrix = np.array([[(vib_overlap(engine,v1, v2)) for v2 in ref_eigvecs] for v1 in ref_eigvecs])
    axs[0].scatter(ref_eigvals, freqs_rearr, label='MM vibrational frequencies(rearr.)')
    axs[0].plot(ref_eigvals,ref_eigvals, 'k-')  # y = x reference line
    axs[0].legend()
    axs[0].set_xlabel(r'QM vibrational frequency ($cm^{-1}$)')
    axs[0].set_ylabel(r'MM vibrational frequency ($cm^{-1}$)')
    mae = np.sum(np.abs(ref_eigvals - freqs_rearr))/ len(ref_eigvals)
    axs[0].set_title(f'QM vs. MM vibrational frequencies\n MAE= {mae:.2f}')
    x0,x1 = axs[0].get_xlim()
    y0,y1 = axs[0].get_ylim()
    axs[0].set_aspect((x1-x0)/(y1-y0))  # force a square panel regardless of data range
    # move ax x axis to top
    axs[1].xaxis.tick_top()
    # move ax x ticks inside
    axs[1].tick_params(axis="y", direction='in')
    axs[1].tick_params(axis="x", direction='in')
    # draw matrix
    im = axs[1].imshow(overlap_matrix, cmap= 'OrRd', vmin=0,vmax=1)
    # colorbar, sized to match the matrix axes height
    aspect = 20
    pad_fraction = 0.5
    divider = make_axes_locatable(axs[1])
    width = axes_size.AxesY(axs[1], aspect=1./aspect)
    pad = axes_size.Fraction(pad_fraction, width)
    cax = divider.append_axes("right", size=width, pad=pad)
    cax.yaxis.tick_right()
    cax.xaxis.set_visible(False)
    plt.colorbar(im, cax=cax)
    corr_coef = cal_corr_coef(overlap_matrix)
    err = np.linalg.norm(qm_overlap_matrix - overlap_matrix)/np.linalg.norm(qm_overlap_matrix) # measure of error in matrix (Relative error)
    axs[1].set_title(f'QM vs. MM normal modes\n Correlation coef. ={corr_coef:.4f}, Error={err:.4f}')
    # (commented-out third panel: QM self-overlap matrix for reference)
    # # move ax x axis to top
    # axs[2].xaxis.tick_top()
    # # move ax x ticks inside
    # axs[2].tick_params(axis="y", direction='in')
    # axs[2].tick_params(axis="x", direction='in')
    # # draw matrix
    # im = axs[2].imshow(qm_overlap_matrix, cmap= 'OrRd', vmin=0,vmax=1)
    # # colorbar
    # aspect = 20
    # pad_fraction = 0.5
    # divider = make_axes_locatable(axs[2])
    # width = axes_size.AxesY(axs[2], aspect=1./aspect)
    # pad = axes_size.Fraction(pad_fraction, width)
    # cax = divider.append_axes("right", size=width, pad=pad)
    # cax.yaxis.tick_right()
    # cax.xaxis.set_visible(False)
    # plt.colorbar(im, cax=cax)
    # axs[2].set_title(f'(QM normal modes for reference)')
    plt.tight_layout()
    plt.subplots_adjust(top=0.85)
    # Counter() is the optimizer's global iteration counter
    # (imported from forcebalance.optimizer).
    fig.suptitle('Hessian: iteration %i\nSystem: %s' % (Counter(), name))
    fig.savefig('vibfreq_scatter_plot_n_overlap_matrix.pdf')
"numpy.sum",
"numpy.abs",
"numpy.ones",
"mpl_toolkits.axes_grid1.axes_size.Fraction",
"numpy.linalg.norm",
"os.path.join",
"builtins.range",
"matplotlib.pyplot.tight_layout",
"mpl_toolkits.axes_grid1.axes_size.AxesY",
"numpy.savetxt",
"matplotlib.pyplot.colorbar",
"forcebalance.output.getLogge... | [((925, 944), 'forcebalance.output.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (934, 944), False, 'from forcebalance.output import getLogger\n'), ((9798, 9811), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (9805, 9811), True, 'import numpy as np\n'), ((11585, 11603), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11601, 11603), True, 'import matplotlib.pyplot as plt\n'), ((11608, 11635), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""mm_vdata.pdf"""'], {}), "('mm_vdata.pdf')\n", (11619, 11635), True, 'import matplotlib.pyplot as plt\n'), ((11871, 11896), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (11889, 11896), True, 'import matplotlib.pyplot as plt\n'), ((11912, 11947), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 6)'}), '(1, 2, figsize=(10, 6))\n', (11924, 11947), True, 'import matplotlib.pyplot as plt\n'), ((13039, 13066), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axs[1]'], {}), '(axs[1])\n', (13058, 13066), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size\n'), ((13079, 13123), 'mpl_toolkits.axes_grid1.axes_size.AxesY', 'axes_size.AxesY', (['axs[1]'], {'aspect': '(1.0 / aspect)'}), '(axs[1], aspect=1.0 / aspect)\n', (13094, 13123), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size\n'), ((13131, 13170), 'mpl_toolkits.axes_grid1.axes_size.Fraction', 'axes_size.Fraction', (['pad_fraction', 'width'], {}), '(pad_fraction, width)\n', (13149, 13170), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size\n'), ((13295, 13320), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'cax'}), '(im, cax=cax)\n', (13307, 13320), True, 'import matplotlib.pyplot as plt\n'), ((14331, 14349), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14347, 14349), True, 'import 
matplotlib.pyplot as plt\n'), ((14354, 14383), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.85)'}), '(top=0.85)\n', (14373, 14383), True, 'import matplotlib.pyplot as plt\n'), ((2016, 2054), 'os.path.join', 'os.path.join', (['self.tgtdir', '"""hdata.txt"""'], {}), "(self.tgtdir, 'hdata.txt')\n", (2028, 2054), False, 'import os\n'), ((2134, 2172), 'os.path.join', 'os.path.join', (['self.tgtdir', '"""vdata.txt"""'], {}), "(self.tgtdir, 'vdata.txt')\n", (2146, 2172), False, 'import os\n'), ((2801, 2832), 'geometric.internal.PrimitiveInternalCoordinates', 'PrimitiveInternalCoordinates', (['m'], {}), '(m)\n', (2829, 2832), False, 'from geometric.internal import PrimitiveInternalCoordinates\n'), ((3022, 3043), 'numpy.loadtxt', 'np.loadtxt', (['self.hfnm'], {}), '(self.hfnm)\n', (3032, 3043), True, 'import numpy as np\n'), ((3298, 3329), 'forcebalance.vibration.read_reference_vdata', 'read_reference_vdata', (['self.vfnm'], {}), '(self.vfnm)\n', (3318, 3329), False, 'from forcebalance.vibration import read_reference_vdata, vib_overlap\n'), ((3936, 3952), 'numpy.sum', 'np.sum', (['self.wts'], {}), '(self.wts)\n', (3942, 3952), True, 'import numpy as np\n'), ((5089, 5110), 'numpy.linalg.multi_dot', 'multi_dot', (['[Bmat, dx]'], {}), '([Bmat, dx])\n', (5098, 5110), False, 'from numpy.linalg import multi_dot\n'), ((5712, 5737), 'numpy.array', 'np.array', (['int_normal_mode'], {}), '(int_normal_mode)\n', (5720, 5737), True, 'import numpy as np\n'), ((9829, 9847), 'builtins.range', 'range', (['(1)', '(size + 1)'], {}), '(1, size + 1)\n', (9834, 9847), False, 'from builtins import range\n'), ((9875, 9887), 'numpy.dot', 'np.dot', (['j', 'A'], {}), '(j, A)\n', (9881, 9887), True, 'import numpy as np\n'), ((9909, 9921), 'numpy.dot', 'np.dot', (['r', 'A'], {}), '(r, A)\n', (9915, 9921), True, 'import numpy as np\n'), ((9943, 9955), 'numpy.dot', 'np.dot', (['j', 'A'], {}), '(j, A)\n', (9949, 9955), True, 'import numpy as np\n'), ((9978, 9991), 
'numpy.dot', 'np.dot', (['r2', 'A'], {}), '(r2, A)\n', (9984, 9991), True, 'import numpy as np\n'), ((10014, 10026), 'numpy.dot', 'np.dot', (['j', 'A'], {}), '(j, A)\n', (10020, 10026), True, 'import numpy as np\n'), ((10050, 10062), 'numpy.dot', 'np.dot', (['r', 'A'], {}), '(r, A)\n', (10056, 10062), True, 'import numpy as np\n'), ((11535, 11551), 'numpy.array', 'np.array', (['mm_xyz'], {}), '(mm_xyz)\n', (11543, 11551), True, 'import numpy as np\n'), ((13377, 13427), 'numpy.linalg.norm', 'np.linalg.norm', (['(qm_overlap_matrix - overlap_matrix)'], {}), '(qm_overlap_matrix - overlap_matrix)\n', (13391, 13427), True, 'import numpy as np\n'), ((13428, 13461), 'numpy.linalg.norm', 'np.linalg.norm', (['qm_overlap_matrix'], {}), '(qm_overlap_matrix)\n', (13442, 13461), True, 'import numpy as np\n'), ((2747, 2786), 'os.path.join', 'os.path.join', (['self.tgtdir', '"""input.mol2"""'], {}), "(self.tgtdir, 'input.mol2')\n", (2759, 2786), False, 'import os\n'), ((5867, 5887), 'numpy.zeros', 'np.zeros', (['self.FF.np'], {}), '(self.FF.np)\n', (5875, 5887), True, 'import numpy as np\n'), ((5893, 5927), 'numpy.zeros', 'np.zeros', (['(self.FF.np, self.FF.np)'], {}), '((self.FF.np, self.FF.np))\n', (5901, 5927), True, 'import numpy as np\n'), ((6605, 6617), 'numpy.dot', 'np.dot', (['V', 'V'], {}), '(V, V)\n', (6611, 6617), True, 'import numpy as np\n'), ((7185, 7192), 'forcebalance.finite_difference.in_fd', 'in_fd', ([], {}), '()\n', (7190, 7192), False, 'from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd\n'), ((7684, 7848), 'numpy.savetxt', 'np.savetxt', (['"""HessianCompare.txt"""', 'hessian_comparison'], {'header': "('%11s %12s %12s %12s' % ('QMHessian', 'MMHessian', 'Delta(MM-QM)',\n 'Weight'))", 'fmt': '"""% 12.6e"""'}), "('HessianCompare.txt', hessian_comparison, header=\n '%11s %12s %12s %12s' % ('QMHessian', 'MMHessian', 'Delta(MM-QM)',\n 'Weight'), fmt='% 12.6e')\n", (7694, 7848), True, 'import numpy as np\n'), ((8346, 8379), 
'scipy.optimize.linear_sum_assignment', 'optimize.linear_sum_assignment', (['a'], {}), '(a)\n', (8376, 8379), False, 'from scipy import optimize\n'), ((10099, 10129), 'numpy.sqrt', 'np.sqrt', (['(n * sumx2 - sumx ** 2)'], {}), '(n * sumx2 - sumx ** 2)\n', (10106, 10129), True, 'import numpy as np\n'), ((10129, 10159), 'numpy.sqrt', 'np.sqrt', (['(n * sumy2 - sumy ** 2)'], {}), '(n * sumy2 - sumy ** 2)\n', (10136, 10159), True, 'import numpy as np\n'), ((10719, 10738), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (10727, 10738), True, 'import numpy as np\n'), ((12473, 12506), 'numpy.abs', 'np.abs', (['(ref_eigvals - freqs_rearr)'], {}), '(ref_eigvals - freqs_rearr)\n', (12479, 12506), True, 'import numpy as np\n'), ((5017, 5029), 'numpy.array', 'np.array', (['dx'], {}), '(dx)\n', (5025, 5029), True, 'import numpy as np\n'), ((8114, 8145), 'numpy.array', 'np.array', (['compute.M_opt.xyzs[0]'], {}), '(compute.M_opt.xyzs[0])\n', (8122, 8145), True, 'import numpy as np\n'), ((9103, 9139), 'builtins.zip', 'zip', (['freqs_rearr', 'normal_modes_rearr'], {}), '(freqs_rearr, normal_modes_rearr)\n', (9106, 9139), False, 'from builtins import zip\n'), ((11979, 12006), 'forcebalance.vibration.vib_overlap', 'vib_overlap', (['engine', 'v1', 'v2'], {}), '(engine, v1, v2)\n', (11990, 12006), False, 'from forcebalance.vibration import read_reference_vdata, vib_overlap\n'), ((12098, 12125), 'forcebalance.vibration.vib_overlap', 'vib_overlap', (['engine', 'v1', 'v2'], {}), '(engine, v1, v2)\n', (12109, 12125), False, 'from forcebalance.vibration import read_reference_vdata, vib_overlap\n'), ((14440, 14449), 'forcebalance.optimizer.Counter', 'Counter', ([], {}), '()\n', (14447, 14449), False, 'from forcebalance.optimizer import Counter\n'), ((6486, 6503), 'numpy.sqrt', 'np.sqrt', (['self.wts'], {}), '(self.wts)\n', (6493, 6503), True, 'import numpy as np\n'), ((6905, 6930), 'forcebalance.finite_difference.fdwrap', 'fdwrap', (['compute', 'mvals', 'p'], {}), 
'(compute, mvals, p)\n', (6911, 6930), False, 'from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd\n'), ((7013, 7032), 'numpy.dot', 'np.dot', (['V', 'dV[p, :]'], {}), '(V, dV[p, :])\n', (7019, 7032), True, 'import numpy as np\n'), ((7123, 7149), 'numpy.dot', 'np.dot', (['dV[p, :]', 'dV[q, :]'], {}), '(dV[p, :], dV[q, :])\n', (7129, 7149), True, 'import numpy as np\n'), ((7626, 7643), 'numpy.sqrt', 'np.sqrt', (['self.wts'], {}), '(self.wts)\n', (7633, 7643), True, 'import numpy as np\n'), ((8218, 8236), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (8232, 8236), True, 'import numpy as np\n'), ((8240, 8258), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (8254, 8258), True, 'import numpy as np\n')] |
import gym, random
import numpy as np
import copy
from astar import AStar
from tqdm import tqdm
def _find_stack(stacks, item):
    """Locate the stack whose top element equals *item*.

    Returns a ``(stack, index)`` pair, or ``(None, None)`` when no stack
    has *item* on top.
    """
    return next(
        ((candidate, position) for position, candidate in enumerate(stacks)
         if candidate[0] == item),
        (None, None),
    )
def _get_state_string(state):
    """Return the state as a list of tuples (one tuple per stack)."""
    return list(map(tuple, state))
def _state_eq(s1, s2):
    """True when both states contain the same stacks, ignoring order."""
    first = set(map(tuple, s1))
    second = set(map(tuple, s2))
    return first == second
def _to_set(s):
    """Freeze a state into a hashable frozenset of stack tuples."""
    return frozenset(map(tuple, s))
def _to_list(s):
    """Thaw a state into a mutable list of numpy arrays (one per stack)."""
    return list(map(np.array, s))
def _move(s, what, where):
    """Move item *what* onto the stack topped by *where* (0 = ground).

    Args:
        s: state as a list of numpy arrays, each with its top item first.
        what: the item to move; must currently be on top of some stack.
        where: the item to place it on, or 0 to start a new ground stack.

    Returns:
        The resulting state as a frozenset of tuples, or None for an
        invalid move (what == where, *what* not on top, or *where* not
        on top of any stack).
    """
    if what == where:
        return None
    stack_from, stack_from_id = _find_stack(s, what)
    if stack_from is None: # invalid action: *what* is not on top of any stack
        return None
    # A shallow copy is sufficient: np.delete/np.insert below return NEW
    # arrays, so the originals in *s* are never mutated.
    s_ = copy.copy(s)
    if where == 0: # to the ground, create a new stack
        # FIX: np.int is a deprecated alias removed in NumPy 1.24; the
        # builtin int gives the identical default integer dtype.
        stack_to = np.empty(0, dtype=int)
        s_.append(stack_to)
        stack_to_id = len(s_) - 1
    else:
        stack_to, stack_to_id = _find_stack(s, where)
        if stack_to is None: # invalid action
            return None
    # move the item
    s_[stack_from_id] = np.delete(stack_from, 0)
    s_[stack_to_id] = np.insert(stack_to, 0, what)
    # delete a potentially empty stack
    if len(s_[stack_from_id]) == 0:
        del s_[stack_from_id]
    return _to_set(s_)
class BoxworldPlan(AStar):
    """A* search over blocks-world states.

    A state is a frozenset of tuples; each tuple is one stack with its top
    item first.  Destination ``0`` means "put the item on the ground"
    (i.e. start a new stack).
    """
    def heuristic_cost_estimate(self, s, g):
        # Estimate remaining moves: for every stack in *s*, count the items
        # that are not already sitting correctly, matched bottom-up against
        # the goal stack that shares the same bottom item.
        h = 0
        def common_chars(a, b):
            # Number of matching items counted from the bottom (end) of
            # both stacks.
            indx = 1
            while indx <= min(len(a), len(b)):
                if a[-indx] != b[-indx]:
                    return indx - 1
                indx += 1
            return indx - 1
        for sx in s:
            found = False
            # print(f"{sx=}", end=": ")
            for gx in g:
                # find the goal stack with the same bottom item
                if sx[-1] == gx[-1]:
                    # count the items already in place (from the bottom up)
                    found = True
                    cmn = common_chars(sx, gx)
                    h += len(sx) - cmn
                    # print(h)
                    break
            if not found:
                # no goal stack shares this base: every item must move
                h += len(sx)
            # print(f"! {h}")
        return h
    def distance_between(self, n1, n2):
        # Every move has unit cost.
        return 1
    def neighbors(self, node):
        # All states reachable by moving one top item onto another stack's
        # top item, or onto the ground ([[0]] acts as the ground target).
        # tqdm_main.update()
        ngbrs = []
        s = _to_list(node)
        for x1 in s:
            n1 = x1[0]
            for x2 in s + [[0]]:
                n2 = x2[0]
                s_ = _move(s, n1, n2)
                if s_ is not None:
                    ngbrs.append(s_)
        return ngbrs
    def is_goal_reached(self, current, goal):
        return current == goal
    def plan(self, start, goal):
        # Freeze both states and delegate to AStar.astar.
        s = _to_set(start)
        g = _to_set(goal)
        return self.astar(s, g)
if __name__ == '__main__':
    # Smoke test: plan from a random BoxworldEnv state to its goal.
    from boxworld import BoxworldEnv
    env = BoxworldEnv(box_num_obj=11, box_max_steps=100)
    env.reset()
    planner = BoxworldPlan()
    # NOTE(review): these two hand-written states are dead code -- they are
    # overwritten by env.state / env.goal a few lines below.
    state = frozenset({(1, 3), (2,), (4,)})
    goal = frozenset({(1, 2, 4, 3)})
    # for s in s_:
    #     h = planner.heuristic_cost_estimate(s, g)
    #     print(h)
    # exit()
    state = env.state
    goal = env.goal
    print(state, goal)
    # tqdm_main = tqdm()
    # NOTE(review): unpacking plan()'s result into (path, path_len) assumes
    # AStar.astar returns a 2-tuple -- confirm against the astar package
    # version in use.
    path, path_len = planner.plan(state, goal)
    print("---")
    print(list(path))
    print(path_len)
| [
"numpy.delete",
"numpy.empty",
"copy.copy",
"numpy.insert",
"numpy.array",
"boxworld.BoxworldEnv"
] | [((689, 701), 'copy.copy', 'copy.copy', (['s'], {}), '(s)\n', (698, 701), False, 'import copy\n'), ((1004, 1028), 'numpy.delete', 'np.delete', (['stack_from', '(0)'], {}), '(stack_from, 0)\n', (1013, 1028), True, 'import numpy as np\n'), ((1050, 1078), 'numpy.insert', 'np.insert', (['stack_to', '(0)', 'what'], {}), '(stack_to, 0, what)\n', (1059, 1078), True, 'import numpy as np\n'), ((2299, 2345), 'boxworld.BoxworldEnv', 'BoxworldEnv', ([], {'box_num_obj': '(11)', 'box_max_steps': '(100)'}), '(box_num_obj=11, box_max_steps=100)\n', (2310, 2345), False, 'from boxworld import BoxworldEnv\n'), ((489, 500), 'numpy.array', 'np.array', (['o'], {}), '(o)\n', (497, 500), True, 'import numpy as np\n'), ((772, 797), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.int'}), '(0, dtype=np.int)\n', (780, 797), True, 'import numpy as np\n')] |
import numpy as np
import copy
from parameters_combinator import ParametersCombinator
import ipdb
def model_factory(model_type, model_config):
    """Instantiate an HMM of the requested backend.

    Args:
        model_type: either "hmmlearn's HMM" or "BNPY's HMM".
        model_config: keyword arguments forwarded to the model constructor.

    Returns:
        The configured model instance.  NOTE(review): an unrecognised
        model_type falls through and silently returns None -- consider
        raising instead.
    """
    if model_type == 'hmmlearn\'s HMM':
        import hmmlearn.hmm
        model = hmmlearn.hmm.GaussianHMM(
            params="mct",
            init_params="cmt",
            **model_config
        )
        n_components = model.n_components
        # Force the chain to always start in state 0.
        start_prob = np.zeros(n_components)
        start_prob[0] = 1
        model.startprob_ = start_prob
        return model
    elif model_type == 'BNPY\'s HMM':
        import birl_hmm.bnpy_hmm_wrapper.hmm
        model = birl_hmm.bnpy_hmm_wrapper.hmm.HongminHMM(**model_config)
        return model
def get_model_generator(model_type, model_config):
    """Lazily yield ``(model, params)`` pairs, one per parameter
    combination produced by ParametersCombinator."""
    combinator = ParametersCombinator(model_config)
    for params in combinator.iteritems():
        yield model_factory(model_type, params), params
| [
"parameters_combinator.ParametersCombinator",
"numpy.zeros"
] | [((760, 794), 'parameters_combinator.ParametersCombinator', 'ParametersCombinator', (['model_config'], {}), '(model_config)\n', (780, 794), False, 'from parameters_combinator import ParametersCombinator\n'), ((414, 436), 'numpy.zeros', 'np.zeros', (['n_components'], {}), '(n_components)\n', (422, 436), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from graphviz import Digraph
from math import log2
from sklearn.datasets import load_iris
class TreeNode:
    """A binary decision-tree node.

    The tree-building code attaches extra attributes dynamically
    (``type``, ``column``, ``value``); pruning code also reads and writes
    ``leftChild`` / ``rightChild`` directly, so those names are part of
    the public interface.
    """

    def __init__(self):
        # Children are attached later via insertLeft / insertRight.
        self.leftChild = None
        self.rightChild = None

    def insertLeft(self, newNode):
        """Attach *newNode* as the left child."""
        self.leftChild = newNode

    def insertRight(self, newNode):
        """Attach *newNode* as the right child."""
        self.rightChild = newNode

    def getLeftChild(self):
        """Return the left child (or None)."""
        return self.leftChild

    def getRightChild(self):
        """Return the right child (or None)."""
        return self.rightChild
# Root node of the decision tree
root = TreeNode()
# Data preprocessing
iris = load_iris()
data = pd.DataFrame(iris['data'])
data[4] = iris['target']
# Shuffle the original data set (sampling the whole frame without replacement)
data: pd.DataFrame = data.sample(frac=1.0)
data.columns = ["data1", "data2", "data3", "data4", "target"]
# Split into training and test sets at a 3:1 ratio
split = int(150 * 0.75)
data_train: pd.DataFrame = data.iloc[0: split, :]
data_train = data_train.reset_index(drop=True)
data_test: pd.DataFrame = data.iloc[split: 150, :]
data_test = data_test.reset_index(drop=True)
# Persist the randomly generated training / test sets
data_train.to_csv('train.csv')
data_test.to_csv('test.csv')
# Reload the training set from disk
data_train = pd.read_csv("train.csv", encoding='utf-8', index_col=0, header=0)
# Compute information entropy
def ent(sect):
    """Shannon entropy (base 2) of the ``target`` column of *sect*.

    Samples are bucketed into three classes -- target 0.0, target 1.0, and
    everything else (class 2) -- matching the iris label encoding used in
    this script.

    Args:
        sect: DataFrame with a numeric ``target`` column.

    Returns:
        Entropy in bits; 0 for an empty frame (robustness guard -- the
        previous implementation divided by zero there).
    """
    counts = [0, 0, 0]
    # Iterate the column directly instead of iterrows(): identical tally,
    # far less per-row overhead.
    for kind in sect["target"]:
        if kind == 0.0:
            counts[0] += 1
        elif kind == 1.0:
            counts[1] += 1
        else:
            counts[2] += 1
    total = sum(counts)
    if total == 0:  # empty split carries no information
        return 0
    ret = 0
    for cnt in counts:
        p = cnt / total
        if p != 0:  # 0 * log2(0) is defined as 0
            ret = ret - p * log2(p)
    return ret
# Information gain of a candidate binary split on the current attribute
def gain(secta, sectb, data):
    """Information gain of splitting *data* into the halves *secta*/*sectb*."""
    total = len(secta) + len(sectb)
    weighted_child_entropy = (
        (len(secta) / total) * ent(secta)
        + (len(sectb) / total) * ent(sectb)
    )
    return ent(data) - weighted_child_entropy
# Check whether the current data set contains only one class (fully decided)
def is_decided(data):
    """True when every sample in *data* belongs to a single class.

    Replaces the former Python-level iterrows() scan with a single
    vectorised nunique() call; an empty frame is considered decided
    (vacuously pure) instead of raising IndexError.
    """
    return data["target"].nunique(dropna=False) <= 1
# Build the decision tree
def decide(data, node):
    """Grow the decision tree rooted at *node* from the samples in *data*.

    For each attribute (data1..data4) every adjacent split position is
    scored by information gain; the best split is recorded on *node*
    (type=1, column, value = threshold) and both halves are either turned
    into class leaves (type=2) or recursed into.
    """
    # For every attribute, find the split position with the best gain.
    maxpos = [0, 0, 0, 0] # best split position per attribute
    maxgain = [-1, -1, -1, -1] # corresponding information-gain values
    for col in range(1, 5):
        colname = 'data' + ('%d' % col)
        tmp = data.sort_values(colname)
        row = len(tmp)
        for i in range(1, row):
            val = gain(tmp[0:i], tmp[i:row], tmp)
            if val > maxgain[col-1]:
                maxgain[col-1] = val
                maxpos[col-1] = i
    # Split on the candidate with the largest gain and store the decision
    # on the current tree node.
    maxval = max(maxgain)
    for i in range(4):
        if maxgain[i] == maxval:
            colname = 'data' + ('%d' % (i+1))
            # NOTE(review): this re-sorts the *last* tmp (sorted by data4)
            # by the winning column; with tied attribute values the row
            # order may differ from the one used to compute maxpos -- confirm.
            tmp = tmp.sort_values(colname) # re-sort by the winning attribute
            judgestd = (tmp[colname].iloc(0)[maxpos[i]] + tmp[colname].iloc(0)[maxpos[i] -1]) / 2 # threshold = mean of the two values around the split point
            # print("data%d ? %f" % (i+1, judgestd))
            node.type = 1
            node.column = colname
            node.value = judgestd
            # Lower half (values below the threshold)
            left = tmp[:maxpos[i]]
            if is_decided(left): # already pure: attach a class leaf
                # print("data%d < %f: %d" % (i + 1, judgestd, left["target"].iloc(0)[0]))
                child = TreeNode()
                child.type = 2
                child.value = left["target"].iloc(0)[0]
                node.insertLeft(child)
            else: # keep growing the tree on the lower half
                # print("data%d < %f, " % (i + 1, judgestd), end='')
                child = TreeNode()
                node.insertLeft(child)
                decide(tmp[:maxpos[i]], node.getLeftChild())
            # Upper half (values above the threshold)
            right = tmp[maxpos[i]:]
            if is_decided(right): # already pure: attach a class leaf
                # print("data%d > %f: %d" % (i + 1, judgestd, right["target"].iloc(0)[0]))
                child = TreeNode()
                child.type = 2
                child.value = right["target"].iloc(0)[0]
                node.insertRight(child)
            else: # keep growing the tree on the upper half
                # print("data%d > %f, " % (i + 1, judgestd), end='')
                child = TreeNode()
                node.insertRight(child)
                decide(tmp[maxpos[i]:], node.getRightChild())
            break
# Print the decision tree
def dfs_print(node, depth):
    """Pretty-print the subtree rooted at *node*, indented by *depth*."""
    if node.type == 1: # type=1: internal decision node (compares an attribute)
        print("比较%s属性值: %f" % (node.column, node.value))
    elif node.type == 2: # type=2: leaf node carrying the decided class
        print("判断该样本为: %d类型" % (node.value))
    if node.getLeftChild() != None:
        for _ in range(depth+1):
            print("  ", end='')
        print("若小于, 则", end='')
        dfs_print(node.getLeftChild(), depth+1)
    if node.getRightChild() != None:
        for _ in range(depth+1):
            print("  ", end='')
        print("若大于, 则", end='')
        dfs_print(node.getRightChild(), depth+1)
# Render the decision tree with graphviz
def dfs_draw(node, dot, pname):
    """Recursively add *node* and its subtree to the graphviz digraph *dot*.

    Args:
        node: TreeNode to render; type 1 = decision node, type 2 = leaf.
        dot: graphviz.Digraph being populated.
        pname: name of the parent graph node to connect from.
    """
    # FIX: use the object's id as the graph-node name. It is unique per
    # tree node, unlike the previous random integer which could collide
    # and silently merge two tree nodes into one graph node.
    nname = str(id(node))
    if node.type == 1:  # decision node: label is "column ? threshold"
        dot.node(name=nname, label=node.column+" ? "+str(node.value))
    elif node.type == 2:  # leaf node: label is the decided class
        dot.node(name=nname, label="type:"+str(node.value))
    dot.edge(pname, nname)
    if node.getLeftChild() is not None:
        dfs_draw(node.getLeftChild(), dot, nname)
    if node.getRightChild() is not None:
        dfs_draw(node.getRightChild(), dot, nname)
# Classify a single sample with the decision tree
def judge(root, row):
    """Walk the tree from *root* and return the predicted class for *row*."""
    node = root
    # Descend until a leaf (type 2) is reached.
    while node.type != 2:
        goes_left = row[node.column] < node.value
        node = node.getLeftChild() if goes_left else node.getRightChild()
    return node.value
# Evaluate the decision tree on a data set
def test(root, sect):
    """Classify every row of *sect* with the tree at *root*, print the
    accuracy, and return it as a fraction."""
    correct = sum(
        1 for _, row in sect.iterrows() if judge(root, row) == row["target"]
    )
    print("决策树的准确率: %d/%d(%f)" % (correct, len(sect), correct/len(sect)))
    return correct/len(sect)
# Prune the tree using the test set
def try_cut(root, node, sect):
    """Try replacing *node* with a class leaf; keep the change only if
    accuracy on *sect* does not drop.

    Each candidate class (0, 1, 2) is tried in turn as the leaf value; the
    original decision node is restored whenever accuracy decreases.
    Afterwards recurses into the (possibly restored) children.
    """
    # Leaf nodes need no pruning.
    if node.type == 2:
        return
    # Back up the current decision node so it can be restored later.
    sv_node = TreeNode()
    sv_node.leftChild = node.leftChild
    sv_node.rightChild = node.rightChild
    sv_node.type = node.type
    sv_node.column = node.column
    sv_node.value = node.value
    # Accuracy on the test set before touching this node.
    ori_correct = test(root, sect)
    # Turn the decision node into a leaf of each iris class in turn.
    for classtype in range(0,3):
        node.type = 2
        node.value = float(classtype)
        node.insertLeft(None)
        node.insertRight(None)
        now_correct = test(root, sect) # accuracy after the replacement
        if now_correct < ori_correct: # accuracy dropped: restore the original node
            node.leftChild = sv_node.leftChild
            node.rightChild = sv_node.rightChild
            node.type = sv_node.type
            node.column = sv_node.column
            node.value = sv_node.value
    # Recurse into whatever children remain.
    if node.getLeftChild() != None:
        try_cut(root, node.getLeftChild(), sect)
    if node.getRightChild() != None:
        try_cut(root, node.getRightChild(), sect)
# main
# Build the decision tree from the training set
decide(data_train, root)
# Print the tree, render it with graphviz, and score it on the test set
print("------------------------------")
dfs_print(root, 0)
dot = Digraph(name="pre", format="png")
dot.node(name="root", label="root")
dfs_draw(root, dot, "root")
# NOTE(review): view() already renders and opens the file; the following
# render(view=True) does it a second time -- probably redundant.
dot.view(filename="pre")
dot.render(filename="pre", view=True)
data_test = pd.read_csv("test.csv", encoding='utf-8', index_col=0, header=0)
test(root, data_test)
print("------------------------------")
# Try pre-pruning against the test set
try_cut(root, root, data_test)
# Print and render the pruned tree, then score it again
print("------------------------------")
dfs_print(root, 0)
dot = Digraph(name="cut", format="png")
dot.node(name="root", label="root")
dfs_draw(root, dot, "root")
dot.view(filename="cut")
dot.render(filename="cut", view=True)
test(root, data_test)
print("------------------------------")
| [
"pandas.DataFrame",
"sklearn.datasets.load_iris",
"pandas.read_csv",
"numpy.random.randint",
"graphviz.Digraph",
"math.log2"
] | [((537, 548), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (546, 548), False, 'from sklearn.datasets import load_iris\n'), ((557, 583), 'pandas.DataFrame', 'pd.DataFrame', (["iris['data']"], {}), "(iris['data'])\n", (569, 583), True, 'import pandas as pd\n'), ((1061, 1126), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)', 'header': '(0)'}), "('train.csv', encoding='utf-8', index_col=0, header=0)\n", (1072, 1126), True, 'import pandas as pd\n'), ((7225, 7258), 'graphviz.Digraph', 'Digraph', ([], {'name': '"""pre"""', 'format': '"""png"""'}), "(name='pre', format='png')\n", (7232, 7258), False, 'from graphviz import Digraph\n'), ((7399, 7463), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)', 'header': '(0)'}), "('test.csv', encoding='utf-8', index_col=0, header=0)\n", (7410, 7463), True, 'import pandas as pd\n'), ((7643, 7676), 'graphviz.Digraph', 'Digraph', ([], {'name': '"""cut"""', 'format': '"""png"""'}), "(name='cut', format='png')\n", (7650, 7676), False, 'from graphviz import Digraph\n'), ((4940, 4970), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000000)'], {}), '(0, 10000000)\n', (4957, 4970), True, 'import numpy as np\n'), ((1549, 1557), 'math.log2', 'log2', (['p1'], {}), '(p1)\n', (1553, 1557), False, 'from math import log2\n'), ((1597, 1605), 'math.log2', 'log2', (['p2'], {}), '(p2)\n', (1601, 1605), False, 'from math import log2\n'), ((1645, 1653), 'math.log2', 'log2', (['p3'], {}), '(p3)\n', (1649, 1653), False, 'from math import log2\n')] |
import numpy
from chainer.backends import cuda
from chainer import optimizer
# Module-level defaults shared by SMORMS3 and SMORMS3Rule.
_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.lr = 0.001
_default_hyperparam.eps = 1e-16
class SMORMS3Rule(optimizer.UpdateRule):
    """Update rule for <NAME>'s SMORMS3.
    See :class:`~chainer.optimizers.SMORMS3` for the default values of the
    hyperparameters.
    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        lr (float): Learning rate.
        eps (float): Small value for the numerical stability.
    """
    # Lazily compiled CUDA kernel, cached on the class and shared by all
    # instances (see update_core_gpu).
    _kernel = None
    def __init__(self, parent_hyperparam=None, lr=None, eps=None):
        super(SMORMS3Rule, self).__init__(
            parent_hyperparam or _default_hyperparam)
        if lr is not None:
            self.hyperparam.lr = lr
        if eps is not None:
            self.hyperparam.eps = eps
    def init_state(self, param):
        # Allocate the state arrays on the same device as the parameter.
        xp = cuda.get_array_module(param.data)
        with cuda.get_device_from_array(param.data):
            self.state['mem'] = xp.ones_like(param.data)  # effective memory length
            self.state['g'] = xp.zeros_like(param.data)  # running mean of gradients
            self.state['g2'] = xp.zeros_like(param.data)  # running mean of squared gradients
    def update_core_cpu(self, param):
        grad = param.grad
        if grad is None:
            return
        mem, g, g2 = self.state['mem'], self.state['g'], self.state['g2']
        # Leaky-average decay rate derived from the memory length.
        r = 1 / (mem + 1)
        g = (1 - r) * g + r * grad
        g2 = (1 - r) * g2 + r * grad * grad
        # x = g^2 / (g2 + eps): consistency of recent gradients, capped by
        # lr when used as the effective step size below.
        x = g * g / (g2 + self.hyperparam.eps)
        param.data -= grad * numpy.minimum(x, self.hyperparam.lr) \
            / (numpy.sqrt(g2) + self.hyperparam.eps)
        mem = 1 + mem * (1 - x)
        self.state['mem'], self.state['g'], self.state['g2'] = mem, g, g2
    def update_core_gpu(self, param):
        # Same update as update_core_cpu, fused into one elementwise kernel.
        grad = param.grad
        if grad is None:
            return
        if SMORMS3Rule._kernel is None:
            SMORMS3Rule._kernel = cuda.elementwise(
                'T grad, T lr, T eps',
                'T param, T mem, T g, T g2',
                '''T r, x;
                   r = 1 / (mem + 1);
                   g = (1 - r) * g + r * grad;
                   g2 = (1 - r) * g2 + r * grad * grad;
                   x = g * g / (g2 + eps);
                   param -= grad * min(lr, x) / (sqrt(g2) + eps);
                   mem = 1 + mem * (1 - x)
                   ''',
                'smorms3')
        SMORMS3Rule._kernel(
            grad, self.hyperparam.lr, self.hyperparam.eps, param.data,
            self.state['mem'], self.state['g'], self.state['g2'])
class SMORMS3(optimizer.GradientMethod):
    """<NAME>'s SMORMS3.
    See http://sifter.org/~simon/journal/20150420.html.
    Args:
        lr (float): Learning rate.
        eps (float): Small value for the numerical stability.
    """
    def __init__(self, lr=_default_hyperparam.lr, eps=_default_hyperparam.eps):
        super(SMORMS3, self).__init__()
        self.hyperparam.lr = lr
        self.hyperparam.eps = eps
    # Expose the hyperparameters as plain attributes (e.g. ``opt.lr``).
    lr = optimizer.HyperparameterProxy('lr')
    eps = optimizer.HyperparameterProxy('eps')
    def create_update_rule(self):
        # One SMORMS3Rule per parameter, all sharing this hyperparameter set.
        return SMORMS3Rule(self.hyperparam)
| [
"numpy.minimum",
"chainer.backends.cuda.get_array_module",
"chainer.backends.cuda.get_device_from_array",
"chainer.optimizer.HyperparameterProxy",
"numpy.sqrt",
"chainer.backends.cuda.elementwise",
"chainer.optimizer.Hyperparameter"
] | [((102, 128), 'chainer.optimizer.Hyperparameter', 'optimizer.Hyperparameter', ([], {}), '()\n', (126, 128), False, 'from chainer import optimizer\n'), ((3058, 3093), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""lr"""'], {}), "('lr')\n", (3087, 3093), False, 'from chainer import optimizer\n'), ((3104, 3140), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""eps"""'], {}), "('eps')\n", (3133, 3140), False, 'from chainer import optimizer\n'), ((975, 1008), 'chainer.backends.cuda.get_array_module', 'cuda.get_array_module', (['param.data'], {}), '(param.data)\n', (996, 1008), False, 'from chainer.backends import cuda\n'), ((1022, 1060), 'chainer.backends.cuda.get_device_from_array', 'cuda.get_device_from_array', (['param.data'], {}), '(param.data)\n', (1048, 1060), False, 'from chainer.backends import cuda\n'), ((1979, 2395), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['"""T grad, T lr, T eps"""', '"""T param, T mem, T g, T g2"""', '"""T r, x;\n r = 1 / (mem + 1);\n g = (1 - r) * g + r * grad;\n g2 = (1 - r) * g2 + r * grad * grad;\n x = g * g / (g2 + eps);\n param -= grad * min(lr, x) / (sqrt(g2) + eps);\n mem = 1 + mem * (1 - x)\n """', '"""smorms3"""'], {}), '(\'T grad, T lr, T eps\', \'T param, T mem, T g, T g2\',\n """T r, x;\n r = 1 / (mem + 1);\n g = (1 - r) * g + r * grad;\n g2 = (1 - r) * g2 + r * grad * grad;\n x = g * g / (g2 + eps);\n param -= grad * min(lr, x) / (sqrt(g2) + eps);\n mem = 1 + mem * (1 - x)\n """\n , \'smorms3\')\n', (1995, 2395), False, 'from chainer.backends import cuda\n'), ((1597, 1633), 'numpy.minimum', 'numpy.minimum', (['x', 'self.hyperparam.lr'], {}), '(x, self.hyperparam.lr)\n', (1610, 1633), False, 'import numpy\n'), ((1651, 1665), 'numpy.sqrt', 'numpy.sqrt', (['g2'], {}), '(g2)\n', (1661, 1665), False, 'import numpy\n')] |
"""
SMPTE 240M Colourspace
======================
Defines the *SMPTE 240M* colourspace:
- :attr:`colour.models.RGB_COLOURSPACE_SMPTE_240M`.
References
----------
- :cite:`SocietyofMotionPictureandTelevisionEngineers1999b` : Society of
Motion Picture and Television Engineers. (1999). ANSI/SMPTE 240M-1995 -
Signal Parameters - 1125-Line High-Definition Production Systems (pp. 1-7).
http://car.france3.mars.free.fr/HD/INA-%2026%20jan%2006/\
SMPTE%20normes%20et%20confs/s240m.pdf
"""
from __future__ import annotations
import numpy as np
from colour.colorimetry import CCS_ILLUMINANTS
from colour.hints import NDArray
from colour.models.rgb import (
RGB_Colourspace,
normalised_primary_matrix,
oetf_SMPTE240M,
eotf_SMPTE240M,
)
# Package authorship metadata, following the colour-science house convention.
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
# Explicit public API of this module: the primaries, whitepoint, conversion
# matrices and the assembled colourspace object defined below.
__all__ = [
    "PRIMARIES_SMPTE_240M",
    "WHITEPOINT_NAME_SMPTE_240M",
    "CCS_WHITEPOINT_SMPTE_240M",
    "MATRIX_SMPTE_240M_TO_XYZ",
    "MATRIX_XYZ_TO_SMPTE_240M",
    "RGB_COLOURSPACE_SMPTE_240M",
]
PRIMARIES_SMPTE_240M: NDArray = np.array(
    [[0.6300, 0.3400], [0.3100, 0.5950], [0.1550, 0.0700]]
)
"""
*SMPTE 240M* colourspace primaries, one row per primary in red, green, blue
order.
"""
WHITEPOINT_NAME_SMPTE_240M: str = "D65"
"""
*SMPTE 240M* colourspace whitepoint name.
"""
# Look up the "D65" whitepoint chromaticity coordinates for the
# CIE 1931 2 Degree Standard Observer from the package-wide illuminants table.
CCS_WHITEPOINT_SMPTE_240M: NDArray = CCS_ILLUMINANTS[
    "CIE 1931 2 Degree Standard Observer"
][WHITEPOINT_NAME_SMPTE_240M]
"""
*SMPTE 240M* colourspace whitepoint chromaticity coordinates.
"""
# Derive the RGB -> XYZ conversion matrix from the primaries and whitepoint
# defined above.
MATRIX_SMPTE_240M_TO_XYZ: NDArray = normalised_primary_matrix(
    PRIMARIES_SMPTE_240M, CCS_WHITEPOINT_SMPTE_240M
)
"""
*SMPTE 240M* colourspace to *CIE XYZ* tristimulus values matrix.
"""
# The matrix inverse performs the opposite conversion, XYZ -> RGB.
MATRIX_XYZ_TO_SMPTE_240M: NDArray = np.linalg.inv(MATRIX_SMPTE_240M_TO_XYZ)
"""
*CIE XYZ* tristimulus values to *SMPTE 240M* colourspace matrix.
"""
# Assemble the colourspace object from the constants above, together with the
# SMPTE 240M OETF/EOTF transfer functions imported from colour.models.rgb.
RGB_COLOURSPACE_SMPTE_240M: RGB_Colourspace = RGB_Colourspace(
    "SMPTE 240M",
    PRIMARIES_SMPTE_240M,
    CCS_WHITEPOINT_SMPTE_240M,
    WHITEPOINT_NAME_SMPTE_240M,
    MATRIX_SMPTE_240M_TO_XYZ,
    MATRIX_XYZ_TO_SMPTE_240M,
    oetf_SMPTE240M,
    eotf_SMPTE240M,
)
RGB_COLOURSPACE_SMPTE_240M.__doc__ = """
*SMPTE 240M* colourspace.
References
----------
:cite:`SocietyofMotionPictureandTelevisionEngineers1999b`,
"""
| [
"colour.models.rgb.RGB_Colourspace",
"colour.models.rgb.normalised_primary_matrix",
"numpy.linalg.inv",
"numpy.array"
] | [((1266, 1320), 'numpy.array', 'np.array', (['[[0.63, 0.34], [0.31, 0.595], [0.155, 0.07]]'], {}), '([[0.63, 0.34], [0.31, 0.595], [0.155, 0.07]])\n', (1274, 1320), True, 'import numpy as np\n'), ((1737, 1811), 'colour.models.rgb.normalised_primary_matrix', 'normalised_primary_matrix', (['PRIMARIES_SMPTE_240M', 'CCS_WHITEPOINT_SMPTE_240M'], {}), '(PRIMARIES_SMPTE_240M, CCS_WHITEPOINT_SMPTE_240M)\n', (1762, 1811), False, 'from colour.models.rgb import RGB_Colourspace, normalised_primary_matrix, oetf_SMPTE240M, eotf_SMPTE240M\n'), ((1928, 1967), 'numpy.linalg.inv', 'np.linalg.inv', (['MATRIX_SMPTE_240M_TO_XYZ'], {}), '(MATRIX_SMPTE_240M_TO_XYZ)\n', (1941, 1967), True, 'import numpy as np\n'), ((2088, 2290), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""SMPTE 240M"""', 'PRIMARIES_SMPTE_240M', 'CCS_WHITEPOINT_SMPTE_240M', 'WHITEPOINT_NAME_SMPTE_240M', 'MATRIX_SMPTE_240M_TO_XYZ', 'MATRIX_XYZ_TO_SMPTE_240M', 'oetf_SMPTE240M', 'eotf_SMPTE240M'], {}), "('SMPTE 240M', PRIMARIES_SMPTE_240M,\n CCS_WHITEPOINT_SMPTE_240M, WHITEPOINT_NAME_SMPTE_240M,\n MATRIX_SMPTE_240M_TO_XYZ, MATRIX_XYZ_TO_SMPTE_240M, oetf_SMPTE240M,\n eotf_SMPTE240M)\n", (2103, 2290), False, 'from colour.models.rgb import RGB_Colourspace, normalised_primary_matrix, oetf_SMPTE240M, eotf_SMPTE240M\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.